pipeline_similiar.py
from representations import get_representation, store_representations
from pyspark.sql import SparkSession
from pyspark import SparkContext, SparkConf
import matplotlib.pyplot as plt
from pyspark.sql import functions as F
from pyspark.sql.types import ArrayType, StringType, DoubleType, IntegerType
import pandas as pd
import ssh.shingle as sh
import ssh.sketch as sk
import data_handler
import importlib
import ssh.hashing as mh
importlib.reload(mh)
import identify_similarities as ids
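# Configure a local Spark context: use all available cores with modest driver/executor memory.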
conf = SparkConf()
conf.setAppName("pipeline_similiar")
conf.setMaster("local[*]")
conf.set("spark.driver.memory", "2G")
conf.set("spark.driver.maxResultSize", "2g")
conf.set("spark.executor.memory", "1G")
sc = SparkContext(conf=conf)
spark = SparkSession.builder.getOrCreate()
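# Load the raw process data; data_handler.load_data is a project-local helper
# (assumed here to return a Spark DataFrame of process records with name and timing columns).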
process_df = data_handler.load_data("res/output.txt")
# name similarity:
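# Pairs (idA, idB) are kept as name-similar candidates when their Jaccard distance over
# names falls below theta_names; ids.similarity_names is a project-local helper assumed
# to emit the JaccardDistance column filtered on below.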
theta_names = 0.4
simi_names = ids.similarity_names(process_df)
simi_names = simi_names.select(F.col("idA"), F.col("idB")).where(f"JaccardDistance < {theta_names}")
#process_df.show(10)
# PREPARE TIME DATA:
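# data_handler.get_relative_time presumably converts each process's timestamps into
# relative times and adds a per-process count column (count_agg_process_time), which is
# used below to group processes of equal length.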
time_agg = data_handler.get_relative_time(process_df)
process_df.printSchema()
# BSH
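# Sketching parameters consumed by ssh.sketch.bsh and ssh.hashing.add_section_minhashing;
# their exact semantics are defined in those modules.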
N = 1
t = 6
d = 1
p = 30
start_times = [1, 4, 7, 10, 13, 16]
theta_times = 0.7
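# Collect the distinct process lengths (count_agg_process_time) so that each process is
# only compared against processes with the same number of time steps.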
listids = []
for x in time_agg.select("count_agg_process_time").distinct().collect():
    for _, v in x.asDict().items():
        listids.append(v)
# create one DataFrame per distinct process length
processes_by_length = [time_agg.where(time_agg.count_agg_process_time == x) for x in listids]
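# For each length group: build a sketch over "process_time" with sk.bsh, add section
# minhash signatures (mh.add_section_minhashing), estimate section-wise Jaccard similarity,
# and keep pairs whose section_minhash_real value falls below theta_times.
# Window size and overlap grow with the group index so longer processes get larger windows.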
res_times = None
for i, proc_df in enumerate(processes_by_length):
    if i < 10:
        window_size = 2
        overlap = 1
    else:
        window_size = int(i / 5)
        overlap = int(window_size / 2)
    proc_df = sk.bsh(proc_df, "process_time", window_size=window_size, overlap=overlap, number_clusters=15)
    proc_df = mh.add_section_minhashing(proc_df, N, t, p, d, start_times)
    proc_df = mh.compute_section_jaccard(proc_df)
    if res_times is None:
        res_times = proc_df.select(F.col("idA"), F.col("idB")).where(f"section_minhash_real < {theta_times}")
    else:
        res_times = res_times.union(proc_df.select(F.col("idA"), F.col("idB")).where(f"section_minhash_real < {theta_times}"))
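# Persist the candidate pairs for downstream analysis.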
res_times.write.mode('overwrite').parquet('res/temp_data/similiar_times.parquet')
simi_names.write.mode('overwrite').parquet('res/temp_data/similiar_names.parquet')
#res.write.mode('overwrite').parquet(f'res/temp_data/similiar.parquet')
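# Downstream usage sketch (hypothetical, not part of this pipeline): the two parquet
# outputs can be read back and intersected to find processes similar in both name and time:
#   names = spark.read.parquet("res/temp_data/similiar_names.parquet")
#   times = spark.read.parquet("res/temp_data/similiar_times.parquet")
#   both = names.join(times, on=["idA", "idB"], how="inner")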