minersunion committed on
Commit
c1b2b40
·
1 Parent(s): 08c8ee9

adjusted preview

Browse files
Files changed (2) hide show
  1. README.md +0 -1
  2. dataset.py +67 -29
README.md CHANGED
@@ -185,7 +185,6 @@ from datasets import load_dataset
185
 
186
  dataset = load_dataset(
187
  "ReadyAi/5000-podcast-conversations-with-metadata-and-embedding-dataset",
188
- data_files="data/bittensor-conversational-tags-and-embeddings-part-*.parquet",
189
  split="train",
190
  streaming=True
191
  )
 
185
 
186
  dataset = load_dataset(
187
  "ReadyAi/5000-podcast-conversations-with-metadata-and-embedding-dataset",
 
188
  split="train",
189
  streaming=True
190
  )
dataset.py CHANGED
@@ -1,57 +1,95 @@
 
 
1
  import pandas as pd
2
- from datasets import DatasetInfo, GeneratorBasedBuilder, Split, SplitGenerator, Value, Features, Sequence
 
3
 
4
  class PodcastConversationsWithMetadataAndEmbedding(GeneratorBasedBuilder):
5
  def _info(self):
6
  return DatasetInfo(
7
- features=Features({
8
- "c_guid": Value("string"),
9
- "participants": Sequence(Value("string")),
10
- "transcript": Sequence({
11
- "chunk": Value("string"),
12
- "speaker": Value("string"),
13
- "text": Value("string")
14
- })
15
- })
16
  )
17
 
18
- def _split_generators(self, dl_manager):
19
- data_files = self.config.data_files
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  return [
22
  SplitGenerator(
23
  name=Split.TRAIN,
24
- gen_kwargs={"filepath": data_files["train"][0]},
25
  ),
26
  SplitGenerator(
27
  name="conversations",
28
- gen_kwargs={"filepath": data_files["conversations"][0]},
29
  ),
30
  SplitGenerator(
31
  name="conversations_to_tags",
32
- gen_kwargs={"filepath": data_files["conversations_to_tags"][0]},
33
  ),
34
  SplitGenerator(
35
  name="tag_to_id",
36
- gen_kwargs={"filepath": data_files["tag_to_id"][0]},
37
  ),
38
  SplitGenerator(
39
  name="full",
40
- gen_kwargs={"filepath": data_files["full"][0]},
41
  ),
42
  ]
43
 
44
- def _generate_examples(self, filepath):
45
- df = pd.read_parquet(filepath)
46
-
47
- for idx, row in df.iterrows():
48
- record = row.to_dict()
49
 
50
- # Normalize vector field if it exists
51
- if "vector" in record:
52
- if isinstance(record["vector"], (list, tuple)):
53
- record["vector"] = list(map(float, record["vector"]))
54
- else:
55
- record["vector"] = []
56
 
57
- yield idx, record
 
1
+ import glob
2
+
3
  import pandas as pd
4
+ from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Sequence, Split, SplitGenerator, Value
5
+
6
 
7
class PodcastConversationsWithMetadataAndEmbedding(GeneratorBasedBuilder):
    """Dataset builder for the 5000-podcast-conversations dataset.

    Exposes five named splits, each backed by local parquet file(s):
    ``train``, ``conversations``, ``conversations_to_tags``, ``tag_to_id``,
    and ``full`` (the sharded tag/embedding parquet parts).
    """

    def _info(self):
        """Return a ``DatasetInfo`` whose features depend on the config name.

        The original file defined ``_info`` twice; the first definition was
        dead code silently shadowed by this one, so it has been removed.

        Raises:
            ValueError: if ``self.config.name`` is not one of the known splits.
        """
        split_name = self.config.name

        # BUG FIX: the original compared a string to a list
        # (`split_name == ["train", "conversations"]`), which is never true,
        # so these configs always raised ValueError. Use a membership test.
        if split_name in ("train", "conversations"):
            features = Features(
                {
                    "c_guid": Value("string"),
                    "transcript": Sequence(
                        {"chunk": Value("string"), "speaker": Value("string"), "text": Value("string")}
                    ),
                    "participants": Sequence(Value("string")),
                }
            )
        elif split_name == "full":
            features = Features(
                {
                    "c_guid": Value("string"),
                    "tag_id": Value("int64"),
                    "tag": Value("string"),
                    "vector": Sequence(Value("float32")),
                }
            )
        elif split_name == "conversations_to_tags":
            features = Features(
                {
                    "c_guid": Value("string"),
                    "tag_id": Value("int64"),
                    "tag": Value("string"),
                }
            )
        elif split_name == "tag_to_id":
            features = Features(
                {
                    "tag_id": Value("int64"),
                    "tag": Value("string"),
                }
            )
        else:
            raise ValueError(f"Unknown split: {split_name}")

        return DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        """Map each named split to the parquet file(s) that back it.

        The ``full`` split fans out over the sharded
        ``bittensor-conversational-tags-and-embeddings-part-*`` parquet files;
        the glob is sorted so shard order (and therefore example keys) is
        deterministic.
        """
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"filepaths": ["conversations_train.parquet"]},
            ),
            SplitGenerator(
                name="conversations",
                gen_kwargs={"filepaths": ["conversations.parquet"]},
            ),
            SplitGenerator(
                name="conversations_to_tags",
                gen_kwargs={"filepaths": ["conversations_to_tags.parquet"]},
            ),
            SplitGenerator(
                name="tag_to_id",
                gen_kwargs={"filepaths": ["tag_to_id.parquet"]},
            ),
            SplitGenerator(
                name="full",
                gen_kwargs={"filepaths": sorted(glob.glob("data/bittensor-conversational-tags-and-embeddings-part-*.parquet"))},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(key, record)`` pairs from each parquet file in turn.

        Keys are ``"{path}-{row_index}"`` so they stay unique across the
        multiple shards of the ``full`` split.
        """
        for path in filepaths:
            df = pd.read_parquet(path)
            for idx, row in df.iterrows():
                record = row.to_dict()

                # Normalize the embedding column to a plain list[float].
                # BUG FIX: the original only accepted list/tuple, but
                # pandas.read_parquet returns list columns as numpy arrays,
                # so every embedding was silently replaced with []. Accept
                # any iterable; fall back to [] for non-iterables (None/NaN).
                if "vector" in record:
                    try:
                        record["vector"] = [float(v) for v in record["vector"]]
                    except TypeError:
                        record["vector"] = []

                yield f"{path}-{idx}", record