Kenneth Enevoldsen commited on
Commit
fa137c8
·
unverified ·
1 Parent(s): cbbdbd3

initial commit

Browse files
Files changed (44) hide show
  1. .gitattributes +0 -1
  2. .gitignore +25 -0
  3. .vscode/settings.json +7 -0
  4. CHANGELOG.md +18 -0
  5. CONTRIBUTING.md +117 -0
  6. README.md +336 -0
  7. data/maalfrid/create.py +134 -0
  8. data/maalfrid/create.py.lock +0 -0
  9. data/maalfrid/descriptive_stats.json +9 -0
  10. data/maalfrid/images/dist_document_length.png +3 -0
  11. data/maalfrid/maalfrid.log +10 -0
  12. data/maalfrid/maalfrid.md +136 -0
  13. data/maalfrid/maalfrid.parquet +3 -0
  14. descriptive_stats.json +9 -0
  15. docs/icon.png +3 -0
  16. images/dataset_size_plot.html +0 -0
  17. images/dataset_size_plot.svg +1 -0
  18. images/domain_distribution.png +3 -0
  19. makefile +21 -0
  20. pyproject.toml +34 -0
  21. src/dynaword/__init__.py +0 -0
  22. src/dynaword/bump_version.py +56 -0
  23. src/dynaword/dataset_structure.py +35 -0
  24. src/dynaword/datasheet.py +307 -0
  25. src/dynaword/descriptive_stats.py +95 -0
  26. src/dynaword/paths.py +5 -0
  27. src/dynaword/plots/descriptive_statistics_plots.py +44 -0
  28. src/dynaword/plots/plot_tokens_over_time.py +242 -0
  29. src/dynaword/plots/plots_dataset_size.py +134 -0
  30. src/dynaword/process_dataset.py +74 -0
  31. src/dynaword/tables.py +212 -0
  32. src/dynaword/typings.py +27 -0
  33. src/dynaword/update_descriptive_statistics.py +170 -0
  34. src/tests/__init__.py +0 -0
  35. src/tests/conftest.py +14 -0
  36. src/tests/test_dataset_schema.py +37 -0
  37. src/tests/test_datasheets.py +56 -0
  38. src/tests/test_load.py +33 -0
  39. src/tests/test_quality/__init__.py +0 -0
  40. src/tests/test_quality/test_duplicates.py +25 -0
  41. src/tests/test_quality/test_short_texts.py +20 -0
  42. src/tests/test_unique_ids.py +15 -0
  43. test_results.log +15 -0
  44. uv.lock +0 -0
.gitattributes CHANGED
@@ -9,7 +9,6 @@
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
  *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
  *.model filter=lfs diff=lfs merge=lfs -text
15
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
  *.lz4 filter=lfs diff=lfs merge=lfs -text
 
12
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
  *.model filter=lfs diff=lfs merge=lfs -text
14
  *.msgpack filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/*
3
+ *.pyc
4
+
5
+ # cSpell
6
+ cspell.json
7
+
8
+ # debugfile
9
+ .vscode/launch.json
10
+
11
+ # tmp files
12
+ tmp.py
13
+ tmp.png
14
+
15
+ # MacOS
16
+ .DS_Store
17
+
18
+ # tmp files
19
+ tmp.py
20
+
21
+ ## to allow temporary data drops without pushing it to the hub
22
+ data/*/tmp/*
23
+
24
+ ## node_modules
25
+ **/node_modules/
.vscode/settings.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "python.testing.pytestArgs": [
3
+ "src/tests"
4
+ ],
5
+ "python.testing.unittestEnabled": false,
6
+ "python.testing.pytestEnabled": true,
7
+ }
CHANGELOG.md ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Changelog
3
+
4
+ All notable changes to this project will be documented in this file.
5
+
6
+ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
7
+
8
+ ## [v0.0.1] - 2025-01-25
9
+
10
+
11
+ ### Added
12
+
13
+ - Added the `maalfrid` corpus
14
+
15
+
16
+ ## [v0.0.0] - 2025-01-24
17
+
18
+ Project was initialized by copying the structure of the [Danish Dynaword](https://huggingface.co/datasets/danish-foundation-models/danish-dynaword).
CONTRIBUTING.md ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Working with dataset locally
2
+
3
+ A huggingface datasets repository is a GitHub repository like any other. You can simply download it like so:
4
+
5
+ ```bash
6
+ git clone https://huggingface.co/datasets/danish-foundation-models/norwegian-dynaword
7
+ cd norwegian-dynaword
8
+ git lfs pull # download large files to ensure that tests works
9
+ ```
10
+
11
+ You can then work with the dataset locally like so:
12
+
13
+ ```py
14
+ from datasets import load_dataset
15
+
16
+ name = "../." # instead of "danish-foundation-models/norwegian-dynaword"
17
+ dataset = load_dataset("../.", split="train")
18
+ # make transformations here
19
+ ```
20
+
21
+ > Note: While it is local Huggingface still uses a cache, therefore you might need to reset it after changes have been made to see that it works correctly. You can do this by deleting the cached files which you can locate using `dataset.cache_files`.
22
+
23
+ ## Adding a new dataset
24
+
25
+ To add a new dataset you will have to create a folder under `data/{dataset_name}/`, which should look as follows:
26
+
27
+ ```
28
+ data/dataset_name
29
+ |- dataset_name.md
30
+ |- dataset_name.parquet
31
+ |- create.py # optional
32
+ ```
33
+
34
+ The create.py is an optional python script that allows you to recreate the dataset from the source. This is typically to allow us to reproduce the
35
+ dataset with fixes or update the dataset to the latest version using an API.
36
+
37
+ ## Installing dependencies
38
+
39
+ This repo comes with a few dependencies you need to install to make this run. It uses a [makefile](https://opensource.com/article/18/8/what-how-makefile) to run commands and a [uv](https://docs.astral.sh/uv/) for package management. Once you have uv installed you can install the dependencies using:
40
+
41
+ ```bash
42
+ make install
43
+ ```
44
+
45
+ Now you can activate the environment with:
46
+
47
+ ```
48
+ source .venv/bin/activate
49
+ ```
50
+
51
+ ## Running dataset tests
52
+
53
+ This dataset is special as it comes with a test suite, e.g. testing that the ids are unique and that the format is consistent. You can run the suite using
54
+
55
+ ```bash
56
+ make test
57
+ ```
58
+
59
+ ## Submitting a PR
60
+
61
+ Creating a PR on Huggingface is a bit different from creating one on Github.
62
+
63
+ 1) Go to the community tab on huggingface press *new pull request* and choose *on your machine*. Specify the title of the your PR. Then you can simply:
64
+
65
+ ```bash
66
+ git checkout -b {new branch name}
67
+ # make your changes here
68
+
69
+ # push to hub
70
+ # you might need to first login:
71
+ # huggingface-cli login
72
+ git push origin HEAD:refs/pr/{PR NUMBER}
73
+ ```
74
+ Where HEAD refers to the current branch.
75
+
76
+ Before you make the PR do be sure to make sure that you have completed the checklist below.
77
+
78
+ ### Making changes to an existing PR
79
+
80
+ As a contributor you might need to develop on an existing branch. To do so you can:
81
+ ```bash
82
+ # fetch and checkout existing branch:
83
+ git fetch origin refs/pr/{PR NUMBER}:pr/{PR NUMBER}
84
+ git checkout pr/{PR NUMBER}
85
+ # make your changes here
86
+
87
+ # push changes
88
+ ```
89
+
90
+ ### Checklist
91
+
92
+ - [ ] I have run the test suite using `make test` and all tests pass
93
+ - [ ] I have added/changed a dataset:
94
+ - [ ] I have updated descriptive statistics using `make update-descriptive-statistics`
95
+ - [ ] I have bumped the version use `make bump-version`
96
+ - [ ] If I have added a `create.py` script I have added the [script dependencies](https://docs.astral.sh/uv/guides/scripts/#declaring-script-dependencies) required to run that script.
97
+ - [ ] I have updated the CHANGELOG.md if appropriate
98
+
99
+
100
+ ### Examples of Previous PRs
101
+ To see example PR you can see the following:
102
+
103
+ - [Restructuring columns in the dataset](https://huggingface.co/datasets/danish-foundation-models/norwegian-dynaword/discussions/11)
104
+ - [Adding a new dataset](https://huggingface.co/datasets/danish-foundation-models/norwegian-dynaword/discussions/15)
105
+ - Updated [dataset description and metadata](https://huggingface.co/datasets/danish-foundation-models/norwegian-dynaword/discussions/20)
106
+
107
+ ## Frequently asked questions
108
+
109
+ ### Do you accept synthetic datasets
110
+
111
+ Yes we do generally accept synthetic datasets since it will likely be a promising research direction for low- to mid-resource languages.
112
+ However, you should be aware that synthetic dataset will probably require a more detailed examination and description.
113
+ We will for instance examine the quality of the synthetic subset and whether the model used for the creation permits resharing of the synthetic data under permissible licenses.
114
+
115
+ ### Do you accept non-Norwegian data
116
+
117
+ Generally this repository is intended for Norwegian text, however quite broadly defined. For instance, we do accept data containing [code-switching](https://www.google.com/search?client=safari&rls=en&q=code+switching&ie=UTF-8&oe=UTF-8) and historical Norwegian text.
README.md ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - no-annotation
4
+ language_creators:
5
+ - crowdsourced
6
+ language:
7
+ - 'no'
8
+ license: cc0-1.0
9
+ multilinguality:
10
+ - monolingual
11
+ source_datasets:
12
+ - original
13
+ task_categories:
14
+ - text-generation
15
+ task_ids:
16
+ - language-modeling
17
+ tags:
18
+ - text-corpus
19
+ - continual-development
20
+ - community-collaboration
21
+ pretty_name: Norwegian Dynaword
22
+ configs:
23
+ - config_name: default
24
+ data_files:
25
+ - split: train
26
+ path: data/*/*.parquet
27
+ - config_name: maalfrid
28
+ data_files:
29
+ - split: train
30
+ path: data/maalfrid/*.parquet
31
+ language_bcp47:
32
+ - 'no'
33
+ - nno
34
+ - nob
35
+ - nor
36
+ ---
37
+
38
+ <!--
39
+ readme structure is inspired by:
40
+ https://github.com/huggingface/datasets/blob/main/templates/README_guide.md
41
+ -->
42
+
43
+
44
+ # 🧨 norwegian dynaword
45
+
46
+
47
+ <!-- START README TABLE -->
48
+ | | |
49
+ | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
50
+ | **Version** | 0.0.1 ([Changelog](/CHANGELOG.md)) |
51
+ | **Language** | Norwegian (no, nor), including Bokmål (nob) and Nynorsk (nno) |
52
+ | **License** | Openly Licensed, See the respective dataset |
53
+ | **Models** | Currently there are no models trained on this dataset |
54
+ | **Contact** | If you have question about this project please create an issue [here](https://huggingface.co/datasets/danish-foundation-models/norwegian-dynaword/discussions) |
55
+
56
+
57
+
58
+ <!-- END README TABLE -->
59
+
60
+ ## Table of Contents
61
+ - [🧨 norwegian dynaword](#-norwegian-dynaword)
62
+ - [Table of Contents](#table-of-contents)
63
+ - [Dataset Description](#dataset-description)
64
+ - [Dataset Summary](#dataset-summary)
65
+ - [Loading the dataset](#loading-the-dataset)
66
+ - [Languages](#languages)
67
+ - [Domains](#domains)
68
+ - [Licensing](#licensing)
69
+ - [Dataset Structure](#dataset-structure)
70
+ - [Data Instances](#data-instances)
71
+ - [Data Fields](#data-fields)
72
+ - [Data Splits](#data-splits)
73
+ - [Dataset Creation](#dataset-creation)
74
+ - [Curation Rationale](#curation-rationale)
75
+ - [Annotations](#annotations)
76
+ - [Source Data](#source-data)
77
+ - [Data Collection and Processing](#data-collection-and-processing)
78
+ - [Dataset Statistics](#dataset-statistics)
79
+ - [Contributing to the dataset](#contributing-to-the-dataset)
80
+ - [Citation Information](#citation-information)
81
+ - [License information](#license-information)
82
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
83
+ - [Bias, Risks, and Limitations](#bias-risks-and-limitations)
84
+ - [Notice and takedown policy](#notice-and-takedown-policy)
85
+
86
+ ## Dataset Description
87
+
88
+ <!-- START-DESC-STATS -->
89
+ - **Number of samples**: 3.23M
90
+ - **Number of tokens (Llama 3)**: 2.23B
91
+ - **Average document length in tokens (min, max)**: 690.62 (4, 62.24K)
92
+ <!-- END-DESC-STATS -->
93
+
94
+
95
+ ### Dataset Summary
96
+
97
+ The Norwegian dynaword is a collection of Norwegian free-form text datasets from various domains. All of the datasets in the Norwegian Dynaword are openly licensed
98
+ and deemed permissible for training large language models.
99
+
100
+ Norwegian dynaword is continually developed, which means that the dataset will actively be updated as new datasets become available. If you would like to contribute a dataset see the [contribute section](#contributing-to-the-dataset).
101
+
102
+ ### Loading the dataset
103
+
104
+ ```py
105
+ from datasets import load_dataset
106
+
107
+ name = "danish-foundation-models/norwegian-dynaword"
108
+ ds = load_dataset(name, split = "train")
109
+ sample = ds[1] # see "Data Instances" below
110
+ ```
111
+
112
+ or load it by streaming the data
113
+ ```py
114
+ ds = load_dataset(name, split = "train", streaming=True)
115
+ dataset_iter = iter(ds)
116
+ sample = next(iter(dataset_iter))
117
+ ```
118
+
119
+ You can also load a single subset at a time:
120
+ ```py
121
+ ds = load_dataset(name, "adl", split = "train")
122
+ ```
123
+
124
+
125
+ As Norwegian dynaword is continually expanding and curated you can make sure that you get the same dataset every time by specifying the revision:
126
+ You can also load a single subset at a time:
127
+ ```py
128
+ ds = load_dataset(name, revision="{desired revision}")
129
+ ```
130
+
131
+ ### Languages
132
+ This dataset includes the following languages:
133
+
134
+ - Norwegian (nor-Latn), including Bokmål (nob-Latn), and Nynorsk (nno-Latn)
135
+
136
+ In addition it likely contains small amounts of English due to code-switching and Danish due to the historical relation between the two languages and language misclassifications due to their similarity.
137
+
138
+ Language is denoted using [BCP-47](https://en.wikipedia.org/wiki/IETF_language_tag), using the language code ISO [639-3](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) and the script code [ISO 15924](https://en.wikipedia.org/wiki/ISO_15924).
139
+
140
+ ### Domains
141
+
142
+ This dynaword consist of data from various domains (e.g., legal, books, social media). The following table and figure give an overview of the relative distributions of these domains. To see a full overview of the source check out the [source data section](#source-data)
143
+
144
+ <div style="display: flex; gap: 20px; align-items: flex-start;">
145
+
146
+ <div style="flex: 1;">
147
+
148
+
149
+ <!-- START-DOMAIN TABLE -->
150
+ | Domain | Sources | N. Tokens |
151
+ |:----------|:-----------|:------------|
152
+ | Web | [maalfrid] | 2.23B |
153
+ | **Total** | | 2.23B |
154
+
155
+ [maalfrid]: data/maalfrid/maalfrid.md
156
+ <!-- END-DOMAIN TABLE -->
157
+
158
+ </div>
159
+
160
+ <div style="flex: 1;">
161
+
162
+ <p align="center">
163
+ <img src="./images/domain_distribution.png" width="400" style="margin-right: 10px;" />
164
+ </p>
165
+
166
+ </div>
167
+
168
+ </div>
169
+
170
+
171
+ ### Licensing
172
+
173
+ The following gives an overview of the licensing in the Dynaword. To get the exact license of the individual datasets check out the [overview table](#source-data).
174
+ These licenses are applied to the constituent data, i.e., the text. The collection of datasets (metadata, quality control, etc.) is licensed under [CC-0](https://creativecommons.org/publicdomain/zero/1.0/legalcode.en).
175
+
176
+ <!-- START-LICENSE TABLE -->
177
+ | License | Sources | N. Tokens |
178
+ |:-----------------------------|:-----------|:------------|
179
+ | Other (Attribution required) | [maalfrid] | 2.23B |
180
+ | **Total** | | 2.23B |
181
+
182
+ [maalfrid]: data/maalfrid/maalfrid.md
183
+ <!-- END-LICENSE TABLE -->
184
+
185
+
186
+
187
+ ## Dataset Structure
188
+
189
+ The dataset contains text from different sources which are thoroughly defined in [Source Data](#source-data).
190
+
191
+ ### Data Instances
192
+
193
+ Each entry in the dataset consists of a single text with associated metadata
194
+
195
+ <!-- START-SAMPLE -->
196
+ ```py
197
+ {
198
+ "id": "maalfrid-0",
199
+ "text": "Elever med annet morsmål enn norsk og samisk har rett til særskilt norskopplæring til de har tilstre[...]",
200
+ "source": "maalfrid",
201
+ "added": "2026-01-25",
202
+ "created": "2021-01-01, 2021-12-31",
203
+ "token_count": 711
204
+ }
205
+ ```
206
+
207
+ ### Data Fields
208
+
209
+ An entry in the dataset consists of the following fields:
210
+
211
+ - `id` (`str`): A unique identifier for each document.
212
+ - `text`(`str`): The content of the document.
213
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
214
+ - `added` (`str`): The date when the document was added to this collection.
215
+ - `created` (`str`): A date range for when the document was originally created.
216
+ - `token_count` (`int`): The number of tokens in the sample computed using the Llama 8B tokenizer
217
+ <!-- END-SAMPLE -->
218
+
219
+ ### Data Splits
220
+
221
+ The entire corpus is provided in the `train` split.
222
+
223
+ ## Dataset Creation
224
+
225
+ ### Curation Rationale
226
+
227
+ These datasets were collected and curated with the intention of making openly licensed Norwegian data available. While this was collected with the intention of developing language models it is likely to have multiple other uses such as examining language development and differences across domains.
228
+
229
+
230
+ ### Annotations
231
+
232
+ This data generally contains no annotation besides the metadata attached to each sample such as what domain it belongs to.
233
+
234
+
235
+ ### Source Data
236
+
237
+ Below follows a brief overview of the sources in the corpus along with their individual license. To get more information about the individual dataset click the hyperlink in the table.
238
+
239
+ <details>
240
+ <summary><b>Overview Table (click to unfold)</b></summary>
241
+
242
+ You can learn more about each dataset by pressing the link in the first column.
243
+
244
+ <!-- START-MAIN TABLE -->
245
+ | Source | Description | Domain | N. Tokens | License |
246
+ |:-----------|:-------------------------------------------------------|:---------|:------------|:-----------|
247
+ | [maalfrid] | Norwegian content from Norwegian institutions websites | Web | 2.23B | [NLOD 2.0] |
248
+ | **Total** | | | 2.23B | |
249
+
250
+ [maalfrid]: data/maalfrid/maalfrid.md
251
+
252
+
253
+ [CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en
254
+ [CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en
255
+ [CC-BY 4.0]: https://creativecommons.org/licenses/by/4.0/deed.en
256
+ [Apache 2.0]: https://www.apache.org/licenses/LICENSE-2.0
257
+ [NLOD 2.0]: ./data/maalfrid/maalfrid.md#license-information
258
+ <!-- END-MAIN TABLE -->
259
+
260
+ </details>
261
+
262
+
263
+ ### Data Collection and Processing
264
+
265
+ Norwegian dynaword is continually developed, which means that the dataset will actively be updated as new datasets become available. This means that the size of Dynaword increases over time as seen in the following plot:
266
+
267
+ <p align="center">
268
+ <img src="./images/tokens_over_time.svg" width="600" style="margin-right: 10px;" />
269
+ </p>
270
+
271
+ The data collection and processing varies depending on the dataset and is documented in the individual datasheets, which are linked in the above table. If possible the collection is documented both in the datasheet and in the reproducible script (`data/{dataset}/create.py`).
272
+
273
+ In addition to data specific processing we also run a series of automated quality checks to ensure formatting (e.g. ensuring correctly formatted columns and unique IDs), quality checks (e.g. duplicate and empty string detection) and datasheet documentation checks. These checks are there to ensure a high quality of documentation and a minimal level of quality. To allow for the development of novel cleaning methodologies we do not provide more extensive cleaning.
274
+
275
+ ### Dataset Statistics
276
+ The following plot(s) are intended to give an overview of document lengths in the various sources.
277
+
278
+ <p align="center">
279
+ <img src="./images/dataset_size_plot.svg" width="600" style="margin-right: 10px;" />
280
+ </p>
281
+
282
+
283
+
284
+ ### Contributing to the dataset
285
+
286
+ We welcome contributions to the dataset, including new sources, improved data filtering, and other enhancements. To get started on contributing, please see [the contribution guidelines](CONTRIBUTING.md)
287
+
288
+ ## Citation Information
289
+
290
+ If you use this work, please cite the [scientific article](https://arxiv.org/abs/2508.02271) introducing the Dynaword approach:
291
+
292
+ > Enevoldsen, K.C., Jensen, K.N., Kostkan, J., Szab'o, B.I., Kardos, M., Vad, K., Heinsen, J., N'unez, A.B., Barmina, G., Nielsen, J., Larsen, R., Vahlstrup, P.B., Dalum, P.M., Elliott, D., Galke, L., Schneider-Kamp, P., & Nielbo, K.L. (2025). Dynaword: From One-shot to Continuously Developed Datasets.
293
+
294
+
295
+ ```
296
+ @article{enevoldsen2025dynaword,
297
+ title={Dynaword: From One-shot to Continuously Developed Datasets},
298
+ author={Enevoldsen, Kenneth and Jensen, Kristian N{\o}rgaard and Kostkan, Jan and Szab{\'o}, Bal{\'a}zs and Kardos, M{\'a}rton and Vad, Kirten and N{\'u}{\~n}ez, Andrea Blasi and Barmina, Gianluca and Nielsen, Jacob and Larsen, Rasmus and others},
299
+ journal={arXiv preprint arXiv:2508.02271},
300
+ year={2025}
301
+ }
302
+ ```
303
+
304
+ Additionally, we recommend citing the relevant source datasets as well. See the individual datasheets for more information.
305
+
306
+ ## License information
307
+
308
+ The license for each constituent dataset is supplied in the [Source data](#source-data) table. This license is applied to the constituent data, i.e., the text. The collection of datasets (metadata, quality control, etc.) is licensed under [CC-0](https://creativecommons.org/publicdomain/zero/1.0/legalcode.en).
309
+
310
+ ### Personal and Sensitive Information
311
+
312
+ As far as we are aware the dataset does not contain information identifying sexual orientation, political beliefs, religion, or health connected along with a personal identifier of any non-public or non-historic figures.
313
+
314
+
315
+ ### Bias, Risks, and Limitations
316
+
317
+ Certain works in this collection are historical works and thus reflect the linguistic, cultural, and ideological norms of their time.
318
+ As such, it includes perspectives, assumptions, and biases characteristic of the period, which may be considered offensive or exclusionary by contemporary standards.
319
+
320
+
321
+ ### Notice and takedown policy
322
+ We redistribute files shared with us under a license permitting such redistribution. If you have concerns about the licensing of these files, please [contact us](https://huggingface.co/datasets/danish-foundation-models/norwegian-dynaword/discussions/new). If you consider that the data contains material that infringe your copyright, please:
323
+ - Clearly identify yourself with detailed contact information such as an address, a telephone number, or an email address at which you can be contacted.
324
+ - Clearly reference the original work claimed to be infringed
325
+ - Clearly identify the material claimed to be infringing and information reasonably sufficient to allow us to locate the material.
326
+ You can contact us through this channel.
327
+ We will comply with legitimate requests by removing the affected sources from the next release of the corpus
328
+
329
+ ---
330
+
331
+ <h3 style="display: flex; align-items: center;">
332
+ <a href="https://www.foundationmodels.dk">
333
+ <img src="./docs/icon.png" width="30" style="margin-right: 10px;" />
334
+ </a>
335
+ A&nbsp;<a href=https://www.foundationmodels.dk>Danish Foundation Models</a>&nbsp;dataset
336
+ </h3>
data/maalfrid/create.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "datasets>=3.2.0",
5
+ # ]
6
+ # ///
7
+
8
+ """
9
+ Script for downloading and processing the dataset
10
+
11
+ Note: To run this script, you need to set `GIT_LFS_SKIP_SMUDGE=1` to be able to install dynaword:
12
+ ```bash
13
+ GIT_LFS_SKIP_SMUDGE=1 uv run data/maalfrid/create.py
14
+ ```
15
+
16
+ TODO: Add to top once first PR is merged:
17
+
18
+ # dependencies = [
19
+ # "datasets>=3.2.0",
20
+ # "dynaword"
21
+ # ]
22
+ # [tool.uv.sources]
23
+ # dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/norwegian-dynaword", rev = "00e7f2aee7f7ad2da423419f77ecbb9c0536de0d" }
24
+ """
25
+
26
+ import logging
27
+ from datetime import date
28
+ from pathlib import Path
29
+ from types import SimpleNamespace
30
+ from typing import Any
31
+
32
+ from datasets import load_dataset
33
+
34
+ from dynaword.process_dataset import (
35
+ add_token_count,
36
+ ensure_column_order,
37
+ remove_duplicate_text,
38
+ remove_empty_texts,
39
+ )
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
+
44
def filter_lang_id(
    example: dict[str, Any],
    langs_to_keep: frozenset[str] = frozenset({"no", "nn"}),
    min_conf: float = 0.7,
) -> bool:
    """Keep rows classified as Norwegian (Bokmål or Nynorsk) with high confidence.

    Args:
        example: A dataset row containing `lang_fasttext` (language code) and
            `lang_fasttext_conf` (classifier confidence, possibly as a string).
        langs_to_keep: Accepted fastText language codes. A frozenset is used
            instead of a mutable set default argument.
        min_conf: Strict lower bound on the classifier confidence.

    Returns:
        True if the row's language is accepted and its confidence exceeds
        `min_conf`; False otherwise.
    """
    return (
        example["lang_fasttext"] in langs_to_keep
        and float(example["lang_fasttext_conf"]) > min_conf
    )
51
+
52
+
53
def convert_to_row(
    example: dict[str, Any],
    source: str,
) -> dict[str, Any]:
    """Map a raw NCC row onto the dynaword schema (`text` and `created`).

    The `created` field expresses the publication year as a
    "YYYY-01-01, YYYY-12-31" date range, since only the year is known.
    `source` is accepted for interface compatibility; the source column
    itself is attached to the dataset in a later processing step.
    """
    year = example["publish_year"]
    return {
        "text": example["text"],
        "created": f"{year}-01-01, {year}-12-31",
    }
66
+
67
+
68
def main(hf_path: str, revision: str, source: str, num_proc: int):
    """Download the NCC corpus, extract the `source` doc-type subset, and save it as parquet.

    Args:
        hf_path: Hugging Face dataset repository to load (e.g. "NbAiLab/NCC").
        revision: Dataset git revision, pinned for reproducibility.
        source: Doc-type to keep; also used as the source label, id prefix,
            and output file stem.
        num_proc: Number of worker processes for loading/filtering/mapping.
    """
    save_path = Path(__file__).parent / f"{source}.parquet"

    # load all splits
    logger.info(f"Loading data from: {hf_path}")
    ds = load_dataset(
        hf_path,
        streaming=False,
        split="train+validation",
        num_proc=num_proc,
        revision=revision,
    )

    logger.info(f"Processing dataset - total number of rows: {len(ds)}")

    # drop the raw NCC ids; sequential `{source}-{i}` ids are assigned below
    ds = ds.remove_columns(["id"])

    # NOTE: row counts are logged *after* each filter so the reported number
    # reflects the filter's effect (previously the pre-filter count was logged).
    ds = ds.filter(filter_lang_id, num_proc=num_proc)
    logger.info(f"Filtered based on language id - remaining rows: {len(ds)}")

    ds = ds.filter(lambda example: source in example["doc_type"], num_proc=num_proc)
    logger.info(f"Filtered based on doctype '{source}' - remaining rows: {len(ds)}")

    logger.info("Converting to standard format")
    ds = ds.map(
        lambda example: convert_to_row(example, source),
        remove_columns=ds.column_names,
        num_proc=num_proc,
    )
    # use the `source` parameter consistently instead of a hard-coded "maalfrid"
    ds = ds.add_column("source", [source] * len(ds))
    ds = ds.add_column("id", [f"{source}-{i}" for i in range(len(ds))])
    ds = ds.add_column("added", [date.today().isoformat()] * len(ds))

    ds = remove_empty_texts(ds)  # remove rows with empty text
    ds = remove_duplicate_text(ds)  # remove rows with duplicate text
    ds = add_token_count(ds)
    ds = ensure_column_order(ds)

    ds.to_parquet(save_path)
106
+
107
+
108
if __name__ == "__main__":
    # Run configuration; the NCC revision is pinned for reproducibility.
    config = SimpleNamespace(
        hf_path="NbAiLab/NCC",
        revision="857a5832b73ef33c66b5674d970777c39d991c0e",
        num_proc=4,
        source="maalfrid",
    )

    # Start every run with a fresh log file next to this script.
    log_file = Path(__file__).parent / f"{config.source}.log"
    if log_file.exists():
        log_file.unlink()

    # Log to both the console and the per-source log file.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_file),
        ],
    )

    main(
        hf_path=config.hf_path,
        revision=config.revision,
        source=config.source,
        num_proc=config.num_proc,
    )
data/maalfrid/create.py.lock ADDED
The diff for this file is too large to render. See raw diff
 
data/maalfrid/descriptive_stats.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "number_of_samples": 3229897,
3
+ "number_of_tokens": 2230639074,
4
+ "min_length_tokens": 4,
5
+ "max_length_tokens": 62236,
6
+ "number_of_characters": 6758480693,
7
+ "min_length_characters": 5,
8
+ "max_length_characters": 184633
9
+ }
data/maalfrid/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: 3272d2326046974f9f10e046dd03fbcefd66593189cf2a9a03b7f2ba88ca8a6b
  • Pointer size: 131 Bytes
  • Size of remote file: 603 kB
data/maalfrid/maalfrid.log ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ 2026-01-25 13:10:10,320 - INFO - Loading data from: NbAiLab/NCC
2
+ 2026-01-25 13:10:21,583 - INFO - Processing dataset - total number of rows: 8176399
3
+ 2026-01-25 13:10:21,671 - INFO - Filter based on language id - total number of rows: 8176399
4
+ 2026-01-25 13:10:21,912 - INFO - Filter based on doctype 'maalfrid' - total number of rows: 4133103
5
+ 2026-01-25 13:10:22,153 - INFO - Converting to standard format
6
+ 2026-01-25 13:10:34,441 - INFO - Removing empty texts
7
+ 2026-01-25 13:10:34,883 - INFO - Filtered 0 empty examples
8
+ 2026-01-25 13:10:34,883 - INFO - Removing duplicate texts
9
+ 2026-01-25 13:10:35,045 - INFO - Filtered 2077 duplicate examples
10
+ 2026-01-25 13:10:41,146 - INFO - Ensuring columns are in the correct order and are present
data/maalfrid/maalfrid.md ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: Maalfrid
3
+ language:
4
+ - da
5
+ license: other
6
+ license_name: NLOD 2.0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ domains:
13
+ - Web
14
+ ---
15
+
16
+ # Dataset Card for Maalfrid
17
+
18
+ <!-- START-SHORT DESCRIPTION -->
19
+ Norwegian content from Norwegian institutions websites.
20
+ <!-- END-SHORT DESCRIPTION -->
21
+
22
+ Documents are derived from the [Målfrid collection](https://www.nb.no/sprakbanken/en/resource-catalogue/oai-nb-no-sbr-69/) as a subsection of the [Norwegian Colossal Corpus](https://huggingface.co/datasets/NbAiLab/NCC), which is a collection of multiple smaller Norwegian corpora suitable for training large language models.
23
+
24
+ ## Dataset Description
25
+
26
+ <!-- START-DESC-STATS -->
27
+ - **Number of samples**: 3.23M
28
+ - **Number of tokens (Llama 3)**: 2.23B
29
+ - **Average document length in tokens (min, max)**: 690.62 (4, 62.24K)
30
+ <!-- END-DESC-STATS -->
31
+
32
+
33
+ ## Dataset Structure
34
+ An example from the dataset looks as follows.
35
+ <!-- START-SAMPLE -->
36
+ ```py
37
+ {
38
+ "id": "maalfrid-0",
39
+ "text": "Elever med annet morsmål enn norsk og samisk har rett til særskilt norskopplæring til de har tilstre[...]",
40
+ "source": "maalfrid",
41
+ "added": "2026-01-25",
42
+ "created": "2021-01-01, 2021-12-31",
43
+ "token_count": 711
44
+ }
45
+ ```
46
+
47
+ ### Data Fields
48
+
49
+ An entry in the dataset consists of the following fields:
50
+
51
+ - `id` (`str`): A unique identifier for each document.
52
+ - `text`(`str`): The content of the document.
53
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
54
+ - `added` (`str`): The date when the document was added to this collection.
55
+ - `created` (`str`): The date range for when the document was originally created.
56
+ - `token_count` (`int`): The number of tokens in the sample computed using the Llama 8B tokenizer
57
+ <!-- END-SAMPLE -->
58
+
59
+
60
+
61
+ ### Dataset Statistics
62
+
63
+ <!-- START-DATASET PLOTS -->
64
+ <p align="center">
65
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
66
+ </p>
67
+ <!-- END-DATASET PLOTS -->
68
+
69
+ ## Additional Information
70
+
71
+ ## License Information
72
+
73
+ This dataset is licensed under [NLOD 2.0](https://data.norge.no/nlod/en/2.0).
74
+ This license is derived from the original [publication](https://huggingface.co/datasets/NbAiLab/NCC), which is published by the
75
+ [National Library of Norway](https://www.nb.no/en/).
76
+
77
+ ## Filtering
78
+
79
+ This subset is the result of the following filtering from all available data splits on the [NCC](https://huggingface.co/datasets/NbAiLab/NCC):
80
+
81
+ - is_maalfrid: Documents, which are tagged as a part of the Målfrid corpus
82
+ - language_filter: Document is classified as Norwegian with a threshold of 0.75
83
+ - min_length: Document has at least 10 words (whitespace separated strings + punctuation)
84
+ - alpha_ratio: The ratio of all words / words with only alphabetical characters is at least 0.7
85
+ - min_stop_words: The document contains at least 2 Norwegian stop words
86
+ - duplicate: Duplicate documents were removed
87
+
88
+ | Filtering step | Number of documents |
89
+ | --------------- | ------------------ |
90
+ | is_maalfrid | 4 719 569 |
91
+ | language_filter | 51 523 |
92
+ | min_length | 49 948 |
93
+ | alpha_ratio | 33 390 |
94
+ | min_stop_words | 33 340 |
95
+ | duplicate | 33 336 |
96
+
97
+
98
+ ### Citation Information
99
+
100
+ If you use this source please cite the following articles:
101
+
102
+ ```
103
+ @inproceedings{kummervold-etal-2022-norwegian-colossal,
104
+ title = {The {N}orwegian colossal corpus: A text corpus for training large {N}orwegian language models},
105
+ author = {Kummervold, Per E and
106
+ Wetjen, Freddy and
107
+ De la Rosa, Javier},
108
+ booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference (LREC)},
109
+ year = {2022},
110
+ address = {Marseille, France},
111
+ publisher = {European Language Resources Association},
112
+ url = {https://aclanthology.org/2022.lrec-1.410},
113
+ pages = {3852--3860},
114
+ abstract = {Norwegian has been one of many languages lacking sufficient available text to train quality language models. In an attempt to bridge this gap, we introduce the Norwegian Colossal Corpus (NCC), which comprises 49GB of clean Norwegian textual data containing over 7B words. The NCC is composed of different and varied sources, ranging from books and newspapers to government documents and public reports, showcasing the various uses of the Norwegian language in society. The corpus contains mainly Norwegian Bokmål and Norwegian Nynorsk. Each document in the corpus is tagged with metadata that enables the creation of sub-corpora for specific needs. Its structure makes it easy to combine with large web archives that for licensing reasons could not be distributed together with the NCC. By releasing this corpus openly to the public, we hope to foster the creation of both better Norwegian language models and multilingual language models with support for Norwegian.},
115
+ }
116
+
117
+ @inproceedings{kummervold-etal-2021-operationalizing,
118
+ title = {Operationalizing a National Digital Library: The Case for a {N}orwegian Transformer Model},
119
+ author = {Kummervold, Per E and
120
+ De la Rosa, Javier and
121
+ Wetjen, Freddy and
122
+ Brygfjeld, Svein Arne},
123
+ booktitle = {Proceedings of the 23rd Nordic Conference on Computational Linguistics (NoDaLiDa)},
124
+ year = {2021},
125
+ address = {Reykjavik, Iceland (Online)},
126
+ publisher = {Linköping University Electronic Press, Sweden},
127
+ url = {https://aclanthology.org/2021.nodalida-main.3},
128
+ pages = {20--29},
129
+ abstract = {In this work, we show the process of building a large-scale training set from digital and digitized collections at a national library.
130
+ The resulting Bidirectional Encoder Representations from Transformers (BERT)-based language model for Norwegian outperforms multilingual BERT (mBERT) models
131
+ in several token and sequence classification tasks for both Norwegian Bokmål and Norwegian Nynorsk. Our model also improves the mBERT performance for other
132
+ languages present in the corpus such as English, Swedish, and Danish. For languages not included in the corpus, the weights degrade moderately while keeping strong multilingual properties. Therefore,
133
+ we show that building high-quality models within a memory institution using somewhat noisy optical character recognition (OCR) content is feasible, and we hope to pave the way for other memory institutions to follow.},
134
+ }
135
+
136
+ ```
data/maalfrid/maalfrid.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56447ced3202df58cd1ca99ab877265a77c96c60aba6f26a364b5a5ae624c5ec
3
+ size 3934944751
descriptive_stats.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "number_of_samples": 3229897,
3
+ "number_of_tokens": 2230639074,
4
+ "min_length_tokens": 4,
5
+ "max_length_tokens": 62236,
6
+ "number_of_characters": 6758480693,
7
+ "min_length_characters": 5,
8
+ "max_length_characters": 184633
9
+ }
docs/icon.png ADDED

Git LFS Details

  • SHA256: 04a40794e9081b680cb080e084ac3bd0dce0263a3f85d950ba903cb31dbfde9b
  • Pointer size: 130 Bytes
  • Size of remote file: 56.3 kB
images/dataset_size_plot.html ADDED
The diff for this file is too large to render. See raw diff
 
images/dataset_size_plot.svg ADDED
images/domain_distribution.png ADDED

Git LFS Details

  • SHA256: 645152dfa461c86c4bea14d987fe1107b08c383cde1d86a18274e4065ff35507
  • Pointer size: 131 Bytes
  • Size of remote file: 118 kB
makefile ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ install:
2
+ @echo "--- 🚀 Installing project dependencies ---"
3
+ uv sync
4
+
5
+ test:
6
+ @echo "--- 🧪 Running tests ---"
7
+ uv run pytest src/tests/ | tee test_results.log
8
+
9
+ lint:
10
+ @echo "--- 🧹 Running linters ---"
11
+ ruff format . # running ruff formatting
12
+ ruff check . --fix # running ruff linting
13
+
14
+ bump-version:
15
+ @echo "--- 🚀 Bumping patch version ---"
16
+ uv run src/dynaword/bump_version.py
17
+
18
+ update-descriptive-statistics:
19
+ @echo "--- 🚀 Recomputing Descriptive statistics ---"
20
+ uv run src/dynaword/update_descriptive_statistics.py # compute missing descriptive statistics for all datasets
21
+ uv run src/dynaword/update_descriptive_statistics.py --dataset default --force # always ensure default dataset is up to date
pyproject.toml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "dynaword"
3
+ version = "0.0.1"
4
+ description = "project code for the norwegian dynaword project"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12,<3.13" # 3.13 have issues with spacy and pytorch
7
+ dependencies = [
8
+ # for commands
9
+ "datasets>=3.0.0", # loading and validating datasets
10
+ "pydantic>=2.10.4", # validating schemas
11
+ "tabulate>=0.9.0", # creating md table
12
+ "tomlkit>=0.13.2", # reading toml
13
+ "transformers>=4.47.1", # tokenization
14
+ # figures
15
+ "plotnine>=0.14.5",
16
+ "plotly>=6.0.1",
17
+ "nbformat>=4.2.0",
18
+ "kaleido==0.2.1",
19
+ ]
20
+
21
+ [dependency-groups]
22
+ dev = [
23
+ # development
24
+ "ipykernel>=6.29.5",
25
+ "pip>=25.0.1",
26
+ # test
27
+ "pytest>=8.3.4",
28
+ # formatting
29
+ "ruff>=0.8.3",
30
+ ]
31
+
32
+ [build-system]
33
+ requires = ["hatchling"]
34
+ build-backend = "hatchling.build"
src/dynaword/__init__.py ADDED
File without changes
src/dynaword/bump_version.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+
3
+ import tomlkit
4
+ from packaging.version import Version
5
+
6
+ from dynaword.paths import pyproject_path, readme_path
7
+
8
+
9
def get_version(pyproject_path: Path = pyproject_path) -> str:
    """Read the current project version from the ``[project]`` table of pyproject.toml."""
    with pyproject_path.open("r") as fh:
        pyproject = tomlkit.load(fh)
    project_table = pyproject["project"]  # type: ignore
    return project_table["version"]  # type: ignore
13
+
14
+
15
def update_pyproject_version(version: str, pyproject_path: Path) -> None:
    """Write *version* into the ``[project]`` table of pyproject.toml, preserving formatting."""
    with pyproject_path.open("r") as fh:
        document = tomlkit.load(fh)

    document["project"]["version"] = version  # type: ignore

    with pyproject_path.open("w") as fh:
        tomlkit.dump(document, fh)
22
+
23
+
24
def update_readme(version: str, readme_path: Path) -> None:
    """Find the version row in the README table and update it in place.

    Scans for the ``<!-- START README TABLE -->`` marker and replaces the
    ``**Version**`` row with the new version string.

    Args:
        version: The new version string to write.
        readme_path: Path to the README file to update.

    Raises:
        ValueError: If the ``**Version**`` row cannot be found — either the
            table end marker is reached first, or the table/row is missing
            from the file entirely (previously a silent no-op).
    """
    start = "<!-- START README TABLE -->"
    end = "<!-- END README TABLE -->"

    with readme_path.open("r") as f:
        lines = f.readlines()

    in_table = False
    updated = False
    for i, line in enumerate(lines):
        if start in line:
            in_table = True
        if in_table:
            if "**Version**" in line:
                lines[i] = f"| **Version** | {version} ([Changelog](/CHANGELOG.md)) |\n"
                updated = True
                break
            if end in line:
                raise ValueError("**Version** not found in README table.")

    # Fail loudly if the table markers are absent instead of silently writing
    # back an unchanged README.
    if not updated:
        raise ValueError("**Version** not found in README table.")

    with readme_path.open("w") as f:
        f.writelines(lines)
45
+
46
+
47
def main(pyproject_path: Path, readme_path: Path) -> None:
    """Bump the patch version and sync it to pyproject.toml and the README."""
    current = Version(get_version(pyproject_path))
    bumped = f"{current.major}.{current.minor}.{current.micro + 1}"
    update_pyproject_version(bumped, pyproject_path)
    update_readme(bumped, readme_path)
52
+ update_readme(str(version), readme_path)
53
+
54
+
55
+ if __name__ == "__main__":
56
+ main(pyproject_path, readme_path)
src/dynaword/dataset_structure.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from datetime import date
3
+ from enum import Enum
4
+
5
+ from pydantic import BaseModel, BeforeValidator
6
+ from typing_extensions import Annotated
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+
11
+ def ensure_tuple(created: str | tuple) -> tuple:
12
+ if isinstance(created, str):
13
+ return tuple(created.split(", "))
14
+ return created
15
+
16
+
17
class SampleSchema(BaseModel):
    """Pydantic schema validating a single dataset row."""

    id: str  # unique identifier of the document
    text: str  # document content
    source: str  # name of the source dataset
    added: date  # date the document was added to this collection
    # (start, end) date range for when the document was created; the serialized
    # "start, end" string form is accepted via ensure_tuple.
    created: Annotated[tuple[date, date], BeforeValidator(ensure_tuple)]
    token_count: int  # number of tokens in the document (Llama tokenizer)
24
+
25
+
26
class ColumnNames(Enum):
    """Canonical column names (and their ordering) for the dataset schema."""

    id = "id"
    text = "text"
    source = "source"
    added = "added"
    created = "created"
    token_count = "token_count"
34
+
35
+ COLUMN_ORDER = [col.value for col in ColumnNames]
src/dynaword/datasheet.py ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ from datetime import datetime
4
+ from enum import Enum
5
+ from pathlib import Path
6
+ from textwrap import dedent
7
+ from typing import Any, Literal, Self, cast
8
+
9
+ import yaml
10
+ from datasets import Dataset, IterableDataset, load_dataset
11
+ from pydantic import BaseModel, field_validator
12
+
13
+ from dynaword.descriptive_stats import DescriptiveStatsOverview
14
+ from dynaword.plots.descriptive_statistics_plots import (
15
+ create_descriptive_statistics_plots,
16
+ )
17
+ from dynaword.typings import DOMAIN, LICENSE, LICENSE_NAMES_MAPPING
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+
22
+ LICENSE_HEADER = "## License Information"
23
+
24
+
25
class DEFAULT_SECTION_TAGS(Enum):
    """HTML-comment tag names marking the auto-generated sections of a datasheet."""

    desc_stats = "DESC-STATS"
    sample = "SAMPLE"
    dataset_plots = "DATASET PLOTS"
    short_description = "SHORT DESCRIPTION"
30
+
31
+
32
+ DATASET_PLOTS_template = """
33
+ <p align="center">
34
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
35
+ </p>
36
+ """
37
+
38
+
39
+ SAMPLE_template = """
40
+ ```py
41
+ {sample}
42
+ ```
43
+
44
+ ### Data Fields
45
+
46
+ An entry in the dataset consists of the following fields:
47
+
48
+ - `id` (`str`): An unique identifier for each document.
49
+ - `text`(`str`): The content of the document.
50
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
51
+ - `added` (`str`): An date for when the document was added to this collection.
52
+ - `created` (`str`): An date range for when the document was originally created.
53
+ - `token_count` (`int`): The number of tokens in the sample computed using the Llama 8B tokenizer
54
+ """
55
+
56
+
57
def convert_to_human_readable(value: float) -> str:
    """Format a number with a B/M/K suffix (e.g. ``2230639074`` -> ``"2.23B"``).

    Values below 1 000 are returned via ``str()`` unchanged.
    """
    thresholds = [
        (1_000_000_000, "B"),
        (1_000_000, "M"),
        (1_000, "K"),
    ]
    for threshold, label in thresholds:
        # >= so exact threshold values (e.g. 1000) also get a suffix,
        # consistent with _format_tokens in plot_tokens_over_time.py
        # (previously `>` left 1000 formatted as "1000").
        if value >= threshold:
            return f"{value / threshold:.2f}{label}"

    return str(value)
68
+
69
+
70
def create_sample_str(sample: dict[str, Any], max_str_len: int = 100) -> str:
    """Render a dataset sample as a truncated, pretty-printed JSON code block.

    Long string fields are cut at *max_str_len* characters (with a ``[...]``
    marker) and datetimes are stringified so the sample is JSON-serializable.

    Args:
        sample: A single dataset row as a dict.
        max_str_len: Maximum length of string fields before truncation.

    Returns:
        The sample rendered with ``SAMPLE_template``.
    """
    sample = dict(sample)  # shallow copy: previously mutated the caller's row in place
    for key, value in sample.items():
        if isinstance(value, str) and len(value) > max_str_len:
            sample[key] = value[:max_str_len] + "[...]"
        elif isinstance(value, datetime):
            sample[key] = str(value)

    json_sample = json.dumps(sample, indent=2, ensure_ascii=False)
    return SAMPLE_template.format(sample=json_sample)
81
+
82
+
83
class DataSheet(BaseModel):
    """A dataset datasheet: YAML frontmatter plus a markdown body.

    Provides helpers for locating markdown headers/sections, reading and
    rewriting the auto-generated sections delimited by HTML-comment tags
    (descriptive stats, sample, plots), and loading the dataset the sheet
    describes.
    """

    pretty_name: str
    license: LICENSE
    license_name: str | None
    language: list[Literal["da"]]
    domains: list[DOMAIN] | None  # None for main readme # TODO: make literal
    path: Path
    frontmatter: dict[str, Any]
    body: str

    # check that licence name is compatible with license
    @field_validator("license_name")  # type: ignore
    def check_license_name(cls, v: str | None, info) -> str | None:
        """Validate that `license_name` agrees with the `license` field.

        Bug fix: pydantic v2 passes a ``ValidationInfo`` object as the second
        argument; previously-validated fields live in ``info.data``. The old
        v1-style ``values["license"]`` subscription raised a TypeError at
        validation time.
        """
        if v is not None and v in LICENSE_NAMES_MAPPING:
            license_ = info.data["license"]
            if license_ != LICENSE_NAMES_MAPPING[v]:
                raise ValueError(
                    f"License name '{v}' does not match license '{license_}'"
                )
        return v

    @property
    def short_description(self) -> str:
        """The text between the SHORT DESCRIPTION tags, trailing period stripped."""
        short_description = self.get_tag_content(DEFAULT_SECTION_TAGS.short_description)
        if short_description.endswith("."):
            short_description = short_description[:-1]
        return short_description

    @property
    def license_information(self) -> str:
        """The markdown section under the '## License Information' header."""
        return self.get_section_by_header(LICENSE_HEADER)

    @property
    def frontmatter_as_str(self) -> str:
        """The frontmatter serialized back to YAML (insertion order preserved)."""
        return yaml.dump(self.frontmatter, indent=2, sort_keys=False)

    def to_str(self) -> str:
        """Serialize the full datasheet (frontmatter + body) to markdown."""
        return f"---\n{self.frontmatter_as_str.strip()}\n---\n\n{self.body.strip()}\n"

    def get_dataset(self, **kwargs) -> Dataset:
        """Load the dataset stored next to this datasheet from its parquet files."""
        ds_path = self.path.parent
        # required to avoid loading .png files for the images/ folder (e.g. for plots) instead of parquet files
        ignore_dirs = {".venv", "tmp"}  # add more if needed

        parquet_files = [
            p.as_posix()
            for p in ds_path.glob("**/*.parquet")
            if not any(ignored in p.parts for ignored in ignore_dirs)
        ]
        ds = load_dataset(
            ds_path.as_posix(), split="train", data_files=parquet_files, **kwargs
        )
        ds = cast(Dataset, ds)
        return ds

    def get_descritive_stats(self) -> DescriptiveStatsOverview:
        """Load the precomputed descriptive statistics stored next to the sheet.

        NOTE: the method name keeps its historical misspelling ("descritive")
        for backward compatibility with existing callers.
        """
        path = self.path.parent / "descriptive_stats.json"
        return DescriptiveStatsOverview.from_disk(path)

    def get_section_indices_by_header(self, header: str) -> tuple[int, int]:
        """Return the (start, end) indices in `body` of the section starting at `header`.

        The section ends at the next header returned by `get_headers`, or at
        the end of the body.

        Raises:
            ValueError: If `header` does not occur in the body.
        """
        level = header.split(" ")[0].count("#")

        next_is_end_section = False
        end_header = None
        for _header in self.get_headers(levels=list(range(1, level + 1))):
            if header.strip() == _header.strip():
                next_is_end_section = True
                continue

            if next_is_end_section:
                end_header = _header
                break

        # Bug fix: this used to test `next_is_end_section is None`, which is
        # never true for a bool, so a missing header silently produced a bogus
        # slice instead of raising.
        if not next_is_end_section:
            raise ValueError(f"The header '{header}' is not found in the text.")

        start_idx = self.body.find(header)
        if end_header:
            end_idx = self.body[start_idx:].find(end_header) + start_idx
        else:
            end_idx = len(self.body)

        return start_idx, end_idx

    def get_section_by_header(self, header: str) -> str:
        """Return the markdown text of the section starting at `header`."""
        s, e = self.get_section_indices_by_header(header)
        return self.body[s:e]

    def get_headers(self, levels: list[int] | None = None) -> list[str]:
        """Return markdown header lines from `body` for the requested levels.

        NOTE(review): matching uses ``startswith("#" * level)``, so any header
        line matches the lowest requested level — effectively every header is
        returned whenever level 1 is requested. Confirm intent before
        tightening the match.
        """
        if levels is None:
            levels = [1, 2, 3, 4]  # avoid a shared mutable default argument

        def __contains_level(text: str) -> bool:
            if text.startswith("#"):
                for level in levels:
                    if text.startswith("#" * level):
                        return True
            return False

        return [line for line in self.body.splitlines() if __contains_level(line)]

    def get_tag_idx(self, tag: str | DEFAULT_SECTION_TAGS) -> tuple[int, int]:
        """Return the indices of the START/END comment markers for `tag` in `body`.

        Raises:
            ValueError: If the tag pair is missing or malformed.
        """
        if isinstance(tag, Enum):
            tag = tag.value
        tag_start = f"<!-- START-{tag} -->"
        tag_end = f"<!-- END-{tag} -->"
        start_idx = self.body.find(tag_start)
        end_idx = self.body.find(tag_end)
        if end_idx != -1 and start_idx != -1 and start_idx < end_idx:
            return start_idx, end_idx
        raise ValueError(f"tag ({tag}) not found in readme")

    def get_tag_content(self, tag: str | DEFAULT_SECTION_TAGS) -> str:
        """Return the stripped text between the START/END markers for `tag`."""
        if isinstance(tag, Enum):
            tag = tag.value
        s, e = self.get_tag_idx(tag=tag)
        tag_start = f"<!-- START-{tag} -->"
        return self.body[s + len(tag_start) : e].strip()

    def add_descriptive_stats(
        self, descriptive_stats: DescriptiveStatsOverview | None = None
    ) -> str:
        """Render descriptive statistics into the DESC-STATS section.

        Computes the stats from the dataset when none are given.

        Returns:
            The updated body text (the sheet itself is not mutated).
        """
        if descriptive_stats is None:
            d_stats = DescriptiveStatsOverview.from_dataset(self.get_dataset())
        else:
            d_stats = descriptive_stats

        package = (
            dedent(f"""
            - **Number of samples**: {convert_to_human_readable(d_stats.number_of_samples)}
            - **Number of tokens (Llama 3)**: {convert_to_human_readable(d_stats.number_of_tokens)}
            - **Average document length in tokens (min, max)**: {convert_to_human_readable(d_stats.average_document_length_tokens)} ({convert_to_human_readable(d_stats.min_length_tokens)}, {convert_to_human_readable(d_stats.max_length_tokens)})
            """).strip()
            + "\n"
        )

        return self.replace_tag(
            package=package,
            tag=DEFAULT_SECTION_TAGS.desc_stats,
        )

    def add_dataset_plots(self, dataset: Dataset, create_plot: bool = True) -> str:
        """Insert the dataset-plots section, optionally regenerating the figure.

        Returns:
            The updated body text (the sheet itself is not mutated).
        """
        if create_plot:
            create_descriptive_statistics_plots(
                dataset=dataset, save_dir=self.path.parent
            )
        return self.replace_tag(
            package=DATASET_PLOTS_template, tag=DEFAULT_SECTION_TAGS.dataset_plots
        )

    def add_sample_and_description(
        self, dataset: Dataset | IterableDataset | None = None
    ) -> str:
        """Insert a rendered example row (and field docs) into the SAMPLE section.

        Returns:
            The updated body text (the sheet itself is not mutated).
        """
        if dataset is None:
            dataset = self.get_dataset(streaming=True)

        sample = dataset[0] if isinstance(dataset, Dataset) else next(iter(dataset))
        return self.replace_tag(
            package=create_sample_str(sample), tag=DEFAULT_SECTION_TAGS.sample
        )

    def replace_tag(self, package: str, tag: str | DEFAULT_SECTION_TAGS) -> str:
        """Add replace a tag in the datasheet body.

        Args:
            package: What you want to replace it with
            tag: What tag you want to replace

        Returns:
            The entire body text

        Raises:
            ValueError: If the START/END markers do not each appear exactly once.
        """
        if isinstance(tag, Enum):
            tag = tag.value
        tag_start = f"<!-- START-{tag} -->"
        tag_end = f"<!-- END-{tag} -->"

        if self.body.count(tag_start) != 1 or self.body.count(tag_end) != 1:
            raise ValueError(
                f"The markers ({tag_start} ... {tag_end}) does not appear in the markdown. Markers should appear exactly once in the markdown."
            )

        start_md, _, remainder = self.body.partition(tag_start)
        _, _, end_md = remainder.partition(tag_end)

        return f"{start_md}{tag_start}\n{package.strip()}\n{tag_end}{end_md}"

    @staticmethod
    def get_frontmatter_and_body(file_path: Path) -> tuple[dict[str, Any], str]:
        """Split a markdown file into parsed YAML frontmatter and the body text.

        Raises:
            ValueError: If the file has no leading ``---``-delimited frontmatter.
        """
        with file_path.open("r") as f:
            content = f.read()
        if content.startswith("---"):
            end_idx = content.find("---", 3)
            start_idx_body = end_idx + 3
            if end_idx != -1:
                frontmatter = content[3:end_idx].strip()
                return yaml.safe_load(frontmatter), content[start_idx_body:]
        raise ValueError(f"No frontmatter found in file: {file_path}")

    @classmethod
    def load_from_path(cls, readme_path: Path) -> Self:
        """Construct a DataSheet from a markdown datasheet on disk."""
        frontmatter, body = cls.get_frontmatter_and_body(readme_path)
        return cls(
            frontmatter=frontmatter,
            body=body,
            license=frontmatter["license"],
            language=frontmatter["language"],
            pretty_name=frontmatter["pretty_name"],
            domains=frontmatter.get("domains"),
            license_name=frontmatter.get("license_name"),
            path=readme_path,
        )

    def write_to_path(self, readme_path: Path | None = None) -> None:
        """Write the serialized datasheet to disk (defaults to its own path)."""
        if readme_path is None:
            readme_path = self.path
        with readme_path.open("w") as f:
            f.write(self.to_str())
298
+
299
+
300
+ if __name__ == "__main__":
301
+ from dynaword.paths import repo_path
302
+
303
+ sheet = DataSheet.load_from_path(repo_path / "data" / "dannet" / "dannet.md")
304
+ ds = sheet.get_dataset()
305
+
306
+ sheet.body = sheet.add_descriptive_stats(descriptive_stats=None)
307
+ sheet.write_to_path()
src/dynaword/descriptive_stats.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import logging
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from datasets import Dataset
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
def calculate_average_document_length(
    dataset: Dataset, text_column: str = "text"
) -> float:
    """Return the mean character length of *text_column* across the dataset.

    Returns 0.0 for an empty dataset instead of raising ZeroDivisionError.
    """
    if len(dataset) == 0:
        return 0.0
    total_characters = sum(len(text) for text in dataset[text_column])
    return total_characters / len(dataset)
18
+
19
+
20
@dataclass()
class DescriptiveStatsOverview:
    """
    Overview of descriptive statistics for a dataset.

    Attributes:
        number_of_samples: Total number of samples in the dataset.
        number_of_tokens: Total number of tokens in the dataset.
        min_length_tokens: Minimum document length in tokens.
        max_length_tokens: Maximum document length in tokens.
        number_of_characters: Total number of characters in the dataset.
        min_length_characters: Minimum document length in characters.
        max_length_characters: Maximum document length in characters.
    """

    number_of_samples: int
    number_of_tokens: int
    min_length_tokens: int
    max_length_tokens: int
    number_of_characters: int
    min_length_characters: int
    max_length_characters: int

    @property
    def average_document_length_tokens(self) -> float:
        """Mean tokens per document, rounded to two decimals (0.0 if empty)."""
        if self.number_of_samples <= 0:
            return 0.0
        return round(self.number_of_tokens / self.number_of_samples, 2)

    @property
    def average_document_length_characters(self) -> float:
        """Mean characters per document, rounded to two decimals (0.0 if empty)."""
        if self.number_of_samples <= 0:
            return 0.0
        return round(self.number_of_characters / self.number_of_samples, 2)

    @classmethod
    def from_disk(cls, path: Path) -> DescriptiveStatsOverview:
        """Load previously saved statistics from a JSON file."""
        data = json.loads(path.read_text())
        return cls(**data)

    def to_disk(self, path: Path) -> None:
        """Serialize the statistics to ``path`` (suffix forced to ``.json``)."""
        payload = json.dumps(self.__dict__, indent=2)
        path.with_suffix(".json").write_text(payload)

    @classmethod
    def from_dataset(cls, dataset: Dataset) -> DescriptiveStatsOverview:
        """Compute statistics from a dataset with `text` and `token_count` columns."""
        token_counts = dataset["token_count"]
        character_counts = [len(text) for text in dataset["text"]]
        return cls(
            number_of_samples=len(dataset),
            number_of_tokens=sum(token_counts),
            min_length_tokens=min(token_counts),
            max_length_tokens=max(token_counts),
            number_of_characters=sum(character_counts),
            min_length_characters=min(character_counts),
            max_length_characters=max(character_counts),
        )

    def __add__(self, other: DescriptiveStatsOverview) -> DescriptiveStatsOverview:
        """Combine two overviews: totals are summed, min/max ranges are widened."""
        if not isinstance(other, DescriptiveStatsOverview):
            raise TypeError("Can only add DescriptiveStatsOverview objects")
        return DescriptiveStatsOverview(
            number_of_samples=self.number_of_samples + other.number_of_samples,
            number_of_tokens=self.number_of_tokens + other.number_of_tokens,
            min_length_tokens=min(self.min_length_tokens, other.min_length_tokens),
            max_length_tokens=max(self.max_length_tokens, other.max_length_tokens),
            number_of_characters=self.number_of_characters + other.number_of_characters,
            min_length_characters=min(
                self.min_length_characters, other.min_length_characters
            ),
            max_length_characters=max(
                self.max_length_characters, other.max_length_characters
            ),
        )
src/dynaword/paths.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from pathlib import Path
2
+
3
+ repo_path = Path(__file__).parent.parent.parent
4
+ pyproject_path = repo_path / "pyproject.toml"
5
+ readme_path = repo_path / "README.md"
src/dynaword/plots/descriptive_statistics_plots.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from pathlib import Path
3
+
4
+ import pandas as pd
5
+ import plotnine as pn
6
+ from datasets import Dataset
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+
11
+ def create_descriptive_statistics_plots(
12
+ dataset: Dataset,
13
+ save_dir: Path,
14
+ ) -> tuple[Path, pn.ggplot]:
15
+ logger.info("creating descriptive statistics plot to readme.")
16
+ lengths = dataset["token_count"]
17
+ df = pd.DataFrame({"lengths": lengths, "Source": dataset["source"]})
18
+
19
+ plot = (
20
+ pn.ggplot(df, pn.aes(x="lengths", y=pn.after_stat("count")))
21
+ + pn.geom_histogram(bins=100)
22
+ + pn.labs(
23
+ x="Document Length (Tokens)",
24
+ y="Count",
25
+ title="Distribution of Document Lengths",
26
+ )
27
+ + pn.theme_minimal()
28
+ + pn.facet_wrap("Source", scales="free", ncol=3)
29
+ )
30
+
31
+ img_path = save_dir / "images"
32
+ img_path.mkdir(parents=False, exist_ok=True)
33
+ save_path = img_path / "dist_document_length.png"
34
+ pn.ggsave(
35
+ plot,
36
+ save_path,
37
+ dpi=500,
38
+ width=10,
39
+ height=10,
40
+ units="in",
41
+ verbose=False,
42
+ )
43
+
44
+ return save_path, plot
src/dynaword/plots/plot_tokens_over_time.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import subprocess
4
+ from datetime import datetime
5
+ from typing import Any, Dict, List, Optional, Tuple
6
+
7
+ import pandas as pd
8
+ import plotly.graph_objects as go
9
+
10
+ from dynaword.paths import repo_path
11
+
12
+ # Configure logging
13
+ logging.basicConfig(
14
+ level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
15
+ )
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
def get_file_history(
    filename: str = "descriptive_stats.json",
) -> List[Tuple[str, str, str]]:
    """Get commit history for a file with commit messages.

    Args:
        filename: Repository-relative path whose history is retrieved.

    Returns:
        A list of ``(commit_hash, commit_date, commit_message)`` tuples,
        newest first; empty if the git command fails.
    """
    # Bug fix: the log messages previously contained a literal "(unknown)"
    # placeholder instead of interpolating the filename.
    logger.info(f"Retrieving git history for {filename}")

    cmd = [
        "git",
        "log",
        "--format=%H|%ci|%s",  # commit hash | commit date | subject
        "--",
        filename,
    ]

    try:
        result = subprocess.run(
            cmd, capture_output=True, text=True, cwd=repo_path, check=True
        )
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed to get git history: {e}")
        return []

    commits = []
    for line in result.stdout.strip().split("\n"):
        if not line:
            continue
        parts = line.split("|", 2)  # split on the first 2 pipes only
        if len(parts) == 3:
            commit_hash, date_str, message = parts
            commits.append((commit_hash, date_str, message))

    logger.info(f"Found {len(commits)} commits for {filename}")
    return commits
52
+
53
+
54
def get_file_at_commit(commit_hash: str, filename: str) -> Optional[Dict[str, Any]]:
    """Get the parsed JSON content of *filename* at a specific commit.

    Args:
        commit_hash: The commit to read the file from.
        filename: Repository-relative path of the file.

    Returns:
        The decoded JSON object, or ``None`` if the file is missing at that
        commit or is not valid JSON.
    """
    # Bug fix: the `git show` spec previously hard-coded a "(unknown)"
    # placeholder instead of using the `filename` argument, so the lookup
    # could never succeed.
    cmd = ["git", "show", f"{commit_hash}:{filename}"]

    try:
        result = subprocess.run(
            cmd, capture_output=True, text=True, cwd=repo_path, check=True
        )
        return json.loads(result.stdout)
    except (subprocess.CalledProcessError, json.JSONDecodeError) as e:
        logger.warning(f"Failed to parse {filename} at commit {commit_hash[:8]}: {e}")
        return None
66
+
67
+
68
def create_token_dataframe(filename: str = "descriptive_stats.json") -> pd.DataFrame:
    """Create DataFrame with token history from git commits"""
    logger.info("Building token history dataframe from git commits")

    commits = get_file_history(filename)
    if not commits:
        logger.warning("No commits found")
        return pd.DataFrame()

    data = []
    for commit_hash, date_str, commit_message in commits:
        file_data = get_file_at_commit(commit_hash, filename)
        # Skip commits where the file is missing, unparsable, or lacks the token count.
        if file_data and "number_of_tokens" in file_data:
            try:
                # git %ci dates look like "2026-01-25 13:10:10 +0100"; keep only the date part.
                date = datetime.fromisoformat(date_str.split(" ")[0])
                data.append(
                    {
                        "date": date,
                        "tokens": file_data["number_of_tokens"],
                        "samples": file_data.get("number_of_samples", 0),
                        # NOTE(review): descriptive_stats.json does not appear to
                        # contain an "average_document_length" key (the average is a
                        # computed property), so this likely always falls back to 0
                        # — verify against the serialized stats.
                        "avg_length": file_data.get("average_document_length", 0),
                        "commit": commit_hash,
                        "commit_short": commit_hash[:8],
                        "commit_message": commit_message,
                    }
                )
            except ValueError as e:
                logger.warning(f"Failed to parse date {date_str}: {e}")

    # Convert to DataFrame and sort by date
    df = pd.DataFrame(data)
    if df.empty:
        logger.warning("No valid data found in commits")
        return df

    df = df.sort_values("date").reset_index(drop=True)

    # Calculate token changes
    if len(df) > 1:
        df["token_change"] = df["tokens"].diff()

    logger.info(
        f"Created dataframe with {len(df)} data points spanning {df['date'].min().date()} to {df['date'].max().date()}"
    )
    return df
113
+
114
+
115
+ def _format_tokens(value: float) -> str:
116
+ """Format tokens with human-readable suffixes"""
117
+ if value >= 1e12:
118
+ return f"{value / 1e12:.2f}T"
119
+ elif value >= 1e9:
120
+ return f"{value / 1e9:.2f}G"
121
+ elif value >= 1e6:
122
+ return f"{value / 1e6:.2f}M"
123
+ elif value >= 1e3:
124
+ return f"{value / 1e3:.2f}k"
125
+ else:
126
+ return f"{value:.0f}"
127
+
128
+
129
def _create_hover_text(df: pd.DataFrame) -> List[str]:
    """Build one HTML hover string per row of the token-history dataframe."""
    texts = []
    for _, record in df.iterrows():
        hover = (
            f"Date: {record['date'].strftime('%Y-%m-%d')}<br>"
            f"Tokens: {_format_tokens(record['tokens'])}<br>"
        )

        # The first snapshot carries no diff; only render the delta when present.
        if pd.notna(record.get("token_change")):
            sign = "+" if record["token_change"] >= 0 else ""
            hover += (
                f"Change: {sign}{_format_tokens(abs(record['token_change']))}<br>"
            )

        hover += (
            f"Samples: {record['samples']:,}<br>"
            f"Commit: {record['commit_short']}<br>"
            f"Message: {record['commit_message']}"
        )
        texts.append(hover)

    return texts
152
+
153
+
154
def _add_reference_lines(fig: go.Figure) -> None:
    """Annotate the figure with horizontal baselines for other Danish corpora."""
    baselines = (
        (300_000_000, "Common Corpus (dan) (Langlais et al., 2025)"),
        (1_000_000_000, "Danish Gigaword (Derczynski et al., 2021)"),
    )

    for tokens, label in baselines:
        fig.add_hline(
            y=tokens,
            line_dash="dash",
            line_color="gray",
            line_width=1,
            annotation_text=label,
            annotation_position="top left",
            annotation_font_size=12,
            annotation_font_color="gray",
        )
172
+
173
+
174
def plot_tokens_over_time(
    df: pd.DataFrame, width: int = 600, height: int = 400
) -> go.Figure:
    """Plot tokens over time using Plotly with interactive hover info."""
    fig = go.Figure()

    # Single red line with per-point hover text built from the dataframe.
    fig.add_trace(
        go.Scatter(
            x=df["date"],
            y=df["tokens"],
            mode="lines+markers",
            name="Tokens",
            line=dict(width=3, color="#DC2626"),  # saturated red
            marker=dict(size=5, color="#DC2626"),
            hovertemplate="%{text}<extra></extra>",
            text=_create_hover_text(df),
        )
    )

    # Horizontal baselines for comparable Danish corpora.
    _add_reference_lines(fig)

    fig.update_layout(
        title="Number of Tokens Over Time in Danish Dynaword",
        xaxis_title="Date",
        yaxis_title="Number of Tokens (Llama 3)",
        hovermode="closest",
        width=width,
        height=height,
        showlegend=False,
        plot_bgcolor="rgba(0,0,0,0)",  # transparent plot background
        paper_bgcolor="rgba(0,0,0,0)",  # transparent paper background
    )

    # Abbreviated SI-style tick labels on the token axis.
    fig.update_yaxes(tickformat=".2s", ticksuffix="")
    return fig
221
+
222
+
223
def create_tokens_over_time_plot() -> None:
    """Build the token-history dataframe and write the plot to images/ (HTML + SVG)."""
    df = create_token_dataframe()
    # Nothing to draw when no usable commits were found.
    if df.empty:
        logger.warning("No data available to plot")
        return

    logger.info("Generating interactive plot")
    fig = plot_tokens_over_time(df)

    html_path = repo_path / "images" / "tokens_over_time.html"
    svg_path = repo_path / "images" / "tokens_over_time.svg"

    html_path.parent.mkdir(parents=True, exist_ok=True)
    fig.write_html(html_path, include_plotlyjs="cdn")
    fig.write_image(svg_path)
239
+
240
+
241
# Script entry point: regenerate the tokens-over-time plot artifacts.
if __name__ == "__main__":
    create_tokens_over_time_plot()
src/dynaword/plots/plots_dataset_size.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ from pathlib import Path
4
+
5
+ import pandas as pd
6
+ import plotly.graph_objects as go
7
+
8
+ from dynaword.datasheet import DataSheet
9
+ from dynaword.paths import repo_path
10
+
11
# Configure logging
# NOTE(review): basicConfig at import time configures the root logger as a
# module side effect — acceptable for a script, surprising for a library.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
16
+
17
+
18
def _create_descriptive_stats_table(
    repo_path: Path = repo_path,
) -> pd.DataFrame:
    """
    Create a DataFrame from the descriptive statistics data.

    Each ``descriptive_stats.json`` under ``data/`` becomes one row, enriched
    with the dataset name and pretty name from the sibling datasheet, plus
    derived mean document lengths.
    """
    rows = []
    for stats_path in (repo_path / "data").glob("**/*descriptive_stats.json"):
        with stats_path.open("r") as f:
            record = json.load(f)
        # The datasheet sits next to the stats file and is named after the folder.
        sheet = DataSheet.load_from_path(
            stats_path.parent / f"{stats_path.parent.name}.md"
        )
        record["dataset_name"] = stats_path.parent.name
        record["pretty_name"] = sheet.pretty_name
        rows.append(record)

    df = pd.DataFrame(rows)
    df["mean_length_tokens"] = df["number_of_tokens"] / df["number_of_samples"]
    df["mean_length_characters"] = df["number_of_characters"] / df["number_of_samples"]
    return df
39
+
40
+
41
def plot_dataset_size(df: pd.DataFrame) -> go.Figure:
    """Plot dataset size using a range plot with min, max, and mean token lengths."""
    # Mean tokens per document (recomputed so the function is self-contained).
    df["mean_length_tokens"] = df["number_of_tokens"] / df["number_of_samples"]

    fig = go.Figure()

    # One light-gray line per dataset spanning its min..max document length.
    for entry in df.itertuples():
        fig.add_trace(
            go.Scatter(
                x=[entry.min_length_tokens, entry.max_length_tokens],
                y=[entry.dataset_name, entry.dataset_name],
                mode="lines",
                line=dict(color="lightgray", width=3),
                showlegend=False,
                hoverinfo="skip",
            )
        )

    # Endpoint markers: min (light blue), max (dark red), mean (orange diamond).
    marker_specs = (
        (
            "min_length_tokens",
            dict(color="lightblue", size=6, symbol="circle"),
            "Min tokens",
            "<b>%{y}</b><br>Min: %{x:,} tokens<extra></extra>",
        ),
        (
            "max_length_tokens",
            dict(color="darkred", size=6, symbol="circle"),
            "Max tokens",
            "<b>%{y}</b><br>Max: %{x:,} tokens<extra></extra>",
        ),
        (
            "mean_length_tokens",
            dict(color="orange", size=8, symbol="diamond"),
            "Mean tokens",
            "<b>%{y}</b><br>Mean: %{x:,.0f} tokens<extra></extra>",
        ),
    )
    for column, marker, label, template in marker_specs:
        fig.add_trace(
            go.Scatter(
                x=df[column],
                y=df["dataset_name"],
                mode="markers",
                marker=marker,
                name=label,
                hovertemplate=template,
            )
        )

    fig.update_layout(
        title="Token Length Distribution by Dataset<br><sub>Range (min-max) with mean values</sub>",
        xaxis_title="Number of Tokens (log scale)",
        xaxis_type="log",
        yaxis_title="Dataset",
        height=len(df["dataset_name"]) * 20,  # scale height with dataset count
        template="plotly_white",
        margin=dict(l=120),  # room for long dataset names
        yaxis=dict(
            tickmode="array",
            tickvals=df["dataset_name"],
            ticktext=df["pretty_name"],
            categoryorder="array",  # keep dataset order
            categoryarray=df["dataset_name"].tolist(),
            range=[-0.5, len(df["dataset_name"]) - 0.5],  # fixes top/bottom padding
        ),
    )

    return fig
117
+
118
+
119
def create_dataset_size_plot() -> None:
    """Build the dataset-size range plot and save it as HTML and SVG under images/."""
    logger.info("Creating range plot of dataset sizes using `descriptive_stats.json`.")
    stats = _create_descriptive_stats_table()
    fig = plot_dataset_size(stats)

    html_path = repo_path / "images" / "dataset_size_plot.html"
    svg_path = repo_path / "images" / "dataset_size_plot.svg"

    logger.info(f"Saving dataset size plot to {html_path} and {svg_path}.")
    html_path.parent.mkdir(parents=True, exist_ok=True)
    fig.write_html(html_path)
    fig.write_image(svg_path)
131
+
132
+
133
# Script entry point: regenerate the dataset-size plot artifacts.
if __name__ == "__main__":
    create_dataset_size_plot()
src/dynaword/process_dataset.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """"""
2
+
3
+ import logging
4
+ from functools import partial
5
+ from typing import Any
6
+
7
+ from datasets import Dataset
8
+ from transformers import AutoTokenizer
9
+
10
+ from dynaword.dataset_structure import COLUMN_ORDER, ColumnNames
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+ # TODO: Add a step to compute the size categories and update the frontmatter
15
+
16
+
17
def _tokenize_function(
    examples: dict[str, Any], tokenizer: AutoTokenizer
) -> dict[str, Any]:
    """Batched map-function that records the token count of each document.

    ``return_length=True`` makes the tokenizer report lengths directly, which
    is much faster and avoids storing all the token ids.
    """
    encoded = tokenizer(
        examples["text"],
        padding=False,
        truncation=False,
        return_length=True,
    )  # type: ignore
    return {"token_count": encoded["length"]}
27
+
28
+
29
def add_token_count(
    ds: Dataset,
    tokenizer_name: str = "AI-Sweden-Models/Llama-3-8B-instruct",
    num_proc: int = 4,
) -> Dataset:
    """Attach a ``token_count`` column computed with the named tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=True)
    tokenize = partial(_tokenize_function, tokenizer=tokenizer)  # type: ignore
    return ds.map(tokenize, batched=True, num_proc=num_proc)
40
+
41
+
42
def _filter_duplicates(example: dict[str, Any], seen_set: set) -> bool:
    """Keep only the first occurrence of each text, recording it in ``seen_set``.

    NOTE(review): relies on a shared mutable set, so it only deduplicates
    correctly when the filter runs in a single process.
    """
    text = example[ColumnNames.text.value]
    if text in seen_set:
        return False
    seen_set.add(text)
    return True
47
+
48
+
49
def remove_duplicate_text(ds: Dataset) -> Dataset:
    """Drop documents whose text duplicates an earlier document."""
    logger.info("Removing duplicate texts")
    seen_texts: set = set()
    original_size = len(ds)
    ds = ds.filter(partial(_filter_duplicates, seen_set=seen_texts))
    logger.info(f"Filtered {original_size - len(ds)} duplicate examples")
    return ds
56
+
57
+
58
def _filter_empty(example: dict[str, Any]) -> bool:
    """Return True when the document contains any non-whitespace text."""
    return bool(example[ColumnNames.text.value].strip())
60
+
61
+
62
def remove_empty_texts(ds: Dataset, num_proc: int = 4) -> Dataset:
    """Drop documents that are empty or whitespace-only."""
    logger.info("Removing empty texts")
    original_size = len(ds)
    ds = ds.filter(_filter_empty, num_proc=num_proc)
    logger.info(f"Filtered {original_size - len(ds)} empty examples")
    return ds
69
+
70
+
71
def ensure_column_order(ds: Dataset) -> Dataset:
    """Select exactly the canonical columns, in their canonical order."""
    logger.info("Ensuring columns are in the correct order and are present")
    return ds.select_columns(COLUMN_ORDER)
src/dynaword/tables.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Literal
3
+
4
+ import pandas as pd
5
+
6
+ from dynaword.datasheet import DataSheet, convert_to_human_readable
7
+ from dynaword.paths import repo_path
8
+
9
# NOTE(review): loading the main datasheet at import time performs file I/O as
# a module side effect; importing this module outside the repo root will fail.
main_sheet = DataSheet.load_from_path(repo_path / "README.md")
# Dataset config names declared in the README frontmatter ("default" is the
# combined config and is excluded).
_datasets = [
    cfg["config_name"]  # type: ignore
    for cfg in main_sheet.frontmatter["configs"]  # type: ignore
    if cfg["config_name"] != "default"  # type: ignore
]

# Reference-style markdown link targets for the standard licenses.
DEFAULT_LICENSE_REFERENCES = """[CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en
[CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en
[CC-BY 4.0]: https://creativecommons.org/licenses/by/4.0/deed.en
[Apache 2.0]: https://www.apache.org/licenses/LICENSE-2.0
"""
21
+
22
+
23
def create_license_references() -> str:
    """Return markdown link references for every license used by the datasets.

    Starts from the standard license targets and appends an anchor into each
    datasheet whose license is declared as "other".
    """
    references = DEFAULT_LICENSE_REFERENCES
    for name in _datasets:
        folder = repo_path / "data" / name
        sheet = DataSheet.load_from_path(folder / f"{folder.name}.md")

        if sheet.license == "other":
            license_name = sheet.frontmatter["license_name"]
            references += f"[{license_name}]: ./data/{folder.name}/{folder.name}.md#license-information\n"

    return references
36
+
37
+
38
def create_dataset_readme_references():
    """Return markdown link references pointing at each dataset's datasheet."""
    entries = []
    for name in _datasets:
        folder = repo_path / "data" / name
        entries.append(f"[{folder.name}]: data/{folder.name}/{folder.name}.md\n")
    return "".join(entries)
48
+
49
+
50
def create_overview_table(
    repo_path: Path = repo_path,
    add_readable_tokens: bool = True,
    add_total_row: bool = True,
    add_readme_references: bool = True,
) -> pd.DataFrame:
    """Assemble the per-dataset overview table shown in the main README.

    Rows are sorted by token count (descending). Optionally appends a total
    row, switches the Source column to markdown reference links, and formats
    the token counts for human readers.
    """
    rows = []
    for name in _datasets:
        folder = repo_path / "data" / name
        sheet = DataSheet.load_from_path(folder / f"{folder.name}.md")
        stats = sheet.get_descritive_stats()

        # Key order fixes the column order of the resulting DataFrame.
        rows.append(
            {
                "Source": f"{folder.name}",
                "Sources": f"[{folder.name}]",
                "Description": sheet.short_description,
                "Domain": sheet.domains[0] if sheet.domains else "",
                "N. Tokens": stats.number_of_tokens,
                "License": f"[{sheet.license_name}]",
            }
        )

    df = pd.DataFrame(rows)
    df = df.sort_values("N. Tokens", ascending=False)

    if add_total_row:
        total_row = {
            "Source": "**Total**",
            "Sources": "**Total**",
            "Domain": "",
            "License": "",
            "Description": "",
            "N. Tokens": sum(r["N. Tokens"] for r in rows),
        }
        df = pd.concat([df, pd.DataFrame([total_row])], ignore_index=True)

    if add_readme_references:
        # Swap the plain Source column for the markdown reference links.
        df["Source"] = df["Sources"]
    df = df.drop(columns=["Sources"])

    if add_readable_tokens:
        df["N. Tokens"] = df["N. Tokens"].apply(convert_to_human_readable)

    return df
111
+
112
+
113
def _get_normalized_license(ds: DataSheet) -> str:
    """Collapse attribution-style licenses into two "Other" buckets.

    Standard license names pass through unchanged; a missing license name is
    an error.
    """
    collapsed = {
        "Apache 2.0": "Other (Attribution required)",
        "NLOD 2.0": "Other (Attribution required)",
        "DanNet 1.0": "Other (Attribution required)",
        "Gutenberg": "Other (Attribution required)",
        "Danish Copyright Law": "Other (No attribution required)",
    }
    name = ds.license_name
    if name is None:
        raise ValueError(
            f"Datasheet {ds.pretty_name} has no license name specified in the frontmatter."
        )
    if name in collapsed:
        return collapsed[name]
    return name
131
+
132
+
133
def _get_feature_by_string(
    datasheet: DataSheet, feature_name: Literal["Domain", "Language", "License"]
) -> str:
    """Get a specific feature from the frontmatter."""
    if feature_name == "Domain":
        # Only the first (primary) domain is reported.
        return datasheet.domains[0] if datasheet.domains else "N/A"
    if feature_name == "Language":
        return ", ".join(datasheet.language)
    if feature_name == "License":
        return _get_normalized_license(datasheet)
    raise ValueError(f"Unknown feature: {feature_name}")
147
+
148
+
149
def create_grouped_table(
    group: Literal["Domain", "Language", "License"] = "Domain",
    repo_path: Path = repo_path,
    add_readable_tokens: bool = True,
    add_total_row: bool = True,
) -> pd.DataFrame:
    """Aggregate token counts per ``group`` value across all datasets.

    The optional total row is injected before grouping (its group key is
    "**Total**"); since it holds the grand total it sorts to the top and is
    then rotated to the bottom of the table.
    """
    sources: list = []
    group_values: list = []
    token_counts: list = []

    for name in _datasets:
        folder = repo_path / "data" / name
        sheet = DataSheet.load_from_path(folder / f"{folder.name}.md")
        stats = sheet.get_descritive_stats()

        sources.append(f"[{folder.name}]")
        group_values.append(_get_feature_by_string(sheet, group))
        token_counts.append(stats.number_of_tokens)

    if add_total_row:
        sources.append("")
        group_values.append("**Total**")
        token_counts.append(sum(token_counts))

    df = pd.DataFrame(
        {"Sources": sources, group: group_values, "N. Tokens": token_counts}
    )

    df = df.groupby(group).agg({"Sources": lambda x: ", ".join(x), "N. Tokens": "sum"})
    df = df.sort_values("N. Tokens", ascending=False)

    df.index.name = group
    df = df.reset_index()

    # Rotate the (largest-sum) Total row from the top to the bottom.
    rotated_index = list(df.index.drop(0)) + [0]
    df = df.reindex(rotated_index)

    if add_readable_tokens:
        df["N. Tokens"] = df["N. Tokens"].apply(convert_to_human_readable)

    return df
195
+
196
+
197
def create_grouped_table_str(
    repo_path: Path = repo_path,
    group: Literal["Domain", "Language", "License"] = "Domain",
) -> str:
    """Render the grouped table as markdown followed by the dataset link references."""
    grouped = create_grouped_table(group=group, repo_path=repo_path)
    refs = create_dataset_readme_references()
    markdown = grouped.to_markdown(index=False, maxcolwidths=[None, None, None])
    return f"{markdown}\n\n{refs}\n\n"
205
+
206
+
207
def create_overview_table_str(repo_path: Path = repo_path) -> str:
    """Render the overview table plus dataset and license link references."""
    overview = create_overview_table(repo_path)
    refs = create_dataset_readme_references()
    license_refs = create_license_references()
    return f"{overview.to_markdown(index=False)}\n\n{refs}\n\n{license_refs}\n\n"
src/dynaword/typings.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Literal

# Closed set of domain labels a datasheet may declare in its frontmatter.
DOMAIN = Literal[
    "Books",
    "Conversation",
    "Dialect",
    "Encyclopedic",
    "Legal",
    "Medical",
    "News",
    "Other",
    "Readaloud",
    "Social Media",
    "Speeches",
    "Spoken",
    "Subtitles",
    "Web",
]

# License identifiers accepted in datasheet frontmatter.
LICENSE = Literal["cc0-1.0", "other", "cc-by-sa-4.0", "apache-2.0", "cc-by-4.0"]

# Maps license identifiers to human-readable display names. "other" is
# intentionally absent: such datasets carry an explicit license_name.
LICENSE_NAMES_MAPPING = {
    "cc0-1.0": "CC0",
    "cc-by-sa-4.0": "CC BY-SA 4.0",
    "cc-by-4.0": "CC-BY 4.0",
    "apache-2.0": "Apache 2.0",
}
src/dynaword/update_descriptive_statistics.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A simple CLI to updates descriptive statistics on all datasets.
3
+
4
+ Example use:
5
+
6
+ uv run src/dynaword/update_descriptive_statistics.py --dataset wikisource
7
+
8
+ """
9
+
10
+ import argparse
11
+ import logging
12
+ from pathlib import Path
13
+ from typing import cast
14
+
15
+ import plotly.express as px
16
+ from datasets import Dataset, load_dataset
17
+
18
+ from dynaword.datasheet import DataSheet
19
+ from dynaword.descriptive_stats import DescriptiveStatsOverview
20
+ from dynaword.paths import repo_path
21
+ from dynaword.plots.plot_tokens_over_time import create_tokens_over_time_plot
22
+ from dynaword.plots.plots_dataset_size import create_dataset_size_plot
23
+ from dynaword.tables import (
24
+ create_grouped_table_str,
25
+ create_overview_table,
26
+ create_overview_table_str,
27
+ )
28
+
29
logger = logging.getLogger(__name__)

# Dataset config names declared in the main README frontmatter; the combined
# "default" config is handled separately by update_dataset().
# NOTE(review): loading the datasheet here performs file I/O at import time.
main_sheet = DataSheet.load_from_path(repo_path / "README.md")
_datasets = [
    cfg["config_name"]  # type: ignore
    for cfg in main_sheet.frontmatter["configs"]  # type: ignore
    if cfg["config_name"] != "default"  # type: ignore
]
40
+
41
+
42
def create_domain_distribution_plot(
    save_dir: Path = repo_path,
):
    """Save a sunburst of token counts by domain and source to ``save_dir``/images."""
    overview = create_overview_table(
        add_readable_tokens=False, add_total_row=False, add_readme_references=False
    )
    fig = px.sunburst(overview, path=["Domain", "Source"], values="N. Tokens")
    fig.update_traces(textinfo="label+percent entry")
    fig.update_layout(title="Dataset Distribution by Domain and Source")

    images_dir = save_dir / "images"
    images_dir.mkdir(parents=False, exist_ok=True)
    fig.write_image(
        images_dir / "domain_distribution.png",
        width=800,
        height=800,
        scale=2,
    )
+
63
+
64
def update_dataset(
    dataset_name: str,
    force: bool = False,
) -> None:
    """Recompute descriptive statistics and refresh the datasheet for one dataset.

    ``dataset_name`` may be "default", in which case the top-level README is
    refreshed by aggregating the per-dataset statistics already on disk (no
    dataset is loaded). Existing statistics are skipped unless ``force``.

    Fixes: grammar in the skip log message ("is already exists"), and the
    license table is no longer stored in a variable named ``domain_table``.
    """
    dataset_path = (
        repo_path / "data" / dataset_name if dataset_name != "default" else repo_path
    )

    if dataset_name == "default":
        readme_name = "README.md"
    else:
        readme_name = f"{dataset_name}.md"

    desc_stats_path = dataset_path / "descriptive_stats.json"
    markdown_path = dataset_path / readme_name

    if desc_stats_path.exists() and force is False:
        logger.info(
            f"descriptive statistics for '{dataset_name}' already exist (``{desc_stats_path}``), skipping."
        )
        return

    logger.info(f"Updating datasheet for: {dataset_name}")
    sheet = DataSheet.load_from_path(markdown_path)

    if dataset_name != "default":
        ds = load_dataset(str(repo_path), dataset_name, split="train")
        ds = cast(Dataset, ds)
        desc_stats = DescriptiveStatsOverview.from_dataset(ds)
        sheet.body = sheet.add_dataset_plots(ds, create_plot=True)
    else:
        # Aggregate the per-dataset statistics already computed on disk.
        desc_paths = (repo_path / "data").glob("**/*descriptive_stats.json")
        _desc_stats = [DescriptiveStatsOverview.from_disk(p) for p in desc_paths]
        desc_stats = sum(_desc_stats[1:], start=_desc_stats[0])
    desc_stats.to_disk(desc_stats_path)

    sheet.body = sheet.add_descriptive_stats(descriptive_stats=desc_stats)
    sheet.body = sheet.add_sample_and_description()

    if dataset_name == "default":
        logger.info("Updating Overview table")
        overview_table = create_overview_table_str()
        sheet.body = sheet.replace_tag(package=overview_table, tag="MAIN TABLE")
        logger.info("Updating domain table")
        domain_table = create_grouped_table_str(group="Domain")
        sheet.body = sheet.replace_tag(package=domain_table, tag="DOMAIN TABLE")
        logger.info("Updating license table")
        license_table = create_grouped_table_str(group="License")
        sheet.body = sheet.replace_tag(package=license_table, tag="LICENSE TABLE")
        # Regenerate all top-level plot artifacts alongside the README.
        create_domain_distribution_plot()
        create_tokens_over_time_plot()
        create_dataset_size_plot()

    sheet.write_to_path()
119
+
120
+
121
def create_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the descriptive-statistics updater.

    Flags:
        --dataset: restrict the update to a single dataset.
        --logging_level: numeric logging level (default 20 / INFO).
        --force / --no-force: recompute statistics even if they exist.
    """
    parser = argparse.ArgumentParser(
        description="Calculated descriptive statistics of the datasets in the data folder"
    )
    parser.add_argument(
        "--dataset",
        default=None,
        type=str,
        help="Use to specify if you only want to compute the statistics from a singular dataset.",
    )
    parser.add_argument(
        "--logging_level",
        default=20,
        type=int,
        help="Sets the logging level. Default to 20 (INFO), other reasonable levels are 10 (DEBUG) and 30 (WARNING).",
    )
    parser.add_argument(
        "--force",
        default=False,
        # Fix: removed `type=bool` — BooleanOptionalAction consumes no value,
        # so the converter was dead code (and bool("False") is True anyway).
        action=argparse.BooleanOptionalAction,
        help="Should the statistics be forcefully recomputed. By default it checks the difference in commit ids.",
    )
    return parser
145
+
146
+
147
def main(
    dataset: str | None = None,
    logging_level: int = 20,
    force: bool = False,
) -> None:
    """Update one dataset when ``dataset`` is given, otherwise all of them."""
    logging.basicConfig(level=logging_level)

    if dataset:
        update_dataset(dataset, force=force)
        return

    for dataset_name in _datasets:
        update_dataset(dataset_name, force=force)
    # The combined "default" config aggregates the per-dataset statistics,
    # so it is refreshed last.
    update_dataset("default", force=force)
160
+
161
+
162
# CLI entry point: parse arguments and forward them to main().
if __name__ == "__main__":
    parser = create_parser()
    args = parser.parse_args()

    main(
        args.dataset,
        logging_level=args.logging_level,
        force=args.force,
    )
src/tests/__init__.py ADDED
File without changes
src/tests/conftest.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pathlib import Path

from dynaword.datasheet import DataSheet

# Repository root: src/tests/conftest.py is three levels below it.
root_path = Path(__file__).parent.parent.parent
main_readme = root_path / "README.md"

main_sheet = DataSheet.load_from_path(main_readme)

# Every dataset config declared in the main README frontmatter, excluding the
# combined "default" config; used to parametrize the per-dataset tests.
DATASET_NAMES = [
    cfg["config_name"]
    for cfg in main_sheet.frontmatter["configs"]
    if cfg["config_name"] != "default"
]
src/tests/test_dataset_schema.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from datasets import load_dataset
3
+
4
+ from dynaword.dataset_structure import SampleSchema
5
+ from dynaword.paths import repo_path
6
+
7
+ from .conftest import DATASET_NAMES
8
+
9
+
10
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_sample_schema(dataset_name: str):
    """Ensure that the dataset samples follow the correct schema"""
    # Streaming avoids downloading the full split just to validate one sample.
    ds = load_dataset(
        str(repo_path.resolve()), dataset_name, split="train", streaming=True
    )
    sample = next(iter(ds))
    SampleSchema(**sample)  # constructing the schema model validates the sample
19
+
20
+
21
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_dataset_folder_structure(dataset_name: str):
    """tests that the dataset folder structure is as follows.

    dataset_name
    |- dataset_name.md
    |- dataset_name.parquet

    If there is a python file, there should at least be one called `create.py`, but there can be additional.
    """
    path = repo_path / "data" / dataset_name

    # Datasheet and data file must both be named after the folder.
    assert (path / f"{path.name}.parquet").exists()
    assert (path / f"{path.name}.md").exists()

    if any(p.name.endswith(".py") for p in path.glob("*")):
        assert (path / "create.py").exists()
src/tests/test_datasheets.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from dynaword.datasheet import DEFAULT_SECTION_TAGS, DataSheet
4
+ from dynaword.paths import repo_path
5
+
6
+ from .conftest import DATASET_NAMES
7
+
8
+
9
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_datasheet_load(dataset_name: str):
    """tests that the dataset frontmatter and markdown follows the correct format."""
    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
    ds_sheet = DataSheet.load_from_path(  # noqa: F841
        readme
    )  # will fail if format is not correct
17
+
18
+
19
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_datasheet_content_tags(dataset_name: str):
    """Ensure every required section tag is present in the datasheet body."""
    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
    ds_sheet = DataSheet.load_from_path(readme)

    # ensure tags: get_tag_idx raises when a tag is missing
    tags = [v.value for v in DEFAULT_SECTION_TAGS]
    for tag in tags:
        ds_sheet.get_tag_idx(tag)
28
+
29
+
30
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_datasheet_license_info(dataset_name: str):
    """Ensure that license information is present if license is "other"."""
    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
    ds_sheet = DataSheet.load_from_path(readme)

    if ds_sheet.license == "other":  # ensure description of underspecified licenses
        assert ds_sheet.license_information.strip()
        assert ds_sheet.license_name
39
+
40
+
41
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_datasheet_required_headings(dataset_name: str):
    """Ensure the mandatory H2 sections exist in every datasheet."""
    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
    ds_sheet = DataSheet.load_from_path(readme)

    req_h2_headings = ["## Dataset Description", "## Additional Information"]
    for req_h2 in req_h2_headings:
        assert ds_sheet.get_section_by_header(req_h2)
49
+
50
+
51
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_domains_in_frontmatter(dataset_name: str):
    """Ensure every datasheet declares at least one domain."""
    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
    ds_sheet = DataSheet.load_from_path(readme)

    assert ds_sheet.domains, "domains annotations are missing"
src/tests/test_load.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets import load_dataset
2
+
3
+ from dynaword.datasheet import DataSheet
4
+ from dynaword.paths import repo_path
5
+
6
# Datasets present in data/ but intentionally absent from the README configs.
REMOVED_DATA = [
    "lexdk"
]  # data that has been removed due to legal disputes, question about legality, or similar
9
+
10
+
11
def test_dataset_loads():
    """Ensures that the dataset can load as intended"""
    name = str(repo_path.resolve())
    # Streaming: only one sample is needed to prove the loader works.
    ds = load_dataset(name, split="train", streaming=True)
    sample = next(iter(ds))
    assert isinstance(sample, dict)
17
+
18
+
19
def test_all_datasets_in_yaml():
    """Every folder under data/ must be declared in the README configs
    (unless it was deliberately removed)."""
    ds_sheet = DataSheet.load_from_path(repo_path / "README.md")

    ds_names = {
        cfg["config_name"]
        for cfg in ds_sheet.frontmatter["configs"]
        if cfg["config_name"] != "default"
    }

    data_folder = repo_path / "data"
    datasets = data_folder.glob("*")

    for dataset in datasets:
        if dataset.name not in REMOVED_DATA:
            assert dataset.name in ds_names
src/tests/test_quality/__init__.py ADDED
File without changes
src/tests/test_quality/test_duplicates.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import cast
2
+
3
+ import pytest
4
+ from datasets import Dataset, load_dataset
5
+
6
+ from dynaword.paths import repo_path
7
+ from ..conftest import DATASET_NAMES
8
+
9
+
10
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_no_within_data_duplicates(dataset_name: str):
    """No two documents within a single dataset may share the same text."""
    ds = load_dataset(str(repo_path.resolve()), dataset_name, split="train")
    ds = cast(Dataset, ds)

    assert len(set(ds["text"])) == len(ds)
16
+
17
+
18
@pytest.mark.skip(
    "This tests takes too long to run"
)  # there seems to be some duplicate across
def test_no_data_duplicates():
    """Cross-dataset duplicate check over the combined default config (skipped)."""
    ds = load_dataset(str(repo_path.resolve()), split="train")
    ds = cast(Dataset, ds)

    assert len(set(ds["text"])) == len(ds)
src/tests/test_quality/test_short_texts.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import cast
2
+
3
+ import pytest
4
+ from datasets import Dataset, load_dataset
5
+
6
+ from dynaword.paths import repo_path
7
+
8
+ from ..conftest import DATASET_NAMES
9
+
10
+
11
@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_no_one_word_documents(dataset_name: str):
    """Documents with at most one token (empty or single-word) must not occur."""
    dataset = cast(
        Dataset,
        load_dataset(str(repo_path.resolve()), dataset_name, split="train"),
    )

    offenders = dataset.filter(lambda row: row["token_count"] <= 1)

    assert len(offenders) == 0, (
        f"Found {len(offenders)} one-word documents in dataset '{dataset_name}'"
    )
src/tests/test_unique_ids.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import Counter
2
+ from typing import cast
3
+
4
+ from datasets import Dataset, load_dataset
5
+
6
+ from dynaword.paths import repo_path
7
+
8
+
9
def test_ensure_ids_are_unique():
    """Document ids across the combined dataset must be globally unique."""
    dataset = cast(
        Dataset,
        load_dataset(str(repo_path.resolve()), split="train"),
    )
    id_counts = Counter(dataset["id"])
    duplicates = [doc_id for doc_id, n in id_counts.items() if n > 1]
    assert len(duplicates) == 0, f"Duplicate IDs found: {duplicates}"
test_results.log ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ============================= test session starts ==============================
2
+ platform darwin -- Python 3.12.0, pytest-9.0.2, pluggy-1.6.0
3
+ rootdir: /Users/au561649/Github/norwegian-dynaword
4
+ configfile: pyproject.toml
5
+ plugins: anyio-4.12.1
6
+ collected 13 items
7
+
8
+ src/tests/test_dataset_schema.py .. [ 15%]
9
+ src/tests/test_datasheets.py ..... [ 53%]
10
+ src/tests/test_load.py .. [ 69%]
11
+ src/tests/test_quality/test_duplicates.py .s [ 84%]
12
+ src/tests/test_quality/test_short_texts.py . [ 92%]
13
+ src/tests/test_unique_ids.py . [100%]
14
+
15
+ =================== 12 passed, 1 skipped in 96.50s (0:01:36) ===================
uv.lock ADDED
The diff for this file is too large to render. See raw diff