{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Note: you may need to restart the kernel to use updated packages.\n", "Note: you may need to restart the kernel to use updated packages.\n", "Note: you may need to restart the kernel to use updated packages.\n", "Note: you may need to restart the kernel to use updated packages.\n", "Note: you may need to restart the kernel to use updated packages.\n" ] } ], "source": [ "%pip install -q -U tiktoken\n", "%pip install -q -U langchain\n", "%pip install -q -U langdetect\n", "%pip install -q -U git+https://github.com/huggingface/transformers\n", "%pip install -q -U sentencepiece\n", "%pip install -q -U protobuf\n", "%pip install -q -U tqdm" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import os\n", "import json\n", "import warnings\n", "from tqdm import tqdm\n", "\n", "# import tiktoken\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langdetect import detect, detect_langs\n", "from transformers import AutoTokenizer\n", "\n", "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"True\"" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "tokeniser = AutoTokenizer.from_pretrained(\n", " \"mistralai/Mistral-7B-v0.1\", use_fast=True)\n", "\n", "\n", "def string_token_length(text):\n", " return len(tokeniser(text, add_special_tokens=False).input_ids)\n", "\n", "\n", "def write_jsonl(data, filename):\n", " with open(filename, \"w\") as f:\n", " for entry in data:\n", " f.write(json.dumps(entry) + \"\\n\")\n", "\n", "\n", "def chunkify_tex(tex_text, chunk_size=4086, chunk_overlap=1536):\n", " splitter = RecursiveCharacterTextSplitter(\n", " [\n", " r\"(?<=\\n)(?=\\\\section{)\",\n", " r\"(?<=\\n)(?=\\\\subsection{)\",\n", " r\"(?<=\\n)(?=\\\\subsubsection{)\",\n", " r\"(?<=\\\\end{proof})\",\n", " r\"(?<=\\\\qed)\",\n", " r\"\\n\\n\\n\",\n", " r\"\\n\\n\",\n", " ],\n", " keep_separator=True,\n", " is_separator_regex=True,\n", " chunk_size=chunk_size,\n", " chunk_overlap=chunk_overlap,\n", " length_function=string_token_length,\n", " )\n", "\n", " bits = splitter.split_text(tex_text)\n", "\n", " return bits" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "# Metadata object\n", "# {\n", "# \"set\": str,\n", "# \"id\": int,\n", "# \"char_len\": int,\n", "# \"tok_len\": int,\n", "# \"lang\": str,\n", "# \"text\": str\n", "# \"num_chunks\": int\n", "# \"chunk_lengths\": List[int]\n", "# \"chunks\": List[str]\n", "# }\n", "\n", "with open(\"mathpile_arxiv_subset_tiny/train.jsonl\", \"r\") as f:\n", " train = [json.loads(l) for l in f.readlines()]\n", "\n", "with open(\"mathpile_arxiv_subset_tiny/test.jsonl\", \"r\") as f:\n", " test = [json.loads(l) for l in f.readlines()]" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "8438it [02:30, 56.19it/s]\n" ] } ], "source": [ "data = [\n", " {\n", " \"set\": \"train\",\n", " \"id\": f'0.{j}',\n", " \"char_len\": len(entry[\"text\"]),\n", " # \"tok_len\": string_token_length(entry[\"text\"]),\n", " \"lang\": detect(entry[\"text\"]),\n", " \"text\": entry[\"text\"],\n", " }\n", " for j, entry in tqdm(enumerate(train), total=len(train))\n", "] + [\n", " {\n", " \"set\": \"test\",\n", " \"id\": f'1.{j}',\n", " \"char_len\": len(entry[\"text\"]),\n", " # \"tok_len\": string_token_length(entry[\"text\"]),\n", " \"lang\": detect(entry[\"text\"]),\n", " \"text\": entry[\"text\"],\n", " }\n", " for j, entry in tqdm(enumerate(test), total=len(test))\n", "]" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 8438/8438 [15:17<00:00, 9.20it/s]\n" ] } ], "source": [ "for j, datum in tqdm(enumerate(data), total=len(data)):\n", " text = datum[\"text\"]\n", " chunks = chunkify_tex(text, chunk_size=4095, chunk_overlap=1536)\n", " chunk_lengths = [string_token_length(chunk) for chunk in chunks]\n", " data[j][\"num_chunks\"] = len(chunks)\n", " data[j][\"chunk_lengths\"] = chunk_lengths\n", " data[j][\"chunks\"] = chunks\n", " data[j][\"tok_len\"] = sum(chunk_lengths)" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "data_train = [datum for datum in data if datum[\"set\"] == \"train\"]\n", "data_test = [datum for datum in data if datum[\"set\"] == \"test\"]\n", "\n", "chunked_train = []\n", "chunked_test = []\n", "\n", "for datum in data_train:\n", " total_document_tokens = datum[\"tok_len\"]\n", " document_id = datum[\"id\"]\n", " for j in range(len(datum[\"chunks\"])):\n", " chunk_id = f\"{document_id}.{j}\"\n", " chunk_text = datum[\"chunks\"][j]\n", " chunk_num_tokens = datum[\"chunk_lengths\"][j]\n", " chunked_train.append(\n", " {\n", " \"set\": \"train\",\n", " \"id\": chunk_id,\n", " \"chunk_text\": chunk_text,\n", " \"chunk_num_tokens\": chunk_num_tokens,\n", " \"document_num_tokens\": total_document_tokens,\n", " \"document_language\": datum[\"lang\"],\n", " }\n", " )\n", "\n", "for datum in data_test:\n", " total_document_tokens = datum[\"tok_len\"]\n", " document_id = datum[\"id\"]\n", " for j in range(len(datum[\"chunks\"])):\n", " chunk_id = f\"{document_id}.{j}\"\n", " chunk_text = datum[\"chunks\"][j]\n", " chunk_num_tokens = datum[\"chunk_lengths\"][j]\n", " chunked_test.append(\n", " {\n", " \"set\": \"test\",\n", " \"id\": chunk_id,\n", " \"chunk_text\": chunk_text,\n", " \"chunk_num_tokens\": chunk_num_tokens,\n", " \"document_num_tokens\": total_document_tokens,\n", " \"document_language\": datum[\"lang\"],\n", " }\n", " )" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [], "source": [ "write_jsonl(chunked_train, \"mathpile_arxiv_subset_tiny/train_chunked.jsonl\")\n", "write_jsonl(chunked_test, \"mathpile_arxiv_subset_tiny/test_chunked.jsonl\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" } }, "nbformat": 4, "nbformat_minor": 2 }