From da617042a56f0f103f4ba4b02abfdf303c336053 Mon Sep 17 00:00:00 2001 From: SayaZhang Date: Mon, 22 Jan 2024 14:33:40 +0000 Subject: [PATCH 1/5] Fix: move import to init. --- example/extract/extract_html.ipynb | 75 +++++++++++++++++------------ uniflow/op/extract/load/html_op.py | 28 ++++++----- uniflow/op/extract/load/ipynb_op.py | 29 ++++++----- 3 files changed, 79 insertions(+), 53 deletions(-) diff --git a/example/extract/extract_html.ipynb b/example/extract/extract_html.ipynb index a7e27aa5..6df0547c 100644 --- a/example/extract/extract_html.ipynb +++ b/example/extract/extract_html.ipynb @@ -67,13 +67,13 @@ { "data": { "text/plain": [ - "{'extract': ['ExtractImageFlow',\n", + "{'extract': ['ExtractHTMLFlow',\n", + " 'ExtractImageFlow',\n", " 'ExtractIpynbFlow',\n", " 'ExtractMarkdownFlow',\n", " 'ExtractPDFFlow',\n", " 'ExtractTxtFlow',\n", - " 'ExtractS3TxtFlow',\n", - " 'ExtractHTMLFlow'],\n", + " 'ExtractS3TxtFlow'],\n", " 'transform': ['TransformAzureOpenAIFlow',\n", " 'TransformCopyFlow',\n", " 'TransformHuggingFaceFlow',\n", @@ -111,16 +111,16 @@ "metadata": {}, "outputs": [], "source": [ - "# data = [{\"url\": f'https://github.com/CambioML/uniflow'}]" + "data = [{\"url\": f'https://github.com/CambioML/uniflow'}]" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ - "data = [{\"filename\": f'../transform/data/raw_input/22.11_information-theory.html'}]" + "# data = [{\"filename\": f'../transform/data/raw_input/22.11_information-theory.html'}]" ] }, { @@ -132,7 +132,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -141,7 +141,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -155,7 +155,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 1/1 [00:00<00:00, 4.53it/s]\n" + "100%|██████████| 1/1 [00:00<00:00, 1.91it/s]\n" ] } ], @@ -174,33 +174,35 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "['22.11. Information Theory — Dive into Deep Learning 1.0.3 documentation22.',\n", - " 'Appendix: Mathematics for Deep Learning',\n", - " '2.1. Data Manipulation',\n", - " '2.2. Data Preprocessing',\n", - " '2.5. Automatic Differentiation',\n", - " '2.6. Probability and Statistics',\n", - " '3. Linear Neural Networks for Regression',\n", - " '3.1. Linear Regression',\n", - " '3.2. Object-Oriented Design for Implementation',\n", - " '3.3. Synthetic Regression Data',\n", - " '3.4. Linear Regression Implementation from Scratch',\n", - " '3.5. Concise Implementation of Linear Regression',\n", - " '4. Linear Neural Networks for Classification',\n", - " '4.1. Softmax Regression',\n", - " '4.2. The Image Classification Dataset',\n", - " '4.3. The Base Classification Model',\n", - " '4.4. Softmax Regression Implementation from Scratch',\n", - " '4.5. Concise Implementation of Softmax Regression',\n", - " '4.6. Generalization in Classification',\n", - " '4.7. 
Environment and Distribution Shift']\n"
+      "['GitHub - CambioML/uniflow: Unified interface for pre-training data '\n",
+      " 'augmentation and post-training evaluation of Large Language Models '\n",
+      " '(LLMs).Skip to content',\n",
+      " 'Automate any workflow',\n",
+      " 'Host and manage packages',\n",
+      " 'Find and fix vulnerabilities',\n",
+      " 'Instant dev environments',\n",
+      " 'Write better code with AI',\n",
+      " 'Collaborate outside of code',\n",
+      " 'White papers, Ebooks, Webinars',\n",
+      " 'Fund open source developers',\n",
+      " 'GitHub community articles',\n",
+      " 'Search code, repositories, users, issues, pull requests...',\n",
+      " 'We read every piece of feedback, and take your input very seriously.',\n",
+      " 'Include my email address so I can be contacted',\n",
+      " 'Use saved searches to filter your results more quickly',\n",
+      " 'To see all available qualifiers, see our',\n",
+      " 'You signed in with another tab or window.',\n",
+      " 'to refresh your session.',\n",
+      " 'You signed out in another tab or window.',\n",
+      " 'to refresh your session.',\n",
+      " 'You switched accounts on another tab or window.']\n"
      ]
     }
    ],
@@ -209,6 +211,19 @@
     "text = output[0]['output'][0]['text'][0]\n",
     "text = [p for p in text.split(\"\\n\") if len(p) > 20]\n",
     "pprint.pprint(text[:20])"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## End of the notebook\n",
+    "\n",
+    "Check more Uniflow use cases in the [example folder](https://github.com/CambioML/uniflow/tree/main/example/model#examples)!\n",
+    "\n",
+    "\n",
+    " \n",
+    ""
+   ]
  }
 ],
 "metadata": {
diff --git a/uniflow/op/extract/load/html_op.py b/uniflow/op/extract/load/html_op.py
index 4751b5cf..2dda14e1 100644
--- a/uniflow/op/extract/load/html_op.py
+++ b/uniflow/op/extract/load/html_op.py
@@ -9,6 +9,19 @@
 class ExtractHTMLOp(Op):
     """Extract HTML Op Class."""
 
+    def __init__(self, name: str) -> None:
+        try:
+            import requests  # pylint: disable=import-outside-toplevel
+            from bs4 import BeautifulSoup  # pylint: disable=import-outside-toplevel
+        except ModuleNotFoundError as exc:
+            raise ModuleNotFoundError(
+                "Please install requests and bs4. You can use `pip install requests bs4` to install them."
+            ) from exc
+
+        super().__init__(name)
+        self._requests_client = requests
+        self._beautiful_soup_parser = BeautifulSoup
+
     def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]:
         """Run Model Op.
 
@@ -22,9 +35,7 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]:
         for node in nodes:
             value_dict = copy.deepcopy(node.value_dict)
             if "url" in value_dict:
-                import requests  # pylint: disable=import-outside-toplevel
-
-                resp = requests.get(url=value_dict["url"], timeout=300)
+                resp = self._requests_client.get(url=value_dict["url"], timeout=300)
                 text = resp.text
             else:
                 with open(
@@ -43,16 +54,9 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]:
         )
         return output_nodes
 
-    def parse_html(self, text):
+    def parse_html(self, text) -> str:
         """Function Parse Html."""
-        try:
-            from bs4 import BeautifulSoup  # pylint: disable=import-outside-toplevel
-        except ModuleNotFoundError as exc:
-            raise ModuleNotFoundError(
-                "Please install bs4. You can use `pip install bs4` to install them."
- ) from exc - - soup = BeautifulSoup(text, "html.parser") + soup = self._beautiful_soup_parser(text, "html.parser") if soup.title: title = str(soup.title.string) diff --git a/uniflow/op/extract/load/ipynb_op.py b/uniflow/op/extract/load/ipynb_op.py index 36c2351e..fb2aed63 100644 --- a/uniflow/op/extract/load/ipynb_op.py +++ b/uniflow/op/extract/load/ipynb_op.py @@ -9,15 +9,7 @@ class ExtractIpynbOp(Op): """Extract ipynb Op Class.""" - def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]: - """Run Model Op. - - Args: - nodes (Sequence[Node]): Nodes to run. - - Returns: - Sequence[Node]: Nodes after running. - """ + def __init__(self, name: str) -> None: try: import nbformat # pylint: disable=import-outside-toplevel from nbconvert import ( # pylint: disable=import-outside-toplevel @@ -27,11 +19,26 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]: raise ModuleNotFoundError( "Please install nbformat and nbconvert to load ipynb file. You can use `pip install nbformat nbconvert` to install them." ) from exc + + super().__init__(name) + self._nbformat = nbformat + self._markdown_exporter = MarkdownExporter + + def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]: + """Run Model Op. + + Args: + nodes (Sequence[Node]): Nodes to run. + + Returns: + Sequence[Node]: Nodes after running. + """ + output_nodes = [] for node in nodes: value_dict = copy.deepcopy(node.value_dict) - nb = nbformat.read(value_dict["filename"], as_version=4) - md_exporter = MarkdownExporter() + nb = self._nbformat.read(value_dict["filename"], as_version=4) + md_exporter = self._markdown_exporter() (text, _) = md_exporter.from_notebook_node(nb) output_nodes.append( Node( From b5e69b482c2688fdf74e945497fee5a0d1aefb9b Mon Sep 17 00:00:00 2001 From: SayaZhang Date: Tue, 23 Jan 2024 15:29:33 +0000 Subject: [PATCH 2/5] Add recursive character splitter --- .../extract_txt_with_recursive_splitter.ipynb | 198 ++++++++++++++++++ uniflow/op/extract/split/constants.py | 1 + .../split/recursive_character_splitter.py | 147 +++++++++++++ uniflow/op/extract/split/splitter_factory.py | 7 + 4 files changed, 353 insertions(+) create mode 100644 example/extract/extract_txt_with_recursive_splitter.ipynb create mode 100644 uniflow/op/extract/split/recursive_character_splitter.py diff --git a/example/extract/extract_txt_with_recursive_splitter.ipynb b/example/extract/extract_txt_with_recursive_splitter.ipynb new file mode 100644 index 00000000..e8aae586 --- /dev/null +++ b/example/extract/extract_txt_with_recursive_splitter.ipynb @@ -0,0 +1,198 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%reload_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import sys\n", + "\n", + "sys.path.append(\".\")\n", + "sys.path.append(\"..\")\n", + "sys.path.append(\"../..\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ubuntu/anaconda3/envs/uniflow/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import os\n", + "import pandas as pd\n", + "from uniflow.flow.client import ExtractClient, TransformClient\n", + "from uniflow.flow.config import TransformOpenAIConfig, ExtractPDFConfig\n", + "from uniflow.op.model.model_config import OpenAIModelConfig, NougatModelConfig\n", + "from uniflow.op.prompt import PromptTemplate, Context\n", + "from uniflow.op.extract.split.splitter_factory import SplitterOpsFactory\n", + "from uniflow.op.extract.split.constants import RECURSIVE_CHARACTER_SPLITTER" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "dir_cur = os.getcwd()\n", + "pdf_file = \"1408.5882_page-1.pdf\"\n", + "input_file = os.path.join(f\"{dir_cur}/data/raw_input/\", pdf_file)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['ParagraphSplitter', 'MarkdownHeaderSplitter', 'RecursiveCharacterSplitter']" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "SplitterOpsFactory.list()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ubuntu/anaconda3/envs/uniflow/lib/python3.10/site-packages/torch/functional.py:504: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:3526.)\n", + " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n" + ] + } + ], + "source": [ + "data = [\n", + " {\"filename\": input_file},\n", + "]\n", + "\n", + "config = ExtractPDFConfig(\n", + " model_config=NougatModelConfig(\n", + " model_name = \"0.1.0-small\",\n", + " batch_size = 1 # When batch_size>1, nougat will run on CUDA, otherwise it will run on CPU\n", + " ),\n", + " splitter=RECURSIVE_CHARACTER_SPLITTER,\n", + ")\n", + "nougat_client = ExtractClient(config)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/1 [00:00}]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "output = nougat_client.run(data)\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "contexts = output[0]['output'][0]['text']" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 # Convolutional Neural Networks for Sentence Classification Yoon KimNew York Universityyhk255@nyu.edu###### AbstractWe report on a series of experiments with convolutional neural networks (CNN) trained on top of pre-trained word vectors for sentence-level classification tasks. We show that a simple CNN with little hyperparameter tuning and static vectors achieves excellent results on multiple benchmarks. Learning task-specific vectors through fine-tuning offers further gains in performance. We additionally propose a simple modification to the architecture to allow for the use of both task-specific and static vectors. 
The CNN models discussed herein improve upon the state of the art on 4 out of 7 tasks, which include sentiment analysis and question classification.## 1 Introduction\n", + "1 Deep learning models have achieved remarkable results in computer vision [11] and speech recognition [1] in recent years. Within natural language processing, much of the work with deep learning methods has involved learning word vector representations through neural language models [1, 1, 2] and performing composition over the learned word vectors for classification [1]. Word vectors, wherein words are projected from a sparse, 1-of-\\(V\\) encoding (here \\(V\\) is the vocabulary size) onto a lower dimensional vector space via a hidden layer, are essentially feature extractors that encode semantic features of words in their dimensions. In such dense representations, semantically close words are likewise close--in euclidean or cosine distance--in the lower dimensional vector space.\n", + "2 Convolutional neural networks (CNN) utilize layers with convolving filters that are applied to local features [1]. Originally invented for computer vision, CNN models have subsequently been shown to be effective for NLP and have achieved excellent results in semantic parsing [13], search query retrieval [2], sentence modeling [1], and other traditional NLP tasks [1].\n", + "3 In the present work, we train a simple CNN with one layer of convolution on top of word vectors obtained from an unsupervised neural language model. These vectors were trained by Mikolov et al. (2013) on 100 billion words of Google News, and are publicly available.1 We initially keep the word vectors static and learn only the other parameters of the model. Despite little tuning of hyperparameters, this simple model achieves excellent results on multiple benchmarks, suggesting that the pre-trained vectors are 'universal' feature extractors that can be utilized for various classification tasks. Learning task-specific vectors through fine-tuning results in further improvements. We finally describe a simple modification to the architecture to allow for the use of both pre-trained and task-specific vectors by having multiple channels.Footnote 1: [https://code.google.com/p/word2vec/](https://code.google.com/p/word2vec/)\n", + "4 Our work is philosophically similar to Razavian et al. (2014) which showed that for image classification, feature extractors obtained from a pre-trained deep learning model perform well on a variety of tasks--including tasks that are very different from the original task for which the feature extractors were trained.## 2 ModelThe model architecture, shown in figure 1, is a slight variant of the CNN architecture of Collobert et al. (2011). Let \\(\\mathbf{x}_{i}\\in\\mathbb{R}^{k}\\) be the \\(k\\)-dimensional word vector corresponding to the \\(i\\)-th word in the sentence. 
A sentence of length \(n\) (padded where\n"
+     ]
+    }
+   ],
+   "source": [
+    "for i, _s in enumerate(contexts):\n",
+    "    print(i, _s)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "uniflow",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/uniflow/op/extract/split/constants.py b/uniflow/op/extract/split/constants.py
index b751eef2..c7e9192c 100644
--- a/uniflow/op/extract/split/constants.py
+++ b/uniflow/op/extract/split/constants.py
@@ -2,3 +2,4 @@
 
 PARAGRAPH_SPLITTER = "ParagraphSplitter"
 MARKDOWN_HEADER_SPLITTER = "MarkdownHeaderSplitter"
+RECURSIVE_CHARACTER_SPLITTER = "RecursiveCharacterSplitter"
diff --git a/uniflow/op/extract/split/recursive_character_splitter.py b/uniflow/op/extract/split/recursive_character_splitter.py
new file mode 100644
index 00000000..e2bed0f2
--- /dev/null
+++ b/uniflow/op/extract/split/recursive_character_splitter.py
@@ -0,0 +1,147 @@
+"""Recursive character split op."""
+
+import copy
+import re
+from typing import Iterable, List, Optional, Sequence
+
+from uniflow.node import Node
+from uniflow.op.op import Op
+
+
+class RecursiveCharacterSplitter(Op):
+    """Recursive character splitter class."""
+
+    default_separators = ["\n\n", "\n", " ", ""]
+
+    def __init__(
+        self,
+        name: str,
+        chunk_size: int = 1024,
+        chunk_overlap_size: int = 0,
+        separators: Optional[List[str]] = None,
+    ) -> None:
+        """Recursive Splitter Op Constructor
+
+        Args:
+            name (str): Name of the op.
+            chunk_size (int): Maximum size of chunks to return.
+            chunk_overlap_size (int): Overlap in characters between chunks.
+            separators (List[str]): Separators to use.
+        """
+        super().__init__(name)
+        self._chunk_size = chunk_size
+        self._chunk_overlap_size = chunk_overlap_size
+        self._separators = separators or self.default_separators
+
+    def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]:
+        """Run Model Op.
+
+        Args:
+            nodes (Sequence[Node]): Nodes to run.
+            separators(List[str]): separators for split.
+
+        Returns:
+            Sequence[Node]: Nodes after running.
+        """
+        output_nodes = []
+        for node in nodes:
+            value_dict = copy.deepcopy(node.value_dict)
+            text = value_dict["text"]
+            text = self.recursive_splitter(text.strip(), self._separators)
+            output_nodes.append(
+                Node(
+                    name=self.unique_name(),
+                    value_dict={"text": text},
+                    prev_nodes=[node],
+                )
+            )
+        return output_nodes
+
+    def recursive_splitter(self, text: str, separators: List[str]) -> List[str]:
+        """Split incoming text and return chunks."""
+        final_chunks, next_separators = [], []
+
+        if len(separators) == 0:
+            return final_chunks
+
+        # Get current and next separators
+        cur_separator = separators[-1]
+        for i, _s in enumerate(separators):
+            _separator = re.escape(_s)
+            if _s == "":
+                cur_separator = _s
+                break
+            if re.search(_separator, text):
+                cur_separator = _s
+                next_separators = separators[(i + 1) :]
+                break
+
+        cur_separator = re.escape(cur_separator)
+        splits = [s for s in re.split(cur_separator, text) if s != ""]
+
+        # Now go merging things, recursively splitting longer texts.
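+        # _separator is "" here because re.split has already stripped the
+        # separator from each piece, so merged chunks are joined without it.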
+ _tmp_splits, _separator = [], "" + for s in splits: + if len(s) < self._chunk_size: + _tmp_splits.append(s) + else: + if _tmp_splits: + merged_text = self._merge_splits(_tmp_splits, _separator) + final_chunks.extend(merged_text) + _tmp_splits = [] + if not next_separators: + final_chunks.append(s) + else: + other_info = self.recursive_splitter(s, next_separators) + final_chunks.extend(other_info) + + if _tmp_splits: + merged_text = self._merge_splits(_tmp_splits, _separator) + final_chunks.extend(merged_text) + + return final_chunks + + def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]: + # Combine these smaller pieces into medium size chunks. + separator_len = len(separator) + + docs, total = [], 0 + current_doc: List[str] = [] + for s in splits: + _len = len(s) + if ( + total + _len + (separator_len if len(current_doc) > 0 else 0) + > self._chunk_size + ): + if total > self._chunk_size: + print( + f"Created a chunk of size {total}, " + f"which is longer than the specified {self._chunk_size}" + ) + if len(current_doc) > 0: + doc = separator.join(current_doc).strip() + if doc is not None: + docs.append(doc) + # Keep on popping if: + # - we have a larger chunk than in the chunk overlap + # - or if we still have any chunks and the length is long + while total > self._chunk_overlap_size or ( + total + _len + (separator_len if len(current_doc) > 0 else 0) + > self._chunk_size + and total > 0 + ): + total -= len(current_doc[0]) + ( + separator_len if len(current_doc) > 1 else 0 + ) + current_doc = current_doc[1:] + + current_doc.append(s) + total += _len + (separator_len if len(current_doc) > 1 else 0) + + doc = separator.join(current_doc).strip() + + if doc is not None: + docs.append(doc) + + return docs diff --git a/uniflow/op/extract/split/splitter_factory.py b/uniflow/op/extract/split/splitter_factory.py index f814b98d..e52ab96e 100644 --- a/uniflow/op/extract/split/splitter_factory.py +++ b/uniflow/op/extract/split/splitter_factory.py @@ -4,9 +4,13 @@ from uniflow.op.extract.split.constants import ( MARKDOWN_HEADER_SPLITTER, PARAGRAPH_SPLITTER, + RECURSIVE_CHARACTER_SPLITTER, ) from uniflow.op.extract.split.markdown_header_splitter import MarkdownHeaderSplitter from uniflow.op.extract.split.pattern_splitter_op import PatternSplitter +from uniflow.op.extract.split.recursive_character_splitter import ( + RecursiveCharacterSplitter, +) class SplitterOpsFactory: @@ -17,6 +21,9 @@ class SplitterOpsFactory: MARKDOWN_HEADER_SPLITTER: MarkdownHeaderSplitter( name="markdown_header_split_op" ), + RECURSIVE_CHARACTER_SPLITTER: RecursiveCharacterSplitter( + name="recursive_character_split_op" + ), } @staticmethod From b9bfad6176dc5cbf232b9da9a1b6a371c1ff3192 Mon Sep 17 00:00:00 2001 From: SayaZhang Date: Sat, 27 Jan 2024 13:21:59 +0000 Subject: [PATCH 3/5] Update recursive splitter --- .../extract_pdf_with_recursive_splitter.ipynb | 265 ++++++++++++++++++ .../extract_txt_with_recursive_splitter.ipynb | 198 ------------- uniflow/flow/extract/extract_txt_flow.py | 6 +- uniflow/op/extract/load/html_op.py | 13 +- .../split/recursive_character_splitter.py | 53 +++- 5 files changed, 320 insertions(+), 215 deletions(-) create mode 100644 example/extract/extract_pdf_with_recursive_splitter.ipynb delete mode 100644 example/extract/extract_txt_with_recursive_splitter.ipynb diff --git a/example/extract/extract_pdf_with_recursive_splitter.ipynb b/example/extract/extract_pdf_with_recursive_splitter.ipynb new file mode 100644 index 00000000..d524e467 --- /dev/null +++ 
b/example/extract/extract_pdf_with_recursive_splitter.ipynb
@@ -0,0 +1,265 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Example of loading PDF using recursive splitter\n",
+    "\n",
+    "Recursive Splitter: Splitting text by recursively looking at characters.\n",
+    "It recursively tries to split by different characters to find one that works."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Before running the code\n",
+    "\n",
+    "You will need the `uniflow` conda environment to run this notebook. You can set up the environment by following the instructions: https://github.com/CambioML/uniflow/tree/main#installation. Furthermore, make sure you have the following packages installed:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# pip3 install nougat-ocr"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%reload_ext autoreload\n",
+    "%autoreload 2\n",
+    "\n",
+    "import sys\n",
+    "\n",
+    "sys.path.append(\".\")\n",
+    "sys.path.append(\"..\")\n",
+    "sys.path.append(\"../..\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/ubuntu/anaconda3/envs/uniflow/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      "  from .autonotebook import tqdm as notebook_tqdm\n"
+     ]
+    }
+   ],
+   "source": [
+    "import os\n",
+    "import pandas as pd\n",
+    "import pprint\n",
+    "from uniflow.flow.client import ExtractClient, TransformClient\n",
+    "from uniflow.flow.config import TransformOpenAIConfig, ExtractPDFConfig\n",
+    "from uniflow.op.model.model_config import OpenAIModelConfig, NougatModelConfig\n",
+    "from uniflow.op.prompt import PromptTemplate, Context\n",
+    "from uniflow.op.extract.split.splitter_factory import SplitterOpsFactory\n",
+    "from uniflow.op.extract.split.constants import RECURSIVE_CHARACTER_SPLITTER"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Prepare the input data\n",
+    "\n",
+    "First, let's set the current directory and the input data directory, and load the raw data."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dir_cur = os.getcwd()\n",
+    "pdf_file = \"1408.5882_page-1.pdf\"\n",
+    "input_file = os.path.join(f\"{dir_cur}/data/raw_input/\", pdf_file)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### List all the available splitters\n",
+    "These are the different splitters we can use to post-process the loaded PDF."
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['ParagraphSplitter', 'MarkdownHeaderSplitter', 'RecursiveCharacterSplitter']" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "SplitterOpsFactory.list()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Load the pdf using recursive splitter" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ubuntu/anaconda3/envs/uniflow/lib/python3.10/site-packages/torch/functional.py:504: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:3526.)\n", + " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n" + ] + } + ], + "source": [ + "data = [\n", + " {\"filename\": input_file},\n", + "]\n", + "\n", + "config = ExtractPDFConfig(\n", + " model_config=NougatModelConfig(\n", + " model_name = \"0.1.0-small\",\n", + " batch_size = 1 # When batch_size>1, nougat will run on CUDA, otherwise it will run on CPU\n", + " ),\n", + " splitter=RECURSIVE_CHARACTER_SPLITTER,\n", + ")\n", + "nougat_client = ExtractClient(config)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/1 [00:00\n", + " \n", + "" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "uniflow", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/example/extract/extract_txt_with_recursive_splitter.ipynb b/example/extract/extract_txt_with_recursive_splitter.ipynb deleted file mode 100644 index e8aae586..00000000 --- a/example/extract/extract_txt_with_recursive_splitter.ipynb +++ /dev/null @@ -1,198 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "%reload_ext autoreload\n", - "%autoreload 2\n", - "\n", - "import sys\n", - "\n", - "sys.path.append(\".\")\n", - "sys.path.append(\"..\")\n", - "sys.path.append(\"../..\")" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/ubuntu/anaconda3/envs/uniflow/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], - "source": [ - "import os\n", - "import pandas as pd\n", - "from uniflow.flow.client import ExtractClient, TransformClient\n", - "from uniflow.flow.config import TransformOpenAIConfig, ExtractPDFConfig\n", - "from uniflow.op.model.model_config import OpenAIModelConfig, NougatModelConfig\n", - "from uniflow.op.prompt import PromptTemplate, Context\n", - "from uniflow.op.extract.split.splitter_factory import SplitterOpsFactory\n", - "from uniflow.op.extract.split.constants import RECURSIVE_CHARACTER_SPLITTER" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "dir_cur = os.getcwd()\n", - "pdf_file = \"1408.5882_page-1.pdf\"\n", - "input_file = os.path.join(f\"{dir_cur}/data/raw_input/\", pdf_file)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['ParagraphSplitter', 'MarkdownHeaderSplitter', 'RecursiveCharacterSplitter']" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "SplitterOpsFactory.list()" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/ubuntu/anaconda3/envs/uniflow/lib/python3.10/site-packages/torch/functional.py:504: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:3526.)\n", - " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n" - ] - } - ], - "source": [ - "data = [\n", - " {\"filename\": input_file},\n", - "]\n", - "\n", - "config = ExtractPDFConfig(\n", - " model_config=NougatModelConfig(\n", - " model_name = \"0.1.0-small\",\n", - " batch_size = 1 # When batch_size>1, nougat will run on CUDA, otherwise it will run on CPU\n", - " ),\n", - " splitter=RECURSIVE_CHARACTER_SPLITTER,\n", - ")\n", - "nougat_client = ExtractClient(config)\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - " 0%| | 0/1 [00:00}]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "output = nougat_client.run(data)\n", - "output" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "contexts = output[0]['output'][0]['text']" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0 # Convolutional Neural Networks for Sentence Classification Yoon KimNew York Universityyhk255@nyu.edu###### AbstractWe report on a series of experiments with convolutional neural networks (CNN) trained on top of pre-trained word vectors for sentence-level classification tasks. We show that a simple CNN with little hyperparameter tuning and static vectors achieves excellent results on multiple benchmarks. Learning task-specific vectors through fine-tuning offers further gains in performance. We additionally propose a simple modification to the architecture to allow for the use of both task-specific and static vectors. 
The CNN models discussed herein improve upon the state of the art on 4 out of 7 tasks, which include sentiment analysis and question classification.## 1 Introduction\n", - "1 Deep learning models have achieved remarkable results in computer vision [11] and speech recognition [1] in recent years. Within natural language processing, much of the work with deep learning methods has involved learning word vector representations through neural language models [1, 1, 2] and performing composition over the learned word vectors for classification [1]. Word vectors, wherein words are projected from a sparse, 1-of-\\(V\\) encoding (here \\(V\\) is the vocabulary size) onto a lower dimensional vector space via a hidden layer, are essentially feature extractors that encode semantic features of words in their dimensions. In such dense representations, semantically close words are likewise close--in euclidean or cosine distance--in the lower dimensional vector space.\n", - "2 Convolutional neural networks (CNN) utilize layers with convolving filters that are applied to local features [1]. Originally invented for computer vision, CNN models have subsequently been shown to be effective for NLP and have achieved excellent results in semantic parsing [13], search query retrieval [2], sentence modeling [1], and other traditional NLP tasks [1].\n", - "3 In the present work, we train a simple CNN with one layer of convolution on top of word vectors obtained from an unsupervised neural language model. These vectors were trained by Mikolov et al. (2013) on 100 billion words of Google News, and are publicly available.1 We initially keep the word vectors static and learn only the other parameters of the model. Despite little tuning of hyperparameters, this simple model achieves excellent results on multiple benchmarks, suggesting that the pre-trained vectors are 'universal' feature extractors that can be utilized for various classification tasks. Learning task-specific vectors through fine-tuning results in further improvements. We finally describe a simple modification to the architecture to allow for the use of both pre-trained and task-specific vectors by having multiple channels.Footnote 1: [https://code.google.com/p/word2vec/](https://code.google.com/p/word2vec/)\n", - "4 Our work is philosophically similar to Razavian et al. (2014) which showed that for image classification, feature extractors obtained from a pre-trained deep learning model perform well on a variety of tasks--including tasks that are very different from the original task for which the feature extractors were trained.## 2 ModelThe model architecture, shown in figure 1, is a slight variant of the CNN architecture of Collobert et al. (2011). Let \\(\\mathbf{x}_{i}\\in\\mathbb{R}^{k}\\) be the \\(k\\)-dimensional word vector corresponding to the \\(i\\)-th word in the sentence. 
A sentence of length \\(n\\) (padded where\n" - ] - } - ], - "source": [ - "for i, _s in enumerate(contexts):\n", - " print(i, _s)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "uniflow", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.13" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/uniflow/flow/extract/extract_txt_flow.py b/uniflow/flow/extract/extract_txt_flow.py index 9d61e55d..abf3f9a5 100644 --- a/uniflow/flow/extract/extract_txt_flow.py +++ b/uniflow/flow/extract/extract_txt_flow.py @@ -7,6 +7,8 @@ from uniflow.node import Node from uniflow.op.extract.load.aws.s3_op import ExtractS3Op from uniflow.op.extract.load.txt_op import ExtractTxtOp, ProcessTxtOp +from uniflow.op.extract.split.constants import PARAGRAPH_SPLITTER +from uniflow.op.extract.split.splitter_factory import SplitterOpsFactory class ExtractTxtFlow(Flow): @@ -14,11 +16,12 @@ class ExtractTxtFlow(Flow): TAG = EXTRACT - def __init__(self) -> None: + def __init__(self, splitter: str = PARAGRAPH_SPLITTER) -> None: """Extract txt Flow Constructor.""" super().__init__() self._extract_txt_op = ExtractTxtOp(name="extract_txt_op") self._process_txt_op = ProcessTxtOp(name="process_txt_op") + self._split_op = SplitterOpsFactory.get(splitter) def run(self, nodes: Sequence[Node]) -> Sequence[Node]: """Run Extract txt Flow. @@ -31,6 +34,7 @@ def run(self, nodes: Sequence[Node]) -> Sequence[Node]: """ nodes = self._extract_txt_op(nodes) nodes = self._process_txt_op(nodes) + nodes = self._split_op(nodes) return nodes diff --git a/uniflow/op/extract/load/html_op.py b/uniflow/op/extract/load/html_op.py index 446cff6e..618e90c9 100644 --- a/uniflow/op/extract/load/html_op.py +++ b/uniflow/op/extract/load/html_op.py @@ -45,7 +45,7 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]: encoding=value_dict.get("encoding", "utf-8"), ) as f: text = f.read() - text = self.parse_html(text) + text = self._parse_html(text) output_nodes.append( Node( name=self.unique_name(), @@ -55,8 +55,15 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]: ) return output_nodes - def parse_html(self, text) -> str: - """Function Parse Html.""" + def _parse_html(self, text: str) -> str: + """Function Parse Html. + + Args: + text (str): Raw html text. + + Returns: + str: Parsed html text. + """ soup = self._beautiful_soup_parser(text, "html.parser") if soup.title: diff --git a/uniflow/op/extract/split/recursive_character_splitter.py b/uniflow/op/extract/split/recursive_character_splitter.py index e2bed0f2..1c90fc2b 100644 --- a/uniflow/op/extract/split/recursive_character_splitter.py +++ b/uniflow/op/extract/split/recursive_character_splitter.py @@ -22,6 +22,9 @@ def __init__( ) -> None: """Recursive Splitter Op Constructor + This has the effect of trying to keep all paragraphs (and then sentences, and then words) together + as long as possible, as those would generically seem to be the strongest semantically related pieces of text. + Args: name (str): Name of the op. chunk_size (int): Maximum size of chunks to return. @@ -39,7 +42,6 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]: Args: nodes (Sequence[Node]): Nodes to run. - separators(List[str]): separators for split. Returns: Sequence[Node]: Nodes after running. 
@@ -48,7 +50,7 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]:
         for node in nodes:
             value_dict = copy.deepcopy(node.value_dict)
             text = value_dict["text"]
-            text = self.recursive_splitter(text.strip(), self._separators)
+            text = self._recursive_splitter(text.strip(), self._separators)
             output_nodes.append(
                 Node(
                     name=self.unique_name(),
@@ -58,8 +60,21 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]:
         )
         return output_nodes
 
-    def recursive_splitter(self, text: str, separators: List[str]) -> List[str]:
-        """Split incoming text and return chunks."""
+    def _recursive_splitter(self, text: str, separators: List[str]) -> List[str]:
+        """Split incoming text and return chunks.
+
+        It takes in the large text and first tries to split it by the first separator, "\n\n". If a piece is still
+        larger than the specified chunk size, it moves to the next separator, "\n", and tries to split by that. It
+        keeps moving through the separator list until every resulting piece is smaller than the specified chunk
+        size. The default separators list is ["\n\n", "\n", " ", ""].
+
+        Args:
+            text (str): Text to split.
+            separators (List[str]): Separators for split.
+
+        Returns:
+            List[str]: Chunks after split.
+        """
         final_chunks, next_separators = [], []
 
         if len(separators) == 0:
@@ -77,23 +92,28 @@ def recursive_splitter(self, text: str, separators: List[str]) -> List[str]:
             next_separators = separators[(i + 1) :]
             break
 
+        # Split by the current separator first
        cur_separator = re.escape(cur_separator)
         splits = [s for s in re.split(cur_separator, text) if s != ""]
 
-        # Now go merging things, recursively splitting longer texts.
+        # Then go merging things, recursively splitting longer texts.
         # _separator is "" here because re.split has already stripped the
         # separator from each piece, so merged chunks are joined without it.
         _tmp_splits, _separator = [], ""
         for s in splits:
             if len(s) < self._chunk_size:
                 _tmp_splits.append(s)
             else:
+                # merge split texts into a chunk
                 if _tmp_splits:
                     merged_text = self._merge_splits(_tmp_splits, _separator)
                     final_chunks.extend(merged_text)
+                    # reset tmp_splits
                     _tmp_splits = []
+
+                # recursively split using next separators
                 if not next_separators:
                     final_chunks.append(s)
                 else:
-                    other_info = self.recursive_splitter(s, next_separators)
+                    other_info = self._recursive_splitter(s, next_separators)
                     final_chunks.extend(other_info)
 
         if _tmp_splits:
@@ -103,17 +123,26 @@ def recursive_splitter(self, text: str, separators: List[str]) -> List[str]:
         return final_chunks
 
     def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
-        # Combine these smaller pieces into medium size chunks.
+        """Combine these smaller pieces into medium size chunks.
+
+        Args:
+            splits (Iterable[str]): Smaller pieces before merge.
+            separator (str): Separator for merge.
+
+        Returns:
+            List[str]: Merged medium size chunks.
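+
+        Example (a sketch, assuming chunk_size=10 and chunk_overlap_size=0):
+            _merge_splits(["aaa", "bbb", "ccc"], " ") -> ["aaa bbb", "ccc"]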
+        """
         separator_len = len(separator)
 
         docs, total = [], 0
         current_doc: List[str] = []
         for s in splits:
             _len = len(s)
-            if (
+            current_length = (
                 total + _len + (separator_len if len(current_doc) > 0 else 0)
-                > self._chunk_size
-            ):
+            )
+
+            if current_length > self._chunk_size:
                 if total > self._chunk_size:
                     print(
                         f"Created a chunk of size {total}, "
@@ -127,9 +156,7 @@ def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
             # - we have a larger chunk than in the chunk overlap
             # - or if we still have any chunks and the length is long
             while total > self._chunk_overlap_size or (
-                total + _len + (separator_len if len(current_doc) > 0 else 0)
-                > self._chunk_size
-                and total > 0
+                current_length > self._chunk_size and total > 0
             ):
                 total -= len(current_doc[0]) + (
                     separator_len if len(current_doc) > 1 else 0
From c3e5c155984c9a79c70783bc006f8d58b155c161 Mon Sep 17 00:00:00 2001
From: SayaZhang
Date: Sat, 3 Feb 2024 02:23:07 +0000
Subject: [PATCH 4/5] Update html_op param

---
 example/extract/extract_html.ipynb       | 70 ++++++++++++------------
 uniflow/flow/extract/extract_txt_flow.py |  6 +-
 uniflow/op/extract/load/html_op.py       | 19 ++++++-
 3 files changed, 53 insertions(+), 42 deletions(-)

diff --git a/example/extract/extract_html.ipynb b/example/extract/extract_html.ipynb
index 6df0547c..a08efa32 100644
--- a/example/extract/extract_html.ipynb
+++ b/example/extract/extract_html.ipynb
@@ -116,11 +116,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data = [{\"filename\": f'../transform/data/raw_input/22.11_information-theory.html'}]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "# data = [{\"filename\": f'../transform/data/raw_input/22.11_information-theory.html'}]"
+    "data = [{\"tt\": \"11\"}]"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -132,7 +141,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 9,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -141,21 +150,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "100%|██████████| 1/1 [00:00<00:00, 1.91it/s]\n"
+      "100%|██████████| 1/1 [00:00<00:00, 10.74it/s]\n"
      ]
     }
    ],
diff --git a/uniflow/flow/extract/extract_txt_flow.py b/uniflow/flow/extract/extract_txt_flow.py
index abf3f9a5..9d61e55d 100644
--- a/uniflow/flow/extract/extract_txt_flow.py
+++ b/uniflow/flow/extract/extract_txt_flow.py
@@ -7,8 +7,6 @@
 from uniflow.node import Node
 from uniflow.op.extract.load.aws.s3_op import ExtractS3Op
 from uniflow.op.extract.load.txt_op import ExtractTxtOp, ProcessTxtOp
-from uniflow.op.extract.split.constants import PARAGRAPH_SPLITTER
-from uniflow.op.extract.split.splitter_factory import SplitterOpsFactory
 
 
 class ExtractTxtFlow(Flow):
@@ -16,12 +14,11 @@ class ExtractTxtFlow(Flow):
 
     TAG = EXTRACT
 
-    def __init__(self, splitter: str = PARAGRAPH_SPLITTER) -> None:
+    def __init__(self) -> None:
         """Extract txt Flow Constructor."""
         super().__init__()
         self._extract_txt_op = ExtractTxtOp(name="extract_txt_op")
         self._process_txt_op = ProcessTxtOp(name="process_txt_op")
-        self._split_op = SplitterOpsFactory.get(splitter)
 
     def run(self, nodes: Sequence[Node]) -> Sequence[Node]:
         """Run Extract txt Flow.
@@ -34,7 +31,6 @@ def run(self, nodes: Sequence[Node]) -> Sequence[Node]:
         """
         nodes = self._extract_txt_op(nodes)
         nodes = self._process_txt_op(nodes)
-        nodes = self._split_op(nodes)
         return nodes
 
 
diff --git a/uniflow/op/extract/load/html_op.py b/uniflow/op/extract/load/html_op.py
index 618e90c9..19a5ebbb 100644
--- a/uniflow/op/extract/load/html_op.py
+++ b/uniflow/op/extract/load/html_op.py
@@ -35,16 +35,31 @@ def __call__(self, nodes: Sequence[Node]) -> Sequence[Node]:
         output_nodes = []
         for node in nodes:
             value_dict = copy.deepcopy(node.value_dict)
+
             if "url" in value_dict:
                 resp = self._requests_client.get(url=value_dict["url"], timeout=300)
+                if not resp.ok:
+                    raise ValueError(f"URL returned an error: {resp.status_code}")
+
+                content_type = resp.headers.get("Content-Type", "")
+                if not content_type.startswith("text/html"):
+                    raise ValueError(
+                        f"Expected content type text/html. Got {content_type}."
+ ) + text = resp.text - else: + + elif "filename" in value_dict: with open( value_dict["filename"], "r", encoding=value_dict.get("encoding", "utf-8"), ) as f: text = f.read() + + else: + raise ValueError("Expected url or filename param.") + text = self._parse_html(text) output_nodes.append( Node( @@ -71,7 +86,7 @@ def _parse_html(self, text: str) -> str: else: title = "" - return title + "\n".join(soup.body.stripped_strings) + return "\n\n".join([title] + soup.body.stripped_strings) class ProcessHTMLOp(Op): From 98e32f0384b2442d3788f77aac61c38507030d0f Mon Sep 17 00:00:00 2001 From: SayaZhang Date: Sat, 3 Feb 2024 02:34:03 +0000 Subject: [PATCH 5/5] Update extract html example --- example/extract/extract_html.ipynb | 55 ++++++++++++------------------ uniflow/op/extract/load/html_op.py | 4 +-- 2 files changed, 24 insertions(+), 35 deletions(-) diff --git a/example/extract/extract_html.ipynb b/example/extract/extract_html.ipynb index a08efa32..0aac390d 100644 --- a/example/extract/extract_html.ipynb +++ b/example/extract/extract_html.ipynb @@ -111,27 +111,18 @@ "metadata": {}, "outputs": [], "source": [ - "data = [{\"url\": f'https://github.com/CambioML/uniflow'}]" + "# data = [{\"url\": f'https://github.com/CambioML/uniflow'}]" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "data = [{\"filename\": f'../transform/data/raw_input/22.11_information-theory.html'}]" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "data = [{\"tt\": \"11\"}]" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -141,7 +132,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -150,14 +141,14 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 1/1 [00:00<00:00, 10.74it/s]\n" + "100%|██████████| 1/1 [00:00<00:00, 10330.80it/s]\n" ] } ], @@ -176,40 +167,38 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "['22.11. Information Theory — Dive into Deep Learning 1.0.3 documentation22.',\n", + "['22.11. Information Theory — Dive into Deep Learning 1.0.3 documentation',\n", " 'Appendix: Mathematics for Deep Learning',\n", + " 'navigate_next',\n", + " 'Information Theory',\n", + " 'Quick search',\n", + " 'Show Source',\n", + " 'Preview Version',\n", + " 'Table Of Contents',\n", + " 'Installation',\n", + " '1. Introduction',\n", + " '2. Preliminaries',\n", " '2.1. Data Manipulation',\n", " '2.2. Data Preprocessing',\n", + " '2.3. Linear Algebra',\n", + " '2.4. Calculus',\n", " '2.5. Automatic Differentiation',\n", " '2.6. Probability and Statistics',\n", - " '3. Linear Neural Networks for Regression',\n", - " '3.1. Linear Regression',\n", - " '3.2. Object-Oriented Design for Implementation',\n", - " '3.3. Synthetic Regression Data',\n", - " '3.4. Linear Regression Implementation from Scratch',\n", - " '3.5. Concise Implementation of Linear Regression',\n", - " '4. Linear Neural Networks for Classification',\n", - " '4.1. Softmax Regression',\n", - " '4.2. The Image Classification Dataset',\n", - " '4.3. The Base Classification Model',\n", - " '4.4. Softmax Regression Implementation from Scratch',\n", - " '4.5. 
Concise Implementation of Softmax Regression',\n", - " '4.6. Generalization in Classification',\n", - " '4.7. Environment and Distribution Shift']\n" + " '2.7. Documentation']\n" ] } ], "source": [ - "text = output[0]['output'][0]['text'][0]\n", - "text = [p for p in text.split(\"\\n\") if len(p) > 20]\n", - "pprint.pprint(text[:20])" + "text = output[0]['output'][0]['text'][0:30]\n", + "text = [p for p in text if len(p) > 10]\n", + "pprint.pprint(text)" ] }, { diff --git a/uniflow/op/extract/load/html_op.py b/uniflow/op/extract/load/html_op.py index 19a5ebbb..134a37c6 100644 --- a/uniflow/op/extract/load/html_op.py +++ b/uniflow/op/extract/load/html_op.py @@ -82,11 +82,11 @@ def _parse_html(self, text: str) -> str: soup = self._beautiful_soup_parser(text, "html.parser") if soup.title: - title = str(soup.title.string) + title = str(soup.title.string) + "\n\n" else: title = "" - return "\n\n".join([title] + soup.body.stripped_strings) + return title + "\n\n".join(soup.body.stripped_strings) class ProcessHTMLOp(Op):