Update WhitespaceTokenizer, update version
Riccorl committed May 12, 2023
1 parent: 7b69720 · commit: 54289c0
Showing 2 changed files with 18 additions and 6 deletions.
22 changes: 17 additions & 5 deletions ipa/preprocessing/tokenizers/whitespace_tokenizer.py
@@ -1,3 +1,4 @@
+import re
 from typing import List, Union
 
 from overrides import overrides
@@ -13,6 +14,10 @@ class WhitespaceTokenizer(BaseTokenizer):
     A :obj:`Tokenizer` that splits the text on spaces.
     """
 
+    def __init__(self):
+        super(WhitespaceTokenizer, self).__init__()
+        self.finditer_regex = re.compile(r"\S+")
+
     def __call__(
         self,
         texts: Union[str, List[str], List[List[str]]],
@@ -51,11 +56,18 @@ def __call__(
 
     @overrides
     def tokenize(self, text: Union[str, List[str]]) -> List[Word]:
-        if isinstance(text, str):
-            return [Word(t, i) for i, t in enumerate(text.split())]
-        elif isinstance(text, list):
-            return [Word(t, i) for i, t in enumerate(text)]
-        else:
+
+        if not isinstance(text, (str, list)):
             raise ValueError(
                 f"text must be either `str` or `list`, found: `{type(text)}`"
             )
+
+        if isinstance(text, list):
+            text = " ".join(text)
+        return [
+            Word(t[0], i, start_char=t[1], end_char=t[2])
+            for i, t in enumerate(
+                (m.group(0), m.start(), m.end())
+                for m in self.finditer_regex.finditer(text)
+            )
+        ]
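For context, this change replaces the previous str.split()-based tokenization with a pre-compiled \S+ regex, so that each Word also carries character offsets (start_char/end_char), which plain split() cannot provide because positions are lost once the string is split. Below is a minimal, self-contained sketch of the new behaviour; it uses plain tuples in place of the library's Word class (whose import is not shown in this diff), so treat it as an illustration rather than the package's API.

```python
import re

# Same pattern the tokenizer now compiles in __init__.
FINDITER_REGEX = re.compile(r"\S+")


def whitespace_tokenize(text: str) -> list:
    """Mirror of the new tokenize(): one (token, index, start_char, end_char) per token."""
    return [
        (m.group(0), i, m.start(), m.end())
        for i, m in enumerate(FINDITER_REGEX.finditer(text))
    ]


print(whitespace_tokenize("Hello  world!"))
# [('Hello', 0, 0, 5), ('world!', 1, 7, 13)]
```

Note that when a list of tokens is passed, the new code joins it with single spaces before running the regex, so the reported offsets refer to that joined string rather than to whatever raw text the tokens originally came from.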
2 changes: 1 addition & 1 deletion setup.py
@@ -93,7 +93,7 @@ def fix_url_dependencies(req: str) -> str:
 
 setuptools.setup(
     name="ipa-core",  # Replace with your own project name
-    version="0.1.2",
+    version="0.1.3",
     author="Riccardo Orlando",
     author_email="orlandoricc@gmail.com",
     description="NLP Preprocessing Pipeline Wrappers",
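The second file simply bumps the released version from 0.1.2 to 0.1.3. Once a build containing this change is installed, that is the number consumers see in the package metadata; for example, a hypothetical runtime check, assuming the distribution name ipa-core from setup.py:

```python
from importlib.metadata import version

# Expected to print "0.1.3" for a release that includes this commit.
print(version("ipa-core"))
```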
