1 parent c74a606 commit 7b02814
imodelsx/embgam/embed.py
@@ -4,14 +4,14 @@
 import torch
 
 def generate_ngrams_list(
-    sentence,
+    sentence: str,
     ngrams: int,
     tokenizer_ngrams,
     all_ngrams=False,
     parsing: str='',
     nlp_chunks=None,
 ):
-    """get list of grams
+    """Get list of ngrams from sentence
 
     Params
     ------
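The annotated signature above takes a raw sentence plus a word-level tokenizer for splitting it into ngrams. A minimal usage sketch, assuming the package is importable under the path shown above and that a blank spaCy English tokenizer is an acceptable value for tokenizer_ngrams (the sentence and printed output are illustrative only, not from the source):

from spacy.lang.en import English
from imodelsx.embgam.embed import generate_ngrams_list

nlp = English()  # blank English pipeline; its tokenizer does simple word-level splitting
ngrams_list = generate_ngrams_list(
    sentence="the quick brown fox jumps",
    ngrams=2,                        # extract bigrams
    tokenizer_ngrams=nlp.tokenizer,  # word-based tokenizer (assumption: any word-level tokenizer works here)
    all_ngrams=False,                # keep only ngrams of exactly this length
)
print(ngrams_list)  # e.g. ['the quick', 'quick brown', ...]; exact output depends on the implementation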
@@ -119,7 +119,7 @@ def embed_and_sum_function(
     tokenizer_embeddings
         tokenizing for the embedding model
     tokenizer_ngrams
-        tokenizing the ngrams (word-based tokenization is probably more interpretable)
+        tokenizing the ngrams (word-based tokenization is more interpretable)
     parsing: str
         whether to use parsing rather than extracting all ngrams
     nlp_chunks
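The docstring above distinguishes two tokenizers: tokenizer_embeddings, which must match the embedding model, and tokenizer_ngrams, which should be word-based so the extracted ngrams stay interpretable. A sketch of how the two might be constructed, assuming a Hugging Face checkpoint (bert-base-uncased is only an example) and a spaCy word tokenizer; the full embed_and_sum_function call is omitted because its complete signature is outside this diff:

from transformers import AutoModel, AutoTokenizer
from spacy.lang.en import English

checkpoint = "bert-base-uncased"  # hypothetical embedding checkpoint
tokenizer_embeddings = AutoTokenizer.from_pretrained(checkpoint)  # subword tokenizer tied to the embedding model
tokenizer_ngrams = English().tokenizer  # word-based tokenizer, easier to interpret per ngram
model = AutoModel.from_pretrained(checkpoint)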