source_code stringlengths 45 7.24k | function_name stringlengths 1 111 | messages list |
|---|---|---|
def x(self):
    """Build the corpus object from the accumulated factories.

    Populates the category vector, fills the term and metadata factories row
    by row from the dataframe, freezes both into CSR matrices, and wraps
    everything in a ParsedCorpus (when a parsed-document column is configured
    and present) or a CorpusDF otherwise.
    """
    self._y = self._get_y_and_populate_category_idx_store()
    self._df.apply(self._add_to_x_factory, axis=1)
    final_row = len(self._y) - 1
    self._X = self._X_factory.set_last_row_idx(final_row).get_csr_matrix()
    self._mX = self._mX_factory.set_last_row_idx(final_row).get_csr_matrix()
    has_parse_column = (self._parsed_col is not None
                        and self._parsed_col in self._df)
    if has_parse_column:
        return ParsedCorpus(self._df, self._X, self._mX, self._y,
                            self._term_idx_store, self._category_idx_store,
                            self._metadata_idx_store, self._parsed_col,
                            self._category_col)
    return CorpusDF(self._df, self._X, self._mX, self._y, self._text_col,
                    self._term_idx_store, self._category_idx_store,
                    self._metadata_idx_store)
| build | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self._y = self._get_y_and_populate_category_idx_store()\n self._df.apply(self._add_to_x_factory, axis=1)\n self._X = self._X_factory.set_last... |
def x(self, doc_domains):
    """Collapse the term-document matrix into a term-domain matrix.

    Args:
        doc_domains: sequence with one domain label per document; its length
            must equal the number of documents in ``self.term_doc_matrix``.

    Returns:
        scipy.sparse.csr_matrix of shape (num_domains, num_terms) whose rows
        are the per-domain sums of document rows.  Row order follows set
        iteration order over the distinct labels.
    """
    assert len(doc_domains) == self.term_doc_matrix.get_num_docs()
    # Coerce to an ndarray so `doc_domains == domain` broadcasts into a
    # boolean mask; on a plain Python list the comparison collapses to a
    # single scalar False and the row selection below breaks.
    doc_domains = np.asarray(doc_domains)
    doc_domain_set = set(doc_domains)
    num_terms = self.term_doc_matrix.get_num_terms()
    num_domains = len(doc_domain_set)
    domain_mat = lil_matrix((num_domains, num_terms), dtype=int)
    X = self.term_doc_matrix.get_term_doc_mat()
    for i, domain in enumerate(doc_domain_set):
        domain_mat[i, :] = X[doc_domains == domain].sum(axis=0)
    return domain_mat.tocsr()
| get_new_term_doc_mat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, doc_domains):\n assert len(doc_domains) == self.term_doc_matrix.get_num_docs()\n doc_domain_set = set(doc_domains)\n num_terms = self.term_doc_m... |
def x(self):
    """Assemble the parsed corpus: populate y, freeze the term and metadata
    factories into CSR matrices, and return a ParsedCorpus over them."""
    self._y = self._get_y_and_populate_category_idx_store()
    self._df.apply(self._add_to_x_factory, axis=1)
    last_row = len(self._y) - 1
    self._X = self._X_factory.set_last_row_idx(last_row).get_csr_matrix()
    self._mX = self._mX_factory.set_last_row_idx(last_row).get_csr_matrix()
    return ParsedCorpus(self._df, self._X, self._mX, self._y,
                        self._term_idx_store, self._category_idx_store,
                        self._metadata_idx_store, self._parsed_col,
                        self._category_col)
| build | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self._y = self._get_y_and_populate_category_idx_store()\n self._df.apply(self._add_to_x_factory, axis=1)\n self._X = self._X_factory.set_last... |
def x(self):
    """Join corpus unigram counts with background unigram frequencies.

    Returns:
        pd.DataFrame indexed by term with a 'corpus' column (summed counts
        across categories) and the background frequency column(s); the two
        sources are outer-joined, missing entries filled with 0, and the
        index left unnamed.
    """
    background_df = self._get_background_unigram_frequencies()
    corpus_freq_df = pd.DataFrame({'corpus':
                                   self.term_category_freq_df.sum(axis=1)})
    # Background counts are unigram-only, so drop multi-word terms first.
    corpus_unigram_freq = corpus_freq_df.loc[
        [w for w in corpus_freq_df.index if ' ' not in w]]
    df = corpus_unigram_freq.join(background_df, how='outer').fillna(0)
    # `del df.index.name` raises AttributeError on modern pandas; assigning
    # None is the supported way to clear the index name.
    df.index.name = None
    return df
| get_term_and_background_counts | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n background_df = self._get_background_unigram_frequencies()\n corpus_freq_df = pd.DataFrame({'corpus': self.term_category_freq_df.sum\n (a... |
def x(self):
    """Return the integer category index of each document.

    Returns an empty numpy array when no document/category dataframe has
    been set; otherwise maps each document's category label to its position
    in ``self.get_categories()``.
    """
    if self._document_category_df is None:
        # pd.np was removed in pandas 2.0; use numpy directly.
        return np.array([])
    categories_d = {d: i for i, d in enumerate(self.get_categories())}
    return self._document_category_df.category.apply(categories_d.get).values
| get_doc_indices | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._document_category_df is None:\n return pd.np.array([])\n categories_d = {d: i for i, d in enumerate(self.get_categories())}\n ... |
def x(self):
    """Return each document's text as a numpy array (empty array when no
    document/category dataframe has been set)."""
    if self._document_category_df is None:
        # pd.np was removed in pandas 2.0; use numpy directly.
        return np.array([])
    return self._document_category_df.text.values
| get_texts | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._document_category_df is None:\n return pd.np.array([])\n return self._document_category_df.text.values\n",
"role": "user"
... |
def x(self, scatterchartdata):
    """Return the term-by-category frequency table with each column renamed
    to '<category> freq' and the index labelled 'term'.

    The ``scatterchartdata`` argument is accepted for interface
    compatibility and is not used here.
    """
    renamed = {category: category + ' freq'
               for category in self.term_category_freq_df}
    df = self.term_category_freq_df.rename(columns=renamed)
    df.index.name = 'term'
    return df
| get_term_category_frequencies | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, scatterchartdata):\n df = self.term_category_freq_df.rename(columns={c: (c + ' freq') for c in\n self.term_category_freq_df})\n df.index.nam... |
def x(self, term_doc_matrix):
    """Drop low-PMI bigrams and infrequent terms from a term-document matrix.

    Returns the matrix unchanged when it has no terms.  Raises
    AtLeastOneCategoryHasNoTermsException when the filtering leaves some
    category with no terms at all.
    """
    freq_df = term_doc_matrix.get_term_freq_df()
    if len(freq_df) == 0:
        return term_doc_matrix
    low_pmi_bigrams = get_low_pmi_bigrams(self._threshold_coef, freq_df).index
    rare_terms = freq_df[freq_df.sum(axis=1) < self._min_freq].index
    filtered = term_doc_matrix.remove_terms(set(low_pmi_bigrams | rare_terms))
    try:
        filtered.get_term_freq_df()
    except ValueError:
        raise AtLeastOneCategoryHasNoTermsException()
    return filtered
| filter | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, term_doc_matrix):\n df = term_doc_matrix.get_term_freq_df()\n if len(df) == 0:\n return term_doc_matrix\n low_pmi_bigrams = get_low_pmi_b... |
def x(term_doc_matrix, category, category_name, not_category_name,
      protocol='https', minimum_term_frequency=DEFAULT_MINIMUM_TERM_FREQUENCY,
      pmi_threshold_coefficient=DEFAULT_PMI_THRESHOLD_COEFFICIENT,
      max_terms=None, filter_unigrams=False, height_in_pixels=None,
      width_in_pixels=None, term_ranker=termranking.AbsoluteFrequencyRanker):
    """Render a basic scattertext visualization as a standalone HTML string.

    Builds a ScatterChart from the term-document matrix, converts it to the
    chart's dict form with alphabetical-percentile positioning, wraps it in
    a ScatterplotStructure, and emits the full HTML page.
    """
    chart = ScatterChart(term_doc_matrix=term_doc_matrix,
                         minimum_term_frequency=minimum_term_frequency,
                         pmi_threshold_coefficient=pmi_threshold_coefficient,
                         filter_unigrams=filter_unigrams,
                         max_terms=max_terms,
                         term_ranker=term_ranker)
    chart_data = chart.to_dict(category=category,
                               category_name=category_name,
                               not_category_name=not_category_name,
                               transform=percentile_alphabetical)
    structure = ScatterplotStructure(VizDataAdapter(chart_data),
                                     width_in_pixels, height_in_pixels)
    return BasicHTMLFromScatterplotStructure(structure).to_html(
        protocol=protocol)
| produce_scattertext_html | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(term_doc_matrix, category, category_name, not_category_name, protocol\n ='https', minimum_term_frequency=DEFAULT_MINIMUM_TERM_FREQUENCY,\n pmi_threshold_... |
def x(corpus, category, target_term, category_name=None, not_category_name=
    None, word2vec=None, alpha=0.01, max_p_val=0.1, term_significance=None,
    **kwargs):
    """Produce a scattertext explorer that scores terms by their gensim
    word2vec similarity to ``target_term``.

    A missing ``word2vec`` model is trained from the parsed corpus; a
    missing ``term_significance`` defaults to
    LogOddsRatioUninformativeDirichletPrior(alpha).  Terms the model cannot
    score fall back to the mean similarity of their parts, then to 0.
    """
    if word2vec is None:
        word2vec = Word2VecFromParsedCorpus(corpus).train()
    if term_significance is None:
        term_significance = LogOddsRatioUninformativeDirichletPrior(alpha)
    assert issubclass(type(term_significance), TermSignificance)
    scores = []
    for tok in corpus._term_idx_store._i2val:
        # Narrowed from bare `except:` clauses, which would also swallow
        # KeyboardInterrupt/SystemExit.
        try:
            scores.append(word2vec.similarity(target_term, tok.replace(' ',
                '_')))
        except Exception:
            try:
                scores.append(np.mean([word2vec.similarity(target_term,
                    tok_part) for tok_part in tok.split()]))
            except Exception:
                scores.append(0)
    scores = np.array(scores)
    return produce_scattertext_explorer(corpus, category, category_name,
        not_category_name, scores=scores, sort_by_dist=False,
        reverse_sort_scores_for_not_category=False, word_vec_use_p_vals=
        True, term_significance=term_significance, max_p_val=max_p_val,
        p_value_colors=True, **kwargs)
| word_similarity_explorer_gensim | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(corpus, category, target_term, category_name=None, not_category_name=\n None, word2vec=None, alpha=0.01, max_p_val=0.1, term_significance=None,\n **kwarg... |
def x(corpus, category, category_name, not_category_name, target_term, nlp=
    None, alpha=0.01, max_p_val=0.1, **kwargs):
    """Produce an explorer coloring terms by spaCy vector similarity to
    ``target_term``, with log-odds-ratio p-values.

    NOTE(review): ``spacy.load('en')`` is a legacy shortcut; current spaCy
    releases expect an explicit model name such as 'en_core_web_sm' —
    confirm the pinned spaCy version before changing it.
    """
    if nlp is None:
        import spacy
        nlp = spacy.load('en')
    base_term = nlp(target_term)
    similarities = [base_term.similarity(nlp(term))
                    for term in corpus._term_idx_store._i2val]
    scores = np.array(similarities)
    return produce_scattertext_explorer(
        corpus, category, category_name, not_category_name,
        scores=scores, sort_by_dist=False,
        reverse_sort_scores_for_not_category=False,
        word_vec_use_p_vals=True,
        term_significance=LogOddsRatioUninformativeDirichletPrior(alpha),
        max_p_val=max_p_val, p_value_colors=True, **kwargs)
| word_similarity_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(corpus, category, category_name, not_category_name, target_term, nlp=\n None, alpha=0.01, max_p_val=0.1, **kwargs):\n if nlp is None:\n import spa... |
def x(corpus, category, category_name=None, not_category_name=None,
    term_ranker=termranking.AbsoluteFrequencyRanker, alpha=0.01,
    use_term_significance=False, term_scorer=None, not_categories=None,
    grey_threshold=1.96, y_axis_values=None, frequency_transform=lambda x:
    scale(np.log(x) - np.log(1)), **kwargs):
    """Produce a scattertext explorer plotting log term frequency (x)
    against a term-association score (y).

    ``term_scorer`` defaults to LogOddsRatioUninformativeDirichletPrior(alpha);
    points whose |original score| falls below ``grey_threshold`` are drawn
    in grey.  Remaining keyword arguments are forwarded to
    produce_scattertext_explorer.

    NOTE(review): the two nested helpers below are both named ``x``
    (apparently an artifact of automated renaming), and ``round_downer``
    and the ``y_axis_rescale`` passed to the explorer are not defined in
    this view — presumably the nested helpers originally carried those
    names.  Confirm against the upstream source before editing.
    """
    if not_categories is None:
        not_categories = [c for c in corpus.get_categories() if c != category]
    if term_scorer is None:
        term_scorer = LogOddsRatioUninformativeDirichletPrior(alpha)
    my_term_ranker = term_ranker(corpus)
    if kwargs.get('use_non_text_features', False):
        my_term_ranker.use_non_text_features()
    # +1 smoothing keeps the log-frequency transform finite for zero counts.
    term_freq_df = my_term_ranker.get_ranks() + 1
    freqs = term_freq_df[[(c + ' freq') for c in [category] + not_categories]
        ].sum(axis=1).values
    # Five round-number tick candidates, log-spaced up to the max frequency;
    # then keep only those strictly inside (1, max].
    x_axis_values = [round_downer(10 ** x) for x in np.linspace(0, np.log(
        freqs.max()) / np.log(10), 5)]
    x_axis_values = [x for x in x_axis_values if x > 1 and x <= freqs.max()]
    frequencies_log_scaled = frequency_transform(freqs)
    if 'scores' not in kwargs:
        kwargs['scores'] = get_term_scorer_scores(category, corpus, kwargs.
            get('neutral_categories', False), not_categories, kwargs.get(
            'show_neutral', False), term_ranker, term_scorer, kwargs.get(
            'use_non_text_features', False))
    def x(coords):
        # Rescale coordinates to [0, 1], symmetric about 0.5.
        return ((coords - 0.5) / np.abs(coords - 0.5).max() + 1) / 2
    def x(x):
        # Round to one significant figure (0 stays 0).
        if x == 0:
            return 0
        return round(x, -int(np.floor(np.log10(abs(x)))))
    if y_axis_values is None:
        # Clip the score extremes to two decimals; anchor the middle tick at
        # 0 when the scores straddle zero, otherwise at 0.5.
        max_score = np.floor(np.max(kwargs['scores']) * 100) / 100
        min_score = np.ceil(np.min(kwargs['scores']) * 100) / 100
        if min_score < 0 and max_score > 0:
            central = 0
        else:
            central = 0.5
        y_axis_values = [x for x in [min_score, central, max_score] if x >=
            min_score and x <= max_score]
    scores_scaled_for_charting = scale_neg_1_to_1_with_zero_mean_abs_max(kwargs
        ['scores'])
    if use_term_significance:
        kwargs['term_significance'] = term_scorer
    kwargs['y_label'] = kwargs.get('y_label', term_scorer.get_name())
    # Points with |original score| below grey_threshold are interpolated in
    # grey; the rest follow the diverging RdYlBu palette.
    kwargs['color_func'] = kwargs.get('color_func',
        """(function(d) {
        return (Math.abs(d.os) < %s)
         ? d3.interpolate(d3.rgb(230, 230, 230), d3.rgb(130, 130, 130))(Math.abs(d.os)/%s)
         : d3.interpolateRdYlBu(d.y);
    })"""
        % (grey_threshold, grey_threshold))
    return produce_scattertext_explorer(corpus, category=category,
        category_name=category_name, not_category_name=not_category_name,
        x_coords=frequencies_log_scaled, y_coords=
        scores_scaled_for_charting, original_x=freqs, original_y=kwargs[
        'scores'], x_axis_values=x_axis_values, y_axis_values=y_axis_values,
        rescale_x=scale, rescale_y=y_axis_rescale, sort_by_dist=False,
        term_ranker=term_ranker, not_categories=not_categories, x_label=
        kwargs.get('x_label', 'Log Frequency'), **kwargs)
| produce_frequency_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(corpus, category, category_name=None, not_category_name=None,\n term_ranker=termranking.AbsoluteFrequencyRanker, alpha=0.01,\n use_term_significance=Fals... |
def x(semiotic_square, x_label, y_label, category_name=None,
    not_category_name=None, neutral_category_name=None,
    num_terms_semiotic_square=None, get_tooltip_content=None, x_axis_values
    =None, y_axis_values=None, color_func=None, axis_scaler=
    scale_neg_1_to_1_with_zero_mean, **kwargs):
    """Visualize a semiotic square in a scattertext explorer.

    Category names default to the square's A/B categories; the x axis is
    negated so category A appears on the left, and both axes are rescaled
    with ``axis_scaler``.  (Removed a dead triple-quoted string left over
    from commented-out scaler-selection code.)
    """
    if category_name is None:
        category_name = semiotic_square.category_a_
    if not_category_name is None:
        not_category_name = semiotic_square.category_b_
    if get_tooltip_content is None:
        # Tooltip shows the term plus its original (pre-scaling) coordinates.
        get_tooltip_content = (
            '(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})'
            % (x_label, y_label))
    if color_func is None:
        color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
    axes = semiotic_square.get_axes()
    return produce_scattertext_explorer(
        semiotic_square.term_doc_matrix_,
        category=semiotic_square.category_a_,
        category_name=category_name,
        not_category_name=not_category_name,
        not_categories=[semiotic_square.category_b_],
        scores=-axes['x'], sort_by_dist=False,
        x_coords=axis_scaler(-axes['x']), y_coords=axis_scaler(axes['y']),
        original_x=-axes['x'], original_y=axes['y'],
        show_characteristic=False, show_top_terms=False,
        x_label=x_label, y_label=y_label,
        semiotic_square=semiotic_square,
        neutral_categories=semiotic_square.neutral_categories_,
        show_neutral=True, neutral_category_name=neutral_category_name,
        num_terms_semiotic_square=num_terms_semiotic_square,
        get_tooltip_content=get_tooltip_content,
        x_axis_values=x_axis_values, y_axis_values=y_axis_values,
        color_func=color_func, show_axes=False, **kwargs)
| produce_semiotic_square_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(semiotic_square, x_label, y_label, category_name=None,\n not_category_name=None, neutral_category_name=None,\n num_terms_semiotic_square=None, get_toolti... |
def x(four_square, x_label=None, y_label=None, a_category_name=None,
    b_category_name=None, not_a_category_name=None, not_b_category_name=
    None, num_terms_semiotic_square=None, get_tooltip_content=None,
    x_axis_values=None, y_axis_values=None, color_func=None, axis_scaler=
    scale_neg_1_to_1_with_zero_mean, **kwargs):
    """Visualize a FourSquare object in a scattertext explorer.

    Category display names fall back from the caller-supplied argument to
    the FourSquare's labels to the first category of the matching list.
    Axis labels default to 'A-B' and 'NotA-NotB'.  (Removed a dead
    triple-quoted string left over from commented-out scaler code and
    factored the repeated name-fallback boilerplate into a helper.)
    """
    def _resolve_name(given, label_key, fallback_list):
        # Caller value first, then the FourSquare label, then the first
        # category of the list (looked up lazily, as in the original).
        if given is None:
            given = four_square.get_labels()[label_key]
        if given is None or given == '':
            given = fallback_list[0]
        return given

    a_category_name = _resolve_name(a_category_name, 'a_label',
                                    four_square.category_a_list_)
    b_category_name = _resolve_name(b_category_name, 'b_label',
                                    four_square.category_b_list_)
    not_a_category_name = _resolve_name(not_a_category_name, 'not_a_label',
                                        four_square.not_category_a_list_)
    not_b_category_name = _resolve_name(not_b_category_name, 'not_b_label',
                                        four_square.not_category_b_list_)
    if x_label is None:
        x_label = a_category_name + '-' + b_category_name
    if y_label is None:
        y_label = not_a_category_name + '-' + not_b_category_name
    if get_tooltip_content is None:
        # Tooltip shows the term plus its original (pre-scaling) coordinates.
        get_tooltip_content = (
            '(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})'
            % (x_label, y_label))
    if color_func is None:
        color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
    axes = four_square.get_axes()
    if 'scores' not in kwargs:
        kwargs['scores'] = -axes['x']
    return produce_scattertext_explorer(
        four_square.term_doc_matrix_,
        category=list(set(four_square.category_a_list_) -
                      set(four_square.category_b_list_))[0],
        category_name=a_category_name,
        not_category_name=b_category_name,
        not_categories=four_square.category_b_list_,
        neutral_categories=four_square.not_category_a_list_,
        extra_categories=four_square.not_category_b_list_,
        sort_by_dist=False,
        x_coords=axis_scaler(-axes['x']), y_coords=axis_scaler(axes['y']),
        original_x=-axes['x'], original_y=axes['y'],
        show_characteristic=False, show_top_terms=False,
        x_label=x_label, y_label=y_label,
        semiotic_square=four_square,
        show_neutral=True, neutral_category_name=not_a_category_name,
        show_extra=True, extra_category_name=not_b_category_name,
        num_terms_semiotic_square=num_terms_semiotic_square,
        get_tooltip_content=get_tooltip_content,
        x_axis_values=x_axis_values, y_axis_values=y_axis_values,
        color_func=color_func, show_axes=False, **kwargs)
| produce_four_square_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(four_square, x_label=None, y_label=None, a_category_name=None,\n b_category_name=None, not_a_category_name=None, not_b_category_name=\n None, num_terms_s... |
def x(four_square_axes, x_label=None, y_label=None,
    num_terms_semiotic_square=None, get_tooltip_content=None, x_axis_values
    =None, y_axis_values=None, color_func=None, axis_scaler=
    scale_neg_1_to_1_with_zero_mean, **kwargs):
    """Visualize a four-square-axes object in a scattertext explorer.

    Axis labels default to '<left>-<right>' and '<top>-<bottom>'; the x
    axis is negated so the left category appears on the left.  (Removed a
    dead triple-quoted string left over from commented-out scaler code.)
    """
    if x_label is None:
        x_label = (four_square_axes.left_category_name_ + '-' +
            four_square_axes.right_category_name_)
    if y_label is None:
        y_label = (four_square_axes.top_category_name_ + '-' +
            four_square_axes.bottom_category_name_)
    if get_tooltip_content is None:
        # Tooltip shows the term plus its original (pre-scaling) coordinates.
        get_tooltip_content = (
            '(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})'
            % (x_label, y_label))
    if color_func is None:
        color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
    axes = four_square_axes.get_axes()
    if 'scores' not in kwargs:
        kwargs['scores'] = -axes['x']
    return produce_scattertext_explorer(
        four_square_axes.term_doc_matrix_,
        category=four_square_axes.left_categories_[0],
        category_name=four_square_axes.left_category_name_,
        not_categories=four_square_axes.right_categories_,
        not_category_name=four_square_axes.right_category_name_,
        neutral_categories=four_square_axes.top_categories_,
        neutral_category_name=four_square_axes.top_category_name_,
        extra_categories=four_square_axes.bottom_categories_,
        extra_category_name=four_square_axes.bottom_category_name_,
        sort_by_dist=False,
        x_coords=axis_scaler(-axes['x']), y_coords=axis_scaler(axes['y']),
        original_x=-axes['x'], original_y=axes['y'],
        show_characteristic=False, show_top_terms=False,
        x_label=x_label, y_label=y_label,
        semiotic_square=four_square_axes,
        show_neutral=True, show_extra=True,
        num_terms_semiotic_square=num_terms_semiotic_square,
        get_tooltip_content=get_tooltip_content,
        x_axis_values=x_axis_values, y_axis_values=y_axis_values,
        color_func=color_func, show_axes=False, **kwargs)
| produce_four_square_axes_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(four_square_axes, x_label=None, y_label=None,\n num_terms_semiotic_square=None, get_tooltip_content=None, x_axis_values\n =None, y_axis_values=None, colo... |
def x(corpus, category, word2vec_model=None, projection_model=None,
    embeddings=None, term_acceptance_re=re.compile('[a-z]{3,}'), show_axes=
    False, **kwargs):
    """Project term embeddings to two dimensions and display them in a
    scattertext explorer.

    Either precomputed ``embeddings`` or a ``word2vec_model`` (filtered by
    ``term_acceptance_re``) feeds the EmbeddingsResolver; the first two
    projected dimensions become the plot coordinates.
    """
    resolver = EmbeddingsResolver(corpus)
    if embeddings is None:
        resolver.set_embeddings_model(word2vec_model, term_acceptance_re)
    else:
        resolver.set_embeddings(embeddings)
    corpus, word_axes = resolver.project_embeddings(projection_model,
                                                    x_dim=0, y_dim=1)
    return produce_scattertext_explorer(
        corpus=corpus, category=category, minimum_term_frequency=0,
        sort_by_dist=False, x_coords=scale(word_axes['x']),
        y_coords=scale(word_axes['y']), y_label='', x_label='',
        show_axes=show_axes, **kwargs)
| produce_projection_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(corpus, category, word2vec_model=None, projection_model=None,\n embeddings=None, term_acceptance_re=re.compile('[a-z]{3,}'), show_axes=\n False, **kwargs... |
def x(corpus, category, word2vec_model=None, projection_model=None,
    embeddings=None, projection=None, term_acceptance_re=re.compile(
    '[a-z]{3,}'), x_dim=0, y_dim=1, scaler=scale, show_axes=False,
    show_dimensions_on_tooltip=True, **kwargs):
    """Display a 2-D embedding projection (e.g. PCA) of terms in a
    scattertext explorer.

    When ``projection`` is supplied it must be a DataFrame with 'x' and 'y'
    columns indexed by the corpus terms (or metadata, if
    ``use_non_text_features`` is set); otherwise the projection is computed
    from ``embeddings``/``word2vec_model`` via EmbeddingsResolver.
    """
    if projection is None:
        resolver = EmbeddingsResolver(corpus)
        if embeddings is not None:
            resolver.set_embeddings(embeddings)
        else:
            resolver.set_embeddings_model(word2vec_model, term_acceptance_re)
        corpus, projection = resolver.project_embeddings(projection_model,
            x_dim=x_dim, y_dim=y_dim)
    else:
        # isinstance instead of an exact type comparison, so DataFrame
        # subclasses are accepted too.
        assert isinstance(projection, pd.DataFrame)
        assert 'x' in projection and 'y' in projection
        if kwargs.get('use_non_text_features', False):
            assert set(projection.index) == set(corpus.get_metadata())
        else:
            assert set(projection.index) == set(corpus.get_terms())
    if show_dimensions_on_tooltip:
        kwargs['get_tooltip_content'] = (
            """(function(d) {
            return d.term + "<br/>Dim %s: " + Math.round(d.ox*1000)/1000 + "<br/>Dim %s: " + Math.round(d.oy*1000)/1000
            })"""
            % (x_dim, y_dim))
    html = produce_scattertext_explorer(
        corpus=corpus, category=category, minimum_term_frequency=0,
        sort_by_dist=False, original_x=projection['x'],
        original_y=projection['y'], x_coords=scaler(projection['x']),
        y_coords=scaler(projection['y']), y_label='', x_label='',
        show_axes=show_axes, horizontal_line_y_position=0,
        vertical_line_x_position=0, **kwargs)
    return html
| produce_pca_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(corpus, category, word2vec_model=None, projection_model=None,\n embeddings=None, projection=None, term_acceptance_re=re.compile(\n '[a-z]{3,}'), x_dim=0,... |
def x(corpus, category, category_name=None, not_category_name=None,
    not_categories=None, characteristic_scorer=DenseRankCharacteristicness(
    ), term_ranker=termranking.AbsoluteFrequencyRanker, term_scorer=
    RankDifference(), **kwargs):
    """Explorer plotting corpus-characteristicness (x) against category
    association (y).

    Terms without a characteristicness score are removed from the corpus
    before scoring.  Fixes two defects: removed a dead
    max_score/min_score/central computation whose results were never used,
    and switched ``kwargs.get`` to ``kwargs.pop`` for keys that are also
    passed explicitly below, which previously raised "got multiple values
    for keyword argument" whenever a caller supplied them.
    """
    if not_categories is None:
        not_categories = [c for c in corpus.get_categories() if c != category]
    category_name, not_category_name = get_category_names(category,
        category_name, not_categories, not_category_name)
    zero_point, characteristic_scores = characteristic_scorer.get_scores(
        corpus)
    # Keep only terms that received a characteristicness score.
    corpus = corpus.remove_terms(set(corpus.get_terms()) - set(
        characteristic_scores.index))
    characteristic_scores = characteristic_scores.loc[corpus.get_terms()]
    term_freq_df = term_ranker(corpus).get_ranks()
    scores = term_scorer.get_scores(term_freq_df[category + ' freq'],
        term_freq_df[[(c + ' freq') for c in not_categories]].sum(axis=1))
    kwargs['scores'] = kwargs.get('scores', scores)
    scores_scaled_for_charting = scale_neg_1_to_1_with_zero_mean_abs_max(
        kwargs['scores'])
    html = produce_scattertext_explorer(
        corpus=corpus, category=category, category_name=category_name,
        not_category_name=not_category_name, not_categories=not_categories,
        minimum_term_frequency=0, sort_by_dist=False,
        x_coords=characteristic_scores,
        y_coords=scores_scaled_for_charting,
        y_axis_labels=kwargs.pop('y_axis_labels',
            ['More ' + not_category_name, 'Even', 'More ' + category_name]),
        x_label=kwargs.pop('x_label', 'Characteristic to Corpus'),
        y_label=kwargs.pop('y_label', term_scorer.get_name()),
        vertical_lines=kwargs.pop('vertical_lines', []),
        characteristic_scorer=kwargs.pop('characteristic_scorer',
            characteristic_scorer),
        **kwargs)
    return html
| produce_characteristic_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(corpus, category, category_name=None, not_category_name=None,\n not_categories=None, characteristic_scorer=DenseRankCharacteristicness(\n ), term_ranker=... |
def x(corpus, category, scores, category_name=None, not_category_name=None,
    **kwargs):
    """Explorer variant for sparse score vectors: zero-scored terms are
    greyed out and points are not sorted by distance."""
    return produce_scattertext_explorer(
        corpus, category, category_name, not_category_name,
        scores=scores, sort_by_dist=False, gray_zero_scores=True, **kwargs)
| sparse_explorer | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(corpus, category, scores, category_name=None, not_category_name=None,\n **kwargs):\n return produce_scattertext_explorer(corpus, category, category_name,... |
def x(self, term_doc_matrix):
    """Remove terms whose share of every category's tokens falls below a
    threshold derived from ``self.term_count``.

    A term survives if, in at least one category, its fraction of that
    category's total exceeds max(term_count / category_total).
    """
    ranks = self.term_ranker(term_doc_matrix).get_ranks()
    totals = ranks.sum(axis=0)
    shares = ranks / totals
    cutoff = np.max(self.term_count / totals)
    keep_mask = (shares > cutoff).any(axis=1)
    return term_doc_matrix.remove_terms(shares[~keep_mask].index)
| compact | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, term_doc_matrix):\n tdf = self.term_ranker(term_doc_matrix).get_ranks()\n tdf_sum = tdf.sum(axis=0)\n tdf_portions = tdf / tdf_sum\n threshol... |
def x(values):
    """Construct an IndexStore pre-populated with ``values``, preserving
    their order as the index order."""
    store = IndexStore()
    store._i2val = list(values)
    store._val2i = {value: index for index, value in enumerate(values)}
    store._next_i = len(values)
    return store
| build | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(values):\n idxstore = IndexStore()\n idxstore._i2val = list(values)\n idxstore._val2i = {term: i for i, term in enumerate(values)}\n idxstore._next... |
def x(self, metadata=None):
    """Collect per-category samples of labels and texts for display.

    Builds a dict with the corpus categories, integer labels, and texts;
    optionally parallel 'extra' (non-text features) and 'meta' lists.  Each
    category is capped at ``self.max_per_category`` documents, sampled
    without replacement via np.random.choice (so output is
    nondeterministic unless the numpy RNG is seeded).
    """
    to_ret = {'categories': self._corpus.get_categories(), 'labels': [],
        'texts': []}
    labels = self._corpus._y.astype(int)
    texts = self._get_texts_to_display()
    if self._use_non_text_features:
        to_ret['extra'] = []
        extrafeats = self._corpus.list_extra_features()
    if metadata is not None:
        to_ret['meta'] = []
    # Walk categories by index; the parallel lists are extended in the same
    # order for labels, texts, meta, and extra, keeping them aligned.
    for label_i in range(len(self._corpus._category_idx_store)):
        label_indices = np.arange(0, len(labels))[labels == label_i]
        if self.max_per_category < len(label_indices):
            # Downsample over-represented categories.
            label_indices = np.random.choice(label_indices, self.
                max_per_category, replace=False)
        to_ret['labels'] += list([int(e) for e in labels[label_indices]])
        to_ret['texts'] += list(texts[label_indices])
        if metadata is not None:
            to_ret['meta'] += [metadata[i] for i in label_indices]
        if self._use_non_text_features:
            to_ret['extra'] += [extrafeats[i] for i in label_indices]
    return to_ret
| get_labels_and_texts | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, metadata=None):\n to_ret = {'categories': self._corpus.get_categories(), 'labels': [],\n 'texts': []}\n labels = self._corpus._y.astype(int)... |
def x(cat_scores, not_cat_scores):
    """Merge two one-sided score arrays into a single signed score array.

    Where the category score exceeds the not-category score, the category
    score is kept; where it is smaller, the negated not-category score is
    used; ties yield 0.

    Fixed: ``np.float`` was removed in NumPy 1.24 — use the builtin float.
    """
    scores = np.zeros(len(cat_scores)).astype(float)
    gt = cat_scores > not_cat_scores
    lt = cat_scores < not_cat_scores
    scores[gt] = cat_scores[gt]
    scores[lt] = -not_cat_scores[lt]
    return scores
| balance_scores_and_dont_scale | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(cat_scores, not_cat_scores):\n scores = np.zeros(len(cat_scores)).astype(np.float)\n scores[cat_scores > not_cat_scores] = cat_scores[cat_scores >\n ... |
def x(self, cat_word_counts, not_cat_word_counts):
    """Return symmetric association scores for the category.

    Scores each direction separately; either maps the score difference into
    [0, 1] or balances the two one-sided vectors with ScoreBalancer, then
    optionally rescales the result to [-1, 1].
    """
    forward = self.get_scores_for_category(cat_word_counts,
                                           not_cat_word_counts)
    backward = self.get_scores_for_category(not_cat_word_counts,
                                            cat_word_counts)
    if self.use_score_difference_:
        scores = (forward - backward + 1.0) / 2.0
    else:
        scores = ScoreBalancer.balance_scores(forward, backward)
    return 2 * scores - 1 if self.one_to_neg_one_ else scores
| get_scores | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, cat_word_counts, not_cat_word_counts):\n cat_scores = self.get_scores_for_category(cat_word_counts,\n not_cat_word_counts)\n not_cat_scores ... |
def x(self, cat_word_counts, not_cat_word_counts):
    """Compute scaled F-scores of each term for the category.

    Precision and recall are derived from the two count vectors — with
    optional additive priors ``self.priors_`` — scaled through
    ``self.scaler_algo_``, then combined via the instance's weighted
    harmonic mean (which reads ``self.beta_`` internally).  NaNs are mapped
    to 0.

    Fixed: removed the unused local ``beta = self.beta_``.
    """
    assert len(cat_word_counts) == len(not_cat_word_counts)
    if self.priors_ is not None:
        p = self.priors_
        assert len(p) == len(cat_word_counts)
        precision = (cat_word_counts + p * 1.0) / (cat_word_counts +
            not_cat_word_counts + 2 * p)
        recall = (cat_word_counts + p) * 1.0 / (cat_word_counts.sum() + p.sum()
            )
    else:
        precision = cat_word_counts * 1.0 / (cat_word_counts +
            not_cat_word_counts)
        recall = cat_word_counts * 1.0 / cat_word_counts.sum()
    precision_normcdf = ScaledFScore._safe_scaler(self.scaler_algo_, precision)
    recall_normcdf = ScaledFScore._safe_scaler(self.scaler_algo_, recall)
    scores = self._weighted_h_mean(precision_normcdf, recall_normcdf)
    # Terms with zero counts produce NaN; treat them as unassociated.
    scores[np.isnan(scores)] = 0.0
    return scores
| get_scores_for_category | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, cat_word_counts, not_cat_word_counts):\n beta = self.beta_\n assert len(cat_word_counts) == len(not_cat_word_counts)\n if self.priors_ is not No... |
def x(self, X):
    """Map z-scores computed from the first two columns of X to one-sided
    p-values via the standard normal CDF."""
    return norm.cdf(self.get_scores(X[:, 0], X[:, 1]))
| get_p_vals | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X):\n z_scores = self.get_scores(X[:, 0], X[:, 1])\n return norm.cdf(z_scores)\n",
"role": "user"
},
{
"content": "get_p_vals",
"ro... |
def x(cat_word_counts, not_cat_word_counts, scaler_algo=DEFAULT_SCALER_ALGO,
    beta=DEFAULT_BETA):
    """Balanced scaled F-scores: score each direction separately, then merge
    the two one-sided vectors with ScoreBalancer."""
    forward = ScaledFScore.get_scores_for_category(
        cat_word_counts, not_cat_word_counts, scaler_algo, beta)
    backward = ScaledFScore.get_scores_for_category(
        not_cat_word_counts, cat_word_counts, scaler_algo, beta)
    return ScoreBalancer.balance_scores(forward, backward)
| get_scores | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(cat_word_counts, not_cat_word_counts, scaler_algo=DEFAULT_SCALER_ALGO,\n beta=DEFAULT_BETA):\n cat_scores = ScaledFScore.get_scores_for_category(cat_word... |
def x(cat_word_counts, not_cat_word_counts, scaler_algo=DEFAULT_SCALER_ALGO,
    beta=DEFAULT_BETA):
    """Scaled F-beta score of each term for the category.

    Precision and recall are scaled through ``scaler_algo`` and combined by
    the weighted harmonic mean; NaNs (zero-count terms) map to 0.
    """
    assert beta > 0
    precision = cat_word_counts * 1.0 / (cat_word_counts + not_cat_word_counts)
    recall = cat_word_counts * 1.0 / cat_word_counts.sum()
    scaled_precision = ScaledFScore._safe_scaler(scaler_algo, precision)
    scaled_recall = ScaledFScore._safe_scaler(scaler_algo, recall)
    beta_sq = beta ** 2
    scores = ((1 + beta_sq) * scaled_precision * scaled_recall
              / (beta_sq * scaled_precision + scaled_recall))
    scores[np.isnan(scores)] = 0.0
    return scores
| get_scores_for_category | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(cat_word_counts, not_cat_word_counts, scaler_algo=DEFAULT_SCALER_ALGO,\n beta=DEFAULT_BETA):\n assert beta > 0\n precision = cat_word_counts * 1.0 / (... |
def x(text):
    """Extract emoji (including multi-codepoint sequences) from ``text``.

    Walks the string codepoint by codepoint; when a character can start a
    known emoji, the candidate continuation sequences in VALID_EMOJIS are
    tried and the matching span is appended via _append_if_valid, otherwise
    the single character is appended on its own.

    Fixed: the bare ``except:`` is narrowed to KeyError, and a dead
    ``if found: break`` inside the candidate loop (unreachable, since both
    match branches break immediately) is removed.
    """
    found_emojis = []
    len_text = len(text)
    i = 0
    while i < len_text:
        cur_char = ord(text[i])
        try:
            candidates_by_len = VALID_EMOJIS[cur_char]
        except KeyError:
            # Not an emoji lead character; move on.
            i += 1
            continue
        found = False
        for dict_len, candidates in candidates_by_len:
            if i + dict_len <= len_text:
                if dict_len == 0:
                    # Single-codepoint emoji.
                    _append_if_valid(found_emojis, text[i])
                    i += 1
                    found = True
                    break
                candidate = tuple(ord(c) for c in text[i + 1:i + 1 + dict_len])
                if candidate in candidates:
                    # Multi-codepoint sequence matched.
                    _append_if_valid(found_emojis, text[i:i + 1 + dict_len])
                    i += 1 + dict_len
                    found = True
                    break
        if not found:
            _append_if_valid(found_emojis, text[i])
            i += 1
    return found_emojis
| extract_emoji | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text):\n found_emojis = []\n len_text = len(text)\n i = 0\n while i < len_text:\n cur_char = ord(text[i])\n try:\n VALID_E... |
def x(self):
task_df = self.get_task_df()
import altair as alt
chart = alt.Chart(task_df).mark_bar().encode(x='start', x2='end', y='term')
return chart
| make_chart | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n task_df = self.get_task_df()\n import altair as alt\n chart = alt.Chart(task_df).mark_bar().encode(x='start', x2='end', y='term')\n return... |
def x(self):
scoredf = {}
tdf = self.term_ranker(self.corpus).get_ranks()
for cat in sorted(self.corpus.get_categories()):
if cat >= self.starting_time_step:
negative_categories = self._get_negative_categories(cat, tdf)
scores = self.term_scorer.get_scores(tdf[cat + ' freq'].astype(
int), tdf[negative_categories].sum(axis=1))
scoredf[cat + ' score'] = scores
scoredf[cat + ' freq'] = tdf[cat + ' freq'].astype(int)
return pd.DataFrame(scoredf)
| get_temporal_score_df | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n scoredf = {}\n tdf = self.term_ranker(self.corpus).get_ranks()\n for cat in sorted(self.corpus.get_categories()):\n if cat >= self.sta... |
def x(self):
term_time_df = self._get_term_time_df()
terms_to_include = term_time_df.groupby('term')['top'].sum().sort_values(
ascending=False).iloc[:self.num_terms_to_include].index
task_df = term_time_df[term_time_df.term.isin(terms_to_include)][[
'time', 'term']].groupby('term').apply(lambda x: pd.Series(self.
_find_sequences(x['time']))).reset_index().rename({(0): 'sequence'},
axis=1).reset_index().assign(start=lambda x: x['sequence'].apply(lambda
x: x[0])).assign(end=lambda x: x['sequence'].apply(lambda x: x[1]))[[
'term', 'start', 'end']]
return task_df
| get_task_df | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n term_time_df = self._get_term_time_df()\n terms_to_include = term_time_df.groupby('term')['top'].sum().sort_values(\n ascending=False).il... |
def x(self, ngram):
mask = self._document_index_mask(ngram)
return self._df[mask]
| search | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, ngram):\n mask = self._document_index_mask(ngram)\n return self._df[mask]\n",
"role": "user"
},
{
"content": "search",
"role": "ass... |
def x(df, positive_category, term_significance):
df_pos = df[[positive_category]]
df_pos.columns = ['pos']
df_neg = pd.DataFrame(df[[c for c in df.columns if c !=
positive_category and c.endswith(' freq')]].sum(axis=1))
df_neg.columns = ['neg']
X = df_pos.join(df_neg)[['pos', 'neg']].values
return term_significance.get_p_vals(X)
| get_p_vals | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(df, positive_category, term_significance):\n df_pos = df[[positive_category]]\n df_pos.columns = ['pos']\n df_neg = pd.DataFrame(df[[c for c in df.col... |
def x(self):
if self._use_non_text_features:
return self._term_doc_matrix._mX
else:
return self._term_doc_matrix._X
| get_X | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._use_non_text_features:\n return self._term_doc_matrix._mX\n else:\n return self._term_doc_matrix._X\n",
"role": "user... |
def x(v):
df = pd.DataFrame({'v': v, 'sign': (v > 0) * 2 - 1})
df['lg'] = np.log(np.abs(v)) / np.log(1.96)
df['exclude'] = np.isinf(df.lg) | np.isneginf(df.lg)
for mask in [(df['sign'] == -1) & (df['exclude'] == False), (df['sign'] ==
1) & (df['exclude'] == False)]:
df[mask]['lg'] = df[mask]['lg'].max() - df[mask]['lg']
df['lg'] *= df['sign']
df['lg'] = df['lg'].fillna(0)
print(df[df['exclude']]['lg'].values)
df['to_out'] = scale_neg_1_to_1_with_zero_mean_abs_max(df['lg'])
print('right')
print(df.sort_values(by='lg').iloc[:5])
print(df.sort_values(by='lg').iloc[-5:])
print('to_out')
print(df.sort_values(by='to_out').iloc[:5])
print(df.sort_values(by='to_out').iloc[-5:])
print(len(df), len(df.dropna()))
return df['to_out']
| scale_neg_1_to_1_with_zero_mean_log_abs_max | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(v):\n df = pd.DataFrame({'v': v, 'sign': (v > 0) * 2 - 1})\n df['lg'] = np.log(np.abs(v)) / np.log(1.96)\n df['exclude'] = np.isinf(df.lg) | np.isnegi... |
def x(self):
(X_factory, mX_factory, category_idx_store, term_idx_store,
metadata_idx_store, y) = self._init_term_doc_matrix_variables()
parse_pipeline = ParsePipelineFactory(self.get_nlp(), X_factory,
mX_factory, category_idx_store, term_idx_store, metadata_idx_store,
y, self)
df = self._clean_and_filter_nulls_and_empties_from_dataframe()
tdm = self._apply_pipeline_and_get_build_instance(X_factory, mX_factory,
category_idx_store, df, parse_pipeline, term_idx_store,
metadata_idx_store, y)
return tdm
| build | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n (X_factory, mX_factory, category_idx_store, term_idx_store,\n metadata_idx_store, y) = self._init_term_doc_matrix_variables()\n parse_pip... |
def x(self):
X_factory = CSRMatrixFactory()
mX_factory = CSRMatrixFactory()
term_idx_store = IndexStore()
metadata_idx_store = IndexStore()
parse_pipeline = ParsePipelineFactoryWithoutCategories(self.get_nlp(),
X_factory, mX_factory, term_idx_store, metadata_idx_store, self)
df = self._clean_and_filter_nulls_and_empties_from_dataframe()
tdm = self._apply_pipeline_and_get_build_instance(X_factory, mX_factory,
df, parse_pipeline, term_idx_store, metadata_idx_store)
return tdm
| build | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n X_factory = CSRMatrixFactory()\n mX_factory = CSRMatrixFactory()\n term_idx_store = IndexStore()\n metadata_idx_store = IndexStore()\n ... |
def x(corpus):
assert isinstance(corpus, ParsedCorpus)
return itertools.chain(*[[[corpus._term_idx_store.getidxstrict(t.lower_
) for t in sent if not t.is_punct] for sent in doc.sents] for doc in
corpus.get_parsed_docs()])
| get_sentences | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(corpus):\n assert isinstance(corpus, ParsedCorpus)\n return itertools.chain(*[[[corpus._term_idx_store.getidxstrict(t.lower_\n ) for t in sent if ... |
def x(self, corpus):
term_ranks = self.term_ranker(corpus).get_ranks()
freq_df = pd.DataFrame({'corpus': term_ranks.sum(axis=1), 'standard':
self.background_frequencies.get_background_frequency_df()[
'background']}).dropna()
corpus_rank = rankdata(freq_df.corpus, 'dense')
standard_rank = rankdata(freq_df.standard, 'dense')
scores = corpus_rank / corpus_rank.max(
) - standard_rank / standard_rank.max()
if self.rerank_ranks:
rank_scores, zero_marker = self._rerank_scores(scores)
freq_df['score'] = pd.Series(rank_scores, index=freq_df.index)
else:
if scores.min() < 0 and scores.max() > 0:
zero_marker = -scores.min() / (scores.max() - scores.min())
elif scores.min() > 0:
zero_marker = 0
else:
zero_marker = 1
freq_df['score'] = scale(scores)
return zero_marker, freq_df.sort_values(by='score', ascending=False)[
'score']
| get_scores | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, corpus):\n term_ranks = self.term_ranker(corpus).get_ranks()\n freq_df = pd.DataFrame({'corpus': term_ranks.sum(axis=1), 'standard':\n self.... |
def x(self):
javascript_to_insert = '\n'.join([PackedDataUtils.
full_content_of_javascript_files(), self.
category_scatterplot_structure._visualization_data.to_javascript(
'getCategoryDataAndInfo'), self.category_scatterplot_structure.
get_js_to_call_build_scatterplot(self.category_plot_interface),
self.term_scatterplot_structure._visualization_data.to_javascript(
'getTermDataAndInfo'), self.term_scatterplot_structure.
get_js_to_call_build_scatterplot(self.term_plot_interface)])
html_template = self._get_html_template()
html_content = html_template.replace('<!-- INSERT SCRIPT -->',
javascript_to_insert, 1).replace('<!--D3URL-->', self.d3_url_struct
.get_d3_url(), 1).replace('<!--D3SCALECHROMATIC-->', self.
d3_url_struct.get_d3_scale_chromatic_url())
html_content = html_content.replace('http://', self.protocol + '://')
if self.show_halo:
axes_labels = self.category_projection.get_nearest_terms(num_terms=
self.num_terms)
for position, terms in axes_labels.items():
html_content = html_content.replace('{%s}' % position, self.
_get_lexicon_html(terms))
return html_content.replace('{width}', str(self.category_width)).replace(
'{height}', str(self.category_height))
| to_html | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n javascript_to_insert = '\\n'.join([PackedDataUtils.\n full_content_of_javascript_files(), self.\n category_scatterplot_structure._vis... |
def x(self):
X = self.corpus_.get_term_doc_mat()
categories = pd.Series(self.corpus_.get_category_ids())
cat_ar = np.array(self.corpus_.get_categories())
cat_idx_sort = np.argsort(cat_ar)
if self.seasonality_column_:
print('YES')
seasonality_ar = np.array(self.corpus_.get_df()[self.
seasonality_column_])
terms = self.corpus_.get_terms()
category_idx_store = self.corpus_.get_category_index_store()
data = {}
seasondata = {}
for i, cati in enumerate(cat_idx_sort):
cat = cat_ar[cati]
if cat >= self.start_category_ and i > self.timesteps_to_lag_:
neg_cats = self.sorted_categores_[i - self.timesteps_to_lag_:i]
neg_mask = categories.isin(category_idx_store.getidxstrictbatch
(neg_cats)).values
scores = self._regress_terms(X, cat, categories,
category_idx_store, neg_mask, terms)
data[cat] = scores
if self.seasonality_column_:
neg_cats = set(categories[(seasonality_ar == seasonality_ar
[cati]) & (categories != categories[cati])])
neg_mask = categories.isin(neg_cats).values
scores = self._regress_terms(X, cat, categories,
category_idx_store, neg_mask, terms)
seasondata[cat] = scores
coefs = pd.DataFrame(data)
pos_coefs = coefs.apply(lambda x: (x > 0) * x, axis=1).sum(axis=1
).sort_values(ascending=False)
term_cat_counts = self.corpus_.get_term_freq_df('')[coefs.columns]
def x(x):
return pd.Series(x / x.max(), index=x.index)
rank_df = pd.DataFrame({'coefr': dense_percentile(pos_coefs), 'freqr':
dense_percentile(term_cat_counts.max(axis=1)), 'coef': pos_coefs,
'freq': term_cat_counts.max(axis=1)})
if self.seasonality_column_:
seasoncoefs = pd.DataFrame(seasondata).sum(axis=1)
rank_df['seasoncoefr'] = dense_percentile(seasoncoefs.sort_values(
ascending=False) + np.abs(seasoncoefs.min()))
weights = [2, 1, 1]
vals = ['freqr', 'coefr', 'seasoncoefr']
def x(x):
if min(x[vals]) == 0:
return 0
return sum(weights) * 1.0 / sum([(weights[i] / x[val]) for i,
val in enumerate(vals)])
rank_df['hmean'] = rank_df.apply(gethmean, axis=1)
else:
beta = 0.5
rank_df['hmean'] = rank_df.apply(lambda x: 0 if min(x) == 0 else (1 +
beta ** 2) * (x.coefr * x.freqr) / (beta ** 2 * x.coefr + x.
freqr), axis=1)
rank_df = rank_df.sort_values(by='hmean', ascending=False)
display_df = pd.merge(term_cat_counts.loc[rank_df.iloc[:self.num_terms_
].index].reset_index().melt(id_vars=['index']).rename(columns={
'index': 'term', 'value': 'frequency'}), coefs.loc[rank_df.iloc[:
self.num_terms_].index].reset_index().melt(id_vars=['index']).
rename(columns={'index': 'term', 'value': 'trending'}), on=['term',
'variable'])
display_df[display_df['frequency'] == 0] = np.nan
display_df = display_df.dropna()
return display_df[display_df.term.isin(rank_df.index)]
| get_display_dataframe | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n X = self.corpus_.get_term_doc_mat()\n categories = pd.Series(self.corpus_.get_category_ids())\n cat_ar = np.array(self.corpus_.get_categories... |
def x(self, doc):
ngram_counter = Counter()
for sent in doc.sents:
ngram_counter += _phrase_counts(sent)
return ngram_counter
| get_feats | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, doc):\n ngram_counter = Counter()\n for sent in doc.sents:\n ngram_counter += _phrase_counts(sent)\n return ngram_counter\n",
"role":... |
def x(self, doc):
ngram_counter = Counter()
for sent in doc.sents:
unigrams = self._get_unigram_feats(sent)
ngram_counter += Counter(unigrams) + _phrase_counts(sent)
return ngram_counter
| get_feats | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, doc):\n ngram_counter = Counter()\n for sent in doc.sents:\n unigrams = self._get_unigram_feats(sent)\n ngram_counter += Counter(unig... |
def x(self, protocol='http', d3_url=None, d3_scale_chromatic_url=None,
html_base=None):
d3_url_struct = D3URLs(d3_url, d3_scale_chromatic_url)
ExternalJSUtilts.ensure_valid_protocol(protocol)
javascript_to_insert = '\n'.join([PackedDataUtils.
full_content_of_javascript_files(), self.scatterplot_structure.
_visualization_data.to_javascript(), self.scatterplot_structure.
get_js_to_call_build_scatterplot()])
html_template = PackedDataUtils.full_content_of_default_html_template(
) if html_base is None else self._format_html_base(html_base)
html_content = html_template.replace('<!-- INSERT SCRIPT -->',
javascript_to_insert, 1).replace('<!--D3URL-->', d3_url_struct.
get_d3_url(), 1).replace('<!--D3SCALECHROMATIC-->', d3_url_struct.
get_d3_scale_chromatic_url())
"""
if html_base is not None:
html_file = html_file.replace('<!-- INSERT SEMIOTIC SQUARE -->',
html_base)
"""
extra_libs = ''
if self.scatterplot_structure._save_svg_button:
extra_libs = ''
html_content = html_content.replace('<!-- EXTRA LIBS -->', extra_libs, 1
).replace('http://', protocol + '://')
return html_content
| to_html | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, protocol='http', d3_url=None, d3_scale_chromatic_url=None,\n html_base=None):\n d3_url_struct = D3URLs(d3_url, d3_scale_chromatic_url)\n Externa... |
def x(self, ngram):
mask = self._document_index_mask(ngram)
return pd.DataFrame({'text': self.get_texts()[mask], 'category': [self.
_category_idx_store.getval(idx) for idx in self._y[mask]]})
| search | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, ngram):\n mask = self._document_index_mask(ngram)\n return pd.DataFrame({'text': self.get_texts()[mask], 'category': [self.\n _category_idx_... |
def x(self, label_append=' freq'):
"""
row = self._row_category_ids()
newX = csr_matrix((self._X.data, (row, self._X.indices)))
return self._term_freq_df_from_matrix(newX)
"""
mat = self.get_term_freq_mat()
return pd.DataFrame(mat, index=pd.Series(self.get_terms(), name='term'),
columns=[(c + label_append) for c in self.get_categories()])
| get_term_freq_df | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, label_append=' freq'):\n \"\"\"\n row = self._row_category_ids()\n newX = csr_matrix((self._X.data, (row, self._X.indices)))\n re... |
def x(self):
freq_mat = np.zeros(shape=(self.get_num_terms(), self.
get_num_categories()), dtype=int)
for cat_i in range(self.get_num_categories()):
freq_mat[:, cat_i] = self._X[self._y == cat_i, :].sum(axis=0)
return freq_mat
| get_term_freq_mat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n freq_mat = np.zeros(shape=(self.get_num_terms(), self.\n get_num_categories()), dtype=int)\n for cat_i in range(self.get_num_categories()... |
def x(self):
freq_mat = np.zeros(shape=(self.get_num_terms(), self.
get_num_categories()), dtype=int)
for cat_i in range(self.get_num_categories()):
X = (self._X[self._y == cat_i, :] > 0).astype(int)
freq_mat[:, cat_i] = X.sum(axis=0)
return freq_mat
| get_term_count_mat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n freq_mat = np.zeros(shape=(self.get_num_terms(), self.\n get_num_categories()), dtype=int)\n for cat_i in range(self.get_num_categories()... |
def x(self):
freq_mat = np.zeros(shape=(self.get_num_metadata(), self.
get_num_categories()), dtype=int)
for cat_i in range(self.get_num_categories()):
mX = (self._mX[self._y == cat_i, :] > 0).astype(int)
freq_mat[:, cat_i] = mX.sum(axis=0)
return freq_mat
| get_metadata_count_mat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n freq_mat = np.zeros(shape=(self.get_num_metadata(), self.\n get_num_categories()), dtype=int)\n for cat_i in range(self.get_num_categorie... |
def x(self, label_append=' freq'):
row = self._row_category_ids_for_meta()
newX = csr_matrix((self._mX.data, (row, self._mX.indices)))
return self._metadata_freq_df_from_matrix(newX, label_append)
| get_metadata_freq_df | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, label_append=' freq'):\n row = self._row_category_ids_for_meta()\n newX = csr_matrix((self._mX.data, (row, self._mX.indices)))\n return self._me... |
def x(self, categories, ignore_absences=False):
if not ignore_absences:
assert set(self.get_categories()) & set(categories) == set(categories)
categories_to_remove = [c for c in self.get_categories() if c not in
categories]
return self.remove_categories(categories_to_remove)
| keep_only_these_categories | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, categories, ignore_absences=False):\n if not ignore_absences:\n assert set(self.get_categories()) & set(categories) == set(categories)\n cat... |
def x(self, categories, ignore_absences=False):
idx_to_delete_list = []
existing_categories = set(self.get_categories())
for category in categories:
if category not in existing_categories:
if not ignore_absences:
raise KeyError('Category %s not found' % category)
continue
idx_to_delete_list.append(self._category_idx_store.getidx(category))
new_category_idx_store = self._category_idx_store.batch_delete_idx(
idx_to_delete_list)
columns_to_delete = np.nonzero(np.isin(self._y, idx_to_delete_list))
new_X = delete_columns(self._X.T, columns_to_delete).T
new_mX = delete_columns(self._mX.T, columns_to_delete).T
intermediate_y = self._y[~np.isin(self._y, idx_to_delete_list)]
old_y_to_new_y = [self._category_idx_store.getidx(x) for x in
new_category_idx_store._i2val]
new_y = np.array([(old_y_to_new_y.index(i) if i in old_y_to_new_y else
None) for i in range(intermediate_y.max() + 1)])[intermediate_y]
new_metadata_idx_store = self._metadata_idx_store
if self.metadata_in_use():
meta_idx_to_delete = np.nonzero(new_mX.sum(axis=0).A1 == 0)[0]
new_metadata_idx_store = self._metadata_idx_store.batch_delete_idx(
meta_idx_to_delete)
term_idx_to_delete = np.nonzero(new_X.sum(axis=0).A1 == 0)[0]
new_term_idx_store = self._term_idx_store.batch_delete_idx(
term_idx_to_delete)
new_X = delete_columns(new_X, term_idx_to_delete)
term_doc_mat_to_ret = self._make_new_term_doc_matrix(new_X, new_mX,
new_y, new_term_idx_store, new_category_idx_store,
new_metadata_idx_store, ~np.isin(self._y, idx_to_delete_list))
return term_doc_mat_to_ret
| remove_categories | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, categories, ignore_absences=False):\n idx_to_delete_list = []\n existing_categories = set(self.get_categories())\n for category in categories:\n... |
def x(self, idx_to_delete_list):
new_X, new_term_idx_store = self._get_X_after_delete_terms(
idx_to_delete_list)
return self._make_new_term_doc_matrix(new_X, self._mX, self._y,
new_term_idx_store, self._category_idx_store, self.
_metadata_idx_store, self._y == self._y)
| remove_terms_by_indices | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, idx_to_delete_list):\n new_X, new_term_idx_store = self._get_X_after_delete_terms(\n idx_to_delete_list)\n return self._make_new_term_doc_ma... |
def x(self, category):
category_percentiles = self._get_term_percentiles_in_category(category)
not_category_percentiles = self._get_term_percentiles_not_in_category(
category)
rudder_scores = self._get_rudder_scores_for_percentile_pair(
category_percentiles, not_category_percentiles)
return rudder_scores
| get_rudder_scores | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, category):\n category_percentiles = self._get_term_percentiles_in_category(category)\n not_category_percentiles = self._get_term_percentiles_not_in... |
def x(self, category, clf=RidgeClassifierCV()):
try:
from sklearn.cross_validation import cross_val_predict
except:
from sklearn.model_selection import cross_val_predict
y = self._get_mask_from_category(category)
X = TfidfTransformer().fit_transform(self._X)
clf.fit(X, y)
y_hat = cross_val_predict(clf, X, y)
acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat)
return clf.coef_[0], acc, baseline
| get_logistic_regression_coefs_l2 | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, category, clf=RidgeClassifierCV()):\n try:\n from sklearn.cross_validation import cross_val_predict\n except:\n from sklearn.model_se... |
def x(self, category, clf=LassoCV(alphas=[0.1, 0.001], max_iter=10000,
n_jobs=-1)):
try:
from sklearn.cross_validation import cross_val_predict
except:
from sklearn.model_selection import cross_val_predict
y = self._get_mask_from_category(category)
y_continuous = self._get_continuous_version_boolean_y(y)
X = self._X
clf.fit(X, y_continuous)
y_hat = cross_val_predict(clf, X, y_continuous) > 0
acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat)
clf.fit(X, y_continuous)
return clf.coef_, acc, baseline
| get_logistic_regression_coefs_l1 | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, category, clf=LassoCV(alphas=[0.1, 0.001], max_iter=10000,\n n_jobs=-1)):\n try:\n from sklearn.cross_validation import cross_val_predict\n ... |
def x(self, category, clf=ElasticNet()):
self._fit_tfidf_model(category, clf)
return clf.coef_
| get_regression_coefs | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, category, clf=ElasticNet()):\n self._fit_tfidf_model(category, clf)\n return clf.coef_\n",
"role": "user"
},
{
"content": "get_regressi... |
def x(self, category, clf=LogisticRegression()):
self._fit_tfidf_model(category, clf)
return clf.coef_[0]
| get_logreg_coefs | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, category, clf=LogisticRegression()):\n self._fit_tfidf_model(category, clf)\n return clf.coef_[0]\n",
"role": "user"
},
{
"content": "g... |
def x(self, category, scaler_algo=DEFAULT_SCALER_ALGO, beta=DEFAULT_BETA):
assert beta > 0
cat_word_counts, not_cat_word_counts = (self.
_get_catetgory_and_non_category_word_counts(category))
scores = self._get_scaled_f_score_from_counts(cat_word_counts,
not_cat_word_counts, scaler_algo, beta)
return np.array(scores)
| get_scaled_f_scores | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, category, scaler_algo=DEFAULT_SCALER_ALGO, beta=DEFAULT_BETA):\n assert beta > 0\n cat_word_counts, not_cat_word_counts = (self.\n _get_cate... |
def x(self, cat_word_counts, not_cat_word_counts, scaler_algo, beta=
DEFAULT_BETA):
return ScaledFScore.get_scores(cat_word_counts, not_cat_word_counts,
scaler_algo, beta=beta)
| get_scaled_f_score_from_counts | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, cat_word_counts, not_cat_word_counts, scaler_algo, beta=\n DEFAULT_BETA):\n return ScaledFScore.get_scores(cat_word_counts, not_cat_word_counts,\n ... |
def x(self):
df = self.get_term_and_background_counts()
odds_ratio, p_values = self._get_fisher_scores_from_counts(df['corpus'],
df['background'])
df['Odds ratio'] = odds_ratio
df['Bonferroni-corrected p-values'] = p_values * len(df)
df.sort_values(by=['Bonferroni-corrected p-values', 'Odds ratio'],
ascending=[True, False])
return df
| get_fisher_scores_vs_background | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n df = self.get_term_and_background_counts()\n odds_ratio, p_values = self._get_fisher_scores_from_counts(df['corpus'],\n df['background'])... |
def x(self):
df = self.get_term_and_background_counts()
df['Log Posterior Mean Ratio'
] = self._get_posterior_mean_ratio_from_counts(df['corpus'], df[
'background'])
return df.sort_values('Log Posterior Mean Ratio', ascending=False)
| get_posterior_mean_ratio_scores_vs_background | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n df = self.get_term_and_background_counts()\n df['Log Posterior Mean Ratio'\n ] = self._get_posterior_mean_ratio_from_counts(df['corpus'],... |
def x(self):
df = self.get_term_and_background_counts()
corpus_percentiles = self._get_percentiles_from_freqs(df['corpus'])
background_percentiles = self._get_percentiles_from_freqs(df['background'])
df['Rudder'] = self._get_rudder_scores_for_percentile_pair(
corpus_percentiles, background_percentiles)
df = df.sort_values(by='Rudder', ascending=True)
return df
| get_rudder_scores_vs_background | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n df = self.get_term_and_background_counts()\n corpus_percentiles = self._get_percentiles_from_freqs(df['corpus'])\n background_percentiles = s... |
def x(self, scatterchartdata):
term_ranker = scatterchartdata.term_ranker(self)
if scatterchartdata.use_non_text_features:
term_ranker.use_non_text_features()
return term_ranker.get_ranks()
| get_term_category_frequencies | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, scatterchartdata):\n term_ranker = scatterchartdata.term_ranker(self)\n if scatterchartdata.use_non_text_features:\n term_ranker.use_non_tex... |
def x(self, new_categories):
assert len(new_categories) == self.get_num_docs()
new_category_idx_store = IndexStoreFromList.build(set(new_categories))
new_y = np.array(new_category_idx_store.getidxstrictbatch(new_categories))
new_tdm = self._make_new_term_doc_matrix(self._X, self._mX, new_y, self
._term_idx_store, new_category_idx_store, self._metadata_idx_store,
new_y == new_y)
return new_tdm
| recategorize | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, new_categories):\n assert len(new_categories) == self.get_num_docs()\n new_category_idx_store = IndexStoreFromList.build(set(new_categories))\n ... |
def x(self):
new_metadata_factory = CSRMatrixFactory()
for i, category_idx in enumerate(self.get_category_ids()):
new_metadata_factory[i, category_idx] = 1
new_metadata = new_metadata_factory.get_csr_matrix()
new_tdm = self._make_new_term_doc_matrix(self._X, new_metadata, self._y,
self._term_idx_store, self._category_idx_store, copy(self.
_category_idx_store), self._y == self._y)
return new_tdm
| use_categories_as_metadata | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n new_metadata_factory = CSRMatrixFactory()\n for i, category_idx in enumerate(self.get_category_ids()):\n new_metadata_factory[i, category... |
def x(self):
new_metadata_factory = CSRMatrixFactory()
for i, category_idx in enumerate(self.get_category_ids()):
new_metadata_factory[i, category_idx] = 1
new_metadata = new_metadata_factory.get_csr_matrix()
new_tdm = self._make_new_term_doc_matrix(self._mX, new_metadata, self.
_y, self._metadata_idx_store, self._category_idx_store, copy(self.
_category_idx_store), self._y == self._y)
return new_tdm
| use_categories_as_metadata_and_replace_terms | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n new_metadata_factory = CSRMatrixFactory()\n for i, category_idx in enumerate(self.get_category_ids()):\n new_metadata_factory[i, category... |
def x():
try:
data_stream = pkgutil.get_data('scattertext',
'data/rotten_tomatoes_corpus.csv.bz2')
except:
url = ROTTEN_TOMATOES_DATA_URL
data_stream = urlopen(url).read()
return pd.read_csv(io.BytesIO(bz2.decompress(data_stream)))
| get_data | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n try:\n data_stream = pkgutil.get_data('scattertext',\n 'data/rotten_tomatoes_corpus.csv.bz2')\n except:\n url = ROTTEN_TOMA... |
def x(self):
tdf = self._get_relevant_term_freq()
bg_df = self.term_doc_mat.get_term_and_background_counts()[['background']]
bg_df = pd.merge(tdf, bg_df, left_index=True, right_index=True, how='left'
).fillna(0.0)
self._store_priors_from_background_dataframe(bg_df)
return self
| use_general_term_frequencies | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n tdf = self._get_relevant_term_freq()\n bg_df = self.term_doc_mat.get_term_and_background_counts()[['background']]\n bg_df = pd.merge(tdf, bg_... |
def x(self, custom_term_frequencies):
self.priors += custom_term_frequencies.reindex(self.priors.index).fillna(0)
return self
| use_custom_term_frequencies | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, custom_term_frequencies):\n self.priors += custom_term_frequencies.reindex(self.priors.index).fillna(0)\n return self\n",
"role": "user"
},
... |
def x(self):
term_df = self.term_ranker.get_ranks()
self.priors += term_df.sum(axis=1).fillna(0.0)
return self
| use_all_categories | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n term_df = self.term_ranker.get_ranks()\n self.priors += term_df.sum(axis=1).fillna(0.0)\n return self\n",
"role": "user"
},
{
"co... |
def x(self):
term_df = self.term_ranker.get_ranks()
self.priors += term_df[[(c + ' freq') for c in self.
_get_neutral_categories()]].sum(axis=1)
return self
| use_neutral_categories | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n term_df = self.term_ranker.get_ranks()\n self.priors += term_df[[(c + ' freq') for c in self.\n _get_neutral_categories()]].sum(axis=1)\n... |
def x(self):
neutral_categories = self._get_neutral_categories()
self.term_doc_mat = self.term_doc_mat.remove_categories(neutral_categories)
self._reindex_priors()
return self
| drop_neutral_categories_from_corpus | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n neutral_categories = self._get_neutral_categories()\n self.term_doc_mat = self.term_doc_mat.remove_categories(neutral_categories)\n self._rei... |
def x(self):
self.term_doc_mat = self.term_doc_mat.remove_terms(set(self.
term_doc_mat.get_terms()) - set(self.priors.index))
self._reindex_priors()
return self
| drop_unused_terms | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self.term_doc_mat = self.term_doc_mat.remove_terms(set(self.\n term_doc_mat.get_terms()) - set(self.priors.index))\n self._reindex_priors... |
def x(self):
self.term_doc_mat = self.term_doc_mat.remove_terms(self.priors[self.
priors == 0].index)
self._reindex_priors()
return self
| drop_zero_priors | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self.term_doc_mat = self.term_doc_mat.remove_terms(self.priors[self.\n priors == 0].index)\n self._reindex_priors()\n return self\n",
... |
def x(self, target_term_doc_mat):
self.priors = self.priors[target_term_doc_mat.get_terms()].fillna(0)
return self
| align_to_target | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, target_term_doc_mat):\n self.priors = self.priors[target_term_doc_mat.get_terms()].fillna(0)\n return self\n",
"role": "user"
},
{
"con... |
def x(self):
priors = self.priors
priors[~np.isfinite(priors)] = 0
priors += self.starting_count
return priors
| get_priors | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n priors = self.priors\n priors[~np.isfinite(priors)] = 0\n priors += self.starting_count\n return priors\n",
"role": "user"
},
{
... |
def x(self, y_i, y_j):
yp_i = y_i + self.alpha_w
yp_j = y_j + self.alpha_w
np_i = np.sum(yp_i)
np_j = np.sum(yp_j)
delta_i_j = np.log(yp_i / (np_i - yp_i)) - np.log(yp_j / (np_j - yp_j))
var_delta_i_j = 1.0 / yp_i + 1.0 / (np_i - yp_i) + 1.0 / yp_j + 1.0 / (np_j
- yp_j)
zeta_i_j = delta_i_j / np.sqrt(var_delta_i_j)
return zeta_i_j
| get_zeta_i_j_given_separate_counts | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, y_i, y_j):\n yp_i = y_i + self.alpha_w\n yp_j = y_j + self.alpha_w\n np_i = np.sum(yp_i)\n np_j = np.sum(yp_j)\n delta_i_j = np.log(yp_i /... |
def x(self, X):
y_i, y_j = X.T[0], X.T[1]
return self.get_zeta_i_j_given_separate_counts(y_i, y_j)
| get_zeta_i_j | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X):\n y_i, y_j = X.T[0], X.T[1]\n return self.get_zeta_i_j_given_separate_counts(y_i, y_j)\n",
"role": "user"
},
{
"content": "get_zeta... |
def x(self, y_i, y_j):
z_scores = self.get_zeta_i_j_given_separate_counts(y_i, y_j)
return z_scores
| get_scores | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, y_i, y_j):\n z_scores = self.get_zeta_i_j_given_separate_counts(y_i, y_j)\n return z_scores\n",
"role": "user"
},
{
"content": "get_sco... |
def x(self, term_doc_matrix):
count_df = self._get_statistics_dataframe(term_doc_matrix)
return term_doc_matrix.remove_terms(count_df[count_df['pmi'] < self.
minimum_pmi].index)
| compact | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, term_doc_matrix):\n count_df = self._get_statistics_dataframe(term_doc_matrix)\n return term_doc_matrix.remove_terms(count_df[count_df['pmi'] < sel... |
def x(term_dict):
if type(term_dict) != dict:
raise TypeError(
'Argument for term_dict must be a dict, keyed on strings, and contain a list of strings.'
)
for k, v in term_dict.items():
if type(v) != list:
raise TypeError('Values in term dict must only be lists.')
if sys.version_info[0] == 2:
if type(k) != str and type(k) != unicode:
raise TypeError(
'Keys in term dict must be of type str or unicode.')
for e in v:
if type(k) != str and type(k) != unicode:
raise TypeError(
'Values in term lists must be str or unicode.')
if sys.version_info[0] == 3:
if type(k) != str:
raise TypeError('Keys in term dict must be of type str.')
for e in v:
if type(e) != str:
raise TypeError('Values in term lists must be str.')
| check_topic_model_string_format | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(term_dict):\n if type(term_dict) != dict:\n raise TypeError(\n 'Argument for term_dict must be a dict, keyed on strings, and contain a lis... |
def x(self, term_dict):
check_topic_model_string_format(term_dict)
if not self.term_doc_matrix.metadata_in_use():
raise TermDocMatrixHasNoMetadataException(
'No metadata is present in the term document matrix')
self.metadata_term_lists = term_dict
return self
| inject_metadata_term_lists | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, term_dict):\n check_topic_model_string_format(term_dict)\n if not self.term_doc_matrix.metadata_in_use():\n raise TermDocMatrixHasNoMetadata... |
def x(self, term_dict):
assert type(term_dict) == dict
if not self.term_doc_matrix.metadata_in_use():
raise TermDocMatrixHasNoMetadataException(
'No metadata is present in the term document matrix')
if sys.version_info[0] == 2:
assert set([type(v) for v in term_dict.values()]) - set([str, unicode]
) == set()
else:
assert set([type(v) for v in term_dict.values()]) - set([str]) == set()
self.metadata_descriptions = term_dict
return self
| inject_metadata_descriptions | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, term_dict):\n assert type(term_dict) == dict\n if not self.term_doc_matrix.metadata_in_use():\n raise TermDocMatrixHasNoMetadataException(\n... |
def x(self, x_coords, y_coords, rescale_x=None, rescale_y=None, original_x=
None, original_y=None):
self._verify_coordinates(x_coords, 'x')
self._verify_coordinates(y_coords, 'y')
self.x_coords = x_coords
self.y_coords = y_coords
self._rescale_x = rescale_x
self._rescale_y = rescale_y
self.original_x = x_coords if original_x is None else original_x
self.original_y = y_coords if original_y is None else original_y
| inject_coordinates | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, x_coords, y_coords, rescale_x=None, rescale_y=None, original_x=\n None, original_y=None):\n self._verify_coordinates(x_coords, 'x')\n self._veri... |
def x(self, vec):
if (self.scatterchartdata.jitter == 0 or self.scatterchartdata.jitter is
None):
return vec
return vec + np.random.rand(1, len(vec))[0] * self.scatterchartdata.jitter
| add_jitter | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, vec):\n if (self.scatterchartdata.jitter == 0 or self.scatterchartdata.jitter is\n None):\n return vec\n return vec + np.random.rand(... |
def x(self, category, num_top_words_to_annotate=4, words_to_annotate=[],
scores=None, transform=percentile_alphabetical):
try:
import matplotlib.pyplot as plt
except:
raise Exception(
'matplotlib and mpld3 need to be installed to use this function.')
try:
from mpld3 import plugins, fig_to_html
except:
raise Exception('mpld3 need to be installed to use this function.')
all_categories, other_categories = self._get_category_names(category)
df = self._term_rank_score_and_frequency_df(all_categories, category,
other_categories, scores)
if self.x_coords is None:
df['x'], df['y'
] = self._get_coordinates_from_transform_and_jitter_frequencies(
category, df, other_categories, transform)
df_to_annotate = df[(df['not category score rank'] <=
num_top_words_to_annotate) | (df['category score rank'] <=
num_top_words_to_annotate) | df['term'].isin(words_to_annotate)]
words = list(df['term'])
font = {'family': 'sans-serif', 'color': 'black', 'weight': 'normal',
'size': 'large'}
fig, ax = plt.subplots()
plt.figure(figsize=(10, 10))
plt.gcf().subplots_adjust(bottom=0.2)
plt.gcf().subplots_adjust(right=0.2)
points = ax.scatter(self.x_coords, self.y_coords, c=-df['color_scores'],
cmap='seismic', s=10, edgecolors='none', alpha=0.9)
tooltip = plugins.PointHTMLTooltip(points, [('<span id=a>%s</span>' % w
) for w in words], css='#a {background-color: white;}')
plugins.connect(fig, tooltip)
ax.set_ylim([-0.2, 1.2])
ax.set_xlim([-0.2, 1.2])
ax.xaxis.set_ticks([0.0, 0.5, 1.0])
ax.yaxis.set_ticks([0.0, 0.5, 1.0])
ax.set_ylabel(category.title() + ' Frequency Percentile', fontdict=font,
labelpad=20)
ax.set_xlabel('Not ' + category.title() + ' Frequency Percentile',
fontdict=font, labelpad=20)
for i, row in df_to_annotate.iterrows():
alignment_criteria = i % 2 == 0
horizontalalignment = 'right' if alignment_criteria else 'left'
verticalalignment = 'bottom' if alignment_criteria else 'top'
term = row['term']
ax.annotate(term, (self.x_coords[i], self.y_data[i]), size=15,
horizontalalignment=horizontalalignment, verticalalignment=
verticalalignment)
plt.show()
return df, fig_to_html(fig)
| draw | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, category, num_top_words_to_annotate=4, words_to_annotate=[],\n scores=None, transform=percentile_alphabetical):\n try:\n import matplotlib.p... |
def x(self, y_i, y_j):
n_i, n_j = y_i.sum(), y_j.sum()
delta_i_j = np.log((y_i + 1) / (1.0 + n_i - y_i)) - np.log((y_j + 1) /
(1.0 + n_j - y_j))
return delta_i_j
| get_zeta_i_j_given_separate_counts | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, y_i, y_j):\n n_i, n_j = y_i.sum(), y_j.sum()\n delta_i_j = np.log((y_i + 1) / (1.0 + n_i - y_i)) - np.log((y_j + 1) /\n (1.0 + n_j - y_j))\n... |
def x(self, tdm, category):
X = tdm._X
y = self._make_response_variable_1_or_negative_1(category, tdm)
pX = X / X.sum(axis=1)
ansX = self._anscombe_transform(pX.copy())
B, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = lsqr(A
=ansX, b=y, calc_var=True)
| get_scores_and_p_values | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tdm, category):\n X = tdm._X\n y = self._make_response_variable_1_or_negative_1(category, tdm)\n pX = X / X.sum(axis=1)\n ansX = self._anscom... |
def x(mat, columns_to_delete):
column_mask = np.ones(mat.shape[1], dtype=bool)
column_mask[columns_to_delete] = 0
return mat.tocsc()[:, column_mask].tocsr()
| delete_columns | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(mat, columns_to_delete):\n column_mask = np.ones(mat.shape[1], dtype=bool)\n column_mask[columns_to_delete] = 0\n return mat.tocsc()[:, column_mask].t... |
def x(self, last_col_idx):
assert last_col_idx >= self._max_col
self._max_col = last_col_idx
return self
| set_last_col_idx | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, last_col_idx):\n assert last_col_idx >= self._max_col\n self._max_col = last_col_idx\n return self\n",
"role": "user"
},
{
"content"... |
def x(self, last_row_idx):
assert last_row_idx >= self._max_row
self._max_row = last_row_idx
return self
| set_last_row_idx | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, last_row_idx):\n assert last_row_idx >= self._max_row\n self._max_row = last_row_idx\n return self\n",
"role": "user"
},
{
"content"... |
def x(self, doc):
return Counter([str(c).lower() for c in doc.noun_chunks])
| get_feats | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, doc):\n return Counter([str(c).lower() for c in doc.noun_chunks])\n",
"role": "user"
},
{
"content": "get_feats",
"role": "assistant"
... |
def x(nltk_tokenizer):
def x(text):
toks = []
for tok in nltk_tokenizer.tokenize(text):
if len(tok) > 0:
toks.append(Tok(_get_pos_tag(tok), tok.lower(), tok.lower(),
ent_type='', tag=''))
return Doc([toks], text)
return tokenize
| nltk_tokenzier_factory | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(nltk_tokenizer):\n\n def x(text):\n toks = []\n for tok in nltk_tokenizer.tokenize(text):\n if len(tok) > 0:\n toks.... |
def x(self, embeddings):
if self.embeddings_ is not None:
raise Exception(
'You have already set embeddings by running set_embeddings or set_embeddings_model.'
)
assert embeddings.shape[1] == self.corpus_.get_num_terms()
self.embeddings_ = embeddings.T
self.vocab_ = self.corpus_.get_terms()
return self
| set_embeddings | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, embeddings):\n if self.embeddings_ is not None:\n raise Exception(\n 'You have already set embeddings by running set_embeddings or s... |
def x(self, projection_model=None):
if self.embeddings_ is None:
raise Exception(
'Run set_embeddings_model or set_embeddings to get embeddings')
if projection_model is None:
try:
import umap
except:
raise Exception(
'Please install umap (pip install umap-learn) to use the default projection_model.'
)
projection_model = umap.UMAP(min_dist=0.5, metric='cosine')
axes = projection_model.fit_transform(self.embeddings_)
return axes
| project | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, projection_model=None):\n if self.embeddings_ is None:\n raise Exception(\n 'Run set_embeddings_model or set_embeddings to get embed... |
def x(self, terms=None, num_terms_per_topic=10, scorer=RankDifference()):
topic_model = {}
if terms is None:
terms = self.corpus.get_scaled_f_scores_vs_background().index[:30]
for term in terms:
termidx = self.termidxstore.getidxstrict(term)
labels = self.sentX[:, termidx].astype(bool).todense().A1
poscnts = self.sentX[labels, :].astype(bool).sum(axis=0).A1
negcnts = self.sentX[~labels, :].astype(bool).sum(axis=0).A1
scores = scorer.get_scores(poscnts, negcnts)
topic_model[term] = [self.termidxstore.getval(i) for i in np.
argsort(-scores)[:num_terms_per_topic]]
return topic_model
| get_topics_from_terms | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, terms=None, num_terms_per_topic=10, scorer=RankDifference()):\n topic_model = {}\n if terms is None:\n terms = self.corpus.get_scaled_f_scor... |
def x(self):
if self._use_non_text_features:
return self._term_doc_matrix.get_metadata_freq_df()
else:
return self._term_doc_matrix.get_term_freq_df()
| get_ranks | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._use_non_text_features:\n return self._term_doc_matrix.get_metadata_freq_df()\n else:\n return self._term_doc_matrix.get_t... |
def x(self):
if self._category_text_iter is None:
raise CategoryTextIterNotSetError()
nlp = self.get_nlp()
category_document_iter = ((category, self._clean_function(raw_text)) for
category, raw_text in self._category_text_iter)
term_doc_matrix = self._build_from_category_spacy_doc_iter((category,
nlp(text)) for category, text in category_document_iter if text.
strip() != '')
return term_doc_matrix
| build | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._category_text_iter is None:\n raise CategoryTextIterNotSetError()\n nlp = self.get_nlp()\n category_document_iter = ((categor... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.