| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q20300
|
_Paragraph.runs
|
train
|
def runs(self):
"""
Immutable sequence of |_Run| objects corresponding to the runs in
this paragraph.
"""
return tuple(_Run(r, self) for r in self._element.r_lst)
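# --- Usage sketch (assumes the python-pptx package; not part of the original source) ---
from pptx import Presentation

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[0])
slide.shapes.title.text_frame.text = "Hello"
for run in slide.shapes.title.text_frame.paragraphs[0].runs:
    print(run.text)  # -> Hello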
|
python
|
{
"resource": ""
}
|
q20301
|
BaseShape.click_action
|
train
|
def click_action(self):
"""|ActionSetting| instance providing access to click behaviors.
Click behaviors are hyperlink-like behaviors including jumping to
a hyperlink (web page) or to another slide in the presentation. The
click action is that defined on the overall shape, not a run of text
within the shape. An |ActionSetting| object is always returned, even
when no click behavior is defined on the shape.
"""
cNvPr = self._element._nvXxPr.cNvPr
return ActionSetting(cNvPr, self)
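# --- Usage sketch (assumes python-pptx; an ActionSetting is returned even with no behavior defined) ---
from pptx import Presentation

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[0])
action = slide.shapes[0].click_action
print(action.hyperlink.address)  # None when no click behavior is defined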
|
python
|
{
"resource": ""
}
|
q20302
|
_BaseShapes._next_shape_id
|
train
|
def _next_shape_id(self):
"""Return a unique shape id suitable for use with a new shape.
The returned id is 1 greater than the maximum shape id used so far.
In practice, the minimum id is 2 because the spTree element is always
assigned id="1".
"""
# ---presence of cached-max-shape-id indicates turbo mode is on---
if self._cached_max_shape_id is not None:
self._cached_max_shape_id += 1
return self._cached_max_shape_id
return self._spTree.max_shape_id + 1
|
python
|
{
"resource": ""
}
|
q20303
|
_BaseGroupShapes.add_connector
|
train
|
def add_connector(self, connector_type, begin_x, begin_y, end_x, end_y):
"""Add a newly created connector shape to the end of this shape tree.
*connector_type* is a member of the :ref:`MsoConnectorType`
enumeration and the end-point values are specified as EMU values. The
returned connector is of type *connector_type* and has begin and end
points as specified.
"""
cxnSp = self._add_cxnSp(
connector_type, begin_x, begin_y, end_x, end_y
)
self._recalculate_extents()
return self._shape_factory(cxnSp)
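# --- Usage sketch (assumes python-pptx; coordinates given as EMU via Inches) ---
from pptx import Presentation
from pptx.enum.shapes import MSO_CONNECTOR
from pptx.util import Inches

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])
connector = slide.shapes.add_connector(
    MSO_CONNECTOR.STRAIGHT, Inches(1), Inches(1), Inches(3), Inches(2))
print(connector.begin_x, connector.end_y)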
|
python
|
{
"resource": ""
}
|
q20304
|
_BaseGroupShapes.add_group_shape
|
train
|
def add_group_shape(self, shapes=()):
    """Return a |GroupShape| object newly appended to this shape tree.
    *shapes* is an optional sequence of existing shapes to be moved into
    the new group. When omitted, the group shape is empty and must be
    populated with shapes using methods on its shape tree, available on
    its `.shapes` property. The position and extents of the group shape
    are determined by the shapes it contains; they are recalculated each
    time a shape is added to it.
    """
grpSp = self._element.add_grpSp()
for shape in shapes:
grpSp.insert_element_before(shape._element, 'p:extLst')
if shapes:
grpSp.recalculate_extents()
return self._shape_factory(grpSp)
|
python
|
{
"resource": ""
}
|
q20305
|
_BaseGroupShapes.add_shape
|
train
|
def add_shape(self, autoshape_type_id, left, top, width, height):
"""Return new |Shape| object appended to this shape tree.
*autoshape_type_id* is a member of :ref:`MsoAutoShapeType` e.g.
``MSO_SHAPE.RECTANGLE`` specifying the type of shape to be added. The
remaining arguments specify the new shape's position and size.
"""
autoshape_type = AutoShapeType(autoshape_type_id)
sp = self._add_sp(autoshape_type, left, top, width, height)
self._recalculate_extents()
return self._shape_factory(sp)
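# --- Usage sketch (assumes python-pptx) ---
from pptx import Presentation
from pptx.enum.shapes import MSO_SHAPE
from pptx.util import Inches

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])
shape = slide.shapes.add_shape(
    MSO_SHAPE.ROUNDED_RECTANGLE, Inches(1), Inches(1), Inches(2), Inches(1))
shape.text = "Step 1"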
|
python
|
{
"resource": ""
}
|
q20306
|
_BaseGroupShapes.add_textbox
|
train
|
def add_textbox(self, left, top, width, height):
"""Return newly added text box shape appended to this shape tree.
The text box is of the specified size, located at the specified
position on the slide.
"""
sp = self._add_textbox_sp(left, top, width, height)
self._recalculate_extents()
return self._shape_factory(sp)
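# --- Usage sketch (assumes python-pptx) ---
from pptx import Presentation
from pptx.util import Inches, Pt

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])
box = slide.shapes.add_textbox(Inches(1), Inches(1), Inches(4), Inches(1))
box.text_frame.text = "Hello, world"
box.text_frame.paragraphs[0].runs[0].font.size = Pt(24)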
|
python
|
{
"resource": ""
}
|
q20307
|
_BaseGroupShapes.build_freeform
|
train
|
def build_freeform(self, start_x=0, start_y=0, scale=1.0):
"""Return |FreeformBuilder| object to specify a freeform shape.
The optional *start_x* and *start_y* arguments specify the starting
pen position in local coordinates. They will be rounded to the
nearest integer before use and each default to zero.
The optional *scale* argument specifies the size of local coordinates
    proportional to slide coordinates (EMU). If the vertical scale is
    different from the horizontal scale (local coordinate units are
    "rectangular"), a pair of numeric values can be provided as the
*scale* argument, e.g. `scale=(1.0, 2.0)`. In this case the first
number is interpreted as the horizontal (X) scale and the second as
the vertical (Y) scale.
A convenient method for calculating scale is to divide a |Length|
object by an equivalent count of local coordinate units, e.g.
`scale = Inches(1)/1000` for 1000 local units per inch.
"""
try:
x_scale, y_scale = scale
except TypeError:
x_scale = y_scale = scale
return FreeformBuilder.new(self, start_x, start_y, x_scale, y_scale)
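# --- Usage sketch (assumes python-pptx; 100 local units per inch is an arbitrary choice) ---
from pptx import Presentation
from pptx.util import Inches

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])
builder = slide.shapes.build_freeform(scale=Inches(1) / 100)
builder.add_line_segments([(100, 0), (100, 100), (0, 100), (0, 0)])
freeform = builder.convert_to_shape(origin_x=Inches(1), origin_y=Inches(1))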
|
python
|
{
"resource": ""
}
|
q20308
|
SlideShapes.title
|
train
|
def title(self):
"""
The title placeholder shape on the slide or |None| if the slide has
no title placeholder.
"""
for elm in self._spTree.iter_ph_elms():
if elm.ph_idx == 0:
return self._shape_factory(elm)
return None
|
python
|
{
"resource": ""
}
|
q20309
|
_MoviePicElementCreator._poster_frame_rId
|
train
|
def _poster_frame_rId(self):
"""Return the rId of relationship to poster frame image.
The poster frame is the image used to represent the video before it's
played.
"""
_, poster_frame_rId = self._slide_part.get_or_add_image_part(
self._poster_frame_image_file
)
return poster_frame_rId
|
python
|
{
"resource": ""
}
|
q20310
|
_MoviePicElementCreator._video_part_rIds
|
train
|
def _video_part_rIds(self):
"""Return the rIds for relationships to media part for video.
This is where the media part and its relationships to the slide are
actually created.
"""
media_rId, video_rId = self._slide_part.get_or_add_video_media_part(
self._video
)
return media_rId, video_rId
|
python
|
{
"resource": ""
}
|
q20311
|
Shape.shape_type
|
train
|
def shape_type(self):
"""
Unique integer identifying the type of this shape, like
``MSO_SHAPE_TYPE.TEXT_BOX``.
"""
if self.is_placeholder:
return MSO_SHAPE_TYPE.PLACEHOLDER
if self._sp.has_custom_geometry:
return MSO_SHAPE_TYPE.FREEFORM
if self._sp.is_autoshape:
return MSO_SHAPE_TYPE.AUTO_SHAPE
if self._sp.is_textbox:
return MSO_SHAPE_TYPE.TEXT_BOX
msg = 'Shape instance of unrecognized shape type'
raise NotImplementedError(msg)
|
python
|
{
"resource": ""
}
|
q20312
|
Package.next_media_partname
|
train
|
def next_media_partname(self, ext):
"""Return |PackURI| instance for next available media partname.
    The partname is the first available one, starting at sequence number 1;
    sequence numbers left unused by removed parts are reused. *ext* is used as the extension on the
returned partname.
"""
def first_available_media_idx():
media_idxs = sorted([
part.partname.idx for part in self.iter_parts()
if part.partname.startswith('/ppt/media/media')
])
for i, media_idx in enumerate(media_idxs):
idx = i + 1
if idx < media_idx:
return idx
return len(media_idxs)+1
idx = first_available_media_idx()
return PackURI('/ppt/media/media%d.%s' % (idx, ext))
|
python
|
{
"resource": ""
}
|
q20313
|
_ImageParts._find_by_sha1
|
train
|
def _find_by_sha1(self, sha1):
"""
Return an |ImagePart| object belonging to this package or |None| if
no matching image part is found. The image part is identified by the
SHA1 hash digest of the image binary it contains.
"""
for image_part in self:
# ---skip unknown/unsupported image types, like SVG---
if not hasattr(image_part, 'sha1'):
continue
if image_part.sha1 == sha1:
return image_part
return None
|
python
|
{
"resource": ""
}
|
q20314
|
NotesMasterPart.create_default
|
train
|
def create_default(cls, package):
"""
Create and return a default notes master part, including creating the
new theme it requires.
"""
notes_master_part = cls._new(package)
theme_part = cls._new_theme_part(package)
notes_master_part.relate_to(theme_part, RT.THEME)
return notes_master_part
|
python
|
{
"resource": ""
}
|
q20315
|
NotesMasterPart._new_theme_part
|
train
|
def _new_theme_part(cls, package):
"""
Create and return a default theme part suitable for use with a notes
master.
"""
partname = package.next_partname('/ppt/theme/theme%d.xml')
content_type = CT.OFC_THEME
theme = CT_OfficeStyleSheet.new_default()
return XmlPart(partname, content_type, theme, package)
|
python
|
{
"resource": ""
}
|
q20316
|
SlidePart.get_or_add_video_media_part
|
train
|
def get_or_add_video_media_part(self, video):
"""Return rIds for media and video relationships to media part.
    A new |MediaPart| object is created only if one does not already
    exist for this video (as happens when the same video appears more
    than once in a presentation). Two relationships to the media part are created,
one each with MEDIA and VIDEO relationship types. The need for two
appears to be for legacy support for an earlier (pre-Office 2010)
PowerPoint media embedding strategy.
"""
media_part = self._package.get_or_add_media_part(video)
media_rId = self.relate_to(media_part, RT.MEDIA)
video_rId = self.relate_to(media_part, RT.VIDEO)
return media_rId, video_rId
|
python
|
{
"resource": ""
}
|
q20317
|
SlidePart.notes_slide
|
train
|
def notes_slide(self):
"""
The |NotesSlide| instance associated with this slide. If the slide
does not have a notes slide, a new one is created. The same single
instance is returned on each call.
"""
try:
notes_slide_part = self.part_related_by(RT.NOTES_SLIDE)
except KeyError:
notes_slide_part = self._add_notes_slide_part()
return notes_slide_part.notes_slide
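# --- Usage sketch (assumes python-pptx; the public Slide.notes_slide delegates to this part) ---
from pptx import Presentation

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[0])
notes = slide.notes_slide  # created on first access when missing
notes.notes_text_frame.text = "Speaker notes go here"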
|
python
|
{
"resource": ""
}
|
q20318
|
SlidePart._add_notes_slide_part
|
train
|
def _add_notes_slide_part(self):
"""
Return a newly created |NotesSlidePart| object related to this slide
part. Caller is responsible for ensuring this slide doesn't already
have a notes slide part.
"""
notes_slide_part = NotesSlidePart.new(self.package, self)
self.relate_to(notes_slide_part, RT.NOTES_SLIDE)
return notes_slide_part
|
python
|
{
"resource": ""
}
|
q20319
|
_BaseWorkbookWriter.xlsx_blob
|
train
|
def xlsx_blob(self):
"""
Return the byte stream of an Excel file formatted as chart data for
the category chart specified in the chart data object.
"""
xlsx_file = BytesIO()
with self._open_worksheet(xlsx_file) as (workbook, worksheet):
self._populate_worksheet(workbook, worksheet)
return xlsx_file.getvalue()
|
python
|
{
"resource": ""
}
|
q20320
|
CategoryWorkbookWriter._series_col_letter
|
train
|
def _series_col_letter(self, series):
"""
The letter of the Excel worksheet column in which the data for a
series appears.
"""
column_number = 1 + series.categories.depth + series.index
return self._column_reference(column_number)
|
python
|
{
"resource": ""
}
|
q20321
|
_BasePlot.data_labels
|
train
|
def data_labels(self):
"""
|DataLabels| instance providing properties and methods on the
collection of data labels associated with this plot.
"""
dLbls = self._element.dLbls
if dLbls is None:
raise ValueError(
'plot has no data labels, set has_data_labels = True first'
)
return DataLabels(dLbls)
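# --- Usage sketch (assumes python-pptx; builds a minimal chart to reach a plot) ---
from pptx import Presentation
from pptx.chart.data import CategoryChartData
from pptx.enum.chart import XL_CHART_TYPE
from pptx.util import Inches

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])
data = CategoryChartData()
data.categories = ['East', 'West']
data.add_series('Sales', (19.2, 21.4))
frame = slide.shapes.add_chart(
    XL_CHART_TYPE.COLUMN_CLUSTERED, Inches(1), Inches(1),
    Inches(6), Inches(4), data)
plot = frame.chart.plots[0]
plot.has_data_labels = True  # creates dLbls; without it, data_labels raises ValueError
plot.data_labels.show_value = True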
|
python
|
{
"resource": ""
}
|
q20322
|
Chart.font
|
train
|
def font(self):
"""Font object controlling text format defaults for this chart."""
defRPr = (
self._chartSpace
.get_or_add_txPr()
.p_lst[0]
.get_or_add_pPr()
.get_or_add_defRPr()
)
return Font(defRPr)
|
python
|
{
"resource": ""
}
|
q20323
|
Chart.legend
|
train
|
def legend(self):
"""
    A |Legend| object providing access to the properties of the legend
    for this chart, or |None| if the chart has no legend.
"""
legend_elm = self._chartSpace.chart.legend
if legend_elm is None:
return None
return Legend(legend_elm)
|
python
|
{
"resource": ""
}
|
q20324
|
Chart.value_axis
|
train
|
def value_axis(self):
"""
The |ValueAxis| object providing access to properties of the value
axis of this chart. Raises |ValueError| if the chart has no value
axis.
"""
valAx_lst = self._chartSpace.valAx_lst
if not valAx_lst:
raise ValueError('chart has no value axis')
idx = 1 if len(valAx_lst) > 1 else 0
return ValueAxis(valAx_lst[idx])
|
python
|
{
"resource": ""
}
|
q20325
|
CT_TextBodyProperties.autofit
|
train
|
def autofit(self):
"""
The autofit setting for the text frame, a member of the
``MSO_AUTO_SIZE`` enumeration.
"""
if self.noAutofit is not None:
return MSO_AUTO_SIZE.NONE
if self.normAutofit is not None:
return MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE
if self.spAutoFit is not None:
return MSO_AUTO_SIZE.SHAPE_TO_FIT_TEXT
return None
|
python
|
{
"resource": ""
}
|
q20326
|
Categories.depth
|
train
|
def depth(self):
"""
The number of hierarchy levels in this category graph. Returns 0 if
it contains no categories.
"""
categories = self._categories
if not categories:
return 0
first_depth = categories[0].depth
for category in categories[1:]:
if category.depth != first_depth:
raise ValueError('category depth not uniform')
return first_depth
|
python
|
{
"resource": ""
}
|
q20327
|
Category.depth
|
train
|
def depth(self):
"""
The number of hierarchy levels rooted at this category node. Returns
1 if this category has no sub-categories.
"""
sub_categories = self._sub_categories
if not sub_categories:
return 1
first_depth = sub_categories[0].depth
for category in sub_categories[1:]:
if category.depth != first_depth:
raise ValueError('category depth not uniform')
return first_depth + 1
|
python
|
{
"resource": ""
}
|
q20328
|
Category.leaf_count
|
train
|
def leaf_count(self):
"""
The number of leaf category nodes under this category. Returns
1 if this category has no sub-categories.
"""
if not self._sub_categories:
return 1
return sum(category.leaf_count for category in self._sub_categories)
|
python
|
{
"resource": ""
}
|
q20329
|
CT_SlideIdList._next_id
|
train
|
def _next_id(self):
"""
Return the next available slide ID as an int. Valid slide IDs start
at 256. The next integer value greater than the max value in use is
    chosen, which minimizes the chance of reusing the id of a deleted
slide.
"""
id_str_lst = self.xpath('./p:sldId/@id')
return max([255]+[int(id_str) for id_str in id_str_lst])+1
|
python
|
{
"resource": ""
}
|
q20330
|
_default_pptx_path
|
train
|
def _default_pptx_path():
"""
Return the path to the built-in default .pptx package.
"""
_thisdir = os.path.split(__file__)[0]
return os.path.join(_thisdir, 'templates', 'default.pptx')
|
python
|
{
"resource": ""
}
|
q20331
|
API.__get_url
|
train
|
def __get_url(self, endpoint):
""" Get URL for requests """
url = self.url
api = "wc-api"
    if not url.endswith("/"):
url = "%s/" % url
if self.wp_api:
api = "wp-json"
return "%s%s/%s/%s" % (url, api, self.version, endpoint)
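# --- Usage sketch (assumes the woocommerce package; URL and keys are placeholders,
# and the request needs a reachable store) ---
from woocommerce import API

wcapi = API(
    url="https://example.com",
    consumer_key="ck_xxxxxxxx",
    consumer_secret="cs_xxxxxxxx",
    wp_api=True,  # route through /wp-json instead of the legacy /wc-api
    version="wc/v3",
)
response = wcapi.get("products")
print(response.status_code)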
|
python
|
{
"resource": ""
}
|
q20332
|
API.__get_oauth_url
|
train
|
def __get_oauth_url(self, url, method, **kwargs):
""" Generate oAuth1.0a URL """
oauth = OAuth(
url=url,
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
version=self.version,
method=method,
oauth_timestamp=kwargs.get("oauth_timestamp", int(time()))
)
return oauth.get_oauth_url()
|
python
|
{
"resource": ""
}
|
q20333
|
OAuth.get_oauth_url
|
train
|
def get_oauth_url(self):
""" Returns the URL with OAuth params """
params = OrderedDict()
if "?" in self.url:
url = self.url[:self.url.find("?")]
for key, value in parse_qsl(urlparse(self.url).query):
params[key] = value
else:
url = self.url
params["oauth_consumer_key"] = self.consumer_key
params["oauth_timestamp"] = self.timestamp
params["oauth_nonce"] = self.generate_nonce()
params["oauth_signature_method"] = "HMAC-SHA256"
params["oauth_signature"] = self.generate_oauth_signature(params, url)
query_string = urlencode(params)
return "%s?%s" % (url, query_string)
|
python
|
{
"resource": ""
}
|
q20334
|
OAuth.generate_oauth_signature
|
train
|
def generate_oauth_signature(self, params, url):
""" Generate OAuth Signature """
    if "oauth_signature" in params:
del params["oauth_signature"]
base_request_uri = quote(url, "")
params = self.sorted_params(params)
params = self.normalize_parameters(params)
query_params = ["{param_key}%3D{param_value}".format(param_key=key, param_value=value)
for key, value in params.items()]
query_string = "%26".join(query_params)
string_to_sign = "%s&%s&%s" % (self.method, base_request_uri, query_string)
consumer_secret = str(self.consumer_secret)
if self.version not in ["v1", "v2"]:
consumer_secret += "&"
hash_signature = HMAC(
consumer_secret.encode(),
str(string_to_sign).encode(),
sha256
).digest()
return b64encode(hash_signature).decode("utf-8").replace("\n", "")
|
python
|
{
"resource": ""
}
|
q20335
|
OAuth.generate_nonce
|
train
|
def generate_nonce():
""" Generate nonce number """
    nonce = ''.join(str(randint(0, 9)) for _ in range(8))
return HMAC(
nonce.encode(),
"secret".encode(),
sha1
).hexdigest()
|
python
|
{
"resource": ""
}
|
q20336
|
print_ldamodel_distribution
|
train
|
def print_ldamodel_distribution(distrib, row_labels, val_labels, top_n=10):
"""
    Print the `top_n` top values from an LDA model's distribution `distrib`. Can be used for topic-word distributions and
    document-topic distributions.
"""
df_values = top_n_from_distribution(distrib, top_n=top_n, row_labels=row_labels, val_labels=None)
df_labels = top_n_from_distribution(distrib, top_n=top_n, row_labels=row_labels, val_labels=val_labels)
for i, (ind, row) in enumerate(df_labels.iterrows()):
print(ind)
for j, label in enumerate(row):
val = df_values.iloc[i, j]
print('> #%d. %s (%f)' % (j + 1, label, val))
|
python
|
{
"resource": ""
}
|
q20337
|
print_ldamodel_topic_words
|
train
|
def print_ldamodel_topic_words(topic_word_distrib, vocab, n_top=10, row_labels=DEFAULT_TOPIC_NAME_FMT):
    """Print the top `n_top` values from an LDA model's topic-word distributions."""
print_ldamodel_distribution(topic_word_distrib, row_labels=row_labels, val_labels=vocab,
top_n=n_top)
|
python
|
{
"resource": ""
}
|
q20338
|
print_ldamodel_doc_topics
|
train
|
def print_ldamodel_doc_topics(doc_topic_distrib, doc_labels, n_top=3, val_labels=DEFAULT_TOPIC_NAME_FMT):
    """Print the top `n_top` values from an LDA model's document-topic distributions."""
print_ldamodel_distribution(doc_topic_distrib, row_labels=doc_labels, val_labels=val_labels,
top_n=n_top)
|
python
|
{
"resource": ""
}
|
q20339
|
save_ldamodel_to_pickle
|
train
|
def save_ldamodel_to_pickle(picklefile, model, vocab, doc_labels, dtm=None, **kwargs):
    """Save an LDA model to a pickle file."""
pickle_data({'model': model, 'vocab': vocab, 'doc_labels': doc_labels, 'dtm': dtm}, picklefile)
|
python
|
{
"resource": ""
}
|
q20340
|
plot_heatmap
|
train
|
def plot_heatmap(fig, ax, data,
xaxislabel=None, yaxislabel=None,
xticklabels=None, yticklabels=None,
title=None, grid=True,
values_in_cells=True, round_values_in_cells=2,
legend=False,
fontsize_axislabel=None,
fontsize_axisticks=None,
                 fontsize_cell_values=None):
    """
    Helper function to plot a heatmap for a 2D matrix `data` using matplotlib's `matshow` function.
    """
if not isinstance(data, np.ndarray):
data = np.array(data)
if data.ndim != 2:
raise ValueError('`data` must be a 2D matrix/array')
# draw basic heatmap
cax = ax.matshow(data)
# draw legend
if legend:
fig.colorbar(cax)
# set title
if title:
ax.set_title(title, y=1.25)
n_rows, n_cols = data.shape
# draw values in cells
if values_in_cells:
textcol_thresh = data.min() + (data.max() - data.min()) / 2
x_indices, y_indices = np.meshgrid(np.arange(n_cols), np.arange(n_rows))
for x, y in zip(x_indices.flatten(), y_indices.flatten()):
val = data[y, x]
# lower values get white text color for better visibility
textcol = 'white' if val < textcol_thresh else 'black'
disp_val = round(val, round_values_in_cells) if round_values_in_cells is not None else val
ax.text(x, y, disp_val, va='center', ha='center', color=textcol, fontsize=fontsize_cell_values)
# customize axes
if xaxislabel:
ax.set_xlabel(xaxislabel)
if yaxislabel:
ax.set_ylabel(yaxislabel)
if fontsize_axislabel:
for item in (ax.xaxis.label, ax.yaxis.label):
item.set_fontsize(fontsize_axislabel)
ax.set_xticks(np.arange(0, n_cols))
ax.set_yticks(np.arange(0, n_rows))
if xticklabels is not None:
ax.set_xticklabels(xticklabels, rotation=45, ha='left')
if yticklabels is not None:
ax.set_yticklabels(yticklabels)
if fontsize_axisticks:
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(fontsize_axisticks)
# gridlines based on minor ticks
if grid:
ax.set_xticks(np.arange(-.5, n_cols), minor=True)
ax.set_yticks(np.arange(-.5, n_rows), minor=True)
ax.grid(which='minor', color='w', linestyle='-', linewidth=1)
return fig, ax
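# --- Usage sketch (numpy/matplotlib assumed; values are illustrative) ---
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
plot_heatmap(fig, ax, np.random.rand(3, 4),
             xticklabels=list('abcd'), yticklabels=['r1', 'r2', 'r3'],
             title='random example', legend=True)
plt.show()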
|
python
|
{
"resource": ""
}
|
q20341
|
get_term_proportions
|
train
|
def get_term_proportions(dtm):
"""
Return the term proportions given the document-term matrix `dtm`
"""
unnorm = get_term_frequencies(dtm)
if unnorm.sum() == 0:
raise ValueError('`dtm` does not contain any tokens (is all-zero)')
else:
return unnorm / unnorm.sum()
|
python
|
{
"resource": ""
}
|
q20342
|
TMPreproc._setup_workers
|
train
|
def _setup_workers(self, initial_states=None):
"""
Create worker processes and queues. Distribute the work evenly across worker processes. Optionally
send initial states defined in list `initial_states` to each worker process.
"""
if initial_states is not None:
require_listlike(initial_states)
self.tasks_queues = []
self.results_queue = mp.Queue()
self.workers = []
common_kwargs = dict(tokenizer=self.tokenizer,
stemmer=self.stemmer,
lemmata_dict=self.lemmata_dict,
pos_tagger=self.pos_tagger)
if initial_states is not None:
logger.info('setting up %d worker processes with initial states' % len(initial_states))
for i_worker, w_state in enumerate(initial_states):
task_q = mp.JoinableQueue()
w = _PreprocWorker(i_worker, w_state.pop('docs'), self.language, task_q, self.results_queue,
name='_PreprocWorker#%d' % i_worker, **common_kwargs)
w.start()
task_q.put(('set_state', w_state))
self.workers.append(w)
self.tasks_queues.append(task_q)
            for q in self.tasks_queues:
                q.join()
else:
# distribute work evenly across the worker processes
# we assume that the longer a document is, the longer the processing time for it is
# hence we distribute the work evenly by document length
logger.info('distributing work via greedy partitioning')
docs_and_lengths = {dl: len(dt) for dl, dt in self.docs.items()}
docs_per_worker = greedy_partitioning(docs_and_lengths, k=self.n_max_workers)
logger.info('setting up %d worker processes' % len(docs_per_worker))
for i_worker, doc_labels in enumerate(docs_per_worker):
if not doc_labels: continue
task_q = mp.JoinableQueue()
w_docs = {dl: self.docs.get(dl) for dl in doc_labels}
w = _PreprocWorker(i_worker, w_docs, self.language, task_q, self.results_queue,
name='_PreprocWorker#%d' % i_worker, **common_kwargs)
w.start()
self.workers.append(w)
self.tasks_queues.append(task_q)
self.n_workers = len(self.workers)
|
python
|
{
"resource": ""
}
|
q20343
|
_words_by_score
|
train
|
def _words_by_score(words, score, least_to_most, n=None):
"""
Order a vector of `words` by a `score`, either `least_to_most` or reverse. Optionally return only the top `n`
results.
"""
if words.shape != score.shape:
raise ValueError('`words` and `score` must have the same shape')
if n is not None and (n <= 0 or n > len(words)):
        raise ValueError('`n` must be in range [1, len(words)]')
indices = np.argsort(score)
if not least_to_most:
indices = indices[::-1]
ordered_words = words[indices]
if n is not None:
return ordered_words[:n]
else:
return ordered_words
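# --- Usage sketch (toy data; numpy assumed) ---
import numpy as np

words = np.array(['apple', 'pear', 'plum'])
score = np.array([0.2, 0.9, 0.5])
print(_words_by_score(words, score, least_to_most=False, n=2))
# -> ['pear' 'plum']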
|
python
|
{
"resource": ""
}
|
q20344
|
_words_by_salience_score
|
train
|
def _words_by_salience_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None, least_to_most=False):
"""Return words in `vocab` ordered by saliency score."""
saliency = get_word_saliency(topic_word_distrib, doc_topic_distrib, doc_lengths)
return _words_by_score(vocab, saliency, least_to_most=least_to_most, n=n)
|
python
|
{
"resource": ""
}
|
q20345
|
_words_by_distinctiveness_score
|
train
|
def _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None,
least_to_most=False):
"""Return words in `vocab` ordered by distinctiveness score."""
p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
distinct = get_word_distinctiveness(topic_word_distrib, p_t)
return _words_by_score(vocab, distinct, least_to_most=least_to_most, n=n)
|
python
|
{
"resource": ""
}
|
q20346
|
pickle_data
|
train
|
def pickle_data(data, picklefile):
"""Helper function to pickle `data` in `picklefile`."""
with open(picklefile, 'wb') as f:
pickle.dump(data, f, protocol=2)
|
python
|
{
"resource": ""
}
|
q20347
|
unpickle_file
|
train
|
def unpickle_file(picklefile, **kwargs):
"""Helper function to unpickle data from `picklefile`."""
with open(picklefile, 'rb') as f:
return pickle.load(f, **kwargs)
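# --- Usage sketch (round-trips data through the companion pickle_data helper above) ---
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), 'example.pickle')
pickle_data({'answer': 42}, path)
print(unpickle_file(path))  # -> {'answer': 42}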
|
python
|
{
"resource": ""
}
|
q20348
|
dyndoc_insert
|
train
|
def dyndoc_insert(src):
    """dyndoc_insert - a decorator to insert API-docparts dynamically."""
# manipulating docstrings this way is tricky due to indentation
# the JSON needs leading whitespace to be interpreted correctly
import json
import re
def mkblock(d, flag=0):
# response, pretty formatted
v = json.dumps(d, indent=2)
if flag == 1:
# strip the '[' and ']' in case of a list holding items
# that stand on their own (example: tick records from a stream)
            nw = re.findall(r'.*?\[(.*)\]', v, flags=re.S)
v = nw[0]
# add leading whitespace for each line and start with a newline
return "\n{}".format("".join(["{0:>16}{1}\n".format("", L)
for L in v.split('\n')]))
def dec(obj):
        allSlots = re.findall(r"\{(_v3.*?)\}", obj.__doc__)
docsub = {}
sub = {}
for k in allSlots:
p = re.findall("^(_v3.*)_(.*)", k)
p = list(*p)
sub.update({p[1]: p[0]})
for v in sub.values():
for k in sub.keys():
docsub["{}_url".format(v)] = "{}".format(src[v]["url"])
if "resp" == k:
docsub.update({"{}_resp".format(v):
mkblock(src[v]["response"])})
if "body" == k:
docsub.update({"{}_body".format(v):
mkblock(src[v]["body"])})
if "params" == k:
docsub.update({"{}_params".format(v):
mkblock(src[v]["params"])})
if "ciresp" == k:
docsub.update({"{}_ciresp".format(v):
mkblock(src[v]["response"], 1)})
obj.__doc__ = obj.__doc__.format(**docsub)
return obj
return dec
|
python
|
{
"resource": ""
}
|
q20349
|
endpoint
|
train
|
def endpoint(url, method="GET", expected_status=200):
"""endpoint - decorator to manipulate the REST-service endpoint.
The endpoint decorator sets the endpoint and the method for the class
to access the REST-service.
"""
def dec(obj):
obj.ENDPOINT = url
obj.METHOD = method
obj.EXPECTED_STATUS = expected_status
return obj
return dec
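# --- Usage sketch (hypothetical class, illustrating the attributes the decorator sets) ---
@endpoint("v3/accounts/{accountID}/orders", method="POST", expected_status=201)
class OrderCreate(object):
    pass

print(OrderCreate.ENDPOINT, OrderCreate.METHOD, OrderCreate.EXPECTED_STATUS)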
|
python
|
{
"resource": ""
}
|
q20350
|
abstractclass
|
train
|
def abstractclass(cls):
"""abstractclass - class decorator.
    Make sure the class is abstract and cannot be instantiated on its own.
@abstractclass
class A(object):
def __init__(self, *args, **kwargs):
# logic
pass
class B(A):
pass
    a = A() # raises TypeError("Use of abstract base class")
b = B() # works fine
"""
setattr(cls, "_ISNEVER", cls.__bases__[0].__name__)
origInit = cls.__dict__["__init__"]
def wrapInit(self, *args, **kwargs):
# when the class is instantiated we can check for bases
# we don't want it to be the base class
try:
assert self.__class__.__bases__[-1].__name__ != self._ISNEVER
origInit(self, *args, **kwargs)
except AssertionError:
raise TypeError("Use of abstract base class")
# replace the original __init__
setattr(wrapInit, "__doc__", getattr(origInit, "__doc__"))
setattr(origInit, "__doc__", "")
setattr(cls, "__init__", wrapInit)
return cls
|
python
|
{
"resource": ""
}
|
q20351
|
granularity_to_time
|
train
|
def granularity_to_time(s):
"""convert a named granularity into seconds.
get value in seconds for named granularities: M1, M5 ... H1 etc.
>>> print(granularity_to_time("M5"))
300
"""
mfact = {
'S': 1,
'M': 60,
'H': 3600,
'D': 86400,
'W': 604800,
}
try:
        f, n = re.match(r"(?P<f>[SMHDW])(?:(?P<n>\d+)|)", s).groups()
n = n if n else 1
return mfact[f] * int(n)
except Exception as e:
raise ValueError(e)
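# --- Usage sketch (assumes `re` is imported in the enclosing module) ---
print(granularity_to_time("M15"))  # -> 900
print(granularity_to_time("H4"))   # -> 14400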
|
python
|
{
"resource": ""
}
|
q20352
|
get_classes
|
train
|
def get_classes(modName):
"""return a list of all classes in a module."""
classNames = []
for name, obj in inspect.getmembers(sys.modules[modName]):
if inspect.isclass(obj):
classNames.append(name)
return classNames
|
python
|
{
"resource": ""
}
|
q20353
|
InstrumentsCandlesFactory
|
train
|
def InstrumentsCandlesFactory(instrument, params=None):
"""InstrumentsCandlesFactory - generate InstrumentCandles requests.
InstrumentsCandlesFactory is used to retrieve historical data by
automatically generating consecutive requests when the OANDA limit
of *count* records is exceeded.
This is known by calculating the number of candles between *from* and
*to*. If *to* is not specified *to* will be equal to *now*.
The *count* parameter is only used to control the number of records to
retrieve in a single request.
The *includeFirst* parameter is forced to make sure that results do
    not have a 1-record gap between consecutive requests.
Parameters
----------
instrument : string (required)
the instrument to create the order for
params: params (optional)
the parameters to specify the historical range,
see the REST-V20 docs regarding 'instrument' at developer.oanda.com
If no params are specified, just a single InstrumentsCandles request
will be generated acting the same as if you had just created it
directly.
Example
-------
The *oandapyV20.API* client processes requests as objects. So,
downloading large historical batches simply comes down to:
>>> import json
>>> from oandapyV20 import API
>>> from oandapyV20.contrib.factories import InstrumentsCandlesFactory
>>>
>>> client = API(access_token=...)
>>> instrument, granularity = "EUR_USD", "M15"
>>> _from = "2017-01-01T00:00:00Z"
>>> params = {
... "from": _from,
... "granularity": granularity,
... "count": 2500,
... }
    >>> with open("/tmp/{}.{}".format(instrument, granularity), "w") as OUT:
    ...     # The factory returns a generator generating consecutive
    ...     # requests to retrieve full history from date 'from' till 'to'
    ...     for r in InstrumentsCandlesFactory(instrument=instrument,
    ...                                        params=params):
    ...         client.request(r)
    ...         OUT.write(json.dumps(r.response.get('candles'), indent=2))
.. note:: Normally you can't combine *from*, *to* and *count*.
    When *count* is specified, it is used to calculate the gap between
    *to* and *from*. The *params* passed to the generated request
    itself does not contain the *count* parameter.
"""
    RFC3339 = "%Y-%m-%dT%H:%M:%SZ"
    params = params or {}  # guard against params=None before .get() is used
    # if not specified use the default of 'S5' as OANDA does
    gs = granularity_to_time(params.get('granularity', 'S5'))
_from = None
_epoch_from = None
if 'from' in params:
_from = datetime.strptime(params.get('from'), RFC3339)
_epoch_from = int(calendar.timegm(_from.timetuple()))
_to = datetime.utcnow()
if 'to' in params:
_tmp = datetime.strptime(params.get('to'), RFC3339)
# if specified datetime > now, we use 'now' instead
if _tmp > _to:
logger.info("datetime %s is in the future, will be set to 'now'",
params.get('to'))
else:
_to = _tmp
_epoch_to = int(calendar.timegm(_to.timetuple()))
_count = params.get('count', DEFAULT_BATCH)
# OANDA will respond with a V20Error if count > MAX_BATCH
if 'to' in params and 'from' not in params:
raise ValueError("'to' specified without 'from'")
if not params or 'from' not in params:
yield instruments.InstrumentsCandles(instrument=instrument,
params=params)
else:
delta = _epoch_to - _epoch_from
nbars = delta / gs
cpparams = params.copy()
for k in ['count', 'from', 'to']:
if k in cpparams:
del cpparams[k]
# force includeFirst
cpparams.update({"includeFirst": True})
# generate InstrumentsCandles requests for all 'bars', each request
# requesting max. count records
for _ in range(_count, int(((nbars//_count)+1))*_count+1, _count):
to = _epoch_from + _count * gs
if to > _epoch_to:
to = _epoch_to
yparams = cpparams.copy()
yparams.update({"from": secs2time(_epoch_from).strftime(RFC3339)})
yparams.update({"to": secs2time(to).strftime(RFC3339)})
yield instruments.InstrumentsCandles(instrument=instrument,
params=yparams)
_epoch_from = to
|
python
|
{
"resource": ""
}
|
q20354
|
API.request
|
train
|
def request(self, endpoint):
"""Perform a request for the APIRequest instance 'endpoint'.
Parameters
----------
endpoint : APIRequest
The endpoint parameter contains an instance of an APIRequest
containing the endpoint, method and optionally other parameters
or body data.
Raises
------
V20Error in case of HTTP response code >= 400
"""
    method = endpoint.method.lower()
params = None
try:
params = getattr(endpoint, "params")
except AttributeError:
# request does not have params
params = {}
headers = {}
if hasattr(endpoint, "HEADERS"):
headers = getattr(endpoint, "HEADERS")
request_args = {}
if method == 'get':
request_args['params'] = params
elif hasattr(endpoint, "data") and endpoint.data:
request_args['json'] = endpoint.data
# if any parameter for request then merge them
request_args.update(self._request_params)
# which API to access ?
if not (hasattr(endpoint, "STREAM") and
getattr(endpoint, "STREAM") is True):
url = "{}/{}".format(
TRADING_ENVIRONMENTS[self.environment]["api"],
endpoint)
response = self.__request(method, url,
request_args, headers=headers)
content = response.content.decode('utf-8')
content = json.loads(content)
# update endpoint
endpoint.response = content
endpoint.status_code = response.status_code
return content
else:
url = "{}/{}".format(
TRADING_ENVIRONMENTS[self.environment]["stream"],
endpoint)
endpoint.response = self.__stream_request(method,
url,
request_args,
headers=headers)
return endpoint.response
|
python
|
{
"resource": ""
}
|
q20355
|
make_definition_classes
|
train
|
def make_definition_classes(mod):
"""Dynamically create the definition classes from module 'mod'."""
rootpath = "oandapyV20"
PTH = "{}.definitions.{}".format(rootpath, mod)
M = import_module(PTH)
__ALL__ = [] # construct the __all__ variable
for cls, cldef in M.definitions.items():
orig, fiV = next(six.iteritems(cldef))
fiK = orig.replace('-', '_')
# create the docstring dynamically
clsdoc = dyndoc.format(cls=cls,
PTH=PTH,
mod=mod,
firstItem=fiK, orig=orig,
firstItemVal=fiV)
# Since we can't change the docstring afterwards (it's readonly)
# figure this out before and not during ...
for K, V in cldef.items():
attrName = K
if "-" in K:
attrName = K.replace('-', '_')
adoc = _doc.format(K, attrName, K)
clsdoc += adoc
# the class
dyncls = type(cls, (object,), {'__doc__': clsdoc})
definitions = dict()
for K, V in cldef.items():
attrName = K
if "-" in K:
attrName = K.replace('-', '_')
setattr(dyncls, attrName, K) # set as class attributes
definitions.update({K: V}) # for mapping by __getitem__
def mkgi():
def __getitem__(self, definitionID):
"""return description for definitionID."""
return self._definitions[definitionID]
return __getitem__
def mkinit(definitions):
def __init__(self):
self._definitions = definitions
return __init__
def mkPropDefinitions():
def definitions(self):
"""readonly property holding definition dict."""
return self._definitions
return property(definitions)
setattr(dyncls, "__getitem__", mkgi())
setattr(dyncls, "__init__", mkinit(definitions))
setattr(dyncls, "definitions", mkPropDefinitions())
setattr(sys.modules["{}.definitions.{}".format(rootpath, mod)],
cls, dyncls)
__ALL__.append(cls)
setattr(sys.modules["{}.definitions.{}".format(rootpath, mod)],
"__all__", tuple(__ALL__))
|
python
|
{
"resource": ""
}
|
q20356
|
_fix_integrity_error
|
train
|
def _fix_integrity_error(f):
"""Ensure raising of IntegrityError on unique constraint violations.
    Earlier versions of hdbcli don't raise the hdbcli.dbapi.IntegrityError
    exception for unique constraint violations. To also support those older
    versions, this decorator inspects the raised exception and rewrites it
    based on HANA's error code.
"""
@wraps(f)
def wrapper(dialect, *args, **kwargs):
try:
return f(dialect, *args, **kwargs)
except dialect.dbapi.Error as exc:
if exc.errorcode == 301 and not isinstance(exc, dialect.dbapi.IntegrityError):
raise dialect.dbapi.IntegrityError(exc)
raise
return wrapper
|
python
|
{
"resource": ""
}
|
q20357
|
pp_xml
|
train
|
def pp_xml(body):
"""Pretty print format some XML so it's readable."""
pretty = xml.dom.minidom.parseString(body)
return pretty.toprettyxml(indent=" ")
|
python
|
{
"resource": ""
}
|
q20358
|
Client.power_on
|
train
|
def power_on(self):
"""Power on the box."""
payload = amt.wsman.power_state_request(self.uri, "on")
return self.post(payload, CIM_PowerManagementService)
|
python
|
{
"resource": ""
}
|
q20359
|
Client.set_next_boot
|
train
|
def set_next_boot(self, boot_device):
"""Sets the machine to boot to boot_device on its next reboot
Will default back to normal boot list on the reboot that follows.
"""
payload = amt.wsman.change_boot_order_request(self.uri, boot_device)
self.post(payload)
payload = amt.wsman.enable_boot_config_request(self.uri)
self.post(payload)
|
python
|
{
"resource": ""
}
|
q20360
|
dumps
|
train
|
def dumps(obj, **kwargs):
"""Serialize ``obj`` to a JSON5-formatted ``str``."""
t = type(obj)
if obj is True:
return u'true'
elif obj is False:
return u'false'
    elif obj is None:
return u'null'
elif t == type('') or t == type(u''):
single = "'" in obj
double = '"' in obj
if single and double:
return json.dumps(obj)
elif single:
return '"' + obj + '"'
else:
return "'" + obj + "'"
elif t is float or t is int:
return str(obj)
elif t is dict:
return u'{' + u','.join([
_dumpkey(k) + u':' + dumps(v) for k, v in obj.items()
]) + '}'
elif t is list:
return u'[' + ','.join([dumps(el) for el in obj]) + u']'
else: # pragma: no cover
return u''
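# --- Usage sketch (list example; dict keys go through the companion _dumpkey helper) ---
print(dumps([1, 2.5, True, None, "it's"]))
# -> [1,2.5,true,null,"it's"]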
|
python
|
{
"resource": ""
}
|
q20361
|
Connection.destroy
|
train
|
def destroy(self):
"""Close the connection, and close any associated
CBS authentication session.
"""
try:
self.lock()
_logger.debug("Unlocked connection %r to close.", self.container_id)
self._close()
finally:
self.release()
uamqp._Platform.deinitialize()
|
python
|
{
"resource": ""
}
|
q20362
|
Connection.work
|
train
|
def work(self):
"""Perform a single Connection iteration."""
    # if no error has been recorded, `raise None` throws TypeError,
    # which is deliberately swallowed below
    try:
raise self._error
except TypeError:
pass
except Exception as e:
_logger.warning("%r", e)
raise
try:
self.lock()
self._conn.do_work()
except compat.TimeoutException:
_logger.debug("Connection %r timed out while waiting for lock acquisition.", self.container_id)
finally:
self.release()
|
python
|
{
"resource": ""
}
|
q20363
|
MessageSender._detach_received
|
train
|
def _detach_received(self, error):
"""Callback called when a link DETACH frame is received.
This callback will process the received DETACH error to determine if
the link is recoverable or whether it should be shutdown.
:param error: The error information from the detach
frame.
:type error: ~uamqp.errors.ErrorResponse
"""
# pylint: disable=protected-access
if error:
condition = error.condition
description = error.description
info = error.info
else:
condition = b"amqp:unknown-error"
description = None
info = None
self._error = errors._process_link_error(self.error_policy, condition, description, info)
_logger.info("Received Link detach event: %r\nLink: %r\nDescription: %r"
"\nDetails: %r\nRetryable: %r\nConnection: %r",
condition, self.name, description, info, self._error.action.retry,
self._session._connection.container_id)
|
python
|
{
"resource": ""
}
|
q20364
|
MessageSender.get_state
|
train
|
def get_state(self):
"""Get the state of the MessageSender and its underlying Link.
:rtype: ~uamqp.constants.MessageSenderState
"""
try:
raise self._error
except TypeError:
pass
except Exception as e:
_logger.warning("%r", e)
raise
return self._state
|
python
|
{
"resource": ""
}
|
q20365
|
AMQPClientAsync.open_async
|
train
|
async def open_async(self, connection=None):
"""Asynchronously open the client. The client can create a new Connection
or an existing Connection can be passed in. This existing Connection
may have an existing CBS authentication Session, which will be
used for this client as well. Otherwise a new Session will be
created.
:param connection: An existing Connection that may be shared between
multiple clients.
    :type connection: ~uamqp.async_ops.connection_async.ConnectionAsync
"""
# pylint: disable=protected-access
if self._session:
return # already open
if connection:
_logger.info("Using existing connection.")
self._auth = connection.auth
self._ext_connection = True
self._connection = connection or self.connection_type(
self._hostname,
self._auth,
container_id=self._name,
max_frame_size=self._max_frame_size,
channel_max=self._channel_max,
idle_timeout=self._idle_timeout,
properties=self._properties,
remote_idle_timeout_empty_frame_send_ratio=self._remote_idle_timeout_empty_frame_send_ratio,
error_policy=self._error_policy,
debug=self._debug_trace,
loop=self.loop)
if not self._connection.cbs and isinstance(self._auth, authentication.CBSAsyncAuthMixin):
self._connection.cbs = await asyncio.shield(self._auth.create_authenticator_async(
self._connection,
debug=self._debug_trace,
incoming_window=self._incoming_window,
outgoing_window=self._outgoing_window,
handle_max=self._handle_max,
on_attach=self._on_attach,
loop=self.loop))
self._session = self._auth._session
elif self._connection.cbs:
self._session = self._auth._session
else:
self._session = self.session_type(
self._connection,
incoming_window=self._incoming_window,
outgoing_window=self._outgoing_window,
handle_max=self._handle_max,
on_attach=self._on_attach,
loop=self.loop)
if self._keep_alive_interval:
self._keep_alive_thread = asyncio.ensure_future(self._keep_alive_async(), loop=self.loop)
|
python
|
{
"resource": ""
}
|
q20366
|
AMQPClientAsync.close_async
|
train
|
async def close_async(self):
"""Close the client asynchronously. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
"""
if self.message_handler:
await self.message_handler.destroy_async()
self.message_handler = None
self._shutdown = True
if self._keep_alive_thread:
await self._keep_alive_thread
self._keep_alive_thread = None
if not self._session:
return # already closed.
if not self._connection.cbs:
_logger.info("Closing non-CBS session.")
await asyncio.shield(self._session.destroy_async())
else:
_logger.info("CBS session pending %r.", self._connection.container_id)
self._session = None
if not self._ext_connection:
_logger.info("Closing exclusive connection %r.", self._connection.container_id)
await asyncio.shield(self._connection.destroy_async())
else:
_logger.info("Shared connection remaining open.")
self._connection = None
|
python
|
{
"resource": ""
}
|
q20367
|
AMQPClientAsync.do_work_async
|
train
|
async def do_work_async(self):
"""Run a single connection iteration asynchronously.
This will return `True` if the connection is still open
and ready to be used for further work, or `False` if it needs
to be shut down.
:rtype: bool
:raises: TimeoutError or ~uamqp.errors.ClientTimeout if CBS authentication timeout reached.
"""
if self._shutdown:
return False
if not await self.client_ready_async():
return True
return await self._client_run_async()
|
python
|
{
"resource": ""
}
|
q20368
|
SendClientAsync.send_message_async
|
train
|
async def send_message_async(self, messages, close_on_done=False):
"""Send a single message or batched message asynchronously.
:param messages: A message to send. This can either be a single instance
of ~uamqp.message.Message, or multiple messages wrapped in an instance
of ~uamqp.message.BatchMessage.
    :type messages: ~uamqp.message.Message or ~uamqp.message.BatchMessage
:param close_on_done: Close the client once the message is sent. Default is `False`.
:type close_on_done: bool
:raises: ~uamqp.errors.MessageException if message fails to send after retry policy
is exhausted.
"""
batch = messages.gather()
pending_batch = []
for message in batch:
message.idle_time = self._counter.get_current_ms()
async with self._pending_messages_lock:
self._pending_messages.append(message)
pending_batch.append(message)
await self.open_async()
try:
while any([m for m in pending_batch if m.state not in constants.DONE_STATES]):
await self.do_work_async()
failed = [m for m in pending_batch if m.state == constants.MessageState.SendFailed]
if any(failed):
details = {"total_messages": len(pending_batch), "number_failed": len(failed)}
details['failed_messages'] = {}
exception = None
for failed_message in failed:
exception = failed_message._response # pylint: disable=protected-access
details['failed_messages'][failed_message] = exception
raise errors.ClientMessageError(exception, info=details)
finally:
if close_on_done:
await self.close_async()
|
python
|
{
"resource": ""
}
|
q20369
|
SendClientAsync.send_all_messages_async
|
train
|
async def send_all_messages_async(self, close_on_done=True):
"""Send all pending messages in the queue asynchronously.
This will return a list of the send result of all the pending
messages so it can be determined if any messages failed to send.
This function will open the client if it is not already open.
:param close_on_done: Close the client once the messages are sent.
Default is `True`.
:type close_on_done: bool
:rtype: list[~uamqp.constants.MessageState]
"""
await self.open_async()
try:
async with self._pending_messages_lock:
messages = self._pending_messages[:]
await self.wait_async()
results = [m.state for m in messages]
return results
finally:
if close_on_done:
await self.close_async()
|
python
|
{
"resource": ""
}
|
q20370
|
ReceiveClientAsync._client_ready_async
|
train
|
async def _client_ready_async(self):
"""Determine whether the client is ready to start receiving messages.
    To be ready, the connection must be open and authentication complete.
    The Session, Link and MessageReceiver must be open and in non-errored
states.
:rtype: bool
:raises: ~uamqp.errors.MessageHandlerError if the MessageReceiver
goes into an error state.
"""
# pylint: disable=protected-access
if not self.message_handler:
self.message_handler = self.receiver_type(
self._session, self._remote_address, self._name,
on_message_received=self._message_received,
name='receiver-link-{}'.format(uuid.uuid4()),
debug=self._debug_trace,
receive_settle_mode=self._receive_settle_mode,
prefetch=self._prefetch,
max_message_size=self._max_message_size,
properties=self._link_properties,
error_policy=self._error_policy,
encoding=self._encoding,
loop=self.loop)
await asyncio.shield(self.message_handler.open_async())
return False
if self.message_handler.get_state() == constants.MessageReceiverState.Error:
raise errors.MessageHandlerError(
"Message Receiver Client is in an error state. "
"Please confirm credentials and access permissions."
"\nSee debug trace for more details.")
if self.message_handler.get_state() != constants.MessageReceiverState.Open:
self._last_activity_timestamp = self._counter.get_current_ms()
return False
return True
|
python
|
{
"resource": ""
}
|
q20371
|
ReceiveClientAsync.receive_message_batch_async
|
train
|
async def receive_message_batch_async(self, max_batch_size=None, on_message_received=None, timeout=0):
"""Receive a batch of messages asynchronously. This method will return as soon as some
messages are available rather than waiting to achieve a specific batch size, and
therefore the number of messages returned per call will vary up to the maximum allowed.
If the receive client is configured with `auto_complete=True` then the messages received
in the batch returned by this function will already be settled. Alternatively, if
`auto_complete=False`, then each message will need to be explicitly settled before
it expires and is released.
:param max_batch_size: The maximum number of messages that can be returned in
one call. This value cannot be larger than the prefetch value, and if not specified,
the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
    :param timeout: The timeout in milliseconds for which to wait to receive any messages.
If no messages are received in this time, an empty list will be returned. If set to
0, the client will continue to wait until at least one message is received. The
default is 0.
:type timeout: int
"""
self._message_received_callback = on_message_received
max_batch_size = max_batch_size or self._prefetch
if max_batch_size > self._prefetch:
raise ValueError(
'Maximum batch size {} cannot be greater than the '
'connection link credit: {}'.format(max_batch_size, self._prefetch))
timeout = self._counter.get_current_ms() + int(timeout) if timeout else 0
expired = False
self._received_messages = self._received_messages or queue.Queue()
await self.open_async()
receiving = True
batch = []
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
if len(batch) >= max_batch_size:
return batch
while receiving and not expired and len(batch) < max_batch_size:
while receiving and self._received_messages.qsize() < max_batch_size:
if timeout and self._counter.get_current_ms() > timeout:
expired = True
break
before = self._received_messages.qsize()
receiving = await self.do_work_async()
received = self._received_messages.qsize() - before
if self._received_messages.qsize() > 0 and received == 0:
# No new messages arrived, but we have some - so return what we have.
expired = True
break
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
return batch
|
python
|
{
"resource": ""
}
|
q20372
|
ReceiveClientAsync.receive_messages_iter_async
|
train
|
def receive_messages_iter_async(self, on_message_received=None):
"""Receive messages by asynchronous generator. Messages returned in the
generator have already been accepted - if you wish to add logic to accept
or reject messages based on custom criteria, pass in a callback.
If the receive client is configured with `auto_complete=True` then the messages received
from the iterator returned by this function will be automatically settled when the iterator
is incremented. Alternatively, if `auto_complete=False`, then each message will need to
be explicitly settled before it expires and is released.
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
:rtype: Generator[~uamqp.message.Message]
"""
self._message_received_callback = on_message_received
self._received_messages = queue.Queue()
return AsyncMessageIter(self, auto_complete=self.auto_complete)
|
python
|
{
"resource": ""
}
|
q20373
|
create_sas_token
|
train
|
def create_sas_token(key_name, shared_access_key, scope, expiry=timedelta(hours=1)):
"""Create a SAS token.
:param key_name: The username/key name/policy name for the token.
:type key_name: bytes
:param shared_access_key: The shared access key to generate the token from.
:type shared_access_key: bytes
:param scope: The token permissions scope.
:type scope: bytes
:param expiry: The lifetime of the generated token. Default is 1 hour.
:type expiry: ~datetime.timedelta
:rtype: bytes
"""
shared_access_key = base64.b64encode(shared_access_key)
    # use total_seconds() rather than .seconds so expiries of a day or more are correct
    abs_expiry = int(time.time()) + int(expiry.total_seconds())
return c_uamqp.create_sas_token(shared_access_key, scope, key_name, abs_expiry)
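# --- Usage sketch (assumes the uamqp package; key material and scope are placeholders) ---
from datetime import timedelta

token = create_sas_token(
    b"RootManageSharedAccessKey",
    b"<shared-access-key-bytes>",
    b"sb://myhub.servicebus.windows.net/",
    expiry=timedelta(hours=2))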
|
python
|
{
"resource": ""
}
|
q20374
|
_convert_py_number
|
train
|
def _convert_py_number(value):
"""Convert a Python integer value into equivalent C object.
Will attempt to use the smallest possible conversion, starting with int, then long
then double.
"""
try:
return c_uamqp.int_value(value)
except OverflowError:
pass
try:
return c_uamqp.long_value(value)
except OverflowError:
pass
return c_uamqp.double_value(value)
|
python
|
{
"resource": ""
}
|
q20375
|
ConnectionAsync.work_async
|
train
|
async def work_async(self):
"""Perform a single Connection iteration asynchronously."""
try:
raise self._error
except TypeError:
pass
except Exception as e:
_logger.warning("%r", e)
raise
try:
await self.lock_async()
if self._closing:
_logger.debug("Connection unlocked but shutting down.")
return
await self.loop.run_in_executor(self._executor, functools.partial(self._conn.do_work))
except asyncio.TimeoutError:
_logger.debug("Connection %r timed out while waiting for lock acquisition.", self.container_id)
finally:
self.release_async()
|
python
|
{
"resource": ""
}
|
q20376
|
ConnectionAsync.destroy_async
|
train
|
async def destroy_async(self):
"""Close the connection asynchronously, and close any associated
CBS authentication session.
"""
try:
await self.lock_async()
_logger.debug("Unlocked connection %r to close.", self.container_id)
await self._close_async()
except asyncio.TimeoutError:
_logger.debug(
"Connection %r timed out while waiting for lock acquisition on destroy. Destroying anyway.",
self.container_id)
await self._close_async()
finally:
self.release_async()
uamqp._Platform.deinitialize()
|
python
|
{
"resource": ""
}
|
q20377
|
AMQPAuth.set_tlsio
|
train
|
def set_tlsio(self, hostname, port, http_proxy):
"""Setup the default underlying TLS IO layer. On Windows this is
Schannel, on Linux and MacOS this is OpenSSL.
:param hostname: The endpoint hostname.
:type hostname: bytes
:param port: The TLS port.
    :type port: int
    :param http_proxy: Optional HTTP proxy settings, or None for a
     direct connection.
    :type http_proxy: dict
    """
_default_tlsio = c_uamqp.get_default_tlsio()
_tlsio_config = c_uamqp.TLSIOConfig()
_tlsio_config.hostname = hostname
_tlsio_config.port = int(port)
if http_proxy:
proxy_config = self._build_proxy_config(hostname, port, http_proxy)
_tlsio_config.set_proxy_config(proxy_config)
self._underlying_xio = c_uamqp.xio_from_tlsioconfig(_default_tlsio, _tlsio_config)
cert = self.cert_file or certifi.where()
with open(cert, 'rb') as cert_handle:
cert_data = cert_handle.read()
try:
self._underlying_xio.set_certificates(cert_data)
except ValueError:
_logger.warning('Unable to set external certificates.')
self.sasl_client = _SASLClient(self._underlying_xio, self.sasl)
self.consumed = False
|
python
|
{
"resource": ""
}
|
q20378
|
AMQPAuth.close
|
train
|
def close(self):
"""Close the authentication layer and cleanup
all the authentication wrapper objects.
"""
self.sasl.mechanism.destroy()
self.sasl_client.get_client().destroy()
self._underlying_xio.destroy()
|
python
|
{
"resource": ""
}
|
q20379
|
Message.decode_from_bytes
|
train
|
def decode_from_bytes(cls, data):
"""Decode an AMQP message from a bytearray.
The returned message will not have a delivery context and
therefore will be considered to be in an "already settled" state.
:param data: The AMQP wire-encoded bytes to decode.
:type data: bytes or bytearray
"""
decoded_message = c_uamqp.decode_message(len(data), data)
return cls(message=decoded_message)
|
python
|
{
"resource": ""
}
|
q20380
|
Message._parse_message
|
train
|
def _parse_message(self, message):
"""Parse a message received from an AMQP service.
:param message: The received C message.
:type message: uamqp.c_uamqp.cMessage
"""
_logger.debug("Parsing received message %r.", self.delivery_no)
self._message = message
body_type = message.body_type
if body_type == c_uamqp.MessageBodyType.NoneType:
self._body = None
elif body_type == c_uamqp.MessageBodyType.DataType:
self._body = DataBody(self._message)
elif body_type == c_uamqp.MessageBodyType.SequenceType:
raise TypeError("Message body type Sequence not supported.")
else:
self._body = ValueBody(self._message)
_props = self._message.properties
if _props:
_logger.debug("Parsing received message properties %r.", self.delivery_no)
self.properties = MessageProperties(properties=_props, encoding=self._encoding)
_header = self._message.header
if _header:
_logger.debug("Parsing received message header %r.", self.delivery_no)
self.header = MessageHeader(header=_header)
_footer = self._message.footer
if _footer:
_logger.debug("Parsing received message footer %r.", self.delivery_no)
self.footer = _footer.map
_app_props = self._message.application_properties
if _app_props:
_logger.debug("Parsing received message application properties %r.", self.delivery_no)
self.application_properties = _app_props.map
_ann = self._message.message_annotations
if _ann:
_logger.debug("Parsing received message annotations %r.", self.delivery_no)
self.annotations = _ann.map
_delivery_ann = self._message.delivery_annotations
if _delivery_ann:
_logger.debug("Parsing received message delivery annotations %r.", self.delivery_no)
self.delivery_annotations = _delivery_ann.map
|
python
|
{
"resource": ""
}
|
q20381
|
Message.get_message_encoded_size
|
train
|
def get_message_encoded_size(self):
"""Pre-emptively get the size of the message once it has been encoded
to go over the wire so we can raise an error if the message will be
    rejected for being too large.
This method is not available for messages that have been received.
:rtype: int
"""
if not self._message:
raise ValueError("No message data to encode.")
cloned_data = self._message.clone()
self._populate_message_attributes(cloned_data)
encoded_data = []
return c_uamqp.get_encoded_message_size(cloned_data, encoded_data)
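
# Sketch: size-check a message before queuing it for send. The 256 KB cap
# is an assumed example limit, not one enforced by this method.
def _example_check_size(message, max_size=256 * 1024):
    encoded_size = message.get_message_encoded_size()
    if encoded_size > max_size:
        raise ValueError(
            "Message of {} bytes exceeds the {} byte limit.".format(
                encoded_size, max_size))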
|
python
|
{
"resource": ""
}
|
q20382
|
Message.encode_message
|
train
|
def encode_message(self):
"""Encode message to AMQP wire-encoded bytearray.
:rtype: bytearray
"""
if not self._message:
raise ValueError("No message data to encode.")
cloned_data = self._message.clone()
self._populate_message_attributes(cloned_data)
encoded_data = []
c_uamqp.get_encoded_message_size(cloned_data, encoded_data)
return b"".join(encoded_data)
|
python
|
{
"resource": ""
}
|
q20383
|
Message.gather
|
train
|
def gather(self):
"""Return all the messages represented by this object.
This will always be a list of a single message.
:rtype: list[~uamqp.message.Message]
"""
if self.state in constants.RECEIVE_STATES:
raise TypeError("Only new messages can be gathered.")
if not self._message:
raise ValueError("Message data already consumed.")
try:
# Re-raise any error disposition stored on settlement; if self._response
# is None this raises TypeError, meaning there is no error to report.
raise self._response
except TypeError:
pass
return [self]
|
python
|
{
"resource": ""
}
|
q20384
|
Message.get_message
|
train
|
def get_message(self):
"""Get the underlying C message from this object.
:rtype: uamqp.c_uamqp.cMessage
"""
if not self._message:
return None
self._populate_message_attributes(self._message)
return self._message
|
python
|
{
"resource": ""
}
|
q20385
|
Message.accept
|
train
|
def accept(self):
"""Send a response disposition to the service to indicate that
a received message has been accepted. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. Returns `True` if the message was accepted, or `False` if
the message was already settled.
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageAccepted()
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
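
# Sketch of a PeekLock receive loop. Assumes a ``ReceiveClient`` created
# with ``receive_settle_mode=constants.ReceiverSettleMode.PeekLock`` and a
# caller-supplied ``process`` callable:
#
#     for message in client.receive_messages_iter():
#         if process(message):
#             message.accept()
#         else:
#             message.release()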
|
python
|
{
"resource": ""
}
|
q20386
|
Message.reject
|
train
|
def reject(self, condition=None, description=None):
"""Send a response disposition to the service to indicate that
a received message has been rejected. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. A rejected message will increment the message's delivery count.
Returns `True` if the message was rejected, or `False` if the message
was already settled.
:param condition: The AMQP rejection code. By default this is `amqp:internal-error`.
:type condition: bytes or str
:param description: A description/reason to accompany the rejection.
:type description: bytes or str
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageRejected(
condition=condition,
description=description,
encoding=self._encoding)
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
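
# Sketch: rejecting a malformed message with an explicit AMQP condition.
# The condition and description shown are illustrative choices:
#
#     message.reject(
#         condition=b"amqp:decode-error",
#         description="Payload failed schema validation.")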
|
python
|
{
"resource": ""
}
|
q20387
|
Message.release
|
train
|
def release(self):
"""Send a response disposition to the service to indicate that
a received message has been released. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. A released message will not increment the message's
delivery count. Returns `True` if the message was released, or `False`
if the message was already settled.
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageReleased()
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
|
python
|
{
"resource": ""
}
|
q20388
|
Message.modify
|
train
|
def modify(self, failed, deliverable, annotations=None):
"""Send a response disposition to the service to indicate that
a received message has been modified. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. Returns `True` if the message was modified, or `False` if
the message was already settled.
:param failed: Whether this delivery of this message failed. This does not
indicate whether subsequent deliveries of this message would also fail.
:type failed: bool
:param deliverable: Whether this message will be deliverable to this client
on subsequent deliveries - i.e. whether delivery is retryable.
:type deliverable: bool
:param annotations: Annotations to attach to response.
:type annotations: dict
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageModified(
failed,
deliverable,
annotations=annotations,
encoding=self._encoding)
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
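
# Sketch: flag a delivery as failed and not redeliverable to this client.
# The annotations dict is an illustrative diagnostic payload:
#
#     message.modify(
#         failed=True,
#         deliverable=False,
#         annotations={b"x-opt-failure": b"downstream timeout"})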
|
python
|
{
"resource": ""
}
|
q20389
|
BatchMessage._create_batch_message
|
train
|
def _create_batch_message(self):
"""Create a ~uamqp.message.Message for a value supplied by the data
generator. Applies all properties and annotations to the message.
:rtype: ~uamqp.message.Message
"""
return Message(body=[],
properties=self.properties,
annotations=self.annotations,
msg_format=self.batch_format,
header=self.header,
encoding=self._encoding)
|
python
|
{
"resource": ""
}
|
q20390
|
BatchMessage._multi_message_generator
|
train
|
def _multi_message_generator(self):
"""Generate multiple ~uamqp.message.Message objects from a single data
stream that in total may exceed the maximum individual message size.
Data will be continuously added to a single message until that message
reaches a max allowable size, at which point it will be yielded and
a new message will be started.
:rtype: generator[~uamqp.message.Message]
"""
unappended_message_bytes = None
while True:
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
if unappended_message_bytes:
new_message._body.append(unappended_message_bytes) # pylint: disable=protected-access
body_size += len(unappended_message_bytes)
try:
for data in self._body_gen:
message_bytes = None
try:
if not data.application_properties: # Message-like object
data.application_properties = self.application_properties
message_bytes = data.encode_message()
except AttributeError: # raw data
wrap_message = Message(body=data, application_properties=self.application_properties)
message_bytes = wrap_message.encode_message()
body_size += len(message_bytes)
if (body_size + message_size) > self.max_message_length:
new_message.on_send_complete = self.on_send_complete
unappended_message_bytes = message_bytes
yield new_message
# Caught by the except clause below, so the StopIteration never escapes
# the generator frame (and is therefore safe under PEP 479).
raise StopIteration()
new_message._body.append(message_bytes) # pylint: disable=protected-access
except StopIteration:
_logger.debug("Sent partial message.")
continue
else:
new_message.on_send_complete = self.on_send_complete
yield new_message
_logger.debug("Sent all batched data.")
break
|
python
|
{
"resource": ""
}
|
q20391
|
BatchMessage.gather
|
train
|
def gather(self):
"""Return all the messages represented by this object. This will convert
the batch data into individual Message objects, which may be one
or more if multi_messages is set to `True`.
:rtype: list[~uamqp.message.Message]
"""
if self._multi_messages:
return self._multi_message_generator()
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
for data in self._body_gen:
message_bytes = None
try:
if not data.application_properties: # Message-like object
data.application_properties = self.application_properties
message_bytes = data.encode_message()
except AttributeError: # raw data
wrap_message = Message(body=data, application_properties=self.application_properties)
message_bytes = wrap_message.encode_message()
body_size += len(message_bytes)
if (body_size + message_size) > self.max_message_length:
raise ValueError(
"Data set too large for a single message."
"Set multi_messages to True to split data across multiple messages.")
new_message._body.append(message_bytes) # pylint: disable=protected-access
new_message.on_send_complete = self.on_send_complete
return [new_message]
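
# Sketch: batching an iterable of payloads. With ``multi_messages=True``,
# ``gather()`` returns the generator from ``_multi_message_generator``
# instead of raising once the data outgrows a single message:
#
#     batch = BatchMessage(
#         data=(b"event-%d" % i for i in range(1000)),
#         multi_messages=True)
#     for message in batch.gather():
#         send_client.queue_message(message)  # an existing SendClient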
|
python
|
{
"resource": ""
}
|
q20392
|
DataBody.append
|
train
|
def append(self, data):
"""Append a section to the body.
:param data: The data to append.
:type data: str or bytes
"""
if isinstance(data, six.text_type):
self._message.add_body_data(data.encode(self._encoding))
elif isinstance(data, six.binary_type):
self._message.add_body_data(data)
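
# Usage note: str input is encoded with the body's configured encoding and
# bytes pass through unchanged; any other type is silently ignored, so
# serialize non-string payloads before appending:
#
#     body.append(u"caf\u00e9")  # encoded via self._encoding
#     body.append(b"\x00\x01")   # appended as-is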
|
python
|
{
"resource": ""
}
|
q20393
|
ValueBody.set
|
train
|
def set(self, value):
"""Set a value as the message body. This can be any
Python data type and it will be automatically encoded
into an AMQP type. If a specific AMQP type is required, a
`types.AMQPType` can be used.
:param value: The data to send in the body.
:type value: ~uamqp.types.AMQPType
"""
value = utils.data_factory(value)
self._message.set_body_value(value)
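
# Sketch: force a specific AMQP type for the body rather than relying on
# automatic encoding (assumes ``uamqp.types`` exposes ``AMQPLong``):
#
#     from uamqp import types
#     body.set(types.AMQPLong(42))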
|
python
|
{
"resource": ""
}
|
q20394
|
CBSAuthMixin.create_authenticator
|
train
|
def create_authenticator(self, connection, debug=False, **kwargs):
"""Create the AMQP session and the CBS channel with which
to negotiate the token.
:param connection: The underlying AMQP connection on which
to create the session.
:type connection: ~uamqp.connection.Connection
:param debug: Whether to emit network trace logging events for the
CBS session. Default is `False`. Logging events are set at INFO level.
:type debug: bool
:rtype: uamqp.c_uamqp.CBSTokenAuth
"""
self._connection = connection
self._session = Session(connection, **kwargs)
try:
self._cbs_auth = c_uamqp.CBSTokenAuth(
self.audience,
self.token_type,
self.token,
int(self.expires_at),
self._session._session, # pylint: disable=protected-access
self.timeout,
self._connection.container_id)
self._cbs_auth.set_trace(debug)
except ValueError:
self._session.destroy()
raise errors.AMQPConnectionError(
"Unable to open authentication session on connection {}.\n"
"Please confirm target hostname exists: {}".format(connection.container_id, connection.hostname))
return self._cbs_auth
|
python
|
{
"resource": ""
}
|
q20395
|
CBSAuthMixin.close_authenticator
|
train
|
def close_authenticator(self):
"""Close the CBS auth channel and session."""
_logger.info("Shutting down CBS session on connection: %r.", self._connection.container_id)
try:
_logger.debug("Unlocked CBS to close on connection: %r.", self._connection.container_id)
self._cbs_auth.destroy()
_logger.info("Auth closed, destroying session on connection: %r.", self._connection.container_id)
self._session.destroy()
finally:
_logger.info("Finished shutting down CBS session on connection: %r.", self._connection.container_id)
|
python
|
{
"resource": ""
}
|
q20396
|
SASTokenAuth.update_token
|
train
|
def update_token(self):
"""If a username and password are present - attempt to use them to
request a fresh SAS token.
"""
if not self.username or not self.password:
raise errors.TokenExpired("Unable to refresh token - no username or password.")
encoded_uri = compat.quote_plus(self.uri).encode(self._encoding) # pylint: disable=no-member
encoded_key = compat.quote_plus(self.username).encode(self._encoding) # pylint: disable=no-member
self.expires_at = time.time() + self.expires_in.seconds
self.token = utils.create_sas_token(
encoded_key,
self.password.encode(self._encoding),
encoded_uri,
self.expires_in)
|
python
|
{
"resource": ""
}
|
q20397
|
SASTokenAuth.from_shared_access_key
|
train
|
def from_shared_access_key(
cls,
uri,
key_name,
shared_access_key,
expiry=None,
port=constants.DEFAULT_AMQPS_PORT,
timeout=10,
retry_policy=TokenRetryPolicy(),
verify=None,
http_proxy=None,
encoding='UTF-8'):
"""Attempt to create a CBS token session using a Shared Access Key such
as is used to connect to Azure services.
:param uri: The AMQP endpoint URI. This must be provided as
a decoded string.
:type uri: str
:param key_name: The SAS token username, also referred to as the key
name or policy name.
:type key_name: str
:param shared_access_key: The SAS token password, also referred to as the key.
:type shared_access_key: str
:param expiry: The lifetime in seconds for the generated token. Default is 1 hour.
:type expiry: int
:param port: The TLS port - default for AMQP is 5671.
:type port: int
:param timeout: The timeout in seconds in which to negotiate the token.
The default value is 10 seconds.
:type timeout: int
:param retry_policy: The retry policy for the PUT token request. The default
retry policy has 3 retries.
:type retry_policy: ~uamqp.authentication.cbs_auth.TokenRetryPolicy
:param verify: The path to a user-defined certificate.
:type verify: str
:param http_proxy: HTTP proxy configuration. This should be a dictionary with
the following keys present: 'proxy_hostname' and 'proxy_port'. Additional optional
keys are 'username' and 'password'.
:type http_proxy: dict
:param encoding: The encoding to use if hostname is provided as a str.
Default is 'UTF-8'.
:type encoding: str
"""
expires_in = datetime.timedelta(seconds=expiry or constants.AUTH_EXPIRATION_SECS)
encoded_uri = compat.quote_plus(uri).encode(encoding) # pylint: disable=no-member
encoded_key = compat.quote_plus(key_name).encode(encoding) # pylint: disable=no-member
expires_at = time.time() + expires_in.seconds
token = utils.create_sas_token(
encoded_key,
shared_access_key.encode(encoding),
encoded_uri,
expires_in)
return cls(
uri, uri, token,
expires_in=expires_in,
expires_at=expires_at,
username=key_name,
password=shared_access_key,
port=port,
timeout=timeout,
retry_policy=retry_policy,
verify=verify,
http_proxy=http_proxy,
encoding=encoding)
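
# Sketch: building CBS auth for an Azure-style endpoint from a policy name
# and key. The URI and credentials below are placeholders:
#
#     sas_auth = SASTokenAuth.from_shared_access_key(
#         uri="sb://myns.servicebus.windows.net/myhub",
#         key_name="RootManageSharedAccessKey",
#         shared_access_key="<base64-key>",
#         expiry=3600)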
|
python
|
{
"resource": ""
}
|
q20398
|
MessageReceiver._state_changed
|
train
|
def _state_changed(self, previous_state, new_state):
"""Callback called whenever the underlying Receiver undergoes a change
of state. This function wraps the states as Enums to prepare for
calling the public callback.
:param previous_state: The previous Receiver state.
:type previous_state: int
:param new_state: The new Receiver state.
:type new_state: int
"""
try:
try:
_previous_state = constants.MessageReceiverState(previous_state)
except ValueError:
_previous_state = previous_state
try:
_new_state = constants.MessageReceiverState(new_state)
except ValueError:
_new_state = new_state
if _previous_state == constants.MessageReceiverState.Opening \
and _new_state == constants.MessageReceiverState.Error:
_logger.info("Receiver link failed to open - expecting to receive DETACH frame.")
elif self._session._link_error: # pylint: disable=protected-access
_logger.info("Receiver link ATTACH frame invalid - expecting to receive DETACH frame.")
else:
self.on_state_changed(_previous_state, _new_state)
except KeyboardInterrupt:
_logger.error("Received shutdown signal while updating receiver state from %r to %r",
previous_state, new_state)
self._error = errors.AMQPClientShutdown()
|
python
|
{
"resource": ""
}
|
q20399
|
MessageReceiver._settle_message
|
train
|
def _settle_message(self, message_number, response):
"""Send a settle dispostition for a received message.
:param message_number: The delivery number of the message
to settle.
:type message_number: int
:param response: The type of disposition to respond with, e.g. whether
the message was accepted, rejected or abandoned.
:type response: ~uamqp.errors.MessageResponse
"""
if not response or isinstance(response, errors.MessageAlreadySettled):
return
if isinstance(response, errors.MessageAccepted):
self._receiver.settle_accepted_message(message_number)
elif isinstance(response, errors.MessageReleased):
self._receiver.settle_released_message(message_number)
elif isinstance(response, errors.MessageRejected):
self._receiver.settle_rejected_message(
message_number,
response.error_condition,
response.error_description)
elif isinstance(response, errors.MessageModified):
self._receiver.settle_modified_message(
message_number,
response.failed,
response.undeliverable,
response.annotations)
else:
raise ValueError("Invalid message response type: {}".format(response))
|
python
|
{
"resource": ""
}
|