_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q43700
get_product_version
train
def get_product_version(path: typing.Union[str, Path]) -> VersionInfo:
    """
    Get version info from executable

    Args:
        path: path to the executable

    Returns:
        VersionInfo

    Raises:
        RuntimeError: if no version info could be extracted from the PE file
    """
    path = Path(path).absolute()
    # Parse the Portable Executable headers; version data lives in FileInfo.
    pe_info = pefile.PE(str(path))
    try:
        for file_info in pe_info.FileInfo:  # pragma: no branch
            if isinstance(file_info, list):
                # newer pefile versions nest entries in lists
                result = _parse_file_info(file_info)
                if result:
                    return result
            else:
                # older pefile versions expose a flat FileInfo sequence
                result = _parse_file_info(pe_info.FileInfo)
                if result:
                    return result
        raise RuntimeError(f'unable to obtain version from {path}')
    except (KeyError, AttributeError) as exc:
        # Malformed or missing version resource tables surface as these.
        traceback.print_exc()
        raise RuntimeError(f'unable to obtain version from {path}') from exc
python
{ "resource": "" }
q43701
ThreadedTaskManager.start
train
def start(self):
    """this function will start the queing thread that executes the
    iterator and feeds jobs into the queue.  It also starts the worker
    threads that just sit and wait for items to appear on the queue. This
    is a non blocking call, so the executing thread is free to do other
    things while the other threads work."""
    self.logger.debug('start')
    # start each of the task threads.
    for x in range(self.number_of_threads):
        # each thread is given the config object as well as a reference to
        # this manager class.  The manager class is where the queue lives
        # and the task threads will refer to it to get their next jobs.
        new_thread = TaskThread(self.config, self.task_queue)
        self.thread_list.append(new_thread)
        new_thread.start()
    # a single dedicated thread feeds jobs from the iterator into the queue
    self.queuing_thread = threading.Thread(
        name="QueuingThread",
        target=self._queuing_thread_func
    )
    self.queuing_thread.start()
python
{ "resource": "" }
q43702
ThreadedTaskManager.wait_for_completion
train
def wait_for_completion(self, waiting_func=None):
    """This is a blocking function call that will wait for the queuing
    thread to complete.

    parameters:
        waiting_func - this function will be called every one second while
                       waiting for the queuing thread to quit.  This allows
                       for logging timers, status indicators, etc."""
    self.logger.debug("waiting to join queuingThread")
    # _responsive_join polls once a second so waiting_func can run and
    # KeyboardInterrupt can be noticed.
    self._responsive_join(self.queuing_thread, waiting_func)
python
{ "resource": "" }
q43703
ThreadedTaskManager.wait_for_empty_queue
train
def wait_for_empty_queue(self, wait_log_interval=0, wait_reason=''):
    """Block until the task queue has been drained.

    parameters:
        wait_log_interval - while sleeping, it is helpful if the thread
                            periodically announces itself so that we know
                            it is still alive.  This is the time in
                            seconds between log entries.
        wait_reason - an explanation of why the thread is sleeping, e.g.
                      'there is no work to do'."""
    seconds = 0
    while not self.task_queue.empty():
        self.quit_check()
        # Announce ourselves periodically so the wait is visible in logs.
        if wait_log_interval and not seconds % wait_log_interval:
            self.logger.info('%s: %dsec so far', wait_reason, seconds)
            self.quit_check()
        seconds += 1
        time.sleep(1.0)
python
{ "resource": "" }
q43704
ThreadedTaskManager._responsive_join
train
def _responsive_join(self, thread, waiting_func=None): """similar to the responsive sleep, a join function blocks a thread until some other thread dies. If that takes a long time, we'd like to have some indicaition as to what the waiting thread is doing. This method will wait for another thread while calling the waiting_func once every second. parameters: thread - an instance of the TaskThread class representing the thread to wait for waiting_func - a function to call every second while waiting for the thread to die""" while True: try: thread.join(1.0) if not thread.isAlive(): break if waiting_func: waiting_func() except KeyboardInterrupt: self.logger.debug('quit detected by _responsive_join') self.quit = True
python
{ "resource": "" }
q43705
ThreadedTaskManager._queuing_thread_func
train
def _queuing_thread_func(self):
    """This is the function responsible for reading the iterator and
    putting contents into the queue.  It loops as long as there are items
    in the iterator.  Should something go wrong with this thread, or it
    detects the quit flag, it will calmly kill its workers and then quit
    itself."""
    self.logger.debug('_queuing_thread_func start')
    try:
        for job_params in self._get_iterator():  # may never raise
                                                 # StopIteration
            self.config.logger.debug('received %r', job_params)
            if job_params is None:
                # None is the "no work available right now" signal.
                if self.config.quit_on_empty_queue:
                    self.wait_for_empty_queue(
                        wait_log_interval=10,
                        wait_reason='waiting for queue to drain'
                    )
                    # Reuse the KeyboardInterrupt handler below to shut down.
                    raise KeyboardInterrupt
                self.logger.info("there is nothing to do. Sleeping "
                                 "for %d seconds" % self.config.idle_delay)
                self._responsive_sleep(self.config.idle_delay)
                continue
            self.quit_check()
            #self.logger.debug("queuing job %s", job_params)
            self.task_queue.put((self.task_func, job_params))
    except Exception:
        self.logger.error('queuing jobs has failed', exc_info=True)
    except KeyboardInterrupt:
        self.logger.debug('queuingThread gets quit request')
    finally:
        self.logger.debug("we're quitting queuingThread")
        self._kill_worker_threads()
        self.logger.debug("all worker threads stopped")
        # now that we've killed all the workers, we can set the quit flag
        # to True.  This will cause any other threads to die and shut down
        # the application.  Originally, the setting of this flag was at the
        # start of this "finally" block.  However, that meant that the
        # workers would abort their currently running jobs.  In the case of
        # of the natural ending of an application where an iterater ran to
        # exhaustion, the workers would die before completing their tasks.
        # Moving the setting of the flag to this location allows the
        # workers to finish and then the app shuts down.
        self.quit = True
python
{ "resource": "" }
q43706
patch
train
def patch(module, external=(), internal=()):
    """
    Temporarily monkey-patch dependencies which can be external to, or
    internal to the supplied module.

    :param module: Module object
    :param external: External dependencies to patch (full paths as strings)
    :param internal: Internal dependencies to patch (short names as strings)
    :return: decorator which passes a master mock as the first argument of
        the wrapped function
    """
    external = tuple(external)
    internal = tuple(internal)

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # The master mock is used to contain all of the sub-mocks. It is a
            # useful container and can also be used to determine the order of
            # calls to all sub-mocks.
            master_mock = mock.MagicMock()

            def get_mock(name):
                # Sub-mocks hang off the master mock under a sanitized name.
                return getattr(master_mock, __patch_name(name))

            def patch_external(name):
                return mock.patch(name, get_mock(name))

            def patch_internal(name):
                # Internal names are resolved relative to the module itself.
                return mock.patch(module.__name__ + '.' + name, get_mock(name))

            try:
                with __nested(patch_external(n) for n in external):
                    if external:
                        # Reload the module to ensure that patched external
                        # dependencies are accounted for.
                        reload_module(module)
                    # Patch objects in the module itself.
                    with __nested(patch_internal(n) for n in internal):
                        return fn(master_mock, *args, **kwargs)
            finally:
                if external:
                    # When all patches have been discarded, reload the module
                    # to bring it back to its original state (except for all
                    # of the references which have been reassigned).
                    reload_module(module)
        return wrapper
    return decorator
python
{ "resource": "" }
q43707
_linear_interp
train
def _linear_interp(curve, test_x, round_result=False):
    """
    Take a series of points and interpolate between them at ``test_x``.

    Args:
        curve (list[tuple]): A list of ``(x, y)`` points sorted in
            nondecreasing ``x`` value. If multiple points have the same
            ``x`` value, all but the last will be ignored.
        test_x (float): The ``x`` value to find the ``y`` value of

    Returns:
        float: The ``y`` value of the curve at ``test_x``
            if ``round_result is False``
        int: if ``round_result is True`` or the result is a whole number,
            the ``y`` value of the curve at ``test_x`` rounded to the
            nearest whole number.

    Raises:
        ProbabilityUndefinedError: if ``test_x`` is out of the domain of
            ``curve``

    Example:
        >>> curve = [(0, 0), (2, 1)]
        >>> _linear_interp(curve, 0.5)
        0.25
        >>> _linear_interp(curve, 0.5, round_result=True)
        0
    """
    index = 0
    for index in range(len(curve) - 1):
        # Ignore points which share an x value with the following point
        if curve[index][0] == curve[index + 1][0]:
            continue
        if curve[index][0] <= test_x <= curve[index + 1][0]:
            # test_x lies on this segment: solve y = m*x + b for it.
            slope = ((curve[index + 1][1] - curve[index][1]) /
                     (curve[index + 1][0] - curve[index][0]))
            y_intercept = curve[index][1] - (slope * curve[index][0])
            result = (slope * test_x) + y_intercept
            if round_result:
                return int(round(result))
            else:
                # Whole-number floats are collapsed to int for callers.
                if result.is_integer():
                    return int(result)
                else:
                    return result
    else:
        # No segment contained test_x: it is outside the curve's domain.
        raise ProbabilityUndefinedError
python
{ "resource": "" }
q43708
_clamp_value
train
def _clamp_value(value, minimum, maximum): """ Clamp a value to fit between a minimum and a maximum. * If ``value`` is between ``minimum`` and ``maximum``, return ``value`` * If ``value`` is below ``minimum``, return ``minimum`` * If ``value is above ``maximum``, return ``maximum`` Args: value (float or int): The number to clamp minimum (float or int): The lowest allowed return value maximum (float or int): The highest allowed return value Returns: float or int: the clamped value Raises: ValueError: if maximum < minimum Example: >>> _clamp_value(3, 5, 10) 5 >>> _clamp_value(11, 5, 10) 10 >>> _clamp_value(8, 5, 10) 8 """ if maximum < minimum: raise ValueError if value < minimum: return minimum elif value > maximum: return maximum else: return value
python
{ "resource": "" }
q43709
_normal_function
train
def _normal_function(x, mean, variance): """ Find a value in the cumulative distribution function of a normal curve. See https://en.wikipedia.org/wiki/Normal_distribution Args: x (float): Value to feed into the normal function mean (float): Mean of the normal function variance (float): Variance of the normal function Returns: float Example: >>> round(_normal_function(0, 0, 5), 4) 0.1784 """ e_power = -1 * (((x - mean) ** 2) / (2 * variance)) return (1 / math.sqrt(2 * variance * math.pi)) * (math.e ** e_power)
python
{ "resource": "" }
q43710
_is_valid_options_weights_list
train
def _is_valid_options_weights_list(value): '''Check whether ``values`` is a valid argument for ``weighted_choice``.''' return ((isinstance(value, list)) and len(value) > 1 and (all(isinstance(opt, tuple) and len(opt) == 2 and isinstance(opt[1], (int, float)) for opt in value)))
python
{ "resource": "" }
q43711
bound_weights
train
def bound_weights(weights, minimum=None, maximum=None): """ Bound a weight list so that all outcomes fit within specified bounds. The probability distribution within the ``minimum`` and ``maximum`` values remains the same. Weights in the list with outcomes outside of ``minimum`` and ``maximum`` are removed. If weights are removed from either end, attach weights at the modified edges at the same weight (y-axis) position they had interpolated in the original list. If neither ``minimum`` nor ``maximum`` are set, ``weights`` will be returned unmodified. If both are set, ``minimum`` must be less than ``maximum``. Args: weights (list): the list of weights where each weight is a ``tuple`` of form ``(float, float)`` corresponding to ``(outcome, weight)``. Must be sorted in increasing order of outcomes minimum (float): Lowest allowed outcome for the weight list maximum (float): Highest allowed outcome for the weight list Returns: list: A list of 2-tuples of form ``(float, float)``, the bounded weight list. 
Raises: ValueError: if ``maximum < minimum`` Example: >>> weights = [(0, 0), (2, 2), (4, 0)] >>> bound_weights(weights, 1, 3) [(1, 1), (2, 2), (3, 1)] """ # Copy weights to avoid side-effects bounded_weights = weights[:] # Remove weights outside of minimum and maximum if minimum is not None and maximum is not None: if maximum < minimum: raise ValueError bounded_weights = [bw for bw in bounded_weights if minimum <= bw[0] <= maximum] elif minimum is not None: bounded_weights = [bw for bw in bounded_weights if minimum <= bw[0]] elif maximum is not None: bounded_weights = [bw for bw in bounded_weights if bw[0] <= maximum] else: # Both minimum and maximum are None - the bound list is the same # as the original return bounded_weights # If weights were removed, attach new endpoints where they would have # appeared in the original curve if (bounded_weights[0][0] > weights[0][0] and bounded_weights[0][0] != minimum): bounded_weights.insert(0, (minimum, _linear_interp(weights, minimum))) if (bounded_weights[-1][0] < weights[-1][0] and bounded_weights[-1][0] != maximum): bounded_weights.append((maximum, _linear_interp(weights, maximum))) return bounded_weights
python
{ "resource": "" }
q43712
normal_distribution
train
def normal_distribution(mean, variance, minimum=None, maximum=None, weight_count=23): """ Return a list of weights approximating a normal distribution. Args: mean (float): The mean of the distribution variance (float): The variance of the distribution minimum (float): The minimum outcome possible to bound the output distribution to maximum (float): The maximum outcome possible to bound the output distribution to weight_count (int): The number of weights that will be used to approximate the distribution Returns: list: a list of ``(float, float)`` weight tuples approximating a normal distribution. Raises: ValueError: ``if maximum < minimum`` TypeError: if both ``minimum`` and ``maximum`` are ``None`` Example: >>> weights = normal_distribution(10, 3, ... minimum=0, maximum=20, ... weight_count=5) >>> rounded_weights = [(round(value, 2), round(strength, 2)) ... for value, strength in weights] >>> rounded_weights [(1.34, 0.0), (4.8, 0.0), (8.27, 0.14), (11.73, 0.14), (15.2, 0.0)] """ # Pin 0 to +- 5 sigma as bounds, or minimum and maximum # if they cross +/- sigma standard_deviation = math.sqrt(variance) min_x = (standard_deviation * -5) + mean max_x = (standard_deviation * 5) + mean step = (max_x - min_x) / weight_count current_x = min_x weights = [] while current_x < max_x: weights.append( (current_x, _normal_function(current_x, mean, variance)) ) current_x += step if minimum is not None or maximum is not None: return bound_weights(weights, minimum, maximum) else: return weights
python
{ "resource": "" }
q43713
weighted_rand
train
def weighted_rand(weights, round_result=False): """ Generate a non-uniform random value based on a list of weight tuples. Treats weights as coordinates for a probability distribution curve and rolls accordingly. Constructs a piece-wise linear curve according to coordinates given in ``weights`` and rolls random values in the curve's bounding box until a value is found under the curve Weight tuples should be of the form: (outcome, strength). Args: weights: (list): the list of weights where each weight is a tuple of form ``(float, float)`` corresponding to ``(outcome, strength)``. Weights with strength ``0`` or less will have no chance to be rolled. The list must be sorted in increasing order of outcomes. round_result (bool): Whether or not to round the resulting value to the nearest integer. Returns: float: A weighted random number int: A weighted random number rounded to the nearest ``int`` Example: >>> weighted_rand([(-3, 4), (0, 10), (5, 1)]) # doctest: +SKIP -0.650612268193731 >>> weighted_rand([(-3, 4), (0, 10), (5, 1)]) # doctest: +SKIP -2 """ # If just one weight is passed, simply return the weight's name if len(weights) == 1: return weights[0][0] # Is there a way to do this more efficiently? Maybe even require that # ``weights`` already be sorted? weights = sorted(weights, key=lambda w: w[0]) x_min = weights[0][0] x_max = weights[-1][0] y_min = 0 y_max = max([point[1] for point in weights]) # Roll random numbers until a valid one is found attempt_count = 0 while attempt_count < 500000: # Get sample point sample = (random.uniform(x_min, x_max), random.uniform(y_min, y_max)) if _point_under_curve(weights, sample): # The sample point is under the curve if round_result: return int(round(sample[0])) else: return sample[0] attempt_count += 1 else: warnings.warn( 'Point not being found in weighted_rand() after 500000 ' 'attempts, defaulting to a random weight point. ' 'If this happens often, it is probably a bug.') return random.choice(weights)[0]
python
{ "resource": "" }
q43714
weighted_choice
train
def weighted_choice(weights, as_index_and_value_tuple=False):
    """
    Generate a non-uniform random choice based on a list of option tuples.

    Treats each outcome as a discreet unit with a chance to occur.

    Args:
        weights (list): a list of options where each option is a tuple of
            form ``(Any, float)`` corresponding to ``(outcome, strength)``.
            Outcome values may be of any type. Options with strength ``0``
            or less will have no chance to be chosen.
        as_index_and_value_tuple (bool): Option to return an
            ``(index, value)`` tuple instead of just a single ``value``.
            This is useful when multiple outcomes in ``weights`` are the
            same and you need to know exactly which one was picked.

    Returns:
        Any: If ``as_index_and_value_tuple is False``, any one of the
            items in the outcomes of ``weights``
        tuple (int, Any): If ``as_index_and_value_tuple is True``, a
            2-tuple of form ``(index, value)``.

    Example:
        >>> choices = [('choice one', 10), ('choice two', 3)]
        >>> weighted_choice(choices)  # doctest: +SKIP
        'choice one'
    """
    if not len(weights):
        raise ValueError('List passed to weighted_choice() cannot be empty.')
    # Lay each option along a line segment with length equal to its
    # strength, pick a uniformly random point on the line, and return the
    # option whose segment contains that point.
    total_strength = sum(strength for _, strength in weights)
    if total_strength <= 0:
        raise ProbabilityUndefinedError(
            'No item weights in weighted_choice() are greater than 0. '
            'Probability distribution is undefined.')
    sample = random.uniform(0, total_strength)
    left_edge = 0
    for position, (outcome, strength) in enumerate(weights):
        if left_edge <= sample <= left_edge + strength:
            if as_index_and_value_tuple:
                return (position, outcome)
            return outcome
        left_edge += strength
    raise AssertionError('Something went wrong in weighted_choice(). '
                         'Please submit a bug report!')
python
{ "resource": "" }
q43715
weighted_order
train
def weighted_order(weights):
    """
    Non-uniformally order a list according to weighted priorities.

    ``weights`` is a list of ``(item, strength)`` tuples. The output is
    built by repeatedly calling ``weighted_choice()`` and appending the
    picked item, so higher-strength items tend to appear earlier. Uniform
    strengths behave like ``random.shuffle()`` on the items. An empty
    list yields an empty list.

    Args:
        weights (list): a list of tuples of form ``(Any, float or int)``
            corresponding to ``(item, strength)``.

    Returns:
        list: the newly ordered list

    Raises:
        ProbabilityUndefinedError: if any weight's strength is below 0.

    Example:
        >>> weights = [('Probably Earlier', 100),
        ...            ('Probably Middle', 20),
        ...            ('Probably Last', 1)]
        >>> weighted_order(weights)  # doctest: +SKIP
        ['Probably Earlier', 'Probably Middle', 'Probably Last']
    """
    if not len(weights):
        return []
    if any(strength <= 0 for _, strength in weights):
        raise ProbabilityUndefinedError(
            'All weight values must be greater than 0.')
    remaining = list(weights)
    ordered = []
    # Draw without replacement until every item has been placed.
    while remaining:
        index, item = weighted_choice(remaining,
                                      as_index_and_value_tuple=True)
        ordered.append(item)
        del remaining[index]
    return ordered
python
{ "resource": "" }
q43716
SocialLM.tokenize
train
def tokenize(cls, text, mode='c'):
    """
    Converts text into tokens

    :param text: string to be tokenized
    :param mode: split into chars (c) or words (w)
    :return: list of tokens
    """
    # ``list(text)`` is the idiomatic (and faster) form of
    # ``[ch for ch in text]``; likewise ``text.split()`` for words.
    if mode == 'c':
        return list(text)
    return text.split()
python
{ "resource": "" }
q43717
SocialLM.karbasa
train
def karbasa(self, result):
    """
    Finding if class probabilities are close to eachother

    Ratio of the distance between 1st and 2nd class, to the distance
    between 1st and last class.

    :param result: The dict returned by LM.calculate(); must contain an
        ``'all_probs'`` list.
    :return: float ratio
    """
    # Fix: sort a copy — the original called ``probs.sort()``, mutating
    # ``result['all_probs']`` in place as a hidden side effect.
    probs = sorted(result['all_probs'])
    # NOTE(review): with an ascending sort this ratio compares the two
    # *smallest* probabilities to the full spread; this preserves the
    # original behavior, but confirm it matches the docstring's
    # "1st/2nd class" intent.
    return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
python
{ "resource": "" }
q43718
SocialLM.is_mention_line
train
def is_mention_line(cls, word):
    """
    Detects links and mentions

    :param word: Token to be evaluated
    :return: True when the token is an @-mention or an http(s) link
    """
    # ``startswith`` accepts a tuple of prefixes: one call replaces the
    # original chain of if/elif tests.
    return word.startswith(('@', 'http://', 'https://'))
python
{ "resource": "" }
q43719
SocialLM.strip_mentions_links
train
def strip_mentions_links(self, text):
    """
    Strips Mentions and Links

    :param text: Text to be stripped from.
    :return: the text with mention/link tokens removed, joined by spaces
    """
    # (Removed the commented-out debug prints that were left in here.)
    kept = [word for word in text.split()
            if not self.is_mention_line(word)]
    return u' '.join(kept)
python
{ "resource": "" }
q43720
SocialLM.normalize
train
def normalize(self, text):
    """
    Normalizes text. Converts to lowercase, Unicode NFC normalization and
    removes mentions and links

    :param text: Text to be normalized.
    :return: the normalized text
    """
    # (Removed the commented-out debug print that was left in here.)
    text = text.lower()
    text = unicodedata.normalize('NFC', text)
    return self.strip_mentions_links(text)
python
{ "resource": "" }
q43721
VirtualTarget.output_files
train
def output_files(self):
    """Returns all output files from all of the current module's rules."""
    # Walk every outgoing dependency edge of this target in the build
    # graph and flatten the dependencies' output file lists.
    for dep in self.subgraph.successors(self.address):
        dep_rule = self.subgraph.node[dep]['target_obj']
        for out_file in dep_rule.output_files:
            yield out_file
python
{ "resource": "" }
q43722
HttpClientProtocol.write_request
train
async def write_request(
    self, method: constants.HttpRequestMethod, *,
    uri: str = "/",
    authority: Optional[str] = None,
    scheme: Optional[str] = None,
    headers: Optional[_HeaderType] = None) -> \
        "writers.HttpRequestWriter":
    """
    Send next request to the server.

    All keyword arguments are forwarded unchanged; the returned writer is
    used to stream the request body.
    """
    # Delegate to the underlying protocol implementation.
    return await self._delegate.write_request(
        method, uri=uri, authority=authority, scheme=scheme,
        headers=headers)
python
{ "resource": "" }
q43723
Interval.is_disjoint
train
def is_disjoint(self, other):
    """
    Check whether two Intervals are disjoint.

    :param Interval other: The Interval to check disjointedness with.
    """
    # An empty interval is disjoint from everything.
    if self.is_empty() or other.is_empty():
        return True
    # Order the two intervals so i1 has the strictly lower lower-bound.
    if self.bounds[0] < other.bounds[0]:
        i1, i2 = self, other
    elif self.bounds[0] > other.bounds[0]:
        i2, i1 = self, other
    else:
        # coincident lower bounds
        # A single point against an open lower endpoint does not overlap.
        if self.is_discrete() and not other.included[0]:
            return True
        elif other.is_discrete() and not self.included[0]:
            return True
        else:
            return False
    # Disjoint iff the later interval starts outside the earlier one.
    return not i2.bounds[0] in i1
python
{ "resource": "" }
q43724
Interval.intersection
train
def intersection(self, other):
    """
    Return a new Interval with the intersection of the two intervals,
    i.e. all elements that are in both self and other.

    :param Interval other: Interval to intersect with
    :rtype: Interval
    """
    # Order so i1 starts at or before i2.
    if self.bounds[0] < other.bounds[0]:
        i1, i2 = self, other
    else:
        i2, i1 = self, other
    if self.is_disjoint(other):
        # Inverted bounds (1, 0) encode the canonical empty Interval.
        return Interval((1, 0), (True, True))
    bounds = [None, None]
    included = [None, None]
    # sets are not disjoint, so i2.bounds[0] in i1:
    bounds[0] = i2.bounds[0]
    included[0] = i2.included[0]
    # Upper bound is whichever interval ends first.
    if i2.bounds[1] in i1:
        bounds[1] = i2.bounds[1]
        included[1] = i2.included[1]
    else:
        bounds[1] = i1.bounds[1]
        included[1] = i1.included[1]
    return Interval(bounds, included)
python
{ "resource": "" }
q43725
Interval.is_empty
train
def is_empty(self):
    """
    Check whether this interval is empty.

    :rtype: bool
    """
    if self.bounds[1] < self.bounds[0]:
        return True
    if self.bounds[1] == self.bounds[0]:
        # A degenerate interval is empty unless both endpoints are included.
        return not (self.included[0] and self.included[1])
    # Fix: the original fell off the end here and returned an implicit
    # ``None``; return an explicit ``False`` so the result is always bool.
    return False
python
{ "resource": "" }
q43726
Interval.is_discrete
train
def is_discrete(self):
    """
    Check whether this interval contains exactly one number

    :rtype: bool
    """
    # A single point: coincident bounds with both endpoints included.
    single_point = self.bounds[0] == self.bounds[1]
    return single_point and self.included == (True, True)
python
{ "resource": "" }
q43727
IntervalSet.intersection
train
def intersection(self, other):
    """
    Return a new IntervalSet with the intersection of the two sets,
    i.e. all elements that are both in self and other.

    :param IntervalSet other: Set to intersect with
    :rtype: IntervalSet
    """
    # Pairwise-intersect every interval from each set; the IntervalSet
    # constructor receives all resulting pieces.
    pieces = [mine.intersection(theirs)
              for mine in self.ints
              for theirs in other.ints]
    return IntervalSet(pieces)
python
{ "resource": "" }
q43728
IntervalSet.difference
train
def difference(self, other):
    """
    Return a new IntervalSet with the difference of the two sets,
    i.e. all elements that are in self but not in other.

    :param IntervalSet other: Set to subtract
    :rtype: IntervalSet
    """
    # Subtract each of other's intervals separately and intersect the
    # partial results — equivalent to subtracting their union.
    result = IntervalSet.everything()
    for excluded in other.ints:
        pieces = []
        for mine in self.ints:
            pieces.extend(mine._difference(excluded))
        result = result.intersection(IntervalSet(pieces))
    return result
python
{ "resource": "" }
q43729
DiscreteSet.intersection
train
def intersection(self, other):
    """
    Return a new DiscreteSet with the intersection of the two sets,
    i.e. all elements that are in both self and other.

    :param DiscreteSet other: Set to intersect with
    :rtype: DiscreteSet
    """
    # Intersecting with "everything" simply copies the other operand.
    if self.everything and other.everything:
        return DiscreteSet()
    if self.everything:
        return DiscreteSet(other.elements)
    if other.everything:
        return DiscreteSet(self.elements)
    return DiscreteSet(self.elements.intersection(other.elements))
python
{ "resource": "" }
q43730
DiscreteSet.difference
train
def difference(self, other):
    """
    Return a new DiscreteSet with the difference of the two sets,
    i.e. all elements that are in self but not in other.

    :param DiscreteSet other: Set to subtract
    :rtype: DiscreteSet
    :raises ValueError: if self is a set of everything
    """
    if self.everything:
        raise ValueError("Can not remove from everything")
    # Subtracting everything leaves nothing.
    if other.everything:
        return DiscreteSet([])
    return DiscreteSet(self.elements.difference(other.elements))
python
{ "resource": "" }
q43731
DiscreteSet.union
train
def union(self, other):
    """
    Return a new DiscreteSet with the union of the two sets,
    i.e. all elements that are in self or in other.

    :param DiscreteSet other: Set to unite with
    :rtype: DiscreteSet
    """
    # A set of everything absorbs any union.
    if self.everything:
        return self
    if other.everything:
        return other
    return DiscreteSet(self.elements.union(other.elements))
python
{ "resource": "" }
q43732
Patch.intersection
train
def intersection(self, other):
    "intersection with another patch"
    # Both patches must be defined over the same dimension names.
    if set(self.sets.keys()) != set(other.sets.keys()):
        raise KeyError('Incompatible patches in intersection')
    paired = {name: own_set.intersection(other.sets[name])
              for name, own_set in self.sets.items()}
    return Patch(paired)
python
{ "resource": "" }
q43733
Patch.iter_points
train
def iter_points(self):
    "returns a list of tuples of names and values"
    if not self.is_discrete():
        raise ValueError("Patch is not discrete")
    # Deterministic ordering: dimensions are visited in sorted name order.
    names = sorted(self.sets)
    member_iters = [self.sets[name].iter_members() for name in names]
    for values in product(*member_iters):
        yield tuple(zip(names, values))
python
{ "resource": "" }
q43734
FormLabelModelAdminMixin.update_form_labels
train
def update_form_labels(self, request=None, obj=None, form=None):
    """Returns a form obj after modifying form labels referred to
    in custom_form_labels.
    """
    for form_label in self.custom_form_labels:
        # Only touch fields that actually exist on this form.
        if form_label.field in form.base_fields:
            label = form_label.get_form_label(
                request=request, obj=obj, model=self.model, form=form
            )
            if label:
                # mark_safe: labels may legitimately contain HTML markup.
                form.base_fields[form_label.field].label = mark_safe(label)
    return form
python
{ "resource": "" }
q43735
open_filezip
train
def open_filezip(file_path, find_str):
    """
    Open the wrapped file. Read directly from the zip without extracting
    its content.

    Args:
        file_path: path to the zip archive.
        find_str: substring to look for in member names.

    Yields:
        file-like objects for each matching archive member.
    """
    if zipfile.is_zipfile(file_path):
        zipf = zipfile.ZipFile(file_path)
        # Fix: compare against member *names*. ``infolist()`` yields
        # ZipInfo objects, and ``find_str in ZipInfo`` raises TypeError.
        interesting_files = [name for name in zipf.namelist()
                             if find_str in name]
        for inside_file in interesting_files:
            yield zipf.open(inside_file)
python
{ "resource": "" }
q43736
extract_filezip
train
def extract_filezip(path_to_file, dest_path, target_zipfiles=None):
    """
    Extract file zip to destiny path folder targeting only some kind of
    files.

    Args:
        path_to_file: path to the ``.zip`` archive.
        dest_path: destination directory for extracted members.
        target_zipfiles: optional list of regex patterns selecting which
            members to extract; defaults to all members (``'.*'``).

    Returns:
        list: paths of the files that were extracted.
    """
    files = []
    _, ext = os.path.splitext(path_to_file)
    if ext != '.zip':
        logger.warning('Not zipfile passed in args')
        return files
    # Any member matching any of the target patterns is extracted.
    regexp = '|'.join(target_zipfiles) if target_zipfiles else '.*'
    search_regex = re.compile(regexp)
    # Fix: ``with`` guarantees the handle is closed even when extraction
    # raises (the original leaked the open file on exception).
    with open(path_to_file, 'rb') as file_obj:
        with zipfile.ZipFile(file_obj) as zip_file:
            matching = [m.group() for name in zip_file.namelist()
                        for m in [search_regex.search(name)] if m]
            for zp_file in matching:
                try:
                    zip_file.extract(zp_file, dest_path)
                    files.append(os.path.join(dest_path, zp_file))
                except Exception as ex:
                    msg = 'Fail to extract {} in {} to {} - {}'.format(
                        zp_file, path_to_file, dest_path, ex)
                    logger.error(msg)
    return files
python
{ "resource": "" }
q43737
copy_remote_file
train
def copy_remote_file(web_file, destination):
    """
    Check if exist the destination path, and copy the online resource
    file to local.

    Args:
        :web_file: reference to online file resource to take (must expose
            ``iter_content``).
        :destination: path to store the file.

    Returns:
        int: number of bytes written.
    """
    parent_dir = os.path.dirname(destination)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    written = 0
    with open(destination, 'wb') as out:
        # Stream in 8 KiB chunks; empty keep-alive chunks are skipped.
        for chunk in web_file.iter_content(chunk_size=8 * 1024):
            if chunk:
                out.write(chunk)
                written += len(chunk)
    return written
python
{ "resource": "" }
q43738
remove_file
train
def remove_file(paths):
    """
    Remove file from paths introduced.

    ``paths`` may be a single path or a list; paths that do not exist are
    skipped silently.
    """
    for candidate in force_list(paths):
        if os.path.exists(candidate):
            os.remove(candidate)
python
{ "resource": "" }
q43739
Signature.generate_headers
train
def generate_headers(self, client_type, client_id, secret):
    """
    generate_headers is used to generate the headers automatically for
    your http request

    :param client_type (str): remoteci or feeder
    :param client_id (str): remoteci or feeder id
    :param secret (str): api secret
    :return: Authorization headers (dict)
    """
    # The timestamp header must be attached before signing so it is part
    # of the signed payload.
    self.request.add_header(self.dci_datetime_header, self.dci_datetime_str)
    signature = self._sign(secret)
    return self.request.build_headers(client_type, client_id, signature)
python
{ "resource": "" }
q43740
addLadder
train
def addLadder(settings):
    """define a new Ladder setting and save to disk file"""
    ladder = Ladder(settings)
    ladder.save()  # persist to disk so the definition survives restarts
    # register in the in-memory cache of known ladders
    getKnownLadders()[ladder.name] = ladder
    return ladder
python
{ "resource": "" }
q43741
delLadder
train
def delLadder(name):
    """forget about a previously defined Ladder setting by deleting its disk file"""
    ladders = getKnownLadders()
    try:
        ladder = ladders[name]
        os.remove(ladder.filename)  # delete from disk
        del ladders[name]  # deallocate object
        return ladder
    except KeyError:
        # unknown name: surface a clearer error than a raw KeyError
        raise ValueError("given ladder name '%s' is not a known ladder definition"%(name))
python
{ "resource": "" }
q43742
getKnownLadders
train
def getKnownLadders(reset=False):
    """identify all of the currently defined ladders

    Lazily populates the module-level ladderCache from the json files in
    c.LADDER_FOLDER; pass reset=True to force a re-scan of the folder.
    """
    if not ladderCache or reset:
        jsonFiles = os.path.join(c.LADDER_FOLDER, "*.json")
        for ladderFilepath in glob.glob(jsonFiles):
            filename = os.path.basename(ladderFilepath)
            # filenames follow the pattern ladder_<name>.json
            name = re.search("^ladder_(.*?).json$", filename).groups()[0]
            ladder = Ladder(name)
            ladderCache[ladder.name] = ladder
    return ladderCache
python
{ "resource": "" }
q43743
Oscillator.get_samples
train
def get_samples(self, sample_count):
    """Return ``sample_count`` samples taken from the cached waveform.

    Args:
        sample_count (int): Number of samples to fetch

    Returns:
        ndarray scaled by the current amplitude, or None when the
        oscillator is silent.
    """
    # A silent oscillator contributes nothing.
    if self.amplitude.value <= 0:
        return None
    # Rotate the cached period so it starts where the last chunk stopped.
    period = numpy.roll(self.wave_cache, -self.last_played_sample)
    whole_periods, leftover = divmod(sample_count, self.cache_length)
    tail = period[:int(leftover)]
    samples = numpy.concatenate((numpy.tile(period, whole_periods), tail))
    # Remember the phase so consecutive chunks join without popping.
    self.last_played_sample = int((self.last_played_sample + leftover) % self.cache_length)
    gain = self.amplitude.value * self.amplitude_multiplier
    return samples * gain
python
{ "resource": "" }
q43744
LinterOutput._cmp_key
train
def _cmp_key(self, obj=None): """Comparison key for sorting results from all linters. The sort should group files and lines from different linters to make it easier for refactoring. """ if not obj: obj = self line_nr = int(obj.line_nr) if obj.line_nr else 0 col = int(obj.col) if obj.col else 0 return (obj.path, line_nr, col, obj.msg)
python
{ "resource": "" }
q43745
Linter._get_relative_path
train
def _get_relative_path(self, full_path): """Return the relative path from current path.""" try: rel_path = Path(full_path).relative_to(Path().absolute()) except ValueError: LOG.error("%s: Couldn't find relative path of '%s' from '%s'.", self.name, full_path, Path().absolute()) return full_path return str(rel_path)
python
{ "resource": "" }
q43746
Linter._parse_by_pattern
train
def _parse_by_pattern(self, lines, pattern): """Match pattern line by line and return Results. Use ``_create_output_from_match`` to convert pattern match groups to Result instances. Args: lines (iterable): Output lines to be parsed. pattern: Compiled pattern to match against lines. result_fn (function): Receive results of one match and return a Result. Return: generator: Result instances. """ for line in lines: match = pattern.match(line) if match: params = match.groupdict() if not params: params = match.groups() yield self._create_output_from_match(params)
python
{ "resource": "" }
q43747
Linter._create_output_from_match
train
def _create_output_from_match(self, match_result):
    """Create a LinterOutput instance from pattern match results.

    Args:
        match_result: either a dict of named groups (forwarded as
            keyword arguments) or a tuple of positional groups
            (forwarded positionally).
    """
    if isinstance(match_result, dict):
        return LinterOutput(self.name, **match_result)
    return LinterOutput(self.name, *match_result)
python
{ "resource": "" }
q43748
plain_storage.get_single_file_info
train
def get_single_file_info(self, rel_path):
    """Gets last change time info for a single file.

    Args:
        rel_path: path relative to the storage root.

    Returns:
        The info structure produced by the module-level
        ``get_single_file_info`` helper for the resolved absolute path.
    """
    f_path = self.get_full_file_path(rel_path)
    # Delegates to the module-level helper of the same name.
    return get_single_file_info(f_path, rel_path)
python
{ "resource": "" }
q43749
plain_storage.read_local_manifest
train
def read_local_manifest(self):
    """Read the file manifest, or create a new one if there isn't one already.

    Returns:
        dict: the parsed manifest.

    Raises:
        SystemExit: when the manifest uses a format older than version 2.
    """
    # file_or_default returns the parsed file when it exists, otherwise
    # the supplied default structure for a brand-new manifest.
    manifest = file_or_default(self.get_full_file_path(self.manifest_file), {
        'format_version': 2,
        'root': '/',
        'have_revision': 'root',
        'files': {}}, json.loads)
    if 'format_version' not in manifest or manifest['format_version'] < 2:
        raise SystemExit('Please update the client manifest format')
    return manifest
python
{ "resource": "" }
q43750
plain_storage.fs_put
train
def fs_put(self, rpath, data):
    """Add a file to the FS.

    Writes the file contents, records it in the manifest and commits;
    any failure rolls the transaction back and re-raises.
    """
    try:
        self.begin()
        # Add the file to the fs
        self.file_put_contents(rpath, data)
        # Add to the manifest
        manifest = self.read_local_manifest()
        manifest['files'][rpath] = self.get_single_file_info(rpath)
        self.write_local_manifest(manifest)
        self.commit()
    # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
    # it is kept here so the rollback always runs before re-raising.
    except:
        self.rollback(); raise
python
{ "resource": "" }
q43751
respond_to_SIGTERM
train
def respond_to_SIGTERM(signal_number, frame, target=None):
    """Signal handler that makes SIGTERM behave like a ^C.

    Register with the signal module for any signal whose desired
    behavior is a clean shutdown::

        signal.signal(signal.SIGTERM, respondToSIGTERM)
        signal.signal(signal.SIGHUP, respondToSIGTERM)

    parameters:
        signal_number - required by the signal API, unused here.
        frame - required by the signal API, unused here.
        target - optional instance with a 'task_manager' member derived
                 from the TaskManager class; when given, its quit flag
                 is raised instead of raising KeyboardInterrupt.
    """
    if not target:
        # No task manager to notify: emulate a ^C directly.
        raise KeyboardInterrupt
    target.config.logger.info('detected SIGTERM')
    # Setting the quit flag makes the framework's 'quit_check' calls
    # raise the quit exception (KeyboardInterrupt) at safe points.
    target.task_manager.quit = True
python
{ "resource": "" }
q43752
TaskManager.blocking_start
train
def blocking_start(self, waiting_func=None):
    """this function starts the task manager running to do tasks. The
    waiting_func is normally used to do something while other threads
    are running, but here we don't have other threads. So the waiting
    func will never get called. I can see wanting this function to be
    called at least once after the end of the task loop."""
    self.logger.debug('threadless start')
    try:
        for job_params in self._get_iterator():  # may never raise StopIteration
            self.config.logger.debug('received %r', job_params)
            self.quit_check()
            if job_params is None:
                # A None job means the queue is empty: either quit or idle.
                if self.config.quit_on_empty_queue:
                    raise KeyboardInterrupt
                self.logger.info("there is nothing to do.  Sleeping "
                                 "for %d seconds" % self.config.idle_delay)
                self._responsive_sleep(self.config.idle_delay)
                continue
            self.quit_check()
            # job_params is expected to be an (args, kwargs) pair; a bare
            # args sequence is tolerated via the ValueError fallback.
            try:
                args, kwargs = job_params
            except ValueError:
                args = job_params
                kwargs = {}
            try:
                self.task_func(*args, **kwargs)
            except Exception:
                # Log and keep going: one bad job must not stop the loop.
                self.config.logger.error("Error in processing a job",
                                         exc_info=True)
    except KeyboardInterrupt:
        self.logger.debug('queuingThread gets quit request')
    finally:
        self.quit = True
        self.logger.debug("ThreadlessTaskManager dies quietly")
python
{ "resource": "" }
q43753
FSong.makePartitions
train
def makePartitions(self):
    """Make partitions with gmane help."""
    # Lightweight bag of network measures consumed by NetworkPartitioning.
    class NetworkMeasures: pass
    self.nm = nm = NetworkMeasures()
    nm.degrees = self.network.degree()
    # Nodes sorted by ascending degree, with degrees in matching order.
    nm.nodes_ = sorted(self.network.nodes(), key=lambda x: nm.degrees[x])
    nm.degrees_ = [nm.degrees[i] for i in nm.nodes_]
    nm.edges = self.network.edges(data=True)
    nm.E = self.network.number_of_edges()
    nm.N = self.network.number_of_nodes()
    # 10 sectors, using the "g" metric (gmane partitioning).
    self.np = g.NetworkPartitioning(nm, 10, metric="g")
python
{ "resource": "" }
q43754
FSong.makeImages
train
def makeImages(self): """Make spiral images in sectors and steps. Plain, reversed, sectorialized, negative sectorialized outline, outline reversed, lonely only nodes, only edges, both """ # make layout self.makeLayout() self.setAgraph() # make function that accepts a mode, a sector # and nodes and edges True and False self.plotGraph() self.plotGraph("reversed",filename="tgraphR.png") agents=n.concatenate(self.np.sectorialized_agents__) for i, sector in enumerate(self.np.sectorialized_agents__): self.plotGraph("plain", sector,"sector{:02}.png".format(i)) self.plotGraph("reversed",sector,"sector{:02}R.png".format(i)) self.plotGraph("plain", n.setdiff1d(agents,sector),"sector{:02}N.png".format(i)) self.plotGraph("reversed",n.setdiff1d(agents,sector),"sector{:02}RN.png".format(i)) self.plotGraph("plain", [],"BLANK.png")
python
{ "resource": "" }
q43755
FSong.makeSong
train
def makeSong(self):
    """Render abstract animation: the visual and audible tracks, plus
    the final video when ``make_video`` is enabled."""
    self.makeVisualSong()
    self.makeAudibleSong()
    if self.make_video:
        self.makeAnimation()
python
{ "resource": "" }
q43756
cutoff_filename
train
def cutoff_filename(prefix, suffix, input_str):
    """Cut a prefix and/or suffix off a string when present.

    Parameters
    ----------
    prefix : string; if input_str starts with prefix, prefix is cut off
    suffix : string; if input_str ends with suffix, suffix is cut off
    input_str : the string to be processed

    Returns
    -------
    A string, from which the start and end have been cut
    """
    # The original used "is not ''": identity comparison against a string
    # literal is unreliable and a SyntaxWarning on CPython >= 3.8; plain
    # truthiness expresses the same intent correctly.
    if prefix and input_str.startswith(prefix):
        input_str = input_str[len(prefix):]
    if suffix and input_str.endswith(suffix):
        input_str = input_str[:-len(suffix)]
    return input_str
python
{ "resource": "" }
q43757
get_frame_src
train
def get_frame_src(f:Frame) -> str:
    ''' inspects a frame and returns a string with the following
            <src-path>:<src-line> -> <function-name>
                <source-code>
    '''
    # _get_frame unpacks inspect's Traceback into (path, line, src, fn).
    path, line, src, fn = _get_frame(
        inspect.getframeinfo(f)
    )
    return '{}:{} -> {}\n{}'.format(
        path.split(os.sep)[-1],  # file name only, drop the directories
        line,
        fn,
        repr(src[0][:-1])  # shave off \n
    )
python
{ "resource": "" }
q43758
trace
train
def trace(fn=None, profiler=None) -> Callable: ''' This decorator allows you to visually trace the steps of a function as it executes to see what happens to the data as things are being processed. If you want to use a custom profiler, use the @trace(profiler=my_custom_profiler) syntax. Example Usage: def count_to(target): for i in range(1, target+1): yield i @trace def sum_of_count(target): total = 0 for i in count_to(target): total += i return total sum_of_count(10) ''' # analyze usage custom_profiler = fn is None and profiler is not None no_profiler = profiler is None and fn is not None no_args = profiler is None and fn is None # adjust for usage if custom_profiler: # for @trace(profiler=...) return partial(trace, profiler=profiler) elif no_args: # for @trace() return trace elif no_profiler: # for @trace profiler = default_profiler # validate input assert callable(fn) assert callable(profiler) # build the decorator @wraps(fn) def wrapper(*args, **kwargs): # flag for default_profiler to know to ignore this scope wafflestwaffles = None # save the previous profiler old_profiler = sys.getprofile() # set the new profiler sys.setprofile(profiler) try: # run the function return fn(*args, **kwargs) finally: # revert the profiler sys.setprofile(old_profiler) return wrapper
python
{ "resource": "" }
q43759
MoveFileCallback.on_close
train
def on_close(self, filename):
    """Move the closed file into the destination folder.

    Args:
        filename: path of the file that was just closed.

    Returns:
        str: path of the file inside the destination folder.
    """
    shutil.move(filename, self.destination_folder)
    basename = os.path.basename(filename)
    return os.path.join(self.destination_folder, basename)
python
{ "resource": "" }
q43760
CRCPubkey.from_str
train
def from_str(cls: Type[CRCPubkeyType], crc_pubkey: str) -> CRCPubkeyType:
    """
    Return CRCPubkey instance from CRC public key string

    :param crc_pubkey: CRC public key in the "PUBKEY:CRC" form matched
        by ``re_crc_pubkey``
    :return:
    """
    data = CRCPubkey.re_crc_pubkey.match(crc_pubkey)
    if data is None:
        raise Exception("Could not parse CRC public key {0}".format(crc_pubkey))
    pubkey = data.group(1)  # base58 public key part
    crc = data.group(2)  # 3-character checksum part
    return cls(pubkey, crc)
python
{ "resource": "" }
q43761
CRCPubkey.from_pubkey
train
def from_pubkey(cls: Type[CRCPubkeyType], pubkey: str) -> CRCPubkeyType:
    """
    Return CRCPubkey instance from public key string

    :param pubkey: Public key
    :return:
    """
    # CRC is the first 3 characters of
    # base58(sha256(sha256(base58decode(pubkey)))).
    hash_root = hashlib.sha256()
    hash_root.update(base58.b58decode(pubkey))
    hash_squared = hashlib.sha256()
    hash_squared.update(hash_root.digest())
    b58_checksum = ensure_str(base58.b58encode(hash_squared.digest()))
    crc = b58_checksum[:3]
    return cls(pubkey, crc)
python
{ "resource": "" }
q43762
Statement.delete
train
def delete(self):
    """Remove the statement from a ProcmailRC structure.

    Raises a RuntimeError when the statement is not inside a ProcmailRC
    structure; otherwise detaches it and returns the parent id.
    """
    if self.parent is None:
        raise RuntimeError(
            "Current statement has no parent, so it cannot "
            "be deleted form a procmailrc structure"
        )
    if self.id is None:
        raise RuntimeError("id not set but have a parent, this should not be happening")
    # The last dotted component of the id is the index within the parent.
    parent_id = self.parent.id
    index = int(self.id.split('.')[-1])
    self.parent.pop(index)
    # Detach this statement from the structure.
    self.parent = None
    self.id = None
    return parent_id
python
{ "resource": "" }
q43763
GenRuleBuilder._metahash
train
def _metahash(self):
    """Include genrule cmd in the metahash.

    Returns the cached hash when available; otherwise extends the base
    builder's metahash with this rule's command string and caches the
    result for subsequent calls.
    """
    if self._cached_metahash:
        return self._cached_metahash
    mhash = base.BaseBuilder._metahash(self)
    log.debug('[%s]: Metahash input: cmd="%s"', self.address, self.cmd)
    # The command participates in the hash so a cmd change invalidates
    # previously built outputs.
    mhash.update(self.cmd)
    self._cached_metahash = mhash
    return mhash
python
{ "resource": "" }
q43764
GenRule.output_files
train
def output_files(self):
    """Returns list of output files from this rule, relative to buildroot.

    In this case it's simple (for now) - the output files are enumerated
    in the rule definition.
    """
    base_dir = os.path.join(self.address.repo, self.address.path)
    return [os.path.join(base_dir, out) for out in self.params['outs']]
python
{ "resource": "" }
q43765
Repo.tag
train
def tag(self, tag: str, overwrite: bool = False) -> None:
    """
    Tags the current commit

    :param tag: tag
    :type tag: str
    :param overwrite: overwrite existing tag
    :type overwrite: bool
    """
    LOGGER.info('tagging repo: %s', tag)
    try:
        self.repo.create_tag(tag)
    except GitCommandError as exc:
        # A tag collision is only tolerated when overwrite is requested;
        # any other git failure is logged and re-raised.
        if 'already exists' in exc.stderr and overwrite:
            LOGGER.info('overwriting existing tag')
            self.remove_tag(tag)
            self.repo.create_tag(tag)
        else:
            LOGGER.exception('error while tagging repo')
            raise
python
{ "resource": "" }
q43766
Repo.list_tags
train
def list_tags(self, pattern: str = None) -> typing.List[str]:
    """
    Returns list of tags, optionally matching "pattern"

    :param pattern: optional pattern to filter results (plain substring
        containment, not a regex or glob)
    :type pattern: str
    :return: existing tags
    :rtype: list of str
    """
    tags: typing.List[str] = [str(tag) for tag in self.repo.tags]
    if not pattern:
        LOGGER.debug('tags found in repo: %s', tags)
        return tags
    LOGGER.debug('filtering tags with pattern: %s', pattern)
    filtered_tags: typing.List[str] = [tag for tag in tags if pattern in tag]
    LOGGER.debug('filtered tags: %s', filtered_tags)
    return filtered_tags
python
{ "resource": "" }
q43767
Repo.stash
train
def stash(self, stash_name: str):
    """
    Stashes the current working tree changes

    Exits the process when a stash already exists, the index is not
    empty, or there are untracked files.

    :param stash_name: name of the stash
    :type stash_name: str
    """
    if self.stashed:
        LOGGER.error('already stashed')
        sys.exit(-1)
    else:
        if not self.index_is_empty():
            LOGGER.error('cannot stash; index is not empty')
            sys.exit(-1)
        if self.untracked_files():
            LOGGER.error('cannot stash; there are untracked files')
            sys.exit(-1)
        if self.changed_files():
            LOGGER.info('stashing changes')
            # -u includes untracked files, -k keeps the index intact.
            self.repo.git.stash('push', '-u', '-k', '-m', f'"{stash_name}"')
            self.stashed = True
        else:
            LOGGER.info('no changes to stash')
python
{ "resource": "" }
q43768
Repo.unstash
train
def unstash(self):
    """
    Pops the last stash if EPAB made a stash before

    Does nothing (besides logging an error) when no stash was recorded.
    """
    if not self.stashed:
        LOGGER.error('no stash')
    else:
        LOGGER.info('popping stash')
        self.repo.git.stash('pop')
        self.stashed = False
python
{ "resource": "" }
q43769
Repo.ensure
train
def ensure():
    """
    Makes sure the current working directory is a Git repository.

    Exits the process when no ".git" directory is present.
    """
    LOGGER.debug('checking repository')
    if not os.path.exists('.git'):
        LOGGER.error('This command is meant to be ran in a Git repository.')
        sys.exit(-1)
    LOGGER.debug('repository OK')
python
{ "resource": "" }
q43770
Repo.stage_all
train
def stage_all(self):
    """
    Stages all changed and untracked files (equivalent to ``git add -A``)
    """
    LOGGER.info('Staging all files')
    self.repo.git.add(A=True)
python
{ "resource": "" }
q43771
Repo.stage_subset
train
def stage_subset(self, *files_to_add: str):
    """
    Stages a subset of files

    :param files_to_add: files to stage
    :type files_to_add: str
    """
    LOGGER.info('staging files: %s', files_to_add)
    self.repo.git.add(*files_to_add, A=True)
python
{ "resource": "" }
q43772
Repo.commit
train
def commit(
        self,
        message: str,
        files_to_add: typing.Optional[typing.Union[typing.List[str], str]] = None,
        allow_empty: bool = False,
):
    """
    Commits changes to the repo

    Exits the process when the message is empty or when nothing is
    staged and ``allow_empty`` is False.

    :param message: first line of the message
    :type message: str
    :param files_to_add: files to commit; all changes when None
    :type files_to_add: optional list of str
    :param allow_empty: allow dummy commit
    :type allow_empty: bool
    """
    message = str(message)
    LOGGER.debug('message: %s', message)
    files_to_add = self._sanitize_files_to_add(files_to_add)
    LOGGER.debug('files to add: %s', files_to_add)
    if not message:
        LOGGER.error('empty commit message')
        sys.exit(-1)
    if os.getenv('APPVEYOR'):
        # Avoid re-triggering CI from a commit made *by* CI.
        LOGGER.info('committing on AV, adding skip_ci tag')
        message = self.add_skip_ci_to_commit_msg(message)
    if files_to_add is None:
        self.stage_all()
    else:
        # Restrict the commit to the given subset only.
        self.reset_index()
        self.stage_subset(*files_to_add)
    if self.index_is_empty() and not allow_empty:
        LOGGER.error('empty commit')
        sys.exit(-1)
    self.repo.index.commit(message=message)
python
{ "resource": "" }
q43773
Repo.amend_commit
train
def amend_commit(
        self,
        append_to_msg: typing.Optional[str] = None,
        new_message: typing.Optional[str] = None,
        files_to_add: typing.Optional[typing.Union[typing.List[str], str]] = None,
):
    """
    Amends last commit with either an entirely new commit message, or an
    edited version of the previous one

    Note: it is an error to provide both "append_to_msg" and "new_message"

    :param append_to_msg: message to append to previous commit message
    :type append_to_msg: str
    :param new_message: new commit message
    :type new_message: str
    :param files_to_add: optional list of files to add to this commit
    :type files_to_add: str or list of str
    """
    if new_message and append_to_msg:
        LOGGER.error('Cannot use "new_message" and "append_to_msg" together')
        sys.exit(-1)
    files_to_add = self._sanitize_files_to_add(files_to_add)
    message = self._sanitize_amend_commit_message(append_to_msg, new_message)
    if os.getenv('APPVEYOR'):
        # Prevent CI from re-triggering itself on the amended commit.
        message = f'{message} [skip ci]'
    LOGGER.info('amending commit with new message: %s', message)
    # A tag pointing at the commit being amended must be moved to the
    # new commit, so remove it now and re-create it at the end.
    latest_tag = self.get_current_tag()
    if latest_tag:
        LOGGER.info('removing tag: %s', latest_tag)
        self.remove_tag(latest_tag)
    LOGGER.info('going back one commit')
    branch = self.repo.head.reference
    try:
        # Soft-reset the branch to the parent; the new commit replaces
        # the old head.
        branch.commit = self.repo.head.commit.parents[0]
    except IndexError:
        # The first commit has no parent to reset to.
        LOGGER.error('cannot amend the first commit')
        sys.exit(-1)
    if files_to_add:
        self.stage_subset(*files_to_add)
    else:
        self.stage_all()
    self.repo.index.commit(message, skip_hooks=True)
    if latest_tag:
        LOGGER.info('resetting tag: %s', latest_tag)
        self.tag(latest_tag)
python
{ "resource": "" }
q43774
Repo.merge
train
def merge(self, ref_name: str):
    """
    Merges two refs

    Exits the process when the repository is dirty.

    Args:
        ref_name: ref to merge in the current one
    """
    if self.is_dirty():
        LOGGER.error('repository is dirty; cannot merge: %s', ref_name)
        sys.exit(-1)
    LOGGER.info('merging ref: "%s" into branch: %s', ref_name, self.get_current_branch())
    self.repo.git.merge(ref_name)
python
{ "resource": "" }
q43775
Repo.checkout
train
def checkout(self, reference: str):
    """
    Checks out a reference.

    If the index is dirty, or if the repository contains untracked
    files, the function will fail (exit the process).

    :param reference: reference to check out
    :type reference: str
    """
    LOGGER.info('checking out: %s', reference)
    if not self.index_is_empty():
        LOGGER.error('index contains change; cannot checkout. Status:\n %s', self.status())
        sys.exit(-1)
    if self.is_dirty(untracked=True):
        LOGGER.error('repository is dirty; cannot checkout "%s"', reference)
        LOGGER.error('repository is dirty; cannot checkout. Status:\n %s', self.status())
        sys.exit(-1)
    LOGGER.debug('going through all present references')
    # Only local heads are considered; an unknown reference is fatal.
    for head in self.repo.heads:
        if head.name == reference:
            LOGGER.debug('resetting repo index and working tree to: %s', reference)
            self.repo.head.reference = head
            self.repo.head.reset(index=True, working_tree=True)
            break
    else:
        LOGGER.error('reference not found: %s', reference)
        sys.exit(-1)
python
{ "resource": "" }
q43776
Repo.create_branch
train
def create_branch(self, branch_name: str):
    """
    Creates a new branch pointing at the current HEAD commit

    Exits the process when the branch already exists or the name fails
    validation.

    Args:
        branch_name: name of the branch
    """
    LOGGER.info('creating branch: %s', branch_name)
    self._validate_branch_name(branch_name)
    if branch_name in self.list_branches():
        LOGGER.error('branch already exists')
        sys.exit(-1)
    new_branch = self.repo.create_head(branch_name)
    new_branch.commit = self.repo.head.commit
python
{ "resource": "" }
q43777
Repo.create_branch_and_checkout
train
def create_branch_and_checkout(self, branch_name: str):
    """
    Creates a new branch and switches to it

    Args:
        branch_name: branch name
    """
    self.create_branch(branch_name)
    self.checkout(branch_name)
python
{ "resource": "" }
q43778
Repo.is_dirty
train
def is_dirty(self, untracked=False) -> bool:
    """
    Checks if the current repository contains uncommitted or untracked changes

    :param untracked: also count untracked files as dirtying the repo
    :return: True if the repository is dirty
    """
    result = False
    if not self.index_is_empty():
        LOGGER.error('index is not empty')
        result = True
    changed_files = self.changed_files()
    if changed_files:
        # Lazy %-style formatting: the original needlessly prefixed this
        # literal with "f" (it has no {} placeholders), which pylint
        # flags as W1309.
        LOGGER.error('Repo has %s modified files: %s', len(changed_files), changed_files)
        result = True
    if untracked:
        result = result or bool(self.untracked_files())
    return result
python
{ "resource": "" }
q43779
Text.startswith
train
def startswith(text, ignore_case=True):
    """Test if a string-field starts with ``text``.

    Example::

        filters = {"path": Text.startswith(r"C:\\")}
    """
    # Backslashes are doubled so literal path separators survive the
    # regex compilation.
    escaped = text.replace("\\", "\\\\")
    flags = re.IGNORECASE if ignore_case else 0
    compiled = re.compile("^%s" % escaped, flags)
    return {"$regex": compiled}
python
{ "resource": "" }
q43780
Text.fulltext
train
def fulltext(search, lang=Lang.English, ignore_case=True):
    """Full text search.

    Example::

        filters = Text.fulltext("python pymongo_mate")

    .. note:: This filter uses the collection's text index and doesn't
        need to specify a field.
    """
    return {
        "$text": {
            "$search": search,
            "$language": lang,
            "$caseSensitive": not ignore_case,
            "$diacriticSensitive": False,
        }
    }
python
{ "resource": "" }
q43781
Geo2DSphere.near
train
def near(lat, lng, max_dist=None, unit_miles=False):
    """Find documents near a point.

    For example: find all documents within a 25 mile radius of
    (32.0, -73.0)::

        filters = Geo2DSphere.near(32.0, -73.0, max_dist=25, unit_miles=True)

    :param lat: latitude of the center point.
    :param lng: longitude of the center point.
    :param max_dist: maximum distance from the point (meters by default).
    :param unit_miles: when True, ``max_dist`` is given in miles.
    """
    filters = {
        "$nearSphere": {
            "$geometry": {
                "type": "Point",
                # GeoJSON coordinate order is [longitude, latitude].
                "coordinates": [lng, lat],
            }
        }
    }
    if max_dist:
        if unit_miles:  # pragma: no cover
            # $maxDistance on a GeoJSON $nearSphere query is expressed in
            # meters, so miles must be converted by *multiplying* by
            # 1609.344 (the previous code divided, shrinking a 25 mile
            # radius to ~1.5 centimeters).
            max_dist = max_dist * 1609.344
        filters["$nearSphere"]["$maxDistance"] = max_dist
    return filters
python
{ "resource": "" }
q43782
FSNode.children
train
def children(self):
    """If the FSNode is a directory, returns a list of the children as
    FSNode objects one level deeper; raises otherwise."""
    if not self.isdir():
        raise Exception("FSQuery tried to return the children of a node which is not a directory : %s" % self.abs)
    return [FSNode(self.abs + "/" + x, self.root, self.depth + 1) for x in os.listdir(self.abs)]
python
{ "resource": "" }
q43783
FSNode.add_file
train
def add_file(self, fName, content):
    """Write a file named ``fName`` containing ``content`` inside this
    FSNode, which must be a directory."""
    if not self.isdir():
        raise Exception("FSQuery tried to add a file in a node which is not a directory : %s" % self.abs)
    target = "%s/%s" % (self.abs, fName)
    self.write_file(target, content)
python
{ "resource": "" }
q43784
FSNode.open_file
train
def open_file(self):
    """Open this FSNode for reading and return the file handle.

    Raises if the node is a directory rather than a file.
    """
    if not self.isdir():
        return open(self.abs)
    raise Exception("FSQuery tried to open a directory as a file : %s" % self.abs)
python
{ "resource": "" }
q43785
FSNode.mk_dir
train
def mk_dir(self):
    """If this FSNode doesn't currently exist, make a directory with
    its name (intermediate directories included)."""
    if os.path.exists(self.abs):
        return
    os.makedirs(self.abs)
python
{ "resource": "" }
q43786
FSQuery.walk
train
def walk(self, depth=0, fsNode=None):
    """Note, this is a filtered walk.

    Recursively walks the tree rooted at ``fsNode`` (an FSNode for the
    query's init_path by default): directories must pass ``check_dir``
    to be descended into, files must pass ``check_file``, and every
    yielded node must additionally pass ``check_return``. Symlinks are
    not followed.
    """
    if not fsNode:
        fsNode = FSNode(self.init_path, self.init_path, 0)
    if fsNode.isdir():
        if self.check_dir(fsNode):
            if self.check_return(fsNode):
                yield fsNode
            for n in fsNode.children():
                if n.islink():
                    # currently we don't follow links
                    continue
                for n2 in self.walk(depth + 1, n):
                    if self.check_return(n2):
                        yield n2
    else:
        if self.check_file(fsNode):
            if self.check_return(fsNode):
                yield fsNode
    # NOTE: the original ended with "raise StopIteration", which PEP 479
    # turns into a RuntimeError inside a generator on Python >= 3.7;
    # simply falling off the end terminates the generator correctly.
python
{ "resource": "" }
q43787
FSQuery.shadow
train
def shadow(self, new_root, visitor):
    """Runs through the query, creating a clone directory structure in
    the new_root. Then applies process via the visitor's
    ``process_dir``/``process_file`` callbacks."""
    for n in self.walk():
        sn = n.clone(new_root)  # shadow node under the new root
        if n.isdir():
            visitor.process_dir(n, sn)
        else:
            visitor.process_file(n, sn)
python
{ "resource": "" }
q43788
FSQuery.DirContains
train
def DirContains(self, f):
    """Matches dirs that have at least one child matching filter ``f``."""
    def match(fsNode):
        if not fsNode.isdir():
            return False
        # True as soon as any child satisfies the filter.
        return any(f(c) for c in fsNode.children())
    return self.make_return(match)
python
{ "resource": "" }
q43789
Directory.register
train
def register(self, peer):
    """
    Registers a peer according to its description

    :param peer: A Peer description bean
    :raise KeyError: the peer is already registered
    """
    assert isinstance(peer, beans.Peer)
    with self.__lock:
        # Check presence
        peer_id = peer.peer_id
        if peer_id in self.peers:
            raise KeyError("Already known peer: {0}".format(peer))
        # Store the description
        self.peers[peer_id] = peer
        # Store in the groups (creating missing groups on the fly)
        for name in peer.groups:
            self.groups.setdefault(name, set()).add(peer_id)
python
{ "resource": "" }
q43790
Directory.unregister
train
def unregister(self, peer_id):
    """
    Unregisters the given peer

    :param peer_id: A peer UUID
    :raise KeyError: Unknown peer
    """
    with self.__lock:
        # Pop it from accesses (will raise a KeyError if absent)
        peer = self.peers.pop(peer_id)
        assert isinstance(peer, beans.Peer)
        # Remove it from groups
        for name in peer.groups:
            try:
                # Clean up the group
                group = self.groups[name]
                group.remove(peer_id)
                # Remove it if it's empty
                if not group:
                    del self.groups[name]
            except KeyError:
                # Be tolerant here
                pass
python
{ "resource": "" }
q43791
canonicalize_spec
train
def canonicalize_spec(spec, parent_context):
    """Push all context declarations to the leaves of a nested test specification.

    Keys starting with "Test" are nested test specs; every other key is
    a context declaration merged (via ``reduce_contexts``) into the
    parent context on the way down. Leaves are rewritten into flat
    dicts with 'Arguments', 'Program' and 'Expect' entries.
    """
    test_specs = {k: v for (k, v) in spec.items() if k.startswith("Test")}
    local_context = {k: v for (k, v) in spec.items() if not k.startswith("Test")}
    context = reduce_contexts(parent_context, local_context)
    if test_specs:
        # Inner node: recurse into each nested test spec with the
        # merged context.
        return {k: canonicalize_spec(v, context) for (k, v) in test_specs.items()}
    else:
        # Leaf: concatenate all resolved module chunks, then the program
        # itself, into one program text.
        program_chunks = sum([resolve_module(m, context['Definitions']) for m in context['Modules']], []) + [context['Program']]
        test_spec = {
            'Arguments': context['Arguments'],
            'Program': "\n".join(program_chunks),
            'Expect': context['Expect'],
        }
        return test_spec
python
{ "resource": "" }
q43792
flatten_spec
train
def flatten_spec(spec, prefix, joiner=" :: "):
    """Flatten a canonical specification with nesting into one without
    nesting.

    Unique names are built by joining the given prefix with each local
    test name (the part after the "Test " tag) using ``joiner``.
    """
    has_nested = any(key.startswith("Test") for key in spec)
    if not has_nested:
        # Leaf spec: name it after the accumulated prefix.
        return {"Test " + prefix: spec}
    flattened = {}
    for key, sub_spec in spec.items():
        flattened.update(flatten_spec(sub_spec, prefix + joiner + key[5:]))
    return flattened
python
{ "resource": "" }
q43793
load_stanzas
train
def load_stanzas(stanzas_file):
    """Load stanzas from gold standard file.

    Every 4th line (starting at the first) holds one stanza: its first
    whitespace-separated token is skipped and the remaining tokens are
    taken as the stanza's line-final words.
    """
    f = stanzas_file.readlines()
    stanzas = []
    for i, line in enumerate(f):
        if i % 4 == 0:
            stanza_words = line.strip().split()[1:]
            stanzas.append(Stanza(stanza_words))
    return stanzas
python
{ "resource": "" }
q43794
get_wordlist
train
def get_wordlist(stanzas):
    """Return a sorted list of all distinct final words across stanzas.

    Args:
        stanzas: iterable of Stanza objects exposing a ``words`` iterable.

    Returns:
        list: the unique words, sorted.
    """
    # sorted() already returns a list, so the original's extra list()
    # wrapper was redundant; a generator also avoids a throwaway list.
    return sorted(set().union(*(stanza.words for stanza in stanzas)))
python
{ "resource": "" }
q43795
get_rhymelists
train
def get_rhymelists(stanza, scheme):
    """Group the stanza's word indices by rhyme group of ``scheme``.

    Returns ordered lists of the stanza's word indices, one list per
    rhyme group, in first-appearance order of the groups.
    """
    groups = {}
    for rhyme_group, word_index in zip(scheme, stanza.word_indices):
        groups.setdefault(rhyme_group, []).append(word_index)
    return list(groups.values())
python
{ "resource": "" }
q43796
init_distance_ttable
train
def init_distance_ttable(wordlist, distance_function):
    """Initialize pair-wise rhyme strengths from a word distance function.

    Builds an (n, n+1) table whose entry (r, c) holds the distance-based
    strength between wordlist[r] and wordlist[c]; the extra final column
    is the row mean, used for backoff. Every column is normalized so it
    sums to 1.
    """
    size = len(wordlist)
    table = numpy.zeros((size, size + 1))
    # Fill the pairwise part; distances are symmetric, so values below
    # the diagonal are mirrored instead of recomputed.
    for row, word in enumerate(wordlist):
        for col, other in enumerate(wordlist):
            if col < row:
                table[row, col] = table[col, row]
            else:
                # Small additive constant keeps probabilities non-zero
                # (for backoff).
                table[row, col] = distance_function(word, other) + 0.001
    # Final column: per-row mean over the pairwise part.
    table[:, size] = numpy.mean(table[:, :-1], axis=1)
    # Normalize every column into a probability distribution.
    for col, total in enumerate(numpy.sum(table, axis=0).tolist()):
        table[:, col] /= total
    return table
python
{ "resource": "" }
q43797
post_prob_scheme
train
def post_prob_scheme(t_table, stanza, scheme):
    """Compute posterior probability of a scheme for a stanza, with
    probability of every word in rhymelist rhyming with all the ones
    before it.

    The first word of each rhyme list is scored with the backoff
    column (last column of t_table); later words multiply in their
    pairwise strength against every earlier word in the same list.
    """
    myprob = 1
    rhymelists = get_rhymelists(stanza, scheme)
    for rhymelist in rhymelists:
        for i, word_index in enumerate(rhymelist):
            if i == 0:
                # first word, use P(w|x) from the backoff column
                myprob *= t_table[word_index, -1]
            else:
                for word_index2 in rhymelist[:i]:  # history
                    myprob *= t_table[word_index, word_index2]
    if myprob == 0 and len(stanza) > 30:  # probably underflow
        myprob = 1e-300
    return myprob
python
{ "resource": "" }
q43798
expectation_step
train
def expectation_step(t_table, stanzas, schemes, rprobs):
    """E-step: compute posterior probability of schemes for each stanza.

    Returns an array of shape (num stanzas, num schemes), weighted by
    the scheme priors ``rprobs`` and row-normalized over the schemes
    compatible with each stanza's length.
    """
    probs = numpy.zeros((len(stanzas), schemes.num_schemes))
    for i, stanza in enumerate(stanzas):
        # Only schemes whose length matches the stanza are considered.
        scheme_indices = schemes.get_schemes_for_len(len(stanza))
        for scheme_index in scheme_indices:
            scheme = schemes.scheme_list[scheme_index]
            probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
    # Weight columns by the scheme priors.
    probs = numpy.dot(probs, numpy.diag(rprobs))
    # Normalize each stanza's row into a distribution (rows that sum to
    # zero are left untouched).
    scheme_sums = numpy.sum(probs, axis=1)
    for i, scheme_sum in enumerate(scheme_sums.tolist()):
        if scheme_sum > 0:
            probs[i, :] /= scheme_sum
    return probs
python
{ "resource": "" }
q43799
maximization_step
train
def maximization_step(num_words, stanzas, schemes, probs):
    """M-step: update latent variables t_table and rprobs from the
    posterior scheme probabilities computed in the E-step."""
    t_table = numpy.zeros((num_words, num_words + 1))
    # Ones rather than zeros: add-one smoothing of the scheme priors.
    rprobs = numpy.ones(schemes.num_schemes)
    for i, stanza in enumerate(stanzas):
        scheme_indices = schemes.get_schemes_for_len(len(stanza))
        for scheme_index in scheme_indices:
            myprob = probs[i, scheme_index]
            rprobs[scheme_index] += myprob
            scheme = schemes.scheme_list[scheme_index]
            rhymelists = get_rhymelists(stanza, scheme)
            for rhymelist in rhymelists:
                for j, word_index in enumerate(rhymelist):
                    # Backoff column plus co-occurrence counts with every
                    # other word in the same rhyme list.
                    t_table[word_index, -1] += myprob
                    for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
                        t_table[word_index, word_index2] += myprob
    # Normalize t_table columns (skipping all-zero columns).
    t_table_sums = numpy.sum(t_table, axis=0)
    for i, t_table_sum in enumerate(t_table_sums.tolist()):
        if t_table_sum != 0:
            t_table[:, i] /= t_table_sum
    # Normalize rprobs
    totrprob = numpy.sum(rprobs)
    rprobs /= totrprob
    return t_table, rprobs
python
{ "resource": "" }