_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q41200
Component.get_state_string
train
def get_state_string(self, add_colour=True):
    '''Get the state of this component as an optionally-coloured string.

    @param add_colour If True, ANSI colour codes will be added to the
                      string.
    @return A string describing the state of this component.

    '''
    with self._mutex:
        if self.state == self.INACTIVE:
            result = 'Inactive', ['bold', 'blue']
        elif self.state == self.ACTIVE:
            result = 'Active', ['bold', 'green']
        elif self.state == self.ERROR:
            result = 'Error', ['bold', 'white', 'bgred']
        elif self.state == self.CREATED:
            result = 'Created', ['reset']
        else:
            # Bug fix: an unrecognised state value previously left `result`
            # unbound and raised NameError; treat anything unmatched
            # (including self.UNKNOWN) as Unknown.
            result = 'Unknown', ['bold', 'red']
        if add_colour:
            return utils.build_attr_string(result[1],
                    supported=add_colour) + result[0] + \
                    utils.build_attr_string('reset', supported=add_colour)
        else:
            return result[0]
python
{ "resource": "" }
q41201
Component.get_state_in_ec_string
train
def get_state_in_ec_string(self, ec_index, add_colour=True):
    '''Get the state of the component in an execution context as a string.

    @param ec_index The index of the execution context to check the state
                    in. This index is into the total array of contexts,
                    that is both owned and participating contexts. If the
                    value of ec_index is greater than the length of @ref
                    owned_ecs, that length is subtracted from ec_index and
                    the result used as an index into @ref
                    participating_ecs.
    @param add_colour If True, ANSI colour codes will be added.
    @return An optionally-coloured string describing the state.

    '''
    with self._mutex:
        if ec_index >= len(self.owned_ecs):
            ec_index -= len(self.owned_ecs)
            if ec_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(ec_index)
            state = self.participating_ec_states[ec_index]
        else:
            state = self.owned_ec_states[ec_index]
        if state == self.INACTIVE:
            result = 'Inactive', ['bold', 'blue']
        elif state == self.ACTIVE:
            result = 'Active', ['bold', 'green']
        elif state == self.ERROR:
            result = 'Error', ['bold', 'white', 'bgred']
        elif state == self.CREATED:
            result = 'Created', ['reset']
        else:
            # Bug fix: an unrecognised state previously left `result`
            # unbound, raising NameError; treat it as Unknown instead.
            result = 'Unknown', ['bold', 'red']
        if add_colour:
            return utils.build_attr_string(result[1],
                    supported=add_colour) + result[0] + \
                    utils.build_attr_string('reset', supported=add_colour)
        else:
            return result[0]
python
{ "resource": "" }
q41202
Component.reset_in_ec
train
def reset_in_ec(self, ec_index):
    '''Reset this component in an execution context.

    @param ec_index The index of the execution context to reset in. This
                    index is into the total array of contexts (owned
                    contexts first, then participating contexts). Indices
                    beyond @ref owned_ecs are looked up in @ref
                    participating_ecs after subtracting the owned count.

    '''
    with self._mutex:
        owned_count = len(self.owned_ecs)
        if ec_index < owned_count:
            target = self.owned_ecs[ec_index]
        else:
            part_index = ec_index - owned_count
            if part_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(part_index)
            target = self.participating_ecs[part_index]
        target.reset_component(self._obj)
python
{ "resource": "" }
q41203
Component.state_in_ec
train
def state_in_ec(self, ec_index):
    '''Get the state of the component in an execution context.

    @param ec_index The index of the execution context to check the state
                    in. This index is into the total array of contexts
                    (owned contexts first, then participating contexts);
                    indices beyond @ref owned_ecs are looked up in
                    @ref participating_ecs after subtracting the owned
                    count.

    '''
    with self._mutex:
        owned_count = len(self.owned_ecs)
        if ec_index < owned_count:
            return self.owned_ec_states[ec_index]
        part_index = ec_index - owned_count
        if part_index >= len(self.participating_ecs):
            raise exceptions.BadECIndexError(part_index)
        return self.participating_ec_states[part_index]
python
{ "resource": "" }
q41204
Component.refresh_state_in_ec
train
def refresh_state_in_ec(self, ec_index):
    '''Get the up-to-date state of the component in an execution context.

    This queries the execution context directly rather than using the
    cached value, so it may take time if the component is executing on a
    remote node. The cached state list is updated with the fresh value.

    @param ec_index The index of the execution context to check the state
                    in. Indexes into owned contexts first, then
                    participating contexts.

    '''
    with self._mutex:
        owned_count = len(self.owned_ecs)
        if ec_index < owned_count:
            state = self._get_ec_state(self.owned_ecs[ec_index])
            self.owned_ec_states[ec_index] = state
        else:
            part_index = ec_index - owned_count
            if part_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(part_index)
            state = self._get_ec_state(self.participating_ecs[part_index])
            self.participating_ec_states[part_index] = state
        return state
python
{ "resource": "" }
q41205
Component.alive
train
def alive(self):
    '''Is this component alive?'''
    with self._mutex:
        # A component is alive if it is alive in at least one of its
        # execution contexts.
        contexts = self.exec_contexts or []
        return any(self._obj.is_alive(ec) for ec in contexts)
python
{ "resource": "" }
q41206
Component.owned_ec_states
train
def owned_ec_states(self):
    '''The state of each execution context this component owns.'''
    with self._mutex:
        # Lazily computed and cached; an empty owned-context list caches
        # an empty state list.
        if not self._owned_ec_states:
            if self.owned_ecs:
                self._owned_ec_states = [self._get_ec_state(ec)
                                         for ec in self.owned_ecs]
            else:
                self._owned_ec_states = []
        return self._owned_ec_states
python
{ "resource": "" }
q41207
Component.owned_ecs
train
def owned_ecs(self):
    '''A list of the execution contexts owned by this component.'''
    with self._mutex:
        # Lazily built from the CORBA object and cached.
        if not self._owned_ecs:
            self._owned_ecs = [
                ExecutionContext(ec, self._obj.get_context_handle(ec))
                for ec in self._obj.get_owned_contexts()]
        return self._owned_ecs
python
{ "resource": "" }
q41208
Component.participating_ec_states
train
def participating_ec_states(self):
    '''The state of each execution context this component is participating
    in.

    '''
    with self._mutex:
        # Lazily computed and cached, mirroring owned_ec_states.
        if not self._participating_ec_states:
            if self.participating_ecs:
                self._participating_ec_states = [
                    self._get_ec_state(ec)
                    for ec in self.participating_ecs]
            else:
                self._participating_ec_states = []
        return self._participating_ec_states
python
{ "resource": "" }
q41209
Component.participating_ecs
train
def participating_ecs(self):
    '''A list of the execution contexts this component is participating
    in.

    '''
    with self._mutex:
        # Lazily built from the CORBA object and cached.
        if not self._participating_ecs:
            self._participating_ecs = [
                ExecutionContext(ec, self._obj.get_context_handle(ec))
                for ec in self._obj.get_participating_contexts()]
        return self._participating_ecs
python
{ "resource": "" }
q41210
Component.state
train
def state(self):
    '''The merged state of all the execution context states, which can be
    used as the overall state of this component.

    The order of precedence is:
        Error > Active > Inactive > Created > Unknown

    '''
    with self._mutex:
        owned = self.owned_ec_states
        participating = self.participating_ec_states
        if not owned and not participating:
            return self.UNKNOWN
        # Rank states by precedence (lowest first); merging keeps the
        # highest-ranked state seen, starting from Created.
        precedence = [self.UNKNOWN, self.CREATED, self.INACTIVE,
                      self.ACTIVE, self.ERROR]
        def rank(s):
            return precedence.index(s) if s in precedence else 0
        merged = self.CREATED
        for ec_state in list(owned) + list(participating):
            if rank(ec_state) > rank(merged):
                merged = ec_state
        return merged
python
{ "resource": "" }
q41211
Component.get_extended_fsm_service
train
def get_extended_fsm_service(self):
    '''Get a reference to the ExtendedFsmService.

    @return A reference to the ExtendedFsmService object.
    @raises InvalidSdoServiceError

    '''
    with self._mutex:
        try:
            return self._obj.get_sdo_service(
                RTC.ExtendedFsmService._NP_RepositoryId)._narrow(
                    RTC.ExtendedFsmService)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt.
            raise exceptions.InvalidSdoServiceError('ExtendedFsmService')
python
{ "resource": "" }
q41212
Component.get_port_by_name
train
def get_port_by_name(self, port_name):
    '''Get a port of this component by name.

    Returns None when no port matches.
    '''
    with self._mutex:
        matches = (p for p in self.ports if p.name == port_name)
        return next(matches, None)
python
{ "resource": "" }
q41213
Component.get_port_by_ref
train
def get_port_by_ref(self, port_ref):
    '''Get a port of this component by reference to a CORBA PortService
    object.

    Returns None when no port is equivalent to the given reference.
    '''
    with self._mutex:
        for candidate in self.ports:
            # CORBA object equivalence, not Python identity.
            if candidate.object._is_equivalent(port_ref):
                return candidate
        return None
python
{ "resource": "" }
q41214
Component.has_port_by_name
train
def has_port_by_name(self, port_name):
    '''Check if this component has a port by the given name.'''
    with self._mutex:
        return bool(self.get_port_by_name(port_name))
python
{ "resource": "" }
q41215
Component.has_port_by_ref
train
def has_port_by_ref(self, port_ref):
    '''Check if this component has a port by the given reference to a
    CORBA PortService object.

    '''
    with self._mutex:
        # Bug fix: was self.get_port_by_ref(self, port_ref), which passed
        # `self` twice and raised TypeError on every call.
        if self.get_port_by_ref(port_ref):
            return True
        return False
python
{ "resource": "" }
q41216
Component.connected_inports
train
def connected_inports(self):
    '''The list of all input ports belonging to this component that are
    connected to one or more other ports.

    '''
    # Class-name comparison avoids importing the port classes here.
    return [port for port in self.ports
            if port.__class__.__name__ == 'DataInPort'
            and port.is_connected]
python
{ "resource": "" }
q41217
Component.connected_outports
train
def connected_outports(self):
    '''The list of all output ports belonging to this component that are
    connected to one or more other ports.

    '''
    # Class-name comparison avoids importing the port classes here.
    return [port for port in self.ports
            if port.__class__.__name__ == 'DataOutPort'
            and port.is_connected]
python
{ "resource": "" }
q41218
Component.connected_svcports
train
def connected_svcports(self):
    '''The list of all service ports belonging to this component that are
    connected to one or more other ports.

    '''
    # Class-name comparison avoids importing the port classes here.
    return [port for port in self.ports
            if port.__class__.__name__ == 'CorbaPort'
            and port.is_connected]
python
{ "resource": "" }
q41219
Component.ports
train
def ports(self):
    '''The list of all ports belonging to this component.'''
    with self._mutex:
        if self._ports:
            return self._ports
        # Lazily parse the CORBA port list and cache the result.
        self._ports = [ports.parse_port(p, self)
                       for p in self._obj.get_ports()]
        return self._ports
python
{ "resource": "" }
q41220
Component.add_logger
train
def add_logger(self, cb, level='NORMAL', filters='ALL'):
    '''Add a callback to receive log events from this component.

    @param cb The callback function to receive log events. It must have
              the signature cb(name, time, source, level, message), where
              name is the name of the component the log record came from,
              time is a floating-point time stamp, source is the name of
              the logger that provided the log record, level is the log
              level of the record and message is a text string.
    @param level The maximum level of log records to receive.
    @param filters Filter the objects from which to receive log messages.
    @return An ID for this logger. Use this ID in future operations such
            as removing this logger.
    @raises AddLoggerError

    '''
    with self._mutex:
        observer = sdo.RTCLogger(self, cb)
        logger_id = uuid.uuid4()
        # Register the logger servant as an SDO service profile on the
        # component's configuration.
        profile_props = utils.dict_to_nvlist({
            'logger.log_level': level,
            'logger.filter': filters})
        profile = SDOPackage.ServiceProfile(
            id=logger_id.get_bytes(),
            interface_type=observer._this()._NP_RepositoryId,
            service=observer._this(),
            properties=profile_props)
        configuration = self.object.get_configuration()
        if configuration.add_service_profile(profile):
            self._loggers[logger_id] = observer
            return logger_id
        raise exceptions.AddLoggerError(self.name)
python
{ "resource": "" }
q41221
Component.remove_logger
train
def remove_logger(self, cb_id):
    '''Remove a logger.

    @param cb_id The ID of the logger to remove.
    @raises NoLoggerError

    '''
    if cb_id not in self._loggers:
        raise exceptions.NoLoggerError(cb_id, self.name)
    conf = self.object.get_configuration()
    # Cleanup: the result of remove_service_profile was bound to an
    # unused local; failures from the call itself still propagate.
    conf.remove_service_profile(cb_id.get_bytes())
    del self._loggers[cb_id]
python
{ "resource": "" }
q41222
Component.activate_conf_set
train
def activate_conf_set(self, set_name):
    '''Activate a configuration set by name.

    @raises NoSuchConfSetError

    '''
    with self._mutex:
        if set_name not in self.conf_sets:
            raise exceptions.NoSuchConfSetError(set_name)
        self._conf.activate_configuration_set(set_name)
python
{ "resource": "" }
q41223
Component.set_conf_set_value
train
def set_conf_set_value(self, set_name, param, value):
    '''Set a configuration set parameter value.

    @param set_name The name of the configuration set the destination
                    parameter is in.
    @param param The name of the parameter to set.
    @param value The new value for the parameter.
    @raises NoSuchConfSetError, NoSuchConfParamError

    '''
    with self._mutex:
        if set_name not in self.conf_sets:
            raise exceptions.NoSuchConfSetError(set_name)
        conf_set = self.conf_sets[set_name]
        if not conf_set.has_param(param):
            raise exceptions.NoSuchConfParamError(param)
        # Update the local cache, then push the whole set to the
        # component's configuration interface.
        conf_set.set_param(param, value)
        self._conf.set_configuration_set_values(conf_set.object)
python
{ "resource": "" }
q41224
Component.active_conf_set
train
def active_conf_set(self):
    '''The currently-active configuration set.

    Returns None when there are no configuration sets or none is active.
    '''
    with self._mutex:
        if self.conf_sets and self._active_conf_set:
            return self.conf_sets[self._active_conf_set]
        return None
python
{ "resource": "" }
q41225
Component.active_conf_set_name
train
def active_conf_set_name(self):
    '''The name of the currently-active configuration set.

    Returns the empty string when there are no configuration sets or none
    is active.
    '''
    with self._mutex:
        if not self.conf_sets or not self._active_conf_set:
            return ''
        return self._active_conf_set
python
{ "resource": "" }
q41226
Component.conf_sets
train
def conf_sets(self):
    '''The dictionary of configuration sets in this component, if any.'''
    with self._mutex:
        if self._conf_sets:
            return self._conf_sets
        # Lazily populate the cache from the component's configuration.
        self._parse_configuration()
        return self._conf_sets
python
{ "resource": "" }
q41227
CacheTableManager.setup
train
def setup(self): """Setup cache tables.""" for table_spec in self._table_specs: with self._conn: table_spec.setup(self._conn)
python
{ "resource": "" }
q41228
CacheTableManager.teardown
train
def teardown(self): """Cleanup cache tables.""" for table_spec in reversed(self._table_specs): with self._conn: table_spec.teardown(self._conn)
python
{ "resource": "" }
q41229
ElasticsearchClient.from_normal
train
def from_normal(self, hosts=default.ELASTICSEARCH_HOSTS, **kwargs):
    """
    Initialize an Elasticsearch client from a plain hosts list.

    :param hosts: list of nodes to connect to. A node may be a dictionary
        ({"host": "localhost", "port": 9200}) -- the whole dictionary is
        passed to the :class:`~elasticsearch.Connection` class as kwargs --
        or a string ``host[:port]``, which is translated to a dictionary
        automatically. If no value is given the
        :class:`~elasticsearch.Urllib3HttpConnection` class defaults are
        used.
    :return: void
    """
    client = Elasticsearch(hosts=hosts, **kwargs)
    self.client = client
    logger.info('Initialize normal Elasticsearch Client: %s.' % self.client)
python
{ "resource": "" }
q41230
ElasticsearchClient.from_ssl
train
def from_ssl(self, ca_certs, client_cert, client_key,
             hosts=default.ELASTICSEARCH_HOSTS, use_ssl=True,
             verify_certs=True, **kwargs):
    """
    Initialize an Elasticsearch client over SSL.

    :param ca_certs: optional path to a CA bundle. See
        https://urllib3.readthedocs.io/en/latest/security.html#using-certifi-with-urllib3
    :param client_cert: path to the file containing the private key and
        the certificate, or the cert only if using client_key
    :param client_key: path to the file containing the private key if
        using separate cert and key files (client_cert will contain only
        the cert)
    :param hosts: hostname of the node
    :param use_ssl: use ssl for the connection if `True`
    :param verify_certs: whether to verify SSL certificates
    :return: void
    """
    client = Elasticsearch(hosts=hosts,
                           use_ssl=use_ssl,
                           verify_certs=verify_certs,
                           ca_certs=ca_certs,
                           client_cert=client_cert,
                           client_key=client_key,
                           **kwargs)
    self.client = client
    logger.info('Initialize SSL Elasticsearch Client: %s.' % self.client)
python
{ "resource": "" }
q41231
ElasticsearchClient.transfer_data_from_mongo
train
def transfer_data_from_mongo(self, index, doc_type, use_mongo_id=False,
                             indexed_flag_field_name='',
                             mongo_query_params=None,
                             mongo_host=default.MONGO_HOST,
                             mongo_port=default.MONGO_PORT,
                             mongo_db=default.MONGO_DB,
                             mongo_collection=default.MONGO_COLLECTION):
    """
    Transfer data from MongoDB into Elasticsearch. The hostname, port,
    database and collection name of MongoDB default to the values loaded
    in default.py.

    :param index: The name of the index
    :param doc_type: The type of the document
    :param use_mongo_id: Use the MongoDB id in Elasticsearch if true,
        otherwise let Elasticsearch generate ids
    :param indexed_flag_field_name: the name of the field of the
        document; documents whose value for it is False are synchronized
    :param mongo_query_params: The dictionary of query params for MongoDB
    :param mongo_host: The hostname of MongoDB
    :param mongo_port: The port of MongoDB
    :param mongo_db: The database name in MongoDB
    :param mongo_collection: The collection name in MongoDB
    :return: (success, failed) counts from the bulk operation
    """
    # Bug fix: the default used to be a shared mutable dict ({}) that was
    # mutated below, leaking the indexed-flag filter between calls. Copy
    # the caller's dict so it is never mutated either.
    query = dict(mongo_query_params) if mongo_query_params else {}
    if indexed_flag_field_name != '':
        query[indexed_flag_field_name] = False
    mongo_client = MongoClient(host=mongo_host, port=int(mongo_port))
    try:
        collection = mongo_client[mongo_db][mongo_collection]
        # Bug fix: find() returns a lazy cursor; materialise it before
        # closing the client, otherwise iteration happens on a closed
        # connection.
        mongo_docs = list(collection.find(query))
    finally:
        mongo_client.close()
    # Build the Elasticsearch bulk-api actions.
    actions = []
    id_array = []
    for doc in mongo_docs:
        action = {
            '_op_type': 'index',
            '_index': index,
            '_type': doc_type
        }
        id_array.append(doc['_id'])
        if use_mongo_id:
            doc['id'] = str(doc['_id'])
        doc.pop('_id')
        action['_source'] = doc
        actions.append(action)
    success, failed = es_helpers.bulk(self.client, actions,
                                      request_timeout=60 * 60)
    logger.info(
        'Transfer data from MongoDB(%s:%s) into the Elasticsearch(%s) success: %s, failed: %s' % (
            mongo_host, mongo_port, self.client, success, failed))
    # Flag the synchronized documents back in MongoDB on a background
    # thread.
    if indexed_flag_field_name != '':
        t = threading.Thread(
            target=ElasticsearchClient._back_update_mongo,
            args=(self, mongo_host, mongo_port, mongo_db, mongo_collection,
                  id_array, {indexed_flag_field_name: True}),
            name='mongodb_back_update')
        t.start()
    return success, failed
python
{ "resource": "" }
q41232
ElasticsearchClient.bulk
train
def bulk(self, actions, stats_only=False, **kwargs):
    """
    Execute the bulk api via elasticsearch.helpers.bulk.

    :param actions: iterator containing the actions
    :param stats_only: if `True` only report the number of
        successful/failed operations instead of the number of successful
        operations plus a list of error responses

    Any additional keyword arguments will be passed to
    :func:`~elasticsearch.helpers.streaming_bulk`, which is used to
    execute the operation; see its documentation for more accepted
    parameters.

    :return: the ``(success, failed)`` pair produced by
        ``elasticsearch.helpers.bulk``
    """
    success, failed = es_helpers.bulk(self.client, actions, stats_only,
                                      **kwargs)
    logger.info('Bulk is done success %s failed %s actions: \n %s' % (success, failed, actions))
    # Consistency fix: surface the outcome to callers like
    # transfer_data_from_mongo does (previously computed, logged, and
    # silently discarded).
    return success, failed
python
{ "resource": "" }
q41233
ElasticsearchClient.automatic_syn_data_from_mongo
train
def automatic_syn_data_from_mongo(self, index, doc_type,
                                  indexed_flag_field_name,
                                  thread_name='automatic_syn_data_thread',
                                  interval=60, use_mongo_id=False,
                                  mongo_query_params={},
                                  mongo_host=default.MONGO_HOST,
                                  mongo_port=default.MONGO_PORT,
                                  mongo_db=default.MONGO_DB,
                                  mongo_collection=default.MONGO_COLLECTION):
    """
    Automatically synchronize data from MongoDB into Elasticsearch via a
    scheduled background task; a document is synchronized while the value
    of its indexed_flag_field_name field is False. Noteworthy that this
    function may be no good, please use it with caution.

    :param indexed_flag_field_name: the name of the field of the
        document; documents whose value for it is False are synchronized
    :param thread_name: the name of the scheduled-task thread
    :param interval: the interval between executions of the scheduled
        task (in seconds)
    :return: the thread id; use it to cancel the associated task
    """
    thread_id = self._generate_thread_id(thread_name)
    if thread_id in ElasticsearchClient.automatic_syn_data_flag:
        # Name collision with a running task: derive a unique thread name
        # under the class-level lock.
        lock.acquire()
        try:
            suffix = ElasticsearchClient.automatic_thread_name_counter
            thread_name = thread_name + '-%s' % suffix
            ElasticsearchClient.automatic_thread_name_counter += 1
            thread_id = self._generate_thread_id(thread_name)
        finally:
            lock.release()
    ElasticsearchClient.automatic_syn_data_flag[thread_id] = True
    worker = threading.Thread(
        target=ElasticsearchClient._automatic_syn_data_from_mongo_worker,
        args=(self, thread_id, index, doc_type, indexed_flag_field_name,
              interval, use_mongo_id, mongo_query_params, mongo_host,
              mongo_port, mongo_db, mongo_collection),
        name=thread_name)
    worker.start()
    return thread_id
python
{ "resource": "" }
q41234
__run_blast
train
def __run_blast(blast_command, input_file, *args, **kwargs):
    '''Run a blast variant on the given input file.

    Yields parsed hit records from a pool of blast worker processes.
    '''
    # XXX: Eventually, translate results on the fly as requested? Or
    # just always use our parsed object?
    if 'outfmt' in kwargs:
        raise Exception('Use of the -outfmt option is not supported')
    num_processes = kwargs.get(
        'pb_num_processes', os.sysconf('SC_NPROCESSORS_ONLN'))
    fields = kwargs.get('pb_fields', DEFAULT_HIT_FIELDS)
    # Force tabular output (format 7) with our chosen fields.
    cmd = [blast_command, '-outfmt', '7 {}'.format(' '.join(fields))]
    for flag in args:
        cmd.append('-' + flag)
    # Non-pb_ kwargs are forwarded as blast command-line options.
    for key, value in kwargs.iteritems():
        if not key.startswith('pb_'):
            cmd.extend(['-' + key, str(value)])
    workers = []
    for _ in range(num_processes):
        workers.append(
            subprocess.Popen(
                args=cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=None,
                close_fds=True))
    try:
        for record in __run_blast_select_loop(input_file, workers, fields):
            yield record
    finally:
        # Make sure no worker outlives the generator.
        for worker in workers:
            if worker.poll() is None:
                worker.terminate()
                worker.wait()
python
{ "resource": "" }
q41235
Segment.get_frames
train
def get_frames(self, channels=2):
    """Get numpy array of frames corresponding to the segment.

    :param integer channels: Number of channels in output array
    :returns: Array of frames in the segment
    :rtype: numpy array
    """
    # Save and restore the track's read head so this call has no net
    # effect on the track position.
    saved_frame = self.track.current_frame
    self.track.current_frame = self.start
    frames = self.track.read_frames(self.duration, channels=channels)
    self.track.current_frame = saved_frame
    for effect in self.effects:
        frames = effect.apply_to(frames, self.samplerate)
    return frames.copy()
python
{ "resource": "" }
q41236
Composition.duration
train
def duration(self):
    """Get the duration of the composition (in samples).

    The duration is the furthest end point among all segments. Returns 0
    for an empty composition; previously this raised ValueError from
    max() on an empty sequence.
    """
    if not self.segments:
        return 0
    return max(seg.comp_location + seg.duration for seg in self.segments)
python
{ "resource": "" }
q41237
Composition.add_segment
train
def add_segment(self, segment):
    """Add a segment to the composition.

    :param segment: Segment to add to composition
    :type segment: :py:class:`radiotool.composer.Segment`
    """
    # Track membership is a set, so re-adding a track is harmless.
    self.tracks.add(segment.track)
    self.segments.append(segment)
python
{ "resource": "" }
q41238
Composition.add_segments
train
def add_segments(self, segments):
    """Add a list of segments to the composition.

    :param segments: Segments to add to composition
    :type segments: list of :py:class:`radiotool.composer.Segment`
    """
    # Register every segment's track, then append the segments in order.
    self.tracks.update([seg.track for seg in segments])
    self.segments.extend(segments)
python
{ "resource": "" }
q41239
Composition.fade_in
train
def fade_in(self, segment, duration, fade_type="linear"):
    """Add a fade-in to a segment in the composition.

    :param segment: Segment to fade in to
    :type segment: :py:class:`radiotool.composer.Segment`
    :param duration: Duration of fade-in (in seconds)
    :type duration: float
    :returns: The fade that has been added to the composition
    :rtype: :py:class:`Fade`
    """
    # Volume ramps 0.0 -> 1.0 starting at the segment's location.
    fade = Fade(segment.track, segment.comp_location_in_seconds, duration,
                0.0, 1.0, fade_type=fade_type)
    self.add_dynamic(fade)
    return fade
python
{ "resource": "" }
q41240
Composition.fade_out
train
def fade_out(self, segment, duration, fade_type="linear"):
    """Add a fade-out to a segment in the composition.

    :param segment: Segment to fade out
    :type segment: :py:class:`radiotool.composer.Segment`
    :param duration: Duration of fade-out (in seconds)
    :type duration: float
    :returns: The fade that has been added to the composition
    :rtype: :py:class:`Fade`
    """
    # Volume ramps 1.0 -> 0.0 over the last `duration` seconds.
    fade_start = (segment.comp_location_in_seconds +
                  segment.duration_in_seconds - duration)
    fade = Fade(segment.track, fade_start, duration, 1.0, 0.0,
                fade_type=fade_type)
    # Pin the fade's sample-accurate location to the end of the segment
    # (the original carried a "bug fixing... perhaps" note here).
    fade.comp_location = (segment.comp_location + segment.duration -
                          int(duration * segment.track.samplerate))
    self.add_dynamic(fade)
    return fade
python
{ "resource": "" }
q41241
Composition.extended_fade_in
train
def extended_fade_in(self, segment, duration):
    """Add a fade-in to a segment that extends the beginning of the
    segment.

    :param segment: Segment to fade in
    :type segment: :py:class:`radiotool.composer.Segment`
    :param duration: Duration of fade-in (in seconds)
    :returns: The fade that has been added to the composition
    :rtype: :py:class:`Fade`
    """
    extra = int(duration * segment.track.samplerate)
    # Pull the segment's start back within the track...
    if segment.start - extra >= 0:
        segment.start -= extra
    else:
        raise Exception(
            "Cannot create fade-in that extends "
            "past the track's beginning")
    # ...and within the composition timeline.
    if segment.comp_location - extra >= 0:
        segment.comp_location -= extra
    else:
        raise Exception(
            "Cannot create fade-in the extends past the score's beginning")
    segment.duration += extra
    fade = Fade(segment.track, segment.comp_location_in_seconds, duration,
                0.0, 1.0)
    self.add_dynamic(fade)
    return fade
python
{ "resource": "" }
q41242
Composition.extended_fade_out
train
def extended_fade_out(self, segment, duration):
    """Add a fade-out to a segment that extends the end of the segment.

    (Docstring fix: this method extends the *end* of the segment — the
    original docstring incorrectly said "the beginning".)

    :param segment: Segment to fade out
    :type segment: :py:class:`radiotool.composer.Segment`
    :param duration: Duration of fade-out (in seconds)
    :returns: The fade that has been added to the composition
    :rtype: :py:class:`Fade`
    """
    dur = int(duration * segment.track.samplerate)
    # Lengthen the segment, but only if the extra audio exists in the
    # source track.
    if segment.start + segment.duration + dur < segment.track.duration:
        segment.duration += dur
    else:
        raise Exception(
            "Cannot create fade-out that extends past the track's end")
    score_loc_in_seconds = (segment.comp_location_in_seconds +
                            segment.duration_in_seconds - duration)
    f = Fade(segment.track, score_loc_in_seconds, duration, 1.0, 0.0)
    self.add_dynamic(f)
    return f
python
{ "resource": "" }
q41243
Composition.cross_fade
train
def cross_fade(self, seg1, seg2, duration): """Add a linear crossfade to the composition between two segments. :param seg1: First segment (fading out) :type seg1: :py:class:`radiotool.composer.Segment` :param seg2: Second segment (fading in) :type seg2: :py:class:`radiotool.composer.Segment` :param duration: Duration of crossfade (in seconds) """ if seg1.comp_location + seg1.duration - seg2.comp_location < 2: dur = int(duration * seg1.track.samplerate) if dur % 2 == 1: dur -= 1 if dur / 2 > seg1.duration: dur = seg1.duration * 2 if dur / 2 > seg2.duration: dur = seg2.duration * 2 # we're going to compute the crossfade and then create a RawTrack # for the resulting frames if seg2.start - (dur / 2) < 0: diff = seg2.start seg2.start = 0 seg2.duration -= diff seg2.comp_location -= diff dur = 2 * diff else: seg2.start -= (dur / 2) seg2.duration += (dur / 2) seg2.comp_location -= (dur / 2) seg1.duration += (dur / 2) out_frames = seg1.get_frames(channels=self.channels)[-dur:] seg1.duration -= dur in_frames = seg2.get_frames(channels=self.channels)[:dur] seg2.start += dur seg2.duration -= dur seg2.comp_location += dur # compute the crossfade in_frames = in_frames[:min(map(len, [in_frames, out_frames]))] out_frames = out_frames[:min(map(len, [in_frames, out_frames]))] cf_frames = radiotool.utils.linear(out_frames, in_frames) # cf_frames = equal_power(out_frames, in_frames) raw_track = RawTrack(cf_frames, name="crossfade", samplerate=seg1.track.samplerate) rs_comp_location = (seg1.comp_location + seg1.duration) /\ float(seg1.track.samplerate) rs_duration = raw_track.duration / float(raw_track.samplerate) raw_seg = Segment(raw_track, rs_comp_location, 0.0, rs_duration) # will this fix a bug? 
raw_seg.duration = raw_track.duration raw_seg.comp_location = seg1.comp_location + seg1.duration self.add_track(raw_track) self.add_segment(raw_seg) return raw_seg else: print seg1.comp_location + seg1.duration, seg2.comp_location raise Exception("Segments must be adjacent" "to add a crossfade ({}, {})".format( seg1.comp_location + seg1.duration, seg2.comp_location))
python
{ "resource": "" }
q41244
Composition.empty_over_span
train
def empty_over_span(self, time, duration):
    """Helper method that tests whether the composition contains any
    segments at a given time for a given duration.

    :param time: Time (in seconds) to start span
    :param duration: Duration (in seconds) of span
    :returns: `True` if no segment in the composition overlaps the span
        starting at `time` and lasting `duration` seconds; `False`
        otherwise.
    """
    span_start = time
    span_end = time + duration
    for seg in self.segments:
        seg_start = seg.comp_location_in_seconds
        seg_end = seg_start + seg.duration_in_seconds
        # A segment overlaps when it starts in the span, ends in the
        # span, or fully covers the span.
        starts_in_span = span_start <= seg_start < span_end
        ends_in_span = span_start <= seg_end < span_end
        covers_span = seg_start < span_start and seg_end >= span_end
        if starts_in_span or ends_in_span or covers_span:
            return False
    return True
python
{ "resource": "" }
q41245
Composition.contract
train
def contract(self, time, duration, min_contraction=0.0):
    """Remove empty gaps from the composition starting at a given time
    for a given duration.

    :param time: Time (in seconds) at which to start contracting
    :param duration: Maximum duration (in seconds) to remove
    :param min_contraction: Do nothing if the removable gap is not longer
        than this (in seconds)
    :returns: ``(contract_start, contract_dur)`` describing what was
        removed, or ``(0, 0)`` / ``(0.0, 0.0)`` if nothing was removed
    """
    contract_dur = 0.0
    contract_start = time
    if self.empty_over_span(time, duration):
        # The whole span is empty; remove all of it.
        contract_dur = duration
        contract_start = time
    else:
        starts = [s.comp_location_in_seconds for s in self.segments]
        ends = [s.comp_location_in_seconds + s.duration_in_seconds
                for s in self.segments]
        key_starts = []
        key_ends = []
        for start in starts:
            if start >= time and start < time + duration:
                # A "key start" is a segment start in the span that no
                # other segment covers.
                is_key_start = True
                for seg in self.segments:
                    if seg.comp_location_in_seconds < start and\
                            seg.comp_location_in_seconds +\
                            seg.duration_in_seconds >= start:
                        is_key_start = False
                        break
                if is_key_start:
                    key_starts.append(start)
        for end in ends:
            if end >= time and end < time + duration:
                # A "key end" is a segment end in the span that no other
                # segment covers.
                is_key_end = True
                for seg in self.segments:
                    if seg.comp_location_in_seconds <= end and\
                            seg.comp_location_in_seconds +\
                            seg.duration_in_seconds > end:
                        is_key_end = False
                        break
                if is_key_end:
                    key_ends.append(end)
        if len(key_starts) + len(key_ends) == 0:
            return 0, 0
        # Combine key starts and key ends and find the earliest boundary.
        key_both = sorted(key_starts + key_ends)
        first_key = key_both[0]
        if first_key in key_starts:
            # Gap runs from the span start to the first segment start.
            contract_start = time
            contract_dur = first_key - time
        else:
            # Gap begins at a segment end.
            contract_start = first_key
            if len(key_both) >= 2:
                contract_dur = key_both[1] - first_key
            else:
                contract_dur = time + duration - first_key
    if contract_dur > min_contraction:
        # Shift everything after the gap left by the contracted amount,
        # converting seconds to samples per object.
        for seg in self.segments:
            if seg.comp_location_in_seconds > contract_start:
                seg.comp_location -= int(seg.samplerate * contract_dur)
        for dyn in self.dynamics:
            if dyn.comp_location_in_seconds > contract_start:
                # Bug fix: the original used `seg.samplerate` here --
                # a leaked loop variable from the segment loop (and a
                # NameError when there were no segments). Use the
                # dynamic's own samplerate.
                dyn.comp_location -= int(dyn.samplerate * contract_dur)
        return contract_start, contract_dur
    else:
        return 0.0, 0.0
python
{ "resource": "" }
q41246
Composition.export
train
def export(self, **kwargs): """ Generate audio file from composition. :param str. filename: Output filename (no extension) :param str. filetype: Output file type (only .wav supported for now) :param integer samplerate: Sample rate of output audio :param integer channels: Channels in output audio, if different than originally specified :param bool. separate_tracks: Also generate audio file for each track in composition :param int min_length: Minimum length of output array (in frames). Will zero pad extra length. :param bool. adjust_dynamics: Automatically adjust dynamics (will document later) """ # get optional args filename = kwargs.pop('filename', 'out') filetype = kwargs.pop('filetype', 'wav') adjust_dynamics = kwargs.pop('adjust_dynamics', False) samplerate = kwargs.pop('samplerate', None) channels = kwargs.pop('channels', self.channels) separate_tracks = kwargs.pop('separate_tracks', False) min_length = kwargs.pop('min_length', None) if samplerate is None: samplerate = np.min([track.samplerate for track in self.tracks]) encoding = 'pcm16' to_mp3 = False if filetype == 'ogg': encoding = 'vorbis' elif filetype == 'mp3': filetype = 'wav' to_mp3 = True if separate_tracks: # build the separate parts of the composition if desired for track in self.tracks: out = self.build(track_list=[track], adjust_dynamics=adjust_dynamics, min_length=min_length, channels=channels) out_file = Sndfile("%s-%s.%s" % (filename, track.name, filetype), 'w', Format(filetype, encoding=encoding), channels, samplerate) out_file.write_frames(out) out_file.close() # always build the complete composition out = self.build(adjust_dynamics=adjust_dynamics, min_length=min_length, channels=channels) out_filename = "%s.%s" % (filename, filetype) out_file = Sndfile(out_filename, 'w', Format(filetype, encoding=encoding), channels, samplerate) out_file.write_frames(out) out_file.close() if LIBXMP and filetype == "wav": xmp = libxmp.XMPMeta() ns = libxmp.consts.XMP_NS_DM p = 
xmp.get_prefix_for_namespace(ns) xpath = p + 'Tracks' xmp.append_array_item(ns, xpath, None, array_options={"prop_value_is_array": True}, prop_value_is_struct=True) xpath += '[1]/' + p xmp.set_property(ns, xpath + "trackName", "CuePoint Markers") xmp.set_property(ns, xpath + "trackType", "Cue") xmp.set_property(ns, xpath + "frameRate", "f%d" % samplerate) for i, lab in enumerate(self.labels): xmp.append_array_item(ns, xpath + "markers", None, array_options={"prop_value_is_array": True}, prop_value_is_struct=True) xmp.set_property(ns, xpath + "markers[%d]/%sname" % (i + 1, p), lab.name) xmp.set_property(ns, xpath + "markers[%d]/%sstartTime" % (i + 1, p), str(lab.sample(samplerate))) xmpfile = libxmp.XMPFiles(file_path=out_filename, open_forupdate=True) if xmpfile.can_put_xmp(xmp): xmpfile.put_xmp(xmp) xmpfile.close_file() if to_mp3: wav_to_mp3(out_filename, delete_wav=True) return out
python
{ "resource": "" }
q41247
get_episode_types
train
def get_episode_types(db) -> Iterator[EpisodeType]: """Get all episode types.""" cur = db.cursor() cur.execute('SELECT id, name, prefix FROM episode_type') for type_id, name, prefix in cur: yield EpisodeType(type_id, name, prefix)
python
{ "resource": "" }
q41248
EpisodeTypes.get_epno
train
def get_epno(self, episode: Episode): """Return epno for an Episode instance. epno is a string formatted with the episode number and type, e.g., S1, T2. >>> x = EpisodeTypes([EpisodeType(1, 'foo', 'F')]) >>> ep = Episode(type=1, number=2) >>> x.get_epno(ep) 'F2' """ return '{}{}'.format(self[episode.type].prefix, episode.number)
python
{ "resource": "" }
q41249
Reporter.on_service_add
train
def on_service_add(self, service): """ When a new service is added, a worker thread is launched to periodically run the checks for that service. """ self.launch_thread(service.name, self.check_loop, service)
python
{ "resource": "" }
q41250
Reporter.check_loop
train
def check_loop(self, service): """ While the reporter is not shutting down and the service being checked is present in the reporter's configuration, this method will launch a job to run all of the service's checks and then pause for the configured interval. """ logger.info("Starting check loop for service '%s'", service.name) def handle_checks_result(f): try: came_up, went_down = f.result() except Exception: logger.exception("Error checking service '%s'", service.name) return if not came_up and not went_down: return discovery = self.configurables[Discovery][service.discovery] for port in came_up: logger.debug("Reporting %s, port %d up", service.name, port) discovery.report_up(service, port) for port in went_down: logger.debug("Reporting %s, port %d down", service.name, port) discovery.report_down(service, port) while ( service in self.configurables[Service].values() and not self.shutdown.is_set() ): self.work_pool.submit( self.run_checks, service ).add_done_callback( handle_checks_result ) logger.debug("sleeping for %s seconds", service.check_interval) wait_on_event(self.shutdown, timeout=service.check_interval)
python
{ "resource": "" }
q41251
Reporter.run_checks
train
def run_checks(self, service): """ Runs each check for the service and reports to the service's discovery method based on the results. If all checks pass and the service's present node was previously reported as down, the present node is reported as up. Conversely, if any of the checks fail and the service's present node was previously reported as up, the present node will be reported as down. """ logger.debug("Running checks. (%s)", service.name) if service.discovery not in self.configurables[Discovery]: logger.warn( "Service %s is using Unknown/unavailable discovery '%s'.", service.name, service.discovery ) return set(), set() service.update_ports() came_up, went_down = service.run_checks() return came_up, went_down
python
{ "resource": "" }
q41252
PetFinderClient._do_api_call
train
def _do_api_call(self, method, data): """ Convenience method to carry out a standard API call against the Petfinder API. :param basestring method: The API method name to call. :param dict data: Key/value parameters to send to the API method. This varies based on the method. :raises: A number of :py:exc:`petfinder.exceptions.PetfinderAPIError`` sub-classes, depending on what went wrong. :rtype: lxml.etree._Element :returns: The parsed document. """ # Developer API keys, auth tokens, and other standard, required args. data.update({ "key": self.api_key, # No API methods currently use this, but we're ready for it, # should that change. "token": self.api_auth_token, }) # Ends up being a full URL+path. url = "%s%s" % (self.endpoint, method) # Bombs away! response = requests.get(url, params=data) # Parse and return an ElementTree instance containing the document. root = etree.fromstring(response.content) # If this is anything but '100', it's an error. status_code = root.find("header/status/code").text # If this comes back as non-None, we know we've got problems. exc_class = _get_exception_class_from_status_code(status_code) if exc_class: # Sheet, sheet, errar! Raise the appropriate error, and pass # the accompanying error message as the exception message. error_message = root.find("header/status/message").text #noinspection PyCallingNonCallable raise exc_class(error_message) return root
python
{ "resource": "" }
q41253
PetFinderClient._do_autopaginating_api_call
train
def _do_autopaginating_api_call(self, method, kwargs, parser_func): """ Given an API method, the arguments passed to it, and a function to hand parsing off to, loop through the record sets in the API call until all records have been yielded. This is mostly done this way to reduce duplication through the various API methods. :param basestring method: The API method on the endpoint. :param dict kwargs: The kwargs from the top-level API method. :param callable parser_func: A callable that is used for parsing the output from the API call. :rtype: generator :returns: Returns a generator that may be returned by the top-level API method. """ # Used to determine whether to fail noisily if no results are returned. has_records = {"has_records": False} while True: try: root = self._do_api_call(method, kwargs) except RecordDoesNotExistError: if not has_records["has_records"]: # No records seen yet, this really is empty. raise # We've seen some records come through. We must have hit the # end of the result set. Finish up silently. return # This is used to track whether this go around the call->parse # loop yielded any records. records_returned_by_this_loop = False for record in parser_func(root, has_records): yield record # We saw a record, mark our tracker accordingly. records_returned_by_this_loop = True # There is a really fun bug in the Petfinder API with # shelter.getpets where an offset is returned with no pets, # causing an infinite loop. if not records_returned_by_this_loop: return # This will determine at what offset we start the next query. last_offset = root.find("lastOffset").text kwargs["offset"] = last_offset
python
{ "resource": "" }
q41254
PetFinderClient.breed_list
train
def breed_list(self, **kwargs): """ breed.list wrapper. Returns a list of breed name strings. :rtype: list :returns: A list of breed names. """ root = self._do_api_call("breed.list", kwargs) breeds = [] for breed in root.find("breeds"): breeds.append(breed.text) return breeds
python
{ "resource": "" }
q41255
PetFinderClient.pet_get
train
def pet_get(self, **kwargs): """ pet.get wrapper. Returns a record dict for the requested pet. :rtype: dict :returns: The pet's record dict. """ root = self._do_api_call("pet.get", kwargs) return self._parse_pet_record(root.find("pet"))
python
{ "resource": "" }
q41256
PetFinderClient.pet_getrandom
train
def pet_getrandom(self, **kwargs): """ pet.getRandom wrapper. Returns a record dict or Petfinder ID for a random pet. :rtype: dict or str :returns: A dict of pet data if ``output`` is ``'basic'`` or ``'full'``, and a string if ``output`` is ``'id'``. """ root = self._do_api_call("pet.getRandom", kwargs) output_brevity = kwargs.get("output", "id") if output_brevity == "id": return root.find("petIds/id").text else: return self._parse_pet_record(root.find("pet"))
python
{ "resource": "" }
q41257
PetFinderClient.pet_find
train
def pet_find(self, **kwargs): """ pet.find wrapper. Returns a generator of pet record dicts matching your search criteria. :rtype: generator :returns: A generator of pet record dicts. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive. """ def pet_find_parser(root, has_records): """ The parser that is used with the ``_do_autopaginating_api_call`` method for auto-pagination. :param lxml.etree._Element root: The root Element in the response. :param dict has_records: A dict that we track the loop state in. dicts are passed by references, which is how this works. """ for pet in root.findall("pets/pet"): # This is changed in the original record, since it's passed # by reference. has_records["has_records"] = True yield self._parse_pet_record(pet) return self._do_autopaginating_api_call( "pet.find", kwargs, pet_find_parser )
python
{ "resource": "" }
q41258
PetFinderClient.shelter_find
train
def shelter_find(self, **kwargs): """ shelter.find wrapper. Returns a generator of shelter record dicts matching your search criteria. :rtype: generator :returns: A generator of shelter record dicts. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive. """ def shelter_find_parser(root, has_records): """ The parser that is used with the ``_do_autopaginating_api_call`` method for auto-pagination. :param lxml.etree._Element root: The root Element in the response. :param dict has_records: A dict that we track the loop state in. dicts are passed by references, which is how this works. """ for shelter in root.find("shelters"): has_records["has_records"] = True record = {} for field in shelter: record[field.tag] = field.text yield record return self._do_autopaginating_api_call( "shelter.find", kwargs, shelter_find_parser )
python
{ "resource": "" }
q41259
PetFinderClient.shelter_get
train
def shelter_get(self, **kwargs): """ shelter.get wrapper. Given a shelter ID, retrieve its details in dict form. :rtype: dict :returns: The shelter's details. """ root = self._do_api_call("shelter.get", kwargs) shelter = root.find("shelter") for field in shelter: record = {} for field in shelter: record[field.tag] = field.text return record
python
{ "resource": "" }
q41260
PetFinderClient.shelter_listbybreed
train
def shelter_listbybreed(self, **kwargs): """ shelter.listByBreed wrapper. Given a breed and an animal type, list the shelter IDs with pets of said breed. :rtype: generator :returns: A generator of shelter IDs that have breed matches. """ root = self._do_api_call("shelter.listByBreed", kwargs) shelter_ids = root.findall("shelterIds/id") for shelter_id in shelter_ids: yield shelter_id.text
python
{ "resource": "" }
q41261
TreeNode.add_callback
train
def add_callback(self, event, cb, args=None): '''Add a callback to this node. Callbacks are called when the specified event occurs. The available events depends on the specific node type. Args should be a value to pass to the callback when it is called. The callback should be of the format: def callback(node, value, cb_args): where node will be the node that called the function, value is the relevant information for the event, and cb_args are the arguments you registered with the callback. ''' if event not in self._cbs: raise exceptions.NoSuchEventError self._cbs[event] = [(cb, args)]
python
{ "resource": "" }
q41262
TreeNode.get_node
train
def get_node(self, path): '''Get a child node of this node, or this node, based on a path. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return The node pointed to by @ref path, or None if the path does not point to a node in the tree below this node. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.get_node(['p', 'c1']) == c1 True >>> p.get_node(['p', 'c2']) == c2 True ''' with self._mutex: if path[0] == self._name: if len(path) == 1: return self elif path[1] in self._children: return self._children[path[1]].get_node(path[1:]) else: return None else: return None
python
{ "resource": "" }
q41263
TreeNode.has_path
train
def has_path(self, path): '''Check if a path exists below this node. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return True if the path points to a node in the tree below this node, or this node itself (for paths one element long). False otherwise. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.has_path(['p', 'c1']) True >>> p.has_path(['p', 'c3']) False ''' with self._mutex: if path[0] == self._name: if len(path) == 1: return True elif path[1] in self._children: return self._children[path[1]].has_path(path[1:]) else: return False else: return False
python
{ "resource": "" }
q41264
TreeNode.iterate
train
def iterate(self, func, args=None, filter=[]): '''Call a function on this node, and recursively all its children. This is a depth-first iteration. @param func The function to call. Its declaration must be 'def blag(node, args)', where 'node' is the current node in the iteration and args is the value of @ref args. @param args Extra arguments to pass to the function at each iteration. Pass multiple arguments in as a tuple. @param filter A list of filters to apply before calling func for each node in the iteration. If the filter is not True, @ref func will not be called for that node. Each filter entry should be a string, representing one of the is_* properties (is_component, etc), or a function object. @return The results of the calls to @ref func in a list. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> def hello(n, args): ... return args[0] + ' ' + n._name >>> p.iterate(hello, args=['hello']) ['hello p', 'hello c2', 'hello c1'] >>> p.iterate(hello, args=['hello'], filter=['_name=="c1"']) ['hello c1'] ''' with self._mutex: result = [] if filter: filters_passed = True for f in filter: if type(f) == str: if not eval('self.' + f): filters_passed = False break else: if not f(self): filters_passed = False break if filters_passed: result = [func(self, args)] else: result = [func(self, args)] for child in self._children: result += self._children[child].iterate(func, args, filter) return result
python
{ "resource": "" }
q41265
TreeNode.rem_callback
train
def rem_callback(self, event, cb): '''Remove a callback from this node. The callback is removed from the specified event. @param cb The callback function to remove. ''' if event not in self._cbs: raise exceptions.NoSuchEventError(self.name, event) c = [(x[0], x[1]) for x in self._cbs[event]] if not c: raise exceptions.NoCBError(self.name, event, cb) self._cbs[event].remove(c[0])
python
{ "resource": "" }
q41266
TreeNode.full_path
train
def full_path(self): '''The full path of this node.''' with self._mutex: if self._parent: return self._parent.full_path + [self._name] else: return [self._name]
python
{ "resource": "" }
q41267
TreeNode.full_path_str
train
def full_path_str(self): '''The full path of this node as a string.''' with self._mutex: if self._parent: if self._parent._name == '/': return self._parent.full_path_str + self._name else: return self._parent.full_path_str + '/' + self._name else: return self._name
python
{ "resource": "" }
q41268
TreeNode.orb
train
def orb(self): '''The ORB used to access this object. This property's value will be None if no object above this object is a name server. ''' with self._mutex: if self._parent.name == '/': return None return self._parent.orb
python
{ "resource": "" }
q41269
TreeNode.root
train
def root(self): '''The root node of the tree this node is in.''' with self._mutex: if self._parent: return self._parent.root else: return self
python
{ "resource": "" }
q41270
command
train
def command(state, args): """Delete priority rule.""" args = parser.parse_args(args[1:]) query.files.delete_priority_rule(state.db, args.id) del state.file_picker
python
{ "resource": "" }
q41271
upsert
train
def upsert(db, table, key_cols, update_dict): """Fabled upsert for SQLiteDB. Perform an upsert based on primary key. :param SQLiteDB db: database :param str table: table to upsert into :param str key_cols: name of key columns :param dict update_dict: key-value pairs to upsert """ with db: cur = db.cursor() cur.execute( 'UPDATE {} SET {} WHERE {}'.format( table, ','.join(_sqlpformat(col) for col in update_dict.keys()), ' AND '.join(_sqlpformat(col) for col in key_cols), ), update_dict, ) if db.changes() == 0: keys, values = zip(*update_dict.items()) cur.execute( 'INSERT INTO {} ({}) VALUES ({})'.format( table, ','.join(keys), ','.join('?' for _ in values)), values)
python
{ "resource": "" }
q41272
Node.current
train
def current(cls, service, port): """ Returns a Node instance representing the current service node. Collects the host and IP information for the current machine and the port information from the given service. """ host = socket.getfqdn() return cls( host=host, ip=socket.gethostbyname(host), port=port, metadata=service.metadata )
python
{ "resource": "" }
q41273
Node.serialize
train
def serialize(self): """ Serializes the node data as a JSON map string. """ return json.dumps({ "port": self.port, "ip": self.ip, "host": self.host, "peer": self.peer.serialize() if self.peer else None, "metadata": json.dumps(self.metadata or {}, sort_keys=True), }, sort_keys=True)
python
{ "resource": "" }
q41274
Node.deserialize
train
def deserialize(cls, value): """ Creates a new Node instance via a JSON map string. Note that `port` and `ip` and are required keys for the JSON map, `peer` and `host` are optional. If `peer` is not present, the new Node instance will use the current peer. If `host` is not present, the hostname of the given `ip` is looked up. """ if getattr(value, "decode", None): value = value.decode() logger.debug("Deserializing node data: '%s'", value) parsed = json.loads(value) if "port" not in parsed: raise ValueError("No port defined for node.") if "ip" not in parsed: raise ValueError("No IP address defined for node.") if "host" not in parsed: host, aliases, ip_list = socket.gethostbyaddr(parsed["ip"]) parsed["host"] = socket.get_fqdn(host) if "peer" in parsed: peer = Peer.deserialize(parsed["peer"]) else: peer = None return cls( parsed["host"], parsed["ip"], parsed["port"], peer=peer, metadata=parsed.get("metadata") )
python
{ "resource": "" }
q41275
ConfigWatcher.start
train
def start(self): """ Iterates over the `watched_configurabes` attribute and starts a config file monitor for each. The resulting observer threads are kept in an `observers` list attribute. """ for config_class in self.watched_configurables: monitor = ConfigFileMonitor(config_class, self.config_dir) self.observers.append( monitor.start( self.add_configurable, self.update_configurable, self.remove_configurable ) ) wait_on_event(self.shutdown)
python
{ "resource": "" }
q41276
ConfigWatcher.launch_thread
train
def launch_thread(self, name, fn, *args, **kwargs): """ Adds a named thread to the "thread pool" dictionary of Thread objects. A daemon thread that executes the passed-in function `fn` with the given args and keyword args is started and tracked in the `thread_pool` attribute with the given `name` as the key. """ logger.debug( "Launching thread '%s': %s(%s, %s)", name, fn, args, kwargs ) self.thread_pool[name] = threading.Thread( target=fn, args=args, kwargs=kwargs ) self.thread_pool[name].daemon = True self.thread_pool[name].start()
python
{ "resource": "" }
q41277
ConfigWatcher.kill_thread
train
def kill_thread(self, name): """ Joins the thread in the `thread_pool` dict with the given `name` key. """ if name not in self.thread_pool: return self.thread_pool[name].join() del self.thread_pool[name]
python
{ "resource": "" }
q41278
ConfigWatcher.update_configurable
train
def update_configurable(self, configurable_class, name, config): """ Callback fired when a configurable instance is updated. Looks up the existing configurable in the proper "registry" and `apply_config()` is called on it. If a method named "on_<configurable classname>_update" is defined it is called in the work pool and passed the configurable's name, the old config and the new config. If the updated configurable is not present, `add_configurable()` is called instead. """ configurable_class_name = configurable_class.__name__.lower() logger.info( "updating %s: '%s'", configurable_class_name, name ) registry = self.registry_for(configurable_class) if name not in registry: logger.warn( "Tried to update unknown %s: '%s'", configurable_class_name, name ) self.add_configurable( configurable_class, configurable_class.from_config(name, config) ) return registry[name].apply_config(config) hook = self.hook_for(configurable_class, "update") if not hook: return def done(f): try: f.result() except Exception: logger.exception("Error updating configurable '%s'", name) self.work_pool.submit(hook, name, config).add_done_callback(done)
python
{ "resource": "" }
q41279
ConfigWatcher.remove_configurable
train
def remove_configurable(self, configurable_class, name): """ Callback fired when a configurable instance is removed. Looks up the existing configurable in the proper "registry" and removes it. If a method named "on_<configurable classname>_remove" is defined it is called via the work pooland passed the configurable's name. If the removed configurable is not present, a warning is given and no further action is taken. """ configurable_class_name = configurable_class.__name__.lower() logger.info("Removing %s: '%s'", configurable_class_name, name) registry = self.registry_for(configurable_class) if name not in registry: logger.warn( "Tried to remove unknown active %s: '%s'", configurable_class_name, name ) return hook = self.hook_for(configurable_class, action="remove") if not hook: registry.pop(name) return def done(f): try: f.result() registry.pop(name) except Exception: logger.exception("Error removing configurable '%s'", name) self.work_pool.submit(hook, name).add_done_callback(done)
python
{ "resource": "" }
q41280
ConfigWatcher.stop
train
def stop(self): """ Method for shutting down the watcher. All config file observers are stopped and their threads joined, along with the worker thread pool. """ self.shutdown.set() for monitor in self.observers: monitor.stop() self.wind_down() for monitor in self.observers: monitor.join() for thread in self.thread_pool.values(): thread.join() self.work_pool.shutdown()
python
{ "resource": "" }
q41281
BaseResource.get_resource_url
train
def get_resource_url(cls, resource, base_url): """ Construct the URL for talking to this resource. i.e.: http://myapi.com/api/resource Note that this is NOT the method for calling individual instances i.e. http://myapi.com/api/resource/1 Args: resource: The resource class instance base_url: The Base URL of this API service. returns: resource_url: The URL for this resource """ if resource.Meta.resource_name: url = '{}/{}'.format(base_url, resource.Meta.resource_name) else: p = inflect.engine() plural_name = p.plural(resource.Meta.name.lower()) url = '{}/{}'.format(base_url, plural_name) return cls._parse_url_and_validate(url)
python
{ "resource": "" }
q41282
BaseResource.get_url
train
def get_url(cls, url, uid, **kwargs): """ Construct the URL for talking to an individual resource. http://myapi.com/api/resource/1 Args: url: The url for this resource uid: The unique identifier for an individual resource kwargs: Additional keyword argueents returns: final_url: The URL for this individual resource """ if uid: url = '{}/{}'.format(url, uid) else: url = url return cls._parse_url_and_validate(url)
python
{ "resource": "" }
q41283
BaseResource.get_method_name
train
def get_method_name(resource, method_type): """ Generate a method name for this resource based on the method type. """ return '{}_{}'.format(method_type.lower(), resource.Meta.name.lower())
python
{ "resource": "" }
q41284
BaseResource._parse_url_and_validate
train
def _parse_url_and_validate(cls, url): """ Recieves a URL string and validates it using urlparse. Args: url: A URL string Returns: parsed_url: A validated URL Raises: BadURLException """ parsed_url = urlparse(url) if parsed_url.scheme and parsed_url.netloc: final_url = parsed_url.geturl() else: raise BadURLException return final_url
python
{ "resource": "" }
q41285
HypermediaResource.set_related_method
train
def set_related_method(self, resource, full_resource_url): """ Using reflection, generate the related method and return it. """ method_name = self.get_method_name(resource, 'get') def get(self, **kwargs): return self._call_api_single_related_resource( resource, full_resource_url, method_name, **kwargs ) def get_list(self, **kwargs): return self._call_api_many_related_resources( resource, full_resource_url, method_name, **kwargs ) if isinstance(full_resource_url, list): setattr( self, method_name, types.MethodType(get_list, self) ) else: setattr( self, method_name, types.MethodType(get, self) )
python
{ "resource": "" }
q41286
HypermediaResource.match_urls_to_resources
train
def match_urls_to_resources(self, url_values): """ For the list of valid URLs, try and match them up to resources in the related_resources attribute. Args: url_values: A dictionary of keys and URL strings that could be related resources. Returns: valid_values: The values that are valid """ valid_values = {} for resource in self.Meta.related_resources: for k, v in url_values.items(): resource_url = resource.get_resource_url( resource, resource.Meta.base_url) if isinstance(v, list): if all([resource_url in i for i in v]): self.set_related_method(resource, v) valid_values[k] = v elif resource_url in v: self.set_related_method(resource, v) valid_values[k] = v return valid_values
python
{ "resource": "" }
q41287
add_episode
train
def add_episode(db, aid, episode): """Add an episode.""" values = { 'aid': aid, 'type': episode.type, 'number': episode.number, 'title': episode.title, 'length': episode.length, } upsert(db, 'episode', ['aid', 'type', 'number'], values)
python
{ "resource": "" }
q41288
delete_episode
train
def delete_episode(db, aid, episode): """Delete an episode.""" db.cursor().execute( 'DELETE FROM episode WHERE aid=:aid AND type=:type AND number=:number', { 'aid': aid, 'type': episode.type, 'number': episode.number, })
python
{ "resource": "" }
q41289
bump
train
def bump(db, aid): """Bump anime regular episode count.""" anime = lookup(db, aid) if anime.complete: return episode = anime.watched_episodes + 1 with db: set_watched(db, aid, get_eptype(db, 'regular').id, episode) set_status( db, aid, anime.enddate and episode >= anime.episodecount, episode)
python
{ "resource": "" }
q41290
MistClient.__authenticate
train
def __authenticate(self): """ Sends a json payload with the email and password in order to get the authentication api_token to be used with the rest of the requests """ if self.api_token: # verify current API token check_auth_uri = self.uri.split('/api/v1')[0] + '/check_token' req = self.request(check_auth_uri) try: ping = req.get().json() except Exception as exc: if str(exc).startswith('User not authenticated'): self.api_token = None else: if self.email == ping['hello']: return print "Authentication failed" sys.exit(1) auth_uri = self.uri.split('/api/v1')[0] + '/auth' payload = { 'email': self.email, 'password': self.password, 'org_id': self.org_name } data = json.dumps(payload) req = self.request(auth_uri, data=data) response = req.post().json() token = response.get('mist_api_token', None) if token: # backwards compatibility with old Authentication system self.api_token = "mist_1 %s:%s" % (self.email, token) else: self.api_token = response.get('token', None)
python
{ "resource": "" }
q41291
MistClient.supported_providers
train
def supported_providers(self): """ Request a list of all available providers :returns: A list of all available providers (e.g. {'provider': 'ec2_ap_northeast', 'title': 'EC2 AP NORTHEAST'}) """ req = self.request(self.uri + '/providers', api_version=2) providers = req.get().json() supported_providers = providers['supported_providers'] return supported_providers
python
{ "resource": "" }
q41292
MistClient._list_clouds
train
def _list_clouds(self): """ Request a list of all added clouds. Populates self._clouds dict with mist.client.model.Cloud instances """ req = self.request(self.uri + '/clouds') clouds = req.get().json() if clouds: for cloud in clouds: self._clouds[cloud['id']] = Cloud(cloud, self) else: self._clouds = {}
python
{ "resource": "" }
q41293
MistClient.clouds
train
def clouds(self, id=None, name=None, provider=None, search=None):
    """
    Return the added clouds, fetching them on first use.

    :param id: match a cloud by exact id.
    :param name: match a cloud by exact title.
    :param provider: match clouds by exact provider name.
    :param search: match clouds whose title, id or provider contains
        this substring.
    :returns: A list of Cloud instances.
    """
    if self._clouds is None:
        self._clouds = {}
        self._list_clouds()
    all_clouds = list(self._clouds.values())
    if id:
        return [c for c in all_clouds if c.id == id]
    if name:
        return [c for c in all_clouds if c.title == name]
    if provider:
        return [c for c in all_clouds if c.provider == provider]
    if search:
        return [c for c in all_clouds
                if search in c.title or search in c.id
                or search in c.provider]
    return all_clouds
python
{ "resource": "" }
q41294
MistClient._list_keys
train
def _list_keys(self):
    """
    Fetch every added key from the API and rebuild the self._keys dict
    with Key instances, keyed by key id.
    """
    response = self.request(self.uri + '/keys').get()
    key_list = response.json()
    if key_list:
        self._keys = {entry['id']: Key(entry, self) for entry in key_list}
    else:
        self._keys = {}
python
{ "resource": "" }
q41295
MistClient.keys
train
def keys(self, id=None, search=None):
    """
    Return the added keys, fetching them on first use.

    :param id: match a key by exact id.
    :param search: match keys whose id or name contains this substring.
    :returns: A list of Key instances.
    """
    if self._keys is None:
        self._keys = {}
        self._list_keys()
    all_keys = list(self._keys.values())
    if id:
        return [k for k in all_keys if k.id == id]
    if search:
        return [k for k in all_keys if search in k.id or search in k.name]
    return all_keys
python
{ "resource": "" }
q41296
MistClient.generate_key
train
def generate_key(self):
    """
    Have mist.io generate a random private ssh-key, to be used when
    creating a new Key.

    :returns: The generated private key as a string.
    """
    response = self.request(self.uri + "/keys").post()
    return response.json()['priv']
python
{ "resource": "" }
q41297
MistClient.add_key
train
def add_key(self, key_name, private):
    """
    Add a new key to mist.io.

    :param key_name: Name of the new key (it will be used as the key's
        id as well).
    :param private: Private ssh-key in string format (see also
        generate_key()).
    :returns: An updated list of added keys.
    """
    data = json.dumps({'name': key_name, 'priv': private})
    response = self.request(self.uri + '/keys', data=data).put().json()
    # Refresh the local key cache so the new key shows up immediately.
    self.update_keys()
    return response
python
{ "resource": "" }
q41298
command
train
def command(state, args):
    """Add a priority rule for files."""
    opts = parser.parse_args(args[1:])
    rule_id = query.files.add_priority_rule(state.db, opts.regexp,
                                            opts.priority)
    # Drop the cached picker so the new rule takes effect next time.
    del state.file_picker
    print('Added rule {}'.format(rule_id))
python
{ "resource": "" }
q41299
Episode.number
train
def number(self) -> int:
    """Return the episode number parsed from ``epno``.

    Unique for an anime and episode type, but not unique across
    episode types for the same anime.
    """
    return int(self._NUMBER_SUFFIX.search(self.epno).group(1))
python
{ "resource": "" }