_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q40900
Bugsy.put
train
def put(self, bug):
    """Create or update a bug on Bugzilla.

    Requires that valid credentials were passed to the object
    initialisation so an auth token was received.

    :param bug: A Bug object either created by hand or by using get().
    :raises BugsyException: if there is no valid token, if ``bug`` is
        not a Bug, or if Bugzilla reports an error on creation.

    >>> bugzilla = Bugsy()
    >>> bug = bugzilla.get(123456)
    >>> bug.summary = "I like cheese and sausages"
    >>> bugzilla.put(bug)
    """
    if not self._have_auth:
        raise BugsyException("Unfortunately you can't put bugs in Bugzilla"
                             " without credentials")

    if not isinstance(bug, Bug):
        raise BugsyException("Please pass in a Bug object when posting"
                             " to Bugzilla")

    if not bug.id:
        result = self.request('bug', 'POST', json=bug.to_dict())
        if 'error' not in result:
            bug._bug['id'] = result['id']
            bug._bugsy = self
            # `comment` is only meaningful on creation; drop it if present
            # (pop with a default replaces the old broad try/except).
            bug._bug.pop('comment', None)
        else:
            raise BugsyException(result['message'])
    else:
        result = self.request('bug/%s' % bug.id, 'PUT', json=bug.to_dict())
        # NOTE(review): unlike the POST branch, `result` is not checked
        # for an 'error' key here -- confirm whether PUT failures should
        # also raise.
        updated_bug = self.get(bug.id)
        return updated_bug
python
{ "resource": "" }
q40901
Bugsy.request
train
def request(self, path, method='GET', headers=None, **kwargs):
    """Perform an HTTP request against the Bugzilla server.

    Given a relative Bugzilla URL path, an optional request method, and
    arguments suitable for requests.Request(), perform a HTTP request.
    The caller's `headers` mapping is never mutated.
    """
    merged_headers = dict(headers) if headers else {}
    merged_headers["User-Agent"] = "Bugsy"
    kwargs['headers'] = merged_headers
    endpoint = '%s/%s' % (self.bugzilla_url, path)
    response = self.session.request(method, endpoint, **kwargs)
    return self._handle_errors(response)
python
{ "resource": "" }
q40902
estimate_k
train
def estimate_k(X, max_k):
    """Estimate k for K-Means.

    Adapted from
    <https://datasciencelab.wordpress.com/2014/01/21/selection-of-k-in-k-means-clustering-reloaded/>
    """
    candidate_ks = range(1, max_k)
    scores = np.zeros(len(candidate_ks))

    # K=1 is a special case: there is no previous dispersion value.
    scores[0], prev_sk = _fK(1)

    # Remaining Ks each reuse the previous iteration's dispersion.
    for k in candidate_ks[1:]:
        scores[k - 1], prev_sk = _fK(k, Skm1=prev_sk)

    # The best k minimises f(K); +1 maps the array index back to k.
    return np.argmin(scores) + 1
python
{ "resource": "" }
q40903
_clean_fields
train
def _clean_fields(allowed_fields: dict, fields: FieldsParam) -> Iterable[str]:
    """Normalise lookup fields, raising ValueError on unknown names."""
    if fields == ALL:
        # ALL sentinel: every allowed field.
        return allowed_fields.keys()
    cleaned = tuple(fields)
    unknown = set(cleaned) - allowed_fields.keys()
    if unknown:
        raise ValueError('Unknown fields: {}'.format(unknown))
    return cleaned
python
{ "resource": "" }
q40904
select
train
def select(
        db,
        where_query: str,
        where_params: SQLParams,
        fields: FieldsParam = ALL,
        episode_fields: FieldsParam = (),
) -> Iterator[Anime]:
    """Perform an arbitrary SQL SELECT WHERE on the anime table.

    By nature of "arbitrary query", this is vulnerable to injection, use
    only trusted values for `where_query`.

    This will "lazily" fetch the requested fields as needed. For
    example, episodes (which require a separate query per anime) will
    only be fetched if `episode_fields` is provided. Anime status will
    be cached only if status fields are requested.

    :param str where_query: SELECT WHERE query
    :param where_params: parameters for WHERE query
    :param fields: anime fields to get. If :const:`ALL`, get all
        fields. Default is :const:`ALL`.
    :param episode_fields: episode fields to get. If :const:`ALL`, get
        all fields. If empty, don't get episodes. `fields` must contain
        'aid' to get episodes.
    :returns: iterator of Anime
    """
    logger.debug(
        'select(%r, %r, %r, %r, %r)',
        db, where_query, where_params, fields, episode_fields)
    fields = _clean_fields(ANIME_FIELDS, fields)
    if not fields:
        raise ValueError('Fields cannot be empty')
    # If any status-derived field is requested, refresh the cached status
    # for each matching anime before the main query.
    if set(fields) & STATUS_FIELDS.keys():
        cur = db.cursor().execute(
            ANIME_QUERY.format('aid', where_query),
            where_params)
        for row in cur:
            cache_status(db, row[0])
    # Episodes can only be attached when the anime id is being fetched.
    if 'aid' in fields:
        episode_fields = _clean_fields(EPISODE_FIELDS, episode_fields)
    else:
        episode_fields = ()
    with db:
        anime_query = ANIME_QUERY.format(
            ','.join(ANIME_FIELDS[field] for field in fields),
            where_query,
        )
        anime_rows = db.cursor().execute(anime_query, where_params)
        for row in anime_rows:
            anime = Anime(**{
                field: value
                for field, value in zip(fields, row)})
            if episode_fields:
                # One extra query per anime to pull its episodes.
                episode_query = 'SELECT {} FROM episode WHERE aid=?'
                episode_query = episode_query.format(
                    ','.join(EPISODE_FIELDS[field]
                             for field in episode_fields))
                episode_rows = db.cursor().execute(
                    episode_query, (anime.aid,))
                episodes = [
                    Episode(**{
                        field: value
                        for field, value in zip(episode_fields, row)})
                    for row in episode_rows]
                anime.episodes = episodes
            yield anime
python
{ "resource": "" }
q40905
lookup
train
def lookup(
        db,
        aid: int,
        fields: FieldsParam = ALL,
        episode_fields: FieldsParam = (),
) -> Anime:
    """Look up information for a single anime by its aid.

    :param fields: anime fields to get. If ``None``, get all fields.
    :param episode_fields: episode fields to get. If ``None``, get all
        fields. If empty, don't get episodes.
    """
    # Delegate to select() with an aid match and take the first result.
    results = select(
        db, 'aid=?', (aid,),
        fields=fields,
        episode_fields=episode_fields,
    )
    return next(results)
python
{ "resource": "" }
q40906
_get_default_letters
train
def _get_default_letters(model_admin=None):
    """Return the set of letters from the DEFAULT_ALPHABET setting.

    DEFAULT_ALPHABET can be a callable, string, tuple, or list; the
    result is always a set. If a ModelAdmin class with a
    DEFAULT_ALPHABET attribute is passed, that attribute is used
    instead of the project-wide setting.
    """
    from django.conf import settings
    import string

    default_ltrs = string.digits + string.ascii_uppercase
    default_letters = getattr(settings, 'DEFAULT_ALPHABET', default_ltrs)
    if model_admin and hasattr(model_admin, 'DEFAULT_ALPHABET'):
        default_letters = model_admin.DEFAULT_ALPHABET
    if callable(default_letters):
        return set(default_letters())
    # Strings iterate by character, so one branch covers str/tuple/list.
    # The old second `isinstance(..., str)` branch (a Python 2
    # `.decode('utf8')` leftover) was unreachable and has been removed.
    if isinstance(default_letters, (str, tuple, list)):
        return set(default_letters)
python
{ "resource": "" }
q40907
_get_available_letters
train
def _get_available_letters(field_name, queryset):
    """
    Makes a query to the database to return the first character of each
    value of the field and table passed in. Returns a set that
    represents the letters that exist in the database.
    """
    # NOTE(review): this only inspects the *minor* version number, so the
    # first branch is also taken for e.g. Django 2.4 -- confirm which
    # versions the FirstLetter annotation path is meant to cover.
    if django.VERSION[1] <= 4:
        result = queryset.values(field_name).annotate(
            fl=FirstLetter(field_name)
        ).values('fl').distinct()
        return set([res['fl'] for res in result if res['fl'] is not None])
    else:
        from django.db import connection
        qn = connection.ops.quote_name
        db_table = queryset.model._meta.db_table
        # Only backend-quoted identifiers are interpolated here; no user
        # input reaches this SQL string.
        sql = "SELECT DISTINCT UPPER(SUBSTR(%s, 1, 1)) as letter FROM %s" % (qn(field_name), qn(db_table))
        cursor = connection.cursor()
        cursor.execute(sql)
        rows = cursor.fetchall() or ()
        return set([row[0] for row in rows if row[0] is not None])
python
{ "resource": "" }
q40908
Pluggable.get_installed_classes
train
def get_installed_classes(cls):
    """
    Iterates over installed plugins associated with the `entry_point`
    and returns a dictionary of viable ones keyed off of their names.

    A viable installed plugin is one that is both loadable *and* a
    subclass of the Pluggable subclass in question.
    """
    installed_classes = {}
    for entry_point in pkg_resources.iter_entry_points(cls.entry_point):
        try:
            plugin = entry_point.load()
        except ImportError as e:
            logger.error(
                "Could not load plugin %s: %s", entry_point.name, str(e)
            )
            continue
        if not issubclass(plugin, cls):
            # `plugin` is a class, so its own name is plugin.__name__;
            # plugin.__class__.__name__ would log the metaclass name.
            logger.error(
                "Could not load plugin %s:" +
                " %s class is not subclass of %s",
                entry_point.name, plugin.__name__, cls.__name__
            )
            continue
        if not plugin.validate_dependencies():
            logger.error(
                "Could not load plugin %s:" +
                " %s class dependencies not met",
                entry_point.name, plugin.__name__
            )
            continue
        installed_classes[entry_point.name] = plugin
    return installed_classes
python
{ "resource": "" }
q40909
ConfigFileChangeHandler.file_name
train
def file_name(self, event):
    """Return the affected file's basename with any .yaml/.yml
    extension stripped.

    Only a trailing extension is removed, so a name such as
    'app.yaml.bak' is not mangled (the old str.replace() approach
    removed the substring anywhere in the name).
    """
    base = os.path.basename(event.src_path)
    root, ext = os.path.splitext(base)
    if ext in (".yaml", ".yml"):
        return root
    return base
python
{ "resource": "" }
q40910
ConfigFileChangeHandler.on_created
train
def on_created(self, event):
    """Handle a newly created config file.

    Parses the file's yaml contents and creates a new instance of the
    target_class with the results, then fires the on_add callback with
    the new instance. Directories and files that fail to parse are
    ignored (with a logged error).
    """
    if os.path.isdir(event.src_path):
        return
    logger.debug("File created: %s", event.src_path)
    name = self.file_name(event)
    try:
        # Close the handle deterministically; the old bare open() leaked it.
        with open(event.src_path) as config_file:
            # SECURITY: yaml.load() without an explicit Loader can
            # construct arbitrary objects -- consider yaml.safe_load()
            # if these config files are not fully trusted.
            config = yaml.load(config_file)
        result = self.target_class.from_config(name, config)
    except Exception as e:
        logger.exception(
            "Error when loading new config file %s: %s",
            event.src_path, str(e)
        )
        return
    if not result:
        return
    self.on_add(self.target_class, name, result)
python
{ "resource": "" }
q40911
ConfigFileChangeHandler.on_modified
train
def on_modified(self, event):
    """Handle a modified config file.

    The yaml contents are parsed and validated by the target class via
    from_config(); once validated, the new config is passed to the
    on_update callback.
    """
    if os.path.isdir(event.src_path):
        return
    logger.debug("file modified: %s", event.src_path)
    name = self.file_name(event)
    try:
        # Close the handle deterministically; the old bare open() leaked it.
        with open(event.src_path) as config_file:
            # SECURITY: yaml.load() without an explicit Loader can
            # construct arbitrary objects -- consider yaml.safe_load()
            # if these config files are not fully trusted.
            config = yaml.load(config_file)
        # from_config() is called purely for validation here.
        self.target_class.from_config(name, config)
    except Exception:
        logger.exception(
            "Error when loading updated config file %s",
            event.src_path,
        )
        return
    self.on_update(self.target_class, name, config)
python
{ "resource": "" }
q40912
ConfigFileChangeHandler.on_deleted
train
def on_deleted(self, event):
    """Handle a deleted config file by firing the on_delete callback
    with the deleted item's name."""
    logger.debug("file removed: %s", event.src_path)
    self.on_delete(self.target_class, self.file_name(event))
python
{ "resource": "" }
q40913
ConfigFileChangeHandler.on_moved
train
def on_moved(self, event):
    """Treat a move as a delete of the source path followed by a
    create of the destination path."""
    delete_event = events.FileDeletedEvent(event.src_path)
    create_event = events.FileCreatedEvent(event.dest_path)
    self.on_deleted(delete_event)
    self.on_created(create_event)
python
{ "resource": "" }
q40914
command
train
def command(state, args):
    """Search AniDB."""
    parsed = parser.parse_args(args[1:])
    if not parsed.query:
        print('Must supply query.')
        return
    # Compile the query once, search the titles index, and publish the
    # (aid, main title) pairs to the shared results buffer.
    matches = state.titles.search(_compile_re_query(parsed.query))
    rows = [(anime.aid, anime.main_title) for anime in matches]
    state.results['anidb'].set(rows)
    state.results['anidb'].print()
python
{ "resource": "" }
q40915
Session.regenerate
train
def regenerate(self):
    """Regenerate the session id.

    Creates a new session id, moves all information stored under the
    old id to the new one and refreshes the TTL. This is useful for
    preventing session fixation attacks and should be done whenever
    someone uses a login to obtain additional authorization.
    """
    oldhash = self.session_hash
    self.new_session_id()
    try:
        self.rdb.rename(oldhash, self.session_hash)
        self.rdb.expire(self.session_hash, self.ttl)
    except Exception:
        # Best effort: if the old key vanished (e.g. already expired)
        # there is nothing to carry over. Catch Exception rather than
        # a bare `except:` so KeyboardInterrupt/SystemExit still escape.
        pass
python
{ "resource": "" }
q40916
Session.get
train
def get(self, key, default=None):
    """Get a value from the dictionary.

    Args:
        key (str): The dictionary key.
        default (any): Returned when the key is absent or maps to a
            falsy value. Defaults to None.

    Returns:
        str or any: The dictionary value, or ``default``.
    """
    value = self.__getitem__(key)
    return value if value else default
python
{ "resource": "" }
q40917
Session.items
train
def items(self):
    """Return a list of all the key, value pair tuples in the dictionary.

    Returns:
        list of tuples: [(key1,value1),(key2,value2),...,(keyN,valueN)]
    """
    stored = self.rdb.hgetall(self.session_hash)
    return [(key.decode('utf-8'), value.decode('utf-8'))
            for key, value in stored.items()]
python
{ "resource": "" }
q40918
Session.keys
train
def keys(self):
    """Return a list of all keys in the dictionary.

    Returns:
        list of str: [key1,key2,...,keyN]
    """
    # Only the keys are needed, so don't unpack the unused values.
    return [key.decode('utf-8')
            for key in self.rdb.hgetall(self.session_hash).keys()]
python
{ "resource": "" }
q40919
Session.values
train
def values(self):
    """Returns a list of all values in the dictionary.

    Returns:
        list of str: [value1,value2,...,valueN]
    """
    # Only the values are needed, so don't unpack the unused keys.
    return [value.decode('utf-8')
            for value in self.rdb.hgetall(self.session_hash).values()]
python
{ "resource": "" }
q40920
sbesselh1
train
def sbesselh1(x, N):
    """Spherical Hankel of the first kind: h1 = j + i*y."""
    return sbesselj(x, N) + 1j * sbessely(x, N)
python
{ "resource": "" }
q40921
sbesselh2
train
def sbesselh2(x, N):
    """Spherical Hankel of the second kind: h2 = j - i*y."""
    return sbesselj(x, N) - 1j * sbessely(x, N)
python
{ "resource": "" }
q40922
_count
train
def _count(dicts): """ Merge a list of dicts, summing their values. """ counts = defaultdict(int) for d in dicts: for k, v in d.items(): counts[k] += v return counts
python
{ "resource": "" }
q40923
_chunks
train
def _chunks(iterable, n): """ Splits an iterable into chunks of size n. """ iterable = iter(iterable) while True: # store one line in memory, # chain it to an iterator on the rest of the chunk yield chain([next(iterable)], islice(iterable, n-1))
python
{ "resource": "" }
q40924
split_file
train
def split_file(path, chunk_size=50000):
    """Split the specified file into smaller files of chunk_size lines.

    Chunk files are written under /tmp, named '<basename>.<index>', and
    each chunk's path is yielded as soon as it has been written.
    """
    with open(path) as source:
        for index, lines in enumerate(_chunks(source, chunk_size)):
            file_split = '{}.{}'.format(os.path.basename(path), index)
            chunk_path = os.path.join('/tmp', file_split)
            # Use a distinct name for the output handle; the original
            # shadowed the input file variable `f` here.
            with open(chunk_path, 'w') as chunk_file:
                chunk_file.writelines(lines)
            yield chunk_path
python
{ "resource": "" }
q40925
Stanza.add_line
train
def add_line(self, line):
    """Validate and append a line to this stanza's list of lines.

    Invalid lines are logged and dropped rather than raised on.
    """
    if not self.is_valid_line(line):
        # logger.warn() is a deprecated alias of logger.warning().
        logger.warning(
            "Invalid line for %s section: '%s'",
            self.section_name, line
        )
        return
    self.lines.append(line)
python
{ "resource": "" }
q40926
load_stop_words
train
def load_stop_words(stop_word_file):
    """
    Utility function to load stop words from a file and return as a list
    of words.

    @param stop_word_file Path and file name of a file containing stop
        words, one or more per line; lines starting with '#' are
        treated as comments.
    @return list A list of stop words.
    """
    stop_words = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(stop_word_file) as handle:
        for line in handle:
            if line.strip()[0:1] != "#":
                # There may be more than one word per line.
                stop_words.extend(line.split())
    return stop_words
python
{ "resource": "" }
q40927
separate_words
train
def separate_words(text, min_word_return_size):
    """
    Return all words in `text` whose length exceeds
    min_word_return_size characters.

    Numbers are left in phrases but never counted as words, since they
    tend to invalidate the scores of their phrases.

    @param text The text that must be split in to words.
    @param min_word_return_size The minimum no of characters a word
        must have to be included.
    """
    splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
    words = []
    for token in splitter.split(text):
        candidate = token.strip().lower()
        if (len(candidate) > min_word_return_size
                and candidate != ''
                and not is_number(candidate)):
            words.append(candidate)
    return words
python
{ "resource": "" }
q40928
param_redirect
train
def param_redirect(request, viewname, *args):
    """Redirect to `viewname`, preserving URL query parameters if any.

    Parameters stashed on `request.cparam` (if present) are appended to
    the ones taken from the request's query string.
    """
    url = reverse(viewname, PARAMS_URL_CONF, args)
    params = request.GET.urlencode().split('&')
    if hasattr(request, 'cparam'):
        for k, v in request.cparam.items():
            params.append('{0}={1}'.format(k, v))
    new_params = '&'.join(x for x in params if x != '')
    # Truthiness check instead of len() > 0.
    if new_params:
        return HttpResponseRedirect('{0}?{1}'.format(url, new_params))
    return HttpResponseRedirect(url)
python
{ "resource": "" }
q40929
ExecutionContext.kind_as_string
train
def kind_as_string(self, add_colour=True):
    '''Get the type of this context as an optionally coloured string.

    @param add_colour If True, ANSI colour codes will be added.
    @return A string describing the kind of execution context this is.

    '''
    with self._mutex:
        if self.kind == self.PERIODIC:
            result = 'Periodic', ['reset']
        elif self.kind == self.EVENT_DRIVEN:
            result = 'Event-driven', ['reset']
        else:
            # The old code only handled self.OTHER explicitly, leaving
            # `result` unbound (UnboundLocalError) for any unexpected
            # kind value; fall back to 'Other' instead.
            result = 'Other', ['reset']
        if add_colour:
            return utils.build_attr_string(result[1],
                    supported=add_colour) + result[0] + \
                    utils.build_attr_string('reset', supported=add_colour)
        else:
            return result[0]
python
{ "resource": "" }
q40930
ExecutionContext.running_as_string
train
def running_as_string(self, add_colour=True):
    '''Get the state of this context as an optionally coloured string.

    @param add_colour If True, ANSI colour codes will be added.
    @return A string describing this context's running state.

    '''
    with self._mutex:
        if self.running:
            text, attrs = 'Running', ['bold', 'green']
        else:
            text, attrs = 'Stopped', ['reset']
        if not add_colour:
            return text
        prefix = utils.build_attr_string(attrs, supported=add_colour)
        suffix = utils.build_attr_string('reset', supported=add_colour)
        return prefix + text + suffix
python
{ "resource": "" }
q40931
ExecutionContext.kind
train
def kind(self):
    '''The kind of this execution context.'''
    with self._mutex:
        raw_kind = self._obj.get_kind()
    # Map the remote RTC enum onto this class's constants; anything
    # unrecognised is reported as OTHER.
    if raw_kind == RTC.PERIODIC:
        return self.PERIODIC
    if raw_kind == RTC.EVENT_DRIVEN:
        return self.EVENT_DRIVEN
    return self.OTHER
python
{ "resource": "" }
q40932
ExecutionContext.owner_name
train
def owner_name(self):
    '''The name of the RTObject that owns this context.

    Returns an empty string when there is no owner.
    '''
    with self._mutex:
        if not self._owner:
            return ''
        return self._owner.get_component_profile().instance_name
python
{ "resource": "" }
q40933
ExecutionContext.participant_names
train
def participant_names(self):
    '''The names of the RTObjects participating in this context.'''
    with self._mutex:
        names = []
        for participant in self._participants:
            profile = participant.get_component_profile()
            names.append(profile.instance_name)
        return names
python
{ "resource": "" }
q40934
RedisBloomFilter.is_contains
train
def is_contains(self, data):
    """Return whether `data` may already be in the Bloom filter.

    The data is (probably) present only if every hashed bit is 1; a
    single zero bit proves it was never inserted.
    """
    if not data:
        return False
    data = self._compress_by_md5(data)
    # Route to a block: first two hex digits of the digest, mod block_num.
    name = self.key + str(int(data[0:2], 16) % self.block_num)
    for h in self.hash_function:
        local_hash = h.hash(data)
        if not self.server.getbit(name, local_hash):
            # Short-circuit: one zero bit answers "no" definitively,
            # saving the remaining round-trips to the server (the old
            # code kept AND-ing bits after the result was already 0).
            return False
    return True
python
{ "resource": "" }
q40935
RedisBloomFilter.insert
train
def insert(self, data):
    """Set every hashed bit for `data` to 1 in the appropriate block."""
    if not data:
        return
    digest = self._compress_by_md5(data)
    # Route to a block: first two hex digits of the digest, mod block_num.
    block_name = self.key + str(int(digest[0:2], 16) % self.block_num)
    for hasher in self.hash_function:
        self.server.setbit(block_name, hasher.hash(digest), 1)
python
{ "resource": "" }
q40936
HAProxyConfig.generate
train
def generate(self, clusters, version=None):
    """
    Generates HAProxy config file content based on a given list of
    clusters.

    :param clusters: iterable of cluster objects; only clusters with a
        'port' in their haproxy settings get a frontend stanza.
    :param version: optional HAProxy version tuple; peers stanzas are
        only emitted for 1.5.0 and newer.
    :returns: the complete config file text, with sections separated by
        blank lines.
    """
    now = datetime.datetime.now()
    # Header section: a generation-timestamp banner plus the global and
    # defaults stanzas.
    sections = [
        Section(
            "Auto-generated by Lighthouse (%s)" % now.strftime("%c"),
            self.global_stanza,
            self.defaults_stanza
        )
    ]
    # One ACL-based frontend per configured meta cluster.
    meta_stanzas = [
        MetaFrontendStanza(
            name,
            self.meta_clusters[name]["port"],
            self.meta_clusters[name].get("frontend", []),
            members,
            self.bind_address
        )
        for name, members in six.iteritems(self.get_meta_clusters(clusters))
    ]
    frontend_stanzas = [
        FrontendStanza(cluster, self.bind_address)
        for cluster in clusters
        if "port" in cluster.haproxy
    ]
    backend_stanzas = [BackendStanza(cluster) for cluster in clusters]
    # Peers support only exists in HAProxy >= 1.5.0.
    if version and version >= (1, 5, 0):
        peers_stanzas = [PeersStanza(cluster) for cluster in clusters]
    else:
        peers_stanzas = []
    sections.extend([
        Section("Frontend stanzas for ACL meta clusters", *meta_stanzas),
        Section("Per-cluster frontend definitions", *frontend_stanzas),
        Section("Per-cluster backend definitions", *backend_stanzas),
        Section("Per-cluster peer listings", *peers_stanzas),
        Section("Individual proxy definitions", *self.proxy_stanzas),
    ])
    if self.stats_stanza:
        sections.append(
            Section("Listener for stats web interface", self.stats_stanza)
        )
    return "\n\n\n".join([str(section) for section in sections]) + "\n"
python
{ "resource": "" }
q40937
HAProxyConfig.get_meta_clusters
train
def get_meta_clusters(self, clusters):
    """
    Returns a dictionary keyed off of meta cluster names, where the
    values are lists of clusters associated with the meta cluster name.

    If a meta cluster name isn't configured in the `meta_clusters`
    attribute an error is logged and the meta cluster is removed from
    the mapping.
    """
    meta_clusters = collections.defaultdict(list)
    for cluster in clusters:
        if not cluster.meta_cluster:
            continue
        meta_clusters[cluster.meta_cluster].append(cluster)

    unconfigured_meta_clusters = [
        name for name in meta_clusters.keys()
        if name not in self.meta_clusters
    ]
    for name in unconfigured_meta_clusters:
        # Pass `name` as a lazy %-argument; the old call omitted it so
        # the message was logged with a literal, unfilled '%s'.
        logger.error("Meta cluster %s not configured!", name)
        del meta_clusters[name]

    return meta_clusters
python
{ "resource": "" }
q40938
GenAvatar._text_position
train
def _text_position(size, text, font): """ Returns the left-top point where the text should be positioned. """ width, height = font.getsize(text) left = (size - width) / 2.0 top = (size - height) / 3.0 return left, top
python
{ "resource": "" }
q40939
Animal.breeding_male_location_type
train
def breeding_male_location_type(self):
    """This attribute defines whether a male's current location is the
    same as the breeding cage to which it belongs.

    Used to colour breeding table entries so that male mice which are
    currently in a different cage can quickly be identified. The
    location is relative to the first breeding cage an animal is
    assigned to.
    """
    try:
        first_breeding_cage = self.breeding_males.all()[0].Cage
        if int(first_breeding_cage) == int(self.Cage):
            return "resident-breeder"
        return "non-resident-breeder"
    except (IndexError, ValueError):
        # No breeding cage on record, or a cage value that isn't numeric.
        return "unknown-breeder"
python
{ "resource": "" }
q40940
Animal.breeding_female_location_type
train
def breeding_female_location_type(self):
    """This attribute defines whether a female's current location is
    the same as the breeding cage to which it belongs.

    Used to colour breeding table entries so that mice which are
    currently in a different cage can quickly be identified. The
    location is relative to the first breeding cage an animal is
    assigned to.
    """
    try:
        first_breeding_cage = self.breeding_females.all()[0].Cage
        if int(first_breeding_cage) == int(self.Cage):
            return "resident-breeder"
        return "non-resident-breeder"
    except (IndexError, ValueError):
        # No breeding cage on record, or a cage value that isn't numeric.
        return "unknown-breeder"
python
{ "resource": "" }
q40941
Animal.save
train
def save(self, *args, **kwargs):
    """Save the animal, forcing Alive=False once a Death date is set.

    Entering only a cause of death does not mark the animal dead.
    *args/**kwargs are forwarded so Django save() options such as
    `using=` or `update_fields=` keep working (the old signature
    silently dropped them).
    """
    if self.Death:
        self.Alive = False
    super(Animal, self).save(*args, **kwargs)
python
{ "resource": "" }
q40942
Breeding.duration
train
def duration(self):
    """Return the breeding cage's duration in days.

    Measured from Start to the date of inactivation (End) if set,
    otherwise to today.
    """
    end_point = self.End if self.End else datetime.date.today()
    return (end_point - self.Start).days
python
{ "resource": "" }
q40943
Breeding.unweaned
train
def unweaned(self):
    """Queryset of this breeding cage's alive, not-yet-weaned animals."""
    return Animal.objects.filter(
        Breeding=self, Weaned__isnull=True, Alive=True)
python
{ "resource": "" }
q40944
Breeding.male_breeding_location_type
train
def male_breeding_location_type(self):
    """Whether the breeding male currently lives in this breeding cage.

    Used to colour breeding table entries so that male mice which are
    currently in a different cage can quickly be identified.
    """
    same_cage = int(self.Male.all()[0].Cage) == int(self.Cage)
    return "resident breeder" if same_cage else "non-resident breeder"
python
{ "resource": "" }
q40945
Breeding.save
train
def save(self, *args, **kwargs):
    """Save the breeding cage, forcing Active=False once End is set.

    *args/**kwargs are forwarded so Django save() options keep working
    (the old signature silently dropped them).

    NOTE(review): the old docstring also described copying the cage's
    Cage/Rack values onto the Male/Females breeders, but that logic was
    commented out; the dead code has been removed and only the Active
    override remains.
    """
    if self.End:
        self.Active = False
    super(Breeding, self).save(*args, **kwargs)
python
{ "resource": "" }
q40946
matplotlibensure
train
def matplotlibensure(func):
    """Decorator that raises ImportError (with installation advice from
    `msg`) when matplotlib is not installed."""
    @wraps(func)
    def wrap(*args, **kwargs):
        if not MPLINSTALLED:
            raise ImportError(msg)
        # Forward keyword arguments too; the old wrapper dropped them.
        return func(*args, **kwargs)
    return wrap
python
{ "resource": "" }
q40947
to_ts
train
def to_ts(date: datetime.date) -> float:
    """Convert a date to its UTC-midnight POSIX timestamp.

    >>> to_ts(datetime.date(2001, 1, 2))
    978393600.0
    """
    midnight = datetime.datetime(
        date.year, date.month, date.day,
        tzinfo=datetime.timezone.utc)
    return midnight.timestamp()
python
{ "resource": "" }
q40948
to_date
train
def to_date(ts: float) -> datetime.date:
    """Convert a POSIX timestamp to its UTC calendar date.

    >>> to_date(978393600.0)
    datetime.date(2001, 1, 2)
    """
    moment = datetime.datetime.fromtimestamp(
        ts, tz=datetime.timezone.utc)
    return moment.date()
python
{ "resource": "" }
q40949
get_response
train
def get_response(sock, buffer_size=4096):
    """
    Helper method for retrieving a response from a given socket.

    Returns two values in a tuple, the first is the response line and
    the second is any extra data after the newline.
    """
    response = ""
    extra = ""
    while True:
        try:
            chunk = sock.recv(buffer_size)
            if not chunk:
                # The peer closed the connection; no more data will ever
                # arrive. Stop here -- the old code kept calling recv()
                # forever when the buffered data contained no newline.
                break
            response += chunk
        except socket.error as e:
            # EAGAIN/EINTR are transient conditions on non-blocking
            # sockets; anything else is a real error.
            if e.errno not in [errno.EAGAIN, errno.EINTR]:
                raise
        if not response:
            break
        if "\n" in response:
            response, extra = response.split("\n", 1)
            break
    # NOTE(review): this accumulates into a str, but recv() returns
    # bytes on Python 3 -- confirm this helper is only used on Python 2
    # or behind a str-returning socket wrapper.
    return response, extra
python
{ "resource": "" }
q40950
RTCTree.add_name_server
train
def add_name_server(self, server, filter=None, dynamic=None):
    '''Parse a name server, adding its contents to the tree.

    @param server The address of the name server, in standard address
                  format. e.g. 'localhost', 'localhost:2809',
                  '59.7.0.1'.
    @param filter Restrict the parsed objects to only those in this
                  path. For example, setting filter to
                  [['/', 'localhost', 'host.cxt', 'comp1.rtc']] will
                  prevent 'comp2.rtc' in the same naming context from
                  being parsed. Defaults to no filtering.
    @param dynamic Override the tree-wide dynamic setting. If not
                   provided, the value given when the tree was created
                   will be used.

    '''
    # None sentinel instead of a shared mutable [] default, and identity
    # comparison (`is None`) rather than `== None`.
    if filter is None:
        filter = []
    if dynamic is None:
        dynamic = self._dynamic
    self._parse_name_server(server, filter, dynamic=dynamic)
python
{ "resource": "" }
q40951
RTCTree.iterate
train
def iterate(self, func, args=None, filter=[]):
    '''Call a function on the root node, and recursively all its
    children.

    This is a depth-first iteration.

    @param func The function to call. Its declaration must be
                'def blag(node, args)', where 'node' is the current
                node in the iteration and args is the value of
                @ref args.
    @param args Extra arguments to pass to the function at each
                iteration. Pass multiple arguments in as a tuple.
    @param filter A list of filters to apply before calling func for
                  each node in the iteration. If the filter is not
                  True, @ref func will not be called for that node.
                  Each filter entry should be a string, representing
                  one of the is_* properties (is_component, etc), or a
                  function object.
    @return The results of the calls to @ref func in a list.

    '''
    # Delegate straight to the root node's depth-first iterate.
    return self._root.iterate(func, args, filter)
python
{ "resource": "" }
q40952
RTCTree.load_servers_from_env
train
def load_servers_from_env(self, filter=[], dynamic=None):
    '''Load the name servers environment variable and parse each server
    in the list.

    @param filter Restrict the parsed objects to only those in this
                  path. For example, setting filter to
                  [['/', 'localhost', 'host.cxt', 'comp1.rtc']] will
                  prevent 'comp2.rtc' in the same naming context from
                  being parsed.
    @param dynamic Override the tree-wide dynamic setting. If not
                   provided, the value given when the tree was created
                   will be used.

    '''
    # Identity comparison for the None sentinel (was `== None`).
    if dynamic is None:
        dynamic = self._dynamic
    if NAMESERVERS_ENV_VAR not in os.environ:
        return
    # Servers are ';'-separated; drop empty entries.
    servers = [s for s in os.environ[NAMESERVERS_ENV_VAR].split(';') if s]
    self._parse_name_servers(servers, filter, dynamic)
python
{ "resource": "" }
q40953
parse_port
train
def parse_port(port_obj, owner):
    '''Create a port object of the correct type.

    The concrete class is chosen based on the port.port_type property
    of port_obj's profile.

    @param port_obj The CORBA PortService object to wrap.
    @param owner The owner of this port. Should be a Component object
                 or None.
    @return The created port object.

    '''
    profile = port_obj.get_port_profile()
    props = utils.nvlist_to_dict(profile.properties)
    port_type = props['port.port_type']
    if port_type == 'DataInPort':
        return DataInPort(port_obj, owner)
    if port_type == 'DataOutPort':
        return DataOutPort(port_obj, owner)
    if port_type == 'CorbaPort':
        return CorbaPort(port_obj, owner)
    # Unknown types fall back to the generic Port wrapper.
    return Port(port_obj, owner)
python
{ "resource": "" }
q40954
Port.connect
train
def connect(self, dests=[], name=None, id='', props={}):
    '''Connect this port to other ports.

    After the connection has been made, a delayed reparse of the
    connections for this and the destination port will be triggered.

    @param dests A list of the destination Port objects. Must be
                 provided.
    @param name The name of the connection. If None, a suitable default
                will be created based on the names of the two ports.
    @param id The ID of this connection. If None, one will be generated
              by the RTC implementation.
    @param props Properties of the connection. Required values depend
                 on the type of the two ports being connected.
    @raises IncompatibleDataPortConnectionPropsError,
            FailedToConnectError

    '''
    with self._mutex:
        if self.porttype == 'DataInPort' or self.porttype == 'DataOutPort':
            # Each requested property must appear in the comma-separated
            # list of values supported by this port and every dest (or
            # the supported list must include 'any').
            for prop in props:
                if prop in self.properties:
                    if props[prop] not in [x.strip() for x in self.properties[prop].split(',')] and \
                            'any' not in self.properties[prop].lower():
                        # Invalid property selected
                        raise exceptions.IncompatibleDataPortConnectionPropsError
                for d in dests:
                    if prop in d.properties:
                        if props[prop] not in [x.strip() for x in d.properties[prop].split(',')] and \
                                'any' not in d.properties[prop].lower():
                            # Invalid property selected
                            raise exceptions.IncompatibleDataPortConnectionPropsError
        if not name:
            # NOTE(review): there is no separator between self.name and
            # the first destination name -- confirm the intended
            # connection-name format.
            name = self.name + '_'.join([d.name for d in dests])
        props = utils.dict_to_nvlist(props)
        profile = RTC.ConnectorProfile(name, id,
                [self._obj] + [d._obj for d in dests], props)
        return_code, profile = self._obj.connect(profile)
        if return_code != RTC.RTC_OK:
            raise exceptions.FailedToConnectError(return_code)
        # Refresh the cached connection lists on both ends.
        self.reparse_connections()
        for d in dests:
            d.reparse_connections()
python
{ "resource": "" }
q40955
Port.disconnect_all
train
def disconnect_all(self):
    '''Disconnect all connections to this port.'''
    with self._mutex:
        # Ask the remote port object to drop each known connection,
        # then refresh the cached connection list.
        for connection in self.connections:
            self.object.disconnect(connection.id)
        self.reparse_connections()
python
{ "resource": "" }
q40956
Port.get_connection_by_dest
train
def get_connection_by_dest(self, dest):
    '''DEPRECATED. Search for a connection between this and another
    port.

    Returns the first matching connection, or None.
    '''
    with self._mutex:
        matches = (c for c in self.connections
                   if c.has_port(self) and c.has_port(dest))
        return next(matches, None)
python
{ "resource": "" }
q40957
Port.get_connections_by_dest
train
def get_connections_by_dest(self, dest):
    '''Search for all connections between this and another port.'''
    with self._mutex:
        return [c for c in self.connections
                if c.has_port(self) and c.has_port(dest)]
python
{ "resource": "" }
q40958
Port.get_connections_by_dests
train
def get_connections_by_dests(self, dests):
    '''Search for all connections involving this and all other ports.'''
    with self._mutex:
        found = []
        for conn in self.connections:
            # Skip connections that do not involve this port at all.
            if not conn.has_port(self):
                continue
            # Keep the connection if it involves any of the given ports.
            if any(conn.has_port(d) for d in dests):
                found.append(conn)
        return found
python
{ "resource": "" }
q40959
Port.get_connection_by_id
train
def get_connection_by_id(self, id):
    '''Search for a connection on this port by its ID.'''
    with self._mutex:
        return next((c for c in self.connections if c.id == id), None)
python
{ "resource": "" }
q40960
Port.get_connection_by_name
train
def get_connection_by_name(self, name):
    '''Search for a connection to or from this port by name.'''
    with self._mutex:
        return next((c for c in self.connections if c.name == name), None)
python
{ "resource": "" }
q40961
Port.connections
train
def connections(self):
    '''A list of connections to or from this port.

    This list will be created at the first reference to this property.
    This means that the first reference may be delayed by CORBA calls,
    but others will return quickly (unless a delayed reparse has been
    triggered).

    '''
    with self._mutex:
        if not self._connections:
            # Lazily build Connection wrappers from the CORBA profiles.
            profiles = self._obj.get_connector_profiles()
            self._connections = [Connection(p, self) for p in profiles]
        return self._connections
python
{ "resource": "" }
q40962
DataPort.connect
train
def connect(self, dests=None, name=None, id='', props=None):
    '''Connect this port to other DataPorts.

    After the connection has been made, a delayed reparse of the
    connections for this and the destination port will be triggered.

    @param dests A list of the destination Port objects. Must be provided.
    @param name The name of the connection. If None, a suitable default
                will be created based on the names of the two ports.
    @param id The ID of this connection. If None, one will be generated by
              the RTC implementation.
    @param props Properties of the connection. Suitable defaults will be
                 set for required values if they are not already present.
    @raises WrongPortTypeError

    '''
    # Fresh defaults per call: the previous dests=[]/props={} defaults
    # were shared mutable objects (classic Python pitfall).
    if dests is None:
        dests = []
    if props is None:
        props = {}
    # Data ports can only connect to opposite data ports
    with self._mutex:
        new_props = props.copy()
        ptypes = [d.porttype for d in dests]
        if self.porttype == 'DataInPort':
            if 'DataOutPort' not in ptypes:
                raise exceptions.WrongPortTypeError
        if self.porttype == 'DataOutPort':
            if 'DataInPort' not in ptypes:
                raise exceptions.WrongPortTypeError
        # Fill in sensible defaults for required connector properties
        # the caller did not specify.
        if 'dataport.dataflow_type' not in new_props:
            new_props['dataport.dataflow_type'] = 'push'
        if 'dataport.interface_type' not in new_props:
            new_props['dataport.interface_type'] = 'corba_cdr'
        if 'dataport.subscription_type' not in new_props:
            new_props['dataport.subscription_type'] = 'new'
        if 'dataport.data_type' not in new_props:
            new_props['dataport.data_type'] = \
                self.properties['dataport.data_type']
        super(DataPort, self).connect(dests=dests, name=name, id=id,
                props=new_props)
python
{ "resource": "" }
q40963
CorbaPort.connect
train
def connect(self, dests=None, name=None, id='', props=None):
    '''Connect this port to other CorbaPorts.

    After the connection has been made, a delayed reparse of the
    connections for this and the destination port will be triggered.

    @param dests A list of the destination Port objects. Must be provided.
    @param name The name of the connection. If None, a suitable default
                will be created based on the names of the two ports.
    @param id The ID of this connection. If None, one will be generated by
              the RTC implementation.
    @param props Properties of the connection. Suitable defaults will be
                 set for required values if they are not already present.
    @raises WrongPortTypeError, MismatchedInterfacesError,
            MismatchedPolarityError

    '''
    # BUG FIX: the default dests=None was previously iterated directly
    # ('for d in dests'), raising TypeError whenever connect() was called
    # without an explicit dests argument. Normalise it to a fresh list
    # (this also avoids a shared mutable default for props).
    if dests is None:
        dests = []
    if props is None:
        props = {}
    with self._mutex:
        # Corba ports can only connect to corba ports of the opposite
        # polarity
        for d in dests:
            if not d.porttype == 'CorbaPort':
                raise exceptions.WrongPortTypeError
        # Check the interfaces and their respective polarities match
        if self.interfaces:
            for d in dests:
                if not d.interfaces:
                    raise exceptions.MismatchedInterfacesError
            for intf in self.interfaces:
                for d in dests:
                    match = d.get_interface_by_instance_name(
                            intf.instance_name)
                    if not match:
                        raise exceptions.MismatchedInterfacesError
                    if intf.polarity == match.polarity:
                        # Polarity should be opposite
                        raise exceptions.MismatchedPolarityError
        else:
            for d in dests:
                if d.interfaces:
                    raise exceptions.MismatchedInterfacesError
        # Make the connection
        new_props = props.copy()
        if 'port.port_type' not in new_props:
            new_props['port.port_type'] = 'CorbaPort'
        super(CorbaPort, self).connect(dests=dests, name=name, id=id,
                props=new_props)
python
{ "resource": "" }
q40964
CorbaPort.get_interface_by_instance_name
train
def get_interface_by_instance_name(self, name):
    '''Get an interface of this port by instance name.'''
    with self._mutex:
        return next((i for i in self.interfaces
                     if i.instance_name == name), None)
python
{ "resource": "" }
q40965
CorbaPort.interfaces
train
def interfaces(self):
    '''The list of interfaces this port provides or uses.

    This list will be created at the first reference to this property.
    This means that the first reference may be delayed by CORBA calls,
    but others will return quickly (unless a delayed reparse has been
    triggered).

    '''
    with self._mutex:
        if not self._interfaces:
            # Lazily wrap each CORBA interface profile.
            port_profile = self._obj.get_port_profile()
            self._interfaces = [SvcInterface(i)
                                for i in port_profile.interfaces]
        return self._interfaces
python
{ "resource": "" }
q40966
SvcInterface.polarity_as_string
train
def polarity_as_string(self, add_colour=True):
    '''Get the polarity of this interface as a string.

    @param add_colour If True, ANSI colour codes will be added to the
                      string.
    @return A string describing the polarity of this interface.

    '''
    with self._mutex:
        # NOTE(review): if self.polarity is neither PROVIDED nor REQUIRED,
        # 'result' is never bound and the code below raises
        # UnboundLocalError. Presumably polarity is always one of the two
        # constants — confirm.
        if self.polarity == self.PROVIDED:
            result = 'Provided', ['reset']
        elif self.polarity == self.REQUIRED:
            result = 'Required', ['reset']
        if add_colour:
            # Wrap the text in terminal attribute codes; the trailing
            # 'reset' restores the default attributes.
            return utils.build_attr_string(result[1],
                    supported=add_colour) + \
                    result[0] + utils.build_attr_string('reset',
                    supported=add_colour)
        else:
            return result[0]
python
{ "resource": "" }
q40967
Connection.disconnect
train
def disconnect(self):
    '''Disconnect this connection.'''
    with self._mutex:
        if not self.ports:
            raise exceptions.NotConnectedError
        # Participants that are not in the tree appear as
        # ('Unknown', None); use the first participant with a real Port
        # object to issue the disconnect (there must be at least one).
        owner = next((port for _, port in self.ports if port), None)
        if not owner:
            raise exceptions.UnknownConnectionOwnerError
        owner.object.disconnect(self.id)
python
{ "resource": "" }
q40968
Connection.has_port
train
def has_port(self, port):
    '''Return True if this connection involves the given Port object.

    @param port The Port object to search for in this connection's ports.

    '''
    with self._mutex:
        target = port.object
        for _, p in self.ports:
            # A None entry means the port's owner was not found in the
            # tree, so there is nothing to compare against.
            if p and target._is_equivalent(p.object):
                return True
        return False
python
{ "resource": "" }
q40969
Connection.ports
train
def ports(self):
    '''The list of ports involved in this connection.

    The result is a list of tuples, (port name, port object). Each port
    name is a full path to the port (e.g. /localhost/Comp0.rtc:in) if
    this Connection object is owned by a Port, which is in turn owned by
    a Component in the tree. Otherwise, only the port's name will be
    used (in which case it will be the full port name, which will
    include the component name, e.g. 'ConsoleIn0.in'). The full path can
    be used to find ports in the tree.

    If, for some reason, the owner node of a port cannot be found, that
    entry in the list will contain ('Unknown', None). This typically
    means that a component's name has been clobbered on the name server.

    This list will be created at the first reference to this property.
    This means that the first reference may be delayed by CORBA calls,
    but others will return quickly (unless a delayed reparse has been
    triggered).

    '''
    def has_port(node, args):
        # Tree-iteration callback: return the node if it owns the given
        # CORBA port reference, else None (filtered out below).
        if node.get_port_by_ref(args):
            return node
        return None
    with self._mutex:
        if not self._ports:
            self._ports = []
            for p in self._obj.ports:
                # My owner's owner is a component node in the tree
                if self.owner and self.owner.owner:
                    root = self.owner.owner.root
                    # Search the whole tree for the component that owns
                    # this participant port.
                    owner_nodes = [n for n in root.iterate(has_port,
                        args=p, filter=['is_component']) if n]
                    if not owner_nodes:
                        # Owner not found (e.g. clobbered name server
                        # entry).
                        self._ports.append(('Unknown', None))
                    else:
                        port_owner = owner_nodes[0]
                        port_owner_path = port_owner.full_path_str
                        port_name = p.get_port_profile().name
                        # Strip the 'instance_name.' prefix from the raw
                        # port name, if present.
                        prefix = port_owner.instance_name + '.'
                        if port_name.startswith(prefix):
                            port_name = port_name[len(prefix):]
                        self._ports.append((port_owner_path + ':' + \
                                port_name, parse_port(p, self.owner.owner)))
                else:
                    # No tree context available; fall back to the raw
                    # profile name.
                    self._ports.append((p.get_port_profile().name,
                        parse_port(p, None)))
        return self._ports
python
{ "resource": "" }
q40970
Song.analysis
train
def analysis(self):
    """Get musical analysis of the song using the librosa library
    """
    # Return the memoised analysis if it has already been computed.
    if self._analysis is not None:
        return self._analysis

    if self.cache_dir is not None:
        # Cache file is named after the song's checksum.
        path = os.path.join(self.cache_dir, self.checksum)
        try:
            # refresh_cache forces a recompute by taking the same code
            # path as a cache miss.
            if self.refresh_cache:
                raise IOError
            with open(path + '.pickle', 'rb') as pickle_file:
                self._analysis = pickle.load(pickle_file)
        except IOError:
            # Cache miss (or forced refresh): analyse and persist.
            self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
            with open(path + '.pickle', 'wb') as pickle_file:
                pickle.dump(self._analysis, pickle_file, pickle.HIGHEST_PROTOCOL)
    else:
        # No cache directory configured; always analyse from scratch.
        self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
    return self._analysis
python
{ "resource": "" }
q40971
multiple_pups
train
def multiple_pups(request):
    """This view is used to enter multiple animals at the same time.

    It will generate a form containing animal information and a number of
    mice. It is intended to create several identical animals with the same
    attributes.
    """
    if request.method == "POST":
        form = MultipleAnimalForm(request.POST)
        if form.is_valid():
            count = form.cleaned_data['count']
            # Create `count` identical Animal rows from one set of form
            # data.
            for i in range(count):
                animal = Animal(
                    Strain = form.cleaned_data['Strain'],
                    Background = form.cleaned_data['Background'],
                    Breeding = form.cleaned_data['Breeding'],
                    Cage = form.cleaned_data['Cage'],
                    Rack = form.cleaned_data['Rack'],
                    Rack_Position = form.cleaned_data['Rack_Position'],
                    Genotype = form.cleaned_data['Genotype'],
                    Gender = form.cleaned_data['Gender'],
                    Born = form.cleaned_data['Born'],
                    Weaned = form.cleaned_data['Weaned'],
                    Backcross = form.cleaned_data['Backcross'],
                    Generation = form.cleaned_data['Generation'],
                    Father = form.cleaned_data['Father'],
                    Mother = form.cleaned_data['Mother'],
                    Markings = form.cleaned_data['Markings'],
                    Notes = form.cleaned_data['Notes'])
                animal.save()
            return HttpResponseRedirect( reverse('strain-list') )
    else:
        form = MultipleAnimalForm()
    # Reached on GET (empty form) or on an invalid POST (bound form with
    # errors).
    return render(request, "animal_multiple_form.html", {"form":form,})
python
{ "resource": "" }
q40972
multiple_breeding_pups
train
def multiple_breeding_pups(request, breeding_id):
    """This view is used to enter multiple animals at the same time from a
    breeding cage.

    It will generate a form containing animal information and a number of
    mice. It is intended to create several identical animals with the same
    attributes. This view requires an input of a breeding_id to generate
    the correct form.
    """
    breeding = Breeding.objects.get(id=breeding_id)
    if request.method == "POST":
        form = MultipleBreedingAnimalForm(request.POST)
        if form.is_valid():
            count = form.cleaned_data['count']
            # Every pup inherits strain/background/cage/etc. from the
            # breeding cage; only gender and dates come from the form.
            for i in range(count):
                animal = Animal(
                    Strain = breeding.Strain,
                    Background = breeding.background,
                    Breeding = breeding,
                    Cage = breeding.Cage,
                    Rack = breeding.Rack,
                    Rack_Position = breeding.Rack_Position,
                    Genotype = breeding.genotype,
                    Gender = form.cleaned_data['Gender'],
                    Born = form.cleaned_data['Born'],
                    Weaned = form.cleaned_data['Weaned'],
                    Backcross = breeding.backcross,
                    Generation = breeding.generation)
                animal.save()
            return HttpResponseRedirect( breeding.get_absolute_url() )
    else:
        form = MultipleBreedingAnimalForm()
    # Reached on GET (empty form) or on an invalid POST (bound form with
    # errors).
    return render(request, "animal_multiple_form.html", {"form":form, "breeding":breeding})
python
{ "resource": "" }
q40973
date_archive_year
train
def date_archive_year(request):
    """This view will generate a table of the number of mice born on an
    annual basis.

    This view is associated with the url name archive-home, and returns a
    dictionary mapping each year to an animal count."""
    oldest_animal = Animal.objects.filter(Born__isnull=False).order_by('Born')[0]
    first_year = oldest_animal.Born.year
    this_year = datetime.date.today().year
    # One count query per calendar year from the oldest birth to now.
    archive_dict = {
        year: Animal.objects.filter(Born__year=year).count()
        for year in range(first_year, this_year + 1)
    }
    return render(request, 'animal_archive.html', {"archive_dict": archive_dict})
python
{ "resource": "" }
q40974
todo
train
def todo(request):
    """This view generates a summary of the todo lists.

    The login restricted view passes lists for ear tagging, genotyping and
    weaning and passes them to the template todo.html."""
    # Animals past weaning age that still have no ear tag (MouseID).
    eartag_list = Animal.objects.filter(Born__lt=(datetime.date.today() - datetime.timedelta(days=settings.WEAN_AGE))).filter(MouseID__isnull=True, Alive=True)
    # Live animals past genotyping age whose genotype is still
    # undetermined ('N.D.' or containing a '?').
    genotype_list = Animal.objects.filter(Q(Genotype='N.D.')|Q(Genotype__icontains='?')).filter(Alive=True, Born__lt=(datetime.date.today() - datetime.timedelta(days=settings.GENOTYPE_AGE)))
    wean = datetime.date.today() - datetime.timedelta(days=settings.WEAN_AGE)
    # Live, unweaned animals past weaning age. Strain pk 2 is excluded —
    # presumably a strain that is not weaned on schedule; confirm.
    wean_list = Animal.objects.filter(Born__lt=wean).filter(Weaned=None,Alive=True).exclude(Strain=2).order_by('Strain','Background','Rack','Cage')
    return render(request, 'todo.html', {'eartag_list':eartag_list, 'wean_list':wean_list, 'genotype_list':genotype_list})
python
{ "resource": "" }
q40975
AnimalListAlive.get_context_data
train
def get_context_data(self, **kwargs):
    """Extend the template context with list_type set to 'Alive'."""
    ctx = super(AnimalListAlive, self).get_context_data(**kwargs)
    ctx['list_type'] = 'Alive'
    return ctx
python
{ "resource": "" }
q40976
BreedingList.get_context_data
train
def get_context_data(self, **kwargs):
    """Extend the template context with breeding_type set to 'Active'."""
    ctx = super(BreedingList, self).get_context_data(**kwargs)
    ctx['breeding_type'] = "Active"
    return ctx
python
{ "resource": "" }
q40977
BreedingSearch.get_context_data
train
def get_context_data(self, **kwargs):
    """Extend the template context with breeding_type set to 'Search',
    the query string, and the matching Breeding queryset (empty list when
    no query was given)."""
    search_term = self.request.GET.get('q', '')
    ctx = super(BreedingSearch, self).get_context_data(**kwargs)
    ctx['breeding_type'] = "Search"
    ctx['query'] = search_term
    # Case-insensitive cage-number match; empty query yields no results.
    ctx['results'] = (Breeding.objects.filter(
        Cage__icontains=search_term).distinct() if search_term else [])
    return ctx
python
{ "resource": "" }
q40978
CrossTypeAnimalList.get_context_data
train
def get_context_data(self, **kwargs):
    """Extend the template context with list_type taken from the URL's
    breeding_type keyword argument."""
    ctx = super(CrossTypeAnimalList, self).get_context_data(**kwargs)
    ctx['list_type'] = self.kwargs['breeding_type']
    return ctx
python
{ "resource": "" }
q40979
VeterinaryHome.get_context_data
train
def get_context_data(self, **kwargs):
    '''Adds to the context all issues, conditions and treatments.'''
    ctx = super(VeterinaryHome, self).get_context_data(**kwargs)
    ctx['medical_issues'] = MedicalIssue.objects.all()
    ctx['medical_conditions'] = MedicalCondition.objects.all()
    ctx['medical_treatments'] = MedicalTreatment.objects.all()
    return ctx
python
{ "resource": "" }
q40980
Track.read_frames
train
def read_frames(self, n, channels=None):
    """Read ``n`` frames from the track, starting with the current frame

    :param integer n: Number of frames to read
    :param integer channels: Number of channels to return (default is
        number of channels in track)
    :returns: Next ``n`` frames from the track, starting with
        ``current_frame``
    :rtype: numpy array
    """
    if channels is None:
        channels = self.channels

    if channels == 1:
        out = np.zeros(n)
    elif channels == 2:
        out = np.zeros((n, 2))
    else:
        print "Input needs to be 1 or 2 channels"
        return
    if n > self.remaining_frames():
        # Clamp the request to what is actually left in the track.
        print "Trying to retrieve too many frames!"
        print "Asked for", n
        n = self.remaining_frames()
        print "Returning", n
    # NOTE(review): 'out' was allocated with the *original* n above. If n
    # was clamped, the stereo->stereo branch below fills only the first n
    # rows and returns a longer, zero-padded array, while the other
    # branches rebind 'out' and return exactly n frames. Confirm whether
    # callers rely on this inconsistency.

    if self.channels == 1 and channels == 1:
        out = self.sound.read_frames(n)
    elif self.channels == 1 and channels == 2:
        # Duplicate the mono channel into two columns.
        frames = self.sound.read_frames(n)
        out = np.vstack((frames.copy(), frames.copy())).T
    elif self.channels == 2 and channels == 1:
        # Downmix stereo to mono by averaging the channels.
        frames = self.sound.read_frames(n)
        out = np.mean(frames, axis=1)
    elif self.channels == 2 and channels == 2:
        out[:n, :] = self.sound.read_frames(n)

    self.current_frame += n

    return out
python
{ "resource": "" }
q40981
Track.current_frame
train
def current_frame(self, n):
    """Sets current frame to ``n``

    :param integer n: Frame to set to ``current_frame``
    """
    # Seek the underlying sound file first, then mirror the new position
    # in the cached attribute.
    self.sound.seek(n)
    self._current_frame = n
python
{ "resource": "" }
q40982
Track.range_as_mono
train
def range_as_mono(self, start_sample, end_sample):
    """Get a range of frames as 1 combined channel

    :param integer start_sample: First frame in range
    :param integer end_sample: Last frame in range (exclusive)
    :returns: Track frames in range as 1 combined channel
    :rtype: 1d numpy array of length ``end_sample - start_sample``
    """
    # Remember the playhead so it can be restored afterwards.
    saved_frame = self.current_frame
    self.current_frame = start_sample
    raw = self.read_frames(end_sample - start_sample)
    if self.channels == 2:
        # Downmix stereo to mono by averaging the channels.
        mono = np.mean(raw, axis=1)
    elif self.channels == 1:
        mono = raw
    else:
        raise IOError("Input audio must have either 1 or 2 channels")
    self.current_frame = saved_frame
    return mono
python
{ "resource": "" }
q40983
Track.loudest_time
train
def loudest_time(self, start=0, duration=0):
    """Find the loudest time in the window given by start and duration

    Returns frame number in context of entire track, not just the window.

    :param integer start: Start frame
    :param integer duration: Number of frames to consider from start
    :returns: Frame number of loudest frame
    :rtype: integer
    """
    if duration == 0:
        # Default: search to the end of the track.
        duration = self.sound.nframes
    self.current_frame = start
    arr = self.read_frames(duration)
    # get the frame of the maximum amplitude
    # different names for the same thing...
    # max_amp_sample = a.argmax(axis=0)[a.max(axis=0).argmax()]
    # NOTE(review): argmax() flattens the array, and dividing by 2
    # converts the flat index back to a frame index only when the data
    # has exactly 2 interleaved channels; for mono data this would halve
    # the index. Confirm read_frames always returns stereo here.
    max_amp_sample = int(np.floor(arr.argmax()/2)) + start
    return max_amp_sample
python
{ "resource": "" }
q40984
Track.zero_crossing_before
train
def zero_crossing_before(self, n):
    """Find nearest zero crossing in waveform before frame ``n``"""
    # ``n`` is a time in seconds; convert it to a sample index.
    n_in_samples = int(n * self.samplerate)
    # Search at most one second of audio before the target sample.
    search_start = max(n_in_samples - self.samplerate, 0)
    frame = zero_crossing_last(
        self.range_as_mono(search_start, n_in_samples)) + search_start
    # Convert the sample index back to seconds.
    return frame / float(self.samplerate)
python
{ "resource": "" }
q40985
Track.zero_crossing_after
train
def zero_crossing_after(self, n):
    """Find nearest zero crossing in waveform after frame ``n``"""
    # ``n`` is a time in seconds; convert it to a sample index.
    n_in_samples = int(n * self.samplerate)
    # Search at most one second of audio after the target sample,
    # clamped to the end of the track.
    search_end = min(n_in_samples + self.samplerate, self.duration)
    frame = zero_crossing_first(
        self.range_as_mono(n_in_samples, search_end)) + n_in_samples
    # Convert the sample index back to seconds.
    return frame / float(self.samplerate)
python
{ "resource": "" }
q40986
Track.label
train
def label(self, t):
    """Get the label of the song at a given time in seconds
    """
    if self.labels is None:
        return None
    # Walk the (time-ordered) labels and keep the last one that starts
    # at or before t.
    current = None
    for lab in self.labels:
        if lab.time > t:
            break
        current = lab
    return current.name if current is not None else None
python
{ "resource": "" }
q40987
ConfigurationSet.set_param
train
def set_param(self, param, value):
    '''Set a parameter in this configuration set.'''
    # Update the local cache, then push the whole set to the remote
    # configuration object as a name/value list.
    self.data[param] = value
    self._object.configuration_data = utils.dict_to_nvlist(self.data)
python
{ "resource": "" }
q40988
ConfigurationSet._reload
train
def _reload(self, object, description, data): '''Reload the configuration set data.''' self._object = object self._description = description self._data = data
python
{ "resource": "" }
q40989
Program.partial_steps_data
train
def partial_steps_data(self, start=0):
    """
    Iterates 5 steps from start position and provides tuple for packing
    into buffer. Yields (0, 0) for any step that doesn't exist.

    :param start: Position to start from (typically 0 or 5)
    :yield: (setting, duration)
    """
    emitted = 0
    if len(self._prog_steps) >= start:
        # Yield the real steps available in this window.
        for step in self._prog_steps[start:start + 5]:
            yield step.raw_data
            emitted += 1
    # Pad so callers always receive exactly five tuples.
    while emitted < 5:
        yield (0, 0)
        emitted += 1
python
{ "resource": "" }
q40990
Load.set_load_resistance
train
def set_load_resistance(self, resistance):
    """
    Changes load to resistance mode and sets resistance value.
    Rounds to nearest 0.01 Ohms

    :param resistance: Load Resistance in Ohms (0-500 ohms)
    :return: None
    """
    # The device works in units of 0.01 ohm.
    raw = int(round(resistance * 100))
    if raw < 0 or raw > 50000:
        raise ValueError("Load Resistance should be between 0-500 ohms")
    self._load_mode = self.SET_TYPE_RESISTANCE
    self._load_value = raw
    self.__set_parameters()
python
{ "resource": "" }
q40991
Load.set_load_power
train
def set_load_power(self, power_watts):
    """
    Changes load to power mode and sets power value.
    Rounds to nearest 0.1W.

    :param power_watts: Power in Watts (0-200)
    :return:
    """
    # The device works in units of 0.1 W.
    raw = int(round(power_watts * 10))
    if raw < 0 or raw > 2000:
        raise ValueError("Load Power should be between 0-200 W")
    self._load_mode = self.SET_TYPE_POWER
    self._load_value = raw
    self.__set_parameters()
python
{ "resource": "" }
q40992
Load.set_load_current
train
def set_load_current(self, current_amps):
    """
    Changes load to current mode and sets current value.
    Rounds to nearest mA.

    :param current_amps: Current in Amps (0-30A)
    :return: None
    """
    # The device works in units of 1 mA.
    raw = int(round(current_amps * 1000))
    if raw < 0 or raw > 30000:
        raise ValueError("Load Current should be between 0-30A")
    self._load_mode = self.SET_TYPE_CURRENT
    self._load_value = raw
    self.__set_parameters()
python
{ "resource": "" }
q40993
Check.last_n_results
train
def last_n_results(self, n):
    """
    Helper method for returning a set number of the previous check
    results.
    """
    total = len(self.results)
    # islice handles deques, which don't support negative-index slicing.
    return list(itertools.islice(self.results, total - n, total))
python
{ "resource": "" }
q40994
Check.apply_config
train
def apply_config(self, config):
    """
    Sets attributes based on the given config.

    Also adjusts the `results` deque to either expand (padding itself
    with False results) or contract (by removing the oldest results)
    until it matches the required length.
    """
    self.rise = int(config["rise"])
    self.fall = int(config["fall"])
    self.apply_check_config(config)
    window = max(self.rise, self.fall)
    if self.results.maxlen == window:
        return
    history = list(self.results)
    if len(history) > window:
        # Drop the oldest results.
        history = history[len(history) - window:]
    elif len(history) < window:
        # Pad the front with failures.
        history = [False] * (window - len(history)) + history
    self.results = deque(history, maxlen=window)
python
{ "resource": "" }
q40995
Check.validate_config
train
def validate_config(cls, config):
    """
    Validates that required config entries are present.

    Each check requires a `host`, `port`, `rise` and `fall` to be
    configured. The rise and fall variables are integers denoting how many
    times a check must pass before being considered passing and how many
    times a check must fail before being considered failing.
    """
    for key in ("rise", "fall"):
        if key not in config:
            raise ValueError("No '%s' configured" % key)
    # Delegate check-specific validation to the subclass hook.
    cls.validate_check_config(config)
python
{ "resource": "" }
q40996
DCSVectorizer.vectorize
train
def vectorize(self, docs):
    """
    Vectorizes a list of documents using their DCS representations.

    :param docs: list of documents to vectorize.
    :returns: a (n_docs, n_concepts) numpy array of concept weights,
              normalized by the global maximum weight.
    """
    doc_core_sems, all_concepts = self._extract_core_semantics(docs)
    # Map each concept to its column once, instead of an O(n) list.index
    # lookup per (doc, concept) pair. setdefault keeps the index of the
    # first occurrence, matching list.index semantics.
    concept_index = {}
    for j, con in enumerate(all_concepts):
        concept_index.setdefault(con, j)
    vecs = np.zeros((len(docs), len(all_concepts)))
    for i, core_sems in enumerate(doc_core_sems):
        for con, weight in core_sems:
            vecs[i, concept_index[con]] = weight
    # Normalize by the global maximum weight.
    # NOTE(review): if every weight is zero this divides by zero —
    # presumably weights are always positive; confirm upstream.
    return vecs / np.max(vecs)
python
{ "resource": "" }
q40997
DCSVectorizer._process_doc
train
def _process_doc(self, doc): """ Applies DCS to a document to extract its core concepts and their weights. """ # Prep doc = doc.lower() tagged_tokens = [(t, penn_to_wordnet(t.tag_)) for t in spacy(doc, tag=True, parse=False, entity=False)] tokens = [t for t, tag in tagged_tokens] term_concept_map = self._disambiguate_doc(tagged_tokens) concept_weights = self._weight_concepts(tokens, term_concept_map) # Compute core semantics lexical_chains = self._lexical_chains(doc, term_concept_map) core_semantics = self._core_semantics(lexical_chains, concept_weights) core_concepts = [c for chain in core_semantics for c in chain] return [(con, concept_weights[con]) for con in core_concepts]
python
{ "resource": "" }
q40998
DCSVectorizer._disambiguate_pos
train
def _disambiguate_pos(self, terms, pos): """ Disambiguates a list of tokens of a given PoS. """ # Map the terms to candidate concepts # Consider only the top 3 most common senses candidate_map = {term: wn.synsets(term, pos=pos)[:3] for term in terms} # Filter to unique concepts concepts = set(c for cons in candidate_map.values() for c in cons) # Back to list for consistent ordering concepts = list(concepts) sim_mat = self._similarity_matrix(concepts) # Final map of terms to their disambiguated concepts map = {} # This is terrible # For each term, select the candidate concept # which has the maximum aggregate similarity score against # all other candidate concepts of all other terms sharing the same PoS for term, cons in candidate_map.items(): # Some words may not be in WordNet # and thus have no candidate concepts, so skip if not cons: continue scores = [] for con in cons: i = concepts.index(con) scores_ = [] for term_, cons_ in candidate_map.items(): # Some words may not be in WordNet # and thus have no candidate concepts, so skip if term == term_ or not cons_: continue cons_idx = [concepts.index(c) for c in cons_] top_sim = max(sim_mat[i,cons_idx]) scores_.append(top_sim) scores.append(sum(scores_)) best_idx = np.argmax(scores) map[term] = cons[best_idx] return map
python
{ "resource": "" }
q40999
DCSVectorizer._similarity_matrix
train
def _similarity_matrix(self, concepts): """ Computes a semantic similarity matrix for a set of concepts. """ n_cons = len(concepts) sim_mat = np.zeros((n_cons, n_cons)) for i, c1 in enumerate(concepts): for j, c2 in enumerate(concepts): # Just build the lower triangle if i >= j: sim_mat[i,j] = self._semsim(c1, c2) if i != j else 1. return sim_mat + sim_mat.T - np.diag(sim_mat.diagonal())
python
{ "resource": "" }