_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q41300
Episode.title
train
def title(self) -> str:
    """Return the episode's Japanese title, falling back to the first title."""
    japanese = [t for t in self.titles if t.lang == 'ja']
    if japanese:
        return japanese[0].title
    # No Japanese title available; use whichever title comes first.
    return self.titles[0].title
python
{ "resource": "" }
q41301
TitleSearcher.search
train
def search(self, query: 're.Pattern') -> 'Iterable[_WorkTitles]':
    """Search titles using a compiled RE query.

    Yields at most one WorkTitles per work, even when several of a
    work's titles match the query.
    """
    titles: 'Titles'
    for titles in self._titles_list:
        title: 'AnimeTitle'
        for title in titles.titles:
            if query.search(title.title):
                yield WorkTitles(
                    aid=titles.aid,
                    main_title=_get_main_title(titles.titles),
                    titles=[t.title for t in titles.titles],
                )
                # Stop after the first match so each work is yielded once.
                # The original trailing `continue` was dead code and caused
                # duplicate results for works with multiple matching titles.
                break
python
{ "resource": "" }
q41302
_get_packages
train
def _get_packages():  # type: () -> List[Package]
    """Convert `pkg_resources.working_set` into a list of `Package` objects.

    Packages are sorted case-insensitively by their string form.

    :return: list
    """
    ordered = sorted(pkg_resources.working_set, key=lambda p: str(p).lower())
    return [Package(pkg_obj=pkg) for pkg in ordered]
python
{ "resource": "" }
q41303
_get_whitelist_licenses
train
def _get_whitelist_licenses(config_path): # type: (str) -> List[str] """Get whitelist license names from config file. :param config_path: str :return: list """ whitelist_licenses = [] try: print('config path', config_path) with open(config_path) as config: whitelist_licenses = [line.rstrip() for line in config] except IOError: # pragma: no cover print('Warning: No {} file was found.'.format(LICENSE_CHECKER_CONFIG_NAME)) return whitelist_licenses
python
{ "resource": "" }
q41304
run_license_checker
train
def run_license_checker(config_path):  # type: (str) -> None
    """Print a table of installed packages and report restricted licenses.

    Each installed package whose license is not on the whitelist loaded
    from ``config_path`` counts as a warning.

    :param config_path: str
    :return:
    """
    whitelist = _get_whitelist_licenses(config_path)
    table = PrintTable(ROW_HEADERS)
    restricted = []
    for package in _get_packages():
        is_allowed = package.license in whitelist
        table.add_row(
            (package.name, package.version, package.license, str(is_allowed)))
        if not is_allowed:
            restricted.append(package)
    print(table)
    print('{} RESTRICTED LICENSES DETECTED'.format(len(restricted)))
python
{ "resource": "" }
q41305
Search.include_fields
train
def include_fields(self, *args):
    r"""
    Include fields is the fields that you want to be returned when
    searching. These are in addition to the fields that are always
    included below.

    :param args: items passed in will be turned into a list
    :returns: :class:`Search`

    >>> bugzilla.search_for.include_fields("flags")

    The following fields are always included in search:
    'version', 'id', 'summary', 'status', 'op_sys',
    'resolution', 'product', 'component', 'platform'
    """
    self._includefields.extend(args)
    return self
python
{ "resource": "" }
q41306
Search.product
train
def product(self, *products):
    r"""
    When search is called, it will limit the results to items in a
    Product.

    :param product: items passed in will be turned into a list
    :returns: :class:`Search`
    """
    self._product.extend(products)
    return self
python
{ "resource": "" }
q41307
Search.timeframe
train
def timeframe(self, start, end):
    r"""
    When you want to search bugs for a certain time frame.

    Falsy bounds are skipped, so either end of the range is optional.

    :param start:
    :param end:
    :returns: :class:`Search`
    """
    for key, value in (('chfieldfrom', start), ('chfieldto', end)):
        if value:
            self._time_frame[key] = value
    return self
python
{ "resource": "" }
q41308
Search.search
train
def search(self):
    r"""
    Call the Bugzilla endpoint that will do the search. It will take
    the information used in other methods on the Search object and
    build up the query string. If no bugs are found then an empty list
    is returned.

    >>> bugs = bugzilla.search_for\
    ...               .keywords("checkin-needed")\
    ...               .include_fields("flags")\
    ...               .search()
    """
    params = {}
    # Time-frame criteria (chfieldfrom / chfieldto) apply to every query form.
    params.update(self._time_frame.items())
    if self._includefields:
        params['include_fields'] = list(self._includefields)
    if self._bug_numbers:
        # Explicit bug ids: fetch each bug individually via bug/<id>.
        bugs = []
        for bug in self._bug_numbers:
            result = self._bugsy.request('bug/%s' % bug, params=params)
            bugs.append(Bug(self._bugsy, **result['bugs'][0]))
        return bugs
    else:
        # General search: translate each accumulated criterion into its
        # Bugzilla query-string parameter.
        if self._component:
            params['component'] = list(self._component)
        if self._product:
            params['product'] = list(self._product)
        if self._keywords:
            params['keywords'] = list(self._keywords)
        if self._assigned:
            params['assigned_to'] = list(self._assigned)
        if self._summaries:
            params['short_desc_type'] = 'allwordssubstr'
            params['short_desc'] = list(self._summaries)
        if self._whiteboard:
            # NOTE(review): this reuses 'short_desc_type' for whiteboard
            # criteria, overwriting the value set for summaries above —
            # confirm that is intended when both are present.
            params['short_desc_type'] = 'allwordssubstr'
            params['whiteboard'] = list(self._whiteboard)
        if self._change_history['fields']:
            params['chfield'] = self._change_history['fields']
        if self._change_history.get('value', None):
            params['chfieldvalue'] = self._change_history['value']
        try:
            results = self._bugsy.request('bug', params=params)
        except Exception as e:
            # Assumes the transport's exceptions carry .msg and .code —
            # anything else would raise AttributeError here.
            raise SearchException(e.msg, e.code)
        return [Bug(self._bugsy, **bug) for bug in results['bugs']]
python
{ "resource": "" }
q41309
sync_remote_to_local
train
def sync_remote_to_local(force="no"):
    """
    Replace your local db with your remote one.

    (The original docstring said the reverse; the code exports the remote
    database, downloads the dump, and imports it into the local install.)

    Example: sync_remote_to_local:force=yes
    """
    assert "local_wp_dir" in env, "Missing local_wp_dir in env"

    # Ask for confirmation unless explicitly forced with force=yes.
    if force != "yes":
        message = "This will replace your local database with your "\
            "remote, are you sure [y/n]"
        answer = prompt(message, "y")
        if answer != "y":
            logger.info("Sync stopped")
            return

    init_tasks()  # Bootstrap fabrik

    # Millisecond timestamp keeps the dump filename unique per run.
    remote_file = "sync_%s.sql" % int(time.time()*1000)
    remote_path = "/tmp/%s" % remote_file

    # Export the remote database to a temp dump on the server.
    with env.cd(paths.get_current_path()):
        env.run("wp db export %s" % remote_path)

    local_wp_dir = env.local_wp_dir
    local_path = "/tmp/%s" % remote_file

    # Download sync file
    get(remote_path, local_path)

    # Import the dump into the local WordPress install.
    with lcd(local_wp_dir):
        elocal("wp db import %s" % local_path)

    # Cleanup temp dumps on both ends.
    env.run("rm %s" % remote_path)
    elocal("rm %s" % local_path)
python
{ "resource": "" }
q41310
get_priority_rules
train
def get_priority_rules(db) -> Iterable[PriorityRule]:
    """Yield the file priority rules stored in the file_priority table."""
    cursor = db.cursor()
    cursor.execute('SELECT id, regexp, priority FROM file_priority')
    yield from (PriorityRule(*row) for row in cursor)
python
{ "resource": "" }
q41311
delete_priority_rule
train
def delete_priority_rule(db, rule_id: int) -> None:
    """Delete the file priority rule with the given id, committing the change."""
    # `with db` commits on success / rolls back on error.
    with db:
        db.cursor().execute(
            'DELETE FROM file_priority WHERE id=?', (rule_id,))
python
{ "resource": "" }
q41312
get_files
train
def get_files(conn, aid: int) -> AnimeFiles:
    """Return the cached AnimeFiles for an anime.

    :raises ValueError: if there is no cache entry for ``aid``
    """
    with conn:
        row = conn.cursor().execute(
            'SELECT anime_files FROM cache_anime WHERE aid=?',
            (aid,)).fetchone()
        if row is None:
            raise ValueError('No cached files')
        return AnimeFiles.from_json(row[0])
python
{ "resource": "" }
q41313
AnimeFiles.add
train
def add(self, filename):
    """Try to add a file; ignored unless it matches the episode regexp."""
    name = os.path.basename(filename)
    found = self.regexp.search(name)
    if found is None:
        return
    episode = int(found.group('ep'))
    self.by_episode[episode].add(filename)
python
{ "resource": "" }
q41314
AnimeFiles.available_string
train
def available_string(self, episode):
    """Return a comma-separated preview of episodes later than ``episode``.

    At most EPISODES_TO_SHOW episodes are listed; '...' marks truncation.
    """
    later = [ep for ep in self if ep > episode]
    shown = later[:self.EPISODES_TO_SHOW]
    result = ','.join(map(str, shown))
    if len(later) > self.EPISODES_TO_SHOW:
        result += '...'
    return result
python
{ "resource": "" }
q41315
AnimeFiles.from_json
train
def from_json(cls, string):
    """Create AnimeFiles from a JSON string with 'regexp' and 'files' keys."""
    data = json.loads(string)
    return cls(data['regexp'], data['files'])
python
{ "resource": "" }
q41316
_get_exception_class_from_status_code
train
def _get_exception_class_from_status_code(status_code):
    """
    Utility function that accepts a status code, and spits out a reference
    to the correct exception class to raise.

    :param str status_code: The status code to return an exception class for.
    :rtype: PetfinderAPIError or None
    :returns: The appropriate PetfinderAPIError subclass. If the status code
        is not an error, return ``None``.
    """
    # '100' is the API's success code: not an error, so no exception.
    if status_code == '100':
        return None
    # Unrecognized codes fall back to the catch-all UNKNOWN error class.
    return STATUS_CODE_MAPPING.get(status_code) or STATUS_CODE_MAPPING['UNKNOWN']
python
{ "resource": "" }
q41317
MongodbPipeline.open_spider
train
def open_spider(self, spider):
    """
    Initialize the Mongodb client and resolve the target db/collection.
    """
    # An empty url string means "connect via host/port"; anything else is
    # treated as a full connection URL.
    self.client = (
        pymongo.MongoClient(self.host, self.port)
        if self.url == "" else
        pymongo.MongoClient(self.url)
    )
    self.db_name, self.collection_name = self._replace_placeholder(spider)
    self.db = self.client[self.db_name]
python
{ "resource": "" }
q41318
PlexRequest.construct_url
train
def construct_url(self):
    """Construct a full plex request URI, with `params`.

    Joins the client base URL, the request path and any positional
    params, then appends the keyword `query` (a dict or a sequence of
    pairs) as an encoded query string, dropping pairs whose value is
    None.
    """
    segments = [self.path]
    segments.extend([str(x) for x in self.params])
    url = self.client.base_url + '/'.join(x for x in segments if x)

    query = self.kwargs.get('query')
    if query:
        # Dict -> List.  isinstance (rather than `type(query) is dict`)
        # so dict subclasses like OrderedDict are handled correctly.
        if isinstance(query, dict):
            query = query.items()
        # Remove items with `None` value
        query = [(k, v) for (k, v) in query if v is not None]
        # Encode query, append to URL
        url += '?' + urlencode(query)

    return url
python
{ "resource": "" }
q41319
load_config
train
def load_config():
    """Load configuration file containing API KEY and other settings.

    Creates a placeholder config (pointing at the API-key signup page)
    if no config file exists yet.

    :rtype: dict
    """
    configfile = get_configfile()
    if not os.path.exists(configfile):
        placeholder = {
            'apikey': 'GET KEY AT: https://www.filemail.com/apidoc/ApiKey.aspx'
            }
        save_config(placeholder)
    with open(configfile, 'rb') as f:
        return json.load(f)
python
{ "resource": "" }
q41320
save_config
train
def save_config(config):
    """Save configuration file to users data location.

    - Linux: ~/.local/share/pyfilemail
    - OSX: ~/Library/Application Support/pyfilemail
    - Windows: C:\\\Users\\\{username}\\\AppData\\\Local\\\pyfilemail

    :param config: configuration data to serialize as JSON
    """
    configfile = get_configfile()
    if not os.path.exists(configfile):
        configdir = os.path.dirname(configfile)
        if not os.path.exists(configdir):
            os.makedirs(configdir)

    # Open in text mode: json.dump writes str, which a binary-mode file
    # rejects on Python 3 (the original used 'wb').
    with open(configfile, 'w') as f:
        json.dump(config, f, indent=2)
python
{ "resource": "" }
q41321
get_configfile
train
def get_configfile():
    """Return full path to configuration file.

    - Linux: ~/.local/share/pyfilemail
    - OSX: ~/Library/Application Support/pyfilemail
    - Windows: C:\\\Users\\\{username}\\\AppData\\\Local\\\pyfilemail

    :rtype: str
    """
    data_dir = appdirs.AppDirs('pyfilemail').user_data_dir
    return os.path.join(data_dir, 'pyfilemail.cfg')
python
{ "resource": "" }
q41322
get_flake8_options
train
def get_flake8_options(config_dir='.'):  # type: (str) -> List[str]
    """Checks for local config overrides for `flake8` and add them in the
    correct `flake8` `options` format.

    :param config_dir:
    :return: List[str]
    """
    has_local = FLAKE8_CONFIG_NAME in os.listdir(config_dir)
    path = FLAKE8_CONFIG_NAME if has_local else DEFAULT_FLAKE8_CONFIG_PATH
    return ['--config={}'.format(path)]
python
{ "resource": "" }
q41323
get_license_checker_config_path
train
def get_license_checker_config_path(config_dir='.'):  # type: (str) -> str
    """Checks for local config overrides for license checker, if not found
    it returns the package default.

    (The original type comment said ``List[str]``; the function returns a
    plain path string.)

    :param config_dir:
    :return: str
    """
    if LICENSE_CHECKER_CONFIG_NAME in os.listdir(config_dir):
        return LICENSE_CHECKER_CONFIG_NAME
    return DEFAULT_LICENSE_CHECKER_CONFIG_PATH
python
{ "resource": "" }
q41324
get_pylint_options
train
def get_pylint_options(config_dir='.'):  # type: (str) -> List[str]
    """Checks for local config overrides for `pylint` and add them in the
    correct `pylint` `options` format.

    :param config_dir:
    :return: List [str]
    """
    has_local = PYLINT_CONFIG_NAME in os.listdir(config_dir)
    rcfile = PYLINT_CONFIG_NAME if has_local else DEFAULT_PYLINT_CONFIG_PATH
    return ['--rcfile={}'.format(rcfile)]
python
{ "resource": "" }
q41325
wait_on_any
train
def wait_on_any(*events, **kwargs):
    """
    Helper method for waiting for any of the given threading events to
    be set.

    The standard threading lib doesn't include any mechanism for waiting
    on more than one event at a time so we have to monkey patch the events
    so that their `set()` and `clear()` methods fire a callback we can use
    to determine how a composite event should react.

    :param events: threading.Event instances to wait on
    :param kwargs: only ``timeout`` (seconds, or None) is honored
    """
    timeout = kwargs.get("timeout")
    # The composite event mirrors the logical OR of all given events.
    composite_event = threading.Event()
    # Fast path: if any event is already set there is nothing to wait for.
    if any([event.is_set() for event in events]):
        return

    def on_change():
        # Recompute the composite state whenever any event flips.
        if any([event.is_set() for event in events]):
            composite_event.set()
        else:
            composite_event.clear()

    def patch(original):
        # Wrap an event's set/clear so on_change runs after each call.
        def patched():
            original()
            on_change()
        return patched

    # NOTE(review): the patches are never undone, so the events remain
    # monkey-patched after this call returns — confirm callers expect that.
    for event in events:
        event.set = patch(event.set)
        event.clear = patch(event.clear)

    wait_on_event(composite_event, timeout=timeout)
python
{ "resource": "" }
q41326
wait_on_event
train
def wait_on_event(event, timeout=None):
    """
    Waits on a single threading Event, with an optional timeout.

    This is here for compatibility reasons as python 2 can't reliably wait
    on an event without a timeout and python 3 doesn't define a `maxint`.

    :param event: threading.Event to wait on
    :param timeout: seconds to wait, or None to wait indefinitely
    """
    if timeout is not None:
        event.wait(timeout)
        return
    if six.PY2:
        # Thanks to a bug in python 2's threading lib, we can't simply call
        # .wait() with no timeout since it would wind up ignoring signals.
        while not event.is_set():
            event.wait(sys.maxint)
    else:
        event.wait()
python
{ "resource": "" }
q41327
HAProxy.validate_config
train
def validate_config(cls, config):
    """
    Validates that a config file path, a control socket file path and a
    pid file path are all present in the HAProxy config.

    Raises ValueError on the first missing entry; returns the config
    unchanged when everything checks out.
    """
    required = (
        ("config_file", "No config file path given"),
        ("socket_file", "No control socket path given"),
        ("pid_file", "No PID file path given"),
    )
    for key, error_message in required:
        if key not in config:
            raise ValueError(error_message)
    if "stats" in config and "port" not in config["stats"]:
        raise ValueError("Stats interface defined, but no port given")
    if "proxies" in config:
        cls.validate_proxies_config(config["proxies"])
    return config
python
{ "resource": "" }
q41328
HAProxy.validate_proxies_config
train
def validate_proxies_config(cls, proxies):
    """
    Specific config validation method for the "proxies" portion of a
    config.  Checks that each proxy defines a port and a list of
    `upstreams`, and that each upstream entry has a host and port defined.
    """
    for name, proxy in six.iteritems(proxies):
        if "port" not in proxy:
            raise ValueError("No port defined for proxy %s" % name)
        if "upstreams" not in proxy:
            raise ValueError(
                "No upstreams defined for proxy %s" % name
            )
        for upstream in proxy["upstreams"]:
            for field in ("host", "port"):
                if field not in upstream:
                    raise ValueError(
                        "No %s defined for upstream in proxy %s"
                        % (field, name)
                    )
python
{ "resource": "" }
q41329
HAProxy.apply_config
train
def apply_config(self, config):
    """
    Constructs HAProxyConfig and HAProxyControl instances based on the
    contents of the config.

    This is mostly a matter of constructing the configuration stanzas.
    """
    self.haproxy_config_path = config["config_file"]
    global_stanza = Stanza("global")
    global_stanza.add_lines(config.get("global", []))
    # The admin-level stats socket is what lets nodes be synced on the fly.
    global_stanza.add_lines([
        "stats socket %s mode 600 level admin" % config["socket_file"],
        "stats timeout 2m"
    ])
    defaults_stanza = Stanza("defaults")
    defaults_stanza.add_lines(config.get("defaults", []))
    # One proxy stanza per configured proxy; options and bind_address
    # are optional.
    proxy_stanzas = [
        ProxyStanza(
            name, proxy["port"], proxy["upstreams"],
            proxy.get("options", []), proxy.get("bind_address")
        )
        for name, proxy in six.iteritems(config.get("proxies", {}))
    ]
    # Optional built-in stats web interface with per-direction timeouts.
    stats_stanza = None
    if "stats" in config:
        stats_stanza = StatsStanza(
            config["stats"]["port"], config["stats"].get("uri", "/")
        )
        for timeout in ("client", "connect", "server"):
            if timeout in config["stats"].get("timeouts", {}):
                stats_stanza.add_line(
                    "timeout %s %d" % (
                        timeout,
                        config["stats"]["timeouts"][timeout]
                    )
                )
    self.config_file = HAProxyConfig(
        global_stanza, defaults_stanza,
        proxy_stanzas=proxy_stanzas,
        stats_stanza=stats_stanza,
        meta_clusters=config.get("meta_clusters", {}),
        bind_address=config.get("bind_address")
    )
    self.control = HAProxyControl(
        config["config_file"], config["socket_file"], config["pid_file"],
    )
python
{ "resource": "" }
q41330
HAProxy.sync_file
train
def sync_file(self, clusters):
    """
    Generates new HAProxy config file content and writes it to the file
    at `haproxy_config_path`.

    If a restart is not necessary the nodes configured in HAProxy will be
    synced on the fly.  If a restart *is* necessary, one will be triggered.
    """
    logger.info("Updating HAProxy config file.")
    # On-the-fly node sync only makes sense when no restart is pending.
    if not self.restart_required:
        self.sync_nodes(clusters)
    version = self.control.get_version()
    new_content = self.config_file.generate(clusters, version=version)
    with open(self.haproxy_config_path, "w") as f:
        f.write(new_content)
    if self.restart_required:
        with self.restart_lock:
            self.restart()
python
{ "resource": "" }
q41331
HAProxy.restart
train
def restart(self):
    """
    Tells the HAProxy control object to restart the process.

    If it's been fewer than `restart_interval` seconds since the previous
    restart, it will wait until the interval has passed.  This staves off
    situations where the process is constantly restarting, as it is
    possible to drop packets for a short interval while doing so.
    """
    remaining = self.last_restart + self.restart_interval - time.time()
    if remaining > 0:
        time.sleep(remaining)
    self.control.restart()
    self.last_restart = time.time()
    self.restart_required = False
python
{ "resource": "" }
q41332
HAProxy.get_current_nodes
train
def get_current_nodes(self, clusters):
    """
    Returns two dictionaries, the current nodes and the enabled nodes.

    The current_nodes dictionary is keyed off of the cluster name and
    values are a list of nodes known to HAProxy.

    The enabled_nodes dictionary is also keyed off of the cluster name
    and values are list of *enabled* nodes, i.e. the same values as
    current_nodes but limited to servers currently taking traffic.

    Sets `restart_required` when a new cluster or node appears.
    """
    current_nodes = self.control.get_active_nodes()
    enabled_nodes = collections.defaultdict(list)
    for cluster in clusters:
        if not cluster.nodes:
            continue
        if cluster.name not in current_nodes:
            logger.debug(
                "New cluster '%s' added, restart required.",
                cluster.name
            )
            self.restart_required = True
        # Build the known-server-name set once per cluster instead of
        # rebuilding the list for every node (was O(nodes * servers)).
        known_names = {
            current_node["svname"]
            for current_node in current_nodes.get(cluster.name, [])
        }
        for node in cluster.nodes:
            if node.name not in known_names:
                logger.debug(
                    "New node added to cluster '%s', restart required.",
                    cluster.name
                )
                self.restart_required = True
            enabled_nodes[cluster.name].append(node.name)
    return current_nodes, enabled_nodes
python
{ "resource": "" }
q41333
Service.validate_check_configs
train
def validate_check_configs(cls, config):
    """
    Config validation specific to the health check options.

    Verifies that checks are defined along with an interval, and calls
    out to the `Check` class to make sure each individual check's config
    is valid.
    """
    if "checks" not in config:
        raise ValueError("No checks defined.")
    checks = config["checks"]
    if "interval" not in checks:
        raise ValueError("No check interval defined.")
    # "interval" lives alongside the check definitions, so skip it here.
    for check_name, check_config in six.iteritems(checks):
        if check_name != "interval":
            Check.from_config(check_name, check_config)
python
{ "resource": "" }
q41334
Service.apply_config
train
def apply_config(self, config):
    """
    Takes a given validated config dictionary and sets an instance
    attribute for each one, then refreshes ports and checks via
    `update_ports` and `update_checks`.
    """
    checks_config = config["checks"]
    self.host = config.get("host", "127.0.0.1")
    default_ports = [config.get("port")]
    self.configured_ports = config.get("ports", default_ports)
    self.discovery = config["discovery"]
    self.metadata = config.get("metadata", {})
    self.update_ports()
    self.check_interval = checks_config["interval"]
    self.update_checks(checks_config)
python
{ "resource": "" }
q41335
Service.update_ports
train
def update_ports(self):
    """
    Sets the `ports` attribute to the set of valid port values set in
    the configuration.  Values that can't be parsed as ints are logged
    and skipped.
    """
    valid_ports = set()
    for raw_port in self.configured_ports:
        try:
            valid_ports.add(int(raw_port))
        except ValueError:
            logger.error("Invalid port value: %s", raw_port)
    self.ports = valid_ports
python
{ "resource": "" }
q41336
Service.update_checks
train
def update_checks(self, check_configs):
    """
    Maintains the values in the `checks` attribute's dictionary.  Each key
    in the dictionary is a port, and each value is a nested dictionary
    mapping each check's name to the Check instance.

    This method makes sure the attribute reflects all of the properly
    configured checks and ports.  Removing no-longer-configured ports is
    left to the `run_checks` method.
    """
    for check_name, check_config in six.iteritems(check_configs):
        # The shared "interval" entry is a setting, not a check definition.
        if check_name == "interval":
            continue
        for port in self.ports:
            try:
                check = Check.from_config(check_name, check_config)
                check.host = self.host
                check.port = port
                self.checks[port][check_name] = check
            except ValueError as e:
                # Misconfigured checks are logged and skipped rather than
                # aborting the whole update.
                logger.error(
                    "Error when configuring check '%s' for service %s: %s",
                    check_name, self.name, str(e)
                )
                continue
python
{ "resource": "" }
q41337
Service.run_checks
train
def run_checks(self):
    """
    Iterates over the configured ports and runs the checks on each one.

    Returns a two-element tuple: the first is the set of ports that
    transitioned from down to up, the second is the set of ports that
    transitioned from up to down.

    Also handles the case where a check for a since-removed port is run,
    marking the port as down regardless of the check's result and removing
    the check(s) for the port.
    """
    came_up = set()
    went_down = set()
    for port in self.ports:
        checks = self.checks[port].values()
        if not checks:
            logger.warn("No checks defined for self: %s", self.name)
        for check in checks:
            check.run()
        # A port is only "up" when every one of its checks passes.
        checks_pass = all([check.passing for check in checks])
        # is_up[port] is presumably None before the first check; both
        # None->up and None->down count as transitions here.
        if self.is_up[port] in (False, None) and checks_pass:
            came_up.add(port)
            self.is_up[port] = True
        elif self.is_up[port] in (True, None) and not checks_pass:
            went_down.add(port)
            self.is_up[port] = False
    # Ports that disappeared from the config: report them as down and
    # drop their checks (set() copy avoids mutating while iterating).
    for unused_port in set(self.checks.keys()) - self.ports:
        went_down.add(unused_port)
        del self.checks[unused_port]
    return came_up, went_down
python
{ "resource": "" }
q41338
AnimalAdmin.mark_sacrificed
train
def mark_sacrificed(self, request, queryset):
    """An admin action for marking several animals as sacrificed.

    This action sets the selected animals as Alive=False, Death=today
    and Cause_of_Death as sacrificed.  To use other paramters, mice muse
    be individually marked as sacrificed.  This admin action also shows
    as the output the number of mice sacrificed."""
    count = queryset.update(
        Alive=False,
        Death=datetime.date.today(),
        Cause_of_Death='Sacrificed')
    subject = "1 animal was" if count == 1 else "%s animals were" % count
    self.message_user(
        request, "%s successfully marked as sacrificed." % subject)
python
{ "resource": "" }
q41339
BreedingAdmin.mark_deactivated
train
def mark_deactivated(self, request, queryset):
    """An admin action for marking several cages as inactive.

    This action sets the selected cages as Active=False and End=today.
    This admin action also shows as the output the number of cages
    deactivated."""
    count = queryset.update(Active=False, End=datetime.date.today())
    subject = "1 cage was" if count == 1 else "%s cages were" % count
    self.message_user(
        request, "%s successfully marked as deactivated." % subject)
python
{ "resource": "" }
q41340
make_unicode
train
def make_unicode(s, encoding='utf-8', encoding_errors='strict'):
    """
    Return the unicode version of an input.

    Byte strings are decoded with the given encoding; non-string inputs
    are converted via str() first.  (Python 2 only: relies on the
    `unicode`/`basestring` builtins.)
    """
    if isinstance(s, unicode):
        return s
    if isinstance(s, basestring):
        return unicode(s, encoding, encoding_errors)
    return unicode(str(s), encoding, encoding_errors)
python
{ "resource": "" }
q41341
html_escape
train
def html_escape(s, encoding='utf-8', encoding_errors='strict'):
    """
    Return the HTML-escaped version of an input, quoting attributes too.
    """
    text = make_unicode(s, encoding, encoding_errors)
    return escape(text, quote=True)
python
{ "resource": "" }
q41342
retarget_to_length
train
def retarget_to_length(song, duration, start=True, end=True, slack=5,
                       beats_per_measure=None):
    """Create a composition of a song that changes its length to a given
    duration.

    :param song: Song to retarget
    :type song: :py:class:`radiotool.composer.Song`
    :param duration: Duration of retargeted song (in seconds)
    :type duration: float
    :param start: Start the retargeted song at the beginning of the
        original song
    :type start: boolean
    :param end: End the retargeted song at the end of the original song
    :type end: boolean
    :param slack: Track will be within slack seconds of the target
        duration (more slack allows for better-sounding music)
    :type slack: float
    :param beats_per_measure: optional beats-per-measure for an extra
        rhythm constraint
    :returns: Composition of retargeted song
    :rtype: :py:class:`radiotool.composer.Composition`
    """
    duration = float(duration)

    # Base constraints: preserve timbre/pitch continuity, discourage
    # high-energy jump points, and forbid very short loops.
    constraints = [
        rt_constraints.TimbrePitchConstraint(
            context=0, timbre_weight=1.0, chroma_weight=1.0),
        rt_constraints.EnergyConstraint(penalty=.5),
        rt_constraints.MinimumLoopConstraint(8),
    ]
    if beats_per_measure is not None:
        constraints.append(
            rt_constraints.RhythmConstraint(beats_per_measure, .125))
    if start:
        constraints.append(
            rt_constraints.StartAtStartConstraint(padding=0))
    if end:
        constraints.append(
            rt_constraints.EndAtEndConstraint(padding=slack))

    comp, info = retarget(
        [song], duration, constraints=[constraints],
        fade_in_len=None, fade_out_len=None)

    # force the new track to extend to the end of the song
    if end:
        # Pick the segment that ends last in composition time.
        last_seg = sorted(
            comp.segments,
            key=lambda seg:
            seg.comp_location_in_seconds + seg.duration_in_seconds
        )[-1]
        last_seg.duration_in_seconds = (
            song.duration_in_seconds - last_seg.start_in_seconds)

    # Every non-zero cost point on the chosen path is a transition;
    # label each one as a crossfade in the composition.
    path_cost = info["path_cost"]
    total_nonzero_cost = []
    total_nonzero_points = []
    for node in path_cost:
        if float(node.name) > 0.0:
            total_nonzero_cost.append(float(node.name))
            total_nonzero_points.append(float(node.time))
    transitions = zip(total_nonzero_points, total_nonzero_cost)
    for transition in transitions:
        comp.add_label(Label("crossfade", transition[0]))
    return comp
python
{ "resource": "" }
q41343
retarget_with_change_points
train
def retarget_with_change_points(song, cp_times, duration):
    """Create a composition of a song of a given duration that reaches
    music change points at specified times. This is still under
    construction. It might not work as well with more than 2
    ``cp_times`` at the moment.

    Here's an example of retargeting music to be 40 seconds long and
    hit a change point at the 10 and 30 second marks::

        song = Song("instrumental_music.wav")
        composition, change_points =\
            retarget.retarget_with_change_points(song, [10, 30], 40)
        composition.export(filename="retargeted_instrumental_music.")

    :param song: Song to retarget
    :type song: :py:class:`radiotool.composer.Song`
    :param cp_times: Times to reach change points (in seconds)
    :type cp_times: list of floats
    :param duration: Target length of retargeted music (in seconds)
    :type duration: float
    :returns: Composition of retargeted song and list of locations of
        change points in the retargeted composition
    :rtype: (:py:class:`radiotool.composer.Composition`, list)
    """
    analysis = song.analysis
    beat_length = analysis[BEAT_DUR_KEY]
    beats = np.array(analysis["beats"])

    # find change points
    cps = np.array(novelty(song, nchangepoints=4))
    cp_times = np.array(cp_times)

    # mark change points in original music
    def music_labels(t):
        # find beat closest to t
        closest_beat_idx = np.argmin(np.abs(beats - t))
        closest_beat = beats[closest_beat_idx]
        closest_cp = cps[np.argmin(np.abs(cps - closest_beat))]
        # A beat is a "cp" when it is the nearest beat to some change point.
        if np.argmin(np.abs(beats - closest_cp)) == closest_beat_idx:
            return "cp"
        else:
            return "noncp"

    # mark where we want change points in the output music
    # (a few beats of slack to improve the quality of the end result)
    def out_labels(t):
        if np.min(np.abs(cp_times - t)) < 1.5 * beat_length:
            return "cp"
        return "noncp"

    # Label the source song and the desired output, one label per beat.
    m_labels = [music_labels(i) for i in
                np.arange(0, song.duration_in_seconds, beat_length)]
    o_labels = [out_labels(i) for i in np.arange(0, duration, beat_length)]

    constraints = [
        rt_constraints.TimbrePitchConstraint(
            context=0, timbre_weight=1.0, chroma_weight=1.0),
        rt_constraints.EnergyConstraint(penalty=.5),
        rt_constraints.MinimumLoopConstraint(8),
        rt_constraints.NoveltyConstraint(m_labels, o_labels, 1.0)
    ]

    comp, info = retarget(
        [song], duration, constraints=[constraints],
        fade_in_len=None, fade_out_len=None)

    # The final change-point locations are wherever the result labels
    # say "cp", converted from beat index back to seconds.
    final_cp_locations = [beat_length * i
                          for i, label in enumerate(info['result_labels'])
                          if label == 'cp']

    return comp, final_cp_locations
python
{ "resource": "" }
q41344
get_data_files
train
def get_data_files():
    """
    Returns the path of data files, which are installed to the
    package directory.
    """
    import os
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return dict(
        Alpha_inf_hyrec_file=os.path.join(data_dir, 'hyrec', 'Alpha_inf.dat'),
        R_inf_hyrec_file=os.path.join(data_dir, 'hyrec', 'R_inf.dat'),
        two_photon_tables_hyrec_file=os.path.join(
            data_dir, 'hyrec', 'two_photon_tables.dat'),
        sBBN_file=os.path.join(data_dir, 'bbn', 'sBBN.dat'),
    )
python
{ "resource": "" }
q41345
_find_file
train
def _find_file(filename): """ Find the file path, first checking if it exists and then looking in the data directory """ import os if os.path.exists(filename): path = filename else: path = os.path.dirname(__file__) path = os.path.join(path, 'data', filename) if not os.path.exists(path): raise ValueError("cannot locate file '%s'" %filename) return path
python
{ "resource": "" }
q41346
load_precision
train
def load_precision(filename):
    """
    Load a CLASS precision file into a dictionary.

    Parameters
    ----------
    filename : str
        the name of an existing file to load, or one in the files included
        as part of the CLASS source

    Returns
    -------
    dict :
        the precision parameters loaded from file
    """
    # also look in data dir
    path = _find_file(filename)

    params = {}
    with open(path, 'r') as f:
        # Precision files are Python assignment statements; executing
        # them with `params` as locals collects the parameters.
        exec(f.read(), {}, params)
    return params
python
{ "resource": "" }
q41347
load_ini
train
def load_ini(filename):
    """
    Read a CLASS ``.ini`` file, returning a dictionary of parameters

    Parameters
    ----------
    filename : str
        the name of an existing parameter file to load, or one included as
        part of the CLASS source

    Returns
    -------
    dict :
        the input parameters loaded from file
    """
    # also look in data dir
    path = _find_file(filename)

    pars = {}
    with open(path, 'r') as ff:

        # loop over lines
        for lineno, line in enumerate(ff):
            if not line:
                continue

            # strip any trailing comment: keep the text *before* '#'.
            # (The original kept the text after '#', discarding the actual
            # parameter and keeping the comment.)
            if '#' in line:
                line = line[:line.index('#')]

            # must have an equals sign to be valid
            if "=" not in line:
                continue

            # extract key and value pairs
            fields = line.split("=")
            if len(fields) != 2:
                import warnings
                warnings.warn("skipping line number %d: '%s'" %(lineno,line))
                continue
            pars[fields[0].strip()] = fields[1].strip()

    return pars
python
{ "resource": "" }
q41348
save_coef
train
def save_coef(scoef, filename):
    """Saves ScalarCoeffs object 'scoef' to file.

    The first line of the file has the max number N and the max number M
    of the scoef structure separated by a comma.  The remaining lines
    have the form

    3.14, 2.718

    The first number is the real part of the mode and the second is the
    imaginary.
    """
    nmax = scoef.nmax
    mmax = scoef.mmax
    frmstr = "{0:.16e},{1:.16e}\n"
    # Flat length of the (n, m) coefficient vector.
    L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
    with open(filename, 'w') as f:
        f.write("{0},{1}\n".format(nmax, mmax))
        # `range` instead of `xrange`: same behavior, Python 3 compatible.
        for n in range(0, L):
            f.write(frmstr.format(scoef._vec[n].real, scoef._vec[n].imag))
python
{ "resource": "" }
q41349
load_patt
train
def load_patt(filename):
    """Loads a file that was saved with the save_patt routine."""
    with open(filename) as f:
        lines = f.readlines()

    header = lines[0].split(',')
    patt = np.zeros([int(header[0]), int(header[1])], dtype=np.complex128)

    # Each remaining line holds: n, m, real part, imaginary part.
    for line in lines[1:]:
        fields = line.split(',')
        n, m = int(fields[0]), int(fields[1])
        patt[n, m] = float(fields[2]) + 1j * float(fields[3])

    return sp.ScalarPatternUniform(patt, doublesphere=False)
python
{ "resource": "" }
q41350
load_vpatt
train
def _read_complex_grid(filename):
    """Read one 'n,m,re,im' pattern file into a complex matrix (helper)."""
    with open(filename) as f:
        lines = f.readlines()
    dims = lines[0].split(',')
    grid = np.zeros([int(dims[0]), int(dims[1])], dtype=np.complex128)
    for line in lines[1:]:
        fields = line.split(',')
        grid[int(fields[0]), int(fields[1])] = \
            float(fields[2]) + 1j * float(fields[3])
    return grid


def load_vpatt(filename1, filename2):
    """Loads a VectorPatternUniform pattern that is saved between two
    files.

    :param filename1: file holding the first component grid
    :param filename2: file holding the second component grid
    :returns: ``sp.VectorPatternUniform`` built from the two grids
    """
    # The original duplicated the parsing loop verbatim for each file;
    # both halves now share _read_complex_grid.
    return sp.VectorPatternUniform(_read_complex_grid(filename1),
                                   _read_complex_grid(filename2))
python
{ "resource": "" }
q41351
load_coef
train
def load_coef(filename):
    """Load a ScalarCoefs object that was written by save_coef."""
    with open(filename) as f:
        lines = f.readlines()
    header = lines[0].split(',')
    nmax, mmax = int(header[0]), int(header[1])
    # Number of packed coefficients for the (nmax, mmax) triangle.
    L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
    vec = np.zeros(L, dtype=np.complex128)
    for n, line in enumerate(lines[1:]):
        fields = line.split(',')
        vec[n] = float(fields[0]) + 1j * float(fields[1])
    return sp.ScalarCoefs(vec, nmax, mmax)
python
{ "resource": "" }
q41352
load_vcoef
train
def load_vcoef(filename):
    """Load a set of vector coefficients that were saved in MATLAB.

    The third number on the first line is the directivity calculated by
    the MATLAB code; the two coefficient blocks in the file are separated
    by a line containing only 'break'.
    """
    with open(filename) as f:
        lines = f.readlines()
    header = lines[0].split(',')
    nmax = int(header[0])
    mmax = int(header[1])
    directivity = float(header[2])
    L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
    vec1 = np.zeros(L, dtype=np.complex128)
    vec2 = np.zeros(L, dtype=np.complex128)
    target = vec1
    n = 0
    for line in lines[1:]:
        if line.strip() == 'break':
            # Switch to filling the second coefficient vector.
            target = vec2
            n = 0
        else:
            fields = line.split(',')
            target[n] = float(fields[0]) + 1j * float(fields[1])
            n += 1
    return (sp.VectorCoefs(vec1, vec2, nmax, mmax), directivity)
python
{ "resource": "" }
q41353
BarcodeParser.parse
train
def parse(cls, filename, max_life=None):
    """Parse a barcode file produced by gudhi into a numpy array.

    Infinite death times are replaced by max_life when it is given.
    """
    barcode = np.genfromtxt(filename)
    if max_life is not None:
        barcode[np.isinf(barcode)] = max_life
    return barcode
python
{ "resource": "" }
q41354
BarcodeParser.plot
train
def plot(self, dimension):
    """ Plot barcode using matplotlib. """
    # Imported lazily so parsing-only use never needs matplotlib.
    import matplotlib.pyplot as plt
    # (birth, death) pairs for the requested homology dimension.
    life_lines = self.get_life_lines(dimension)
    x, y = zip(*life_lines)
    plt.scatter(x, y)
    plt.xlabel("Birth")
    plt.ylabel("Death")
    if self.max_life is not None:
        plt.xlim([0, self.max_life])
    plt.title("Persistence Homology Dimension {}".format(dimension))
    #TODO: remove this
    plt.show()
python
{ "resource": "" }
q41355
Configurable.from_config
train
def from_config(cls, name, config):
    """
    Build a configured instance from a (name, config-dict) pair.

    The config is validated first, then a bare instance is created and
    configured.  Pluggable subclasses override this to verify plugin
    installation before construction.
    """
    cls.validate_config(config)
    obj = cls()
    if not obj.name:
        obj.name = config.get("name", name)
    obj.apply_config(config)
    return obj
python
{ "resource": "" }
q41356
command
train
def command(state, args):
    """List file priority rules."""
    priority_rules = query.files.get_priority_rules(state.db)
    headers = ['ID', 'Regexp', 'Priority']
    print(tabulate(priority_rules, headers=headers))
python
{ "resource": "" }
q41357
filter_support
train
def filter_support(candidates, transactions, min_sup):
    """
    Filter candidates to a frequent set by some minimum support.

    :param candidates: iterable of candidate itemsets (tuples)
    :param transactions: iterable of transactions (each a set/iterable)
    :param min_sup: minimum support as a fraction in [0, 1]
    :returns: the set of candidates whose support >= min_sup
    """
    # No transactions means no candidate can have support (and avoids a
    # ZeroDivisionError below).
    if not transactions:
        return set()
    counts = defaultdict(int)
    for transaction in transactions:
        for c in (c for c in candidates if set(c).issubset(transaction)):
            counts[c] += 1
    # float() keeps this true division even under Python 2 int semantics.
    total = float(len(transactions))
    return {i for i in candidates if counts[i] / total >= min_sup}
python
{ "resource": "" }
q41358
generate_candidates
train
def generate_candidates(freq_set, k):
    """
    Generate candidate itemsets of size k from the frequent sets.
    Use this only for k >= 2.
    """
    singles = {(item,) for item in set(flatten(freq_set))}
    # NOTE: exhaustively enumerating combinations gets very slow for
    # large documents (original TODO: find a cheaper generation scheme).
    candidates = (flatten(combo) for combo in combinations(singles, k))
    return [c for c in candidates if validate_candidate(c, freq_set, k)]
python
{ "resource": "" }
q41359
validate_candidate
train
def validate_candidate(candidate, freq_set, k):
    """
    Return True iff every (k-1)-sized subset of candidate appears in the
    frequent sets (the Apriori pruning condition).
    """
    return all(sub in freq_set
               for sub in combinations(candidate, k - 1))
python
{ "resource": "" }
q41360
State.compile_tag_re
train
def compile_tag_re(self, tags):
    """
    Compile and return the regex used to find Mustache tags, specialised
    for the given opening tags, close tags, and tag types.
    """
    pattern = self.raw_tag_re % tags
    return re.compile(pattern, self.re_flags)
python
{ "resource": "" }
q41361
_rdumpq
train
def _rdumpq(q,size,value,encoding=None):
    """Dump value as a tnetstring, to a deque instance, last chunks first.

    This function generates the tnetstring representation of the given
    value, pushing chunks of the output onto the given deque instance.
    It pushes the last chunk first, then recursively generates more chunks.

    When passed in the current size of the string in the queue, it will
    return the new size of the string in the queue.

    Operating last-chunk-first makes it easy to calculate the size written
    for recursive structures without having to build their representation
    as a string. This is measurably faster than generating the
    intermediate strings, especially on deeply nested structures.
    """
    # NOTE: Python 2 module - relies on long, unicode and dict.iteritems.
    # Chunks are emitted payload-first, so appendleft builds the final
    # string in the right order.
    write = q.appendleft
    if value is None:
        write("0:~")
        return size + 3
    if value is True:
        write("4:true!")
        return size + 7
    if value is False:
        write("5:false!")
        return size + 8
    if isinstance(value,(int,long)):
        data = str(value)
        ldata = len(data)
        span = str(ldata)
        write("#")
        write(data)
        write(":")
        write(span)
        return size + 2 + len(span) + ldata
    if isinstance(value,(float,)):
        # Use repr() for float rather than str().
        # It round-trips more accurately.
        # Probably unnecessary in later python versions that
        # use David Gay's ftoa routines.
        data = repr(value)
        ldata = len(data)
        span = str(ldata)
        write("^")
        write(data)
        write(":")
        write(span)
        return size + 2 + len(span) + ldata
    if isinstance(value,str):
        lvalue = len(value)
        span = str(lvalue)
        write(",")
        write(value)
        write(":")
        write(span)
        return size + 2 + len(span) + lvalue
    if isinstance(value,(list,tuple,)):
        # Items are written in reverse so they read forward in the output;
        # the running size difference gives the payload length for free.
        write("]")
        init_size = size = size + 1
        for item in reversed(value):
            size = _rdumpq(q,size,item,encoding)
        span = str(size - init_size)
        write(":")
        write(span)
        return size + 1 + len(span)
    if isinstance(value,dict):
        write("}")
        init_size = size = size + 1
        for (k,v) in value.iteritems():
            # Value before key: the pairs are being written back-to-front.
            size = _rdumpq(q,size,v,encoding)
            size = _rdumpq(q,size,k,encoding)
        span = str(size - init_size)
        write(":")
        write(span)
        return size + 1 + len(span)
    if isinstance(value,unicode):
        if encoding is None:
            raise ValueError("must specify encoding to dump unicode strings")
        value = value.encode(encoding)
        lvalue = len(value)
        span = str(lvalue)
        write(",")
        write(value)
        write(":")
        write(span)
        return size + 2 + len(span) + lvalue
    raise ValueError("unserializable object")
python
{ "resource": "" }
q41362
_gdumps
train
def _gdumps(value,encoding):
    """Generate fragments of value dumped as a tnetstring.

    This is the naive dumping algorithm, implemented as a generator so
    that it's easy to pass to "".join() without building a new list.

    This is mainly here for comparison purposes; the _rdumpq version is
    measurably faster as it doesn't have to build intermediate strings.
    """
    # NOTE: Python 2 module - relies on long, unicode and dict.iteritems.
    if value is None:
        yield "0:~"
    elif value is True:
        yield "4:true!"
    elif value is False:
        yield "5:false!"
    elif isinstance(value,(int,long)):
        data = str(value)
        yield str(len(data))
        yield ":"
        yield data
        yield "#"
    elif isinstance(value,(float,)):
        # repr() round-trips floats more accurately than str().
        data = repr(value)
        yield str(len(data))
        yield ":"
        yield data
        yield "^"
    elif isinstance(value,(str,)):
        yield str(len(value))
        yield ":"
        yield value
        yield ","
    elif isinstance(value,(list,tuple,)):
        sub = []
        for item in value:
            # BUGFIX: the recursive calls previously dropped `encoding`
            # (_gdumps takes two arguments), raising TypeError for any
            # container input.
            sub.extend(_gdumps(item,encoding))
        sub = "".join(sub)
        yield str(len(sub))
        yield ":"
        yield sub
        yield "]"
    elif isinstance(value,(dict,)):
        sub = []
        for (k,v) in value.iteritems():
            sub.extend(_gdumps(k,encoding))
            sub.extend(_gdumps(v,encoding))
        sub = "".join(sub)
        yield str(len(sub))
        yield ":"
        yield sub
        yield "}"
    elif isinstance(value,(unicode,)):
        if encoding is None:
            raise ValueError("must specify encoding to dump unicode strings")
        value = value.encode(encoding)
        yield str(len(value))
        yield ":"
        yield value
        yield ","
    else:
        raise ValueError("unserializable object")
python
{ "resource": "" }
q41363
log_magnitude_spectrum
train
def log_magnitude_spectrum(frames):
    """Log of the magnitude spectrum of frames, floored at 1e-5 so that
    log(0) is never taken."""
    magnitudes = N.abs(N.fft.rfft(frames))
    return N.log(magnitudes.clip(1e-5, N.inf))
python
{ "resource": "" }
q41364
RMS_energy
train
def RMS_energy(frames):
    """Root-mean-square energy over all samples in frames."""
    samples = frames.flatten()
    return N.sqrt(N.mean(samples ** 2))
python
{ "resource": "" }
q41365
normalize_features
train
def normalize_features(features):
    """Min-max scale a features array into the [0, 1] range."""
    low = N.min(features)
    high = N.max(features)
    return (features - low) / (high - low)
python
{ "resource": "" }
q41366
zero_crossing_last
train
def zero_crossing_last(frames):
    """Finds the last zero crossing in frames"""
    frames = N.array(frames)
    # Indices where the sign of consecutive samples changes.
    crossings = N.where(N.diff(N.sign(frames)))
    # crossings = N.where(frames[:n] * frames[1:n + 1] < 0)
    if len(crossings[0]) == 0:
        # No crossing found: fall back to the last sample index.
        print "No zero crossing"  # Python 2 print statement
        return len(frames) - 1
    return crossings[0][-1]
python
{ "resource": "" }
q41367
limiter
train
def limiter(arr):
    """
    Soft-limit the values of arr: samples within +/- lim_thresh pass
    through unchanged, louder samples are compressed toward the dynamic
    range bound with an arctan curve.
    """
    dyn_range = 32767.0 / 32767.0
    lim_thresh = 30000.0 / 32767.0
    lim_range = dyn_range - lim_thresh

    out = arr.copy()

    # Compress samples exceeding the positive threshold.
    above = arr > lim_thresh
    out[above] = (out[above] - lim_thresh) / lim_range
    out[above] = (N.arctan(out[above]) * 2.0 / N.pi) * lim_range + lim_thresh

    # Mirror image for samples below the negative threshold.
    below = arr < -lim_thresh
    out[below] = -(out[below] + lim_thresh) / lim_range
    out[below] = -(N.arctan(out[below]) * 2.0 / N.pi * lim_range + lim_thresh)

    return out
python
{ "resource": "" }
q41368
segment_array
train
def segment_array(arr, length, overlap=.5):
    """
    Segment array into chunks of a specified length, with a specified
    proportion overlap. Operates on axis 0.

    :param arr: array-like input
    :param integer length: Length of each segment
    :param float overlap: Proportion overlap of each frame
    :returns: array of shape (n_segments, length) + arr.shape[1:]
    """
    arr = N.array(arr)
    # Hop size between segment starts, truncated to an int so it can be
    # used as a slice index (modern numpy rejects float indices, which
    # the original `overlap * length` float produced).
    offset = int(float(overlap) * length)
    total_segments = int((N.shape(arr)[0] - length) / offset) + 1
    other_shape = N.shape(arr)[1:]
    out_shape = [total_segments, length]
    out_shape.extend(other_shape)
    out = N.empty(out_shape)
    for i in range(total_segments):
        start = i * offset
        out[i][:] = arr[start:start + length]
    return out
python
{ "resource": "" }
q41369
radpress_get_markup_descriptions
train
def radpress_get_markup_descriptions():
    """
    Build the list of markup option dicts (name, title, description) used
    to render descriptions in the admin and zen mode.

    :return: list
    """
    descriptions = []
    for choice in get_markup_choices():
        name = choice[0]
        reader = get_reader(markup=name)
        descriptions.append({
            'name': name,
            'title': choice[1],
            'description': trim(reader.description),
        })
    return descriptions
python
{ "resource": "" }
q41370
_get_parser
train
def _get_parser(description):
    """Build an ArgumentParser with the arguments common to both the
    encrypt and decrypt operations."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('key', help="Camellia key.")
    parser.add_argument('input_file', nargs='*',
                        help="File(s) to read as input data. If none are "
                             "provided, assume STDIN.")
    parser.add_argument('-o', '--output_file',
                        help="Output file. If not provided, assume STDOUT.")
    keylen_help = ("Length of 'key' in bits, must be in one of %s "
                   "(default 128)." % camcrypt.ACCEPTABLE_KEY_LENGTHS)
    parser.add_argument('-l', '--keylen', type=int, default=128,
                        help=keylen_help)
    parser.add_argument('-H', '--hexkey', action='store_true',
                        help="Treat 'key' as a hex string rather than "
                             "binary.")
    return parser
python
{ "resource": "" }
q41371
_get_crypto
train
def _get_crypto(keylen, hexkey, key):
    """Build a camcrypt.CamCrypt object from keylen, hexkey, and key.

    Raises ValueError when keylen is not an acceptable key length.
    """
    if keylen not in camcrypt.ACCEPTABLE_KEY_LENGTHS:
        raise ValueError("key length must be one of 128, 192, or 256")
    # NOTE(review): str.decode('hex') is Python-2-only — confirm runtime.
    key_bytes = key.decode('hex') if hexkey else key
    return camcrypt.CamCrypt(keylen=keylen, key=key_bytes)
python
{ "resource": "" }
q41372
_print_results
train
def _print_results(filename, data):
    """Print data to a file or STDOUT.

    Args:
      filename (str or None): If None, print to STDOUT; otherwise, print
        to the file with this name.
      data (str): Data to print.
    """
    if filename:
        # Binary mode so encrypted output round-trips byte-for-byte.
        with open(filename, 'wb') as f:
            f.write(data)
    else:
        print data  # Python 2 print statement
python
{ "resource": "" }
q41373
HEADER.timestamp
train
def timestamp(self, value):
    """
    The local time when the message was written.

    Must follow the format 'Mmm DD HH:MM:SS'. If the day of the month is
    less than 10, then it MUST be represented as a space and then the
    number.
    """
    # Fall back to the current time when the supplied value does not
    # match the required format.
    if not self._timestamp_is_valid(value):
        value = self._calculate_current_timestamp()
    self._timestamp = value
python
{ "resource": "" }
q41374
HEADER.hostname
train
def hostname(self, value):
    """
    The hostname where the log message was created; defaults to the
    local machine's hostname when None is given.  Should be the first
    part of the hostname or an IP address, NOT a fully qualified domain
    name.
    """
    self._hostname = socket.gethostname() if value is None else value
python
{ "resource": "" }
q41375
MSG.tag
train
def tag(self, value):
    """The name of the program that generated the log message.

    The tag can only contain alphanumeric characters. If the tag is
    longer than {MAX_TAG_LEN} characters it will be truncated
    automatically.  Defaults to the running program's name.
    """
    tag_value = sys.argv[0] if value is None else value
    self._tag = tag_value[:self.MAX_TAG_LEN]
python
{ "resource": "" }
q41376
MSG.content
train
def content(self, value):
    """The main component of the log message.

    A freeform field that often begins with the process ID (pid) of the
    program that created the message; the configured separator is
    prepended before storing.
    """
    self._content = self._prepend_seperator(value)
python
{ "resource": "" }
q41377
Syslog.log
train
def log(self, facility, level, text, pid=False):
    """Send the message text to all registered hosts.

    The facility and level will be used to create the packet's PRI part.
    The HEADER will be automatically determined from the current time and
    hostname. The MSG will be set from the running program's name and the
    text parameter.

    This is the simplest way to use reSyslog.Syslog, creating log
    messages containing the current time, hostname, program name, etc.
    This is how you do it::

        logger = syslog.Syslog()
        logger.add_host("localhost")
        logger.log(Facility.USER, Level.INFO, "Hello World")

    If pid is True the process ID will be prepended to the text
    parameter, enclosed in square brackets and followed by a colon.
    """
    pri = PRI(facility, level)
    header = HEADER()  # defaults: current timestamp + local hostname
    if pid:
        msg = MSG(content=text, pid=os.getpid())
    else:
        msg = MSG(content=text)
    packet = Packet(pri, header, msg)
    # Fan the assembled packet out to every host added via add_host().
    self._send_packet_to_hosts(packet)
python
{ "resource": "" }
q41378
GeneratePassword.new_pin
train
def new_pin(self, min_length=4, min_common=1000, timeout=20, refresh_timeout=3):
    """ Return a suggested PIN

    :param int min_length: minimum length of the PIN generated
    :param int min_common: the minimal commonness to be considered convertible to a PIN
    :param float timeout: main timeout in seconds
    :param float refresh_timeout: timeout to new sentence
    :return str: a string of digits

    >>> GeneratePassword().new_pin()
    ('32700', [('His', False), ('mouth', True), ('was', False), ('open', False), (',', False), ('his', False), ('neck', True), ('corded', True), ('with', False), ('the', False), ('strain', True), ('of', False), ('his', False), ('screams', True)])
    """
    # NOTE(review): despite the ':return str:' doc above, the success path
    # returns a (pin, overlap-list) tuple; timing out returns None.
    # Start from a sentence containing at least min_length sufficiently
    # common words.
    self.refresh(count_common=min_length, min_common=min_common,
                 timeout=refresh_timeout)
    rating = self.sentence_tool.rate(self.tokens)
    start = time()
    while time() - start < timeout:
        pin = ''
        for token, commonness in rating:
            if commonness >= min_common:
                # Convert each sufficiently common word to digits via the
                # major system; words with no mapping contribute nothing.
                key = self.mnemonic.word_to_key('major_system', token.lower())
                if key is not None:
                    pin += key
        if len(pin) < min_length:
            # Not enough digits yet: fetch a fresh sentence and retry.
            self.refresh(count_common=min_length, min_common=min_common,
                         timeout=refresh_timeout)
            rating = self.sentence_tool.rate(self.tokens)
        else:
            return pin, list(self.overlap_pin(pin, self.tokens))
    # Gave up within `timeout` seconds.
    return None
python
{ "resource": "" }
q41379
HTTPCheck.apply_check_config
train
def apply_check_config(self, config):
    """
    Copy the relevant keys of a validated config dict onto this check:
    `uri` (required), `https` (default False) and `method` (default
    "GET").
    """
    self.uri = config["uri"]
    self.use_https = config.get("https", False)
    self.method = config.get("method", "GET")
python
{ "resource": "" }
q41380
HTTPCheck.perform
train
def perform(self):
    """
    Performs a simple HTTP request against the configured url and
    returns true if the response has a 2xx code.

    The url can be configured to use https via the "https" boolean flag
    in the config, as well as a custom HTTP method via the "method" key.
    The default is to not use https and the GET method.
    """
    if self.use_https:
        conn = client.HTTPSConnection(self.host, self.port)
    else:
        conn = client.HTTPConnection(self.host, self.port)
    conn.request(self.method, self.uri)
    response = conn.getresponse()
    # The status code is read with the response; the connection can be
    # closed before the status is inspected.
    conn.close()
    return bool(response.status >= 200 and response.status < 300)
python
{ "resource": "" }
q41381
Writer.sync_balancer_files
train
def sync_balancer_files(self):
    """
    Submit a work-pool job that re-syncs the config file of every
    registered Balancer against the current set of Clusters.
    """
    def write_configs():
        for balancer in self.configurables[Balancer].values():
            balancer.sync_file(self.configurables[Cluster].values())

    self.work_pool.submit(write_configs)
python
{ "resource": "" }
q41382
Writer.on_balancer_remove
train
def on_balancer_remove(self, name):
    """
    Removing a load balancer config isn't supported yet.  When the
    removed balancer was the only one configured, log a critical
    message — a writer with no balancers is less than useless.
    """
    remaining = self.configurables[Balancer]
    if len(remaining) != 1:
        return
    logger.critical(
        "'%s' config file removed! It was the only balancer left!",
        name
    )
python
{ "resource": "" }
q41383
Writer.on_cluster_update
train
def on_cluster_update(self, name, new_config):
    """
    Callback hook for when a cluster is updated.

    Or main concern when a cluster is updated is whether or not the
    associated discovery method changed. If it did, we make sure that
    the old discovery method stops watching for the cluster's changes
    (if the old method is around) and that the new method *starts*
    watching for the cluster's changes (if the new method is actually
    around).

    Regardless of how the discovery method shuffling plays out the
    `sync_balancer_files` method is called.
    """
    cluster = self.configurables[Cluster][name]
    old_discovery = cluster.discovery
    new_discovery = new_config["discovery"]

    # Same discovery method: nothing to rewire, just resync the configs.
    if old_discovery == new_discovery:
        self.sync_balancer_files()
        return

    logger.info(
        "Switching '%s' cluster discovery from '%s' to '%s'",
        name, old_discovery, new_discovery
    )

    # Detach the old discovery (when still registered) and stop the
    # cluster's watcher thread.
    if old_discovery in self.configurables[Discovery]:
        self.configurables[Discovery][old_discovery].stop_watching(
            cluster
        )
        self.kill_thread(cluster.name)

    # The new discovery may not be loaded/available; warn and resync
    # anyway so the balancer files reflect the current state.
    if new_discovery not in self.configurables[Discovery]:
        logger.warn(
            "New discovery '%s' for cluster '%s' is unknown/unavailable.",
            new_discovery, name
        )
        self.sync_balancer_files()
        return

    discovery = self.configurables[Discovery][new_discovery]
    # Watch the cluster on a dedicated thread; resync configs on change.
    self.launch_thread(
        cluster.name, discovery.start_watching,
        cluster, self.sync_balancer_files
    )
python
{ "resource": "" }
q41384
Writer.on_cluster_remove
train
def on_cluster_remove(self, name):
    """
    Detach the removed cluster from its discovery method (when that
    method is still registered), stop its watcher thread, and resync the
    balancer config files.
    """
    cluster = self.configurables[Cluster][name]
    known_discoveries = self.configurables[Discovery]
    if cluster.discovery in known_discoveries:
        known_discoveries[cluster.discovery].stop_watching(cluster)
    self.kill_thread(name)
    self.sync_balancer_files()
python
{ "resource": "" }
q41385
color_string
train
def color_string(color, string):
    """
    Wrap string in the given color code (with a trailing reset), or pass
    it through unchanged when color support is unavailable.
    """
    if not color_available:
        return string
    return "".join((color, string, colorama.Fore.RESET))
python
{ "resource": "" }
q41386
color_for_level
train
def color_for_level(level):
    """
    Map a logging level to a colorama foreground color, defaulting to
    white for unknown levels.  Returns None when color is unavailable.
    """
    if not color_available:
        return None
    level_colors = {
        logging.DEBUG: colorama.Fore.WHITE,
        logging.INFO: colorama.Fore.BLUE,
        logging.WARNING: colorama.Fore.YELLOW,
        logging.ERROR: colorama.Fore.RED,
        logging.CRITICAL: colorama.Fore.MAGENTA,
    }
    return level_colors.get(level, colorama.Fore.WHITE)
python
{ "resource": "" }
q41387
create_thread_color_cycle
train
def create_thread_color_cycle():
    """
    Build a never-ending cycle of colors for individual threads; when
    color is unavailable the cycle yields None forever instead.
    """
    if not color_available:
        return itertools.cycle([None])
    palette = (
        colorama.Fore.CYAN,
        colorama.Fore.BLUE,
        colorama.Fore.MAGENTA,
        colorama.Fore.GREEN,
    )
    return itertools.cycle(palette)
python
{ "resource": "" }
q41388
color_for_thread
train
def color_for_thread(thread_id):
    """
    Return a stable color for thread_id so a thread's log lines stay one
    color; the first sighting of a thread claims the next color in the
    `thread_colors` cycle.
    """
    try:
        return seen_thread_colors[thread_id]
    except KeyError:
        color = next(thread_colors)
        seen_thread_colors[thread_id] = color
        return color
python
{ "resource": "" }
q41389
CLIHandler.format
train
def format(self, record):
    """
    Formats a given log record to include the timestamp, log level,
    thread ID and message. Colorized if coloring is available.
    """
    # Only colorize when attached to a terminal; otherwise defer to the
    # parent class's formatting untouched.
    if not self.is_tty:
        return super(CLIHandler, self).format(record)
    level_abbrev = record.levelname[0]  # e.g. 'I' for INFO
    time_and_level = color_string(
        color_for_level(record.levelno),
        "[%(asctime)s " + level_abbrev + "]"
    )
    thread = color_string(
        color_for_thread(record.thread),
        "[%(threadName)s]"
    )
    formatter = logging.Formatter(
        time_and_level + thread + " %(message)s",
        "%Y-%m-%d %H:%M:%S"
    )
    return formatter.format(record)
python
{ "resource": "" }
q41390
add_bare_metal_cloud
train
def add_bare_metal_cloud(client, cloud, keys):
    """
    Black magic is happening here. All of this wil change when we
    sanitize our API, however, this works until then
    """
    title = cloud.get('title')
    provider = cloud.get('provider')
    key = cloud.get('apikey', "")
    secret = cloud.get('apisecret', "")
    tenant_name = cloud.get('tenant_name', "")
    region = cloud.get('region', "")
    apiurl = cloud.get('apiurl', "")
    compute_endpoint = cloud.get('compute_endpoint', None)
    machine_ip = cloud.get('machine_ip', None)
    machine_key = cloud.get('machine_key', None)
    machine_user = cloud.get('machine_user', None)
    machine_port = cloud.get('machine_port', None)
    if provider == "bare_metal":
        # NOTE(review): indexing .keys() assumes Python 2 (list); a
        # Python 3 dict view is not subscriptable — confirm runtime.
        machine_ids = cloud['machines'].keys()
        bare_machine = cloud['machines'][machine_ids[0]]
        # Prefer the DNS name, fall back to the first public IP.
        machine_hostname = bare_machine.get('dns_name', None)
        if not machine_hostname:
            machine_hostname = bare_machine['public_ips'][0]
        if not machine_ip:
            machine_ip = machine_hostname
        key = machine_hostname
        machine_name = cloud['machines'][machine_ids[0]]['name']  # computed but unused
        machine_id = machine_ids[0]
        # Find which keypair is associated with this machine: each
        # keypair's 'machines' entries carry the machine id at index 1.
        keypairs = keys.keys()
        for i in keypairs:
            keypair_machines = keys[i]['machines']
            if keypair_machines:
                keypair_machs = keys[i]['machines']
                for mach in keypair_machs:
                    if mach[1] == machine_id:
                        machine_key = i
                        break
            else:
                pass
    client.add_cloud(title, provider, key, secret, tenant_name=tenant_name,
                     region=region, apiurl=apiurl, machine_ip=machine_ip,
                     machine_key=machine_key, machine_user=machine_user,
                     compute_endpoint=compute_endpoint,
                     machine_port=machine_port)
python
{ "resource": "" }
q41391
associate_keys
train
def associate_keys(user_dict, client):
    """
    This whole function is black magic, had to however cause of the way
    we keep key-machine association
    """
    added_keys = user_dict['keypairs']
    print ">>>Updating Keys-Machines association"  # Python 2 print statement
    for key in added_keys:
        machines = added_keys[key]['machines']
        if machines:
            try:
                for machine in machines:
                    # NOTE(review): each record looks positional:
                    # [cloud_id, machine_id, ?, ssh_user, ..., ssh_port]
                    # — confirm against the producer of 'keypairs'.
                    cloud_id = machine[0]
                    machine_id = machine[1]
                    ssh_user = machine[3]
                    ssh_port = machine[-1]
                    # NOTE(review): 'key' is rebound here from the dict
                    # key (a name) to the key object; later iterations of
                    # this inner loop index client.keys with the rebound
                    # value.
                    key = client.keys[key]
                    cloud = cloud_from_id(client, cloud_id)
                    cloud.update_machines()
                    mach = machine_from_id(cloud, machine_id)
                    public_ips = mach.info.get('public_ips', None)
                    if public_ips:
                        host = public_ips[0]
                    else:
                        host = ""
                    key.associate_to_machine(cloud_id=cloud_id,
                                             machine_id=machine_id,
                                             host=host, ssh_port=ssh_port,
                                             ssh_user=ssh_user)
                    print "associated machine %s" % machine_id
            except Exception as e:
                # Best-effort: associations that fail to resolve are
                # silently skipped.
                pass
    client.update_keys()
    print
python
{ "resource": "" }
q41392
Fade.to_array
train
def to_array(self, channels=2):
    """Generate the array of volume multipliers for the dynamic"""
    # Linear fade: straight interpolation between in/out volumes, one
    # multiplier per sample per channel.
    if self.fade_type == "linear":
        return np.linspace(self.in_volume, self.out_volume,
                           self.duration * channels
                           ).reshape(self.duration, channels)
    elif self.fade_type == "exponential":
        # Exponential fade: a base-0.5 logspace rescaled so its endpoints
        # land on in_volume / out_volume; the exponent direction depends
        # on which endpoint is louder.
        if self.in_volume < self.out_volume:
            return (np.logspace(8, 1, self.duration * channels, base=.5) *
                    (self.out_volume - self.in_volume) / 0.5 +
                    self.in_volume).reshape(self.duration, channels)
        else:
            return (np.logspace(1, 8, self.duration * channels, base=.5) *
                    (self.in_volume - self.out_volume) / 0.5 +
                    self.out_volume).reshape(self.duration, channels)
    elif self.fade_type == "cosine":
        # NOTE(review): the cosine fade is unimplemented; this branch
        # (and any unrecognized fade_type) yields None.
        return
python
{ "resource": "" }
q41393
PlugEvents.save
train
def save(self):
    """Over-rides the default save function for PlugEvents. If a
    sacrifice date is set for an object in this model, then Active is
    set to False."""
    # A sacrificed plug can no longer be active; flip the flag before
    # delegating to the model's normal save.
    if self.SacrificeDate:
        self.Active = False
    super(PlugEvents, self).save()
python
{ "resource": "" }
q41394
command
train
def command(state, args):
    """Show anime data."""
    args = parser.parse_args(args[1:])
    # Resolve the anime ID from the argument (falling back to the cached
    # 'db' result set).
    aid = state.results.parse_aid(args.aid, default_key='db')
    anime = query.select.lookup(state.db, aid,
                                episode_fields=args.episode_fields)

    complete_string = 'yes' if anime.complete else 'no'
    print(SHOW_MSG.format(
        anime.aid,
        anime.title,
        anime.type,
        anime.watched_episodes,
        anime.episodecount,
        datets.to_date(anime.startdate) if anime.startdate else 'N/A',
        datets.to_date(anime.enddate) if anime.enddate else 'N/A',
        complete_string,
    ))
    if anime.regexp:
        print('Watching regexp: {}'.format(anime.regexp))
    if hasattr(anime, 'episodes'):
        # Sort by (type, number) so episode types group together.
        episodes = sorted(anime.episodes, key=lambda x: (x.type, x.number))
        print('\n', tabulate(
            (
                (
                    EpisodeTypes.from_db(state.db).get_epno(episode),
                    episode.title,
                    episode.length,
                    'yes' if episode.user_watched else '',
                )
                for episode in episodes
            ),
            headers=['Number', 'Title', 'min', 'Watched'],
        ))
python
{ "resource": "" }
q41395
load_prefix
train
def load_prefix(s3_loc, success_only=None, recent_versions=None,
                exclude_regex=None, just_sql=False):
    """Get a bash command which will load every dataset in a bucket at a
    prefix.

    For this to work, all datasets must be of the form
    `s3://$BUCKET_NAME/$PREFIX/$DATASET_NAME/v$VERSION/$PARTITIONS`.
    Any other formats will be ignored; datasets that fail to process are
    reported on stderr and skipped.
    """
    bucket_name, prefix = _get_bucket_and_prefix(s3_loc)
    commands = []
    for dataset in _get_common_prefixes(bucket_name, prefix):
        dataset = _remove_trailing_backslash(dataset)
        try:
            commands.append(get_bash_cmd(
                's3://{}/{}'.format(bucket_name, dataset),
                success_only=success_only,
                recent_versions=recent_versions,
                exclude_regex=exclude_regex,
                just_sql=just_sql))
        except Exception as e:
            sys.stderr.write('Failed to process {}, {}\n'.format(dataset, str(e)))
    return ''.join(commands)
python
{ "resource": "" }
q41396
read_unicode
train
def read_unicode(path, encoding, encoding_errors):
    """
    Return the contents of a file as a unicode string.

    :param path: filesystem path of the file to read
    :param encoding: character encoding forwarded to make_unicode
    :param encoding_errors: error-handling scheme forwarded to make_unicode
    """
    # The previous try/finally opened the file *inside* the try block, so
    # a failed open() left 'f' unbound and the finally clause raised
    # NameError, masking the real error.  A with-statement closes the
    # file safely in every case.
    with open(path, 'rb') as f:
        return make_unicode(f.read(), encoding, encoding_errors)
python
{ "resource": "" }
q41397
get_abs_template_path
train
def get_abs_template_path(template_name, directory, extension):
    """
    Resolve template_name inside directory to an absolute path,
    appending the default extension when the name has none of its own.
    """
    file_with_ext = template_name
    if extension:
        base, existing_ext = splitext(file_with_ext)
        if not existing_ext:
            # Normalise the default extension (drop any leading dot)
            # before joining it onto the bare name.
            default_ext = extension.replace(extsep, '')
            file_with_ext = extsep.join((base, default_ext))
    return abspath(join(directory, file_with_ext))
python
{ "resource": "" }
q41398
load_file
train
def load_file(path, encoding, encoding_errors):
    """
    Load an existing file as a unicode string; raises IOError when the
    path does not exist.
    """
    abs_path = abspath(path)
    if not exists(abs_path):
        raise IOError('File %s does not exist' % (abs_path))
    return read_unicode(abs_path, encoding, encoding_errors)
python
{ "resource": "" }
q41399
load_template
train
def load_template(name, directory, extension, encoding, encoding_errors):
    """
    Resolve a template by name and return its contents as a unicode
    string.
    """
    template_path = get_abs_template_path(name, directory, extension)
    return load_file(template_path, encoding, encoding_errors)
python
{ "resource": "" }