_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def ids_from_seqs_iterative(seqs, app, query_parser,
                            scorer=keep_everything_scorer,
                            max_iterations=None, blast_db=None,
                            max_seqs=None):
    """Gets the ids from each seq, then does each additional id until all done.

    If scorer is passed in as an int, uses shotgun scorer with that # hits.

    seqs: initial sequences to search with
    app: application controller invoked per batch of sequences
    query_parser: yields (query_id, match_id, match_score) triples from output
    scorer: callable applied to the {query_id: {match_id: score}} mapping
        (or an int -> shotgun scorer with that many hits)
    max_iterations: optional cap on search rounds
    max_seqs: optional cap on the size of the scored result

    Returns the result of applying ``scorer`` to the accumulated id map.
    """
    if isinstance(scorer, int):
        scorer = make_shotgun_scorer(scorer)
    seqs_to_check = list(seqs)
    checked_ids = {}
    curr_iteration = 0
    while seqs_to_check:
        unchecked_ids = {}
        # pass seqs to command
        all_output = app(seqs_to_check)
        output = all_output.get('BlastOut', all_output['StdOut'])
        for query_id, match_id, match_score in query_parser(output):
            if query_id not in checked_ids:
                checked_ids[query_id] = {}
            checked_ids[query_id][match_id] = match_score
            if match_id not in checked_ids:
                unchecked_ids[match_id] = True
        all_output.cleanUp()
        if unchecked_ids:
            # Fetch sequences for the newly discovered ids ONCE and feed them
            # into the next round.  (Previously fasta_cmd_get_seqs was called
            # twice with identical arguments and the first result discarded.)
            seq_file = fasta_cmd_get_seqs(unchecked_ids.keys(),
                                          app.Parameters['-d'].Value)['StdOut']
            seqs_to_check = []
            for s in FastaCmdFinder(seq_file):
                seqs_to_check.extend(s)
        else:
            seqs_to_check = []
        # bail out if max iterations or max seqs was defined and we've reached it
        curr_iteration += 1
        if max_iterations and (curr_iteration >= max_iterations):
            break
        if max_seqs:
            curr = scorer(checked_ids)
            if len(curr) >= max_seqs:
                return curr
    return scorer(checked_ids)
"resource": ""
} |
def blastp(seqs, blast_db="nr", e_value="1e-20", max_hits=200,
           working_dir=tempfile.gettempdir(), blast_mat_root=None,
           extra_params=None):
    """
    Returns BlastResult from input seqs, using blastp.

    seqs: input sequences to search with
    blast_db: database to search against (default: "nr")
    e_value: expectation-value cutoff passed to blastall -e
    max_hits: max alignments (-b) and one-line descriptions (-v) to report
    working_dir: directory blastall runs in
    blast_mat_root: optional path to the BLAST matrix directory
    extra_params: optional dict of extra blastall parameters, overriding
        the defaults built here.  (Previously a mutable default ``{}``,
        shared across calls; now defaults to None.)

    Returns None when blastall produced no stdout.
    """
    if extra_params is None:
        extra_params = {}
    # set up params to use with blastp
    params = {
        "-M": "BLOSUM62",   # matrix
        "-a": "1",          # max procs
        "-e": e_value,      # expectation
        "-b": max_hits,     # max seqs to show
        "-v": max_hits,     # max one line descriptions
        "-p": "blastp",     # program
    }
    params.update(extra_params)
    # blast
    blast_res = blast_seqs(seqs,
                           Blastall,
                           blast_mat_root=blast_mat_root,
                           blast_db=blast_db,
                           params=params,
                           add_seq_names=False,
                           WorkingDir=working_dir)
    # get prot id map
    if blast_res['StdOut']:
        lines = [x for x in blast_res['StdOut']]
        return BlastResult(lines)
    return None
"resource": ""
} |
def set_lock(fname):
    """
    Try to lock file and write PID.
    Return the status of operation.

    The open file handle is kept in the module-global ``fh`` because the
    lock is tied to the open descriptor and must stay held for the life
    of the process.
    """
    global fh
    fh = open(fname, 'w')
    if os.name == 'nt':
        # Code for NT systems got from: http://code.activestate.com/recipes/65203/
        import win32con
        import win32file
        import pywintypes
        LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
        LOCK_SH = 0  # the default
        LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
        # is there any reason not to reuse the following structure?
        __overlapped = pywintypes.OVERLAPPED()
        hfile = win32file._get_osfhandle(fh.fileno())
        try:
            win32file.LockFileEx(hfile, LOCK_EX | LOCK_NB, 0, -0x10000,
                                 __overlapped)
        except pywintypes.error as exc_value:
            # error: (33, 'LockFileEx', 'The process cannot access
            # the file because another process has locked a portion
            # of the file.')
            # BUG FIX: pywintypes.error is not indexable on Python 3;
            # use .winerror instead of exc_value[0].
            if exc_value.winerror == 33:
                fh.close()  # don't leak the handle on failure
                return False
            # other errors fall through and are treated as success,
            # preserving the original behaviour
    else:
        from fcntl import flock, LOCK_EX, LOCK_NB
        try:
            flock(fh.fileno(), LOCK_EX | LOCK_NB)
        except Exception:
            fh.close()  # don't leak the handle on failure
            return False
    fh.write(str(os.getpid()))
    fh.flush()
    return True
"resource": ""
} |
def assert_lock(fname):
    """
    Terminate the program if ``fname`` is already locked; otherwise
    acquire the lock (via set_lock) and continue.
    """
    locked = set_lock(fname)
    if locked:
        return
    logger.error('File {} is already locked. Terminating.'.format(fname))
    sys.exit()
"resource": ""
} |
def build_blast_db_from_seqs(seqs, is_protein=False, output_dir='./',
                             HALT_EXEC=False):
    """Build blast db from seqs; return db name and list of files created

    **If using to create temporary blast databases, you can call
    cogent.util.misc.remove_files(db_filepaths) to clean up all the
    files created by formatdb when you're done with the database.

    seqs: sequence collection or alignment object
    is_protein: True if working on protein seqs (default: False)
    output_dir: directory where output should be written
        (default: current directory)
    HALT_EXEC: halt just before running the formatdb command and
        print the command -- useful for debugging
    """
    import os
    # Keep the fd that mkstemp returns so it can be closed (the original
    # discarded it, leaking a file descriptor per call).
    fd, tmp_fasta_filepath = mkstemp(prefix='Blast_tmp_db', suffix='.fasta')
    # write the sequence collection to the temp file
    with os.fdopen(fd, 'w') as tmp_fasta_file:
        tmp_fasta_file.write(seqs.toFasta())
    # build the blast database
    db_name, db_filepaths = build_blast_db_from_fasta_path(
        tmp_fasta_filepath,
        is_protein=is_protein,
        output_dir=output_dir,
        HALT_EXEC=HALT_EXEC)
    # clean-up the temporary file
    remove(tmp_fasta_filepath)
    # return the results
    return db_name, db_filepaths
"resource": ""
} |
def _session_key(self):
    """Gets the redis key for a session.

    Decodes the signed session cookie (minting and setting a new session
    id when the cookie is absent or undecodable) and caches the derived
    redis key on the instance.

    NOTE(review): referenced elsewhere as ``self._session_key`` without
    parentheses — presumably decorated with @property in the original
    class; confirm.
    """
    if not hasattr(self, "_cached_session_key"):
        session_id_bytes = self.get_secure_cookie("session_id")
        session_id = None
        if session_id_bytes:
            try:
                session_id = session_id_bytes.decode('utf-8')
            except UnicodeDecodeError:
                # BUG FIX: was a bare ``except`` that swallowed everything
                # (including KeyboardInterrupt); only a malformed cookie
                # should fall through to minting a fresh session id.
                pass
        if not session_id:
            session_id = oz.redis_sessions.random_hex(20)
            session_time = oz.settings["session_time"]
            kwargs = dict(
                name="session_id",
                value=session_id.encode('utf-8'),
                domain=oz.settings.get("cookie_domain"),
                httponly=True,
            )
            if session_time:
                kwargs["expires_days"] = round(session_time/60/60/24)
            self.set_secure_cookie(**kwargs)
        password_salt = oz.settings["session_salt"]
        self._cached_session_key = "session:%s:v4" % oz.redis_sessions.password_hash(session_id, password_salt=password_salt)
    return self._cached_session_key
"resource": ""
} |
def _update_session_expiration(self):
    """
    Push back the expiry of the redis session hash because the session
    was just interacted with.
    """
    ttl = oz.settings["session_time"]
    if ttl:
        self.redis().expire(self._session_key, ttl)
"resource": ""
} |
def get_session_value(self, name, default=None):
    """Gets a session value and refreshes the session's expiry.

    NOTE: any falsy stored value (e.g. empty string) also falls back to
    ``default``, not just a missing key — preserved from the original.
    """
    stored = self.redis().hget(self._session_key, name)
    result = stored or default
    self._update_session_expiration()
    return result
"resource": ""
} |
def set_session_value(self, name, value):
    """Stores a session value and refreshes the session's expiry."""
    key = self._session_key
    self.redis().hset(key, name, value)
    self._update_session_expiration()
"resource": ""
} |
def clear_session_value(self, name):
    """Deletes a session value and refreshes the session's expiry."""
    key = self._session_key
    self.redis().hdel(key, name)
    self._update_session_expiration()
"resource": ""
} |
async def rewrite_middleware(server, request):
    '''
    Sanic middleware that utilizes a security class's "rewrite" method to
    check and rewrite the request path, answering HTTP 400 on a
    SecurityException (with the message exposed only in DEBUG mode).
    '''
    if singletons.settings.SECURITY is None:
        security_class = DummySecurity
    else:
        security_class = singletons.settings.load('SECURITY')
    security = security_class()
    try:
        new_path = await security.rewrite(request)
    except SecurityException as e:
        msg = str(e) if DEBUG else ''
        return server.response.text(msg, status=400)
    request.path = new_path
"resource": ""
} |
def get_objects(self, uri, pull=True, **kwargs):
    '''
    Walk through repo commits to generate a list of repo commit
    objects.

    Each object has the following properties:
        * repo uri
        * general commit info
        * files added, removed fnames
        * lines added, removed
        * acked_by
        * signed_off_by
        * resolves
        * related
    '''
    self.repo = repo = git_clone(uri, pull=pull, reflect=True)
    # get a full list of all commit SHAs in the repo (all branches)
    cmd = 'git rev-list --all'
    output = sys_call(cmd, cwd=repo.path)
    repo_shas = set(x.strip() for x in output.split('\n') if x)
    logger.debug("Total Commits: %s" % len(repo_shas))
    cmd = 'git --no-pager log --all --format=sha:%H --numstat'
    # BUG FIX: this call previously ran without cwd=repo.path, so the log
    # was taken from whatever directory the process happened to be in,
    # not from the cloned repo.
    output = sys_call(cmd, cwd=repo.path)
    all_logs = re.sub('\n+', '\n', output)
    c_logs = [x for x in [s.strip() for s in all_logs.split('sha:')] if x]
    _end = None  # once was true, always is true...
    objs = []
    for c_log in c_logs:
        sha, s, all_changes = c_log.partition('\n')
        c = repo.get_object(sha)
        # FIXME: not normalizing to UTC
        _start = ts2dt(c.commit_time)
        # basic commit metadata
        obj = dict(_oid=sha, _start=_start, _end=_end,
                   repo_uri=uri, tree=c.tree, parents=c.parents,
                   author=c.author, committer=c.committer,
                   author_time=c.author_time, message=c.message,
                   mergetag=c.mergetag, extra=c.extra)
        for _file in all_changes.split('\n'):
            _file = _file.strip()
            obj.setdefault('files', {})
            if not _file:
                # empty numstat row -> recorded under a None filename
                added, removed, fname = 0, 0, None
            else:
                added, removed, fname = _file.split('\t')
                # '-' marks binary files: no line counts available
                added = 0 if added == '-' else int(added)
                removed = 0 if removed == '-' else int(removed)
            # FIXME: sql doesn't nest well..
            changes = {'added': added,
                       'removed': removed}
            obj['files'][fname] = changes
        # file +/- totals
        obj['added'] = sum(
            [v.get('added', 0) for v in obj['files'].itervalues()])
        obj['removed'] = sum(
            [v.get('removed', 0) for v in obj['files'].itervalues()])
        # extract interesting bits from the message
        obj['acked_by'] = acked_by_re.findall(c.message)
        obj['signed_off_by'] = signed_off_by_re.findall(c.message)
        obj['resolves'] = resolves_re.findall(c.message)
        obj['related'] = related_re.findall(c.message)
        objs.append(obj)
    self.objects.extend(objs)
    return super(Commit, self).get_objects(**kwargs)
"resource": ""
} |
q39112 | _dict_values_sorted_by_key | train | def _dict_values_sorted_by_key(dictionary):
# This should be a yield from instead.
"""Internal helper to return the values of a dictionary, sorted by key.
"""
for _, value in sorted(dictionary.iteritems(), key=operator.itemgetter(0)):
yield value | python | {
"resource": ""
} |
q39113 | _ondemand | train | def _ondemand(f):
"""Decorator to only request information if not in cache already.
"""
name = f.__name__
def func(self, *args, **kwargs):
if not args and not kwargs:
if hasattr(self, '_%s' % name):
return getattr(self, '_%s' % name)
a = f(self, *args, **kwargs)
setattr(self, '_%s' % name, a)
return a
else:
return f(self, *args, **kwargs)
func.__name__ = name
return func | python | {
"resource": ""
} |
def get_memory(self, start, size):
    """Retrieve an area of memory from IDA.

    Returns a sparse dictionary of address -> value.
    """
    LOG.debug('get_memory: %d bytes from %x', size, start)
    result = get_memory(self.ida.idaapi, start, size,
                        default_byte=self.default_byte)
    return result
"resource": ""
} |
def handle_404(request, exception):
    '''Handle 404 Not Found.

    This handler should be used to handle error http 404 not found for
    all endpoints, or when a resource is unavailable; it wraps the
    exception text in the standard error envelope.
    '''
    error = format_error(title='Resource not found', detail=str(exception))
    payload = return_an_error(error)
    return json(payload, status=HTTPStatus.NOT_FOUND)
"resource": ""
} |
q39116 | _add_to_dict | train | def _add_to_dict(t, container, name, value):
"""
Adds an item to a dictionary, or raises an exception if an item with the
specified key already exists in the dictionary.
"""
if name in container:
raise Exception("%s '%s' already exists" % (t, name))
else:
container[name] = value | python | {
"resource": ""
} |
def trigger(self, name, *args, **kwargs):
    """
    Runs an event through the registered middleware listeners.

    Executes each listener for ``name`` in order; stops early and
    returns False as soon as one returns the ``break_trigger``,
    otherwise returns True.
    """
    for listener in self._triggers.get(name, []):
        outcome = listener(*args, **kwargs)
        if outcome == break_trigger:
            return False
    return True
"resource": ""
} |
def cache_makedirs(self, subdir=None):
    '''
    Create the directories needed to hold this resource's cache value.

    subdir=None  -> create the parent directory of cache_path
    subdir=''    -> create cache_path itself
    subdir='x'   -> create cache_path/x
    '''
    if subdir is None:
        target = os.path.dirname(self.cache_path)
    elif subdir:
        target = os.path.join(self.cache_path, subdir)
    else:
        target = self.cache_path
    os.makedirs(target, exist_ok=True)
"resource": ""
} |
def session(connection_string=None):
    """Gets a SQLAlchemy session.

    Falls back to the configured oz "db" setting when no connection
    string is given, and caches one sessionmaker per connection string.
    """
    global _session_makers
    connection_string = connection_string or oz.settings["db"]
    # idiom fix: ``x not in`` instead of ``not x in``
    if connection_string not in _session_makers:
        _session_makers[connection_string] = sessionmaker(
            bind=engine(connection_string=connection_string))
    return _session_makers[connection_string]()
"resource": ""
} |
def assign_dna_reads_to_dna_database(query_fasta_fp, database_fasta_fp,
                                     output_fp, params=None):
    """Assign DNA reads to a database fasta of DNA sequences.

    Wraps assign_reads_to_database, pinning both database and query
    types to 'dna'.  All other parameters default unless ``params`` is
    supplied.

    query_fasta_fp: absolute path to the query fasta file (DNA)
    database_fasta_fp: absolute path to the database fasta file (DNA)
    output_fp: absolute path where the output file will be generated
    params: optional dict of parameter overrides; may not change the
        database or query file types

    Returns an open file object; the output format defaults to blast9
    and should be parsable by the PyCogent BLAST parsers.
    """
    params = params if params is not None else {}
    # refuse attempts to override the pinned database/query types
    if '-t' in params or '-q' in params:
        raise ApplicationError("Cannot change database or query types when " +
                               "using assign_dna_reads_to_dna_database. " +
                               "Use assign_reads_to_database instead.\n")
    blat_params = {'-t': 'dna', '-q': 'dna'}
    blat_params.update(params)
    return assign_reads_to_database(query_fasta_fp, database_fasta_fp,
                                    output_fp, blat_params)
"resource": ""
} |
def assign_dna_reads_to_protein_database(query_fasta_fp, database_fasta_fp,
                                         output_fp, temp_dir="/tmp",
                                         params=None):
    """Assign DNA reads to a database fasta of protein sequences.

    Wraps assign_reads_to_database, pinning database and query types to
    'prot'.  A temporary file of six-frame translations of the input
    query fasta is written because BLAT cannot do this automatically.

    query_fasta_fp: absolute path to the query fasta file (DNA)
    database_fasta_fp: absolute path to the database fasta file (protein)
    output_fp: absolute path where the output file will be generated
    temp_dir: absolute directory for the translated sequences (default /tmp)
    params: optional dict of parameter overrides; may not change the
        database or query file types.  NOTE: a 'genetic_code' entry is
        consumed (deleted) from the caller's dict, as before.

    Returns an open file object; the output format defaults to blast9
    and should be parsable by the PyCogent BLAST parsers.
    """
    import os
    if params is None:
        params = {}
    my_params = {'-t': 'prot', '-q': 'prot'}
    # make sure temp_dir specifies an absolute path
    if not isabs(temp_dir):
        raise ApplicationError("temp_dir must be an absolute path.")
    # refuse attempts to override the pinned database/query types
    # BUG FIX: the message previously named the DNA variant of this
    # function (copy-paste error).
    if '-t' in params or '-q' in params:
        raise ApplicationError("Cannot change database or query types "
                               "when using assign_dna_reads_to_protein_database. "
                               "Use assign_reads_to_database instead.")
    if 'genetic_code' in params:
        my_genetic_code = GeneticCodes[params['genetic_code']]
        del params['genetic_code']
    else:
        my_genetic_code = GeneticCodes[1]
    my_params.update(params)
    # Six-frame translate the input DNA and write to a temp file.
    # BUG FIX: keep and close the fd mkstemp returns (it was leaked).
    fd, tmp = mkstemp(dir=temp_dir)
    with os.fdopen(fd, 'w') as tmp_out:
        for label, sequence in parse_fasta(open(query_fasta_fp)):
            seq_id = label.split()[0]
            s = DNA.makeSequence(sequence)
            translations = my_genetic_code.sixframes(s)
            frames = [1, 2, 3, -1, -2, -3]
            translations = dict(zip(frames, translations))
            # items() instead of Python-2-only iteritems()
            for frame, translation in sorted(translations.items()):
                entry = '>{seq_id}_frame_{frame}\n{trans}\n'
                entry = entry.format(seq_id=seq_id, frame=frame,
                                     trans=translation)
                tmp_out.write(entry)
    result = assign_reads_to_database(tmp, database_fasta_fp, output_fp,
                                      params=my_params)
    remove(tmp)
    return result
"resource": ""
} |
q39122 | Blat._get_base_command | train | def _get_base_command(self):
"""Gets the command that will be run when the app controller is
called.
"""
command_parts = []
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
if self._command is None:
raise ApplicationError('_command has not been set.')
command = self._command
parameters = sorted([str(x) for x in self.Parameters.values()
if str(x)])
synonyms = self._synonyms
command_parts.append(cd_command)
command_parts.append(command)
command_parts.append(self._database) # Positional argument
command_parts.append(self._query) # Positional argument
command_parts += parameters
if self._output:
command_parts.append(self._output.Path) # Positional
return (
self._command_delimiter.join(filter(None, command_parts)).strip()
) | python | {
"resource": ""
} |
def woa_profile_from_dap(var, d, lat, lon, depth, cfg):
    """
    Monthly Climatologic Mean and Standard Deviation from WOA,
    used either for temperature or salinity.

    INPUTS
        var: 'temperature...' or 'salinity...' variable name
        d: datetime-like (day of the year is used)
        lat: [-90<lat<90]
        lon: [-180<lon<180]
        depth: [meters]
        cfg: dict with the dataset 'url'

    Reads the WOA Monthly Climatology NetCDF file and returns the
    corresponding WOA mean ('woa_an') and standard deviation ('woa_sd')
    profiles interpolated onto ``depth``.
    """
    if lon < 0:
        lon = lon + 360
    url = cfg['url']
    doy = int(d.strftime('%j'))
    dataset = open_url(url)
    # nearest-neighbour indices along time/lon/lat
    dn = (np.abs(doy - dataset['time'][:])).argmin()
    xn = (np.abs(lon - dataset['lon'][:])).argmin()
    yn = (np.abs(lat - dataset['lat'][:])).argmin()
    # BUG FIX: raw strings for the regexes — "\d" in a plain string is an
    # invalid escape sequence on Python 3.
    if re.match(r"temperature\d?$", var):
        mn = ma.masked_values(dataset.t_mn.t_mn[dn, :, yn, xn].reshape(
            dataset['depth'].shape[0]), dataset.t_mn.attributes['_FillValue'])
        sd = ma.masked_values(dataset.t_sd.t_sd[dn, :, yn, xn].reshape(
            dataset['depth'].shape[0]), dataset.t_sd.attributes['_FillValue'])
    elif re.match(r"salinity\d?$", var):
        mn = ma.masked_values(dataset.s_mn.s_mn[dn, :, yn, xn].reshape(
            dataset['depth'].shape[0]), dataset.s_mn.attributes['_FillValue'])
        sd = ma.masked_values(dataset.s_sd.s_sd[dn, :, yn, xn].reshape(
            dataset['depth'].shape[0]), dataset.s_sd.attributes['_FillValue'])
    # NOTE(review): an unrecognized ``var`` leaves mn/sd unbound and raises
    # NameError below — preserved from the original; confirm intent.
    zwoa = ma.array(dataset.depth[:])
    # only interpolate within the WOA depth range
    ind = (depth <= zwoa.max()) & (depth >= zwoa.min())
    # Mean value profile
    f = interp1d(zwoa[~ma.getmaskarray(mn)].compressed(), mn.compressed())
    mn_interp = ma.masked_all(depth.shape)
    mn_interp[ind] = f(depth[ind])
    # The stdev profile
    f = interp1d(zwoa[~ma.getmaskarray(sd)].compressed(), sd.compressed())
    sd_interp = ma.masked_all(depth.shape)
    sd_interp[ind] = f(depth[ind])
    output = {'woa_an': mn_interp, 'woa_sd': sd_interp}
    return output
"resource": ""
} |
def iterator(cls, path=None, objtype=None, query=None, page_size=1000, **kwargs):
    """Linear time, constant memory, iterator for a mongo collection.

    @param path: the "database.collection" path to query; None uses the
        class PATH or, failing that, OBJTYPE's PATH
    @param objtype: object type to wrap results in; None uses cls.OBJTYPE
    @param query: dict of key-value pairs results must match; when None,
        **kwargs is used as the query
    @param page_size: number of items fetched per page
    @param **kwargs: query parameters when ``query`` is None
    """
    objtype = objtype or cls.OBJTYPE
    path = path or cls.PATH
    db = objtype.db(path)
    query = query if query else kwargs
    # page through the collection ordered by _id, resuming each page
    # after the last _id seen
    batch = list(db.find(query).sort(ID_KEY, ASCENDING).limit(page_size))
    while batch:
        for record in batch:
            yield objtype(path=path, _new_object=record)
        query[ID_KEY] = {GT: batch[-1][ID_KEY]}
        batch = list(db.find(query).sort(ID_KEY, ASCENDING).limit(page_size))
"resource": ""
} |
def pair_hmm_align_unaligned_seqs(seqs, moltype=DNA_cogent, params=None):
    """
    Checks parameters for pairwise alignment, returns alignment.

    Code from Greg Caporaso.

    params: optional dict with 'gap_open' (default 5), 'gap_extend'
        (default 2) and 'score_matrix' (default: DNA match=1,
        transition=-1, transversion=-1).  Defaults to None instead of a
        shared mutable ``{}``.
    """
    if params is None:
        params = {}
    seqs = LoadSeqs(data=seqs, moltype=moltype, aligned=False)
    try:
        s1, s2 = seqs.values()
    except ValueError:
        raise ValueError(
            "Pairwise aligning of seqs requires exactly two seqs.")
    gap_open = params.get('gap_open', 5)
    gap_extend = params.get('gap_extend', 2)
    if 'score_matrix' in params:
        score_matrix = params['score_matrix']
    else:
        score_matrix = make_dna_scoring_dict(
            match=1, transition=-1, transversion=-1)
    return local_pairwise(s1, s2, score_matrix, gap_open, gap_extend)
"resource": ""
} |
async def viewers_js(request):
    '''
    Serve a single JS bundle for the installed viewers.

    The viewers configured in settings are described by a JSON resource
    which the conversion infrastructure turns into a minified JS bundle.
    A cached bundle is served directly; otherwise the conversion is
    queued and a placeholder script is returned.
    '''
    # Single bundle pipeline:
    # BytesResource -> ViewerNodePackageBuilder -> nodepackage -> ... -> min.js
    response = singletons.server.response
    # JSON-encoded description of the viewers needed for this bundle
    viewers_resource = singletons.viewers.get_resource()
    url_string = viewers_resource.url_string
    target_ts = TypeString('min.js')  # get a minified JS bundle
    target_resource = TypedResource(url_string, target_ts)
    js_headers = {'Content-Type': 'application/javascript'}
    if target_resource.cache_exists():
        return await response.file(target_resource.cache_path,
                                   headers=js_headers)
    # Not cached yet: persist the descriptor and kick off conversion.
    if not viewers_resource.cache_exists():
        viewers_resource.save()
    # Queue a single task that in turn queues the conversion process.
    await singletons.workers.async_enqueue_sync(
        enqueue_conversion_path,
        url_string,
        str(target_ts),
        singletons.workers.enqueue_convert
    )
    return response.text(NOT_LOADED_JS, headers=js_headers)
"resource": ""
} |
def get_objects(self, uri, _oid=None, _start=None, _end=None,
                load_kwargs=None, **kwargs):
    '''
    Load and transform csv data into a list of dictionaries.
    Each row in the csv will result in one dictionary in the list.

    :param uri: uri (file://, http(s)://) of csv file to load
    :param _oid:
        column or func to apply to map _oid in all resulting objects
    :param _start:
        column or func to apply to map _start in all resulting objects
    :param _end:
        column or func to apply to map _end in all resulting objects
    :param kwargs: kwargs to pass to pandas.read_csv method

    _start and _oid arguments can be a column name or a function
    which accepts a single argument -- the row being extracted.
    A function is applied per row and its result assigned; any other
    value is assigned as-is to every object.
    '''
    load_kwargs = load_kwargs or {}
    objects = load(path=uri, filetype='csv', **load_kwargs)
    counter = itertools.count(1)
    now = utcnow()

    def _default_oid(row):
        # BUG FIX: next(counter) works on Python 2 and 3;
        # counter.next() was Python-2-only.
        return next(counter)

    _oid = _oid or _default_oid
    _start = _start or now
    _end = _end or None

    def is_callable(value):
        # classes (type(value) is type) and anything exposing __call__
        return type(value) is type or hasattr(value, '__call__')

    for obj in objects:
        obj['_oid'] = _oid(obj) if is_callable(_oid) else _oid
        obj['_start'] = _start(obj) if is_callable(_start) else _start
        obj['_end'] = _end(obj) if is_callable(_end) else _end
        self.container.add(obj)
    return super(Rows, self).get_objects(**kwargs)
"resource": ""
} |
def vsearch_dereplicate_exact_seqs(
        fasta_filepath,
        output_filepath,
        output_uc=False,
        working_dir=None,
        strand="both",
        maxuniquesize=None,
        minuniquesize=None,
        sizein=False,
        sizeout=True,
        log_name="derep.log",
        HALT_EXEC=False):
    """Generate clusters and a fasta file of dereplicated subsequences.

    Parameters
    ----------
    fasta_filepath : string
        input fasta file to be dereplicated
    output_filepath : string
        where the dereplicated sequences are written
    output_uc : boolean, optional
        also output dereplication results in a uclust-like format
    working_dir : string, optional
        directory for intermediate output (defaults to the directory
        of output_filepath)
    strand : string, optional
        check 'both' strands (default) or the 'plus' strand only
    maxuniquesize / minuniquesize : integer, optional
        discard sequences with an abundance above / below the bound
    sizein : boolean, optional
        honour "[>;]size=integer[;]" abundance annotations in the input
    sizeout : boolean, optional
        add ";size=integer;" abundance annotations to the output
    log_name : string, optional
        log filename
    HALT_EXEC : boolean, optional
        used for debugging the app controller

    Returns
    -------
    (output_filepath, uc_filepath or None, log_filepath)
    """
    # write all vsearch output files to the same directory as
    # output_filepath when working_dir is not specified
    if not working_dir:
        working_dir = dirname(abspath(output_filepath))
    app = Vsearch(WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
    log_filepath = join(working_dir, log_name)
    uc_filepath = None
    if output_uc:
        # NOTE(review): root_name is absolute, so join() discards
        # working_dir and the .uc file lands next to output_filepath;
        # looks unintended but behaviour is preserved — confirm before
        # changing.
        root_name = splitext(abspath(output_filepath))[0]
        uc_filepath = join(working_dir, '%s.uc' % root_name)
        app.Parameters['--uc'].on(uc_filepath)
    if maxuniquesize:
        app.Parameters['--maxuniquesize'].on(maxuniquesize)
    if minuniquesize:
        app.Parameters['--minuniquesize'].on(minuniquesize)
    if sizein:
        app.Parameters['--sizein'].on()
    if sizeout:
        app.Parameters['--sizeout'].on()
    if strand in ("both", "plus"):
        app.Parameters['--strand'].on(strand)
    else:
        # BUG FIX: the original implicit string concatenation produced
        # "...accepts only 'both'or 'plus'..." (missing space).
        raise ValueError("Option --strand accepts only 'both' "
                         "or 'plus' values")
    app.Parameters['--derep_fulllength'].on(fasta_filepath)
    app.Parameters['--output'].on(output_filepath)
    app.Parameters['--log'].on(log_filepath)
    app()  # (result object was previously bound but unused)
    return output_filepath, uc_filepath, log_filepath
"resource": ""
} |
def init(cls, site):
    """
    Put site settings in the header of the script, render the provider's
    config files (from the git template when available, otherwise the
    built-in configs), then print setup instructions and run the
    'init_after' hooks.
    """
    import os
    bash_header = ""
    for k, v in site.items():
        bash_header += "%s=%s" % (k.upper(), v)
        bash_header += '\n'
    site['bash_header'] = bash_header
    # TODO: execute before_deploy
    # P.S. running init_before seems impossible, because the file hasn't
    # been rendered yet.
    if cls.git_template:
        # render from the git repo template
        # BUG FIX: Python-2-only print statements replaced with
        # parenthesized calls (valid on both Python 2 and 3).
        print("Cloning template files...")
        repo_local_copy = utils.clone_git_repo(cls.git_template_url)
        print("Rendering files from templates...")
        target_path = os.getcwd()
        settings_dir = '/'.join(site['django_settings'].split('.')[:-1])
        site['project_name'] = settings_dir.replace('/', '.')
        settings_dir_path = target_path
        if settings_dir:
            settings_dir_path += '/' + settings_dir
        utils.render_from_repo(repo_local_copy, target_path, site,
                               settings_dir_path)
    else:
        cls._create_configs(site)
    print(cls.setup_instructions)
    # TODO: execute after_deploy
    run_hooks('init_after')
"resource": ""
} |
def _render_config(cls, dest, template_name, template_args):
    """
    Renders template_name with template_args and writes the result to
    dest.  Used for platform-specific configurations; the 'pyversion'
    entry is mapped through cls.PYVERSIONS before rendering.
    """
    args = template_args.copy()  # don't mutate the caller's dict
    # Substitute values here
    args['pyversion'] = cls.PYVERSIONS[args['pyversion']]
    rendered = template_env.get_template(template_name).render(**args)
    _write_file(dest, rendered)
"resource": ""
} |
def parse_args_to_action_args(self, argv=None):
    '''
    Parses argv and returns the (action, parsed-args) pair for the
    selected subcommand.
    '''
    parsed = self.parse_args(argv)
    action = self.subcommands[parsed.subcommand][1]
    return action, parsed
"resource": ""
} |
def register_subparser(self, action, name, description='', arguments=None):
    '''
    Registers a new subcommand with a given function action.

    arguments maps a flag (or tuple of flags) to the kwargs for the
    corresponding argparse add_argument call.  The action is wrapped
    via coerce_to_synchronous if it is synchronous.
    (``arguments`` previously defaulted to a mutable ``{}``.)
    '''
    if arguments is None:
        arguments = {}
    action = coerce_to_synchronous(action)
    opts = []
    for flags, kwargs in arguments.items():
        if isinstance(flags, str):
            flags = (flags,)
        opts.append((flags, kwargs))
    self.subcommands[name] = (description, action, opts)
"resource": ""
} |
def subcommand(self, description='', arguments=None):
    '''
    Decorator for quickly adding subcommands to the omnic CLI.

    Underscores in the decorated function's name become dashes in the
    subcommand name; the function itself is returned unchanged.
    (``arguments`` previously defaulted to a mutable ``{}``.)
    '''
    if arguments is None:
        arguments = {}

    def decorator(func):
        self.register_subparser(
            func,
            func.__name__.replace('_', '-'),
            description=description,
            arguments=arguments,
        )
        return func
    return decorator
"resource": ""
} |
def print(self, *args, **kwargs):
    '''
    Drop-in replacement for the builtin 'print' that stays silent
    unless the most recently parsed args enabled verbose mode.
    '''
    last = self._last_args
    if last and last.verbose:
        print(*args, **kwargs)
"resource": ""
} |
def process_uclust_pw_alignment_results(fasta_pairs_lines, uc_lines):
    """Yield pairwise alignment records from paired uclust output files.

    :param fasta_pairs_lines: lines of the --fastapairs fasta output;
        consecutive record pairs are (query, target) alignments
    :param uc_lines: lines of the .uc hit table; only 'H' (hit) records
        are consumed, expected in the same order as the fasta pairs

    Yields (query_id, target_id, aligned_query, aligned_target,
    percent_id) tuples.  Minus-strand hits are reverse-complemented and
    the query id is suffixed with ' RC'.  Raises UclustParseError on an
    unrecognized strand field or when the two files disagree on order.
    """
    alignments = get_next_two_fasta_records(fasta_pairs_lines)
    for hit in get_next_record_type(uc_lines, 'H'):
        # uc 'H' record layout (observed below): field 3 = percent id,
        # 4 = strand, 8 = query id, 9 = target id.
        matching_strand = hit[4]
        if matching_strand == '-':
            strand_id = '-'
            target_rev_match = True
        elif matching_strand == '+':
            strand_id = '+'
            target_rev_match = False
        elif matching_strand == '.':
            # protein sequence, so no strand information
            strand_id = ''
            target_rev_match = False
        else:
            raise UclustParseError("Unknown strand type: %s" % matching_strand)
        uc_query_id = hit[8]
        uc_target_id = hit[9]
        percent_id = float(hit[3])
        # Consume the next (query, target) alignment pair in lockstep
        # with the uc hits -- the files must be in the same order.
        fasta_pair = alignments.next()
        fasta_query_id = fasta_pair[0][0]
        aligned_query = fasta_pair[0][1]
        if fasta_query_id != uc_query_id:
            raise UclustParseError("Order of fasta and uc files do not match." +
                                   " Got query %s but expected %s." %
                                   (fasta_query_id, uc_query_id))
        fasta_target_id = fasta_pair[1][0]
        aligned_target = fasta_pair[1][1]
        # Target ids in the fasta pairs file carry the strand suffix.
        if fasta_target_id != uc_target_id + strand_id:
            raise UclustParseError("Order of fasta and uc files do not match." +
                                   " Got target %s but expected %s." %
                                   (fasta_target_id, uc_target_id + strand_id))
        if target_rev_match:
            # Reverse-complement both sides so they read forward-strand.
            query_id = uc_query_id + ' RC'
            aligned_query = DNA.rc(aligned_query)
            target_id = uc_target_id
            aligned_target = DNA.rc(aligned_target)
        else:
            query_id = uc_query_id
            aligned_query = aligned_query
            target_id = uc_target_id
            aligned_target = aligned_target
        yield (query_id, target_id, aligned_query, aligned_target, percent_id)
"resource": ""
} |
def uclust_search_and_align_from_fasta_filepath(
        query_fasta_filepath,
        subject_fasta_filepath,
        percent_ID=0.75,
        enable_rev_strand_matching=True,
        max_accepts=8,
        max_rejects=32,
        tmp_dir=gettempdir(),
        HALT_EXEC=False):
    """Query seqs against a subject fasta using uclust; yield the global
    pairwise alignment of each best match.

    :param query_fasta_filepath: fasta file of query sequences
    :param subject_fasta_filepath: fasta library to search against
    :param percent_ID: min fractional identity to count a match (--id)
    :param enable_rev_strand_matching: also try reverse complements
    :param max_accepts: good matches to collect before ending a search
    :param max_rejects: misses to tolerate before ending a search
    :param tmp_dir: directory for intermediate files (removed on exit)
    :param HALT_EXEC: debugging passthrough to the app controller

    Yields (query_id, target_id, aligned_query, aligned_target,
    percent_id) tuples via process_uclust_pw_alignment_results.
    """
    # Explanation of parameter settings
    #  id - min percent id to count a match
    #  maxaccepts = 8 , searches for best match rather than first match
    #                   (0 => infinite accepts, or good matches before
    #                    quitting search)
    #  maxaccepts = 32,
    #  libonly = True , does not add sequences to the library if they don't
    #                   match something there already. this effectively makes
    #                   uclust a search tool rather than a clustering tool
    params = {'--id': percent_ID,
              '--maxaccepts': max_accepts,
              '--maxrejects': max_rejects,
              '--libonly': True,
              '--lib': subject_fasta_filepath,
              '--tmpdir': tmp_dir}
    if enable_rev_strand_matching:
        params['--rev'] = True
    # instantiate the application controller
    app = Uclust(params,
                 TmpDir=tmp_dir, HALT_EXEC=HALT_EXEC)
    # apply uclust
    _, alignment_filepath = mkstemp(dir=tmp_dir, prefix='uclust_alignments',
                                    suffix='.fasta')
    _, uc_filepath = mkstemp(dir=tmp_dir, prefix='uclust_results',
                             suffix='.uc')
    input_data = {'--input': query_fasta_filepath,
                  '--fastapairs': alignment_filepath,
                  '--uc': uc_filepath}
    app_result = app(input_data)
    # yield the pairwise alignments; bail out early if the consumer
    # closes the generator (GeneratorExit), still reaching cleanup below
    for result in process_uclust_pw_alignment_results(
            app_result['PairwiseAlignments'], app_result['ClusterFile']):
        try:
            yield result
        except GeneratorExit:
            break
    # clean up the temp files that were generated
    app_result.cleanUp()
    return
"resource": ""
} |
def uclust_cluster_from_sorted_fasta_filepath(
        fasta_filepath,
        uc_save_filepath=None,
        percent_ID=0.97,
        max_accepts=1,
        max_rejects=8,
        stepwords=8,
        word_length=8,
        optimal=False,
        exact=False,
        suppress_sort=False,
        enable_rev_strand_matching=False,
        subject_fasta_filepath=None,
        suppress_new_clusters=False,
        stable_sort=False,
        tmp_dir=gettempdir(),
        HALT_EXEC=False):
    """Run uclust clustering on a length-sorted fasta file and return
    the application result (which exposes the generated .uc file).
    """
    output_filepath = uc_save_filepath
    # Fall back to a temporary .uc output path when none was supplied.
    if not output_filepath:
        _, output_filepath = mkstemp(dir=tmp_dir, prefix='uclust_clusters',
                                     suffix='.uc')
    app = Uclust({'--id': percent_ID,
                  '--maxaccepts': max_accepts,
                  '--maxrejects': max_rejects,
                  '--stepwords': stepwords,
                  '--w': word_length,
                  '--tmpdir': tmp_dir},
                 TmpDir=tmp_dir, HALT_EXEC=HALT_EXEC)
    # Turn on whichever simple boolean switches the caller requested.
    switches = [('--rev', enable_rev_strand_matching),
                ('--optimal', optimal),
                ('--exact', exact),
                ('--usersort', suppress_sort),
                ('--libonly', suppress_new_clusters),
                ('--stable_sort', stable_sort)]
    for flag, enabled in switches:
        if enabled:
            app.Parameters[flag].on()
    if subject_fasta_filepath:
        app.Parameters['--lib'].on(subject_fasta_filepath)
    return app({'--input': fasta_filepath, '--uc': output_filepath})
"resource": ""
} |
def get_clusters_from_fasta_filepath(
        fasta_filepath,
        original_fasta_path,
        percent_ID=0.97,
        max_accepts=1,
        max_rejects=8,
        stepwords=8,
        word_length=8,
        optimal=False,
        exact=False,
        suppress_sort=False,
        output_dir=None,
        enable_rev_strand_matching=False,
        subject_fasta_filepath=None,
        suppress_new_clusters=False,
        return_cluster_maps=False,
        stable_sort=False,
        tmp_dir=gettempdir(),
        save_uc_files=True,
        HALT_EXEC=False):
    """ Main convenience wrapper for using uclust to generate cluster files

    A source fasta file is required for the fasta_filepath. This will be
    sorted to be in order of longest to shortest length sequences. Following
    this, the sorted fasta file is used to generate a cluster file in the
    uclust (.uc) format.  Finally this file is parsed and returned as a list
    of lists, where each sublist is a cluster of sequences.  If save_uc_files
    is True the .uc file is preserved (path chosen via get_output_filepaths),
    otherwise all files created are temporary and are deleted before
    returning.

    The percent_ID parameter specifies the percent identity for a cluster,
    i.e., if 99% were the parameter, all sequences that were 99% identical
    would be grouped as a cluster.

    Returns (clusters, failures, seeds); clusters is the id->members map
    when return_cluster_maps is True, otherwise just its values.
    """
    # None => let the sorter pick a temp file for the sorted fasta.
    # (Dead locals for never-created cd-hit/.clstr intermediates were
    # removed from the original implementation.)
    fasta_output_filepath = None
    if output_dir and not output_dir.endswith('/'):
        output_dir += '/'
    if save_uc_files:
        uc_save_filepath = get_output_filepaths(
            output_dir,
            original_fasta_path)
    else:
        uc_save_filepath = None
    # Error check in case any app controller fails
    files_to_remove = []
    try:
        if not suppress_sort:
            # Sort fasta input file from largest to smallest sequence
            sort_fasta = uclust_fasta_sort_from_filepath(fasta_filepath,
                output_filepath=fasta_output_filepath)
            # Get sorted fasta name from application wrapper
            sorted_fasta_filepath = sort_fasta['Output'].name
            files_to_remove.append(sorted_fasta_filepath)
        else:
            sort_fasta = None
            sorted_fasta_filepath = fasta_filepath
        # Generate uclust cluster file (.uc format)
        uclust_cluster = uclust_cluster_from_sorted_fasta_filepath(
            sorted_fasta_filepath,
            uc_save_filepath,
            percent_ID=percent_ID,
            max_accepts=max_accepts,
            max_rejects=max_rejects,
            stepwords=stepwords,
            word_length=word_length,
            optimal=optimal,
            exact=exact,
            suppress_sort=suppress_sort,
            enable_rev_strand_matching=enable_rev_strand_matching,
            subject_fasta_filepath=subject_fasta_filepath,
            suppress_new_clusters=suppress_new_clusters,
            stable_sort=stable_sort,
            tmp_dir=tmp_dir,
            HALT_EXEC=HALT_EXEC)
        # Clustering succeeded; drop the intermediate sorted fasta.
        remove_files(files_to_remove)
    except ApplicationError:
        remove_files(files_to_remove)
        raise ApplicationError('Error running uclust. Possible causes are '
                               'unsupported version (current supported version is v1.2.22) is installed or '
                               'improperly formatted input file was provided')
    except ApplicationNotFoundError:
        remove_files(files_to_remove)
        raise ApplicationNotFoundError('uclust not found, is it properly ' +
                                       'installed?')
    # Get list of lists for each cluster
    clusters, failures, seeds = \
        clusters_from_uc_file(uclust_cluster['ClusterFile'])
    # Remove temp files unless user specifies output filepath
    if not save_uc_files:
        uclust_cluster.cleanUp()
    if return_cluster_maps:
        return clusters, failures, seeds
    else:
        return clusters.values(), failures, seeds
"resource": ""
} |
def _activity_import_doc(self, time_doc, activities):
    '''
    Import activities for a single document into the timeline.

    Walks the activity log backwards from *time_doc*, synthesizing one
    historical version per change-time and returning the whole batch
    (newest first).  Returns [] when nothing changed and only a single
    unchanged version exists.

    :param time_doc: current object version; '_start' must be an epoch
        timestamp
    :param activities: iterable of (when, field, removed, added) tuples,
        where `when` is comparable to '_start' (epoch float expected)
    '''
    batch_updates = [time_doc]
    # We want to consider only activities that happend before time_doc
    # do not move this, because time_doc._start changes
    # time_doc['_start'] is a timestamp, whereas act[0] is a datetime
    # we need to be sure to convert act[0] (when) to timestamp!
    td_start = time_doc['_start']
    activities = filter(lambda act: (act[0] < td_start and
                                     act[1] in time_doc), activities)
    creation_field = self.lconfig.get('cfield')
    # make sure that activities are sorted by when descending
    activities.sort(reverse=True, key=lambda o: o[0])
    new_doc = {}
    for when, field, removed, added in activities:
        last_doc = batch_updates.pop()
        # check if this activity happened at the same time as the last one,
        # if it did then we need to group them together
        if last_doc['_end'] == when:
            new_doc = deepcopy(last_doc)
            last_doc = batch_updates.pop()
        else:
            new_doc = deepcopy(last_doc)
            new_doc['_start'] = when
            new_doc['_end'] = when
            last_doc['_start'] = when
        last_val = last_doc[field]
        # Reverse the change: derive the field's earlier value.
        new_val, inconsistent = self._activity_backwards(new_doc[field],
                                                        removed, added)
        new_doc[field] = new_val
        # Check if the object has the correct field value.
        if inconsistent:
            self._log_inconsistency(last_doc, last_val, field,
                                    removed, added, when)
            new_doc['_e'] = {} if not new_doc.get('_e') else new_doc['_e']
            # set corrupted field value to the value that was added
            # and continue processing as if that issue didn't exist
            new_doc['_e'][field] = added
        # Add the objects to the batch
        batch_updates.extend([last_doc, new_doc])
    # try to set the _start of the first version to the creation time
    try:
        # set start to creation time if available
        last_doc = batch_updates[-1]
        if creation_field:
            # again, we expect _start to be epoch float...
            creation_ts = dt2ts(last_doc[creation_field])
            if creation_ts < last_doc['_start']:
                last_doc['_start'] = creation_ts
            elif len(batch_updates) == 1:
                # we have only one version, that we did not change
                return []
            else:
                pass  # leave as-is
    except Exception as e:
        logger.error('Error updating creation time; %s' % e)
    return batch_updates
"resource": ""
} |
def get_changed_oids(self, last_update=None):
    '''
    Return oids of objects changed since *last_update*, determined by
    comparing the cube's `delta_mtime` column(s) against that time.

    Cubes where a single column can't express "changed since" should
    override this method and set `delta_mtime` to True instead; in that
    case this implementation is not expected to be used.

    :param last_update: datetime string used as 'changed since' date
    '''
    columns = self.lconfig.get('delta_mtime', [])
    # Without both change-tracking columns and a reference time there
    # is nothing to query for.
    if not (columns and last_update):
        return []
    clauses = ["%s >= %s" % (col, last_update)
               for col in str2list(columns)]
    return self.sql_get_oids(clauses)
"resource": ""
} |
def get_objects(self, force=None, last_update=None, flush=False):
    '''
    Extract routine for SQL based cubes (current state only, no
    full activity history).

    :param force: query all objects (True) or only those passed as list
    :param last_update: manual override for the 'changed since' date
    :param flush: persist imported objects immediately
    '''
    return self._run_object_import(force=force, last_update=last_update,
                                   flush=flush, full_history=False)
"resource": ""
} |
def get_new_oids(self):
    '''
    Return a sorted list of unique oids not yet extracted: a diff of
    the source database's oids against what the cube already holds
    (everything greater than the last known _oid).
    '''
    table = self.lconfig.get('table')
    _oid = self.lconfig.get('_oid')
    if is_array(_oid):
        # use the actual db column, not the field alias
        _oid = _oid[0]
    last_id = self.container.get_last_field(field='_oid')
    if not last_id:
        return []
    try:
        # numeric oids compare without quoting
        last_id = float(last_id)
    except (TypeError, ValueError):
        # not numeric; assume a unicode value and quote it
        where = "%s.%s > '%s'" % (table, _oid, last_id)
    else:
        where = "%s.%s > %s" % (table, _oid, last_id)
    return self.sql_get_oids(where)
"resource": ""
} |
def get_full_history(self, force=None, last_update=None, flush=False):
    '''
    Import the complete version history for each object.

    Fields like "last_updated" aren't activity-tracked, so their values
    depend on when the import runs; hash values therefore differ
    between runs and all existing object states must be removed and
    imported fresh.
    '''
    return self._run_object_import(force=force, last_update=last_update,
                                   flush=flush, full_history=True)
"resource": ""
} |
def sql_get_oids(self, where=None):
    '''
    Query the source database for a sorted, distinct list of oids,
    optionally restricted by one or more WHERE clauses (OR'd together).
    '''
    config = self.lconfig
    table = config.get('table')
    db = config.get('db_schema_name') or config.get('db')
    _oid = config.get('_oid')
    if is_array(_oid):
        _oid = _oid[0]  # use the db column, not the field alias
    sql = 'SELECT DISTINCT %s.%s FROM %s.%s' % (table, _oid, db, table)
    if where:
        clauses = [where] if isinstance(where, basestring) else list(where)
        sql += ' WHERE %s' % ' OR '.join(clauses)
    return sorted(row[_oid] for row in self._load_sql(sql))
"resource": ""
} |
def getTmpFilename(self, tmp_dir="/tmp", prefix='tmp', suffix='.fasta',
                   include_class_id=False, result_constructor=FilePath):
    """Return a temp filename carrying a .fasta suffix, since pplacer
    requires its input files to use that suffix."""
    return super(Pplacer, self).getTmpFilename(
        tmp_dir=tmp_dir,
        prefix=prefix,
        suffix=suffix,
        include_class_id=include_class_id,
        result_constructor=result_constructor)
"resource": ""
} |
def _get_result_paths(self, data):
    """ Map pplacer's expected output file to a ResultPath.

    pplacer writes <input basename>.jplace into --out-dir.
    """
    output_dir = self.Parameters['--out-dir'].Value
    base_name = splitext(split(self._input_filename)[-1])[0]
    return {'json': ResultPath(Path=join(output_dir,
                                         base_name + '.jplace'))}
"resource": ""
} |
def chi_squared(*choices):
    """Return the chi-squared statistic for a set of experiment choices.

    Each choice must expose `plays` and `rewards` counts.  Expected
    success/failure counts per choice are derived from the pooled
    success rate across all choices; `max(..., 1)` guards against
    division by zero for unplayed choices.
    """
    def term(expected, observed):
        # One chi-squared cell: (E - O)^2 / E, clamped to avoid 0-division.
        return float((expected - observed) ** 2) / max(expected, 1)

    total_plays = sum(c.plays for c in choices)
    total_rewards = sum(c.rewards for c in choices)
    mean_success_rate = float(total_rewards) / max(total_plays, 1)
    mean_failure_rate = 1 - mean_success_rate
    return sum(
        term(mean_success_rate * c.plays, c.rewards) +
        term(mean_failure_rate * c.plays, c.plays - c.rewards)
        for c in choices)
"resource": ""
} |
def get_experiments(redis, active=True):
    """Return Experiment objects for every known experiment.

    :param redis: redis client used for lookups
    :param active: list active experiments when True, archived otherwise
    """
    if active:
        key = ACTIVE_EXPERIMENTS_REDIS_KEY
    else:
        key = ARCHIVED_EXPERIMENTS_REDIS_KEY
    experiments = []
    for name in redis.smembers(key):
        experiments.append(Experiment(redis, escape.to_unicode(name)))
    return experiments
"resource": ""
} |
def choices(self):
    """Lazily build and cache the ExperimentChoice list.

    The cache (self._choices) is reset to None by mutating methods
    such as add_play, forcing a rebuild on next access.
    """
    # Identity check -- `== None` invites __eq__ surprises.
    if self._choices is None:
        self._choices = [ExperimentChoice(self, choice_name)
                         for choice_name in self.choice_names]
    return self._choices
"resource": ""
} |
def add_play(self, choice, count=1):
    """Record *count* plays for the given choice and drop the cache."""
    key = EXPERIMENT_REDIS_KEY_TEMPLATE % self.name
    self.redis.hincrby(key, "%s:plays" % choice, count)
    # Invalidate so the choices property reflects the new counts.
    self._choices = None
"resource": ""
} |
def compute_default_choice(self):
    """Pick the best-performing choice, persist it as the experiment's
    default, refresh local state, and return it (None if no choices)."""
    options = self.choices
    if not options:
        return None
    best = max(options, key=lambda c: c.performance)
    self.redis.hset(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name,
                    "default-choice", best.name)
    self.refresh()
    return best
"resource": ""
} |
async def await_all():
    '''
    Drain every pending asyncio task, tolerating the interpreter's
    complaint about awaiting the currently running (parent) task.
    '''
    for task in asyncio.Task.all_tasks():
        try:
            await task
        except RuntimeError as e:
            # Python 3.5.x: Error if attempting to await parent task
            if 'Task cannot await on itself' not in str(e):
                raise e
        except AssertionError as e:
            # Python 3.6.x: Error if attempting to await parent task
            if 'yield from wasn\'t used with future' not in str(e):
                raise e
"resource": ""
} |
def coerce_to_synchronous(func):
    '''
    Return *func* unchanged when it is a plain function; when it is a
    coroutine function, return a wrapper that runs it to completion on
    an event loop so it can be invoked from synchronous code.
    '''
    if not inspect.iscoroutinefunction(func):
        return func

    @functools.wraps(func)
    def runner(*args, **kwargs):
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(func(*args, **kwargs))
        finally:
            loop.close()
    return runner
"resource": ""
} |
def _get_matchable_segments(segments):
    """
    Depth-first traversal yielding every MatchableSegment nested under
    *segments*.  Scanning of a level stops as soon as a Token is seen,
    since tokens may not sit next to segments.
    """
    for child in segments:
        if isinstance(child, Token):
            break  # No tokens allowed next to segments
        if isinstance(child, Segment):
            if isinstance(child, MatchableSegment):
                yield child
            for nested in _get_matchable_segments(child):
                yield nested
"resource": ""
} |
def _get_response(self, parse_result=True):
    """Block until the VW subprocess emits a complete line; return a
    VWResult parsed from it, or None when parse_result is False."""
    # expect_exact is faster than plain expect and fine for our purpose
    # (http://pexpect.readthedocs.org/en/latest/api/pexpect.html#pexpect.spawn.expect_exact)
    # searchwindowsize and other attributes may also affect efficiency
    self.vw_process.expect_exact('\r\n', searchwindowsize=-1)
    if not parse_result:
        return None
    raw = self.vw_process.before
    return VWResult(raw, active_mode=self.active_mode)
"resource": ""
} |
def send_example(self, *args, **kwargs):
    """Send one labeled or unlabeled example to the VW instance.

    The keyword argument 'parse_result' (default True) is consumed
    here; when False the VW response is ignored and None is returned.
    All remaining arguments go to make_line()/send_line().

    Returns a VWResult object (or None).
    """
    parse_result = kwargs.pop('parse_result', True)
    return self.send_line(self.make_line(*args, **kwargs),
                          parse_result=parse_result)
"resource": ""
} |
def save_model(self, model_filename):
    """Ask the running VW subprocess (via a 'save_<file>|' command
    example) to serialize its current model to *model_filename*
    immediately.
    """
    self.vw_process.sendline("save_{}|".format(model_filename))
"resource": ""
} |
def route_path(self, path):
    '''
    Hacky function that's presently only useful for testing, gets the view
    that handles the given path, returned as ([match_groups], view).
    Returns ([], None) when no service or url pattern matches.

    Later may be incorporated into the URL routing
    '''
    path = path.strip('/')
    # First path component selects the service; the remainder is routed
    # against that service's url patterns.
    name, _, subpath = path.partition('/')
    for service in singletons.settings.load_all('SERVICES'):
        if service.SERVICE_NAME == name:  # Found service!
            break
    else:
        return [], None  # found no service
    for partial_url, view in service.urls.items():
        partial_url = partial_url.strip('/')
        if isinstance(view, str):
            # String views name a method on the service object.
            view = getattr(service, view)
        # '<param>' placeholders become regex capture groups.
        regexp = re.sub(r'<[^>]+>', r'([^/]+)', partial_url)
        matches = re.findall('^%s$' % regexp, subpath)
        if matches:
            if '(' not in regexp:
                # No capture groups: findall returned the full match
                # string, not view arguments, so report none.
                matches = []
            return matches, view
    return [], None
"resource": ""
} |
def pair_looper(iterator):
    '''
    Yield each adjacent pair of items from *iterator*:
    [a, b, c] -> (a, b), (b, c).  Empty and single-item iterables
    yield nothing.
    '''
    # Local sentinel: unlike a shared module-level marker, it can never
    # collide with a value produced by the iterator itself.
    sentinel = object()
    prev = sentinel
    for item in iterator:
        if prev is not sentinel:
            yield (prev, item)
        prev = item
"resource": ""
} |
def clear_stale_pids(pids, pid_dir='/tmp', prefix='', multi=False):
    '''
    Check for and remove pidfiles whose pid has no corresponding process
    under /proc; return the list of pids that are still running.

    :param pids: single pid or list of pids to check
    :param pid_dir: directory holding the pidfiles
    :param prefix: pidfile basename prefix; defaults to the pid itself
    :param multi: pidfiles are named '<prefix><pid>.pid' rather than
        '<prefix>.pid'
    '''
    if isinstance(pids, (int, float, long)):
        pids = [pids]
    pids = str2list(pids, map_=unicode)
    # /proc entry names are the live pids (Linux-only assumption).
    procs = map(unicode, os.listdir('/proc'))
    running = [pid for pid in pids if pid in procs]
    logger.warn(
        "Found %s pids running: %s" % (len(running),
                                       running))
    prefix = prefix.rstrip('.') if prefix else None
    for pid in pids:
        if prefix:
            _prefix = prefix
        else:
            _prefix = unicode(pid)
        # remove non-running procs
        if pid in running:
            continue
        if multi:
            pid_file = '%s%s.pid' % (_prefix, pid)
        else:
            pid_file = '%s.pid' % (_prefix)
        path = os.path.join(pid_dir, pid_file)
        if os.path.exists(path):
            logger.debug("Removing pidfile: %s" % path)
            try:
                remove_file(path)
            except OSError as e:
                # best-effort cleanup; a vanished/locked file isn't fatal
                logger.warn(e)
    return running
"resource": ""
} |
def cube_pkg_mod_cls(cube):
    '''
    Split a cube slug into its (package, module, ClassName) parts for
    dynamic importing, per the `Metrique Cube Naming Convention`:
    'pkg_mod' -> ('pkg', 'mod', 'Mod'), eg tw_tweet -> tw, tweet, Tweet

    :param cube: cube name to use when searching for pkg.mod.class
    '''
    parts = cube.split('_')
    pkg, rest = parts[0], parts[1:]
    mod = '_'.join(rest)
    # CamelCase the class name from the remaining words.
    _cls = ''.join(word[0].upper() + word[1:] for word in rest)
    return pkg, mod, _cls
"resource": ""
} |
def debug_setup(logger=None, level=None, log2file=None,
                log_file=None, log_format=None, log_dir=None,
                log2stdout=None, truncate=False):
    '''
    Local object instance logger setup.

    Verbosity levels are determined as such::

        if level in [-1, False]:
            logger.setLevel(logging.WARN)
        elif level in [0, None]:
            logger.setLevel(logging.INFO)
        elif level in [True, 1, 2]:
            logger.setLevel(logging.DEBUG)

    If (level == 2) `logging.DEBUG` will be set even for
    the "root logger".

    Configuration options available for customized logger behavior:
        * debug (bool)
        * log2stdout (bool)
        * log2file (bool)
        * log_file (path)
    '''
    log2stdout = False if log2stdout is None else log2stdout
    _log_format = "%(levelname)s.%(name)s.%(process)s:%(asctime)s:%(message)s"
    log_format = log_format or _log_format
    # A plain string format is wrapped into a Formatter; a Formatter
    # instance is accepted as-is.
    if isinstance(log_format, basestring):
        log_format = logging.Formatter(log_format, "%Y%m%dT%H%M%S")
    log2file = True if log2file is None else log2file
    logger = logger or 'metrique'
    if isinstance(logger, basestring):
        logger = logging.getLogger(logger)
    else:
        logger = logger or logging.getLogger(logger)
    # Start clean so repeated setup calls don't duplicate output.
    logger.propagate = 0
    logger.handlers = []
    if log2file:
        log_dir = log_dir or LOGS_DIR
        log_file = log_file or 'metrique'
        log_file = os.path.join(log_dir, log_file)
        if truncate:
            # clear the existing data before writing (truncate)
            open(log_file, 'w+').close()
        hdlr = logging.FileHandler(log_file)
        hdlr.setFormatter(log_format)
        logger.addHandler(hdlr)
    else:
        # no file logging requested; force stdout so output isn't lost
        log2stdout = True
    if log2stdout:
        hdlr = logging.StreamHandler()
        hdlr.setFormatter(log_format)
        logger.addHandler(hdlr)
    logger = _debug_set_level(logger, level)
    return logger
"resource": ""
} |
def get_cube(cube, init=False, pkgs=None, cube_paths=None, config=None,
             backends=None, **kwargs):
    '''
    Dynamically locate and load a metrique cube

    :param cube: name of the cube class to import from given module
    :param init: flag to request initialized instance or uninitialized class
    :param config: config dict to pass on initialization (implies init=True)
    :param pkgs: list of package names to search for the cubes in
    :param cube_paths: additional paths to search for modules in (sys.path)
    :param backends: accepted for interface compatibility; unused here
    :param kwargs: additional kwargs to pass to cube during initialization
    '''
    pkgs = pkgs or ['cubes']
    pkgs = [pkgs] if isinstance(pkgs, basestring) else pkgs
    # search in the given path too, if provided
    cube_paths = cube_paths or []
    cube_paths_is_basestring = isinstance(cube_paths, basestring)
    cube_paths = [cube_paths] if cube_paths_is_basestring else cube_paths
    cube_paths = [os.path.expanduser(path) for path in cube_paths]
    # append paths which don't already exist in sys.path to sys.path
    [sys.path.append(path) for path in cube_paths if path not in sys.path]
    pkgs = pkgs + DEFAULT_PKGS
    err = False
    # Try each package in turn until one yields the cube class.
    for pkg in pkgs:
        try:
            _cube = _load_cube_pkg(pkg, cube)
        except ImportError as err:
            _cube = None
        if _cube:
            break
    else:
        # NOTE(review): if no ImportError was raised this logs 'False';
        # also relies on Python 2 keeping the except target bound after
        # the except block -- confirm before any py3 port.
        logger.error(err)
        raise RuntimeError('"%s" not found! %s; %s \n%s)' % (
            cube, pkgs, cube_paths, sys.path))
    if init:
        _cube = _cube(config=config, **kwargs)
    return _cube
"resource": ""
} |
def get_timezone_converter(from_timezone, to_tz=None, tz_aware=False):
    '''
    Build a converter callable that maps datetimes from *from_timezone*
    to *to_tz* (utc by default); returns None when no source timezone
    is given.

    :param from_timezone: timezone name as string
    '''
    if not from_timezone:
        return None
    is_true(HAS_DATEUTIL, "`pip install python_dateutil` required")
    is_true(HAS_PYTZ, "`pip install pytz` required")
    source_tz = pytz.timezone(from_timezone)
    return partial(_get_timezone_converter, from_tz=source_tz,
                   to_tz=to_tz, tz_aware=tz_aware)
"resource": ""
} |
def is_empty(value, msg=None, except_=None, inc_zeros=True):
    '''
    True when *value* is defined but null-like or empty ('', [], {},
    None, NaN); numeric zeros count as empty unless inc_zeros=False.
    '''
    if hasattr(value, 'empty'):
        # dataframes must check for .empty since they don't define a
        # truth value; fold it into a plain truthiness flag (falsy ==
        # empty) for the null/emptiness check below
        value = not bool(value.empty)
    elif not inc_zeros and value in ZEROS:
        # BUGFIX: the inc_zeros test was inverted -- zeros were being
        # masked as non-empty exactly when inc_zeros was True, which
        # contradicted the documented intent ("also consider 0, 0.0,
        # 0L as 'empty'").  Mask zeros (make them truthy) only when
        # they should NOT count as empty.
        value = True
    result = bool(is_null(value, except_=False) or not value)
    if except_:
        return is_true(result, msg=msg, except_=except_)
    return bool(result)
"resource": ""
} |
def is_null(value, msg=None, except_=None):
    '''
    True when *value* is "not defined": None, NaN (value != value) or
    pandas NaT.  DataFrames are never considered null, even when empty.
    With except_ set, delegates to is_true which may raise.
    '''
    if hasattr(value, 'empty'):
        # dataframes, even if empty, are not considered null
        value = False
    result = bool(value is None or
                  value != value or
                  repr(value) == 'NaT')
    if except_:
        return is_true(result, msg=msg, except_=except_)
    return bool(result)
"resource": ""
} |
def json_encode_default(obj):
    '''
    json encoder default hook: datetimes/dates become epoch timestamps,
    everything else falls through to the shared json_encoder.

    :param obj: value to (possibly) convert
    '''
    if isinstance(obj, (datetime, date)):
        encoded = dt2ts(obj)
    else:
        encoded = json_encoder.default(obj)
    return to_encoding(encoded)
"resource": ""
} |
def jsonhash(obj, root=True, exclude=None, hash_func=_jsonhash_sha1):
    '''
    Calculate a stable hash of *obj* based on all of its field values.

    Mappings are normalized to sorted (key, subhash) tuples and lists
    to tuples so equal content always hashes equally; only the root
    call applies *exclude* filtering and the final *hash_func*.

    :param obj: mapping / list / scalar to hash
    :param root: internal recursion flag; leave True when calling
    :param exclude: iterable of top-level keys to ignore
    :param hash_func: digest applied to the normalized structure
    '''
    if isinstance(obj, Mapping):
        # assumption: using in against set() is faster than in against list()
        if root and exclude:
            obj = {k: v for k, v in obj.iteritems() if k not in exclude}
        # frozenset's don't guarantee order; use sorted tuples
        # which means different python interpreters can return
        # back frozensets with different hash values even when
        # the content of the object is exactly the same
        result = sorted(
            (k, jsonhash(v, False)) for k, v in obj.iteritems())
    elif isinstance(obj, list):
        # FIXME: should lists be sorted for consistent hashes?
        # when the object is the same, just different list order?
        result = tuple(jsonhash(e, False) for e in obj)
    else:
        # scalars pass through untouched and hash as themselves
        result = obj
    if root:
        result = unicode(hash_func(result))
    return result
"resource": ""
} |
def load(path, filetype=None, as_df=False, retries=None,
         _oid=None, quiet=False, **kwargs):
    '''Load multiple files from various file types automatically.

    Supports glob paths, eg::

        path = 'data/*.csv'

    Filetypes are autodetected by common extension strings.

    Currently supports loadings from:
        * csv (pd.read_csv)
        * json (pd.read_json)

    :param path: glob path, http(s) url, or pre-loaded DataFrame
    :param filetype: override filetype autodetection
    :param as_df: return a pandas DataFrame instead of a list of dicts
    :param retries: retry count for http downloads (see urlretrieve)
    :param _oid: field name or callable used to set each object's _oid
    :param quiet: don't raise when zero objects are extracted
    :param kwargs: additional filetype loader method kwargs
    '''
    is_true(HAS_PANDAS, "`pip install pandas` required")
    set_oid = set_oid_func(_oid)
    # kwargs are for passing ftype load options (csv.delimiter, etc)
    # expect the use of globs; eg, file* might result in fileN (file1,
    # file2, file3), etc
    if not isinstance(path, basestring):
        # assume we're getting a raw dataframe
        objects = path
        if not isinstance(objects, pd.DataFrame):
            raise ValueError("loading raw values must be DataFrames")
    elif re.match('https?://', path):
        logger.debug('Saving %s to tmp file' % path)
        # BUGFIX: retries must be passed by keyword; positionally it
        # landed in urlretrieve's `saveas` parameter.
        _path = urlretrieve(path, retries=retries)
        logger.debug('%s saved to tmp file: %s' % (path, _path))
        try:
            objects = load_file(_path, filetype, **kwargs)
        finally:
            # always drop the downloaded temp copy
            remove_file(_path)
    else:
        path = re.sub('^file://', '', path)
        path = os.path.expanduser(path)
        # assume relative to cwd if not already absolute path
        path = path if os.path.isabs(path) else pjoin(os.getcwd(), path)
        files = sorted(glob.glob(os.path.expanduser(path)))
        if not files:
            raise IOError("failed to load: %s" % path)
        # build up a single result by concatting all globbed files
        objects = []
        [objects.extend(load_file(ds, filetype, **kwargs))
         for ds in files]
    if is_empty(objects, except_=False) and not quiet:
        raise RuntimeError("no objects extracted!")
    else:
        logger.debug("Data loaded successfully from %s" % path)
    if set_oid:
        # set _oids, if we have a _oid generator func defined
        objects = [set_oid(o) for o in objects]
    if as_df:
        return pd.DataFrame(objects)
    else:
        return objects
"resource": ""
} |
def read_file(rel_path, paths=None, raw=False, as_list=False, as_iter=False,
              *args, **kwargs):
    '''
    Find a file that lives somewhere within a set of paths and return
    its contents.  Default search paths include the static dirs.

    :param rel_path: path of the file relative to one of the dirs
    :param paths: extra directories to search, checked in order first
    :param raw: return the open file object (caller must close it)
    :param as_list: return the list of lines instead of one string
    :param as_iter: return a chunked-read iterator over the open file
    :param args: positional args for open(); defaults to ['rU']
    :param kwargs: keyword args passed through to open()
    '''
    if not rel_path:
        raise ValueError("rel_path can not be null!")
    paths = str2list(paths)
    # try looking the file up in a directory called static relative
    # to SRC_DIR, eg assuming metrique git repo is in ~/metrique
    # we'd look in ~/metrique/static
    paths.extend([STATIC_DIR, os.path.join(SRC_DIR, 'static')])
    paths = [os.path.expanduser(p) for p in set(paths)]
    for path in paths:
        path = os.path.join(path, rel_path)
        logger.debug("trying to read: %s " % path)
        if os.path.exists(path):
            break
    else:
        raise IOError("path %s does not exist!" % rel_path)
    args = args if args else ['rU']
    fd = open(path, *args, **kwargs)
    if raw:
        return fd
    if as_iter:
        return read_in_chunks(fd)
    # BUGFIX: the descriptor was previously leaked on these two paths;
    # close it once the contents are fully read into memory.
    try:
        fd_lines = fd.readlines()
    finally:
        fd.close()
    if as_list:
        return fd_lines
    return ''.join(fd_lines)
"resource": ""
} |
def safestr(str_):
    '''Strip *str_* down to only its alphanumeric characters; None and
    other falsy inputs yield the empty string.'''
    source = str_ or ""
    kept = [ch for ch in source if ch.isalnum()]
    return "".join(kept)
"resource": ""
} |
def urlretrieve(uri, saveas=None, retries=3, cache_dir=None):
    '''urllib.urlretrieve wrapper with simple retry support.

    NOTE(review): the warn message says "in 1s" but the code sleeps
    0.2s between attempts -- confirm which was intended.
    '''
    retries = int(retries) if retries else 3
    # FIXME: make random filename (saveas) in cache_dir...
    # cache_dir = cache_dir or CACHE_DIR
    while retries:
        try:
            _path, _headers = urllib.urlretrieve(uri, saveas)
            break
        except Exception as e:
            retries -= 1
            logger.warn(
                'Failed getting uri "%s": %s (retry:%s in 1s)' % (
                    uri, e, retries))
            time.sleep(.2)
    else:
        raise RuntimeError("Failed to retrieve uri: %s" % uri)
    return _path
"resource": ""
} |
def reverse_media_url(target_type, url_string, *args, **kwargs):
    '''
    Given a target type and an resource URL, generates a valid URL to this via

    Extra positional args are appended as ``<arg>`` segments and keyword
    args as ``<key:value>`` segments before the URL is normalized.
    NOTE(review): with no args/kwargs this still appends empty ``<>``
    segments -- presumably ResourceURL tolerates them; confirm.
    '''
    args_str = '<%s>' % '><'.join(args)
    kwargs_str = '<%s>' % '><'.join('%s:%s' % pair for pair in kwargs.items())
    url_str = ''.join([url_string, args_str, kwargs_str])
    normalized_url = str(ResourceURL(url_str))
    query_tuples = []
    # optionally sign the request so the media endpoint can verify it
    if singletons.settings.SECURITY and 'Sha1' in singletons.settings.SECURITY:
        secret = singletons.settings.HMAC_SECRET
        digest = get_hmac_sha1_digest(secret, normalized_url, target_type)
        query_tuples.append(('digest', digest))
    # Add in URL as last querystring argument
    query_tuples.append(('url', normalized_url))
    querystring = urlencode(query_tuples)
    # prefer explicitly configured external host/port, falling back to
    # the internal bind host/port
    scheme = singletons.settings.EXTERNAL_SCHEME
    host = singletons.settings.EXTERNAL_HOST
    port = singletons.settings.EXTERNAL_PORT
    if not host:
        host = singletons.settings.HOST
    if not port:
        port = singletons.settings.PORT
    # omit the port suffix for the default HTTP port
    port_suffix = ':%s' % port if port != 80 else ''
    typestring_normalized = str(TypeString(target_type))
    return '%s://%s%s/media/%s/?%s' % (
        scheme,
        host,
        port_suffix,
        typestring_normalized,
        querystring,
    )
def _isbn_cleanse(isbn, checksum=True):
    """Check ISBN is a string, and passes basic sanity checks.

    Args:
        isbn (str): SBN, ISBN-10 or ISBN-13
        checksum (bool): ``True`` if ``isbn`` includes checksum character

    Returns:
        ``str``: ISBN with hyphenation removed, including when called with a
            SBN

    Raises:
        TypeError: ``isbn`` is not a ``str`` type
        IsbnError: Incorrect length for ``isbn``
        IsbnError: Incorrect SBN or ISBN formatting
    """
    if not isinstance(isbn, string_types):
        raise TypeError('ISBN must be a string, received %r' % isbn)
    # On Python 2, normalize to unicode but remember whether the caller
    # passed bytes so the return type can be matched at the end.
    if PY2 and isinstance(isbn, str):  # pragma: Python 2
        isbn = unicode(isbn)
        uni_input = False
    else:  # pragma: Python 3
        uni_input = True
    # strip every recognized dash/hyphen codepoint
    for dash in DASHES:
        isbn = isbn.replace(dash, unicode())
    if checksum:
        if not isbn[:-1].isdigit():
            raise IsbnError('non-digit parts')
        # a 9-char value is an SBN: pad with a leading zero to ISBN-10
        if len(isbn) == 9:
            isbn = '0' + isbn
        if len(isbn) == 10:
            # ISBN-10 check digit may be 'X' (value 10)
            if not (isbn[-1].isdigit() or isbn[-1] in 'Xx'):
                raise IsbnError('non-digit or X checksum')
        elif len(isbn) == 13:
            if not isbn[-1].isdigit():
                raise IsbnError('non-digit checksum')
            # ISBN-13 must carry a Bookland EAN prefix
            if not isbn.startswith(('978', '979')):
                raise IsbnError('invalid Bookland region')
        else:
            raise IsbnError('ISBN must be either 10 or 13 characters long')
    else:
        # checksum-less input: 8-char SBN body pads to 9-char ISBN-10 body
        if len(isbn) == 8:
            isbn = '0' + isbn
        elif len(isbn) == 12 and not isbn[:3].startswith(('978', '979')):
            raise IsbnError('invalid Bookland region')
        if not isbn.isdigit():
            raise IsbnError('non-digit parts')
        if not len(isbn) in (9, 12):
            raise IsbnError('ISBN must be either 9 or 12 characters long '
                            'without checksum')
    if PY2 and not uni_input:  # pragma: Python 2
        # Sadly, type ping-pong is required to maintain backwards compatibility
        # with previous pyisbn releases for Python 2 users.
        return str(isbn)
    else:  # pragma: Python 3
        return isbn
def convert(isbn, code='978'):
    """Convert ISBNs between ISBN-10 and ISBN-13.

    Note:
        No attempt to hyphenate converted ISBNs is made, because the
        specification requires that *any* hyphenation must be correct but
        allows ISBNs without hyphenation.

    Args:
        isbn (str): SBN, ISBN-10 or ISBN-13
        code (str): EAN Bookland code

    Returns:
        ``str``: Converted ISBN-10 or ISBN-13

    Raise:
        IsbnError: When ISBN-13 isn't convertible to an ISBN-10
    """
    cleansed = _isbn_cleanse(isbn)
    if len(cleansed) == 10:
        # prepend the Bookland code, drop the old check digit, re-checksum
        prefixed = code + cleansed[:-1]
        return prefixed + calculate_checksum(prefixed)
    # only the 978 Bookland region maps back onto the ISBN-10 space
    if not cleansed.startswith('978'):
        raise IsbnError('Only ISBN-13s with 978 Bookland code can be '
                        'converted to ISBN-10.')
    body = cleansed[3:-1]
    return body + calculate_checksum(body)
def to_url(self, site='amazon', country='us'):
    """Generate a link to an online book site.

    Args:
        site (str): Site to create link to
        country (str): Country specific version of ``site``

    Returns:
        ``str``: URL on ``site`` for book

    Raises:
        SiteError: Unknown site value
        CountryError: Unknown country value
    """
    try:
        entry = URL_MAP[site]
    except KeyError:
        raise SiteError(site)
    try:
        # entries are either (template, tld_map) pairs or a bare template
        url, tlds = entry
    except ValueError:
        url, tlds = entry, None
    inject = {'isbn': self._isbn}
    if tlds:
        if country not in tlds:
            raise CountryError(country)
        # an empty tld entry means the country code doubles as the tld
        inject['tld'] = tlds[country] or country
    return url % inject
q39177 | RegexTokenizer._tokenize | train | def _tokenize(self, text, token_class=None):
"""
Tokenizes a text
:Returns:
A `list` of tokens
"""
token_class = token_class or Token
tokens = {}
for i, match in enumerate(self.regex.finditer(text)):
value = match.group(0)
try:
token = tokens[value]
except KeyError:
type = match.lastgroup
token = token_class(value, type=type)
tokens[value] = token
yield token | python | {
"resource": ""
} |
def _parse_cli_facter_results(facter_results):
    '''Parse key value pairs printed with "=>" separators.
    YAML is preferred output scheme for facter.
    >>> list(_parse_cli_facter_results("""foo => bar
    ... baz => 1
    ... foo_bar => True"""))
    [('foo', 'bar'), ('baz', '1'), ('foo_bar', 'True')]
    >>> list(_parse_cli_facter_results("""foo => bar
    ... babababababababab
    ... baz => 2"""))
    [('foo', 'bar\nbabababababababab'), ('baz', '2')]
    >>> list(_parse_cli_facter_results("""3434"""))
    Traceback (most recent call last):
    ...
    ValueError: parse error
    Uses a generator interface:
    >>> _parse_cli_facter_results("foo => bar").next()
    ('foo', 'bar')
    '''
    key, value_lines = None, []
    for line in facter_results.splitlines():
        if not line:
            continue
        parts = line.split(six.u(" => "), 1)
        if len(parts) == 2:
            # a new key starts: flush the previously accumulated pair
            if key:
                yield key, os.linesep.join(value_lines)
            key, value_lines = parts[0], [parts[1]]
        else:
            # continuation line belonging to the previous value
            if not key:
                raise ValueError("parse error")
            value_lines.append(parts[0])
    if key:
        yield key, os.linesep.join(value_lines)
def run_facter(self, key=None):
    """Run the facter executable with an optional specfic
    fact. Output is parsed to yaml if available and
    selected. Puppet facts are always selected. Returns a
    dictionary if no key is given, and the value if a key is
    passed."""
    args = [self.facter_path]
    #this seems to not cause problems, but leaving it separate
    args.append("--puppet")
    if self.external_dir is not None:
        args.append('--external-dir')
        args.append(self.external_dir)
    if self.uses_yaml:
        args.append("--yaml")
    if key is not None:
        args.append(key)
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    # BUGFIX: communicate() reads stdout AND reaps the child process;
    # the previous proc.stdout.read() left a zombie process behind
    results, _ = proc.communicate()
    if self.uses_yaml:
        # SECURITY NOTE: yaml.load can execute arbitrary code on
        # untrusted input; facter output is assumed trusted here, but
        # consider yaml.safe_load
        parsed_results = yaml.load(results)
        if key is not None:
            return parsed_results[key]
        return parsed_results
    results = results.decode()
    if key is not None:
        return results.strip()
    return dict(_parse_cli_facter_results(results))
def has_cache(self):
    """Return True when caching is enabled, building the cache lazily.

    Intended to be called before any cache access; returns False when
    caching is not selected.
    """
    if self.cache_enabled:
        if self._cache is None:
            # lazy-build on first access
            self.build_cache()
        return True
    return False
def lookup(self, fact, cache=True):
    """Return the value of ``fact``; raise KeyError if unavailable.

    When ``cache`` is False (or no cache is available) the fact is
    looked up directly via facter.
    """
    if cache and self.has_cache():
        return self._cache[fact]
    value = self.run_facter(fact)
    # facter prints nothing for unknown facts
    if value is None or value == '':
        raise KeyError(fact)
    return value
def _reset(self) -> None:
    """Clear per-search state so the instance can run another search."""
    # fresh random project name for each search run
    self.project = namesgenerator.get_random_name()
    self._processed = []
    self.results = []
q39183 | Frisbee._config_bootstrap | train | def _config_bootstrap(self) -> None:
"""Handle the basic setup of the tool prior to user control.
Bootstrap will load all the available modules for searching and set
them up for use by this main class.
"""
if self.output:
self.folder: str = os.getcwd() + "/" + self.project
os.mkdir(self.folder) | python | {
"resource": ""
} |
def _dyn_loader(self, module: str, kwargs: str):
    """Dynamically import a search-engine module and instantiate its
    ``Module`` class with ``kwargs``.

    :raises Exception: when no matching file exists in ./modules
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    filename = module + ".py"
    if filename not in os.listdir(base_dir + "/modules"):
        raise Exception("Module %s is not valid" % filename)
    target = "%s.%s" % (self.MODULE_PATH, filename[:-3])
    return getattr(import_module(target), 'Module')(**kwargs)
def _job_handler(self) -> bool:
    """Drain the shared work queue, running each search job and putting
    the annotated result on the fulfilled queue."""
    while True:
        try:
            task = self._unfullfilled.get_nowait()
        except queue.Empty:
            # queue drained -- this worker is done
            break
        self._log.debug("Job: %s" % str(task))
        engine = self._dyn_loader(task['engine'], task)
        task['start_time'] = now_time()
        found = engine.search()
        task['end_time'] = now_time()
        # duration recorded as whole seconds, stringified
        task['duration'] = str((task['end_time'] - task['start_time']).seconds)
        task.update({'results': found})
        self._fulfilled.put(task)
    return True
def _save(self) -> None:
    """Save output to the project directory.

    Writes one JSON job summary and one plain-text email list per
    domain, skipping domains already saved.
    """
    self._log.info("Saving results to '%s'" % self.folder)
    path = self.folder + "/"
    for job in self.results:
        if job['domain'] in self.saved:
            continue
        job['start_time'] = str_datetime(job['start_time'])
        job['end_time'] = str_datetime(job['end_time'])
        # random id keeps repeated runs from clobbering earlier files
        jid = random.randint(100000, 999999)
        filename = "%s_%s_%d_job.json" % (self.project, job['domain'], jid)
        # BUGFIX: use context managers so file handles are closed even
        # when serialization fails part-way through
        with open(path + filename, 'w') as handle:
            handle.write(json.dumps(job, indent=4))
        filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid)
        with open(path + filename, 'w') as handle:
            for email in job['results']['emails']:
                handle.write(email + "\n")
        self.saved.append(job['domain'])
def search(self, jobs: List[Dict[str, str]]) -> None:
    """Perform searches based on job orders.

    Each job dict describes one search (engine, domain, limit, modifier,
    greedy flag).  Jobs are fanned out across worker processes; when a
    job is marked greedy, newly discovered email domains are queued as
    follow-up (non-greedy) searches via recursion.

    :param jobs: list of job-order dictionaries
    :raises Exception: when ``jobs`` is not a list
    """
    if not isinstance(jobs, list):
        raise Exception("Jobs must be of type list.")
    self._log.info("Project: %s" % self.project)
    self._log.info("Processing jobs: %d", len(jobs))
    for _, job in enumerate(jobs):
        self._unfullfilled.put(job)
    # fan out: each worker process drains the shared queue
    for _ in range(self.PROCESSES):
        proc: Process = Process(target=self._job_handler)
        self._processes.append(proc)
        proc.start()
    for proc in self._processes:
        proc.join()
    while not self._fulfilled.empty():
        output: Dict = self._fulfilled.get()
        output.update({'project': self.project})
        self._processed.append(output['domain'])
        self.results.append(output)
        if output['greedy']:
            # greedy jobs spawn follow-up searches for every email
            # domain not yet processed; follow-ups are non-greedy so
            # the recursion terminates
            bonus_jobs: List = list()
            observed: List = list()
            for item in output['results']['emails']:
                found: str = item.split('@')[1]
                if found in self._processed or found in observed:
                    continue
                observed.append(found)
                base: Dict = dict()
                base['limit'] = output['limit']
                base['modifier'] = output['modifier']
                base['engine'] = output['engine']
                base['greedy'] = False
                base['domain'] = found
                bonus_jobs.append(base)
            if len(bonus_jobs) > 0:
                # recurse to process the follow-up jobs
                self.search(bonus_jobs)
    self._log.info("All jobs processed")
    if self.output:
        self._save()
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.
    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size = float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If ``direction`` is not one of the supported identifiers.
    """
    # BUGFIX: validate direction up front; previously an unknown value
    # caused a NameError deep inside the loop
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "'aligned'; got %r" % (direction,))
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF
        # convert to geodetic
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        elif direction == 'aligned':
            ux, uy, uz = bx, by, bz
        # take steps along direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def add_fabfile():
    """
    Copy the base fabfile.py to the current working directory
    (as ``fabfile_deployer.py``), refusing to overwrite an existing file.
    """
    fabfile_src = os.path.join(PACKAGE_ROOT, 'fabfile.py')
    fabfile_dest = os.path.join(os.getcwd(), 'fabfile_deployer.py')
    if os.path.exists(fabfile_dest):
        # BUGFIX: Python-2-only print statement -> print() call
        # NOTE(review): message mentions fabfile.py but the checked file
        # is fabfile_deployer.py -- confirm intended wording
        print("`fabfile.py` exists in the current directory. "
              "Please remove or rename it and try again.")
        return
    shutil.copyfile(fabfile_src, fabfile_dest)
def delete(self):
    """Remove this record from the Infoblox server.

    :rtype: bool
    :raises: AssertionError
    :raises: ValueError
    :raises: infoblox.exceptions.ProtocolError
    """
    if not self._ref:
        raise ValueError('Object has no reference id for deletion')
    if 'save' not in self._supports:
        raise AssertionError('Can not save this object type')
    response = self._session.delete(self._path)
    if response.status_code == 200:
        # drop local state so this object no longer refers to the
        # (now deleted) server copy
        self._ref = None
        self.clear()
        return True
    try:
        error = response.json()
    except ValueError:
        # non-JSON error body: surface the raw content
        raise exceptions.ProtocolError(response.content)
    raise exceptions.ProtocolError(error['text'])
def fetch(self):
    """Attempt to fetch the object from the Infoblox device, updating
    this record in place on success.

    :rtype: bool
    :raises: infoblox.exceptions.ProtocolError
    """
    LOGGER.debug('Fetching %s, %s', self._path, self._search_values)
    response = self._session.get(self._path, self._search_values,
                                 {'_return_fields': self._return_fields})
    code = response.status_code
    if code == 200:
        values = response.json()
        self._assign(values)
        # an empty result set still returns 200; report it as False
        return bool(values)
    if code >= 400:
        try:
            error = response.json()
        except ValueError:
            # non-JSON error body: surface the raw content
            raise exceptions.ProtocolError(response.content)
        raise exceptions.ProtocolError(error['text'])
    return False
def save(self):
    """Update the infoblox with new values for the specified object, or add
    the values if it's a new object all together.

    Serializes every non-empty attribute (explicit ``False`` values are
    kept), flattening list members to dicts, their ``_save_as`` form, or
    their ``_ref`` id.  POSTs for new objects, PUTs for existing ones,
    then re-fetches on success.

    :raises: AssertionError
    :raises: infoblox.exceptions.ProtocolError
    """
    if 'save' not in self._supports:
        raise AssertionError('Can not save this object type')
    values = {}
    for key in [key for key in self.keys() if key not in self._save_ignore]:
        # skip empty values, but keep explicit boolean False
        if not getattr(self, key) and getattr(self, key) != False:
            continue
        if isinstance(getattr(self, key, None), list):
            value = list()
            for item in getattr(self, key):
                if isinstance(item, dict):
                    value.append(item)
                elif hasattr(item, '_save_as'):
                    # child object knows its own wire format
                    value.append(item._save_as())
                elif hasattr(item, '_ref') and getattr(item, '_ref'):
                    # fall back to referencing the child by its _ref id
                    value.append(getattr(item, '_ref'))
                else:
                    LOGGER.warning('Cant assign %r', item)
            values[key] = value
        elif getattr(self, key, None):
            values[key] = getattr(self, key)
    if not self._ref:
        # no reference id yet: this is a brand new object
        response = self._session.post(self._path, values)
    else:
        values['_ref'] = self._ref
        response = self._session.put(self._path, values)
    LOGGER.debug('Response: %r, %r', response.status_code, response.content)
    if 200 <= response.status_code <= 201:
        # refresh local state with what the server actually stored
        self.fetch()
        return True
    else:
        try:
            error = response.json()
            raise exceptions.ProtocolError(error['text'])
        except ValueError:
            raise exceptions.ProtocolError(response.content)
def _assign(self, values):
    """Assign the values passed as either a dict or list to the object if
    the key for each value matches an available attribute on the object.

    List members carrying a ``_ref`` are inflated into their mapped
    record class when one is registered.  A list response is unwrapped by
    recursing on its first element.

    :param dict values: The values to assign
    """
    LOGGER.debug('Assigning values: %r', values)
    if not values:
        return
    keys = self.keys()
    if not self._ref:
        # allow the server-assigned reference id to be set the first time
        keys.append('_ref')
    if isinstance(values, dict):
        for key in keys:
            if values.get(key):
                if isinstance(values.get(key), list):
                    items = list()
                    for item in values[key]:
                        if isinstance(item, dict):
                            if '_ref' in item:
                                obj_class = get_class(item['_ref'])
                                if obj_class:
                                    items.append(obj_class(self._session,
                                                           **item))
                                # NOTE(review): a dict with a _ref but no
                                # mapped class is silently dropped here,
                                # as are non-dict items -- confirm intent
                            else:
                                items.append(item)
                    setattr(self, key, items)
                else:
                    setattr(self, key, values[key])
    elif isinstance(values, list):
        # search results arrive as a list; only the first hit is used
        self._assign(values[0])
    else:
        LOGGER.critical('Unhandled return type: %r', values)
q39194 | Record._build_search_values | train | def _build_search_values(self, kwargs):
"""Build the search criteria dictionary. It will first try and build
the values from already set attributes on the object, falling back
to the passed in kwargs.
:param dict kwargs: Values to build the dict from
:rtype: dict
"""
criteria = {}
for key in self._search_by:
if getattr(self, key, None):
criteria[key] = getattr(self, key)
elif key in kwargs and kwargs.get(key):
criteria[key] = kwargs.get(key)
return criteria | python | {
"resource": ""
} |
def add_ipv4addr(self, ipv4addr):
    """Add an IPv4 address to the host.

    :param str ipv4addr: The IP address to add.
    :raises: ValueError when the address is already present
    """
    if any((isinstance(a, dict) and a['ipv4addr'] == ipv4addr) or
           (isinstance(a, HostIPv4) and a.ipv4addr == ipv4addr)
           for a in self.ipv4addrs):
        raise ValueError('Already exists')
    self.ipv4addrs.append({'ipv4addr': ipv4addr})
def remove_ipv4addr(self, ipv4addr):
    """Remove an IPv4 address from the host (first match only).

    :param str ipv4addr: The IP address to remove
    """
    for entry in self.ipv4addrs:
        matches = ((isinstance(entry, dict) and
                    entry['ipv4addr'] == ipv4addr) or
                   (isinstance(entry, HostIPv4) and
                    entry.ipv4addr == ipv4addr))
        if matches:
            self.ipv4addrs.remove(entry)
            break
def add_ipv6addr(self, ipv6addr):
    """Add an IPv6 address to the host.

    :param str ipv6addr: The IP address to add.
    :raises: ValueError when the address is already present
    """
    for addr in self.ipv6addrs:
        # BUGFIX: the original tested isinstance(addr, HostIPv4) -- a
        # copy-paste from the IPv4 variant; duck-type on the ipv6addr
        # attribute instead so object entries are detected correctly
        if isinstance(addr, dict):
            existing = addr.get('ipv6addr')
        else:
            existing = getattr(addr, 'ipv6addr', None)
        if existing == ipv6addr:
            raise ValueError('Already exists')
    self.ipv6addrs.append({'ipv6addr': ipv6addr})
def remove_ipv6addr(self, ipv6addr):
    """Remove an IPv6 address from the host (first match only).

    :param str ipv6addr: The IP address to remove
    """
    for addr in self.ipv6addrs:
        # BUGFIX: the original tested isinstance(addr, HostIPv4) -- a
        # copy-paste from the IPv4 variant; duck-type on the ipv6addr
        # attribute instead so object entries are detected correctly
        if isinstance(addr, dict):
            existing = addr.get('ipv6addr')
        else:
            existing = getattr(addr, 'ipv6addr', None)
        if existing == ipv6addr:
            self.ipv6addrs.remove(addr)
            break
def autoschema(self, objects, **kwargs):
    '''Wrapper around the utils.autoschema function.

    Generates a schema from ``objects``, always excluding this proxy's
    RESTRICTED_KEYS; remaining kwargs pass through unchanged.
    '''
    return autoschema(objects=objects, exclude_keys=self.RESTRICTED_KEYS,
                      **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.