| content (string, 22–815k chars) | id (int64, 0–4.91M) |
|---|---|
def build_parser(args):
""" A method to handle argparse.
"""
parser = argparse.ArgumentParser(usage='$ python verdict.py',
description='''Downloads, filters and
re-publishes the Google
sheet.''',
epilog='')
parser.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true")
return parser.parse_args()
| 12,800
|
def parse_query(qd, session, config):
"""Parses the given query dictionary to produce a BaseQuery object."""
from mlalchemy.parser import parse_query as mlalchemy_parse_query
defaults = {
"limit": config["default_limit"],
"backref_limit": config["default_backref_limit"],
"backref_depth": config["default_backref_depth"],
"join_depth": config["default_join_depth"],
"exclude": [],
"include": [],
}
qd.setdefault("limit", defaults["limit"])
full_qd = merge_dicts(defaults, qd)
if qd["limit"] in (None, False):
qd.pop("limit")
if isinstance(full_qd["exclude"], str):
full_qd["exclude"] = [full_qd["exclude"]]
full_qd["exclude"] = list(set(full_qd["exclude"] + config["global_exclude"]))
if isinstance(full_qd["include"], str):
full_qd["include"] = [full_qd["include"]]
mlquery = mlalchemy_parse_query(qd)
query = mlquery.to_query(session, session.bind._db.models)
order_by = full_qd.pop("order-by", None)
if order_by:
full_qd["order_by"] = order_by
qd_key_sort = [
"from",
"where",
"order_by",
"offset",
"limit",
"backref_limit",
"backref_depth",
"join_depth",
"exclude",
"include",
]
if full_qd["include"]:
full_qd["join_depth"] = full_qd["backref_depth"] = None
else:
full_qd["join_depth"] = full_qd["join_depth"] or 0
full_qd["backref_depth"] = full_qd["backref_depth"] or 0
query.query_dict = OrderedDict(
sorted(full_qd.items(), key=lambda x: qd_key_sort.index(x[0]))
)
query = query.with_loaded_relations(
full_qd["join_depth"],
full_qd["backref_depth"],
full_qd["exclude"],
full_qd["include"],
)
query = mlquery.apply_filters(query)
query.session.parsed_query = query
return query
| 12,801
|
def get_covid19_us_bears(
url_root=CSV_URL_ROOT,
file_prefix=CSV_FILE_PREFIX,
file_suffix=CSV_FILE_SUFFIX,
        encoding=CSV_ENCODING) -> Dict[str, Dict[str, Bears]]:
    """Converts USAFACTS confirmed and deaths CSV files into state and county
    `Bears`, returned as a dictionary of dictionaries.
    Args:
        url_root (str): URL prefix for the CSV
        file_prefix (str): CSV file prefix
        file_suffix (str): CSV file suffix
        encoding (str): CSV encoding
    Returns:
        Dict[str, Dict[str, Bears]]:
::
{'confirmed': {'counties': Bears,
'states': Bears},
'deaths': {'counties': Bears,
'states': Bears}}
"""
covid19 = {'confirmed': {'counties': None, 'states': None},
'deaths': {'counties': None, 'states': None}}
for db_type in ['confirmed', 'deaths']:
covid19[db_type]['counties'] = Usafacts(
from_csv=True,
csv_specs=CsvSpecs(
url=stitch_time_series_csv_url(
db_type=db_type, url_root=url_root, file_prefix=file_prefix,
file_suffix=file_suffix),
uid_col_label=CSV_COL_UID,
encoding=encoding))
for db_type in ['confirmed', 'deaths']:
counties = covid19[db_type]['counties']
covid19[db_type]['states'] = Usafacts(
dataframe=counties2states_df(counties.df, counties.datetime_index))
return covid19
| 12,802
|
def jsonify_promise(
future_obj: Input[Jsonable],
indent: Input[Optional[Union[int, str]]]=None,
separators: Input[Optional[Tuple[str, str]]]=None
) -> Output[str]:
"""Convert a Promise object to a Promise to jsonify the result of that Promise.
    An asynchronous (Promise) version of json.dumps() that operates on Pulumi output
values that have not yet been evaluated. Sorts keys to provide stability of result strings.
The result is another Pulumi output value that when evaluated will generate the
json string associated with future_obj
Args:
future_obj(Input[Jsonable]): A Pulumi Input Jsonable value that is not yet evaluated
Returns:
Output[str] A Pulumi "output" value that will resolve to the json string corresponding to future_obj
"""
def gen_json(
obj: Jsonable,
indent: Optional[Union[int, str]],
separators: Optional[Tuple[str, str]]
) -> str:
return json.dumps(obj, sort_keys=True, indent=indent, separators=separators)
# "pulumi.Output.all(*future_args).apply(lambda args: sync_func(*args))"" is a pattern
# provided by pulumi. It waits until all promises in future_args have been satisfied,
# then invokes sync_func with the realized values of all the future_args as *args. Finally
# it wraps the synchronous function as a promise and returns the new promise as the result.
# this allows you to write synchronous code in pulumi that depends on future values, and
# turn it into asynchronous code
result = Output.all(future_obj, indent, separators).apply(lambda args: gen_json(*args)) # type: ignore[arg-type]
return result
| 12,803
|
def run_adriz(flc_files):
""" Runs AstroDrizzle in order to create cosmic ray masks and to obtain
an estimate of the global sky background in new keyword 'mdrizsky'.
"""
# Assumes first 6 letters of a visit's images are all the same.
common = flc_files[0][:6]
search = '{}*flc.fits'.format(common)
print('search term for AstroDrizzle: {}'.format(search))
# Check that really did capture all files with the search term.
    # You'll need to do some hacking if this error occurs.
for flc in flc_files:
if common not in flc:
print("Error!! File {} does not match rest of visit with {}.".format(flc, common))
return
astrodrizzle.AstroDrizzle(search,
runfile='',
output='',
preserve=False,
updatewcs=False,
skysub=True,
driz_cr=True,
driz_cr_corr=True,
driz_combine=False)
# Remove unneeded files.
unneeded_files = glob.glob('*med.fits') + glob.glob('*crclean.fits') \
+ glob.glob('*blt.fits') + glob.glob('*single_mask.fits') \
+ glob.glob('*wht.fits') + glob.glob('*sci.fits') \
+ glob.glob('*staticMask.fits') + glob.glob('*skymatch*')
for unneeded_file in unneeded_files:
os.remove(unneeded_file)
| 12,804
|
def randnums(start, stop, n_samples):
"""
Helper function to select real samples and generate fake samples
"""
ix = []
for i in range(n_samples):
ix.append(randint(start, stop))
ix = np.array(ix)
return ix
| 12,805
|
def shift_1_spectra(spectra, shift):
""" This method find the relative position of the FFT of the two spectras \
in order to later k-linearize.
Args:
:param spectra1: OCT spectra of first mirror.
:type spectra1: list
Return:
:rname: Zspace: - pi to pi linear vector space
:rtype: list
"""
L = len(spectra)
mean = np.max(spectra)
x = np.arange(L)
j = complex(0,1)
shifted_spectra = np.real( hilbert(spectra) * np.exp(j * x * shift ) )
shift_mean = np.max(shifted_spectra)
shifted_spectra = (shifted_spectra / shift_mean) * mean
return shifted_spectra
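# Hedged usage sketch: numpy and scipy.signal.hilbert are required by the code
# above but not imported in this snippet; the cosine fringe below is synthetic,
# not real OCT data.
if __name__ == "__main__":
    import numpy as np
    from scipy.signal import hilbert

    demo_spectra = 1.0 + 0.5 * np.cos(np.linspace(0.0, 20.0 * np.pi, 2048))
    demo_shifted = shift_1_spectra(demo_spectra, shift=0.01)
    # The output keeps the input's shape and is rescaled to its original peak.
    print(demo_shifted.shape, round(float(demo_shifted.max()), 2))  # (2048,) 1.5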
| 12,806
|
def hpat_pandas_series_div(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.div` and :meth:`pandas.Series.truediv` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method div() or truediv().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_div_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data / other._data)
return hpat_pandas_series_div_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_div_number_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data / other)
return hpat_pandas_series_div_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
| 12,807
|
def alias(self, arg):
"""
set the new alias to magic
*alias alias1 string*
alias1 is added into magic command
"""
if arg == '' or arg.lower() == 'help':
return dbhelp(self, 'alias')
name, fstring = arg.split(" ", 1)
print "new alias: %s <%s>" % (DBPRINT.msg_green(name), fstring)
__alias_table__[name] = fstring
func, params = fstring.split(" ", 1)
def afunc(self, arg):
"""replacing func"""
DBPRINT.print_blue(fstring)
IP.magic("%%%s" % fstring)
IP.expose_magic(name, afunc)
| 12,808
|
def _transform_cat_options(metadata: dict) -> pd.DataFrame:
"""Transform category options metadata into a formatted DataFrame."""
df = pd.DataFrame.from_dict(metadata.get("categoryOptions"))
df = df[["id", "code", "shortName", "name"]]
df.columns = ["co_uid", "co_code", "co_shortname", "co_name"]
return df
| 12,809
|
def idaview(request, idadb, idadf):
"""
IdaDataFrame fixture to be used for the whole testing session. Open a view
based on idadf fixture.
"""
def fin():
try:
idadb.drop_view("TEST_VIEW_ibmdbpy")
idadb.commit()
except:
pass
request.addfinalizer(fin)
if idadb.exists_view("TEST_VIEW_ibmdbpy"):
idadb.drop_view("TEST_VIEW_ibmdbpy")
idadb._create_view(idadf, "TEST_VIEW_ibmdbpy")
return ibmdbpy.IdaDataFrame(idadb, "TEST_VIEW_ibmdbpy")
| 12,810
|
def fields() -> None:
"""IoT Fields"""
| 12,811
|
def get_openmp_flag(compiler):
"""Returns list of flags for using OpenMP depending on compiler and
platform.
Parameters
----------
compiler : numpy.distutils.compiler
Compiler used when invoking setup.py build
"""
if hasattr(compiler, 'compiler'):
compiler = compiler.compiler[0]
else:
compiler = compiler.__class__.__name__
if sys.platform == "win32" and ('icc' in compiler or 'icl' in compiler):
return ['/Qopenmp']
elif sys.platform == "win32":
return ['/openmp']
elif sys.platform in ("darwin", "linux") and "icc" in compiler:
return ['-qopenmp']
elif sys.platform == "darwin" and 'openmp' in os.getenv('CPPFLAGS', ''):
return ['-openmp']
# Default flag for GCC and clang:
return ['-fopenmp']
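# Hedged usage sketch: `_FakeUnixCC` is a hypothetical stand-in for the
# numpy.distutils compiler object mentioned in the docstring, and the `os`/`sys`
# imports are assumed by the function but not shown in this snippet.
if __name__ == "__main__":
    import os
    import sys

    class _FakeUnixCC:
        compiler = ["gcc", "-pthread", "-O2"]

    print(get_openmp_flag(_FakeUnixCC()))  # ['-fopenmp'] on a Linux/gcc setup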
| 12,812
|
def heuristical_lengths(items):
"""
    heuristical_lengths tries to derive the lengths of the content of items.
It always returns a list.
a) If typeof(items) is a string, it'll return [len(items)]
b) If typeof(items) is a dict, it'll return [len(items)]
c) If typeof(items) is either list or tuple, it'll best case try to iterate
over each element and record those lengths and return them all flattened.
If it can't retrieve the lengths yet len(items) > 0, then it will return [len(items)]
d) If items has the '__len__' attribute, it'll return [len(items)]
e) Otherwise if it can't derive the type, it'll return []
"""
if items is None:
return []
elif isinstance(items, str):
return [len(items)]
elif isinstance(items, dict):
return [len(items)]
elif isinstance(items, tuple) or isinstance(items, list):
lengths = []
for item in items:
i_lengths = heuristical_lengths(item)
lengths.extend(i_lengths)
# In the best case, if len(lengths) == 0
# yet len(items) > 0, just use len(items)
if len(lengths) == 0 and len(items) > 0:
lengths = [len(items)]
return lengths
elif hasattr(items, '__len__'):
return [len(items)]
elif hasattr(items, '__iter__'):
lengths = []
itr = iter(items)
for it in itr:
it_lengths = heuristical_lengths(it)
lengths.extend(it_lengths)
return lengths
else:
return []
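# Hedged usage sketch walking through cases (a)-(c) from the docstring above.
if __name__ == "__main__":
    assert heuristical_lengths("hello") == [5]                        # (a) string
    assert heuristical_lengths({"a": 1, "b": 2}) == [2]               # (b) dict
    assert heuristical_lengths(["ab", "cde", {"k": 1}]) == [2, 3, 1]  # (c) flattened
    assert heuristical_lengths(None) == []                            # nothing to measure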
| 12,813
|
def tifpages(file_id, filename, db_cursor):
"""
Check if TIF has multiple pages
"""
p = subprocess.Popen(['identify', '-format', '%n\\n', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
try:
if int(len(out.split())) == 1:
pages_vals = 0
no_pages = str(int(len(out.split()))) + " page"
else:
pages_vals = 1
no_pages = str(int(len(out.split()))) + " pages"
except Exception as e:
no_pages = "Unknown ({})".format(e)
pages_vals = 1
db_cursor.execute(queries.file_check, {'file_id': file_id, 'file_check': 'tifpages', 'check_results': pages_vals,
'check_info': no_pages})
db_cursor.execute(queries.insert_log, {'project_id': settings.project_id, 'file_id': file_id,
'log_area': 'tifpages',
'log_text': db_cursor.query.decode("utf-8")})
return True
| 12,814
|
def next(space, w_arr):
""" Advance the internal array pointer of an array """
length = w_arr.arraylen()
current_idx = w_arr.current_idx + 1
if current_idx >= length:
w_arr.current_idx = length
return space.w_False
w_arr.current_idx = current_idx
return w_arr._current(space)
| 12,815
|
def geocoordinatess_id_get(id, username=None): # noqa: E501
"""Get a single GeoCoordinates by its id
Gets the details of a given GeoCoordinates (more information in https://w3id.org/okn/o/sdm#GeoCoordinates) # noqa: E501
:param id: The ID of the GeoCoordinates to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: GeoCoordinates
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=GEOCOORDINATES_TYPE_URI,
rdf_type_name=GEOCOORDINATES_TYPE_NAME,
kls=GeoCoordinates)
| 12,816
|
def pprint(object, stream=None, indent=1, width=80, depth=None, *,
compact=False):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth, compact=compact
)
printer.pprint(object)
| 12,817
|
def check_auth(request):
"""Check authentication on request.
:param request: Flask request
:raises: utils.Error if access is denied
"""
if not conf.getboolean('discoverd', 'authenticate'):
return
if request.headers.get('X-Identity-Status').lower() == 'invalid':
raise Error('Authentication required', code=401)
roles = (request.headers.get('X-Roles') or '').split(',')
if 'admin' not in roles:
LOG.error('Role "admin" not in user role list %s', roles)
raise Error('Access denied', code=403)
| 12,818
|
def timer_cb(watcher, revents):
""" Timed callback, right out of the book. """
watcher.data += 1
print("timer.data: {0}".format(watcher.data))
print("timer.loop.iteration: {0}".format(watcher.loop.iteration))
print("timer.loop.now(): {0}".format(watcher.loop.now()))
| 12,819
|
def check_callable(target, label=None):
"""Checks target is callable and then returns it."""
if not callable(target):
raise TypeError('Expected {} callable, found non-callable {}.'.format(
'{} to be'.format(label) if label is not None else 'a',
type_string(type(target))))
return target
| 12,820
|
def convert_dictionary_values(d, map={}):
"""convert string values in a dictionary to numeric types.
Arguments
d : dict
The dictionary to convert
map : dict
If map contains 'default', a default conversion is enforced.
For example, to force int for every column but column ``id``,
supply map = {'default' : "int", "id" : "str" }
"""
rx_int = re.compile(r"^\s*[+-]*[0-9]+\s*$")
rx_float = re.compile(r"^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$")
    # pre-process with 'default'; fall back to no default conversion
    default = False
    if "default" in map:
k = "default"
if map[k] == "int":
default = int
elif map[k] == "float":
default = float
elif map[k] == "string":
default = str
else:
default = False
for k, vv in list(d.items()):
if vv is None:
continue
v = vv.strip()
try:
if k in map:
if map[k] == "int":
d[k] = int(v)
elif map[k] == "float":
d[k] = float(v)
elif map[k] == "string":
pass
continue
elif default:
if v != "":
d[k] = default(v)
else:
d[k] = v
continue
except TypeError as msg:
raise TypeError("conversion in field: %s, %s" % (k, msg))
try:
if rx_int.match(v):
d[k] = int(v)
elif rx_float.match(v):
d[k] = float(v)
except TypeError as msg:
raise TypeError(
"expected string or buffer: offending value = '%s' " % str(v))
except ValueError as msg:
raise ValueError("conversion error: %s, %s" % (msg, str(d)))
return d
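# Hedged usage sketch: `re` is assumed to be imported at module level; without a
# "default" entry in `map`, values are auto-detected by the regexes above.
if __name__ == "__main__":
    import re
    forced = convert_dictionary_values({"id": "0042", "count": " 7 "},
                                       map={"default": "int", "id": "str"})
    auto = convert_dictionary_values({"n": "12", "x": "3.5", "label": "abc"})
    print(forced)  # {'id': '0042', 'count': 7} -- "id" kept as text
    print(auto)    # {'n': 12, 'x': 3.5, 'label': 'abc'}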
| 12,821
|
def GetLimitPB(user, action_type):
"""Return the apporiate action limit PB part of the given User PB."""
if action_type == PROJECT_CREATION:
if not user.project_creation_limit:
user.project_creation_limit = user_pb2.ActionLimit()
return user.project_creation_limit
elif action_type == ISSUE_COMMENT:
if not user.issue_comment_limit:
user.issue_comment_limit = user_pb2.ActionLimit()
return user.issue_comment_limit
elif action_type == ISSUE_ATTACHMENT:
if not user.issue_attachment_limit:
user.issue_attachment_limit = user_pb2.ActionLimit()
return user.issue_attachment_limit
elif action_type == ISSUE_BULK_EDIT:
if not user.issue_bulk_edit_limit:
user.issue_bulk_edit_limit = user_pb2.ActionLimit()
return user.issue_bulk_edit_limit
elif action_type == FLAG_SPAM:
if not user.flag_spam_limit:
user.flag_spam_limit = user_pb2.ActionLimit()
return user.flag_spam_limit
elif action_type == API_REQUEST:
if not user.api_request_limit:
user.api_request_limit = user_pb2.ActionLimit()
return user.api_request_limit
raise Exception('unexpected action type %r' % action_type)
| 12,822
|
def test_compress_bam_dry_run(bam_tmp_file, base_context):
"""Test to run the compress bam command"""
# GIVEN the path to a existing bam file and a cli runner
runner = CliRunner()
bam_path = bam_tmp_file
assert bam_path.exists()
# WHEN running the compress command with dry_run
result = runner.invoke(
bam, ["--bam-path", str(bam_path), "--dry-run"], obj=base_context
)
    # THEN assert the command was successful
assert result.exit_code == 0
| 12,823
|
def best_int_dtype(data):
"""get bit depth required to best represent float data as int"""
d, r = divmod(np.log2(data.ptp()), 8)
d = max(d, 1)
i = (2 ** (int(np.log2(d)) + bool(r)))
return np.dtype('i%d' % i)
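# Hedged worked example: assumes numpy is imported as np (not shown here) and an
# array API where `data.ptp()` exists (NumPy < 2.0, or use np.ptp otherwise).
if __name__ == "__main__":
    import numpy as np
    samples = np.array([-1.0, 0.25, 69999.0])  # peak-to-peak span of 70000
    # log2(70000) ~ 16.1 bits, so two bytes are not enough once rounded up
    print(best_int_dtype(samples))  # int32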
| 12,824
|
def gen_decorate_name(*args):
"""
gen_decorate_name(name, mangle, cc, type) -> bool
Generic function for 'decorate_name()' (may be used in IDP modules)
@param name (C++: const char *)
@param mangle (C++: bool)
@param cc (C++: cm_t)
@param type (C++: const tinfo_t *)
"""
return _ida_typeinf.gen_decorate_name(*args)
| 12,825
|
def main(self, count=10):
"""
kosmos -p 'j.servers.myjobs.test("start")'
"""
self.reset()
def wait_1sec():
gevent.sleep(1)
return "OK"
ids = []
for x in range(count):
job_sch = self.schedule(wait_1sec)
ids.append(job_sch.id)
self._workers_gipc_nr_max = 1
self.workers_subprocess_start()
res = self.results(ids, timeout=120)
print(res)
self.stop(reset=True)
print("TEST OK")
| 12,826
|
async def uptime(ctx):
"""Displays how long the bot has been online for"""
second = time.time() - start_time
minute, second = divmod(second, 60)
hour, minute = divmod(minute, 60)
day, hour = divmod(hour, 24)
week, day = divmod(day, 7)
await ctx.send(
"I've been online for %d weeks, %d days, %d hours, %d minutes, %d seconds" % (week, day, hour, minute, second))
| 12,827
|
def get_asc() -> pd.DataFrame:
"""Get Yahoo Finance small cap stocks with earnings growth rates better than 25%. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Most aggressive small cap stocks
"""
url = "https://finance.yahoo.com/screener/predefined/aggressive_small_caps"
data = pd.read_html(requests.get(url).text)[0]
return data
| 12,828
|
def getEnabled(chat_id):
"""Gets the status of a conversation"""
status = EnableStatus.get_by_id(str(chat_id))
if status:
return status.enabled
return False
| 12,829
|
def create_app(config=DevelopConfig):
"""App factory."""
app = Flask(
__name__.split('.')[0],
static_url_path='/static',
static_folder=f'{config.PROJECT_PATH}/src/static'
)
app.url_map.strict_slashes = False
app.config.from_object(config)
register_extensions(app)
register_blueprints(app)
register_shellcontext(app)
register_adminpanel(app)
register_sessions(app)
register_github_oauth(app)
register_before_hooks(app)
register_commands(app)
register_mail_settings(app)
register_secret(app)
return app
| 12,830
|
def write_charging_cost_results(
record, calculated_annual_charging_cost,
calculated_annual_charging_kwh, ev_specific_rate,
csv_writer
):
"""
Write the charging cost results for a record.
:param record:
:param calculated_annual_charging_cost:
:param calculated_annual_charging_kwh:
:param ev_specific_rate:
:param csv_writer:
:return:
"""
csv_writer.writerow([
record["label"].encode("utf-8"),
record["utility"].encode("utf-8"),
record["eiaid"] if "eiaid" in record.keys() else None,
record["name"].encode("utf-8"),
record["description"].encode("utf-8")
if "description" in record.keys() else None,
record["enddate"] if "enddate" in record.keys() else None,
record["source"].encode("utf-8")
if "source" in record.keys() else None,
record["uri"].encode("utf-8"),
record["fixedmonthlycharge"]
if "fixedmonthlycharge" in record.keys() else None,
calculated_annual_charging_cost,
calculated_annual_charging_kwh,
"yes" if ev_specific_rate else "no"
]
)
| 12,831
|
def merge_inputs_for_create(task_create_func):
"""Merge all inputs for start operation into one dict"""
# Needed to wrap the wrapper because I was seeing issues with
# "RuntimeError: No context set in current execution thread"
def wrapper(**kwargs):
# NOTE: ctx.node.properties is an ImmutableProperties instance which is
# why it is passed into a mutable dict so that it can be deep copied
return _wrapper_merge_inputs(task_create_func,
dict(ctx.node.properties), **kwargs)
return wrapper
| 12,832
|
def embedding_lookup(params, ids):
"""Wrapper around ``tf.nn.embedding_lookup``.
This converts gradients of the embedding variable to tensors which allows
  the use of optimizers that don't support sparse gradients (e.g. Adafactor).
Args:
params: The embedding tensor.
ids: The ids to lookup in :obj:`params`.
Returns:
A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`.
"""
params = convert_gradient_to_tensor(params)
return tf.nn.embedding_lookup(params, ids)
| 12,833
|
def value_as_unit(value: T | None, unit: Unit = None) -> T | Quantity[T] | None:
"""Return value as specified unit or sensor fault if value is none."""
if value is None:
return None
if unit is None:
return value
return value * unit
| 12,834
|
def get_static_spatial_noise_image(image) :
""" The first step is to sum all of the odd-numbered images (sumODD image)
and separately sum all of the even-numbered images (sumEVEN image). The
difference between the sum of the odd images and the sum of the even
images (DIFF = sumODD - sumEVEN) is taken as a raw measure of static
spatial noise. (p. 828-829)
"""
image_odd = image[range(1, image.shape[0],2)].astype(numpy.single)
sum_odd = numpy.sum(image_odd, 0)
image_even = image[range(0, image.shape[0],2)].astype(numpy.single)
sum_even = numpy.sum(image_even, 0)
diff = sum_odd-sum_even
return medipy.base.Image(data=diff,
origin=image.origin[1:], spacing=image.spacing[1:],
direction=image.direction[1:,1:])
| 12,835
|
def get_gallery_dir() -> str:
"""
Return the path to the mephisto task gallery
"""
return os.path.join(get_root_dir(), "gallery")
| 12,836
|
def get_next_action():
""" gets the next action to perform, based on get_action_odds """
action_odds = get_action_odds()
#print(f"DEBUG action_odds {action_odds}")
# get the sum of all the action odds values
total = 0
for action in action_odds:
#print(f"DEBUG get_next_action total {total} adding action {action} odds {action_odds[action]}")
total += action_odds[action]
#print(f"DEBUG get_next_action total now {total}")
# get a random number from 1..sum
val = random.randint(1,total)
#print(f"DEBUG get_next_action val {val} is 1..{total}")
# now, check if the value is <= the first action.
# If so, use that. If not, reduce the sum by that number, and check the next action.
for action in action_odds:
odds = action_odds[action]
if val <= odds:
return action
val -= odds
raise Exception("random action was greater than sum of odds, this shouldn't be possible")
| 12,837
|
def get_movie_title(movie_id):
"""
Takes in an ID, returns a title
"""
movie_id = int(movie_id)-1
return items.iloc[movie_id]['TITLE']
| 12,838
|
def draw_material(material, face=GL_FRONT_AND_BACK):
"""Draw a single material"""
if material.gl_floats is None:
material.gl_floats = (GLfloat * len(material.vertices))(*material.vertices)
material.triangle_count = len(material.vertices) / material.vertex_size
vertex_format = VERTEX_FORMATS.get(material.vertex_format)
if not vertex_format:
raise ValueError("Vertex format {} not supported by pyglet".format(vertex_format))
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
glPushAttrib(GL_CURRENT_BIT | GL_ENABLE_BIT | GL_LIGHTING_BIT)
glEnable(GL_CULL_FACE)
glCullFace(GL_BACK)
# Fall back to ambient texture if no diffuse
texture = material.texture or material.texture_ambient
if texture and material.has_uvs:
bind_texture(texture)
else:
glDisable(GL_TEXTURE_2D)
glMaterialfv(face, GL_DIFFUSE, gl_light(material.diffuse))
glMaterialfv(face, GL_AMBIENT, gl_light(material.ambient))
glMaterialfv(face, GL_SPECULAR, gl_light(material.specular))
glMaterialfv(face, GL_EMISSION, gl_light(material.emissive))
glMaterialf(face, GL_SHININESS, min(128.0, material.shininess))
glEnable(GL_LIGHT0)
if material.has_normals:
glEnable(GL_LIGHTING)
else:
glDisable(GL_LIGHTING)
glInterleavedArrays(vertex_format, 0, material.gl_floats)
glDrawArrays(GL_TRIANGLES, 0, int(material.triangle_count))
glPopAttrib()
glPopClientAttrib()
| 12,839
|
def get_logger():
"""
Return a logger object
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
| 12,840
|
def test_employment():
"""Test module employment.py by downloading
employment.csv and testing shape of
extracted data has 24 rows and 4 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = employment(test_path)
try:
assert x_train.shape == (24, 4)
except:
shutil.rmtree(test_path)
        raise
| 12,841
|
def wav(args, scope_data, infile):
"""Create an audible .wav file for use in LTSpice."""
wav_name = os.path.splitext(infile)[0] + '.wav'
if os.path.isfile(wav_name) and not args.force:
print("'%s' exists, use --force to overwrite" % wav_name)
return
scope_data.wav(wav_name, channel=args.channel)
| 12,842
|
def vim_image_api_delete_image(connection, msg):
"""
Handle Delete-Image API request
"""
global _image_delete_operations
DLOG.verbose("Delete image, uuid=%s." % msg.uuid)
_image_delete_operations[msg.uuid] = connection
image_director = directors.get_image_director()
image_director.image_delete(msg.uuid, _delete_image_callback)
| 12,843
|
def enough_gap_since_last_obs(df, current_state, obs_log):
"""
Determine if a sufficient time has passed since the last observation
in this subprogram (in any filter):
"""
now = current_state['current_time'].mjd
# don't mess up with the upstream data structure
df = df.copy()
grp = df.groupby(['program_id','subprogram_name'])
df['ref_obs_mjd'] = np.nan
for grpi, dfi in grp:
ref_obs = obs_log.select_last_observed_time_by_field(
field_ids = set(dfi['field_id'].tolist()),
program_ids = [grpi[0]],
subprogram_names = [grpi[1]])
if len(ref_obs) > 0:
tmp = pd.merge(df, ref_obs, left_on='field_id', right_index=True,
how='inner')
df.loc[tmp.index, 'ref_obs_mjd'] = tmp.expMJD.values
# give a fake value for fields unobserved
df.loc[df['ref_obs_mjd'].isnull(), 'ref_obs_mjd'] = 58119.0
# calculate dt
df['dt'] = now - df['ref_obs_mjd']
return df['dt'] >= (df['intranight_gap_min']*(1*u.minute).to(u.day).value)
| 12,844
|
def find_nearest_feature_to_attribute(sentence, features, attribute):
"""
Parameters
----------
sentence: str,
One sentence from the info text of a mushroom species
features: list of strs
List of possible features as in dataset_categories.features_list
attribute: str,
Mushroom feature attribute that is in the sentence (e.g. 'red' for 'cap color').
Return
------
str,
The feature in features that is closest to attribute in word steps.
Example
-------
sentences[2] = "The entire young fruitbody is enclosed in a white veil which leaves fragments (which may wash off)
on the shiny red, marginally grooved cap." (for simplicity only one sentence is considered)
features = dataset_categories.features_list (relevant here: 'cap', 'veil')
attribute = 'white'
return:
'veil' (since 'veil' is closer to 'white' than 'cap')
"""
min_distance = float('inf')
min_distance_index = 0
for i in range(0, len(features)):
if features[i] in sentence:
word_distance = get_word_distance(sentence, features[i], attribute)
if word_distance < min_distance:
min_distance = word_distance
min_distance_index = i
return features[min_distance_index]
| 12,845
|
def test_if_elif_else():
"""
>>> d = {'v':1}
>>> txt = '''
... {{if v==1:}}1{{elif v==2:}}2{{else:}}other{{pass}}
... '''
>>> print (template(txt, d))
<BLANKLINE>
1
<BLANKLINE>
>>> print (template(txt, {'v':2}))
<BLANKLINE>
2
<BLANKLINE>
>>> print (template(txt, {'v':3}))
<BLANKLINE>
other
<BLANKLINE>
"""
| 12,846
|
def lists():
"""
    Inventory list
:return:
"""
template_name = 'inventory/lists.html'
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('inventory lists')
    # Search form
form = InventorySearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
form.rack_id.choices = get_rack_choices(form.warehouse_id.data)
# app.logger.info('')
inventory_brand_choices = [(brand, brand) for brand in get_distinct_inventory_brand(status_delete=STATUS_DEL_NO) if
brand != '']
form.production_brand.choices = DEFAULT_SEARCH_CHOICES_STR + inventory_brand_choices
search_condition = [
Inventory.status_delete == STATUS_DEL_NO,
Inventory.stock_qty_current > 0,
]
if request.method == 'POST':
        # Form validation failed
if not form.validate_on_submit():
flash(_('Search Failure'), 'danger')
            # Handle csrf_token errors separately
if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
                for error in form.csrf_token.errors:
                    flash(error, 'danger')
else:
if form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Inventory.warehouse_id == form.warehouse_id.data)
if form.rack_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Inventory.rack_id == form.rack_id.data)
if form.production_brand.data != DEFAULT_SEARCH_CHOICES_STR_OPTION:
search_condition.append(Inventory.production_brand == form.production_brand.data)
if form.production_model.data:
search_condition.append(Inventory.production_model.like('%%%s%%' % form.production_model.data))
            # Handle export
if form.op.data == OPERATION_EXPORT:
                # Check export permission
if not permission_inventory_section_export.can():
abort(403)
column_names = Inventory.__table__.columns.keys()
query_sets = get_inventory_rows(*search_condition)
return excel.make_response_from_query_sets(
query_sets=query_sets,
column_names=column_names,
file_type='csv',
file_name='%s.csv' % _('inventory lists')
)
            # Batch delete
if form.op.data == OPERATION_DELETE:
                # Check delete permission
if not permission_inventory_section_del.can():
abort(403)
inventory_ids = request.form.getlist('inventory_id')
result_total = True
for inventory_id in inventory_ids:
current_time = datetime.utcnow()
inventory_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_inventory(inventory_id, inventory_data)
result_total = result_total and result
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
    # Pagination data
pagination = get_inventory_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
    # Render the template
return render_template(
template_name,
form=form,
pagination=pagination,
**document_info
)
| 12,847
|
def open_1d_txt(filename, xaxcol=0, datacol=1, errorcol=2,
text_reader='simple', format=None, **kwargs):
"""
Attempt to read a 1D spectrum from a text file assuming wavelength as the
first column, data as the second, and (optionally) error as the third.
Reading can be done either with astropy.io.ascii or a 'simple' reader. If
you have an IPAC, CDS, or formally formatted table, you'll want to use
    astropy.io.ascii and specify a format.
If you have a simply formatted file of the form, e.g.
# name name
# unit unit
data data
data data
kwargs are passed to astropy.io.ascii.read
"""
if text_reader in ('simple','readcol'):
if text_reader == 'simple':
data, error, XAxis, T = simple_txt(filename, xaxcol=xaxcol,
datacol=datacol,
errorcol=errorcol, **kwargs)
elif text_reader == 'readcol':
Tlist = readcol.readcol(filename, twod=False, **kwargs)
XAxis = units.SpectroscopicAxis(Tlist[xaxcol])
data = Tlist[datacol]
error = Tlist[errorcol]
T = dummy_class()
T.data = dummy_class()
T.data.dtype = dummy_class()
T.columns = {}
T.columns[T.data.dtype.names[xaxcol]] = dummy_class()
T.columns[T.data.dtype.names[datacol]] = dummy_class()
elif text_reader in ('ascii', 'astropy', 'asciitable'):
T = ascii.read(filename, format=format, **kwargs)
xarr = T.data[T.data.dtype.names[xaxcol]]
data = T.data[T.data.dtype.names[datacol]]
if len(T.columns) > errorcol:
error = T.data[T.data.dtype.names[errorcol]]
else:
# assume uniform, zero error
error = data*0
if 'xunits' in T.keywords:
xunits = T.keywords['xunits']
else:
xunits = 'unknown'
XAxis = units.SpectroscopicAxis(xarr,xunits)
# Need this in Spectrum class to correctly parse header
T.xaxcol = xaxcol
T.datacol = datacol
return data, error, XAxis, T
| 12,848
|
def create_tables(hpo_id, drop_existing=False):
"""
Create the achilles related tables
:param hpo_id: associated hpo id
:param drop_existing: if True, drop existing tables
:return:
"""
for table_name in ACHILLES_HEEL_TABLES:
table_id = bq_utils.get_table_id(hpo_id, table_name)
bq_utils.create_standard_table(table_name, table_id, drop_existing)
| 12,849
|
def flux_to_sql(con, solute_db, site_key,leg,site,hole,solute,flux,
burial_flux,gradient,porosity,z,dp,bottom_conc,conc_fit,
r_squared,age_depth_boundaries,sedrate,advection,precision,ds,
temp_d,bottom_temp,bottom_temp_est,cycles,por_error,mean_flux,
median_flux,stdev_flux,skewness,p_value,mean_flux_log,
median_flux_log,stdev_flux_log,stdev_flux_lower,
stdev_flux_upper,skewness_log,p_value_log,runtime_errors,date,
comments,complete):
"""
Send the output of interface_flux.py or flux_rerun.py to the
MySQL database. If data for a particular site already exists, replaces
old data with new data.
con: database engine connection
solute_db: solute name for inserting into database
site_key: MySQL database site key
leg: drilling leg/expedition number
site: drilling site number
hole: drilling hole Ids
solute: solute name in database
flux: solute flux at z (mol m^-2 y^-1). Positive flux value is
downward (into the sediment)
burial_flux: solute flux due to pore water burial at z (mol m^-2 y^-1)
gradient: pore water solute concentration gradient at z (mol m^-1)
porosity: porosity at z
z: depth at which flux is calculated (mbsf)
dp: concentration datapoints below seafloor used for line fit
bottom_conc: ocean bottom water solute concentration (mM)
conc_fit: parameters of solute concentration curve (see conc_curve)
r_squared: R-squared of regression between model and measurements
age_depth_boundaries: bounds between discrete sedimentation rate regimes
sedrate: modern sediment accumulation rate (solid volume per year)
advection: external advection rate (m/y)
precision: relative standard deviation of concentration measurements
ds: diffusion coefficient at reference temperature (m^2 y^-1)
temp_d: reference temperature of diffusion coefficient (C)
bottom_temp: ocean bottom water temperature (C)
bottom_temp_est: ocean bottom water temperature parameter for estimation
cycles: number of monte carlo simulations to run
por_error: relative standard deviation of porosity fit
mean_flux: average solute flux from monte carlo simulation
median_flux: median solute flux from monte carlo simulation
stdev_flux: standard deviation of solute flux from monte carlo sim.
skewness: skewness of distribution of fluxes from monte carlo sim.
p_value: Kolmogorov-Smirvov p-value of distribution of fluxes
mean_flux_log: log-normal average of fluxes from monte carlo sim.
median_flux_log: log-normal median of fluxes from monte carlo sim.
stdev_flux_log: log-normal standard deviation of fluxes from m.c. sim.
stdev_flux_lower: upper log-normal value for 1 standard deviation
stdev_flux_upper: lower log-normal value for 1 standard deviation
skewness_log: skewness of distribution of log-normal fluxes
p_value_log: Kolmogorov-Smirvov p-value of log-normal fluxes
runtime_errors: number of errors from line-fitting procedure
date: date the site was modeled
complete: if "yes", modeling is complete for this site
comments: comments for this location or dataset
"""
# Send metadata to database
sql= """insert into metadata_{}_flux (site_key,leg,site,hole,solute,
interface_flux,burial_flux,gradient,
top_por,flux_depth,datapoints,
bottom_conc,conc_fit,r_squared,
age_depth_boundaries,sed_rate,
advection,measurement_precision,ds,
ds_reference_temp,bottom_temp,
bottom_temp_est,mc_cycles,
porosity_error,mean_flux,median_flux,
stdev_flux,skewness,p_value,
mean_flux_log,median_flux_log,
stdev_flux_log,stdev_flux_lower,
stdev_flux_upper,skewness_log,
p_value_log,runtime_errors,run_date,
comments,complete)
VALUES ({}, '{}', '{}', '{}', '{}', {}, {}, {}, {}, {}, {}, {},
'{}', {},'{}', {}, {}, {}, {}, {}, {}, '{}', {}, {}, {},
{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {},'{}','{}',
'{}')
ON DUPLICATE KEY UPDATE hole='{}', solute='{}', interface_flux={},
burial_flux={}, gradient={}, top_por={}, flux_depth={},
datapoints={}, bottom_conc={}, conc_fit='{}', r_squared={},
age_depth_boundaries='{}', sed_rate={}, advection={},
measurement_precision={}, ds={}, ds_reference_temp={},
bottom_temp={}, bottom_temp_est='{}', mc_cycles={},
porosity_error={}, mean_flux={}, median_flux={},
stdev_flux={}, skewness={}, p_value={}, mean_flux_log={},
median_flux_log={}, stdev_flux_log={}, stdev_flux_lower={},
stdev_flux_upper={}, skewness_log={}, p_value_log={},
runtime_errors={}, run_date='{}', comments='{}', complete='{}'
;""".format(solute_db, site_key,leg,site,hole,solute,
flux,burial_flux,gradient,porosity,
z,dp,bottom_conc,conc_fit,r_squared,
age_depth_boundaries,sedrate,advection,
precision,ds,temp_d,bottom_temp,bottom_temp_est,cycles,
por_error,mean_flux,median_flux,stdev_flux,skewness,
p_value,mean_flux_log,median_flux_log,stdev_flux_log,
stdev_flux_lower,stdev_flux_upper,skewness_log,
p_value_log,runtime_errors,date,comments,complete,
hole,solute,flux,burial_flux,gradient,porosity,
z,dp,bottom_conc,conc_fit,r_squared,
age_depth_boundaries,sedrate,advection,precision,ds,
temp_d,bottom_temp,bottom_temp_est,cycles,
por_error,mean_flux,median_flux,stdev_flux,
skewness,p_value,mean_flux_log,median_flux_log,
stdev_flux_log,stdev_flux_lower,stdev_flux_upper,
skewness_log,p_value_log,runtime_errors,date,comments,
complete)
con.execute(sql)
| 12,850
|
def clean_record(raw_string: str) -> str:
"""
Removes all unnecessary signs from a raw_string and returns it
:param raw_string: folder or file name to manage
:return: clean value
"""
for sign in ("'", '(', ')', '"'):
raw_string = raw_string.replace(sign, '')
return raw_string.replace(' ', '-').replace('--', '-')
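# Hedged usage sketch showing the sign stripping and dash normalisation above.
if __name__ == "__main__":
    assert clean_record("The Hitchhiker's Guide (1981)") == "The-Hitchhikers-Guide-1981"
    assert clean_record('my "new" file') == "my-new-file"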
| 12,851
|
def df_of_tables_for_dd_ids(dd_ids, sqlite_tables, sql_con):
"""
:param list dd_ids: list of Deep Dive IDs to retrieve
:param list sqlite_tables: list of SQLite tables to join
:param sqlalchemy.create_engine sql_con: Connection to SQLite (can be \
omitted)
:returns: `pandas.DataFrame` -- dataframe of tables, joined using the Deep \
Dive IDs.
"""
import pandas as pd
import numpy as np
dd_ids_str = ','.join(['"{}"'.format(x) for x in dd_ids])
query_fmt = 'select * from {} where dd_id in ({})'.format
df = pd.read_sql(query_fmt(sqlite_tables[0], dd_ids_str), sql_con).drop_duplicates()
df['dd_id'] = df.dd_id.astype(int)
for s_t in sqlite_tables[1:]:
df_2 = pd.read_sql(query_fmt(s_t, dd_ids_str), sql_con)
df_2['dd_id'] = df_2.dd_id.astype(int)
# We use outer joins because dd_ids in one table may be missing from the other.
df = df.merge(df_2, on=['dd_id'], how='outer')
if 'post_date' in df:
df['post_date'] = df.post_date.apply(pd.to_datetime)
if 'duration_in_mins' in df:
df['duration_in_mins'] = df.duration_in_mins.apply(lambda x: float(x) if x != '' else np.nan)
# I melted some rows when making this, and it's proven a mistake. Let's unmelt
melted_cols = ['ethnicity', 'flag']
for m_c in melted_cols:
if m_c in df.columns:
df = aggregated_df(df, m_c, 'dd_id', '|')
return df
| 12,852
|
def get_group_type(group: Union[hou.EdgeGroup, hou.PointGroup, hou.PrimGroup]) -> int:
"""Get an HDK compatible group type value.
:param group: The group to get the group type for.
:return: An HDK group type value.
"""
try:
return _GROUP_TYPE_MAP[type(group)]
except KeyError as exc:
raise ValueError("Invalid group type") from exc
| 12,853
|
def download_data(count, n_dims, n_classes, outpath, name):
"""Download data set to the data folder for further usage"""
dataset, ground_truth = make_classification(
n_samples=count,
n_features=n_dims,
n_informative=2,
n_redundant=2,
n_repeated=0,
n_classes=n_classes,
n_clusters_per_class=2,
weights=None,
flip_y=0.01,
class_sep=1.0,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=True,
random_state=42,
)
logging.info("Download_data: Creating example classification dataset")
logging.info(f"Download_data: - samples: {dataset.shape}")
logging.info(f"Download_data: - labels: {ground_truth.shape}")
save_samples(dataset, ground_truth, outpath, name)
| 12,854
|
def test_positive() -> None:
"""
Run mypy on the positive test file. There should be no errors.
"""
subprocess.check_call(get_mypy_cmd(POSITIVE_FILE))
| 12,855
|
def mp2d_driver(jobrec, verbose=1):
"""Drive the jobrec@i (input) -> mp2drec@i -> mp2drec@io -> jobrec@io (returned) process."""
return module_driver(
jobrec=jobrec, module_label='mp2d', plant=mp2d_plant, harvest=mp2d_harvest, verbose=verbose)
| 12,856
|
def query():
"""
TODO: add secret file and pass them to functions
"""
# client = get_client_as_service_account('/path/to/service_secret.json')
# client = get_client_as_user_account('/path/to/client_secret.json', False)
client = get_client_with_ADC()
query_job = client.query(QUERY_STRING)
for row in query_job:
print(f'{row["year"]}-{row["month"]}: {row["num_reports"]}')
| 12,857
|
def query_user_list():
"""
Retrieve list of users on user watch list.
"""
conn = connect.connect()
cur = conn.cursor()
cur.execute("SELECT * FROM watched_users")
watched_users = cur.fetchall()
return watched_users
| 12,858
|
def service_stop_list(service_id, direction):
""" Queries all patterns for a service and creates list of stops sorted
topologically.
:param service_id: Service ID.
:param direction: Groups journey patterns by direction - False for
outbound and True for inbound.
"""
graph, dict_stops = service_graph_stops(service_id, direction)
if not dict_stops:
raise ValueError(f"No stops exist for service ID {service_id}")
return [dict_stops[v] for v in graph.sequence()]
| 12,859
|
def test_infer_a_json_converter(json_sample_path):
"""Infer a JSONConverter from file path string."""
json = infer_converter_from_file_type(json_sample_path)
assert isinstance(json, JSONConverter)
| 12,860
|
def convert_coord(value):
"""将GPS值转换为度分秒形式
Args:
value(str): GPS读取的经度或纬度
Returns:
list: 度分秒列表
"""
v1, v2 = value.split('.')
v2_dec = Decimal(f'0.{v2}') * 60 # + Decimal(random.random())
return [v1[:-2], v1[-2:], v2_dec.to_eng_string()]
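# Hedged worked example: the input follows the NMEA-style "ddmm.mmmm" reading
# implied by the code above; `Decimal` is required by convert_coord but not
# imported in this snippet. 0.3452 minutes * 60 = 20.712 seconds.
if __name__ == "__main__":
    from decimal import Decimal
    print(convert_coord("4724.3452"))  # ['47', '24', '20.7120']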
| 12,861
|
def _interpolate_zbuf(
pix_to_face: torch.Tensor, barycentric_coords: torch.Tensor, meshes
) -> torch.Tensor:
"""
A helper function to calculate the z buffer for each pixel in the
rasterized output.
Args:
pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
            the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
meshes: Meshes object representing a batch of meshes.
Returns:
zbuffer: (N, H, W, K) FloatTensor
"""
verts = meshes.verts_packed()
faces = meshes.faces_packed()
faces_verts_z = verts[faces][..., 2][..., None] # (F, 3, 1)
zbuf = interpolate_face_attributes(pix_to_face, barycentric_coords, faces_verts_z)[
..., 0
] # (1, H, W, K)
zbuf[pix_to_face == -1] = -1
return zbuf
| 12,862
|
def pp_chain(chain: Sequence[Subtree]) -> str:
"""Pretty-print a chain
"""
return ' '.join(
s.label if isinstance(s, ParentedTree) else str(s)
for s in chain
)
| 12,863
|
def ptr_ty(ty : 'LLVMType') -> 'LLVMPointerType':
"""``ty*``, i.e. a pointer to a value of type ``ty``."""
return LLVMPointerType(ty)
| 12,864
|
async def test_set_fan_speed(hass: HomeAssistant, device: Dyson360Eye):
"""Test setting fan speed of the vacuum."""
fan_speed_map = {
"Max": PowerMode.MAX,
"Quiet": PowerMode.QUIET,
}
for service_speed, command_speed in fan_speed_map.items():
await hass.services.async_call(
PLATFORM_DOMAIN,
SERVICE_SET_FAN_SPEED,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_FAN_SPEED: service_speed},
blocking=True,
)
device.set_power_mode.assert_called_with(command_speed)
| 12,865
|
def series_spline(self):
"""Fill NaNs using a spline interpolation."""
inds, values = np.arange(len(self)), self.values
invalid = isnull(values)
    valid = ~invalid
firstIndex = valid.argmax()
valid = valid[firstIndex:]
invalid = invalid[firstIndex:]
inds = inds[firstIndex:]
result = values.copy()
s = InterpolatedUnivariateSpline(inds[valid], values[firstIndex:][valid])
result[firstIndex:][invalid] = s(inds[invalid])
return Series(result, index=self.index, name=self.name)
| 12,866
|
def label(job_name, p5_connection=None):
"""
Syntax: Job <name> label
Description: Returns the (human readable) job label.
The following labels are returned:
Archive, Backup, Synchronize and System.
A Job label can be used in conjunction with the Job describe command to
better display the job record in various list displays.
Return Values:
-On Success: the job label
"""
method_name = "label"
return exec_nsdchat([module_name, job_name, method_name], p5_connection)
| 12,867
|
def rating(pairing, previous):
"""The lower the rating value is the better"""
current = set(chain.from_iterable(pair[1] for pair in pairing))
overlaps = current & set(previous)
if overlaps:
return sum(math.pow(0.97, previous[overlap] / 86400) for overlap in overlaps)
return 0.0
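# Hedged worked example: `math` and itertools.chain are required by rating() but
# not imported in this snippet. 0.97 ** (seconds / 86400) decays per day, so an
# overlap seen 1 day ago contributes 0.97 and one seen 30 days ago about 0.40.
if __name__ == "__main__":
    import math
    from itertools import chain

    pairing = [("alice", {"bob"}), ("carol", {"dave"})]
    previous = {"bob": 86400, "dave": 30 * 86400}
    print(round(rating(pairing, previous), 3))  # ~1.371 (0.97 + 0.97 ** 30)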
| 12,868
|
def copy_multipart_passthrough(src_blob: AnyBlob,
dst_blob: CloudBlob,
compute_checksums: bool=False) -> Optional[Dict[str, str]]:
"""
Copy from `src_blob` to `dst_blob`, passing data through the executing instance.
Optionally compute checksums.
"""
checksums: Optional[dict] = None
if compute_checksums:
checksums = {SSDSObjectTag.SSDS_MD5: checksum.S3EtagUnordered(),
SSDSObjectTag.SSDS_CRC32C: checksum.GScrc32cUnordered()}
with dst_blob.multipart_writer() as writer:
for part in src_blob.parts():
if checksums is not None:
for cs in checksums.values():
cs.update(part.number, part.data)
writer.put_part(part)
if checksums is not None:
return {key: cs.hexdigest() for key, cs in checksums.items()}
else:
return None
| 12,869
|
def read_manifest_from_csv(filename):
"""
Read the ballot manifest into a list in the format ['batch id : number of ballots']
from CSV file named filename
"""
manifest = []
with open(filename, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter = ",")
for row in reader:
# row.remove(row[1])
batch = " , ".join(row)
manifest.append(batch)
return manifest[1:]
| 12,870
|
def open_github(subdir=None):
"""Opens the GitHub repository for this package.
Args:
subdir (str, optional): Sub-directory of the repository. Defaults to None.
"""
import webbrowser
url = 'https://github.com/giswqs/geemap'
if subdir == 'source':
url += '/tree/master/geemap/'
elif subdir == 'examples':
url += '/tree/master/examples'
elif subdir == 'tutorials':
url += '/tree/master/tutorials'
webbrowser.open_new_tab(url)
| 12,871
|
def siblings_list():
"""
Shows child element iteration
"""
o = untangle.parse(
"""
<root>
<child name="child1"/>
<child name="child2"/>
<child name="child3"/>
</root>
"""
)
return ",".join([child["name"] for child in o.root.child])
| 12,872
|
def process_linked_datasets(labbook: LabBook, logged_in_username: str) -> None:
"""Method to update or init any linked dataset submodule references, clean up lingering files, and schedule
jobs to auto-import if needed
Args:
labbook: the labbook to analyze
logged_in_username: the current logged in username
Returns:
"""
im = InventoryManager()
# Update linked datasets inside the Project or clean them out if needed
im.update_linked_datasets(labbook, logged_in_username)
# Check for linked datasets, and schedule auto-imports
d = Dispatcher()
datasets = im.get_linked_datasets(labbook)
for ds in datasets:
kwargs = {
'logged_in_username': logged_in_username,
'dataset_owner': ds.namespace,
'dataset_name': ds.name,
'remote_url': ds.remote,
}
metadata = {'dataset': f"{logged_in_username}|{ds.namespace}|{ds.name}",
'method': 'dataset_jobs.check_and_import_dataset'}
d.dispatch_task(gtmcore.dispatcher.dataset_jobs.check_and_import_dataset,
kwargs=kwargs,
metadata=metadata)
| 12,873
|
def max_distance_from_home(traj, start_night='22:00', end_night='07:00', show_progress=True):
"""
Compute the maximum distance from home (in kilometers) traveled by an individual.
:param traj: the trajectories of the individuals
:type traj: TrajDataFrame
:param str start_night: the starting time for the night (format HH:MM)
:param str end_night: the ending time for the night (format HH:MM)
:param show_progress: if True show a progress bar
:type show_progress: boolean
:return: the maximum distance from home of the individuals
:rtype: pandas DataFrame
Examples:
Computing the maximum distance from home of each individual in a TrajDataFrame
>>> import skmob
>>> from skmob.measures.individual import max_distance_from_home
>>> from skmob import TrajDataFrame
    >>> tdf = TrajDataFrame.from_file('../data_test/brightkite_data.csv', user_id='user', datetime='check-in time', latitude='latitude', longitude='longitude')
>>> max_distance_from_home(tdf).head()
uid max_distance_from_home
0 1 46.409510
1 2 68.499333
2 3 56.806038
3 4 78.949592
4 5 69.393777
.. seealso:: :func:`maximum_distance`, :func:`home_location`
References:
        .. [canzian2015trajectories] Luca Canzian and Mirco Musolesi. "Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis." In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp '15), 1293--1304, 2015.
"""
    # if the 'uid' column is not present in the TrajDataFrame
if constants.UID not in traj.columns:
return pd.DataFrame([_max_distance_from_home_individual(traj,
start_night=start_night,
end_night=end_night)], columns=[sys._getframe().f_code.co_name])
if show_progress:
df = traj.groupby(constants.UID).progress_apply(lambda x: _max_distance_from_home_individual(x, start_night=start_night, end_night=end_night))
else:
df = traj.groupby(constants.UID).apply(lambda x: _max_distance_from_home_individual(x, start_night=start_night, end_night=end_night))
return pd.DataFrame(df).reset_index().rename(columns={0: sys._getframe().f_code.co_name})
| 12,874
|
def combine_histogram(old_hist, arr):
""" Collect layer histogram for arr and combine it with old histogram.
"""
new_max = np.max(arr)
new_min = np.min(arr)
new_th = max(abs(new_min), abs(new_max))
(old_hist, old_hist_edges, old_min, old_max, old_th) = old_hist
if new_th <= old_th:
hist, _ = np.histogram(arr,
bins=len(old_hist),
range=(-old_th, old_th))
return (old_hist + hist, old_hist_edges, min(old_min, new_min),
max(old_max, new_max), old_th)
else:
old_num_bins = len(old_hist)
old_step = 2 * old_th / old_num_bins
half_increased_bins = int((new_th - old_th) // old_step + 1)
new_num_bins = half_increased_bins * 2 + old_num_bins
new_th = half_increased_bins * old_step + old_th
hist, hist_edges = np.histogram(arr,
bins=new_num_bins,
range=(-new_th, new_th))
hist[half_increased_bins:new_num_bins -
half_increased_bins] += old_hist
return (hist, hist_edges, min(old_min, new_min), max(old_max,
new_max), new_th)
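# Hedged usage sketch: assumes numpy is imported as np elsewhere in the module.
# Seed the running histogram from a first batch, then fold in a wider batch so
# the second branch above re-bins with a larger threshold.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.default_rng(0)
    first = rng.standard_normal(1000)
    th = float(np.abs(first).max())
    hist, edges = np.histogram(first, bins=64, range=(-th, th))
    stats = (hist, edges, float(first.min()), float(first.max()), th)
    stats = combine_histogram(stats, 3.0 * rng.standard_normal(1000))
    print(len(stats[0]), stats[4])  # typically more than 64 bins and a wider threshold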
| 12,875
|
def main():
"""Make a jazz noise here"""
args = get_args()
annotations = args.annotations
outfile = args.outfile
input = args.csv
if not os.path.isfile(input):
die('"{}" is not a file'.format(input))
if not os.path.isfile(annotations):
die('"{}" is not a file'.format(annotations))
with open(input) as txt:
rows = ( line.split('\t') for line in txt )
hits = {row[1]:row[2] for row in rows}
tree = {}
with open(annotations) as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for rows in reader:
if rows['species'] == '':
rows['species'] = 'NA'
if rows['genus'] == '':
rows['genus'] = 'NA'
tree[rows['centroid']] = rows['genus'], rows['species']
# match two dictionary
out_fh = open(outfile, 'wt') if outfile else sys.stdout
name = ("seq_id", "pident", "genus", "species")
out_fh.write('\t'.join(name) + '\n')
for key in hits.keys():
if key in tree.keys():
out_fh.write(key + '\t' + hits[key] + '\t' + '\t'.join(tree[key]) + '\n')
else:
sys.stderr.write('Cannot find seq "{}" in lookup\n'.format(key))
| 12,876
|
def test_update_secondary_ids(client):
"""Function to test the update_secondary_ids function"""
customer = Customer(client, number='+254711892648')
data = [
{"key": "passport", "value": "808083", "expires_at": 300000000},
{
"key": "huduma",
"value": "808082",
"expires_at": 500000000,
},
]
response = loop.run_until_complete(customer.update_secondary_ids(data))
assert all(elem in response for elem in ("customer_id", "status", "description"))
response = loop.run_until_complete(customer.get_state())
assert list(value for elem, value in response["identity_state"]['secondary_ids'] if value in ("passport", "huduma"))
| 12,877
|
async def repo_is_here(wannasee):
""" For .repo command, just returns the repo URL. """
    await wannasee.edit("[Repo](https://github.com/tesbot07/ironbot) See it on GitHub.")
| 12,878
|
def unmunchify(x):
""" Recursively converts a Munch into a dictionary.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> sorted(unmunchify(b).items())
[('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
unmunchify will handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42,
... ponies=('are pretty!', Munch(lies='are trouble!')))
>>> sorted(unmunchify(b).items()) #doctest: +NORMALIZE_WHITESPACE
[('foo', ['bar', {'lol': True}]), ('hello', 42), ('ponies', ('are pretty!', {'lies': 'are trouble!'}))]
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
# Munchify x, using `seen` to track object cycles
seen = dict()
def unmunchify_cycles(obj):
# If we've already begun unmunchifying obj, just return the already-created unmunchified obj
try:
return seen[id(obj)]
except KeyError:
pass
# Otherwise, first partly unmunchify obj (but without descending into any lists or dicts) and save that
seen[id(obj)] = partial = pre_unmunchify(obj)
# Then finish unmunchifying lists and dicts inside obj (reusing unmunchified obj if cycles are encountered)
return post_unmunchify(partial, obj)
def pre_unmunchify(obj):
# Here we return a skeleton of unmunchified obj, which is enough to save for later (in case
# we need to break cycles) but it needs to filled out in post_unmunchify
if isinstance(obj, Mapping):
return dict()
elif isinstance(obj, list):
return type(obj)()
elif isinstance(obj, tuple):
type_factory = getattr(obj, "_make", type(obj))
return type_factory(unmunchify_cycles(item) for item in obj)
else:
return obj
def post_unmunchify(partial, obj):
# Here we finish unmunchifying the parts of obj that were deferred by pre_unmunchify because they
# might be involved in a cycle
if isinstance(obj, Mapping):
partial.update((k, unmunchify_cycles(obj[k])) for k in iterkeys(obj))
elif isinstance(obj, list):
partial.extend(unmunchify_cycles(v) for v in obj)
elif isinstance(obj, tuple):
for (value_partial, value) in zip(partial, obj):
post_unmunchify(value_partial, value)
return partial
return unmunchify_cycles(x)
| 12,879
|
def split_data(mapping, encoded_sequence):
""" Function to split the prepared data in train and test
Args:
        mapping (dict): dictionary mapping of all unique input characters to integers
        encoded_sequence (list): number-encoded character sequences
Returns:
numpy array : train and test split numpy arrays
"""
encoded_sequence_ = np.array(encoded_sequence)
X, y = encoded_sequence_[:, :-1], encoded_sequence_[:, -1]
y = to_categorical(y, num_classes=len(mapping))
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=42)
return X_train, X_test, y_train, y_test
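# Hedged usage sketch: one possible set of imports for the helpers used above
# (numpy, Keras' to_categorical, scikit-learn's train_test_split); the real
# module may import them differently.
if __name__ == "__main__":
    import numpy as np
    from sklearn.model_selection import train_test_split
    from tensorflow.keras.utils import to_categorical

    mapping = {"a": 0, "b": 1, "c": 2}
    encoded_sequence = [[0, 1, 2], [1, 2, 0], [2, 0, 1], [0, 2, 1], [1, 0, 2],
                        [2, 1, 0], [0, 1, 1], [1, 2, 2], [2, 2, 0], [0, 0, 1]]
    X_train, X_test, y_train, y_test = split_data(mapping, encoded_sequence)
    print(X_train.shape, y_train.shape)  # (9, 2) (9, 3) with the 10% test split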
| 12,880
|
def plot_spectral_hist(freqs, power_bins, spectral_hist, spectrum_freqs=None,
spectrum=None, ax=None, **kwargs):
"""Plot spectral histogram.
Parameters
----------
freqs : 1d array
Frequencies over which the histogram is calculated.
power_bins : 1d array
Power bins within which histogram is aggregated.
spectral_hist : 2d array
Spectral histogram to be plotted.
spectrum_freqs : 1d array, optional
Frequency axis of the power spectrum to be plotted.
spectrum : 1d array, optional
Spectrum to be plotted over the histograms.
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
**kwargs
Keyword arguments for customizing the plot.
Examples
--------
Plot a spectral histogram:
>>> from neurodsp.sim import sim_combined
>>> from neurodsp.spectral import compute_spectral_hist
>>> sig = sim_combined(n_seconds=100, fs=500,
... components={'sim_synaptic_current': {},
... 'sim_bursty_oscillation' : {'freq': 10}},
... component_variances=(0.5, 1))
>>> freqs, bins, spect_hist = compute_spectral_hist(sig, fs=500, nbins=40, f_range=(1, 75),
... cut_pct=(0.1, 99.9))
>>> plot_spectral_hist(freqs, bins, spect_hist)
"""
# Get axis, by default scaling figure height based on number of bins
figsize = (8, 12 * len(power_bins) / len(freqs))
ax = check_ax(ax, figsize)
# Plot histogram intensity as image and automatically adjust aspect ratio
im = ax.imshow(spectral_hist, extent=[freqs[0], freqs[-1], power_bins[0], power_bins[-1]],
aspect='auto')
plt.colorbar(im, label='Probability')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Log10 Power')
# If a power spectrum is provided, plot over the histogram data
if spectrum is not None:
plt_inds = np.logical_and(spectrum_freqs >= freqs[0], spectrum_freqs <= freqs[-1])
ax.plot(spectrum_freqs[plt_inds], np.log10(spectrum[plt_inds]), color='w', alpha=0.8)
| 12,881
|
def plot_data(data, savefile):
"""Creates a phase diagram plot from the data by using a "for" loop to plot each
column within the dataframe with a series of predefined colors for each column
and saves the figure as a jpg."""
phase_diagram_fig = plt.figure()
plot1 = phase_diagram_fig.add_subplot()
colorlist = ["blue", "purple", "green", "yellow", "red"]
counter = 0
for column in data.columns[1:6]:
plot1.plot(data.loc[:, "Pressure"], data.loc[:, column], "-",
color=colorlist[counter], markeredgecolor="black",
markersize=9, label=str(column))
counter = counter+1
plot1.set_xlabel("Pressure (GPa)")
plot1.set_ylabel("Temperature °C")
plot1.set_title("Lunar Basalt 12009 MAGPOX Phase Diagram")
plot1.set_ylim(1000, 1500)
plot1.legend()
phase_diagram_fig.savefig(savefile)
plt.show()
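# Hypothetical call (not part of the original source); assumes pandas is available as pd
# alongside the matplotlib import the function uses. Column names and values are
# illustrative -- the function expects a "Pressure" column followed by up to five
# temperature columns.
data = pd.DataFrame({
    "Pressure": [0.0, 0.5, 1.0, 1.5],
    "Olivine in": [1210, 1260, 1310, 1360],
    "Pigeonite in": [1150, 1200, 1255, 1310],
})
plot_data(data, "phase_diagram.png")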
| 12,882
|
def fsp_loss(teacher_var1_name,
teacher_var2_name,
student_var1_name,
student_var2_name,
program=None):
"""Combine variables from student model and teacher model by fsp-loss.
Args:
teacher_var1_name(str): The name of teacher_var1.
teacher_var2_name(str): The name of teacher_var2. Except for the
second dimension, all other dimensions should
be consistent with teacher_var1.
student_var1_name(str): The name of student_var1.
student_var2_name(str): The name of student_var2. Except for the
second dimension, all other dimensions should
be consistent with student_var1.
program(Program): The input distiller program. If not specified,
the default program will be used. Default: None
Returns:
Variable: fsp distiller loss.
"""
    if program is None:
program = paddle.static.default_main_program()
teacher_var1 = program.global_block().var(teacher_var1_name)
teacher_var2 = program.global_block().var(teacher_var2_name)
student_var1 = program.global_block().var(student_var1_name)
student_var2 = program.global_block().var(student_var2_name)
teacher_fsp_matrix = paddle.fluid.layers.fsp_matrix(teacher_var1,
teacher_var2)
student_fsp_matrix = paddle.fluid.layers.fsp_matrix(student_var1,
student_var2)
fsp_loss = paddle.mean(
paddle.nn.functional.square_error_cost(student_fsp_matrix,
teacher_fsp_matrix))
return fsp_loss
| 12,883
|
def diff_numpy_array(A, B):
"""
Numpy Array A - B
return items in A that are not in B
By Divakar
https://stackoverflow.com/a/52417967/1497443
"""
return A[~np.in1d(A, B)]
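# Small illustration (assumes numpy is imported as np, as the function requires).
A = np.array([1, 2, 3, 4, 5])
B = np.array([2, 4, 6])
print(diff_numpy_array(A, B))   # -> [1 3 5]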
| 12,884
|
def load_training_data(training_fns, trunc_min_scores,
trunc_max_scores, debug=False):
""" First parse group, read and position to find shared data points
Then read in training scores, truncating as appropriate """
# Parse file twice. First time get all the loci, second time all the value data
training_ids = [parse_training_loci(t_ids) for t_ids in training_fns]
shared_ids = set.intersection(*training_ids)
id_list = sorted(shared_ids)
if debug:
print('Number of shared ids in training sets:', len(shared_ids), file=sys.stderr)
# value_array contents
# 0 truth
# 1 strand
# 2:len(train)+2, tool score x train
# 2+len(train):2+len(train)*2, tool prediction x train
# 2+2*len(train) or -2, numeric order
# 3+2*len(train) or -1, model predicted score
groups = len(training_fns)
value_array = np.zeros((len(shared_ids),4+(2*groups)), dtype=np.single)
value_array[:,-2] = np.arange(0,len(shared_ids), dtype=np.single)
for index, (training_fn, t_min, t_max) in \
enumerate(zip(training_fns, trunc_min_scores, trunc_max_scores)):
# Read in values
contents = parse_training_values(training_fn, shared_ids, t_min, t_max, debug=debug)
for i, id in enumerate(id_list):
strand, label, predicted, score = contents[id]
if index == 0:
value_array[i,0] = label
value_array[i,1] = strand
value_array[i,2+index] = score
value_array[i,2+groups+index] = predicted
return value_array, id_list
| 12,885
|
def _find_data_between_ranges(data, ranges, top_k):
"""Finds the rows of the data that fall between each range.
Args:
data (pd.Series): The predicted probability values for the postive class.
ranges (list): The threshold ranges defining the bins. Should include 0 and 1 as the first and last value.
top_k (int): The number of row indices per bin to include as samples.
Returns:
list(list): Each list corresponds to the row indices that fall in the range provided.
"""
results = []
for i in range(1, len(ranges)):
mask = data[(data >= ranges[i - 1]) & (data < ranges[i])]
if top_k != -1:
results.append(mask.index.tolist()[: min(len(mask), top_k)])
else:
results.append(mask.index.tolist())
return results
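# Illustrative sketch (assumes pandas is imported as pd); the probabilities and bin edges
# below are made up.
probs = pd.Series([0.05, 0.2, 0.35, 0.6, 0.85, 0.95])
ranges = [0, 0.25, 0.5, 0.75, 1.0]
print(_find_data_between_ranges(probs, ranges, top_k=2))   # -> [[0, 1], [2], [3], [4, 5]]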
| 12,886
|
def validar_entero_n():
    """
    Prompts the user for an integer n and returns it.
    Returns False if the input cannot be parsed as an integer.
    """
    try:
        n = int(input('n= '))  # if a float is entered the program still works: it is caught as invalid
    except ValueError:
        print('Invalid number')
        return False
    else:
        return n
| 12,887
|
def Conv_Cifar10_32x64x64():
"""A 3 hidden layer convnet designed for 32x32 cifar10."""
base_model_fn = _cross_entropy_pool_loss([32, 64, 64],
jax.nn.relu,
num_classes=10)
datasets = image.cifar10_datasets(batch_size=128)
return _ConvTask(base_model_fn, datasets)
| 12,888
|
def rotate(angle_list: List, delta: float) -> List:
"""Rotates a list of angles (wraps around at 2 pi)
Args:
angle_list (List): list of angles in pi radians
delta (float): amount to change in pi radians
Returns:
List: new angle list in pi radians
"""
new_angle_list = []
for angle in angle_list:
        # Wrap around at 2 pi (2.0 in pi-radian units); the modulo also covers negative
        # deltas and shifts of more than a full turn.
        new_angle = (angle + delta) % 2.0
        new_angle_list.append(new_angle)
new_angle_list.sort()
return new_angle_list
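# Example: a shift of 1.0 (i.e. pi radians in these units) wraps values past 2.0 around.
print(rotate([0.5, 1.75], delta=1.0))   # -> [0.75, 1.5]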
| 12,889
|
def main():
"""
Parses command line arguments and invokes the appropriate method to respond to them
Returns
-------
None
"""
parser = argparse.ArgumentParser(
prog='harmony-gdal', description='Run the GDAL service'
)
harmony.setup_cli(parser)
args = parser.parse_args()
if (harmony.is_harmony_cli(args)):
harmony.run_cli(parser, args, HarmonyAdapter)
else:
parser.error("Only --harmony CLIs are supported")
| 12,890
|
def global_average_pooling_3d(tensor: TorchTensorNCX) -> TorchTensorNCX:
"""
3D Global average pooling.
Calculate the average value per sample per channel of a tensor.
Args:
tensor: tensor with shape NCDHW
Returns:
a tensor of shape NC
"""
assert len(tensor.shape) == 5, 'must be a NCDHW tensor!'
return F.avg_pool3d(tensor, tensor.shape[2:]).squeeze(2).squeeze(2).squeeze(2)
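# Quick shape check (assumes torch and torch.nn.functional as F are imported, as the
# function requires).
x = torch.randn(2, 3, 4, 5, 6)             # an NCDHW tensor
print(global_average_pooling_3d(x).shape)  # torch.Size([2, 3])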
| 12,891
|
def verbose_create():
"""
Initiate detailed post creation process
"""
create(get_input())
| 12,892
|
def get_LAB_L_SVD_s(image):
"""Returns s (Singular values) SVD from L of LAB Image information
Args:
image: PIL Image or Numpy array
Returns:
vector of singular values
Example:
>>> from PIL import Image
>>> from ipfml.processing import transform
>>> img = Image.open('./images/test_img.png')
>>> s = transform.get_LAB_L_SVD_s(img)
>>> len(s)
200
"""
L = get_LAB_L(image)
return compression.get_SVD_s(L)
| 12,893
|
def print_metrics(y_t, y_pred_t, mode=''):
"""
    Print various classification metrics for the given labels and predictions.
    Parameters
    ----------
    y_t : array-like
        Ground-truth binary labels.
    y_pred_t : array-like
        Predicted binary labels.
    mode : string
        Name of the dataset split, used in the printed headers.
"""
print('Model performance on the {} dataset:'.format(mode))
# mse = mean_squared_error(y_t, y_pred_t)
# logloss = log_loss(y_t, y_pred_t)
accuracy = accuracy_score(y_t, y_pred_t)
f1 = f1_score(y_t, y_pred_t)
precision_micro = precision_score(y_t, y_pred_t, average='micro')
precision_macro = precision_score(y_t, y_pred_t, average='macro')
avg_precision = average_precision_score(y_t, y_pred_t)
precision = precision_score(y_t, y_pred_t)
recall = recall_score(y_t, y_pred_t, average='binary')
auc = roc_auc_score(y_t, y_pred_t)
r2 = r2_score(y_t, y_pred_t)
print(' Metric {}'.format(mode.title()))
print('accuracy........... {0:8.4f}'.format(accuracy))
print('recall............. {0:8.4f}'.format(recall))
print('auc................ {0:8.4f}'.format(auc))
print('precision (p=0.5).. {0:8.4f}'.format(precision))
print('precision (avg).... {0:8.4f}'.format(avg_precision))
print('precision (micro).. {0:8.4f}'.format(precision_micro))
print('precision (macro).. {0:8.4f}'.format(precision_macro))
print('f1................. {0:8.4f}'.format(f1))
print('r2................. {0:8.4f}'.format(r2))
# print('logloss............ {0:8.4f}'.format(logloss))
# print('mse................ {0:8.4f}'.format(mse))
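# Example call with toy binary labels (assumes the sklearn.metrics functions used above
# are imported); the values are illustrative only.
print_metrics([0, 1, 1, 0, 1], [0, 1, 0, 0, 1], mode='validation')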
| 12,894
|
def path_inclusion_filter_fn(path, param, layer):
"""Returns whether or not layer name is contained in path."""
return layer in path
| 12,895
|
def _parse_feature(line: PipelineRecord) -> Tuple[str, Coordinates, Feature]:
""" Creates a Feature from a line of output from a CSVReader """
contig = line[0]
coordinates = parse_coordinates(line[1])
feature = line[2]
# Piler-cr and BLAST both use 1-based indices, but Opfi uses 0-based indices.
# To make both coordinate systems consistent, we subtract 1 from the start
# since feature coordinates come directly from those tools.
    # If features are on the reverse strand, the first coordinate will be larger
    # than the second, but operon_analyzer assumes the start is always less than the
    # end
    first_coord, second_coord = parse_coordinates(line[3])
    feature_start = min(first_coord, second_coord) - 1
    feature_end = max(first_coord, second_coord)
    query_orfid = line[4]
    # When the strand is not given explicitly, infer it from the original (un-normalized)
    # coordinate order; comparing the normalized start/end would always give 1.
    strand = int(line[5]) if line[5] else (1 if first_coord <= second_coord else -1)
hit_accession = line[6]
hit_eval = float(line[7]) if line[7] else None
description = line[8]
sequence = line[9]
if len(line) > 10:
bit_score = float(line[10]) if line[10] != '' else None
raw_score = int(line[11]) if line[11] != '' else None
aln_len = int(line[12]) if line[12] != '' else None
pident = float(line[13]) if line[13] != '' else None
nident = int(line[14]) if line[14] != '' else None
mismatch = int(line[15]) if line[15] != '' else None
positive = int(line[16]) if line[16] != '' else None
gapopen = int(line[17]) if line[17] != '' else None
gaps = int(line[18]) if line[18] != '' else None
ppos = float(line[19]) if line[19] != '' else None
qcovhsp = int(line[20]) if line[20] != '' else None
contig_filename = line[21] if line[21] else ''
else:
bit_score = None
raw_score = None
aln_len = None
pident = None
nident = None
mismatch = None
positive = None
gapopen = None
gaps = None
ppos = None
qcovhsp = None
contig_filename = None
return contig, contig_filename, coordinates, Feature(
feature,
(feature_start, feature_end),
query_orfid,
strand,
hit_accession,
hit_eval,
description,
sequence,
bit_score,
raw_score,
aln_len,
pident,
nident,
mismatch,
positive,
gapopen,
gaps,
ppos,
qcovhsp)
| 12,896
|
def average_summary_df_tasks(df, avg_columns):
""" Create averages of the summary df across tasks."""
new_df = []
# Columns to have after averaging
keep_cols = ["dataset", "method_name", "trial_number"]
subsetted = df.groupby(keep_cols)
for subset_indices, subset_df in subsetted:
return_dict = {}
return_dict.update(dict(zip(keep_cols, subset_indices)))
for column in avg_columns:
task_values = subset_df[column].values
min_length = min([len(i) for i in task_values])
new_task_values = []
for j in task_values:
j = np.array(j)
if len(j) > min_length:
percentiles = np.linspace(0, len(j) - 1, min_length).astype(int)
new_task_values.append(j[percentiles])
else:
new_task_values.append(j)
avg_task = np.mean(np.array(new_task_values), axis=0).tolist()
return_dict[column] = avg_task
new_df.append(return_dict)
return pd.DataFrame(new_df)
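# Hypothetical illustration (assumes pandas as pd and numpy as np are imported): two task
# curves of different lengths are resampled to the shorter length, then averaged.
df = pd.DataFrame([
    {"dataset": "qm9", "method_name": "rf", "trial_number": 0, "score": [1.0, 2.0, 3.0, 4.0]},
    {"dataset": "qm9", "method_name": "rf", "trial_number": 0, "score": [2.0, 4.0]},
])
print(average_summary_df_tasks(df, ["score"])["score"].iloc[0])   # -> [1.5, 4.0]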
| 12,897
|
def show_result(img, result, class_names, score_thr=0.3, out_file=None):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(bool)  # np.bool was removed in newer NumPy
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(
img.copy(),
bboxes,
labels,
class_names=class_names,
score_thr=score_thr,
show=out_file is None,
out_file=out_file)
| 12,898
|
def ml_variance(values, mean):
"""
Given a list of values assumed to come from a normal distribution and
their maximum likelihood estimate of the mean, compute the maximum
likelihood estimate of the distribution's variance of those values.
There are many libraries that do something like this, but they
likely don't do exactly what you want, so you should not use them
directly. (And to be clear, you're not allowed to use them.)
"""
    # Maximum likelihood estimate: the mean of squared deviations from the given mean
    # (dividing by n rather than n - 1).
    return sum((x - mean) ** 2 for x in values) / len(values)
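# Sanity check: for the sample [2, 4, 6] with mean 4, the MLE variance is (4 + 0 + 4) / 3.
print(ml_variance([2, 4, 6], mean=4))   # -> 2.666...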
| 12,899
|