| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def test_gcsfetchstrategy_without_url(_fetch_method):
"""Ensure constructor with no URL fails."""
with spack.config.override('config:url_fetch_method', _fetch_method):
with pytest.raises(ValueError):
spack.fetch_strategy.GCSFetchStrategy(None)
| 12,300
|
def combine_dataframes(dfs: List[pd.DataFrame]) -> pd.DataFrame:
"""
Receives a list of DataFrames and concatenates them. They must all have the same header.
:param dfs: List of DataFrames
:return: Single concatenated DataFrame
"""
df = pd.concat(dfs, sort=False)
return df
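# A hedged usage sketch (not part of the original snippet), assuming pandas is
# already imported as pd in this module: two frames sharing a header are stacked.
df_a = pd.DataFrame({'x': [1, 2], 'y': [3, 4]})
df_b = pd.DataFrame({'x': [5], 'y': [6]})
combined = combine_dataframes([df_a, df_b])  # 3 rows, columns x and y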
| 12,301
|
def reduce_company_info_file(
ticker_symbols_file, company_info_input_file, company_info_output_file):
"""Reduce the company info file."""
if not ticker_symbols_file:
raise click.BadParameter(
'--ticker-symbols-file option is required')
if not company_info_input_file:
raise click.BadParameter(
'--company-info-input-file option is required')
if not company_info_output_file:
raise click.BadParameter(
'--company-info-output-file option is required')
ticker_symbols_raw = ticker_symbols_file.readlines()
ticker_symbols = {l.replace('\n', '') for l in ticker_symbols_raw}
click.echo('{0} ticker symbols to process'.format(len(ticker_symbols)))
ind_sectors_by_title = {}
company_info_input_csv = csv.DictReader(
company_info_input_file, encoding='utf-8-sig')
company_info_output_file.write(
'ticker_symbol,title,sector\n')
for line in company_info_input_csv:
ticker_symbol = line['ticker_symbol'].strip()
company_title = line['title'].strip()
ind_sector_title = line['sector'].strip()
if ticker_symbol in ticker_symbols:
company_info_output_file.write('{0},{1},{2}\n'.format(
ticker_symbol, company_title, ind_sector_title))
click.echo('Done!')
| 12,302
|
def df_to_hdf5(df, key, dir_path):
"""
Save the DataFrame object as an HDF5 file. The file is stored
in the directory specified and uses the key for the filename
and 'h5' as the extension.
:param df: DataFrame to save as a file
:param key: ID for storage and retrieval
    :param dir_path: Directory to store the HDF5 data file
    :return: Path of the saved HDF5 file
    """
file_path = os.path.join(dir_path, key + '.h5')
df.to_hdf(file_path, key, complevel=9, complib='zlib')
return file_path
| 12,303
|
def _switch_default_role(db, obj, admin):
"""Switch between default user/service and admin roles for users/services"""
user_role = orm.Role.find(db, 'user')
admin_role = orm.Role.find(db, 'admin')
def add_and_remove(db, obj, current_role, new_role):
if current_role in obj.roles:
strip_role(db, entity=obj, rolename=current_role.name)
# only add new default role if the user has no other roles
if len(obj.roles) < 1:
grant_role(db, entity=obj, rolename=new_role.name)
if admin:
add_and_remove(db, obj, user_role, admin_role)
else:
add_and_remove(db, obj, admin_role, user_role)
| 12,304
|
def _bound_accumulated_rotations(robot_name, command_dicts):
"""
Checks axes whose rotations have been accumulated to ensure they've not
exceeded the stated limits. If they have, this function attempts to slide
the commands by +/- 360 degrees. If the limits are still exceeded, this
function returns the commands that exceed the limits by the least amount
:param robot_name:
:param command_dicts:
:return:
"""
# TODO: Do this check using userOptions instead...
# Get axes, if they exist
command_axes = []
for command_dict in command_dicts:
axes = command_dict[postproc.AXES] if postproc.AXES in command_dict else None
command_axes.append(list(axes))
reconcile_axes = mimic_utils.get_reconcile_axes(robot_name)
rotation_limits = mimic_utils.get_all_limits(robot_name)['Position']
# Make sure the user has selected use of axes
if not all(x is None for x in command_axes):
for i, reconcile_axis in enumerate(reconcile_axes):
if reconcile_axis:
valid_solutions = []
axis_number = i + 1 # Axis numbers are 1-indexed
axis_name = 'Axis {}'.format(axis_number)
# Get the axis limits
limit_min = rotation_limits[axis_name]['Min Limit']
limit_max = rotation_limits[axis_name]['Max Limit']
# Create a list of commands for the axis to be checked
axis_vals_init = [axis[i] for axis in command_axes]
axis_min = min(axis_vals_init)
axis_max = max(axis_vals_init)
'''
print "#######################################################"
print "Initial Axis {} vals: ".format(i+1), axis_vals_init
print "Axis Min Limit: ", limit_min
print "Axis Max Limit: ", limit_max
print "Axis Min: ", axis_min
print "Axis Max: ", axis_max
'''
# If both the max and min axis values exceed their respective
# limits, then there's nothing we can do about it, so we don't
# modify the commands
if axis_min < limit_min and axis_max > limit_max:
# print '## Both limits exceeded, but no shift'
continue
# If no limits are violated, add the axes to the list of valid solutions
if axis_min >= limit_min and axis_max <= limit_max:
valid_solutions.append(axis_vals_init)
                # Get the shifted axes and append them to valid_solutions if they're valid (i.e. not None)
axis_vals_shift = _shift_accumulated_axes(axis_vals_init, limit_max, limit_min)
if axis_vals_shift:
valid_solutions.append(axis_vals_shift)
                # If we have no valid solutions, continue with the initial solution
if len(valid_solutions) == 0:
# print 'No valid solutions, returning initial solutions'
sol = axis_vals_init
# If we only have one valid solution, we can return that solution
elif len(valid_solutions) == 1:
# print 'Only one valid solution'
sol = valid_solutions[0]
# If we have two valid solutions, prompt the user to pick which one they want
# if they have the option checked on the program UI, otherwise, return the
# first solution
else:
# print 'Two valid solutions -> user choice'
prompt_opt = pm.checkBox('cb_promptOnRedundantSolutions', value=True, query=True)
# If the user option for this feature is selected, prompt the user
if prompt_opt:
user_selection = _get_bounded_solution_user_input(valid_solutions, axis_number)
sol = valid_solutions[user_selection]
# Otherwise, continue with the initial solution
else:
sol = axis_vals_init
# Drop the final solution back into the command_dict
for command_index in range(len(command_dicts)):
command_axes[command_index][i] = sol[command_index]
reconciled_axes = postproc.Axes(*command_axes[command_index])
command_dicts[command_index][postproc.AXES] = reconciled_axes
return command_dicts
| 12,305
|
def train_genomes(genomes, dataset):
"""Train each network.
Args:
        genomes (list): Current population of genomes
dataset (str): Dataset to use for training/evaluating
"""
pbar = tqdm(total=len(genomes))
for genome in genomes:
genome.train(dataset)
genome.print_genome()
pbar.update(1)
pbar.close()
# Sort our final population.
genomes = sorted(genomes, key=lambda x: x.accuracy, reverse=True)
# Print out the top 5 networks.
print_genomes(genomes[:5])
| 12,306
|
def get_email_manager(config: CFG, session: Session):
"""
:return: EmailManager instance
"""
# TODO: Find a way to import properly without cyclic import
smtp_config = SmtpConfiguration(
config.EMAIL__NOTIFICATION__SMTP__SERVER,
config.EMAIL__NOTIFICATION__SMTP__PORT,
config.EMAIL__NOTIFICATION__SMTP__USER,
config.EMAIL__NOTIFICATION__SMTP__PASSWORD,
)
return EmailManager(config=config, smtp_config=smtp_config, session=session)
| 12,307
|
def test_list_g_month_enumeration_3_nistxml_sv_iv_list_g_month_enumeration_4_3(mode, save_output, output_format):
"""
Type list/gMonth is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/list/gMonth/Schema+Instance/NISTSchema-SV-IV-list-gMonth-enumeration-4.xsd",
instance="nistData/list/gMonth/Schema+Instance/NISTXML-SV-IV-list-gMonth-enumeration-4-3.xml",
class_name="NistschemaSvIvListGMonthEnumeration4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 12,308
|
def _approx_sp(salt,pres):
"""Approximate TDl at SP.
Approximate the temperature and liquid water density of sea-ice with
the given salinity and pressure.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:returns: Temperature and liquid water density (both in SI units).
"""
CDIF = _CLIQ-_CICE
R0 = _LILTP/_TTP / CDIF
r1 = (pres-_PTPE) * (_VITP-_VLTP)/_TTP / CDIF
r2 = _RSAL*salt / CDIF
w = -(1 - R0 + r1) * numpy.exp(-(1 - R0 - r2))
negz = 1 - (1 + _E*w)**_C_SP
temp = (1 - R0 + r1)*_TTP/negz
dliq = _dliq_default(temp,pres)
return temp, dliq
| 12,309
|
def test_close_with_heartbeatTimer(caplog):
"""Test close() with heartbeatTimer"""
iface = MeshInterface(noProto=True)
anode = Node('foo', 'bar')
radioConfig = RadioConfig()
radioConfig.preferences.phone_timeout_secs = 10
anode.radioConfig = radioConfig
iface.localNode = anode
assert iface.heartbeatTimer is None
with caplog.at_level(logging.DEBUG):
iface._startHeartbeat()
assert iface.heartbeatTimer is not None
iface.close()
| 12,310
|
def recharge_polybar():
"""Command to restart polybar"""
os.system("polybar-msg hook pomobar 1 &> /dev/null")
| 12,311
|
def test_get_4x4_determinant_method():
"""tests get 4x4 determinant method"""
matrix = Matrix([
[4, 5, 2, -1],
[5, 8, 7, 6],
[3, 7, -4, -2],
[-1, 6, -2, 5]
])
det = matrix.get_determinant()
assert det == 378
| 12,312
|
def test_can_parse_alias_file():
"""
Make sure aliases.json file can be parsed.
This is to make sure an edit doesn't accidentally corrupt it.
"""
# We'll have to hardcode this.
alias_file_path = os.path.join(
config.DEFAULT_EXAMPLES_DIR,
util.ALIAS_FILE_NAME
)
alias_file_contents = util._get_contents_of_file(alias_file_path)
alias_dict = json.loads(alias_file_contents)
# We'll check that link goes to ln, as we know that one will be present.
assert_equal(alias_dict['link'], 'ln')
| 12,313
|
def test():
"""A little test suite."""
import io
testtext = 'Netstrings rule'
inf = io.StringIO()
outf = io.StringIO()
print("Writing a Netstring ... ", end=' ')
f = NetStringIO(outf)
f.write(testtext)
print(outf.getvalue(), end=' ')
inf = io.StringIO(outf.getvalue())
f.close()
print("Reading this Netstring ... ", end=' ')
fz = NetStringIO(inf)
ret = fz.read()
assert ret == testtext, "String is different after reading"
print(ret)
fz.close()
| 12,314
|
def init_config_dirs():
"""Creates .tiflash base folder as well as any configuration subfolders
inside of it.
Note:
Typically only used by setup.py when pip installing package
"""
base = get_base_dir()
custom = get_custom_dir()
if not os.path.exists(base):
os.mkdir(base)
if not os.path.exists(custom):
os.mkdir(custom)
| 12,315
|
def createToolBar():
"""Creates the toolbar containing the action to open the Settings Dialog
"""
    Data.toolbar = sbsui.add_toolbar("Megascan Link", "megascanlink")
qicon = QtGui.QIcon()
qicon.addPixmap(icon.getIconAsQPixmap("megascan_logo_idle.png"))
qicon.addPixmap(icon.getIconAsQPixmap("megascan_logo.png"), QtGui.QIcon.Active)
action = Data.toolbar.addAction(qicon, None)
action.triggered.connect(openSettingsDialog)
| 12,316
|
def _get_connection_params(resource):
"""Extract connection and params from `resource`."""
args = resource.split(";")
if len(args) > 1:
return args[0], args[1:]
else:
return args[0], []
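# Hedged usage sketch: the first ';'-separated token is the connection string and
# any remaining tokens are returned as extra parameters.
assert _get_connection_params('db.sqlite') == ('db.sqlite', [])
assert _get_connection_params('db.sqlite;ro;timeout=5') == ('db.sqlite', ['ro', 'timeout=5'])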
| 12,317
|
def download_archive(url, out_path):
"""Downloads a file from the specified URL to the specified path on disk."""
return subprocess.call(['curl', url, '-o', out_path])
| 12,318
|
def listdata(
resp: Union[requests.Response, List[Dict[str, Any]]],
*keys: Union[str, Callable[[], bool]],
sort: Union[bool, str] = True,
full: bool = False, # returns dicts instead of tuples
) -> List[tuple]:
"""Return data from a given requests.Response object.
    Only non-reserved fields are returned.
    By default data are converted to List[Tuple[Any]], but if `full` is True,
    then List[Dict[str, Any]] is returned.
Usage:
>>> data = [
... {'_id': 1, 'odd': 1, 'even': 2},
... {'_id': 2, 'odd': 3, 'even': 4},
... {'_id': 3, 'odd': 5, 'even': 6},
    ... ]
>>> listdata(data)
[
(1, 2),
(3, 4),
(5, 6),
]
>>> listdata(data, 'even')
[2, 4, 6]
>>> listdata(data, 'odd', 'even')
[
(1, 2),
(3, 4),
(5, 6),
]
>>> listdata(data, full=True)
data = [
{'odd': 1, 'even': 2},
{'odd': 3, 'even': 4},
{'odd': 5, 'even': 6},
    ]
"""
# Prepare data
if isinstance(resp, list):
data = resp
elif resp.headers['content-type'].startswith('text/html'):
data = resp.context
assert resp.status_code == 200, pformat(data)
assert 'data' in data, pformat(data)
assert 'header' in data, pformat(data)
header = data['header']
keys = keys or [k for k in header if not k.startswith('_')]
data = [
{k: v.value for k, v in zip(header, row)}
for row in cast(List[List[Cell]], data['data'])
]
else:
data = resp.json()
assert resp.status_code == 200, pformat(data)
assert '_data' in data, pformat(data)
data = data['_data']
keys = keys or sorted({
k
for d in flatten(data)
for k in d
if not k.startswith('_')
})
# Clean data
if full:
data = [take(keys, row) for row in data]
elif len(keys) == 1:
k = keys[0]
data = [take(k, row) for row in data]
else:
data = [tuple(take(k, row) for k in keys) for row in data]
# Sort
if sort is True:
data = sorted(data, key=str)
elif sort:
if full:
sort_key = operator.itemgetter(sort)
else:
sort_key = operator.itemgetter(keys.index(sort))
data = sorted(data, key=sort_key)
return data
| 12,319
|
def create_chart_data(start_path, excluded_dashboards=excluded_dashboards):
"""Read chart names and SQL code from the repository.
Args:
start_path (str): "./dashboards"
excluded_dashboards (list): list of dashboards to exclude from testing (e.g. WIP, Untitled, etc)
Returns:
        chart_results (list of dict): contains name, dashboard owner and SQL code for each chart
"""
chart_results = []
print("Processing the charts data...\n")
for path, _, files in os.walk(start_path):
for filename in files:
if ('sql' in filename) and ('text' not in filename) and all(dashboard not in path for dashboard in excluded_dashboards):
chart_dict = {}
path_sql = os.path.join(path, filename)
dashboard_with_id = path_sql.split('/')[2]
chart_with_id = path_sql.split('/')[3]
chart_dict["NAME"] = 'chart_' + dashboard_with_id.split('.')[0] + '_' + chart_with_id.replace(".", "_")
try:
with open(path_sql) as f:
chart_dict['SQL_CODE'] = f.read()
except Exception as e:
print(e)
path_chart_yaml = os.path.join(path, filename.replace(".sql", ".yaml"))
try:
with open(path_chart_yaml) as f:
parsed_yaml_file = yaml.load(f, Loader=yaml.FullLoader)
chart_name = parsed_yaml_file['display_name']
except Exception as e:
print(e)
path_dashboard_yaml = os.path.join(start_path, dashboard_with_id, dashboard_with_id.split('.')[0] + '.yaml')
try:
with open(path_dashboard_yaml) as f:
parsed_yaml_file = yaml.load(f, Loader=yaml.FullLoader)
dashboard_name = parsed_yaml_file['display_name']
chart_dict["OWNER"] = parsed_yaml_file['dashboard_preferences']['settings']['owner'] or "No Owner"
chart_dict["BI_NAME"] = (dashboard_name + ": " + chart_name) or "No Name"
except Exception as e:
chart_dict["OWNER"] = "No Owner"
chart_dict["BI_NAME"] = "No Name"
print(e)
chart_results.append(chart_dict)
return chart_results
| 12,320
|
def get_current_pkg():
"""
Returns:
        Package name (str): always returned in uppercase
"""
return eval_foreign_vm_copy("(send *package* :name)")
| 12,321
|
def _normalise_trigger(value: float) -> float:
"""
Helper function used to normalise the controller trigger values into a common range.
:param value: Value to be normalised
:raises: ValueError
:return: Normalised value
"""
return _normalise(value, _HARDWARE_TRIGGER_MIN, _HARDWARE_TRIGGER_MAX, _INTENDED_TRIGGER_MIN, _INTENDED_TRIGGER_MAX)
| 12,322
|
def full_process(s):
"""Process string by
-- removing all but letters and numbers
-- trim whitespace
-- force to lower case"""
if s is None:
return ""
    # Here we will force a return of "" if it is None, empty, or not valid
#Merged from validate_string
try:
s = unicode(s)
len(s) > 0
except TypeError:
return ""
# Keep only Letters and Numbers (see Unicode docs).
string_out = StringProcessor.replace_with_whitespace(s)
# Force into lowercase.
string_out = StringProcessor.to_lower_case(string_out)
# Remove leading and trailing whitespaces.
string_out = StringProcessor.strip(string_out)
return string_out
| 12,323
|
def ldexp(space, x, i):
"""ldexp(x, i) -> x * (2**i)
"""
return math2(space, math.ldexp, x, i)
| 12,324
|
def menu():
"""
Print a menu with all the functionalities.
Returns:
The choice of the user.
"""
print "=" * 33 + "\nMENU\n" + "=" * 33
descriptions = ["Load host from external file",
"Add a new host",
"Print selected hosts",
"Check active hosts",
"Select only active hosts",
"Select bots",
"Execute command locally",
"Execute command on bots",
"Run external script",
"Open shell in a host",
"Exit"]
for num, func in enumerate(descriptions):
print "[" + str(num) + "] " + func
choice = raw_input(">>> ")
return choice
| 12,325
|
async def objects_get(bucket: Optional[str] = None,
index: Index = Depends(Provide[Container.index]),
buckets: Buckets = Depends(Provide[Container.buckets])) -> List[Object]:
"""
searches for objects
"""
if not bucket:
return index.get_all()
buckets.validate_bucket(bucket)
return index.get_all(bucket)
| 12,326
|
def test_node_can_be_loaded_simple():
# type: () -> None
"""Test loading a single node with "simple" representation.
The "simple" representation should return the same structure that would
be created if the '!gryaml.node' tag were absent or the implicit type.
"""
gryaml.register_simple()
sample_yaml = """
!gryaml.node
- properties:
name: Babs_Jensen
- labels:
- person
"""
node_loaded = yaml.safe_load(sample_yaml)
node_data = yaml.load(sample_yaml.replace('!gryaml.node', ''))
assert node_data == node_loaded
node_data = yaml.load(sample_yaml.replace('!gryaml.node', '!!seq'))
assert node_data == node_loaded
| 12,327
|
def thumbnail_create(request, repo_id):
"""create thumbnail from repo file list
return thumbnail src
"""
content_type = 'application/json; charset=utf-8'
result = {}
repo = get_repo(repo_id)
if not repo:
err_msg = _(u"Library does not exist.")
return HttpResponse(json.dumps({"error": err_msg}), status=400,
content_type=content_type)
path = request.GET.get('path', None)
if not path:
err_msg = _(u"Invalid arguments.")
return HttpResponse(json.dumps({"error": err_msg}), status=400,
content_type=content_type)
if repo.encrypted or not ENABLE_THUMBNAIL or \
check_folder_permission(request, repo_id, path) is None:
err_msg = _(u"Permission denied.")
return HttpResponse(json.dumps({"error": err_msg}), status=403,
content_type=content_type)
size = request.GET.get('size', THUMBNAIL_DEFAULT_SIZE)
success, status_code = generate_thumbnail(request, repo_id, size, path)
if success:
src = get_thumbnail_src(repo_id, size, path)
result['encoded_thumbnail_src'] = urlquote(src)
return HttpResponse(json.dumps(result), content_type=content_type)
else:
err_msg = _('Failed to create thumbnail.')
return HttpResponse(json.dumps({'err_msg': err_msg}),
status=status_code, content_type=content_type)
| 12,328
|
def initialize_event_loop():
"""Attempt to use uvloop."""
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
return asyncio.get_event_loop()
| 12,329
|
def _setup_pgops(multi_actions=False,
normalise_entropy=False,
sequence_length=4,
batch_size=2,
num_mvn_actions=3,
                 num_discrete_actions=5):
  """Set up policies, actions, policy_vars and (optionally) entropy_scale_op."""
t = sequence_length
b = batch_size
a = num_mvn_actions
c = num_discrete_actions
# MVN actions
mu = tf.placeholder(tf.float32, shape=(t, b, a))
sigma = tf.placeholder(tf.float32, shape=(t, b, a))
mvn_policies = tfp.distributions.MultivariateNormalDiag(
loc=mu, scale_diag=sigma)
mvn_actions = tf.placeholder(tf.float32, shape=(t, b, a))
mvn_params = [mu, sigma]
if multi_actions:
# Create a list of n_cat Categorical distributions
n_cat = 2
cat_logits = [tf.placeholder(tf.float32, shape=(t, b, c))
for _ in xrange(n_cat)]
cat_policies = [tfp.distributions.Categorical(logits=logits)
for logits in cat_logits]
cat_actions = [tf.placeholder(tf.int32, shape=(t, b))
for _ in xrange(n_cat)]
cat_params = [[logits] for logits in cat_logits]
# Create an exponential distribution
exp_rate = tf.placeholder(tf.float32, shape=(t, b))
exp_policies = tfp.distributions.Exponential(rate=exp_rate)
exp_actions = tf.placeholder(tf.float32, shape=(t, b))
exp_params = [exp_rate]
# Nest all policies and nest corresponding actions and parameters
policies = [mvn_policies, cat_policies, exp_policies]
actions = [mvn_actions, cat_actions, exp_actions]
policy_vars = [mvn_params, cat_params, exp_params]
else:
# No nested policy structure
policies = mvn_policies
actions = mvn_actions
policy_vars = mvn_params
entropy_scale_op = None
if normalise_entropy:
# Scale op that divides by total action dims
def scale_op(policies):
policies = nest.flatten(policies)
num_dims = [tf.to_float(tf.reduce_prod(policy.event_shape_tensor()))
for policy in policies]
return 1. / tf.reduce_sum(tf.stack(num_dims))
entropy_scale_op = scale_op
return policies, actions, policy_vars, entropy_scale_op
| 12,330
|
async def definition_delete(hub, ctx, name, **kwargs):
"""
.. versionadded:: 1.0.0
Delete a policy definition.
:param name: The name of the policy definition to delete.
CLI Example:
.. code-block:: bash
azurerm.resource.policy.definition_delete testpolicy
"""
result = False
polconn = await hub.exec.azurerm.utils.get_client(ctx, "policy", **kwargs)
try:
# pylint: disable=unused-variable
policy = polconn.policy_definitions.delete(policy_definition_name=name)
result = True
except (CloudError, ErrorResponseException) as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
return result
| 12,331
|
def run(setup: Callable[[BaseScene], None] = None, *, log_level=logging.WARNING,
starting_scene=BaseScene, title="PursuedPyBear", **engine_opts):
"""
Run a small game.
The resolution will 800 pixels wide by 600 pixels tall.
setup is a callable that accepts a scene and returns None.
    log_level lets you set the expected log level. Consider logging.DEBUG if
    something is behaving oddly.
    starting_scene lets you change the scene used by the engine.
"""
logging.basicConfig(level=log_level)
with make_engine(setup, starting_scene=starting_scene, title=title, **engine_opts) as eng:
eng.run()
| 12,332
|
def agent_environment(test_config, agent_env_settings_fields):
"""
    Set essential environment variables for the test function and unset them after.
"""
agent_settings = test_config.get("agent_settings", dict())
for name in agent_env_settings_fields:
value = compat.os_environ_unicode.get(name, agent_settings.get(name))
if not value:
raise NameError(
"'{0}' environment variable must be specified.".format(name)
)
os.environ[name] = value
yield
    # NOTE: We don't need to unset the environment variables at the end since this
    # setup runs before each test, which means the environment would only be
    # correctly set up for a single test anyway. In our case each tox invocation
    # results in multiple tests since we use the pytest_generate_tests functionality.
| 12,333
|
def test_bootstrap_random_seed():
"""Test that we can get reproducible resamples by seeding the RNG."""
data = rs.randn(50)
seed = 42
boots1 = algo.bootstrap(data, random_seed=seed)
boots2 = algo.bootstrap(data, random_seed=seed)
assert_array_equal(boots1, boots2)
| 12,334
|
def test_simplex_project():
"""Test simplex project"""
res = utils.simplex_project(np.array([0, 0, 0]))
assert np.allclose(res, [1 / 3] * 3), \
"projecting [0, 0, 0] didn't result in uniform"
res = utils.simplex_project(np.array([1.2, 1.4]))
assert np.allclose(res, [.4, .6]), \
"simplex project didn't return correct result"
res = utils.simplex_project(np.array([-0.1, 0.8]))
assert np.allclose(res, [0.05, 0.95]), \
"simplex project didn't return correct result"
| 12,335
|
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
    distributions, and uses them to sample a random process with
``numsamples`` samples. Each frequency distribution is sampled
``numoutcomes`` times. These three frequency distributions are
then used to build six probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
frequency distributions.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes)))
print('='*9*(len(pdists)+2))
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
print('-'*9*(len(pdists)+2))
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print(FORMATSTR % val)
# Print the totals for each column (should all be 1.0)
zvals = list(zip(*vals))
sums = [sum(val) for val in zvals[1:]]
print('-'*9*(len(pdists)+2))
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print(FORMATSTR % tuple(sums))
print('='*9*(len(pdists)+2))
# Display the distributions themselves, if they're short enough.
if len("%s" % fdist1) < 70:
print(' fdist1: %s' % fdist1)
print(' fdist2: %s' % fdist2)
print(' fdist3: %s' % fdist3)
print()
print('Generating:')
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print('%20s %s' % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
print()
| 12,336
|
def parse_options(argv):
"""Parses and checks the command-line options.
Returns:
A tuple containing the options structure and a list of categories to
be traced.
"""
usage = 'Usage: %prog [options] [category1 [category2 ...]]'
desc = 'Example: %prog -b 32768 -t 15 gfx input view sched freq'
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('-o', dest='output_file', help='write trace output to FILE',
default=None, metavar='FILE')
parser.add_option('-t', '--time', dest='trace_time', type='int',
help='trace for N seconds', metavar='N')
parser.add_option('-l', '--list-categories', dest='list_categories',
default=False, action='store_true',
help='list the available categories and exit')
parser.add_option('-j', '--json', dest='write_json',
default=False, action='store_true',
help='write a JSON file')
parser.add_option('--link-assets', dest='link_assets', default=False,
action='store_true',
help='(deprecated)')
parser.add_option('--from-file', dest='from_file', action='store',
                    help='read the trace from a file (compressed) rather than '
                         'running a live trace')
parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
type='string', help='(deprecated)')
parser.add_option('-e', '--serial', dest='device_serial_number',
default=_get_default_serial(),
type='string', help='adb device serial number')
parser.add_option('--target', dest='target', default='android', type='string',
                    help='choose tracing target (android or linux)')
parser.add_option('--timeout', dest='timeout', type='int',
help='timeout for start and stop tracing (seconds)')
parser.add_option('--collection-timeout', dest='collection_timeout',
type='int', help='timeout for data collection (seconds)')
atrace_ftrace_options = optparse.OptionGroup(parser,
'Atrace and Ftrace options')
atrace_ftrace_options.add_option('-b', '--buf-size', dest='trace_buf_size',
type='int', help='use a trace buffer size '
' of N KB', metavar='N')
atrace_ftrace_options.add_option('--no-fix-threads', dest='fix_threads',
default=True, action='store_false',
help='don\'t fix missing or truncated '
'thread names')
atrace_ftrace_options.add_option('--no-fix-tgids', dest='fix_tgids',
default=True, action='store_false',
help='Do not run extra commands to restore'
' missing thread to thread group id '
'mappings.')
atrace_ftrace_options.add_option('--no-fix-circular', dest='fix_circular',
default=True, action='store_false',
help='don\'t fix truncated circular traces')
parser.add_option_group(atrace_ftrace_options)
# Add the other agent parsing options to the parser. For Systrace on the
# command line, all agents are added. For Android, only the compatible agents
# will be added.
for module in ALL_MODULES:
option_group = module.add_options(parser)
if option_group:
parser.add_option_group(option_group)
options, categories = parser.parse_args(argv[1:])
if options.output_file is None:
options.output_file = 'trace.json' if options.write_json else 'trace.html'
if options.link_assets or options.asset_dir != 'trace-viewer':
parser.error('--link-assets and --asset-dir are deprecated.')
if options.trace_time and options.trace_time < 0:
parser.error('the trace time must be a non-negative number')
if (options.trace_buf_size is not None) and (options.trace_buf_size <= 0):
parser.error('the trace buffer size must be a positive number')
return (options, categories)
| 12,337
|
def open_excel(path):
"""context generator to open and close excel sheets"""
spreadsheet = Spreadsheet(path)
yield spreadsheet
spreadsheet.close()
| 12,338
|
def test_fix_nothing():
"""Ensures that fix works for Nothing container."""
assert Nothing.fix(lambda _: None) == Nothing
assert Nothing.fix(lambda _: 1) == Some(1)
| 12,339
|
def get_data_for_file(folder, ports):
"""Parses the pcap files in the specified folder, and outputs data for the specified ports
"""
# Load private keys and port->provider mappings
keys, providers, nodes = read_keys(os.path.join(folder, 'keys'))
print 'Loading packets'
# Load packets
with open(os.path.join(folder, 'network.pcap'), 'rb') as f:
cap = dpkt.pcap.Reader(f)
packets = []
for ts, buf in cap:
eth = dpkt.sll.SLL(buf)
if eth.type != 3:
# tcpdump captures both type 3 and 4 packets, resulting in duplicates
continue
eth.time = ts
try:
eth.data.src = socket.inet_ntoa(eth.data.src)
eth.data.dst = socket.inet_ntoa(eth.data.dst)
except:
pass
packets.append(eth)
# Load config
config = json.load(open(os.path.join(folder, 'config.json')))
# Invert exponential parameters to get rate
loops, drop, payload = 1/config['EXP_PARAMS_LOOPS'], 1 / \
config['EXP_PARAMS_DROP'], 1/config['EXP_PARAMS_PAYLOAD']
lambda_total = loops + drop + payload
print "λ_loop = %f, λ_drop = %f, λ_payload = %f, λ = %f" % (
loops, drop, payload, lambda_total)
data = []
for port in ports:
print "Parsing port %d from %s" % (port, folder)
# Filter packets by source port
filtered = [x for x in packets if x.data.data.sport == port]
print "Analysing all packets"
all_mean = analyse_packets(filtered, packets[0].time, packets[-1].time)
print "-----------------"
decrypted_filtered = [(x, decrypt_packet(
x, keys[nodes[get_addr(x.data.dst, x.data.data.dport)]], keys)) for x in filtered]
real_filtered = [
x for x, decrypt in decrypted_filtered if decrypt[0] == 'REAL']
if len(real_filtered) == 0:
print "Warning, 0 real packets"
real_mean = None
else:
print "Analysing real packets"
real_mean = analyse_packets(
real_filtered, packets[0].time, packets[-1].time)
print "\n-----------------\n"
data.append((port, loops, drop, payload,
lambda_total, all_mean, real_mean))
return data
| 12,340
|
async def create_channel_in_db(
context: 'Context',
game_config: 'GameConfig',
channel_id: str,
finished: bool = False
) -> Channel:
"""Utility function to create a channel in the database
:param context: The Discord Context.
    :param game_config: The GameConfig to use for extra info.
    :param channel_id: The ID of the channel to create.
    :param finished: Whether or not the channel is finished.
:return: The Channel that was created"""
owner = (await sync_to_async(User.objects.get_or_create)(id=context.author.id))[0]
return await sync_to_async(Channel.objects.create)(
id=channel_id,
owner=owner, guild_id=context.guild.id, game=game_config.game,
finished=finished
)
| 12,341
|
def error(msg):
""" Print message to stderr, can't use print because of Python 2/3
incompatibility """
sys.stderr.write(u'error: {}\n'.format(msg))
| 12,342
|
def retry(times, func, *args, **kwargs):
"""Try to execute multiple times function mitigating exceptions.
:param times: Amount of attempts to execute function
:param func: Function that should be executed
:param args: *args that are passed to func
:param kwargs: **kwargs that are passed to func
:raises Exception: Raise any exception that can raise func
:returns: Result of func(*args, **kwargs)
"""
for i in range(times):
try:
return func(*args, **kwargs)
except Exception:
if i == times - 1:
raise
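# Hedged usage sketch: a flaky callable that succeeds on its third attempt; earlier
# exceptions are swallowed because fewer than `times` attempts have been made.
_attempts = {'n': 0}
def _flaky():
    _attempts['n'] += 1
    if _attempts['n'] < 3:
        raise RuntimeError('transient failure')
    return 'ok'
assert retry(5, _flaky) == 'ok'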
| 12,343
|
def _apply_prediction(G, func, ebunch=None):
"""Applies the given function to each edge in the specified iterable
of edges.
`G` is an instance of :class:`networkx.Graph`.
`ebunch` is an iterable of pairs of nodes. If not specified, all
non-edges in the graph `G` will be used.
"""
if ebunch is None:
ebunch = nx.non_edges(G)
return sorted([(u, v, func(G, u, v)) for u, v in ebunch], key = lambda t:t[2], reverse = True)
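# Hedged usage sketch (assumes networkx is imported as nx, as the function above
# requires): score every non-edge of a small path graph by the sum of endpoint
# degrees; the result is a list of (u, v, score) triples, highest score first.
_G = nx.path_graph(4)
ranked = _apply_prediction(_G, lambda g, u, v: g.degree(u) + g.degree(v))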
| 12,344
|
def import_class(class_object):
"""
Import a class given a string with its name in the format module.module.classname
"""
d = class_object.rfind(".")
class_name = class_object[d + 1:len(class_object)]
m = __import__(class_object[0:d], globals(), locals(), [class_name])
return getattr(m, class_name)
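# Hedged usage sketch: importing a standard-library class from its dotted path.
ordered_dict_cls = import_class('collections.OrderedDict')
assert ordered_dict_cls.__name__ == 'OrderedDict'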
| 12,345
|
def anonymous_fun_0_(empty_closure_0_):
"""
empty_closure_0_: ()
"""
def anonymous_fun_1_(par_map_input_1_):
"""
par_map_input_1_: Double
"""
def anonymous_fun_2_(par_map_input_0_):
"""
par_map_input_0_: Double
"""
def anonymous_fun_3_(fused_input_0_):
"""
fused_input_0_: (Double,Double)
"""
def anonymous_fun_4_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_0_ = maybeRow_1_
else:
cond_result_0_ = 0.0
return cond_result_0_
def anonymous_fun_5_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_1_ = row_1_
else:
cond_result_1_ = None
return cond_result_1_
def anonymous_fun_6_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_2_ = maybeRow_0_
else:
cond_result_2_ = 0.0
return cond_result_2_
def anonymous_fun_7_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_3_ = row_0_
else:
cond_result_3_ = None
return cond_result_3_
return ((fun_comp(anonymous_fun_4_,anonymous_fun_5_))(fused_input_0_[0]),(fun_comp(anonymous_fun_6_,anonymous_fun_7_))(fused_input_0_[1]))
def anonymous_fun_8_(dbrow_0_):
"""
dbrow_0_: Double
"""
return (dbrow_0_,dbrow_0_)
def anonymous_fun_9_(fused_input_1_):
"""
fused_input_1_: (Double,Double)
"""
def anonymous_fun_10_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_4_ = maybeRow_1_
else:
cond_result_4_ = 0.0
return cond_result_4_
def anonymous_fun_11_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_5_ = row_1_
else:
cond_result_5_ = None
return cond_result_5_
def anonymous_fun_12_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_6_ = maybeRow_0_
else:
cond_result_6_ = 0.0
return cond_result_6_
def anonymous_fun_13_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_7_ = row_0_
else:
cond_result_7_ = None
return cond_result_7_
return ((fun_comp(anonymous_fun_10_,anonymous_fun_11_))(fused_input_1_[0]),(fun_comp(anonymous_fun_12_,anonymous_fun_13_))(fused_input_1_[1]))
def anonymous_fun_14_(dbrow_1_):
"""
dbrow_1_: Double
"""
return (dbrow_1_,dbrow_1_)
return ((fun_comp(anonymous_fun_3_,anonymous_fun_8_))(par_map_input_0_),(fun_comp(anonymous_fun_9_,anonymous_fun_14_))(par_map_input_0_))
def anonymous_fun_15_(fused_input_2_):
"""
fused_input_2_: (Double,Double)
"""
def anonymous_fun_16_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_8_ = maybeRow_1_
else:
cond_result_8_ = 0.0
return cond_result_8_
def anonymous_fun_17_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_9_ = row_1_
else:
cond_result_9_ = None
return cond_result_9_
def anonymous_fun_18_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_10_ = maybeRow_0_
else:
cond_result_10_ = 0.0
return cond_result_10_
def anonymous_fun_19_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_11_ = row_0_
else:
cond_result_11_ = None
return cond_result_11_
return ((fun_comp(anonymous_fun_16_,anonymous_fun_17_))(fused_input_2_[0]),(fun_comp(anonymous_fun_18_,anonymous_fun_19_))(fused_input_2_[1]))
def anonymous_fun_20_(dbrow_2_):
"""
dbrow_2_: Double
"""
return (dbrow_2_,dbrow_2_)
return (anonymous_fun_2_(par_map_input_1_),(fun_comp(anonymous_fun_15_,anonymous_fun_20_))(par_map_input_1_))
return anonymous_fun_1_
| 12,346
|
def test_version():
"""Test there is a version string"""
assert {{ cookiecutter.project_slug }}.__version__ == '{{ cookiecutter.version }}'
| 12,347
|
def fromrecords(recList: List[List[int]], names: List[Literal["c", "b", "a"]]):
"""
usage.matplotlib: 1
"""
...
| 12,348
|
def get_examples(mode='train'):
"""
dataset[0][0] examples
"""
examples = {
'train':
({'id': '0a25cb4bc1ab6f474c699884e04601e4', 'title': '', 'context': '第35集雪见缓缓张开眼睛,景天又惊又喜之际,长卿和紫萱的仙船驶至,见众人无恙,'
'也十分高兴。众人登船,用尽合力把自身的真气和水分输给她。雪见终于醒过来了,但却一脸木然,全无反应。众人向常胤求助,却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世,'
'清微语带双关说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,'
'仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦,模仿重楼的翅膀,制作数对翅膀状巨物。刚佩戴在身,便被吸入洞口。众人摔落在地,'
'抬头发现魔界守卫。景天和众魔套交情,自称和魔尊重楼相熟,众魔不理,打了起来。', 'question': '仙剑奇侠传3第几集上天界', 'answers': ['第35集'], 'answer_starts': [0]}),
}
return examples[mode]
| 12,349
|
def finished(ignored):
"""
Callback invoked when both logins and method calls have finished to shut
down the reactor so the example exits.
"""
reactor.stop()
| 12,350
|
def launch_external_program(path):
"""Launches an application with a predefined CSV to open.
Args:
path (str):
Returns:
None.
"""
OS = sys.platform
log.info('Identified OS : '+OS)
if OS == 'linux':
os.system('libreoffice ' + path)
elif OS == 'win32':
os.system('Start excel.exe ' + path.replace('/', '\\'))
elif OS == 'darwin':
os.system('Numbers ' + path)
input('Press ENTER to continue...')
| 12,351
|
def display_tables(tables, max_rows=10, datetime_fmt='%Y-%m-%d %H:%M:%S', row=True):
    """Display multiple tables side by side on a Jupyter Notebook.
Args:
tables (dict[str, DataFrame]):
``dict`` containing table names and pandas DataFrames.
max_rows (int):
Max rows to show per table. Defaults to 10.
datetime_fmt (str):
            Format with which to display datetime columns.
        row (bool):
            Whether to lay the tables out in a single row. Defaults to True.
    """
# Import here to avoid making IPython a hard dependency
from IPython.core.display import HTML
names = []
data = []
for name, table in tables.items():
table = table.copy()
for column in table.columns:
column_data = table[column]
if column_data.dtype.kind == 'M':
table[column] = column_data.dt.strftime(datetime_fmt)
names.append('<td style="text-align:left"><b>{}</b></td>'.format(name))
data.append('<td>{}</td>'.format(table.head(max_rows).to_html(index=False)))
if row:
html = '<table><tr>{}</tr><tr>{}</tr></table>'.format(
''.join(names),
''.join(data),
)
else:
rows = [
'<tr>{}</tr><tr>{}</tr>'.format(name, table)
for name, table in zip(names, data)
]
html = '<table>{}</table>'.format(''.join(rows))
return HTML(html)
| 12,352
|
def get_visible_enemy_units(observation, as_list=False, as_dict=True):
"""
This function takes an observation and returns a list of the enemy units that are
on screen and are visible.
A unit is considered visible if is either visible (in the protos sense) or if it
is snapshotted and finished.
The definition of display_type can be found here:
https://github.com/Blizzard/s2client-proto/blob/master/s2clientprotocol/raw.proto#L55
"""
if as_list == as_dict:
raise ValueError("One and only one of as_list and as_dict should be True")
if as_list == True:
visible_enemy_units = []
for unit in observation.raw_data.units:
if unit.alliance == 4 and unit.is_on_screen:
if unit.display_type == 1 or (
unit.display_type == 2 and unit.build_progress == 1
):
visible_enemy_units.append(get_unit_doc(unit))
if as_dict == True:
# TO-DO: fix this one, this is the root of all bugs.
visible_enemy_units = {}
for unit in observation.raw_data.units:
if unit.alliance == 4 and unit.is_on_screen:
if unit.display_type == 1 or (
unit.display_type == 2 and unit.build_progress == 1
):
if str(unit.unit_type) not in visible_enemy_units:
visible_enemy_units[str(unit.unit_type)] = [get_unit_doc(unit)]
else:
visible_enemy_units[str(unit.unit_type)].append(
get_unit_doc(unit)
)
return visible_enemy_units
| 12,353
|
def prepare_default_result_dict(key, done, nodes):
"""Prepares the default result `dict` using common values returned by any
operation on the DHT.
Returns:
dict: with keys `(k, d, n)` for the key, done and nodes; `n` is a list
of `dict` with keys `(i, a, x)` for id, address, and expiration.
"""
d = {
"k": key,
"d": done,
}
nb = []
for n in nodes:
_node = n.getNode()
nb.append({
"i": n.getId().toString(),
"a": _node.getAddr(),
"x": _node.isExpired()
})
d["n"] = nb
return d
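# Hedged sketch using minimal stand-in node objects (the real DHT node types are
# assumed elsewhere); it only illustrates the shape of the returned dict.
class _FakeId:
    def toString(self):
        return 'abcd'
class _FakeNode:
    def getAddr(self):
        return '10.0.0.1:4222'
    def isExpired(self):
        return False
class _FakeEntry:
    def getNode(self):
        return _FakeNode()
    def getId(self):
        return _FakeId()
result = prepare_default_result_dict('some-key', True, [_FakeEntry()])
# result == {'k': 'some-key', 'd': True,
#            'n': [{'i': 'abcd', 'a': '10.0.0.1:4222', 'x': False}]}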
| 12,354
|
def MatchScorer(match, mismatch):
"""Factory function that returns a score function set to match and mismatch.
match and mismatch should both be numbers. Typically, match should be
positive and mismatch should be negative.
Resulting function has signature f(x,y) -> number.
"""
def scorer(x, y):
if x == y:
return match
else:
return mismatch
return scorer
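# Hedged usage sketch: a scorer that rewards identical symbols with +1 and
# penalizes mismatches with -1.
score = MatchScorer(1, -1)
assert score('A', 'A') == 1
assert score('A', 'G') == -1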
| 12,355
|
def random_choice(context: RuntimeContext, *choices):
"""Template helper for random choices.
Supports structures like this:
random_choice:
- a
- b
- <<c>>
Or like this:
random_choice:
- choice:
pick: A
probability: 50%
- choice:
pick: A
probability: 50%
Probabilities are really just weights and don't need to
add up to 100.
Pick-items can have arbitrary internal complexity.
Pick-items are lazily evaluated.
"""
if not choices:
raise ValueError("No choices supplied!")
if getattr(choices[0], "function_name", None) == "choice":
choices = [choice.render(context) for choice in choices]
rc = weighted_choice(choices)
else:
rc = random.choice(choices)
if hasattr(rc, "render"):
rc = rc.render(context)
return rc
| 12,356
|
def _compute_paddings(height_pad_amt, width_pad_amt, patch_axes):
"""Convert the total pad amounts to the format needed by tf.pad()."""
top_pad = height_pad_amt // 2
bottom_pad = height_pad_amt - top_pad
left_pad = width_pad_amt // 2
right_pad = width_pad_amt - left_pad
paddings = [[0, 0] for _ in range(4)]
paddings[patch_axes[0]] = [top_pad, bottom_pad]
paddings[patch_axes[1]] = [left_pad, right_pad]
return paddings
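# Hedged sketch: for an NHWC tensor (patch axes 1 and 2), a total pad of 3 rows
# and 5 columns is split so the extra row/column goes to the bottom/right.
paddings = _compute_paddings(3, 5, patch_axes=(1, 2))
assert paddings == [[0, 0], [1, 2], [2, 3], [0, 0]]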
| 12,357
|
def tab_size(computer, name, value):
"""Compute the ``tab-size`` property."""
if isinstance(value, int):
return value
else:
return length(computer, name, value)
| 12,358
|
def match_complete(user_id=""):
"""Switch 'complete' to true in matches table for user, return tallies."""
print("match_complete", user_id)
user = sm.get_user(user_id)
# Note: 0/1 used for 'complete' b/c Booleans not allowed in SimpleObjects
this_match, i = current_match_i(user)
temp = this_match['complete']
temp[i] = 1
this_match['complete'] = temp
return _get_tallies(user)
| 12,359
|
def compute_range_map(flow,
downsampling_factor=1,
reduce_downsampling_bias=True,
resize_output=True):
"""Count how often each coordinate is sampled.
Counts are assigned to the integer coordinates around the sampled coordinates
using weights from bilinear interpolation.
Args:
flow: A float tensor of shape (batch size x height x width x 2) that
represents a dense flow field.
downsampling_factor: An integer, by which factor to downsample the output
resolution relative to the input resolution. Downsampling increases the
bin size but decreases the resolution of the output. The output is
normalized such that zero flow input will produce a constant ones output.
reduce_downsampling_bias: A boolean, whether to reduce the downsampling bias
near the image boundaries by padding the flow field.
resize_output: A boolean, whether to resize the output at the input
resolution.
Returns:
A float tensor of shape [batch_size, height, width, 1] that denotes how
often each pixel is sampled.
"""
# Get input shape.
input_shape = flow.shape.as_list()
if len(input_shape) != 4:
raise NotImplementedError()
batch_size, input_height, input_width, _ = input_shape
flow_height = input_height
flow_width = input_width
# Apply downsampling (and move the coordinate frame appropriately).
output_height = input_height // downsampling_factor
output_width = input_width // downsampling_factor
if downsampling_factor > 1:
# Reduce the bias that comes from downsampling, where pixels at the edge
# will get lower counts that pixels in the middle of the image, by padding
# the flow field.
if reduce_downsampling_bias:
p = downsampling_factor // 2
flow_height += 2 * p
flow_width += 2 * p
      # Apply padding in multiple steps to pad with the values on the edge.
for _ in range(p):
flow = tf.pad(
tensor=flow,
paddings=[[0, 0], [1, 1], [1, 1], [0, 0]],
mode='SYMMETRIC')
coords = flow_to_warp(flow) - p
# Update the coordinate frame to the downsampled one.
coords = (coords + (1 - downsampling_factor) * 0.5) / downsampling_factor
elif downsampling_factor == 1:
    coords = flow_to_warp(flow)  # warp coordinates derived from the flow field
else:
raise ValueError('downsampling_factor must be an integer >= 1.')
# Split coordinates into an integer part and a float offset for interpolation.
  coords_floor = tf.floor(coords)  # element-wise floor: largest integers not greater than coords
coords_offset = coords - coords_floor
coords_floor = tf.cast(coords_floor, 'int32')
# Define a batch offset for flattened indexes into all pixels.
batch_range = tf.reshape(tf.range(batch_size), [batch_size, 1, 1])
idx_batch_offset = tf.tile(
batch_range, [1, flow_height, flow_width]) * output_height * output_width
# Flatten everything.
  coords_floor_flattened = tf.reshape(coords_floor, [-1, 2])  # integer parts
  coords_offset_flattened = tf.reshape(coords_offset, [-1, 2])  # fractional offsets
  idx_batch_offset_flattened = tf.reshape(idx_batch_offset, [-1])  # flattened batch index offsets
# Initialize results.
idxs_list = []
weights_list = []
# Loop over differences di and dj to the four neighboring pixels.
for di in range(2):
for dj in range(2):
# Compute the neighboring pixel coordinates.
idxs_i = coords_floor_flattened[:, 0] + di
idxs_j = coords_floor_flattened[:, 1] + dj
# Compute the flat index into all pixels.
idxs = idx_batch_offset_flattened + idxs_i * output_width + idxs_j
# Only count valid pixels.
mask = tf.reshape(
tf.compat.v1.where(
tf.logical_and(
tf.logical_and(idxs_i >= 0, idxs_i < output_height),
tf.logical_and(idxs_j >= 0, idxs_j < output_width))), [-1])
valid_idxs = tf.gather(idxs, mask)
valid_offsets = tf.gather(coords_offset_flattened, mask)
# Compute weights according to bilinear interpolation.
weights_i = (1. - di) - (-1)**di * valid_offsets[:, 0]
weights_j = (1. - dj) - (-1)**dj * valid_offsets[:, 1]
weights = weights_i * weights_j
# Append indices and weights to the corresponding list.
idxs_list.append(valid_idxs)
weights_list.append(weights)
# Concatenate everything.
idxs = tf.concat(idxs_list, axis=0)
weights = tf.concat(weights_list, axis=0)
# Sum up weights for each pixel and reshape the result.
counts = tf.math.unsorted_segment_sum(
weights, idxs, batch_size * output_height * output_width)
count_image = tf.reshape(counts, [batch_size, output_height, output_width, 1])
if downsampling_factor > 1:
# Normalize the count image so that downsampling does not affect the counts.
count_image /= downsampling_factor**2
if resize_output:
count_image = resize(
count_image, input_height, input_width, is_flow=False)
return count_image
| 12,360
|
def get_wer(refs: List[str], hyps: List[str]):
"""
args:
refs (list of str): reference texts
hyps (list of str): hypothesis/prediction texts
"""
n_words, n_errors = 0, 0
for ref, hyp in zip(refs, hyps):
ref, hyp = ref.split(), hyp.split()
n_words += len(ref)
n_errors += editdistance.eval(ref, hyp)
return safe_divide(n_errors, n_words)
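# Hedged worked example (relies on editdistance and safe_divide from this module,
# with safe_divide assumed to be a zero-guarded division): one substituted word
# out of four reference words gives a WER of 1/4.
assert get_wer(['the cat sat down'], ['the cat sat up']) == 0.25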
| 12,361
|
def quad_sim(x_c, y_c, z_c):
"""
Calculates the necessary thrust and torques for the quadrotor to
follow the trajectory described by the sets of coefficients
x_c, y_c, and z_c.
"""
x_pos = -5
y_pos = -5
z_pos = 5
x_vel = 0
y_vel = 0
z_vel = 0
x_acc = 0
y_acc = 0
z_acc = 0
roll = 0
pitch = 0
yaw = 0
roll_vel = 0
pitch_vel = 0
yaw_vel = 0
des_yaw = 0
dt = 0.1
t = 0
q = Quadrotor(x=x_pos, y=y_pos, z=z_pos, roll=roll,
pitch=pitch, yaw=yaw, size=1, show_animation=show_animation)
i = 0
n_run = 8
irun = 0
while True:
while t <= T:
# des_x_pos = calculate_position(x_c[i], t)
# des_y_pos = calculate_position(y_c[i], t)
des_z_pos = calculate_position(z_c[i], t)
# des_x_vel = calculate_velocity(x_c[i], t)
# des_y_vel = calculate_velocity(y_c[i], t)
des_z_vel = calculate_velocity(z_c[i], t)
des_x_acc = calculate_acceleration(x_c[i], t)
des_y_acc = calculate_acceleration(y_c[i], t)
des_z_acc = calculate_acceleration(z_c[i], t)
thrust = m * (g + des_z_acc + Kp_z * (des_z_pos -
z_pos) + Kd_z * (des_z_vel - z_vel))
roll_torque = Kp_roll * \
(((des_x_acc * sin(des_yaw) - des_y_acc * cos(des_yaw)) / g) - roll)
pitch_torque = Kp_pitch * \
(((des_x_acc * cos(des_yaw) - des_y_acc * sin(des_yaw)) / g) - pitch)
yaw_torque = Kp_yaw * (des_yaw - yaw)
roll_vel += roll_torque * dt / Ixx
pitch_vel += pitch_torque * dt / Iyy
yaw_vel += yaw_torque * dt / Izz
roll += roll_vel * dt
pitch += pitch_vel * dt
yaw += yaw_vel * dt
R = rotation_matrix(roll, pitch, yaw)
acc = (np.matmul(R, np.array(
[0, 0, thrust.item()]).T) - np.array([0, 0, m * g]).T) / m
x_acc = acc[0]
y_acc = acc[1]
z_acc = acc[2]
x_vel += x_acc * dt
y_vel += y_acc * dt
z_vel += z_acc * dt
x_pos += x_vel * dt
y_pos += y_vel * dt
z_pos += z_vel * dt
q.update_pose(x_pos, y_pos, z_pos, roll, pitch, yaw)
t += dt
t = 0
i = (i + 1) % 4
irun += 1
if irun >= n_run:
break
print("Done")
| 12,362
|
def get_first(somelist, function):
""" Returns the first item of somelist for which function(item) is True """
for item in somelist:
if function(item):
return item
return None
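# Hedged usage sketch: first even number in a list, or None when nothing matches.
assert get_first([1, 3, 4, 6], lambda n: n % 2 == 0) == 4
assert get_first([1, 3, 5], lambda n: n % 2 == 0) is None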
| 12,363
|
def np_cross(a, b):
"""
Simple numba compatible cross product of vectors
"""
return np.array([
a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0],
])
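# Hedged sketch (assumes numpy is imported as np, as the function above requires):
# the cross product of the unit x and y vectors is the unit z vector, matching np.cross.
_a = np.array([1.0, 0.0, 0.0])
_b = np.array([0.0, 1.0, 0.0])
assert np.allclose(np_cross(_a, _b), np.cross(_a, _b))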
| 12,364
|
def get_process_output(process, encoding=None):
"""Get the output from the process."""
output = process.communicate()
returncode = process.returncode
if not encoding:
try:
encoding = sys.stdout.encoding
except Exception:
encoding = locale.getpreferredencoding()
if returncode != 0:
raise RuntimeError("Runtime Error: %s" % (output[0].rstrip().decode(encoding, errors='replace')))
return output[0].decode(encoding, errors='replace')
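# Hedged usage sketch (adds the subprocess import just for the demo): capture the
# stdout of a short-lived child process.
import subprocess
_proc = subprocess.Popen([sys.executable, '-c', 'print("hello")'],
                         stdout=subprocess.PIPE)
assert get_process_output(_proc).strip() == 'hello'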
| 12,365
|
def augment_model_with_bundled_inputs(
model: torch.jit.ScriptModule,
inputs: Optional[Sequence[Tuple[Any, ...]]] = None,
_receive_inflate_expr: Optional[List[str]] = None, # For debugging.
info: Optional[List[str]] = None, # Optional argument to provide info about forward or its inputs
) -> None:
""" Add bundled sample inputs to a model for the forward function.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
Augmented models will support the following methods:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_forward`.
If the user chooses this method inputs should be None
- `inputs` is a list of inputs of form List[Tuple[Any, ...]]. A list of tuples where the elements
of each tuple are the args that make up one input.
"""
if not isinstance(model, torch.jit.ScriptModule):
raise Exception("Only ScriptModule is supported.")
forward: Callable = model.forward
# Sometimes forward won't have a name attached so just in case
if not hasattr(forward, "__name__"):
forward.__name__ = 'forward'
augment_many_model_functions_with_bundled_inputs(
model,
inputs={forward : inputs},
_receive_inflate_expr=_receive_inflate_expr,
info={forward : info} if info else None,
)
| 12,366
|
def check(ctx):
"""Check the consistency of documentation, coding style and a few other things."""
with chdir(BASE_FOLDER):
log.write("Pep517 check")
ctx.run("python -m pep517.check .")
log.write("Running all pre-commit hooks on whole repository.")
ctx.run("pre-commit run --all-files")
| 12,367
|
def test_one_hot_encoder_not_sparse():
"""
    Tests whether the monkey patching of ('sklearn.preprocessing._encoders', 'OneHotEncoder') works with dense output
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.preprocessing import label_binarize, OneHotEncoder
import numpy as np
df = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c']})
one_hot_encoder = OneHotEncoder(sparse=False)
encoded_data = one_hot_encoder.fit_transform(df)
expected = np.array([[1., 0., 0.], [0., 1., 0.], [1., 0., 0.], [0., 0., 1.]])
print(encoded_data)
assert np.allclose(encoded_data, expected)
test_df = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c']})
encoded_data = one_hot_encoder.transform(test_df)
""")
inspector_result = _pipeline_executor.singleton.run(python_code=test_code, track_code_references=True,
inspections=[RowLineage(3)])
expected_dag = networkx.DiGraph()
expected_data_source = DagNode(0,
BasicCodeLocation("<string-source>", 5),
OperatorContext(OperatorType.DATA_SOURCE,
FunctionInfo('pandas.core.frame', 'DataFrame')),
DagNodeDetails(None, ['A']),
OptionalCodeInfo(CodeReference(5, 5, 5, 62),
"pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c']})"))
expected_transformer = DagNode(1,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing._encoders', 'OneHotEncoder')),
DagNodeDetails('One-Hot Encoder: fit_transform', ['array']),
OptionalCodeInfo(CodeReference(6, 18, 6, 45), 'OneHotEncoder(sparse=False)'))
expected_dag.add_edge(expected_data_source, expected_transformer)
expected_data_source_two = DagNode(2,
BasicCodeLocation("<string-source>", 11),
OperatorContext(OperatorType.DATA_SOURCE,
FunctionInfo('pandas.core.frame', 'DataFrame')),
DagNodeDetails(None, ['A']),
OptionalCodeInfo(CodeReference(11, 10, 11, 67),
"pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c']})"))
expected_transformer_two = DagNode(3,
BasicCodeLocation("<string-source>", 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.preprocessing._encoders',
'OneHotEncoder')),
DagNodeDetails('One-Hot Encoder: transform', ['array']),
OptionalCodeInfo(CodeReference(6, 18, 6, 45), 'OneHotEncoder(sparse=False)'))
expected_dag.add_edge(expected_data_source_two, expected_transformer_two)
compare(networkx.to_dict_of_dicts(inspector_result.dag), networkx.to_dict_of_dicts(expected_dag))
inspection_results_data_source = inspector_result.dag_node_to_inspection_results[expected_transformer]
lineage_output = inspection_results_data_source[RowLineage(3)]
expected_lineage_df = DataFrame([[numpy.array([1.0, 0.0, 0.0]), {LineageId(0, 0)}],
[numpy.array([0.0, 1.0, 0.0]), {LineageId(0, 1)}],
[numpy.array([1.0, 0.0, 0.0]), {LineageId(0, 2)}]],
columns=['array', 'mlinspect_lineage'])
pandas.testing.assert_frame_equal(lineage_output.reset_index(drop=True), expected_lineage_df.reset_index(drop=True))
inspection_results_data_source = inspector_result.dag_node_to_inspection_results[expected_transformer_two]
lineage_output = inspection_results_data_source[RowLineage(3)]
expected_lineage_df = DataFrame([[numpy.array([1.0, 0.0, 0.0]), {LineageId(2, 0)}],
[numpy.array([0.0, 1.0, 0.0]), {LineageId(2, 1)}],
[numpy.array([1.0, 0.0, 0.0]), {LineageId(2, 2)}]],
columns=['array', 'mlinspect_lineage'])
pandas.testing.assert_frame_equal(lineage_output.reset_index(drop=True), expected_lineage_df.reset_index(drop=True))
| 12,368
|
def dct_2d(pixel_blocks: np.ndarray, verbose: int = 0) -> np.ndarray:
"""
Does 8x8 2D DCT on an image represented by pixel blocks.
:param pixel_blocks:
A np.ndarray of shape AxBx8x8x3, where A = H/8, B = W/8.
:param verbose:
An int; if greater than 0 will print out a tqdm progress bar.
:return:
A np.ndarray of shape AxBx8x8x3, where A = H/8, B = W/8.
"""
to_return = list()
if verbose > 0:
pbar = tqdm(total=pixel_blocks.shape[0] * pixel_blocks.shape[1], file=stdout)
for row in pixel_blocks:
current_row = list()
for pixel_block in row:
current_row.append(dct_2d_on_8x8_block(pixel_block))
if verbose > 0:
pbar.update()
to_return.append(current_row)
if verbose > 0:
pbar.close()
return np.array(to_return)
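
# Hedged usage sketch: dct_2d_on_8x8_block is assumed to come from the
# surrounding module and to return an 8x8x3 block. The reshape/swapaxes turns
# an HxWx3 image (H and W multiples of 8) into the AxBx8x8x3 block layout the
# function expects.
image = np.random.randint(0, 256, size=(64, 48, 3)).astype(np.float64)
h, w, c = image.shape
pixel_blocks = image.reshape(h // 8, 8, w // 8, 8, c).swapaxes(1, 2)
coefficients = dct_2d(pixel_blocks, verbose=0)
assert coefficients.shape == pixel_blocks.shape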
| 12,369
|
def set_task_status(
bucket: Bucket,
job_id: str,
task_id: str,
state: TaskState,
worker: Optional[str] = _LOCAL_FQDN,
):
"""Set the status of a task.
Uploads the JSON serialization of a TaskStatus into a bucket, recording its
present state.
Parameters
----------
bucket : Bucket
The Google Cloud Storage bucket that hosts the given job and task.
job_id : str
The ID of the job.
task_id : str
The ID of the task.
state : TaskState
The state of the task.
worker : Optional[str]
An identifier for the worker reporting the state of the task (or None if
no worker is handling the task).
"""
if state == TaskState.REQUESTED:
worker = None
status = TaskStatus(state, worker)
blob_path = _task_status_path(job_id, task_id)
bucket.blob(blob_path).upload_from_string(status.to_bytes())
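
# Hedged usage sketch: records that a task has been requested. Assumes the
# google-cloud-storage client library is installed and authenticated;
# "thor-survey-jobs" and the job/task IDs are hypothetical names.
from google.cloud import storage

bucket = storage.Client().bucket("thor-survey-jobs")
set_task_status(bucket, job_id="job-2021-06", task_id="task-0001",
                state=TaskState.REQUESTED)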
| 12,370
|
def remove_duplicates(llist):
"""
Removes any and all duplicate entries in the specified list.
This function is intended to be used during dataset merging and
therefore must be able to handle list-of-lists.
    Note that the input list is sorted in place as a side effect.
    :param llist: The list to prune.
    :return: A sorted list of unique elements only.
"""
if not llist:
return []
llist.sort()
return [x for x, _ in itertools.groupby(llist)]
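
# Small usage sketch of the helper above, including the list-of-lists case the
# docstring mentions (itertools is assumed to be imported in this module, as
# the function itself relies on itertools.groupby).
merged = [["chr1", 100], ["chr1", 100], ["chr2", 250]]
assert remove_duplicates(merged) == [["chr1", 100], ["chr2", 250]]
assert remove_duplicates([]) == []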
| 12,371
|
def images_in_bbox(bbox: dict, **filters) -> str:
"""
Gets a complete list of images with custom filter within a BBox
:param bbox: Bounding box coordinates
Format::
>>> {
... 'west': 'BOUNDARY_FROM_WEST',
... 'south': 'BOUNDARY_FROM_SOUTH',
... 'east': 'BOUNDARY_FROM_EAST',
... 'north': 'BOUNDARY_FROM_NORTH'
... }
:type bbox: dict
:param filters: Different filters that may be applied to the output
Example filters::
- max_captured_at
- min_captured_at
- image_type: pano, flat, or all
- compass_angle
- sequence_id
- organization_id
:type filters: dict
    :return: Output is a GeoJSON string that represents all the images within a bbox after passing
        the given filters
:rtype: str
Usage::
>>> import mapillary as mly
>>> mly.interface.set_access_token('MLY|XXX')
>>> mly.interface.images_in_bbox(
... bbox={
... 'west': 'BOUNDARY_FROM_WEST',
... 'south': 'BOUNDARY_FROM_SOUTH',
... 'east': 'BOUNDARY_FROM_EAST',
... 'north': 'BOUNDARY_FROM_NORTH'
... },
... max_captured_at='YYYY-MM-DD HH:MM:SS',
... min_captured_at='YYYY-MM-DD HH:MM:SS',
... image_type='pano',
... compass_angle=(0, 360),
... sequence_id='SEQUENCE_ID',
... organization_id='ORG_ID'
... )
"""
return image.get_images_in_bbox_controller(
bounding_box=bbox, layer="image", zoom=14, filters=filters
)
| 12,372
|
def find_start_time_from_afl(project_base_dir):
"""
Finds the start time of a project from afl directories.
This time is taken from the fuzzer_stats entry of
the first config iteration's fuzzer.
"""
try:
first_main_dir = main_dirs_for_proj(project_base_dir)[0]
    except IndexError:
        # the fuzzware-project dir exists but contains no mainXXX dirs
return 0
first_fuzzer_dir = fuzzer_dirs_for_main_dir(first_main_dir)[0]
fuzzer_stats_path = first_fuzzer_dir.joinpath("fuzzer_stats")
with open(fuzzer_stats_path, "r") as f:
start_time = int(f.readline().split(": ")[1])
return start_time
| 12,373
|
def set_password(user, passwd):
""" set the users password """
# execute all commands from config file
for rcmd in conf("commands.setPassword"):
cmdlog = rcmd.replace("$1", user).replace("$2", "*****")
cmd = rcmd.replace("$1", user).replace("$2", passwd)
run(cmd, cmdlog)
| 12,374
|
def filter_none_values(d, recursive=True):
"""
Returns a filtered copy of a dict, with all keys associated with 'None' values removed.
adapted from: http://stackoverflow.com/q/20558699
adapted from: http://stackoverflow.com/a/20558778
:param d: a dict-like object.
:param recursive: If True, performs the operation recursively on inner elements of the object.
:return: a new dict (of the same type as the original) containing the original dict's values,
except as modified per this function's documented effects.
>>> filter_none_values(None) is None
True
>>> filter_none_values(1)
Traceback (most recent call last):
TypeError: d is not a dict-like object.
>>> filter_none_values({})
{}
>>> filter_none_values({'a': 1, 'b': None, 'c': '3'})
{'a': 1, 'c': '3'}
>>> filter_none_values({'a': 1, 'b': [1, None, 3], 'c': '3'})
{'a': 1, 'c': '3', 'b': [1, 3]}
>>> filter_none_values({'a': 1, 'b': [1, {'ba': 1, 'bb': None, 'bc': '3'}, 3], 'c': '3'})
{'a': 1, 'c': '3', 'b': [1, {'ba': 1, 'bc': '3'}, 3]}
>>> from collections import OrderedDict as od; filter_none_values(od((('a', 1), ('b', None), ('c', '3'))))
OrderedDict([('a', 1), ('c', '3')])
>>> from collections import OrderedDict as od; filter_none_values({'r': od((('a', 1), ('b', None), ('c', '3')))})
{'r': OrderedDict([('a', 1), ('c', '3')])}
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": null, "c": 3}')))
"{u'a': 1, u'c': 3}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": [], "c": 3}')))
"{u'a': 1, u'c': 3, u'b': []}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": null}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {}}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": []}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {u'ba': []}}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": {"baa": null}}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {u'ba': {}}}"
"""
# def my_remove_none(obj):
# """Note: adapted from remove_none."""
# if isinstance(obj, (collections.Sequence, list, tuple, set)):
# return type(obj)(remove_none(x) for x in obj if x is not None)
# elif isinstance(obj, (collections.Mapping, dict)):
# return type(obj)((remove_none(k), remove_none(v))
# for k, v in obj.items() if k is not None and v is not None)
# else:
# return obj
def remove_none(obj):
"""Note: This one seems to be functionally equivalent to purify (at least for the cases I tested)."""
if isinstance(obj, (list, tuple, set)):
return type(obj)(remove_none(x) for x in obj if x is not None)
elif isinstance(obj, dict):
return type(obj)((remove_none(k), remove_none(v))
for k, v in obj.items() if k is not None and v is not None)
else:
return obj
def purify(o):
"""Note: This one seems to be functionally equivalent to remove_none (at least for the cases I tested)."""
if hasattr(o, 'items'):
oo = type(o)()
for k in o:
if k is not None and o[k] is not None:
oo[k] = purify(o[k])
elif hasattr(o, '__iter__'):
oo = []
for it in o:
if it is not None:
oo.append(purify(it))
else:
return o
return type(o)(oo)
def strip_none(data):
"""Note: This one doesn't support OrderedDict, etc."""
if isinstance(data, dict):
return {k: strip_none(v) for k, v in data.items() if k is not None and v is not None}
elif isinstance(data, list):
return [strip_none(item) for item in data if item is not None]
elif isinstance(data, tuple):
return tuple(strip_none(item) for item in data if item is not None)
elif isinstance(data, set):
return {strip_none(item) for item in data if item is not None}
else:
return data
if d is None:
return None
elif not hasattr(d, 'items'):
raise TypeError('d is not a dict-like object.')
if recursive:
# return my_remove_none(d)
# return remove_none(d)
return purify(d)
# return strip_none(d)
else:
d = d.copy()
# remove all bad keys
bad_keys = [k for k, v in d.items() if v is None]
for k in bad_keys:
d.pop(k)
return d
| 12,375
|
def nonseq():
""" Return non sequence """
return 1
| 12,376
|
def handler(settings, orch, output, signum=None, frame=None):
"""Signal handler
    This function is called when the simulator receives SIGTERM, SIGHUP, SIGKILL
    or SIGQUIT from the OS.
    Its purpose is simply to write the partial results to a file.
Parameters
----------
settings : Settings
The simulator settings
orch : Orchestrator
The instance of the orchestrator
output : str
The output file
"""
logger.error('Received signal %d. Terminating' % signum)
RESULTS_WRITER[settings.RESULTS_FORMAT](orch.results, output)
logger.info('Saved intermediate results to file %s' % os.path.abspath(output))
orch.stop()
sys.exit(-signum)
| 12,377
|
def load_image(path, color_space=None, target_size=None):
    """Loads an image as a numpy array
    Arguments:
        path: Path to image file
        color_space: Currently unused
        target_size: Either None (defaults to the original size)
            or a tuple of ints '(image height, image width)'
    """
    img = io.imread(path)
    if target_size:
        # cv2.resize expects the output size as (width, height)
        img = cv2.resize(img, (target_size[1], target_size[0]), interpolation=cv2.INTER_CUBIC)
    return img
| 12,378
|
def kubernetes_node_label_to_dict(node_label):
"""Load Kubernetes node label to Python dict."""
if node_label:
label_name, value = node_label.split("=")
return {label_name: value}
return {}
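
# Quick sanity checks for the label parser above.
assert kubernetes_node_label_to_dict("disktype=ssd") == {"disktype": "ssd"}
assert kubernetes_node_label_to_dict(None) == {}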
| 12,379
|
def get_category_user_problem(cat_name, username):
"""
    Gets, for problems directly under the specified category, which ones the user
    has solved (AC), which ones have been attempted but not yet solved, and which
    ones have not been tried at all.
    :param cat_name: name of the category
    :param username: username of the user
    :return: a dict with 'solved', 'not_solved' and 'not_tried' lists of problems
"""
cat = __Category.objects.filter(name=cat_name).first()
user = __User.objects.filter(username=username).first()
if user is None or cat is None:
return {'solved': [], 'not_solved': [], 'not_tried': []}
query_dict = {}
relation = __ProblemUserRelation.objects.filter(user=user).values('problem_id', 'solved').distinct()
for i in relation:
query_dict[i['problem_id']] = i['solved']
problems = cat.problem.filter(category_relation__direct=True).values('id', 'title')
solved = []
not_solved = []
not_tried = []
for i in problems:
if i['id'] in query_dict:
if query_dict[i['id']] is True:
solved.append(i)
else:
not_solved.append(i)
else:
not_tried.append(i)
return {'solved': solved, 'not_solved': not_solved, 'not_tried': not_tried}
| 12,380
|
def write_list_content(log_writer, key, list_value, level, **kwargs):
"""
Writes list content to HTML. For example, the fields content of an event.
The html_wrapper decorator puts a start and end tag
(possibly specified in kwargs) around the content written by this function.
- log_writer:
The co-routine that writes the test log.
- key:
An event key, such as 'attr' or 'value'.
- list_value:
A list, the value that corresponds to 'key'.
- level:
The level of the div children used in calculating the
right margin.
"""
# Write key inside the div as it is and insert a collapsible button.
log_writer.send(key + COLLAPSE_BUTTON)
level += 1
# Calculate the margin for the list elements.
start_div = get_margin('div', level)
# Call recursive function on each list element.
for item in list_value:
write_html(log_writer, item,
event_no=kwargs.get('event_no', None),
start_tag=start_div,
level=level)
| 12,381
|
def floatToJson(x):
"""Custom rule for converting non-finite numbers to JSON as quoted strings: ``"inf"``, ``"-inf"``, and ``"nan"``. This avoids Python's bad habit of putting literal ``Infinity``, ``-Infinity``, and ``NaN`` in the JSON (without quotes)."""
if x in ("nan", "inf", "-inf"):
return x
elif math.isnan(x):
return "nan"
elif math.isinf(x) and x > 0.0:
return "inf"
elif math.isinf(x):
return "-inf"
else:
return x
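
# Usage sketch: non-finite floats become quoted strings in the JSON output
# (json is standard library; math is already required by the function above).
import json

values = [1.5, float("nan"), float("inf"), float("-inf")]
print(json.dumps([floatToJson(v) for v in values]))
# -> [1.5, "nan", "inf", "-inf"]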
| 12,382
|
def get_datafiles(datadir, prefix = ""):
"""
Scan directory for all csv files
prefix: used in recursive call
"""
datafiles = []
for fname in os.listdir(datadir):
fpath = os.path.join(datadir, fname)
datafile = os.path.join(prefix, fname)
if os.path.isdir(fpath):
datafiles += get_datafiles(fpath, datafile)
elif fname.endswith(".csv"):
datafiles.append(datafile)
return datafiles
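
# Self-contained sketch: builds a tiny directory tree in a temp dir and lists
# the csv files relative to it (os is already used above; tempfile is stdlib).
import tempfile

tmp = tempfile.mkdtemp()
os.makedirs(os.path.join(tmp, "2020"))
for rel in ("a.csv", os.path.join("2020", "b.csv"), "notes.txt"):
    open(os.path.join(tmp, rel), "w").close()
print(sorted(get_datafiles(tmp)))  # ['2020/b.csv', 'a.csv'] with POSIX separators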
| 12,383
|
def auto_delete_file_on_change(sender, instance, **kwargs):
"""
Deletes old file from filesystem
when corresponding `MediaFile` object is updated
with new file.
"""
if not instance.pk:
return False
try:
old_file = sender.objects.get(pk=instance.pk).url_banner
except sender.DoesNotExist:
return False
new_file = instance.url_banner
    if old_file != new_file:
if os.path.isfile(old_file.path):
os.remove(old_file.path)
| 12,384
|
def systematic_uncertainties():
"""tabulates sources of uncertainty and sums them in quadrature"""
result_m = [
0.066, # [0.07-0.12] 0.066 ± 0.019
0.019, # [0.12-0.20] 0.019 ± 0.009
0.002, # [0.20-0.30] 0.002 ± 0.009
-0.006, # [0.30-0.45] -0.006 ± 0.014
0.007, # [0.45-0.65] 0.007 ± 0.023
0.012 # [0.65-1.00] 0.012 ± 0.040
]
result_p = [
0.026, # [0.07-0.12] 0.026 ± 0.019
0.021, # [0.12-0.20] 0.021 ± 0.008
0.002, # [0.20-0.30] 0.002 ± 0.009
-0.014, # [0.30-0.45] -0.014 ± 0.013
0.024, # [0.45-0.65] 0.024 ± 0.022
0.046 # [0.65-1.00] 0.046 ± 0.037
]
pid_contamination = 0.10
pid_asym_m = [
( 0.051 , 0.038), # [0.07-0.12] 0.051 ± 0.038
(-0.017 , 0.016), # [0.12-0.20] -0.017 ± 0.016
(-0.032 , 0.016), # [0.20-0.30] -0.032 ± 0.016
(-0.006 , 0.023), # [0.30-0.45] -0.006 ± 0.023
(-0.031 , 0.042), # [0.45-0.65] -0.031 ± 0.042
( 0.089 , 0.085) # [0.65-1.00] 0.089 ± 0.085
]
pid_asym_p = [
( 0.005 , 0.036), # [0.07-0.12] 0.005 ± 0.036
( 0.006 , 0.015), # [0.12-0.20] 0.006 ± 0.015
(-0.006 , 0.015), # [0.20-0.30] -0.006 ± 0.015
( 0.018 , 0.020), # [0.30-0.45] 0.018 ± 0.020
(-0.038 , 0.032), # [0.45-0.65] -0.038 ± 0.032
( 0.142 , 0.059) # [0.65-1.00] 0.142 ± 0.059
]
for i in range(len(pid_asym_m)):
val, err = pid_asym_m[i]
pid_asym_m[i] = max( val-result_m[i], err)
for i in range(len(pid_asym_p)):
val, err = pid_asym_p[i]
pid_asym_p[i] = max( val-result_p[i], err)
beam_vector = 0.0102
asigma_m = [
0.035, # [0.07-0.12] 0.005 ± 0.035
0.015, # [0.12-0.20] -0.012 ± 0.015
0.016, # [0.20-0.30] -0.014 ± 0.016
0.027, # [0.30-0.45] -0.027 ± 0.023
0.066, # [0.45-0.65] -0.066 ± 0.040
0.073 # [0.65-1.00] -0.072 ± 0.073
]
asigma_p = [
0.034, # [0.07-0.12] -0.001 ± 0.034
0.014, # [0.12-0.20] -0.007 ± 0.014
0.015, # [0.20-0.30] 0.007 ± 0.015
0.025, # [0.30-0.45] -0.025 ± 0.022
0.039, # [0.45-0.65] -0.039 ± 0.037
0.061 # [0.65-1.00] 0.033 ± 0.061
]
mcasym_m = [
0.0066, # [0.07-0.12] 0.0012 ± 0.0066
0.0057, # [0.12-0.20] 0.0057 ± 0.0025
0.0089, # [0.20-0.30] 0.0089 ± 0.0020
0.0077, # [0.30-0.45] 0.0077 ± 0.0026
0.0042, # [0.45-0.65] 0.0038 ± 0.0042
0.0070 # [0.65-1.00] 0.0053 ± 0.0070
]
mcasym_p = [
0.0047, # [0.07-0.12] -0.0014 ± 0.0047
0.0077, # [0.12-0.20] 0.0077 ± 0.0024
0.0147, # [0.20-0.30] 0.0147 ± 0.0023
0.0105, # [0.30-0.45] 0.0105 ± 0.0024
0.0057, # [0.45-0.65] 0.0057 ± 0.0044
0.0112 # [0.65-1.00] 0.0112 ± 0.0081
]
pt_shift_m = [ 0, 0,
0.003, # [0.20-0.30] 0.006 low, 0.001 high, 0.003 avg
0.005, # [0.30-0.45] 0.007 low, 0.003 high, 0.005 avg
0.016, # [0.45-0.65] 0.020 low, 0.012 high, 0.016 avg
0.010 # [0.65-1.00] 0.011 low, 0.008 high, 0.010 avg
]
pt_shift_p = [ 0, 0,
0.004, # [0.20-0.30] 0.005 low, 0.003 high, 0.004 avg
0.007, # [0.30-0.45] 0.008 low, 0.006 high, 0.007 avg
0.016, # [0.45-0.65] 0.023 low, 0.008 high, 0.016 avg
0.016 # [0.65-1.00] 0.012 low, 0.020 high, 0.016 avg
]
relative_luminosity = 9.4e-4
minus = [0.0 for bin in zbins[:-1]]
plus = [0.0 for bin in zbins[:-1]]
start = len(zbins) == 5 and 2 or 0
for i in range(start, start+len(zbins)-1):
minus[i-start] = math.sqrt(
pow(relative_luminosity, 2) +
pow(pid_contamination*pid_asym_m[i], 2) +
pow(beam_vector*asigma_m[i], 2) +
pow(mcasym_m[i], 2) +
pow(pt_shift_m[i], 2)
)
plus[i-start] = math.sqrt(
pow(relative_luminosity, 2) +
pow(pid_contamination*pid_asym_p[i], 2) +
pow(beam_vector*asigma_p[i], 2) +
pow(mcasym_p[i], 2) +
pow(pt_shift_p[i], 2)
)
return {'minus':minus, 'plus':plus}
| 12,385
|
def extract_metamap(json_, key):
"""
Task function to parse and extract concepts from json_ style dic, using
the MetaMap binary.
Input:
- json_ : dic,
json-style dictionary generated from the Parse object related
to the specific type of input
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- json_ : dic,
the previous json-style dictionary enriched with medical concepts
"""
# outerfield for the documents in json
docfield = settings['out']['json']['itemfield']
# textfield to read text from
textfield = settings['out']['json']['json_text_field']
N = len(json_[docfield])
for i, doc in enumerate(json_[docfield]):
text = clean_text(doc[textfield])
if len(text) > 5000:
chunks = create_text_batches(text)
results = {'text': text, 'sents': []}
sent_id = 0
for chunk in chunks:
tmp = metamap_wrapper(chunk)
for sent in tmp['sents']:
sent['sent_id'] = sent_id
sent_id += 1
results['sents'].append(sent)
else:
results = metamap_wrapper(text)
json_[docfield][i].update(results)
proc = int(i/float(N)*100)
if proc % 10 == 0 and proc > 0:
time_log('We are at %d/%d documents -- %0.2f %%' % (i, N, proc))
return json_
| 12,386
|
def get_testinfo_by_reference(ref_name, ref_type):
""" get test content by reference name
@params:
ref_name: reference name, e.g. api_v1_Account_Login_POST($UserName, $Password)
ref_type: "api" or "suite"
"""
function_meta = parse_function(ref_name)
func_name = function_meta["func_name"]
call_args = function_meta["args"]
test_info = get_test_definition(func_name, ref_type)
def_args = test_info.get("function_meta").get("args", [])
if len(call_args) != len(def_args):
raise exception.ParamsError("call args mismatch defined args!")
args_mapping = {}
for index, item in enumerate(def_args):
if call_args[index] == item:
continue
args_mapping[item] = call_args[index]
if args_mapping:
test_info = substitute_variables_with_mapping(test_info, args_mapping)
return test_info
| 12,387
|
def game_info(uuid: str) -> dict:
"""
return info about game by uuid
:param uuid:
:return: message
"""
logging.info(uuid)
logging.info(games.keys())
if UUID(uuid) in games.keys():
select_game: Game = games.get(UUID(uuid))
return {
"uuid": uuid,
"start_time": select_game.start_time,
"field": select_game.field,
}
else:
return {"Error": f"{uuid} game not found!"}
| 12,388
|
def main(genome_accession_file, project_dir, lsf=True):
"""
Parses a file of genome accessions and downloads genomes from ENA. Genomes
are downloaded in fasta format. It is a requirement that genome_accession
file containes a GCA or WGS accession per genome
genome_accession_file: A file with a list of upid\tGCA\tdomain or
upid\tWGS\tdomain pairs
project_dir: The path to the directory where all genomes will be downloaded
It will be created if it does not exist
return: void
"""
# generating project_directory
if not os.path.exists(project_dir):
os.mkdir(project_dir)
# create a file handle and read genome id pairs
input_fp = open(genome_accession_file, 'r')
for genome in input_fp:
genome_data = genome.strip().split('\t')
upid = genome_data[0]
domain = genome_data[2]
gen_acc = genome_data[1]
# create domain directory
domain_dir = os.path.join(project_dir, domain)
if not os.path.exists(domain_dir):
os.mkdir(domain_dir)
# create genome directory e.g. project_dir/domain/updir
updir = os.path.join(project_dir, os.path.join(domain_dir, upid))
if not os.path.exists(updir):
os.mkdir(updir)
if lsf is not True:
fetch_genome_from_ENA(gen_acc, updir)
# submit a job
else:
err_file = os.path.join(updir, upid+'.err')
            out_file = os.path.join(updir, upid+'.out')
bsub_cmd = "bsub -o %s -e %s -g %s %s -f fasta -m -d %s %s" % (out_file,
err_file,
LSF_GROUP % domain,
os.path.join(ENA_TOOL, 'enaDataGet'),
updir,
gen_acc)
subprocess.call(bsub_cmd, shell=True)
| 12,389
|
def miller_rabin(n, a):
"""
Miller-Rabin Primality Test
Returns true if n is a (probable) prime
Returns false if n is a composite number
"""
s = 0
d = n - 1
while d % 2 == 0:
s = s + 1
d = d >> 1
x = square_and_multiply(a, d, n)
if x != 1 and x + 1 != n:
for r in range(1, s):
x = square_and_multiply(x, 2, n)
if x == 1:
return False
elif x == n - 1:
a = 0
break
if a:
return False
return True
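
# Hedged sketch: square_and_multiply is not defined in this snippet; it is
# assumed to be modular exponentiation, so pow() is used as a stand-in here.
square_and_multiply = pow  # assumption: square_and_multiply(b, e, m) == pow(b, e, m)
assert miller_rabin(97, 2)        # 97 is prime
assert not miller_rabin(15, 2)    # 15 = 3 * 5 is composite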
| 12,390
|
def luhn_sum_v1(num):
"""
First version of luhn_sum; uses a list which it modifies in-place.
"""
nums = [int(i) for i in reversed(str(num))]
for i in xrange(1, len(nums), 2):
nums[i] *= 2
return sum(sum(divmod(i, 10)) for i in nums)
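
# Quick Luhn check (note this snippet targets Python 2, per the xrange call);
# 79927398713 is the classic valid Luhn test number.
assert luhn_sum_v1(79927398713) % 10 == 0
assert luhn_sum_v1(79927398710) % 10 != 0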
| 12,391
|
def build(c):
"""Build"""
c.run(f"{sys.executable} setup.py sdist bdist_wheel")
| 12,392
|
def get_rate_discounted_rate(item_code, customer, company, so_number = None):
""" This function is use to get discounted rate and rate """
item_group = frappe.get_value("Item", item_code, 'item_group')
# parent_item_group = frappe.get_value("Item Group", item_group, 'parent_item_group')
count = frappe.db.sql(f"""
SELECT
COUNT(*)
FROM
`tabDelivery Note Item` as soi
JOIN
`tabDelivery Note` as so ON so.`name` = soi.`parent`
WHERE
soi.`item_group` = '{item_group}' AND
soi.`docstatus` = 1 AND
so.customer = '{customer}' AND
so.`company` = '{company}'
LIMIT 1
""")
where_clause = ''
if count[0][0]:
where_clause = f"soi.item_group = '{item_group}' AND"
data = None
if so_number:
data = frappe.db.sql(f"""
SELECT
soi.`rate` as `rate`
FROM
`tabDelivery Note Item` as soi
JOIN
`tabDelivery Note` as so ON soi.parent = so.name
WHERE
{where_clause}
so.`customer` = '{customer}' AND
so.`company` = '{company}' AND
so.`docstatus` != 2 AND
so.`name` = '{so_number}'
ORDER BY
soi.`creation` DESC
LIMIT
1
""", as_dict = True)
if not data:
data = frappe.db.sql(f"""
SELECT
soi.`rate` as `rate`
FROM
`tabDelivery Note Item` as soi JOIN
`tabDelivery Note` as so ON soi.parent = so.name
WHERE
{where_clause}
so.`customer` = '{customer}' AND
so.`company` = '{company}' AND
so.`docstatus` != 2
ORDER BY
soi.`creation` DESC
LIMIT
1
""", as_dict = True)
return data[0] if data else {'rate': 0}
| 12,393
|
def lambda_handler(event: Dict[str, Any], context: Dict[str, Any]) -> str:
"""
Lambda function to parse notification events and forward to Slack
:param event: lambda expected event object
:param context: lambda expected context object
    :returns: the response from send_slack_notification for the last processed record
"""
if os.environ.get("LOG_EVENTS", "False") == "True":
logging.info(f"Event logging enabled: `{json.dumps(event)}`")
for record in event["Records"]:
sns = record["Sns"]
subject = sns["Subject"]
message = sns["Message"]
region = sns["TopicArn"].split(":")[3]
payload = get_slack_message_payload(
message=message, region=region, subject=subject
)
response = send_slack_notification(payload=payload)
if json.loads(response)["code"] != 200:
response_info = json.loads(response)["info"]
logging.error(
f"Error: received status `{response_info}` using event `{event}` and context `{context}`"
)
return response
| 12,394
|
def sum_obs_np(A):
"""summation over axis 0 (obs) equivalent to np.sum(A, 0)"""
return np.einsum("ij -> j", A) if A.ndim > 1 else np.sum(A)
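
# Sanity check against the documented equivalence (np is assumed to be numpy,
# exactly as in the function body above).
A = np.arange(6, dtype=float).reshape(2, 3)
assert np.allclose(sum_obs_np(A), np.sum(A, 0))
assert np.isclose(sum_obs_np(np.array([1.0, 2.0, 3.0])), 6.0)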
| 12,395
|
def _macos_command_line_infoplist_impl(ctx):
"""Implementation of the internal `macos_command_line_infoplist` rule.
This rule is an internal implementation detail of
`macos_command_line_application` and should not be used directly by clients.
It merges Info.plists as would occur for a bundle but then propagates an
`objc` provider with the necessary linkopts to embed the plist in a binary.
Args:
ctx: The rule context.
Returns:
A `struct` containing the `objc` provider that should be propagated to a
binary that should have this plist embedded.
"""
bundle_id = ctx.attr.bundle_id
infoplists = ctx.files.infoplists
if ctx.attr.version and AppleBundleVersionInfo in ctx.attr.version:
version = ctx.attr.version[AppleBundleVersionInfo]
else:
version = None
if not bundle_id and not infoplists and not version:
fail("Internal error: at least one of bundle_id, infoplists, or version " +
"should have been provided")
plist_results = plist_actions.merge_infoplists(
ctx,
None,
infoplists,
bundle_id = bundle_id,
exclude_executable_name = True,
extract_from_ctxt = True,
include_xcode_env = True,
)
merged_infoplist = plist_results.output_plist
return [
linker_support.sectcreate_objc_provider(
"__TEXT",
"__info_plist",
merged_infoplist,
),
]
| 12,396
|
def get_custom_data_format(*args):
"""
get_custom_data_format(dfid) -> data_format_t
Get definition of a registered custom data format.
@param dfid: data format id (C++: int)
@return: data format definition or NULL
"""
return _ida_bytes.get_custom_data_format(*args)
| 12,397
|
def memory(info, func, expr):
"""
checks if the function has been called with the same argument previously and
if so, returns the same results instead of running the function again
    args:
        info: an Info object whose `evaluated` dict caches previous results,
            or None to start with a fresh cache
        func: the function (or its identifier) used as the outer cache key
        expr: the expression/argument used as the inner cache key
    returns:
        (info, rows) where rows is the cached result, or None on a cache miss
"""
rows=None
if info:
if func in info.evaluated:
if expr in info.evaluated[func]:
rows = info.evaluated[func][expr]
else:
info.evaluated[func] = {}
else:
info = Info()
info.evaluated[func] = {}
return info, rows
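
# Hedged usage sketch: Info is assumed to be a small holder class with an
# `evaluated` dict attribute, as implied by the function body above; the
# "pricing" key and the expression string are hypothetical.
info, rows = memory(None, "pricing", "region == 'EU'")   # first call: cache miss
assert rows is None
info.evaluated["pricing"]["region == 'EU'"] = [("EU", 42)]
info, rows = memory(info, "pricing", "region == 'EU'")   # second call: cache hit
assert rows == [("EU", 42)]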
| 12,398
|
def install_common_tool():
"""
    Install many of the tools that I use for my day-to-day work. Others might want
    to modify this function.
"""
config_bashrc()
install_apt_pkg()
install_nvtop()
install_pip_dependencies()
install_gdrive_rclone()
install_iterm_shell_integration()
install_vim_tmux()
| 12,399
|