| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def upload_to_sql(git_commit_filename, commit_people_filename, cc,
git_checkout_path, commits_after_date): # pragma: no cover
"""Writes suspicious git commits to a Cloud SQL database
Args:
cc: a cursor for the Cloud SQL connection
git_checkout_path(str): path to a local git checkout
"""
write_to_csv(git_commit_filename, commit_people_filename, cc,
git_checkout_path, commits_after_date)
csql.write_to_sql_table(cc, commit_people_filename, 'commit_people')
csql.write_to_sql_table(cc, git_commit_filename, 'git_commit')
| 8,900
|
def l2_normalize_rows(frame):
"""
L_2-normalize the rows of this DataFrame, so their lengths in Euclidean
distance are all 1. This enables cosine similarities to be computed as
dot-products between these rows.
Rows of zeroes will be normalized to zeroes, and frames with no rows will
be returned as-is.
"""
if frame.shape[0] == 0:
return frame
index = frame.index
return pd.DataFrame(
data=normalize(frame, norm='l2', copy=False, axis=1), index=index
)
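# Illustrative usage (a sketch, not part of the original snippet; assumes the
# same imports the function relies on, i.e. pandas as pd and
# sklearn.preprocessing.normalize): after row-normalization, cosine similarity
# between rows reduces to a plain dot product.
import numpy as np
import pandas as pd
_demo = pd.DataFrame([[3.0, 4.0], [1.0, 0.0]], index=["a", "b"])
_unit = l2_normalize_rows(_demo)
assert np.allclose(np.linalg.norm(_unit.values, axis=1), 1.0)  # every row has length 1
assert np.isclose(_unit.loc["a"].dot(_unit.loc["b"]), 0.6)     # cosine similarity = 3/5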
| 8,901
|
def install():
""" Installs the webapp code in the virtual environemnt 'web' on the server.
"""
with cd('/home/genomics.www/status'):
with prefix('workon web'):
sudo('python setup.py develop', user='genomics.www')
| 8,902
|
def unregister_custom_op(op_name: str) -> None:
""" Unregister a custom operator.
Args:
op_name (str): Name of the custom operator
"""
bound_op_map.pop(op_name)
| 8,903
|
def canonical_ipv4_address(ip_addr):
"""Return the IPv4 address in a canonical format"""
return socket.inet_ntoa(socket.inet_aton(ip_addr))
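# Illustrative usage (a sketch, not part of the original snippet): on most
# platforms inet_aton also accepts shorthand forms, which this helper expands
# into canonical dotted-quad notation.
assert canonical_ipv4_address("10.0.0.1") == "10.0.0.1"
assert canonical_ipv4_address("127.1") == "127.0.0.1"  # shorthand form, most platforms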
| 8,904
|
def _build_dynatree(site, expanded):
"""Returns a dynatree hash representation of our pages and menu
hierarchy."""
subtree = _pages_subtree(site.doc_root, site.default_language, True, 1,
expanded)
subtree['activate'] = True
pages_node = {
'title': 'Pages',
'key': 'system:pages',
'expand': True,
'icon': 'fatcow/folders_explorer.png',
'children': [subtree, ],
}
language = site.default_language
menus = []
for menu in Menu.objects.filter(site=site):
items = []
for item in menu.first_level.all():
items.append(_menuitem_subtree(item, language, True, 1, expanded))
menus.append({
'title': menu.name,
'key': 'menu:%d' % menu.id,
'expand': True,
'icon': 'fatcow/folders.png',
'children':items,
})
menus_node = {
'title': 'Menus',
'key': 'system:menus',
'expand': True,
'icon': 'fatcow/folders_explorer.png',
'children': menus,
}
tags = []
for tag in Tag.objects.filter(site=site):
title = tag.display_text(language)
if not title:
title = '<i>None</i>'
tags.append({
'title': title,
'key':'tag:%d' % tag.id,
'icon': 'fatcow/document_tag.png',
'expand': False,
})
tags_node = {
'title':'Tags',
'key':'system:tags',
'expand':False,
'icon': 'fatcow/folders_explorer.png',
'children': tags,
}
tree = [pages_node, menus_node, tags_node]
return tree
| 8,905
|
def check_tie_condition(board):
""" tie = if no empty cells and no win """
logging.debug('check_tie_condition()')
# is the board full and no wins
empty_cells = board.count('-')
logging.debug(f'Number of empty cells {empty_cells}')
tie = (empty_cells == 0)
return tie
| 8,906
|
def aggregator(df, groupbycols):
"""
Aggregates flowbyactivity or flowbysector df by given groupbycols
:param df: Either flowbyactivity or flowbysector
:param groupbycols: Either flowbyactivity or flowbysector columns
:return:
"""
# tmp replace null values with empty cells
df = replace_NoneType_with_empty_cells(df)
# drop columns with flowamount = 0
df = df[df['FlowAmount'] != 0]
# list of column headers, that if exist in df, should be aggregated using the weighted avg fxn
possible_column_headers = ('Spread', 'Min', 'Max', 'DataReliability', 'TemporalCorrelation',
'GeographicalCorrelation', 'TechnologicalCorrelation',
'DataCollection')
# list of column headers that do exist in the df being aggregated
column_headers = [e for e in possible_column_headers if e in df.columns.values.tolist()]
df_dfg = df.groupby(groupbycols).agg({'FlowAmount': ['sum']})
# run through other columns creating weighted average
for e in column_headers:
df_dfg[e] = weighted_average(df, e, 'FlowAmount', groupbycols)
df_dfg = df_dfg.reset_index()
df_dfg.columns = df_dfg.columns.droplevel(level=1)
# if datatypes are strings, ensure that Null values remain NoneType
df_dfg = replace_strings_with_NoneType(df_dfg)
return df_dfg
| 8,907
|
def test_svr_linear_rfe():
"""
Test to see if normalized SVR dataset is a pandas core dataframe
"""
test_x_train = vse.get_data_rfe()[0]
test_y_train = vse.get_data_rfe()[2]
test_result = vse.svr_linear_rfe(test_x_train, test_y_train)
assert isinstance(test_result[1], pd.core.frame.DataFrame), \
"Not a pandas core dataframe"
| 8,908
|
def setup_logging():
"""Set up logging based on provided log params."""
formatter = logging.Formatter(LOG_FORMAT)
ROOTLOGGER.setLevel(config["app"]["log_level"].upper())
sh = logging.StreamHandler()
sh.setLevel(config["app"]["log_level"].upper())
sh.setFormatter(formatter)
ROOTLOGGER.addHandler(sh)
LOGGER.info("-------------------------STARTING-------------------------")
LOGGER.info("INFO Logging Level -- Enabled")
LOGGER.warning("WARNING Logging Level -- Enabled")
LOGGER.critical("CRITICAL Logging Level -- Enabled")
LOGGER.debug("DEBUG Logging Level -- Enabled")
| 8,909
|
def unpack(X):
""" Unpack a comma separated list of values into a flat list """
return flatten([x.split(",") for x in list(X)])
| 8,910
|
def doize(tock=0.0, **opts):
"""
Decorator that returns Doist compatible decorated generator function.
Usage:
@doize
def f():
pass
Parameters:
tock is default tock attribute of doized f
opts is dictionary of remaining parameters that becomes .opts attribute
of doized f
"""
def decorator(f):
# must create copy not wrapper so inspect.isgeneratorfunction works
# result of decoration
g = helping.copy_func(f)
g.tock = tock # default tock attributes
g.done = None # default done state
g.opts = dict(opts) # default opts attribute
return g
return decorator
| 8,911
|
def get_env(path):
"""
Read the environment file from given path.
:param path: Path to the environment file.
:return: the environment (loaded yaml)
"""
with codecs.open(path, 'r', 'UTF-8') as env_file:
conf_string = env_file.read()
        env = yaml.safe_load(conf_string)  # yaml.load without a Loader is deprecated/unsafe
logging.debug('env: %s', env)
return env
| 8,912
|
def put_value(obj, value):
"""Sets the value of `obj` reference to `value`.
See [ECMA-262 8.7.2] for details."""
if isinstance(obj, Reference):
obj.put_value(value)
else:
raise ReferenceError("Can't put a value of non-reference object %r" % obj)
| 8,913
|
def make_chained_transformation(tran_fns, *args, **kwargs):
"""Returns a dataset transformation function that applies a list of
transformations sequentially.
Args:
tran_fns (list): A list of dataset transformation.
*args: Extra arguments for each of the transformation function.
**kwargs: Extra keyword arguments for each of the transformation
function.
Returns:
A transformation function to be used in
:tf_main:`tf.data.Dataset.map <data/Dataset#map>`.
"""
def _chained_fn(data):
for tran_fns_i in tran_fns:
data = tran_fns_i(data, *args, **kwargs)
return data
return _chained_fn
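# Minimal sketch of the chaining behaviour using plain Python callables instead
# of tf.data transformations (illustrative only, not from the original source):
_scale = lambda data, factor: data * factor
_shift = lambda data, factor: data + factor
_chained = make_chained_transformation([_scale, _shift], 10)
assert _chained(3) == 40  # _scale then _shift with the shared extra argument: 3 * 10 + 10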
| 8,914
|
def configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
elif FLAGS.optimizer == "adamweightdecay":
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=FLAGS.adam_beta1,
beta_2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
else:
raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
return optimizer
| 8,915
|
def train_and_evaluate(config, workdir):
"""Runs a training and evaluation loop.
Args:
config: Configuration to use.
workdir: Working directory for checkpoints and TF summaries. If this
contains checkpoint training will be resumed from the latest checkpoint.
Returns:
Training state.
"""
rng = jax.random.PRNGKey(config.seed)
rng, data_rng = jax.random.split(rng)
# Make sure config defines num_epochs and num_train_steps appropriately.
utils.check_epochs_and_steps(config)
# Check that perturbed-topk is selection method.
assert config.selection_method == "perturbed-topk", (
"ntsnet only supports perturbed-topk as selection method. Got: {}".format(
config.selection_method))
train_preprocessing_fn, eval_preprocessing_fn = data.parse_preprocessing_strings(
config.get("train_preprocess_str", ""),
config.get("eval_preprocess_str", ""))
assert config.batch_size % jax.local_device_count() == 0, (
f"Batch size ({config.batch_size}) should be divisible by number of "
f"devices ({jax.local_device_count()}).")
per_device_batch_size = config.batch_size // jax.local_device_count()
train_ds, eval_ds, num_classes = data.get_dataset(
config.dataset,
per_device_batch_size,
data_rng,
train_preprocessing_fn=train_preprocessing_fn,
eval_preprocessing_fn=eval_preprocessing_fn,
**config.get("data", {}))
module = AttentionNet.partial(config=config, num_classes=num_classes)
optimizer = create_optimizer(config)
loss_fn = functools.partial(ntsnet_loss, config=config)
train_metrics_dict = {
"train_loss": loss_fn,
"train_loss_raw": cross_entropy_raw_logits,
"train_loss_concat": cross_entropy_concat_logits,
"train_loss_part": cross_entropy_part_logits,
"train_accuracy": accuracy,
"train_rpn_scores_entropy": rpn_scores_entropy,
}
eval_metrics_dict = {
"eval_loss": loss_fn,
"eval_loss_raw": cross_entropy_raw_logits,
"eval_loss_concat": cross_entropy_concat_logits,
"eval_loss_part": cross_entropy_part_logits,
"eval_accuracy": accuracy,
"eval_rpn_scores_entropy": rpn_scores_entropy,
}
# Enables relevant statistics aggregator.
stats_aggregators = []
def add_image_prefix(image_aggregator):
def aggregator(stats):
d = image_aggregator(stats)
return {f"image_{k}": v for k, v in d.items()}
return aggregator
if config.get("log_images", True):
@add_image_prefix
def plot_patches(stats):
d = {
"part_imgs": (stats["part_imgs"] + 1.0) / 2.0,
"x": (stats["x"] + 1.0) / 2.0
}
for i, sc in enumerate(stats["scores"]):
d[f"scores_{i}"] = sc
return d
stats_aggregators.append(plot_patches)
stats_aggregators.append(lambda x: {"sigma": x["sigma"]})
state = classification_lib.training_loop(
module=module,
rng=rng,
train_ds=train_ds,
eval_ds=eval_ds,
loss_fn=loss_fn,
optimizer=optimizer,
train_metrics_dict=train_metrics_dict,
eval_metrics_dict=eval_metrics_dict,
stats_aggregators=stats_aggregators,
config=config,
workdir=workdir)
return state
| 8,916
|
def flail(robot: DynamixelRobotComponent):
"""Commands the robot to flail if it's stuck on an obstacle."""
for _ in range(6):
robot.set_state(
{'all': RobotState(qpos=(np.random.rand(12) - .5) * 3)},
**SET_PARAMS,
timeout=.15,
)
| 8,917
|
def deploy(version):
""" depoly app to cloud """
with cd(app_path):
get_app(version)
setup_app(version)
config_app()
nginx_config()
nginx_enable_site('growth-studio.conf')
circus_config()
circus_upstart_config()
circus_start()
nginx_restart()
| 8,918
|
def test_publishing_the_same_volumes_with_a_different_target_path():
"""Publishing the same volumes with a different target_path."""
| 8,919
|
def increment(i,k):
""" this is a helper function for a summation of the type :math:`\sum_{0 \leq k \leq i}`,
where i and k are multi-indices.
Parameters
----------
i: numpy.ndarray
integer array, i.size = N
k: numpy.ndarray
integer array, k.size = N
Returns
-------
changes k on return
Example
-------
k = [1,0,1]
i = [2,0,2]
increment(i, k) # changes k to [1,0,2]
increment(i, k) # changes k to [2,0,0]
increment(i, k) # changes k to [2,0,1]
"""
carryover = 1
if len(k) != len(i):
raise ValueError('size of i and k do not match up')
for n in range(len(k))[::-1]:
if i[n] == 0:
continue
tmp = k[n] + carryover
# print 'tmp=',tmp
carryover = tmp // (i[n]+1)
# print 'carryover=',carryover
k[n] = tmp % (i[n]+1)
if carryover == 0:
break
return k
| 8,920
|
def sample_lopt(key: chex.PRNGKey) -> cfgobject.CFGObject:
"""Sample a small lopt model."""
lf = cfgobject.LogFeature
rng = hk.PRNGSequence(key)
task_family_cfg = para_image_mlp.sample_image_mlp(next(rng))
lopt_name = parametric_utils.choice(
next(rng), [
"LearnableAdam", "LearnableSGDM", "LearnableSGD", "MLPLOpt",
"AdafacMLPLOpt"
])
kwargs = {}
if lopt_name in ["MLPLOpt", "AdafacMLPLOpt"]:
kwargs["hidden_size"] = lf(parametric_utils.log_int(next(rng), 2, 512))
kwargs["hidden_layers"] = parametric_utils.log_int(next(rng), 1, 4)
kwargs["exp_mult"] = lf(parametric_utils.log_float(next(rng), 1e-5, 1))
kwargs["step_mult"] = lf(parametric_utils.log_float(next(rng), 1e-5, 1))
lopt_cfg = cfgobject.CFGObject(lopt_name, kwargs)
num_steps = lf(parametric_utils.log_int(next(rng), 1, 100))
outer_bs = lf(parametric_utils.log_int(next(rng), 1, 8))
return cfgobject.CFGObject(
"ParametricLOpt", {
"lopt": lopt_cfg,
"task_family": task_family_cfg,
"num_steps": num_steps,
"outer_batch_size": outer_bs,
})
| 8,921
|
def helper_describe_zones():
"""
Create the zones list in the project
:return:
"""
global zones_list
request = compute_service.zones().list(project=project)
while request is not None:
response = request.execute()
for zone in response['items']:
zones_list.append(zone['name'])
request = compute_service.zones().list_next(previous_request=request, previous_response=response)
return
| 8,922
|
def gen_df_groupby_usecase(method_name, groupby_params=None, method_params=''):
"""Generate df groupby method use case"""
groupby_params = {} if groupby_params is None else groupby_params
groupby_params = get_groupby_params(**groupby_params)
func_text = groupby_usecase_tmpl.format(**{
'method_name': method_name,
'groupby_params': groupby_params,
'method_params': method_params
})
global_vars = {'np': numpy, 'time': time}
loc_vars = {}
exec(func_text, global_vars, loc_vars)
_df_groupby_usecase = loc_vars[f'df_groupby_{method_name}_usecase']
return _df_groupby_usecase
| 8,923
|
def run_successful_task(action: Action, action_owner_name: str):
"""Run action and expect it succeeds"""
task = action.run()
try:
wait_for_task_and_assert_result(task, status="success")
except AssertionError as error:
raise AssertionError(
f'Action {action.name} should have succeeded when ran on {action_owner_name}:\n{error}'
) from error
| 8,924
|
def _processor_startup_fn(
pull_port, push_port, sockets_connected_evt, process_fn, event_queue, debug
):
"""
Parameters
----------
pull_port :
push_port :
sockets_connected_evt :
process_fn :
event_queue :
debug :
Returns
-------
"""
bridge = Bridge(debug=debug)
push_socket = bridge._connect_push(pull_port)
pull_socket = bridge._connect_pull(push_port)
if debug:
print("image processing sockets connected")
sockets_connected_evt.set()
def process_and_sendoff(image_tags_tuple, original_dtype):
"""
Parameters
----------
image_tags_tuple :
Returns
-------
"""
if len(image_tags_tuple) != 2:
raise Exception("If image is returned, it must be of the form (pixel, metadata)")
pixels = image_tags_tuple[0]
metadata = image_tags_tuple[1]
# only accepts same pixel type as original
if not np.issubdtype(image_tags_tuple[0].dtype, original_dtype) and not np.issubdtype(
original_dtype, image_tags_tuple[0].dtype
):
            raise Exception(
                "Processed image pixels must have same dtype as input image pixels, "
                "but instead they were {} and {}".format(pixels.dtype, original_dtype)
            )
metadata["PixelType"] = "GRAY8" if pixels.dtype.itemsize == 1 else "GRAY16"
processed_img = {
"pixels": pixels.tobytes(),
"metadata": metadata,
}
push_socket.send(processed_img)
while True:
message = None
while message is None:
message = pull_socket.receive(timeout=30) # check for new message
if "special" in message and message["special"] == "finished":
            push_socket.send(message)  # Continue propagating the finished signal
push_socket.close()
pull_socket.close()
return
metadata = message["metadata"]
pixels = deserialize_array(message["pixels"])
image = np.reshape(pixels, [metadata["Height"], metadata["Width"]])
params = signature(process_fn).parameters
if len(params) == 2 or len(params) == 4:
processed = None
try:
if len(params) == 2:
processed = process_fn(image, metadata)
elif len(params) == 4:
processed = process_fn(image, metadata, bridge, event_queue)
except Exception as e:
warnings.warn("exception in image processor: {}".format(e))
continue
else:
raise Exception(
"Incorrect number of arguments for image processing function, must be 2 or 4"
)
if processed is None:
continue
if type(processed) == list:
for image in processed:
process_and_sendoff(image, pixels.dtype)
else:
process_and_sendoff(processed, pixels.dtype)
| 8,925
|
def hydra_breakpoints(in_bam, pair_stats):
"""Detect structural variation breakpoints with hydra.
"""
in_bed = convert_bam_to_bed(in_bam)
if os.path.getsize(in_bed) > 0:
pair_bed = pair_discordants(in_bed, pair_stats)
dedup_bed = dedup_discordants(pair_bed)
return run_hydra(dedup_bed, pair_stats)
else:
return None
| 8,926
|
def merge_csvfiles(options):
""" Think of this as a 'join' across options.mergefiles on equal values of
the column options.timestamp. This function takes each file in
options.mergefiles, reads them, and combines their columns in
options.output. The only common column should be options.timestamp. The
results are undefined if the mergefiles share other column names.
Args:
options.mergefiles - list of csv filenames
options.output - filename of merged csv file from this operation
Returns:
bool - True if success
Raises:
AssertionError - if merging encounters an error.
"""
records = {}
all_header_names = []
records_list = []
# collect all header fields from mergefiles
for filename in options.mergefiles:
records = read_csvfile(filename, True)
records_list.append(records)
all_header_names += records.fieldnames
all_header_names = sorted(set(all_header_names))
    # eliminate duplicate header names
output_fd = open(options.output,'w')
writer = csv.DictWriter(output_fd, all_header_names)
writer.writeheader()
try:
# read all values until StopIteration is reached.
while True:
            merge_list = [next(records) for records in records_list]
merge_dict = merge_rows(merge_list, options)
writer.writerow(merge_dict)
except StopIteration:
pass
output_fd.close()
return True
| 8,927
|
def data_generator(samples, shape, batch_size, correction, sensitivity,
angle_threshold):
"""Used to build an augmented data set for training
Args:
samples (list(str)): list of samples to process
shape (tuple(int)): shape of input
batch_size (int): number of samples to process in each batch
correction (float): offset to apply to left and right camera angles
* left will have the correction added
* right will have the correction deducted
sensitivity (float): used to set upper limit and lower limit within
which the correction value can fluctuate
angle_threshold (float): angles below this will be considered for
exclusion from the generator in order to
balance the distribution of angles the
training model will see.
"""
num_samples = len(samples)
rows, cols, ch = shape
factors = [0, 1, -1]
samples = shuffle(samples)
while True:
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
X = []
y = []
for i, sample in enumerate(batch_samples):
# get steering angle
angle = float(sample[3])
# camera selection
# -> drop near-0 angle data with probability
# -> [0, 1, 2] = [center, left, right]
if abs(angle) < angle_threshold and np.random.random() < 0.5:
camera = np.random.choice([1, 2])
else:
camera = np.random.choice([0, 1, 2])
adjustment = np.random.uniform(correction - sensitivity,
correction + sensitivity)
# adjust angle if needed
# -> factors[0, 1, -1] = [center, left, right]
angle += (factors[camera] * adjustment)
# read image
img_path = os.path.join(repo_dir, sample[camera])
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# randomly choose to flip
if np.random.random() < 0.5:
img = np.fliplr(img)
angle *= -1
# randomly amend brightness
# https://discussions.udacity.com/t/still-having-some-issues-with-project-behavioral-cloning-any-advice/234354/45
if np.random.random() < 0.5:
random_bright = .25 + np.random.uniform()
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
img[:, :, 2] = img[:, :, 2] * random_bright
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
img = np.clip(img, 0, 255)
X.append(img)
y.append(angle)
yield shuffle(np.array(X), np.array(y))
| 8,928
|
async def euphoria():
"""
Trigger a state of "euphoria" emotion, extremely happy and positive bot
"""
async with db.DiscordDB("emotions") as db_obj:
db_obj.write(
{
"happy": EMOTION_CAPS["happy"][1],
"anger": EMOTION_CAPS["anger"][0],
"bored": 0,
"confused": 0,
}
)
| 8,929
|
def row_component(cards):
"""
Creates a horizontal row used to contain cards.
The card and row_component work together to create a
layout that stretches and shrinks when the user changes the size of the window,
or accesses the dashboard from a mobile device.
See https://developer.mozilla.org/en-US/docs/Learn/CSS/CSS_layout for more information.
"""
return html.Div(
cards, className="govuk-list card-container", style={"alignItems": "stretch"}
)
| 8,930
|
def recCopyElement(oldelement):
"""Generates a copy of an xml element and recursively of all
child elements.
:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"
.. warning::
doesn't copy ``.text`` or ``.tail`` of xml elements
"""
newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
    if len(oldelement) > 0:
        for childelement in oldelement:
newelement.append(recCopyElement(childelement))
return newelement
| 8,931
|
def NameExpansionIterator(command_name,
debug,
logger,
gsutil_api,
url_strs,
recursion_requested,
all_versions=False,
cmd_supports_recursion=True,
project_id=None,
ignore_symlinks=False,
continue_on_error=False,
bucket_listing_fields=None):
"""Static factory function for instantiating _NameExpansionIterator.
This wraps the resulting iterator in a PluralityCheckableIterator and checks
that it is non-empty. Also, allows url_strs to be either an array or an
iterator.
Args:
command_name: name of command being run.
debug: Debug level to pass to underlying iterators (range 0..3).
logger: logging.Logger object.
gsutil_api: Cloud storage interface. Settable for testing/mocking.
url_strs: Iterable URL strings needing expansion.
recursion_requested: True if -r specified on command-line. If so,
listings will be flattened so mapped-to results contain objects
spanning subdirectories.
all_versions: Bool indicating whether to iterate over all object versions.
cmd_supports_recursion: Bool indicating whether this command supports a '-r'
flag. Useful for printing helpful error messages.
project_id: Project id to use for the current command.
ignore_symlinks: If True, ignore symlinks during iteration.
continue_on_error: If true, yield no-match exceptions encountered during
iteration instead of raising them.
bucket_listing_fields: Iterable fields to include in expanded results.
      Ex. ['name', 'acl']. Underlying iterator is responsible for converting
these to list-style format ['items/name', 'items/acl']. If this is
None, only the object name is included in the result.
Raises:
CommandException if underlying iterator is empty.
Returns:
Name expansion iterator instance.
For example semantics, see comments in NameExpansionIterator.__init__.
"""
url_strs = PluralityCheckableIterator(url_strs)
name_expansion_iterator = _NameExpansionIterator(
command_name,
debug,
logger,
gsutil_api,
url_strs,
recursion_requested,
all_versions=all_versions,
cmd_supports_recursion=cmd_supports_recursion,
project_id=project_id,
ignore_symlinks=ignore_symlinks,
continue_on_error=continue_on_error,
bucket_listing_fields=bucket_listing_fields)
name_expansion_iterator = PluralityCheckableIterator(name_expansion_iterator)
if name_expansion_iterator.IsEmpty():
raise CommandException(NO_URLS_MATCHED_GENERIC)
return name_expansion_iterator
| 8,932
|
def KETAMA(key):
"""
MD5-based hashing algorithm used in consistent hashing scheme
to compensate for servers added/removed from memcached pool.
"""
d = hashlib.md5(key).digest()
c = _signed_int32
h = c((ord(d[3])&0xff) << 24) | c((ord(d[2]) & 0xff) << 16) | \
c((ord(d[1]) & 0xff) << 8) | c(ord(d[0]) & 0xff)
return h
| 8,933
|
def add_months(dt, months):
    """
    Add (or subtract) a number of months to a date, clamping the day to the
    last day of the resulting month.
    """
    month = dt.month - 1 + months
    year = dt.year + month // 12  # integer division so year/month stay ints
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year=year, month=month, day=day)
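# Illustrative usage (a sketch, not part of the original snippet; assumes the
# `calendar` import the function depends on and the integer division above):
import datetime
assert add_months(datetime.date(2020, 1, 31), 1) == datetime.date(2020, 2, 29)    # day clamped
assert add_months(datetime.date(2020, 3, 15), -3) == datetime.date(2019, 12, 15)  # year rollover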
| 8,934
|
def create_plot(
file_names,
output_folder_name,
output_prefix,
gat_TATA_constitutive_output,
gat_TATA_variable_output,
palette,
variable1_name,
variable2_name,
):
"""import and process the raw outputs after running gat (Genomic association tester). Then create barplot of constitutive and variable gene TATA enrichment"""
# import gat output files as dfs
constitutive = pd.read_table(
gat_TATA_constitutive_output, sep="\t", header=0
)
variable = pd.read_table(gat_TATA_variable_output, sep="\t", header=0)
# merge dfs
merged = pd.concat([constitutive, variable], ignore_index=True)
# set style to ticks
sns.set(style="ticks", color_codes=True)
# set colour palette
colours = sns.color_palette(palette)
# bar chart, 95% confidence intervals
plot = sns.barplot(
x="annotation",
y="l2fold",
data=merged,
order=[variable1_name, variable2_name],
palette=colours,
)
plot.axhline(0, color="black")
plt.xlabel("Gene type")
plt.ylabel("Log2-fold enrichment over background").get_figure().savefig(
f"../../data/output/{file_names}/TATA/{output_folder_name}plots/{output_prefix}_log2fold.pdf",
format="pdf",
)
| 8,935
|
def install_plugins():
"""Check for existing plugins and removed if they exist..."""
vim_plugins = os.path.join(str(pathlib.Path.home()), '.vim', 'bundle', )
print("Removing existing bundles...")
subprocess.Popen(['rm', '-rf', vim_plugins]).wait()
print("Installing plugins...")
subprocess.Popen(['vim', '-c', ':PluginInstall', '-c', 'x', '-c', 'x']).wait()
| 8,936
|
def _decomposer_interp(fp, x=None, xp=None):
"""Do the actual interpolation for multiprocessing"""
return np.interp(x, xp, fp)
| 8,937
|
def create_nn(x, x_shape, is_training):
"""
Args:
x: input hits array
x_shape: input tensor shape for single event
is_training: placeholder for indicating train or valid/test phase
    Note: Only code in `create_nn` function scope will be extracted and saved
in model directory. It's important to provide all necessary imports
within.
"""
import tensorflow as tf
from nn4omtf import utils
import numpy as np
arr = [0, 5, 10, 15, 20, 25, 30]
out_sz = 2 * len(arr) + 1
in_sz = np.prod(x_shape)
hidden_layers = [128, 64, 64]
x = tf.reshape(x, [-1, in_sz])
for sz in hidden_layers:
# Pass is_training to setup batch normalization on these layers
x = utils.mk_fc_layer(x, sz, act_fn=tf.nn.relu, is_training=is_training)
logits = utils.mk_fc_layer(x, out_sz, is_training=is_training)
return logits, arr
| 8,938
|
def create_new_case(case_dir):
"""Creates new case directory"""
    # If the specified case directory already exists, remove the leftover
    # 'snappy' directory instead of refusing to overwrite (check disabled below)
    if os.path.exists(case_dir):
        call(["rm", "-r", "snappy"])
        #raise RuntimeError(
        #    'Refusing to write to existing path: {}'.format(case_dir)
        #)
# Create the case
return Case(case_dir)
| 8,939
|
def generate_outlier_bounds_iqr(df, column, multiplier=1.5):
"""
Takes in a dataframe, the column name, and can specify a multiplier (default=1.5). Returns the upper and lower bounds for the
values in that column that signify outliers.
"""
q1 = df[column].quantile(.25)
q3 = df[column].quantile(.75)
iqr = q3 - q1
upper = q3 + (multiplier * iqr)
lower = q1 - (multiplier * iqr)
return upper, lower
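# Illustrative usage (a sketch, not part of the original snippet):
import pandas as pd
_df = pd.DataFrame({"value": [1, 2, 3, 4, 100]})
upper, lower = generate_outlier_bounds_iqr(_df, "value")
# with the default 1.5 multiplier the fence is [-1, 7], so 100 is flagged
outliers = _df[(_df["value"] > upper) | (_df["value"] < lower)]
assert outliers["value"].tolist() == [100]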
| 8,940
|
def generate_data(Type):
"""
随机生成CAN帧中所包含的数据
:param Type: 需要生成数据的类型
:return: 生成的随机数据序列,长度为8,如['88', '77', '55', '44', '22', '11', '33'', '44']
"""
data = []
if Type == 1:
        # Generate feedback frame: cell 1-24 voltage information
standard_vol = 35
offset = random.randint(0, 15)
max_vol = standard_vol + offset
min_vol = standard_vol - offset // 2
data.append('44')
data.append(str(max_vol))
data.append('44')
data.append(str(min_vol))
offset = random.randint(0, 15)
max_vol = standard_vol + offset
min_vol = standard_vol - offset // 2
data.append('44')
data.append(str(max_vol))
data.append('44')
data.append(str(min_vol))
elif Type == 2:
        # Generate feedback frame: cell 1-8 temperature information
        standard_temp = 45
        offset = random.randint(0, 20)
        max_temp = standard_temp + offset
        min_temp = standard_temp - offset - 5
data.append(str(max_temp))
data.append('6c')
data.append(str(min_temp))
data.append('6c')
        offset = random.randint(0, 20)
        max_temp = standard_temp + offset
        min_temp = standard_temp - offset - 5
data.append(str(max_temp))
data.append('6c')
data.append(str(min_temp))
data.append('6c')
elif Type == 3:
        # Generate feedback frame: max/min cell voltage and temperature information
standard_vol = 35
standard_temp = 45
vol_offset = random.randint(0, 15)
temp_offset = random.randint(0, 20)
max_temp = standard_temp + temp_offset
min_temp = standard_temp - temp_offset - 5
max_vol = standard_vol + vol_offset
min_vol = standard_vol - vol_offset // 2
data.append('44')
data.append(str(max_vol))
data.append('44')
data.append((str(min_vol)))
data.append(str(max_temp))
data.append('5c')
data.append(str(min_temp))
data.append('5c')
elif Type == 4:
        # Generate periodic frame: system voltage information
standard_vol = 55
offset = random.randint(0, 10)
max_vol = standard_vol * offset * 10
min_vol = standard_vol - offset - 5
data.append('c5')
data.append(str(max_vol))
data.append('f2')
data.append(str(min_vol))
data.append('ed')
for i in range(3):
data.append(str(standard_vol + 5 * i))
elif Type == 5:
pass
else:
pass
return data
| 8,941
|
def counting_sort(array):
"""
SORTING FUNCTION USING COUNTING SORT ALGORITHM
ARG array = LIST(ARRAY) OF NUMBERS
"""
    ## counter list has one slot for every value from 0 to max(array)
maximum = max(array)
counter = [0]*(maximum+1)
for i in range(len(array)):
counter[array[i]] += 1
for i in range(1, maximum + 1):
counter[i] = counter[i] + counter[i-1]
#print_array(counter)
result = [0]*len(array)
for i in range(len(array)):
result[counter[array[i]] -1] = array[i]
counter[array[i]] -= 1
return result
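# Quick illustrative check (not part of the original snippet); note that the
# implementation assumes non-negative integers, since values index `counter`.
assert counting_sort([4, 2, 2, 8, 3, 3, 1]) == [1, 2, 2, 3, 3, 4, 8]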
| 8,942
|
def indexedcolor(i, num, npersat=15, lightness=60):
"""Returns an rgb color triplet for a given index, with a finite max 'num'.
Thus if you need 10 colors and want to get color #5, you would call this with (5, 10).
The colors are "repeatable".
"""
import math
from PIL import ImageColor
nsats = int(math.ceil(num/float(npersat)))
sat = 100 - int((i//npersat)*(100/nsats))
l = lightness
nhues = int(math.ceil(num/float(nsats)))
hue = (i % nhues) * (360//nhues)
#print >>sys.stderr, 'For i %d, num %d, got %d sats, %d hues -> %d, %d, %d' % (i, num, nsats, nhues, hue, sat, l)
return ImageColor.getrgb('hsl(%d,%d%%,%d%%)' % (hue, sat, l))
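# Illustrative usage (a sketch, not part of the original snippet; relies on
# Pillow, which the function imports internally): build a repeatable 10-colour
# palette of (r, g, b) triplets.
palette = [indexedcolor(i, 10) for i in range(10)]
assert all(len(c) == 3 for c in palette)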
| 8,943
|
def make_customer_satisfaction(branch_index='A'):
"""Create average customer satisfaction heat map"""
customer_satisfaction = make_heat_map(branch_index, 'mean(Rating)', 'Average Satisfaction')
return customer_satisfaction
| 8,944
|
def extract_infos(fpath):
"""Extract information about file"""
try:
pe = pefile.PE(fpath)
except pefile.PEFormatError:
return {}
res = {}
res['Machine'] = pe.FILE_HEADER.Machine
res['SizeOfOptionalHeader'] = pe.FILE_HEADER.SizeOfOptionalHeader
res['Characteristics'] = pe.FILE_HEADER.Characteristics
res['MajorLinkerVersion'] = pe.OPTIONAL_HEADER.MajorLinkerVersion
res['MinorLinkerVersion'] = pe.OPTIONAL_HEADER.MinorLinkerVersion
res['SizeOfCode'] = pe.OPTIONAL_HEADER.SizeOfCode
res['SizeOfInitializedData'] = pe.OPTIONAL_HEADER.SizeOfInitializedData
res['SizeOfUninitializedData'] = pe.OPTIONAL_HEADER.SizeOfUninitializedData
res['AddressOfEntryPoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
res['BaseOfCode'] = pe.OPTIONAL_HEADER.BaseOfCode
try:
res['BaseOfData'] = pe.OPTIONAL_HEADER.BaseOfData
except AttributeError:
res['BaseOfData'] = 0
res['ImageBase'] = pe.OPTIONAL_HEADER.ImageBase
res['SectionAlignment'] = pe.OPTIONAL_HEADER.SectionAlignment
res['FileAlignment'] = pe.OPTIONAL_HEADER.FileAlignment
res['MajorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MajorOperatingSystemVersion
res['MinorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MinorOperatingSystemVersion
res['MajorImageVersion'] = pe.OPTIONAL_HEADER.MajorImageVersion
res['MinorImageVersion'] = pe.OPTIONAL_HEADER.MinorImageVersion
res['MajorSubsystemVersion'] = pe.OPTIONAL_HEADER.MajorSubsystemVersion
res['MinorSubsystemVersion'] = pe.OPTIONAL_HEADER.MinorSubsystemVersion
res['SizeOfImage'] = pe.OPTIONAL_HEADER.SizeOfImage
res['SizeOfHeaders'] = pe.OPTIONAL_HEADER.SizeOfHeaders
res['CheckSum'] = pe.OPTIONAL_HEADER.CheckSum
res['Subsystem'] = pe.OPTIONAL_HEADER.Subsystem
res['DllCharacteristics'] = pe.OPTIONAL_HEADER.DllCharacteristics
res['SizeOfStackReserve'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
res['SizeOfStackCommit'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
res['SizeOfHeapReserve'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
res['SizeOfHeapCommit'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
res['LoaderFlags'] = pe.OPTIONAL_HEADER.LoaderFlags
res['NumberOfRvaAndSizes'] = pe.OPTIONAL_HEADER.NumberOfRvaAndSizes
# Sections
res['SectionsNb'] = len(pe.sections)
entropy = list(map(lambda x: x.get_entropy(), pe.sections))
res['SectionsMeanEntropy'] = sum(entropy) / float(len(entropy))
res['SectionsMinEntropy'] = min(entropy)
res['SectionsMaxEntropy'] = max(entropy)
raw_sizes = list(map(lambda x: x.SizeOfRawData, pe.sections))
res['SectionsMeanRawsize'] = sum(raw_sizes) / float(len(raw_sizes))
res['SectionsMinRawsize'] = min(raw_sizes)
res['SectionsMaxRawsize'] = max(raw_sizes)
virtual_sizes = list(map(lambda x: x.Misc_VirtualSize, pe.sections))
res['SectionsMeanVirtualsize'] = sum(
virtual_sizes) / float(len(virtual_sizes))
res['SectionsMinVirtualsize'] = min(virtual_sizes)
res['SectionMaxVirtualsize'] = max(virtual_sizes)
# Imports
try:
res['ImportsNbDLL'] = len(pe.DIRECTORY_ENTRY_IMPORT)
imports = sum([x.imports for x in pe.DIRECTORY_ENTRY_IMPORT], [])
res['ImportsNb'] = len(imports)
res['ImportsNbOrdinal'] = len(
list(filter(lambda x: x.name is None, imports)))
except AttributeError:
res['ImportsNbDLL'] = 0
res['ImportsNb'] = 0
res['ImportsNbOrdinal'] = 0
# Exports
try:
res['ExportNb'] = len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
except AttributeError:
# No export
res['ExportNb'] = 0
# Resources
resources = get_resources(pe)
res['ResourcesNb'] = len(resources)
if len(resources) > 0:
entropy = list(map(lambda x: x[0], resources))
res['ResourcesMeanEntropy'] = sum(entropy) / float(len(entropy))
res['ResourcesMinEntropy'] = min(entropy)
res['ResourcesMaxEntropy'] = max(entropy)
sizes = list(map(lambda x: x[1], resources))
res['ResourcesMeanSize'] = sum(sizes) / float(len(sizes))
res['ResourcesMinSize'] = min(sizes)
res['ResourcesMaxSize'] = max(sizes)
else:
res['ResourcesNb'] = 0
res['ResourcesMeanEntropy'] = 0
res['ResourcesMinEntropy'] = 0
res['ResourcesMaxEntropy'] = 0
res['ResourcesMeanSize'] = 0
res['ResourcesMinSize'] = 0
res['ResourcesMaxSize'] = 0
# Load configuration size
try:
res['LoadConfigurationSize'] = pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.Size
except AttributeError:
res['LoadConfigurationSize'] = 0
# Version configuration size
try:
version_infos = get_version_info(pe)
res['VersionInformationSize'] = len(version_infos.keys())
except AttributeError:
res['VersionInformationSize'] = 0
return res
| 8,945
|
def calibrate_eye_in_hand(calibration_inputs):
"""Perform eye-in-hand calibration.
Args:
calibration_inputs: List of HandEyeInput
Returns:
A HandEyeOutput instance containing the eye-in-hand transform
"""
return HandEyeOutput(
_zivid.calibration.calibrate_eye_in_hand(
[
calibration_input._HandEyeInput__impl # pylint: disable=protected-access
for calibration_input in calibration_inputs
]
)
)
| 8,946
|
def get_intersect(A: np.ndarray, B: np.ndarray, C: np.ndarray, D: np.ndarray) -> Optional[np.ndarray]:
"""
    Get the intersection of [A, B] and [C, D]. Returns None if the segments don't cross.
:param A: Point of the first segment
:param B: Point of the first segment
:param C: Point of the second segment
:param D: Point of the second segment
:return: The intersection if any, otherwise None.
"""
det = (B[0] - A[0]) * (C[1] - D[1]) - (C[0] - D[0]) * (B[1] - A[1])
if det == 0:
# Parallel
return None
else:
t1 = ((C[0] - A[0]) * (C[1] - D[1]) - (C[0] - D[0]) * (C[1] - A[1])) / det
t2 = ((B[0] - A[0]) * (C[1] - A[1]) - (C[0] - A[0]) * (B[1] - A[1])) / det
if t1 > 1 or t1 < 0 or t2 > 1 or t2 < 0:
# not intersect
return None
else:
xi = A[0] + t1 * (B[0] - A[0])
yi = A[1] + t1 * (B[1] - A[1])
return np.array([xi, yi])
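# Illustrative usage (a sketch, not part of the original snippet): the two unit
# diagonals cross at (0.5, 0.5), while parallel segments return None.
import numpy as np
_p = get_intersect(np.array([0.0, 0.0]), np.array([1.0, 1.0]),
                   np.array([0.0, 1.0]), np.array([1.0, 0.0]))
assert np.allclose(_p, [0.5, 0.5])
assert get_intersect(np.array([0.0, 0.0]), np.array([1.0, 0.0]),
                     np.array([0.0, 1.0]), np.array([1.0, 1.0])) is None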
| 8,947
|
def _make_vector_laplace_scipy_nd(bcs: Boundaries) -> Callable:
""" make a vector Laplacian using the scipy module
This only supports uniform discretizations.
Args:
bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
|Arg_boundary_conditions|
Returns:
A function that can be applied to an array of values
"""
scaling = bcs._uniform_discretization**-2
args = bcs._scipy_border_mode
dim = bcs.grid.dim
shape_out = (dim,) + bcs.grid.shape
def vector_laplace(arr, out=None):
""" apply vector Laplacian operator to array `arr` """
if out is None:
out = np.empty(shape_out)
for i in range(dim):
ndimage.laplace(arr[i], output=out[i], **args)
return out * scaling
return vector_laplace
| 8,948
|
def dot_to_dict(values):
"""Convert dot notation to a dict. For example: ["token.pos", "token._.xyz"]
become {"token": {"pos": True, "_": {"xyz": True }}}.
values (iterable): The values to convert.
RETURNS (dict): The converted values.
"""
result = {}
for value in values:
path = result
parts = value.lower().split(".")
for i, item in enumerate(parts):
is_last = i == len(parts) - 1
path = path.setdefault(item, True if is_last else {})
return result
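# Illustrative check mirroring the docstring example (not part of the original snippet):
assert dot_to_dict(["token.pos", "token._.xyz"]) == \
    {"token": {"pos": True, "_": {"xyz": True}}}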
| 8,949
|
def _draw_edges(G, pos, nodes, ax):
"""Draw the edges of a (small) networkx graph.
Params:
G (nx.classes.*) a networkx graph.
pos (dict) returned by nx.layout methods.
nodes (dict) of Circle patches.
ax (AxesSubplot) mpl axe.
Return:
(dict) of Circle patches.
"""
pointer = ArrowStyle.Fancy(head_width=10, head_length=15)
curved_edge = ConnectionStyle('arc3', rad=.2)
arrow_kwargs = {'arrowstyle': pointer,
'antialiased': True,
'connectionstyle': curved_edge,
'edgecolor': None,
'facecolor': None,
'linewidth': None}
edges = {}
for i, (a, b, attr) in enumerate(G.edges.data()):
arrow_kwargs['edgecolor'] = attr['color']
arrow_kwargs['facecolor'] = attr['color']
arrow_kwargs['linewidth'] = 1.0
edge = FancyArrowPatch(pos[a], pos[b],
patchA=nodes[a], patchB=nodes[b],
shrinkA=5, shrinkB=5,
**arrow_kwargs)
ax.add_patch(edge)
edges[(a, b)] = edge
return edges
| 8,950
|
def benjamini_hochberg_stepup(p_vals):
"""
Given a list of p-values, apply FDR correction and return the q values.
"""
# sort the p_values, but keep the index listed
index = [i[0] for i in sorted(enumerate(p_vals), key=lambda x:x[1])]
# keep the p_values sorted
p_vals = sorted(p_vals)
q_vals = [None]*len(p_vals) # initialize an empty list
prev_q = 0
# BH Step Up begins here.
for i, p in enumerate(p_vals):
q = len(p_vals)/(i+1)*p # calculate the q_value for the current point
q = min(q, 1) # if q >1, make it == 1
q = max(q, prev_q) # preserve monotonicity
q_vals[i] = q # store the q_value
prev_q = q # update the previous q_value
# prevent the lowest q value from going to zero
if np.sum(q_vals == 0) > 0:
# set the min q-value to 10x less than the smallest non-zero value
q_vals[np.where(q_vals == 0)] = np.min(q_vals[np.where(q_vals != 0)])/10
# return q_vals and the index so we can match up each q-value to its index
return q_vals, index
| 8,951
|
def test_3():
"""
Test PCE coefficients w/ lasso
"""
polynomial_basis = TensorProductBasis(dist, max_degree)
lasso = LassoRegression()
pce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=lasso)
pce.fit(x, y)
assert round(pce.coefficients[0][0], 4) == 0.0004
| 8,952
|
def remove_transcription_site(rna, foci, nuc_mask, ndim):
"""Distinguish RNA molecules detected in a transcription site from the
rest.
A transcription site is defined as as a foci detected within the nucleus.
Parameters
----------
rna : np.ndarray, np.int64
Coordinates of the detected RNAs with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
plus the index of the foci assigned to the RNA. If no foci was
assigned, value is -1.
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
dimension for the foci centroid (zyx or yx coordinates),
the number of RNAs detected in the foci and its index.
nuc_mask : np.ndarray, bool
Binary mask of the nuclei region with shape (y, x).
ndim : int
Number of spatial dimensions to consider (2 or 3).
Returns
-------
rna_out_ts : np.ndarray, np.int64
Coordinates of the detected RNAs with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
plus the index of the foci assigned to the RNA. If no foci was
assigned, value is -1. RNAs from transcription sites are removed.
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
dimension for the foci centroid (zyx or yx coordinates),
the number of RNAs detected in the foci and its index.
ts : np.ndarray, np.int64
Array with shape (nb_ts, 5) or (nb_ts, 4). One coordinate per
dimension for the transcription site centroid (zyx or yx coordinates),
the number of RNAs detected in the transcription site and its index.
"""
# check parameters
check_array(rna,
ndim=2,
dtype=np.int64)
# discriminate foci from transcription sites
ts, foci = identify_objects_in_region(
nuc_mask, foci, ndim)
# filter out rna from transcription sites
rna_in_ts = ts[:, ndim + 1]
mask_rna_in_ts = np.isin(rna[:, ndim], rna_in_ts)
rna_out_ts = rna[~mask_rna_in_ts]
return rna_out_ts, foci, ts
| 8,953
|
def make_div_interp(dirname, profs, pout, gui=False):
"""Interpolate each of the given profiles to the corresponding new levels
in list p and write out."""
make_dirs(dirname, profs['nprof'], gui)
for p in range(profs['nprof']):
if gui:
proffn = dirname + '/{:03d}.py'.format(p + 1)
write_list(proffn, pout[p], True, 'P')
else:
profdir = dirname + '/{:03d}/atm/'.format(p + 1)
write_list(profdir + 'p.txt', pout[p])
logpin = np.log(profs[p]['p'])
logpout = np.log(pout[p])
for v in PROF_LIST:
if v in profs[p]:
dout = np.interp(x=logpout, xp=logpin, fp=profs[p][v]) #, left=-999., right=-999.)
if gui:
gn = v.upper()
write_list(proffn, dout, True, gn)
else:
write_list(profdir + v + '.txt', dout)
write_common(profs, dirname, gui)
| 8,954
|
def hexagonal_numbers(length: int) -> list[int]:
"""
    :param length: max number of elements
    :type length: int
:return: Hexagonal numbers as a list
Tests:
>>> hexagonal_numbers(10)
[0, 1, 6, 15, 28, 45, 66, 91, 120, 153]
>>> hexagonal_numbers(5)
[0, 1, 6, 15, 28]
>>> hexagonal_numbers(0)
Traceback (most recent call last):
...
ValueError: Length must be a positive integer.
"""
if length <= 0 or not isinstance(length, int):
raise ValueError("Length must be a positive integer.")
return [n * (2 * n - 1) for n in range(length)]
| 8,955
|
def _meta_model_test():
"""
1. input: [b, c, h, w]
2. get weight and bias like `maml`
3. return : [batch_size, num_classes]
"""
import torch
input = torch.rand(32, 3, 84, 84)
model = MetaConvModel(3, 5, hidden_size=64, feature_size=5 * 5 * 64, embedding=True)
out = model(input)
print(out.shape)
| 8,956
|
def get_free_remote_port(node: Node) -> int:
"""Returns a free remote port.
Uses a Python snippet to determine a free port by binding a socket
to port 0 and immediately releasing it.
:param node: Node to find a port on.
"""
output = node.run("python -c 'import socket; s=socket.socket();"
" s.bind((str(), 0)); print(s.getsockname()[1]);"
" s.close()'")
return int(output)
| 8,957
|
def read_conll_data(data_file_path: str) -> Tuple[List[Sentence], List[DependencyTree]]:
"""
Reads Sentences and Trees from a CONLL formatted data file.
Parameters
----------
data_file_path : ``str``
Path to data to be read.
"""
sentences: List[Sentence] = []
trees: List[DependencyTree] = []
with open(data_file_path, 'r') as file:
sentence_tokens = []
tree = DependencyTree()
for line in tqdm(file):
line = line.strip()
array = line.split('\t')
if len(array) < 10:
if sentence_tokens:
trees.append(tree)
sentences.append(sentence_tokens)
tree = DependencyTree()
sentence_tokens = []
else:
word = array[1]
pos = array[4]
head = int(array[6])
dep_type = array[7]
token = Token(word=word, pos=pos,
head=head, dep_type=dep_type)
sentence_tokens.append(token)
tree.add(head, dep_type)
if not sentences:
raise Exception(f"No sentences read from {data_file_path}. "
f"Make sure you have not replaced tabs with spaces "
f"in conll formatted file by mistake.")
return sentences, trees
| 8,958
|
def get_algo_meta(name: str) -> Optional[AlgoMeta]:
"""
Get meta information of a built-in or registered algorithm.
Return None if not found.
"""
for algo in get_all_algo_meta():
if algo.name == name:
return algo
return None
| 8,959
|
def do_eval(sess,input_ids,input_mask,segment_ids,label_ids,is_training,loss,probabilities,vaildX, vaildY, num_labels,batch_size,cls_id):
"""
evalution on model using validation data
"""
num_eval=1000
vaildX = vaildX[0:num_eval]
vaildY = vaildY[0:num_eval]
number_examples = len(vaildX)
eval_loss, eval_counter, eval_f1_score, eval_p, eval_r = 0.0, 0, 0.0, 0.0, 0.0
label_dict = init_label_dict(num_labels)
print("do_eval.number_examples:",number_examples)
f1_score_micro_sklearn_total=0.0
# batch_size=1 # TODO
for start, end in zip(range(0, number_examples, batch_size), range(batch_size, number_examples, batch_size)):
input_mask_, segment_ids_, input_ids_ = get_input_mask_segment_ids(vaildX[start:end],cls_id)
feed_dict = {input_ids: input_ids_,input_mask:input_mask_,segment_ids:segment_ids_,
label_ids:vaildY[start:end],is_training:False}
curr_eval_loss, prob = sess.run([loss, probabilities],feed_dict)
target_labels=get_target_label_short_batch(vaildY[start:end])
predict_labels=get_label_using_logits_batch(prob)
if start%100==0:
print("prob.shape:",prob.shape,";prob:",prob)
print("predict_labels:",predict_labels)
#print("predict_labels:",predict_labels)
label_dict=compute_confuse_matrix_batch(target_labels,predict_labels,label_dict,name='bert')
eval_loss, eval_counter = eval_loss + curr_eval_loss, eval_counter + 1
    f1_micro, f1_macro = compute_micro_macro(label_dict)  # label_dict is a dict: key is accusation, value is (TP, FP, FN), where TP is the number of true positives
f1_score_result = (f1_micro + f1_macro) / 2.0
return eval_loss / float(eval_counter+0.00001), f1_score_result, f1_micro, f1_macro
| 8,960
|
def read_gbt_target(sdfitsfile, objectname, verbose=False):
"""
Give an object name, get all observations of that object as an 'obsblock'
"""
bintable = _get_bintable(sdfitsfile)
whobject = bintable.data['OBJECT'] == objectname
if verbose:
print("Number of individual scans for Object %s: %i" % (objectname,whobject.sum()))
calON = bintable.data['CAL'] == 'T'
# HACK: apparently bintable.data can sometimes treat itself as scalar...
if np.isscalar(calON):
calON = np.array([(val in ['T',True]) for val in bintable.data['CAL']])
n_nods = np.unique(bintable.data['PROCSIZE'])
blocks = {}
for sampler in np.unique(bintable.data[whobject]['SAMPLER']):
whsampler = bintable.data['SAMPLER'] == sampler
nods = np.unique(bintable.data['PROCSEQN'][whsampler*whobject])
for nod in nods:
whnod = bintable.data['PROCSEQN'] == nod
for onoff in ('ON','OFF'):
                calOK = (calON != (onoff == 'OFF'))  # boolean XOR; numpy forbids boolean subtraction
whOK = (whobject*whsampler*calOK*whnod)
if whOK.sum() == 0:
continue
if verbose:
print("Number of spectra for sampler %s, nod %i, cal%s: %i" % (sampler,nod,onoff,whOK.sum()))
crvals = bintable.data[whOK]['CRVAL1']
if len(crvals) > 1:
maxdiff = np.diff(crvals).max()
else:
maxdiff = 0
freqres = np.max(bintable.data[whOK]['FREQRES'])
if maxdiff < freqres:
splist = [read_gbt_scan(bintable,ii) for ii in np.where(whOK)[0]]
blocks[sampler+onoff+str(nod)] = pyspeckit.ObsBlock(splist,force=True)
blocks[sampler+onoff+str(nod)]._arithmetic_threshold = np.diff(blocks[sampler+onoff+str(nod)].xarr).min() / 5.
else:
print("Maximum frequency difference > frequency resolution: %f > %f" % (maxdiff, freqres))
return blocks
| 8,961
|
def regression_target(label_name=None,
weight_column_name=None,
target_dimension=1):
"""Creates a _TargetColumn for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
target_dimension: dimension of the target for multilabels.
Returns:
An instance of _TargetColumn
"""
return _RegressionTargetColumn(loss_fn=_mean_squared_loss,
label_name=label_name,
weight_column_name=weight_column_name,
target_dimension=target_dimension)
| 8,962
|
def write_bin(f, pixel_list, width):
"""Save image in MONO_HMSB format."""
index = 0
list_bytes = []
image_byte = 0
windex = 0
for pix in pixel_list:
image_byte = set_bit(image_byte, index, pix > 0)
index += 1
windex += 1
if index > 7 or windex >= width:
list_bytes.append(image_byte)
image_byte = 0
index = 0
if windex >= width:
windex = 0
f.write(bytearray(list_bytes))
| 8,963
|
def rdoublegauss(mu1, mu2, sigma1, sigma2, ratio, size=None):
"""random variable from double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
R = np.asarray(np.random.random(size))
Rshape = R.shape
    R = np.atleast_1d(R)
mask1 = (R < r1)
mask2 = ~mask1
N1 = mask1.sum()
N2 = R.size - N1
R[mask1] = norm(mu1, sigma1).rvs(N1)
R[mask2] = norm(mu2, sigma2).rvs(N2)
return R.reshape(Rshape)
| 8,964
|
def check_add_role(store, id, name):
""" Checks if role exist and then adds record if it doesn't """
role = store.find_role(name)
if role == None:
return store.create_role(id=id, name=name)
else:
return role
| 8,965
|
def report_map():
"""
update DB with new version of a container instance's id map
:return: str. 'true' if successful
"""
if not request.json:
logger.error('received non-json data')
abort(400)
logger.info('Received map update from {}'.format(request.remote_addr))
logger.debug('Map update {}'.format(request.json))
_map = request.json
    for k, v in _map.items():
container_attributes = copy.deepcopy(v)
try:
container_attributes['cadvisor_url'] = \
"http://{}:9070/docker/{}".format(
container_attributes['instance_ip'],
container_attributes['container_id'])
container_attributes['graylog_url'] = \
settings.graylog_url.format(graylog_fqdn=settings.graylog_fqdn,
container_id=container_attributes['container_id'][:12])
except KeyError as e:
logger.error('Unable to find keys in response: {}'.format(e))
_map[k] = container_attributes
db.batch_put(_map, 'ecs_id_mapper_hash')
return 'true'
| 8,966
|
def delta_path(base_path: Path, item_path: Path, new_base_path: Path) -> Path:
"""
Removes a base path from an item, and appends result to a new path
:param base_path: The :py:class:`pathlib.Path` to be removed from `item_path`
:param item_path: The :py:class:`pathlib.Path` to be delta-ed
:param new_base_path: The new base :py:class:`pathlib.Path` for `item_path`.
:raises ValueError: If base_path is not a sub-path of item_path.
:return: The new combined path.
"""
path_stub = item_path.relative_to(base_path)
new_item_path = new_base_path / path_stub
return new_item_path
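# Illustrative usage (a sketch, not part of the original snippet): rebase a
# file from one directory tree onto another.
from pathlib import Path
assert delta_path(Path("/src"), Path("/src/pkg/mod.py"), Path("/build")) == \
    Path("/build/pkg/mod.py")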
| 8,967
|
async def async_validate_config(hass, config):
"""Validate config."""
automations = list(
filter(
lambda x: x is not None,
await asyncio.gather(
*(
_try_async_validate_config_item(hass, p_config, config)
for _, p_config in config_per_platform(config, DOMAIN)
)
),
)
)
# Create a copy of the configuration with all config for current
# component removed and add validated config back in.
config = config_without_domain(config, DOMAIN)
config[DOMAIN] = automations
return config
| 8,968
|
def vm_sanity_check():
"""
Handles periodic VM sanity check
Invoked when scheduler runs task of type 'vm_sanity'
"""
logger.info("ENTERNING VM SANITY CHECK........")
try:
check_vm_sanity()
except:
log_exception()
pass
finally:
logger.debug("EXITING VM SANITY CHECK........")
| 8,969
|
def judge_result(problem_id, commit_id, data_num):
"""对输出数据进行评测"""
logging.debug("Judging result")
correct_result = os.path.join(
data_dir, str(problem_id), 'data%s.out' %
data_num)
user_result = os.path.join(
work_dir, str(commit_id), 'out%s.txt' %
data_num)
try:
        # strip '\r' and trailing whitespace/newlines
        correct = open(correct_result).read().replace('\r', '').rstrip()
        user = open(user_result).read().replace('\r', '').rstrip()
except:
return False
    if correct == user:  # exactly identical: AC
        return "Accepted"
    if correct.split() == user.split():  # identical once spaces, tabs and newlines are removed: PE
        return "Presentation Error"
    if correct in user:  # extra output
        return "Output limit"
return "Wrong Answer"
| 8,970
|
def _fwd6(y, dt): # pragma: no cover
"""Compute the first derivative of a uniformly-spaced-in-time array with a
sixth-order forward difference scheme.
Parameters
----------
    y : (7,...) ndarray
        Data to differentiate. The derivative is taken along the first axis.
    dt : float
        Uniform time step between the samples of y.
Returns
-------
dy0 : float or (...) ndarray
Approximate derivative of y at the first entry, i.e., dy[0] / dt.
"""
return (-147*y[0] + 360*y[1] - 450*y[2] + 400*y[3] - 225*y[4] \
+ 72*y[5] - 10*y[6]) / (60*dt)
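# Quick numerical sanity check (illustrative, not part of the original snippet):
# a 7-point, sixth-order scheme differentiates a cubic exactly (up to round-off).
import numpy as np
_dt = 0.5
_t = np.arange(7) * _dt
_y = _t**3 - 2.0 * _t + 5.0            # derivative at t = 0 is -2
assert abs(_fwd6(_y, _dt) - (-2.0)) < 1e-9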
| 8,971
|
def minimize_newton_cg(nrgs, x0, num_params):
"""
    Minimizes a structure using a Newton-CG method. This requires a
hopefully fully invertible analytic Hessian that will be used
to minimize geometries.
Parameters
----------
nrgs: [list of functionals]
Energy functions used to compute the energy, hessian, and mixed partials.
x0: np.array
Structure of the molecule to be minimized.
num_params: int
total number of parameters of the model. (ytz): this should be refactored out.
"""
assert x0.shape[1] == 3
N = x0.shape[0]
def hessian(conf):
conf = conf.reshape((N,3))
hess = None
for e in nrgs:
_, _, test_hessians, _ = e.total_derivative(conf, num_params)
if hess is None:
hess = test_hessians
else:
hess += test_hessians
return hess.reshape((N*3, N*3))
def gradient(conf):
conf = conf.reshape((N,3))
grads = np.zeros_like(conf)
for e in nrgs:
_, test_grads, _, _ = e.total_derivative(conf, num_params)
grads += test_grads
return grads.reshape(-1)
def energy(conf):
conf = conf.reshape((N,3))
nrg = 0
for e in nrgs:
test_nrg, _, _, _ = e.total_derivative(conf, num_params)
nrg += test_nrg
return nrg
res = minimize(
energy,
x0.reshape(-1),
# method='Newton-CG',
method='L-BFGS-B',
jac=gradient,
# hess=hessian,
# options={'xtol': 1e-8, 'disp': False}
)
# print("before and after")
# print(x0)
# print(np.array(res.x).reshape((N,3)))
return res.x.reshape((N,3))
# print(energy(x0), gradient(x0), hessian(x0).shape)
| 8,972
|
def currency(price, currency):
"""
Returns price in currency format
"""
price = float(price)
price *= float(currency.exchange_rate)
try:
return currency.display_format.format(price)
except Exception as e:
        raise ImproperlyConfigured('Invalid currency format string: "%s" for currency "%s". %s' % (currency.display_format, currency.name, e))
| 8,973
|
def project_point(x, R, T, f, c, k, p):
"""
Args
x: Nx3 points in world coordinates
R: 3x3 Camera rotation matrix
T: 3x1 Camera translation parameters
f: 2x1 Camera focal length
c: 2x1 Camera center
k: 3x1 Camera radial distortion coefficients
p: 2x1 Camera tangential distortion coefficients
Returns
ypixel.T: Nx2 points in pixel space
depth: N points
"""
if 'aist' in config.DATASET.TEST_DATASET:
xcam = R.dot(x.T) + T # [B, 3, PJ]
else:
xcam = R.dot(x.T - T) # [B, 3, PJ]
y = xcam[:2] / (xcam[2] + 1e-5)
# === add camera distortion
r = np.sum(y ** 2, axis=0)
d = 1 + k[0] * r + k[1] * r * r + k[2] * r * r * r
u = y[0, :] * d + 2 * p[0] * y[0, :] * y[1, :] + p[1] * (r + 2 * y[0, :] * y[0, :])
v = y[1, :] * d + 2 * p[1] * y[0, :] * y[1, :] + p[0] * (r + 2 * y[1, :] * y[1, :])
y[0, :] = u
y[1, :] = v
ypixel = np.multiply(f, y) + c
depth = xcam[2]
return ypixel.T, depth
| 8,974
|
def sum_to_scalar(*args):
"""Adding losses/nmsks together that were evaluated in parallel"""
new_args = list()
for arg in args:
new_args.append({k: v.sum() for (k, v) in arg.items()})
return new_args
| 8,975
|
def overlay_test():
"""Demonstrates color fading in current overlay technique (mean).
Suggestion: use addition instead of averaging colors.
Update: Average wasn't the problem- no difference in fact due to
normalization. Will revert to that method.
    Suggestion: Maybe a different color space?"""
infolder = "../data/in/Training/"
outfolder = f"../data/out/{time.time()}"
print(f"Creating outfolder: {outfolder}")
os.makedirs(outfolder)
paths = glob.glob(os.path.join(infolder, "Training_*.jpg"))
images = [dm.Image(path) for path in paths]
od_stack = [ci.scale_by_max(img.od.reshape(img.shape)[:1000,:1000], 255).astype(np.uint8) for img in images]
ol_stack = []
for i in range(1, len(od_stack)-1):
ol_stack.append(viz.overlay(od_stack[:i]))
for i, im in enumerate(ol_stack):
plt.imshow(im)
plt.suptitle(f"ol_{i}")
plt.show()
cv2.imwrite(os.path.join(outfolder, str(i) + ".jpg"), im)
print("Done")
| 8,976
|
def test_valid_certificate_200(client):
"""Test that a request for a valid certificate with signatories results in a 200"""
certificate = MicromastersCourseCertificateFactory.create()
signatory = CourseCertificateSignatoriesFactory.create(course=certificate.course)
resp = client.get(certificate_url(certificate.hash))
assert resp.status_code == status.HTTP_200_OK
assert is_subset_dict(
{
'certificate_hash': certificate.hash,
'course_title': certificate.course.title,
'program_title': certificate.course.program.title,
'name': certificate.user.profile.full_name,
'signatories': [signatory],
'certificate': certificate
},
resp.context_data
)
assert reverse('certificate', args=[certificate.hash]) in resp.content.decode('utf-8')
| 8,977
|
def inv(h_array: np.ndarray) -> np.ndarray:
"""
    Calculate the pseudo-inverse (pinvh) of a PSD array. Note that pinvh
    performs poorly if the input matrix is far from Hermitian, so pinv2 is
    used instead in that case.
Parameters:
----------
h_array : input matrix, assume to be Hermitian
Returns:
----------
h_inv : pseudo inverse of h_array.
"""
if np.allclose(h_array, h_array.T):
h_inv = linalg.pinvh(h_array)
else:
h_inv = linalg.pinv2(h_array)
return h_inv
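# Minimal usage sketch, assuming the module-level numpy/scipy imports this
# function already relies on: for a symmetric (possibly rank-deficient) PSD
# matrix the pinvh branch is taken, and the result satisfies A @ inv(A) @ A == A.
_B = np.random.default_rng(0).standard_normal((4, 3))
_A = _B @ _B.T  # symmetric PSD, rank 3
assert np.allclose(_A @ inv(_A) @ _A, _A)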
| 8,978
|
def update_risk_cavs(connection):
"""Parse cavs from html to markdown.
Args:
connection: SQLAlchemy connection.
Returns:
    ids of risks for which cavs were updated.
"""
cavs_data = connection.execute(
sa.text("""
SELECT cav.id, cav.attribute_value, cav.attributable_id
FROM custom_attribute_values AS cav
JOIN custom_attribute_definitions AS cad
ON cad.id = cav.custom_attribute_id
WHERE cad.definition_type = "risk"
AND attribute_value REGEXP :reg_exp
"""),
reg_exp=REGEX_HTML
).fetchall()
risks_ids = {data[2] for data in cavs_data}
cavs_ids = {data[0] for data in cavs_data}
cavs_table = sa.sql.table(
'custom_attribute_values',
sa.Column('id', sa.Integer()),
sa.Column('attribute_value', sa.Text, nullable=False),
sa.Column('updated_at', sa.DateTime, nullable=False),
)
for cav_id, attribute_value, _ in cavs_data:
op.execute(cavs_table.update().values(
attribute_value=parse_html(attribute_value),
updated_at=datetime.datetime.utcnow(),
).where(cavs_table.c.id == cav_id))
utils.add_to_objects_without_revisions_bulk(
connection, cavs_ids, "CustomAttributeValue", "modified",
)
return risks_ids
| 8,979
|
def prefix_to_number(prefix):
"""Return the number of the prefix."""
if prefix in PREFIXES:
return PREFIXES[prefix]
raise ValueError(f'prefix "{prefix}" not found in list of prefixes')
| 8,980
|
def package():
"""
Do nothing -- this group should never be called without a sub-command.
"""
pass
| 8,981
|
def is_response_going_to_be_used(request, spider):
"""Check whether the request's response is going to be used."""
callback = get_callback(request, spider)
if is_callback_using_response(callback):
return True
for provider in discover_callback_providers(callback):
if is_provider_using_response(provider):
return True
return False
| 8,982
|
def eval_market1501(distmat, q_vids, g_vids, q_camids, g_camids, max_rank=50):
"""Evaluation with Market1501 metrics
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print(f"Note: number of gallery samples is quite small, got {num_g}")
indices = np.argsort(distmat, axis=1)
matches = (g_vids[indices] == q_vids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
all_cmc = []
all_ap = []
num_valid_q = 0. # number of valid query
for q_idx in range(num_q):
# get query vid and camid
q_vid = q_vids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples that have the same vid and camid with query
order = indices[q_idx]
remove = (g_vids[order] == q_vid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
# compute cmc curve
# binary vector, positions with value 1 are correct matches
orig_cmc = matches[q_idx][keep]
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = orig_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
# compute average precision
# https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = orig_cmc.sum()
tmp_cmc = orig_cmc.cumsum()
tmp_cmc = [x / (i+1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
ap_ = tmp_cmc.sum() / num_rel
all_ap.append(ap_)
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = np.array(all_cmc, dtype=np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
map_ = np.mean(all_ap)
return all_cmc, map_
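# Minimal usage sketch on a tiny synthetic problem: two query identities, four
# gallery images, all captured by a different camera than the queries so no
# gallery sample is filtered out. Assumes the module-level `import numpy as np`.
_distmat = np.array([[0.1, 0.9, 0.2, 0.8],
                     [0.7, 0.1, 0.9, 0.8]])
_cmc, _map = eval_market1501(_distmat,
                             q_vids=np.array([1, 2]), g_vids=np.array([1, 2, 1, 3]),
                             q_camids=np.array([0, 0]), g_camids=np.array([1, 1, 1, 1]))
assert np.isclose(_cmc[0], 1.0) and np.isclose(_map, 1.0)  # both queries match at rank 1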
| 8,983
|
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
    The method returns None if there's a parsing error.
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
#ToDo: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
#get all branch dir names
branch_dir_names = [os.path.abspath(d)
for d in glob.glob("{i}/branch_*"
.format(i=dir_name))
if os.path.isdir(d)]
#sort by the directory name (e.g, branch_10)
sort_by = lambda x: int(x.split("_")[-1])
sorted_branch_dir_names = sorted(branch_dir_names, key=sort_by)
# populate branches with Bandstructure instances
branches = []
for dir_name in sorted_branch_dir_names:
xml_file = os.path.join(dir_name, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.append(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn("Skipping {}. Unable to find {}"
.format(d=dir_name, f=xml_file))
return get_reconstructed_band_structure(branches, efermi)
else:
xml_file = os.path.join(dir_name, "vasprun.xml")
#Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections)\
.get_band_structure(kpoints_filename=None, efermi=efermi)
else:
return None
| 8,984
|
def pytype_raise():
"""A pytest.raises wrapper for catching TypeErrors.
Parameters
----------
match : str, default=None
Regular expression to match exception error text against.
Returns
-------
RaisesContext
pytest context manager for catching exception-raising blocks.
"""
def _pytype_raise(match=None):
return pytest.raises(TypeError, match=match)
_pytype_raise.__doc__ = pyvalue_raise.__doc__
return _pytype_raise
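# Minimal usage sketch (relies on the sibling pyvalue_raise helper defined
# elsewhere in this module, whose docstring is copied above): the factory
# returns a callable wrapping pytest.raises(TypeError, ...), so it can also be
# exercised directly outside a fixture.
_raises_type_error = pytype_raise()
with _raises_type_error(match="unsupported operand"):
    1 + "a"  # adding int and str raises TypeError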
| 8,985
|
def delete_dir(dir_path, recursive=False):
"""
    Delete a directory.
    :param dir_path: path of the directory to delete
    :param recursive: if True, recurse into and delete subdirectories first
    :return: None
"""
if os.path.exists(dir_path):
if recursive:
fps = get_sub_files(dir_path)
for sub_dir in fps:
if os.path.isdir(sub_dir):
delete_dir(sub_dir, recursive)
shutil.rmtree(dir_path)
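# Minimal usage sketch: shutil.rmtree already removes nested content, so the
# non-recursive call is enough to delete a populated directory tree.
import os
import tempfile
_tmp = os.path.join(tempfile.mkdtemp(), "to_delete")
os.makedirs(os.path.join(_tmp, "nested"))
delete_dir(_tmp)
assert not os.path.exists(_tmp)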
| 8,986
|
def test_block_average_save():
"""Test the save of a file."""
block = exma.electrochemistry.statistics.BlockAverage(
[3.14, 3.15, 3.13, 3.13, 3.15, 3.15, 3.16, 3.12]
)
block.calculate()
block.save()
with open("block_average.csv", "r") as fin:
readed = fin.read()
os.remove("block_average.csv")
assert readed == (
"data_size,mean,var,varerr\n"
"8,3.141250e+00,2.299107e-05,1.228924e-05\n"
"4,3.141250e+00,2.656250e-05,2.168819e-05\n"
)
| 8,987
|
def test_batch_08():
"""
Test batch: num_parallel_workers=1, drop_remainder default
"""
logger.info("test_batch_08")
# define parameters
batch_size = 6
num_parallel_workers = 1
# apply dataset operations
data1 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES)
data1 = data1.batch(batch_size, num_parallel_workers=num_parallel_workers)
assert sum([1 for _ in data1]) == 2
filename = "batch_08_result.npz"
save_and_check_dict(data1, filename, generate_golden=GENERATE_GOLDEN)
| 8,988
|
def lift_split_buffers(lines):
"""Lift the split buffers in the program
For each module, if we find any split buffers with the name "buf_data_split",
we will lift them out of the for loops and put them in the variable declaration
section at the beginning of the module.
Parameters
----------
lines:
contains the codelines of the program
"""
code_len = len(lines)
for pos in range(code_len):
line = lines[pos]
if line.find("variable=buf_data_split") != -1:
# Search for the variable declaration section
decl_pos = -1
prev_pos = pos - 1
while prev_pos >= 0:
prev_line = lines[prev_pos]
if prev_line.find("Variable Declaration") != -1:
decl_pos = prev_pos
break
prev_pos -= 1
# Move the two code lines at [pos - 1] and [pos] to [decl_pos] and
# [decl_pos + 1]
indent = lines[decl_pos].find("/*")
line1 = " " * indent + lines[pos - 1].lstrip()
line2 = " " * indent + lines[pos].lstrip()
del lines[pos - 1]
del lines[pos - 1]
lines.insert(decl_pos, line1)
lines.insert(decl_pos + 1, line2)
return lines
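# Minimal usage sketch on a toy HLS-style fragment; note that the two lifted
# lines are inserted at decl_pos, i.e. immediately before the
# "Variable Declaration" marker comment, re-indented to its indentation level.
_lines = [
    "  /* Variable Declaration */",
    "  int x;",
    "  for (int i = 0; i < 4; i++) {",
    "    float buf_data_split[4];",
    "    #pragma HLS array_partition variable=buf_data_split",
    "  }",
]
_lifted = lift_split_buffers(_lines)
assert _lifted[0] == "  float buf_data_split[4];"
assert _lifted[1] == "  #pragma HLS array_partition variable=buf_data_split"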
| 8,989
|
def peak_finder(
df_run,
cd,
windowlength,
polyorder,
datatype,
lenmax,
peak_thresh):
"""Determines the index of each peak in a dQdV curve
V_series = Pandas series of voltage data
dQdV_series = Pandas series of differential capacity data
cd = either 'c' for charge and 'd' for discharge.
Output:
i = list of indexes for each found peak"""
(cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col,
char_cap_col, charge_or_discharge) = col_variables(datatype)
V_series = df_run[volt_col]
# this makes the peak finding smoothing independent of any smoothing that
# has already occured.
dQdV_series = df_run['Smoothed_dQ/dV']
sigx, sigy = cd_dataframe(V_series, dQdV_series, cd)
# the below is to make sure the window length ends up an odd number - even
# though we are basing it on the length of the df
wl = lenmax / 20
wlint = int(round(wl))
if wlint % 2 == 0:
windowlength_new = wlint + 1
else:
windowlength_new = wlint
if len(sigy) > windowlength_new and windowlength_new > polyorder:
        # has to be larger than 69 so that windowlength > 3 - necessary for the
        # Savitzky-Golay filter
sigy_smooth = scipy.signal.savgol_filter(
sigy, windowlength_new, polyorder)
else:
sigy_smooth = sigy
peak_thresh_ft = float(peak_thresh)
i = peakutils.indexes(
sigy_smooth,
thres=peak_thresh_ft,
min_dist=lenmax / 50)
if i is not None and len(i) > 0:
sigx_volts = list(sigx[i])
peak_heights = list(sigy[i])
else:
sigx_volts = []
peak_heights = []
return i, sigx_volts, peak_heights
| 8,990
|
def model_output_pipeline(model_outputs=True, visualize_anchors=False,
visualize_anchor_gt_pair=False, verbose=False, very_verbose=False):
"""
model_outputs - flag to enable plotting model outputs
visualize_anchors - flag to visualize anchors
    visualize_anchor_gt_pair - flag to visualize ground truth bboxes and respective anchors
    verbose, very_verbose - flags controlling how much diagnostic output is printed
    """
params = Params(constants.params_path.format(general_config.model_id))
if model_outputs:
model = model_setup(params)
model = load_weigths_only(model, params)
model.to(device)
model.eval()
valid_loader = dataloaders.get_dataloaders_test(params)
with torch.no_grad():
total_iou, total_maps = 0, np.array([0, 0, 0, 0, 0, 0])
for batch_idx, (batch_images, batch_targets, images_info) in enumerate(valid_loader):
if model_outputs:
batch_images = batch_images.to(device)
predictions = model(batch_images)
else:
n_classes = len(classes_config.training_ids)
predictions = [torch.randn(params.batch_size, 4, anchor_config.total_anchors),
torch.randn(params.batch_size, n_classes, anchor_config.total_anchors)]
for idx in range(len(batch_images)):
non_background = batch_targets[1][idx] != 100
all_anchor_classes = batch_targets[1][idx]
gt_bbox = batch_targets[0][idx][non_background]
gt_class = batch_targets[1][idx][non_background]
iou, maps = anchor_mapping.test_anchor_mapping(
image=batch_images[idx], bbox_predictions=predictions[0][idx].permute(1, 0),
classification_predictions=predictions[1][idx].permute(1, 0),
gt_bbox=gt_bbox, gt_class=gt_class, image_info=images_info[idx], params=params,
model_outputs=model_outputs, visualize_anchors=visualize_anchors,
visualize_anchor_gt_pair=visualize_anchor_gt_pair, all_anchor_classes=all_anchor_classes,
verbose=verbose, very_verbose=very_verbose)
total_iou += iou
total_maps += maps
avg = (batch_idx + 1) * params.batch_size
print("Mean iou so far: ", total_iou / avg)
print("Mean maps so far: ", total_maps / avg)
if batch_idx == 10:
return
| 8,991
|
def make_box(world, x_dim, y_dim, z_dim, mass=0.5):
"""Makes a new axis-aligned box centered at the origin with
dimensions width x depth x height. The box is a RigidObject
with automatically determined inertia.
"""
boxgeom = Geometry3D()
boxgeom.loadFile("data/objects/cube.tri")
# box is centered at the origin
boxgeom.transform([x_dim, 0, 0, 0, y_dim, 0, 0, 0, z_dim], [-x_dim * 0.5, -y_dim * 0.5, -z_dim * 0.5])
print "Making a box a rigid object"
bmass = Mass()
bmass.setMass(mass)
bmass.setCom([0, 0, 0])
bmass.setInertia([x_dim / 12, y_dim / 12, z_dim / 12])
box = world.makeRigidObject("box")
box.geometry().set(boxgeom)
box.appearance().setColor(0.6, 0.3, 0.2, 1.0)
box.setMass(bmass)
cparams = box.getContactParameters()
cparams.kFriction = 1.5
cparams.kStiffness = 100000
cparams.kDamping = 30000
cparams.kRestitution = 0.5
return box
| 8,992
|
def as_file(uri):
"""
If the URI is a file (either the ``file`` scheme or no scheme), then returns the normalized
path. Otherwise, returns ``None``.
"""
if _IS_WINDOWS:
# We need this extra check in Windows before urlparse because paths might have a drive
# prefix, e.g. "C:" which will be considered a scheme for urlparse below
path = uri.replace('/', '\\')
if os.path.exists(path):
return os.path.normpath(path)
url = urlparse.urlparse(uri)
scheme = url.scheme
if (not scheme) or (scheme == 'file'):
path = url.path
if _IS_WINDOWS:
path = path.replace('/', '\\')
return os.path.normpath(path)
return None
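# Minimal usage sketch (POSIX paths assumed; on Windows the slashes would be
# normalized to backslashes instead):
assert as_file('file:///tmp/models/net.bin') == '/tmp/models/net.bin'
assert as_file('/tmp/models/net.bin') == '/tmp/models/net.bin'
assert as_file('http://example.com/net.bin') is None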
| 8,993
|
def add_full_name(obj):
"""
A decorator to add __full_name__ to the function being decorated.
This should be done for all decorators used in pywikibot, as any
decorator that does not add __full_name__ will prevent other
decorators in the same chain from being able to obtain it.
This can be used to monkey-patch decorators in other modules.
e.g.
<xyz>.foo = add_full_name(<xyz>.foo)
@param obj: The function to decorate
@type obj: callable
@return: decorating function
@rtype: function
"""
def outer_wrapper(*outer_args, **outer_kwargs):
"""Outer wrapper.
The outer wrapper may be the replacement function if the decorated
decorator was called without arguments, or the replacement decorator
        if the decorated decorator was called with arguments.
@param outer_args: args
@type outer_args: list
@param outer_kwargs: kwargs
@type outer_kwargs: dict
"""
def inner_wrapper(*args, **kwargs):
"""Replacement function.
If the decorator supported arguments, they are in outer_args,
and this wrapper is used to process the args which belong to
the function that the decorated decorator was decorating.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
"""
add_decorated_full_name(args[0])
return obj(*outer_args, **outer_kwargs)(*args, **kwargs)
inner_wrapper.__doc__ = obj.__doc__
inner_wrapper.__name__ = obj.__name__
inner_wrapper.__module__ = obj.__module__
inner_wrapper.__signature__ = signature(obj)
# The decorator being decorated may have args, so both
# syntax need to be supported.
if (len(outer_args) == 1 and len(outer_kwargs) == 0 and
callable(outer_args[0])):
add_decorated_full_name(outer_args[0])
return obj(outer_args[0])
else:
return inner_wrapper
if not __debug__:
return obj
return outer_wrapper
| 8,994
|
def LikelihoodRedshiftMeasure( measure='', data=[], scenario=False, measureable=False):
"""
returns likelihood functions of redshift for observed data of measure,
can be used to obtain estimate and deviation
Parameters
----------
measure : string
indicate which measure is probed
data : array-like
1D array contain extragalactic component of observed values
scenario : dictionary
list of models combined to one scenario
    measureable : boolean
        if True, restrict the likelihood to measureable values (via L.Measureable())
"""
if not measure:
exit( "you must provide a measure. Try: 'DM', 'RM', 'tau'" )
if scenario.redshift:
exit( "requires scenario with telescope and population" )
## prepare scenario for increasing redshift
tmp = scenario.copy()
tmp.population = False
tmp.telescope = False
    ## container for likelihoods and deviation at increasing redshift
Ps = np.zeros( [len(DMs),len(redshift_bins)] )
devs= Ps.copy()
## for each redshift
for iz, z in enumerate( redshift_bins ):
tmp.redshift = z
L = GetLikelihood( measure, tmp )
if measureable:
L.Measureable()
Ps[:,iz], devs[:,iz] = L.Likelihoods( DMs, density=True ) ### use probability density to compare same value of DM at different redshifts. Otherwise influenced by different binning
Ls = []
    for P, dev in zip(Ps, devs):
L = LikelihoodFunction( P=P, x=redshift_range, dev=dev )
Ls.append(L)
return Ls
| 8,995
|
def plural_suffix(count: int) -> str:
""""s" when count is not one"""
suffix = ''
if count != 1:
suffix = 's'
return suffix
| 8,996
|
def _pr_exists(user, namespace, repo, idx):
""" Utility method checking if a given PR exists. """
repo_obj = pagure.lib.query.get_authorized_project(
flask.g.session, project_name=repo, user=user, namespace=namespace
)
if not repo_obj:
return False
pr_obj = pagure.lib.query.search_pull_requests(
flask.g.session, project_id=repo_obj.id, requestid=idx
)
if not pr_obj:
return False
return pr_obj
| 8,997
|
def readData(filename):
"""
Read in our data from a CSV file and create a dictionary of records,
    where the key is a unique record ID and each value is a dict
"""
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
clean_row = [(k, preProcess(v)) for (k, v) in row.items()]
row_id = int(row['activity_nr'])
data_d[row_id] = dict(clean_row)
return data_d
| 8,998
|
def write_last_activity_index(settings_dir, activity_index, format):
"""
Persists the index of the last exported activity for the given export format
(see also method read_settings())
:param settings_dir: Path to the pickle file
:param activity_index: Positive integer
:param format: Value of args["format"]
"""
settings = read_settings(settings_dir)
settings['activity_indices'][format] = activity_index
file_name = join(settings_dir, ".settings")
with open(file_name, "wb") as f:
pickle.dump(settings, f)
| 8,999
|