content
stringlengths 22
815k
| id
int64 0
4.91M
|
|---|---|
def train(dataset, val_dataset, v, start_epoch=0):
    """Train the PGN model, evaluate it each epoch, and checkpoint on improvement.

    Args:
        dataset (dataset.PairDataset): The training dataset.
        val_dataset (dataset.PairDataset): The evaluation dataset.
        v (vocab.Vocab): The vocabulary built from the training dataset.
        start_epoch (int, optional): The starting epoch number. Defaults to 0.
    """
    DEVICE = torch.device("cuda" if config.is_cuda else "cpu")
    model = PGN(v)
    model.load_model()
    model.to(DEVICE)
    if config.fine_tune:
        # In fine-tuning mode, we fix the weights of all parameters except attention.wc.
        print('Fine-tuning mode.')
        for name, params in model.named_parameters():
            if name != 'attention.wc.weight':
                params.requires_grad=False
    # forward
    print("loading data")
    train_data = SampleDataset(dataset.pairs, v)
    val_data = SampleDataset(val_dataset.pairs, v)
    print("initializing optimizer")
    # Define the optimizer.
    optimizer = optim.Adam(model.parameters(),
                           lr=config.learning_rate)
    train_dataloader = DataLoader(dataset=train_data,
                                  batch_size=config.batch_size,
                                  shuffle=True,
                                  collate_fn=collate_fn)
    # Best validation loss seen so far; resume from disk if a prior run saved one.
    val_losses = np.inf
    if (os.path.exists(config.losses_path)):
        with open(config.losses_path, 'rb') as f:
            val_losses = pickle.load(f)
    # torch.cuda.empty_cache()
    # SummaryWriter: Log writer used for TensorboardX visualization.
    writer = SummaryWriter(config.log_path)
    # tqdm: A tool for drawing progress bars during training.
    with tqdm(total=config.epochs) as epoch_progress:
        for epoch in range(start_epoch, config.epochs):
            batch_losses = []  # Get loss of each batch.
            num_batches = len(train_dataloader)
            with tqdm(total=num_batches//100) as batch_progress:
                for batch, data in enumerate(tqdm(train_dataloader)):
                    x, y, x_len, y_len, oov, len_oovs = data
                    # Guard against NaNs leaking in from preprocessing.
                    assert not np.any(np.isnan(x.numpy()))
                    if config.is_cuda:  # Training with GPUs.
                        x = x.to(DEVICE)
                        y = y.to(DEVICE)
                        x_len = x_len.to(DEVICE)
                        len_oovs = len_oovs.to(DEVICE)
                    model.train()  # Sets the module in training mode.
                    optimizer.zero_grad()  # Clear gradients.
                    # Calculate loss.
                    loss = model(x, x_len, y, len_oovs, batch=batch, num_batches=num_batches)
                    batch_losses.append(loss.item())
                    loss.backward()  # Backpropagation.
                    # Do gradient clipping to prevent gradient explosion.
                    clip_grad_norm_(model.encoder.parameters(),
                                    config.max_grad_norm)
                    clip_grad_norm_(model.decoder.parameters(),
                                    config.max_grad_norm)
                    clip_grad_norm_(model.attention.parameters(),
                                    config.max_grad_norm)
                    optimizer.step()  # Update weights.
                    # Output and record epoch loss every 100 batches.
                    if (batch % 100) == 0:
                        batch_progress.set_description(f'Epoch {epoch}')
                        batch_progress.set_postfix(Batch=batch,
                                                   Loss=loss.item())
                        batch_progress.update()
                        # Write loss for tensorboard.
                        writer.add_scalar(f'Average loss for epoch {epoch}',
                                          np.mean(batch_losses),
                                          global_step=batch)
            # Calculate average loss over all batches in an epoch.
            epoch_loss = np.mean(batch_losses)
            epoch_progress.set_description(f'Epoch {epoch}')
            epoch_progress.set_postfix(Loss=epoch_loss)
            epoch_progress.update()
            avg_val_loss = evaluate(model, val_data, epoch)
            print('training loss:{}'.format(epoch_loss),
                  'validation loss:{}'.format(avg_val_loss))
            # Update minimum evaluating loss.
            if (avg_val_loss < val_losses):
                # New best validation loss: checkpoint every sub-module and
                # persist the loss so a restarted run can resume comparison.
                torch.save(model.encoder, config.encoder_save_name)
                torch.save(model.decoder, config.decoder_save_name)
                torch.save(model.attention, config.attention_save_name)
                torch.save(model.reduce_state, config.reduce_state_save_name)
                val_losses = avg_val_loss
                with open(config.losses_path, 'wb') as f:
                    pickle.dump(val_losses, f)
    writer.close()
| 24,900
|
def verify_manager_list(clazz):
    """Verifies that managers return List<? extends Parcelable> instead of arrays."""
    # Only classes named *Manager are subject to this rule.
    if not clazz.name.endswith("Manager"):
        return
    for method in clazz.methods:
        # Flag methods that return an array of an android.* type.
        if method.typ.startswith("android.") and method.typ.endswith("[]"):
            warn(clazz, method, None, "Methods should return List<? extends Parcelable> instead of Parcelable[] to support ParceledListSlice under the hood")
| 24,901
|
def _ge(t1: 'Tensor', t2: 'Tensor', isnew: bool) -> 'Tensor':
    """Elementwise greater-or-equal comparison of two tensors.

    The comparison is a step function, so wherever its derivative exists it
    is zero; both gradient functions therefore return all-zero arrays.

    :param t1: left-hand tensor
    :param t2: right-hand tensor
    :param isnew: when True, the result is detached from the autograd graph
    :return: a new Tensor holding ``t1.data >= t2.data``
    """
    data = t1.data >= t2.data
    if isnew:
        # Detached result: no gradient tracking, no dependencies.
        return Tensor(data, False, [])
    depends_on: List[Dependency] = []
    if t1.requires_grad:
        depends_on.append(Dependency(t1, lambda grad: np.zeros_like(t1.data)))
    if t2.requires_grad:
        depends_on.append(Dependency(t2, lambda grad: np.zeros_like(t2.data)))
    return Tensor(data,
                  t1.requires_grad or t2.requires_grad,
                  depends_on)
| 24,902
|
def test_haar2d():
    """
    Asserts the forwards and inverse wavelet transformations are correct.
    """
    # Odd-sized inputs must come back padded to a compatible power-of-two shape.
    assert haar2d(np.random.random([5,3]),2,debug=True).shape == (8,4), \
        "Transform data must be padded to compatible shape."
    assert haar2d(np.random.random([8,4]),2,debug=True).shape == (8,4), \
        "Transform data must be padded to compatible shape, if required."
    # Round trip: inverse(forward(x)), cropped to x's shape, must reproduce x.
    original = np.random.random([5,3])
    recovered = ihaar2d(haar2d(original, 3), 3)[:original.shape[0], :original.shape[1]]
    assert (original - recovered < 0.001).all(), "Transform must be circular."
| 24,903
|
def convert_homogeneous_graph(graph: Dict[str, Any],
                              num_graphs: int,
                              output_dir: str):
    """Process a homogeneous graph.

    Pops the structural entries out of `graph`, writes node/edge/context
    feature tables under `output_dir`, and returns the corresponding graph
    schema.  Note that `graph` is consumed (mutated) in the process.
    """
    # NOTE(blais): We could in theory stash the data in the same format as their
    # heterogeneous graphs in Python and just use convert_heterogeneous_graph().
    # Gather node features.
    logging.info("Processing node features")
    num_nodes = graph.pop("num_nodes")
    # Synthesize a node id column (0..num_nodes-1), as bytes.
    graph["node_#id"] = numpy.arange(num_nodes).astype(bytes)
    node_features = extract_features(graph, "node", num_nodes)
    filename = write_table(output_dir, "nodes", node_features, num_nodes)
    node_features_dict = {}
    node_features_dict["nodes"] = (filename, node_features)
    # Gather edge features.
    logging.info("Processing edge features")
    indices = graph.pop("edge_index")
    assert len(indices.shape) == 2
    num_edges = indices.shape[1]
    # Source/target node ids for each edge, using tfgnn's column naming.
    graph["edge_{}".format(tfgnn.SOURCE_NAME)] = indices[0].astype(bytes)
    graph["edge_{}".format(tfgnn.TARGET_NAME)] = indices[1].astype(bytes)
    # NOTE(blais): If external edge features are needed and each edge is
    # unique, you can use this:
    # graph["edge_#id"] = ["{}_{}".format(edge_index[0, i], edge_index[1, i])
    #                      for i in range(num_edges)]
    edge_features = extract_features(graph, "edge", num_edges)
    filename = write_table(output_dir, "edges", edge_features, num_edges)
    edge_features_dict = {}
    # Edges link the "nodes" set to itself (homogeneous graph).
    edge_features_dict["edges"] = (filename, "nodes", "nodes", edge_features)
    # Gather context features (only meaningful when there are multiple graphs).
    logging.info("Processing graph context features")
    if num_graphs > 1:
        graph_features = extract_features(graph, "graph", num_graphs)
        filename = write_table(output_dir, "graph", graph_features, num_graphs)
        context_features = (filename, graph_features)
    else:
        context_features = None
    # Make sure we processed everything.
    graph = remove_empty_dicts(graph)
    if graph:
        logging.error("Graph is not empty: %s", graph)
    # Produce a corresponding graph schema.
    logging.info("Producing graph schema")
    return create_schema(context_features, node_features_dict, edge_features_dict)
| 24,904
|
def create_capacity_to_activity(connector, technology_list):
    """
    This function writes the capacity to activity table in Temoa.

    Creates the "CapacityToActivity" table and inserts one row per
    (region, technology) pair, then commits the transaction.
    """
    table_command = """CREATE TABLE "CapacityToActivity" (
"regions" text,
"tech" text,
"c2a" real,
"c2a_notes" TEXT,
PRIMARY KEY("regions","tech"),
FOREIGN KEY("tech") REFERENCES "technologies"("tech")
);"""
    insert_command = """INSERT INTO "CapacityToActivity" VALUES (?,?,?,?)"""
    cursor = connector.cursor()
    cursor.execute(table_command)
    # One row per region each technology is available in; notes left blank.
    rows = [
        (region, tech.tech_name, tech.capacity_to_activity, '')
        for tech in technology_list
        for region in tech.regions
    ]
    cursor.executemany(insert_command, rows)
    connector.commit()
    return
| 24,905
|
def create_virtualenv():
    """Create virtualenv for project (no-op if it already exists)."""
    project = get_project_name()
    py_version = get_config()['version']
    venv_dir = "{}/{}/virtualenv".format(SITES_DIR, project)
    # A bin/ directory is the marker of an already-provisioned virtualenv.
    if cuisine.dir_exists(venv_dir + "/bin"):
        fab.puts("virtualenv for {0} already exists".format(project))
        return
    with cuisine.mode_sudo():
        cuisine.dir_ensure(venv_dir, recursive=True)
        bin_path = _python_bin_path(py_version, 'virtualenv')
        fab.sudo("{venv_bin} {virtualenv_dir}".format(venv_bin=bin_path,
                                                      virtualenv_dir=venv_dir))
| 24,906
|
def second_deriv_log_pdf(phi, alpha, beta, eps=1e-4):
    """Second derivative of `log_pdf` with respect to latitude.

    Uses the central finite-difference stencil
    (f(x + h) - 2 f(x) + f(x - h)) / h**2 with step size `eps`.
    """
    above = log_pdf(phi + eps, alpha, beta)
    centre = log_pdf(phi, alpha, beta)
    below = log_pdf(phi - eps, alpha, beta)
    return (above - 2 * centre + below) / eps ** 2
| 24,907
|
def create_map(
    tag: Optional[str],
    func: Callable,
    args_and_kwargs: Iterator[ARGS_AND_KWARGS],
    map_options: Optional[options.MapOptions] = None,
) -> maps.Map:
    """
    All map calls lead here.

    This function performs various checks on the ``tag``,
    constructs a submit object that represents the map for HTCondor,
    saves all of the map's definitional data to the map directory,
    and submits the map job,
    returning the map's :class:`Map`.

    Parameters
    ----------
    tag
        The ``tag`` to assign to this map.
    func
        The function to map the arguments over.
    args_and_kwargs
        The arguments and keyword arguments to map over - the output of :func:`zip_args_and_kwargs`.
    map_options
        An instance of :class:`htmap.MapOptions`.

    Returns
    -------
    map :
        A :class:`htmap.Map` representing the map.
    """
    if tag is None:
        # No tag supplied: generate a random one and mark the map transient.
        tag = tags.random_tag()
        transient = True
    else:
        transient = False
        tags.raise_if_tag_is_invalid(tag)
        tags.raise_if_tag_already_exists(tag)
    logger.debug(f"Creating map {tag} ...")
    if map_options is None:
        map_options = options.MapOptions()
    # Each map lives in its own uuid-named directory on disk.
    uid = uuid.uuid4()
    map_dir = map_dir_path(uid)
    try:
        make_map_dir_and_subdirs(map_dir)
        transformed_args_and_kwargs, extra_input_files = transform_args_and_kwargs(args_and_kwargs)
        num_components = len(transformed_args_and_kwargs)
        if num_components == 0:
            raise exceptions.EmptyMap("Cannot create a map with zero components")
        # Merge per-component extra input files into the map options.
        if map_options.input_files is None and len(extra_input_files) > 0:
            map_options.input_files = [[] for _ in range(len(extra_input_files))]
        for tif, extra in zip(map_options.input_files, extra_input_files):
            tif.extend(extra)
        submit_obj, itemdata = options.create_submit_object_and_itemdata(
            tag, map_dir, num_components, map_options,
        )
        logger.debug(f"Submit description for map {tag} is\n{submit_obj}")
        logger.debug(f"First itemdatum for map {tag} is \n{pformat(itemdata[0])}")
        logger.debug(f"Creating map directory for map {tag} ...")
        with utils.Timer() as timer:
            # Persist everything needed to reconstruct and submit the map.
            htio.save_func(map_dir, func)
            htio.save_inputs(map_dir, transformed_args_and_kwargs)
            htio.save_num_components(map_dir, num_components)
            htio.save_submit(map_dir, submit_obj)
            htio.save_itemdata(map_dir, itemdata)
        logger.debug(f"Created map directory for map {tag} (took {timer.elapsed:.6f} seconds)")
        logger.debug(f"Submitting map {tag}...")
        # Record the tag -> uid mapping before submitting.
        tags.tag_file_path(tag).write_text(str(uid))
        m = maps.Map(tag=tag, map_dir=map_dir,)
        if transient:
            m._make_transient()
        m._submit()
        if utils.is_interactive_session():
            print(f"Created map {m.tag} with {len(m)} components")
        return m
    except BaseException as e:
        # something went wrong during submission, and the job is malformed
        # so delete the entire map directory
        # the condor bindings should prevent any jobs from being submitted
        logger.exception(f"Map submission for map {tag} aborted due to: {e}")
        try:
            tags.tag_file_path(tag).unlink()
        except FileNotFoundError:
            pass
        shutil.rmtree(str(map_dir.absolute()))
        logger.debug(f"Removed malformed map directory {map_dir}")
        raise e
| 24,908
|
def _fallback_schedule(cfg, wkl):
    """
    Get default schedule for the workload

    Parameters
    ----------
    cfg : tvm.autotvm.task.space.FallbackConfigEntity
        Fallback config to be updated
    wkl : topi.nn.depthwise_conv2d.Workload
        Convolution workload
    """
    def largest_divisor(value, upper):
        # Largest n in [1, upper] that evenly divides value (1 always works).
        for n in range(upper, 0, -1):
            if value % n == 0:
                return n
        return 1

    simd_width = get_fp32_len()
    pad_h, pad_w = wkl.hpad, wkl.wpad
    stride_h, stride_w = wkl.hstride, wkl.wstride
    out_width = (wkl.width + 2 * pad_w - wkl.wkernel) // stride_w + 1
    # Output-channel block: widest divisor that fits the SIMD width.
    oc_bn = largest_divisor(wkl.out_filter, simd_width)
    # Input-channel block: bounded by the chosen output-channel block.
    ic_bn = largest_divisor(wkl.in_filter, oc_bn)
    # Output-width register tile, at most 31.
    reg_n = largest_divisor(out_width, 31)
    cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
    cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
    cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
    cfg["unroll_kw"] = OtherOptionEntity(False)
| 24,909
|
def normalize_data(data, zp=25., zpsys='ab'):
    """Return a copy of the data with all flux and fluxerr values normalized
    to the given zeropoint. Assumes data has already been standardized.

    Parameters
    ----------
    data : `~numpy.ndarray`
        Structured array.
    zp : float
    zpsys : str

    Returns
    -------
    normalized_data : `~numpy.ndarray`
    """
    warn_once('standardize_data', '1.5', '2.0',
              'This function not intended for public use; open an issue at '
              'https://github.com/sncosmo/sncosmo/issues if you need this '
              'functionality.')
    normmagsys = get_magsystem(zpsys)
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` (== np.float64 here) is the supported spelling.
    factor = np.empty(len(data), dtype=float)
    for b in set(data['band'].tolist()):
        idx = data['band'] == b
        b = get_bandpass(b)
        # Scale each band's flux from its recorded zeropoint to `zp`...
        bandfactor = 10.**(0.4 * (zp - data['zp'][idx]))
        # ...and correct for differences between magnitude systems.
        bandzpsys = data['zpsys'][idx]
        for ms in set(bandzpsys):
            idx2 = bandzpsys == ms
            ms = get_magsystem(ms)
            bandfactor[idx2] *= (ms.zpbandflux(b) / normmagsys.zpbandflux(b))
        factor[idx] = bandfactor
    normalized_data = OrderedDict([('time', data['time']),
                                   ('band', data['band']),
                                   ('flux', data['flux'] * factor),
                                   ('fluxerr', data['fluxerr'] * factor),
                                   ('zp', zp),
                                   ('zpsys', zpsys)])
    return dict_to_array(normalized_data)
| 24,910
|
def CreateMessage(sender, to, subject, message_text):
    """
    Creates an object containing a base64url encoded email object.

    Builds a plain-text MIME message with the given headers and returns
    the Gmail-API-style payload dict {'raw': <base64url string>}.
    """
    mime_msg = MIMEText(message_text)
    mime_msg['to'] = to
    mime_msg['from'] = sender
    mime_msg['subject'] = subject
    encoded = base64.urlsafe_b64encode(mime_msg.as_bytes()).decode()
    return {'raw': encoded}
| 24,911
|
def bg_lookup(bg_name: str) -> str:
    """Look up ANSI escape codes based on background color name.

    :param bg_name: background color name to look up ANSI escape code(s) for
    :return: ANSI escape code(s) associated with this color
    :raises ValueError if the color cannot be found
    """
    try:
        # Lookup is case-insensitive.
        return BG_COLORS[bg_name.lower()]
    except KeyError:
        raise ValueError('Background color {!r} does not exist.'.format(bg_name))
| 24,912
|
def readData(f):
    """
    Parse taxon count table (from count-taxon.py)

    Parameters:
    -----------
    f : str
        file name of taxon count table

    Returns:
    --------
    tuple
        a list of taxons and a list of their counts
    """
    taxa_lis = []
    num_lis = []
    for n, line in enumerate(open(f)):
        # Skip comment lines and blank lines.
        if line.startswith('#'):
            continue
        line = line.rstrip()
        if line == '':
            continue
        # Each data line is "<taxon string>\t<count>".
        taxa, num = line.split('\t')
        # Drop rows whose taxon string contains any excluded keyword.
        skip = False
        for word in EXCLUDE:
            if word in taxa:
                skip = True
                break
        if skip:
            continue
        # Split the semicolon-separated lineage into individual levels.
        taxa = taxa.rstrip(';')
        lis = taxa.split(';')
        lis2 = []
        for item in lis:
            item = item.strip()
            # Strip a trailing parenthesized annotation, e.g. 'Firmicutes(95)'.
            if item.endswith(')'):
                item = item.split('(')[0].strip()
            # remove taxon level prefix, e.g. 'p__Firmicutes'
            if '__' in item:
                item = item.split('__', 1)[1]
            #item = item.strip('"')
            item = item.lower()
            # Collapse all flavors of unknown assignment into one bucket.
            # NOTE(review): 'Unclassifed' is misspelled but left as-is —
            # downstream code may match this exact string; confirm before fixing.
            if 'unclassified' in item:
                item = 'Unclassifed'
            elif 'unknown' in item:
                item = 'Unclassifed'
            elif 'other' in item:
                item = 'Unclassifed'
            elif 'unassigned' in item:
                item = 'Unclassifed'
            item = item.capitalize()
            lis2.append(item)
        taxa_lis.append(lis2)
        num_lis.append(float(num))
    return taxa_lis, num_lis
| 24,913
|
def workspace_check(func):
    """
    Decorator for confirming <workspace> is defined in the CONFIG_PATH (i.e. kaos workspace set has been run).

    Exits with status 1 when no workspace is configured, or when the
    configured workspace no longer exists on the backend; otherwise calls
    the wrapped function and returns its result.
    """
    import functools  # local import: file's import block is not visible here

    @functools.wraps(func)  # BUG FIX: preserve the wrapped command's metadata
    def wrapper(*args, **kwargs):
        config = ConfigParser(interpolation=ExtendedInterpolation())
        config.read(CONFIG_PATH)
        if 'pachyderm' not in config:
            click.echo("{} - {} not defined - first run {}".format(
                click.style("Warning", bold=True, fg='yellow'),
                click.style("workspace", bold=True, fg='red'),
                click.style("kaos workspace set", bold=True, fg='green')))
            sys.exit(1)
        # get base_url
        base_url = config.get('backend', 'url')
        current_workspace = config.get('pachyderm', 'workspace')
        # GET all workspaces: /workspace
        r = requests.get(f"{base_url}/workspace")
        data = r.json()
        workspaces_list = [v for v in data['names']]
        if current_workspace not in workspaces_list:
            click.echo("{} - Workspace {} has been {}. \n\n"
                       "Please ensure the kaos train/serve commands are run on an active workspace. \n\n"
                       "Check available workspaces with - {}".format(
                           click.style("Warning", bold=True, fg='yellow'),
                           click.style(current_workspace, bold=True, fg='green'),
                           click.style("deleted/killed", bold=True, fg='red'),
                           click.style("kaos workspace list", bold=True, fg='green')))
            sys.exit(1)
        # BUG FIX: propagate the wrapped function's return value (was dropped).
        return func(*args, **kwargs)
    return wrapper
| 24,914
|
def test_persistent_group_add_cli_chan(dev):
    """P2P persistent group formation and re-invocation with p2p_add_cli_chan=1"""
    # Enable inclusion of the client channel list on both devices.
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    form(dev[0], dev[1])
    # Flush cached BSS entries and rescan the 2.4 GHz social channels
    # (2412/2437/2462 MHz = channels 1, 6, 11) so only fresh results remain.
    dev[1].request("BSS_FLUSH 0")
    dev[1].scan(freq="2412", only_new=True)
    dev[1].scan(freq="2437", only_new=True)
    dev[1].scan(freq="2462", only_new=True)
    dev[1].request("BSS_FLUSH 0")
    # Re-invoke the persistent group from both the client and the GO side.
    invite_from_cli(dev[0], dev[1])
    invite_from_go(dev[0], dev[1])
| 24,915
|
def detect_onsets_offsets(data, threshold, min_distance):
    """
    Detect where a 1-D signal rises above `threshold` (onsets) and where it
    falls back below it (offsets), merging events separated by gaps of at
    most `min_distance` samples.
    """
    above = (data > threshold)
    # Shift right by one sample so each element can see its left neighbour.
    prev_above = np.concatenate(([0], above), axis=0)[0:-1]
    # Onset: high now, low one sample earlier.  Offset: the reverse.
    onset = np.squeeze(np.where(above & (prev_above != True)))
    offset = np.squeeze(np.where((above != True) & (prev_above == True)))
    if data[-1] > threshold:
        # Signal still high at the end: close the final event artificially.
        offset = np.append(offset, len(data))
    if len(np.shape(onset)) < 1:
        # A single event squeezes down to a 0-d array; re-wrap for iteration.
        offset = [offset]
        onset = [onset]
    merged_offsets = []
    merged_onsets = []
    if len(onset) > 0:
        merged_onsets.append(onset[0])
        if len(onset) > 1:
            # Keep an onset/offset pair only when the gap to the next event
            # exceeds min_distance; otherwise the two events are fused.
            for i in range(len(onset) - 1):
                if (onset[i + 1] - offset[i]) > min_distance:
                    merged_onsets.append(onset[i + 1])
                    merged_offsets.append(offset[i])
        merged_offsets.append(offset[-1])
    return merged_onsets, merged_offsets
| 24,916
|
def main():
    """ Script entry point. """
    # NOTE(review): the backslash-continued help strings below embed the next
    # line's leading whitespace into the string; they are kept byte-identical.
    parser = argparse.ArgumentParser(description='This tool analyzes CPU\
utilization and platform metrics\
collected from eris agent and build data\
model for contention detect and resource\
regulation.')
    parser.add_argument('workload_conf_file', help='workload configuration\
file describes each task name, type, id, request cpu\
count', type=argparse.FileType('rt'), default='wl.csv')
    parser.add_argument('-v', '--verbose', help='increase output verbosity',
                        action='store_true')
    parser.add_argument('-t', '--thresh', help='threshold used in outlier\
detection', type=int, default=4)
    parser.add_argument('-f', '--fense-type', help='fense type used in outlier\
detection', choices=['quartile', 'normal',
                     'gmm-strict', 'gmm-normal'],
                     default='gmm-strict')
    parser.add_argument('-m', '--metric-file', help='metrics file collected\
from eris agent', type=argparse.FileType('rt'),
                        default='metrics.csv')
    args = parser.parse_args()
    if args.verbose:
        print(args)
    # Hand the parsed arguments to the analysis pipeline.
    process(args)
| 24,917
|
def STEPConstruct_PointHasher_IsEqual(*args):
    """
    * Returns True when the two keys are the same. Two same keys must have the same hashcode, the contrary is not necessary.

    :param Point1:
    :type Point1: gp_Pnt
    :param Point2:
    :type Point2: gp_Pnt
    :rtype: bool
    """
    # Thin SWIG wrapper: forwards directly to the native implementation.
    return _STEPConstruct.STEPConstruct_PointHasher_IsEqual(*args)
| 24,918
|
def step_smooth(x):
    """Smooth cubic ("smoothstep") ramp: 0 for x <= 0, 1 for x >= 1,
    and 3*x**2 - 2*x**3 in between.
    """
    ramp = 3 * np.square(x) - 2 * np.power(x, 3)
    return np.select([x > 1, x > 0], [1, ramp], default=0)
| 24,919
|
def getBitSizeOfVarInt64(value):
    """
    Gets bit size of variable 64-bit signed integer value.

    :param value: Value to use for bit size calculation.
    :returns: Bit size of the value.
    """
    # Delegate to the shared varint implementation with the 64-bit
    # signed cut-off table.
    return _getBitSizeOfVarIntImpl(value, VARINT64_MAX_VALUES, signed=True)
| 24,920
|
def bf_add_node_role_dimension(dimension):
    # type: (NodeRoleDimension) -> None
    """
    Adds another role dimension to the active network.

    Individual roles within the dimension must have a valid (java) regex.
    The node list within those roles, if present, is ignored by the server.

    :param dimension: The NodeRoleDimension object for the dimension to add
    :type dimension: :class:`pybatfish.datamodel.referencelibrary.NodeRoleDimension`
    :raises ValueError: if the dimension is of type AUTO, which cannot be added
    """
    dim_type = dimension.type
    if dim_type == "AUTO":
        raise ValueError("Cannot add a dimension of type AUTO")
    restv2helper.add_node_role_dimension(bf_session, dimension)
| 24,921
|
def nan_if_exception(func):
    """Wrap *func* so that np.nan is returned when it raises.

    KeyboardInterrupt and SystemExit are re-raised untouched so the
    process can still be interrupted.

    Examples:
        >>> @nan_if_exception
        ... def f(x, y):
        ...     assert x + y >= 5
        >>> f(1, 2)
        nan
    """
    @functools.wraps(func)
    def wrapper(params, *args, **kwargs):
        try:
            return func(params, *args, **kwargs)
        except (KeyboardInterrupt, SystemExit):
            # Never swallow interpreter-shutdown signals.
            raise
        except Exception:
            return np.nan
    return wrapper
| 24,922
|
def func3():
    # NOTE(review): body is intentionally just this docstring; presumably it is
    # rendered as HTML/markdown help text by a docs generator — confirm before
    # editing the markup inside it.
    """
    ##### Get by name – in a <b>dataset</b>
    """
| 24,923
|
def main():
    """Start Flower client.

    The first CLI argument names a workspace directory under ./workspaces
    next to this file.
    """
    workspace_name = sys.argv[1]
    base_dir = os.path.dirname(os.path.abspath(__file__))
    workspace = os.path.join(base_dir, "workspaces", workspace_name)
    client = MLCubeClient(workspace=workspace)
    fl.client.start_numpy_client("0.0.0.0:8080", client=client)
| 24,924
|
def get_client_public_key(patient_id, study_id):
    """Grabs a user's public key file from s3."""
    paths = construct_s3_key_paths(study_id, patient_id)
    raw_key = s3_retrieve(paths['public'], study_id, raw_path=True)
    return encryption.import_RSA_key(raw_key)
| 24,925
|
def plot_offset_direction(
    dsaimage: Image, coords: SkyCoord, ra_offsets: List[float], dec_offsets: List[float]
) -> Tuple["matplotlib.fig", "matplotlib.axes.Axes"]:
    """Display the image and overlay arrows for the measured RA/Dec offsets."""
    figure, axes = dsaimage.show()
    dsaimage.add_arrows(coords, ra_offsets, dec_offsets)
    return figure, axes
| 24,926
|
def get_file_path(mdir=None) -> str:
    """
    Interactively pick a file via a simple TUI and return its path.
    Browsing starts at `mdir`, defaulting to the current working directory;
    entering a directory name descends into it, entering a file name
    returns that file.

    .. note::
        This clears screen a lot of times and might make your app ugly but
        provides user with a easy way to choose files.
    """
    start_dir = os.getcwd() if mdir is None else mdir
    current = os.path.abspath(start_dir)
    while True:
        _print_tree(current)
        choice = input(">")
        candidate = os.path.join(current, choice)
        if os.path.isfile(candidate):
            _clear()
            return candidate
        elif os.path.isdir(candidate):
            # Descend and loop again.
            current = os.path.abspath(candidate)
            _clear()
| 24,927
|
def sensitivity_metric(event_id_1, event_id_2):
    """Determine similarity between two epochs, given their event ids.

    Returns 0 (completely similar) when both ids are 1, 0.5 (somewhat
    similar) for any other pairing of ids 1 and 2, and 1 (dissimilar)
    otherwise.
    """
    if event_id_1 == 1 and event_id_2 == 1:
        return 0  # Completely similar
    if event_id_1 == 2 and event_id_2 == 2:
        return 0.5  # Somewhat similar
    elif event_id_1 == 1 and event_id_2 == 2:
        return 0.5  # Somewhat similar
    elif event_id_1 == 2 and event_id_2 == 1:
        # BUG FIX: this branch previously tested event_id_1 twice
        # (event_id_1 == 2 and event_id_1 == 1), which can never be true,
        # so the (2, 1) pair wrongly fell through to the dissimilar case.
        return 0.5  # Somewhat similar
    else:
        return 1
| 24,928
|
def duracion_promedio_peliculas(p1: dict, p2: dict, p3: dict, p4: dict, p5: dict) -> str:
    """Compute the average running time of the five movies passed in.

    That is, the total duration of all movies divided by the number of
    movies, formatted as 'HH:MM' with any decimals truncated.

    Parameters:
        p1 (dict): Dictionary with the data of movie 1.
        p2 (dict): Dictionary with the data of movie 2.
        p3 (dict): Dictionary with the data of movie 3.
        p4 (dict): Dictionary with the data of movie 4.
        p5 (dict): Dictionary with the data of movie 5.

    Returns:
        str: the average duration of the movies in 'HH:MM' format.
    """
    # Durations (in minutes) of the five movies.
    duraciones = [pelicula["duracion"] for pelicula in (p1, p2, p3, p4, p5)]
    promedio = sum(duraciones) / 5
    # Split into whole hours and minutes, truncating decimals.
    horas = int(promedio // 60)
    minutos = int(promedio % 60)
    return "{:02d}:{:02d}".format(horas, minutos)
| 24,929
|
def _to_test_data(text):
"""
Lines should be of this format: <word> <normal_form> <tag>.
Lines that starts with "#" and blank lines are skipped.
"""
return [l.split(None, 2) for l in text.splitlines()
if l.strip() and not l.startswith("#")]
| 24,930
|
def append(motion1, motion2):
    """
    Combines two motion sequences into one. motion2 is appended to motion1.
    The operation is not done in place.

    Note that the operation places the sequences next to each other without
    attempting to blend between the poses. To interpolate between the end of
    motion1 and start of motion2, use the `append_and_blend` operation.

    Args:
        motion1, motion2: Motion sequences to be combined.
    """
    assert isinstance(motion1, motion_class.Motion)
    assert isinstance(motion2, motion_class.Motion)
    # The two sequences must share a skeleton topology.
    assert motion1.skel.num_joints() == motion2.skel.num_joints()
    result = copy.deepcopy(motion1)
    result.name = f"{motion1.name}+{motion2.name}"
    result.poses.extend(motion2.poses)
    return result
| 24,931
|
def service(
    fmt: SupportedFormats,
    begints: datetime = Query(
        ..., description="Inclusive UTC timestamp window start for issuance."
    ),
    endts: datetime = Query(
        ..., description="Exclusive UTC timestamp window end for issuance."
    ),
    wfo: List[str] = Query(
        None, description="WFO 3-letter codes for filter.", max_length=3
    ),
    only_new: bool = Query(True, description="Only include issuance events."),
    ph: List[str] = Query(
        None, description="VTEC Phenomena 2-letter codes.", max_length=2
    ),
):
    """Replaced above."""
    # Delegate the query to the shared handler, then serialize the resulting
    # dataframe in the caller's requested format.
    df = handler(begints, endts, wfo, only_new, ph)
    return deliver_df(df, fmt)
| 24,932
|
def cols_shuffled(expr_df, dist_df=None, algo="agno", seed=0):
    """ Return a copy of the expr_df DataFrame with columns shuffled randomly.

    :param pandas.DataFrame expr_df: the DataFrame to copy and shuffle
    :param pandas.DataFrame dist_df: the distance DataFrame to inform us about distances between columns
    :param str algo: Agnostic to distance ('agno') or distance aware ('dist')?
    :param int seed: set numpy's random seed if desired
    :returns: A tuple: (shuffled copy of expr_df reindexed to the original
        column order, dict mapping each original column label to its
        replacement).
    """
    shuffled_df = expr_df.copy(deep=True)
    np.random.seed(seed)
    if algo == "agno":
        # Distance-agnostic: any permutation of the column labels will do.
        shuffled_df.columns = np.random.permutation(expr_df.columns)
    elif algo == "dist":
        # Make a distance-similarity matrix, allowing us to characterize one well_id's distance-similarity to another.
        diss = pd.DataFrame(data=np.corrcoef(dist_df.values), columns=dist_df.columns, index=dist_df.index)
        # Old and new well_id indices
        available_ids = list(expr_df.columns)
        shuffled_well_ids = []
        # For each well_id in the original list, replace it with another one as distance-similar as possible.
        for well_id in list(expr_df.columns):
            # Do we want to avoid same tissue-class?
            # This algo allows for keeping the same well_id and doesn't even look at tissue-class.
            # sort the distance-similarity by THIS well_id's column, but use corresponding index of well_ids
            candidates = diss.sort_values(by=well_id, ascending=False).index
            candidates = [x for x in candidates if x in available_ids]
            if len(candidates) == 1:
                # Last one left: no choice.
                candidate = candidates[0]
            elif len(candidates) < 20:
                # Few candidates remain: pick uniformly among them.
                candidate = np.random.permutation(candidates)[0]
            else:
                # Plenty left: pick randomly among the most similar ~20% (capped at 20).
                n_candidates = min(20, int(len(candidates) / 5.0))
                candidate = np.random.permutation(candidates[:n_candidates])[0]
            # We have our winner, save it to our new list and remove it from what's available.
            shuffled_well_ids.append(candidate)
            available_ids.remove(candidate)
        shuffled_df.columns = shuffled_well_ids
    else:
        # Unknown algo: yield an empty frame (downstream lookup will fail loudly).
        shuffled_df = pd.DataFrame()
    # Column labels have been shuffled; return a dataframe with identically ordered labels and moved data.
    return shuffled_df.loc[:, expr_df.columns], dict(zip(expr_df.columns, shuffled_df.columns))
| 24,933
|
def verify(origin_dir, real_width, real_height, image_suffix):
    """
    Verify that every image in `origin_dir` has the expected suffix, a
    well-formed "<label>_<id>" name, can be opened, and matches the expected
    size (`real_width` x `real_height`).

    Returns the list of offending images as (index, name, reason) tuples.
    User-facing messages are in Portuguese and are left untouched.
    """
    if not os.path.exists(origin_dir):
        print("[Aviso] O diretório {} não pode ser encontrado, ele será criado em breve".format(origin_dir))
        os.makedirs(origin_dir)
    print("Comece a verificar a coleção de fotos original")
    # Expected image size.
    real_size = (real_width, real_height)
    # File names and count of the images to verify.
    img_list = os.listdir(origin_dir)
    total_count = len(img_list)
    print("O conjunto original de imagens compartilhadas: {}张".format(total_count))
    # Collected invalid images: (index, file name, reason).
    bad_img = []
    # Check every image in turn.
    for index, img_name in enumerate(img_list):
        file_path = os.path.join(origin_dir, img_name)
        # Filter out images with the wrong file suffix.
        if not img_name.endswith(image_suffix):
            bad_img.append((index, img_name, "Sufixo de arquivo incorreto"))
            continue
        # Filter out images whose "<label>_<id>" name is malformed.
        prefix, posfix = img_name.split("_")
        if prefix == "" or posfix == "":
            bad_img.append((index, img_name, "O rótulo da imagem é anormal"))
            continue
        # Images that cannot be opened at all.
        try:
            img = Image.open(file_path)
        except OSError:
            bad_img.append((index, img_name, "A imagem não pode ser aberta normalmente"))
            continue
        # Images whose size does not match the expected one.
        if real_size == img.size:
            print("{} pass".format(index), end='\r')
        else:
            bad_img.append((index, img_name, "O tamanho da imagem está anormal:{}".format(img.size)))
    print("====As seguintes {} imagens são anormais====".format(len(bad_img)))
    if bad_img:
        for b in bad_img:
            print("[Foto {}] [{}] [{}]".format(b[0], b[1], b[2]))
    else:
        print("Nenhuma anormalidade encontrada(共 {} Fotos)".format(len(img_list)))
    print("========end")
    return bad_img
| 24,934
|
def build_model(task_description: Dict[str, Any]) -> Dict[str, Any]:
    """Build the predinet model.

    Returns a dict with "model", "loss" and "metrics" entries ready for
    compilation/training on a binary classification task.
    """
    # ---------------------------
    # Setup and process inputs
    processors = {"image": process_image, "task_id": process_task_id}
    mlp_inputs = utils.factory.create_input_layers(task_description, processors)
    # ---------------------------
    # Concatenate processed inputs (skip the concat when there is only one).
    concat_in = next(iter(mlp_inputs["processed"].values()))
    if len(mlp_inputs["processed"]) > 1:
        concat_in = L.Concatenate()(list(mlp_inputs["processed"].values()))
    # ---------------------------
    # Hidden MLP stack; sizes and activations come from the global config C.
    for size, activation in zip(C["mlp_hidden_sizes"], C["mlp_hidden_activations"]):
        concat_in = L.Dense(size, activation=activation)(concat_in)
    # Final layer emits raw logits, one per output category.
    predictions = L.Dense(task_description["output"]["num_categories"])(concat_in)
    # ---------------------------
    # Create model instance
    model = tf.keras.Model(
        inputs=mlp_inputs["input_layers"],
        outputs=predictions,
        name="mlp_image_classifier",
    )
    # ---------------------------
    # Compile model for training
    dataset_type = task_description["output"]["type"]
    assert (
        dataset_type == "binary"
    ), f"MLP image classifier requires a binary classification dataset, got {dataset_type}"
    # Logits output above, so the loss applies the sigmoid internally.
    loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    metrics = tf.keras.metrics.BinaryAccuracy(name="acc")
    # ---------------------------
    return {"model": model, "loss": loss, "metrics": metrics}
| 24,935
|
def create_unet_model(N_classes, input_shape=(None, None, 1), dropout_rate=0.24, learning_rate=1e-5):
    """
    Implementation of Unet mode for multiclass semantic segmentation
    :param N_classes: Number of classes of segmentation map
    :param input_shape: input image shape
    :param dropout_rate: dropout rate
    :param learning_rate: Adam learning rate used when compiling the training model
    :return: a tuple of two models, first element is model to train and second is model to save
    """
    # make sure the sizes are divisible by 16
    # (presumably because of four 2x down-sampling stages — confirm in conv_block_down)
    if(input_shape[0] is not None): assert 16 * (input_shape[0] // 16) == input_shape[0], 'invalid dimension 0'
    if( input_shape[1] is not None): assert 16 * (input_shape[1] // 16) == input_shape[1], 'invalid dimension 1'
    in_image = Input(shape=input_shape)
    # Stem convolution before the encoder path.
    conv0 = Conv2D(32, (3, 3), activation='relu', name='conv1_0', padding='same')(in_image)
    # Encoder: each block appears to return (skip features, downsampled output)
    # — TODO confirm against conv_block_down.
    conv1, x = conv_block_down(32, dropout_rate=dropout_rate ) (conv0)
    conv2, x = conv_block_down(64, dropout_rate=dropout_rate ) (x)
    conv3, x = conv_block_down(128, dropout_rate=dropout_rate )(x)
    conv4, x = conv_block_down(256, dropout_rate=dropout_rate )(x)
    # Bottleneck block.
    x = conv_block(512, dropout_rate=dropout_rate ) (x)
    # Decoder: upsample and merge with the matching encoder skip connection.
    x = deconv_block(512, skip_layer=conv4, dropout_rate=dropout_rate ) (x)
    x = deconv_block(256, skip_layer=conv3, dropout_rate=dropout_rate ) (x)
    x = deconv_block(128, skip_layer=conv2, dropout_rate=dropout_rate ) (x)
    x = deconv_block(64, skip_layer=conv1, dropout_rate=dropout_rate ) (x)
    # Per-pixel class logits plus a softmax head for inference output.
    outp_logit = Conv2D(N_classes, (1, 1), activation='linear', padding='same', name='logit')(x)
    outp_softmax = Softmax4D(axis=3, name='segmap')(outp_logit)
    # Training model exposes both heads; the saved model only the softmax map.
    model_train = Model(inputs=in_image, outputs=[outp_logit,outp_softmax])
    model_save = Model(inputs=in_image, outputs=[outp_softmax])
    #if last channel is background
    if(N_classes <=5):
        class_indices = list(range(N_classes))[:-1] #except last one which is background
        metrics_classwise=[]
        for c in class_indices:
            # One named dice-coefficient metric per foreground class.
            fc = multiclass_dice_coef_metric(from_logits=True, class_index=c)
            fc.__name__='dmc'+str(c)
            metrics_classwise.append(fc)
        metrics = {'logit': metrics_classwise}
    else:
        metrics = {'logit': [multiclass_dice_coef_metric(from_logits=True)]} #all classes
    # Only the training model is compiled; model_save shares its weights.
    model_train.compile(optimizer=Adam(lr=learning_rate),
                        loss={'logit': multiclass_balanced_cross_entropy(from_logits=True, P=5)},
                        metrics=metrics)
    return Models(model_train, model_save)
| 24,936
|
def extension(name: str, compile_args=(), link_args=(), include_dirs=(),
              libraries=(), language='c++', **kwargs):
    """Create a setuptools Extension for a Cython source under ``src/``."""
    # Map the dotted module name onto src/<pkg>/.../<mod>.pyx.
    pyx_source = os.path.join('src', *name.split('.')) + '.pyx'
    # The project's include/ directory always comes first.
    return Extension(name,
                     [pyx_source],
                     extra_compile_args=compile_args,
                     extra_link_args=link_args,
                     include_dirs=['include', *include_dirs],
                     libraries=libraries,
                     language=language,
                     **kwargs)
| 24,937
|
def load_ipython_extension(ipython):
    """call by ipython

    IPython extension entry point (``%load_ext rqalpha``): registers the
    ``%rqalpha`` line/cell magic backed by ``run_ipython_cell``.
    """
    from rqalpha.__main__ import inject_mod_commands
    # Ensure mod-provided CLI sub-commands are registered before the magic runs.
    inject_mod_commands()
    ipython.register_magic_function(run_ipython_cell, 'line_cell', 'rqalpha')
| 24,938
|
def push_changes():
    """Pushes commit.

    Points ``origin`` at a token-authenticated GitHub URL, then pushes the
    target branch.
    """
    remote_cmd = f'git remote set-url origin https://x-access-token:{GITHUB_TOKEN}@github.com/{TARGET_REPOSITORY}'
    push_cmd = f'git push origin {TARGET_BRANCH}'
    # Re-point the remote first so the push authenticates via the token.
    sp.check_call(split(remote_cmd))
    sp.check_call(split(push_cmd))
| 24,939
|
def get_5cnn_model(image_size: int = 84,
                   bn_eps: float = 1e-3,
                   bn_momentum: float = 0.95,
                   n_classes: int = 5,
                   filter_size: int = 32,
                   levels: Optional = None,
                   spp: bool = False) -> nn.Module:
    """
    Gets a 5CNN that does not change the spatial dimension [H,W] as it processes the image.

    :param image_size: input image side length
    :param bn_eps: batch-norm epsilon
    :param bn_momentum: batch-norm momentum
    :param n_classes: number of output classes
    :param filter_size: number of conv filters per layer
    :param levels: forwarded as-is; NOTE(review): bare ``Optional`` annotation —
        element type unspecified here, see get_default_learner for the contract
    :param spp: forwarded as-is (presumably spatial pyramid pooling — TODO confirm)
    :return: the constructed learner module
    """
    # Local import keeps the heavy model module off this module's import path.
    from uutils.torch_uu.models.learner_from_opt_as_few_shot_paper import get_default_learner
    mdl: nn.Module = get_default_learner(image_size, bn_eps, bn_momentum, n_classes, filter_size, levels, spp)
    return mdl
| 24,940
|
def valid_extract_input_specification(instance_of_property, depth, language_code, named_entity_label):
    """ Checks if the input for the extraction is valid. Both to help
        the user get correct input and to sanitize it to avoid
        attacks as the values are used to generate filenames.

    Flashes a "danger" message and returns False on the first invalid
    field; returns True when every field is acceptable.
    """
    # Property must be "manual_entry", "stopwords", or a full-match Q-id.
    pattern_match = valid_instance_of_property_pattern.match(instance_of_property)
    if instance_of_property != "manual_entry" and instance_of_property != "stopwords" and( pattern_match is None or pattern_match.span()[1] != len(instance_of_property) ):
        flash(f"The value of the instance of property must start with Q and then be followed by one or more digits (e.g. Q123). Currently, it is '{instance_of_property}'.", "danger")
        return False
    # Exactly two lowercase letters (e.g. "en").
    if len(language_code) != 2 or language_code.lower() != language_code:
        flash(f"The language code must consist of two lowercase letters (e.g. en). Currently, it is '{language_code}'.", "danger")
        return False
    # Label pattern must consume the whole string, not just a prefix.
    pattern_match = valid_named_entity_label_pattern.match(named_entity_label)
    if pattern_match is None or pattern_match.span()[1] != len(named_entity_label):
        flash(f"The label must only consist of the characters a-z (upper or lowercased) or the special characters - or _ (e.g. LOC or feature_film). Currently it is '{named_entity_label}'.", "danger")
        return False
    # Narrow try body: only int() can raise here.  The previous bare
    # `except:` could mask unrelated errors raised by flash() itself.
    try:
        depth_as_int = int(depth)
    except (TypeError, ValueError):
        flash(f"The depth must be an integer >= 0. Currently it is '{depth}'.", "danger")
        return False
    if depth_as_int < 0:
        flash(f"The depth must be an integer >= 0. Currently it is '{depth}'.", "danger")
        return False
    return True
| 24,941
|
def get_company_data(mid):
    """Looks up stock ticker information for a company via its Freebase ID.

    :param mid: Freebase machine ID of the company.
    :return: list of de-duplicated company dicts (name/symbol/exchange and,
        when distinct from the name, root), or None when the Wikidata
        request returns nothing.
    """
    def binding_value(binding, key):
        """Return binding[key]["value"], or None when the key is absent."""
        try:
            return binding[key]["value"]
        except KeyError:
            return None

    query = MID_TO_TICKER_QUERY % mid
    bindings = make_wikidata_request(query)
    if not bindings:
        if mid:
            print("%s No company data found for MID: %s" % (WARNING, mid))
        return None
    # Collect the data from the response.
    companies = []
    for binding in bindings:
        name = binding_value(binding, "companyLabel")
        root = binding_value(binding, "rootLabel")
        symbol = binding_value(binding, "tickerLabel")
        exchange = binding_value(binding, "exchangeNameLabel")
        company = {"name": name,
                   "symbol": symbol,
                   "exchange": exchange}
        # Add the root if there is one.
        if root and root != name:
            company["root"] = root
        # Add to the list unless we already have the same entry.
        if company not in companies:
            print("%s Adding company data: %s" % (OK, company))
            companies.append(company)
        else:
            print("%s Skipping duplicate company data: %s" % (WARNING, company))
    return companies
| 24,942
|
def helper_promote_field_values_watch_public(handle, gpuIds):
    """
    Verifies that dcgm can update a field value watch

    Watches a field, re-watches it with a faster update frequency, and
    asserts that (a) the cache manager reflects each frequency and (b) the
    second watch updates the existing sub-watch instead of adding one.
    """
    fieldId = dcgm_fields.DCGM_FI_DEV_NAME
    fieldIds = [fieldId, ]
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds('mygroup', gpuIds)
    fieldGroup = pydcgm.DcgmFieldGroup(handleObj, "myfieldgroup", fieldIds)
    updateFreq = 100000 #100 msec
    maxKeepAge = 3600
    maxKeepSamples = 0
    #Track the number of watchers to make sure our watch promotion doesn't create another sub-watch
    #but rather updates the existing one
    numWatchersWithWatch = {}
    numWatchersAfter = {}
    #Watch the fields
    groupObj.samples.WatchFields(fieldGroup, updateFreq, maxKeepAge, maxKeepSamples)
    #Get watcher info after our watch and verify that the updateFrequency matches
    for gpuId in gpuIds:
        fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handleObj.handle, gpuId, fieldId)
        numWatchersWithWatch[gpuId] = fieldInfo.numWatchers
        assert fieldInfo.monitorFrequencyUsec == updateFreq, "after watch: fieldInfo.monitorFrequencyUsec %d != updateFreq %d" % \
            (fieldInfo.monitorFrequencyUsec, updateFreq)
    #Update the watch with a faster update frequency
    updateFreq = 50000 #50 msec
    groupObj.samples.WatchFields(fieldGroup, updateFreq, maxKeepAge, maxKeepSamples)
    #Get watcher info after our second watch and verify that the updateFrequency matches
    for gpuId in gpuIds:
        fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handleObj.handle, gpuId, fieldId)
        numWatchersAfter[gpuId] = fieldInfo.numWatchers
        assert fieldInfo.monitorFrequencyUsec == updateFreq, "after watch: fieldInfo.monitorFrequencyUsec %d != updateFreq %d" % \
            (fieldInfo.monitorFrequencyUsec, updateFreq)
    # Same watcher counts before/after proves the watch was promoted in place.
    assert numWatchersWithWatch == numWatchersAfter, "numWatchersWithWatch (%s) != numWatchersAfter (%s)" % \
        (str(numWatchersWithWatch), str(numWatchersAfter))
| 24,943
|
def ShowPortSendRights(cmd_args=[], cmd_options={}):
    """ Display a list of send rights across all tasks for a given port.
        Usage: (lldb) showportsendrights <ipc_port_t>

        NOTE: Python 2 lldb macro (uses print statements); scans every task's
        IPC space for entries holding a send right to the given port.
    """
    if not cmd_args:
        raise ArgumentError("no port address provided")
    port = kern.GetValueFromAddress(cmd_args[0], 'struct ipc_port *')
    i = 1
    for t in kern.tasks:
        # Write a progress line. Using stderr avoids automatic newline when
        # writing to stdout from lldb. Blank spaces at the end clear out long
        # lines.
        sys.stderr.write("checking {:s} ({}/{})...{:30s}\r".format(Cast(t.bsd_info, 'proc_t').p_name, i, len(kern.tasks), ''))
        i += 1
        entries = GetSpaceSendRightEntries(t.itk_space, port)
        # Only print tasks that actually hold a matching send right.
        if entries:
            print GetTaskIPCSummary.header
            print GetTaskIPCSummary(t)
            print '\t' + GetIPCEntrySummary.header
            for entry in entries:
                print "\t" + GetIPCEntrySummary(entry)
| 24,944
|
def embed_data_into_square_lattice(data):
    """Insert MR image into square 2D array.

    The image is centered in a zero-filled square whose side equals the
    largest input dimension.
    """
    side = np.array(data.shape).max()
    row_offset = int((side - data.shape[0]) / 2.)
    col_offset = int((side - data.shape[1]) / 2.)
    square = np.zeros((side, side))
    square[row_offset:row_offset + data.shape[0],
           col_offset:col_offset + data.shape[1]] = data
    return square
| 24,945
|
def is_partial_link_text_selector(selector):
    """
    A basic method to determine if a selector is a partial link text selector.

    Returns True when the selector starts with any recognized
    partial-link-text prefix, False otherwise.
    """
    # str.startswith accepts a tuple of prefixes, replacing the chained `or`s.
    return selector.startswith((
        "partial_link=",
        "partial_link_text=",
        "partial_text=",
        "p_link=",
        "p_link_text=",
        "p_text=",
    ))
| 24,946
|
def test_get_virtual_points_vector():
    """
    Here, we test that when :math:`v_1 = v_2 = 0`, the virtual points
    :math:`x'` are in fact points between :math:`-R_0\sin(\omega T/2)`
    and :math:`R_0\sin(\omega T/2)`
    """
    x_prime = DCC.get_virtual_points_vector(0, 0)
    # Convert omega from degrees to radians before building the expectation.
    omega = p.omega / 360 * 2 * np.pi
    # linspace default count (50 points) must match the implementation's.
    x_prime_theo = p.R0 * np.linspace(-np.sin(omega * p.T / 2), np.sin(omega * p.T / 2))
    np.testing.assert_almost_equal(x_prime, x_prime_theo)
| 24,947
|
def scell(obj, dims, method=1, **kwds):
    """Build supercell based on `dims`.
    Uses coords_frac and cell.
    Parameters
    ----------
    obj : Structure or Trajectory
    dims : tuple (nx, ny, nz) for a N = nx * ny * nz supercell
    method : int, optional
        Switch between numpy-ish (1) or loop (2) implementation. (2) should
        always produce correct results but is sublty slower. Only for
        Structure.
    **kwds : see :func:`scell_mask`
    Notes
    -----
    The mask for the supercell is created by :func:`scell_mask` and applied to
    each atom in `obj` one after another, i.e. each atom is repeated nx*ny*nz
    times according to the mask pattern, independently of how the pattern looks
    like (e.g. the `direc` parameter in :func:`scell_mask`). So, just as rows
    in np.repeat(), we have:
    | original: symbols=[A,B,C,D]
    | 2 x 1 x 1: symbols=[A,A,B,B,C,C,D,D]
    | nx x ny x nz: symbols=[(nx*ny*nz) x A, (nx*ny*nz) x B, ...]
    Returns
    -------
    scell : Structure
    """
    # Place each atom N = nx*ny*nz times in the supercell, i.e. copy unit cell
    # N times. Actually, N-1, since ix=iy=iz=0 is the unit cell itself.
    #
    # Let k = {x,y,z}.
    #
    # mask[j,:] = [ix, iy, iz], ik = integers (floats actually, but
    # mod(ik, floor(ik)) == 0.0)
    #
    # original cell:
    # coords_frac[i,:] = position vect of atom i in the unit cell in *crystal*
    # coords!!
    #
    # super cell:
    # sc_coords_frac[i,:] = coords_frac[i,:] + [ix, iy, iz]
    # for all permutations (see scell_mask()) of ix, iy, iz.
    # ik = 0, ..., nk - 1
    #
    # sc_coords_frac : crystal coords w.r.t the *old* cell, i.e. the entries are in
    # [0,(max(dims))], not [0,1], is scaled below
    #
    # Default mask direction if the caller did not choose one.
    if 'direc' not in kwds:
        kwds['direc'] = 1
    mask = scell_mask(*tuple(dims), **kwds)
    nmask = mask.shape[0]
    # Scale each cell vector by the corresponding repetition count.
    if obj.is_struct:
        sc_cell = obj.cell * np.asarray(dims)[:,None]
        container = Structure
    elif obj.is_traj:
        # (nstep,3,3) * (1,3,1) -> (nstep, 3,3)
        sc_cell = obj.cell * np.asarray(dims)[None,:,None]
        container = Trajectory
    else:
        raise Exception("unknown input type")
    if method == 1:
        # np.repeat keeps all copies of one atom adjacent (see docstring note).
        sc_symbols = np.array(obj.symbols).repeat(nmask).tolist() if (obj.symbols
            is not None) else None
        if obj.is_struct:
            # (natoms, 1, 3) + (1, nmask, 3) -> (natoms, nmask, 3)
            sc_coords_frac = (obj.coords_frac[:,None,:]
                              + mask[None,...]).reshape(obj.natoms*nmask,3)
        elif obj.is_traj:
            # cool, eh?
            # (nstep, natoms, 1, 3) + (1, 1, nmask, 3) -> (nstep, natoms, nmask, 3)
            sc_coords_frac = (obj.coords_frac[...,None,:]
                              + mask[None,None,...]).reshape(obj.nstep,obj.natoms*nmask,3)
        else:
            raise Exception("huh!?")
    # explicit loop version for testing, this is the reference implementation,
    # only for Structure
    elif method == 2:
        if obj.is_struct:
            sc_symbols = []
            sc_coords_frac = np.empty((nmask*obj.natoms, 3), dtype=float)
            k = 0
            for iatom in range(obj.natoms):
                for j in range(nmask):
                    if obj.symbols is not None:
                        sc_symbols.append(obj.symbols[iatom])
                    sc_coords_frac[k,:] = obj.coords_frac[iatom,:] + mask[j,:]
                    k += 1
        else:
            raise Exception("method=2 only implemented for Structure")
    else:
        raise Exception("unknown method: %s" %repr(method))
    # Rescale from old-cell crystal coords back into [0,1] w.r.t. the supercell.
    sc_coords_frac[...,0] /= dims[0]
    sc_coords_frac[...,1] /= dims[1]
    sc_coords_frac[...,2] /= dims[2]
    return container(coords_frac=sc_coords_frac,
                     cell=sc_cell,
                     symbols=sc_symbols)
| 24,948
|
def create_config(device: str = 'CPU', *,
                  per_process_gpu_memory_fraction: float = 0.0,
                  log_device_placement: bool = False) -> tf.ConfigProto:
    """Creates tf.ConfigProto for specifi device"""
    session_config = tf.ConfigProto(log_device_placement=log_device_placement)
    if not is_gpu(device):
        # CPU-only: hide every GPU from the session.
        session_config.device_count['GPU'] = 0
        return session_config
    if per_process_gpu_memory_fraction > 0.0:
        # Reserve a fixed fraction of GPU memory up front.
        session_config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction
    else:
        # Otherwise let GPU memory usage grow on demand.
        session_config.gpu_options.allow_growth = True
    return session_config
| 24,949
|
def dump_feat_name(feat_names, feat_name_file):
    """
    save feat_names to feat_name_file

    Writes one "('<name>', SimpleTransform...)," line per feature name.
    count*/pos_of* features get the configured count transform.
    """
    # Text mode: we write str lines. The previous "wb" mode raised
    # TypeError on Python 3 (str written to a binary file).
    with open(feat_name_file, "w") as f:
        for feat_name in feat_names:
            if feat_name.startswith("count") or feat_name.startswith("pos_of"):
                f.write("('%s', SimpleTransform(config.count_feat_transform)),\n" % feat_name)
            else:
                f.write("('%s', SimpleTransform()),\n" % feat_name)
| 24,950
|
def format_childproc(cp: Union[Event, Dict]):
    """Format childproc event into single line."""
    timestamp = as_configured_timezone(cp.get('event_timestamp'))
    cmdline = cp.get('childproc_cmdline')
    guid = cp.get('childproc_process_guid')
    return " @{}: {} - {}".format(timestamp, cmdline, guid)
| 24,951
|
def create_not_mnist_doubleset() -> (list, list):
    """
    A function which iterates through notMNIST images and sorts into two lists of images and arrays.
    :return x: images as ndarrays
    :return y: labels of images

    Loads from the cached .npz when available; otherwise parses the raw
    image files and writes the cache for next time.
    """
    try:
        with np.load("./notMNIST_all/all_data.npz") as f:
            x, y = f['x'], f['y']
    except FileNotFoundError:
        # Have to parse the image files, if the .npz numpy file does not exist
        x = []
        y = []
        for image_name in os.listdir("notMNIST_all"):
            # Skip macOS folder metadata.
            if image_name == ".DS_Store":
                continue
            try:
                image_as_array = np.asarray(Image.open("notMNIST_all/" + image_name))
                scale = float(np.max(image_as_array))
                # Scale data so the input is in range [0, 1]
                # and the class is in the range [0, 1, .., no_classes - 1]
                # (label comes from the first letter of the filename: A -> 0, B -> 1, ...).
                x.append(image_as_array / scale)
                y.append(ord(image_name[0]) - ord("A"))
            except (FileNotFoundError, OSError) as e:
                # Best-effort: corrupt/unreadable images are skipped, not fatal.
                print(f"Skipping the file {image_name}, as it gave error {e}")
        x, y = np.array(x), np.array(y)
        np.savez(file="./notMNIST_all/all_data.npz", x=x, y=y)  # Save data so we do not have to parse next time.
    return x, y
| 24,952
|
def format_maven_jar_dep_name(group_id, artifact_id, repository = DEFAULT_REPOSITORY_NAME):
    """
    Build the Bazel label for a maven jar target.

    group_id: str
    artifact_id: str
    repository: str = "maven"
    """
    jar_name = format_maven_jar_name(group_id, artifact_id)
    return "@{}//:{}".format(repository, jar_name)
| 24,953
|
def PCO_GetCameraName(handle):
    """
    This function retrieves the name of the camera.

    :param handle: open camera HANDLE
    :return: camera name decoded as ASCII (raw buffer contents, so trailing
        NUL padding from the fixed 41-byte buffer is included)
    """
    f = pixelfly_dll.PCO_GetCameraName
    # Declare the C signature: (HANDLE, char* out buffer, WORD buffer size) -> int.
    f.argtypes = (ctypes.wintypes.HANDLE, ctypes.c_char_p, ctypes.wintypes.WORD)
    f.restype = ctypes.c_int
    # 41 bytes — presumably 40 characters + NUL per the PCO SDK; TODO confirm.
    cameraName = ctypes.create_string_buffer(41)
    ret_code = f(handle, cameraName, 41)
    # Raises/reports on a non-success return code.
    PCO_manage_error(ret_code)
    return cameraName.raw.decode("ascii")
| 24,954
|
def import_object(absolute_name):
    """
    Import and return an object by its qualified name.
    :param absolute_name: expected in the ``module:name`` format
    :return: the object the name refers to
    :raises MLPMJobException: when the name is malformed, the module cannot
        be found, or the module has no such attribute
    """
    try:
        module_name, obj_name = absolute_name.split(':')
        # Reuse an already-imported module when possible.
        module = sys.modules.get(module_name, None)
        if not module:
            module = import_module(module_name)
        obj = getattr(module, obj_name)
        return obj
    except ValueError:
        # split() did not yield exactly two parts -> malformed name.
        raise MLPMJobException(MLPMJobErrorEnum.BAD_FUNC_NAME,
                               f'函数名`{absolute_name}`不正确,应该为 `module:name` 的形式。')
    except ModuleNotFoundError:
        # The module part could not be imported.
        raise MLPMJobException(MLPMJobErrorEnum.BAD_FUNC_NAME,
                               f'没有找到您的函数名`{absolute_name}`所对应的对象。')
    except AttributeError:
        # The module exists but lacks the requested attribute.
        raise MLPMJobException(MLPMJobErrorEnum.BAD_FUNC_NAME,
                               f'没有找到函数名`{absolute_name}`所对应的对象。')
| 24,955
|
def issingleton(var):
    """ If isunitset(var) is True, this function returns True,
        otherwise isscalar(var) is returned.
    """
    # A singleton is defined here as either a unit set or a scalar.
    return True if isunitset(var) else isscalar(var)
| 24,956
|
def satisfiesF(L):
    """
    Assumes L is a list of strings
    Assume function f is already defined for you and it maps a string to a Boolean
    Mutates L such that it contains all of the strings, s, originally in L such
    that f(s) returns True, and no other elements. Remaining elements in L
    should be in the same order.
    Returns the length of L after mutation
    """
    # Filter into a new list, then assign through the slice so the SAME list
    # object is mutated in place (callers hold a reference to L).  This is
    # O(n); the previous pop()-inside-while version was O(n^2) because each
    # pop(i) shifts the remaining tail.  f is still called exactly once per
    # element, in original order.
    L[:] = [s for s in L if f(s)]
    return len(L)
| 24,957
|
def calc_circle_radius(area: float) -> float:
    """
    Calculate radius from area.
    >>> calc_circle_radius(10.0)
    1.7841241161527712
    """
    assert not area < 0
    # r = sqrt(A / pi); unwrap the numpy scalar into a plain Python float.
    area_over_pi = area / np.pi
    result = numpy_to_python_type(np.sqrt(area_over_pi))
    assert isinstance(result, float)
    return result
| 24,958
|
def load_imgs(paths, target_size):
    """Load images from `paths`.

    NOTE(review): `paths` appears to be a DataFrame with columns `p1`, `p2`
    (image file paths) and `target` (pair label) — confirm against callers.
    `target_size` must include the channel axis for the assignment below to
    line up with img_to_array's output — TODO confirm.

    Returns (pairs, y): pairs of shape (n, 2, *target_size) scaled to [0, 1]
    as float32, and y as a uint8 label array.
    """
    pairs = np.empty((len(paths), 2, *target_size), dtype=np.float32)
    for i, row in tqdm(paths.iterrows(), total=len(pairs)):
        # Scale 8-bit pixel values into [0, 1].
        img1 = img_to_array(load_img(row.p1, target_size=target_size)) / 255
        img2 = img_to_array(load_img(row.p2, target_size=target_size)) / 255
        # Stack the two images of the pair along a new leading axis.
        pair = np.stack([img1, img2], axis=0)
        pairs[i, :] = pair
    y = paths.target.values.astype(np.uint8)
    return pairs, y
| 24,959
|
async def test_handle_pubsub_msg_old(mocker, monkeypatch, consumer,
                                     publish_time, raw_msg_data,
                                     creation_audit_log_data,
                                     caplog, pubsub_msg, mock_get_and_validate,
                                     mock_create_gevent_msg):
    """Too-old message: acked and counted, but not validated or forwarded."""
    # Pin "now" so the computed message age is deterministic.
    mocker.patch('datetime.datetime.now', lambda tz: publish_time)
    # Make the message exactly one second older than the allowed maximum.
    pubsub_msg.publish_time = publish_time - datetime.timedelta(
        seconds=(consumer._max_msg_age + 1))
    context = {
        'plugin': 'event-consumer',
        'msg_id': pubsub_msg.message_id,
        'msg_age': consumer._max_msg_age + 1
    }
    await consumer._handle_pubsub_msg(pubsub_msg)
    assert 2 == len(caplog.records)
    # The drop is recorded as a metric and the message is acked anyway.
    consumer.metrics._incr_mock.assert_called_once_with(
        'msg-too-old', value=1, context=context)
    pubsub_msg.ack.assert_called_once_with()
    # No further processing should have happened.
    mock_get_and_validate.assert_not_called()
    mock_create_gevent_msg.assert_not_called()
    assert consumer.success_channel.empty()
| 24,960
|
def load_dict_from_hdf5(h5_filepath):
    """
    Load h5 file as a dict

    Groups become nested dicts; datasets are materialized as their
    in-memory values.
    """
    def recursively_load_dict_contents_from_group(h5_obj, path):
        """
        Recursively load a dict from h5 file
        """
        ans = {}
        for key, item in h5_obj[path].items():
            if isinstance(item, h5py._hl.dataset.Dataset):
                # Dataset.value was removed in h5py 3.0; `[()]` reads the
                # whole dataset and works on both h5py 2.x and 3.x.
                ans[key] = item[()]
            elif isinstance(item, h5py._hl.group.Group):
                ans[key] = recursively_load_dict_contents_from_group(h5_obj, path + key + '/')
        return ans
    with h5py.File(h5_filepath, 'r') as h5_obj:
        return recursively_load_dict_contents_from_group(h5_obj, '/')
| 24,961
|
def interactive_grid_shape(grid, max_n=200, plotfxn=None, **kwargs):
    """ Interactive ipywidgets for select the shape of a grid
    Parameters
    ----------
    grid : pygridgen.Gridgen
        The base grid from which the grids of new shapes (resolutions) will be
        generated.
    max_n : int (default = 200)
        The maximum number of possible cells in each dimension.
    plotfxn : callable, optional
        Function that plots the grid to provide user feedback. The call
        signature of this function must accept to positional parameters for the
        x- and y-arrays of node locations, and then accept any remaining keyword
        arguments. If not provided, *pygridtools.viz.plot_cells* is used.
    Additional Parameters
    ---------------------
    All remaining keyword arguments are passed to *plotfxn*
    Returns
    -------
    newgrid : pygridgen.Gridgen
        The reshaped grid
    widget : ipywidgets.interactive
        Collection of IntSliders for changing the number cells along each axis
        in the grid.
    Examples
    --------
    >>> from pygridgen import grid
    >>> from pygridtools import viz, iotools
    >>> def make_fake_bathy(shape):
    ...     j_cells, i_cells = shape
    ...     y, x = numpy.mgrid[:j_cells, :i_cells]
    ...     z = (y - (j_cells // 2))** 2 - x
    ...     return z
    >>> def plot_grid(x, y, ax=None):
    ...     shape = x[1:, 1:].shape
    ...     bathy = make_fake_bathy(shape)
    ...     if not ax:
    ...         fig, ax = pyplot.subplots(figsize=(8, 8))
    ...     ax.set_aspect('equal')
    ...     return viz.plot_cells(x, y, ax=ax, cmap='Blues', colors=bathy, lw=0.5, ec='0.3')
    >>> d = numpy.array([
    ...     (13, 16, 1.00), (18, 13, 1.00), (12, 7, 0.50),
    ...     (10, 10, -0.25), ( 5, 10, -0.25), ( 5, 0, 1.00),
    ...     ( 0, 0, 1.00), ( 0, 15, 0.50), ( 8, 15, -0.25),
    ...     (11, 13, -0.25)])
    >>> g = grid.Gridgen(d[:, 0], d[:, 1], d[:, 2], (75, 75), ul_idx=1, focus=None)
    >>> new_grid, widget = iotools.interactive_grid_shape(g, plotfxn=plot_grid)
    """
    # Fall back to the library's default cell plotter.
    if not plotfxn:
        plotfxn = viz.plot_cells
    # Sliders share bounds; continuous_update=False defers redraws until release.
    common_opts = dict(min=2, max=max_n, continuous_update=False)
    # The grid object itself is returned alongside the widget; _change_shape
    # mutates/reshapes it as the sliders move.
    return grid, ipywidgets.interactive(
        _change_shape,
        g=ipywidgets.fixed(grid),
        irows=ipywidgets.IntSlider(value=grid.ny, **common_opts),
        jcols=ipywidgets.IntSlider(value=grid.nx, **common_opts),
        plotfxn=ipywidgets.fixed(plotfxn),
        plotopts=ipywidgets.fixed(kwargs)
    )
| 24,962
|
def test_create_test_result_throws_exception():
    """Test that unknown status strings should result in exception.
    """
    # Test.Result.create must reject any status string it does not know.
    with pytest.raises(ValueError):
        Test.Result.create("unknown_result")
| 24,963
|
def generate_initial_state(initial_pos: Transform, initial_speed: Optional[float] = None) -> AgentState:
    """
    Create an AgentState at the given transform, optionally moving forward.

    :param initial_pos: starting transform for the agent
    :param initial_speed: Initial speed in km/h; when None the agent is at rest
    """
    from lgsvl.utils import transform_to_forward
    state = AgentState()
    state.transform = initial_pos
    if initial_speed is not None:
        # km/h -> m/s, directed along the transform's forward vector.
        state.velocity = (initial_speed / 3.6) * transform_to_forward(state.transform)
    return state
| 24,964
|
def resolve_ami(ami=None, arch="x86_64", tags=frozenset(), tag_keys=frozenset()):
    """
    Find an AMI by ID, name, or tags.
    - If an ID is given, it is returned with no validation; otherwise, selects the most recent AMI from:
      - All available AMIs in this account with the Owner tag equal to this user's IAM username (filtered by tags given);
      - If no AMIs found, all available AMIs in this account with the AegeaVersion tag present (filtered by tags given);
      - If no AMIs found, all available AMIs in this account (filtered by tags given).
    Return the AMI with the most recent creation date.
    """
    assert arch in {"x86_64", "arm64"}
    # Anything that already looks like an AMI ID is passed through untouched.
    if ami is None or not ami.startswith("ami-"):
        if ami is None:
            # No name given: consider every available self-owned AMI for this arch.
            filters = dict(Owners=["self"],
                           Filters=[dict(Name="state", Values=["available"]), dict(Name="architecture", Values=[arch])])
        else:
            # A non-ID string is treated as an AMI name filter.
            filters = dict(Owners=["self"], Filters=[dict(Name="name", Values=[ami])])
        all_amis = resources.ec2.images.filter(**filters)
        if tags:
            all_amis = filter_by_tags(all_amis, **tags)
        if tag_keys:
            all_amis = filter_by_tag_keys(all_amis, *tag_keys)
        # Preference 1: AMIs tagged with this user's IAM username as Owner.
        current_user_amis = all_amis.filter(Filters=[dict(Name="tag:Owner", Values=[ARN.get_iam_username()])])
        amis = sorted(current_user_amis, key=lambda x: x.creation_date)
        if len(amis) == 0:
            # Preference 2: AMIs carrying the AegeaVersion tag.
            aegea_amis = all_amis.filter(Filters=[dict(Name="tag-key", Values=["AegeaVersion"])])
            amis = sorted(aegea_amis, key=lambda x: x.creation_date)
            if len(amis) == 0:
                # Preference 3: any remaining candidate.
                amis = sorted(all_amis, key=lambda x: x.creation_date)
        if not amis:
            raise AegeaException("Could not resolve AMI {}".format(dict(tags, ami=ami)))
        # Most recent creation date wins.
        ami = amis[-1].id
    return ami
| 24,965
|
def _MigrateTestLookupPatterns(old_pattern, new_pattern):
    """Enumerates individual test migration tasks and enqueues them.
    Typically, this function is called by a request initiated by the user.
    The purpose of this function is to queue up a set of requests which will
    do all of the actual work.
    Args:
      old_pattern: Test path pattern for old names.
      new_pattern: Test path pattern for new names.
    Raises:
      BadInputPatternError: Something was wrong with the input patterns.
    """
    futures = []
    tests = list_tests.GetTestsMatchingPattern(old_pattern, list_entities=False)
    for test in tests:
        old_test_key = utils.TestKey(test)
        # Derive the post-migration key; validation may raise on bad patterns.
        new_test_key = utils.TestKey(
            _ValidateAndGetNewTestPath(old_test_key.id(), new_pattern))
        # Queue one CREATE task per test to be migrated.
        futures.append(_QueueTask({
            'old_test_key': old_test_key.urlsafe(),
            'new_test_key': new_test_key.urlsafe(),
            'status': _MIGRATE_TEST_CREATE
        }))
    # Block until every enqueue has completed (surfaces enqueue errors).
    for f in futures:
        f.get_result()
| 24,966
|
def _filter_colors(hcl, ihue, nhues, minsat):
"""
Filter colors into categories.
Parameters
----------
hcl : tuple
The data.
ihue : int
The hue column.
nhues : int
The total number of hues.
minsat : float
The minimum saturation used for the "grays" column.
"""
breakpoints = np.linspace(0, 360, nhues)
gray = hcl[1] <= minsat
if ihue == 0:
return gray
color = breakpoints[ihue - 1] <= hcl[0] < breakpoints[ihue]
if ihue == nhues - 1:
color = color or color == breakpoints[ihue] # endpoint inclusive
return not gray and color
| 24,967
|
def add_file(conn: apsw.Connection, **file_info: Any) -> None:
    """Adds a FileInfo row to the database, with some default values.
    Args:
        conn: The database to modify.
        file_info: A mapping from column names to binding values.
    """
    # Defaults first, so caller-supplied values override them.
    row = {"updated_at": 0, "file_index": 0, "id": 111, **file_info}
    # The schema stores paths as raw bytes, never str.
    assert isinstance(row["path"], bytes)
    add_fixture_row(conn, "file_info", **row)
| 24,968
|
def test_convert(fx_asset):
    """Converts the image format.

    Round-trips a JPEG through convert('png'), checks the converted copy
    survives a save/reload, and verifies invalid format arguments raise.
    """
    with Image(filename=str(fx_asset.join('mona-lisa.jpg'))) as img:
        with img.convert('png') as converted:
            assert converted.format == 'PNG'
            strio = io.BytesIO()
            converted.save(file=strio)
            strio.seek(0)
            # Re-open the saved bytes to confirm the format persisted.
            with Image(file=strio) as png:
                assert png.format == 'PNG'
        # Unknown format name -> ValueError.
        with raises(ValueError):
            img.convert('HONG')
        # Non-string argument -> TypeError.
        with raises(TypeError):
            img.convert(123)
| 24,969
|
def SWO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "swo.owl", **kwargs
) -> Graph:
    """Return SWO graph
    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "swo.owl"
        Version to retrieve
        The available versions are:
        - swo.owl
    """
    # Delegate to the generic retriever ("SWO" from the "kgobo" repository)
    # and immediately call it to download/load and construct the Graph.
    return AutomaticallyRetrievedGraph(
        "SWO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
        cache_path, cache_sys_var, kwargs
    )()
| 24,970
|
def upload_needed_files(handle, bucket, prefix, dir_path, kind, iter):
    """
    upload the needed local files of a particular kind

    One file per uuid in `iter`; returns the number of files uploaded.
    """
    extension = f".{kind}"
    # Destination directory does not depend on the uuid: hoist it out of the loop.
    grid_dir = prefix + "/pub/" + kind + "/"
    count = 0
    for uuid in iter:
        file_name = uuid + extension
        local_path = dir_path / file_name
        upload_file(handle, local_path.as_posix(), grid_dir + file_name)
        count += 1
    return count
| 24,971
|
def detect_tag(filename):
    """Return type and position of ID3v2 tag in filename.
    Returns (tag_class, offset, length), where tag_class
    is either Tag22, Tag23, or Tag24, and (offset, length)
    is the position of the tag in the file.
    Raises NoTagError when no ID3v2 tag is present and TagError when the
    tag version is unsupported.
    """
    with fileutil.opened(filename, "rb") as file:
        file.seek(0)
        # The ID3v2 header is a fixed 10 bytes at the start of the file.
        header = file.read(10)
        file.seek(0)
        if len(header) < 10:
            raise NoTagError("File too short")
        if header[0:3] != b"ID3":
            raise NoTagError("ID3v2 tag not found")
        # header[3] = major version, header[4] = revision (must be 0).
        if header[3] not in _tag_versions or header[4] != 0:
            raise TagError("Unknown ID3 version: 2.{0}.{1}"
                           .format(*header[3:5]))
        cls = _tag_versions[header[3]]
        offset = 0
        # Size field is a syncsafe int that excludes the 10-byte header itself.
        length = Syncsafe.decode(header[6:10]) + 10
        # An ID3v2.4 tag may carry an extra 10-byte footer, flagged in header[5].
        if header[3] == 4 and header[5] & _TAG24_FOOTER:
            length += 10
        return (cls, offset, length)
| 24,972
|
def plot_function_interpolations(function, support_points, interpolations, bases):
    """ Plot a grid with the given function, the support points, interpolation and bases in each plot.

    :param function: (x, y) arrays of the reference function (red line)
    :param support_points: list of (x, y) arrays, one entry per subplot (black dots)
    :param interpolations: list of (x, y) arrays, one entry per subplot (blue line)
    :param bases: list of lists of callables; each inner list holds the basis
        functions drawn in the second figure's matching subplot
    """
    x_f, y_f = function
    # Figure 1: function vs. support points vs. interpolation, 3x3 grid.
    fig1 = plt.figure()
    for i in range(len(support_points)):
        x_s, y_s = support_points[i]
        x_i, y_i = interpolations[i]
        p = fig1.add_subplot(3, 3, i + 1)
        p.grid(True)
        # Shared axis limits keep the subplots visually comparable.
        p.set_xlim(-5.3, 5.3)
        p.set_xticks([-5, 0, 5])
        p.set_ylim(-1.2, 2.2)
        p.plot(x_f, y_f, 'r-')
        p.plot(x_s, y_s, 'ko')
        p.plot(x_i, y_i, 'b-')
    # Figure 2: the basis functions used by each interpolation.
    fig2 = plt.figure()
    for i in range(len(bases)):
        p1 = fig2.add_subplot(3, 3, i + 1)
        p1.grid(True)
        p1.set_xlim(-5.3, 5.3)
        p1.set_xticks([-5, 0, 5])
        p1.set_ylim(-1.2, 2.2)
        for base_func in bases[i]: plt.plot(x_f, base_func(x_f), '-')
    plt.show()
| 24,973
|
def merge_peaks(peaks, start_merge_at, end_merge_at,
                max_buffer=int(1e5)):
    """Merge specified peaks with their neighbors, return merged peaks
    :param peaks: Record array of strax peak dtype.
    :param start_merge_at: Indices to start merge at
    :param end_merge_at: EXCLUSIVE indices to end merge at
    :param max_buffer: Maximum number of samples in the sum_waveforms of
    the resulting peaks (after merging).
    Peaks must be constructed based on the properties of constituent peaks,
    it being too time-consuming to revert to records/hits.
    """
    assert len(start_merge_at) == len(end_merge_at)
    new_peaks = np.zeros(len(start_merge_at), dtype=peaks.dtype)
    # Do the merging. Could numbafy this to optimize, probably...
    buffer = np.zeros(max_buffer, dtype=np.float32)
    for new_i, new_p in enumerate(new_peaks):
        old_peaks = peaks[start_merge_at[new_i]:end_merge_at[new_i]]
        # Common time resolution: gcd of constituent dt's, so every
        # constituent waveform can be upsampled onto one grid.
        common_dt = np.gcd.reduce(old_peaks['dt'])
        first_peak, last_peak = old_peaks[0], old_peaks[-1]
        new_p['channel'] = first_peak['channel']
        # The new endtime must be at or before the last peak endtime
        # to avoid possibly overlapping peaks
        new_p['time'] = first_peak['time']
        new_p['dt'] = common_dt
        new_p['length'] = \
            (strax.endtime(last_peak) - new_p['time']) // common_dt
        # re-zero relevant part of buffer (overkill? not sure if
        # this saves much time)
        buffer[:min(
            int(
                (
                    last_peak['time']
                    + (last_peak['length'] * old_peaks['dt'].max())
                    - first_peak['time']) / common_dt
            ),
            len(buffer)
        )] = 0
        for p in old_peaks:
            # Upsample the sum waveform into the buffer
            upsample = p['dt'] // common_dt
            n_after = p['length'] * upsample
            i0 = (p['time'] - new_p['time']) // common_dt
            # Divide by `upsample` so the total area is conserved.
            buffer[i0: i0 + n_after] = \
                np.repeat(p['data'][:p['length']], upsample) / upsample
            # Handle the other peak attributes
            new_p['area'] += p['area']
            new_p['area_per_channel'] += p['area_per_channel']
            new_p['n_hits'] += p['n_hits']
            new_p['saturated_channel'][p['saturated_channel'] == 1] = 1
        # Downsample the buffer into new_p['data']
        strax.store_downsampled_waveform(new_p, buffer)
        new_p['n_saturated_channels'] = new_p['saturated_channel'].sum()
        # Use the tight coincidence of the peak with the highest amplitude
        i_max_subpeak = old_peaks['data'].max(axis=1).argmax()
        new_p['tight_coincidence'] = old_peaks['tight_coincidence'][i_max_subpeak]
        # If the endtime was in the peaks we have to recompute it here
        # because otherwise it will stay set to zero due to the buffer
        if 'endtime' in new_p.dtype.names:
            new_p['endtime'] = strax.endtime(last_peak)
    return new_peaks
| 24,974
|
def batchedpatternsgenerator(generatorfunction):
    """Decorator stacking (X, y) pattern tuples into batched arrays.

    A specialized version of ``batchedgenerator`` for generators that
    yield individual data patterns as tuples of numpy arrays ``(X, y)``.
    Patterns grouped into a batch are stacked row-wise, so each yielded
    batch contains one pattern per row.

    The wrapped generator gains a ``batchsize`` parameter (handled by
    ``batchedgenerator``) controlling how many patterns go in a batch.
    """
    def modgenerator(*args, **kwargs):
        batched = batchedgenerator(generatorfunction)
        for group in batched(*args, **kwargs):
            xs, ys = zip(*group)
            yield np.stack(xs), np.stack(ys)
    return modgenerator
| 24,975
|
def reduce_min(raw_tensor, axis, keepdims=False):
    """Compute the minimum of ``raw_tensor`` along ``axis`` (float16 only).

    Args:
        raw_tensor (tvm.tensor.Tensor): input tensor
        axis (Union[int, list]): reduce axis, in range
            ``[-len(raw_tensor.shape), len(raw_tensor.shape) - 1]``
        keepdims (bool): if True, retain reduced dimensions with length 1

    Returns:
        tvm.tensor.Tensor: the reduced tensor
    """
    # Delegate to the shared single-op reduction helper.
    res = single_reduce_op(raw_tensor, axis, "reduce_min", keepdims)
    return res
| 24,976
|
def align2local(seq):
    """Map alignment-space indices to local (ungapped) sequence indices.

    Returns a list with one entry per alignment column: the index of the
    most recent non-gap character seen so far, e.g.

        'ATG---CTG-CG' ==> [0, 1, 2, 2, 2, 2, 3, 4, 5, 5, 6, 7]

    (The previous docstring example was missing one repeated index for
    the run of three gaps.)  Used to go from align -> local space.

    Note: leading gaps map to -1, since no local position exists yet.
    """
    i = -1
    lookup = []
    for c in seq:
        if c != "-":
            # Advance the local index only on real residues; gap columns
            # repeat the index of the preceding residue.
            i += 1
        lookup.append(i)
    return lookup
| 24,977
|
def load_nodegraph(filename):
    """Load a nodegraph object from ``filename`` and return it.

    Args:
        filename: path of the nodegraph file to read.

    Returns:
        The loaded ``_Nodegraph`` instance.
    """
    # The (1, [1]) sizing is a placeholder; load() replaces the table
    # contents with what is stored on disk.
    graph = _Nodegraph(1, [1])
    graph.load(filename)
    return graph
| 24,978
|
def get_bprop_sqrt(self):
    """Grad definition for `Sqrt` operation."""
    mul = P.Mul()
    fill = P.Fill()
    real_div = P.RealDiv()
    sqrt_op = P.Sqrt()
    get_dtype = P.DType()

    def bprop(x, out, dout):
        # d/dx sqrt(x) = 0.5 / sqrt(x); build the 0.5 tensor with the
        # same dtype/shape as x, then scale the incoming gradient.
        half = fill(get_dtype(x), shape_op(x), 0.5)
        dx = mul(dout, real_div(half, sqrt_op(x)))
        return (dx,)

    return bprop
| 24,979
|
def hit_or_stand(deck, hand):
    """
    Prompt the Player to Hit or Stand.

    Repeats the prompt until the player enters a word starting with 'h'
    (hit: draw a card into `hand`) or 's' (stand: end the player's turn
    by setting the global `playing` flag to False).

    :param deck: the deck to draw from when hitting
    :param hand: the player's hand to add a card to
    :return: None
    """
    global playing  # to control an upcoming while loop
    while True:
        choice = input("Would you like to hit or stand? Enter 'h' or 's' ").strip().lower()
        # Bug fix: the original checked x[1] for 's', which inspected the
        # SECOND character and crashed with IndexError on input 's'.
        # startswith also handles empty input safely.
        if choice.startswith('h'):
            hit(deck, hand)
        elif choice.startswith('s'):
            print('Player stands. Dealer is playing.')
            playing = False
        else:
            print("Sorry, please enter 'h' or 's' only!")
            continue  # re-prompt on invalid input
        break
| 24,980
|
def classify_top1_batch(image):
    """Define method `classify_top1` for servable `resnet50`.

    The input is `image` and the output is the predicted label.
    Stages: batched preprocessing -> model inference -> top-1 postprocess.
    """
    preprocessed = register.add_stage(preprocess_batch, image, outputs_count=1, batch_size=1024)
    logits = register.add_stage(resnet_model, preprocessed, outputs_count=1)
    label = register.add_stage(postprocess_top1, logits, outputs_count=1)
    return label
| 24,981
|
def notify_post_delete_order_adviser(sender, instance, **kwargs):
    """
    Notify an adviser that they have been removed from the order.

    Note that `instance` is no longer in the database at this point,
    so only its in-memory attributes are safe to use. The notification
    is deferred until the surrounding transaction commits.
    """
    callback = partial(
        notify.adviser_removed,
        order=instance.order,
        adviser=instance.adviser,
    )
    transaction.on_commit(callback)
| 24,982
|
def ngram_word(max_features=2_000):
    """Build a word-level n-gram count vectorizer (unigrams to trigrams).

    Args:
        max_features: cap on the vocabulary size kept by the vectorizer.

    Returns:
        A configured ``CountVectorizer`` instance.
    """
    vectorizer = CountVectorizer(
        analyzer='word',
        ngram_range=(1, 3),
        max_features=max_features,
    )
    return vectorizer
| 24,983
|
def func_dispatcher(intent):
    """
    Simple effect dispatcher for callable intents.

    Each intent is itself a callable taking a result box; the returned
    performer simply hands the box to the intent.
    """
    def performer(dispatcher, intent, box):
        # The intent IS the effect implementation; invoke it with the box.
        intent(box)
    return performer
| 24,984
|
def get_system_path():
    """Return the entries of the PATH environment variable.

    Returns:
        List of directory names from PATH, or an empty list when PATH
        is unset or empty.
    """
    raw = os.getenv('PATH')
    # An unset or empty PATH yields an empty list rather than [''].
    return raw.split(os.pathsep) if raw else []
| 24,985
|
def streaming_stage_current_consumption_test():
    """Measure battery current draw while ECG, ADXL and ADPD streams run.

    Puts the device in battery mode, starts three sensor streams,
    samples current with the source meter, writes the samples to a CSV
    report, stops the streams and checks the average current against a
    fixed 17 mA threshold.

    :raises common.ConditionCheckFailure: if the averaged current exceeds
        the threshold.
    :return: None
    """
    local_file_name = 'streaming_stage_curr.csv'
    # Power from the simulated battery so the meter sees real draw.
    common.sm.setup_battery_mode()
    # ***** Starting ECG, ADXL and PPG stream ******
    common.watch_shell.quick_start('ecg', 'ecg')
    common.watch_shell.quick_start('adxl', 'adxl')
    # PPG capture - start
    common.watch_shell.quick_start('adpd', 'adpd6')
    # **********************************************
    # Let the streams settle before measuring.
    time.sleep(5)
    common.sm.cfg_setup_buff(10)
    #time.sleep(10)
    #common.sm.cfg_read_buff()
    #common.sm.cfg_output_state(0)
    # measure_current() returns comma-separated readings as a string;
    # values may carry a leading '+' sign that is stripped below.
    ret = common.sm.measure_current()
    #time.sleep(1)
    with open(local_file_name, 'w') as f_ref:
        for i in ret.split(','):
            # err, msg = common.sm.cfg_get_data()
            f_ref.write(i.strip('+')+'\n')
            time.sleep(1)
    f_path = common.rename_stream_file(local_file_name, '_consumption.csv', 0, 0)  # step copies report to shared drive
    #print ret
    #common.messagebox.showinfo('Streaming Current Value', 'Current Value = %f press OK!' % ret)
    # Stop all three streams before evaluating the report.
    common.watch_shell.quick_stop('ecg', 'ecg')
    common.watch_shell.quick_stop('adxl', 'adxl')
    common.watch_shell.quick_stop('adpd', 'adpd6')
    #common.sm.cfg_output_state(0)
    # TODO: Update Current Threshold below
    check_pass, avg_curr, failure_curr = meas_check.check_battery_charge(f_path, curr_threshold=17e-3)
    if not check_pass:
        common.test_logger.error('*** Streaming Stage Current Consumption Test - FAIL ***')
        raise common.ConditionCheckFailure('\n\nFailure Current: {} | Average Current: {}'.format(failure_curr,
                                                                                                 avg_curr))
    else:
        common.test_logger.info('Average Streaming Stage Current: {}'.format(avg_curr))
| 24,986
|
def encode(x, bps_arrangement='random', n_bps_points=512, radius=1.5, bps_cell_type='dists',
verbose=1, random_seed=13, x_features=None, custom_basis=None, n_jobs=-1):
"""Converts point clouds to basis point set (BPS) representation, multi-processing version
Parameters
----------
x: numpy array [n_clouds, n_points, n_dims]
batch of point clouds to be converted
bps_arrangement: str
supported BPS arrangements: "random", "grid", "custom"
n_bps_points: int
number of basis points
radius: float
radius for BPS sampling area
bps_cell_type: str
type of information stored in every BPS cell. Supported:
'dists': Euclidean distance to the nearest point in cloud
'deltas': delta vector from basis point to the nearest point
'closest': closest point itself
'features': return features of the closest point supplied by x_features.
e.g. RGB values of points, surface normals, etc.
verbose: boolean
whether to show conversion progress
x_features: numpy array [n_clouds, n_points, n_features]
point features that will be stored in BPS cells if return_values=='features'
custom_basis: numpy array [n_basis_points, n_dims]
custom basis to use
n_jobs: int
number of parallel jobs used for encoding. If -1, use all available CPUs
Returns
-------
x_bps: [n_clouds, n_points, n_bps_features]
point clouds converted to BPS representation.
"""
if n_jobs == -1:
n_jobs = multiprocessing.cpu_count()
if n_jobs == 1:
n_clouds, n_points, n_dims = x.shape
if bps_arrangement == 'random':
basis_set = generate_random_basis(n_bps_points, n_dims=n_dims, radius=radius, random_seed=random_seed)
elif bps_arrangement == 'grid':
# in case of a grid basis, we need to find the nearest possible grid size
grid_size = int(np.round(np.power(n_bps_points, 1 / n_dims)))
basis_set = generate_grid_basis(grid_size=grid_size, minv=-radius, maxv=radius)
elif bps_arrangement == 'custom':
# in case of a grid basis, we need to find the nearest possible grid size
if custom_basis is not None:
basis_set = custom_basis
else:
raise ValueError("Custom BPS arrangement selected, but no custom_basis provided.")
else:
raise ValueError("Invalid basis type. Supported types: \'random\', \'grid\', \'custom\'")
n_bps_points = basis_set.shape[0]
if bps_cell_type == 'dists':
x_bps = np.zeros([n_clouds, n_bps_points])
elif bps_cell_type == 'deltas':
x_bps = np.zeros([n_clouds, n_bps_points, n_dims])
elif bps_cell_type == 'closest':
x_bps = np.zeros([n_clouds, n_bps_points, n_dims])
elif bps_cell_type == 'features':
n_features = x_features.shape[2]
x_bps = np.zeros([n_clouds, n_bps_points, n_features])
else:
raise ValueError("Invalid cell type. Supported types: \'dists\', \'deltas\', \'closest\', \'features\'")
fid_lst = range(0, n_clouds)
if verbose:
fid_lst = tqdm(fid_lst)
for fid in fid_lst:
nbrs = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm="ball_tree").fit(x[fid])
fid_dist, npts_ix = nbrs.kneighbors(basis_set)
if bps_cell_type == 'dists':
x_bps[fid] = fid_dist.squeeze()
elif bps_cell_type == 'deltas':
x_bps[fid] = x[fid][npts_ix].squeeze() - basis_set
elif bps_cell_type == 'closest':
x_bps[fid] = x[fid][npts_ix].squeeze()
elif bps_cell_type == 'features':
x_bps[fid] = x_features[fid][npts_ix].squeeze()
return x_bps
else:
if verbose:
print("using %d available CPUs for BPS encoding.." % n_jobs)
bps_encode_func = partial(encode, bps_arrangement=bps_arrangement, n_bps_points=n_bps_points, radius=radius,
bps_cell_type=bps_cell_type, verbose=verbose, random_seed=random_seed,
x_features=x_features, custom_basis=custom_basis, n_jobs=1)
pool = multiprocessing.Pool(n_jobs)
x_chunks = np.array_split(x, n_jobs)
x_bps = np.concatenate(pool.map(bps_encode_func, x_chunks), 0)
pool.close()
return x_bps
| 24,987
|
def parsec_params_list_to_dict(var):
    """
    Convert a PARSEC parameter sequence into a named dictionary.

    :param var: indexable sequence of at least 9 PARSEC parameters, in
        the fixed order rle, x_pre, y_pre, d2ydx2_pre, th_pre, x_suc,
        y_suc, d2ydx2_suc, th_suc
    :return: dict mapping parameter names to the corresponding values
    """
    names = ("rle", "x_pre", "y_pre", "d2ydx2_pre", "th_pre",
             "x_suc", "y_suc", "d2ydx2_suc", "th_suc")
    # Explicit indexing (rather than zip) keeps the original IndexError
    # behavior for sequences shorter than 9 entries.
    return {name: var[i] for i, name in enumerate(names)}
| 24,988
|
def parse_msiinfo_suminfo_output(output_string):
    """
    Parse the output of `msiinfo suminfo` into a dictionary.

    Each line is split at its leftmost ':'; the left part becomes the
    key and the (whitespace-stripped) right part becomes the value.
    Lines with no text before a ':' are skipped.
    """
    parsed = {}
    for line in output_string.splitlines():
        field, _sep, rest = line.partition(':')
        if not field:
            continue  # skip blank lines / lines starting with ':'
        parsed[field] = rest.strip()
    return parsed
| 24,989
|
def create_indicators_fields(tag_details: Dict[str, Any]) -> Dict[str, Any]:
    """
    Build the indicator fields from a tag-details response.

    Args:
        tag_details: a dictionary containing the tag details.

    Returns:
        A dictionary representing the indicator fields, with null/empty
        entries removed.
    """
    tag = tag_details.get('tag', {})

    def _to_iso(raw: str):
        # AutoFocus timestamps -> the standard date format; None when absent.
        if not raw:
            return None
        return datetime.strptime(raw, AF_TAGS_DATE_FORMAT).strftime(DATE_FORMAT)

    fields: Dict[str, Any] = {
        'publications': create_publications(json.loads(tag.get('refs', '[]'))),
        'aliases': tag_details.get('aliases', []),
        'description': tag.get('description', ''),
        'lastseenbysource': _to_iso(tag.get('lasthit', '')),
        'updateddate': _to_iso(tag.get('updated_at', '')),
        'reportedby': tag.get('source', ''),
    }
    remove_nulls_from_dictionary(fields)
    return fields
| 24,990
|
def wait_until_edit_or_exit(filename, modified_time, process, sleep_time=0.1):
    """Wait until a file is edited or a process exits.

    Positional arguments:
    filename: The path to the file to check.
    modified_time: The last modified time against which to check.
    process: The process whose termination we're awaiting.

    Keyword arguments:
    sleep_time: Amount of time in seconds to sleep between checks.

    Returns:
    True if the file is edited; False if the process exits.
    """
    # Bug fix: the docstring promised True/False but both branches used a
    # bare `return`, so callers always received None.
    while True:
        if process.poll() is not None:  # poll() is non-None once exited
            return False
        if os.path.getmtime(filename) > modified_time:
            return True
        time.sleep(sleep_time)
| 24,991
|
def spatial_difference(gdf1: GeoDataFrame, gdf2: GeoDataFrame) -> GeoDataFrame:
    """Removes polygons from the first GeoDataFrame that intersect with polygons from the second GeoDataFrame

    :param gdf1: First input data frame
    :param gdf2: Second input data frame
    :return: Resulting data frame
    """
    # Left spatial join: rows of gdf1 with no match in gdf2 get a NaN
    # index_right, which marks them as non-intersecting.
    joined = gpd.sjoin(gdf1, gdf2[["geometry"]], how="left")
    non_intersecting = joined[joined["index_right"].isna()]
    return non_intersecting.drop(columns=["index_right"])
| 24,992
|
def frustumShellIxx(rb, rt, t, h, diamFlag=False):
    """This function returns a frustum's mass-moment of inertia (divided by density) about the
    transverse x/y-axis passing through the center of mass with radii or diameter inputs.
    NOTE: This is for a frustum SHELL, not a solid
    INPUTS:
    Parameters
    ----------
    rb : float (scalar/vector), base radius
    rt : float (scalar/vector), top radius
    t  : float (scalar/vector), thickness
    h  : float (scalar/vector), height
    diamFlag : boolean, True if rb and rt are entered as diameters
    OUTPUTs:
    -------
    Ixx=Iyy : float (scalar/vector), Moment of inertia about x/y-axis through center of mass (principle axes)
    """
    if diamFlag:
        # Convert diameters to radii.
        # Bug fix: the previous `rb *= 0.5` mutated caller-owned numpy
        # arrays in place; rebinding keeps the caller's data intact.
        rb = 0.5 * rb
        rt = 0.5 * rt
    # Shell inertia = solid outer frustum minus solid inner frustum.
    rb_o = rb
    rb_i = rb - t
    rt_o = rt
    rt_i = rt - t
    return (frustumIxx(rb_o, rt_o, h) - frustumIxx(rb_i, rt_i, h))
| 24,993
|
def default_shaders():
    """
    Returns a list with all the default shaders of the current DCC
    :return: str
    """
    shaders = shader_utils.get_default_shaders()
    return shaders
| 24,994
|
def get_domains_and_slugs():
    """
    Return all active domain names and slugs as a dictionary,
    ordered by name: {domain_name: slug}
    """
    active_domains = Domain.objects.filter(active=1).order_by('name')
    # An empty queryset simply yields an empty mapping.
    return {domain.name: domain.slug for domain in active_domains}
| 24,995
|
def has_video_ads() -> bool:
    """has_video_ads() -> bool
    (internal)
    """
    # Stub implementation: always reports no video ads.
    return False
| 24,996
|
def _split_words_with_boundaries(
string: str, word_boundaries: Container[str]
) -> Iterator[str]:
"""
Split a string around given separators, conserving the separators.
>>> list(_split_words_with_boundaries("ab cd -ef_gh", " -_"))
['ab', ' ', 'cd', ' ', '-', 'ef', '_', 'gh']
"""
stack = [] # type: List[str]
for char in string:
if char in word_boundaries:
if stack:
yield "".join(stack)
yield char
stack[:] = []
else:
stack.append(char)
if stack:
yield "".join(stack)
| 24,997
|
def calculate_purchasing_plan(total_days, sellers, starting_bread=10, best_before_date=30, debug = False):
    """
    Compute how many loaves to buy from each seller so bread is available
    every day, minimizing cost (and, among equal-cost plans, buying as
    early as possible). Returns a list with one count per seller, or
    None when no feasible plan exists.

    total_days : positive int
    sellers : list of tuple (day, price), assumed sorted by arrival day
    starting_bread : int, optional, loaves on hand at day 0
    best_before_date : positive int, (how long the bread lasts)
    debug : boolean, (prints cost matrix)
    """
    # create cost_matrix of (sellers+1) x total_days: row 0 is the free
    # starting bread; cell [m][d] is the price of covering day d with
    # bread from merchant m (inf when that merchant cannot cover day d).
    cost_matrix = [[0] * starting_bread + [float('inf')] * (total_days - min(starting_bread, best_before_date))]
    for merchant in sellers:
        cost_matrix.append(
            [float('inf')] * (merchant[0]) +  # Add inf before the merchant arrives
            [merchant[1]] * min(best_before_date, (total_days - merchant[0])) +  # Add merchant price while bread lasts
            [float('inf')] * (total_days - merchant[0] - min(best_before_date, (total_days - merchant[0]))))  # Add inf after
    if debug:
        print_matrix(cost_matrix)
    # Walk right-to-left choosing, for each day, the cheapest merchant
    # able to cover it (preferring to "go up" to earlier rows).
    current_merchant = len(sellers)
    current_day = total_days - 1
    best_merchant = current_merchant
    merchant_of_the_day = [0] * total_days
    new_merchant = True  # If the merchant changes, we want to go as far up as possible
    while current_day >= starting_bread:
        best_price = cost_matrix[best_merchant][current_day]
        # go up as far as you can
        for best_merchant_index in range(current_merchant, -1, -1):
            tmp = cost_matrix[best_merchant_index][current_day]
            # go up only if price is lower
            if tmp < best_price or (tmp <= best_price and new_merchant):  # Up only if lower price or new merchant
                # print("Better merchant found %3s with price %3s <= %3s" % (best_merchant_index, tmp, best_price))
                best_merchant = best_merchant_index
                best_price = tmp
                new_merchant = True
        merchant_of_the_day[current_day] = best_merchant  # Save from which merchant we buy bread on selected day
        current_day -= 1  # go left one step
        if best_price == float('inf'):
            # No merchant (nor the starting bread) can cover this day.
            if debug:
                print("Plan not feasible on day %5s" % current_day)
            return None
        new_merchant = False  # No new merchant for the previous day yet
    # At this point we have fewest # merchants and lowest price. We need to make another walk from left to right to buy
    # bread as soon as possible.
    buying_plan = [0] * (len(sellers) + 1)  # +1 is because initial bread is accounted for in the matrix
    current_merchant = 0
    current_day = 0
    while current_day < total_days:
        # If cost of current merchant is the same as cost of the merchant of the day, buy from current, since we buy
        # bread from him earlier (because merchants are sorted by their arrival day)
        if cost_matrix[current_merchant][current_day] > cost_matrix[merchant_of_the_day[current_day]][current_day]:
            current_merchant = merchant_of_the_day[current_day]
        buying_plan[current_merchant] += 1
        current_day += 1
    # Drop row 0 (starting bread) so the result aligns with `sellers`.
    return buying_plan[1:]
| 24,998
|
def compile_sites(inp: NetInput,
                  y_true: Iterable[np.ndarray],
                  y_pred: Iterable[np.ndarray],
                  masks: Iterable[np.ndarray]):
    """
    Prepare sites to be dumped into a tsv file.

    :param inp: NetInput
    :param y_true: true known classes mapped on templates
    :param y_pred: predicted classes mapped on templates
    :param masks: boolean numpy arrays with True at positions of any
        class that could be positive
    :return: Iterable over Sites
    """
    # 1-based positions of candidate sites for every sequence.
    positions = (np.where(mask > 0)[0] + 1 for mask in masks)

    def build_site(seq_id, pos, pred_cls, true_cls):
        # Collapse class values to 0/1 flags for predicted and true.
        return Site(seq_id, pos, 1 if pred_cls else 0, 1 if true_cls else 0)

    rows = chain.from_iterable(
        ((seq_id, pos, p, t) for pos, p, t in zip(pos_arr, pred_arr, true_arr))
        for seq_id, pos_arr, pred_arr, true_arr in zip(inp.ids, positions, y_pred, y_true))
    return starmap(build_site, rows)
| 24,999
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.