content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def astToMongo(ast):
    """Translate *ast* to a mongo query, first rewriting it to a not-free
    equivalent AST."""
    not_free_ast = _eliminate_not(ast)
    return _astToMongo_helper(not_free_ast)
async def aliases() -> None:
    """\
    Custom command aliases
    ======================
    Aliases provide a way to abbreviate system commands and
    add default arguments to commonly used commands.
    Aliases are described in user-config files
    (see `neuro help user-config` for details).
    `~/.neuro/user.toml` is used for **global** aliases, and
    `.neuro.toml` can be used for saving **project-specific** aliases.
    Project aliases override global ones if the same alias
    name exists in both configuration files.
    There are two types of aliases: **internal** and **external**.
    **Internal** aliases execute built-in neuro commands, and **
    external** aliases execute **system OS** commands.
    Internal aliases
    ----------------
    Internal aliases are used for running existing neuro CLI commands under
    a different name and with optional overridden defaults (passed predefined
    command line options and arguments).
    For example, the following alias definition creates a `neuro lsl` command
    that executes `neuro storage ls -hl` to list the storage's content
    using a long output mode with human-readable file sizes.
    ```
    [alias.lsl]
    cmd = "ls -l --human-readable"
    help = "List directory contents in a long mode."
    ```
    Available configuration arguments:
    * `[alias.lsl]`: Defines a subgroup for a named alias,
    `lsl` in this case.
    * `cmd`: The command to execute with provided overridden options,
    this key is **mandatory**.
    The `cmd` key in the alias section implies **internal alias** mode.
    * `help`: Help string displayed by the `neuro lsl --help`
    command (optional).
    Internal aliases accept additional command line options and arguments,
    and pass them to the underlying command as is.
    For example, `neuro lsl storage:directory` works as
    `neuro ls -l --human-readable storage:directory`
    External aliases
    ----------------
    External aliases spawn a subprocess with passing default options and
    arguments. All user-provided arguments are passed to the underlying
    program as well.
    For example, the following configuration defines `neuro du` command as
    an alias for the system `du --human-readable` command with an additional
    ability to specify a directory for analysis.
    ```
    [alias.du]
    exec = "du"
    args = "[FILE]..."
    options = [
    "-h, --human-readable   print sizes in powers of 1024 (e.g., 1024M)",
    "-d, --max-depth=N  max recursion level for subdirectories lookup",
    ]
    help = '''
    Summarize disk usage of the set of files,
    recursively for directories.
    '''
    ```
    Available configuration arguments:
    * `[alias.du]`: Defines a subgroup for a named alias,
    `du` in this case.
    * `exec`: External command to execute, this key is **mandatory**.
    The `exec` key in the alias section implies **external alias** mode.
    * `args`: Positional arguments accepted by the alias,
    the format is described below (optional).
    * `options`: Options and flags accepted by the alias,
    the format is described below (optional).
    * `help`: Help string displayed by `neuro du --help`
    command (optional).
    **args** is a string with a sequence of arguments, e.g. `DIR SRC... [DST]`
    If an argument is enclosed in square brackets, it's **optional** (`[FILE]`).
    If an argument ends with an ellipsis, this argument accepts
    multiple values (`SRC...`)
    **options** is a list of strings specifying various options.
    Each string describes a single option. The option definitions should be separated
    from the option descriptions (help) by two or more spaces.
    An option definition can contain:
    * Short name (`-h`)
    * Long name (`--human-readable`)
    * Indication of the required value type (`-d, --max-depth=N`).
    If the required value indicator (`=NAME`) is absent,
    the option will be considered a boolean flag.
    **exec** defines an external system command to execute.
    The command is spawned in a subprocess. Neuro CLI waits for the subprocess
    to be finished, and then returns the exit code to the outer caller.
    The parameter may specify an executable file along with some options.
    For example, `exec = "du --human-readable"` enforces human-readable mode
    for the `du` command.
    `exec` can be used in **simplified** and **pattern** mode.
    Pattern mode
    ------------
    In **pattern mode**, the system command is used along with **substitutions**.
    For example, `exec = "du {human_readable} {max_depth} {file}"`.
    Substitution is enclosed in curly brackets and represents a variable name to expand,
    e.g. `{file}`.
    It's expanded with an option or positional argument specified
    by `args` or `options`. The substitution name is automatically lowercased,
    and dashes (`-`) are replaced with underscores (`_`).
    For example, `args = "ARG-NAME"` matches to `{arg_name}`.
    If a substitution corresponds to an optional parameter not provided
    by the user, this substitution will be expanded to an empty string.
    If a substitution corresponds to multiple values, all of them are used.
    For example, `neuro du folder1 folder2` expands to `du folder1 folder2` since
    the `[FILE]...` argument matches to `folder1 folder2` values.
    Options are expanded using the longest form if provided,
    e.g. `neuro du -h` is expanded to `du --human-readable`.
    Options with values are expanded as well,
    e.g. `neuro du -d 1` is expanded to `du --max-depth 1`.
    `neuro du --max-depth 1` matches to the same command.
    Simplified mode
    ---------------
    In **simplified mode**, the `exec` value does not contain any **substitutions**.
    In this case, all parsed `options` and `args` are appended
    to the executed command automatically if provided.
    For example, `exec = "du"` is expanded to
    `exec = "du {human_readable} {max_depth} {file}"`
    """ | 27,301 |
def write_seqs_fasta(out_fp_seqs_fasta: str, out_fp_seqs_qza: str,
                     tsv_pd: pd.DataFrame) -> str:
    """
    Write the fasta sequences.

    Each feature name in the table's index is itself the sequence, so it is
    written both as the fasta header and as the sequence body.

    :param out_fp_seqs_fasta: output sequences fasta file name.
    :param out_fp_seqs_qza: output sequences qiime2 Artefact file name.
    :param tsv_pd: table which feature names are sequences.
    :return: the qiime2 import command produced by `run_import`.
    """
    with open(out_fp_seqs_fasta, 'w') as fas_o:
        for seq in tsv_pd.index:
            fas_o.write('>%s\n%s\n' % (seq.strip(), seq.strip()))
    cmd = run_import(
        out_fp_seqs_fasta, out_fp_seqs_qza, 'FeatureData[Sequence]')
    return cmd | 27,302 |
def f(x, y):
    """Compute Mandelbrot escape iterations for a grid of points.

    Takes two numpy arrays produced by ``np.meshgrid`` (the real and
    imaginary parts of the candidate points ``c = x + 1j*y``).

    Returns a tuple ``(r, mask)`` where ``r`` holds, per point, the last
    iteration at which the point was still bounded (so points that diverge
    immediately get 0), and ``mask`` is True for points that never diverged
    within ``max_iter`` iterations.
    """
    max_iter = 100  # maximum number of iterations
    c = x + 1j * y
    # Derive the grid shape from the input instead of relying on an undefined
    # module-level N, so the function works for any meshgrid size.
    z = np.zeros(c.shape, dtype=complex)
    r = np.zeros(c.shape, dtype=int)
    mask = np.full(c.shape, True, dtype=bool)
    for i in range(max_iter):
        z[mask] = z[mask] ** 2 + c[mask]  # z_i = z_{i-1}**2 + c
        r[mask] = i  # iteration number at which point escapes (diverges)
        # If a point ever becomes larger than 2, the sequence escapes to
        # infinity:
        # https://en.wikipedia.org/wiki/Mandelbrot_set#Basic_properties
        mask[np.abs(z) > 2] = False  # points that diverge
    return r, mask
def AMZN_dataprep(src_file_path=None, tar_file_path=None):
    """Replace the timestamp column of the csv with a running row index.

    The header line is copied unchanged; every following line keeps only its
    value column, prefixed with its 1-based row number.

    :param src_file_path: source csv of ``timestamp,value`` rows. Defaults to
        the original hard-coded Twitter_volume_AMZN.csv location.
    :param tar_file_path: destination csv of ``index,value`` rows. Defaults to
        the original hard-coded Twitter_volume_AMZN_num.csv location.
    """
    if src_file_path is None:
        src_file_path = "/Users/seenli/Documents/workspace/code/pytorch_learn2/time_series_DL/Twitter_volume_AMZN.csv"
    if tar_file_path is None:
        tar_file_path = "/Users/seenli/Documents/workspace/code/pytorch_learn2/time_series_DL/Twitter_volume_AMZN_num.csv"
    # "with" guarantees both handles are closed (the original leaked both).
    # The source is opened first so a missing source no longer truncates an
    # existing target file.
    with open(src_file_path) as src_file, \
            open(tar_file_path, 'w', encoding='utf8') as tar_file:
        lines = src_file.readlines()
        for i in range(len(lines)):
            if i == 0:
                tar_file.write(lines[i])
            else:
                new_line = lines[i].strip().split(',')[1]
                print(str(i) + ',' + new_line)
                tar_file.write(str(i) + ',' + new_line + '\n')
def _expand_param_name(param: BaseDescriptor) -> List[str]:
    """
    Return the expanded names for *param*, each with the param's prefix
    (empty by default) applied.

    :param param: The param to expand
    :raises ValueError: if the param was not declared with the expand kwarg.
    """
    if not getattr(param, 'expand', False):
        raise ValueError('Cannot expand param that does not have the expand kwarg')
    prefix = getattr(param, 'prefix', '')
    return [prefix + name for name in _get_expanded_param_names(param)]
def accuracy_top_k(output, target, top_k=(1,)):
    """Computes the precision@k for the specified values of k.

    :param output: (batch, num_classes) tensor of class scores.
    :param target: (batch,) tensor of true class indices.
    :param top_k: iterable of k values to evaluate.
    :return: list of 1-element tensors, one fraction-correct per k.
    """
    max_k = max(top_k)
    batch_size = target.size(0)
    _, pred = output.topk(max_k, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in top_k:
        # reshape(-1) instead of view(-1): slicing the transposed tensor
        # yields a non-contiguous view, and .view() raises a RuntimeError on
        # non-contiguous tensors in modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(1.0 / batch_size))
    return res
def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
    """Apply softmax operation on each element of input tensor.

    :param data: The tensor providing input data.
    :param axis: An axis along which Softmax should be calculated. Can be positive or negative.
    :param name: Optional name for the node.
    :return: The new node with softmax operation applied on each element.
    """
    return _get_node_factory_opset8().create("Softmax", [as_node(data)], {"axis": axis}) | 27,307 |
def get_duts_mac_address(duts):
    """
    Get the DUTs and their base MAC addresses mapping.

    :param duts: List of DUTs
    :return: dict mapping each DUT to its MAC address string (colons removed)
    """
    duts_mac_addresses = {}
    cmd = "show platform syseeprom"
    for dut in duts:
        if st.is_vsonic(dut):
            # Virtual sonic devices expose the MAC via ifconfig, not EEPROM.
            duts_mac_addresses[dut] = basic.get_ifconfig_ether(dut)
            continue
        eeprom_details = st.show(dut, cmd, skip_error_check=True)
        if not eeprom_details:
            # Retry a few times: syseeprom data may not be ready immediately.
            iteration = 3
            for _ in range(iteration):
                st.wait(2)
                eeprom_details = st.show(dut, cmd, skip_error_check=True)
                if eeprom_details:
                    break
            # The original compared the loop index against iteration + 1 here,
            # which could never be true, so this failure path was unreachable.
            if not eeprom_details:
                st.log("EEPROM data not found for {}".format(dut))
                st.report_fail("eeprom_data_not_found", dut)
        st.log("EEPROM DETAILS -- {}".format(eeprom_details))
        if eeprom_details:
            for data in eeprom_details:
                if "tlv_name" in data and data["tlv_name"] == "Base MAC Address":
                    duts_mac_addresses[dut] = data["value"].replace(":", "")
    st.log("DUT MAC ADDRESS -- {}".format(duts_mac_addresses))
    return duts_mac_addresses
def crop(sample, crop_area, in_crop_threshold):
    """Crop an image to a given area and transform target accordingly.

    Args:
        sample: {
            "image": PIL.Image,
            "bboxes": Numpy array :math:`(N, 4)` (XYXY format),
            "keypoints": Numpy array :math:`(N, n, 2)`, (optional)
            ...
        }
        crop_area: An array or list of four numbers (coordinates of the crop box).
        in_crop_threshold: Float, a threshold for dropping detection targets that
            intersect too little with the crop_area.

    Returns:
        A dict shaped like *sample* with the cropped image and the surviving,
        coordinate-shifted targets (the original docstring said "tuple", but
        a dict is what is actually built and returned).
    """
    transformed_sample = {}
    crop_area = np.array(crop_area)
    bboxes = sample["bboxes"]
    # Keep only targets whose overlap with the crop exceeds the threshold.
    intersections = intersection(bboxes, crop_area)
    bbox_areas = (bboxes[:, 2:] - bboxes[:, :2]).prod(axis=1)
    in_crop = (intersections / bbox_areas > in_crop_threshold)
    # Shift surviving boxes/keypoints into the crop's coordinate frame.
    bboxes = bboxes[in_crop] - np.tile(crop_area[:2], 2)
    transformed_sample["bboxes"] = bboxes
    if "keypoints" in sample.keys():
        keypoints = sample["keypoints"]
        keypoints = keypoints[in_crop] - crop_area[:2]
        transformed_sample["keypoints"] = keypoints
    image = sample["image"]
    image = _crop_image(image, crop_area)
    transformed_sample["image"] = image
    for key in sample.keys():
        if key in ["image", "bboxes", "keypoints"]:
            continue
        try:
            transformed_sample[key] = np.array(sample[key])[in_crop]
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt and
            # SystemExit. Per-detection filtering remains best-effort: fields
            # that cannot be indexed per-detection are copied verbatim.
            transformed_sample[key] = deepcopy(sample[key])
    return transformed_sample
def delete_if_exists(fname):
    """Remove a file if it exists, with a message to stdout.

    ARGS:
        fname (string): full path to the file to be removed

    Raises:
        OSError: if the file still exists after removal.
    """
    if os.path.exists(fname):
        os.remove(fname)
        # Report only after the removal succeeded; the original printed the
        # message before deleting, which lied whenever os.remove raised.
        sys.stdout.write('removed {}\n'.format(fname))
        sys.stdout.flush()
        if os.path.exists(fname):
            raise OSError('delete_if_exists failed: file still exists')
def plugin_settings(settings):
    """
    Update the LMS/Production (aka AWS) settings to use Figures properly.

    Adds entries to the environment settings.

    You can disable the CeleryBeat scheduler for Figures by configuring the
    ``lms.env.json`` file. Create or update ``FIGURES`` as a top level key in
    the ``lms.env.json`` file:
    ::

        "FIGURES": {
            "ENABLE_DAILY_METRICS_IMPORT": false
        },
    """
    settings.ENV_TOKENS.setdefault('FIGURES', {})
    update_webpack_loader(settings.WEBPACK_LOADER, settings.ENV_TOKENS['FIGURES'])
    update_celerybeat_schedule(settings.CELERYBEAT_SCHEDULE, settings.ENV_TOKENS['FIGURES'])
def get_cpu_utilization(mqueries, region, days):
    """
    Gets CPU utilization metric data for instances.

    :param mqueries: CloudWatch ``MetricDataQueries`` list.
    :param region: AWS region name.
    :param days: how many days back the query window starts.
    :return: the ``MetricDataResults`` list from CloudWatch.
    """
    client = SESSION.client('cloudwatch', region_name=region)
    # Take a single timestamp so the window is exactly `days` long; the
    # original called datetime.now() twice, skewing StartTime vs EndTime.
    now = datetime.now()
    response = client.get_metric_data(
        MetricDataQueries=mqueries,
        StartTime=now - timedelta(days=days),
        EndTime=now
    )
    return response['MetricDataResults']
def std_opt_end_independent(policy_net, target_net, optimizer, memory,
                            batch_size=128, GAMMA=0.99, device='cuda'):
    """
    Apply the standard procedure to an ensemble of deep Q network.

    Each ensemble member samples its own independent minibatch; the
    per-member smooth-L1 (Huber) TD losses are summed and optimized with a
    single backward/step.

    :param policy_net: ensemble network being trained.
    :param target_net: target ensemble providing bootstrapped next-state values.
    :param optimizer: optimizer over policy_net's parameters.
    :param memory: replay buffer exposing ``sample(batch_size)``.
    :param batch_size: transitions sampled per ensemble member.
    :param GAMMA: discount factor.
    :param device: device the sampled batches are moved to.
    :return: detached mean loss per ensemble member, or 0 when the buffer
        does not yet hold ``batch_size`` transitions.
    """
    if len(memory) < batch_size:
        return 0
    total_loss = 0
    for ens_num in range(policy_net.get_num_ensembles()):
        # Independent sample for this ensemble member.
        state_batch, action_batch, reward_batch, n_state_batch, done_batch = memory.sample(
            batch_size)
        state_batch = state_batch.to(device)
        action_batch = action_batch.to(device)
        reward_batch = reward_batch.to(device)
        n_state_batch = n_state_batch.to(device)
        done_batch = done_batch.to(device)
        # Q(s, a) for the actions actually taken.
        q = policy_net(state_batch, ens_num=ens_num).gather(1, action_batch)
        # max_a' Q_target(s', a'), detached so no gradient flows to the target.
        nq = target_net(n_state_batch, ens_num=ens_num).max(1)[0].detach()
        # Compute the expected Q values
        expected_state_action_values = (
            nq * GAMMA)*(1.-done_batch[:, 0]) + reward_batch[:, 0]
        # Compute Huber loss
        loss = F.smooth_l1_loss(q, expected_state_action_values.unsqueeze(1))
        total_loss += loss
    # Optimize the model
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    return total_loss.detach() / policy_net.get_num_ensembles() | 27,313 |
def scrape_forecast_products() -> Dict[str, Tuple[str, str]]:
    """Build a town -> (product id, state) mapping by scraping each BOM
    state overview page for detailed-forecast links."""
    logging.info("Scraping list of BOM forecast products")
    products: Dict[str, Tuple[str, str]] = {}
    town_link_pattern = r'/forecasts/(?P<town>.+?).shtml">Detailed'
    for state in STATES:
        page = requests.get(
            f"http://www.bom.gov.au/{state}/forecasts/precis.shtml", timeout=10
        )
        for town in re.findall(town_link_pattern, page.text):
            product = get_town_forecast_product_id(state, town)
            if product:
                products[town] = (product, state)
    return products
def plate_from_list_spreadsheet(
    filename, sheet_name=0, num_wells="infer", wellname_field="wellname"
):
    """Create a plate from a Pandas dataframe where each row contains the
    name of a well and metadata on the well.

    Parameters
    ----------
    filename
      Path to the spreadsheet file.
    sheet_name
      Index or name of the spreadsheet to use.
    num_wells
      Number of wells in the Plate to be created. If left to default 'infer',
      the size of the plate will be chosen as the smallest format (out of
      96, 384 and 1536 wells) which contains all the well names.
    wellname_field="wellname"
      Name of the column of the spreadsheet giving the well names

    Raises
    ------
    ValueError
      If the filename is neither an Excel (.xls/.xlsx) nor a .csv file.
    """
    if ".xls" in filename:  # includes xlsx
        dataframe = pd.read_excel(filename, sheet_name=sheet_name)
    elif filename.endswith(".csv"):
        dataframe = pd.read_csv(filename)
    else:
        # The original silently fell through here and later crashed with a
        # confusing NameError on `dataframe`; fail fast with a clear message.
        raise ValueError(
            "Unsupported spreadsheet format for %r (expected .xls/.xlsx/.csv)"
            % filename
        )
    return plate_from_dataframe(
        dataframe,
        wellname_field=wellname_field,
        num_wells=num_wells,
        data={"filename": filename},
    )
def frexp(x: float) -> _Tuple[float, int]:
    """Decomposes a value ``x`` into a
    tuple ``(m, p)``, such that ``x == m * (2 ** p)``,
    with ``0.5 <= abs(m) < 1`` (and ``m == x`` for zero/NaN/infinity).

    Arguments:
        x: The value to be decomposed.

    Returns:
        Tuple of ``m`` and ``p``.
    """
    # Implemented in pure Python (the previous body was an unimplemented
    # `pass` stub that silently returned None, contradicting the signature).
    m = float(x)
    # Zero, NaN and infinities cannot be normalised; return them unchanged
    # with a zero exponent, as C-library frexp does.
    if m == 0.0 or m != m or m == float("inf") or m == float("-inf"):
        return (m, 0)
    p = 0
    sign = -1.0 if m < 0 else 1.0
    m = abs(m)
    # Halving/doubling by 2 is exact for binary floats, so this loop loses no
    # precision while normalising the mantissa into [0.5, 1).
    while m >= 1.0:
        m /= 2.0
        p += 1
    while m < 0.5:
        m *= 2.0
        p -= 1
    return (sign * m, p)
def main():
    """PE Tree Carve script entry-point.

    Scans the input file for MZ signatures and attempts to map a PE image at
    each occurrence, presenting the results in a PE Tree Qt window.
    """
    # Check command line arguments
    parser = ArgumentParser(description="PE-Tree (Carve)")
    parser.add_argument("filename", help="Path to file to carve", type=FileType("rb"))
    args = parser.parse_args()
    # Create PE Tree Qt application
    application = QtWidgets.QApplication(sys.argv)
    window = pe_tree.window.PETreeWindow(application, CarveRuntime, args, open_file=False)
    # Iterate over all MZ bytes in the input file; each offset is a candidate
    # image base. (Removed the unused `ptr_width` local and the redundant
    # int() around match.start(), which already returns an int.)
    for match in re.compile(b"MZ").finditer(window.runtime.data):
        image_base = match.start()
        # Attempt to map PE
        window.pe_tree_form.map_pe(filename="Offset {} ({:#08x})".format(image_base, image_base), image_base=image_base, disable_dump=True)
    sys.exit(application.exec_())
def displaced_species_along_mode(species: Species,
                                 mode_number: int,
                                 disp_factor: float = 1.0,
                                 max_atom_disp: float = 99.9) -> Optional[Species]:
    """
    Displace the geometry along a normal mode with mode number indexed from 0,
    where 0-2 are translational normal modes, 3-5 are rotational modes and 6
    is the largest magnitude imaginary mode (if present). To displace along
    the second imaginary mode we have mode_number=7

    Arguments:
        species (autode.species.Species):
        mode_number (int): Mode number to displace along

    Keyword Arguments:
        disp_factor (float): Distance to displace (default: {1.0})
        max_atom_disp (float): Maximum displacement of any atom (Å)

    Returns:
        (autode.species.Species): Displaced copy of *species*, or None if the
        requested normal mode is unavailable.

    Raises:
        (autode.exceptions.CouldNotGetProperty):
    """
    logger.info(f'Displacing along mode {mode_number} in {species.name}')
    mode_disp_coords = species.normal_mode(mode_number)
    if mode_disp_coords is None:
        logger.error('Could not get a displaced species. No normal mode '
                     'could be found')
        return None
    coords = species.coordinates
    disp_coords = coords.copy() + disp_factor * mode_disp_coords
    # Ensure the maximum displacement distance any single atom is below the
    # threshold (max_atom_disp), by incrementing backwards in steps of 0.05 Å,
    # for disp_factor = 1.0 Å
    for _ in range(20):
        if np.max(np.linalg.norm(coords - disp_coords, axis=1)) < max_atom_disp:
            break
        # Back off by 1/20th of the full displacement per attempt.
        disp_coords -= (disp_factor / 20) * mode_disp_coords
    # Create a new species from the initial
    disp_species = Species(name=f'{species.name}_disp',
                           atoms=species.atoms.copy(),
                           charge=species.charge,
                           mult=species.mult)
    disp_species.coordinates = disp_coords
    return disp_species | 27,318 |
def constant(pylist, dtype=None, ragged_rank=None, inner_shape=None,
             name=None, row_splits_dtype=dtypes.int64):
  """Constructs a constant RaggedTensor from a nested Python list.

  Example:
  ```python
  >>> ragged.constant([[1, 2], [3], [4, 5, 6]]).eval()
  RaggedTensorValue(values=[1, 2, 3, 4, 5, 6], splits=[0, 2, 3, 6])
  ```

  All scalar values in `pylist` must have the same nesting depth `K`, and the
  returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar
  values, then `K` is one greater than the maximum depth of empty lists in
  `pylist`. All scalar values in `pylist` must be compatible with `dtype`.

  Args:
    pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
      is not a `list`, `tuple` or `np.ndarray` must be a scalar value
      compatible with `dtype`.
    dtype: The type of elements for the returned `RaggedTensor`. If not
      specified, then a default is chosen based on the scalar values in
      `pylist`.
    ragged_rank: An integer specifying the ragged rank of the returned
      `RaggedTensor`. Must be nonnegative and less than `K`. Defaults to
      `max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K
      - 1 - len(inner_shape))` if `inner_shape` is specified.
    inner_shape: A tuple of integers specifying the shape for individual inner
      values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank`
      is not specified. If `ragged_rank` is specified, then a default is chosen
      based on the contents of `pylist`.
    name: A name prefix for the returned tensor (optional).
    row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits.
      One of `tf.int32` or `tf.int64`.

  Returns:
    A potentially ragged tensor with rank `K` and the specified `ragged_rank`,
    containing the values from `pylist`.

  Raises:
    ValueError: If the scalar values in `pylist` have inconsistent nesting
      depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
  """
  # Factory invoked by _constant_value once per ragged level; it pins the
  # row_splits to the requested dtype and skips validation since the splits
  # are built here rather than supplied by the caller.
  def ragged_factory(values, row_splits):
    row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype)
    return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits,
                                                      validate=False)
  with ops.name_scope(name, "RaggedConstant"):
    return _constant_value(ragged_factory, constant_op.constant, pylist, dtype,
                           ragged_rank, inner_shape) | 27,319 |
def parse_args():
    """Parse the command line arguments for the cert-install tool and return
    the populated argparse namespace."""
    parser = argparse.ArgumentParser(description='Install cert on device.')
    parser.add_argument('-n', '--cert-name', default='dummycert',
                        help='certificate name')
    parser.add_argument('--overwrite', default=False, action='store_true',
                        help='Overwrite certificate file if it is already installed')
    parser.add_argument('--remove', default=False, action='store_true',
                        help='Remove certificate file if it is installed')
    parser.add_argument('--device-id', help='device serial number')
    parser.add_argument('--adb-path', help='adb binary path')
    parser.add_argument('cert_path', help='Certificate file path')
    return parser.parse_args()
def partition(lst, fn):
    """Partition lst by predicate.

    - lst: list of items
    - fn: function that returns True or False

    Returns new list: [a, b], where `a` are items that passed fn test,
    and `b` are items that failed fn test.

    >>> def is_even(num):
    ...     return num % 2 == 0

    >>> def is_string(el):
    ...     return isinstance(el, str)

    >>> partition([1, 2, 3, 4], is_even)
    [[2, 4], [1, 3]]

    >>> partition(["hi", None, 6, "bye"], is_string)
    [['hi', 'bye'], [None, 6]]
    """
    good = []
    bad = []
    # Single pass so fn is called exactly once per item; the commented-out
    # two-comprehension variants that cluttered the original called fn (or
    # scanned the list) twice per element and have been removed.
    for item in lst:
        (good if fn(item) else bad).append(item)
    return [good, bad]
def get_page(token, size):
    """Return one portion of the s3 bucket's objects, resuming from *token*
    when one is given."""
    request_kwargs = {
        "Bucket": s3_bucket_name,
        "MaxKeys": size,
        "Prefix": s3_pbject_prefix,
    }
    if token:
        request_kwargs["ContinuationToken"] = token
    return client.list_objects_v2(**request_kwargs)
def get_segment_base_addr_by_proc_maps(pid: int, filename: str = None) -> dict:
    """Read /proc/pid/maps file to get base addresses. Return a dictionary
    with (possible) keys: 'code', 'libc', 'ld', 'stack', 'heap', 'vdso'.

    Args:
        pid (int): Pid of process.
        filename (str, optional): Filename to get code base address. Defaults to None.

    Returns:
        dict: All segment base addresses. Key: str, Val: int.
    """
    assert isinstance(pid, int), "error type!"
    res = None
    try:
        res = subprocess.check_output(["cat", "/proc/{}/maps".format(pid)]).decode().split("\n")
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt.
        errlog_exit("cat /proc/{}/maps faild!".format(pid))
    _d = {}
    code_flag = 0
    libc_flag = 0
    ld_flag = 0
    # Hoisted out of the loop: the pattern is constant, no need to recompile
    # it for every maps line. (The unused end-address capture is no longer
    # converted either.)
    range_re = re.compile(r"^([0123456789abcdef]{6,14})-([0123456789abcdef]{6,14})", re.S)
    for r in res:
        rc = range_re.findall(r)
        if len(rc) != 1 or len(rc[0]) != 2:
            continue
        start_addr = int(rc[0][0], base=16)
        if (filename is not None) and (not code_flag) and filename in r:
            code_flag = 1
            _d['code'] = start_addr
        elif (not libc_flag) and ("/libc-2." in r or "/libc.so" in r):
            libc_flag = 1
            _d['libc'] = start_addr
        elif (not ld_flag) and ("/ld-2." in r):
            ld_flag = 1
            _d['ld'] = start_addr
        elif "heap" in r:
            _d['heap'] = start_addr
        elif "stack" in r:
            _d['stack'] = start_addr
        elif "vdso" in r:
            _d['vdso'] = start_addr
    return _d
def num_physical_shards_option(f):
    """
    Function to parse/validate the --num-physical-shards CLI option to dirbs-db repartition.

    :param f: obj
    :return: options obj
    """
    def _validate(ctx, param, value):
        # None means the option was omitted; anything else must lie in 1..100.
        if value is not None and not 1 <= value <= 100:
            raise click.BadParameter('Number of physical IMEI shards must be between 1 and 100')
        return value

    return click.option('--num-physical-shards',
                        expose_value=True,
                        type=int,
                        help='The number of physical IMEI shards that tables in DIRBS Core should be split into.',
                        callback=_validate)(f)
def _ls(method_name, ls_type, path=None, log_throwing=True):
    """
    Private helper method shared by various API methods

    NOTE: this module runs under WLST/Jython (Python 2) — the comma-style
    `except (...), e:` syntax below is required and must not be modernised.

    :param method_name: calling method name
    :param ls_type: the WLST return type requested
    :param path: the path (default is the current path)
    :param log_throwing: whether or not to log the throwing message if the path location is not found
    :return: the result of the WLST ls(returnMap='true') call
    :raises: PyWLSTException: if a WLST error occurs
    """
    _method_name = method_name
    _logger.finest('WLSDPLY-00028', method_name, ls_type, path, class_name=_class_name, method_name=_method_name)
    if path is not None:
        # ls(path, returnMap='true') is busted in earlier versions of WLST so go ahead and
        # change directories to the specified path to workaround this
        current_path = get_pwd()
        cd(path)
        try:
            result = wlst.ls(ls_type, returnMap='true', returnType=ls_type)
        except (wlst.WLSTException, offlineWLSTException), e:
            pwe = exception_helper.create_pywlst_exception('WLSDPLY-00029', path, ls_type, _get_exception_mode(e),
                                                           _format_exception(e), error=e)
            if log_throwing:
                _logger.throwing(class_name=_class_name, method_name=_method_name, error=pwe)
            # Restore the original location even on failure before re-raising.
            cd(current_path)
            raise pwe
        cd(current_path)
    else:
        current_path = get_pwd()
        try:
            result = wlst.ls(ls_type, returnMap='true', returnType=ls_type)
        except (wlst.WLSTException, offlineWLSTException), e:
            pwe = exception_helper.create_pywlst_exception('WLSDPLY-00029', current_path, ls_type,
                                                           _get_exception_mode(e), _format_exception(e), error=e)
            _logger.throwing(class_name=_class_name, method_name=_method_name, error=pwe)
            raise pwe
    _logger.finest('WLSDPLY-00030', method_name, ls_type, current_path, result,
                   class_name=_class_name, method_name=_method_name)
    return result | 27,325 |
def serialize_input_str(tx, prevout_n, sequence, script_sig):
    """
    Render a transaction input in CTxIn repr form.
    Based on project: https://github.com/chaeplin/dashmnb.
    """
    parts = ['CTxIn(', 'COutPoint(%s, %s)' % (tx, prevout_n), ', ']
    if tx == '00' * 32 and prevout_n == 0xffffffff:
        # All-zero outpoint at max index marks a coinbase input.
        parts.append('coinbase %s' % script_sig)
    else:
        # Long scripts are truncated to their first 24 characters for display.
        parts.append('scriptSig=%s' % script_sig[:24])
    if sequence != 0xffffffff:
        parts.append(', nSequence=%d' % sequence)
    parts.append(')')
    return ''.join(parts)
def evidence():
    """
    Confirm prohibition number and last name matches VIPS and
    applicant business rules satisfied to submit evidence.
    """
    if request.method != 'POST':
        # Same as the original's fall-through: non-POST requests get None.
        return None
    # invoke middleware functions
    args = helper.middle_logic(business.is_okay_to_submit_evidence(),
                               prohibition_number=request.form['prohibition_number'],
                               driver_last_name=request.form['last_name'],
                               config=Config)
    if 'error_string' in args:
        return jsonify({
            "data": {
                "is_valid": False,
                "error": args.get('error_string'),
            }
        })
    return jsonify({"data": {"is_valid": True}})
def skop(p, rule="b3s23"):
    """Return a list of pairs (Pattern, minimum population) representing
    the smallest known oscillators of the specified period in the given rule.
    Assumes that the local installation of lifelib knows about said rule."""
    rule = sanirule(rule)
    # Rule-specific data lives in a sibling module named after the rule.
    rmod = import_module(f"..{aliases.get(rule, rule)}", __name__)
    cands = []
    # Fixed table: each line is "period apgcode minpop [source]".
    for line in rmod.fixeds.split("\n"):
        words = line.split(maxsplit=3)
        lp, apg, mp = words[:3]
        if int(lp) == p:
            source = words[3] if len(words) > 3 else None
            cands.append((rmod.lt.pattern(apg), int(mp), source))
    # Constructor functions may synthesise an oscillator for this period;
    # they return (pattern, minpop[, source]) or a falsy value.
    for cfunc in rmod.cfuncs:
        if (out := cfunc(p)):
            cands.append(out + (() if len(out) > 2 else (None,)))
    if not cands:
        return []
    # A falsy recorded population means "not recorded": compute it now.
    cands = [trip if trip[1] else (trip[0], minpop(trip[0]), trip[2]) for trip in cands]
    mp = min(trip[1] for trip in cands)
    return list(filter(lambda trip: trip[1] == mp, cands)) | 27,328 |
def relabel_nodes_with_contiguous_numbers(graph_nx, start=0):
    """Relabel the graph's nodes as consecutive integers beginning at *start*.

    Creates a shallow copy of the graph.

    :param graph_nx: networkx graph whose nodes should be renumbered.
    :param start: first integer label.
    :return: tuple of (relabelled copy, {old_node: new_int_label} mapping).
    """
    # No need to materialize nodes() into a list just to enumerate it.
    mapping = {node: idx + start for idx, node in enumerate(graph_nx.nodes())}
    return nx.relabel.relabel_nodes(graph_nx, mapping, copy=True), mapping
def get_yaml_frontmatter(file):
    """
    Get the yaml front matter and the contents of the given file-like object.

    :param file: a text-mode file-like object positioned at its start.
    :return: tuple ``(frontmatter, body)`` where ``frontmatter`` is the
        parsed YAML (or None when the file does not start with ``---``).
    """
    line = file.readline()
    if line != "---\n":
        # No front matter: return everything, including the consumed line.
        return (None, line + file.read())
    frontmatter = []
    for line in file:
        if line == "---\n":
            break
        else:
            frontmatter.append(line)
    # The collected lines already end in "\n"; joining with "\n" (as the
    # original did) doubled every newline and corrupted multi-line YAML.
    # safe_load replaces the deprecated Loader-less yaml.load, which could
    # instantiate arbitrary objects from untrusted input.
    return (yaml.safe_load(''.join(frontmatter)), file.read())
def add_permissions():
    """Add Permissions for UAE VAT Settings and UAE VAT Account."""
    privileged_roles = ('Accounts Manager', 'Accounts User', 'System Manager')
    for doctype in ('UAE VAT Settings', 'UAE VAT Account'):
        add_permission(doctype, 'All', 0)
        for role in privileged_roles:
            add_permission(doctype, role, 0)
            update_permission_property(doctype, role, 0, 'write', 1)
            update_permission_property(doctype, role, 0, 'create', 1)
def haversine(coordinate1, coordinate2):
    """
    Return the distance between two {'Latitude': ..., 'Longitude': ...}
    coordinates using the haversine formula.
    """
    lat1 = radians(coordinate1['Latitude'])
    lon1 = radians(coordinate1['Longitude'])
    lat2 = radians(coordinate2['Latitude'])
    lon2 = radians(coordinate2['Longitude'])
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    a = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    central_angle = 2 * asin(sqrt(a))
    # NOTE(review): the constant multiplies the central angle directly, so it
    # is presumably the earth's *radius* despite its name — confirm its value.
    return EARTH_CIRCUMFERENCE * central_angle
def sine_from_peak(
    peak,
    num_samples=6000,
    sample_duration=0.001,
    target_integral=118,
    plot=False,
    title="Timecourse",
    xlabel="Time",
    ylabel="Measure",
    save_to=None,
    delimiter=",",
    frequency=1,
    xunit="s",
    yunit="m",
    annotation_x=0.6,
    annotation_y=0.9,
):
    """
    Calculates a sine wave with a specified max, with the integral fixed.

    If the maximum value is not sufficient to reach the specified fixed
    integral, then another sine wave will be appended.
    If the maximum value is too high, and would overshoot the specified
    integral, then the duration of the sine wave is reduced, and the
    returned array will be zero in all other areas.

    :param peak: Maximum value to be reached. If a second sine wave is
        appended, then this only applies to the first sine wave. Specified in
        (units)
    :param num_samples: Length of the final array
    :param sample_duration: How long (in seconds) is each sample.
    :param target_integral: Specified integral of the returned function
    :param plot: If true, plot the function. Default: False
    :param title: Default: "Timecourse"
    :param xlabel: Default: "Time"
    :param ylabel: Default: "Measure"
    :param save_to: Save to this file. Default: None
    :param delimiter: Save file delimiter. Default: ","
    :param frequency: Frequency of the sine function. Default: 1
    :param xunit: Unit for the x axis. Default: s
    :param yunit: Unit for the y axis. Default: m
    :param annotation_x: Where on the axis to place information
    :param annotation_y: Where on the axis to place information
    """
    # Smallest peak that can reach target_integral with a single sine
    # spanning the whole num_samples window.
    min_peak = ((2 / sample_duration) * target_integral) / num_samples
    # if the peak value if sufficient to reach the target integral
    if peak >= min_peak:
        # Shorten the wave so the area stays at target_integral; the rest of
        # the array is padded (zeroed) below.
        frequency = 1 / peak
        num_samples_tmp = int(
            round(((2 / sample_duration) * target_integral) / peak)
        )
        min_val, max_val = sine_min_max(frequency)
        angles = np.linspace(min_val, max_val, num_samples_tmp)
        y = get_scaled_sine(angles, peak, frequency)
        y = pad_with_number_1d(y, num_samples)
    # otherwise an additional sine wave is needed
    else:
        num_samples_tmp = int(round(0.5 * num_samples * (peak / min_peak)))
        min_val, max_val = sine_min_max(frequency)
        angles = np.linspace(min_val, max_val, num_samples_tmp)
        y = get_scaled_sine(angles, peak, frequency)
        x = np.arange(0, sample_duration * num_samples_tmp, sample_duration)
        area = np.trapz(y, x)
        # Fill the remaining samples with a second sine scaled to make up
        # exactly the missing area.
        area_diff = target_integral - area
        space_left = num_samples - num_samples_tmp
        angles = np.linspace(min_val, max_val, space_left)
        velocity_extra = np.sin(frequency * angles) + 1
        x = np.arange(0, sample_duration * space_left, sample_duration)
        area = np.trapz(velocity_extra, x)
        velocity_extra = velocity_extra * (area_diff / area)
        y = np.append(y, velocity_extra)
    x = np.arange(0, num_samples * sample_duration, sample_duration)
    final_area = np.trapz(y, x)
    peak = np.max(y)
    if save_to is not None:
        np.savetxt(save_to, y, delimiter=delimiter)
    else:
        print("No output file specified. Not saving results.")
    if plot:
        print("Plotting profile")
        plt.figure()
        xaxis = np.linspace(0, num_samples * sample_duration, num_samples)
        plt.plot(xaxis, y)
        plt.title(title)
        plt.xlabel(xlabel + "({})".format(xunit))
        plt.ylabel(ylabel + "({})".format(yunit))
        annotation = "Max vel: {}({}) \n Displacement: {} ({})".format(
            round(peak), yunit, round(final_area), xunit
        )
        plt.text(
            annotation_x,
            annotation_y,
            annotation,
            transform=plt.gca().transAxes,
        )
        plt.show()
    else:
        print("--plot not specified. Not plotting")
    print("Done!") | 27,333 |
def f1(R1, R2, R3):
    """Switching function f1.

    Evaluates 0.5 * (1 - tanh(0.5 * alpha * (3*rho1 - rho2 - rho3))), where
    each rho_i is the displacement of R_i from its reference value.
    """
    # Reference geometry and switching steepness.
    ref_1, ref_2, ref_3 = 1.160, 1.160, 2.320
    steepness = 1.0
    d1 = R1 - ref_1
    d2 = R2 - ref_2
    d3 = R3 - ref_3
    return 0.5 * (1 - adf.tanh(0.5 * steepness * (3 * d1 - d2 - d3)))
def getUser(client, attrs):
    """Fetch a user by name, creating it first if it does not exist.

    :param client: ICAT client used for searching and creating entities.
    :param attrs: attribute dict; ``attrs['name']`` identifies the user.
    :return: the existing or freshly created user object.
    """
    query = "User [name='%s']" % attrs['name']
    try:
        return client.assertedSearch(query)[0]
    except icat.SearchResultError:
        # No such user yet: create one from the given attributes.
        new_user = client.new("user")
        initobj(new_user, attrs)
        new_user.create()
        return new_user
def _fn_pow_ ( self , b ) :
    """ Power function: f = pow( a, b )
    >>> a = f.pow ( b )
    >>> a = f ** b
    """
    # Delegates to the generic function factory; 'pow_%s_%s' is passed as a
    # name template (presumably filled in by _fn_make_fun_ with the operand
    # names -- TODO confirm against _fn_make_fun_).
    return _fn_make_fun_ ( self ,
                           b ,
                           Ostap.MoreRooFit.Power ,
                           'pow_%s_%s' )
def funder_trans(params):
    """Build the commitment (C) and revocable-delivery (RD) transactions
    for a channel funding.

    :param params: sequence of
        [self_pubkey, other_pubkey, funding_address, funding_script,
         deposit, funding_txid, asset_type]
    :return: dict with keys ``C_TX`` and ``R_TX``, or None if *params*
        is too short.
    """
    # Bug fix: params[6] (asset_type) is read below, so at least 7 entries
    # are required; the original guard only rejected fewer than 6.
    if 7 > len(params):
        LOG.error('funder_trans: Invalid params {}!'.format(params))
        return None
    selfpubkey = params[0]
    otherpubkey = params[1]
    addressFunding = params[2]
    scriptFunding = params[3]
    deposit = params[4]
    founding_txid = params[5]
    asset_type = params[6]
    asset_id = get_asset_type_id(asset_type)
    # Commitment transaction: splits the deposit between both parties.
    C_tx = createCTX(addressFunding=addressFunding, balanceSelf=deposit,
                     balanceOther=deposit, pubkeySelf=selfpubkey,
                     pubkeyOther=otherpubkey, fundingScript=scriptFunding,
                     asset_id=asset_id, fundingTxId=founding_txid)
    # Revocable delivery transaction spending our side of the commitment.
    RD_tx = createRDTX(addressRSMC=C_tx["addressRSMC"],
                       addressSelf=pubkeyToAddress(selfpubkey),
                       balanceSelf=deposit, CTxId=C_tx["txId"],
                       RSMCScript=C_tx["scriptRSMC"], asset_id=asset_id)
    return {"C_TX": C_tx, "R_TX": RD_tx}
def get_replacements_by_guid(replacements_by_name):
    """Returns a lookup table that is by-guid rather than by-name."""
    brush_lookup = BrushLookup.get()

    def to_guid(guid_or_name):
        """Resolve a brush guid or brush name to a guid."""
        if guid_or_name in brush_lookup.guid_to_name:
            return guid_or_name
        if guid_or_name in brush_lookup.name_to_guids:
            return brush_lookup.get_unique_guid(guid_or_name)
        raise LookupError("Not a known brush or brush guid: %r" % guid_or_name)

    guid_map = {}
    for before, after in replacements_by_name:
        source = to_guid(before)
        if after is True:
            # True means "map the brush to itself".
            target = source
        elif after is None:
            target = None
        else:
            target = to_guid(after)
        guid_map[source] = target
    return guid_map
def _make_divergence_numba_1d(bcs: Boundaries) -> Callable:
    """make a 1d divergence operator using numba compilation

    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            Boundary conditions; the grid shape and discretization are read
            from ``bcs.grid``.

    Returns:
        A compiled function that can be applied to an array of values
    """
    dim_x = bcs.grid.shape[0]
    # Central differences: (f[i+1] - f[i-1]) / (2 * dx).
    scale = 0.5 / bcs.grid.discretization[0]
    # Evaluator returning (left, center, right) values honoring boundaries.
    region_x = bcs[0].make_region_evaluator()
    @jit_allocate_out(out_shape=(dim_x,))
    def divergence(arr, out=None):
        """ apply gradient operator to array `arr` """
        for i in range(dim_x):
            valm, _, valp = region_x(arr[0], (i,))
            out[i] = (valp - valm) * scale
        return out
    return divergence
def draw_predicted_rectangle(image_arr, y, x, half_height, half_width):
    """Draws a rectangle onto the image at the provided coordinates.

    Args:
        image_arr: Numpy array of the image, channels-first (3, H, W).
        y: y-coordinate of the rectangle (normalized to 0-1).
        x: x-coordinate of the rectangle (normalized to 0-1).
        half_height: Half of the height of the rectangle (normalized to 0-1).
        half_width: Half of the width of the rectangle (normalized to 0-1).
    Returns:
        Modified image (numpy array, channels-last).
    """
    assert image_arr.shape[0] == 3, str(image_arr.shape)
    img_height, img_width = image_arr.shape[1], image_arr.shape[2]
    tl_y, tl_x, br_y, br_x = unnormalize_prediction(
        y, x, half_height, half_width,
        img_height=img_height, img_width=img_width)
    # Work on a scaled copy and convert channels-first to channels-last.
    canvas = np.rollaxis(np.copy(image_arr) * 255, 0, 3)
    return draw_rectangle(canvas, tl_y, tl_x, br_y, br_x)
def my_max(seq: Sequence[ItemType]) -> Optional[ItemType]:
    """Return the maximum element of a sequence, or ``None`` if it is empty.

    On ties, the earliest of the equal maxima is returned (the original
    recursive implementation compared with ``>=``, which has the same
    effect).

    :param seq: последовательность (sequence to scan)
    :type seq: Sequence[ItemType]
    :return: максимальный элемент (the maximum element), or ``None``
    :rtype: Optional[ItemType]
    """
    if not seq:
        return None
    # Single linear pass. The original recursed with seq[1:] slices, which
    # is O(n^2) time/space and hits the recursion limit on long inputs.
    best = seq[0]
    for item in seq[1:]:
        if item > best:
            best = item
    return best
def correct_pm0(ra, dec, pmra, pmdec, dist, vlsr=vlsr0, vx=0, vy=0, vz=0):
    """Correct proper motions for the reflex motion of the Sun.

    Arguments:
        ra -- RA in deg
        dec -- Declination in deg
        pmra -- pm in RA in mas/yr
        pmdec -- pm in declination in mas/yr
        dist -- distance in kpc
    Returns:
        (pmra, pmdec) tuple with the proper motions corrected for the
        Sun's motion
    """
    observed = acoo.ICRS(ra=ra * auni.deg,
                         dec=dec * auni.deg,
                         radial_velocity=0 * auni.km / auni.s,
                         distance=dist * auni.kpc,
                         pm_ra_cosdec=pmra * auni.mas / auni.year,
                         pm_dec=pmdec * auni.mas / auni.year)
    # Solar velocity: standard peculiar motion (11.1, 12.24, 7.25) km/s
    # plus the LSR speed and any user-supplied offsets.
    sun_velocity = acoo.CartesianDifferential(
        np.array([vx + 11.1, vy + vlsr + 12.24, vz + 7.25]) * auni.km / auni.s)
    frame_kwargs = dict(galcen_v_sun=sun_velocity)
    galactocentric = observed.transform_to(acoo.Galactocentric(**frame_kwargs))
    # Same position but with zero Galactocentric velocity: transforming it
    # back to ICRS isolates the apparent (reflex) proper motion of the Sun.
    at_rest = acoo.Galactocentric(x=galactocentric.x,
                                  y=galactocentric.y,
                                  z=galactocentric.z,
                                  v_x=galactocentric.v_x * 0,
                                  v_y=galactocentric.v_y * 0,
                                  v_z=galactocentric.v_z * 0,
                                  **frame_kwargs)
    reflex = at_rest.transform_to(acoo.ICRS())
    return ((observed.pm_ra_cosdec - reflex.pm_ra_cosdec).to_value(auni.mas / auni.year),
            (observed.pm_dec - reflex.pm_dec).to_value(auni.mas / auni.year))
def clean_output_type_names(df: pd.DataFrame) -> pd.DataFrame:
    """Convenience function for cleaning up output type names

    The `outputs_clean` dict is located in the defaults submodule.

    NOTE: the replacement is done in place -- the input frame itself is
    mutated and then returned (callers holding a reference to *df* see
    the change).

    :param df: Input data frame to be cleaned up
    :type df: pandas DataFrame
    :return: DataFrame with output type names cleaned up.
    :rtype: pandas DataFrame
    """
    df.replace(to_replace=outputs_clean, inplace=True)
    return df
def run_mask_camera(video_path, output_video_name, conf_thresh, target_shape):
    """
    Runs the model on a video stream (file or camera input)

    Parameters:
        video_path (str): 0 for camera input or the path to the video
        output_video_name (str): Output video name
        conf_thresh (float): The min threshold of classification probability.
        target_shape (tuple): Shape of the target

    Raises:
        ValueError: if the video source cannot be opened

    Returns:
        None
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Bug fix: the original had an unreachable `return` after this raise.
        raise ValueError("Video open failed.")
    writer = write_output_video(cap, output_video_name)
    status_video_capture = True
    # capture video frame by frame until the stream is exhausted
    while status_video_capture:
        start_stamp = time.time()
        status_video_capture, img_raw = cap.read()
        if status_video_capture:
            # OpenCV delivers BGR; the model expects RGB.
            img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
            read_frame_stamp = time.time()
            mask_boxes, masks_on = inference(
                img_raw,
                conf_thresh,
                iou_thresh=0.5,
                target_shape=target_shape,
                show_result=False,
            )
            # add UI messages on image
            img_raw = add_view_messages(img_raw, len(mask_boxes), masks_on)
            inference_stamp = time.time()
            write_frame_stamp = video_output(writer, img_raw, bool(output_video_name))
            # Report per-frame timings: read / inference / write.
            print(
                TIME_INFO_SAMPLE
                % (
                    read_frame_stamp - start_stamp,
                    inference_stamp - read_frame_stamp,
                    write_frame_stamp - inference_stamp,
                )
            )
        else:
            cap.release()
            print(RELEASE_MESSAGE)
            break
def analyze_sentiment(input_text):
    """
    Using VADER perform sentiment analysis on the given text

    Returns the polarity-scores dict produced by VADER.
    """
    return SentimentIntensityAnalyzer().polarity_scores(input_text)
def get_free_header(filepath, needed_keys=(), original_name=None, observatory=None):
    """Return the complete unconditioned header dictionary of a reference file.

    DOES NOT hijack warnings. DOES NOT verify checksums.

    *original_name* is used to determine the file type for web-upload
    temporary files which have no distinguishable extension; it is the
    browser-side name for the file.

    get_free_header() is a cached function to prevent repeat file reads.
    Although parameters are given default values, for caching to work
    correctly even default parameters should be specified positionally.
    Since the function is cached, loading file updates requires first
    clearing the function cache.
    """
    handler = file_factory(filepath, original_name, observatory)
    header = handler.get_header(needed_keys, checksum=False)
    log.verbose("Header of", repr(filepath), "=", log.PP(header), verbosity=90)
    return header
def clean_ice(options, args):
    """
    Clean all orphaned VMs

    :param options: parsed CLI options (database, loglevel, logdir).
    :param args: positional arguments; args[1] must be the run name.
    :return: 0 on success, 1 on usage error.
    """
    # Fix: the original used Python 2 `print` statements; parenthesized
    # single-argument print calls behave identically under Python 2 and 3.
    if len(args) < 2:
        print("The iceage command requires a run name. See --help")
        return 1
    dbname = args[1]
    cb = CloudInitD(options.database, db_name=dbname, log_level=options.loglevel, logdir=options.logdir, terminate=False, boot=False, ready=True)
    ha = cb.get_iaas_history()
    for h in ha:
        state = h.get_state()
        handle = h.get_service_iaas_handle()
        if state == "running":
            if handle != h.get_id():
                # Handle no longer matches the VM id: the VM is orphaned.
                print_chars(2, "Terminating an orphaned VM %s\n" % (h.get_id()), bold=True)
                h.terminate()
            elif h.get_context_state() == cloudinitd.service_state_initial:
                # VM was started but never left its initial context state.
                print_chars(2, "Terminating pre-staged VM %s\n" % (h.get_id()), bold=True)
                h.terminate()
    return 0
def ADD_CIPD_FILE(api, pkg, platform, image, customization, success=True):
    """ mock add cipd file to unpacked image step """
    cipd_cache_root = ('[CACHE]\\Pkgs\\CIPDPkgs\\resolved-instance_id-of-latest'
                       '----------')
    wildcard = cipd_cache_root + '\\{}\\{}\\*'.format(pkg, platform)
    return ADD_FILE(api, image, customization, wildcard, success)
def init(
    ctx: typer.Context,
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-V",
        is_flag=True,
        help="Print each step as it happens.",
    ),
) -> None:
    """Configure PyBites credentials and repository.

    Interactively prompts for the PyBites username, password (entered
    twice for confirmation), and local repository path, then writes them
    to ``~/.eatlocal/.env``. Loops until the user confirms the inputs.
    """
    while True:
        username = Prompt.ask("Enter your PyBites username")
        # Re-prompt until the two password entries match.
        while True:
            password = Prompt.ask("Enter your PyBites user password", password=True)
            confirm_password = Prompt.ask("Confirm PyBites password", password=True)
            if password == confirm_password:
                break
            print("[yellow]:warning: Password did not match.")
        repo = Path(
            Prompt.ask(
                "Enter the path to your local git repo for PyBites, or press enter for the current directory",
                default=Path().cwd(),
                show_default=True,
            )
        ).expanduser()
        # A missing path only warns; the user may still confirm it below.
        if not repo.exists():
            print(f"[yellow]:warning: The path {repo} could not be found!")
        print(f"Your input - username: {username}, repo: {repo}.")
        if Confirm.ask(
            "Are these inputs correct? If you confirm, they will be stored under .eatlocal in your user home directory"
        ):
            break
    if not EATLOCAL_HOME.is_dir():
        EATLOCAL_HOME.mkdir()
    # NOTE(review): the password is stored in plain text in this file.
    with open(EATLOCAL_HOME / ".env", "w", encoding="utf-8") as fh:
        fh.write(f"PYBITES_USERNAME={username}\n")
        fh.write(f"PYBITES_PASSWORD={password}\n")
        fh.write(f"PYBITES_REPO={repo}\n")
    print(f"[green]Successfully stored configuration variables under {EATLOCAL_HOME}.")
def row_to_columns(row):
    """Takes a row as a string and returns it as a list of columns.

    :param row: whitespace-delimited row string
    :return: list of non-empty column tokens
    """
    # str.split() with no separator collapses runs of whitespace and never
    # yields empty tokens, so the original's extra filtering was redundant.
    return row.split()
def circ_diagonal_mode_mat(bk):
    """Diagonal matrix of radial coefficients for all modes/wavenumbers.

    Parameters
    ----------
    bk : (M, N+1) numpy.ndarray
        Vector containing values for all wavenumbers :math:`M` and modes up
        to order :math:`N`

    Returns
    -------
    Bk : (M, 2*N+1, 2*N+1) numpy.ndarray
        Multidimensional array containing diagonal matrices with the input
        vector on the main diagonal.
    """
    # Promote a single coefficient vector to a one-row matrix.
    if bk.ndim == 1:
        bk = bk[np.newaxis, :]
    num_k, num_modes = bk.shape
    Bk = np.zeros((num_k, num_modes, num_modes), dtype=complex)
    for idx in range(num_k):
        np.fill_diagonal(Bk[idx], bk[idx])
    return np.squeeze(Bk)
def log_updater(log, repetition, average_loss, optimization_time):
    """
    Record one repetition's loss and timing in the log object.

    Fresh list objects are bound (rather than appending in place), matching
    the original behavior for callers holding references to the old lists.
    """
    entry_index = repetition + 1
    log["losses"] = [*log["losses"], [entry_index, average_loss]]
    log["times"] = [*log["times"], [entry_index, optimization_time]]
    return log
def execute_function_multithreaded(fn,
                                   args_list,
                                   block_until_all_done=True,
                                   max_concurrent_executions=1000):
    """
    Executes fn in multiple threads each with one set of the args in the
    args_list.

    NOTE(review): each element of *args_list* is mutated in place (an index
    is appended to it) -- callers should not reuse the argument lists.

    :param fn: function to be executed
    :type fn:
    :param args_list:
    :type args_list: list(list)
    :param block_until_all_done: if is True, function will block until all the
    threads are done and will return the results of each thread's execution.
    :type block_until_all_done: bool
    :param max_concurrent_executions:
    :type max_concurrent_executions: int
    :return:
    If block_until_all_done is False, returns None. If block_until_all_done is
    True, function returns the dict of results.
        {
            index: execution result of fn with args_list[index]
        }
    :rtype: dict
    """
    result_queue = queue.Queue()
    worker_queue = queue.Queue()
    # Tag each argument list with its original index so results can be
    # matched back to inputs regardless of completion order.
    for i, arg in enumerate(args_list):
        arg.append(i)
        worker_queue.put(arg)

    def fn_execute():
        # Worker loop: pull args until the queue is drained.
        while True:
            try:
                arg = worker_queue.get(block=False)
            except queue.Empty:
                return
            exec_index = arg[-1]
            res = fn(*arg[:-1])
            result_queue.put((exec_index, res))

    threads = []
    number_of_threads = min(max_concurrent_executions, len(args_list))
    for _ in range(number_of_threads):
        thread = threading.Thread(target=fn_execute)
        if not block_until_all_done:
            # Daemon threads let the interpreter exit without waiting.
            thread.daemon = True
        thread.start()
        threads.append(thread)

    # Returns the results only if block_until_all_done is set.
    results = None
    if block_until_all_done:
        # Because join() cannot be interrupted by signal, a single join()
        # needs to be separated into join()s with timeout in a while loop.
        have_alive_child = True
        while have_alive_child:
            have_alive_child = False
            for t in threads:
                t.join(0.1)
                if t.is_alive():
                    have_alive_child = True
        results = {}
        while not result_queue.empty():
            item = result_queue.get()
            results[item[0]] = item[1]
        # A missing result means a worker died before posting its result.
        if len(results) != len(args_list):
            raise RuntimeError(
                'Some threads for func {func} did not complete '
                'successfully.'.format(func=fn.__name__))
    return results
def cli(incluster, kubeconfig):
    """
    CLI for testing kubernetes NetworkPolicies.

    Loads either the in-cluster configuration or the given kubeconfig file;
    exits with status 1 if the kubeconfig cannot be loaded.
    """
    if incluster:
        k8s.config.load_incluster_config()
        return
    try:
        k8s.config.load_kube_config(config_file=kubeconfig)
    except k8s.config.ConfigException as config_error:
        LOGGER.error(config_error)
        exit(1)
    except TypeError as type_error:
        LOGGER.error(
            "Internal error: Couldn't load kubeconfig with error: %s", type_error
        )
        exit(1)
def volume_encryption_metadata_get(context, volume_id, session=None):
    """Return the encryption metadata for a given volume."""
    volume = _volume_get(context, volume_id)
    encryption = volume_type_encryption_get(context, volume['volume_type_id'])
    metadata = {'encryption_key_id': volume['encryption_key_id']}
    if encryption:
        # Copy the type-level encryption settings into the result.
        metadata.update(
            (field, encryption[field])
            for field in ('control_location', 'cipher', 'key_size', 'provider'))
    return metadata
def test_shift_to_other_frame(hlwm, direction, frameindex, clients_per_frame):
    """
    in a frame grid with 3 columns, where the middle column has 3 rows, we put
    the focused window in the middle, and then invoke 'shift' with the given
    'direction'. Then, it is checked that the window stays focused but now
    resides in the frame with the given 'frameindex'
    """
    winid, _ = hlwm.create_client()

    def otherclients():
        # put 'otherclients'-many clients in every other frame
        winids = hlwm.create_clients(clients_per_frame)
        return ' '.join(winids)

    # 1-3-1 column layout; the focused client sits in the middle row of the
    # middle column (frame index '0101').
    layout_131 = f"""
    (split horizontal:0.66:0
        (split horizontal:0.5:1
            (clients vertical:0 {otherclients()})
            (split vertical:0.66:0
                (split vertical:0.5:1
                    (clients vertical:0 {otherclients()})
                    (clients vertical:0 {winid}))
                (clients vertical:0 {otherclients()})))
        (clients vertical:0 {otherclients()}))
    """
    hlwm.call(['load', layout_131])
    assert hlwm.attr.clients.focus.winid() == winid
    assert hlwm.attr.tags.focus.tiling.focused_frame.index() == '0101'

    hlwm.call(['shift', direction])

    # the window is still focused
    assert hlwm.attr.clients.focus.winid() == winid
    # but it's now in another frame
    assert hlwm.attr.tags.focus.tiling.focused_frame.index() == frameindex
def parse_local_cpus():
    """Return information about available CPU's and cores in local system.

    The information is gathered from ``lscpu`` command which besides the
    available CPUs informations, also returns information about physical
    cores in the system, which is usefull for hyper threading systems.

    Returns:
        dict(str, list), dict(str, list): two maps with mapping:
            core_id -> list of cpu's assigned to core
            cpu_id -> list of cores (in most situations this will be a
            single element list)
    """
    core_map = {}
    cpu_map = {}
    proc = subprocess.Popen(['lscpu', '-p'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    exit_code = proc.wait()
    if exit_code != 0:
        raise Exception("command lscpu failed with exit code {}: {}".format(exit_code, err))
    for line in out.decode().splitlines():
        # omit all commented lines
        if line.startswith('#'):
            continue
        fields = line.split(',')
        if len(fields) != 9:
            # Unexpected column count is only warned about; the first three
            # columns are still parsed (newer lscpu may emit extra columns).
            _logger.warning('warning: unknown output format "{}"'.format(line))
        cpu_id, core_id, _socket = fields[0:3]
        core_map.setdefault(core_id, []).append(cpu_id)
        cpu_map.setdefault(cpu_id, []).append(core_id)
    return core_map, cpu_map
def perform_tick(gamefield):
    """
    Perform a single tick.

    A tick is one round in which every cell is checked against the rules;
    the resulting changes are then applied to the field.
    """
    pending_changes = get_tick_changes(gamefield)
    activate_rules(gamefield, pending_changes)
    return gamefield
def get_cairo_surface(pygame_surface):
    """ Black magic.

    Builds a cairo ImageSurface that shares pixel memory with a pygame
    surface by mirroring CPython object layout via ctypes.

    NOTE(review): the struct layouts below hard-code CPython/pygame/SDL
    internals (object header size, SDL_Surface field order) -- fragile
    across versions; verify against the pygame/SDL release in use.
    """
    # Mirrors the pygame Surface object: CPython object header followed by
    # a pointer to the underlying SDL_Surface.
    class Surface(ctypes.Structure):
        _fields_ = [
            (
             'HEAD', ctypes.c_byte * object.__basicsize__),
            (
             'SDL_Surface', ctypes.c_void_p)]

    # Partial mirror of the C SDL_Surface struct (only leading fields).
    class SDL_Surface(ctypes.Structure):
        _fields_ = [
            (
             'flags', ctypes.c_uint),
            (
             'SDL_PixelFormat', ctypes.c_void_p),
            (
             'w', ctypes.c_int),
            (
             'h', ctypes.c_int),
            (
             'pitch', ctypes.c_ushort),
            (
             'pixels', ctypes.c_void_p)]

    surface = Surface.from_address(id(pygame_surface))
    ss = SDL_Surface.from_address(surface.SDL_Surface)
    # Wrap the raw pixel buffer in a writable memoryview (zero-copy).
    pixels_ptr = ctypes.pythonapi.PyMemoryView_FromMemory(ctypes.c_void_p(ss.pixels),
                                                          ss.pitch * ss.h,
                                                          PyBUF_WRITE)
    pixels = ctypes.cast(pixels_ptr, ctypes.py_object).value
    return cairo.ImageSurface.create_for_data(pixels, cairo.FORMAT_RGB24, ss.w, ss.h, ss.pitch)
def string_out_table(dat, columns, caption, preferred_sizes=None, table_size="footnotesize"):
    """
    Render a data dict as a Markdown table string.

    - dat: (Dict String (Array String)), dict of arrays of data for the table
    - columns: (Array String), the column names in desired order
    - caption: None or string
    - preferred_sizes: None or (Array Integer), the preferred column sizes;
      column will be at least that size
    - table_size: None or string, if string, one of "Huge", "huge", "LARGE",
      "Large", "large", "normalsize", "small", "footnotesize", "scriptsize",
      "tiny", the table size
    RETURN: string of the table in Markdown
    """
    if preferred_sizes is None:
        preferred_sizes = [0] * len(columns)
    parts = []
    if table_size is not None:
        parts.append(f"\\pandocbegin{{{table_size}}}\n\n")
    parts.append(make_table_from_dict_of_arrays(
        dat, columns=columns, preferred_sizes=preferred_sizes))
    if caption is not None:
        parts.append(f"\nTable: {caption}\n")
    if table_size is not None:
        parts.append(f"\n\\pandocend{{{table_size}}}\n\n")
    return "".join(parts)
def getCreationDate(pdf):
    """Return the creation date of a document."""
    raw = libc.pycpdf_getCreationDate(pdf.pdf)
    date = string_at(raw).decode()
    # Check for errors raised by the C library before returning.
    checkerror()
    return date
async def fast_dependencies(
    _: Annotated[int, Dependant(dep_without_delays)]
) -> Response:
    """An endpoint with dependencies that execute instantly.

    The injected dependency value is deliberately unused (bound to ``_``);
    the endpoint presumably exists to exercise dependency-resolution
    overhead -- confirm against the surrounding benchmark/tests.
    """
    return Response()
def _pretty_print_bnode(bnode: BNode):
    """Render a blank node with a leading face marker."""
    return '😶 {}'.format(bnode)
def resource_config():
    """
    Return the absolute path to the resource configuration JSON.

    The file names the current host and all host containers taking part
    in the training.

    Returns:
        path (str): The absolute path to the resource config JSON
    """
    return os.path.join(config(), 'resourceconfig.json')
def combine_div(range1, range2):
    """
    Combiner for Divide operation.
    >>> import gast as ast
    >>> combine(Range(-1, 5), Range(3, 8), ast.Div())
    Range(low=-1, high=1)
    >>> combine(Range(-1, 5), Range(-5, -4), ast.Div())
    Range(low=-2, high=0)
    >>> combine(Range(-1, 5), Range(-5, 3), ast.Div())
    Range(low=-inf, high=inf)
    """
    # Division by a range that contains (or touches) zero is unbounded.
    if range2.low <= 0 and range2.high >= 0:
        return UNKNOWN_RANGE
    # Note: the original also tested `0 in range2` here; that branch was
    # unreachable because low == 0 or high == 0 implies low <= 0 <= high,
    # which is already caught above, so it was removed.
    res = [v1 / v2 for v1, v2 in itertools.product(range1, range2)]
    return Range(numpy.min(res), numpy.max(res))
def run_covid(country):
    """Run the COVID model for some country.

    *country* names an attribute of the ``covid_19`` package that exposes
    a ``run_model()`` entry point.
    """
    getattr(covid_19, country).run_model()
def air(pos, res=None, shape=None, rowmajor=False, rad=None, ref=None):
    """Setups up an Airy system. See the build function for details.

    If *rad* is not given it is estimated from the extent of *pos*, which
    must then be a 2x2 array of corner coordinates in degrees.
    """
    pos, res, shape, mid = validate(pos, res, shape, rowmajor)
    if rad is None:
        if pos.ndim != 2:
            raise ValueError("Airy requires either rad or pos[2,2]")
        # Mean of the angular width and height, halved, as the Airy radius.
        w = angdist(mid[0]*deg2rad,pos[0,1]*deg2rad,mid[0]*deg2rad,pos[1,1]*deg2rad)*rad2deg
        h = angdist(pos[0,0]*deg2rad,mid[1]*deg2rad,pos[1,0]*deg2rad,mid[1]*deg2rad)*rad2deg
        rad = (w+h)/4
    w = WCS(naxis=2)
    w.wcs.ctype = ["RA---AIR","DEC--AIR"]
    w.wcs.set_pv([(2,1,90-rad)])
    # Bug fix: the original used `ref is "standard"`, an identity comparison
    # against a string literal (implementation-dependent); compare by value.
    if ref == "standard": ref = None
    return finalize(w, pos, res, shape, ref=ref)
def start():
    """
    Method to initiate socket server services.

    Listens on the module-level ``server`` socket and spawns one handler
    thread per accepted connection. Runs forever.
    """
    server.listen()
    butter = (f"[SERVER_LISTENING] Server up and running on {SERVER}:{PORT} waiting for connections.")
    print(butter.lower())
    while True:
        conn, addr = server.accept()
        thread = threading.Thread(target=handle_client, args=(conn, addr))
        thread.start()
        # Fix: threading.activeCount() is a deprecated camelCase alias;
        # use active_count(). Subtract 1 to exclude the main thread.
        print(f"[ACTIVE CONNECTIONS] {threading.active_count() - 1}")
def get_pdf(list_of_figures: list, pdf_name: str) -> None:
    """Create a pdf with given plots in a list.

    The file is written to the project's 'Output' directory.

    Keyword arguments:
    list_of_figures -- list of matplotlib figures
    pdf_name -- Name of the pdf file.
    """
    # Fix: use PdfPages as a context manager so the file is closed (and the
    # PDF finalized) even if saving one of the figures raises.
    with PdfPages('Output/' + pdf_name) as pdf:
        for fig in list_of_figures:
            pdf.savefig(fig)
def get_config_type(service_name):
    """
    get the config tmp_type based on service_name

    :param service_name: Hadoop-stack service name (e.g. "HDFS", "HIVE")
    :return: the config type string; "global" for unknown services
    """
    # NOTE(review): the original if/elif chain tested service_name == "HDFS"
    # twice, so its second branch ("core-site") was unreachable -- it was
    # probably meant for a different service name; confirm against the
    # config schema. Behavior here matches the original chain.
    mapping = {
        "HDFS": "hdfs-site",
        "MAPREDUCE": "mapred-site",
        "HBASE": "hbase-site",
        "OOZIE": "oozie-site",
        "HIVE": "hive-site",
        "WEBHCAT": "webhcat-site",
    }
    return mapping.get(service_name, "global")
def batch_request(config, dataset_id, geographies, date_format,
                  record_offset=0, max_api_calls=10):
    """Fetch a NOMIS dataset from the API, in batches,
    based on a configuration object.

    Args:
        config (dict): Configuration object, from which a get
                       request is formed.
        dataset_id (str): NOMIS dataset ID
        geographies (list): Return object from :obj:`discovery_iter`.
        date_format (str): Formatting string for dates in the dataset
        record_offset (int): Record to start from
        max_api_calls (int): Number of requests allowed
    Returns:
        (df, done, offset): combined DataFrame, whether the dataset was
        exhausted, and the next record offset.
    """
    from datetime import datetime

    config["geography"] = ",".join(str(row["nomis_id"])
                                   for row in geographies)
    config["RecordOffset"] = record_offset

    def date_parser(value):
        # Fix: `pd.datetime` was deprecated and removed in pandas 2.0;
        # use the stdlib datetime class directly.
        return datetime.strptime(value, date_format)

    # Build a list of dfs in chunks from the NOMIS API
    dfs = []
    offset = 25000
    icalls = 0
    done = False
    while (not done) and icalls < max_api_calls:
        #logging.debug(f"\t\t {offset}")
        # Build the request payload
        params = "&".join(f"{k}={v}" for k,v in config.items())
        # Hit the API
        r = requests.get(NOMIS.format(f"{dataset_id}.data.csv"), params=params)
        # Read the data
        with StringIO(r.text) as sio:
            _df = pd.read_csv(sio, parse_dates=["DATE"], date_parser=date_parser)
        # A short page means the dataset is exhausted.
        done = len(_df) < offset
        # Increment the offset
        config["RecordOffset"] += offset
        # Ignore empty fields
        dfs.append(_df.loc[_df.OBS_VALUE > 0])
        icalls += 1
    # Combine and return
    df = pd.concat(dfs)
    df.columns = [c.lower() for c in df.columns]
    return df, done, config["RecordOffset"]
def audit_rds(accounts, send_report):
    """ Runs auditors/rds_security_group

    Thin wrapper delegating to sm_audit_rds; *accounts* and *send_report*
    are passed through unchanged.
    """
    sm_audit_rds(accounts, send_report)
def get_val(in_root: str, wnid2idx: Dict[str, int]) -> List[Tuple[str, int]]:
    """Get validation split sample pairs.

    Args:
        in_root (str): Input dataset root directory.
        wnid2idx (dict): Mapping of WordNet ID to class ID.

    Returns:
        Shuffled list of pairs of (image filename, class ID).
    """
    annotations_file = os.path.join(in_root, 'val', 'val_annotations.txt')
    lines = open(annotations_file).read().strip().split('\n')
    samples = []
    for line in tqdm(lines, leave=False):
        # First two columns are the image basename and its WordNet ID.
        basename, wnid = line.split()[:2]
        image_path = os.path.join(in_root, 'val', 'images', basename)
        samples.append((image_path, wnid2idx[wnid]))
    shuffle(samples)
    return samples
def is_music(file: File) -> bool:
    """Return True if the file's extension is a known audio format."""
    return file.ext in {
        "aac", "alac", "ape", "flac", "m4a", "mka",
        "mp3", "ogg", "opus", "wav", "wma",
    }
def sqeuclidean_pdist(x, y=None):
    """Fast and efficient implementation of ||X - Y||^2 = ||X||^2 + ||Y||^2 - 2 X^T Y

    Input: x is a Nxd matrix
           y is an optional Mxd matrix
    Output: dist is a NxM matrix where dist[i,j] is the square norm between
            x[i,:] and y[j,:]; if y is not given then 'y=x' is used and the
            diagonal is forced to exactly 0.
    i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
    """
    x_norm = (x**2).sum(1).unsqueeze(1)
    if y is not None:
        y_t = torch.transpose(y, 0, 1)
        y_norm = (y**2).sum(1).unsqueeze(0)
    else:
        y_t = torch.transpose(x, 0, 1)
        y_norm = x_norm.squeeze().unsqueeze(0)
    dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
    # get rid of NaNs
    dist[torch.isnan(dist)] = 0.
    # clamp small negative values (round-off) to 0
    dist = torch.clamp(dist, 0., np.inf)
    if y is None:
        # Bug fix: the original did `dist[dist == torch.diag(dist)] = 0.`,
        # which broadcasts the diagonal across rows and zeroes ANY entry
        # equal to its column's diagonal value. Only the true diagonal
        # should be forced to 0.
        dist.fill_diagonal_(0.)
    return dist
def test_receive_order_internal_duplicate_from_same_neighbor(scenario, engine):
    """
    This tests receiving the same internal order from the neighbor multiple times.

    Expected: duplicate deliveries from the same neighbor collapse into a
    single pending orderinfo entry.
    """
    # Arrange: two mutual neighbors; peer 1 receives and stores the order.
    peer_list: List[Peer] = create_test_peers(scenario, engine, 2)
    peer_list[0].add_neighbor(peer_list[1])
    peer_list[1].add_neighbor(peer_list[0])
    order: Order = create_a_test_order(scenario)
    peer_list[1].receive_order_external(order)
    peer_list[1].send_orders_to_on_chain_check(peer_list[1].local_clock)
    peer_list[1].store_orders()
    # Act: deliver the same order to peer 0 twice from the same neighbor.
    peer_list[0].receive_order_internal(peer_list[1], order)
    peer_list[0].receive_order_internal(peer_list[1], order)
    # Assert: the duplicate was deduplicated.
    assert len(peer_list[0].order_pending_orderinfo_mapping[order]) == 1
def parse_options() -> argparse.Namespace:
    """Parse command line arguments"""
    parser: argparse.ArgumentParser = argparse.ArgumentParser(
        "Arguments for pretraining")
    add = parser.add_argument
    add('--sample_size', type=int, default=3200,
        help='sample size for training')
    add('--batch_size', type=int, default=128,
        help='input batch size for training')
    add('--num_epochs', type=int, default=100,
        help='Number of epochs for training')
    add('--contrastive_train_epochs', type=int, default=10,
        help='Number of epochs for contrastive training')
    add('--model_id', type=int, default=0,
        help='Model ID for training')
    add('--mode', default='train',
        help='train | test | debug')
    return parser.parse_args()
def plot_ellipse(ax, mu, sigma, color="k"):
    """
    Draw a covariance ellipse for (mu, sigma) on the given axes.

    Based on
    http://stackoverflow.com/questions/17952171/not-sure-how-to-fit-data-with-a-gaussian-python.
    """
    # Eigendecomposition of the covariance matrix.
    eig_vals, eig_vecs = np.linalg.eigh(sigma)
    # Tilt of the ellipse from the first eigenvector.
    vx, vy = eig_vecs[:, 0]
    theta = np.degrees(np.arctan2(vy, vx))
    # Eigenvalues give the squared semi-axis lengths along each eigenvector.
    w, h = 2 * np.sqrt(eig_vals)
    ax.tick_params(axis='both', which='major', labelsize=20)
    ellipse = Ellipse(mu, w, h, theta, color=color)
    ellipse.set_clip_box(ax.bbox)
    ellipse.set_alpha(0.2)
    ax.add_artist(ellipse)
def commit(dir_info):
    """
    Moves files from the temp directory to the final directory based
    on the input given. Returns list of all files

    Keyword arguments:
    dir_info -- dictionary of service to dir_info hash
    """
    def walk_file_list(base_dir, srcdir, resultdir, done_files=set()):
        """ Gets files that haven't been seen yet

        NOTE(review): the mutable default `done_files=set()` is shared
        across calls that omit the argument -- here every call passes it
        explicitly or relies on a fresh default per commit(); confirm
        before reusing this helper elsewhere.
        """
        result = []
        if not base_dir.endswith(os.sep):
            # For stripping the slash
            base_dir = base_dir + os.sep
        for root, dirnames, filenames in os.walk(srcdir):
            after_base = root[len(base_dir):] #strip absolute
            if after_base not in done_files:
                for filename in filenames:
                    if os.path.join(after_base, filename) not in done_files:
                        result.append(os.path.join(resultdir, filename))
        return result

    result = defaultdict(list)
    for service in dir_info:
        # copy the directory
        serv_dir = dir_info[service]['dir']
        base_dir = dir_info[service]['base_dir']
        log.info("Deploying %s to %s", service, base_dir)
        files = set(os.listdir(serv_dir))
        done_files = set()
        # Atomic directories are swapped in via a symlink flip; non-atomic
        # ones are copied in place.
        for dirname, atomic in dir_info[service]['atomic'].items():
            srcdir = os.path.join(serv_dir, dirname)
            destdir = os.path.join(base_dir, dirname)
            # Delete existing dir
            if atomic:
                if not os.path.islink(destdir):
                    shutil.rmtree(destdir, ignore_errors=True)
                stripped = destdir.rstrip(os.sep)
                makedirsp(os.path.dirname(stripped))
                force_create_symlink(srcdir, stripped)
            else:
                # Copy
                copy_tree(srcdir, destdir)
            result[service].extend(walk_file_list(serv_dir, srcdir, dirname))
            done_files.add(dirname.rstrip(os.sep))
        # Do the remaining files
        for name in files.difference(done_files):
            src = os.path.join(serv_dir, name)
            dst = os.path.join(base_dir, name)
            if os.path.isdir(src):
                # Never deploy version-control metadata.
                if os.path.basename(os.path.normpath(src)) == '.git':
                    continue
                _smart_copytree(src, dst, ignore=ignore_copy)
                result[service].extend(walk_file_list(serv_dir, src, name, done_files))
            else:
                _smart_copyfile(src, dst)
                result[service].append(name)
    return result
def len(file, path):
    """Return the length of a dataset along its first dimension.

    NOTE(review): this function shadows the builtin ``len`` within this
    module; callers presumably use it as ``module.len``, so renaming
    would break the public interface.

    Args:
        file: path of the HDF5 file.
        path: path of the dataset inside the file.

    Returns:
        The length (size of the first axis).
    """
    with h5py.File(file, mode='r') as h5_file:
        length = h5_file[path].len()
    return length
def format_(session):
    """Format the code.

    Runs black, then isort, then docformatter over PY_PATHS; the order
    matters since each tool may rewrite the previous tool's output.
    """
    session.install(*REQUIREMENTS_FORMAT)
    # black: line length 88, py38 target, no string normalization (-S),
    # skipping generated migration modules.
    session.run(
        'black', '-l', '88', '-t', 'py38', '-S', '--exclude=.*/migrations/.*', *PY_PATHS
    )
    session.run('isort', *PY_PATHS)
    # docformatter wraps docstrings to match the 88-column limit.
    session.run(
        'docformatter',
        '--in-place',
        '--recursive',
        '--wrap-summaries=88',
        '--wrap-descriptions=88',
        *PY_PATHS,
    )
def get_time_with_limits(
    db_name: str,
    user: str,
    port: int,
    test_mols_path: Path,
    search_type: str,
    path_to_save: Path,
    password=None,
) -> None:
    """
    Searches the database for the specified molecules and saves the search
    time to an excel file.

    One search is run per result limit in ``limits``; the per-limit timing
    and count dicts are merged into a single table and written to
    *path_to_save*.
    """
    limits = [1, 10, 100, 1000, 21000000]
    res_limits = []
    res_counts = []
    for limit in limits:
        res_lim, res_count = get_all_time_and_count(
            db_name, user, test_mols_path, search_type, limit, port, password
        )
        res_limits.append(res_lim)
        res_counts.append(res_count)
    logger.info(res_limits)
    logger.info(res_counts)
    # Merge all per-limit dicts in order. This replaces the original
    # hard-coded `**res_limits[0] ... **res_counts[4]` unpacking, which
    # silently broke if the number of limits changed.
    final_dict = {}
    for partial in res_limits + res_counts:
        final_dict.update(partial)
    df = pd.DataFrame(final_dict)
    df.to_excel(path_to_save.as_posix())
def loop(step_fn, n_steps,
         sequences=None, outputs_info=None, non_sequences=None,
         go_backwards=False):
    """
    Helper function to unroll for loops. Can be used to unroll theano.scan.
    The parameter names are identical to theano.scan, please refer to here
    for more information.
    Note that this function does not support the truncate_gradient
    setting from theano.scan.
    Parameters
    ----------
    step_fn : function
        Function that defines calculations at each step.
    sequences : TensorVariable or list of TensorVariables
        List of TensorVariable with sequence data. The function iterates
        over the first dimension of each TensorVariable.
    outputs_info : list of TensorVariables
        List of tensors specifying the initial values for each recurrent
        value. Specify output_info to None for non-arguments to
        the step_function
    non_sequences: list of TensorVariables
        List of theano.shared variables that are used in the step function.
    n_steps: int
        Number of steps to unroll.
    go_backwards: bool
        If true the recursion starts at sequences[-1] and iterates
        backwards.
    Returns
    -------
    List of TensorVariables. Each element in the list gives the recurrent
    values at each time step.
    """
    # Normalize `sequences` to a list so the main loop can always iterate it.
    if not isinstance(sequences, (list, tuple)):
        sequences = [] if sequences is None else [sequences]
    # When backwards reverse the recursion direction
    counter = range(n_steps)
    if go_backwards:
        counter = counter[::-1]
    output = []
    # ====== check if outputs_info is None ====== #
    if outputs_info is not None:
        prev_vals = outputs_info
    else:
        prev_vals = []
    # Positions of recurrent outputs: only non-None entries of outputs_info
    # are fed back into step_fn on subsequent iterations.
    output_idx = [i for i in range(len(prev_vals)) if prev_vals[i] is not None]
    # ====== check if non_sequences is None ====== #
    if non_sequences is None:
        non_sequences = []
    # ====== Main loop ====== #
    for i in counter:
        # Step inputs follow theano.scan's ordering: current sequence
        # slices, then previous recurrent values, then non_sequences.
        step_input = [s[i] for s in sequences] + \
                     [prev_vals[idx] for idx in output_idx] + \
                     non_sequences
        out_ = step_fn(*step_input)
        # The returned values from step can be either a TensorVariable,
        # a list, or a tuple. Below, we force it to always be a list.
        if isinstance(out_, T.TensorVariable):
            out_ = [out_]
        if isinstance(out_, tuple):
            out_ = list(out_)
        output.append(out_)
        # NOTE(review): after the first iteration `prev_vals` holds *all*
        # step outputs while `output_idx` was computed from outputs_info —
        # this assumes step_fn's output order matches outputs_info exactly;
        # confirm for step functions with None entries in outputs_info.
        prev_vals = output[-1]
    # iterate over each scan output and convert it to same format as scan:
    # [[output11, output12,...output1n],
    # [output21, output22,...output2n],...]
    output_scan = []
    for i in range(len(output[0])):
        l = map(lambda x: x[i], output)
        output_scan.append(T.stack(*l))
    return output_scan
def get_model(hidden_size=20, n_hidden=5, in_dim=2, out_dim=1, penultimate=False, use_cuda=True, bn=False):
    """
    Build a ``Net`` MLP and optionally move it to the GPU.

    Args:
        hidden_size: Width of each hidden layer.
        n_hidden: Number of hidden layers.
        in_dim: Input dimensionality.
        out_dim: Output dimensionality.
        penultimate: Forwarded to ``Net`` (project-defined; presumably
            exposes the penultimate layer — confirm against Net).
        use_cuda: If True, move the model to the GPU.
        bn: Forwarded to ``Net`` (presumably enables batch norm — confirm).

    Returns:
        The constructed (and possibly CUDA-resident) model.
    """
    # The previous no-op self-assignments of in_dim/out_dim were removed.
    model = Net(in_dim, out_dim, n_hidden=n_hidden, hidden_size=hidden_size,
                activation=torch.nn.ReLU(), bias=True, penultimate=penultimate, bn=bn)
    if use_cuda:
        model = model.cuda()
    return model
def main(args=None):
    """Project entrypoint function.

    Parses command-line arguments, runs the word-separation algorithm on
    the input string and prints the results.

    Args:
        args: Unused; kept for interface compatibility.
            NOTE(review): parse_args() always reads sys.argv — consider
            forwarding this parameter to it.
    """
    argparser = argparse.ArgumentParser(
        description="""\r
        \r-----------------------------
        \r Spacin, puts space between!
        \r-----------------------------\n\n
        \rSpacin is a word-separator that distinguishes
        \reach word in a given string.\n
        \rexample:
        \r> spacin "hellofriend"
        \r...
        \ras a sentence: "hello friend"
        \ras separate words: ['hello', 'friend']
        """,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent(
            """developed by Mohammad Salek
            \r\n
            """
        ),
        prog='spacin',
        usage="""%(prog)s <string>"""
    )
    argparser.add_argument(
        'input_str',
        action='store',
        type=str,
        nargs="?",
        help=argparse.SUPPRESS,
    )
    argparser.add_argument(
        '-t',
        '--text',
        action='store',
        metavar='<string>',
        type=str,
        help='accept input text in commandline',
    )
    argparser.add_argument(
        '-w',
        '--words',
        action='store_true',
        help='output result as words (as a list)')
    argparser.add_argument(
        '-s',
        '--sentence',
        action='store_true',
        help='output result in a sentence (as string)')
    try:
        args = argparser.parse_args()
        # Exactly one input source (positional OR --text) must be given;
        # both-missing and both-present are usage errors.
        if bool(args.input_str) == bool(args.text):
            argparser.print_help()
            sys.exit(1)
        # Show process details only when no explicit output mode is chosen.
        # (The previous dead store `show_process = True` was removed.)
        show_process = not any([args.sentence, args.words])
        # select input:
        input_str = args.input_str if args.input_str else args.text
        # choose algorithm(s):
        algo = BasicAlgorithm()
        # algorithm(s) and input details:
        if show_process:
            print(f"input text:\t{input_str}")
            print(f"algorithm:\t{algo}")
            print("processing...", end=' ', flush=True)
        # run algorithm(s):
        start_time = time.time()
        res = Spacin.run(algo, input_str)
        end_time = time.time()
        # yell finished:
        if show_process:
            print("done!")
            print(f"and it took {end_time-start_time:.3f} seconds\n")
        # show results: labelled output by default, raw output when the
        # -s/-w flags select one (or both) explicit formats.
        if show_process:
            print(f"as a sentence:\t\t\"{' '.join(res)}\"")
            print(f"as separate words:\t{res}")
        else:
            if args.sentence:
                print(f"\"{' '.join(res)}\"")
            if args.words:
                print(f"{res}")
    except argparse.ArgumentTypeError as arge:
        print('\n\nan argument error occured:', arge)
        print('enter "spacin -h" for help')
        sys.exit(1)
def convert_sweep(sweep,sweep_loc,new_sweep_loc,AR,taper):
    """This converts arbitrary sweep into a desired sweep given
    wing geometry.
    Assumptions:
        Trapezoidal wing planform; angles are in radians
        (np.tan/np.arctan operate in radians — the previous docstring's
        "[degrees]" label was incorrect).
    Source:
        N/A
    Inputs:
        sweep          [radians]  sweep angle measured at sweep_loc
        sweep_loc      [unitless] chord fraction (0 = LE) of the given sweep
        new_sweep_loc  [unitless] chord fraction of the desired sweep
        AR             [unitless] aspect ratio
        taper          [unitless] taper ratio
    Outputs:
        new_sweep      [radians]  sweep angle at new_sweep_loc
    Properties Used:
    N/A
    """
    # Shared trapezoidal-geometry factor; hoisted because it appears in
    # both conversion steps.
    geom = 4 * (1 - taper) / (AR * (1 + taper))
    # First convert to leading-edge sweep, then to the requested location.
    sweep_LE  = np.arctan(np.tan(sweep) + sweep_loc * geom)
    new_sweep = np.arctan(np.tan(sweep_LE) - new_sweep_loc * geom)
    return new_sweep
def is_circular(linked_list):
    """
    Determine whether the Linked List is circular or not.

    Uses Floyd's tortoise-and-hare cycle detection: a slow pointer moves
    one node per step while a fast pointer moves two. If the list loops,
    the fast pointer eventually laps the slow one and they meet on the
    same node; if the list has an end, the fast pointer reaches it first.

    Args:
        linked_list(obj): Linked List to be checked (exposes `head`,
            nodes expose `next`)
    Returns:
        bool: True if the linked list is circular, False otherwise
    """
    tortoise = linked_list.head
    hare = linked_list.head
    # The hare hits the end first when there is no loop, so checking it
    # (and its successor) for None is sufficient.
    while hare and hare.next:
        tortoise = tortoise.next
        hare = hare.next.next
        if hare == tortoise:
            return True
    # Fell off the end of the list: no cycle.
    return False
def bm_cv(
    X_train: pd.DataFrame,
    y_train: pd.Series,
    cv: int,
    metrics: List[Any],
    metrics_proba: List[Any],
    metric_kwargs: dict,
    model_dict: dict,
):
    """
    Perform cross validation benchmark with all models specified under model_dictionary, using the metrics defined.
    Args:
        X_train: Array of features, used to train the model
        y_train: Array of label, used to train the model
        cv: Number of cross-validation fold
        metrics: List of metrics that we will use to score our validation performance
        metrics_proba: List of metrics that we will use to score our validation performance.
            This is only applicable for classification problem. The metrics under `metrics_proba` uses the predicted
            probability instead of predicted class
        metric_kwargs: Dictionary containing the extra arguments needed for specific metrics,
            listed in metrics and metrics_proba
        model_dict: Model_dictionary, containing the model_name as the key and catalyst.ml.model object as value.
    Returns:
        DataFrame, which contains all of the metrics value for each of the model specified under model_dictionary,
        as well as the cross-validation index.
    """
    # Collect per-fold frames and concatenate once at the end;
    # pd.concat inside the loop re-copies all prior rows each iteration.
    frames = []
    kf = KFold(n_splits=cv, shuffle=True, random_state=42)
    for cv_idx, (dev_idx, val_idx) in enumerate(kf.split(X_train)):
        X_dev, X_val, y_dev, y_val = cv_split(X_train, y_train, dev_idx, val_idx)
        df = bm(
            X_dev,
            y_dev,
            X_val,
            y_val,
            metrics,
            metrics_proba,
            metric_kwargs,
            model_dict,
        )
        # Tag every row with the fold it came from.
        df["cv_idx"] = cv_idx
        frames.append(df)
    return pd.concat(frames) if frames else pd.DataFrame()
def index_get(array, *argv):
    """
    Safely drill into nested indexable data and return the value, or None.

    :param array: the data (nested lists/dicts/tuples)
    :param argv: indices/keys applied in order
    :return: None if any lookup fails, otherwise the value found
    """
    try:
        for index in argv:
            array = array[index]
        return array
    # there is either no info available or no popular times
    # IndexError/KeyError: index or key not present
    # TypeError: rating/rating_n/populartimes wrong or not available
    except (IndexError, KeyError, TypeError):
        return None
def add_graph(writer: torch.utils.tensorboard.SummaryWriter = None,
              model: torch.nn.Module = None,
              data_loader: torch.utils.data.dataloader = None,
              device: torch.device = torch.device('cpu')):
    """Trace one batch through *model* and write its graph to TensorBoard.

    Pulls a single batch from *data_loader* (assumes each batch is a dict
    with an 'input' tensor — confirm against the dataset), moves it to
    *device*, and records the resulting graph on *writer*.
    """
    sample_batch = next(iter(data_loader))
    example_input = sample_batch['input'].to(device)
    writer.add_graph(model, example_input)
def _test_converter(testname, fail_expected, error_text=None, format="yaml"):
    """
    Convert a v1 object to v3, then apply the result and read it back.

    Args:
        testname: key into the module-level `data` dict selecting the v1 input.
        fail_expected: when True, the conversion is expected to fail and
            only the error output is checked.
        error_text: substring expected in calicoctl's error output
            (used only when fail_expected is True).
        format: serialization format passed to calicoctl ("yaml" or "json").
    """
    # Let's start every test afresh
    wipe_etcd(get_ip())
    testdata = data[testname]
    # Convert data to V3 API using the tool under test
    rc = calicoctl("convert -o %s" % format, data=testdata, format=format)
    if not fail_expected:
        logger.debug("Trying to convert manifest from V1 to V3")
        rc.assert_no_error()
        if format == "yaml":
            parsed_output = yaml.safe_load(rc.output)
        else:
            parsed_output = json.loads(rc.output)
        # Get the converted data and clean it up (remove fields we don't care about)
        converted_data = clean_calico_data(parsed_output)
        original_resource = rc
        # Apply the converted data
        rc = calicoctl("create", data=original_resource.output, format=format)
        logger.debug("Trying to create resource using converted manifest")
        rc.assert_no_error()
        # Read the freshly-created resource back out of the datastore.
        rc = calicoctl("get %s %s -o yaml" % (converted_data['kind'], name(converted_data)))
        # Comparison here needs to be against cleaned versions of data to remove Creation Timestamp
        logger.debug("Comparing 'get'ted output with original converted yaml")
        cleaned_output = yaml.safe_dump(
            clean_calico_data(
                yaml.safe_load(rc.output),
                extra_keys_to_remove=['projectcalico.org/orchestrator', 'namespace']
            )
        )
        original_resource.assert_data(cleaned_output, format=format)
    else:
        # Conversion itself is expected to fail with the given message.
        rc.assert_error(error_text)
def test_version():
    """
    Make sure the version in the TOML file and in the __init__.py file are the same.
    """
    with open("pyproject.toml") as toml_file:
        lines = toml_file.read().splitlines()
    # Set comparison also catches duplicate/additional version entries.
    toml_versions = {line for line in lines if "version =" in line}
    expected = {f'version = "{mei2volpiano.__version__}"'}
    assert expected == toml_versions
def _env_vars_available() -> bool:
    """
    Returns: `True` if all required environment variables for the Postgres connection are set, `False` otherwise
    """
    required = DBConfigProviderEnvVarBasedImpl.required_env_vars
    # environ is a mapping, so subset-of-keys is exactly "all vars set".
    return set(required).issubset(environ)
def launch(
        code, structure, pseudo_family, daemon, protocol):
    """
    Run the PwBandStructureWorkChain for a given input structure
    to compute the band structure for the relaxed structure

    Args:
        code: AiiDA Code node for the pw.x executable.
        structure: AiiDA StructureData input.
        pseudo_family: accepted but UNUSED in the body — the workchain no
            longer takes a pseudo family (see the comments below);
            presumably kept for CLI compatibility — confirm.
        daemon: if truthy, submit to the daemon instead of running blocking.
        protocol: accepted but UNUSED — the protocol dict is hard-coded
            below; confirm before relying on this parameter.
    """
    from aiida.orm.data.base import Str
    from aiida.orm import DataFactory
    from aiida.orm.utils import WorkflowFactory
    from aiida.work.launch import run, submit
    PwBandStructureWorkChain = WorkflowFactory('quantumespresso.pw.band_structure')
    ParameterData = DataFactory('parameter')
    inputs = {
        'code': code,
        'structure': structure,
        ## Pseudo family is not anymore a parameter of this workflow. Instead,
        ## you should already have the pseudos in your DB, or pass a pseudo_data
        ## modifier below, with the MD5 of the pseudos you want to use.
        ## If you don't have the SSSP pseudopotentials
        ## (that you can download from here for SSSP v.1.0:
        ## https://www.materialscloud.org/archive/2018.0001/v2
        ##
        ## you can get the dictionary with the md5 with the
        ## following code snippet, but you still need then to specify "cutoff" and "dual"
        ## for all relevant pseudos!
        ##
        ## CODE SNIPPET:
        ##
        ## def get_md5_dict(family_name):
        ##     UpfData = DataFactory('upf')
        ##     family = UpfData.get_upf_group(family_name)
        ##     return {node.element: {'md5': node.md5sum} for node in family.nodes}
        'protocol': ParameterData(dict={
            'name': 'theos-ht-1.0',
        }),
        ## or (to apply modifiers):
        # 'protocol': ParameterData(dict={
        #     'name: 'theos-ht-1.0',
        #     'modifiers': {
        #         'parameters': 'fast',
        #         'pseudo': 'SSSP-efficiency-1.0'
        #     }
        # })
        ## or (for custom-specified pseudos and cutoffs):
        # 'protocol': ParameterData(dict={
        #     'name: 'theos-ht-1.0',
        #     'modifiers': {
        #         'parameters': 'fast',
        #         'pseudo': 'custom',
        #         'pseudo_data': {
        #             "Ag": {
        #                 "cutoff": "50",
        #                 "dual": "4",
        #                 "filename": "Ag_ONCV_PBE-1.0.upf",
        #                 "md5": "96da9acec54ba82f98e06bace1ebc465",
        #                 "pseudopotential": "SG15"
        #             },
        #             "Al": {
        #                 "cutoff": "30",
        #                 "dual": "8",
        #                 "filename": "Al.pbe-n-kjpaw_psl.1.0.0.UPF",
        #                 "md5": "4d58055f5a69695be6f94701d50bfe3f",
        #                 "pseudopotential": "100PAW"
        #             },
        #             # ...
        #         }
        # })
    }
    if daemon:
        # Non-blocking: hand the workchain to the daemon and report its pk.
        workchain = submit(PwBandStructureWorkChain, **inputs)
        click.echo('Submitted {}<{}> to the daemon'.format(PwBandStructureWorkChain.__name__, workchain.pk))
    else:
        # Blocking: run the workchain in the current interpreter.
        run(PwBandStructureWorkChain, **inputs)
def precisionatk_implementation(y_true, y_pred, k):
    """Function to calculate precision at k for a given sample.

    Arguments:
        y_true {list} -- list of actual classes for the given sample
        y_pred {list} -- list of predicted classes for the given sample,
            best prediction first
        k {int} -- top k predictions we are interested in

    Returns:
        float -- fraction of the top-k predictions that appear in y_true;
        0 when k is 0 or there are no predictions at all
    """
    # if k = 0 return 0 as we should never have k=0
    # as k is always >=1
    if k == 0:
        return 0
    # as we are interested in top k predictions
    top_k = y_pred[:k]
    # guard against an empty prediction list, which previously
    # raised ZeroDivisionError
    if not top_k:
        return 0
    # find common values between predictions and actual classes
    common_values = set(top_k).intersection(y_true)
    # return length of common values over the number of predictions used
    return len(common_values) / len(top_k)
def add_plane_data(
    data_frame: pandas.DataFrame,
    file_path: str,
    target_col: str = const.DF_PLANE_COL_NAME
) -> pandas.DataFrame:
    """Merges DataFrame with information about the flight planes

    Args:
        data_frame (pandas.DataFrame): Source DataFrame
        file_path (str): Source file path (agenda file with plane info)
        target_col (str): Target column to merge

    Returns:
        pandas.DataFrame: Source DataFrame with additional information,
        or None when the agenda is missing information for some planes
        (an error is printed in that case).
    """
    planes = df_fileloader.load_agenda(file_path)
    # Normalize the join key to str on both sides so the merge keys match.
    data_frame[target_col] = data_frame[target_col].astype(str)
    planes[target_col] = planes[target_col].astype(str)
    # indicator=True adds a `_merge` column used below to spot unmatched rows.
    data_frame = pandas.merge(data_frame, planes, how='outer', on=[target_col], indicator=True)
    # Rows present only in the source frame have no plane info in the agenda.
    unmatched = data_frame.query('_merge == "left_only"').groupby([target_col]).size().reset_index(name='count')
    if not unmatched.empty:
        err_msg = 'There\'s missing information about the following planes:'
        for index, row in unmatched.iterrows():
            err_msg += '\n {} with {} ocurrences.'.format(row[target_col], row['count'])
        utility.eprint(err_msg)
        # Implicitly returns None on this error path.
        return
    # Keep only fully-matched rows and drop the merge bookkeeping column.
    return data_frame.query('_merge == "both"').drop(columns=['_merge'])
def record(location):
    """Create and publish an empty record.

    `location` is unused in the body; presumably a required fixture
    dependency of the caller — confirm before removing.
    """
    return RDMRecord.publish(RDMDraft.create({}))
def generate_tests():
    # type: () -> Generator[Tuple[str, Callable, List[Any], List[Any]]]
    """
    Yield tuples of test data.
    :return: Tuple with testdata (test_name, func_obj, inputs, outputs)
    :rtype: Generator[Tuple[str, Callable, List[Any], List[Any]]]
    """
    with open(TEST_DATA, "rb") as stream:
        data = json.load(stream)
    for func_name, tests in data.items():
        func_obj = getattr(ic, func_name)
        for test_name, test_values in tests.items():
            # Convert "stream:"/"bytes:"-prefixed hex strings into real
            # objects. NOTE: str.lstrip() strips a *character set*, not a
            # prefix — the previous lstrip("stream:") corrupted hex data
            # beginning with any of the characters s,t,r,e,a,m,: — so the
            # prefix is removed by slicing instead.
            ntv = []
            for tv in test_values["inputs"]:
                if isinstance(tv, str) and tv.startswith("stream:"):
                    ntv.append(io.BytesIO(bytes.fromhex(tv[len("stream:"):])))
                elif isinstance(tv, str) and tv.startswith("bytes:"):
                    ntv.append(bytes.fromhex(tv[len("bytes:"):]))
                else:
                    ntv.append(tv)
            test_values["inputs"] = ntv
            yield test_name, func_obj, test_values["inputs"], test_values["outputs"]
def firfls(x, f_range, fs=1000, w=3, tw=.15):
    """
    Filter signal with an FIR filter
    *Like firls in MATLAB
    x : array-like, 1d
        Time series to filter
    f_range : (low, high), Hz
        Cutoff frequencies of bandpass filter
    fs : float, Hz
        Sampling rate
    w : float
        Length of the filter in terms of the number of cycles
        of the oscillation whose frequency is the low cutoff of the
        bandpass filter
    tw : float
        Transition width of the filter in normalized frequency space
    Returns
    -------
    x_filt : array-like, 1d
        Filtered time series
    Raises
    ------
    ValueError
        If w, tw or f_range are invalid for the sampling rate.
    RuntimeError
        If the filter is longer than the data, the transition bands
        overlap, or the filtered output contains NaNs.
    """
    if w <= 0:
        raise ValueError(
            'Number of cycles in a filter must be a positive number.')
    if np.logical_or(tw < 0, tw > 1):
        raise ValueError('Transition width must be between 0 and 1.')
    nyq = fs / 2
    if np.any(np.array(f_range) > nyq):
        raise ValueError('Filter frequencies must be below nyquist rate.')
    if np.any(np.array(f_range) < 0):
        raise ValueError('Filter frequencies must be positive.')
    # firwin2 requires an integer tap count; np.floor alone returns a
    # float, which modern scipy rejects, so cast explicitly.
    Ntaps = int(np.floor(w * fs / f_range[0]))
    if len(x) < Ntaps:
        raise RuntimeError(
            'Length of filter is longer than data. '
            'Provide more data or a shorter filter.')
    # Characterize desired filter: piecewise-linear gain `m` over the
    # normalized frequency grid `f` (0..1, where 1 is the Nyquist rate).
    f = [0, (1 - tw) * f_range[0] / nyq, f_range[0] / nyq,
         f_range[1] / nyq, (1 + tw) * f_range[1] / nyq, 1]
    m = [0, 0, 1, 1, 0, 0]
    # A non-monotonic grid means the transition bands overlap.
    if any(np.diff(f) < 0):
        raise RuntimeError(
            'Invalid FIR filter parameters.'
            'Please decrease the transition width parameter.')
    # Perform filtering (filtfilt applies the filter forward and backward
    # for zero phase distortion)
    taps = firwin2(Ntaps, f, m)
    x_filt = filtfilt(taps, [1], x)
    if any(np.isnan(x_filt)):
        raise RuntimeError(
            'Filtered signal contains nans. Adjust filter parameters.')
    # Remove edge artifacts
    return _remove_edge(x_filt, Ntaps)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.