| content | id |
|---|---|
def test_double_min_inclusive004_1096_double_min_inclusive004_1096_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : (facet=minInclusive and value=1.1 and
facet=maxInclusive and value=7.7) and document value=5.55
"""
assert_bindings(
schema="msData/datatypes/Facets/double/double_minInclusive004.xsd",
instance="msData/datatypes/Facets/double/double_minInclusive004.xml",
class_name="Test",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 17,200
|
def read_keys():
""" read aws credentials from file, then stick into global variables... """
with open('%s/.aws/credentials' % os.getenv('HOME'), 'rt') as infile:
for line in infile:
if 'aws_access_key_id' in line:
aws_access_key_id = line.split('=')[-1].strip()
if 'aws_secret_access_key' in line:
aws_secret_access_key = line.split('=')[-1].strip()
return aws_access_key_id, aws_secret_access_key
| 17,201
|
def RecalculatedEdgeDegreeAttack(G, remove_fraction = 1.0):
""" Recalculated Edge Degree Attack
"""
n = G.number_of_nodes()
    m = int(G.number_of_edges() * remove_fraction)
tot_ND = [0] * (m + 1)
tot_T = [0] * (m + 1)
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
tot_ND[0] = ND
tot_T[0] = 0
for i in range(m):
# calculate max edge degree
cur_max_edge_degree = -1
cur_max_u = -1
cur_max_v = -1
for u, v in G.edges():
temp = G.degree(u) * G.degree(v)
if temp > cur_max_edge_degree:
cur_max_edge_degree = temp
cur_max_u = u
cur_max_v = v
# remove edge
G.remove_edge(cur_max_u, cur_max_v)
# calculate and save ND
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
tot_ND[i+1] = ND
tot_T [i+1] = i + 1
return (tot_ND, tot_T)
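# Usage sketch added for illustration (not part of the original snippet). It assumes
# networkx is installed and that the ECT helper module used above is importable;
# the graph parameters below are arbitrary.
import networkx as nx

G_demo = nx.gnm_random_graph(30, 60, seed=0, directed=True)
tot_ND_demo, tot_T_demo = RecalculatedEdgeDegreeAttack(G_demo, remove_fraction=0.5)
# tot_ND_demo[k] is the number of driver nodes after the k highest edge-degree
# edges have been removed; note that the attack mutates the graph in place.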
| 17,202
|
def test_strip_text():
"""
Remove test text.
Args:
"""
assert helpers.strip_text(" text ", []) == "text"
# I can't interpret the rest of the code well enough yet
| 17,203
|
def add_role(rolename, roleinfo, require_parent=True):
"""
<Purpose>
Add to the role database the 'roleinfo' associated with 'rolename'.
<Arguments>
rolename:
An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
(e.g., 'root', 'snapshot', 'timestamp').
roleinfo:
An object representing the role associated with 'rolename', conformant to
ROLEDB_SCHEMA. 'roleinfo' has the form:
{'keyids': ['34345df32093bd12...'],
'threshold': 1,
'signatures': ['ab23dfc32']
'paths': ['path/to/target1', 'path/to/target2', ...],
'path_hash_prefixes': ['a324fcd...', ...],
     'delegations': {'keys': {}, 'roles': []}}
The 'paths', 'path_hash_prefixes', and 'delegations' dict keys are
optional.
The 'target' role has an additional 'paths' key. Its value is a list of
strings representing the path of the target file(s).
require_parent:
A boolean indicating whether to check for a delegating role. add_role()
will raise an exception if this parent role does not exist.
<Exceptions>
tuf.FormatError, if 'rolename' or 'roleinfo' does not have the correct
object format.
tuf.RoleAlreadyExistsError, if 'rolename' has already been added.
tuf.InvalidNameError, if 'rolename' is improperly formatted.
<Side Effects>
The role database is modified.
<Returns>
None.
"""
# Does 'rolename' have the correct object format?
# This check will ensure 'rolename' has the appropriate number of objects
# and object types, and that all dict keys are properly named.
tuf.formats.ROLENAME_SCHEMA.check_match(rolename)
# Does 'roleinfo' have the correct object format?
tuf.formats.ROLEDB_SCHEMA.check_match(roleinfo)
# Does 'require_parent' have the correct format?
tuf.formats.BOOLEAN_SCHEMA.check_match(require_parent)
# Raises tuf.InvalidNameError.
_validate_rolename(rolename)
if rolename in _roledb_dict:
raise tuf.RoleAlreadyExistsError('Role already exists: '+rolename)
# Make sure that the delegating role exists. This should be just a
# sanity check and not a security measure.
if require_parent and '/' in rolename:
# Get parent role. 'a/b/c/d' --> 'a/b/c'.
parent_role = '/'.join(rolename.split('/')[:-1])
if parent_role not in _roledb_dict:
raise tuf.Error('Parent role does not exist: '+parent_role)
_roledb_dict[rolename] = copy.deepcopy(roleinfo)
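# Note (added): the 'roleinfo' dict above must validate against ROLEDB_SCHEMA, so the
# 'keyids' entries have to be well-formed key ids; the docstring values are only
# placeholders and will not pass the schema check as written.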
| 17,204
|
def get(url, params=None, headers=None):
"""Return the contents from a URL
Params:
- url (str): Target website URL
- params (dict, optional): Param payload to add to the GET request
- headers (dict, optional): Headers to add to the GET request
Example:
```
get('https://httpbin.org/anything', {'soup': 'gazpacho'})
```
"""
opener = build_opener()
if params:
url += "?" + urlencode(params)
    if headers:
        # keep every supplied header; assigning [h] inside a loop would retain only the last one
        opener.addheaders = list(headers.items())
    if (headers and not headers.get("User-Agent")) or not headers:
        UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:69.0) Gecko/20100101 Firefox/69.0"
        if headers:
            opener.addheaders.append(("User-Agent", UA))
        else:
            opener.addheaders = [("User-Agent", UA)]
with opener.open(url) as f:
content = f.read().decode("utf-8")
return content
| 17,205
|
def main(extn='pdf', fig_mult=1.0):
"""Load and combine multiple jet runs into one DataFrame, plot info."""
# File called runinfo.yml stores information about each JetFindRun
# with a label and file location
with open('runinfo.yml', 'r') as cfg:
data = yaml.safe_load(cfg.read())
dsets = ['ERAI-Daily', 'ERAI-Monthly', 'MERRA2-Daily', 'MERRA2-Monthly',
'JRA55-Daily', 'JRA55-Monthly', 'CFSR-Daily', 'CFSR-Monthly']
fds = [FileDiag(data[dset], file_path='jet_out') for dset in dsets]
metric = fds[0].metric
for fdi in fds[1:]:
metric = metric.append(fdi.metric)
plt.rc('font', size=8 * fig_mult)
sns.set_style('whitegrid')
fig_width = (12.9 / 2.54) * fig_mult
fig_height = fig_width * 0.6
hems = {'nh': {'ticks': np.arange(20, 60, 10),
'min_ticks': np.arange(20, 55, 5),
'ylims': (15, 50),
'sea_order': ['DJF', 'MAM', 'JJA', 'SON']},
'sh': {'ticks': np.arange(-10, -55, -10),
'min_ticks': np.arange(-10, -55, -5),
'ylims': (-15, -50),
'sea_order': ['JJA', 'SON', 'DJF', 'MAM']}}
figures = [plt.subplots(1, 1, figsize=(fig_width, fig_height))
for i in range(2)]
for hidx, hem in enumerate(hems):
fig, axis = figures[hidx]
make_violinplot(metric, axis, hems, hem, fig_mult)
fig.subplots_adjust(left=0.06, bottom=0.05,
right=0.98, top=0.98, hspace=0.0)
fig.legend(bbox_to_anchor=(0.06, 0.05),
loc='lower left', borderaxespad=0.,
ncol=3)
fig.savefig(f'plt_compare_dist_all_{hem}.{extn}')
| 17,206
|
def get_param_num(model):
""" get the number of parameters
Args:
model:
Returns:
"""
return sum(p.numel() for p in model.parameters())
| 17,207
|
def aggregate_results(jade_runtime_output, fmt, verbose):
"""Aggregate results on a directory of upgrade cost analysis simulations."""
level = logging.DEBUG if verbose else logging.INFO
log_file = jade_runtime_output / "upgrade_cost_analysis_aggregation.log"
setup_logging(__name__, log_file, console_level=level, packages=["disco"])
logger.info(get_cli_string())
jade_config_file = jade_runtime_output / CONFIG_FILE
if not jade_config_file.exists():
logger.error("aggregate-results is only supported when run through JADE.")
sys.exit(1)
job_names = (x["name"] for x in load_data(jade_config_file)["jobs"])
_aggregate_results(jade_runtime_output, log_file, job_names, fmt)
| 17,208
|
def get_branch_name():
"""Get the name of the current branch
returns:
The name of the current branch
"""
HEAD = data.get_ref('HEAD', deref=False)
if not HEAD.symbolic:
return None
HEAD = HEAD.value
assert HEAD.startswith('refs/heads/')
return os.path.relpath(HEAD, 'refs/heads')
| 17,209
|
def format_tensor_to_ndarray(x: Union[ms.Tensor, np.ndarray]) -> np.ndarray:
"""Unify `mindspore.Tensor` and `np.ndarray` to `np.ndarray`. """
if isinstance(x, ms.Tensor):
x = x.asnumpy()
if not isinstance(x, np.ndarray):
        raise TypeError('input should be one of [ms.Tensor or np.ndarray],'
                        ' but received {}'.format(type(x)))
return x
| 17,210
|
def hunt_csv(regex: Pattern, body: str) -> list:
"""
finds chunk of csv in a larger string defined as regex, splits it,
and returns as list. really useful only for single lines.
worse than StringIO -> numpy or pandas csv reader in other cases.
"""
csv_string = re.search(regex, body)[0]
if r"\n" in csv_string:
lines = csv_string.split(r"\n")
processed_lines = []
for line in lines:
csv_fields = line.split(",")
csv_fields = [field.strip() for field in csv_fields]
processed_lines.append(csv_fields)
return processed_lines
csv_fields = csv_string.split(",")
return [field.strip() for field in csv_fields]
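# Usage sketch added for illustration (not part of the original snippet); the regex
# and input string below are hypothetical.
_body = "header\nprices: 1.0, 2.5 , 3\nfooter"
_price_chunk = re.compile(r"(?<=prices:).*")
assert hunt_csv(_price_chunk, _body) == ['1.0', '2.5', '3']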
| 17,211
|
def time_key(file_name):
""" provides a time-based sorting key """
splits = file_name.split('/')
[date] = re.findall(r'(\d{4}_\d{2}_\d{2})', splits[-2])
date_id = [int(token) for token in date.split('_')]
recording_id = natural_key(splits[-1])
session_id = session_key(splits[-2])
return date_id + session_id + recording_id
| 17,212
|
def silent_popen(args, **kwargs):
"""Wrapper for subprocess.Popen with suppressed output.
    STDERR is redirected to STDOUT, which is piped back to the
    calling process and returned as the result.
"""
return subprocess.Popen(args,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, **kwargs).communicate()[0]
| 17,213
|
def sizeRange(contourList, low, high):
"""Only keeps contours that are in range for size"""
newList = []
for i in contourList:
if (low <= cv2.contourArea(i) <= high):
newList.append(i)
return newList
| 17,214
|
def test_estim_v_3_basis(seed):
""" Test the estimation of the HRF with the scaled HRF model without noise
in the observed data. """
rng = check_random_state(seed)
t_r = 1.0
n_atoms = 2
n_voxels = 1000
n_times_valid = 100
n_times_atom = 30
indices = np.arange(n_voxels)
n_hrf_rois = 2
rois_1 = indices[int(n_voxels/2):]
rois_2 = indices[:int(n_voxels/2)]
hrf_rois = {1: rois_1, 2: rois_2}
u = rng.randn(n_atoms, n_voxels)
z = rng.randn(n_atoms, n_times_valid)
rois_idx, _, _ = split_atlas(hrf_rois)
h = hrf_3_basis(t_r, n_times_atom)
a_true = np.c_[[[1.0, 0.8, 0.5], [1.0, 0.5, 0.0]]]
v_true = np.c_[[a_.dot(h) for a_ in a_true]]
X = construct_X_hat_from_v(v_true, z, u, rois_idx)
a_init = np.c_[[np.array([1.0, 0.0, 0.0]) for _ in range(n_hrf_rois)]]
a_hat, v_hat = _estim_v_d_basis(a_init, X, h, z, u, rois_idx)
    # no guarantee of recovery in the general case...
np.testing.assert_allclose(a_true, a_hat, atol=1e-1)
np.testing.assert_allclose(v_true, v_hat, atol=1e-1)
| 17,215
|
def set_persistent_cache(path_to_cache):
"""
Set a persistent cache. If the file does not yet exist, it is created.
:param path_to_cache: The place where the cache is stored or needs to be created
"""
net.server_cache.set_persistent_location(path_to_cache)
| 17,216
|
def test_parse_annotations() -> None:
"""Test the parse annotations function."""
filepath = get_test_file("motchallenge_labels.txt")
result = parse_annotations(filepath)
assert list(result.keys()) == [0, 1, 3]
for frame_idx, labels in result.items():
for label in labels:
assert label.attributes is not None
if label.attributes["ignored"]:
assert label.category == "pedestrian"
assert frame_idx == 1
else:
assert label.category == "pedestrian"
assert label.id == "1"
assert label.attributes["visibility"] == 1.0
assert label.box2d is not None
assert label.box2d.x1 in [458, 460]
assert label.box2d.x2 in [589, 587]
| 17,217
|
def release():
    """Create a tag release for a revision"""
    print(yellow(">>> Creating a tag release"))
    local("git tag")
    tagname = prompt("Enter a new tagname (following the list above): ")
    print(red('.... updating tag release at hstore_flattenfields'))
    _replace_in_file("__version__ = '.*'", "__version__ = '%s'" % tagname, 'hstore_flattenfields/__init__.py')
    print(red('.... versioning tag release'))
    diff_ = local('git diff', capture=True)
    comment = prompt('Enter a brief comment for this release:')
    if diff_:
        print(diff_)
        if confirm("Is it OK?", default=False):
            local('git add hstore_flattenfields/__init__.py')
            local("git ci -m \"version %s - %s\"" % (tagname, comment))
            local("git lg1 -n5")
            rev = prompt("Which revision do you want to release?")
            cmd_tag = "git tag -f %s %s -m '%s'" % (tagname, rev, comment)
            if confirm('%s # Create tag?' % cmd_tag, default=False):
                local(cmd_tag)
                if confirm('push to github?', default=False):
                    local('git push origin master --tags -f')
| 17,218
|
def test_unique(exec):
""" Only a single event is accepted at a given position"""
rec = EventCollectRecorder("./test.txt")
exec.call_except(lambda: rec.register_event_source("SRC1", 1, "test1"), None)
exec.call_except(lambda: rec.register_event_source("SRC2", 1, "test2"), Exception)
exec.call_except(lambda: rec.register_event_source("SRC1", 2, "test1"), Exception)
| 17,219
|
def number_field_choices(field):
"""
Given a field, returns the number of choices.
"""
try:
return len(field.get_flat_choices())
except AttributeError:
return 0
| 17,220
|
def _is_an_unambiguous_user_argument(argument: str) -> bool:
"""Check if the provided argument is a user mention, user id, or username (name#discrim)."""
has_id_or_mention = bool(commands.IDConverter()._get_id_match(argument) or RE_USER_MENTION.match(argument))
# Check to see if the author passed a username (a discriminator exists)
argument = argument.removeprefix("@")
has_username = len(argument) > 5 and argument[-5] == "#"
return has_id_or_mention or has_username
| 17,221
|
def resize_image(image, size):
"""
Resize the image to fit in the specified size.
:param image: Original image.
:param size: Tuple of (width, height).
:return: Resized image.
    :rtype: :py:class:`~PIL.Image.Image`
"""
image.thumbnail(size)
return image
| 17,222
|
def one_hot_vector(val, lst):
"""Converts a value to a one-hot vector based on options in lst"""
if val not in lst:
val = lst[-1]
    return [x == val for x in lst]
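# Illustrative usage (added; not part of the original snippet): unknown values
# fall back to the last option in the list.
assert list(one_hot_vector("b", ["a", "b", "c"])) == [False, True, False]
assert list(one_hot_vector("z", ["a", "b", "c"])) == [False, False, True]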
| 17,223
|
def follow(request, username):
""" Add user with username to current user's following list """
request.user.followers.add(User.objects.get(username=username))
return redirect('accounts:followers')
| 17,224
|
def sesteva_stolpce(seznam_seznamov_stolpcev):
"""sešteje vse 'stolpce' v posameznem podseznamu """
matrika_stolpcev = []
for i in range(len(seznam_seznamov_stolpcev)):
sez = seznam_seznamov_stolpcev[i]
stolpec11 = sez[0]
        while len(sez) > 1:
            stolpec22 = sez[1]
            stolpec11 = vsota_stolpcev(stolpec11, stolpec22)
            sez = sez[1:]
matrika_stolpcev.append(stolpec11)
return matrika_stolpcev
| 17,225
|
def create_response(data=None, status=200, message=''):
"""
Wraps response in a consistent format throughout the API
Format inspired by https://medium.com/@shazow/how-i-design-json-api-responses-71900f00f2db
Modifications included:
- make success a boolean since there's only 2 values
- make message a single string since we will only use one message per response
IMPORTANT: data must be a dictionary where:
- the key is the name of the type of data
- the value is the data itself
"""
    if data is None:
        data = {}
    response = {
'success': 200 <= status < 300,
'code': status,
'message': message,
'result': data
}
return jsonify(response), status
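# Usage sketch added for illustration (not part of the original snippet): assumes
# `jsonify` above is flask.jsonify, so an application context is required.
from flask import Flask

_app = Flask(__name__)
with _app.test_request_context():
    _body, _status = create_response(data={"users": []}, status=404, message="not found")
    assert _status == 404
    assert _body.get_json() == {
        "success": False, "code": 404, "message": "not found", "result": {"users": []},
    }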
| 17,226
|
async def test_reschedule_action_1(startup_and_shutdown_uvicorn, base_url, tmp_path):
""" schedule action, run it, change it, re-run it """
await reset_dispatcher(base_url, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "three"},
)
scheduler = Timely(interval=1)
await add_action(base_url=base_url, action_name="foo", action=action1)
await add_scheduler(base_url=base_url, scheduler_name="bar", scheduler=scheduler)
await schedule_action(base_url=base_url, action_name="foo", scheduler_name="bar")
await assert_job_count(base_url=base_url, n=1)
await run_and_stop_jobs(base_url=base_url, pause=2)
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
await set_action(base_url=base_url, action_name="foo", action=action2)
await assert_job_count(base_url=base_url, n=1)
await run_and_stop_jobs(base_url=base_url, pause=2)
lines = None
with open(action2.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
| 17,227
|
def add_to_worklist(worklist, item):
"""assures that each item is only once in the list"""
if item in worklist:
return
worklist.append(item)
| 17,228
|
def do_sizes_match(imgs):
"""Returns if sizes match for all images in list."""
return len([*filter(lambda x: x.size != x.size[0], imgs)]) > 0
| 17,229
|
def gaussian_sampling(len_x, len_y, num_samples, spread_factor=5, origin_ball=1):
"""
Create a gaussian sampling pattern where each point is sampled from a
bivariate, concatenated normal distribution.
Args:
len_x (int): Size of output mask in x direction (width)
len_y (int): Size of output mask in y direction (height)
num_samples (int): Number of samples to pick
spread_factor (float): Concentration of samples (ie, the SD of the
probability distributions are len/spread_factor)
origin_ball (int): Radius of ball around origin where all samples
are included.
Returns:
np.ndarray: A boolean numpy array (mask) depicting sampling pattern.
"""
# Create two truncated normal distributions for x and y dir
lower = 0
upper_x = len_x
mu_x = len_x // 2
sigma_x = len_x // spread_factor
randgen_x = stats.truncnorm(
(lower - mu_x) / sigma_x,
(upper_x - mu_x) / sigma_x,
loc=mu_x,
scale=sigma_x
)
upper_y = len_y
mu_y = len_y // 2
sigma_y = len_y // spread_factor
randgen_y = stats.truncnorm(
(lower - mu_y) / sigma_y,
(upper_y - mu_y) / sigma_y,
loc=mu_y,
scale=sigma_y
)
# Create mask
    mask = np.zeros([len_y, len_x], dtype=bool)
# Add origin ball
if origin_ball > 0:
y_grid, x_grid = np.ogrid[:len_y, :len_x]
dist_from_center = np.sqrt((y_grid - mu_y) ** 2 + (x_grid - mu_x) ** 2)
mask = dist_from_center <= origin_ball
# Subtract origin ball from number of samples
num_samples -= np.sum(mask)
# Sample points from distribution
xs = randgen_x.rvs(num_samples).astype(np.uint32)
ys = randgen_y.rvs(num_samples).astype(np.uint32)
for i in range(num_samples):
x, y = xs[i], ys[i]
# Ensure unique samples
while mask[y, x]:
x = randgen_x.rvs(1).astype(np.uint32)
y = randgen_y.rvs(1).astype(np.uint32)
xs[i], ys[i] = x, y
mask[y, x] = True
return mask
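# Usage sketch added for illustration (not part of the original snippet): assumes
# `numpy as np` and `scipy.stats as stats` are imported as in the function above.
_mask = gaussian_sampling(len_x=64, len_y=64, num_samples=500, spread_factor=5, origin_ball=4)
print(_mask.shape, int(_mask.sum()))  # (64, 64) with roughly 500 sampled points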
| 17,230
|
def add_shares(parent):
"""allows you to add share"""
def save():
if(manage_db.check_if_valid_name(name.get()) and
manage_db.check_for_real_numbers(entry_price.get()) and
manage_db.check_date_format(date.get())):
share = {"Name": name.get(),
"Quantity": quantity.get(),
"BuyingPrice": entry_price.get(),
"BuyingDate": date.get(),
"Cost": "",
"SellingPrice": "",
"SellingDate": "",
"Dividends": ""}
manage_db.add_share("gpw_shares", share)
manage_db.add_current_price(
name.get(), scrap_web.pull_current_price(name.get()))
shares_page.Shares.curent_canvas(parent)
top_window.destroy()
top_window = tk.Toplevel(parent, height=600, width=390)
# LABELS:
list_of_labels = ["Name:", "Quantity:", "Entry price (per share):",
"Entry date:", ]
for txt in list_of_labels:
label = tk.Label(top_window, text=txt, font=FONT)
label.grid(sticky="nw")
# ENTRIES:
name = tk.Entry(top_window, width=9, font=FONT)
name.grid(row=0, column=1, padx=10)
quantity = tk.Entry(top_window, width=9, font=FONT)
quantity.grid(row=1, column=1, padx=10)
entry_price = tk.Entry(top_window, width=9, font=FONT)
entry_price.grid(row=2, column=1, padx=10)
date = tk.Entry(top_window, width=9, font=FONT)
date.grid(row=3, column=1, padx=10)
add_button = tk.Button(
top_window, text="Add", font=FONT, command=save)
add_button.grid(sticky="nw", padx=5, pady=5)
| 17,231
|
def from_cx_jsons(graph_json_str: str) -> BELGraph:
"""Read a BEL graph from a CX JSON string."""
return from_cx(json.loads(graph_json_str))
| 17,232
|
def _calculate_dimensions(image: Image) -> Tuple[int, int]:
"""
Returns the width and height of the given pixel data.
The height of the image is the number of rows in the list,
while the width of the image is determined by the number of
pixels on the first row. It is assumed that each row contains
the same number of pixels.
:param image: pixel data
:return: width and height as a tuple
"""
try:
width = 0
height = len(image)
if height != 0:
width = len(image[0])
return width, height
except (IndexError, TypeError):
        # Either the data is not subscriptable, or the
        # length of the first row cannot be obtained.
raise ValueError("invalid pixel data - could not determine dimensions")
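# Illustrative usage (added; not part of the original snippet): pixel data is a
# list of rows, so a 2-row, 3-column image reports a width of 3 and a height of 2.
assert _calculate_dimensions([[0, 1, 2], [3, 4, 5]]) == (3, 2)
assert _calculate_dimensions([]) == (0, 0)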
| 17,233
|
def cli(ctx, invocation_id):
"""Get a summary of an invocation, stating the number of jobs which succeed, which are paused and which have errored.
Output:
The invocation summary.
For example::
{'states': {'paused': 4, 'error': 2, 'ok': 2},
'model': 'WorkflowInvocation',
'id': 'a799d38679e985db',
'populated_state': 'ok'}
"""
return ctx.gi.invocations.get_invocation_summary(invocation_id)
| 17,234
|
def test_local_plugin_can_add_option(local_config):
"""A local plugin can add a CLI option."""
argv = ["--config", local_config, "--anopt", "foo"]
stage1_parser = stage1_arg_parser()
stage1_args, rest = stage1_parser.parse_known_args(argv)
cfg, cfg_dir = config.load_config(
config=stage1_args.config, extra=[], isolated=False
)
opts = finder.parse_plugin_options(
cfg,
cfg_dir,
enable_extensions=None,
require_plugins=None,
)
plugins = finder.find_plugins(cfg, opts)
loaded_plugins = finder.load_plugins(plugins, opts)
option_manager = OptionManager(
version="123",
plugin_versions="",
parents=[stage1_parser],
)
register_default_options(option_manager)
option_manager.register_plugins(loaded_plugins)
args = aggregator.aggregate_options(option_manager, cfg, cfg_dir, argv)
assert args.extended_default_select == ["XE", "C90", "F", "E", "W"]
assert args.anopt == "foo"
| 17,235
|
def test_compute_delta_cchalf_returned_results():
"""Test that delta cchalf return necessary values for scale_and_filter."""
# Check for correct recording of
# results_summary['per_dataset_delta_cc_half_values']['delta_cc_half_values']
summary = {}
delta_cc = {0: -4, 1: 2, 2: -3, 3: -5, 4: 1}
sorted_data, sorted_ccs = DeltaCCHalf.sort_deltacchalf_values(delta_cc, summary)
expected_data_order = [3, 0, 2, 4, 1]
expected_cc_order = [-5, -4, -3, 1, 2]
assert list(sorted_data) == expected_data_order
assert list(sorted_ccs) == expected_cc_order
assert (
summary["per_dataset_delta_cc_half_values"]["delta_cc_half_values"]
== expected_cc_order
)
# Check for correct recording for dataset mode
exp = generate_test_experiments(2)
refls = generate_test_reflections(2)
ids_to_remove = [0]
results_summary = {"dataset_removal": {}}
_ = CCHalfFromDials.remove_datasets_below_cutoff(
exp, refls, ids_to_remove, results_summary
)
assert "experiments_fully_removed" in results_summary["dataset_removal"]
assert "n_reflections_removed" in results_summary["dataset_removal"]
assert results_summary["dataset_removal"]["experiments_fully_removed"] == ["0"]
assert results_summary["dataset_removal"]["n_reflections_removed"] == 10
# Check for correct recording for image group mode.
exp = generate_test_experiments(2)
refls = generate_test_reflections(2)
ids_to_remove = [0, 1]
image_group_to_expid_and_range = {
0: ("0", (1, 5)),
1: ("0", (6, 10)),
2: ("1", (1, 5)),
3: ("1", (6, 10)),
}
expids_to_image_groups = {"0": [0, 1], "1": [2, 3]}
results_summary = {"dataset_removal": {}}
_ = CCHalfFromDials.remove_image_ranges_below_cutoff(
exp,
refls,
ids_to_remove,
image_group_to_expid_and_range,
expids_to_image_groups,
results_summary,
)
assert "experiments_fully_removed" in results_summary["dataset_removal"]
assert "n_reflections_removed" in results_summary["dataset_removal"]
assert "image_ranges_removed" in results_summary["dataset_removal"]
assert results_summary["dataset_removal"]["experiments_fully_removed"] == ["0"]
assert results_summary["dataset_removal"]["n_reflections_removed"] == 10
assert [(6, 10), 0] in results_summary["dataset_removal"]["image_ranges_removed"]
assert [(1, 5), 0] in results_summary["dataset_removal"]["image_ranges_removed"]
assert len(results_summary["dataset_removal"]["image_ranges_removed"]) == 2
| 17,236
|
def test_missing_classes():
"""Test that empty tuple is returned if data of embedded representation don't have 'class' key.
1. Create an embedded representation parser for a dictionary without classes.
2. Parse classes.
3. Check that empty tuple is returned.
"""
actual_classes = EmbeddedRepresentationParser(data={}, parser=JSONParser()).parse_classes()
assert actual_classes == (), "Wrong classes"
| 17,237
|
def union_categoricals(to_union: List[pandas.core.series.Series]):
"""
usage.dask: 8
"""
...
| 17,238
|
def remove_observations_mean(data,data_obs,lats,lons):
"""
    Subtracts the observations from the model data to calculate model biases
"""
### Import modules
import numpy as np
### Remove observational data
databias = data - data_obs[np.newaxis,np.newaxis,:,:,:]
return databias
| 17,239
|
def test_field_multi_instance(frozen: bool):
"""Each instance has a separate state."""
@dataclasses.dataclass(frozen=frozen)
class A:
x: Any = edc.field(validate=str) # pytype: disable=annotation-type-mismatch
a0 = A(123)
assert a0.x == '123'
a1 = A(456)
assert a0.x == '123'
assert a1.x == '456'
if frozen:
with pytest.raises(dataclasses.FrozenInstanceError):
a0.x = 456
else:
a0.x = 789
assert a0.x == '789'
| 17,240
|
def joint_extraction_model_fn(features, labels, mode, params):
"""Runs the node-level sequence labeling model."""
logging.info("joint_extraction_model_fn")
inputs = features # Arg "features" is the overall inputs.
# Read vocabs and inputs.
dropout = params["dropout"]
if params["circle_features"]:
nnodes, friend_has_label, (words, nwords), (
prev_text_words,
n_prev_text_words), (chars_list, chars_len_list), (partner_words, _), (
friends_words, n_friends_words), (friends_fix, friends_var), (
leaf_type_list, goldmine_feat_list), (_, _), (
node_xpath_list,
node_xpath_len_list), (attributes, attributes_plus_none), (
position_list) = inputs
else:
nnodes, (words, nwords), (prev_text_words, n_prev_text_words), (
chars_list, chars_len_list), (leaf_type_list, goldmine_feat_list), (
_, _), (node_xpath_list,
node_xpath_len_list), (attributes), (position_list) = inputs
# nnodes, the number of nodes in each page;
# shape is [?]; length is the number of pages.
# words, nwords are the node_text feature, shape is [?, ?, ?]
# the first two dimension is the batch * pages,
# the last one is the maximum length of the word lists
# prev_text_words, n_prev_text_words, similar as above for previous nodes'text
# chars_list, chars_len_list, shape is [?,?,?,?] also for node_text features
# the additional dim is for the length of the character sequences.
# friends_words, shape is [?, ?, ?], gathers all the words from different
# friends of one node.
# friends_fix, friends_var, shapes are [?, ?, ?, ?]
# the first two dimension is the batch * pages,
# the last two are the maximum length of friend nodes and words.
nnodes = merge_first_two_dims(nnodes)
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = _index_table_from_file(
params["words"], num_oov_buckets=params["num_oov_buckets"])
with tf.gfile.Open(params["tags"]) as f:
indices = [idx for idx, tag in enumerate(f) if tag.strip() != "none"]
num_tags = len(indices) + 1 # Make "None" as the tag with the last index.
# NodeText Char Embeddings.
with tf.gfile.Open(params["chars"]) as f:
num_chars = sum(1 for _ in f) + params["num_oov_buckets"]
vocab_chars = _index_table_from_file(
params["chars"], num_oov_buckets=params["num_oov_buckets"])
char_ids = vocab_chars.lookup(chars_list)
variable = tf.get_variable("chars_embeddings",
[num_chars + 1, params["dim_chars"]], tf.float32)
char_embeddings = tf.nn.embedding_lookup(variable, char_ids)
char_embeddings = tf.layers.dropout(
char_embeddings, rate=dropout, training=training)
logging.info("char_embeddings.shape: %s", char_embeddings.shape)
# Char 1d convolution.
weights = tf.sequence_mask(chars_len_list)
char_embeddings = masked_conv1d_and_max(char_embeddings, weights,
params["filters"],
params["kernel_size"])
logging.info("char_embeddings.shape after CNN: %s", char_embeddings.shape)
# Word Embeddings.
word_ids = vocab_words.lookup(words)
glove = np.load(tf.gfile.Open(params["glove"],
"rb"))["embeddings"] # np.array
variable = np.vstack([glove, [[0.] * params["dim_word_embedding"]]])
# To finetune the GloVe embedding by setting trainable as True.
variable = tf.Variable(variable, dtype=tf.float32, trainable=True)
word_embeddings = tf.nn.embedding_lookup(variable, word_ids)
logging.info("word_embeddings.shape: %s", word_embeddings.shape)
# Prev_Text Representations.
prev_text_word_ids = vocab_words.lookup(prev_text_words)
prev_text_word_embeddings = tf.nn.embedding_lookup(variable,
prev_text_word_ids)
if params["use_prev_text_lstm"]:
# PREV_text LSTM.
logging.info("prev_text_representation using lstm")
prev_t = merge_first_two_dims(prev_text_word_embeddings)
# Seq * batch * input
prev_t = tf.transpose(prev_t, perm=[1, 0, 2]) # Need time-major.
prev_output_fw, prev_output_bw = _bidirectional_lstm(
prev_t, params["lstm_size"], merge_first_two_dims(n_prev_text_words))
prev_output = tf.concat([prev_output_fw, prev_output_bw], axis=-1)
prev_output = tf.reduce_mean(prev_output, 0)
prev_output = tf.layers.dropout(
prev_output, rate=dropout, training=training)
logging.info("prev_output.shape (after reduce_mean): %s", prev_output.shape)
context_representation = split_first_two_dims_by_example(
prev_output, prev_text_word_embeddings)
logging.info("context_representation.shape (after split): %s",
context_representation.shape)
else:
logging.info("prev_text_word_embeddings.shape: %s",
prev_text_word_embeddings.shape)
context_representation = tf.reduce_mean(prev_text_word_embeddings, 2)
logging.info("context_representation.shape: %s",
context_representation.shape)
if params["circle_features"]:
partner_embeddings, circle_representation = circle_feature_modeling(
variable, vocab_words, partner_words, friends_words, n_friends_words,
friends_fix, friends_var, word_embeddings, dropout, training, params)
context_representation = circle_representation
if params["use_friend_semantic"]:
friends_ids = vocab_words.lookup(friends_words)
friend_embeddings = tf.nn.embedding_lookup(variable, friends_ids)
if params["use_xpath_lstm"]:
h_output = xpath_feature_modeling(node_xpath_list, node_xpath_len_list,
training, params)
context_representation = tf.concat([h_output, context_representation],
axis=2)
if params["use_position_embedding"]:
position_representation = position_modeling(position_list, params)
context_representation = tf.concat(
[context_representation, position_representation], axis=2)
# Text Embeddings: Concatenate Word and Char and Feature Embeddings.
embeddings = tf.concat([word_embeddings, char_embeddings], axis=-1)
embeddings = tf.layers.dropout(embeddings, rate=dropout, training=training)
logging.info("embeddings.shape: %s", embeddings.shape)
# LSTM inside node texts.
t = merge_first_two_dims(embeddings)
t = tf.transpose(t, perm=[1, 0, 2]) # Need time-major.
output_fw, output_bw = _bidirectional_lstm(t, params["lstm_size"],
merge_first_two_dims(nwords))
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.reduce_mean(output, 0)
output = tf.layers.dropout(output, rate=dropout, training=training)
logging.info("output.shape (after reduce_mean): %s", output.shape)
output = split_first_two_dims_by_example(output, embeddings)
logging.info("output.shape (after split): %s", output.shape)
node_seq_input = tf.concat([output, context_representation], axis=2)
logging.info("output.shape (after + prev): %s", node_seq_input.shape)
# Leaf Type Features.
if params["add_leaf_types"]:
with tf.gfile.Open(params["leaf_types"]) as f:
num_leaf_types = sum(1 for _ in f) + params["num_oov_buckets"]
vocab_leaf_types = _index_table_from_file(
params["leaf_types"], num_oov_buckets=params["num_oov_buckets"])
leaf_type_ids = vocab_leaf_types.lookup(leaf_type_list)
leaf_variable = tf.get_variable(
"leaf_type_embeddings", [num_leaf_types + 1, params["dim_leaf_type"]],
tf.float32)
leaf_type_embeddings = tf.nn.embedding_lookup(leaf_variable, leaf_type_ids)
leaf_type_embeddings = tf.layers.dropout(
leaf_type_embeddings, rate=dropout, training=training)
logging.info("leaf_type_embeddings.shape: %s", char_embeddings.shape)
logging.info("node_seq_input.shape before leaf: %s", node_seq_input.shape)
node_seq_input = tf.concat([node_seq_input, leaf_type_embeddings], axis=2)
logging.info("node_seq_input.shape after leaf: %s", node_seq_input.shape)
# Goldmine Feat Embeddings.
if params["add_goldmine"]:
vocab_goldmine_features = _index_table_from_file(
params["goldmine_features"], num_oov_buckets=1)
goldmine_feature_variable = tf.get_variable("goldmine_feature_embeddings",
[8 + 1, params["dim_goldmine"]],
tf.float32)
goldmine_feat_ids = vocab_goldmine_features.lookup(goldmine_feat_list)
goldmine_feat_embeddings = tf.nn.embedding_lookup(goldmine_feature_variable,
goldmine_feat_ids)
goldmine_feat_embeddings = tf.reduce_sum(goldmine_feat_embeddings, 2)
logging.info("goldmine_feat_embeddings.shape: %s",
goldmine_feat_embeddings.shape)
node_seq_input = tf.concat([node_seq_input, goldmine_feat_embeddings],
axis=2)
logging.info("node_seq_input.shape after goldmine: %s",
node_seq_input.shape)
# Node-level LSTM modeling.
if params["node_encoder"] == "lstm":
# Node-Sequence-LSTM.
n_t = tf.transpose(node_seq_input, perm=[1, 0, 2]) # Need time-major.
node_output_fw, node_output_bw = _bidirectional_lstm(
n_t, params["node_lstm_size"], nnodes)
node_seq_output = tf.concat([node_output_fw, node_output_bw], axis=-1)
node_seq_output = tf.transpose(node_seq_output, perm=[1, 0, 2])
elif params["node_encoder"] == "cnn":
node_weights = tf.sequence_mask(nnodes)
node_seq_output = masked_conv1d_and_max(
node_seq_input,
node_weights,
params["node_filters"],
params["node_kernel_size"],
reducemax=False)
elif params["node_encoder"] == "transformer":
# Node-Sequence-Transformer.
node_seq_output = transformer_encoding(node_seq_input, nnodes, params, mode)
else:
node_seq_output = node_seq_input
logging.info("node_seq_input.shape after encoder: %s", node_seq_output.shape)
if params["node_encoder"] != "transformer":
# Add the dropout layer if the encoder is not a transformer.
node_seq_output = tf.layers.dropout(
node_seq_output, rate=dropout, training=training)
if params["use_friends_discrete_feature"] and params["circle_features"]:
friend_has_label = tf.expand_dims(friend_has_label, axis=-1)
node_seq_output = tf.concat([node_seq_output, friend_has_label], axis=-1)
logging.info("node_seq_input.shape after friend_has_label: %s",
node_seq_output.shape)
node_seq_output = tf.layers.dense(node_seq_output,
params["last_hidden_layer_size"])
logits = tf.layers.dense(node_seq_output, num_tags, name="label_dense_1")
if params["semantic_encoder"] and params["circle_features"]:
partner_similarity_emb = semantic_similarity(variable, vocab_words,
partner_embeddings, attributes,
params)
node_seq_output = tf.concat(
[node_seq_output,
tf.nn.softmax(partner_similarity_emb)], axis=-1)
logging.info("node_seq_output.shape after semantic encoder: %s",
node_seq_output.shape)
if params["use_friend_semantic"]:
friends_similarity_emb = semantic_similarity(variable, vocab_words,
friend_embeddings,
attributes, params)
node_seq_output = tf.concat([node_seq_output, friends_similarity_emb],
axis=-1)
if params["objective"] == "classification":
node_seq_output = tf.layers.dense(
node_seq_output, params["dim_word_embedding"], activation="relu")
node_seq_output = tf.layers.dense(node_seq_output,
params["last_hidden_layer_size"])
logging.info("node_seq_output.shape after semantic encoder: %s",
node_seq_output.shape)
logits = tf.layers.dense(node_seq_output, num_tags, name="label_dense_2")
elif params["objective"] == "semantic_scorer":
logits = semantic_scorer(attributes_plus_none, node_seq_output, params)
elif params["objective"] == "binary_scorer":
logits = binary_scorer(attributes_plus_none, node_seq_output, training,
params)
if params["use_crf"]:
# CRF Layer.
logging.info("logits.shape: %s", logits.shape)
crf_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32)
pred_ids, _ = tfa.text.crf.crf_decode(logits, crf_params, nnodes)
logging.info("pred_ids.shape: %s", pred_ids.shape)
else:
pred_ids = tf.argmax(logits, 2)
logging.info("pred_ids.shape: %s", pred_ids.shape)
# Predict for new sentences in target set.
if mode == tf.estimator.ModeKeys.PREDICT:
reverse_vocab_tags = _index_table_from_file(params["tags"], 1)
pred_strings = reverse_vocab_tags.lookup(tf.strings.as_string(pred_ids))
predictions = {
"pred_ids": pred_ids,
"tags": pred_strings,
"scores": tf.nn.softmax(logits),
"raw_scores": logits,
}
# Store the intermediate weights.
if params["semantic_encoder"]:
predictions["similarity"] = partner_similarity_emb
if params["friend_encoder"]:
predictions["friends_embs"] = circle_representation
if params["extract_node_emb"]:
predictions["node_embs"] = node_seq_output
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
vocab_tags = _index_table_from_file(params["tags"], 1)
tags = vocab_tags.lookup(labels)
logging.info("tags.shape: %s", logits.shape)
logging.info(
"Parameter size: %s",
np.sum(
[np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
if params["use_crf"]:
log_likelihood, _ = tfa.text.crf.crf_log_likelihood(logits, tags, nnodes,
crf_params)
loss = tf.reduce_mean(-log_likelihood)
else:
loss = tf.losses.sparse_softmax_cross_entropy(labels=tags, logits=logits)
# Processing the metrics.
weights = tf.sequence_mask(nnodes)
metrics = {
"acc":
tf.metrics.accuracy(tags, pred_ids, weights),
"precision":
seq_tagging_metric_util.precision(tags, pred_ids, num_tags, indices,
weights),
"recall":
seq_tagging_metric_util.recall(tags, pred_ids, num_tags, indices,
weights),
"f1":
seq_tagging_metric_util.f1(tags, pred_ids, num_tags, indices,
weights),
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.name_scope("train_scope"):
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(
loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=metrics)
| 17,241
|
def use_rbg():
"""
Swaps the Green and Blue channels on the LED backlight
Use if you have a first batch Display-o-Tron 3K
"""
global LED_R_G, LED_R_B
global LED_M_G, LED_M_B
global LED_L_G, LED_L_B
(LED_R_G, LED_R_B) = (LED_R_B, LED_R_G)
(LED_M_G, LED_M_B) = (LED_M_B, LED_M_G)
(LED_L_G, LED_L_B) = (LED_L_B, LED_L_G)
| 17,242
|
def convert(
input, ply, bb,
direction="z", inverse=False,
ignoreAlpha=True,
wSamples=0, hSamples=0, maintainAspectRatio=True
):
"""
Read the input directory and find all of the images of the supported file
extensions. This list is sorted and will then have its pixels processed
and stored in a PLY file format. All of the pixels will be mapped onto a
bounding box that is provided by the user. This bounding box is a list in
the following axis; x, y and z. The direction determines what depth
direction is, the depth direction is the direction travelled between each
images. This direction can be reversed if needed. The amount of samples
can be adjusted to lower the resolution of the point cloud, in case the
images are very high resolution the point cloud size can be adjusted by
changing the amount of samples. If no samples are specified the images
resolution will be used.
:param str input: Input directory
:param str ply: Output filepath
:param list bb: Bounding box; x, y, z
:param str direction: Depth direction
:param bool inverse: Inverse depth direction
    :param bool ignoreAlpha: Skip pixel if alpha is < 25
    :param int wSamples: Number of width sample points
    :param int hSamples: Number of height sample points
    :param bool maintainAspectRatio: Maintain the image aspect ratio when sampling
"""
# variables
t = time.time()
totalPoints = 0
# get conversion dict
mapper = getPositionMapper(direction)
if not mapper:
raise RuntimeError(
"Invalid depth direction! Valid arguments: 'x', 'y' or 'z'"
)
# get direction multiplier
multiplier = -1 if inverse else 1
# get images
sequence = getImageSequence(input)
length = len(sequence)
if not length:
raise RuntimeError(
"No Image sequence found!"
)
print (divider())
print ("Images Found: {0}".format(length))
# get mapper data
wI, hI, dI = mapper
wB, hB, dB = bb[wI], bb[hI], bb[dI]
print (divider())
print ("Width Index: {0}".format(wI))
print ("Height Index: {0}".format(hI))
print ("Depth Index: {0}".format(dI))
print (divider())
print ("Start Processing Images")
print (divider())
# write point cloud
with open(ply, "w") as f:
# write header
f.write(PLY_HEADER)
# get image data
for i, image in enumerate(sequence):
# process image
data = getImageData(
image,
ignoreAlpha,
wSamples,
hSamples,
maintainAspectRatio
)
# process data
for pos, colour in data:
# map position in 3 dimensions
position = [0, 0, 0]
position[wI] = wB * pos[0]
position[hI] = hB * pos[1]
position[dI] = (dB/length) * i * multiplier
# get strings
# rounding positions to 3 decimals
posString = [str(round(p,3)) for p in position]
                # skip points whose colour is pure black (0 0 0) instead of writing them
                black = ['0', '0', '0']
                colourString = [str(c) for c in colour]
                if colourString != black:
                    f.write("{0}\n".format(" ".join(posString + colourString)))
                    totalPoints += 1
countString = "< {0} / {1} >".format(i + 1, length).ljust(20)
pointString = "Points Written: {0}".format(totalPoints).ljust(20)
print (countString, pointString)
# update header
print (divider())
print ("Updating header with vertex count: {0}".format(totalPoints))
f = fileinput.FileInput(ply, inplace=True)
for line in f:
        print(line.replace("<VERTEXCOUNT>", str(totalPoints)), end="")
f.close()
# print duration and output path
diff = time.time() - t
print (divider())
print ("Output: {0}".format(ply))
print ("Duration: {0} min".format(round(diff/60,1)))
| 17,243
|
def jp2yy (sent):
"""take a Japanese sentence in UTF8 convert to YY-mode using mecab"""
### (id, start, end, [link,] path+, form [surface], ipos, lrule+[, {pos p}+])
### set ipos as lemma (just for fun)
### fixme: do the full lattice
yid = 0
start = 0
cfrom = 0
cto = 0
yy = list()
for tok in m.parse(sent.encode('utf-8')).split('\n'):
if tok and tok != 'EOS':
##print tok
(form, p, lemma, p1, p2, p3) = tok.decode('utf-8').split('\t')
if form in punct:
continue
p2 = p2 or 'n'
p3 = p3 or 'n'
# pos = '-'.join([p1, p2, p3])
pos = "%s:%s-%s" % (p1, p2, p3) ## wierd format jacy requires
cfrom = sent.find(form, cto) ## first instance after last token
cto = cfrom + len(form) ## find the end
yy.append('(%d, %d, %d, <%d:%d>, 1, "%s", %s, "null", "%s" 1.0)' % \
(yid, start, start +1, cfrom, cto, form, 0, pos))
yid += 1
start += 1
return yy
| 17,244
|
def save(data,filename):
"""This creates a FreeMind file based on a SUAVE data structure.
Assumptions:
None
Source:
N/A
Inputs:
data SUAVE data structure
filename <string> name of the output file
Outputs:
FreeMind file with name as specified by filename
Properties Used:
N/A
"""
try:
tag = data.tag
temp = Data()
temp[tag] = data
data = temp
except AttributeError:
pass
fm_data = XML.Data()
fm_data.tag = 'map'
fm_data.attributes.version = "1.0.0"
def build_nodes(prev,data):
if isinstance(data,dict):
for key,val in data.items():
node = new_fm_node(prev,key)
build_nodes(node,val)
elif isinstance(data,(list,tuple)):
for val in data:
build_nodes(prev,val)
        elif data is not None:
text = str(data)
node = new_fm_node(prev,text)
build_nodes(fm_data,data)
XML.save(fm_data,filename)
return
| 17,245
|
def test_multi_headed_mat_attention():
"""Test invoking MultiHeadedMATAttention."""
feat = dc.feat.MATFeaturizer()
input_smile = "CC"
out = feat.featurize(input_smile)
node = torch.tensor(out[0].node_features).float().unsqueeze(0)
adj = torch.tensor(out[0].adjacency_matrix).float().unsqueeze(0)
dist = torch.tensor(out[0].distance_matrix).float().unsqueeze(0)
mask = torch.sum(torch.abs(node), dim=-1) != 0
layer = torch_layers.MultiHeadedMATAttention(
dist_kernel='softmax',
lambda_attention=0.33,
lambda_distance=0.33,
h=16,
hsize=1024,
dropout_p=0.0)
op = torch_layers.MATEmbedding()(node)
output = layer(op, op, op, mask, adj, dist)
assert (output.shape == (1, 3, 1024))
| 17,246
|
def make_ss_matrices(sigma_x, dt):
"""
To make Q full-rank for inversion (so the mle makes sense), use:
Q = [ dt**2 dt/2
dt/2 1 ]
to approximate Q = (dt 1)(dt 1)'
System:
x = [p_x p_y v_x v_y]
y = [p_x' p_y']
:param sigma_x:
:param dt:
:return:
sigma_0: starting value for sigma_v, with process variance (sigma_v^2 Q)
"""
i2 = np.eye(2)
_ = np.zeros((2, 2))
A = np.block([
[i2, dt*i2],
[_, i2],
])
Q = np.block([
[dt**2 * i2, dt*i2 * .5],
[dt*i2 * .5, i2],
])
C = np.block([i2, _])
R = sigma_x**2 * i2
sigma_0 = float(sigma_x) / 2
return A, Q, C, R, sigma_0
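# Illustrative usage (added; not part of the original snippet): assumes numpy is
# imported as np, as in the function above.
_A, _Q, _C, _R, _sigma0 = make_ss_matrices(sigma_x=2.0, dt=0.1)
print(_A.shape, _Q.shape, _C.shape, _R.shape, _sigma0)  # (4, 4) (4, 4) (2, 4) (2, 2) 1.0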
| 17,247
|
def get_icp_val(tmr):
"""Read input capture value"""
return peek(tmr + ICRx) | (peek(tmr + ICRx + 1) << 8)
| 17,248
|
def main(epochs: int, lr: float, min_accuracy: float, stop_service: bool):
"""Run the MLflow example pipeline"""
if stop_service:
service = load_last_service_from_step(
pipeline_name="continuous_deployment_pipeline",
step_name="model_deployer",
running=True,
)
if service:
service.stop(timeout=10)
return
# Initialize a continuous deployment pipeline run
deployment = continuous_deployment_pipeline(
importer=importer_mnist(),
normalizer=normalizer(),
trainer=tf_trainer(config=TrainerConfig(epochs=epochs, lr=lr)),
evaluator=tf_evaluator(),
deployment_trigger=deployment_trigger(
config=DeploymentTriggerConfig(
min_accuracy=min_accuracy,
)
),
model_deployer=model_deployer(config=MLFlowDeployerConfig(workers=3)),
)
deployment.run()
# Initialize an inference pipeline run
inference = inference_pipeline(
dynamic_importer=dynamic_importer(),
prediction_service_loader=prediction_service_loader(
MLFlowDeploymentLoaderStepConfig(
pipeline_name="continuous_deployment_pipeline",
step_name="model_deployer",
)
),
predictor=predictor(),
)
inference.run()
mlflow_env = Environment()[MLFLOW_ENVIRONMENT_NAME]
print(
"You can run:\n "
f"[italic green] mlflow ui --backend-store-uri {mlflow_env.tracking_uri}[/italic green]\n"
"...to inspect your experiment runs within the MLflow UI.\n"
"You can find your runs tracked within the `mlflow_example_pipeline`"
"experiment. There you'll also be able to compare two or more runs.\n\n"
)
service = load_last_service_from_step(
pipeline_name="continuous_deployment_pipeline",
step_name="model_deployer",
running=True,
)
if service:
print(
f"The MLflow prediction server is running locally as a daemon process "
f"and accepts inference requests at:\n"
f" {service.prediction_uri}\n"
f"To stop the service, re-run the same command and supply the "
f"`--stop-service` argument."
)
| 17,249
|
def dmp_to_mdiff(diffs):
"""Convert from diff_match_patch format to _mdiff format.
This is sadly necessary to use the HtmlDiff module.
"""
def yield_buffer(lineno_left, lineno_right):
while left_buffer or right_buffer:
if left_buffer:
left = lineno_left, '\0-{0}\1'.format(left_buffer.pop(0))
lineno_left += 1
else:
left = '', '\n'
if right_buffer:
right = lineno_right, '\0+{0}\1'.format(right_buffer.pop(0))
lineno_right += 1
else:
right = '', '\n'
yield (left, right, True), lineno_left, lineno_right
lineno_left = lineno_right = 1
left_buffer = []
right_buffer = []
for op, data in diffs:
for line in data.splitlines(True):
if op == DMP.DIFF_EQUAL:
for item, lleft, llright in yield_buffer(lineno_left,
lineno_right):
lineno_left = lleft
lineno_right = llright
yield item
yield (lineno_left, line), (lineno_right, line), False
lineno_left += 1
lineno_right += 1
elif op == DMP.DIFF_DELETE:
left_buffer.append(line)
elif op == DMP.DIFF_INSERT:
right_buffer.append(line)
for item, _, _ in yield_buffer(lineno_left, lineno_right):
yield item
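# Usage sketch added for illustration (not part of the original snippet): assumes
# DMP above is the diff_match_patch class (providing DIFF_EQUAL/DIFF_DELETE/DIFF_INSERT).
_diffs = [(DMP.DIFF_EQUAL, "a\n"), (DMP.DIFF_DELETE, "b\n"), (DMP.DIFF_INSERT, "B\n")]
for _row in dmp_to_mdiff(_diffs):
    print(_row)  # ((1, 'a\n'), (1, 'a\n'), False), then the marked-up delete/insert pair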
| 17,250
|
def validate(segmenter, val_loader, epoch, num_classes=-1):
"""Validate segmenter
Args:
segmenter (nn.Module) : segmentation network
val_loader (DataLoader) : training data iterator
epoch (int) : current epoch
num_classes (int) : number of classes to consider
Returns:
Mean IoU (float)
"""
val_loader.dataset.set_stage("val")
segmenter.eval()
cm = np.zeros((num_classes, num_classes), dtype=int)
with torch.no_grad():
for i, sample in enumerate(val_loader):
input = sample["image"]
target = sample["mask"]
input_var = torch.autograd.Variable(input).float().cuda()
# Compute output
output = segmenter(input_var)
output = (
cv2.resize(
output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
target.size()[1:][::-1],
interpolation=cv2.INTER_CUBIC,
)
.argmax(axis=2)
.astype(np.uint8)
)
# Compute IoU
gt = target[0].data.cpu().numpy().astype(np.uint8)
gt_idx = (
gt < num_classes
) # Ignore every class index larger than the number of classes
cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
if i % args.print_every == 0:
logger.info(
" Val epoch: {} [{}/{}]\t"
"Mean IoU: {:.3f}".format(
epoch, i, len(val_loader), compute_iu(cm).mean()
)
)
ious = compute_iu(cm)
logger.info(" IoUs: {}".format(ious))
miou = np.mean(ious)
logger.info(" Val epoch: {}\tMean IoU: {:.3f}".format(epoch, miou))
return miou
| 17,251
|
def test_determine_type_unsupported():
"""
GIVEN artifacts with an unsupported type
WHEN _determine_type is called with the artifacts
THEN FeatureNotImplementedError is raised.
"""
artifacts = types.ColumnArtifacts("unsupported")
with pytest.raises(exceptions.FeatureNotImplementedError):
column._determine_type(artifacts=artifacts)
| 17,252
|
def detect_forward(CoreStateMachine, PostConditionStateMachine):
"""A 'forward ambiguity' denotes a case where the post condition
implementation fails. This happens if an iteration in the core pattern is a
valid path in the post- condition pattern. In this case no decision can be
made about where to reset the input position.
Example: x+/x At the end of the post condition an incoming
'x' guides through a path in the post condition
and the core pattern. It cannot be determined
by a flag where the input position ends.
    NOTE: For many cases where there is a forward ambiguity quex can generate an
inverse post-condition that goes backwards from the end of the post
condition (see function 'mount()'). However, there are cases where even
this is not possible (see function 'detect_backward()').
"""
## print_callstack()
__assert_state_machines(CoreStateMachine, PostConditionStateMachine)
core_acceptance_state_list = CoreStateMachine.get_acceptance_state_list()
pcsm_init_state = PostConditionStateMachine.get_init_state()
for csm_state in core_acceptance_state_list:
if __dive_to_detect_iteration(CoreStateMachine, csm_state,
PostConditionStateMachine, pcsm_init_state):
return True
return False
| 17,253
|
def price_sensitivity(results):
"""
Calculate the price sensitivity of a strategy
results
results dataframe or any dataframe with the columns
open, high, low, close, profit
returns
the percentage of returns sensitive to open price
Note
-----
Price sensitivity is calculated by
1) Calculating the profit in cases where open=high and open=low
2) Dividing these profits by the total profits
    A high percentage indicates that most of your orders may not get
    executed at the LIMIT price since the stock tends to have a sharp
    movement when open=low or open=high. A value of 1 indicates that
    all returns are sensitive to prices.
    This is a somewhat rough measure and it doesn't take into account
    whether you BUY or SELL.
"""
profit = results["profit"].sum()
sen1 = results.query("open==low")["profit"].sum()
sen2 = results.query("open==high")["profit"].sum()
return (sen1 + sen2) / profit
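# Illustrative usage (added; not part of the original snippet): one of the three
# trades below opened at its low and one opened at its high.
import pandas as pd

_results = pd.DataFrame({
    "open": [10.0, 11.0, 12.0],
    "high": [10.5, 11.0, 12.5],
    "low": [10.0, 10.5, 11.5],
    "close": [10.2, 10.8, 12.1],
    "profit": [5.0, 3.0, 2.0],
})
print(price_sensitivity(_results))  # (5.0 + 3.0) / 10.0 = 0.8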
| 17,254
|
def wrap_zone(tz, key=KEY_SENTINEL, _cache={}):
"""Wrap an existing time zone object in a shim class.
This is likely to be useful if you would like to work internally with
non-``pytz`` zones, but you expose an interface to callers relying on
``pytz``'s interface. It may also be useful for passing non-``pytz`` zones
to libraries expecting to use ``pytz``'s interface.
:param tz:
A :pep:`495`-compatible time zone, such as those provided by
:mod:`dateutil.tz` or :mod:`zoneinfo`.
:param key:
The value for the IANA time zone key. This is optional for ``zoneinfo``
zones, but required for ``dateutil.tz`` zones.
:return:
A shim time zone.
"""
if key is KEY_SENTINEL:
key = getattr(tz, "key", KEY_SENTINEL)
if key is KEY_SENTINEL:
raise TypeError(
"The `key` argument is required when wrapping zones that do not "
+ "have a `key` attribute."
)
instance = _cache.get((id(tz), key), None)
if instance is None:
instance = _cache.setdefault((id(tz), key), _PytzShimTimezone(tz, key))
return instance
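# Usage sketch added for illustration (not part of the original snippet): assumes
# Python 3.9+ for zoneinfo and that _PytzShimTimezone (from the same module as
# wrap_zone) exposes pytz's localize()/normalize() interface.
from datetime import datetime
from zoneinfo import ZoneInfo

_shim = wrap_zone(ZoneInfo("America/New_York"))
_aware = _shim.localize(datetime(2024, 7, 1, 12, 0))  # pytz-style API on a non-pytz zone
print(_aware)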
| 17,255
|
async def async_browse_media(
hass, media_content_type, media_content_id, *, can_play_artist=True
):
"""Browse Spotify media."""
info = list(hass.data[DOMAIN].values())[0]
return await async_browse_media_internal(
hass,
info[DATA_SPOTIFY_CLIENT],
info[DATA_SPOTIFY_ME],
media_content_type,
media_content_id,
can_play_artist=can_play_artist,
)
| 17,256
|
def bounds(url: str) -> Tuple[str, str, str]:
"""Handle bounds requests."""
info = main.bounds(url)
return ("OK", "application/json", json.dumps(info))
| 17,257
|
def ldplotdb(xaxis, yaxis, center_freq=0, plotnum="", peaks=[]):
"""This function loads a matplotlib.pyplot figure with xaxis and yaxis data which can later be plotted"""
    yaxis = 10 * np.log10(yaxis)
    if len(peaks) != 0:
        plt.plot(xaxis[peaks], yaxis[peaks], 'ro')
plt.plot(xaxis, yaxis)
plt.title("FFT "+plotnum+str(center_freq)+" MHz")
plt.xlabel("Frequency [MHz]")
plt.ylabel("Power [dB]")
plt.grid(True)
| 17,258
|
def make_message_id():
"""
Generates rfc message id. The returned message id includes the angle
brackets.
"""
return email.utils.make_msgid('sndlatr')
| 17,259
|
def _understand_err_col(colnames):
"""Get which column names are error columns
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith('_nerr'):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith('_perr'):
raise ValueError("Missing positive error")
return serr, terr
| 17,260
|
def clear_folder(folder: str):
"""create temporary empty folder.
If it already exists, all containing files will be removed.
Arguments:
folder {[str]} -- Path to the empty folder
"""
if not os.path.exists(os.path.dirname(folder)):
os.makedirs(os.path.dirname(folder))
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path) and ('.csv' in file_path or '.ply' in file_path):
os.unlink(file_path)
except Exception as e:
print(e)
| 17,261
|
def unpivot(frame):
"""
Example:
>>> df
date variable value
0 2000-01-03 A 0.895557
1 2000-01-04 A 0.779718
2 2000-01-05 A 0.738892
3 2000-01-03 B -1.513487
4 2000-01-04 B -0.543134
5 2000-01-05 B 0.902733
6 2000-01-03 C -0.053496
7 2000-01-04 C 0.298079
8 2000-01-05 C -1.962022
9 2000-01-03 D -0.174269
10 2000-01-04 D -0.047428
11 2000-01-05 D -1.871996
>>> tm.makeTimeDataFrame(3)
A B C D
2000-01-03 -0.911447 0.274853 -0.740769 2.330942
2000-01-04 -0.208471 -1.024612 0.512266 -0.708707
2000-01-05 -1.368389 -3.464163 -1.940530 -1.149835
"""
N, K = frame.shape
data = {
"value": frame.to_numpy().ravel("F"),
"variable": np.asarray(frame.columns).repeat(N),
"date": np.tile(np.asarray(frame.index), K),
}
return pd.DataFrame(data, columns=["date", "variable", "value"])
| 17,262
|
def find_viable_generators_aux (target_type, prop_set):
""" Returns generators which can be used to construct target of specified type
with specified properties. Uses the following algorithm:
- iterates over requested target_type and all it's bases (in the order returned bt
type.all-bases.
- for each type find all generators that generate that type and which requirements
are satisfied by properties.
- if the set of generators is not empty, returns that set.
Note: this algorithm explicitly ignores generators for base classes if there's
at least one generator for requested target_type.
"""
# Select generators that can create the required target type.
viable_generators = []
initial_generators = []
import type
# Try all-type generators first. Assume they have
# quite specific requirements.
all_bases = type.all_bases(target_type)
for t in all_bases:
initial_generators = __type_to_generators.get(t, [])
if initial_generators:
dout("there are generators for this type")
if t != target_type:
# We're here, when no generators for target-type are found,
# but there are some generators for a base type.
# We'll try to use them, but they will produce targets of
# base type, not of 'target-type'. So, we clone the generators
# and modify the list of target types.
generators2 = []
for g in initial_generators[:]:
# generators.register adds generator to the list of generators
# for toolsets, which is a bit strange, but should work.
# That list is only used when inheriting toolset, which
                    # should have been done before generators are run.
ng = g.clone_and_change_target_type(t, target_type)
generators2.append(ng)
register(ng)
initial_generators = generators2
break
for g in initial_generators:
dout("trying generator " + g.id()
+ "(" + str(g.source_types()) + "->" + str(g.target_types()) + ")")
m = g.match_rank(prop_set)
if m:
dout(" is viable")
viable_generators.append(g)
return viable_generators
| 17,263
|
async def test_initialized_finished():
"""When the polling method is initialized as finished, it shouldn't invoke the command or sleep"""
command = raise_exception("polling method shouldn't invoke the command")
polling_method = AsyncDeleteRecoverPollingMethod(command, final_resource=None, finished=True)
assert polling_method.finished()
with mock.patch(SLEEP, raise_exception("the polling method shouldn't sleep")):
await polling_method.run()
| 17,264
|
def analyze(model, Y, print_to_console=True):
"""
Perform variance-based sensitivty analysis for each process.
Parameters
----------
model : object
The model defined in the sammpy
Y : numpy.array
A NumPy array containing the model outputs
print_to_console : bool
Print results directly to console (default False)
Returns
----------
Returns a dictionary with keys 'PSK', 'PSTK', where
each entry is a list of size of the number of process.
"""
# Number of sample realizations
obs = Y.shape[1]
# Number of process and process models
npros = len(model.frames['names'])
# Creat a dict to store the results
S = create_si_dict(npros)
# Perfrom the difference-based process sensitivty anlaysis
if print_to_console:
print('Runing MMDS difference-based process sensitivy analysis...')
MMDS = mmds_mean_var(model, Y)
# Save results to the dict
for i in range(npros):
S['mean'][i] = MMDS[0, i]
S['variance'][i] = MMDS[1, i]
# Print results to console
if print_to_console:
print_indices(model, S)
return S
| 17,265
|
def generate_parity_fixture(destination_dir):
"""
The parity fixture generation strategy is to start a ghuc client with
    existing fixtures copied into a temp datadir. Then a parity client
    is started and peered with the ghuc client.
"""
with contextlib.ExitStack() as stack:
ghuc_datadir = stack.enter_context(common.tempdir())
ghuc_port = common.get_open_port()
ghuc_ipc_path_dir = stack.enter_context(common.tempdir())
ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc')
ghuc_keystore_dir = os.path.join(ghuc_datadir, 'keystore')
common.ensure_path_exists(ghuc_keystore_dir)
ghuc_keyfile_path = os.path.join(ghuc_keystore_dir, common.KEYFILE_FILENAME)
with open(ghuc_keyfile_path, 'w') as keyfile:
keyfile.write(common.KEYFILE_DATA)
genesis_file_path = os.path.join(ghuc_datadir, 'genesis.json')
with open(genesis_file_path, 'w') as genesis_file:
genesis_file.write(json.dumps(common.GENESIS_DATA))
stack.enter_context(
common.get_ghuc_process(
common.get_ghuc_binary(),
ghuc_datadir,
genesis_file_path,
ghuc_ipc_path,
ghuc_port,
str(CHAIN_CONFIG['params']['networkID']))
)
# set up fixtures
common.wait_for_socket(ghuc_ipc_path)
webu_ghuc = Webu(Webu.IPCProvider(ghuc_ipc_path))
chain_data = go_happyuc.setup_chain_state(webu_ghuc)
fixture_block_count = webu_ghuc.eth.blockNumber
datadir = stack.enter_context(common.tempdir())
keystore_dir = os.path.join(datadir, 'keys')
os.makedirs(keystore_dir, exist_ok=True)
parity_keyfile_path = os.path.join(keystore_dir, common.KEYFILE_FILENAME)
with open(parity_keyfile_path, 'w') as keyfile:
keyfile.write(common.KEYFILE_DATA)
chain_config_file_path = os.path.join(datadir, 'chain_config.json')
with open(chain_config_file_path, 'w') as chain_file:
chain_file.write(json.dumps(CHAIN_CONFIG))
parity_ipc_path_dir = stack.enter_context(common.tempdir())
parity_ipc_path = os.path.join(parity_ipc_path_dir, 'jsonrpc.ipc')
parity_port = common.get_open_port()
parity_binary = get_parity_binary()
parity_proc = stack.enter_context(get_parity_process( # noqa: F841
parity_binary=parity_binary,
datadir=datadir,
ipc_path=parity_ipc_path,
keys_path=keystore_dir,
chain_config_file_path=chain_config_file_path,
parity_port=parity_port,
))
common.wait_for_socket(parity_ipc_path)
webu = Webu(Webu.IPCProvider(parity_ipc_path))
time.sleep(10)
connect_nodes(webu, webu_ghuc)
wait_for_chain_sync(webu, fixture_block_count)
static_data = {
'raw_txn_account': common.RAW_TXN_ACCOUNT,
'keyfile_pw': common.KEYFILE_PW,
}
pprint.pprint(merge(chain_data, static_data))
shutil.copytree(datadir, destination_dir)
parity_proc = stack.enter_context(parity_export_blocks_process( # noqa: F841
parity_binary=parity_binary,
datadir=destination_dir,
chain_config_file_path=os.path.join(destination_dir, 'chain_config.json'),
parity_port=parity_port,
))
| 17,266
|
def GetMorganFingerprint(mol, atomId=-1, radius=2, fpType='bv', nBits=2048, useFeatures=False,
**kwargs):
"""
Calculates the Morgan fingerprint with the environments of atomId removed.
Parameters:
mol -- the molecule of interest
radius -- the maximum radius
fpType -- the type of Morgan fingerprint: 'count' or 'bv'
      atomId -- the atom to remove the environments for (if -1, no environments are removed)
nBits -- the size of the bit vector (only for fpType = 'bv')
useFeatures -- if false: ConnectivityMorgan, if true: FeatureMorgan
any additional keyword arguments will be passed to the fingerprinting function.
"""
if fpType not in ['bv', 'count']:
raise ValueError("Unknown Morgan fingerprint type")
if not hasattr(mol, '_fpInfo'):
info = {}
# get the fingerprint
if fpType == 'bv':
molFp = rdMD.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits, useFeatures=useFeatures,
bitInfo=info, **kwargs)
else:
molFp = rdMD.GetMorganFingerprint(mol, radius, useFeatures=useFeatures, bitInfo=info,
**kwargs)
# construct the bit map
if fpType == 'bv':
bitmap = [DataStructs.ExplicitBitVect(nBits) for _ in range(mol.GetNumAtoms())]
else:
bitmap = [[] for _ in range(mol.GetNumAtoms())]
for bit, es in info.items():
for at1, rad in es:
if rad == 0: # for radius 0
if fpType == 'bv':
bitmap[at1][bit] = 1
else:
bitmap[at1].append(bit)
else: # for radii > 0
env = Chem.FindAtomEnvironmentOfRadiusN(mol, rad, at1)
amap = {}
Chem.PathToSubmol(mol, env, atomMap=amap)
for at2 in amap.keys():
if fpType == 'bv':
bitmap[at2][bit] = 1
else:
bitmap[at2].append(bit)
mol._fpInfo = (molFp, bitmap)
if atomId < 0:
return mol._fpInfo[0]
else: # remove the bits of atomId
if atomId >= mol.GetNumAtoms():
raise ValueError("atom index greater than number of atoms")
if len(mol._fpInfo) != 2:
raise ValueError("_fpInfo not set")
if fpType == 'bv':
molFp = mol._fpInfo[0] ^ mol._fpInfo[1][atomId] # xor
else: # count
molFp = copy.deepcopy(mol._fpInfo[0])
# delete the bits with atomId
for bit in mol._fpInfo[1][atomId]:
molFp[bit] -= 1
return molFp
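# Illustrative usage sketch (assumes RDKit is installed); shows how removing
# one atom's environments changes the bit-vector fingerprint. The SMILES and
# atom index are arbitrary examples.
from rdkit import Chem, DataStructs

example_mol = Chem.MolFromSmiles("c1ccccc1O")  # phenol
fp_full = GetMorganFingerprint(example_mol, radius=2, fpType='bv')
fp_without_atom0 = GetMorganFingerprint(example_mol, atomId=0, radius=2, fpType='bv')
# Similarity drops because the bits contributed by atom 0's environments are XORed out.
print(DataStructs.TanimotoSimilarity(fp_full, fp_without_atom0))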
| 17,267
|
def _rgb2lab(rgb):
"""Convert an RGB integer to Lab tuple"""
def xyzHelper(value):
"""Helper function for XYZ colourspace conversion"""
c = value / 255
if c > 0.0445:
c = (c + 0.055) / 1.055
c = math.pow(c, 2.4)
else:
c /= 12.92
c *= 100
return c
def labHelper(value):
"""Helper function for Lab colourspace conversion"""
c = value
if c > 0.008856:
c = math.pow(c, 1.0 / 3.0)
else:
c = (7.787 * c) + (16.0 / 116.0)
return c
# convert into XYZ colourspace
c1 = xyzHelper((rgb >> 16) & 0xFF)
c2 = xyzHelper((rgb >> 8) & 0xFF)
c3 = xyzHelper(rgb & 0xFF)
x = (c1 * 0.4124) + (c2 * 0.3576) + (c3 * 0.1805)
y = (c1 * 0.2126) + (c2 * 0.7152) + (c3 * 0.0722)
z = (c1 * 0.0193) + (c2 * 0.1192) + (c3 * 0.9505)
# convert into Lab colourspace
c1 = labHelper(x / 95.047)
c2 = labHelper(y / 100.0)
c3 = labHelper(z / 108.883)
l = (116.0 * c2) - 16
a = 500.0 * (c1 - c2)
b = 200.0 * (c2 - c3)
return LabColour(l, a, b)
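# Illustrative usage sketch: LabColour is assumed to be a simple namedtuple
# defined alongside this helper; it is redefined here only to keep the sketch
# self-contained.
from collections import namedtuple
LabColour = namedtuple("LabColour", ["l", "a", "b"])

lab_red = _rgb2lab(0xFF0000)  # pure red as a packed 0xRRGGBB integer
# Expect roughly L ~ 53, a ~ 80, b ~ 67 for sRGB red.
print(lab_red)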
| 17,268
|
def _resolve_dir(env_name, dflt_dir):
"""Resolve a directory given the override env var and
its default directory. And if '~' is used to indicate
the home directory, then expand that."""
folder = os.environ.get(env_name, dflt_dir)
if folder is not None:
return os.path.expanduser(folder)
return None
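# Illustrative usage sketch: the env var name and default path below are made
# up for the example.
import os

os.environ["MYAPP_DATA_DIR"] = "~/custom-data"
print(_resolve_dir("MYAPP_DATA_DIR", "~/.myapp"))  # home-expanded override
print(_resolve_dir("MYAPP_UNSET_DIR", None))       # falls back to None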
| 17,269
|
def test_start_r():
""" tests the restful endpoint: start """
# get web app
test_app = start_web_app()
# create some container
d = docker.from_env()
d.images.pull('alpine')
test_cont = d.containers.create('alpine')
# test start
r = test_app.post('/start', params={})
assert r.status == 200
r = test_app.post('/start', params={'id': test_cont.attrs['Id']})
assert r.status == 200
r = test_app.post('/start', params={'id': []})
assert r.status == 200
| 17,270
|
def biband_mask(n: int, kernel_size: int, device: torch.device, v=-1e9):
"""compute mask for local attention with kernel size.
Args:
n (torch.Tensor): the input length.
kernel_size (int): The local attention kernel size.
device (torch.device): transformer mask to the device.
Returns: torch.Tensor. shape: [n,n]. The masked locations are -1e9
and unmasked locations are 0.
"""
if kernel_size is None:
return None
half = kernel_size // 2
mask1 = torch.ones(n, n).triu(diagonal=-half)
mask2 = torch.ones(n, n).tril(diagonal=half)
mask = mask1 * mask2
mask = (1 - mask) * v
return mask.to(device)
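# Illustrative usage sketch: a length-5 sequence with kernel_size=3 keeps a
# band of width 1 on each side of the diagonal; everything else becomes -1e9.
import torch

example_mask = biband_mask(5, kernel_size=3, device=torch.device("cpu"))
print(example_mask)
# Row 0 is [0, 0, -1e9, -1e9, -1e9]: position 0 may attend only to itself and
# position 1. The mask is meant to be added to attention logits before softmax.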
| 17,271
|
def URDFBoundingObject(proto, link, level, boxCollision):
"""Write a bounding object (collision).
Args:
proto (file): proto file to write the information in.
link (Link): link object.
level (int): level in the tree.
boxCollision (bool): If True, the bounding objects are approximated using boxes.
"""
indent = ' '
boundingLevel = level
proto.write(level * indent + 'boundingObject ')
hasGroup = len(link.collision) > 1
if hasGroup:
proto.write('Group {\n')
proto.write((level + 1) * indent + 'children [\n')
boundingLevel = level + 2
for boundingObject in link.collision:
initialIndent = boundingLevel * indent if hasGroup else ''
if boundingObject.position != [0.0, 0.0, 0.0] or boundingObject.rotation[3] != 0.0:
proto.write(initialIndent + 'Transform {\n')
proto.write((boundingLevel + 1) * indent + 'translation %lf %lf %lf\n' % (boundingObject.position[0], boundingObject.position[1], boundingObject.position[2]))
proto.write((boundingLevel + 1) * indent + 'rotation %lf %lf %lf %lf\n' % (boundingObject.rotation[0], boundingObject.rotation[1], boundingObject.rotation[2], boundingObject.rotation[3]))
proto.write((boundingLevel + 1) * indent + 'children [\n')
boundingLevel = boundingLevel + 2
hasGroup = True
initialIndent = boundingLevel * indent
if boundingObject.geometry.box.x != 0:
proto.write(initialIndent + 'Box {\n')
proto.write((boundingLevel + 1) * indent + ' size %lf %lf %lf\n' % (boundingObject.geometry.box.x, boundingObject.geometry.box.y, boundingObject.geometry.box.z))
proto.write(boundingLevel * indent + '}\n')
elif boundingObject.geometry.cylinder.radius != 0 and boundingObject.geometry.cylinder.length != 0:
proto.write(initialIndent + 'Cylinder {\n')
proto.write((boundingLevel + 1) * indent + 'radius ' + str(boundingObject.geometry.cylinder.radius) + '\n')
proto.write((boundingLevel + 1) * indent + 'height ' + str(boundingObject.geometry.cylinder.length) + '\n')
proto.write(boundingLevel * indent + '}\n')
elif boundingObject.geometry.sphere.radius != 0:
proto.write(initialIndent + 'Sphere {\n')
proto.write((boundingLevel + 1) * indent + 'radius ' + str(boundingObject.geometry.sphere.radius) + '\n')
proto.write(boundingLevel * indent + '}\n')
elif boundingObject.geometry.trimesh.coord and boxCollision:
aabb = {
'minimum': {'x': float('inf'),
'y': float('inf'),
'z': float('inf')},
'maximum': {'x': float('-inf'),
'y': float('-inf'),
'z': float('-inf')}
}
for value in boundingObject.geometry.trimesh.coord:
x = value[0] * boundingObject.geometry.scale[0]
y = value[1] * boundingObject.geometry.scale[1]
z = value[2] * boundingObject.geometry.scale[2]
aabb['minimum']['x'] = min(aabb['minimum']['x'], x)
aabb['maximum']['x'] = max(aabb['maximum']['x'], x)
aabb['minimum']['y'] = min(aabb['minimum']['y'], y)
aabb['maximum']['y'] = max(aabb['maximum']['y'], y)
aabb['minimum']['z'] = min(aabb['minimum']['z'], z)
aabb['maximum']['z'] = max(aabb['maximum']['z'], z)
proto.write(initialIndent + 'Transform {\n')
proto.write((boundingLevel + 1) * indent + 'translation %f %f %f\n' % (
0.5 * (aabb['maximum']['x'] + aabb['minimum']['x']),
0.5 * (aabb['maximum']['y'] + aabb['minimum']['y']),
0.5 * (aabb['maximum']['z'] + aabb['minimum']['z']),))
proto.write((boundingLevel + 1) * indent + 'children [\n')
proto.write((boundingLevel + 2) * indent + 'Box {\n')
proto.write((boundingLevel + 3) * indent + 'size %f %f %f\n' % (
aabb['maximum']['x'] - aabb['minimum']['x'],
aabb['maximum']['y'] - aabb['minimum']['y'],
aabb['maximum']['z'] - aabb['minimum']['z'],))
proto.write((boundingLevel + 2) * indent + '}\n')
proto.write((boundingLevel + 1) * indent + ']\n')
proto.write(boundingLevel * indent + '}\n')
elif boundingObject.geometry.trimesh.coord:
proto.write(initialIndent + 'IndexedFaceSet {\n')
proto.write((boundingLevel + 1) * indent + 'coord Coordinate {\n')
proto.write((boundingLevel + 2) * indent + 'point [\n' + (boundingLevel + 3) * indent)
for value in boundingObject.geometry.trimesh.coord:
proto.write('%lf %lf %lf, ' % (value[0] * boundingObject.geometry.scale[0], value[1] * boundingObject.geometry.scale[1], value[2] * boundingObject.geometry.scale[2]))
proto.write('\n' + (boundingLevel + 2) * indent + ']\n')
proto.write((boundingLevel + 1) * indent + '}\n')
proto.write((boundingLevel + 1) * indent + 'coordIndex [\n' + (boundingLevel + 2) * indent)
if isinstance(boundingObject.geometry.trimesh.coordIndex[0], np.ndarray) or type(boundingObject.geometry.trimesh.coordIndex[0]) == list:
for value in boundingObject.geometry.trimesh.coordIndex:
if len(value) == 3:
proto.write('%d %d %d -1 ' % (value[0], value[1], value[2]))
elif isinstance(boundingObject.geometry.trimesh.coordIndex[0], np.int32):
                for i in range(len(boundingObject.geometry.trimesh.coordIndex) // 3):
proto.write('%d %d %d -1 ' % (boundingObject.geometry.trimesh.coordIndex[3 * i + 0], boundingObject.geometry.trimesh.coordIndex[3 * i + 1], boundingObject.geometry.trimesh.coordIndex[3 * i + 2]))
else:
print('Unsupported "%s" coordinate type' % type(boundingObject.geometry.trimesh.coordIndex[0]))
proto.write('\n' + (boundingLevel + 1) * indent + ']\n')
proto.write(boundingLevel * indent + '}\n')
else:
proto.write(initialIndent + 'Box{\n')
proto.write((boundingLevel + 1) * indent + ' size 0.01 0.01 0.01\n')
proto.write(boundingLevel * indent + '}\n')
if boundingLevel == level + 4:
proto.write((level + 3) * indent + ']\n')
proto.write((level + 2) * indent + '}\n')
boundingLevel = level + 2
if boundingLevel == level + 2:
proto.write((level + 1) * indent + ']\n')
proto.write(level * indent + '}\n')
| 17,272
|
def test_define_x(arterynetwork_def, param):
"""Test correct value for x.
:param arterynetwork_def: Artery network object
:param param: Config parameters
"""
an = arterynetwork_def
order, rc, qc, Ru, Rd, L, k1, k2, k3, rho, Re, nu, p0, R1, R2, CT,\
Nt, Nx, T, N_cycles, output_location, theta, Nt_store,\
N_cycles_store, store_area, store_pressure, q0, q_half = param
an.define_x()
for ip in an.range_parent_arteries:
i1, i2 = an.daughter_arteries(ip)
p, d1, d2 = an.arteries[ip], an.arteries[i1], an.arteries[i2]
x = an.initial_x(p, d1, d2)
for i in range(18):
assert(near(an.x[ip, i], x[i]))
| 17,273
|
def containsIfElse(node):
""" Checks whether the given node contains another if-else-statement """
if node.type == "if" and hasattr(node, "elsePart"):
return True
for child in node:
if child is None:
pass
# Blocks reset this if-else problem so we ignore them
# (and their content) for our scan.
elif child.type == "block":
pass
# Script blocks reset as well (protected by other function)
elif child.type == "script":
pass
elif containsIfElse(child):
return True
return False
| 17,274
|
def main():
"""this is test function"""
os.chdir( opt.outDir )
rdir = opt.tophatdir
rfile = opt.tophatsummary
d,report = readTophatMapResult(rdir,rfile)
# print(d)
w = open(opt.outfile,"w")
w.writelines(report)
# print(report)
| 17,275
|
def lstm_create_dataset(data_home, batch_size, repeat_num=1, training=True):
"""Data operations."""
ds.config.set_seed(1)
data_dir = os.path.join(data_home, "aclImdb_train.mindrecord0")
if not training:
data_dir = os.path.join(data_home, "aclImdb_test.mindrecord0")
data_set = ds.MindDataset(data_dir, columns_list=["feature", "label"], num_parallel_workers=4)
# apply map operations on images
data_set = data_set.shuffle(buffer_size=data_set.get_dataset_size())
data_set = data_set.batch(batch_size=batch_size, drop_remainder=True)
data_set = data_set.repeat(count=repeat_num)
return data_set
| 17,276
|
def get_gitlab_scripts(data):
"""GitLab is nice, as far as I can tell its files have a
flat hierarchy with many small job entities"""
def flatten_nested_string_lists(data):
"""helper function"""
if isinstance(data, str):
return data
elif isinstance(data, list):
return "\n".join([flatten_nested_string_lists(item) for item in data])
else:
raise ValueError(
f"unexpected data type {type(data)} in script section: {data}"
)
result = {}
for jobkey in data:
if not isinstance(data[jobkey], dict):
continue
for section in ["script", "before_script", "after_script"]:
if section in data[jobkey]:
script = data[jobkey][section]
result[f"{jobkey}/{section}"] = flatten_nested_string_lists(script)
return result
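# Illustrative usage sketch with a minimal, hand-written stand-in for a parsed
# .gitlab-ci.yml structure (the job name is made up).
example_ci = {
    "stages": ["test"],  # non-dict entries like this are skipped
    "unit-tests": {
        "stage": "test",
        "before_script": ["pip install -e ."],
        "script": ["pytest", "pytest --cov"],
    },
}
print(get_gitlab_scripts(example_ci))
# {'unit-tests/script': 'pytest\npytest --cov',
#  'unit-tests/before_script': 'pip install -e .'}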
| 17,277
|
def method(cls):
    """Adds the function as a method to the given class."""
    def _wrap(f):
        # setattr works on Python 3 and avoids writing to the read-only
        # class __dict__ proxy; plain functions become methods automatically.
        setattr(cls, f.__name__, f)
        return None
    return _wrap
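# Illustrative usage sketch of the decorator above (relies on the setattr-based
# rewrite); the Greeter class is made up for the example.
class Greeter:
    pass

@method(Greeter)
def hello(self, name):
    return f"hello {name}"

print(Greeter().hello("world"))  # -> "hello world"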
| 17,278
|
def maps_from_echse(conf):
"""Produces time series of rainfall maps from ECHSE input data and catchment shapefiles.
"""
# Read sub-catchment rainfall from file
    fromfile = np.loadtxt(conf["f_data"], dtype=str, delimiter="\t")
if len(fromfile)==2:
rowix = 1
elif len(fromfile)>2:
rowix = slice(1,len(fromfile))
else:
raise Exception("Data file is empty: %s" % conf["f_data"])
var = fromfile[rowix,1:].astype("f4")
dtimes = fromfile[rowix,0]
dtimes_file = np.array([wradlib.util.iso2datetime(dtime) for dtime in dtimes])
dtimesfromconf = wradlib.util.from_to(conf["tstart"], conf["tend"], conf["interval"])
dtimes = np.intersect1d(dtimes_file, dtimesfromconf)
if len(dtimes)==0:
print "No datetimes for mapping based on intersection of data file and config info."
return(0)
# objects = fromfile[0,1:]
cats = plt.genfromtxt(conf["f_coords"], delimiter="\t", names=True,
dtype=[('id', '|S20'), ('lat', 'f4'), ('lon', 'f4'),
('x', 'f4'), ('y', 'f4')])
mapx, mapy = wradlib.georef.reproject(cats["x"],cats["y"],
projection_source=conf["trg_proj"],
projection_target=conf["map_proj"])
# Read shapefile
dataset, inLayer = wradlib.io.open_shape(conf["f_cats_shp"])
polys, keys = wradlib.georef.get_shape_coordinates(inLayer, key='DN')
keys = np.array(keys)
# Preprocess polygons (remove minors, sort in same order as in coords file)
polys2 = []
for i, id in enumerate(cats["id"]):
keyix = np.where( keys==eval(id.strip("cats_")) )[0]
if len(keyix) > 1:
# More than one key matching? Find largest matching polygon
keyix = keyix[np.argmax([len(polys[key]) for key in keyix])]
else:
keyix = keyix[0]
poly = polys[keyix].copy()
if poly.ndim==1:
# Multi-Polygons - keep only the largest polygon
# (just for plotting - no harm done)
poly2 = poly[np.argmax([len(subpoly) for subpoly in poly])].copy()
else:
poly2 = poly.copy()
polys2.append ( wradlib.georef.reproject(poly2,
projection_source=conf["trg_proj"],
projection_target=conf["map_proj"]) )
colors = plt.cm.spectral(np.linspace(0,1,len(conf["levels"])))
mycmap, mynorm = from_levels_and_colors(conf["levels"], colors, extend="max")
plt.interactive(False)
for dtime in dtimes:
datestr = (dtime-dt.timedelta(seconds=conf["interval"])).strftime("%Y%m%d.png")
i = np.where(dtimes_file==dtime)[0][0]
        print(datestr, i)
figpath = os.path.join(conf["savefigs"], datestr)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, aspect="equal")
ax, coll = tl.vis.plot_cats(polys2, var[i], ax=ax, bbox=conf["bbox"], cmap=mycmap,
norm=mynorm, edgecolors='none')
cb = plt.colorbar(coll, ax=ax, ticks=conf["levels"], shrink=0.6)
cb.ax.tick_params(labelsize="small")
cb.set_label("(mm)")
plt.xlabel("Longitude")
plt.ylabel("Latitude")
tl.vis.plot_trmm_grid_lines(ax)
plt.text(conf["bbox"]["left"]+0.25, conf["bbox"]["top"]-0.25,
"%s\n%s to\n%s" % (conf["figtxtbody"],
(dtime-dt.timedelta(seconds=conf["interval"])).isoformat(" "),
dtime.isoformat(" ") ),
color="red", fontsize="small", verticalalignment="top")
plt.tight_layout()
plt.savefig(figpath)
plt.close()
plt.interactive(True)
| 17,279
|
def utility_assn(tfr_dfs):
"""Harvest a Utility-Date-State Association Table."""
# These aren't really "data" tables, and should not be searched for associations
non_data_dfs = [
"balancing_authority_eia861",
"service_territory_eia861",
]
# The dataframes from which to compile BA-Util-State associations
data_dfs = [tfr_dfs[table]
for table in tfr_dfs if table not in non_data_dfs]
logger.info("Building an EIA 861 Util-State-Date association table.")
tfr_dfs["utility_assn_eia861"] = _harvest_associations(
data_dfs, ["report_date", "utility_id_eia", "state"])
return tfr_dfs
| 17,280
|
def is_smtp_enabled(backend=None):
"""
Check if the current backend is SMTP based.
"""
if backend is None:
backend = get_mail_backend()
return backend not in settings.SENTRY_SMTP_DISABLED_BACKENDS
| 17,281
|
def validate_isotypes(form, field):
"""
Validates that isotypes are resolved.
"""
try:
df = form.trait_data.processed_data
except AttributeError:
return
try:
unknown_strains = df.STRAIN[df.ISOTYPE.isnull()]
if unknown_strains.any():
unknown_strains = unknown_strains.values
form.trait_data.error_items.extend(unknown_strains)
raise ValidationError(f"Unknown isotype for the following strain(s): {unknown_strains}")
except AttributeError:
pass
| 17,282
|
def test_th_ch():
"""
Run `python -m pytest ./day-02/part-1/th-ch.py` to test the submission.
"""
assert (
ThChSubmission().run(
"""
forward 5
down 5
forward 8
up 3
down 8
forward 2
""".strip()
)
== 150
)
| 17,283
|
def get_presentation_requests_received(tenant: str, state: str = ''):
"""
state: must be in ['propsal-sent', 'proposal-received', 'request-sent', 'request-received', 'presentation-sent', 'presentation-received', 'done', 'abondoned']
"""
possible_states = ['', 'propsal-sent', 'proposal-received', 'request-sent', 'request-received', 'presentation-sent', 'presentation-received', 'done', 'abondoned']
if state not in possible_states:
raise HTTPException(400, "state must be in: " + possible_states)
params = None
if state:
params = {
'state': state,
}
j = requests.get(ACAPY_API + '/present-proof-2.0/records', params=params, headers=prepare_headers(tenant=tenant)).json()
return j['results']
| 17,284
|
def generate_round():
"""
    Generate a round.
    Returns:
        question: The question shown to the user
        answer: The correct answer to the question
"""
total_num, random_num = generate_numbers()
question = " ".join(total_num)
answer = str(random_num)
return question, answer
| 17,285
|
def get_basic_activity():
"""
A basic set of activity records for a 'Cohort 1' and CoreParticipant participant.
"""
return [
{'timestamp': datetime(2018, 3, 6, 0, 0), 'group': 'Profile', 'group_id': 1,
'event': p_event.EHRFirstReceived},
{'timestamp': datetime(2018, 3, 6, 20, 20, 57), 'group': 'Profile', 'group_id': 1,
'event': p_event.SignupTime},
{'timestamp': datetime(2018, 3, 6, 20, 35, 12), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.ConsentPII, 'answer': 'ConsentPermission_Yes',
'answer_id': 767},
{'timestamp': datetime(2018, 3, 6, 20, 43, 50), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.EHRConsentPII, 'answer': 'ConsentPermission_Yes',
'answer_id': 767},
{'timestamp': datetime(2018, 3, 6, 20, 46, 48), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.TheBasics, 'ConsentAnswer': None},
{'timestamp': datetime(2018, 3, 6, 20, 49, 0), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.OverallHealth, 'ConsentAnswer': None},
{'timestamp': datetime(2018, 3, 6, 20, 51, 6), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.Lifestyle, 'ConsentAnswer': None},
{'timestamp': datetime(2018, 3, 28, 20, 18, 59), 'group': 'Biobank', 'group_id': 20,
'event': p_event.BiobankConfirmed, 'dna_tests': 3, 'basline_tests': 4},
{'timestamp': datetime(2018, 5, 17, 2, 11, 37), 'group': 'Biobank', 'group_id': 20,
'event': p_event.BiobankOrder, 'dna_tests': 0, 'basline_tests': 0},
# ROC-295: duplicate record, manually cancelled
{'timestamp': datetime(2018, 5, 21, 18, 9, 8), 'group': 'Profile', 'group_id': 1,
'event': p_event.PhysicalMeasurements, 'status': 'CANCELLED', 'status_id': 2},
{'timestamp': datetime(2018, 5, 21, 18, 9, 12), 'group': 'Profile', 'group_id': 1,
'event': p_event.PhysicalMeasurements, 'status': 'COMPLETED', 'status_id': 1},
{'timestamp': datetime(2019, 6, 13, 0, 0), 'group': 'Profile', 'group_id': 1,
'event': p_event.EHRLastReceived}
]
| 17,286
|
def bad_multi_examples_per_input_estimator_misaligned_input_refs(
export_path, eval_export_path):
"""Like the above (good) estimator, but the input_refs is misaligned."""
estimator = tf.estimator.Estimator(model_fn=_model_fn)
estimator.train(input_fn=_train_input_fn, steps=1)
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=_serving_input_receiver_fn,
eval_input_receiver_fn=_bad_eval_input_receiver_fn_misaligned_input_refs,
export_path=export_path,
eval_export_path=eval_export_path)
| 17,287
|
def page_is_dir(path) -> bool:
"""
Tests whether a path corresponds to a directory
arguments:
path -- a path to a file
returns:
True if the path represents a directory else False
"""
return os.path.isdir(path)
| 17,288
|
def read_configuration(dirname_f: str) -> dict:
"""
:param dirname_f: path to the project ending with .../cameras_robonomics
:type dirname_f: str
:return: dictionary containing all the configurations
:rtype: dict
    Reads the config file containing all the required data, such as file paths, Robonomics parameters (remote wss, seed),
    camera parameters (ip, login, password, port), etc.
"""
config_path = dirname_f + "/config/config.yaml"
logging.debug(config_path)
try:
with open(config_path) as f:
content = f.read()
            config_f = yaml.load(content, Loader=yaml.FullLoader)
            logging.debug(f"Configuration dict: {content}")
            return config_f
    except Exception as e:
        logging.error("Error in configuration file!")
        logging.error(e)
        exit()
| 17,289
|
def caption_example(image):
"""Convert image caption data into an Example proto.
Args:
image: A ImageMetadata instance.
Returns:
example: An Example proto with serialized tensor data.
"""
# Collect image object information from metadata.
image_features, positions = read_object(image.objects, image.image_id)
# Serialize multi-dimensional tensor data.
captions_proto = tf.make_tensor_proto(np.array(image.captions))
features_proto = tf.make_tensor_proto(image_features)
positions_proto = tf.make_tensor_proto(positions)
# Create final features dict.
features = dict(
image_id=int64_feature(image.image_id),
captions=bytes_feature(captions_proto.SerializeToString()),
object_features=bytes_feature(features_proto.SerializeToString()),
object_positions=bytes_feature(positions_proto.SerializeToString()))
return tf.train.Example(features=tf.train.Features(feature=features))
| 17,290
|
def test_env_vars():
"""test if the critical env variables are available in the environment"""
CELERY_BROKER_URL = get_env_var('CELERY_BROKER_URL')
DB_URL = get_env_var('SQLALCHEMY_DATABASE_URI')
DB_URL_TEST = get_env_var('SQLALCHEMY_DATABASE_URI_TEST')
SECRET_KEY = get_env_var('MPORTER_SECRET')
MAILGUN_KEY = get_env_var('MAILGUN_KEY')
MAILGUN_SANDBOX = get_env_var('MAILGUN_SANDBOX')
assert CELERY_BROKER_URL is not None
assert DB_URL is not None
assert DB_URL_TEST is not None
assert SECRET_KEY is not None
assert MAILGUN_KEY is not None
assert MAILGUN_SANDBOX is not None
| 17,291
|
def GetAttributeTableByFid(fileshp, layername=0, fid=0):
"""
    Return a dict with the attribute values and WKT geometry of the feature with the given fid.
"""
res = {}
dataset = ogr.OpenShared(fileshp)
if dataset:
layer = dataset.GetLayer(layername)
feature = layer.GetFeature(fid)
geom = feature.GetGeometryRef()
res["geometry"] = geom.ExportToWkt()
layerDefinition = layer.GetLayerDefn()
for j in range(layerDefinition.GetFieldCount()):
fieldname = layerDefinition.GetFieldDefn(j).GetName()
res[fieldname] = feature.GetField(j)
dataset = None
return res
| 17,292
|
def get_root_folder_id(db, tree_identifier, linked_to, link_id):
"""Get id of the root folder for given data category and profile or user group
Args:
db (object): The db object
tree_identifier (str): The identifier of the tree
linked_to (str): ['profile'|'group']
link_id (int): The profile id or the group id (depending on linked_to)
Returns:
The id of the root folder.
"""
if linked_to not in ['profile', 'group']:
raise MSGException(Error.CORE_INVALID_PARAMETER,
"Incorrect 'linked_to' value.")
root_folder_id = None
SQL_PROFILE = """SELECT root_folder_id
FROM data_profile_tree
WHERE profile_id=? AND tree_identifier=?"""
SQL_USER_GROUP = """SELECT root_folder_id
FROM data_user_group_tree
WHERE user_group_id=? AND tree_identifier=?"""
sql = SQL_PROFILE if linked_to == 'profile' else SQL_USER_GROUP
res = db.execute(sql,
(link_id, tree_identifier)).fetch_one()
if res:
root_folder_id = res['root_folder_id']
return root_folder_id
| 17,293
|
def check_normalized_names(id_series, season_number, episode_number):
"""Check normalized names. Print the difference between IMDb and transcripts
Parameters
----------
id_series : `str`
Id of the series.
season_number : `str`
The desired season number. If None, all seasons are processed.
episode_number : `str`
The desired episode_number. If None, all episodes are processed.
"""
# Plumcot database object
db = Plumcot()
# Retrieve IMDB normalized character names
imdb_chars_series = db.get_characters(id_series, season_number,
episode_number)
# Retrieve transcripts normalized character names
    trans_chars_series = db.get_transcript_characters(id_series, season_number,
                                                      episode_number, extension=".txt")
    for episode_uri in imdb_chars_series:
        print("\n" + episode_uri)
        imdb = imdb_chars_series.get(episode_uri)
        if imdb is None:
            warnings.warn(f"{episode_uri} is not in IMDb, jumping to next episode")
            continue
        else:
            imdb = set(imdb)
        transcripts = trans_chars_series.get(episode_uri)
        if transcripts is None:
            warnings.warn(f"{episode_uri} is not in the transcripts, jumping to next episode")
            continue
        else:
            transcripts = set(char for char in transcripts if "#unknown#" not in char and "@" not in char)
        print("In imdb but not in transcripts:")
        print(imdb - transcripts)
        print("In transcripts but not in imdb (not counting #unknown# and alice@bob):")
        print(transcripts - imdb)
| 17,294
|
def str_cell(cell):
"""Get a nice string of given Cell statistics."""
result = f"-----Cell ({cell.x}, {cell.y})-----\n"
result += f"sugar: {cell.sugar}\n"
result += f"max sugar: {cell.capacity}\n"
result += f"height/level: {cell.level}\n"
result += f"Occupied by Agent {cell.agent.id if cell.agent else None}\n"
return result
| 17,295
|
def get_idx_pair(mu):
"""get perturbation position"""
idx = np.where(mu != 0)[0]
idx = [idx[0], idx[-1]]
return idx
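# Illustrative usage sketch: first and last non-zero positions of a
# perturbation vector.
import numpy as np

print(get_idx_pair(np.array([0.0, 0.0, 3.0, 1.0, 0.0, 2.0, 0.0])))  # [2, 5]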
| 17,296
|
def zeeman_transitions(ju, jl, type):
""" Find possible mu and ml for valid ju and jl for a given transistion
polarization
Parameters:
ju (scalar): Upper level J
jl (scalar): Lower level J
type (string): "Pi", "S+", or "S-" for relevant polarization type
Returns:
tuple: MU, ML arrays for given Js and polarization type
"""
    assert np.isscalar(ju) and np.isscalar(jl), "non-scalar J not supported"
assert type.lower() in ["pi", "s+", "s-"], "unknown transition type"
assert ju - jl in [-1, 0, 1], "delta-J should belong to {-1, 0, 1}"
assert ju > 0 and jl >= 0, "only for positive ju and non-negative for jl"
if type.lower() == "pi":
J = min(ju, jl)
return np.arange(-J, J + 1), np.arange(-J, J + 1)
elif type.lower() == "s+":
if ju < jl:
return np.arange(-ju, ju+1), np.arange(-ju+1, ju+2)
elif ju == jl:
return np.arange(-ju, ju), np.arange(-ju+1, ju+1)
else:
return np.arange(-ju, jl), np.arange(-ju+1, jl+1)
elif type.lower() == "s-":
if ju < jl:
return np.arange(-ju, ju+1), np.arange(-jl, ju)
elif ju == jl:
return np.arange(-ju+1, ju+1), np.arange(-ju, ju)
else:
return np.arange(-ju+2, ju+1), np.arange(-ju+1, ju)
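# Illustrative usage sketch: for a J=1 -> J=1 line, the "S+" branch pairs
# upper-level m values {-1, 0} with lower-level m values {0, 1}.
example_mu, example_ml = zeeman_transitions(1, 1, "S+")
print(example_mu)  # [-1  0]
print(example_ml)  # [0 1]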
| 17,297
|
def get_reachable_nodes(node):
"""
returns a list with all the nodes from the tree with root *node*
"""
ret = []
stack = [node]
while len(stack) > 0:
cur = stack.pop()
ret.append(cur)
for c in cur.get_children():
stack.append(c)
return ret
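# Illustrative usage sketch with a minimal stand-in node class exposing
# get_children(); the real node type is whatever the surrounding code uses.
class _DemoNode:
    def __init__(self, name, children=()):
        self.name = name
        self._children = list(children)

    def get_children(self):
        return self._children

demo_root = _DemoNode("root", [_DemoNode("a"), _DemoNode("b")])
print([n.name for n in get_reachable_nodes(demo_root)])  # ['root', 'b', 'a']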
| 17,298
|
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams
| 17,299
|