content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def load_data_test(test_path, diagnoses_list, baseline=True, multi_cohort=False):
    """
    Load data not managed by split_manager.

    Args:
        test_path (str): path to the test TSV files / split directory / TSV file for multi-cohort
        diagnoses_list (List[str]): list of the diagnoses wanted in case of split_dir or multi-cohort
        baseline (bool): If True baseline sessions only used (split_dir handling only).
        multi_cohort (bool): If True considers multi-cohort setting.

    Returns:
        pandas.DataFrame: test data with an extra "cohort" column (the cohort
        name in multi-cohort mode, "single" otherwise).

    Raises:
        ValueError: if the path does not match the multi_cohort flag, or if
        the multi-cohort TSV does not cover all requested diagnoses.
    """
    # TODO: computes baseline sessions on-the-fly to manager TSV file case
    if multi_cohort:
        if not test_path.endswith(".tsv"):
            raise ValueError(
                "If multi_cohort is given, the tsv_path argument should be a path to a TSV file."
            )
        else:
            # Multi-cohort TSV: one row per cohort with columns
            # "cohort", "path" and comma-separated "diagnoses".
            tsv_df = pd.read_csv(test_path, sep="\t")
            check_multi_cohort_tsv(tsv_df, "labels")
            test_df = pd.DataFrame()
            found_diagnoses = set()
            for idx in range(len(tsv_df)):
                cohort_name = tsv_df.loc[idx, "cohort"]
                cohort_path = tsv_df.loc[idx, "path"]
                cohort_diagnoses = (
                    tsv_df.loc[idx, "diagnoses"].replace(" ", "").split(",")
                )
                # Only load cohorts that contribute at least one wanted diagnosis.
                if bool(set(cohort_diagnoses) & set(diagnoses_list)):
                    target_diagnoses = list(set(cohort_diagnoses) & set(diagnoses_list))
                    cohort_test_df = load_data_test_single(
                        cohort_path, target_diagnoses, baseline=baseline
                    )
                    cohort_test_df["cohort"] = cohort_name
                    test_df = pd.concat([test_df, cohort_test_df])
                    found_diagnoses = found_diagnoses | (
                        set(cohort_diagnoses) & set(diagnoses_list)
                    )
            # Every requested diagnosis must be found in at least one cohort.
            if found_diagnoses != set(diagnoses_list):
                raise ValueError(
                    f"The diagnoses found in the multi cohort dataset {found_diagnoses} "
                    f"do not correspond to the diagnoses wanted {set(diagnoses_list)}."
                )
            test_df.reset_index(inplace=True, drop=True)
    else:
        if test_path.endswith(".tsv"):
            # Guard against accidentally passing a multi-cohort TSV without the flag.
            tsv_df = pd.read_csv(test_path, sep="\t")
            multi_col = {"cohort", "path"}
            if multi_col.issubset(tsv_df.columns.values):
                raise ValueError(
                    "To use multi-cohort framework, please add --multi_cohort flag."
                )
        test_df = load_data_test_single(test_path, diagnoses_list, baseline=baseline)
        test_df["cohort"] = "single"
    return test_df
def pratn_writer(clf, y_true, y_prob, eval_folder, i=''):
    """
    This function will produce a plot of precision & recall vs. percent
    population given a classifier's name or the object itself, true labels
    and predicted scores for each fold, and a folder to save it in.
    Can be used stand-alone. Supports single fold.
    clf - string or classifier object
    y_true - list of numpy arrays, each array contains true labels
             for different folds
    y_prob - list of numpy arrays, each array contains predicted scores
             for different folds
    eval_folder - string, folder to save evaluations
                  images folder will be created in this folder containing
                  the image
    i - int, allows for function to be used with a list of classifiers
        for unique names, is not required
    """
    # Promote single-fold inputs to a one-element list of folds.
    if type(y_true[0]) is not np.ndarray: y_true = [y_true]
    if type(y_prob[0]) is not np.ndarray: y_prob = [y_prob]
    img_dir = eval_folder+'/images/'
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    # Classifier name = repr up to the first '(' plus the optional suffix i.
    # NOTE: the loop below reuses the name `i` as the fold index; that is
    # harmless only because clf_name is computed before the loop starts.
    clf_name = str(clf)[:str(clf).index('(')]+str(i)
    enum_list = range(0, len(y_true))
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('percent of population')
    ax1.set_ylabel('precision', color='b')
    ax2 = ax1.twinx()
    ax2.set_ylabel('recall', color='r')
    mean_prec = 0.0
    mean_rec = 0.0
    # Common x-axis grid (fraction of population) used to average over folds.
    mean_pct_above = np.linspace(0,1,1000)
    for i in enum_list:
        prec, rec, thres = metrics.precision_recall_curve(y_true[i], y_prob[i])
        # precision_recall_curve returns one more precision/recall point than
        # thresholds; drop the trailing point so all arrays align.
        prec = prec[:-1]
        rec = rec[:-1]
        # Subsample to at most ~1000 points, then reverse so thresholds ascend.
        skip_size = int(thres.shape[0]/1000.0)
        if skip_size == 0: skip_size = 1
        plotting_thres = thres[0::skip_size][::-1]
        plotting_prec = prec[0::skip_size][::-1]
        plotting_rec = rec[0::skip_size][::-1]
        how_many = float(len(y_true[i]))
        # Fraction of the population scored at or above each threshold.
        pct_above = [(y_prob[i][y_prob[i] >= value].shape[0])/how_many
                     for value in plotting_thres]
        pct_above = np.array(pct_above)
        # Interpolate this fold's curves onto the common grid and accumulate.
        # (assumes `interp` is numpy-style interp(x, xp, fp) — TODO confirm import)
        mean_prec += interp(mean_pct_above, pct_above, plotting_prec)
        mean_rec += interp(mean_pct_above, pct_above, plotting_rec)
        #ax1.plot(pct_above, plotting_prec, 'b')
        #ax2.plot(pct_above, plotting_rec, 'r')
    mean_prec /= len(y_true)
    mean_rec /= len(y_true)
    # At 100% of the population: precision = base rate, recall = 1.
    mean_prec[-1] = np.mean([np.mean(enu) for enu in y_true])
    mean_rec[-1] = 1.0
    ax1.plot(mean_pct_above, mean_prec, 'b')
    ax2.plot(mean_pct_above, mean_rec, 'r')
    plt.title('Precision, Recall vs % Population')
    plt.savefig(img_dir+'PRATN_Curve_'+clf_name+'.png')
def render_thread(gui):
    """
    This thread runs the gui.render function thirty times per
    second and stops the gui if any errors occur.
    """
    # Loop for as long as the gui object is truthy.
    while gui:
        sleep(SLEEP_TIME)
        try:
            # Update the window size
            gui.screen_size = gui.screen.getmaxyx()
            gui.screen_height = gui.screen_size[0]
            gui.screen_width = gui.screen_size[1]
            # Run the render functions to populate pixels
            gui.pre_render()
            gui.render()
            gui.post_render()
            # Output the pixels to the screen.
            for y in range(gui.screen_height):
                for x in range(gui.screen_width):
                    pixel = gui.pixels.get((y, x), BLANK_PIXEL)
                    try:
                        gui.screen.addstr(y, x, *pixel)
                    except Exception as e:
                        # Ignore per-cell draw failures (curses raises when
                        # writing the bottom-right cell — presumably the
                        # reason for this blanket ignore; TODO confirm).
                        pass
            # Zero the pixels
            gui.pixels = {}
        except BaseException as e:
            # Any other failure (including KeyboardInterrupt/SystemExit,
            # hence BaseException) stops the gui and is logged.
            gui.stop()
            gui.log("Render thread exception, {}: {}".format(type(e), e))
def load_augmentations_config(
    placeholder_params: dict, path_to_config: str = "configs/augmentations.json"
) -> dict:
    """Load the json config with params of all transforms.

    Args:
        placeholder_params (dict): dict with values of placeholders
        path_to_config (str): path to the json config file

    Returns:
        dict: mapping from transform name to its list of params, with every
        placeholder substituted via ``fill_placeholders``.
    """
    with open(path_to_config, "r") as config_file:
        augmentations = json.load(config_file)
    for name, params in augmentations.items():
        # BUGFIX: assign the substituted params back into the dict — the
        # original code rebound the loop variable only, so the result of
        # fill_placeholders() was silently discarded.
        augmentations[name] = [
            fill_placeholders(param, placeholder_params) for param in params
        ]
    return augmentations
def simplify_mask(mask, r_ids, r_p_zip, replace=True):
    """Simplify the mask by replacing all `region_ids` with their `root_parent_id`

    The `region_ids` and `parent_ids` are paired from which a tree is inferred. The root
    of this tree is value `0`. `region_ids` that have a corresponding `parent_id` of 0
    are penultimate roots. This method replaces each `region_id` with its penultimate `parent_id`.
    It *simplifies* the volume.

    :param mask: a 3D volume
    :type mask: `numpy.array`
    :param r_ids: sequence of `region_id`
    :type r_ids: iterable
    :param r_p_zip: sequence of 2-tuples with `region_id` and `parent_id`
    :type r_p_zip: iterable
    :param bool replace: if `True` then the returned `mask` will have replaced values;
        `False` returns the `mask` unchanged (useful for running tests to speed things up)
    :return: `simplified_mask`, `segment_ids`
    :rtype: tuple
    """
    # Group region_ids by their root parent_id.
    root_parent_id_group = dict()
    for r in r_ids:
        p = get_root(r_p_zip, r)
        root_parent_id_group.setdefault(p, []).append(r)
    if replace:
        # Start from an all-zero volume (0 is the tree root value).
        # BUGFIX: the original built the result via exec() of generated
        # "simplified_mask += ..." statements; in Python 3, exec() cannot
        # rebind a function's local variable, so the result stayed 0 (it had
        # also been rebound from an ndarray to the int 0 before the loop).
        # numpy.isin performs the same grouped array-wide comparison directly.
        simplified_mask = numpy.zeros(mask.shape, dtype=int)
        for parent_id, region_id_list in root_parent_id_group.items():
            # Because parent_ids are non-overlapping (no region_id has two
            # parent_ids) we can sum contributions instead of assigning.
            simplified_mask += numpy.isin(mask, region_id_list) * parent_id
    else:
        simplified_mask = mask
    segment_ids = root_parent_id_group.keys()
    # segment_colors = [r_c_zip[s] for s in segment_ids]
    return simplified_mask, segment_ids
def _write_single_annotation(doc: str, annotation: str, values, append: bool, root: Path, allow_newlines: bool = False):
    """Write an annotation to a file.

    One value is written per line. Span values are serialized as
    "start[.subpos]-end[.subpos]"; None becomes an empty line; other values
    have their line breaks escaped or stripped depending on allow_newlines.
    """
    # A span annotation has no attribute part — presumably what the second
    # element of split_annotation() encodes; TODO confirm against its docs.
    is_span = not split_annotation(annotation)[1]
    if is_span:
        # Make sure that spans are sorted
        assert all(values[i] <= values[i + 1] for i in range(len(values) - 1)), "Annotation spans must be sorted."
    file_path = get_annotation_path(doc, annotation, root)
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    mode = "a" if append else "w"
    with open(file_path, mode) as f:
        ctr = 0
        for value in values:
            if value is None:
                value = ""
            elif is_span:
                # A span endpoint may itself be a (position, subposition) tuple.
                start, end = value
                start_subpos, end_subpos = None, None
                if isinstance(start, tuple):
                    start, start_subpos = start
                if isinstance(end, tuple):
                    end, end_subpos = end
                start_subpos = ".{}".format(start_subpos) if start_subpos is not None else ""
                end_subpos = ".{}".format(end_subpos) if end_subpos is not None else ""
                value = "{}{}-{}{}".format(start, start_subpos, end, end_subpos)
            elif allow_newlines:
                # Replace line breaks with "\n"
                value = value.replace("\\", r"\\").replace("\n", r"\n").replace("\r", "")
            else:
                # Remove line breaks entirely
                value = value.replace("\n", "").replace("\r", "")
            print(value, file=f)
            ctr += 1
    # Update file modification time even if nothing was written
    os.utime(file_path, None)
    _log.info(f"Wrote {ctr} items: {doc + '/' if doc else ''}{annotation}")
def getStops(ll):
    """
    getStops
    Fetch nearby transit stops for a lat/long pair and format them.
    :param: ll { lat : float, lng : float }
    :return: list of "ID: <locid>, <dir> on <desc>" strings,
             or None when ll is falsy or the HTTP request fails
    """
    if not ll:
        return None
    url = "%sstops?appID=%s&ll=%s,%s" % (BASE_URI, APP_ID, ll['lat'], ll['lng'])
    try:
        connection = urlopen(url)
    except HTTPError:
        return None
    document = parseString(connection.read())
    return [
        "ID: %s, %s on %s" % (
            element.getAttribute("locid"),
            element.getAttribute("dir"),
            element.getAttribute("desc"),
        )
        for element in document.getElementsByTagName("location")
    ]
def rws(log_joint, observed, latent, axis=None):
    """
    Implements Reweighted Wake-sleep from (Bornschein, 2015). This works for
    both continuous and discrete latent `StochasticTensor` s.

    :param log_joint: A function that accepts a dictionary argument of
        ``(string, Tensor)`` pairs, which are mappings from all
        `StochasticTensor` names in the model to their observed values. The
        function should return a Tensor, representing the log joint likelihood
        of the model.
    :param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
        names of observed `StochasticTensor` s to their values.
    :param latent: A dictionary of ``(string, (Tensor, Tensor))``) pairs.
        Mapping from names of latent `StochasticTensor` s to their samples and
        log probabilities.
    :param axis: The sample dimension(s) to reduce when computing the
        outer expectation in log likelihood and in the cost for adapting
        proposals. If `None`, no dimension is reduced.
    :return: A Tensor. The surrogate cost to minimize.
    :return: A Tensor. Estimated log likelihoods.
    """
    warnings.warn("rws(): This function will be deprecated in the coming "
                  "version (0.3.1). Variational utilities are moving to "
                  "`zs.variational`. Features of the original rws() can be "
                  "achieved by two new variational objectives. For learning "
                  "model parameters, please use the importance weighted "
                  "objective: `zs.variational.iw_objective()`. For adapting "
                  "the proposal, the new rws gradient estimator can be "
                  "accessed by first constructing the inclusive KL divergence "
                  "objective using `zs.variational.klpq` and then calling "
                  "its rws() method.", category=FutureWarning)
    # Split the latent dict into samples and their log densities.
    latent_k, latent_v = map(list, zip(*six.iteritems(latent)))
    latent_outputs = dict(zip(latent_k, map(lambda x: x[0], latent_v)))
    latent_logpdfs = map(lambda x: x[1], latent_v)
    joint_obs = merge_dicts(observed, latent_outputs)
    log_joint_value = log_joint(joint_obs)
    entropy = -sum(latent_logpdfs)
    # Unnormalized log importance weights: log p(x, z) - log q(z).
    log_w = log_joint_value + entropy
    if axis is not None:
        # Normalize weights via the log-sum-exp trick; stop_gradient makes
        # them constants w.r.t. the parameters (RWS treats them as weights).
        log_w_max = tf.reduce_max(log_w, axis, keep_dims=True)
        w_u = tf.exp(log_w - log_w_max)
        w_tilde = tf.stop_gradient(
            w_u / tf.reduce_sum(w_u, axis, keep_dims=True))
        log_likelihood = log_mean_exp(log_w, axis)
        # Surrogate costs whose gradients are the RWS wake/sleep updates.
        fake_log_joint_cost = -tf.reduce_sum(w_tilde * log_joint_value, axis)
        fake_proposal_cost = tf.reduce_sum(w_tilde * entropy, axis)
        cost = fake_log_joint_cost + fake_proposal_cost
    else:
        cost = log_w
        log_likelihood = log_w
    return cost, log_likelihood
def fit_svr(X, y, kernel: str = 'rbf') -> LinearSVR:
    """
    Fit support vector regression for the given input X and expected labels y.
    :param X: Feature data
    :param y: Labels that should be correctly computed
    :param kernel: type of kernel used by the SVR {‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’}, default=’rbf’
    :return: SVR that is fitted to X and y
    """
    # NOTE(review): the `kernel` argument is silently ignored — LinearSVR has
    # no kernel parameter (it is always linear). Either switch to
    # sklearn.svm.SVR(kernel=kernel) or drop the parameter; confirm with callers.
    svr = LinearSVR()
    svr.fit(X=X, y=y)
    return svr
def test_g_month_day_min_inclusive005_1247_g_month_day_min_inclusive005_1247_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minInclusive and value=-
    -01-01 and facet=maxExclusive and value=- -10-01) and document value=-
    -03-15
    """
    # Auto-generated XSD facet regression test: validate the instance
    # document against bindings generated from the schema.
    assert_bindings(
        schema="msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive005.xsd",
        instance="msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive005.xml",
        class_name="Test",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def process_get_namespaces_from_accounts(
    status: int,
    json: list,
    network_type: models.NetworkType,
) -> typing.Sequence[models.NamespaceInfo]:
    """
    Process the "/account/namespaces" HTTP response.

    :param status: Status code for HTTP response.
    :param json: JSON data for response message.
    :param network_type: Network type used to build each NamespaceInfo.
    :return: Sequence of NamespaceInfo built from each DTO in the response.
    """
    # NOTE: assert is stripped under `python -O`; non-200 responses would
    # then pass through silently.
    assert status == 200
    return [models.NamespaceInfo.create_from_dto(i, network_type) for i in json]
def plot_time_shifts(filename, data, greens, component, misfit, stations, origin, source,
    backend='matplotlib'):
    """ For a given component, creates a "spider plot" showing how
    time shifts vary geographically

    :param filename: output image path
    :param data: observed dataset
    :param greens: Green's functions container (selected for `origin`)
    :param component: waveform component whose time shifts are plotted
    :param misfit: misfit object used when preparing synthetics
    :param stations: station list; indexed by the collected time-shift indices
    :param origin: event origin
    :param source: source used to generate synthetics
    :param backend: only 'matplotlib' is implemented; 'gmt' raises
    """
    if backend.lower()=='gmt':
        raise NotImplementedError
    # prepare synthetics
    greens = greens.select(origin)
    _set_components(data, greens)
    synthetics, _ = _prepare_synthetics(data, greens, misfit, source)
    # collect time shifts
    time_shifts, indices = _collect_time_shifts(synthetics, component)
    # Nothing to plot if the component never appears; warn instead of failing.
    if len(indices)==0:
        warn("Component not present in dataset")
        return
    _save_figure(filename,
        time_shifts, [stations[_i] for _i in indices], origin, source)
def _initialize_pydataverse(DATAVERSE_URL: Optional[str], API_TOKEN: Optional[str]):
    """Set up pyDataverse native and data-access APIs for upload.

    Each argument falls back to its environment variable (DATAVERSE_URL /
    DATAVERSE_API_TOKEN) when None; a missing variable raises the
    corresponding project exception.
    """
    def _resolve(value, env_key, error):
        # Prefer the explicit argument; otherwise read the environment.
        if value is not None:
            return value
        try:
            return os.environ[env_key]
        except KeyError:
            raise error

    DATAVERSE_URL = _resolve(DATAVERSE_URL, "DATAVERSE_URL", MissingURLException)
    API_TOKEN = _resolve(API_TOKEN, "DATAVERSE_API_TOKEN", MissingCredentialsException)
    return NativeApi(DATAVERSE_URL, API_TOKEN), DataAccessApi(DATAVERSE_URL, API_TOKEN)
def _hexsplit(string):
""" Split a hex string into 8-bit/2-hex-character groupings separated by spaces"""
return ' '.join([string[i:i+2] for i in range(0, len(string), 2)]) | 5,333,613 |
def get_analysis_id(analysis_id):
    """
    Get the new analysis id.

    :param analysis_id: analysis_index DataFrame (existing ids)
    :return: new analysis_id — 0 when empty, otherwise max existing id + 1
    """
    # Guard clause: an empty index starts the numbering at zero.
    if analysis_id.size == 0:
        return 0
    return int(np.nanmax(analysis_id.values) + 1)
def get_station_pqr(station_name: str, rcu_mode: Union[str, int], db) -> np.ndarray:
    """
    Get PQR coordinates for the relevant subset of antennas in a station.

    Args:
        station_name: Station name, e.g. 'DE603LBA' or 'DE603'
        rcu_mode: RCU mode (0 - 6, can be string)
        db: instance of LofarAntennaDatabase from lofarantpos

    Returns:
        float32 array of PQR antenna positions for the selected antennas.

    Raises:
        RuntimeError: if the station/mode cannot be matched to LBA or HBA.

    Example:
        >>> from lofarantpos.db import LofarAntennaDatabase
        >>> db = LofarAntennaDatabase()
        >>> pqr = get_station_pqr("DE603", "outer", db)
        >>> pqr.shape
        (96, 3)
        >>> pqr[0, 0]
        1.7434713
        >>> pqr = get_station_pqr("LV614", "5", db)
        >>> pqr.shape
        (96, 3)
    """
    full_station_name = get_full_station_name(station_name, rcu_mode)
    station_type = get_station_type(full_station_name)
    # LBA modes (1-4 / inner / outer) use the low-band antenna positions.
    if 'LBA' in station_name or str(rcu_mode) in ('1', '2', '3', '4', 'inner', 'outer'):
        # Get the PQR positions for an individual station
        station_pqr = db.antenna_pqr(full_station_name)
        # Exception: for Dutch stations (sparse not yet accommodated)
        # — core/remote stations use only half of the 96 antennas per mode.
        if (station_type == 'core' or station_type == 'remote') and int(rcu_mode) in (3, 4):
            station_pqr = station_pqr[0:48, :]
        elif (station_type == 'core' or station_type == 'remote') and int(rcu_mode) in (1, 2):
            station_pqr = station_pqr[48:, :]
    elif 'HBA' in station_name or str(rcu_mode) in ('5', '6', '7', '8'):
        # HBA: pick one dipole per tile according to the generic per-type
        # selection tables; each tile contributes 16 consecutive dipoles.
        selected_dipole_config = {
            'intl': GENERIC_INT_201512, 'remote': GENERIC_REMOTE_201512, 'core': GENERIC_CORE_201512
        }
        selected_dipoles = selected_dipole_config[station_type] + \
            np.arange(len(selected_dipole_config[station_type])) * 16
        station_pqr = db.hba_dipole_pqr(full_station_name)[selected_dipoles]
    else:
        raise RuntimeError("Station name did not contain LBA or HBA, could not load antenna positions")
    return station_pqr.astype('float32')
def hello_world(cities: Optional[List[str]] = None) -> bool:
    """
    Hello world function.

    Arguments:
        - cities: List of cities in which 'hello world' is posted.
          Defaults to ["Berlin", "Paris"].
    Return:
        - success: Whether or not function completed successfully.
    """
    # BUGFIX: avoid a mutable default argument (a shared list that callers
    # could mutate across calls); build the default fresh on each call.
    if cities is None:
        cities = ["Berlin", "Paris"]
    try:
        # Plain loop instead of a side-effect-only list comprehension.
        for city in cities:
            print("Hello {}!".format(city))
        return True
    except KeyboardInterrupt:
        return False
def init_db_command():
    """Clear the existing data and create new tables."""
    init_db()
    # Emit a single user-facing message via click.echo; the previous extra
    # print() duplicated it and misspelled "initialized".
    click.echo('Initialized the database.')
def parse_dir(directory, default_settings, oldest_revision, newest_revision,
              rep):
    """Parses bench data from files like bench_r<revision>_<scalar>.

    (str, {str, str}, Number, Number) -> {int:[BenchDataPoints]}

    Files whose revision falls outside [oldest_revision, newest_revision]
    are skipped. NOTE: mutates default_settings['scalar'] per file.
    """
    revision_data_points = {}  # {revision : [BenchDataPoints]}
    # Raw string avoids the Py3 invalid-escape warning; compile once,
    # outside the loop.
    file_name_regex = re.compile(r'bench_r(\d+)_(\S+)')
    for bench_file in sorted(os.listdir(directory)):
        file_name_match = file_name_regex.match(bench_file)
        if file_name_match is None:
            continue
        revision = int(file_name_match.group(1))
        scalar_type = file_name_match.group(2)
        if revision < oldest_revision or revision > newest_revision:
            continue
        # 'with' guarantees the handle is closed even if parsing raises
        # (the original leaked the handle on exceptions).
        with open(os.path.join(directory, bench_file), 'r') as file_handle:
            if revision not in revision_data_points:
                revision_data_points[revision] = []
            default_settings['scalar'] = scalar_type
            revision_data_points[revision].extend(
                bench_util.parse(default_settings, file_handle, rep))
    return revision_data_points
def random_samples(traj_obs, expert, num_sample):
    """Randomly sample a subset of states to collect expert feedback.

    Args:
        traj_obs: observations from a list of trajectories.
        expert: an expert policy; must expose ``action(obs)`` returning an
            object with an ``action`` attribute.
        num_sample: the number of samples to collect per trajectory.

    Returns:
        new expert data: a list of (state, action) pairs.
    """
    expert_data = []
    for obs in traj_obs:
        # Robustness: an empty trajectory contributes no samples.
        if not obs:
            continue
        # BUGFIX: the original random.shuffle(obs) mutated the caller's
        # trajectory lists in place; it was also redundant, because
        # np.random.choice already samples uniformly without replacement.
        chosen = np.random.choice(range(len(obs)),
                                  size=min(num_sample, len(obs)),
                                  replace=False)
        for ch in chosen:
            state = obs[ch].observation
            action_step = expert.action(obs[ch])
            expert_data.append((state, action_step.action))
    return expert_data
def test_normalization_of_images(test_link):
    """
    Test if normalize_pages returns 0 for None, or pages number otherwise.
    """
    # test_link fixture is expected to carry 15 pages; a Reddit built with
    # pages=None must normalize to 0.
    assert test_link.pages == 15
    assert Reddit('http://www.reddit.com/', None).pages == 0
def makeFolder(path):
    """Build a folder.

    Args:
        path (str): Folder path.

    Returns:
        bool: True if the folder was created; False if it already existed
        or could not be created.
    """
    # EAFP: attempt creation directly instead of the racy
    # isdir()-then-makedirs check — another process could create the folder
    # between the two calls and turn success into an unhandled error.
    try:
        os.makedirs(path)
    except FileExistsError:
        return False
    except OSError as error:
        print("Directory %s can't be created (%s)" % (path, error))
        return False
    else:
        return True
def get_p2_vector(img):
    """
    Returns a p2 vector.

    We calculate the p2 vector by taking the radial mean of
    the autocorrelation of the input image. The autocorrelation is computed
    via FFT (ifft2(F * conj(F))), normalised by the pixel count, then shifted
    so the zero lag sits at the image centre.
    """
    dim_x = img.shape[0]
    dim_y = img.shape[1]
    fft_image = np.fft.fft2(img)
    # |ifft2(F * conj(F))| / N is the (normalised) autocorrelation.
    autocorr = np.abs(np.fft.ifft2(fft_image * np.conj(fft_image))) / (dim_x * dim_y)
    centred_img = np.fft.fftshift(autocorr)
    center = [int(dim_x / 2), int(dim_y / 2)]
    radvar, _ = radial_profile(centred_img, center, (dim_x, dim_y))
    # The original wrapped radvar in a one-element list and immediately took
    # element 0; returning the profile as an array directly is equivalent.
    return np.array(radvar)
def start_replica_cmd(builddir, replica_id):
    """
    Build the argv list that launches an skvbc replica when passed to
    subprocess.Popen.

    Note each argument is a separate element in the returned list.
    """
    status_timer_ms = "500"
    view_change_timeout_ms = "10000"
    binary = os.path.join(builddir, "tests", "simpleKVBC",
                          "TesterReplica", "skvbc_replica")
    cmd = [binary]
    cmd += ["-k", KEY_FILE_PREFIX]
    cmd += ["-i", str(replica_id)]
    cmd += ["-s", status_timer_ms]
    cmd += ["-v", view_change_timeout_ms]
    cmd += ["-e", str(True)]
    return cmd
def multiplex(n, q, **kwargs):
    """ Fan one queue out into several equivalent Queues.

    A daemon thread forwards every item taken from ``q`` into each of the
    ``n`` returned queues; extra kwargs are passed to the Queue constructor.

    >>> q1, q2, q3 = multiplex(3, in_q)
    """
    duplicates = [Queue(**kwargs) for _ in range(n)]

    def forward():
        # Forward forever; the daemon flag lets the process exit anyway.
        while True:
            item = q.get()
            for duplicate in duplicates:
                duplicate.put(item)

    worker = Thread(target=forward)
    worker.daemon = True
    worker.start()
    return duplicates
def sse_md5(params, **kwargs):
    """
    S3 server-side encryption requires the encryption key to be sent to the
    server base64 encoded, as well as a base64-encoded MD5 hash of the
    encryption key. This handler does both if the MD5 has not been set by
    the caller.
    """
    # Thin wrapper: delegates to _sse_md5 with the 'SSECustomer' param prefix.
    # Extra event-handler kwargs are accepted but unused.
    _sse_md5(params, 'SSECustomer')
def identity_of(lines):
    """ Yield the leading '#<digits>' identity of each line, or None when a
    line has no such prefix. """
    pattern = re.compile(r"^#[0-9]+")
    for line in lines:
        found = pattern.search(line)
        yield found.group(0) if found else None
def stack(arrays, axis=0):
    """
    Join a sequence of arrays along a new axis.

    The `axis` parameter specifies the index of the new axis in the
    dimensions of the result: ``axis=0`` makes it the first dimension,
    ``axis=-1`` the last.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.
    axis : int, optional
        The axis in the result array along which the input arrays are stacked.

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    split : Split array into a list of multiple sub-arrays of equal size.

    Examples
    --------
    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)
    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)
    >>> a = np.array_create.array([1, 2, 3])
    >>> b = np.array_create.array([2, 3, 4])
    >>> np.stack((a, b), axis=-1)
    array_create.array([[1, 2],
           [2, 3],
           [3, 4]])
    """
    converted = [array_create.array(arr) for arr in arrays]
    if not converted:
        raise ValueError('need at least one array to stack')
    # All inputs must share one shape.
    if len({arr.shape for arr in converted}) != 1:
        raise ValueError('all input arrays must have the same shape')
    result_ndim = converted[0].ndim + 1
    if not -result_ndim <= axis < result_ndim:
        msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim)
        raise IndexError(msg)
    if axis < 0:
        axis += result_ndim
    # Insert a length-1 axis at `axis` in every array, then concatenate along it.
    indexer = (slice(None),) * axis + (None,)
    return concatenate([arr[indexer] for arr in converted], axis=axis)
def get_runs(runs, selected_runs, cmdline):
    """Selects which run(s) to execute based on parts of the command-line.

    Will return an iterable of run numbers. Might also fail loudly or exit
    after printing the original command-line.

    `selected_runs` is a comma-separated string of run names, run indices, or
    index ranges "first-last" (either bound may be omitted); None selects all
    runs. If `cmdline` is an empty list, the original command-lines are
    printed and the process exits.
    """
    # Map run names (the optional 'id' field) to their indices.
    name_map = dict((r['id'], i) for i, r in enumerate(runs) if 'id' in r)
    run_list = []
    def parse_run(s):
        # Parse a single run index, validating range; exits/raises on error.
        try:
            r = int(s)
        except ValueError:
            logging.critical("Error: Unknown run %s", s)
            raise UsageError
        if r < 0 or r >= len(runs):
            logging.critical("Error: Expected 0 <= run <= %d, got %d",
                             len(runs) - 1, r)
            sys.exit(1)
        return r
    if selected_runs is None:
        run_list = list(irange(len(runs)))
    else:
        for run_item in selected_runs.split(','):
            run_item = run_item.strip()
            # A named run takes precedence over numeric parsing.
            if run_item in name_map:
                run_list.append(name_map[run_item])
                continue
            sep = run_item.find('-')
            if sep == -1:
                run_list.append(parse_run(run_item))
            else:
                # Range "first-last"; missing bounds default to 0 / last run.
                if sep > 0:
                    first = parse_run(run_item[:sep])
                else:
                    first = 0
                if sep + 1 < len(run_item):
                    last = parse_run(run_item[sep + 1:])
                else:
                    last = len(runs) - 1
                if last < first:
                    logging.critical("Error: Last run number should be "
                                     "greater than the first")
                    sys.exit(1)
                run_list.extend(irange(first, last + 1))
    # --cmdline without arguments: display the original command-line
    if cmdline == []:
        print("Original command-lines:")
        for run in run_list:
            print(' '.join(shell_escape(arg)
                           for arg in runs[run]['argv']))
        sys.exit(0)
    return run_list
def declare_default_toolchain():
    """The default ReasonML/BuckleScript toolchain.

    This toolchain will register as `bs-platform` and will include the `nix`
    managed ReasonML tools (such as `refmt`) and the `bazel` compiled BuckleScript
    and patched Ocaml compilers.

    It defaults to:
    * `bs_stdlib = "//reason/private/bs:stdlib.ml"`
    * `bsc = "//reason/private/bs:bsc.exe"`
    * `ocamlc = "//reason/private/opam:ocamlc.opt"`
    * `ocamlopt = "//reason/private/opam:ocamlopt.opt"`
    * `ocamldep = "//reason/private/opam:ocamldep.opt"`
    * `ocamlrun = "//reason/private/opam:ocamlrun"`
    * `ocaml_stdlib = "//reason/private/ocaml:stdlib.ml"`
    * `refmt = "//reason/private/bs:refmt.exe"`
    """
    # Delegates to the private toolchain macro with every tool pinned to the
    # labels documented above.
    _reason_toolchain(
        name="bs",
        bs_stdlib="//reason/private/bs:stdlib.ml",
        bsc="//reason/private/bs:bsc.exe",
        ocamlc="//reason/private/opam:ocamlc.opt",
        ocamlopt="//reason/private/opam:ocamlopt.opt",
        ocamldep="//reason/private/opam:ocamldep.opt",
        ocamlrun="//reason/private/opam:ocamlrun",
        ocaml_stdlib="//reason/private/ocaml:stdlib.ml",
        refmt="//reason/private/bs:refmt.exe",
    )
def verifica_cc(numero):
    """verifica_cc(numero): int -> tuple

    Validate a credit-card number, returning a (category, network) tuple,
    or the string "cartao invalido" when the number fails the Luhn check or
    its IIN matches no known network.
    """
    numero_final = str(numero)
    # Guard clauses with truthiness instead of "== True" / == "" comparisons.
    if not luhn_verifica(numero_final):
        return "cartao invalido"
    rede_cartao = valida_iin(numero_final)
    if not rede_cartao:
        return "cartao invalido"
    return (categoria(numero_final), rede_cartao)
def check_modified_file(filename, errors):
    """Check each modified file to make sure it adheres to the standards.

    Dispatches on file extension to the C/C++ or Python checkers and appends
    any problems found to `errors`. Vendored dependencies and template
    headers are skipped (template headers have no standard header guard).
    """
    # skip code that isn't ours
    if filename.find("dependency") != -1 or "/eigen3/" in filename:
        return
    # don't check header guard in template headers
    if filename.find("templates") != -1:
        return
    info = python_tools.get_project_info(filename)
    if filename.endswith('.h') or filename.endswith('.cpp') \
            or filename.endswith('.c'):
        check_c_file(filename, errors)
        # don't check header guard in template headers
        # (cpp_format may be None if the formatter module is unavailable)
        if (cpp_format and filename.endswith('.h')
                and filename.find("templates") == -1):
            cpp_format.check_header_file(
                get_file(filename),
                info["name"],
                errors)
        elif cpp_format and filename.endswith('.cpp'):
            cpp_format.check_cpp_file(
                get_file(filename),
                info["name"],
                errors)
    elif filename.endswith('.py'):
        check_python_file(filename, errors)
def create_security_group(stack, name, rules=()):
    """Add EC2 Security Group Resource.

    Args:
        stack: wrapper exposing .stack (troposphere template) and .vpc.
        name: logical-name prefix for the security group.
        rules: iterable of dicts with keys 'name', 'cidr', 'from_port',
            'to_port' and 'protocol', one per ingress rule.

    Returns:
        The SecurityGroup resource added to the stack's template.
    """
    ingress_rules = []
    for rule in rules:
        ingress_rules.append(
            SecurityGroupRule(
                "{0}".format(rule['name']),
                CidrIp=rule['cidr'],
                FromPort=rule['from_port'],
                ToPort=rule['to_port'],
                IpProtocol=rule['protocol'],
            )
        )
    return stack.stack.add_resource(
        SecurityGroup(
            '{0}SecurityGroup'.format(name),
            GroupDescription="{0} Security Group".format(name),
            SecurityGroupIngress=ingress_rules,
            VpcId=Ref(stack.vpc),
        ))
def features_targets_and_externals(
    df: pd.DataFrame,
    region_ordering: List[str],
    id_col: str,
    time_col: str,
    time_encoder: OneHotEncoder,
    weather: Weather_container,
    time_interval: str,
    latitude: str,
    longitude: str,
):
    """
    Function that computes the node features (outflows), target values (next step prediction)
    and external data such as time_encoding and weather information

    Args:
        df (pd.DataFrame): event data, one row per inflow event
        region_ordering (List[str]): fixed ordering of region/node ids
        id_col (str): column in df holding the region/node id
        time_col (str): column in df holding the event timestamp
        time_encoder (OneHotEncoder): fitted encoder over (hour, weekday, month)
        weather (Weather_container): provider of hourly weather features
        time_interval (str): pandas frequency string for the time grid
        latitude (str): column holding event latitude
        longitude (str): column holding event longitude

    Returns:
        tuple: (X, lat_vals, lng_vals, targets, time_external,
        weather_external, feature_scaler, target_scaler)
    """
    # Per-node mean coordinates, used as constant node attributes.
    id_grouped_df = df.groupby(id_col)
    lat_dict = dict()
    lng_dict = dict()
    for node in region_ordering:
        grid_group_df = id_grouped_df.get_group(node)
        lat_dict[node] = grid_group_df[latitude].mean()
        lng_dict[node] = grid_group_df[longitude].mean()
    grouped_df = df.groupby([time_col, id_col])
    dt_range = pd.date_range(df[time_col].min(), df[time_col].max(), freq=time_interval)
    node_inflows = np.zeros((len(dt_range), len(region_ordering), 1))
    lat_vals = np.zeros((len(dt_range), len(region_ordering)))
    lng_vals = np.zeros((len(dt_range), len(region_ordering)))
    targets = np.zeros((len(dt_range) - 1, len(region_ordering)))
    # arrays for external data
    weather_external = np.zeros((len(dt_range), 4))
    num_cats = 0
    for cats in time_encoder.categories_:
        num_cats += len(cats)
    time_external = np.zeros((len(dt_range), num_cats))
    # Loop through every (timestep, node) pair in dataset. For each find number of outflows and set as feature
    # also set the next timestep for the same node as the target.
    for t, starttime in tqdm(enumerate(dt_range), total=len(dt_range)):
        for i, node in enumerate(region_ordering):
            query = (starttime, node)
            try:
                group = grouped_df.get_group(query)
                node_inflows[t, i] = len(group)
            except KeyError:
                # No events for this (timestep, node) pair.
                node_inflows[t, i] = 0
            lat_vals[t, i] = lat_dict[node]
            lng_vals[t, i] = lng_dict[node]
            # current solution:
            # The target to predict, is the number of inflows at next timestep.
            if t > 0:
                targets[t - 1, i] = node_inflows[t, i]
        # NOTE(review): `group` here is whatever the inner loop last bound —
        # stale from a previous timestep (or unbound, raising NameError) when
        # every node raised KeyError at this timestep; verify intent.
        time_obj = group[time_col].iloc[0]
        time_external[t, :] = time_encoder.transform(
            np.array([[time_obj.hour, time_obj.weekday(), time_obj.month]])
        ).toarray()
        start_time_dt = pd.Timestamp(starttime).to_pydatetime()
        weather_dat = weather.get_weather_df(start=start_time_dt, end=start_time_dt + timedelta(hours=1))
        weather_dat = np.nan_to_num(weather_dat, copy=False, nan=0.0)
        weather_external[t, :] = weather_dat
    # Drop the final timestep so externals align with X/targets below.
    time_external = time_external[:-1, :]
    # normalize weather features
    weather_external = (weather_external - weather_external.mean(axis=0)) / (weather_external.std(axis=0) + 1e-6)
    weather_external = weather_external[:-1, :]
    X = node_inflows[:-1, :, :]
    lng_vals = lng_vals[:-1, :]
    lat_vals = lat_vals[:-1, :]
    # Scalers are fitted here but not applied; callers transform as needed.
    feature_scaler = StandardScaler()
    feature_scaler.fit(X[:, :, 0])
    target_scaler = StandardScaler()
    target_scaler.fit(targets)
    return X, lat_vals, lng_vals, targets, time_external, weather_external, feature_scaler, target_scaler
def main(argv=None):
    """ Execute the application CLI.

    Arguments are taken from sys.argv by default. Prints the ranked,
    de-duplicated package list and returns the process exit code (0).
    """
    args = _cmdline(argv)
    config.load(args.config)
    results = get_package_list(args.search_term)
    # Rank best-first by sort_function applied to the second tuple element.
    results = sorted(results, key=lambda a: sort_function(a[1]), reverse=True)
    results_normalized = list()
    last_result = None
    # Drop consecutive duplicates by name (first tuple element); this only
    # removes adjacent repeats — presumably equal names sort adjacently.
    for result in results:
        if result[0] == last_result:
            continue
        results_normalized.append(result)
        last_result = result[0]
    print('\n'.join(["%s - %s" % (_[0], _[1]) for _ in results_normalized]))
    return 0
def transform_child_joint_frame_to_parent_inertial_frame(child_body):
    """Return the homogeneous transform from the child joint frame to the parent inertial frame.

    Returns None (implicitly) when the child has no parent joint or the
    parent body has no inertial element.
    """
    parent_joint = child_body.parent_joint
    parent = child_body.parent_body
    if parent_joint is not None and parent.inertial is not None:
        h_p_c = parent_joint.homogeneous  # from parent to child link/joint frame
        h_c_p = get_inverse_homogeneous(h_p_c)  # from child to parent link/joint frame
        h_p_pi = parent.inertial.homogeneous  # from parent link/joint frame to inertial frame
        h_c_pi = h_c_p.dot(h_p_pi)  # from child link/joint frame to parent inertial frame
        return h_c_pi
def delete_article_number_sequence(
    sequence_id: ArticleNumberSequenceID,
) -> None:
    """Delete the article number sequence.

    Bulk-deletes by primary key and commits immediately; deleting a
    non-existent sequence simply affects zero rows.
    """
    db.session.query(DbArticleNumberSequence) \
        .filter_by(id=sequence_id) \
        .delete()
    db.session.commit()
def team_to_repos(api, no_repos, organization):
    """Create a team_to_repos mapping for use in _add_repos_to_teams, and create
    each team and repo. Return the team_to_repos mapping.
    """
    num_teams = 10
    # arrange: one team and one repo per index
    team_names = ["team-{}".format(idx) for idx in range(num_teams)]
    repo_names = ["some-repo-{}".format(idx) for idx in range(num_teams)]
    for team_name in team_names:
        organization.create_team(team_name, permission="pull")
    for repo_name in repo_names:
        organization.create_repo(repo_name)
    # Pair each team with exactly one repo.
    return {team: [repo] for team, repo in zip(team_names, repo_names)}
def test_min(hass):
    """Test the min filter."""
    # Rendered templates come back as strings, hence the "1" comparison.
    assert template.Template("{{ [1, 2, 3] | min }}", hass).async_render() == "1"
def box_minus(plus_transform: pin.SE3, minus_transform: pin.SE3) -> np.ndarray:
    """
    Compute the box minus between two transforms:

    .. math::

        T_1 \\boxminus T_2 = \\log(T_1 \\cdot T_2^{-1})

    This operator allows us to think about orientation "differences" as
    similarly as possible to position differences, but mind the frames. Its
    formula has two use cases, depending on whether the common frame
    :math:`C` between the two transforms is their source or their target.

    When the common frame is the target (:math:`T_{CP} \\boxminus T_{CM}`,
    with :math:`T_{CP}` the transform from source frame :math:`P` to target
    frame :math:`C`), the resulting spatial twist :math:`{}_C \\xi_{CM}` is
    expressed in the common target frame :math:`C`.

    When the common frame is the source (:math:`T_{PC} \\boxminus T_{MC}`),
    the result is the body twist :math:`-{}_M \\xi_{CM}` expressed in the
    target frame of the right-hand side transform (mind the unitary minus).

    Args:
        plus_transform: Transform :math:`T_1` on the left-hand side of the
            box minus operator.
        minus_transform: Transform :math:`T_2` on the right-hand side of the
            box minus operator.

    Returns:
        Twist vector of :math:`\\log(T_1 \\cdot T_2^{-1})` as described above.

    Note:
        Prefer using :func:`pink.tasks.utils.body_box_minus` to calling this
        function in the second use case :math:`T_{PC} \\boxminus T_{MC}`.
    """
    relative_transform = plus_transform.act(minus_transform.inverse())
    twist: np.ndarray = pin.log(relative_transform).vector
    return twist
def ipykernel(ctx, name=None, display_name=None):
    """Installs an IPyKernel for this project.

    Falls back to the 'spines-dev' / 'Spines Dev' kernel when no name or
    display name is given.
    """
    name = name or 'spines-dev'
    display_name = display_name or 'Spines Dev'
    log("Installing IPyKernel: %s (%s)" % (name, display_name))
    command = (
        'python -m ipykernel install --user --name %s --display-name "%s"'
        % (name, display_name)
    )
    ctx.run(command)
def padandsplit(message):
    """
    Pad *message* MD-style and split it into blocks of 16 little-endian words.

    First pads the message so its length in bytes is congruent to 56
    (mod 64), by adding a byte 0x80 and then 0x00 bytes, then appends the
    little-endian 64-bit representation of the original length in bits.
    Finally splits the result into 64-byte blocks, each parsed as sixteen
    32-bit little-endian integers.

    Returns a two-dimensional list X[i][j] of 32-bit integers, where j
    ranges from 0 to 15.
    """
    original_length = len(message)
    pad_length = 64 - ((original_length - 56) % 64)  # minimum padding is 1!
    padded = message + b"\x80" + b"\x00" * (pad_length - 1)
    padded += struct.pack("<Q", original_length * 8)
    assert len(padded) % 64 == 0
    blocks = []
    for offset in range(0, len(padded), 64):
        # "<16L" unpacks one whole 64-byte block in a single C-level call.
        blocks.append(list(struct.unpack("<16L", padded[offset:offset + 64])))
    return blocks
def base_put(url_path, content):
    """
    Do a PUT to the REST API.

    Sends *content* as the JSON body and returns the raw response.
    """
    target_url = settings.URL_API + url_path
    return requests.put(url=target_url, json=content)
def inverse_rotation(theta: float) -> np.ndarray:
    """
    Compute inverse of the 2d rotation matrix that rotates a
    given vector by theta without use of numpy.linalg.inv and numpy.linalg.solve.

    A rotation matrix is orthogonal with determinant 1, so its inverse is its
    transpose -- equivalently, a rotation by -theta.

    Arguments:
        theta: rotation angle (radians)
    Return:
        Inverse of the rotation matrix
    """
    # Bug fix: the original referenced undefined names (theta_rad, diag,
    # offDiag) and divided by a bogus determinant; the determinant of a
    # rotation matrix is 1, so the transpose entries are used directly.
    m = np.zeros((2, 2))
    m[0, 0] = np.cos(theta)
    m[0, 1] = np.sin(theta)
    m[1, 0] = -np.sin(theta)
    m[1, 1] = np.cos(theta)
    return m
def _config_validation_decorator(func):
    """A decorator used to easily run validations on configs loaded into dicts.

    Add this decorator to any method that returns the config as a dict.

    Raises:
        ValueError: If the configuration fails validation
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        loaded_config = func(*args, **kwargs)
        # Validate before handing the config back to the caller.
        validate_dict(loaded_config)
        return loaded_config
    return wrapper
def image_transpose_exif(im):
    """
    https://stackoverflow.com/questions/4228530/pil-thumbnail-is-rotating-my-image

    Apply Image.transpose to ensure 0th row of pixels is at the visual
    top of the image, and 0th column is the visual left-hand side.
    Return the original image if unable to determine the orientation.

    As per CIPA DC-008-2012, the orientation field contains an integer,
    1 through 8. Other values are reserved.

    Parameters
    ----------
    im: PIL.Image
        The image to be rotated.

    Returns
    -------
    PIL.Image
        A transposed copy of *im*, or *im* itself when the EXIF
        orientation is missing or unreadable.
    """
    exif_orientation_tag = 0x0112
    # One transpose sequence per EXIF orientation value (index == value).
    exif_transpose_sequences = [                   # Val  0th row  0th col
        [],                                        #  0    (reserved)
        [],                                        #  1   top      left
        [Image.FLIP_LEFT_RIGHT],                   #  2   top      right
        [Image.ROTATE_180],                        #  3   bottom   right
        [Image.FLIP_TOP_BOTTOM],                   #  4   bottom   left
        [Image.FLIP_LEFT_RIGHT, Image.ROTATE_90],  #  5   left     top
        [Image.ROTATE_270],                        #  6   right    top
        [Image.FLIP_TOP_BOTTOM, Image.ROTATE_90],  #  7   right    bottom
        [Image.ROTATE_90],                         #  8   left     bottom
    ]
    # _getexif is a private PIL API and may return None; any failure here
    # (no EXIF, missing tag, reserved value) leaves the image untouched.
    try:
        seq = exif_transpose_sequences[im._getexif()[exif_orientation_tag]]
    except Exception:
        return im
    else:
        # Thread the image through each transpose in order.
        return functools.reduce(type(im).transpose, seq, im)
def GetFileList(folder, surfixs=".xls,.xlsx"):
    """Walk *folder* and collect all files matching one of the suffixes.

    Args:
        folder: directory to search (decoded to unicode on Python 2).
        surfixs: comma-separated list of file suffixes to match.

    Returns:
        List of UTF-8 encoded file paths.

    Raises:
        ValueError: if *folder* is not an existing directory.
    """
    surfix = surfixs.split(",")
    if type(folder) == str:
        # NOTE(review): Python 2 idiom -- str.decode does not exist on
        # Python 3 strings; this module appears to target Python 2.
        folder = folder.decode('utf-8')
    p = os.path.abspath(folder)
    flist = []
    if os.path.isdir(p):
        FindFileBySurfix(flist, p, surfix)
    else:
        # Bug fix: raising a plain string is a TypeError on Python 2.6+;
        # raise a real exception instead.
        raise ValueError("folder param(%s) is not a real folder" % str(folder))
    utf8list = []
    for it in flist:
        utf8list.append(it.encode('utf-8'))
    return utf8list
def generateHTML(fileName, styles):
    """Generate HTML from a markdown file and write it next to the source.

    (The original had a second, dead string statement after the docstring;
    both are merged here.)

    Args:
        fileName: name of the markdown source file.
        styles: styles passed through to insertStyles.
    """
    code = markdown(getFC(fileName))
    # NOTE(review): split('.')[0] truncates at the FIRST dot, so dotted
    # basenames or relative paths containing dots lose part of their name.
    base_name = fileName.split('.')[0]
    compCode = insertStyles(code, styles, base_name)
    # Bug fix: use a context manager so the handle is closed even if the
    # write raises (the original leaked it on error).
    with open(base_name + '.html', 'w') as htmlFile:
        htmlFile.write(compCode)
def opt_pore_diameter(elements, coordinates, bounds=None, com=None, **kwargs):
    """Return optimised pore diameter and it's COM.

    Args:
        elements: element identifiers of the molecule.
        coordinates: Cartesian coordinates matching *elements*.
        bounds: optional optimisation bounds; when omitted, a cube of the
            initial pore radius around the COM is used.
        com: optional centre of mass; computed from the inputs when omitted.
        **kwargs: accepted for API compatibility; currently unused.

    Returns:
        Tuple of (optimised pore diameter, second value from pore_diameter
        -- presumably the closest atom, TODO confirm -- and the optimised
        COM coordinates).
    """
    args = elements, coordinates
    # Idiom fix: the original used `if com is not None: pass / else: ...`.
    if com is None:
        com = center_of_mass(elements, coordinates)
    if bounds is None:
        pore_r = pore_diameter(elements, coordinates, com=com)[0] / 2
        bounds = (
            (com[0] - pore_r, com[0] + pore_r),
            (com[1] - pore_r, com[1] + pore_r),
            (com[2] - pore_r, com[2] + pore_r),
        )
    minimisation = minimize(
        correct_pore_diameter, x0=com, args=args, bounds=bounds)
    pored = pore_diameter(elements, coordinates, com=minimisation.x)
    return (pored[0], pored[1], minimisation.x)
async def test_create_contestants_csv_event_not_found(
    client: _TestClient,
    mocker: MockFixture,
    token: MockFixture,
    event: dict,
    new_contestant: dict,
) -> None:
    """Should return 404 Not found."""
    EVENT_ID = "event_id_1"
    CONTESTANT_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
    # The event lookup returns None, which is what should trigger the 404.
    mocker.patch(
        "event_service.adapters.events_adapter.EventsAdapter.get_event_by_id",  # noqa: B950
        return_value=None,
    )
    mocker.patch(
        "event_service.services.contestants_service.create_id",
        return_value=CONTESTANT_ID,
    )
    mocker.patch(
        "event_service.adapters.contestants_adapter.ContestantsAdapter.create_contestant",  # noqa: B950
        return_value=CONTESTANT_ID,
    )
    mocker.patch(
        "event_service.adapters.contestants_adapter.ContestantsAdapter.get_contestant_by_name",  # noqa: B950
        return_value=None,
    )
    mocker.patch(
        "event_service.adapters.contestants_adapter.ContestantsAdapter.get_contestant_by_minidrett_id",  # noqa: B950
        return_value=None,
    )
    headers = {
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    # Bug fix: open the CSV through a context manager so the handle is
    # closed even if the request raises (the original leaked it).
    with open("tests/files/contestants_eventid_364892.csv", "rb") as csv_file:
        files = {"file": csv_file}
        with aioresponses(passthrough=["http://127.0.0.1"]) as m:
            m.post("http://example.com:8081/authorize", status=204)
            resp = await client.post(
                f"/events/{EVENT_ID}/contestants", headers=headers, data=files
            )
            assert resp.status == 404
def test_update_bad_token(app, combatant):
    """Test user update API with bad token."""
    payload = json.dumps({'token': uuid4().hex})
    response = app.test_client().put(
        '/api/combatant_update',
        data=payload,
        content_type='application/json',
    )
    # A random token must be rejected as unauthorized.
    assert response.status_code == 401
def example() -> None:
    """create example config files in ~/.aec/."""
    target_dir = os.path.expanduser("~/.aec/")
    os.makedirs(target_dir, exist_ok=True)
    # Copy every bundled example config into the user's config dir.
    for resource in resources.files("aec.config-example").iterdir():
        copy(resource, target_dir)
def animate_operators(operators, date):
    """Main.

    Runs animate_one_day for every operator, writing each successful result
    to a per-operator CSV, and collects operators that failed.

    NOTE(review): this is Python 2 code -- it falls back to the builtin
    ``unicode`` and encodes/decodes ``str``, neither of which works on
    Python 3.

    Returns:
        (results, failures): list of per-operator outputs and list of
        operator names that raised during animation.
    """
    results = []
    failures = []
    length = len(operators)
    count = 1
    for i in operators:
        # Normalize the operator name to a UTF-8 byte string; on Python 2,
        # str.encode can raise for non-ASCII, hence the unicode() fallback.
        try:
            i = i.encode('utf-8')
        except:
            i = unicode(i, 'utf-8')
            i = i.encode('utf-8')
        print(i, count, "/", length)
        try:
            output = animate_one_day(i, date)
            results.append(output)
            print("success!")
            output.to_csv("sketches/{}/{}/data/indiv_operators/{}.csv".format(OUTPUT_NAME, DATE, i))
        except Exception:
            # Best-effort: record the operator and keep going.
            failures.append(i)
            print("failed:")
        count += 1
    return results, failures
def random_sampler(vocs, evaluate_f,
                   executor=None,
                   output_path=None,
                   chunk_size=10,
                   max_samples=100,
                   verbose=None):
    """
    Makes random samples based on vocs

    Submits `chunk_size` evaluations at once and resubmits a fresh random
    sample whenever one finishes, until more than `max_samples` results have
    been collected. Results are written to `output_path` (if given) in
    chunks of `chunk_size`.

    Returns:
        dict with 'vocs' plus 'inputs', 'outputs' and 'error' lists gathered
        from all completed evaluations.
    """
    if verbose is not None:
        warnings.warn('xopt.cnsga verbose option has been deprecated')
    toolbox = Toolbox()
    toolbox.register('evaluate', sampler_evaluate, evaluate_f=evaluate_f)
    # Logo
    logger.info(sampler_logo)
    if not executor:
        executor = DummyExecutor()
        logger.info('No executor given. Running in serial mode.')
    # Setup saving to file
    if output_path:
        path = full_path(output_path)
        assert os.path.exists(path), f'output_path does not exist {path}'
        def save(data):
            file = new_date_filename(prefix='sampler-', path=path)
            with open(file, 'w') as f:
                json.dump(data, f, ensure_ascii=True, cls=NpEncoder)  # , indent=4)
            logger.info(f'Samples written to: {file}')
    else:
        # Dummy save
        def save(data):
            pass
    # Initial batch
    futures = [executor.submit(toolbox.evaluate, random_settings(vocs)) for _ in range(chunk_size)]
    # Continuous loop
    ii = 0
    t0 = time.time()
    done = False
    results = []
    all_results = []
    while not done:
        # NOTE(review): this check runs before new results are counted, so
        # slightly more than max_samples results may be collected.
        if ii > max_samples:
            done = True
        # Check the status of all futures
        for ix in range(len(futures)):
            # Examine a future
            fut = futures[ix]
            if not fut.done():
                continue
            # Future is done.
            results.append(fut.result())
            all_results.append(fut.result())
            ii += 1
            # Submit new job, keep in futures list
            future = executor.submit(toolbox.evaluate, random_settings(vocs))
            futures[ix] = future
            # output: flush the accumulated chunk to disk
            if ii % chunk_size == 0:
                t1 = time.time()
                dt = t1 - t0
                t0 = t1
                logger.info(f'{chunk_size} samples completed in {dt / 60:0.5f} minutes')
                data = {'vocs': vocs}
                # Reshape data
                for k in ['inputs', 'outputs', 'error']:
                    data[k] = [r[k] for r in results]
                save(data)
                results = []
        # Slow down polling. Needed for MPI to work well.
        time.sleep(0.001)
    # Cancel remaining jobs
    for future in futures:
        future.cancel()
    data = {'vocs': vocs}
    # Reshape data
    for k in ['inputs', 'outputs', 'error']:
        data[k] = [r[k] for r in all_results]
    return data
def get_span_encoding(key, zero_span_rep=None):
    """
    Input: document key
    Output: all possible span tuples and their encodings

    NOTE(review): relies on module-level globals (`reader`, `model`,
    `iterator`, `combined_json`, `include_bert`, `get_map_loc`,
    `get_bert_reps`, `move_to_device`) -- confirm they are initialized
    before calling.

    When `zero_span_rep` is given (debugging), real encodings are replaced
    by zero tensors of half that width per embedding type.
    """
    instance = reader.text_to_instance(combined_json[key]["sentences"])
    instance.index_fields(model.vocab)
    generator = iterator(instances=[instance])
    batch = next(generator)
    # An int index means a concrete device (e.g. GPU); move the batch there.
    if type(get_map_loc().index) == int:
        batch = move_to_device(batch, get_map_loc().index)
    if zero_span_rep is not None:  # for debugging
        assert (
            zero_span_rep % 2 == 0
        ), "zero_span_rep must be even as it corresponds to concat(endpoint, attended)"
        shape = list(batch["spans"].shape)
        shape[-1] = int(zero_span_rep / 2)
        zeros = torch.zeros(shape)
        return {
            "original_text": batch["metadata"][0]["original_text"],
            "all_spans": batch["spans"],
            "endpoint_span_embeddings": zeros,
            "attended_span_embeddings": zeros,
            "roberta_embeddings": zeros,
        }
    output = model.forward(tensor_batch=batch, task_name="coref", for_training=False)
    reps = {
        "original_text": batch["metadata"][0]["original_text"],
        "all_spans": output["all_spans"],
        "endpoint_span_embeddings": output["endpoint_span_embeddings"],
        "attended_span_embeddings": output["attended_span_embeddings"],
    }
    # Optionally add contextual RoBERTa embeddings for the same spans.
    if include_bert:
        reps["roberta_embeddings"] = get_bert_reps(
            combined_json[key]["sentences"], output["all_spans"][0]
        )
    return reps
def _check_component_dtypes(value_type):
    """Checks all components of the `value_type` to be either ints or floats."""
    # Short-circuits: the integer check only runs if the float check fails.
    is_numeric_structure = (
        type_analysis.is_structure_of_floats(value_type)
        or type_analysis.is_structure_of_integers(value_type)
    )
    if not is_numeric_structure:
        raise TypeError('Component dtypes of `value_type` must all be integers or '
                        f'floats. Found {value_type}.')
def add_stocks(letter, page, get_last_page=False):
    """
    goes through each row in table and adds to df if it is a stock
    returns the appended df

    Args:
        letter: first-letter listing page to scrape.
        page: page number within that letter.
        get_last_page: when True, also parse and return the last page number.

    Returns:
        (df, stocks_on_page) normally, or (df, stocks_on_page,
        last_page_num) when get_last_page is True.
    """
    df = pd.DataFrame()
    res = req.get(BASE_LINK.format(letter, page))
    soup = bs(res.content, 'lxml')
    table = soup.find('table', {'id': 'CompanylistResults'})
    stks = table.findAll('tr')
    # Every listing spans two <tr> rows; the first row is the header.
    stocks_on_page = (len(stks) - 1) / 2
    for stk in stks[1:]:
        deets = stk.findAll('td')
        # Skip spacer/secondary rows that do not carry the 7 data cells.
        if len(deets) != 7:
            continue
        company_name = deets[0].text.strip()
        ticker = deets[1].text.strip()
        market_cap = deets[2].text.strip()
        # 4th entry is blank
        country = deets[4].text.strip()
        ipo_year = deets[5].text.strip()
        subsector = deets[6].text.strip()
        # NOTE(review): DataFrame.append is removed in modern pandas; kept
        # for compatibility with the version this module targets.
        df = df.append(pd.Series({'company_name': company_name,
                                  'market_cap': market_cap,
                                  'country': country,
                                  'ipo_year': ipo_year,
                                  'subsector': subsector},
                                 name=ticker))
    if get_last_page:
        # get number of pages
        lastpage_link = soup.find('a', {'id': 'two_column_main_content_lb_LastPage'})
        last_page_num = int(lastpage_link['href'].split('=')[-1])
        # Bug fix: the original returned the undefined name
        # `total_num_stocks`, raising a NameError whenever
        # get_last_page was True; return the computed per-page count.
        return df, stocks_on_page, last_page_num
    return df, stocks_on_page
def available_parent_amount_rule(model, pr):
    """
    Each parent has a limited resource budget; it cannot allocate more than that.
    :param ConcreteModel model:
    :param int pr: parent resource
    :return: boolean indicating whether pr is staying within budget
    """
    allocations = model.parent_possible_allocations[pr]
    if not allocations:
        # Nothing can be allocated from this parent: no constraint needed.
        return Constraint.Skip
    total_allocated = sum(model.PARENT_AMT[pr, alloc] for alloc in allocations)
    return total_allocated <= model.avail_parent_amt[pr]
def extract_coords(filename):
    """Extract J2000 coordinates from filename or filepath.

    Parameters
    ----------
    filename : str
        name or path of file (expected to start with a J-prefixed
        HHMMSS.Sss±DDMMSS.Ss designation before the first underscore)

    Returns
    -------
    str
        J2000 coordinates in "HH MM SS.SSS ±DD MM SS.SS" format
    """
    # in case a path is entered as argument, keep only the basename
    basename = filename.split("/")[-1]
    # declination sign: "+" when present, otherwise "-"
    sign = "+" if "+" in basename else "-"
    # strip the leading J and split into right ascension and declination
    ra_part, dec_part = basename.split("_")[0].strip("J").split(sign)
    ra_fields = " ".join([ra_part[0:2], ra_part[2:4], ra_part[4:]])
    dec_fields = " ".join([dec_part[0:2], dec_part[2:4], dec_part[4:]])
    return ra_fields + " " + sign + dec_fields
def exponential(mantissa, base, power, left, right):
    """Return the exponential signal.

    The signal's value will be `mantissa * base ^ (power * time)`.

    Parameters:
        mantissa: The mantissa, i.e. the scale of the signal
        base: The exponential base
        power: The exponential power
        left: Left bound of the signal
        right: Right bound of the signal

    Returns:
        ndarray[float]: The values of the signal
        ndarray[int]: The interval of the signal from left bound
            to right bound (inclusive)
    """
    interval = np.arange(left, right + 1, 1)
    values = mantissa * (base ** (power * interval))
    return values, interval
def move_distribute_blocks(
    parent_folder, new_folders, blocks, relation_filepath, template_extension="xlsx"
):
    """Move and distribute equal number of blocks of files to a list of new folders (person names).

    Each block is randomly assigned to one of *new_folders* so that the
    counts stay as balanced as possible, then the blocks for each folder are
    moved via move_blocks.
    """
    # Repeat the shuffled folder list until it covers every block, truncate
    # to len(blocks), then shuffle again so assignment order is random too.
    # The ceil-style repeat count guarantees coverage when len(blocks) is
    # not a multiple of len(new_folders).
    distribution = np.random.permutation(
        np.tile(
            np.random.permutation(new_folders),
            len(blocks) // len(new_folders) + (len(blocks) % len(new_folders) != 0),
        )[: len(blocks)]
    )
    for folder in tqdm(np.unique(new_folders), desc="Distributions"):
        # Blocks assigned to this folder by the random distribution.
        current_blocks = np.array(blocks)[np.where(distribution == folder)]
        move_blocks(
            parent_folder=parent_folder,
            new_folder=folder,
            blocks=current_blocks,
            relation_filepath=relation_filepath,
            template_extension=template_extension,
        )
def get_features(features, featurestore=None, featuregroups_version_dict={}, join_key=None, online=False):
    """
    Gets a list of features (columns) from the featurestore. If no featuregroup is specified it will query hopsworks
    metastore to find where the features are stored. It will try to construct the query first from the cached metadata,
    if that fails it will re-try after reloading the cache

    Example usage:

    >>> # The API will default to version 1 for feature groups and the project's feature store
    >>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
    >>>                                      featurestore=featurestore.project_featurestore())
    >>> #You can also explicitly define feature group, version, feature store, and join-key:
    >>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
    >>>                                      featurestore=featurestore.project_featurestore(),
    >>>                                      featuregroups_version_dict={"trx_graph_summary_features": 1,
    >>>                                      "trx_summary_features": 1}, join_key="cust_id")

    Args:
        :features: a list of features to get from the featurestore
        :featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroups: (Optional) a dict with (fg --> version) for all the featuregroups where the features resides
        :featuregroup_version: the version of the featuregroup, defaults to 1
        :join_key: (Optional) column name to join on
        :online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
                 feature group has online serving enabled)

    Returns:
        A dataframe with all the features
    """
    # try with cached metadata
    try:
        return core._do_get_features(features,
                                     core._get_featurestore_metadata(featurestore,
                                                                     update_cache=update_cache_default),
                                     featurestore=featurestore,
                                     featuregroups_version_dict=featuregroups_version_dict,
                                     join_key=join_key,
                                     online=online)
    # Try again after updating cache.
    # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # narrow to Exception so those still propagate.
    except Exception:
        return core._do_get_features(features, core._get_featurestore_metadata(featurestore, update_cache=True),
                                     featurestore=featurestore,
                                     featuregroups_version_dict=featuregroups_version_dict,
                                     join_key=join_key,
                                     online=online)
def get_flex_bounds(x, samples, nsig=1):
    """
    Here, we wish to report the distribution of the subchunks 'sample'
    along with the value of the full sample 'x'.

    Returns [x, x_lower_bound, x_upper_bound], where the range of the lower
    and upper bound expresses nsig standard deviations of the sample
    distribution, whose mean is often not aligned with x.
    """
    sample_mean = np.mean(samples)
    sample_std = np.std(samples)
    lower_offset = nsig * sample_std + x - sample_mean
    upper_offset = nsig * sample_std + sample_mean - x
    return [x, lower_offset, upper_offset]
def make_intervention_frequencies_plot(medication_df, cols_to_plot, fig_filename=None):
    """
    Given a bunch of binary cols_to_plot, plot the frequency with which they occur in the data overall and in disadvantaged racial/SES groups.
    Two plots: one of absolute risks, one of relative risks (relative to the outgroup).
    Also run some regressions where we see whether pain or KLG better predicts if you'll receive a treatment.

    NOTE(review): only the relative-risk bar chart is actually drawn; the
    absolute-risk subplot and the regressions are commented out / dead code
    (see notes below).
    """
    plt.figure(figsize=[10, 10])
    sns.set_style('whitegrid')
    assert len(set(medication_df['id'])) == len(medication_df)  # should be person-level.
    bar_width = .2
    for group_idx, group in enumerate(['income < $50k', "didn't graduate college", "black"]):
        bar_positions = []
        risks_for_each_group = []  # absolute risks
        relative_risks = []  # risk relative to outgroup.
        labels = []
        current_pos = group_idx * bar_width
        for c in cols_to_plot:
            # NOTE(review): always False since c is drawn from cols_to_plot;
            # dead guard.
            if c not in cols_to_plot:
                continue
            assert set(medication_df[c].dropna()).issubset(set([0, 1]))
            # Skip very rare interventions (< 1% prevalence).
            if medication_df[c].mean() < .01:
                continue
            # NOTE(review): 'overall' is not in the group list above, so this
            # regression branch (and its reference to the seemingly undefined
            # all_rsquareds) never executes.
            if group == 'overall':  # only need to do regressions once.
                raise Exception("This is a bit sketchy because the plot is made using individual-level data, not side-individual level data, so KLG and koos are invalid")
                klg_rsquared = sm.OLS.from_formula('%s ~ C(xrkl)' % c, data=medication_df).fit().rsquared
                koos_rsquared = sm.OLS.from_formula('%s ~ koos_pain_subscore' % c, data=medication_df).fit().rsquared
                combined_model = sm.OLS.from_formula('%s ~ koos_pain_subscore + xrkl' % c, data=medication_df).fit()
                all_rsquareds.append({'intervention': c,
                                      'koos r^2': koos_rsquared,
                                      'klg r^2 with categorial KLG': klg_rsquared,
                                      'koos_beta_in_combined_model with linear KLG': combined_model.params['koos_pain_subscore'],
                                      'klg_beta_in_combined_model with linear KLG': combined_model.params['xrkl']})
            labels.append('%s\n(%1.0f%% overall)' % (c.replace('_', ' '), 100 * medication_df[c].mean()))
            bar_positions.append(current_pos)
            # Absolute risk within the group, and risk relative to its outgroup.
            if group == 'overall':
                risks_for_each_group.append(medication_df[c].mean())
                relative_risks.append(1)
            elif group == 'black':
                risks_for_each_group.append(medication_df.loc[medication_df['race_black'] == 1, c].mean())
                relative_risks.append(medication_df.loc[medication_df['race_black'] == 1, c].mean() /
                                      medication_df.loc[medication_df['race_black'] == 0, c].mean())
            elif group == 'income < $50k':
                risks_for_each_group.append(medication_df.loc[medication_df['binarized_income_at_least_50k'] == 0, c].mean())
                relative_risks.append(medication_df.loc[medication_df['binarized_income_at_least_50k'] == 0, c].mean() /
                                      medication_df.loc[medication_df['binarized_income_at_least_50k'] == 1, c].mean())
            elif group == "didn't graduate college":
                risks_for_each_group.append(medication_df.loc[medication_df['binarized_education_graduated_college'] == 0, c].mean())
                relative_risks.append(medication_df.loc[medication_df['binarized_education_graduated_college'] == 0, c].mean() /
                                      medication_df.loc[medication_df['binarized_education_graduated_college'] == 1, c].mean())
            else:
                raise Exception("invalid col")
            print("%-30s: high SES people are %2.3fx as likely to get %s" % (group, 1 / relative_risks[-1], c.replace('\n', ' ')))
            current_pos += 1
        #plt.subplot(121)
        #plt.barh(bar_positions, risks_for_each_group, label=group, height=bar_width)
        #plt.subplot(111)
        plt.barh(bar_positions, relative_risks, label=group, height=bar_width)
    #plt.subplot(121)
    #plt.yticks([a - bar_width for a in bar_positions], labels)
    #plt.legend()
    #plt.xlabel('Proportion of people reporting use')
    #plt.subplot(122)
    #plt.yticks([])
    fontsize = 18
    plt.yticks([a - bar_width for a in bar_positions], labels, fontsize=fontsize)
    plt.xlabel("Risk relative to outgroup", fontsize=fontsize)
    plt.xticks([0, 0.5, 1, 1.5, 2.0, 2.5], ['0x', '0.5x', '1x', '1.5x', '2.0x', '2.5x'], fontsize=fontsize)
    # Reference line at parity (relative risk of 1).
    plt.plot([1, 1], [min(bar_positions) - 1, max(bar_positions) + 1], linestyle='-', color='black')
    plt.ylim(min(bar_positions) - .5, max(bar_positions) + bar_width / 2)
    plt.legend(fontsize=fontsize - 2)
    plt.subplots_adjust(left=.3)
    if fig_filename is not None:
        plt.savefig(fig_filename, dpi=300)
def _parse_multi_header(headers):
"""
Parse out and return the data necessary for generating ZipkinAttrs.
Returns a dict with the following keys:
'trace_id': str or None
'span_id': str or None
'parent_span_id': str or None
'sampled_str': '0', '1', 'd', or None (defer)
"""
parsed = {
"trace_id": headers.get("X-B3-TraceId", None),
"span_id": headers.get("X-B3-SpanId", None),
"parent_span_id": headers.get("X-B3-ParentSpanId", None),
"sampled_str": headers.get("X-B3-Sampled", None),
}
# Normalize X-B3-Flags and X-B3-Sampled to None, '0', '1', or 'd'
if headers.get("X-B3-Flags") == "1":
parsed["sampled_str"] = "d"
if parsed["sampled_str"] == "true":
parsed["sampled_str"] = "1"
elif parsed["sampled_str"] == "false":
parsed["sampled_str"] = "0"
if parsed["sampled_str"] not in (None, "1", "0", "d"):
raise ValueError("Got invalid X-B3-Sampled: %s" % parsed["sampled_str"])
for k in ("trace_id", "span_id", "parent_span_id"):
if parsed[k] == "":
raise ValueError("Got empty-string %r" % k)
if parsed["trace_id"] and not parsed["span_id"]:
raise ValueError("Got X-B3-TraceId but not X-B3-SpanId")
elif parsed["span_id"] and not parsed["trace_id"]:
raise ValueError("Got X-B3-SpanId but not X-B3-TraceId")
# Handle the common case of no headers at all
if not parsed["trace_id"] and not parsed["sampled_str"]:
raise ValueError() # won't trigger a log message
return parsed | 5,333,664 |
def get_tgimg(img):
    """
    Process the hint image and extract the hint characters.

    :param img: hint image (BGR)
    :return: the original image with character contours outlined in red, a
        strip image with each extracted character framed in a distinct
        colour (in left-to-right order), and the list of per-character
        template images
    :rtype: (img, out, templets) -- annotated original, feature strip, and
        the rotation-corrected character templates
    """
    imgBW = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    h, w = imgBW.shape
    # Otsu thresholding picks the binarization level automatically.
    _, imgBW = cv2.threshold(imgBW, 0, 255,
                             cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Morphological open (erode then dilate) removes small noise specks.
    img2 = cv2.erode(imgBW, None, iterations=3)
    img2 = cv2.dilate(img2, None, iterations=3)
    # Pad by 10px on each side so contours touching the edge are detected.
    out = numpy.full((20 + h, 20 + w), 255, numpy.uint8)
    copy_image(out, 10, 10, img2)
    # NOTE(review): the 3-value return is the OpenCV 3.x findContours API;
    # OpenCV 4.x returns only (contours, hierarchy) -- confirm version.
    out, cnts, hierarchy = cv2.findContours(out, cv2.RETR_LIST,
                                            cv2.CHAIN_APPROX_NONE)
    rects = []
    # cnts[-1] is the outer border contour
    for cnt in cnts[:-1]:
        cnt -= 10  # undo the 10px padding offset
        x1 = cnt[:, :, 0].min()
        y1 = cnt[:, :, 1].min()
        x2 = cnt[:, :, 0].max()
        y2 = cnt[:, :, 1].max()
        # Clamp the bounding box to the original image bounds.
        x1 = 0 if x1 < 0 else x1
        y1 = 0 if y1 < 0 else y1
        x2 = w - 1 if x2 > w - 1 else x2
        y2 = h - 1 if y2 > h - 1 else y2
        rects.append((x1, y1, x2, y2))
        cv2.drawContours(img, cnt, -1, [0, 0, 255])
        # cv2.rectangle(img, (x1, y1), (x2, y2), [0, 0, 255])
    # Sort bounding boxes left-to-right (by x1 first).
    rects.sort()
    out = numpy.full(imgBW.shape, 255, numpy.uint8)
    x0 = spacing = 3
    templets = []
    for x1, y1, x2, y2 in rects:
        imgchar = numpy.full((30, 30), 255, numpy.uint8)
        tmpl = imgBW[y1:y2 + 1, x1:x2 + 1]
        # NOTE(review): value2 / max_value2 are not defined in this function;
        # presumably module-level globals (a rotation control?) -- confirm.
        if value2 != (max_value2 // 2):
            tmpl = rotate_image(tmpl, (max_value2 // 2 - value2) * 10)
        templets.append(tmpl)
        # Center the character vertically in a 30x30 cell, then append it
        # to the output strip.
        copy_image(imgchar, 0, (30 - y2 + y1 - 1) // 2, tmpl)
        copy_image(out, x0, 0, imgchar)
        x0 += x2 - x1 + 1 + spacing
    out = cv2.cvtColor(out, cv2.COLOR_GRAY2BGR)
    # Frame each character in the strip with its own colour.
    i = 0
    x0 = spacing
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(out, (x0, 0), (x0 + x2 - x1 + 1, 29), COLORS[i])
        x0 += x2 - x1 + 1 + spacing
        i += 1
    return img, out, templets
def brand_profitsharing_order_query(self, transaction_id, out_order_no, sub_mchid):
    """Query the result of a brand (chain) profit-sharing order.

    :param transaction_id: WeChat Pay transaction id, e.g. '4208450740201411110007820472'
    :param out_order_no: merchant profit-sharing order number (digits, letters, _-|*@ only), e.g. 'P20150806125346'
    :param sub_mchid: sub-merchant id issued by WeChat Pay, e.g. '1900000109'
    """
    if sub_mchid:
        path = '/v3/brand/profitsharing/orders?sub_mchid=%s' % sub_mchid
    else:
        raise Exception('sub_mchid is not assigned.')
    if transaction_id and out_order_no:
        # Bug fix: the original rebuilt `path` from scratch here (dropping
        # the base URL) and supplied two arguments for three %s
        # placeholders, which raised a TypeError at runtime.
        path = '%s&transaction_id=%s&out_order_no=%s' % (path, transaction_id, out_order_no)
    else:
        raise Exception('transaction_id or out_order_no is not assigned.')
    return self._core.request(path)
def get_controller_from_module(module, cname):
    """
    Extract classes that inherit from BaseController.

    The class name is taken from the module's __controller__ attribute when
    present, otherwise derived from *cname* as '<Cname>Controller'.
    Returns None when no matching class exists in the module.
    """
    if hasattr(module, '__controller__'):
        controller_classname = module.__controller__
    else:
        controller_classname = cname[0].upper() + cname[1:].lower() + 'Controller'
    return module.__dict__.get(controller_classname)
def exp(d: D) -> NumDict:
    """Compute the base-e exponential of d.

    Thin functional wrapper delegating to ``d.exp()``.
    """
    return d.exp()
def test_init_invalid_q():
    """Verify constructor invalid q

    Exercises every way an under-constrained or ill-formed Q format can be
    passed to the FixedPoint constructor and checks the error messages.
    """
    # String not fully specified
    base_conversion = list(uut.properties.StrConv.values())
    errmsg = r"^String literal initialization Q format must be fully constrained\.$"
    for nbit in tools.test_iterator():
        # Random signedness, integer bits m, fractional bits n, and an
        # initializer with the top (sign) bit forced on.
        s = random.randrange(2)
        m = random.randint(s, 1000)
        n = random.randint(m == 0, 1000 - m)
        L = m + n
        init = random.getrandbits(L) | (s << (L-1))
        # Every proper subset of {signed, m, n} leaves the format
        # under-constrained for string-literal initialization.
        invalid_arguments = [
            {},
            {'n': n},
            {'m': m},
            {'m': m, 'n': n},
            {'signed': s},
            {'signed': s, 'n': n},
            {'signed': s, 'm': m},
        ]
        conv = random.choice(base_conversion)
        for args in invalid_arguments:
            with nose.tools.assert_raises_regex(ValueError, errmsg):
                uut.FixedPoint(conv(init), **args)
    # Negative n
    errmsg = r"^Number of fractional bits must be non-negative\.$"
    with nose.tools.assert_raises_regex(ValueError, errmsg):
        uut.FixedPoint(init, n=-1)
    # Negative m
    errmsg = r"^Number of integer bits must be non-negative\.$"
    with nose.tools.assert_raises_regex(ValueError, errmsg):
        uut.FixedPoint(init, m=-max(m, 1))
    # m=0 for signed
    errmsg = r"^Number of integer bits must be at least 1 for signed numbers\.$"
    with nose.tools.assert_raises_regex(ValueError, errmsg):
        uut.FixedPoint(init, signed=1, m=0, n=max(n, 1))
    # Zero word length
    errmsg = r"^Word size \(integer and fractional\) must be positive\.$"
    with nose.tools.assert_raises_regex(ValueError, errmsg):
        uut.FixedPoint(init, m=0, n=0)
def Main(operation, args):
    """Supports 2 operations

    1. Consulting the existing data (get)
    > get ["{address}"]
    2. Inserting data about someone else (certify)
    > certify ["{address}","{caller_address}","{hash}"]

    NOTE(review): appears to be a NEO smart contract (CheckWitness/Log);
    error strings are returned, not raised, by convention.
    """
    if len(args) == 0:
        Log('You need to provide at least 1 parameter - [address]')
        return 'Error: You need to provide at least 1 parameter - [address]'
    address = args[0]
    # Script hashes are 20 bytes; reject anything else.
    if len(address) != 20:
        Log('Wrong address size')
        return 'Error: Wrong address size'
    if operation == 'get':
        return get_certs(address)
    elif operation == 'certify':
        # Caller cannot add certifications to his address
        if CheckWitness(address):
            Log('You cannot add certifications for yourself')
            return 'Error: You cannot add certifications for yourself'
        if 3 != len(args):
            Log('Certify requires 3 parameters - [address] [caller_address] [hash]')
            return 'Error: Certify requires 3 parameters - [address] [caller_address] [hash]'
        caller_address = args[1]
        # To make sure the address is from the caller
        if not CheckWitness(caller_address):
            Log('You need to provide your own address')
            return 'Error: You need to provide your own address'
        content = args[2]
        return add_certification(address, caller_address, content)
    else:
        Log('Invalid Operation')
        return 'Error": "Invalid Operation'
def get_latest_file_list_orig1(input_list, start_time, num_files):
    """
    Return a list of file names, trying to get one from each index file in input_list.
    The starting time is start_time and the number of files to investigate is num_files.

    Entries for which no file is found are reported as the string "None".
    """
    out = []
    for rind in input_list:
        # Create the list of candidate times.
        # Bug fix: the original assigned the result of time_list() to the
        # name `time_list`, shadowing the function and raising a TypeError
        # on the second loop iteration.
        times = time_list(start_time, rind.get_hours() * 3600, num_files)
        # print "rind: dir", rind.get_base_dir(), rind.get_index_date()
        line_list, index_date_list = rind.readlines_list_rev(times, 1)
        flist = get_files(line_list)
        if flist != []:
            out.append("%s/%s/%s" % (rind.get_base_dir(), index_date_list[0], flist[0]))
        else:
            out.append("None")
    print(out)
    return out
def get_ogheader(blob, url=None):
    """Extract Open Graph markup into a dict.

    The OG header section is delimited by a line of only `---`.
    Note that the page title is not provided as Open Graph metadata if
    the image metadata is not specified.
    """
    header = dict()
    delimiter_seen = False
    lines = blob.split('\n')
    for line in lines:
        if line == '---':
            delimiter_seen = True
            break
        if line.startswith('image: '):
            parts = line.split()
            assert len(parts) == 2
            header['image'] = parts[1]
    if not delimiter_seen:
        # No delimiter: ignore any matches as false positives.
        return dict()
    if url is not None:
        assert 'url' not in header
        header['url'] = url
    # The (last) markdown H1 in the blob becomes the title.
    for line in lines:
        if line.startswith('# '):
            header['title'] = line[2:]
    return header
def cleanup():
    """Cleanup the oslo_messaging layer.

    Resets the notifier cache and tears down every cached transport.
    """
    global TRANSPORTS, NOTIFIERS
    NOTIFIERS = {}
    # Bug fix: iterate over a snapshot of the keys -- deleting entries while
    # iterating the dict itself raises RuntimeError on Python 3.
    for url in list(TRANSPORTS):
        TRANSPORTS[url].cleanup()
        del TRANSPORTS[url]
def rm(user, host="submit-3.chtc.wisc.edu", keyfile=None, expr=None,
       verbose=False):
    """Remove condor jobs on a remote machine.

    Parameters
    ----------
    user : str
        the remote machine user name
    host : str, optional
        the remote machine host
    keyfile : str, optional
        a SSH private key file to use
    expr : str, optional
        an expression used to search for jobs
    verbose : str, optional
        print information
    """
    # Open an SSH session, auto-accepting unknown host keys.
    client = pm.SSHClient()
    client.set_missing_host_key_policy(pm.AutoAddPolicy())
    _, keyfile, pw = tools.ssh_test_connect(client, host, user, keyfile=keyfile,
                                            auth=False)
    client.connect(host, username=user, key_filename=keyfile, password=pw)
    print("connecting to {0}@{1}".format(user, host))
    # List the user's jobs and collect the ids of those matching the expression.
    stdin, stdout, stderr = exec_remote_cmd(
        client, "condor_q {user}".format(user=user), verbose=verbose)
    expr = user if expr is None else expr
    pattern = re.compile(expr)
    pids = [line.split()[0] for line in stdout.readlines() if pattern.search(line)]
    if pids:
        stdin, stdout, stderr = exec_remote_cmd(
            client, "condor_rm {pids}".format(pids=" ".join(pids)),
            verbose=verbose)
    else:
        print("No jobs found matching {0}.".format(expr))
    client.close()
def run_pool_exhaust_test(server_host_ip, server_port, num_runs, global_model_real):
    """
    Run the workers loop to exhaust the gevent pool in a stress test.

    This involves running requesting the global model from the server
    for all but one worker 20 times. Since the server only allocates
    enough gevent pool resources for 10 times the number of workers,
    this should exhaust the pool if repeated requests are not taken
    care of properly by the server.

    Parameters
    ----------
    server_host_ip: str
        The ip-address of the host of the server.
    server_port: int
        The port at which the server should listen to
    num_runs: int
        Number of runs of the sending of models etc. to perform
    global_model_real: bool
        If true, the global model returned is a binary serialized version of
        MobileNetV2 that is used in the plantvillage example.
    """
    workers = []
    # Build the binary payload every worker will send as its "model update".
    if global_model_real:
        # Serialize a pretrained MobileNetV2 into an in-memory buffer.
        model_data = io.BytesIO()
        torch.save(models.mobilenet_v2(pretrained=True), model_data)
        bin_model = model_data.getvalue()
    else:
        # Lightweight stand-in payload when a real model is not needed.
        bin_model = msgpack.packb("A 'local model update'!!")
    chunk_str = "1 of 1"
    # One SimpleLPWorker per key file in the stress-test key folder.
    for fn in get_worker_keys_from_chunk(chunk_str):
        workers.append(SimpleLPWorker(
            server_host_ip, server_port,
            os.path.join(STRESS_KEYS_FOLDER, fn))
        )
    num_workers = len(workers)
    for i, worker in enumerate(workers):
        # print(f'Registering {i} th worker')
        worker.worker.register_worker()
    # get the current global model and check
    for worker in workers:
        print(f"Requesting global model for {worker.worker.worker_id}")
        worker.global_model_changed_callback(worker.worker.get_global_model())
    done_count = 0
    def run_wg(gl_worker):
        # Send an update, then long-poll for the resulting global model;
        # bump the shared counter when the round trip finishes.
        nonlocal done_count
        logger.info(f"Starting long poll for {gl_worker.worker.worker_id}")
        gl_worker.worker.send_model_update(bin_model)
        gl_worker.global_model_changed_callback(
            gl_worker.worker.get_global_model())
        logger.info(f"Long poll for {gl_worker.worker.worker_id} finished")
        done_count += 1
    try:
        for run_no in range(num_runs+1):
            logger.info(f"********************** STARTING RUN {run_no + 1}:")
            sleep(5)
            worker_lets = []
            # All runs except the final one leave out the last worker, so the
            # round can never complete and its greenlets must be killed —
            # this is what stresses the server's gevent pool.
            workers_lst = workers[:-1] if run_no < num_runs else workers
            for i, worker in enumerate(workers_lst):
                logger.info(f"Spawning for worker {i}")
                worker_lets.append(Greenlet.spawn(run_wg, worker))
                if (i + 1) % 10 == 0:
                    sleep(0.5)
            if run_no < num_runs:
                # Abort the intentionally-incomplete run.
                for i, worker_let in enumerate(worker_lets):
                    logger.info(f"Killing worker {i}")
                    worker_let.kill()
                continue
            # Final run: block until every worker reports completion.
            while done_count < num_workers:
                sleep(1)
                logger.info(
                    f"{done_count} workers have received the global model update - need to get to {num_workers}...")
            done_count = 0
    except Exception as e:
        print(e)
        exit()
def check_key(k, expected_keys):
    """Warn when *k* is not an expected key (DAC/ADC prefixes always pass)."""
    if k in expected_keys:
        return
    if k[0:3] in ['DAC', 'ADC']:  # TODO: This should be checked better
        return
    logger.warning(f'Unexpected key in logs: k = {k}, Expected = {expected_keys}')
    return
def variable_accessed(variable):
    """Notifies all tapes in the stack that a variable has been accessed.

    Args:
      variable: variable to be watched.
    """
    strategy, context = (
        distribution_strategy_context.get_strategy_and_replica_context())
    # Inside a replica context, notify about the containing value; otherwise
    # notify about each of the strategy's local results.
    accessed = (
        [strategy.extended.value_container(variable)]
        if context
        else strategy.experimental_local_results(variable)
    )
    for item in accessed:
        pywrap_tensorflow.TFE_Py_TapeVariableAccessed(item)
def list_ingredient():
    """Return a JSON listing of every ingredient stored in the database."""
    collection = IngredientCollection()
    collection.load_all()
    payload = [model.to_dict() for model in collection.models]
    return jsonify(ingredients=payload)
def ParseSavedQueries(cnxn, post_data, project_service, prefix=''):
    """Parse form data for the Saved Queries part of an admin form.

    Note: this code targets Python 2 (``xrange`` is used below).

    Args:
        cnxn: database connection, passed through to project_service.
        post_data: dict-like POSTed form data.
        project_service: service whose LookupProjectIDs resolves project
            names to IDs.
        prefix: optional prefix applied to every form field name.

    Returns:
        A list of saved-query objects built via tracker_bizobj.MakeSavedQuery.
    """
    saved_queries = []
    # Form fields are numbered 1..MAX_QUERIES; probe each slot in turn.
    for i in xrange(1, MAX_QUERIES + 1):
        if ('%ssavedquery_name_%s' % (prefix, i)) not in post_data:
            continue  # skip any entries that are blank or have no predicate.
        name = post_data['%ssavedquery_name_%s' % (prefix, i)].strip()
        if not name:
            continue  # skip any blank entries
        if '%ssavedquery_id_%s' % (prefix, i) in post_data:
            query_id = int(post_data['%ssavedquery_id_%s' % (prefix, i)])
        else:
            query_id = None  # a new query_id will be generated by the DB.
        project_names_str = post_data.get(
            '%ssavedquery_projects_%s' % (prefix, i), '')
        # The character class '[],;\s]+' splits on ']', ',', ';' and
        # whitespace — the leading ']' is a literal inside the class.
        project_names = [pn.strip().lower()
                         for pn in re.split('[],;\s]+', project_names_str)
                         if pn.strip()]
        project_ids = project_service.LookupProjectIDs(
            cnxn, project_names).values()
        base_id = int(post_data['%ssavedquery_base_%s' % (prefix, i)])
        query = post_data['%ssavedquery_query_%s' % (prefix, i)].strip()
        subscription_mode_field = '%ssavedquery_sub_mode_%s' % (prefix, i)
        if subscription_mode_field in post_data:
            subscription_mode = post_data[subscription_mode_field].strip()
        else:
            subscription_mode = None
        saved_queries.append(tracker_bizobj.MakeSavedQuery(
            query_id, name, base_id, query, subscription_mode=subscription_mode,
            executes_in_project_ids=project_ids))
    return saved_queries
def label_anchors(anchors, anchor_is_untruncated, gt_classes, gt_bboxes, background_id, iou_low_threshold=0.41, iou_high_threshold=0.61):
    """ Get the labels of the anchors. Each anchor can be labeled as positive (1), negative (0) or ambiguous (-1). Truncated anchors are always labeled as ambiguous.

    Args:
        anchors: (n, 4) array of anchor boxes.
        anchor_is_untruncated: length-n indicator; 0 marks a truncated anchor.
        gt_classes: length-k array of ground-truth class ids.
        gt_bboxes: (k, 4) array of ground-truth boxes.
        background_id: class id assigned to non-positive anchors.
        iou_low_threshold: anchors with max IoU below this become negative.
        iou_high_threshold: anchors with max IoU at or above this become positive.

    Returns:
        labels, matched bboxes, matched classes, and per-anchor max IoU/IoA/IoG
        (overlap metrics are set to -1 for truncated anchors).
    """
    n = anchors.shape[0]
    k = gt_bboxes.shape[0]
    # Compute the IoUs of the anchors and ground truth boxes:
    # tile both sides so every (anchor, gt) pair is compared in one call.
    tiled_anchors = np.tile(np.expand_dims(anchors, 1), (1, k, 1))
    tiled_gt_bboxes = np.tile(np.expand_dims(gt_bboxes, 0), (n, 1, 1))
    tiled_anchors = tiled_anchors.reshape((-1, 4))
    tiled_gt_bboxes = tiled_gt_bboxes.reshape((-1, 4))
    ious, ioas, iogs = iou_bbox(tiled_anchors, tiled_gt_bboxes)
    ious = ious.reshape(n, k)
    ioas = ioas.reshape(n, k)
    iogs = iogs.reshape(n, k)
    # Label each anchor based on its max IoU
    max_ious = np.max(ious, axis=1)
    max_ioas = np.max(ioas, axis=1)
    max_iogs = np.max(iogs, axis=1)
    best_gt_bbox_ids = np.argmax(ious, axis=1)
    # Start everything as ambiguous (-1), then mark positives and negatives.
    labels = -np.ones((n), np.int32)
    positive_idx = np.where(max_ious >= iou_high_threshold)[0]
    negative_idx = np.where(max_ious < iou_low_threshold)[0]
    labels[positive_idx] = 1
    labels[negative_idx] = 0
    # Truncated anchors are always ambiguous
    ignore_idx = np.where(anchor_is_untruncated==0)[0]
    labels[ignore_idx] = -1
    # Each anchor inherits the box/class of its best-overlapping ground truth.
    bboxes = gt_bboxes[best_gt_bbox_ids]
    classes = gt_classes[best_gt_bbox_ids]
    # NOTE(review): labels < 1 covers both negatives (0) and ambiguous (-1),
    # so ambiguous anchors also get the background class — confirm intended.
    classes[np.where(labels<1)[0]] = background_id
    # Invalidate the overlap metrics of truncated anchors.
    max_ious[np.where(anchor_is_untruncated==0)[0]] = -1
    max_ioas[np.where(anchor_is_untruncated==0)[0]] = -1
    max_iogs[np.where(anchor_is_untruncated==0)[0]] = -1
    return labels, bboxes, classes, max_ious, max_ioas, max_iogs
def make_hash_md5(obj):
    """Return the hex MD5 digest of *obj*.

    The object is first normalized with ``make_hashable`` and the repr of
    the result is what gets hashed.

    Args:
        obj (any): anything that can be hashed.

    Returns:
        hash (str): hash from object.
    """
    digest_source = repr(make_hashable(obj)).encode()
    return hashlib.md5(digest_source).hexdigest()
def fbx_data_bindpose_element(root, me_obj, me, scene_data, arm_obj=None, mat_world_arm=None, bones=()):
    """
    Helper, since bindpose are used by both meshes shape keys and armature bones...

    Writes an FBX "BindPose" Pose element containing one PoseNode for the
    mesh object, one for the armature object (when distinct) and one per
    bone.  All matrices are written in global (world) space.

    Note: the `bones` default was changed from the mutable `[]` to the
    immutable `()` (same behavior, avoids the shared-mutable-default trap).

    Returns:
        (mat_world_obj, mat_world_bones): the mesh object's world matrix and
        a dict mapping each bone object to its world rest matrix.
    """
    if arm_obj is None:
        arm_obj = me_obj
    # We assume bind pose for our bones are their "Editmode" pose...
    # All matrices are expected in global (world) space.
    bindpose_key = get_blender_bindpose_key(arm_obj.bdata, me)
    fbx_pose = elem_data_single_int64(root, b"Pose", get_fbx_uuid_from_key(bindpose_key))
    fbx_pose.add_string(fbx_name_class(me.name.encode(), b"Pose"))
    fbx_pose.add_string(b"BindPose")
    elem_data_single_string(fbx_pose, b"Type", b"BindPose")
    elem_data_single_int32(fbx_pose, b"Version", FBX_POSE_BIND_VERSION)
    elem_data_single_int32(fbx_pose, b"NbPoseNodes", 1 + (1 if (arm_obj != me_obj) else 0) + len(bones))
    # First node is mesh/object.
    mat_world_obj = me_obj.fbx_object_matrix(scene_data, global_space=True)
    fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
    elem_data_single_int64(fbx_posenode, b"Node", me_obj.fbx_uuid)
    elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_obj))
    # Second node is armature object itself.
    if arm_obj != me_obj:
        fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
        elem_data_single_int64(fbx_posenode, b"Node", arm_obj.fbx_uuid)
        elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_arm))
    # And all bones of armature!
    mat_world_bones = {}
    for bo_obj in bones:
        bomat = bo_obj.fbx_object_matrix(scene_data, rest=True, global_space=True)
        mat_world_bones[bo_obj] = bomat
        fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
        elem_data_single_int64(fbx_posenode, b"Node", bo_obj.fbx_uuid)
        elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(bomat))
    return mat_world_obj, mat_world_bones
def lambda_(
    db_path,
    table,
    columns,
    code,
    imports,
    dry_run,
    multi,
    output,
    output_type,
    drop,
    silent,
):
    """
    Transform columns using Python code you supply. For example:
    \b
        $ sqlite-transform lambda my.db mytable mycolumn
        --code='"\\n".join(textwrap.wrap(value, 10))'
        --import=textwrap
    "value" is a variable with the column value to be transformed.
    """
    if output is not None and len(columns) > 1:
        raise click.ClickException("Cannot use --output with more than one column")
    if multi and len(columns) > 1:
        raise click.ClickException("Cannot use --multi with more than one column")
    # If single line and no 'return', add the return
    if "\n" not in code and not code.strip().startswith("return "):
        code = "return {}".format(code)
    # Compile the code into a function body called fn(value)
    new_code = ["def fn(value):"]
    for line in code.split("\n"):
        new_code.append("    {}".format(line))
    code_o = compile("\n".join(new_code), "<string>", "exec")
    # BUG FIX (idiom): the original named these dicts `locals` and `globals`,
    # shadowing the builtins of the same names.  Renamed.
    local_env = {}
    global_env = {}
    for import_ in imports:
        global_env[import_] = __import__(import_)
    exec(code_o, global_env, local_env)
    fn = local_env["fn"]
    if dry_run:
        # Pull first 20 values for first column and preview them.
        # NOTE: column/table names are interpolated into the SQL; they come
        # directly from the CLI arguments of this tool.
        db = sqlite3.connect(db_path)
        db.create_function("preview_transform", 1, lambda v: fn(v) if v else v)
        sql = """
        select
            [{column}] as value,
            preview_transform([{column}]) as preview
        from [{table}] limit 10
        """.format(
            column=columns[0], table=table
        )
        for row in db.execute(sql).fetchall():
            print(row[0])
            print(" --- becomes:")
            print(row[1])
            print()
    elif multi:
        _transform_multi(db_path, table, columns[0], fn, drop, silent)
    else:
        _transform(db_path, table, columns, fn, output, output_type, drop, silent)
def partitioned_rml_estimator(y, sigma2i, iterations=50):
    """
    Implementation of the robust maximum likelihood estimator.

    Variants (columns) are partitioned by how many replicates are missing
    (NaN), and each dense partition is scored with ``rml_estimator``.

    Parameters
    ----------
    y : :py:class:`~numpy.ndarray`, (n_replicates, n_variants)
        The variant scores matrix
    sigma2i : :py:class:`~numpy.ndarray`, (n_replicates, n_variants)
        The score variance matrix
    iterations : `int`
        Number of iterations to perform.

    Returns
    -------
    `tuple`
        Tuple of :py:class:`~numpy.ndarray` objects, corresponding to
        ``betaML``, ``var_betaML``, ``eps``, ``nreps``.

    Notes
    -----
    @book{demidenko2013mixed,
        title={Mixed models: theory and applications with R},
        author={Demidenko, Eugene},
        year={2013},
        publisher={John Wiley \& Sons}
    }
    """
    # Initialize each array to be have len number of variants
    # (NaN marks variants that never get an estimate).
    max_replicates = y.shape[0]
    betaML = np.zeros(shape=(y.shape[1],)) * np.nan
    var_betaML = np.zeros(shape=(y.shape[1],)) * np.nan
    eps = np.zeros(shape=(y.shape[1],)) * np.nan
    nreps = np.zeros(shape=(y.shape[1],)) * np.nan
    # Number of missing replicates per variant.
    y_num_nans = np.sum(np.isnan(y), axis=0)
    # NOTE(review): k stops at max_replicates - 2, so variants observed in
    # only one replicate (max_replicates - 1 NaNs) are never estimated and
    # keep their NaN placeholders — confirm this is intentional.
    for k in range(0, max_replicates - 1, 1):
        # Partition y based on the number of NaNs a column has,
        # corresponding to the number of replicates a variant has
        # across selections.
        selector = y_num_nans == k
        if np.sum(selector) == 0:
            continue
        # Drop the NaN rows so each partition becomes a dense matrix.
        y_k = np.apply_along_axis(lambda col: col[~np.isnan(col)], 0, y[:, selector])
        sigma2i_k = np.apply_along_axis(
            lambda col: col[~np.isnan(col)], 0, sigma2i[:, selector]
        )
        betaML_k, var_betaML_k, eps_k = rml_estimator(y_k, sigma2i_k, iterations)
        # Handles the case when SE is 0 resulting in NaN values.
        betaML_k[np.isnan(betaML_k)] = 0.0
        var_betaML_k[np.isnan(var_betaML_k)] = 0.0
        eps_k[np.isnan(eps_k)] = 0.0
        betaML[selector] = betaML_k
        var_betaML[selector] = var_betaML_k
        eps[selector] = eps_k
        nreps[selector] = max_replicates - k
    return betaML, var_betaML, eps, nreps
def inv_kinema_cal_3(JOINT_ANGLE_OFFSET, L, H, position_to_move):
    """Solve the inverse kinematics analytically.

    Solves the inverse-kinematics problem in closed form, adding a joint-angle
    constraint that the fingertip orientation equals eta; candidate eta values
    are swept in 1-degree steps and four solution branches (a-d) are filtered
    against the joint limits.

    Args:
        JOINT_ANGLE_OFFSET: joint limit offsets in degrees (used below to
            filter solutions against the allowed joint ranges).
        L: array of link lengths, nd.array(6), unit [m].
        H: array of link heights, nd.array(1), unit [m].
        position_to_move: target position (Cartesian), nd.array((3, 1)), unit [m].
    Returns (on success): joint-angle array nd.array((6)), unit [deg].
    Returns (on failure, per the original docstring): joint-angle array
        (90, 90, 90, 90, 90, 0) regardless of arguments — NOTE(review): no
        explicit failure path is visible in this body; confirm.
    Note: the returned q_3 and q_4 differ from the servo's angle definition.
    """
    final_offset = 0.012
    #final_offset = 0
    # Cylindrical-coordinate representation of position_to_move (target position)
    r_before = math.sqrt(position_to_move[0, 0] ** 2 + position_to_move[1, 0] ** 2) + 0.03
    r_to_move = math.sqrt(r_before ** 2 + final_offset ** 2) # [m]
    #r_to_move = math.sqrt(r_before ** 2) # [m]
    #theta_to_move = np.arctan2(position_to_move[1, 0], position_to_move[0, 0]) # [rad]
    theta_to_move = np.arctan2(position_to_move[1, 0], position_to_move[0, 0]) - np.arcsin(final_offset / r_before) # [rad]
    #theta_to_move = np.arccos(position_to_move[0, 0] / r_to_move) - np.arcsin(final_offset / r_before) # [rad]
    z_to_move = position_to_move[2, 0] # [m]
    print('移動先の円筒座標系表現は\n', r_to_move, '[m]\n', int(theta_to_move * 180 / np.pi), '[°]\n', z_to_move, '[m]')
    # Constants defined for the computation
    A = L[2]
    B = L[3]
    # Analytical inverse-kinematics computation
    #old1 = time.time()
    deta = np.pi / 180 # step size of eta; evaluated in 1 [deg] increments
    eta = np.arange(0, np.pi + deta, deta, dtype = 'float64') # array of all candidate eta values
    print('etaの形は', eta.shape)
    # Pattern a
    q_2_a = np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
        / (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
        - np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
    qlist_a_1 = np.concatenate([[eta], [q_2_a]], 0) # concatenate vertically
    qlist_a_2 = np.delete(qlist_a_1, np.where((np.isnan(qlist_a_1)) | (qlist_a_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180)) < qlist_a_1))[1], 1) # drop columns where q_2_a is NaN or outside the joint limits
    q_3_a = np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_a_2[0, :]) - H[0] * np.sin(qlist_a_2[0, :])- A * np.cos(qlist_a_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_a_2[0, :]) - H[0] * np.cos(qlist_a_2[0, :]) - A * np.sin(qlist_a_2[1, :])) \
        / (np.sqrt(2) * B)) - qlist_a_2[1, :] + np.pi / 4 # [rad]
    qlist_a_3 = np.concatenate([qlist_a_2, [q_3_a]], 0) # concatenate vertically
    qlist_a_4 = np.delete(qlist_a_3, np.where((np.isnan(qlist_a_3)) | (qlist_a_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_a_3))[1], 1) # drop columns where q_3_a is NaN or outside the joint limits
    q_4_a = -qlist_a_4[0, :] + np.pi - qlist_a_4[1, :] - qlist_a_4[2, :]
    qlist_a_5 = np.concatenate([qlist_a_4, [q_4_a]], 0) # concatenate vertically
    qlist_a_6 = np.delete(qlist_a_5, np.where((qlist_a_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_a_5))[1], 1) # drop columns where q_4_a is outside the joint limits
    #print('qlist_a_6の形は', qlist_a_6.shape)
    #print('qlist_a_6 = ', qlist_a_6)
    # Pattern b
    q_2_b = np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
        / (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
        - np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
    qlist_b_1 = np.concatenate([[eta], [q_2_b]], 0) # concatenate vertically
    qlist_b_2 = np.delete(qlist_b_1, np.where((np.isnan(qlist_b_1)) | (qlist_b_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180))< qlist_a_1))[1], 1) # drop columns where q_2_b is NaN or outside the joint limits (NOTE(review): compares against qlist_a_1, not qlist_b_1 — possible copy-paste slip; confirm)
    q_3_b = np.pi - np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_b_2[0, :]) - H[0] * np.sin(qlist_b_2[0, :])- A * np.cos(qlist_b_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_b_2[0, :]) - H[0] * np.cos(qlist_b_2[0, :]) - A * np.sin(qlist_b_2[1, :])) \
        / (np.sqrt(2) * B)) - qlist_b_2[1, :] + np.pi / 4 # [rad]
    qlist_b_3 = np.concatenate([qlist_b_2, [q_3_b]], 0) # concatenate vertically
    qlist_b_4 = np.delete(qlist_b_3, np.where((np.isnan(qlist_b_3)) | (qlist_b_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_b_3))[1], 1) # drop columns where q_3_b is NaN or outside the joint limits
    q_4_b = -qlist_b_4[0, :] + np.pi - qlist_b_4[1, :] - qlist_b_4[2, :]
    qlist_b_5 = np.concatenate([qlist_b_4, [q_4_b]], 0) # concatenate vertically
    qlist_b_6 = np.delete(qlist_b_5, np.where((qlist_b_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_b_5))[1], 1) # drop columns where q_4_b is outside the joint limits
    #print('qlist_b_6の形は', qlist_b_6.shape)
    #print('qlist_b_6 = ', qlist_b_6)
    # Pattern c
    q_2_c = np.pi - np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
        / (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
        - np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
    qlist_c_1 = np.concatenate([[eta], [q_2_c]], 0) # concatenate vertically
    qlist_c_2 = np.delete(qlist_c_1, np.where((np.isnan(qlist_c_1)) | (qlist_c_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180))< qlist_a_1))[1], 1) # drop columns where q_2_c is NaN or outside the joint limits (NOTE(review): compares against qlist_a_1 — confirm)
    q_3_c = np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_c_2[0, :]) - H[0] * np.sin(qlist_c_2[0, :])- A * np.cos(qlist_c_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_c_2[0, :]) - H[0] * np.cos(qlist_c_2[0, :]) - A * np.sin(qlist_c_2[1, :])) \
        / (np.sqrt(2) * B)) - qlist_c_2[1, :] + np.pi / 4 # [rad]
    qlist_c_3 = np.concatenate([qlist_c_2, [q_3_c]], 0) # concatenate vertically
    qlist_c_4 = np.delete(qlist_c_3, np.where((np.isnan(qlist_c_3)) | (qlist_c_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_c_3))[1], 1) # drop columns where q_3_c is NaN or outside the joint limits
    q_4_c = -qlist_c_4[0, :] + np.pi - qlist_c_4[1, :] - qlist_c_4[2, :]
    qlist_c_5 = np.concatenate([qlist_c_4, [q_4_c]], 0) # concatenate vertically
    qlist_c_6 = np.delete(qlist_c_5, np.where((qlist_c_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_c_5))[1], 1) # drop columns where q_4_c is outside the joint limits
    #print('qlist_c_6の形は', qlist_c_6.shape)
    #print('qlist_c_6 = ', (qlist_c_6 * 180 / np.pi).astype('int64'))
    # Pattern d
    q_2_d = np.pi - np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
        / (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
        - np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
    qlist_d_1 = np.concatenate([[eta], [q_2_d]], 0) # concatenate vertically
    qlist_d_2 = np.delete(qlist_d_1, np.where((np.isnan(qlist_d_1)) | (qlist_d_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180))< qlist_a_1))[1], 1) # drop columns where q_2_d is NaN or outside the joint limits (NOTE(review): compares against qlist_a_1 — confirm)
    q_3_d = np.pi - np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_d_2[0, :]) - H[0] * np.sin(qlist_d_2[0, :])- A * np.cos(qlist_d_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_d_2[0, :]) - H[0] * np.cos(qlist_d_2[0, :]) - A * np.sin(qlist_d_2[1, :])) \
        / (np.sqrt(2) * B)) - qlist_d_2[1, :] + np.pi / 4 # [rad]
    qlist_d_3 = np.concatenate([qlist_d_2, [q_3_d]], 0) # concatenate vertically
    qlist_d_4 = np.delete(qlist_d_3, np.where((np.isnan(qlist_d_3)) | (qlist_d_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_d_3))[1], 1) # drop columns where q_3_d is NaN or outside the joint limits
    q_4_d = -qlist_d_4[0, :] + np.pi - qlist_d_4[1, :] - qlist_d_4[2, :]
    qlist_d_5 = np.concatenate([qlist_d_4, [q_4_d]], 0) # concatenate vertically
    qlist_d_6 = np.delete(qlist_d_5, np.where((qlist_d_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_d_5))[1], 1) # drop columns where q_4_d is outside the joint limits
    #print('qlist_d_6の形は', qlist_d_6.shape)
    #print('qlist_d_6 = ', qlist_d_6)
    #print('ベクトル化で計算', time.time() - old1,'[s]')
    qlist_abcd_6 = np.concatenate([qlist_a_6, qlist_b_6, qlist_c_6, qlist_d_6], 1) # concatenate the surviving solutions of patterns a, b, c, d horizontally
    print(qlist_abcd_6)
    qlist_q2norm = np.abs(np.pi / 2 - qlist_abcd_6[1, :]) # absolute value of (pi/2 - q_2)
    print(qlist_q2norm)
    qlist_abcd_62 = np.concatenate([qlist_abcd_6, [qlist_q2norm]], 0) # concatenate vertically
    print(qlist_abcd_62)
    k = np.where(qlist_abcd_62[4, :] == np.min(qlist_abcd_62[4, :])) # index tuple of the column whose q_2 is closest to pi/2
    print(k)
    print(qlist_abcd_62[:, k])
    # Convert to servo command angles and cast to int (pyFirmata PWM only accepts integer commands)
    # NOTE(review): int() below assumes the minimum is unique; if several
    # columns tie, the indexing yields a multi-element array — confirm.
    q_1_command = int(np.round(theta_to_move * 180 / np.pi)) # [°]
    q_2_command = int(np.round(qlist_abcd_62[1, k] * 180 / np.pi)) # [°]
    q_3_command = int(np.round(qlist_abcd_62[2, k] * 180 / np.pi)) # [°]
    q_4_command = int(np.round(qlist_abcd_62[3, k] * 180 / np.pi)) # [°]
    q_5_command = int(np.round(np.pi / 2 * 180 / np.pi)) # [°]
    q_6_command = int(np.round(0 * 180 / np.pi)) # [°]
    z = np.array([q_1_command, q_2_command, q_3_command, q_4_command, q_5_command, q_6_command])
    print(z)
    return z
def create(path):
    """Build a domain tree from a file and print it.

    Lets the user decide whether to overwrite an already-existing tree.

    Parameters:
        path (str): Path to the file containing the domains.
    """
    global root
    if not root.is_leaf:
        opt = input("Ya existe un árbol. Desea sobreescribirlo? Y/N ")
        if opt == "N":
            return
        root = Node(".")
    if not os.path.isfile(path):
        print(f"El path {path} no es un archivo válido")
        return
    # BUG FIX: the original opened the file and never closed it; use a
    # context manager so the handle is always released.
    with open(path, 'r') as file:
        for line in file:
            add(line.rstrip())
    print_tree()
def get_log_filename(log_directory, device_name, name_prefix=""):
    """Returns the full path of log filename using the information provided.

    Args:
        log_directory (path): to where the log file should be created.
        device_name (str): to use in the log filename
        name_prefix (str): string to prepend to the start of the log file.

    Returns:
        str: Path to log filename using the information provided.
    """
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    # The filename is "<prefix>-<device>-<timestamp>.txt", with the prefix
    # segment omitted entirely when no prefix was given.
    segments = [device_name, timestamp]
    if name_prefix:
        segments.insert(0, name_prefix)
    return os.path.join(log_directory, "-".join(segments) + ".txt")
def getTime(dataPath, indatatype, grdROMS, grdMODEL, year, month, day, mytime, firstRun):
    """
    Create a date object to keep track of Julian dates etc.
    Also create a reference date starting at 1948/01/01.
    Go here to check results:http://lena.gsfc.nasa.gov/lenaDEV/html/doy_conv.html

    Side effects: stores the elapsed time, reference time and time units on
    grdROMS (grdROMS.time, grdROMS.reftime, grdROMS.timeunits).
    """
    # Resolve the input filename for the requested dataset type.
    if indatatype == 'SODA':
        filename = getSODAfilename(year, month, day, None, dataPath)
    if indatatype == 'SODA3':
        filename = getSODA3filename(year, month, day, None, dataPath)
    if indatatype == 'SODAMONTHLY':
        filename = getSODAMONTHLYfilename(year, month, day, None, dataPath)
    if indatatype == 'GLORYS':
        filename = getGLORYSfilename(year, month, day, "S", dataPath)
    if indatatype == 'WOAMONTHLY':
        filename = getWOAMONTHLYfilename(year, month, day, "temperature", dataPath)
    if indatatype == 'NORESM':
        filename = getNORESMfilename(year, month, day, "saln", dataPath)
    if indatatype == 'NS8KM':
        filename = getNS8KMfilename(year, month, day, "salt", dataPath)
    if indatatype == 'NS8KMZ':
        filename, readFromOneFile = getNS8KMZfilename(year, month, day, "salt", dataPath)
    # Now open the input file and get the time
    cdf = Dataset(filename)
    # Reference Julian date per dataset: NORESM counts from 1800-01-01,
    # everything else from 1948-01-01 (with per-dataset time variables).
    if (indatatype)=='NORESM':
        jdref = date2num(datetime(1800,1,1),cdf.variables["time"].units,calendar=cdf.variables["time"].calendar)
    elif indatatype=='NS8KMZ':
        jdref = date2num(datetime(1948,1,1),units="days since 1948-01-01 00:00:00",calendar="standard")
    elif (indatatype)=='GLORYS':
        jdref = date2num(datetime(1948,1,1),cdf.variables["time_counter"].units,calendar=cdf.variables["time_counter"].calendar)
    elif indatatype=='NS8KM':
        jdref = date2num(datetime(1948,1,1),cdf.variables["ocean_time"].units,calendar=cdf.variables["ocean_time"].calendar)
    elif indatatype=='SODA3':
        jdref = date2num(datetime(1948,1,1),units="days since 1948-01-01 00:00:00",calendar="standard")
    else:
        jdref = date2num(datetime(1948,1,1),cdf.variables["time"].units,calendar=cdf.variables["time"].calendar)
    if indatatype == 'SODA':
        # Find the day and month that the SODA file respresents based on the year and ID number.
        # Each SODA file represents a 5 day average, therefore we let the date we find be the first day
        # of those 5 days. Thats the reason we subtract 4 below for day of month.
        # NOTE(review): `ID` is not defined anywhere in this function and the
        # mid-function `import date` refers to a project-local module
        # (NumberDaysMonth is not in the stdlib) — this branch looks dead or
        # broken; confirm before relying on it.
        import date
        days = 0.0; month = 1; loop = True
        while loop is True:
            d = date.NumberDaysMonth(month, year)
            if days + d < int(ID) * 5:
                days = days + d
                month += 1
            else:
                day = int(int(ID) * 5 - days)
                loop = False
        mycalendar = cdf.variables["time"].calendar
        myunits = cdf.variables["time"].units
        currentdate = datetime(year, month, day)
        jd = date2num(currentdate,units="days since 1948-01-01 00:00:00",calendar="standard")
    if indatatype == 'SODAMONTHLY':
        # Find the day and month that the SODAMONTHLY file respresents based on the year and ID number.
        # Each SODA file represents a 1 month average.
        mycalendar = cdf.variables["time"].calendar
        myunits = cdf.variables["time"].units
        currentdate = datetime(year, month, day)
        jd = date2num(currentdate, myunits, calendar=mycalendar)
    if indatatype == 'SODA3':
        # Each SODA file represents 12 month averages.
        #mycalendar = cdf.variables["time"].calendar
        myunits = cdf.variables["time"].units
        currentdate = datetime(year, month, day)
        jd = date2num(currentdate,units="days since 1948-01-01 00:00:00",calendar="standard")
    if indatatype == 'GLORYS':
        # Find the day and month that the GLORYS file respresents based on the year and ID number.
        # Each file represents a 1 month average.
        mycalendar = cdf.variables["time_counter"].calendar
        myunits = cdf.variables["time_counter"].units
        currentdate = datetime(year, month, day)
        jd = date2num(currentdate, myunits, calendar=mycalendar)
    if indatatype == 'NS8KM':
        # Find the day and month that the GLORYS file respresents based on the year and ID number.
        # Each file represents a 1 month average.
        mycalendar = cdf.variables["ocean_time"].calendar
        myunits = cdf.variables["ocean_time"].units
        currentdate = datetime(year, month, day)
        jd = date2num(currentdate, myunits, calendar=mycalendar)
    if indatatype == 'NS8KMZ':
        # Find the day and month that the GLORYS file respresents based on the year and ID number.
        # Each file represents a 1 month average.
        mycalendar = "gregorian"
        refdate = datetime(1948, 1, 1)
        currentdate = datetime(year, month, day)
        myunits = cdf.variables["time"].units
        jd = date2num(currentdate, myunits, calendar="gregorian")
        print("Days:", jd, currentdate," year month day ",year, month, day)
    if indatatype == 'NORESM':
        # Find the day and month that the NORESM file. We need to use the time modules from
        # netcdf4 for python as they handle calendars that are no_leap.
        # http://www.esrl.noaa.gov/psd/people/jeffrey.s.whitaker/python/netcdftime.html#datetime
        mydays = cdf.variables["time"][0]
        mycalendar = cdf.variables["time"].calendar
        myunits = cdf.variables["time"].units
        # Fake the start date first time around
        # if (firstRun):
        #    currentdate = datetime(2006,1,1)
        #    print "NOTICE!\n First datestamp in result files are hardcoded to %s"%(currentdate)
        # else:
        currentdate = num2date(mydays, units=myunits, calendar=mycalendar)
        jd = date2num(currentdate, myunits, calendar='noleap')
    # Record elapsed time (relative to the reference date) on the grid object.
    grdROMS.time = (jd - jdref)
    grdROMS.reftime = jdref
    grdROMS.timeunits=myunits
    cdf.close()
    print('\nCurrent time of %s file : %s' % (indatatype, currentdate))
def delete_voting(request, slug):
    """Delete voting view."""
    if request.method == 'POST':
        poll = get_object_or_404(Poll, slug=slug)
        # Automated polls are removed through their bug: deleting the bug
        # triggers a cascade delete that also removes the poll.
        related_bugs = Bug.objects.filter(id=poll.bug.id) if poll.automated_poll else ()
        if related_bugs:
            related_bugs.delete()
        else:
            poll.delete()
        messages.success(request, 'Voting successfully deleted.')
        statsd.incr('voting.delete_voting')
    return redirect('voting_list_votings')
def http_delete_request(
    portia_config: dict,
    endpoint: str,
    payload: dict=None,
    params: dict=None,
    optional_headers: dict=None
) -> object:
    """Makes an HTTP DELETE request.

    Arguments:
        portia_config {dict} -- Portia's configuration arguments

        endpoint {str} -- endpoint to make the request to

    Keyword Arguments:
        payload {dict} -- payload to send to the service (default: {None})

        params {dict} -- params to send to the service (default: {None})

        optional_headers {dict} -- dictionary with other headers
        (default: {None})

    Returns:
        object -- response object
    """
    request_headers = {
        'Authorization': 'Bearer {0}'.format(portia_config.get('authorization'))
    }
    if optional_headers is not None:
        request_headers.update(optional_headers)
    url = '{0}{1}'.format(portia_config.get('baseurl'), endpoint)
    start = time.time()
    response = requests.delete(
        url,
        headers=request_headers,
        params=params,
        json=payload
    )
    elapsed = time.time() - start
    if portia_config.get('debug') == True:
        print(
            '[portia-debug]: status: {0} | {1:.4f} sec. | {2}' \
            .format(response.status_code, elapsed, response.url.encode('utf8'))
        )
    return response
def test_parse_annotations_in_all_sections(parse_numpy, docstring, name):
    """Assert annotations are parsed in all relevant sections.

    Parameters:
        parse_numpy: Fixture parser.
        docstring: Parametrized docstring.
        name: Parametrized name in annotation.
    """
    rendered = docstring.format(name=name)
    sections, _ = parse_numpy(rendered, parent=Function("f"))
    assert len(sections) == 1
    assert sections[0].value[0].annotation == Name(name, name)
def action_copy(sourcepaths, targetpaths):
    """Copy a file or directory.

    Both source and target are given as path lists: a single-element list
    is a plain filesystem path, while a multi-element list addresses an
    entry inside a (possibly nested) archive, with the last element being
    the in-archive path.  The four branches below cover every combination
    of filesystem/archive source and target.
    """
    format = request.format
    # Copying a symlink/junction means copying the real file/directory.
    # It makes no sense if the symlink/junction is broken.
    if not os.path.exists(sourcepaths[0]):
        abort(404, "Source does not exist.")
    try:
        if len(sourcepaths) == 1:
            # Source is a plain filesystem path.
            if len(targetpaths) == 1:
                # filesystem -> filesystem: plain copy of a tree or file.
                try:
                    os.makedirs(os.path.dirname(targetpaths[0]), exist_ok=True)
                except OSError:
                    traceback.print_exc()
                    abort(500, "Unable to copy to this path.")
                try:
                    shutil.copytree(sourcepaths[0], targetpaths[0])
                except NotADirectoryError:
                    # Source is a regular file, not a directory.
                    shutil.copy2(sourcepaths[0], targetpaths[0])
                except shutil.Error:
                    traceback.print_exc()
                    abort(500, 'Fail to copy some files.')
            else:
                # filesystem -> archive: compress the source into the archive.
                error = False
                with open_archive_path(targetpaths, 'w') as zip:
                    try:
                        util.zip_compress(zip, sourcepaths[0], targetpaths[-1])
                    except shutil.Error:
                        traceback.print_exc()
                        error = True
                # abort() raises; do it outside the `with` so the archive
                # is closed cleanly first.
                if error:
                    abort(500, 'Fail to copy some files.')
        elif len(sourcepaths) > 1:
            # Source lives inside an archive.
            if len(targetpaths) == 1:
                # archive -> filesystem: extract the entry.
                try:
                    os.makedirs(os.path.dirname(targetpaths[0]), exist_ok=True)
                except OSError:
                    traceback.print_exc()
                    abort(500, "Unable to copy to this path.")
                with open_archive_path(sourcepaths) as zh:
                    util.zip_extract(zh, targetpaths[0], sourcepaths[-1])
            else:
                # archive -> archive: stream matching entries across,
                # rewriting their names under the target path.
                with open_archive_path(sourcepaths) as zip:
                    try:
                        zip.getinfo(sourcepaths[-1])
                    except KeyError:
                        # Not a file entry: copy everything under the directory prefix.
                        entries = [e for e in zip.namelist() if e.startswith(sourcepaths[-1] + '/')]
                    else:
                        entries = [sourcepaths[-1]]
                    with open_archive_path(targetpaths, 'w') as zip2:
                        cut = len(sourcepaths[-1])
                        for entry in entries:
                            info = zip.getinfo(entry)
                            info.filename = targetpaths[-1] + entry[cut:]
                            zip2.writestr(info, zip.read(entry), **util.zip_compression_params(
                                compress_type=info.compress_type,
                                compresslevel=None if info.compress_type == zipfile.ZIP_STORED else 9,
                            ))
    except HTTPException:
        raise
    except Exception:
        traceback.print_exc()
        abort(500, 'Unable to copy to the target.')
def http_basic_auth(func):
    """
    Attempts to login user with u/p provided in HTTP_AUTHORIZATION header.
    If successful, returns the view, otherwise returns a 401.
    If PING_BASIC_AUTH is False, then just return the view function
    Modified code by:
    http://djangosnippets.org/users/bthomas/
    from
    http://djangosnippets.org/snippets/1304/
    """
    @wraps(func)
    def _decorator(request, *args, **kwargs):
        # Basic auth is only enforced when enabled in settings.
        if not getattr(settings, 'PING_BASIC_AUTH', PING_BASIC_AUTH):
            return func(request, *args, **kwargs)
        import base64
        from django.contrib.auth import authenticate, login
        # dict.has_key() was removed in Python 3; use the `in` operator.
        if 'HTTP_AUTHORIZATION' not in request.META:
            return HttpResponse("No Credentials Provided", status=401)
        authmeth, _, auth = request.META['HTTP_AUTHORIZATION'].partition(' ')
        if authmeth.lower() != 'basic':
            # Previously this fell through and returned None (a server
            # error in Django); reject unsupported schemes explicitly.
            return HttpResponse("Invalid Credentials", status=401)
        try:
            # str.decode('base64') was removed in Python 3; use the
            # base64 module (binascii.Error subclasses ValueError).
            auth = base64.b64decode(auth.strip()).decode('utf-8')
            username, password = auth.split(':', 1)
        except (ValueError, UnicodeDecodeError):
            return HttpResponse("Invalid Credentials", status=401)
        user = authenticate(username=username, password=password)
        if user:
            login(request, user)
            return func(request, *args, **kwargs)
        return HttpResponse("Invalid Credentials", status=401)
    return _decorator
def update_modules(to_build, cfg):
    """
    Updates modules to be built with the passed-in config.

    Args:
        to_build: iterable of module ids selected for building.
        cfg: optional ``(section, key, value)`` triple overriding one
            config entry, or None for no override.

    Updating each individual module section will propagate the changes to
    STATUS as well (as the references are the same).
    Note that we have to apply overrides in a specific order -
    1) reset the modules being built to the defaults
    2) set any modules selected for build as build = True
    3) reset configs using config_collection_for_built
    4) apply config overrides
    """
    global STATUS
    selected = set(to_build)
    # Steps 1/2: restore each module's build flag to its recorded default,
    # then force-build the ones explicitly selected.
    for module_id in shutit.cfg:
        if module_id in ORIG_MOD_CFG and 'shutit.core.module.build' in ORIG_MOD_CFG[module_id]:
            shutit.cfg[module_id]['shutit.core.module.build'] = ORIG_MOD_CFG[module_id]['shutit.core.module.build']
        if module_id in selected:
            shutit.cfg[module_id]['shutit.core.module.build'] = True
    # Record the new override so it is applied below and remembered on
    # subsequent calls (ORIG_MOD_CFG persists across invocations).
    if cfg is not None:
        sec, key, val = cfg
        ORIG_MOD_CFG[sec][key] = val
    # Step 4: re-apply all stored non-build config overrides.
    for module_id in ORIG_MOD_CFG:
        for cfgkey in ORIG_MOD_CFG[module_id]:
            if cfgkey == 'shutit.core.module.build': continue
            shutit.cfg[module_id][cfgkey] = ORIG_MOD_CFG[module_id][cfgkey]
    errs = []
    errs.extend(shutit_main.check_deps(shutit))
    # There is a complexity here in that module configs may depend on
    # configs from other modules (!). We assume this won't happen as we
    # would have to override each module at the correct time.
    shutit_main.config_collection_for_built(shutit)
    errs.extend(shutit_main.check_conflicts(shutit))
    # Cache first
    # NOTE(review): check_ready is called twice and both results are kept,
    # which looks like it could duplicate errors — confirm the first
    # (throw_error=False) pass only warms a cache.
    errs.extend(shutit_main.check_ready(shutit, throw_error=False))
    errs.extend(shutit_main.check_ready(shutit))
    STATUS['errs'] = [err[0] for err in errs]
    # Rebuild the module summary exposed (by reference) through STATUS.
    STATUS['modules'] = [
        {
            "module_id": module_id,
            "description": shutit.shutit_map[module_id].description,
            "run_order": float(shutit.shutit_map[module_id].run_order),
            "build": shutit.cfg[module_id]['shutit.core.module.build'],
            "selected": module_id in selected
        } for module_id in shutit_main.allowed_module_ids(shutit)
    ]
def stats_to_df(stats_data):
    """Transform Statistical API response into a pandas.DataFrame.

    Each element of ``stats_data['data']`` becomes one row.  Band
    statistics are flattened into ``<output>_<band>_<stat>`` columns
    (percentiles get an extra ``_<percentile>`` suffix).  An interval
    where any band has sampleCount == noDataCount is dropped entirely.
    """
    records = []
    for item in stats_data['data']:
        record = {
            'interval_from': parse_time(item['interval']['from']).date(),
            'interval_to': parse_time(item['interval']['to']).date(),
        }
        valid = True
        for out_name, out_data in item['outputs'].items():
            for band_name, band_data in out_data['bands'].items():
                stats = band_data['stats']
                # All-no-data band invalidates the whole interval.
                if stats['sampleCount'] == stats['noDataCount']:
                    valid = False
                    break
                for stat_name, stat_value in stats.items():
                    column = f'{out_name}_{band_name}_{stat_name}'
                    if stat_name == 'percentiles':
                        record.update(
                            {f'{column}_{perc}': perc_value
                             for perc, perc_value in stat_value.items()}
                        )
                    else:
                        record[column] = stat_value
        if valid:
            records.append(record)
    return pd.DataFrame(records)
def make_risk_metrics(
    stocks,
    weights,
    start_date,
    end_date
):
    """Build a list of portfolio risk-metric dicts for the given holdings.

    Parameters:
        stocks: List of tickers compatible with the yfinance module
        weights: List of weights, probably going to be evenly distributed
        start_date: start of the observation window passed to the stats helper
        end_date: end of the observation window passed to the stats helper

    Returns a list of dicts with ``value``, ``name`` and ``description``
    keys; all values are 0 when mlfinlab is unavailable.
    """
    if mlfinlabExists:
        variance, value_at_risk, cvar, cdar = generate_risk_stats(
            stocks,
            weights,
            start_date=start_date,
            end_date=end_date
        )
    else:
        # Without mlfinlab the stats cannot be computed; report zeros.
        variance, value_at_risk, cvar, cdar = 0, 0, 0, 0
    prefix = "This measure can be used to compare portfolios"
    metrics = [
        ("Variance", variance,
         " based on estimations of the volatility of returns."),
        ("Value at Risk", value_at_risk,
         " based on the amount of investments that can be lost in the next observation, assuming the returns for assets follow a multivariate normal distribution."),
        ("Expected Shortfall", cvar,
         " based on the average amount of investments that can be lost in a worst-case scenario, assuming the returns for assets follow a multivariate normal distribution."),
        ("Conditional Drawdown at Risk", cdar,
         " based on the average amount of a portfolio drawdown in a worst-case scenario, assuming the drawdowns follow a normal distribution."),
    ]
    return [
        {"value": value, "name": name, "description": prefix + suffix}
        for name, value, suffix in metrics
    ]
def decrypt(bin_k, bin_cipher):
    """decrypt w/ DES

    Decrypts *bin_cipher* with single DES under key *bin_k* and returns
    the raw plaintext bytes.

    NOTE(review): single DES is cryptographically broken, and no cipher
    mode is passed so the library default is used (ECB in legacy
    PyCrypto; modern PyCryptodome requires an explicit mode) — confirm
    this matches whatever produced the ciphertext.
    """
    return Crypto.Cipher.DES.new(bin_k).decrypt(bin_cipher)
def send_shopfloor_activities_summary_report():
    """
    Similar to auto_email_report.send(), but renders the report in to a jinja template to allow for more flexibility

    Loads the "Horizon Global (PTA): Shopfloor Activities Summary"
    Auto Email Report, runs its underlying Report with the configured
    filters/row limit, renders the result through a custom Jinja
    template, and emails the HTML to the report's recipients.
    """
    # Get auto email report
    auto_email_report = frappe.get_doc("Auto Email Report", "Horizon Global (PTA): Shopfloor Activities Summary")
    report = frappe.get_doc('Report', auto_email_report.report)
    # Get report content (row limit and filters come from the Auto Email
    # Report settings; defaults to 100 rows when unset)
    columns, data = report.get_data(limit=auto_email_report.no_of_rows or 100, user = auto_email_report.user,
        filters = auto_email_report.filters, as_dict=True)
    # Render template
    date_time = global_date_format(now()) + ' ' + format_time(now())
    report_doctype = frappe.db.get_value('Report', auto_email_report.report, 'ref_doctype')
    report_html_data = frappe.render_template('quality_management_system/process/report/shopfloor_activities_summary/shopfloor_activities_summary_jinja.html', {
        'title': auto_email_report.name,
        'description': auto_email_report.description,
        'date_time': date_time,
        'columns': columns,
        'data': data,
        'report_url': get_url_to_report(auto_email_report.report, auto_email_report.report_type, report_doctype),
        'report_name': auto_email_report.report,
        'edit_report_settings': get_link_to_form('Auto Email Report', auto_email_report.name),
        'site_url': get_url()
    })
    # Later for testing: create PDF and attach
    # file_name = "test_dirk7.html"
    # from frappe.utils.file_manager import save_file
    # my_file = save_file(file_name, report_html_data, "Workstation",
    # "MagnaFlux Machine", "Home/Attachments")
    # Send email, according to Auto Email Report settings
    # (the rendered HTML is the message body; no attachments are sent)
    attachments = None
    message = report_html_data
    frappe.sendmail(
        recipients = auto_email_report.email_to.split(),
        subject = auto_email_report.name,
        message = message,
        attachments = attachments,
        reference_doctype = auto_email_report.doctype,
        reference_name = auto_email_report.name
    )
def mass_recorder(grams, item, sigfigs):
    """Record the mass of an item, formatted to a fixed number of decimals.

    Args:
        grams (float): mass of the item in grams.
        item (str): name of the item being recorded.
        sigfigs (int): number of decimal places to keep when formatting.

    Returns:
        dict: mapping of the item name to its formatted mass string.

    Raises:
        ValueError: if any argument has the wrong type.
    """
    # Validate up front (guard clause) instead of nesting the happy path.
    # The original version had mismatched quotes in this message (a
    # SyntaxError), referenced undefined names (Sigfigs, item_Dict), and
    # did `+=` on a key that was never initialized.
    if not (isinstance(grams, float) and isinstance(item, str) and isinstance(sigfigs, int)):
        raise ValueError('Please input grams as a float value, item as a string, and sigfigs as an integer value')
    item_dict = {}
    # f-strings support a dynamic precision field: e.g. 12.3456 @ 2 -> "12.35".
    item_dict[item] = f'{grams:.{sigfigs}f}'
    # NOTE(review): item_dict is rebuilt on every call, so masses are not
    # accumulated across calls — persist the dict at module or caller
    # level if accumulation was the original intent.
    return item_dict
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.