| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def get_car_changing_properties(car):
"""
Gets car properties that change during a trip.
:param car: car info in original system JSON-dict format
:return: dict with keys mapped to common electric2go format
"""
result = {mapped_key: car.get(original_key, None)
for mapped_key, original_key
in KEYS['changing'].items()}
# derived fields that can't be done automatically with a key mapping
result['address'] = ', '.join(car['address'])
result['price_offer'] = car['rentalPrice']['isOfferDrivePriceActive']
result['price_offer_details'] = car['rentalPrice'].get('offerDrivePrice', {})
return result
| 5,336,300
|
def get_metadata(record):
"""
Calls DNZ's API to retrieve the metadata for a given record.
"""
id = record['id']
url = DNZ_URL + '{id}.json?api_key={key}'.format(id=id, key=DNZ_KEY)
try:
metadata = get(url).json()['record']
metadata['hash'] = record['hash']
except KeyError:
print('You forgot the DNZ Key – Again!')
exit(1)
return metadata
| 5,336,301
|
def client():
"""Yield client fixture.
See <http://flask.pocoo.org/docs/1.0/testing/#the-testing-skeleton>
for ideas to expand this.
"""
yield backend.PLATEYPUS.test_client()
| 5,336,302
|
def _expand_one_dict(cfg, shared):
"""expand a piece of config
Parameters
----------
cfg : dict
Configuration
shared : dict
A dict of shared objects
Returns
-------
dict, list
Expanded configuration
"""
if shared['default_config_key'] is not None:
if not (len(cfg) == 1 and list(cfg.keys())[0] in shared['config_keys']):
cfg = {shared['default_config_key']: cfg}
if not len(cfg) == 1:
return cfg.copy()
key, val = list(cfg.items())[0]
if key not in shared['config_keys']:
cfg = _apply_default_for_all_keys(cfg, shared)
return cfg.copy()
if key not in shared['expand_func_map']:
cfg = _apply_default_for_all_keys(cfg, shared)
return cfg.copy()
expand_func = shared['expand_func_map'][key]
try:
return expand_func(val, shared)
except TypeError:
return expand_func(val)
| 5,336,303
|
def _api_decrypt():
"""
Return the response dictionary from the KMS decrypt API call.
"""
kms = _kms()
data_key = _cfg_data_key()
try:
return kms.decrypt(CiphertextBlob=data_key)
except botocore.exceptions.ClientError as orig_exc:
error_code = orig_exc.response.get("Error", {}).get("Code", "")
if error_code != "InvalidCiphertextException":
raise
err_msg = "aws_kms:data_key is not a valid KMS data key"
config_error = salt.exceptions.SaltConfigurationError(err_msg)
six.raise_from(config_error, orig_exc)
| 5,336,304
|
def hide_panel(panel_name, base_url=DEFAULT_BASE_URL):
"""Hide a panel in the UI of Cytoscape.
Other panels will expand into the space.
Args:
panel_name (str): Name of the panel. Multiple ways of referencing panels are supported:
(WEST == control panel, control, c), (SOUTH == table panel, table, ta), (SOUTH_WEST == tool panel, tool, to), (EAST == results panel, results, r)
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
str: ''
Raises:
CyError: if panel name is not recognized
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> hide_panel('control panel')
''
>>> hide_panel('WEST')
''
"""
panel_name = _check_panel_name(panel_name)
panel_name_state = {'name': panel_name, 'state': 'HIDE'}
res = commands.cyrest_put('ui/panels', body=[panel_name_state], base_url=base_url, require_json=False)
return res
| 5,336,305
|
def test_medicationdispense_10(base_settings):
"""No. 10 tests collection for MedicationDispense.
Test File: medicationdispense0328.json
"""
filename = base_settings["unittest_data_dir"] / "medicationdispense0328.json"
inst = medicationdispense.MedicationDispense.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "MedicationDispense" == inst.resource_type
impl_medicationdispense_10(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "MedicationDispense" == data["resourceType"]
inst2 = medicationdispense.MedicationDispense(**data)
impl_medicationdispense_10(inst2)
| 5,336,306
|
def user_tickets(raffle_prize, user):
"""Return the allocated tickets for the user."""
return raffle_prize.allocated_tickets(user)
| 5,336,307
|
def body_part(ent):
"""Enrich a body part span."""
data = {}
parts = [REPLACE.get(t.lower_, t.lower_) for t in ent]
text = ' '.join(parts)
if MISSING_RE.search(ent.text.lower()) is not None:
data['missing'] = True
data['body_part'] = text
ent._.data = data
| 5,336,308
|
def build(c):
""" Clean and build Sphinx docs """
make(c, 'html')
| 5,336,309
|
def db_add(src_path, db, new_entry):
"""Adds a string entry to a database file and the given set variable.
If it already exists in the set, do not take any action.
"""
if new_entry not in db:
with open(src_path, "r+") as f:
f.seek(0, 2)
f.write(str(new_entry) + "\n")
f.seek(0)
db.add(new_entry)
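# Illustrative usage sketch (not from the original source); "db.txt" is a
# hypothetical path. Since "r+" does not create a missing file, we touch it first.
db = {"alpha", "beta"}
open("db.txt", "a").close()        # ensure the file exists
db_add("db.txt", db, "gamma")      # appends "gamma\n" to the file and adds it to the set
db_add("db.txt", db, "alpha")      # already in the set: no write, no change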
| 5,336,310
|
def init_container(self, **kwargs):
"""Initialise a container with a dictionary of inputs
"""
for k, v in kwargs.items():
try:
setattr(self, k, v)
except Exception as e:
# Deal with the array -> list issue
if isinstance(getattr(self, k), list) and isinstance(v, ndarray):
setattr(self, k, v.tolist())
return self
| 5,336,311
|
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# Reshape 4D tensors to 2D, each row represents a pixel, each column a class
logits = tf.reshape(nn_last_layer, (-1, num_classes), name="fcn_logits")
correct_label_reshaped = tf.reshape(correct_label, (-1, num_classes))
# Calculate distance from actual labels using cross entropy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label_reshaped[:])
# Take mean for total loss
loss_op = tf.reduce_mean(cross_entropy, name="fcn_loss")
# The model implements this operation to find the weights/parameters that would yield correct pixel labels
train_op = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_op, name="fcn_train_op")
return logits, train_op, loss_op
| 5,336,312
|
def setIamPolicy(asset_id, policy):
"""Sets ACL info for an asset.
Args:
asset_id: The asset to set the ACL policy on.
policy: The new Policy to apply to the asset. This replaces
the current Policy.
Returns:
The new ACL, as an IAM Policy.
"""
return _execute_cloud_call(
_get_cloud_api_resource().projects().assets().setIamPolicy(
resource=_cloud_api_utils.convert_asset_id_to_asset_name(asset_id),
body={'policy': policy},
prettyPrint=False))
| 5,336,313
|
def get_corners(n):
"""Returns corner numbers of layer n"""
end = (2*n + 1) * (2*n + 1)
return [end-m*n for m in range(0,8,2)]
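# Worked example (illustrative): in a square number spiral, layer n has side
# length 2*n + 1, so its largest value is (2*n + 1)**2 and the corners are 2*n apart.
print(get_corners(1))  # -> [9, 7, 5, 3]
print(get_corners(2))  # -> [25, 21, 17, 13]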
| 5,336,314
|
def plot_single_hist(histvals, edges, legend=None, **kwds):
""" Bokeh-based plotting of a single histogram with legend and tooltips.
**Parameters**\n
histvals: 1D array
Histogram counts (e.g. vertical axis).
edges: 1D array
Histogram edge values (e.g. horizontal axis).
legend: str
Text for the plot legend.
**kwds:
Keyword arguments for 'bokeh.plotting.figure().quad()'.
**Return**\n
p: object
An instance of 'bokeh.plotting.figure()' as a plot handle.
"""
ttp = kwds.pop('tooltip', [('(x, y)', '($x, $y)')])
p = pbk.figure(background_fill_color='white', tooltips=ttp)
p.quad(top=histvals, bottom=0, left=edges[:-1], right=edges[1:],
line_color='white', alpha=0.8, legend=legend, **kwds)
p.y_range.start = 0
p.legend.location = 'top_right'
p.grid.grid_line_color = 'lightgrey'
return p
| 5,336,315
|
def resnet50(alpha, beta, **kwargs):
"""Constructs a ResNet-50 based model.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], alpha, beta, **kwargs)
checkpoint = torch.load(model_dirs['resnet50'])
layer_name = list(checkpoint.keys())
for ln in layer_name:
if 'conv' in ln or 'downsample.0.weight' in ln:
checkpoint[ln] = checkpoint[ln].unsqueeze(2)
if 'conv2' in ln:
n_out, n_in, _, _, _ = checkpoint[ln].size()
checkpoint[ln] = checkpoint[ln][:n_out // alpha * (alpha - 1), :n_in//beta,:,:,:]
model.load_state_dict(checkpoint,strict = False)
return model
| 5,336,316
|
def raise_sql_error(_req):
"""Raise a sql error"""
raise IntegrityError(ERROR_MESSAGE_2)
| 5,336,317
|
def get_datetimefromnctime(ds,time,time_units):
"""
Purpose:
Create a series of datetime objects from the time read from a netCDF file.
Usage:
footprint_utils.get_datetimefromnctime(ds,time,time_units)
Side effects:
Creates a Python datetime series in the data structure
Author: PRI
Date: September 2014
"""
ts = int(ds.globalattributes["time_step"])
nRecs = int(ds.globalattributes["nc_nrecs"])
# handle the change of default return object introduced at cftime V1.1.0
try:
dt = cftime.num2pydate(time, time_units)
except AttributeError:
dt = cftime.num2date(time, time_units)
#dt = netCDF4.num2date(time,time_units)
ds.series["DateTime"] = {}
ds.series["DateTime"]["Data"] = dt
ds.series["DateTime"]["Flag"] = numpy.zeros(nRecs)
ds.series["DateTime"]["Attr"] = {}
ds.series["DateTime"]["Attr"]["long_name"] = "Datetime in local timezone"
ds.series["DateTime"]["Attr"]["units"] = "None"
| 5,336,318
|
def datetime_to_timestamp(dt, epoch=datetime(1970,1,1)):
"""takes a python datetime object and converts it to a Unix timestamp.
This is a non-timezone-aware function.
:param dt: datetime to convert to timestamp
:param epoch: datetime, option specification of start of epoch [default: 1/1/1970]
:return: timestamp
"""
td = dt - epoch
return td.days * 86400 + td.seconds + td.microseconds / 10**6
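# Example (illustrative): one day, one second and half a second after the epoch.
from datetime import datetime
print(datetime_to_timestamp(datetime(1970, 1, 2, 0, 0, 1, 500000)))  # -> 86401.5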
| 5,336,319
|
def connectivity_dict_builder(edge_list, as_edges=False):
"""Builds connectivity dictionary for each vertex (node) - a list
of connected nodes for each node.
Args:
edge_list (list): a list describing the connectivity
e.g. [('E7', 'N3', 'N6'), ('E2', 'N9', 'N4'), ...]
as_edges (bool): whether to return connected vertices / nodes or edges
Returns:
(dict): connectivity dictionary, each node is a key and the
value is a set of connected nodes
e.g. {'N3': {'N6', 'N11', 'N7'}, 'N9': {'N4'}, etc}
"""
connectivity_dict = {}
for b, n1, n2 in edge_list:
n_set = connectivity_dict.get(n1,set())
n_set.add(b if as_edges else n2)
connectivity_dict[n1] = n_set
n_set = connectivity_dict.get(n2,set())
n_set.add(b if as_edges else n1)
connectivity_dict[n2] = n_set
return connectivity_dict
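# Example usage (illustrative): three edges over four nodes; set ordering in the
# printed output may vary.
edges = [('E7', 'N3', 'N6'), ('E2', 'N9', 'N4'), ('E5', 'N4', 'N3')]
print(connectivity_dict_builder(edges))
# e.g. {'N3': {'N6', 'N4'}, 'N6': {'N3'}, 'N9': {'N4'}, 'N4': {'N9', 'N3'}}
print(connectivity_dict_builder(edges, as_edges=True))
# e.g. {'N3': {'E7', 'E5'}, 'N6': {'E7'}, 'N9': {'E2'}, 'N4': {'E2', 'E5'}}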
| 5,336,320
|
def get_confusion_matrix(*, labels, logits, batch_mask):
"""Computes the confusion matrix that is necessary for global mIoU."""
if labels.ndim == logits.ndim: # One-hot targets.
y_true = jnp.argmax(labels, axis=-1)
else:
y_true = labels
# Set excluded pixels (label -1) to zero, because the confusion matrix
# computation cannot deal with negative labels. They will be ignored due to
# the batch_mask anyway:
y_true = jnp.maximum(y_true, 0)
y_pred = jnp.argmax(logits, axis=-1)
# Prepare sample weights for confusion matrix:
weights = batch_mask.astype(jnp.float32)
# Normalize weights by number of samples to avoid having very large numbers in
# the confusion matrix, which could lead to imprecise results (note that we
# should not normalize by sum(weights) because that might differ between
# devices/hosts):
weights = weights / weights.size
confusion_matrix = model_utils.confusion_matrix(
y_true=y_true,
y_pred=y_pred,
num_classes=logits.shape[-1],
weights=weights)
confusion_matrix = confusion_matrix[jnp.newaxis, ...] # Dummy batch dim.
return confusion_matrix
| 5,336,321
|
def init_socket():
"""Returns a fresh socket"""
return socket.socket()
| 5,336,322
|
def randomize_cycles():
"""Randomize the cycles renderer seed."""
bpy.data.scenes["Scene"].cycles.seed = random.randint(1, 10000000)
| 5,336,323
|
def schedule(sched):
"""Helper function to run the scheduler."""
def _schedule():
"""Run the scheduler, output some stats."""
new_placement = 0
evicted = 0
for event in sched.schedule():
if event.node:
new_placement = new_placement + 1
else:
evicted = evicted + 1
print('scheduled: ', new_placement, ', evicted: ', evicted)
interval = timeit.timeit(stmt=_schedule, number=1)
print('time :', interval)
| 5,336,324
|
def semitone_frequencies(fmin, fmax, fref=A4):
"""
Returns frequencies separated by semitones.
Parameters
----------
fmin : float
Minimum frequency [Hz].
fmax : float
Maximum frequency [Hz].
fref : float, optional
Tuning frequency of A4 [Hz].
Returns
-------
semitone_frequencies : numpy array
Semitone frequencies [Hz].
"""
# return MIDI frequencies
return log_frequencies(12, fmin, fmax, fref)
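# Standalone sketch of the underlying relation (assumption, not the library's
# implementation: log_frequencies uses the equal-temperament spacing
# f = fref * 2**(k/12) for integer semitone steps k around A4).
import numpy as np

def semitone_frequencies_sketch(fmin, fmax, fref=440.0):
    k_min = int(np.ceil(12 * np.log2(fmin / fref)))
    k_max = int(np.floor(12 * np.log2(fmax / fref)))
    return fref * 2.0 ** (np.arange(k_min, k_max + 1) / 12.0)

print(semitone_frequencies_sketch(430, 470))  # -> [440.  466.16...]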
| 5,336,325
|
def write_model(output_directory: OutputDirectory, k: float):
"""Write "our model" (which happens to be a single value).
In real life it can be a neural network we obtained, or matrices, or
whatever we want to save."""
output_file = os.path.join(output_directory, 'model.json')
with open(output_file, 'w') as f:
json.dump({'k': k}, f)
| 5,336,326
|
def check_binary_task(cls, method):
"""Raise an error if the task is invalid."""
if not cls.task.startswith("bin"):
raise PermissionError(
f"The {method} method is only available for binary classification tasks!"
)
| 5,336,327
|
def inplace_m_arcsinh_derivative(Z, delta):
"""Apply the derivative of the hyperbolic m-arcsinh function.
It exploits the fact that the derivative is a relatively
simple function of the output value from hyperbolic m-arcsinh.
Further details on this function are available at:
https://arxiv.org/abs/2009.07530
(Parisi, L., 2020; License: http://creativecommons.org/licenses/by/4.0/).
If you are using this function, please cite this paper as follows:
arXiv:2009.07530 [cs.LG].
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the hyperbolic tangent activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified in place.
"""
delta *= (np.sqrt(np.abs(Z))/(12*np.sqrt(Z**2+1)) + (Z*np.arcsinh(Z))/(24*np.abs(Z)**(3/2)))
| 5,336,328
|
def dict_get_value(dict: Mapping, name: str) -> Any:
"""Gets data from a dictionary using a dotted accessor-string
:param dict: source dictionary
:param name: dotted value name
"""
current_data = dict
for chunk in name.split('.'):
if not isinstance(current_data, (Mapping, Sequence)):
raise InvalidParamError('Could not find item "{}"'.format(name))
if chunk not in current_data:
raise InvalidParamError('Could not find item "{}"'.format(name))
current_data = current_data.get(chunk, {})
return current_data
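# Example usage (illustrative): nested lookup with a dotted accessor string.
data = {'server': {'host': 'localhost', 'port': 8080}}
print(dict_get_value(data, 'server.port'))  # -> 8080
# A missing key such as 'server.user' raises InvalidParamError.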
| 5,336,329
|
def get_files_from_split(split):
"""Get filenames for real and fake samples
Parameters
----------
split : pandas.DataFrame
DataFrame containing filenames
"""
files_1 = split[0].astype(str).str.cat(split[1].astype(str), sep="_")
files_2 = split[1].astype(str).str.cat(split[0].astype(str), sep="_")
files_real = pd.concat([split[0].astype(str), split[1].astype(str)]).to_list()
files_fake = pd.concat([files_1, files_2]).to_list()
return files_real, files_fake
| 5,336,330
|
def test_partial_overlap_multiple_ranges(
empty_range_stream_fresh, initial_ranges, overlapping_range
):
"""
Partial overlap with termini of the centred range [3,7) covered on multiple
ranges (both termini are contained) but `in` does not report True as the
entirety of this interval is not within the initial ranges: specifically
because these ranges [2,4) and [6,9) are not contiguous.
"""
stream = empty_range_stream_fresh
for rng_start, rng_end in initial_ranges:
stream.add(byte_range=Range(rng_start, rng_end))
spanning_rng_pre = stream.spanning_range
stream.handle_overlap(rng=overlapping_range, internal=False)
spanning_rng_post = stream.spanning_range
assert spanning_rng_pre == spanning_rng_post
internal_rng_list = ranges_in_reg_order(stream._ranges)
external_rng_list = ranges_in_reg_order(stream.ranges)
assert internal_rng_list[0] > external_rng_list[0]
assert internal_rng_list[1] == external_rng_list[1]
stream.add(overlapping_range)
external_rng_list = ranges_in_reg_order(stream.ranges)
assert overlapping_range in external_rng_list
for init_rng in initial_ranges:
assert init_rng not in external_rng_list
assert len(external_rng_list) == 3
| 5,336,331
|
def main(args):
"""
args[0] ... dir to place no binaries in
args[1] ... log file to parse
args[2] ... for longer files lower line number limit
args[3] ... for longer files upper line number limit
"""
with open(args[1], 'r') as f:
lines = f.readlines()
lower_l = 0
upper_l = None
if len(args) == 4:
lower_l = int(args[2])
upper_l = int(args[3])
for l in lines[lower_l:upper_l]:
output = parse("{addr} - {file}", l)
bin_args = [None, None]
bin_args[0] = args[0]
bin_args[1] = output["addr"]
with open(output["file"], 'rb') as f:
f_storage = bytearray(f.read())
modify(bin_args, f_storage, basename(output["file"]))
| 5,336,332
|
def dedupBiblioReferences(doc):
"""
SpecRef has checks in its database preventing multiple references from having the same URL.
Shepherd, while it doesn't have an explicit check for this,
should also generally have unique URLs.
But these aren't uniqued against each other.
So, if you explicitly biblio-link to a SpecRef spec,
and autolink to a Shepherd spec,
you might get two distinct biblio entries with the exact same URL.
This code checks for this,
and deletes Shepherd biblio if there's a SpecRef biblio with the same URL.
It then adjusts doc.externalRefsUsed to point to the SpecRef biblio.
"""
def isShepherdRef(ref):
return isinstance(ref, SpecBasedBiblioEntry)
normSpecRefRefs = {}
normShepherdRefs = {}
informSpecRefRefs = {}
informShepherdRefs = {}
for ref in doc.normativeRefs.values():
if isShepherdRef(ref):
normShepherdRefs[ref.url] = ref
else:
normSpecRefRefs[ref.url] = ref
for ref in doc.informativeRefs.values():
if isShepherdRef(ref):
informShepherdRefs[ref.url] = ref
else:
informSpecRefRefs[ref.url] = ref
normSpecRefUrls = set(normSpecRefRefs.keys())
normShepherdUrls = set(normShepherdRefs.keys())
informSpecRefUrls = set(informSpecRefRefs.keys())
informShepherdUrls = set(informShepherdRefs.keys())
specRefUrls = normSpecRefUrls | informSpecRefUrls
shepherdUrls = normShepherdUrls | informShepherdUrls
dupedUrls = shepherdUrls & specRefUrls
if not dupedUrls:
return
# If an informative duped URL is SpecRef,
# and a normative Shepherd version also exists,
# mark it for "upgrading", so the SpecRef becomes normative.
upgradeUrls = dupedUrls & informSpecRefUrls & normShepherdUrls
upgradeRefs = {}
popInformatives = []
for key, ref in doc.informativeRefs.items():
if ref.url in upgradeUrls and not isShepherdRef(ref):
upgradeRefs[ref.url] = ref
popInformatives.append(key)
for key in popInformatives:
doc.informativeRefs.pop(key)
for key, ref in doc.normativeRefs.items():
if ref.url in upgradeUrls:
doc.normativeRefs[key] = upgradeRefs[ref.url]
for url in upgradeUrls:
normShepherdUrls.discard(url)
informSpecRefUrls.discard(url)
normSpecRefUrls.add(url)
shepherdUrls = normShepherdUrls | informShepherdUrls
specRefUrls = normSpecRefUrls | informSpecRefUrls
dupedUrls = shepherdUrls & specRefUrls
# Remove all the Shepherd refs that are left in duped
poppedKeys = defaultdict(dict)
for key, ref in list(doc.informativeRefs.items()):
if ref.url in dupedUrls:
if isShepherdRef(ref):
doc.informativeRefs.pop(key)
poppedKeys[ref.url]["shepherd"] = key
else:
poppedKeys[ref.url]["specref"] = key
for key, ref in list(doc.normativeRefs.items()):
if ref.url in dupedUrls:
if isShepherdRef(ref):
doc.normativeRefs.pop(key)
poppedKeys[ref.url]["shepherd"] = key
else:
poppedKeys[ref.url]["specref"] = key
# For every key that was popped,
# swap out the "externalRefsUsed" for that key
for keys in poppedKeys.values():
if "shepherd" not in keys or "specref" not in keys:
continue
if keys["shepherd"] in doc.externalRefsUsed:
for k, v in list(doc.externalRefsUsed[keys["shepherd"]].items()):
doc.externalRefsUsed[keys["specref"]][k] = v
del doc.externalRefsUsed[keys["shepherd"]]
| 5,336,333
|
def check_percent(mask_arr, row, col, sz, percent):
"""
:param mask_arr: mask array
:param row: row index of the window's top-left corner
:param col: column index of the window's top-left corner
:param sz: window size (side length in pixels)
:param percent: required fraction of valid (foreground) pixels
:return: True if the valid fraction exceeds `percent`, else False
"""
upper_bound = mask_arr.max()
area = np.sum(mask_arr[row:row + sz, col:col + sz]) / upper_bound
if area / (sz ** 2) > percent:
return True
return False
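# Example usage (illustrative): an 8x8 mask with a 4x4 foreground patch.
import numpy as np
mask = np.zeros((8, 8), dtype=np.uint8)
mask[2:6, 2:6] = 255
print(check_percent(mask, 2, 2, 4, 0.9))  # -> True  (window fully covers the patch)
print(check_percent(mask, 0, 0, 4, 0.5))  # -> False (only 25% of the window is foreground)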
| 5,336,334
|
def find_center_pc(proj1, proj2, tol=0.5, rotc_guess=None):
"""
Find rotation axis location by finding the offset between the first
projection and a mirrored projection 180 degrees apart using
phase correlation in Fourier space.
The ``register_translation`` function uses cross-correlation in Fourier
space, optionally employing an upsampled matrix-multiplication DFT to
achieve arbitrary subpixel precision. :cite:`Guizar:08`.
Parameters
----------
proj1 : ndarray
2D projection data.
proj2 : ndarray
2D projection data.
tol : scalar, optional
Subpixel accuracy
rotc_guess : float, optional
Initial guess value for the rotation center.
Returns
-------
float
Rotation axis location.
"""
imgshift = 0.0 if rotc_guess is None else rotc_guess - (proj1.shape[1]-1.0)/2.0
proj1 = ndimage.shift(proj1, [0,-imgshift], mode='constant', cval=0)
proj2 = ndimage.shift(proj2, [0,-imgshift], mode='constant', cval=0)
# create reflection of second projection
proj2 = np.fliplr(proj2)
# Determine shift between images using scikit-image pcm
shift = register_translation(proj1, proj2, upsample_factor=1.0/tol)
# Compute center of rotation as the center of first image and the
# registered translation with the second image
center = (proj1.shape[1] + shift[0][1] - 1.0)/2.0
return center + imgshift
| 5,336,335
|
def emce_comparison(nus, n_reps=100):
"""Simulation comparing ECME algorithm with M-estimates.
We compare the estimates obtained by the ECME algorithm against two Huber
M-estimates with tuning parameters 1 and 4.
Args:
nus, iterable: Values for the degrees of freedom.
n_reps, int (default 100): Number of times the experiment is repeated.
Returns:
Results of the simulation recording average percentage errors.
"""
models = ['ecme', 'huber1', 'huber4']
errors = { model : {'a':[], 'b':[]} for model in models}
for nu in nus:
tmp_errors = { model : {'a':[], 'b':[]} for model in models}
for _ in range(n_reps):
a = 10*np.random.randn()
b = 10*np.random.randn()
sigma2 = 2*np.random.rand()
df = simulation.simulate_data(100, b, a, nu, sigma2)
y, X = from_dataframe(df)
model = ECME(y, X, compare=True, use_sigma2=True)
model.fit()
# slope
tmp_errors['ecme']['b'].append(np.abs((model.B[0]-b)/b))
tmp_errors['huber1']['b'].append(np.abs((model.B_huber_1[0]-b)/b))
tmp_errors['huber4']['b'].append(np.abs((model.B_huber_4[0]-b)/b))
# intercept
tmp_errors['ecme']['a'].append(abs((model.B[1] - a)/a))
tmp_errors['huber1']['a'].append(np.abs((model.B_huber_1[1]-a)/a))
tmp_errors['huber4']['a'].append(np.abs((model.B_huber_4[1]-a)/a))
# compute average errors
for name in errors:
for coeff in errors[name]:
errors[name][coeff].append(np.mean(tmp_errors[name][coeff]))
return errors
| 5,336,336
|
def usage():
""" Display how to use the Server. """
print("""
If you are seeing this message, Congratulations! You ignored the README.md
and felt like you knew what you were doing. Good job at that.
In all seriousness, most of what you need to know is in the README.md, so
check it out to get an idea on what is available to you here and how to use
it with the NOC Dashboard application.
Remember to set the path in the NOC Dashboard in the config.js, and to enable
the needed settings as required. The README.md and the docs folder contain
all that you need to know.
The Default Port is 4510, which can be changed by modifying the Config file.
If you need to run this within a larger app structure, there are examples of
gunicorn and Nginx WSGI configs in the server_configs directory. If all else
fails you can always proxy pass the requests through Apache or Nginx. You
can find information about this on Google or Stack Overflow.
Have fun.
""")
| 5,336,337
|
async def setup_light(hass, count, light_config):
"""Do setup of light integration."""
await async_setup_light(hass, count, light_config)
| 5,336,338
|
def _merge_tables_interactions(
key_join,
max_num_negatives,
):
"""Joins the interactions and multiple similar table id by question id.
Args:
key_join: Input to merge
max_num_negatives: Max similar tables to add. None means no limit.
Yields:
Merged interactions.
"""
_, join = key_join
if len(join["interactions"]) > 1:
beam.metrics.Metrics.counter(
_NS, "DulicatedQuestionIds_Interactions_" +
str(len(join["interactions"]))).inc()
elif not join["interactions"]:
beam.metrics.Metrics.counter(_NS,
"QuestionIds_WithoutInteraction_Jsonl").inc()
if join["interactions"]:
interaction = join["interactions"][0]
tables = join["tables"]
sorted_tables = sorted(tables, key=lambda t: t[2])
table_ids = set()
true_negatives = []
for table, score, rank in sorted_tables:
if max_num_negatives is not None:
if len(true_negatives) >= max_num_negatives:
break
if table.table_id in table_ids:
continue
table_ids.add(table.table_id)
if table.table_id == interaction.table.table_id:
continue
if preprocess_nq_utils.table_contains_all_answers(
table, interaction.questions[0]):
continue
true_negatives.append(_create_negative_example(table, score, rank))
if not true_negatives:
# Make sure we don't drop interactions.
beam.metrics.Metrics.counter(_NS, "Interactions_WitFakeTable").inc()
fake_table = interaction_pb2.Table()
fake_table.table_id = "FAKE"
fake_table.columns.add()
fake_table.rows.add().cells.add()
true_negatives.append(_create_negative_example(fake_table, 0.0, 0))
if true_negatives:
beam.metrics.Metrics.counter(_NS, "Interaction_With_True_negatives").inc()
yield _create_interaction_with_negative_tables(interaction,
true_negatives)
else:
beam.metrics.Metrics.counter(_NS,
"Interaction_Without_True_negatives").inc()
| 5,336,339
|
def to_dot(g, stream=sys.stdout, options=None):
"""
Args:
- g (rdflib.graph): RDF graph to transform into `dot` representation
- stream (default: sys.stdout | file): Where to write the output
Returns:
- (rdflib.graph): the input graph `g`; the `dot` representation is written to `stream`
"""
digraph = produce_graph.produce_graph(g, options=options)
stream.write('digraph g {\n')
# draw nodes
for (node, node_data) in digraph.nodes_iter(data=True):
node_str = '"%s" [label="%s"] ;\n'
stream.write(node_str % (node, node_data['label']))
for (source, target, edge_data) in digraph.edges_iter(data=True):
edge_str = '"%s" -> "%s" [label="%s"] ;\n'
stream.write(edge_str % (source, target, edge_data['label']))
stream.write('}\n')
return g
| 5,336,340
|
def seconds(seconds_since_epoch: int) -> date:
"""Converts a seconds offset from epoch to a date
Args:
seconds_since_epoch (int): The second offset from epoch
Returns:
date: The date the offset represents
"""
return EPOCH + timedelta(seconds=seconds_since_epoch)
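# Example (illustrative), assuming EPOCH = date(1970, 1, 1):
print(seconds(3 * 86400))  # -> 1970-01-04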
| 5,336,341
|
def check_presence(user):
"""
Gets user presence information from Slack ("active" or "away")
:param user: The identifier of the specified user
:return: True if user is currently active, False if user is away
"""
if not settings.SLACK_TOKEN:
return None
client = WebClient(token=settings.SLACK_TOKEN)
try:
response = client.users_getPresence(user=user)
assert response['ok'] is True
if response['presence'] == 'active':
return True
else:
return False
except SlackApiError as e:
assert e.response['ok'] is False
return None
| 5,336,342
|
def plot_confusion_matrix(truth,
predictions,
classes,
normalize=False,
save=False,
cmap=plt.cm.Oranges,
path="confusion_matrix.png"):
"""
This function plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
'cmap' controls the color plot. colors:
https://matplotlib.org/1.3.1/examples/color/colormaps_reference.html
:param truth: true labels
:type truth: np array
:param predictions: model predictions
:type predictions: np array
:param classes: list of classes in order
:type classes: list
:param normalize: param to normalize cm matrix
:type normalize: bool
:param save: param to save cm plot
:type save: bool
:param cmap: plt color map
:type cmap: plt.cm
:param path: path to save image
:type path: str
"""
acc = np.array(truth) == np.array(predictions)
size = float(acc.shape[0])
acc = np.sum(acc.astype("int32")) / size
title = "Confusion matrix of {0} examples\n accuracy = {1:.6f}".format(int(size), # noqa
acc)
cm = confusion_matrix(truth, predictions)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(9, 9))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=24, fontweight='bold')
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label', fontweight='bold')
plt.xlabel('Predicted label', fontweight='bold')
# save before show(); otherwise the figure may already be cleared when saving
if save:
    plt.savefig(path)
plt.show()
| 5,336,343
|
def insertTagEventRead(handler, event_id, tag_id, data, stable_start, stable_end):
"""Insert tag event read.
Args:
handler: Database handler.
event_id: Event ID.
tag_id: Tag ID.
data: Tag segment data (per-antenna RSSI time series).
stable_start: Start of the stable window.
stable_end: End of the stable window.
"""
name = inspect.stack()[0][3]
ascore = RfidArrivalClassifier().eval(data, stable_start, stable_end)
rscore = RfidRemovalClassifier().eval(data, stable_start, stable_end)
ter = {"tag_id":tag_id, "event_id":event_id, "arrival_score":ascore, "removal_score":rscore}
handler.insert("TagEventRead", ter)
| 5,336,344
|
def get_candidates(obsid,name_par_list,zmax,f1,f2):
"""
Getting pulsation candidates within some frequency range. If I want the full
frequency range, just do f1 = 0, f2 = some large number.
obsid - Observation ID of the object of interest (10-digit str)
name_par_list - list of parameters specifying parameters like GTI number and/or energy range
zmax - maximum acceleration
f1 - lower limit of frequency range
f2 - upper limit of frequency range
name_par_list should be [GTI_true,E_true,GTIno,segment_length,PI1,PI2]
"""
if type(obsid) != str:
raise TypeError("ObsID should be a string!")
if type(name_par_list) != list and type(name_par_list) != np.ndarray:
raise TypeError("name_par_list should either be a list or an array!")
if len(name_par_list) != 6:
raise ValueError("There seems to be fewer or more values in the list/array than there should be! You should have [GTI_true, E_true, GTIno, segment length, PI1, PI2]")
header1 = " Summed Coherent Num Period Frequency FFT 'r' Freq Deriv FFT 'z' Accel "
header2 = " Power / Raw FFT 'r' Pred 'r' FFT 'z' Pred 'z' Phase Centroid Purity "
obsid_file = Lv0_dirs.NICERSOFT_DATADIR + obsid + '_pipe/ni' + obsid + '_nicersoft_bary.evt'
header_card = fits.open(obsid_file)[0].header
date_obs = str(header_card['DATE-OBS'])
date_end = str(header_card['DATE-END'])
tstart = str(header_card['TSTART'])
tstop = str(header_card['TSTOP'])
if name_par_list[0] == True and name_par_list[1] == False: #if we're looking at just time segments!
working_dir = Lv0_dirs.NICERSOFT_DATADIR + obsid + '_pipe/accelsearch_' + str(name_par_list[3]) + 's/'
ACCEL_files = sorted(glob.glob(working_dir+'*_' + str(name_par_list[3]) + 's_ACCEL_' + str(zmax)))
cands_txt = open(working_dir+'candidates_'+str(name_par_list[3])+'s_raw.txt','w')
cands_txt.write('ObsID Start-date/time-of-obs End-date/time-of-obs MET_start MET_end seg_no MET_centroid Cand_No Sigma Frequency Freq_Deriv z Acceleration' + '\n')
"""
JULY 8: Got to edit the below as appropriate! Mainly got to think about how to replace seg_no!
ACTUALLY, BREAK THIS UP INTO 3 SEPARATE FUNCTIONS! Integrate into Lv3_presto_main as well...
elif name_par_list[0] == False and name_par_list[1] == True: #if we're looking at just energy segments!
working_dir = Lv0_dirs.NICERSOFT_DATADIR + obsid + '_pipe/'
ACCEL_files = sorted(glob.glob(working_dir+'*E'+str(name_par_list[4]) + '-' + str(name_par_list[5])))
cands_txt = open(working_dir+'candidates_raw.txt','w')
cands_txt.write('ObsID Start-date/time-of-obs End-date/time-of-obs MET_start MET_end seg_no MET_centroid Cand_No Sigma Frequency Freq_Deriv z Acceleration' + '\n')
else: #if we're looking at BOTH time AND energy segments!
working_dir = Lv0_dirs.NICERSOFT_DATADIR + obsid + '_pipe/accelsearch_' + str(name_par_list[3]) + 's/'
ACCEL_files = sorted(glob.glob(working_dir+'*_' + str(name_par_list[3]) + 's_ACCEL_' + str(zmax)))
cands_txt = open(working_dir+'candidates_raw.txt','w')
cands_txt.write('ObsID Start-date/time-of-obs End-date/time-of-obs MET_start MET_end seg_no MET_centroid Cand_No Sigma Frequency Freq_Deriv z Acceleration' + '\n')
"""
for i in range(len(ACCEL_files)):
accel_textfile = np.array(open(ACCEL_files[i],'r').read().split('\n')) #read the data from the ACCEL_$zmax files
index_header1 = np.where(accel_textfile==header1)[0][0] #index for "Summed, Coherent, Num, Period etc
index_header2 = np.where(accel_textfile==header2)[0][0] #index for "Power / Raw FFT 'r' etc
no_cands = index_header2 - index_header1 - 5 #to obtain number of candidates
segment_no = '0004'
MET_centroid = '141080121.942' #test
candidates = np.genfromtxt(ACCEL_files[i],dtype='str',skip_header=3,usecols=(0,1,6,8,9,10),unpack=True,max_rows=no_cands)
if len(candidates) == candidates.size: #meaning if there's ONE pulsation candidate in the *ACCEL_$zmax file
if (np.float(candidates[2][:-3]) > f1) and (np.float(candidates[2][:-3]) < f2):
cands_txt.write(obsid + ' ' + date_obs + ' ' + date_end + ' ' + tstart + ' ' + tstop + ' ' + segment_no + ' ' + MET_centroid + ' ' + candidates[0].zfill(4) + ' ' + candidates[1] + ' ' + candidates[2] + ' ' + candidates[3] + ' ' + candidates[4] + ' ' + candidates[5] + '\n')
else: #if there are multiple pulsation candidates in the *ACCEL_$zmax file
for j in range(candidates.shape[1]): #for EACH pulsation candidate
if (np.float(candidates[2][j][:-3]) > f1) and (np.float(candidates[2][j][:-3]) < f2):
cands_txt.write(obsid + ' ' + date_obs + ' ' + date_end + ' ' + tstart + ' ' + tstop + ' ' + segment_no + ' ' + MET_centroid + ' ' + candidates[0][j].zfill(4) + ' ' + candidates[1][j] + ' ' + candidates[2][j] + ' ' + candidates[3][j] + ' ' + candidates[4][j] + ' ' + candidates[5][j] + '\n')
| 5,336,345
|
def serialize_system(context, system, integrator):
"""Save context info."""
_write_to_file('system.xml', mm.XmlSerializer.serialize(system))
_write_to_file('integrator.xml', mm.XmlSerializer.serialize(integrator))
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
state = context.getState(getPositions=True,
getVelocities=True,
getForces=True,
getEnergy=True,
getParameters=True,
enforcePeriodicBox=True)
_write_to_file('state.xml', mm.XmlSerializer.serialize(state))
| 5,336,346
|
def FindUpwardParent(start_dir, *desired_list):
"""Finds the desired object's parent, searching upward from the start_dir.
Searches within start_dir and within all its parents looking for the desired
directory or file, which may be given in one or more path components. Returns
the first directory in which the top desired path component was found, or
raises PathNotFound if it wasn't.
"""
desired_path = os.path.join(*desired_list)
last_dir = ''
cur_dir = start_dir
found_path = os.path.join(cur_dir, desired_path)
while not os.path.exists(found_path):
last_dir = cur_dir
cur_dir = os.path.dirname(cur_dir)
if last_dir == cur_dir:
raise PathNotFound('Unable to find %s above %s' %
(desired_path, start_dir))
found_path = os.path.join(cur_dir, desired_path)
# Strip the entire original desired path from the end of the one found
# and remove a trailing path separator, if present (unless it's
# filesystem/drive root).
found_path = found_path[:len(found_path) - len(desired_path)]
if found_path.endswith(os.sep) and os.path.dirname(found_path) != found_path:
found_path = found_path[:len(found_path) - 1]
return found_path
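# Example usage (illustrative): locate the checkout root that contains ".git"
# somewhere above the current working directory; PathNotFound is the exception
# named in the docstring above.
import os
try:
    print(FindUpwardParent(os.getcwd(), '.git'))
except PathNotFound:
    print('not inside a git checkout')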
| 5,336,347
|
def _add_position_arguments(parser: argparse.ArgumentParser):
"""Add the lat and lng attributes to the parser."""
parser.add_argument('lat', type=float, nargs='?', const=0.0,
help='(optional) Your current GPS latitude (as float)')
parser.add_argument('lng', type=float, nargs='?', const=0.0,
help='(optional) Your current GPS longitude (as float)')
parser.set_defaults(func=get_status)
| 5,336,348
|
def adjust_position_to_boundaries(positions, bounds, tolerance=DEFAULT_TOLERANCE):
"""
Function to update boid position if crossing a boundary (toroid boundary condition)
:param positions: vector of (x,y) positions
:param bounds: (xmin,xmax,ymin,ymax) boundaries
:param tolerance: optional tolerance for being on boundary. by default set to DEFAULT_TOLERANCE (in constants.py)
"""
positions[:, 0] = np.where(positions[:, 0] < (bounds[0] - tolerance), positions[:, 0] + bounds[1], positions[:, 0])
positions[:, 0] = np.where(positions[:, 0] > (bounds[1] + tolerance), positions[:, 0] - bounds[1], positions[:, 0])
positions[:, 1] = np.where(positions[:, 1] < (bounds[2] - tolerance), positions[:, 1] + bounds[3], positions[:, 1])
positions[:, 1] = np.where(positions[:, 1] > (bounds[3] + tolerance), positions[:, 1] - bounds[3], positions[:, 1])
return positions
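# Example (illustrative), assuming bounds = (xmin, xmax, ymin, ymax) with the
# domain starting at 0 so that xmax/ymax double as the wrap width/height.
import numpy as np
pos = np.array([[-1.0, 5.0], [11.0, 21.0]])
bounds = (0.0, 10.0, 0.0, 20.0)
print(adjust_position_to_boundaries(pos, bounds, tolerance=0.0))
# -> [[ 9.  5.]
#     [ 1.  1.]]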
| 5,336,349
|
def residual_mlp_layer(x_flat, intermediate_size, initializer_range=0.02, hidden_dropout_prob=0.1):
"""
:param x_flat: The attention output. It should be [batch_size*seq_length, dim]
:param intermediate_size: the hidden projection. By default this is the input_dim * 4.
in the original GPT we would return layer_norm(x_norm + h1) rather than layer_norm(x + h1)
:return: layer output of shape [batch_size*seq_length, hidden_size]
"""
batch_size_seq_length, hidden_size = get_shape_list(x_flat, expected_rank=2)
x_norm = layer_norm(x_flat, name='mlp_ln0')
intermediate_output = tf.layers.dense(
x_norm,
intermediate_size,
activation=gelu,
kernel_initializer=create_initializer(initializer_range),
name='intermediate',
)
output_for_residual = tf.layers.dense(
intermediate_output,
hidden_size,
name='output',
kernel_initializer=create_initializer(initializer_range))
output_for_residual = dropout(output_for_residual, hidden_dropout_prob)
layer_output = layer_norm(x_flat + output_for_residual, name='mlp_ln1')
return layer_output
| 5,336,350
|
def _delete_project_repo(repo_name):
"""Deletes the specified repo from AWS."""
client = boto3.client('codecommit')
response = client.delete_repository(repositoryName=repo_name)
return response
| 5,336,351
|
def score_items(X, U, mu,
scoremethod='lowhigh',
missingmethod='none',
feature_weights=[]):
"""score_items(X, U, scoremethod, missingmethod, feature_weights)
Calculate the score (reconstruction error) for every item in X,
with respect to the SVD model in U and mean mu for uninteresting items.
'scoremethod' indicates which residual values count towards
the interestingness score of each item:
- 'low': negative residuals
- 'high': positive residuals
- 'lowhigh': both
'missingmethod' indicates how to handle missing (NaN) values:
- 'zero': set missing values to zero
- 'ignore': ignore missing values following Brand (2002)
- 'none': assert nothing is missing (NaN). Die horribly if not true.
'feature_weights' influence how much each feature contributes to the score.
Return an array of item reconstruction scores and their reprojections.
"""
# Use U to model and then reconstruct the data in X.
# 1. Project all data in X into space defined by U,
# then reconstruct it.
if missingmethod.lower() != 'ignore':
# All missing values should have been replaced with 0,
# or non-existent.
# 1a. Subtract the mean and project onto U
proj = np.dot(U.T, (X - mu))
# 1b. Reconstruct by projecting back up and adding mean
reproj = np.dot(U, proj) + mu
# 1c. Compute the residual
#print 'X:', X.T
#print 'reproj:', reproj.T
err = X - reproj
#print 'err:', err.T
#raw_input()
else:
# Missing method must be 'ignore' (Brand 2002)
(err, reproj) = compute_error_with_missing(X, U, mu)
# 2. Compute reconstruction error
if scoremethod == 'low': # Blank out all errors > 0
err[err>0] = 0
elif scoremethod == 'high': # Blank out all errors < 0
err[err<0] = 0
else: # default, count everything
pass
# Weight features if requested
if feature_weights != []:
for i in range(len(feature_weights)):
err[i,:] = err[i,:] * feature_weights[i]
if missingmethod.lower() == 'ignore':
# Only tally error for observed features.
# This means that items with missing values are not penalized
# for those features, which is probably the best we can do.
scores = np.nansum(np.array(np.power(err, 2)), axis=0)
else:
scores = np.sum(np.array(np.power(err, 2)), axis=0)
#print 'scores:', scores
#print 'reproj:', reproj
#raw_input()
return (scores, reproj)
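# Minimal usage sketch (illustrative; data values are made up). Build a
# one-component SVD model of "uninteresting" items and score three items.
import numpy as np
X = np.array([[1.0, 2.0, 3.0],
              [1.0, 2.0, 2.5]])               # 2 features x 3 items
mu = X.mean(axis=1, keepdims=True)            # per-feature mean, shape (2, 1)
U, s, Vt = np.linalg.svd(X - mu, full_matrices=False)
U = U[:, :1]                                  # keep only the top component
scores, reproj = score_items(X, U, mu)
print(scores.shape, reproj.shape)             # -> (3,) (2, 3)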
| 5,336,352
|
def get_output_specs(output):
""" Get the OpenAPI specifications of a SED output
Args:
output (:obj:`Output`): output
Returns:
:obj:`dict` with schema `SedOutput`
"""
if isinstance(output, Report):
specs = {
'_type': 'SedReport',
'id': output.id,
'dataSets': list(map(get_data_set_specs, output.data_sets)),
}
if output.name:
specs['name'] = output.name
elif isinstance(output, Plot2D):
specs = {
'_type': 'SedPlot2D',
'id': output.id,
'curves': list(map(get_curve_specs, output.curves)),
'xScale': None,
'yScale': None,
}
if output.name:
specs['name'] = output.name
if output.curves:
x_scale = output.curves[0].x_scale
y_scale = output.curves[0].y_scale
else:
x_scale = None
y_scale = None
for curve in output.curves:
if curve.x_scale != x_scale:
x_scale = None
if curve.y_scale != y_scale:
y_scale = None
specs['xScale'] = (
x_scale or AxisScale.linear).value
specs['yScale'] = (
y_scale or AxisScale.linear).value
elif isinstance(output, Plot3D):
specs = {
'_type': 'SedPlot3D',
'id': output.id,
'surfaces': list(map(get_surface_specs, output.surfaces)),
'xScale': None,
'yScale': None,
'zScale': None,
}
if output.name:
specs['name'] = output.name
if output.surfaces:
x_scale = output.surfaces[0].x_scale
y_scale = output.surfaces[0].y_scale
z_scale = output.surfaces[0].z_scale
else:
x_scale = None
y_scale = None
z_scale = None
for surface in output.surfaces:
if surface.x_scale != x_scale:
x_scale = None
if surface.y_scale != y_scale:
y_scale = None
if surface.z_scale != z_scale:
z_scale = None
specs['xScale'] = (
x_scale or AxisScale.linear).value
specs['yScale'] = (
y_scale or AxisScale.linear).value
specs['zScale'] = (
z_scale or AxisScale.linear).value
else:
raise BadRequestException(
title='Outputs of type `{}` are not supported.'.format(output.__class__.__name__),
instance=NotImplementedError(),
)
return specs
| 5,336,353
|
def logggnfw_exact(x, x0, y0, m1, m2, alpha):
"""
exact form, inspired by gNFW potential
An overflow warning is easily raised by somewhat
large values of m1, m2, and base
"""
base = 1. + np.exp(alpha)
x = x - x0
return np.log((base ** x) ** m1 *
(1 + base ** x) ** (m2 - m1)
) / np.log(base) + y0 + (m1 - m2) / np.log2(base)
| 5,336,354
|
def get_file_size(path: str):
"""
Return the size of a file, reported by os.stat().
Args:
path: File path.
"""
return os.path.getsize(path)
| 5,336,355
|
def is_lepton(pdgid):
"""Does this PDG ID correspond to a lepton?"""
if _extra_bits(pdgid) > 0:
return False
if _fundamental_id(pdgid) >= 11 and _fundamental_id(pdgid) <= 18:
return True
return False
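# Example (illustrative): electrons (PDG ID 11) and muons (13) are leptons, the
# proton (2212) is not; expected output assumes the usual PDG ID helper behaviour.
print(is_lepton(11), is_lepton(13), is_lepton(2212))  # -> True True False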
| 5,336,356
|
def process_data():
"""process data"""
# prepare cur batch data
image_names, labels = get_labels_from_txt(
os.path.join(IMAGE_PATH, 'image_label.txt'))
if len(labels) < CALIBRATION_SIZE:
raise RuntimeError(
'num of image in {} is less than total_num{}'
.format(IMAGE_PATH, CALIBRATION_SIZE))
labels = labels[0:CALIBRATION_SIZE]
image_names = image_names[0:CALIBRATION_SIZE]
image_names = [
os.path.join(IMAGE_PATH, image_name) for image_name in image_names
]
input_array = prepare_image_input(image_names)
return input_array
| 5,336,357
|
def setConfig():
"""
Function to create or load program settings from a .json file.
"""
filename = "config.json"
indent = 4
# Dictionary with the program's default settings.
default_config = {
"Colors":{
"Final_scoreboard_background_color":App.FINAL_SCOREBOARD_BACKGROUND_COLOR,
"Scoreboard_color":App.SCOREBOARD_COLOR,
"Target_colors":App.TARGET_COLORS,
"Target_area_colors":App.TARGET_AREA_COLORS
},
"Extra difficulty settings":{
"Lives":App.LIVES,
"Missing_shots_decreases_life":App.MISSING_SHOTS_DECREASES_LIFE,
},
"Performance":{
"Frames_per_second":App.FRAMES_PER_SECOND,
"Sounds_buffer":App.SOUNDS_BUFFER
},
"Targets":{
"Target_limit_per_second":App.TARGET_LIMIT_PER_SECOND,
"Target_radius":App.TARGET_RADIUS,
"Targets_per_second":App.TARGETS_PER_SECOND,
"Target_speed":App.TARGET_SPEED
},
}
# Create a dictionary with the default settings, to be modified below
file_config = default_config.copy()
try:
file = open(filename)
config = json.loads(file.read())
file.close()
for mainKey in config.keys():
# Check whether the main key is allowed
if not mainKey in default_config.keys():
continue
for key in config[mainKey].keys():
# Check whether the key is allowed
if not key in default_config[mainKey].keys():
continue
if "color" in key.lower():
if "colors" in key.lower():
colors_list = []
# Replace color names with RGB tuples
try:
for color in config[mainKey][key]:
if type(color) is str:
color = getrgb(color)
elif type(color) in [tuple,list]:
color = color
else: raise TypeError
colors_list.append(color)
file_config[mainKey][key] = colors_list.copy()
except: pass
continue
# Replace the color name with an RGB tuple
try:
color = config[mainKey][key]
if type(color) is str:
color = getrgb(color)
elif type(color) in [tuple,list]:
color = color
else: raise TypeError
file_config[mainKey][key] = color
except:
continue
# Put the setting from the file into the dictionary
file_config[mainKey][key] = config[mainKey][key]
# Pass the dictionary values to the program's main class as attributes
for mainKey in file_config.keys():
for key in file_config[mainKey].keys():
setattr(App,key.upper(),file_config[mainKey][key])
except:
file = open(filename,"w")
file.write(json.dumps(default_config,indent=indent))
file.close()
| 5,336,358
|
def complex_fields_container(real_field, imaginary_field, server=None):
"""Create a fields container with two fields (real and imaginary) and only one time set.
Parameters
----------
real_field : Field
Real :class:`ansys.dpf.core.Field` entity to add to the fields container.
imaginary_field : Field
Imaginary :class:`ansys.dpf.core.Field` entity to add to the fields container.
server : ansys.dpf.core.server, optional
Server with the channel connected to the remote or local instance.
The default is ``None``, in which case an attempt is made to use the
global server.
Returns
-------
fields_container : FieldsContainer
Fields container with two fields (real and imaginary).
"""
fc = FieldsContainer(server = server)
fc.labels = ["complex"]
fc.add_field({ "complex" : 0 }, real_field)
fc.add_field({ "complex" : 1 }, imaginary_field)
return fc
| 5,336,359
|
def get_time_slots(s : pd.Series, time_interval : str = 'daily'):
"""Convert timestamps to time slots"""
if time_interval.lower() not in (
'hourly', 'daily', 'weekly', 'monthly',
'quarterly', 'yearly'):
raise ValueError(f"unsupported time_interval: {time_interval!r}")
return pd.to_datetime(s)\
.dt.to_period(time_interval[0].upper())
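# Example usage (illustrative):
import pandas as pd
s = pd.Series(['2021-01-01 10:30', '2021-01-02 18:00'])
print(get_time_slots(s, 'daily').astype(str).tolist())    # -> ['2021-01-01', '2021-01-02']
print(get_time_slots(s, 'monthly').astype(str).tolist())  # -> ['2021-01', '2021-01']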
| 5,336,360
|
def __setup_conditional_formatting(sheet):
"""
Add conditional formatting to sheet rows depending on status
Parameters
----------
sheet: Sheet
sheet to setup conditional formatting for
"""
start_cell = f'{ascii_uppercase[0]}2'
end_cell = f'{ascii_uppercase[len(__headers)-1]}{SpreadsheetRowIndex.LAST.value}'
colors = [Color.TODO, Color.DONE, Color.WONT_FIX, Color.NOT_APPLICABLE]
colors = [color.value for color in colors]
conditional_formattings = {}
for status, color in zip(__statuses, colors):
conditional_formattings[status] = {
"range": f'{start_cell}:{end_cell}',
"formula": f'${ascii_uppercase[1]}2="{status}"',
"style": DifferentialStyle(fill=PatternFill(bgColor=color, fill_type='solid'))
}
for conditional_formatting in conditional_formattings.values():
sheet.conditional_formatting.add(
conditional_formatting['range'],
Rule(
type='expression',
formula=[conditional_formatting['formula']],
dxf=conditional_formatting['style']
)
)
| 5,336,361
|
def build_optimising_metaclass(
builtins=None, builtin_only=False, stoplist=(), constant_fold=True,
verbose=False
):
"""Return a automatically optimising metaclass for use as __metaclass__."""
class _OptimisingMetaclass(type):
def __init__(cls, name, bases, dict):
super(_OptimisingMetaclass, cls).__init__(name, bases, dict)
optimise_all(
cls, builtins, builtin_only, stoplist, constant_fold, verbose
)
return _OptimisingMetaclass
| 5,336,362
|
def update(save=True):
"""Update local
`players.csv <https://raw.githubusercontent.com/ryan-byrne/pycobb/main/pycobb/utils/players.csv>`_
file using data from the `Chadwick Baseball Bureau <https://github.com/chadwickbureau>`_
:param save: (optional) Chose to save the update locally
Usage::
>>> import pycobb
>>> pycobb.update()
"""
url = 'https://raw.githubusercontent.com/chadwickbureau/register/master/data/people.csv'
r = requests.get(url)
df = pd.read_csv(io.StringIO(r.text), dtype=PLAYER_TYPES)
ignore = [ 'mlb_managed_first', 'mlb_managed_last', 'col_managed_first',
'col_managed_last', 'pro_umpired_first', 'pro_umpired_last',
'mlb_umpired_first', 'mlb_umpired_last', 'pro_managed_first',
'pro_managed_last', 'col_played_first', 'col_played_last',
'pro_played_first', 'pro_played_last', 'key_npb','key_sr_nfl',
'key_sr_nba','key_sr_nhl',
]
df = df[df['mlb_played_first'].notna()].drop(ignore, axis='columns')
df.to_csv(f"{os.path.dirname(utils.__file__)}/players.csv")
| 5,336,363
|
def get_ensembl_id(hgnc_id):
"""Return the Ensembl ID corresponding to the given HGNC ID.
Parameters
----------
hgnc_id : str
The HGNC ID to be converted. Note that the HGNC ID is a number that is
passed as a string. It is not the same as the HGNC gene symbol.
Returns
-------
ensembl_id : str
The Ensembl ID corresponding to the given HGNC ID.
"""
return ensembl_ids.get(hgnc_id)
| 5,336,364
|
def predict(model, dataloader):
"""Returns: numpy arrays of true labels and predicted probabilities."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
labels = []
probs = []
for batch_idx, batch in enumerate(dataloader):
inputs, label = batch
inputs = inputs.to(device)
label = label.to(device)
labels.append(label)
outputs = model(inputs)
probs.append(torch.sigmoid(outputs[:, 1]))
labels = torch.cat(labels).cpu().numpy()
probs = torch.cat(probs).cpu().numpy()
return labels, probs
| 5,336,365
|
def _ProcessMemoryAccess(instruction, operands):
"""Make sure that memory access is valid and return precondition required.
(only makes sense for 64-bit instructions)
Args:
instruction: Instruction tuple
operands: list of instruction operands as strings, for example
['%eax', '(%r15,%rbx,1)']
Returns:
Condition object representing precondition required for memory access (if
it's present among operands) to be valid.
Raises:
SandboxingError if memory access is invalid.
"""
precondition = Condition()
for op in operands:
m = re.match(_MemoryRE() + r'$', op)
if m is not None:
assert m.group('memory_segment') is None
base = m.group('memory_base')
index = m.group('memory_index')
allowed_bases = ['%r15', '%rbp', '%rsp', '%rip']
if base not in allowed_bases:
raise SandboxingError(
'memory access only is allowed with base from %s'
% allowed_bases,
instruction)
if index is not None:
if index == '%riz':
pass
elif index in REGS64:
if index in ['%r15', '%rsp', '%rbp']:
raise SandboxingError(
'%s can\'t be used as index in memory access' % index,
instruction)
else:
assert precondition == Condition()
precondition = Condition(restricted=index)
else:
raise SandboxingError(
'unrecognized register is used for memory access as index',
instruction)
return precondition
| 5,336,366
|
def minimizeMeshDimensions(obj, direction, step, epsilon):
"""
Args:
obj:
direction:
step:
epsilon:
Returns:
"""
stepsum = 0
while True:
before, after = compareOrientation(obj, direction * step)
if before < after:
# bpy.ops.transform.rotate(value=-1.0*direction*step, axis=(0, 0, 1))
# bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
break
else:
stepsum += direction * step
step = step / 2
if step > epsilon:
print(stepsum)
stepsum += minimizeMeshDimensions(obj, -direction, step, epsilon)
return stepsum
| 5,336,367
|
def zip_and_move(source, destination):
"""Zip a directory and move to `destination`
Args:
- source (str): Directory to zip and move to destination.
- destination (str): Destination file path to zip file.
"""
os.chdir(os.path.dirname(source))
shutil.make_archive(os.path.basename(source), "zip", source)
shutil.move(os.path.basename(source) + ".zip", destination)
self.log.debug("Saved \"{}\" to \"{}\"".format(source, destination))
| 5,336,368
|
def gimme_dj(mystery_val: int, secret_val: int) -> str:
"""Play that funky music."""
# If you're worried about what this is doing, and NEED TO KNOW, check this gist:
# https://gist.github.com/SalomonSmeke/2dfef1f714851ae8c6933c71dad701ba
# It's nothing evil, just an inside joke for my good buddy Brian.
from importlib import import_module
hey: str = getattr(
import_module("".join(chr(c + secret_val) for c in [29, 28, 46, 32, -15, -17])),
"".join(
chr(c - (mystery_val % secret_val))
for c in [106, 107, 105, 117, 106, 107, 104, 127, 122, 107, 121]
),
)(B)
brian: str = getattr(
hey, "".join(chr(c - (503 - mystery_val)) for c in [183, 184, 182, 194, 183, 184])
)("".join(chr(c) for c in [117, 116, 102, 45, 56]))
return brian
| 5,336,369
|
def pluecker_from_verts(A,B):
"""
See Hartley & Zisserman (2003) p. 70
"""
if len(A)==3:
A = A[0], A[1], A[2], 1.0
if len(B)==3:
B = B[0], B[1], B[2], 1.0
A=nx.reshape(A,(4,1))
B=nx.reshape(B,(4,1))
L = nx.dot(A,nx.transpose(B)) - nx.dot(B,nx.transpose(A))
return Lmatrix2Lcoords(L)
| 5,336,370
|
def _send_tcp_ssl_file(sdc_executor):
"""Sends a file through tcp using ssl"""
hostname = sdc_executor.server_host
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with socket.create_connection((hostname, TCP_PORT)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
file_to_send = open(TCP_SSL_FILE_PATH, 'rb')
ssock.sendfile(file_to_send)
file_to_send.close()
ssock.close()
| 5,336,371
|
def setup_module():
"""Check we have a recent enough version of sphinx installed.
"""
ret = call([sys.executable, '-msphinx', '--help'],
stdout=PIPE, stderr=PIPE)
if ret != 0:
raise RuntimeError(
"'{} -msphinx' does not return 0".format(sys.executable))
| 5,336,372
|
def MAP_score(source_id, target_labels, prediction):
""" Function to compute the Mean Average Precision score of a given ranking.
Args:
source_id (array): Array containing the source_id of our given queries.
target_labels (array): Array containing the target labels of our query-document testset.
prediction (array): Array containing the confidences of our predicitons.
Returns:
MAP (float): MAP score of our ranking.
"""
# create a target dataframe with the id of query sentences, target_labels and the predicted confidence
result = pd.DataFrame()
result['source_id'] = source_id
result['Translation'] = target_labels
result['probabilities'] = [x[1] for x in prediction]
# rank by the source_id and get the ranking for each of the queries for all the documents
result['rank'] = result.groupby('source_id')['probabilities'].rank(method='average', ascending=False)
# create a new dataframe with only the right translations to get their rankings
ranks = result[result['Translation'] == 1].reset_index()
# compute the MAP score by first summing all inverses and dividing by the amount of queries
sum_inverse = 0
for i in range(0, len(ranks)):
sum_inverse += 1 / ranks['rank'][i]
MAP = 1 / len(ranks) * sum_inverse
return MAP
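# Worked example (illustrative): two queries, two candidate documents each;
# `prediction` holds (negative, positive) confidences, since MAP_score reads x[1].
source_id = [1, 1, 2, 2]
target_labels = [1, 0, 0, 1]
prediction = [(0.2, 0.8), (0.6, 0.4), (0.9, 0.1), (0.3, 0.7)]
print(MAP_score(source_id, target_labels, prediction))  # -> 1.0 (correct doc ranked first for both queries)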
| 5,336,373
|
def get_model_config(model):
"""Returns hyper-parameters for given mode"""
if model == 'maml':
return 0.1, 0.5, 5
if model == 'fomaml':
return 0.1, 0.5, 100
return 0.1, 0.1, 100
| 5,336,374
|
def split_train_test_cresus_data(tables, outfold, ratio=0.20, fLOG=fLOG): # pylint: disable=W0621
"""
Splits the tables into two sets for tables (based on users).
@param tables dictionary of tables,
@see fn prepare_cresus_data
@param outfold if not None, output all tables in this folder
@param fLOG logging function
@return couple of dictionaries of table files
"""
splits = ["user", "agenda", "dossier", "budget"]
df = pandas.read_csv(tables["dossier"], sep="\t", encoding="utf-8")
short = df[["id", "id_user", "date_ouverture"]
].sort_values("date_ouverture")
nb = len(short)
train = int(nb * (1 - ratio))
dossiers = set(short.loc[:train, "id"])
users = set(short.loc[:train, "id_user"])
train_tables = {}
test_tables = {}
for k, v in tables.items():
if k not in splits:
fLOG("[split_train_test_cresus_data] no split for", k)
data = pandas.read_csv(v, sep="\t", encoding="utf-8")
train_tables[k] = data
test_tables[k] = data
else:
if k == "dossier":
train_tables[k] = df[:train].copy()
test_tables[k] = df[train:].copy()
else:
data = pandas.read_csv(v, sep="\t", encoding="utf-8")
if "id_dossier" in data.columns:
key = "id_dossier"
select = dossiers
elif k == "user":
key = "id"
select = users
else:
raise Exception("unexpected: {0}".format(k))
try:
spl = data[key].apply(lambda x, ens=select: x in ens) # pylint: disable=E1136
except KeyError as e:
raise Exception("issue for table '{0}' and columns={1}".format(
k, data.columns)) from e # pylint: disable=E1101
train_tables[k] = data[spl].copy() # pylint: disable=E1136
test_tables[k] = data[~spl].copy() # pylint: disable=E1136
fLOG("[split_train_test_cresus_data] split for", k,
train_tables[k].shape, test_tables[k].shape)
rtrain = {}
for k, v in train_tables.items():
name = os.path.join(outfold, "tbl_train_" + k + ".txt")
v.to_csv(name, index=False, sep="\t", encoding="utf-8")
rtrain[k] = name
rtest = {}
for k, v in test_tables.items():
name = os.path.join(outfold, "tbl_test_" + k + ".txt")
v.to_csv(name, index=False, sep="\t", encoding="utf-8")
rtest[k] = name
return rtrain, rtest
| 5,336,375
|
def update_draw(attr, old, new):
"""
Updates :any:`Opt_w`, :any:`Opt_SPLoverX_dict` and :any:`SPLoverX_optreg`,
when :any:`optimal_SPLoverX` function was triggered. This happens if the
target slope weighting is chosen and the user changes the target slope.
Triggered by data change of :any:`Opt_SPLoverX_dict`.
Parameters
----------
attr : str
Changed attribute (data).
    old : float
        Old value of the changed data point.
    new : float
        New value of the changed data point (drawn by the user in figure :any:`pSPLoverX`).
Returns
-------
None.
"""
# change values to smallest and highest real x values or move to nearest
# grid point on x axis
x = SPLoverX_dict.data['x']
x_v, y_v = SPLoverX_dict.data['x_v'], SPLoverX_dict.data['y_v']
Opt_w.CDS2obj(Opt_SPLoverX_dict, ['x', 'SPL', 'x_v', 'y_v'])
Opt_w.CDS2obj(Opt_refpoint_dict, ['x_ref'])
for ind, val in enumerate(Opt_w.x):
arg = np.argmin(np.abs(np.array(x)-val))
Opt_w.x[ind] = x[arg]
Opt_w.x_v[ind], Opt_w.y_v[ind] = x_v[arg], y_v[arg]
# sort CDS so that smaller x value is on indice 0
Opt_w.resort_opt_region()
Opt_w.x_interp = list(x[np.argmin(np.abs(x-Opt_w.x[0])):np.argmin(np.abs(x-Opt_w.x[-1]))+1])
Opt_w.interpolate_SPL()
# find nearest index for x_ref
ref_ind = np.argmin(np.abs(Opt_w.x_ref[0] - np.array(Opt_w.x_interp)))
Opt_w.x_ref, Opt_w.ref_ind = Opt_w.x_interp[ref_ind], ref_ind
xv_ind = np.argmin(np.abs(np.array(x)-Opt_w.x_ref))
#Opt_w.SPL -= Opt_w.SPL_interp[ref_ind]
if Opt_w.is2update < 4:
Opt_w.is2update += 1
Opt_SPLoverX_dict.data.update(dict(x=Opt_w.x, SPL=Opt_w.SPL, \
x_v=Opt_w.x_v, y_v=Opt_w.y_v))
Opt_refpoint_dict.data.update(dict(x_ref=[Opt_w.x_ref], SPL=[Opt_w.SPL_interp[ref_ind]], \
x_v=[x_v[xv_ind]], y_v=[y_v[xv_ind]]))#SPL=[Opt_w.SPL_interp[ref_ind]], \
SPLoverX_optreg.data.update(dict(x=[Opt_w.x[0],Opt_w.x[0],Opt_w.x[-1],Opt_w.x[-1]], \
y=[Opt_w.SPL[0]+100,Opt_w.SPL[-1]-100, \
Opt_w.SPL[-1]-100,Opt_w.SPL[0]+100]))
| 5,336,376
|
def classify_images(images_dir, petlabel_dic, model):
"""
Creates classifier labels with classifier function, compares labels, and
creates a dictionary containing both labels and comparison of them to be
returned.
PLEASE NOTE: This function uses the classifier() function defined in
classifier.py within this function. The proper use of this function is
in test_classifier.py Please refer to this program prior to using the
classifier() function to classify images in this function.
Parameters:
images_dir - The (full) path to the folder of images that are to be
classified by pretrained CNN models (string)
petlabel_dic - Dictionary that contains the pet image(true) labels
that classify what's in the image, where its key is the
pet image filename & its value is pet image label where
label is lowercase with space between each word in label
model - pretrained CNN whose architecture is indicated by this parameter,
values must be: resnet alexnet vgg (string)
Returns:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
                            classifier labels and 0 = no match between labels
    """
    # Minimal sketch of the intended behaviour (the original left this as `pass`);
    # it assumes classifier(image_path, model) from classifier.py returns a label
    # string, as the docstring above describes.
    results_dic = {}
    for filename, pet_label in petlabel_dic.items():
        classifier_label = classifier(images_dir + filename, model).lower().strip()
        # idx 2: 1 if the true pet label appears in the classifier label, else 0
        match = 1 if pet_label in classifier_label else 0
        results_dic[filename] = [pet_label, classifier_label, match]
    return results_dic
| 5,336,377
|
def find_longest_substring(s: str, k: int) -> str:
    """
    Longest substring of ``s`` that contains at most ``k`` distinct characters.
    Speed: ~O(N)
    Memory: ~O(k)
    :param s: input string
    :param k: maximum number of distinct characters allowed in the substring
    :return: the longest qualifying substring (the earliest one on ties)
    """
    if k <= 0 or not s:
        return ""
    # sliding window s[start:i + 1]; counts holds the frequency of each character inside it
    counts = {}
    start = 0
    best_start, best_len = 0, 0
    for i, c in enumerate(s):
        counts[c] = counts.get(c, 0) + 1
        # shrink the window from the left until it holds at most k distinct characters
        while len(counts) > k:
            left = s[start]
            counts[left] -= 1
            if counts[left] == 0:
                del counts[left]
            start += 1
        # remember the longest window seen so far
        if i - start + 1 > best_len:
            best_start, best_len = start, i - start + 1
    return s[best_start:best_start + best_len]
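# Hedged usage sketch (not part of the original module): with at most two
# distinct characters, the longest qualifying substrings are "bbbcccc" and "bcb".
assert find_longest_substring("abbbcccc", 2) == "bbbcccc"
assert find_longest_substring("abcba", 2) == "bcb"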
| 5,336,378
|
def _fixTool2(scModel,gopLoader):
"""
:param scModel:
:param gopLoader:
:return:
@type scModel: ImageProjectModel
"""
def replace_tool(tool):
return 'jtui' if 'MaskGenUI' in tool else tool
modifier_tools = scModel.getGraph().getDataItem('modifier_tools')
if modifier_tools is not None:
scModel.getGraph().setDataItem('modifier_tools', [replace_tool(x) for x in modifier_tools])
    creator_tool = scModel.getGraph().getDataItem('creator_tool')
scModel.getGraph().setDataItem('creator_tool', replace_tool(creator_tool))
| 5,336,379
|
def gen_non_ca_cert(filename, dirname, days, ip_list, dns_list,
ca_crt, ca_key, silent=False):
"""
generate a non CA key and certificate key pair signed by the private
CA key and crt.
:param filename: prefix for the key and cert file
:param dirname: name of the directory
:param days: days of the certificate being valid
    :param ip_list: a list of ip addresses to be included in the certificate
    :param dns_list: a list of dns names to be included in the certificate
    :param ca_crt: file path to the CA crt
    :param ca_key: file path to the CA key
:param silent: whether to suppress output
"""
key_file = os.path.join(dirname, '{}.key'.format(filename))
req = os.path.join(dirname, '{}.csr'.format(filename))
crt = os.path.join(dirname, '{}.crt'.format(filename))
gen_private_key(key_file, silent)
alt_names = []
for ind, ip in enumerate(ip_list):
alt_names.append('IP.{} = {}'.format(ind + 1, ip))
for ind, dns in enumerate(dns_list):
alt_names.append('DNS.{} = {}'.format(ind + 1, dns))
conf = tempfile.mktemp()
    with open(conf, 'w') as conf_file:
        conf_file.write(SUBJECT_ALT_NAME + '\n'.join(alt_names))
gen_cert_request(req, key_file, conf, silent)
sign_cert_request(crt, req, ca_crt, ca_key, days, conf, silent)
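# Hedged usage sketch (not part of the original module): all paths below are
# placeholders, and keyword arguments are used because the signature order
# (ca_crt before ca_key) differs from the docstring order and is easy to get wrong.
def _gen_localhost_cert_example():
    gen_non_ca_cert('server', '/tmp/certs', 365,
                    ip_list=['127.0.0.1'], dns_list=['localhost'],
                    ca_crt='/tmp/certs/ca.crt', ca_key='/tmp/certs/ca.key')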
| 5,336,380
|
def test_stream_decompresser(compression_algorithm):
"""Test the stream decompresser."""
StreamDecompresser = utils.get_stream_decompresser( # pylint: disable=invalid-name
compression_algorithm
)
# A short binary string (1025 bytes, an odd number to avoid possible alignments with the chunk size)
original_data_short = b"0123456789abcdef" * 64 + b"E"
# A longish binary string (2097153 bytes ~ 2MB
# an odd number to avoid possible alignments with the chunk size), compressible
original_data_long = b"0123456789abcdef" * 1024 * 128 + b"E"
# A longish binary string (~4MB) with random data
# so typically uncompressible (compressed size is typically larger)
original_data_long_random = os.urandom(4000000)
original_data = [original_data_short, original_data_long, original_data_long_random]
compressed_streams = []
for data in original_data:
compresser = utils.get_compressobj_instance(compression_algorithm)
compressed = compresser.compress(data)
compressed += compresser.flush()
compressed_streams.append(io.BytesIO(compressed))
for original, compressed_stream in zip(original_data, compressed_streams):
decompresser = StreamDecompresser(compressed_stream)
assert decompresser.mode == "rb"
# Read in one chunk
with decompresser as handle:
chunk = handle.read()
assert original == chunk
# Redo the same, but do a read of zero bytes first, checking that
# it returns a zero-length bytes, and that it does not move the offset
compressed_streams = []
for data in original_data:
compresser = utils.get_compressobj_instance(compression_algorithm)
compressed = compresser.compress(data)
compressed += compresser.flush()
compressed_streams.append(io.BytesIO(compressed))
for original, compressed_stream in zip(original_data, compressed_streams):
decompresser = StreamDecompresser(compressed_stream)
# Read in one chunk
tmp = decompresser.read(size=0)
assert not tmp
assert (
original == decompresser.read()
), "Uncompressed data is wrong (single read)"
compressed_streams = []
for data in original_data:
compresser = utils.get_compressobj_instance(compression_algorithm)
compressed = compresser.compress(data)
compressed += compresser.flush()
compressed_streams.append(io.BytesIO(compressed))
chunk_size = 1024
for original, compressed_stream in zip(original_data, compressed_streams):
data_chunks = []
decompresser = StreamDecompresser(compressed_stream)
# Read in multiple chunk
while True:
chunk = decompresser.read(size=chunk_size)
data_chunks.append(chunk)
if not chunk:
break
data = b"".join(data_chunks)
assert original == data, "Uncompressed data is wrong (chunked read)"
| 5,336,381
|
def mol_view(request):
"""Function to view a 2D depiction of a molecule -> as PNG"""
my_choice = request.GET['choice'].split("_")[0]
try:
mol = Chem.MolFromSmiles(str(InternalIDLink.objects.filter(internal_id=my_choice)[0].mol_id.smiles))
except IndexError:
mol = Chem.MolFromSmiles(str(Molecule.objects.get(pk=my_choice).smiles))
image = Draw.MolToImage(mol)
output = StringIO.StringIO()
image.save(output, format="PNG")
contents = output.getvalue()
return HttpResponse(contents)
| 5,336,382
|
def rotation_matrix_about(axis, theta):
"""Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
Taken from: https://stackoverflow.com/a/6802723
"""
if np.shape(axis) != (3,):
raise ValueError("Shape of `axis` must be (3,)!")
scalar = True
if np.ndim(theta) > 1:
raise ValueError("Only 0 or 1 dimensional values for `theta` are supported!")
elif np.ndim(theta) == 1:
theta = np.atleast_2d(theta).T
scalar = False
axis = np.asarray(axis)
axis = axis / np.sqrt(np.dot(axis, axis))
a = np.cos(theta / 2.0).squeeze()
# b, c, d = - axis * np.sin(theta / 2.0)
temp = - axis * np.sin(theta / 2.0)
if not scalar:
temp = temp.T
b, c, d = temp
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
if not scalar:
rot = rot.T
return rot
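# Hedged usage sketch (not part of the original module): a 90 degree counterclockwise
# rotation about the z axis maps the x unit vector to the y unit vector.
def _rotation_matrix_about_example():
    rot = rotation_matrix_about([0, 0, 1], np.pi / 2)
    return rot @ np.array([1.0, 0.0, 0.0])  # approximately [0., 1., 0.]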
| 5,336,383
|
def main(argv):
"""
Read the seed URL and Keyword from the command line arguments
"""
startingDepth=1
seedURLS=Stack()
if len(argv)==2:
pageResourceURI=re.sub(urlPrefixRegex, '', argv[0])
pageURLAnchorText=("SeedURLForCrawler",pageResourceURI)
seedURLS.push(pageURLAnchorText)
searchKeyWord=argv[1]
FocusedCrawl(seedURLS,searchKeyWord,startingDepth)
StoreCrawledURLs(keyWordSpecificPageURLS,"FocusedDFSURLs.txt")
else:
print("Please pass the correct inputs to the program")
| 5,336,384
|
def run_gui():
"""Run under GUI and non-verbose mode."""
# sys.settrace(util.trace_calls_and_returns)
root = ui.build_script_launcher(
title=_basename,
app_dir=_script_dir,
progress_queue=_progress_queue,
handlers={
'OnQuit': _RTPC.quit,
'OnSubmit': _RTPC.play,
'OnCancel': _RTPC.stop,
'Watchers': {
'oscillator': {'OnChange': _RTPC.on_osc},
'frequency': {'OnChange': _RTPC.on_freq},
'gain': {'OnChange': _RTPC.on_gain},
}
},
window_size=(768, 300),
progress_mode='indeterminate'
)
threaded_main() # Launch Csound and keep it running.
root.mainloop()
| 5,336,385
|
def zc_rules():
"""catch issues with zero copy streaming"""
return (
case("SSTableReader"),
rule(
capture(
r"Could not recreate or deserialize existing bloom filter, continuing with a pass-through bloom filter but this will significantly impact reads performance"
),
update(
event_product="zcs",
event_category="streaming",
event_type="bloom_filter",
),
),
)
| 5,336,386
|
def name_convert_to_camel(name: str) -> str:
"""下划线转驼峰"""
contents = re.findall('_[a-z]+', name)
for content in set(contents):
name = name.replace(content, content[1:].title())
return name
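# Hedged usage sketch (not part of the original module): only lowercase letters
# following an underscore are converted.
assert name_convert_to_camel('user_name_id') == 'userNameId'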
| 5,336,387
|
def triangle_as_polynomial(nodes, degree):
"""Convert ``nodes`` into a SymPy polynomial array :math:`B(s, t)`.
Args:
        nodes (numpy.ndarray): Nodes defining a Bézier triangle.
degree (int): The degree of the triangle. This is assumed to
correctly correspond to the number of ``nodes``.
Returns:
Tuple[sympy.Symbol, sympy.Symbol, sympy.Matrix]: Triple of
* The symbol ``s`` used in the polynomial
* The symbol ``t`` used in the polynomial
* The triangle :math:`B(s, t)`.
"""
# NOTE: We import SymPy at runtime to avoid the import-time cost for users
# that don't want to do symbolic computation. The ``sympy`` import is
# a tad expensive.
import sympy # pylint: disable=import-outside-toplevel
nodes_sym = to_symbolic(nodes)
s, t = sympy.symbols("s, t")
b_polynomial = nodes_sym * triangle_weights(degree, s, t)
b_polynomial.simplify()
factored = [value.factor() for value in b_polynomial]
return s, t, sympy.Matrix(factored).reshape(*b_polynomial.shape)
| 5,336,388
|
def configure(name, key, value):
"""Add an environment variable to a Heroku application"""
# Get Heroku application
app = get(name)
# Set environment variable
app.config()[key] = value
| 5,336,389
|
def retrieve_docs(
collection_cache: KeyValueStore,
errors: List,
missing: List,
stats: Dict,
) -> None:
# pylint: disable=too-many-locals
"""Extract the docs from the plugins.
:param collection_cache: The key value interface to a sqlite database
:param errors: Previous errors encountered
:param missing: Plugins missing from the collection cache
:param stats: Statistics related to the collection cataloging process
"""
pending_queue = multiprocessing.Manager().Queue()
completed_queue = multiprocessing.Manager().Queue()
processes = []
for _proc in range(PROCESSES):
proc = multiprocessing.Process(target=worker, args=(pending_queue, completed_queue))
processes.append(proc)
proc.start()
for entry in missing:
pending_queue.put(entry)
for _proc in range(PROCESSES):
pending_queue.put(None)
for proc in processes:
proc.join()
while not completed_queue.empty():
message_type, message = completed_queue.get()
if message_type == "plugin":
checksum, plugin = message
collection_cache[checksum] = plugin
stats["cache_added_success"] += 1
elif message_type == "error":
checksum, plugin_path, error = message
collection_cache[checksum] = json.dumps({"error": error})
errors.append({"path": plugin_path, "error": error})
stats["cache_added_errors"] += 1
| 5,336,390
|
def as_decimal(dct):
"""Decodes the Decimal datatype."""
if '__Decimal__' in dct:
return decimal.Decimal(dct['__Decimal__'])
return dct
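# Hedged usage sketch (not part of the original module): `as_decimal` is meant to be
# passed as a json object_hook so payloads of the form {"__Decimal__": "..."} decode
# to decimal.Decimal values.
def _as_decimal_example():
    import json
    return json.loads('{"__Decimal__": "1.10"}', object_hook=as_decimal)  # Decimal('1.10')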
| 5,336,391
|
def input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
cols_to_output_tensors=None):
"""Returns a dense `Tensor` as input layer based on given `feature_columns`.
Generally a single example in training data is described with FeatureColumns.
At the first layer of the model, this column oriented data should be converted
to a single `Tensor`.
Example:
```python
price = numeric_column('price')
keywords_embedded = embedding_column(
categorical_column_with_hash_bucket("keywords", 10K), dimensions=16)
columns = [price, keywords_embedded, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
for units in [128, 64, 32]:
dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
prediction = tf.layers.dense(dense_tensor, 1)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values can be a `SparseTensor` or a `Tensor` depends on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_DenseColumn` such as `numeric_column`, `embedding_column`,
`bucketized_column`, `indicator_column`. If you have categorical features,
you can wrap them with an `embedding_column` or `indicator_column`.
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to list of `Variable`s. For example, after
the call, we might have cols_to_vars =
{_EmbeddingColumn(
categorical_column=_HashedCategoricalColumn(
key='sparse_feature', hash_bucket_size=5, dtype=tf.string),
dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10),
<tf.Variable 'some_variable:1' shape=(5, 10)]}
If a column creates no variables, its value will be an empty list.
cols_to_output_tensors: If not `None`, must be a dictionary that will be
filled with a mapping from '_FeatureColumn' to the associated
output `Tensor`s.
Returns:
A `Tensor` which represents input layer of a model. Its shape
is (batch_size, first_layer_dimension) and its dtype is `float32`.
first_layer_dimension is determined based on given `feature_columns`.
Raises:
ValueError: if an item in `feature_columns` is not a `_DenseColumn`.
"""
return _internal_input_layer(
features,
feature_columns,
weight_collections=weight_collections,
trainable=trainable,
cols_to_vars=cols_to_vars,
cols_to_output_tensors=cols_to_output_tensors)
| 5,336,392
|
def test_s3_3_4v29_s3_3_4v29i(mode, save_output, output_format):
"""
Multiple attributes of type ID with default value
"""
assert_bindings(
schema="ibmData/valid/S3_3_4/s3_3_4v29.xsd",
instance="ibmData/valid/S3_3_4/s3_3_4v29.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,336,393
|
def log_sum_exp(x):
"""Utility function for computing log_sum_exp while determining
This will be used to determine unaveraged confidence loss across
all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
"""
log_reduce_sum = P.ReduceSum()
log = P.Log()
exp = P.Exp()
x_max = max(x.data)
return log(log_reduce_sum(exp(x - x_max), 1)) + x_max
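# Hedged NumPy sketch of the same numerically stable trick (not part of the original
# MindSpore module): subtracting the maximum before exponentiating prevents overflow
# in exp(), and the maximum is added back at the end.
def _log_sum_exp_numpy_sketch(x):
    import numpy as np
    x = np.asarray(x, dtype=float)
    x_max = x.max()
    return np.log(np.exp(x - x_max).sum(axis=1)) + x_max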
| 5,336,394
|
def data_dir():
"""
:return: data directory in the filesystem for storage, for example when downloading models
"""
return os.getenv('CNOCR_HOME', data_dir_default())
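# Hedged usage sketch (not part of the original module): the CNOCR_HOME environment
# variable overrides the default location returned by data_dir_default().
def _data_dir_override_example():
    os.environ['CNOCR_HOME'] = '/tmp/cnocr'
    return data_dir()  # '/tmp/cnocr'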
| 5,336,395
|
def validate_user_alert_incident(incidents):
"""
internal method used in test_fetch_user_alert_incident_success_with_param_alerts
"""
assert len(incidents) == 3
for incident in incidents:
assert incident['name']
assert incident['rawJSON']
raw_json = json.loads(incident['rawJSON'])
assert raw_json['FirstSeen']
assert raw_json['LastSeen']
assert raw_json['Occurrences']
assert raw_json['Alerts']
assert raw_json['User']
assert raw_json['AlertName']
| 5,336,396
|
def prepareRepoCharts(url, name, auths):
"""
NOTE: currently not support git
"""
charts_info, charts_info_hash = _prepareHelmRepoPath(url, name, auths)
return charts_info, charts_info_hash
| 5,336,397
|
def num_ini_spaces(s):
"""Return the number of initial spaces in a string.
Note that tabs are counted as a single space. For now, we do *not* support
mixing of tabs and spaces in the user's input.
Parameters
----------
s : string
Returns
-------
n : int
"""
ini_spaces = ini_spaces_re.match(s)
if ini_spaces:
return ini_spaces.end()
else:
return 0
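# Hedged usage sketch (not part of the original module); relies on the module-level
# `ini_spaces_re` regex that num_ini_spaces uses.
def _num_ini_spaces_example():
    return num_ini_spaces('    if x:')  # expected 4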
| 5,336,398
|
def make_filename_template(schema, **kwargs):
"""Create codeblocks containing example filename patterns for a given
datatype.
Parameters
----------
schema : dict
The schema object, which is a dictionary with nested dictionaries and
lists stored within it.
kwargs : dict
Keyword arguments used to filter the schema.
Example kwargs that may be used include: "suffixes", "datatypes",
"extensions".
Returns
-------
codeblock : str
A multiline string containing the filename templates for file types
in the schema, after filtering.
"""
schema = filter_schema(schema, **kwargs)
entity_order = schema["rules"]["entities"]
paragraph = ""
# Parent folders
paragraph += "{}-<{}>/\n\t[{}-<{}>/]\n".format(
schema["objects"]["entities"]["subject"]["entity"],
schema["objects"]["entities"]["subject"]["format"],
schema["objects"]["entities"]["session"]["entity"],
schema["objects"]["entities"]["session"]["format"],
)
for datatype in schema["rules"]["datatypes"].keys():
paragraph += "\t\t{}/\n".format(datatype)
# Unique filename patterns
for group in schema["rules"]["datatypes"][datatype]:
string = "\t\t\t"
for ent in entity_order:
ent_format = "{}-<{}>".format(
schema["objects"]["entities"][ent]["entity"],
schema["objects"]["entities"][ent].get("format", "label")
)
if ent in group["entities"]:
if group["entities"][ent] == "required":
if len(string.strip()):
string += "_" + ent_format
else:
# Only the first entity doesn't need an underscore
string += ent_format
else:
if len(string.strip()):
string += "[_" + ent_format + "]"
else:
# Only the first entity doesn't need an underscore
string += "[" + ent_format + "]"
# In cases of large numbers of suffixes,
# we use the "suffix" variable and expect a table later in the spec
if len(group["suffixes"]) > 5:
suffix = "_<suffix>"
string += suffix
strings = [string]
else:
strings = [
string + "_" + suffix for suffix in group["suffixes"]
]
# Add extensions
full_strings = []
extensions = group["extensions"]
extensions = [
ext if ext != "*" else ".<extension>" for ext in extensions
]
extensions = utils.combine_extensions(extensions)
if len(extensions) > 5:
# Combine exts when there are many, but keep JSON separate
if ".json" in extensions:
extensions = [".<extension>", ".json"]
else:
extensions = [".<extension>"]
for extension in extensions:
for string in strings:
new_string = string + extension
full_strings.append(new_string)
full_strings = sorted(full_strings)
if full_strings:
paragraph += "\n".join(full_strings) + "\n"
paragraph = paragraph.rstrip()
codeblock = "Template:\n```Text\n" + paragraph + "\n```"
codeblock = codeblock.expandtabs(4)
return codeblock
| 5,336,399
|