| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
from typing import List
import pandas as pd
def sum_per_agent(df: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
    """Calculates summed values per agent for each given column individually"""
    all_values_per_agent = pd.DataFrame(columns=columns)
    for column in columns:
        function = calc_sum(column)
        value_per_agent = call_function_per_agent(df, function)
        for agent_id, value in value_per_agent.items():
            all_values_per_agent.at[agent_id, column] = value
    return all_values_per_agent
|
b828e68a2f2555b9b12f4c17376a7f88211611d4
| 3,644,000
|
import logging
async def vcx_ledger_get_fees() -> str:
    """
    Get ledger fees from the sovrin network
    Example:
    fees = await vcx_ledger_get_fees()
    :return: JSON representing fees
    { "txnType1": amount1, "txnType2": amount2, ..., "txnTypeN": amountN }
    """
    logger = logging.getLogger(__name__)
    if not hasattr(vcx_ledger_get_fees, "cb"):
        logger.debug("vcx_ledger_get_fees: Creating callback")
        vcx_ledger_get_fees.cb = create_cb(CFUNCTYPE(None, c_uint32))
    result = await do_call('vcx_ledger_get_fees',
                           vcx_ledger_get_fees.cb)
    logger.debug("vcx_ledger_get_fees completed")
    return result
|
ba801ec57b57354b9bdb3a033b02860e8daed450
| 3,644,001
|
def calc_one_sample_metric(sample):
    """Compute the ROUGE-L and BLEU-4 scores for a single sample of the V1 data."""
    if len(sample['best_match_scores']) == 0:  # bad case
        return -1, -1
    pred_answers, ref_answers = [], []
    pred_answers.append({'question_id': sample['question_id'],
                         'question_type': sample['question_type'],
                         # use the gold fake answer as the predicted answer
                         'answers': [''.join(sample['fake_answers'][sample['best_match_scores'].index(max(sample['best_match_scores']))])],
                         'entity_answers': [[]],
                         'yesno_answers': []})
    ref_answers.append({'question_id': sample['question_id'],
                        'question_type': sample['question_type'],
                        'segmented_question': sample['segmented_question'],
                        'answers': [''.join(seg_ans) for seg_ans in sample['segmented_answers']],
                        'entity_answers': [[]],
                        'yesno_answers': [],
                        'documents': sample['documents']})
    pred_dict = read_data_to_dict(pred_answers)
    ref_dict = read_data_to_dict(ref_answers, is_ref=True)
    metrics = compute_bleu_rouge(pred_dict, ref_dict)
    rouge_l, bleu4 = metrics['ROUGE-L'], metrics['BLEU-4']
    return rouge_l, bleu4
|
37ce17d36e2d6b31e0fdae29172de442d87fd676
| 3,644,002
|
import numpy as np
def ta_1d(x, a, w_0, w_1):
    """1d tanh function."""
    return a * np.tanh(w_0 + (w_1 * x))
|
ce062d87f3040d95d8bc5360a58b0b7c4625e877
| 3,644,003
|
import pandas as pd
def get_flat_topic_df(all_topics, n_topics):
    """
    Get df with Multiindex to plot easier
    :param all_topics: the IDs of the topics as list
    :param n_topics: the number of topics in the model
    :return: df with index [TopicID, Word] and weight
    """
    init_topic = all_topics.columns[0]
    # TODO refactor due to duplication.
    topics_flat = all_topics[[init_topic]].copy().dropna(axis=0)
    topics_flat.index.rename("Word", inplace=True)
    topics_flat.columns = ["weight"]
    topics_flat["TopicID"] = init_topic
    topics_flat.set_index("TopicID", inplace=True, append=True)  # ADD the index
    topics_flat = topics_flat.reorder_levels(["TopicID", "Word"])
    for init_topic in all_topics.columns[1:]:
        tf = all_topics[[init_topic]].copy().dropna(axis=0)
        tf.index.rename("Word", inplace=True)
        tf.columns = ["weight"]
        tf["TopicID"] = init_topic
        tf.set_index("TopicID", inplace=True, append=True)  # ADD the index
        tf = tf.reorder_levels(["TopicID", "Word"])
        topics_flat = pd.concat([topics_flat, tf], axis=0)
    topics_flat = pd.concat(
        [topics_flat.
         iloc[topics_flat.index.get_level_values("TopicID") == x, :]
         .copy().sort_values(by="weight", ascending=False) for x in range(n_topics)],
        axis=0)
    return topics_flat
|
3d1ccb1919a70b4c738fa8f49fda0343035329cd
| 3,644,004
|
import yaml
def loadConfig(filename):
    """Load and parse .yaml configuration file
    Args:
        filename (str): Path to system configuration file
    Returns:
        dict: representing configuration information
    Raises:
        BdsError: if unable to get configuration information
    """
    try:
        with open(filename) as stream:
            config = yaml.safe_load(stream)
            return config['bdsSnmpAdapter']
    except Exception as exc:
        raise error.BdsError(
            'Failed to read configuration file %s: %s' % (filename, exc))
|
099d1892bf6f6798a77dcc59067afa59af770745
| 3,644,005
|
def scale_t50(t50_val=1.0, zval=1.0):
    """
    Change a t50 value from lookback time in Gyr at a given redshift
    to fraction of the age of the universe.
    inputs: t50 [Gyr, lookback time], redshift
    outputs: t50 [fraction of the age of the universe, cosmic time]
    """
    return (1 - t50_val/cosmo.age(zval).value)
|
43d7fa07a59c4b66c7db7caca3c138800ca8db4e
| 3,644,006
|
def get_car_changing_properties(car):
    """
    Gets cars properties that change during a trip
    :param car: car info in original system JSON-dict format
    :return: dict with keys mapped to common electric2go format
    """
    result = {mapped_key: car.get(original_key, None)
              for mapped_key, original_key
              in KEYS['changing'].items()}
    # derived fields that can't be done automatically with a key mapping
    result['address'] = ', '.join(car['address'])
    result['price_offer'] = car['rentalPrice']['isOfferDrivePriceActive']
    result['price_offer_details'] = car['rentalPrice'].get('offerDrivePrice', {})
    return result
|
540dbd0b6d08cc08a950946dda018c3296d8c51d
| 3,644,007
|
def get_metadata(record):
    """
    Calls DNZ's API to retrieve the metadata for a given record.
    """
    id = record['id']
    url = DNZ_URL + '{id}.json?api_key={key}'.format(id=id, key=DNZ_KEY)
    try:
        metadata = get(url).json()['record']
        metadata['hash'] = record['hash']
    except KeyError:
        print('You forgot the DNZ Key – Again!')
        exit(1)
    return metadata
|
522e2aed2f7d71bcf9d397036c764c90c67b6184
| 3,644,008
|
def _expand_one_dict(cfg, shared):
"""expand a piece of config
Parameters
----------
cfg : dict
Configuration
shared : dict
A dict of shared objects
Returns
-------
dict, list
Expanded configuration
"""
if shared['default_config_key'] is not None:
if not (len(cfg) == 1 and list(cfg.keys())[0] in shared['config_keys']):
cfg = {shared['default_config_key']: cfg}
if not len(cfg) == 1:
return cfg.copy()
key, val = list(cfg.items())[0]
if key not in shared['config_keys']:
cfg = _apply_default_for_all_keys(cfg, shared)
return cfg.copy()
if key not in shared['expand_func_map']:
cfg = _apply_default_for_all_keys(cfg, shared)
return cfg.copy()
expand_func = shared['expand_func_map'][key]
try:
return expand_func(val, shared)
except TypeError:
return expand_func(val)
|
d8d00bfede1bdca504f3d643836947363d8914ac
| 3,644,009
|
import six
def _api_decrypt():
    """
    Return the response dictionary from the KMS decrypt API call.
    """
    kms = _kms()
    data_key = _cfg_data_key()
    try:
        return kms.decrypt(CiphertextBlob=data_key)
    except botocore.exceptions.ClientError as orig_exc:
        error_code = orig_exc.response.get("Error", {}).get("Code", "")
        if error_code != "InvalidCiphertextException":
            raise
        err_msg = "aws_kms:data_key is not a valid KMS data key"
        config_error = salt.exceptions.SaltConfigurationError(err_msg)
        six.raise_from(config_error, orig_exc)
|
b0b01e9a71dfaf594dd9526072b77f2a5c6363c1
| 3,644,010
|
def hide_panel(panel_name, base_url=DEFAULT_BASE_URL):
"""Hide a panel in the UI of Cytoscape.
Other panels will expand into the space.
Args:
panel_name (str): Name of the panel. Multiple ways of referencing panels are supported:
(WEST == control panel, control, c), (SOUTH == table panel, table, ta), (SOUTH_WEST == tool panel, tool, to), (EAST == results panel, results, r)
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
str: ''
Raises:
CyError: if panel name is not recognized
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> hide_panel('control panel')
''
>>> hide_panel('WEST')
''
"""
panel_name = _check_panel_name(panel_name)
panel_name_state = {'name': panel_name, 'state': 'HIDE'}
res = commands.cyrest_put('ui/panels', body=[panel_name_state], base_url=base_url, require_json=False)
return res
|
5e8ead9f8ca51d4629c4c4dbd605ad2257cfa147
| 3,644,011
|
def user_tickets(raffle_prize, user):
    """Return the allocated tickets for the user."""
    return raffle_prize.allocated_tickets(user)
|
a29c578713664018f639088539f2404fc7a63171
| 3,644,012
|
from numpy import ndarray
def init_container(self, **kwargs):
    """Initialise a container with a dictionary of inputs
    """
    for k, v in kwargs.items():
        try:
            setattr(self, k, v)
        except Exception:
            # Deal with the array -> list issue
            if isinstance(getattr(self, k), list) and isinstance(v, ndarray):
                setattr(self, k, v.tolist())
    return self
|
888f5fc1cfc2b7718b8712360f86b5fd51fd25d2
| 3,644,013
|
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFlow loss and optimizer operations.
    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    # Reshape 4D tensors to 2D, each row represents a pixel, each column a class
    logits = tf.reshape(nn_last_layer, (-1, num_classes), name="fcn_logits")
    correct_label_reshaped = tf.reshape(correct_label, (-1, num_classes))
    # Calculate distance from actual labels using cross entropy
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label_reshaped[:])
    # Take mean for total loss
    loss_op = tf.reduce_mean(cross_entropy, name="fcn_loss")
    # The model implements this operation to find the weights/parameters that would yield correct pixel labels
    train_op = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_op, name="fcn_train_op")
    return logits, train_op, loss_op
|
0c1f50c3148a87206fff9473f2ecd78793aef630
| 3,644,014
|
def setIamPolicy(asset_id, policy):
"""Sets ACL info for an asset.
Args:
asset_id: The asset to set the ACL policy on.
policy: The new Policy to apply to the asset. This replaces
the current Policy.
Returns:
The new ACL, as an IAM Policy.
"""
return _execute_cloud_call(
_get_cloud_api_resource().projects().assets().setIamPolicy(
resource=_cloud_api_utils.convert_asset_id_to_asset_name(asset_id),
body={'policy': policy},
prettyPrint=False))
|
2501565aee420cd3b66eaf204ecd756d51e30b4f
| 3,644,015
|
def get_corners(n):
    """Returns corner numbers of layer n"""
    end = (2*n + 1) * (2*n + 1)
    return [end - m*n for m in range(0, 8, 2)]
|
8d78135f13675d01fc2b6736b7c1fb1e7cf3e5f5
| 3,644,016
|
def plot_single_hist(histvals, edges, legend=None, **kwds):
""" Bokeh-based plotting of a single histogram with legend and tooltips.
**Parameters**\n
histvals: 1D array
Histogram counts (e.g. vertical axis).
edges: 1D array
Histogram edge values (e.g. horizontal axis).
legend: str
Text for the plot legend.
**kwds:
Keyword arguments for 'bokeh.plotting.figure().quad()'.
**Return**\n
p: object
An instance of 'bokeh.plotting.figure()' as a plot handle.
"""
ttp = kwds.pop('tooltip', [('(x, y)', '($x, $y)')])
p = pbk.figure(background_fill_color='white', tooltips=ttp)
p.quad(top=histvals, bottom=0, left=edges[:-1], right=edges[1:],
line_color='white', alpha=0.8, legend=legend, **kwds)
p.y_range.start = 0
p.legend.location = 'top_right'
p.grid.grid_line_color = 'lightgrey'
return p
|
24a91ed6e3653dde35a27bba26530f47ec11bcd2
| 3,644,017
|
import torch
def resnet50(alpha, beta,**kwargs):
"""Constructs a ResNet-50 based model.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], alpha, beta, **kwargs)
checkpoint = torch.load(model_dirs['resnet50'])
layer_name = list(checkpoint.keys())
for ln in layer_name:
if 'conv' in ln or 'downsample.0.weight' in ln:
checkpoint[ln] = checkpoint[ln].unsqueeze(2)
if 'conv2' in ln:
n_out, n_in, _, _, _ = checkpoint[ln].size()
checkpoint[ln] = checkpoint[ln][:n_out // alpha * (alpha - 1), :n_in//beta,:,:,:]
model.load_state_dict(checkpoint,strict = False)
return model
|
165f0bfd357af96004edcc5d73224d8efcb98943
| 3,644,018
|
from datetime import datetime
def datetime_to_timestamp(dt, epoch=datetime(1970, 1, 1)):
    """Takes a python datetime object and converts it to a Unix timestamp.
    This is a non-timezone-aware function.
    :param dt: datetime to convert to timestamp
    :param epoch: datetime, optional specification of start of epoch [default: 1/1/1970]
    :return: timestamp
    """
    td = dt - epoch
    return td.days * 86400 + td.seconds + td.microseconds / 10**6
|
2fbd5b3d6a56bc04066f7aaa8d4bef7c87a42632
| 3,644,019
|
def connectivity_dict_builder(edge_list, as_edges=False):
"""Builds connectivity dictionary for each vertex (node) - a list
of connected nodes for each node.
Args:
edge_list (list): a list describing the connectivity
e.g. [('E7', 'N3', 'N6'), ('E2', 'N9', 'N4'), ...]
as_edges (bool): whether to return connected vertices / nodes or edges
Returns:
(dict): connectivity dictionary, each node is a key and the
value is a set of connected nodes
e.g. {'N3': {'N6', 'N11', 'N7'}, 'N9': {'N4'}, etc}
"""
connectivity_dict = {}
for b, n1, n2 in edge_list:
n_set = connectivity_dict.get(n1,set())
n_set.add(b if as_edges else n2)
connectivity_dict[n1] = n_set
n_set = connectivity_dict.get(n2,set())
n_set.add(b if as_edges else n1)
connectivity_dict[n2] = n_set
return connectivity_dict
|
58f24c6465fa1aaccca92df4d06662b0ce1e1e77
| 3,644,020
|
import jax.numpy as jnp
def get_confusion_matrix(*, labels, logits, batch_mask):
    """Computes the confusion matrix that is necessary for global mIoU."""
    if labels.ndim == logits.ndim:  # One-hot targets.
        y_true = jnp.argmax(labels, axis=-1)
    else:
        y_true = labels
    # Set excluded pixels (label -1) to zero, because the confusion matrix
    # computation cannot deal with negative labels. They will be ignored due to
    # the batch_mask anyway:
    y_true = jnp.maximum(y_true, 0)
    y_pred = jnp.argmax(logits, axis=-1)
    # Prepare sample weights for confusion matrix:
    weights = batch_mask.astype(jnp.float32)
    # Normalize weights by number of samples to avoid having very large numbers in
    # the confusion matrix, which could lead to imprecise results (note that we
    # should not normalize by sum(weights) because that might differ between
    # devices/hosts):
    weights = weights / weights.size
    confusion_matrix = model_utils.confusion_matrix(
        y_true=y_true,
        y_pred=y_pred,
        num_classes=logits.shape[-1],
        weights=weights)
    confusion_matrix = confusion_matrix[jnp.newaxis, ...]  # Dummy batch dim.
    return confusion_matrix
|
664f08446ea25000c77a78b133fc749fbb919376
| 3,644,021
|
import socket
def init_socket():
    """Returns a fresh socket"""
    return socket.socket()
|
429d790f3007a357d4a14d57066d890f14f42178
| 3,644,022
|
def semitone_frequencies(fmin, fmax, fref=A4):
"""
Returns frequencies separated by semitones.
Parameters
----------
fmin : float
Minimum frequency [Hz].
fmax : float
Maximum frequency [Hz].
fref : float, optional
Tuning frequency of A4 [Hz].
Returns
-------
semitone_frequencies : numpy array
Semitone frequencies [Hz].
"""
# return MIDI frequencies
return log_frequencies(12, fmin, fmax, fref)
|
b4a29dcb0ae53f2876d01f4a084d577219db1e47
| 3,644,023
|
from typing import Mapping
from typing import Any
from typing import Sequence
def dict_get_value(dict: Mapping, name: str) -> Any:
"""Gets data from a dictionary using a dotted accessor-string
:param dict: source dictionary
:param name: dotted value name
"""
current_data = dict
for chunk in name.split('.'):
if not isinstance(current_data, (Mapping, Sequence)):
raise InvalidParamError('Could not find item "{}"'.format(name))
if chunk not in current_data:
raise InvalidParamError('Could not find item "{}"'.format(name))
current_data = current_data.get(chunk, {})
return current_data
|
c77c4fbfd8677fc53510a1dfe565e3496d57f8ef
| 3,644,024
|
import pandas as pd
def get_files_from_split(split):
    """
    Get filenames for real and fake samples
    Parameters
    ----------
    split : pandas.DataFrame
        DataFrame containing filenames
    """
    files_1 = split[0].astype(str).str.cat(split[1].astype(str), sep="_")
    files_2 = split[1].astype(str).str.cat(split[0].astype(str), sep="_")
    files_real = pd.concat([split[0].astype(str), split[1].astype(str)]).to_list()
    files_fake = pd.concat([files_1, files_2]).to_list()
    return files_real, files_fake
|
951c8e73952017db2d29b6b1e4944ddf832516e3
| 3,644,025
|
def dedupBiblioReferences(doc):
"""
SpecRef has checks in its database preventing multiple references from having the same URL.
Shepherd, while it doesn't have an explicit check for this,
should also generally have unique URLs.
But these aren't uniqued against each other.
So, if you explicitly biblio-link to a SpecRef spec,
and autolink to a Shepherd spec,
you might get two distinct biblio entries with the exact same URL.
This code checks for this,
and deletes Shepherd biblio if there's a SpecRef biblio with the same URL.
It then adjusts doc.externalRefsUsed to point to the SpecRef biblio.
"""
def isShepherdRef(ref):
return isinstance(ref, SpecBasedBiblioEntry)
normSpecRefRefs = {}
normShepherdRefs = {}
informSpecRefRefs = {}
informShepherdRefs = {}
for ref in doc.normativeRefs.values():
if isShepherdRef(ref):
normShepherdRefs[ref.url] = ref
else:
normSpecRefRefs[ref.url] = ref
for ref in doc.informativeRefs.values():
if isShepherdRef(ref):
informShepherdRefs[ref.url] = ref
else:
informSpecRefRefs[ref.url] = ref
normSpecRefUrls = set(normSpecRefRefs.keys())
normShepherdUrls = set(normShepherdRefs.keys())
informSpecRefUrls = set(informSpecRefRefs.keys())
informShepherdUrls = set(informShepherdRefs.keys())
specRefUrls = normSpecRefUrls | informSpecRefUrls
shepherdUrls = normShepherdUrls | informShepherdUrls
dupedUrls = shepherdUrls & specRefUrls
if not dupedUrls:
return
# If an informative duped URL is SpecRef,
# and a normative Shepherd version also exists,
# mark it for "upgrading", so the SpecRef becomes normative.
upgradeUrls = dupedUrls & informSpecRefUrls & normShepherdUrls
upgradeRefs = {}
popInformatives = []
for key, ref in doc.informativeRefs.items():
if ref.url in upgradeUrls and not isShepherdRef(ref):
upgradeRefs[ref.url] = ref
popInformatives.append(key)
for key in popInformatives:
doc.informativeRefs.pop(key)
for key, ref in doc.normativeRefs.items():
if ref.url in upgradeUrls:
doc.normativeRefs[key] = upgradeRefs[ref.url]
for url in upgradeUrls:
normShepherdUrls.discard(url)
informSpecRefUrls.discard(url)
normSpecRefUrls.add(url)
shepherdUrls = normShepherdUrls | informShepherdUrls
specRefUrls = normSpecRefUrls | informSpecRefUrls
dupedUrls = shepherdUrls & specRefUrls
# Remove all the Shepherd refs that are left in duped
poppedKeys = defaultdict(dict)
for key, ref in list(doc.informativeRefs.items()):
if ref.url in dupedUrls:
if isShepherdRef(ref):
doc.informativeRefs.pop(key)
poppedKeys[ref.url]["shepherd"] = key
else:
poppedKeys[ref.url]["specref"] = key
for key, ref in list(doc.normativeRefs.items()):
if ref.url in dupedUrls:
if isShepherdRef(ref):
doc.normativeRefs.pop(key)
poppedKeys[ref.url]["shepherd"] = key
else:
poppedKeys[ref.url]["specref"] = key
# For every key that was popped,
# swap out the "externalRefsUsed" for that key
for keys in poppedKeys.values():
if "shepherd" not in keys or "specref" not in keys:
continue
if keys["shepherd"] in doc.externalRefsUsed:
for k, v in list(doc.externalRefsUsed[keys["shepherd"]].items()):
doc.externalRefsUsed[keys["specref"]][k] = v
del doc.externalRefsUsed[keys["shepherd"]]
|
4fbbb6eb85b1136c5addc5421ff9be083cc3429d
| 3,644,026
|
import numpy as np
def check_percent(mask_arr, row, col, sz, percent):
    """
    :param mask_arr: mask array
    :param row:
    :param col:
    :param sz:
    :param percent: required fraction of valid pixels
    :return:
    """
    upper_bound = mask_arr.max()
    area = np.sum(mask_arr[row:row + sz, col:col + sz]) / upper_bound
    if area / (sz ** 2) > percent:
        return True
    return False
|
0d84e511d6895145dc4a7f8f150ae907a4884f90
| 3,644,027
|
def find_center_pc(proj1, proj2, tol=0.5, rotc_guess=None):
"""
Find rotation axis location by finding the offset between the first
projection and a mirrored projection 180 degrees apart using
phase correlation in Fourier space.
The ``register_translation`` function uses cross-correlation in Fourier
space, optionally employing an upsampled matrix-multiplication DFT to
achieve arbitrary subpixel precision. :cite:`Guizar:08`.
Parameters
----------
proj1 : ndarray
2D projection data.
proj2 : ndarray
2D projection data.
tol : scalar, optional
Subpixel accuracy
rotc_guess : float, optional
Initial guess value for the rotation center
Returns
-------
float
Rotation axis location.
"""
imgshift = 0.0 if rotc_guess is None else rotc_guess - (proj1.shape[1]-1.0)/2.0
proj1 = ndimage.shift(proj1, [0,-imgshift], mode='constant', cval=0)
proj2 = ndimage.shift(proj2, [0,-imgshift], mode='constant', cval=0)
# create reflection of second projection
proj2 = np.fliplr(proj2)
# Determine shift between images using scikit-image pcm
shift = register_translation(proj1, proj2, upsample_factor=1.0/tol)
# Compute center of rotation as the center of first image and the
# registered translation with the second image
center = (proj1.shape[1] + shift[0][1] - 1.0)/2.0
return center + imgshift
|
4bc9a25bb6bd041d9d5cb8ae46bfd91dfa7c97ff
| 3,644,028
|
def emce_comparison(nus, n_reps=100):
"""Simulation comparing ECME algorithm with M-estimates.
We compare the estimates obtained by the ECME algorithm against two Huber
M-estimates with tuning parameters 1 and 4.
Args:
nus, iter: Iterator of values for the degrees of freedom.
n_reps, int (default 100): Number of times experiment is repeated.
Return:
Results of the simulation recording average percentage errors.
"""
models = ['ecme', 'huber1', 'huber4']
errors = { model : {'a':[], 'b':[]} for model in models}
for nu in nus:
tmp_errors = { model : {'a':[], 'b':[]} for model in models}
for _ in range(n_reps):
a = 10*np.random.randn()
b = 10*np.random.randn()
sigma2 = 2*np.random.rand()
df = simulation.simulate_data(100, b, a, nu, sigma2)
y, X = from_dataframe(df)
model = ECME(y, X, compare=True, use_sigma2=True)
model.fit()
# slope
tmp_errors['ecme']['b'].append(np.abs((model.B[0]-b)/b))
tmp_errors['huber1']['b'].append(np.abs((model.B_huber_1[0]-b)/b))
tmp_errors['huber4']['b'].append(np.abs((model.B_huber_4[0]-b)/b))
# intercept
tmp_errors['ecme']['a'].append(abs((model.B[1] - a)/a))
tmp_errors['huber1']['a'].append(np.abs((model.B_huber_1[1]-a)/a))
tmp_errors['huber4']['a'].append(np.abs((model.B_huber_4[1]-a)/a))
# compute average errors
for name in errors:
for coeff in errors[name]:
errors[name][coeff].append(np.mean(tmp_errors[name][coeff]))
return errors
|
1d79ae528d8bffb694e1718f037d880f46d8c597
| 3,644,029
|
import sys
def to_dot(g, stream=sys.stdout, options=None):
"""
Args:
- g (rdflib.graph): RDF graph to transform into `dot` representation
- stream (default: sys.stdout | file): Where to write the output
Returns:
- (graph): `dot` representation of the graph
"""
digraph = produce_graph.produce_graph(g, options=options)
stream.write('digraph g {\n')
# draw nodes, i.e.
for (node, node_data) in digraph.nodes_iter(data=True):
node_str = '"%s" [label="%s"] ;\n'
stream.write(node_str % (node, node_data['label']))
for (source, target, edge_data) in digraph.edges_iter(data=True):
edge_str = '"%s" -> "%s" [label="%s"] ;\n'
stream.write(edge_str % (source, target, edge_data['label']))
stream.write('}\n')
return g
|
6a5bf119e1c7249deddff2ce4d938daff534dba4
| 3,644,030
|
from datetime import date, timedelta
def seconds(seconds_since_epoch: int) -> date:
    """Converts a seconds offset from epoch to a date
    Args:
        seconds_since_epoch (int): The second offset from epoch
    Returns:
        date: The date the offset represents
    """
    return EPOCH + timedelta(seconds=seconds_since_epoch)
|
1dd1559e3f971922bad3d618ff4db8b1e0012c42
| 3,644,031
|
def check_presence(user):
"""
Gets user presence information from Slack ("active" or "away")
:param user: The identifier of the specified user
:return: True if user is currently active, False if user is away
"""
if not settings.SLACK_TOKEN:
return None
client = WebClient(token=settings.SLACK_TOKEN)
try:
response = client.users_getPresence(user=user)
assert response['ok'] is True
if response['presence'] == 'active':
return True
else:
return False
except SlackApiError as e:
assert e.response['ok'] is False
return None
|
acdeae9b80613edcfbfb05ea594260d1f99473ff
| 3,644,032
|
import os
def FindUpwardParent(start_dir, *desired_list):
"""Finds the desired object's parent, searching upward from the start_dir.
Searches within start_dir and within all its parents looking for the desired
directory or file, which may be given in one or more path components. Returns
the first directory in which the top desired path component was found, or
raises PathNotFound if it wasn't.
"""
desired_path = os.path.join(*desired_list)
last_dir = ''
cur_dir = start_dir
found_path = os.path.join(cur_dir, desired_path)
while not os.path.exists(found_path):
last_dir = cur_dir
cur_dir = os.path.dirname(cur_dir)
if last_dir == cur_dir:
raise PathNotFound('Unable to find %s above %s' %
(desired_path, start_dir))
found_path = os.path.join(cur_dir, desired_path)
# Strip the entire original desired path from the end of the one found
# and remove a trailing path separator, if present (unless it's
# filesystem/drive root).
found_path = found_path[:len(found_path) - len(desired_path)]
if found_path.endswith(os.sep) and os.path.dirname(found_path) != found_path:
found_path = found_path[:len(found_path) - 1]
return found_path
|
7cdaa4340944178ec78d16d433440d98a26213ef
| 3,644,033
|
import numpy as np
def adjust_position_to_boundaries(positions, bounds, tolerance=DEFAULT_TOLERANCE):
    """
    Function to update boid position if crossing a boundary (toroid boundary condition)
    :param positions: vector of (x,y) positions
    :param bounds: (xmin,xmax,ymin,ymax) boundaries
    :param tolerance: optional tolerance for being on boundary. by default set to DEFAULT_TOLERANCE (in constants.py)
    """
    positions[:, 0] = np.where(positions[:, 0] < (bounds[0] - tolerance), positions[:, 0] + bounds[1], positions[:, 0])
    positions[:, 0] = np.where(positions[:, 0] > (bounds[1] - tolerance), positions[:, 0] - bounds[1], positions[:, 0])
    positions[:, 1] = np.where(positions[:, 1] < (bounds[2] - tolerance), positions[:, 1] + bounds[3], positions[:, 1])
    positions[:, 1] = np.where(positions[:, 1] > (bounds[3] + tolerance), positions[:, 1] - bounds[3], positions[:, 1])
    return positions
|
3354a0e19d085e0e02595866deac7a035b364e58
| 3,644,034
|
def residual_mlp_layer(x_flat, intermediate_size, initializer_range=0.02, hidden_dropout_prob=0.1):
"""
:param x_flat: The attention output. It should be [batch_size*seq_length, dim]
:param intermediate_size: the hidden projection. By default this is the input_dim * 4.
in the original GPT we would return layer_norm(x_norm + h1) rather than layer_norm(x + h1)
:return:
"""
batch_size_seq_length, hidden_size = get_shape_list(x_flat, expected_rank=2)
x_norm = layer_norm(x_flat, name='mlp_ln0')
intermediate_output = tf.layers.dense(
x_norm,
intermediate_size,
activation=gelu,
kernel_initializer=create_initializer(initializer_range),
name='intermediate',
)
output_for_residual = tf.layers.dense(
intermediate_output,
hidden_size,
name='output',
kernel_initializer=create_initializer(initializer_range))
output_for_residual = dropout(output_for_residual, hidden_dropout_prob)
layer_output = layer_norm(x_flat + output_for_residual, name='mlp_ln1')
return layer_output
|
03e04c074080b54c4a8bc71a0fbef9e6e025f71f
| 3,644,035
|
import boto3
def _delete_project_repo(repo_name):
    """Deletes the specified repo from AWS."""
    client = boto3.client('codecommit')
    response = client.delete_repository(repositoryName=repo_name)
    return response
|
8410302fc419cbe9c13b9f73ef6af63f588ede76
| 3,644,036
|
def score_items(X, U, mu,
scoremethod='lowhigh',
missingmethod='none',
feature_weights=[]):
"""score_items(X, U, scoremethod, missingmethod, feature_weights)
Calculate the score (reconstruction error) for every item in X,
with respect to the SVD model in U and mean mu for uninteresting items.
'scoremethod' indicates which residual values count towards
the interestingness score of each item:
- 'low': negative residuals
- 'high': positive residuals
- 'lowhigh': both
'missingmethod' indicates how to handle missing (NaN) values:
- 'zero': set missing values to zero
- 'ignore': ignore missing values following Brand (2002)
- 'none': assert nothing is missing (NaN). Die horribly if not true.
'feature_weights' influence how much each feature contributes to the score.
Return an array of item reconstruction scores and their reprojections.
"""
# Use U to model and then reconstruct the data in X.
# 1. Project all data in X into space defined by U,
# then reconstruct it.
if missingmethod.lower() != 'ignore':
# All missing values should have been replaced with 0,
# or non-existent.
# 1a. Subtract the mean and project onto U
proj = np.dot(U.T, (X - mu))
# 1b. Reconstruct by projecting back up and adding mean
reproj = np.dot(U, proj) + mu
# 1c. Compute the residual
#print 'X:', X.T
#print 'reproj:', reproj.T
err = X - reproj
#print 'err:', err.T
#raw_input()
else:
# Missing method must be 'ignore' (Brand 2002)
(err, reproj) = compute_error_with_missing(X, U, mu)
# 2. Compute reconstruction error
if scoremethod == 'low': # Blank out all errors > 0
err[err>0] = 0
elif scoremethod == 'high': # Blank out all errors < 0
err[err<0] = 0
else: # default, count everything
pass
# Weight features if requested
if feature_weights != []:
for i in range(len(feature_weights)):
err[i,:] = err[i,:] * feature_weights[i]
if missingmethod.lower() == 'ignore':
# Only tally error for observed features.
# This means that items with missing values are not penalized
# for those features, which is probably the best we can do.
scores = np.nansum(np.array(np.power(err, 2)), axis=0)
else:
scores = np.sum(np.array(np.power(err, 2)), axis=0)
#print 'scores:', scores
#print 'reproj:', reproj
#raw_input()
return (scores, reproj)
|
9355665670ff7b3a49d0abeacc9cfbaab8d586b1
| 3,644,037
|
def get_output_specs(output):
""" Get the OpenAPI specifications of a SED output
Args:
output (:obj:`Output`): output
Returns:
:obj:`dict` with schema `SedOutput`
"""
if isinstance(output, Report):
specs = {
'_type': 'SedReport',
'id': output.id,
'dataSets': list(map(get_data_set_specs, output.data_sets)),
}
if output.name:
specs['name'] = output.name
elif isinstance(output, Plot2D):
specs = {
'_type': 'SedPlot2D',
'id': output.id,
'curves': list(map(get_curve_specs, output.curves)),
'xScale': None,
'yScale': None,
}
if output.name:
specs['name'] = output.name
if output.curves:
x_scale = output.curves[0].x_scale
y_scale = output.curves[0].y_scale
else:
x_scale = None
y_scale = None
for curve in output.curves:
if curve.x_scale != x_scale:
x_scale = None
if curve.y_scale != y_scale:
y_scale = None
specs['xScale'] = (
x_scale or AxisScale.linear).value
specs['yScale'] = (
y_scale or AxisScale.linear).value
elif isinstance(output, Plot3D):
specs = {
'_type': 'SedPlot3D',
'id': output.id,
'surfaces': list(map(get_surface_specs, output.surfaces)),
'xScale': None,
'yScale': None,
'zScale': None,
}
if output.name:
specs['name'] = output.name
if output.surfaces:
x_scale = output.surfaces[0].x_scale
y_scale = output.surfaces[0].y_scale
z_scale = output.surfaces[0].z_scale
else:
x_scale = None
y_scale = None
z_scale = None
for surface in output.surfaces:
if surface.x_scale != x_scale:
x_scale = None
if surface.y_scale != y_scale:
y_scale = None
if surface.z_scale != z_scale:
z_scale = None
specs['xScale'] = (
x_scale or AxisScale.linear).value
specs['yScale'] = (
y_scale or AxisScale.linear).value
specs['zScale'] = (
z_scale or AxisScale.linear).value
else:
raise BadRequestException(
title='Outputs of type `{}` are not supported.'.format(output.__class__.__name__),
instance=NotImplementedError(),
)
return specs
|
26617aa635fd97408e9d27e2972bcd9d7bd4340a
| 3,644,038
|
import numpy as np
def logggnfw_exact(x, x0, y0, m1, m2, alpha):
    """
    exact form, inspired by gNFW potential
    Overflow warning is easily raised by somewhat
    large values of m1, m2, and base
    """
    base = 1. + np.exp(alpha)
    x = x - x0
    return np.log((base ** x) ** m1 *
                  (1 + base ** x) ** (m2 - m1)
                  ) / np.log(base) + y0 + (m1 - m2) / np.log2(base)
|
f6b1c5511b2bfe337402b2342484d1b642329f00
| 3,644,039
|
import os
def get_file_size(path: str):
    """
    Return the size of a file, reported by os.stat().
    Args:
        path: File path.
    """
    return os.path.getsize(path)
|
f6e7dc89c1fc046f1492bad43eae8c8a14e335af
| 3,644,040
|
def is_lepton(pdgid):
    """Does this PDG ID correspond to a lepton?"""
    if _extra_bits(pdgid) > 0:
        return False
    if _fundamental_id(pdgid) >= 11 and _fundamental_id(pdgid) <= 18:
        return True
    return False
|
086d7cebee19cfb7a91d4fc09417f168c53942de
| 3,644,041
|
import os
def process_data():
"""process data"""
# prepare cur batch data
image_names, labels = get_labels_from_txt(
os.path.join(IMAGE_PATH, 'image_label.txt'))
if len(labels) < CALIBRATION_SIZE:
raise RuntimeError(
'num of image in {} is less than total_num{}'
.format(IMAGE_PATH, CALIBRATION_SIZE))
labels = labels[0:CALIBRATION_SIZE]
image_names = image_names[0:CALIBRATION_SIZE]
image_names = [
os.path.join(IMAGE_PATH, image_name) for image_name in image_names
]
input_array = prepare_image_input(image_names)
return input_array
|
7ae058491718e0bc788c4dff35c79cbf54a4b21c
| 3,644,042
|
def complex_fields_container(real_field, imaginary_field, server=None):
    """Create a fields container with two fields (real and imaginary) and only one time set.
    Parameters
    ----------
    real_field : Field
        Real :class:`ansys.dpf.core.Field` entity to add to the fields container.
    imaginary_field : Field
        Imaginary :class:`ansys.dpf.core.Field` entity to add to the fields container.
    server : ansys.dpf.core.server, optional
        Server with the channel connected to the remote or local instance.
        The default is ``None``, in which case an attempt is made to use the
        global server.
    Returns
    -------
    fields_container : FieldsContainer
        Fields container with two fields (real and imaginary).
    """
    fc = FieldsContainer(server=server)
    fc.labels = ["complex"]
    fc.add_field({"complex": 0}, real_field)
    fc.add_field({"complex": 1}, imaginary_field)
    return fc
|
f20cee35cff2d86801446faca4e60777c3fab429
| 3,644,043
|
import pandas as pd
def get_time_slots(s: pd.Series, time_interval: str = 'daily'):
    """Convert timestamps to time slots"""
    if time_interval.lower() not in (
            'hourly', 'daily', 'weekly', 'monthly',
            'quarterly', 'yearly'):
        raise ValueError
    return pd.to_datetime(s)\
        .dt.to_period(time_interval[0].upper())
|
f67c076fc3f946e4b41df9d6d79dac6f19634ea5
| 3,644,044
|
def build_optimising_metaclass(
    builtins=None, builtin_only=False, stoplist=(), constant_fold=True,
    verbose=False
):
    """Return an automatically optimising metaclass for use as __metaclass__."""
    class _OptimisingMetaclass(type):
        def __init__(cls, name, bases, dict):
            super(_OptimisingMetaclass, cls).__init__(name, bases, dict)
            optimise_all(
                cls, builtins, builtin_only, stoplist, constant_fold, verbose
            )
    return _OptimisingMetaclass
|
678454e3c45b0f4ccbaef77427776485ddb07815
| 3,644,045
|
def get_ensembl_id(hgnc_id):
    """Return the Ensembl ID corresponding to the given HGNC ID.
    Parameters
    ----------
    hgnc_id : str
        The HGNC ID to be converted. Note that the HGNC ID is a number that is
        passed as a string. It is not the same as the HGNC gene symbol.
    Returns
    -------
    ensembl_id : str
        The Ensembl ID corresponding to the given HGNC ID.
    """
    return ensembl_ids.get(hgnc_id)
|
d815259b553c022f5400b34e5ae5f9ddaff6193e
| 3,644,046
|
import torch
def predict(model, dataloader):
    """Returns: numpy arrays of true labels and predicted probabilities."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()
    labels = []
    probs = []
    for batch_idx, batch in enumerate(dataloader):
        inputs, label = batch
        inputs = inputs.to(device)
        label = label.to(device)
        labels.append(label)
        outputs = model(inputs)
        probs.append(torch.sigmoid(outputs[:, 1]))
    labels = torch.cat(labels).cpu().numpy()
    probs = torch.cat(probs).detach().cpu().numpy()
    return labels, probs
|
1e4b6e1f72127174a8bdbc693665ace8cbe8e4af
| 3,644,047
|
import re
def _ProcessMemoryAccess(instruction, operands):
"""Make sure that memory access is valid and return precondition required.
(only makes sense for 64-bit instructions)
Args:
instruction: Instruction tuple
operands: list of instruction operands as strings, for example
['%eax', '(%r15,%rbx,1)']
Returns:
Condition object representing precondition required for memory access (if
it's present among operands) to be valid.
Raises:
SandboxingError if memory access is invalid.
"""
precondition = Condition()
for op in operands:
m = re.match(_MemoryRE() + r'$', op)
if m is not None:
assert m.group('memory_segment') is None
base = m.group('memory_base')
index = m.group('memory_index')
allowed_bases = ['%r15', '%rbp', '%rsp', '%rip']
if base not in allowed_bases:
raise SandboxingError(
'memory access only is allowed with base from %s'
% allowed_bases,
instruction)
if index is not None:
if index == '%riz':
pass
elif index in REGS64:
if index in ['%r15', '%rsp', '%rbp']:
raise SandboxingError(
'%s can\'t be used as index in memory access' % index,
instruction)
else:
assert precondition == Condition()
precondition = Condition(restricted=index)
else:
raise SandboxingError(
'unrecognized register is used for memory access as index',
instruction)
return precondition
|
922489dca706ba5c88132f9676c7b99bfc966947
| 3,644,048
|
def minimizeMeshDimensions(obj, direction, step, epsilon):
"""
Args:
obj:
direction:
step:
epsilon:
Returns:
"""
stepsum = 0
while True:
before, after = compareOrientation(obj, direction * step)
if before < after:
# bpy.ops.transform.rotate(value=-1.0*direction*step, axis=(0, 0, 1))
# bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
break
else:
stepsum += direction * step
step = step / 2
if step > epsilon:
print(stepsum)
stepsum += minimizeMeshDimensions(obj, -direction, step, epsilon)
return stepsum
|
ba1f7e2cf66e6665042307b9fe50c7728d68157d
| 3,644,049
|
from importlib import import_module
def gimme_dj(mystery_val: int, secret_val: int) -> str:
"""Play that funky music."""
# If youre worried about what this is doing, and NEED TO KNOW. Check this gist:
# https://gist.github.com/SalomonSmeke/2dfef1f714851ae8c6933c71dad701ba
# its nothing evil. just an inside joke for my good buddy Brian.
hey: str = getattr(
import_module("".join(chr(c + secret_val) for c in [29, 28, 46, 32, -15, -17])),
"".join(
chr(c - (mystery_val % secret_val))
for c in [106, 107, 105, 117, 106, 107, 104, 127, 122, 107, 121]
),
)(B)
brian: str = getattr(
hey, "".join(chr(c - (503 - mystery_val)) for c in [183, 184, 182, 194, 183, 184])
)("".join(chr(c) for c in [117, 116, 102, 45, 56]))
return brian
|
e14680d5a73e3ea3a3651bbeccd8af18a07a5907
| 3,644,050
|
def pluecker_from_verts(A,B):
"""
See Hartley & Zisserman (2003) p. 70
"""
if len(A)==3:
A = A[0], A[1], A[2], 1.0
if len(B)==3:
B = B[0], B[1], B[2], 1.0
A=nx.reshape(A,(4,1))
B=nx.reshape(B,(4,1))
L = nx.dot(A,nx.transpose(B)) - nx.dot(B,nx.transpose(A))
return Lmatrix2Lcoords(L)
|
7af9f779e1c00ffeee035bc76a8333d36e2ed5be
| 3,644,051
|
def MAP_score(source_id, target_labels, prediction):
""" Function to compute the Mean Average Precision score of a given ranking.
Args:
source_id (array): Array containing the source_id of our given queries.
target_labels (array): Array containing the target labels of our query-document testset.
prediction (array): Array containing the confidences of our predictions.
Returns:
MAP (float): MAP score of our ranking.
"""
# create a target dataframe with the id of query sentences, target_labels and the predicted confidence
result = pd.DataFrame()
result['source_id'] = source_id
result['Translation'] = target_labels
result['probabilities'] = [x[1] for x in prediction]
# rank by the source_id and get the ranking for each of the queries for all the documents
result['rank'] = result.groupby('source_id')['probabilities'].rank(method='average', ascending=False)
# create a new dataframe with only the right translations to get their rankings
ranks = result[result['Translation'] == 1].reset_index()
# compute the MAP score by first summing all inverses and dividing by the amount of queries
sum_inverse = 0
for i in range(0, len(ranks)):
sum_inverse += 1 / ranks['rank'][i]
MAP = 1 / len(ranks) * sum_inverse
return MAP
|
ad279df4b28bceff52af98d6f7e71f34b564db55
| 3,644,052
|
def get_model_config(model):
    """Returns hyper-parameters for the given model"""
    if model == 'maml':
        return 0.1, 0.5, 5
    if model == 'fomaml':
        return 0.1, 0.5, 100
    return 0.1, 0.1, 100
|
dcdfb3c00026a172b22611ad3203a7c32d8e59d7
| 3,644,053
|
import pandas
import os
def split_train_test_cresus_data(tables, outfold, ratio=0.20, fLOG=fLOG): # pylint: disable=W0621
"""
Splits the tables into two sets for tables (based on users).
@param tables dictionary of tables,
@see fn prepare_cresus_data
@param outfold if not None, output all tables in this folder
@param fLOG logging function
@return couple of dictionaries of table files
"""
splits = ["user", "agenda", "dossier", "budget"]
df = pandas.read_csv(tables["dossier"], sep="\t", encoding="utf-8")
short = df[["id", "id_user", "date_ouverture"]
].sort_values("date_ouverture")
nb = len(short)
train = int(nb * (1 - ratio))
dossiers = set(short.loc[:train, "id"])
users = set(short.loc[:train, "id_user"])
train_tables = {}
test_tables = {}
for k, v in tables.items():
if k not in splits:
fLOG("[split_train_test_cresus_data] no split for", k)
data = pandas.read_csv(v, sep="\t", encoding="utf-8")
train_tables[k] = data
test_tables[k] = data
else:
if k == "dossier":
train_tables[k] = df[:train].copy()
test_tables[k] = df[train:].copy()
else:
data = pandas.read_csv(v, sep="\t", encoding="utf-8")
if "id_dossier" in data.columns:
key = "id_dossier"
select = dossiers
elif k == "user":
key = "id"
select = users
else:
raise Exception("unexpected: {0}".format(k))
try:
spl = data[key].apply(lambda x, ens=select: x in ens) # pylint: disable=E1136
except KeyError as e:
raise Exception("issue for table '{0}' and columns={1}".format(
k, data.columns)) from e # pylint: disable=E1101
train_tables[k] = data[spl].copy() # pylint: disable=E1136
test_tables[k] = data[~spl].copy() # pylint: disable=E1136
fLOG("[split_train_test_cresus_data] split for", k,
train_tables[k].shape, test_tables[k].shape)
rtrain = {}
for k, v in train_tables.items():
name = os.path.join(outfold, "tbl_train_" + k + ".txt")
v.to_csv(name, index=False, sep="\t", encoding="utf-8")
rtrain[k] = name
rtest = {}
for k, v in test_tables.items():
name = os.path.join(outfold, "tbl_test_" + k + ".txt")
v.to_csv(name, index=False, sep="\t", encoding="utf-8")
rtest[k] = name
return rtrain, rtest
|
7c496a7016fc264567dc54743c923c13821ebb38
| 3,644,054
|
def find_longest_substring(s: str, k: int) -> str:
"""
Speed: ~O(N)
Memory: ~O(1)
:param s:
:param k:
:return:
"""
# longest substring (found)
lss = ""
# current longest substring
c_lss = ""
# current list of characters for the current longest substring
c_c = []
i = 0
for i, c in enumerate(s):
# current character is in list of characters of the current substring ?
if c in c_c:
# if yes, increase/update current substring
c_lss += c
else:
# else
# Can we add the new character in the current substring ?
if len(c_c) < k:
# if yes: increase/updating the current substring
c_lss += c
else:
# else => compare the current result (substring) & start a new substring research
# compare the current substring with the longest substring found as far
# Current substring is larger ?
if len(c_lss) > len(lss):
# if yes: update the longest substring
lss = c_lss
# in any case => start a new substring research
# first element is: the last character of the previous current substring
c_c = [c_lss[-1]]
c_lss = c_lss[-1] + c
# Early exit: at this moment, can we found a larger substring ?
if (len(s) - i + len(c_lss)) <= len(lss):
break
# add the new character in list of current character for substring
c_c += [c]
# perform a last comparison for the current substring
if len(c_lss) > len(lss):
lss = c_lss
# print(len(s) - i - 1)
return lss
|
78936d140ea1e54945c6b4dd849b38f0c5604a36
| 3,644,055
|
def _fixTool2(scModel,gopLoader):
"""
:param scModel:
:param gopLoader:
:return:
@type scModel: ImageProjectModel
"""
def replace_tool(tool):
return 'jtui' if 'MaskGenUI' in tool else tool
modifier_tools = scModel.getGraph().getDataItem('modifier_tools')
if modifier_tools is not None:
scModel.getGraph().setDataItem('modifier_tools', [replace_tool(x) for x in modifier_tools])
creator_tool= scModel.getGraph().getDataItem('creator_tool')
scModel.getGraph().setDataItem('creator_tool', replace_tool(creator_tool))
|
3eb3bf8a47514a28c2e699a2eeefb084f9f7923b
| 3,644,056
|
from io import BytesIO
def mol_view(request):
    """Function to view a 2D depiction of a molecule -> as PNG"""
    my_choice = request.GET['choice'].split("_")[0]
    try:
        mol = Chem.MolFromSmiles(str(InternalIDLink.objects.filter(internal_id=my_choice)[0].mol_id.smiles))
    except IndexError:
        mol = Chem.MolFromSmiles(str(Molecule.objects.get(pk=my_choice).smiles))
    image = Draw.MolToImage(mol)
    output = BytesIO()
    image.save(output, format="PNG")
    contents = output.getvalue()
    return HttpResponse(contents)
|
91f202b34fe63c8b89e1250bb54222120410f9c2
| 3,644,057
|
def rotation_matrix_about(axis, theta):
"""Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
Taken from: https://stackoverflow.com/a/6802723
"""
if np.shape(axis) != (3,):
raise ValueError("Shape of `axis` must be (3,)!")
scalar = True
if np.ndim(theta) > 1:
raise ValueError("Only 0 or 1 dimensional values for `theta` are supported!")
elif np.ndim(theta) == 1:
theta = np.atleast_2d(theta).T
scalar = False
axis = np.asarray(axis)
axis = axis / np.sqrt(np.dot(axis, axis))
a = np.cos(theta / 2.0).squeeze()
# b, c, d = - axis * np.sin(theta / 2.0)
temp = - axis * np.sin(theta / 2.0)
if not scalar:
temp = temp.T
b, c, d = temp
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
if not scalar:
rot = rot.T
return rot
|
f65fdc6e40ad7712521fbb6db662827401f82aca
| 3,644,058
|
def zc_rules():
"""catch issues with zero copy streaming"""
return (
case("SSTableReader"),
rule(
capture(
r"Could not recreate or deserialize existing bloom filter, continuing with a pass-through bloom filter but this will significantly impact reads performance"
),
update(
event_product="zcs",
event_category="streaming",
event_type="bloom_filter",
),
),
)
|
e4847d95b0565d5cb9213cfca9e8e3f28657041c
| 3,644,059
|
import re
def name_convert_to_camel(name: str) -> str:
    """Convert a snake_case name to camelCase."""
    contents = re.findall('_[a-z]+', name)
    for content in set(contents):
        name = name.replace(content, content[1:].title())
    return name
|
109a1035a3efa98861b6a419206823b1114268e2
| 3,644,060
|
def triangle_as_polynomial(nodes, degree):
    """Convert ``nodes`` into a SymPy polynomial array :math:`B(s, t)`.
    Args:
        nodes (numpy.ndarray): Nodes defining a Bézier triangle.
        degree (int): The degree of the triangle. This is assumed to
            correctly correspond to the number of ``nodes``.
    Returns:
        Tuple[sympy.Symbol, sympy.Symbol, sympy.Matrix]: Triple of
        * The symbol ``s`` used in the polynomial
        * The symbol ``t`` used in the polynomial
        * The triangle :math:`B(s, t)`.
    """
    # NOTE: We import SymPy at runtime to avoid the import-time cost for users
    #       that don't want to do symbolic computation. The ``sympy`` import is
    #       a tad expensive.
    import sympy  # pylint: disable=import-outside-toplevel
    nodes_sym = to_symbolic(nodes)
    s, t = sympy.symbols("s, t")
    b_polynomial = nodes_sym * triangle_weights(degree, s, t)
    b_polynomial.simplify()
    factored = [value.factor() for value in b_polynomial]
    return s, t, sympy.Matrix(factored).reshape(*b_polynomial.shape)
|
20c0bc7021673ac375018a387926ae25bdfda2e5
| 3,644,061
|
import decimal
def as_decimal(dct):
    """Decodes the Decimal datatype."""
    if '__Decimal__' in dct:
        return decimal.Decimal(dct['__Decimal__'])
    return dct
|
d25b3ff73d7559a9018666d5f2cd189e6503a268
| 3,644,062
|
def input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
cols_to_output_tensors=None):
"""Returns a dense `Tensor` as input layer based on given `feature_columns`.
Generally a single example in training data is described with FeatureColumns.
At the first layer of the model, this column oriented data should be converted
to a single `Tensor`.
Example:
```python
price = numeric_column('price')
keywords_embedded = embedding_column(
categorical_column_with_hash_bucket("keywords", 10K), dimensions=16)
columns = [price, keywords_embedded, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
for units in [128, 64, 32]:
dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
prediction = tf.layers.dense(dense_tensor, 1)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values can be a `SparseTensor` or a `Tensor` depends on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_DenseColumn` such as `numeric_column`, `embedding_column`,
`bucketized_column`, `indicator_column`. If you have categorical features,
you can wrap them with an `embedding_column` or `indicator_column`.
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to list of `Variable`s. For example, after
the call, we might have cols_to_vars =
{_EmbeddingColumn(
categorical_column=_HashedCategoricalColumn(
key='sparse_feature', hash_bucket_size=5, dtype=tf.string),
dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10),
<tf.Variable 'some_variable:1' shape=(5, 10)]}
If a column creates no variables, its value will be an empty list.
cols_to_output_tensors: If not `None`, must be a dictionary that will be
filled with a mapping from '_FeatureColumn' to the associated
output `Tensor`s.
Returns:
A `Tensor` which represents input layer of a model. Its shape
is (batch_size, first_layer_dimension) and its dtype is `float32`.
first_layer_dimension is determined based on given `feature_columns`.
Raises:
ValueError: if an item in `feature_columns` is not a `_DenseColumn`.
"""
return _internal_input_layer(
features,
feature_columns,
weight_collections=weight_collections,
trainable=trainable,
cols_to_vars=cols_to_vars,
cols_to_output_tensors=cols_to_output_tensors)
|
89fda325ec1afb98a772d7238386471bf4484141
| 3,644,063
|
def log_sum_exp(x):
    """Utility function for computing log_sum_exp.
    This will be used to determine the unaveraged confidence loss across
    all examples in a batch.
    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    log_reduce_sum = P.ReduceSum()
    log = P.Log()
    exp = P.Exp()
    x_max = max(x.data)
    return log(log_reduce_sum(exp(x - x_max), 1)) + x_max
|
72a39a81fa3959e73c096732a86e843b5330e27d
| 3,644,064
|
import os
def data_dir():
    """
    :return: data directory in the filesystem for storage, for example when downloading models
    """
    return os.getenv('CNOCR_HOME', data_dir_default())
|
196e30d66c7598e10af93268d6de2c1192132b3c
| 3,644,065
|
def prepareRepoCharts(url, name, auths):
    """
    NOTE: git is currently not supported
    """
    charts_info, charts_info_hash = _prepareHelmRepoPath(url, name, auths)
    return charts_info, charts_info_hash
|
7d2a6af1cae019020cd0921155fcdc749585d32c
| 3,644,066
|
def num_ini_spaces(s):
    """Return the number of initial spaces in a string.
    Note that tabs are counted as a single space. For now, we do *not* support
    mixing of tabs and spaces in the user's input.
    Parameters
    ----------
    s : string
    Returns
    -------
    n : int
    """
    ini_spaces = ini_spaces_re.match(s)
    if ini_spaces:
        return ini_spaces.end()
    else:
        return 0
|
9870aa42020b56f765f0ed74f73edda21b1786b1
| 3,644,067
|
def make_filename_template(schema, **kwargs):
"""Create codeblocks containing example filename patterns for a given
datatype.
Parameters
----------
schema : dict
The schema object, which is a dictionary with nested dictionaries and
lists stored within it.
kwargs : dict
Keyword arguments used to filter the schema.
Example kwargs that may be used include: "suffixes", "datatypes",
"extensions".
Returns
-------
codeblock : str
A multiline string containing the filename templates for file types
in the schema, after filtering.
"""
schema = filter_schema(schema, **kwargs)
entity_order = schema["rules"]["entities"]
paragraph = ""
# Parent folders
paragraph += "{}-<{}>/\n\t[{}-<{}>/]\n".format(
schema["objects"]["entities"]["subject"]["entity"],
schema["objects"]["entities"]["subject"]["format"],
schema["objects"]["entities"]["session"]["entity"],
schema["objects"]["entities"]["session"]["format"],
)
for datatype in schema["rules"]["datatypes"].keys():
paragraph += "\t\t{}/\n".format(datatype)
# Unique filename patterns
for group in schema["rules"]["datatypes"][datatype]:
string = "\t\t\t"
for ent in entity_order:
ent_format = "{}-<{}>".format(
schema["objects"]["entities"][ent]["entity"],
schema["objects"]["entities"][ent].get("format", "label")
)
if ent in group["entities"]:
if group["entities"][ent] == "required":
if len(string.strip()):
string += "_" + ent_format
else:
# Only the first entity doesn't need an underscore
string += ent_format
else:
if len(string.strip()):
string += "[_" + ent_format + "]"
else:
# Only the first entity doesn't need an underscore
string += "[" + ent_format + "]"
# In cases of large numbers of suffixes,
# we use the "suffix" variable and expect a table later in the spec
if len(group["suffixes"]) > 5:
suffix = "_<suffix>"
string += suffix
strings = [string]
else:
strings = [
string + "_" + suffix for suffix in group["suffixes"]
]
# Add extensions
full_strings = []
extensions = group["extensions"]
extensions = [
ext if ext != "*" else ".<extension>" for ext in extensions
]
extensions = utils.combine_extensions(extensions)
if len(extensions) > 5:
# Combine exts when there are many, but keep JSON separate
if ".json" in extensions:
extensions = [".<extension>", ".json"]
else:
extensions = [".<extension>"]
for extension in extensions:
for string in strings:
new_string = string + extension
full_strings.append(new_string)
full_strings = sorted(full_strings)
if full_strings:
paragraph += "\n".join(full_strings) + "\n"
paragraph = paragraph.rstrip()
codeblock = "Template:\n```Text\n" + paragraph + "\n```"
codeblock = codeblock.expandtabs(4)
return codeblock
|
bb1d8eb776d8e248ca7fb67167594639a02c92cb
| 3,644,068
|
import os
def lambda_handler(event, context):
"""
Generate a pre-signed URL that allows a save file to be uploaded to S3 in the player's specified save slot. If the
slot is new, will verify that MAX_SAVE_SLOTS_PER_PLAYER has not been reached.
Parameters:
Request Context:
custom:gk_user_id: str
The player_id to associate the save file with. This value comes from the Cognito Authorizer that validates
the API Gateway request.
Header Parameters:
metadata: str
An arbitrary Base64 encoded string to associate with the save file.
[Optional, defaults to an empty string: '']
The total size of the metadata string cannot exceed 1887 bytes (MAX_METADATA_BYTES, see docs above) and must
be Base64 encoded, otherwise the Lambda will return an error. The 2KB limit comes from an S3 limitation, and
the Base64 encoding saves space compared to S3's native behavior for non-ASCII strings:
https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html#UserMetadata
The GameKit SDK handles encoding and decoding the metadata string for you; if not using the SDK, please
Base64 encode your metadata values before calling this lambda function.
Examples:
A string, representing the save slot's description:
unencoded_metadata = 'about to fight the boss 👍'
metadata = 'YWJvdXQgdG8gZmlnaHQgdGhlIGJvc3Mg8J+RjQ==' # Pass this to the lambda
A JSON blob, containing several metadata fields:
unencoded_metadata = '{"description": "about to fight the boss 👍", "total_playtime_seconds": "16200"}'
metadata = 'eyJkZXNjcmlwdGlvbiI6ICJhYm91dCB0byBmaWdodCB0aGUgYm9zcyDwn5GNIiwgInRvdGFsX3BsYXl0aW1lX3NlY29uZHMiOiAiMTYyMDAifQ==' # Pass this to the lambda
hash: str
The Base64 encoded SHA-256 hash of the file to upload.
The total size of the hash string will be 44 bytes; the SHA-256 hash itself is 32 bytes, and the Base64
encoding of it will bring the size up to 44. Base64 encoding is used to convert the SHA-256 hash from a
byte stream to an ASCII compliant string.
last_modified_epoch_time: int
The number of milliseconds since epoch of the last UTC time when the save slot was modified on the caller's
device.
Path Parameters:
slot_name: str
The slot name to use for the save file.
            Limited to 512 characters; may contain alphanumeric characters, dashes (-), underscores (_), and periods (.).
This lambda will return an error if a malformed slot name is provided.
If the slot_name is not occupied with another save file, the Lambda will check whether this new save file
will exceed the MAX_SAVE_SLOTS_PER_PLAYER. If it would be exceeded, the Lambda will return an error.
Query String Parameters:
time_to_live: int
The number of seconds the URL will be valid. The URL will no longer work after the time has expired.
[Optional, defaults to 120 seconds (DEFAULT_TIME_TO_LIVE_SECONDS).]
consistent_read: bool
Whether to use "Consistent Read" when querying DynamoDB.
[Optional, defaults to True (DEFAULT_CONSISTENT_READ).]
Errors:
400 Bad Request - Returned when a malformed 'slot_name' path parameter is provided.
400 Bad Request - Returned when the 'metadata' parameter exceeds 1883 bytes (MAX_METADATA_BYTES) after being
ASCII encoded.
400 Bad Request - Returned when the 'hash' parameter is not exactly 44 bytes (BASE_64_ENCODED_SHA_256_BYTES)
in size.
400 Bad Request - Returned when the save slot is new and would exceed the player's MAX_SAVE_SLOTS_PER_PLAYER.
401 Unauthorized - Returned when the 'custom:gk_user_id' parameter is missing from the request context.
"""
log_event(event)
# Get player_id from requestContext:
player_id = get_player_id(event)
if player_id is None:
return response_envelope(status_code=401)
# Get header inputs:
metadata = get_header_param(event, 'metadata', DEFAULT_METADATA)
sha_hash: str = get_header_param(event, S3_HASH_METADATA_KEY)
last_modified_epoch_time = int(get_header_param(event, 'last_modified_epoch_time'))
# Get path param inputs:
slot_name = get_path_param(event, 'slot_name')
# Get query param inputs:
time_to_live = int(get_query_string_param(event, 'time_to_live', DEFAULT_TIME_TO_LIVE_SECONDS))
consistent_read = bool(strtobool(get_query_string_param(event, 'consistent_read', DEFAULT_CONSISTENT_READ)))
# Validate inputs:
if not is_valid_primary_identifier(slot_name):
logger.error((f'Malformed slot_name: {slot_name} provided for player_id: {player_id}').encode(UTF_8))
return response_envelope(status_code=400, status_message=ResponseStatus.MALFORMED_SLOT_NAME)
if get_bytes_length(metadata) > MAX_METADATA_BYTES:
return response_envelope(status_code=400, status_message=ResponseStatus.MAX_METADATA_BYTES_EXCEEDED)
if not is_valid_base_64(metadata):
logger.error((f'Malformed metadata provided, expected a Base64 encoded string. Metadata: {metadata}').encode(UTF_8))
return response_envelope(status_code=400, status_message=ResponseStatus.MALFORMED_METADATA)
if len(sha_hash) != BASE_64_ENCODED_SHA_256_BYTES or not sha_hash.isascii():
logger.error((f'Malformed SHA-256 hash: {sha_hash} provided. Must be 44 characters and Base64 encoded.').encode(UTF_8))
return response_envelope(status_code=400, status_message=ResponseStatus.MALFORMED_HASH_SIZE_MISMATCH)
# Verify MAX_SAVE_SLOTS_PER_PLAYER won't be exceeded:
if is_new_save_slot(player_id, slot_name, consistent_read) and would_exceed_slot_limit(player_id, consistent_read):
return response_envelope(status_code=400, status_message=ResponseStatus.MAX_CLOUD_SAVE_SLOTS_EXCEEDED)
# Generate URL:
bucket_name = os.environ.get('GAMESAVES_BUCKET_NAME')
url = generate_presigned_url(
bucket_name, player_id, slot_name, metadata, sha_hash, last_modified_epoch_time, time_to_live
)
# Construct response object:
return response_envelope(
status_code=200,
response_obj={
'url': url
}
)
|
f4202aaad5bdd91779eb0ee72780ba0cf1fe330f
| 3,644,069
|
def getLanguageLevel() -> dict:
"""
    Takes the user's 'text' and 'language' query parameters and returns the categorization result as a dictionary.
:text: String
:language: String
:return: Dictionary
"""
text: str = request.params.get('text')
language: str = request.params.get('language')
# check API Key
if str(request.params.get('key')) != API_KEY:
response.status = 401
return {
"error": "API-KEY is wrong or missing. See https://github.com/elaisasearch/categorizer/blob/master/README.md for more information."
}
if language == "en":
return {
"result": categorizeText(text)
}
# other languages will follow in the future
else:
return {
"error": "'{}' currently isn't supported. Please use 'en' for English as language. Thank you.".format(language)
}
|
967b4244f7406c82715bdfb112cd82c652c9c68e
| 3,644,070
|
import oci.exceptions
def list_networks(**kwargs):
"""Lists all networks of the given compartment
Args:
**kwargs: Additional options
Keyword Args:
public_subnet (bool): Whether only public or private subnets should be
considered
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
return_formatted (bool): If set to true, a list object is returned.
check_privileges (bool): Checks if the user has privileges for the
subnet
Returns:
        The list of VCNs, formatted for output if return_formatted is True, otherwise as a list of dicts
"""
public_subnet = kwargs.get("public_subnet")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
return_formatted = kwargs.get("return_formatted", True)
check_privileges = kwargs.get("check_privileges", False)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# List the virtual networks
vcns = virtual_network.list_vcns(
compartment_id=compartment_id).data
# Filter out all sub-nets that are not conforming to the
# public_subnet options
if public_subnet is not None:
# Loop over VCNs to see if access is granted
good_vcns = []
for vcn in vcns:
try:
if network_has_subnet(
network=vcn, compartment_id=compartment_id,
config=config,
public_subnet=public_subnet,
check_privileges=check_privileges):
good_vcns.append(vcn)
                except oci.exceptions.ServiceError:
                    # Skip VCNs that cannot be inspected (e.g. insufficient privileges)
                    pass
vcns = good_vcns
if return_formatted:
return format_network_listing(vcns)
else:
return oci.util.to_dict(vcns)
except ValueError as e:
print(f"ERROR: {str(e)}")
return
|
32a816b595d45102a393be8a548f48414509f865
| 3,644,071
|
def ed_affine_to_extended(pt):
"""Map (x, y) to (x : y : x*y : 1)."""
new_curve = EllipticCurve(pt.curve, ED_EXT_HOM_PROJ, Edwards_ExtProj_Arithm)
return new_curve((pt.x, pt.y, pt.x * pt.y, new_curve.field(1)))
|
ee949c7c0487fb580d79764e3f0c10d2a2080943
| 3,644,072
|
import os
import shutil
def _download(path, url, archive_name, hash_, hash_type='md5'):
"""Download and extract an archive, completing the filename."""
full_name = op.join(path, archive_name)
remove_archive = True
fetch_archive = True
if op.exists(full_name):
logger.info('Archive exists (%s), checking hash %s.'
% (archive_name, hash_,))
fetch_archive = False
if hashfunc(full_name, hash_type=hash_type) != hash_:
if input('Archive already exists but the hash does not match: '
'%s\nOverwrite (y/[n])?'
% (archive_name,)).lower() == 'y':
os.remove(full_name)
fetch_archive = True
if fetch_archive:
logger.info('Downloading archive %s to %s' % (archive_name, path))
try:
temp_file_name, header = urlretrieve(url)
# check hash sum eg md5sum
if hash_ is not None:
logger.info('Verifying hash %s.' % (hash_,))
hashsum = hashfunc(temp_file_name, hash_type=hash_type)
if hash_ != hashsum:
raise RuntimeError('Hash mismatch for downloaded file %s, '
'expected %s but got %s'
% (temp_file_name, hash_, hashsum))
shutil.move(temp_file_name, full_name)
except Exception:
logger.error('Error while fetching file %s.'
' Dataset fetching aborted.' % url)
raise
# _fetch_file(url, full_name, print_destination=False,
# hash_=hash_, hash_type=hash_type)
return remove_archive, full_name
|
3df4c134734e62474ed4a36d710cc9140c267329
| 3,644,073
|
import joblib
def do_setup(experiment_folder, path_to_additional_args):
""" Setup Shell Scripts for Experiment """
additional_args = joblib.load(path_to_additional_args)
# Setup Data
logger.info("Setting Up Data")
data_args = setup_train_test_data(experiment_folder, **additional_args)
# Setup
logger.info("Saving Experiment Options per ID")
sampler_args = additional_args['sampler_args']
arg_list = dict_product(sampler_args, data_args)
options_df = setup_options(experiment_folder, arg_list)
return options_df
|
9489b5abab6335de4c5909d718b5ccb3bcc0f3c7
| 3,644,074
|
import requests
def getorgadmins(apikey, orgid, suppressprint=False):
"""
Args:
apikey: User's Meraki API Key
orgid: OrganizationId for operation to be performed against
        suppressprint: If True, suppress the status messages printed by the return handler (default False)
Returns:
"""
__hasorgaccess(apikey, orgid)
calltype = 'Organization'
geturl = '{0}/organizations/{1}/admins'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
|
640ca97bf7213b2b0e24190b7f1b6658c53332b6
| 3,644,075
|
def calc_recall(TP, FN):
"""
Calculate recall from TP and FN
"""
if TP + FN != 0:
recall = TP / (TP + FN)
else:
recall = 0
return recall
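# A minimal usage sketch: with 8 true positives and 2 false negatives the
# recall is 8 / (8 + 2) = 0.8, and the zero-denominator case falls back to 0.
if __name__ == "__main__":
    assert calc_recall(8, 2) == 0.8
    assert calc_recall(0, 0) == 0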
|
8f3513e11f8adad111eee32740c271aad31fbe28
| 3,644,076
|
def lookup_last_report_execution(job_type, work_ids=None):
"""Lookup in the database when the report/job chunk last executed
This is the expected table schema from the database (id and timestamp columns
are omitted),
---------------------------------------------------
| work_id | history |
---------------------------------------------------
| 1000 | {"report_A": 2019-01-11 11:22:33,
"report_B": 2020-01-12 02:03:44} |
| 2000 | {"report_A": 2012-01-11 12:23:33} |
---------------------------------------------------
    The work_ids parameter is expected to contain work ids; the plural, generic
    name is kept to support future changes.
Args:
job_type (str): The name of the job to check execution time for
work_ids (list): Specific work ids to check execution time for
Returns:
last_exec_min (int or None): Largest number of minutes since the last
execution for any of the work ids. None
if never executed
Examples:
Looking up the greatest time since work id 1000 executed report_B
should be 2 minutes
>>> str(datetime.utcnow())
2020-01-12 02:05:44
>>> lookup_last_report_execution("report_B", [1000])
2
Looking up the greatest time since work id 1234 executed report_B
should be None, as it was never executed
>>> print(lookup_last_report_execution("report_B", [1234]))
None
"""
# Create string ready for SQL
work_ids_string = ", ".join([str(c) for c in work_ids])
# Query database
# This returns a single number that is the latest execution for any of
# the work_ids in minutes or a single row containing 99999999
sql = f"""
SELECT
MAX(IFNULL(MINUTES_SINCE_LAST_EXEC, 99999999)) AS last_exec
FROM (
-- Calculate the time since last execution
SELECT
TIMESTAMPDIFF(
MINUTE,
STR_TO_DATE(
JSON_UNQUOTE(
JSON_EXTRACT(
history,
'$."{job_type}"')
), "%Y-%m-%d %H:%i:%s"),
CURRENT_TIMESTAMP()
) AS MINUTES_SINCE_LAST_EXEC
FROM StauLatestExecution
WHERE workId IN ({work_ids_string})
) as subq
"""
with Stau() as queue:
rtn = queue._exec(sql, {})
return rtn.get("last_exec", None)
|
bcc7715d416820dcc9f065b952e0a751255c9929
| 3,644,077
|
def get_course_goal_options():
"""
Returns the valid options for goal keys, mapped to their translated
    strings, as defined by the CourseGoal model.
"""
return {goal_key: goal_text for goal_key, goal_text in GOAL_KEY_CHOICES}
|
6f8fc2bd812a216abcff6a82107cf28bfc2fcbf4
| 3,644,078
|
import pandas as pd
def to_dataframe(y):
"""
If the input is not a dataframe, convert it to a dataframe
:param y: The target variable
:return: A dataframe
"""
if not isinstance(y, pd.DataFrame):
return pd.DataFrame(y)
return y
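# A minimal usage sketch: a plain list is wrapped into a DataFrame, while an
# object that is already a DataFrame is returned as-is.
if __name__ == "__main__":
    assert isinstance(to_dataframe([1, 2, 3]), pd.DataFrame)
    frame = pd.DataFrame({"y": [0, 1]})
    assert to_dataframe(frame) is frame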
|
1fc302b1acb264bce5778c9c2349100f799da397
| 3,644,079
|
from urllib.parse import urlparse
def url_equal(first, second, ignore_scheme=False, ignore_netloc=False, ignore_path=False, ignore_params=False,
ignore_query=False, ignore_fragment=False):
"""
Compare two URLs and return True if they are equal, some parts of the URLs can be ignored
:param first: URL
:param second: URL
:param ignore_scheme: ignore the scheme
:param ignore_netloc: ignore the netloc
:param ignore_path: ignore the path
:param ignore_params: ignore the params
:param ignore_query: ignore the query string
:param ignore_fragment: ignore the fragment
:return: result of comparison
"""
# <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
firstp = urlparse(first)
secondp = urlparse(second)
return (
(firstp.scheme == secondp.scheme or ignore_scheme)
and (firstp.netloc == secondp.netloc or ignore_netloc)
and (firstp.path == secondp.path or ignore_path)
and (firstp.params == secondp.params or ignore_params)
and (firstp.query == secondp.query or ignore_query)
and (firstp.fragment == secondp.fragment or ignore_fragment)
)
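# A minimal usage sketch: these two URLs differ only in scheme and fragment,
# so they compare equal once both of those parts are ignored.
if __name__ == "__main__":
    a = "https://example.com/docs;v=1?page=2#top"
    b = "http://example.com/docs;v=1?page=2#bottom"
    assert not url_equal(a, b)
    assert url_equal(a, b, ignore_scheme=True, ignore_fragment=True)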
|
caea2185db83c5938f48e8d2de432c5e74540014
| 3,644,080
|
def test_struct(n: cython.int, x: cython.double) -> MyStruct2:
"""
>>> test_struct(389, 1.64493)
(389, 1.64493)
>>> d = test_struct.__annotations__
>>> sorted(d)
['n', 'return', 'x']
"""
assert cython.typeof(n) == 'int', cython.typeof(n)
if is_compiled:
assert cython.typeof(x) == 'double', cython.typeof(x) # C double
else:
assert cython.typeof(x) == 'float', cython.typeof(x) # Python float
a = cython.declare(MyStruct2)
a[0] = MyStruct(is_integral=True, data=MyUnion(n=n))
a[1] = MyStruct(is_integral=False, data={'x': x})
return a[0].data.n, a[1].data.x
|
1bf5e97719d80c8327c44bfea66f7ef26b3f7400
| 3,644,081
|
def build_document(json_schema: dict) -> list:
"""
Returns a list of lines to generate a basic adoc file, with the format:
Title
A table for the data properties
A table for the data attributes and nested attributes if any
"""
lines = []
"""
Title and description of schema
"""
title = get_json_attribute(['title'], json_schema)
description = get_json_attribute(['description'], json_schema)
"""
Id and required properties of object
"""
data = get_json_attribute(['properties', 'data'], json_schema)
data_required = get_json_attribute(['required'], data)
data_properties = get_json_attribute(['properties'], data)
"""
Attributes of object
"""
attributes = get_json_attribute(['attributes'], data_properties)
required = get_json_attribute(['required'], attributes)
attribute_properties = get_json_attribute(['properties'], attributes)
"""
Relationships of object
"""
relationships = get_json_attribute(['relationships', 'properties'], data_properties)
print(relationships)
if relationships:
for relationship_name in relationships:
relationship_object = get_json_attribute([relationship_name], relationships)
relationship_required = get_json_attribute(['required'], relationship_object)
relationship_properties = get_json_attribute(['data', 'properties'], relationship_object)
if not relationship_required:
relationship_required = ''
if 'type' in relationship_properties:
relationship_type = get_json_attribute(['type', 'const'], relationship_properties)
relationship_object.update({'type': str(relationship_type)})
"""
Cleans up properties table
"""
# TODO: retrieve nested 'const' attribute from relationship to display under 'Type' in adoc table
data_type = get_json_attribute(['type', 'const'], data_properties)
if 'type' in data_properties:
data_properties.update({'type': {'type': str(data_type)}})
if 'relationships' in data_properties:
del data_properties['relationships']
del data_properties['attributes']
"""
Sets title, description, and tables
"""
lines.append(get_adoc_title(title, 3))
if description:
lines.append(description+'\n')
if data_properties:
lines.extend(get_adoc_table('Properties', ['Type', 'Description'], data_properties, data_required))
if attributes:
lines.extend(get_adoc_table('Attributes', ['Type', 'Description'], attribute_properties, required, True))
lines.append('\n')
if relationships:
lines.extend(get_adoc_table('Relationships', ['Type', 'Description'], relationships, relationship_required))
return lines
|
d92038392d23047130c6f981cb848f3f7ca9dd19
| 3,644,082
|
import os
def remove_potential_nonlipids_bad_esi_mode():
"""
remove_potential_nonlipids_bad_esi_mode
description:
ESI mode of the dataset is not 'pos' or 'neg'
returns:
(bool) -- test pass (True) or fail (False)
"""
dset = Dataset(os.path.join(os.path.dirname(__file__), 'real_data_1.csv'))
try:
remove_potential_nonlipids(dset)
except ValueError:
return True
return False
|
1b675faf7e90d5bf2695d6e64e8df4ca353c0850
| 3,644,083
|
def is_oasis_db():
""" Is this likely an OASIS database? Look at the table names to see
if we have the more specific ones.
Return "yes", "no", or "empty"
"""
expect = ['qtvariations', 'users', 'examqtemplates', 'marklog', 'qtattach',
'questions', 'guesses', 'exams', 'qtemplates']
tables = public_tables()
if len(tables) == 0:
return "empty"
if set(expect).issubset(tables):
return "yes"
return "no"
|
330da79c63afe4905c9469e54d61d5de6a8fa575
| 3,644,084
|
def make_segment(segment, discontinuity=False):
"""Create a playlist response for a segment."""
response = []
if discontinuity:
response.append("#EXT-X-DISCONTINUITY")
    response.extend(["#EXTINF:10.0000,", f"./segment/{segment}.m4s"])
return "\n".join(response)
|
8419b100409934f902c751734c396bc72d8a6917
| 3,644,085
|
def seq_aggregate_with_reducer(x, y):
"""
Sequencing function that works with the dataframe created by get_normal_frame
:param x:
:param y:
:return:
"""
res = []
for i in range(0, len(x)):
res.append((x[i][0], x[i][1], get_aggregation_func_by_name(x[i][0])(x[i][2], y[i][2])))
return tuple(res)
|
6faed81fd925656c2984e9d78df3b88e98fcb035
| 3,644,086
|
from typing import Any
def from_dicts(key: str, *dicts, default: Any = None):
"""
Returns value of key in first matchning dict.
If not matching dict, default value is returned.
Return:
Any
"""
for d in dicts:
if key in d:
return d[key]
return default
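# A minimal usage sketch: the key is looked up dict by dict, so the first dict
# containing it wins; otherwise the default is returned.
if __name__ == "__main__":
    overrides = {"timeout": 5}
    defaults = {"timeout": 30, "retries": 3}
    assert from_dicts("timeout", overrides, defaults) == 5
    assert from_dicts("retries", overrides, defaults) == 3
    assert from_dicts("verbose", overrides, defaults, default=False) is False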
|
508febc48fd22d3a23dc0500b0aa3824c99fdbc3
| 3,644,087
|
def time_in_words(h, m):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/the-time-in-words/problem
Given the time in numerals we may convert it into words, as shown below:
----------------------------------------------
| 5:00 | -> | five o' clock |
| 5:01 | -> | one minute past five |
| 5:10 | -> | ten minutes past five |
| 5:15 | -> | quarter past five |
| 5:30 | -> | half past five |
| 5:40 | -> | twenty minutes to six |
| 5:45 | -> | quarter to six |
| 5:47 | -> | thirteen minutes to six |
| 5:28 | -> | twenty eight minutes past five |
----------------------------------------------
At minutes = 0, use o' clock. For 1 <= minutes <= 30, use past, and for 30 < minutes use to. Note the space between
the apostrophe and clock in o' clock. Write a program which prints the time in words for the input given in the
format described.
Args:
h (int): hour of the day
m (int): minutes after the hour
Returns:
str: string representation of the time
"""
time = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen",
"fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty", "twenty one", "twenty two",
"twenty three", "twenty four", "twenty five", "twenty six", "twenty seven", "twenty eight", "twenty nine"]
# We check for a certain set of cases:
# Case 1 - we're on the hour, so we use o' clock
if m == 0:
return "{0} o' clock".format(time[h-1])
# Case 2 - we're one minute after, so we use minute (versus minutes later on to describe the time)
if m == 1:
return "{0} minute past {1}".format(time[m-1], time[h-1])
# Case 3 - we're a quarter past the hour
if m == 15:
return "quarter past {0}".format(time[h-1])
# Case 4 - we're half past the hour
if m == 30:
return "half past {0}".format(time[h-1])
# Case 5 - we're a quarter to the next hour
if m == 45:
return "quarter to {0}".format(time[h])
# Case 6 - we check for minutes after the hour, which is until we hit minute 30
if m < 30:
return "{0} minutes past {1}".format(time[m-1], time[h-1])
    # Case 7 - this covers the cases where the minutes are after 30, so we're counting minutes to the next hour
return "{0} minutes to {1}".format(time[59-m], time[h])
|
85f2247f01df36ef499105a9940be63eee189100
| 3,644,088
|
def majorityElement(nums):
"""超过三分之一的数,最多不超过两个数"""
num1, num2 = -1, -1
count1, count2 = 0, 0
for i in range(len(nums)):
curNum = nums[i]
if curNum == num1:
count1 += 1
elif curNum == num2:
count2 += 1
elif count1 == 0:
num1 = curNum
count1 = 1
elif count2 == 0:
num2 = curNum
count2 = 1
else:
count1 -= 1
            count2 -= 1
count1, count2 = 0, 0
for n in nums:
if n == num1:
count1 += 1
elif n == num2:
count2 += 1
print("num1: {}, count1: {}; num2: {}, count2: {}".format(num1, count1, num2, count2))
numLens = len(nums)
ret = []
if count1 > numLens//3:
ret.append(num1)
if count2 > numLens//3:
ret.append(num2)
return ret
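# A minimal usage sketch: in [3, 2, 3] only 3 appears more than ⌊n/3⌋ times,
# while in the second list both 1 and 2 qualify.
if __name__ == "__main__":
    assert majorityElement([3, 2, 3]) == [3]
    assert sorted(majorityElement([1, 1, 1, 3, 3, 2, 2, 2])) == [1, 2]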
|
ef71fa445c3bc16bbaf79a1ab4e9548125e71b7b
| 3,644,089
|
import numpy as np
def calcDensHeight(T, p, z):
"""
Calculate the density scale height H_rho
Parameters
----------
T: vector (float)
temperature (K)
p: vector (float) of len(T)
pressure (pa)
    z: vector (float) of len(T)
height (m)
Returns
-------
Hbar: vector (float) of len(T)
density scale height (m)
"""
dz=np.diff(z)
TLayer=(T[1:] + T[0:-1])/2.
dTdz=np.diff(T)/np.diff(z)
oneOverH=g/(Rd*TLayer) + (1/TLayer*dTdz)
Zthick=z[-1] - z[0]
oneOverHbar=np.sum(oneOverH*dz)/Zthick
Hbar = 1/oneOverHbar
return Hbar
|
c45d47d4f3dffe0e1706f979a9a6eb5028c7b775
| 3,644,090
|
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def Signal_figure(name, I, mask):
"""Plots a figure designed to show the influences of the image parameters and creates a .png image of it.
Parameters
----------
name: string
Desired name of the image.
I: array
MRI image.
mask: array
Region of interest binary mask.
Return
------
References
----------
"""
sns.set()
sns.set_style('ticks')
sns.set_context('talk')
fig=plt.figure(figsize=(20,20))
gs = fig.add_gridspec(2,2)
ax1=fig.add_subplot(gs[0, 0:1])
ax1.imshow(I,cmap='gray')
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_title('Noiseless image',fontsize=40)
ax2=fig.add_subplot(gs[0, 1:2])
ax2.imshow(mask,cmap='gray')
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_title('Mask',fontsize=40)
ax3=fig.add_subplot(gs[1, 0:])
hist, bins = np.histogram(I,80)
ax3.plot(bins[:-1],hist,'k')
ax3.fill_between(bins[:-1], hist,color='black')
ax3.set_title('Noiseless image histogram',fontsize=40)
    ax3.set_ylabel('Number of pixels', fontsize=40)
ax3.set_xlabel('Value',fontsize=40)
ax3.set_xlim(0,750)
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
os.chdir('Figures')
plt.savefig(name+'.png')
os.chdir('..')
return None
|
c294d8c9f167935b6402e1573acb68ed6c23fbc2
| 3,644,091
|
import mne
from tkinter import Tk, filedialog
def load_many_data(filenames, clean=True, first_seconds_remove=2, bandpass_range=(5, 50)):
"""
Loads several files and cleans data if clean is True. Returns a concatenated set of data (MNE object).
"""
# TODO: check for matching channels and other errors
raw_data = []
if filenames is None:
# open tkinter dialogue
#multiple files selected at one time
root = Tk()
root.withdraw()
filenames = filedialog.askopenfilenames()
for f in filenames:
#Check sample frequencies and ask user which sfreq files they would like to look at
cur_raw = load_data(f) # current raw object
raw_data.append(cur_raw)
print("The length of raw_data is:" + str(len(raw_data)))
# print("raw_data[0] is " + str(raw_data[0]))
# print("The length of the file list is:" + str(len([PATH1 + f for f in glob.glob(PATH1 + '*.raw.fif.gz')]))) #This file list doesn't return anything
data = mne.concatenate_raws(raw_data)
if clean:
data = clean_data(data, remove=first_seconds_remove, bandpass_range=bandpass_range)
return data
|
b9b8e5351419642b7b3e8b462df42e171a3564ce
| 3,644,092
|
import re
def extract_push_target(push_target: str):
"""
Extract push target from the url configured
Workspace is optional
"""
if not push_target:
raise ValueError("Cannot extract push-target if push-target is not set.")
match_pattern = re.compile(
r"(?P<http_scheme>https|http):\/\/(?P<askanna_host>[\w\.\-\:]+)\/(?P<workspace_suuid>[\w-]+){0,1}\/{0,1}project\/(?P<project_suuid>[\w-]+)\/{0,1}" # noqa: E501
)
matches = match_pattern.match(push_target)
matches_dict = matches.groupdict()
return matches_dict
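# A minimal usage sketch with a made-up push-target URL (host, workspace and
# project identifiers below are purely illustrative).
if __name__ == "__main__":
    target = "https://askanna.example.com/workspace-abcd/project/project-1234"
    parts = extract_push_target(target)
    assert parts["askanna_host"] == "askanna.example.com"
    assert parts["workspace_suuid"] == "workspace-abcd"
    assert parts["project_suuid"] == "project-1234"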
|
3fe11ac218c0cfc7c6211cfe76fd11bd248c4588
| 3,644,093
|
import os
def main(global_config, **settings):
"""This function returns a Pyramid WSGI application."""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
authn_policy = AuthTktAuthenticationPolicy('sosecret', callback=groupfinder,
hashalg='sha512')
authz_policy = ACLAuthorizationPolicy()
memcache_server = os.environ.get('MEMCACHE_SERVERS')
settings['beaker.cache.url'] = memcache_server
config = Configurator(settings=settings,
root_factory='atv.models.RootFactory')
config.include('pyramid_chameleon')
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
config.add_static_view('URL',
'static', cache_max_age=3600)
config.add_route('home', '/')
config.add_route('panda', '/panda/authorize_upload')
config.add_route('search', '/search')
config.add_route('searchb', '/search/')
config.add_route('answer', '/answer')
config.add_route('delete', '/delete')
config.add_route('denied', '/denied')
config.add_route('explore', '/explore')
config.add_route('exploreb', '/explore/')
config.add_route('exploretrending', '/explore/trending')
config.add_route('exploretrendingb', '/explore/trending/')
config.add_route('explorelatest', '/explore/latest')
config.add_route('explorelatestb', '/explore/latest/')
config.add_route('exploreourpicks', '/explore/ourpicks')
config.add_route('exploreourpicksb', '/explore/ourpicks/')
config.add_route('vote', '/vote')
config.add_route('deleteanswer', '/deleteanswer')
config.add_route('stream', '/i/stream')
config.add_route('streamb', '/i/stream/')
config.add_route('streamlatest', '/i/stream/latest')
config.add_route('streamlatestb', '/i/stream/latest/')
config.add_route('streamtop', '/i/stream/top')
config.add_route('streamtopb', '/i/stream/top/')
config.add_route('edit', '/i/edit')
config.add_route('editb', '/i/edit/')
config.add_route('followunfollow', '/2x4b32cp')
config.add_route('deletenotification', '/2x4b32qp')
config.add_route('chanlatest', '/{channel}/latest')
config.add_route('chanlatestb', '/{channel}/latest/')
config.add_route('chanrising', '/{channel}/top')
config.add_route('chanrisingb', '/{channel}/top/')
config.add_route('ask', '/ask')
config.add_route('signup', '/signup')
config.add_route('signupb', '/signup/')
config.add_route('login', '/login')
config.add_route('loginb', '/login/')
config.add_route('logout', '/logout')
config.add_route('logoutb', '/logout/')
config.add_route('privacy', '/privacy')
config.add_route('privacyb', '/privacy/')
config.add_route('terms', '/terms')
config.add_route('termsb', '/terms/')
config.add_route('blog', '/blog')
config.add_route('blogb', '/blog/')
config.add_route('admin', '/admin')
config.add_route('adminb', '/admin/')
config.add_route('copyright', '/copyright')
config.add_route('copyrightb', '/copyright/')
config.add_route('contact', '/contact')
config.add_route('contactb', '/contact/')
config.add_route('verify', '/verify')
config.add_route('verifyb', '/verify/')
config.add_route('reset', '/reset')
config.add_route('resetb', '/reset/')
config.add_route('ereset', '/ereset')
config.add_route('eresetb', '/ereset/')
config.add_route('verifyereset', '/ereset/{code}')
config.add_route('verifyreset', '/reset/{code}')
config.add_route('verifyemail', '/verify/{code}')
config.add_route('following', '/{channel}/following')
config.add_route('followingb', '/{channel}/following/')
config.add_route('a_history', '/{channel}/history/a')
config.add_route('a_historyb', '/{channel}/history/a/')
config.add_route('history', '/{channel}/history/q')
config.add_route('historyb', '/{channel}/history/q/')
config.add_route('question', '/{channel}/{question}')
config.add_route('questionb', '/{channel}/{question}/')
config.add_route('channel', '/{channel}')
config.add_route('channelb', '/{channel}/')
#Create WSGI app
config.scan()
return config.make_wsgi_app()
|
5ba2332214a77b485bc3f72d01578ff1299d6b52
| 3,644,094
|
def dish_gain(radius, freq):
"""
Dish radar gain.
Inputs:
- radius [float]: Dish radius (m)
- freq [float]: Transmit frequency (Hz)
Outputs:
- g: Gain
"""
return 4*pi**2*radius**2/wavelen(freq)**2
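# A worked check, assuming wavelen(freq) is the usual c / freq helper
# (c ≈ 3e8 m/s): a 1 m radius dish at 10 GHz gives λ ≈ 0.03 m, so
# gain ≈ 4·π²·1² / 0.03² ≈ 4.4e4 (roughly 46 dBi).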
|
a20d963f9acc839a811aefaa942aaeaedce0689c
| 3,644,095
|
import os
def _collect_files(gold_dir, system_dir, limit):
"""Return the list of files to run the comparison on."""
gold_files = os.listdir(gold_dir)
system_files = os.listdir(system_dir)
# don't assume the directory content is the same, take the intersection
fnames = sorted(list(set(gold_files).intersection(set(system_files))))
# TODO: includes a hack to avoid a file, get rid of it
fnames = [f for f in fnames[:limit] if not f.endswith('wsj_0907.tml')]
return fnames
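# A minimal usage sketch using two temporary directories; only the file names
# present in both the gold and system directories are returned.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as gold, tempfile.TemporaryDirectory() as system:
        for name in ("a.tml", "b.tml"):
            open(os.path.join(gold, name), "w").close()
        open(os.path.join(system, "a.tml"), "w").close()
        assert _collect_files(gold, system, limit=10) == ["a.tml"]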
|
a940d6f958fe770580cc9a9a3327579e5f1b2633
| 3,644,096
|
import numpy as np
def center_img(img, size=None, fill_value=255):
"""
center img in a square background
"""
h, w = img.shape[:2]
if size is None:
size = max(h, w)
shape = (size, size) + img.shape[2:]
background = np.full(shape, fill_value, np.uint8)
center_x = (size - w) // 2
center_y = (size - h) // 2
background[center_y:center_y + h, center_x:center_x + w] = img
return background
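# A minimal usage sketch: a 2x4 patch of zeros gets centered on a 4x4 white
# square background.
if __name__ == "__main__":
    patch = np.zeros((2, 4), np.uint8)
    out = center_img(patch)
    assert out.shape == (4, 4)
    assert (out[1:3, :] == 0).all() and (out[0, :] == 255).all()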
|
838d6185230fbb8184925a31e0f3334dc4bda627
| 3,644,097
|
def concat_files(*files):
"""
Concat some files together. Returns out and err to keep parity with shell commands.
Args:
*files: src1, src2, ..., srcN, dst.
Returns:
out: string
err: string
"""
out = ''
err = ''
dst_name = files[-1]
    sources = files[:-1]
with open(dst_name, 'w') as dst:
for f in sources:
with open(f, 'r') as src:
for line in src:
dst.write(line)
return out, err
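# A minimal usage sketch: concatenate two small files into a third one inside
# a temporary directory.
if __name__ == "__main__":
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        a, b, dst = (os.path.join(tmp, n) for n in ("a.txt", "b.txt", "out.txt"))
        with open(a, "w") as fh:
            fh.write("first\n")
        with open(b, "w") as fh:
            fh.write("second\n")
        concat_files(a, b, dst)
        with open(dst) as fh:
            assert fh.read() == "first\nsecond\n"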
|
101c37e5b3955c153c8c2210e7575a62341c768a
| 3,644,098
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def distribution_quality( df, refdata, values, ascending, names, fig):
"""Locate the quantile position of each putative :class:`.DesingSerie`
in a list of score distributions.
:param df: Data container.
:type df: :class:`~pandas.DataFrame`
:param refdata: Data content to use as reference.
:type refdata: :class:`~pandas.DataFrame`
:param values: Contents from the data container that are expected to be plotted.
:type values: :func:`list` of :class:`str`
:param ascending: Way the data should be sorted. :data:`True` if the score is better
when lower, :data:`False` otherwise.
:type ascending: :func:`list` of :class:`bool`
:param names: Columns to use as identifiers for the query data.
:type names: :func:`list` of :class:`str`
:param fig: Figure into which the data is going to be plotted.
:type fig: :class:`~matplotlib.figure.Figure`
:return: :class:`~matplotlib.axes.Axes`
:raises:
:ValueError: If columns are requested that do not exist in the :class:`~pandas.DataFrame` of
data **and** reference.
        :ValueError: If there isn't an ``ascending`` definition for each ``value``.
:ValueError: If ``refdata`` or ``df`` are not :class:`~pandas.DataFrame`.
        :ValueError: If the requested names do not exist in the input data.
.. rubric:: Example:
.. ipython::
:okwarning:
In [1]: from rstoolbox.plot import distribution_quality
...: from rstoolbox.utils import load_refdata
...: import matplotlib.pyplot as plt
...: df = load_refdata('scop')
...: qr = pd.DataFrame([['2F4V', 'C'], ['3BFU', 'B'], ['2APJ', 'C'],
...: ['2C37', 'V'], ['2I6E', 'H']],
...: columns=['pdb', 'chain'])
...: qr = qr.merge(df, on=['pdb', 'chain'])
...: refs = []
...: for i, t in qr.iterrows():
...: refs.append(df[(df['length'] >= (t['length'] - 5)) &
...: (df['length'] <= (t['length'] + 5))])
...: fig = plt.figure(figsize=(25, 6))
...: ax = distribution_quality(df=qr, refdata=refs,
...: values=['score', 'pack', 'avdegree',
...: 'cavity', 'psipred'],
...: ascending=[True, False, True, True, False],
...: names=['pdb', 'chain'], fig=fig)
...: plt.tight_layout()
@savefig distribution_quality_docs1.png width=5in
In [2]: plt.show()
In [3]: plt.close()
"""
if not isinstance(df, pd.DataFrame):
raise ValueError('Unknown data format.')
if not isinstance(refdata, (pd.DataFrame, list)):
raise ValueError('Unknown reference data format.')
if len(set(values).difference(set(list(df.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the data container.")
if len(set(names).difference(set(list(df.columns)))) > 0:
raise ValueError("Some of the requested identifiers do not exist "
"in the data container.")
if isinstance(refdata, list):
if len(refdata) != df.shape[0]:
raise ValueError('If multiple references are provided, '
'there should be the same as queries.')
for i, x in enumerate(refdata):
if not isinstance(x, pd.DataFrame):
raise ValueError('Unknown reference {} data format.'.format(i))
if len(set(values).difference(set(list(x.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the {} reference container.".format(i))
    else:
        if len(set(values).difference(set(list(refdata.columns)))) > 0:
            raise ValueError("Some of the requested values do not exist "
                             "in the reference container.")
        refdata = [refdata, ] * df.shape[0]
if len(values) != len(ascending):
raise ValueError("Number of values and orders should match.")
ax = plt.subplot2grid((1, 1), (0, 0), fig=fig)
cmap = discrete_cmap_from_colors([(144.0 / 255, 238.0 / 255, 144.0 / 255),
(135.0 / 255, 206.0 / 255, 250.0 / 255),
(255.0 / 255, 165.0 / 255, 0.0 / 255),
(205.0 / 255, 92.0 / 255, 92.0 / 255)])
data = []
labs = []
identifiers = df[names[0]].map(str)
for i in range(1, len(names)):
identifiers += '_' + df[names[i]].map(str)
df = df.reset_index(drop=True)
for i, row in df.iterrows():
data.append([])
labs.append([])
for isc, sc in enumerate(values):
qt = refdata[i][sc].quantile([.25, .5, .75])
if row[sc] <= qt[.25]:
data[-1].append(.12 if ascending[isc] else .87)
labs[-1].append('Q1' if ascending[isc] else 'Q4')
elif row[sc] <= qt[.5]:
data[-1].append(.37 if ascending[isc] else .67)
labs[-1].append('Q2' if ascending[isc] else 'Q3')
elif row[sc] <= qt[.75]:
data[-1].append(.67 if ascending[isc] else .37)
labs[-1].append('Q3' if ascending[isc] else 'Q2')
else:
data[-1].append(.87 if ascending[isc] else .12)
labs[-1].append('Q4' if ascending[isc] else 'Q1')
df = pd.DataFrame(data, columns=values, index=identifiers)
sns.heatmap(df, square=True, cmap=cmap, cbar=False, annot=pd.DataFrame(labs), fmt='s', ax=ax)
plt.setp( ax.yaxis.get_majorticklabels(), rotation=0 )
return ax
|
33a762ed659df4767b5a05c5005ca4d1213e7d0a
| 3,644,099
|