content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def rewrite_string(
    searcher: AbstractSearcher,
    source: Text,
    path: Text,
    max_iterations=1,
) -> Text:
    """Apply every replacement found in *source* and return the rewritten text."""
    matches = find_iter(
        searcher,
        source,
        path,
        max_iterations=max_iterations,
    )
    return formatting.apply_substitutions(source, matches)
def build_path_result_tests(name):
    """
    Build a test for API commands that respond with ``Err`` and
    ``Mountpoint`` fields.

    :param unicode name: The name of the schema definition to validate;
        a ``"Tests"`` suffix is appended to form the test case name.
        (The original docstring documented a nonexistent ``command_name``
        parameter.)
    :return: ``TestCase``.
    """
    return build_schema_test(
        name=str(name + "Tests"),
        schema={"$ref": "/endpoints.json#/definitions/" + name},
        schema_store=SCHEMAS,
        failing_instances={
            b'additionalProperties': [
                # Extra field:
                {"Err": "", "Mountpoint": "/x", "extra": "y"},
                # Wrong fields:
                {"Result": "hello"},
            ],
            b'required': [
                # Missing field:
                {}, {"Mountpoint": "/x"},
            ],
            b'type': [
                # Wrong types:
                [], "", None,
            ],
        },
        passing_instances=[
            {"Err": "Something went wrong."},
            {"Err": "", "Mountpoint": "/x/"},
        ])
def get_mpls_autobw_template_detail_rpc(self, api_timeout=''):
    """
    This is an auto-generated method for the PySwitchLib.

    **Supported Versions**:
        * SLXOS: 17r.1.01a, 17r.2.00, 17s.1.02

    :type api_timeout: long or tuple(long, long)
    :param api_timeout: Timeout for connection and response in seconds. If a tuple is specified, then the first value is for the connection timeout and the second value is for the response timeout.
    :rtype: (*bool, list*)
    :returns: Returns a tuple.
        #. **api_success** (*bool*) - The success or failure of the API.
        #. **details** (*list*) - List of REST request/response dictionaries, keyed by the asset's ip address.
    :raises ConnectionError: If requests module connection or response timeout occurs.
    :raises UnsupportedOSError: If firmware version installed on asset is not supported.
    :raises RestInterfaceError: If requests module does not get a successful response from the rest URI.
    :raises ValueError: If the argument value does not meet type requirements or value restrictions.
    """
    operation_type = 'rpc'
    compositions_list = []
    # One (rpc binding, parent container, package) triple per supported SLXOS
    # firmware version; presumably the worker selects the binding matching
    # the target asset's firmware — confirm against _rpc_worker.
    bindings_list = [('pybind.slxos.v17r_1_01a.brocade_mpls_rpc.get_mpls_autobw_template_detail', 'pybind.slxos.v17r_1_01a.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17r_1_01a.brocade_mpls_rpc'), ('pybind.slxos.v17r_2_00.brocade_mpls_rpc.get_mpls_autobw_template_detail', 'pybind.slxos.v17r_2_00.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17r_2_00.brocade_mpls_rpc'), ('pybind.slxos.v17s_1_02.brocade_mpls_rpc.get_mpls_autobw_template_detail', 'pybind.slxos.v17s_1_02.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17s_1_02.brocade_mpls_rpc')]
    composed_child_list = []
    compositions_keyval_list = []
    bindings_keyval = {'kwargs_key_name': '', 'keyval': '', 'extra_keyval': ''}
    composed_child_leafval_list = []
    leafval_map = {}
    rest_leaf_name = ''
    choices_kwargs_map = {}
    leaf_os_support_map = {}
    # Validate the (empty) argument maps, build the pybind object for this
    # RPC, then dispatch it to the RPC worker.
    self._api_validation(choices_kwargs_map=choices_kwargs_map, leaf_os_support_map=leaf_os_support_map)
    pybind_object = self._get_pybind_object(operation_type=operation_type, compositions_list=compositions_list, bindings_list=bindings_list, composed_child_list=composed_child_list, compositions_keyval_list=compositions_keyval_list, bindings_keyval=bindings_keyval, composed_child_leafval_list=composed_child_leafval_list, leafval_map=leafval_map)
    return self._rpc_worker(operation_type=operation_type, pybind_object=pybind_object, resource_depth=1, timeout=api_timeout)
def empty(a):
    """Test whether the slice is empty (``None`` or zero volume)."""
    if a is None:
        return True
    return volume(a) == 0
def handle_server_error(exception):
    """
    Handle internal core server exceptions.

    Normally you should never call this method manually. In any environment
    where debug mode is False, the original error message will be replaced
    by a generic message before being sent to the client, for security
    reasons.

    :param CoreException exception: core exception instance.

    :returns: tuple[dict | object, int, CoreHeaders]
    :rtype: tuple
    """
    api_component = get_component(APIPackage.COMPONENT_NAME)
    return api_component.handle_server_error(exception)
def nnPredict(w1, w2, data):
    """Predict the labels of *data* given neural network weights w1, w2.

    Args:
        w1: weight matrix from the input layer to the hidden layer;
            w1(i, j) is the weight of the connection from input unit i
            to hidden unit j.
        w2: weight matrix from the hidden layer to the output layer;
            w2(i, j) is the weight of the connection from hidden unit i
            to output unit j.
        data: matrix whose rows are the feature vectors of the images.

    Returns:
        A column vector of predicted labels (currently an empty array —
        the prediction logic is left as an exercise).
    """
    predicted = np.array([])
    # Your code here
    return predicted
from typing import List
def count_pairs(array: List[int], difference: int) -> int:
    """
    Given an array of integers, count the number of unique pairs of integers
    that have a given difference.

    A pair (a, b) counts when both values occur in the array and
    ``a - b == difference``; duplicates are removed by working over the
    set of distinct values.

    Time complexity: O(n) — the original scanned every index pair in
    O(n^2), but for each distinct value ``a`` its partner is uniquely
    ``a - difference``, so one set lookup per value suffices.

    :param array: is the array to count.
    :param difference: is the difference between two elements.
    :return: the number of unique pairs of integers that have a given difference.
    """
    values = set(array)
    # Each distinct value contributes at most one pair (a, a - difference).
    return sum(1 for a in values if a - difference in values)
def decompress_deltas_18bit(buffer):
    """Parse packet deltas from 18-byte compression format.

    The payload packs 2 samples x 4 channels of 18-bit deltas back to back
    (2 * 4 * 18 = 144 bits = 18 bytes).  Each channel's bits are gathered
    into a 3-element "minibuf" (high bits first) and converted to a signed
    value by ``int32_from_18bit``.

    :param buffer: byte sequence of length 18 to decode.
    :returns: a 2x4 numpy array of deltas (rows = samples, cols = channels).
    :raises ValueError: if ``buffer`` is not exactly 18 bytes
        (as reported by ``bad_data_size``).
    """
    if bad_data_size(buffer, 18, "18-byte compressed packet"):
        raise ValueError("Bad input size for byte conversion.")
    deltas = np.zeros((2, 4))
    # Each channel below shifts/masks a sliding 18-bit window across the
    # byte stream; `& 0xFF` clamps intermediate shifts to one byte.
    # Sample 1 - Channel 1
    minibuf = [(buffer[0] >> 6),
               ((buffer[0] & 0x3F) << 2 & 0xFF) | (buffer[1] >> 6),
               ((buffer[1] & 0x3F) << 2 & 0xFF) | (buffer[2] >> 6)]
    deltas[0][0] = int32_from_18bit(minibuf)
    # Sample 1 - Channel 2
    minibuf = [(buffer[2] & 0x3F) >> 4,
               (buffer[2] << 4 & 0xFF) | (buffer[3] >> 4),
               (buffer[3] << 4 & 0xFF) | (buffer[4] >> 4)]
    deltas[0][1] = int32_from_18bit(minibuf)
    # Sample 1 - Channel 3
    minibuf = [(buffer[4] & 0x0F) >> 2,
               (buffer[4] << 6 & 0xFF) | (buffer[5] >> 2),
               (buffer[5] << 6 & 0xFF) | (buffer[6] >> 2)]
    deltas[0][2] = int32_from_18bit(minibuf)
    # Sample 1 - Channel 4 (byte-aligned: last 2 bits + two whole bytes)
    minibuf = [(buffer[6] & 0x03), buffer[7], buffer[8]]
    deltas[0][3] = int32_from_18bit(minibuf)
    # Sample 2 - Channel 1 (same layout, offset by 9 bytes)
    minibuf = [(buffer[9] >> 6),
               ((buffer[9] & 0x3F) << 2 & 0xFF) | (buffer[10] >> 6),
               ((buffer[10] & 0x3F) << 2 & 0xFF) | (buffer[11] >> 6)]
    deltas[1][0] = int32_from_18bit(minibuf)
    # Sample 2 - Channel 2
    minibuf = [(buffer[11] & 0x3F) >> 4,
               (buffer[11] << 4 & 0xFF) | (buffer[12] >> 4),
               (buffer[12] << 4 & 0xFF) | (buffer[13] >> 4)]
    deltas[1][1] = int32_from_18bit(minibuf)
    # Sample 2 - Channel 3
    minibuf = [(buffer[13] & 0x0F) >> 2,
               (buffer[13] << 6 & 0xFF) | (buffer[14] >> 2),
               (buffer[14] << 6 & 0xFF) | (buffer[15] >> 2)]
    deltas[1][2] = int32_from_18bit(minibuf)
    # Sample 2 - Channel 4
    minibuf = [(buffer[15] & 0x03), buffer[16], buffer[17]]
    deltas[1][3] = int32_from_18bit(minibuf)
    return deltas
import re
def password_validate(password):
    """Validate *password* against the strength policy regex.

    :param password: contains the users password
    :returns: Boolean (True when the password matches the policy)
    :raises: ValidationError when the password does not match
    """
    pattern = r"^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*?&])[A-Za-z\d@$!%*?&]{8,}$"
    if re.match(pattern, password) is None:
        raise serializers.ValidationError(
            {
                "message": "Password must be at least 8 characters long, at least one"
                + " capitalized character, alphanumeric and contain special characters."
            }
        )
    return True
def get_emr_cluster_status(cluster_id: str, detail: bool = False):
    """
    Provides cluster-level details including status, cluster_id, cluster_name and so on.

    Args:
        cluster_id: string, EMR cluster id
        detail: bool, provided additional detail about cluster like ec2 attributes
    Returns:
        cluster_status(): dict, cluster level details like id, name, state
    Raises:
        botocore.exceptions.ClientError: if describe_cluster fails.
    """
    cluster_status = {}
    try:
        # `emr` is a module-level boto3 EMR client (defined outside this view).
        response = emr.describe_cluster(ClusterId=cluster_id)
    except Exception as error:
        logger.error(error)
        # NOTE(review): ClientError's first argument is normally an
        # error-response dict, not an exception object — confirm intent.
        raise botocore.exceptions.ClientError(error, 'describe_cluster')
    else:
        # NOTE(review): the chained .get() calls assume every intermediate
        # key is present; a missing 'Status' etc. would raise AttributeError.
        cluster_status = {
            "cluster_id": response.get('Cluster').get('Id'),
            "cluster_name": response.get('Cluster').get('Name'),
            "status": response.get('Cluster').get('Status').get('State'),
            "protection": response.get('Cluster').get('TerminationProtected'),
            "message": response.get('Cluster').get('Status').get('StateChangeReason').get('Message')
        }
        if detail:
            cluster_status['ec2_attributes'] = {
                'subnet_id': response.get('Cluster').get('Ec2InstanceAttributes').get('Ec2SubnetId'),
                'availability_zone': response.get('Cluster').get('Ec2InstanceAttributes').get('Ec2AvailabilityZone'),
                'master_sg': response.get('Cluster').get('Ec2InstanceAttributes').get('EmrManagedMasterSecurityGroup'),
                'service_sg': response.get('Cluster').get('Ec2InstanceAttributes').get('ServiceAccessSecurityGroup')
            }
    return cluster_status
def get_keypair() -> KeyPairInfo:
    """Return the current keypair (ec2.KeyPairInfo).

    https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#keypairinfo
    """
    keypairs = get_keypair_dict()
    return keypairs[get_keypair_name()]
def char_vectorizer(char,
                    custom_alphabet=False):
    """
    Vectorize given nucleotide character as a one-hot list. Convert to
    uppercase before vectorizing.

    >>> char_vectorizer("C")
    [0, 1, 0, 0]
    >>> char_vectorizer("g")
    [0, 0, 1, 0]
    >>> char_vectorizer("M", ['E', 'H', 'I', 'M', 'S'])
    [0, 0, 0, 1, 0]
    """
    alphabet = custom_alphabet if custom_alphabet else ['A', 'C', 'G', 'U']
    char = char.upper()
    # Bug fix: the original message formatted the *length* where it claimed
    # to show the offending character; report the character itself.
    assert len(char) == 1, "given char length != 1 (given char: \"%s\")" % (char)
    return [1 if c == char else 0 for c in alphabet]
def orbit_from_name(name):
    """Return :py:class:`~poliastro.twobody.orbit.Orbit` objects for a name.

    Info is retrieved from the JPL DASTCOM5 database.

    Parameters
    ----------
    name : str
        NEO name.

    Returns
    -------
    orbit : list (~poliastro.twobody.orbit.Orbit)
        NEO orbits.
    """
    return [orbit_from_record(record) for record in record_from_name(name)]
def make_snowflake(timestamp_ms, datacenter_id, worker_id, sequence_id, twepoch=twepoch):
    """Generate a twitter-snowflake id, based on
    https://github.com/twitter/snowflake/blob/master/src/main/scala/com/twitter/service/snowflake/IdWorker.scala

    :param: timestamp_ms time since UNIX epoch in milliseconds"""
    # Field widths (low to high): sequence, worker, datacenter, timestamp.
    low_shift = sequence_id_bits
    mid_shift = worker_id_bits + sequence_id_bits
    high_shift = datacenter_id_bits + mid_shift
    ts_part = ((int(timestamp_ms) - twepoch) % max_timestamp) << high_shift
    dc_part = (datacenter_id % max_datacenter_id) << mid_shift
    worker_part = (worker_id % max_worker_id) << low_shift
    return ts_part + dc_part + worker_part + (sequence_id % max_sequence_id)
from typing import Sequence
def compute_committee(indices: Sequence[ValidatorIndex],
                      seed: Bytes32,
                      index: uint64,
                      count: uint64) -> Sequence[ValidatorIndex]:
    """
    Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``.
    """
    total = len(indices)
    begin = (total * index) // count
    finish = (total * (index + 1)) // count
    committee = []
    for position in range(begin, finish):
        committee.append(indices[compute_shuffled_index(position, total, seed)])
    return committee
def get_default_graph():
    """Build a Digraph preconfigured with the global graphviz options."""
    graph = Digraph('AWS', engine='dot')
    for option in ('splines=line', 'rankdir=LR', 'outputorder=edgesfirst'):
        graph.body.append(option)
    graph.node_attr.update(shape='rectangle', style='filled', color='black')
    return graph
from json import dumps
def GetJson(data):
    """Convert an object (dict/list/str/int/bytes/...) to a JSON string.

    ``bytes`` input is decoded as UTF-8 first, since ``json.dumps`` cannot
    serialize raw bytes.

    @data: the object to convert.
    """
    # Bug fix: the original tested ``data == bytes`` (value vs. the *type*
    # object), which is always False, so bytes were never decoded and
    # dumps() raised TypeError on them.
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return dumps(data)
def binary_tail(n: int) -> int:
    """ The last 1 digit and the following 0s of a binary representation, as a number """
    # n & -n isolates the lowest set bit (together with its trailing zeros),
    # which equals ((n ^ (n - 1)) + 1) >> 1 for all integers.
    return n & -n
from datetime import datetime
def time_left(expire_date):
    """Return remaining days before feature expiration or 0 if expired."""
    expires = datetime.strptime(expire_date, "%d-%b-%Y")
    remaining = (expires - datetime.today()).days
    # Clamp past dates to zero rather than returning a negative count.
    return max(remaining, 0)
import yaml
def load_config():
    """
    Load the YAML config; importer_config.yaml must be in the current
    working directory.
    """
    with open(_PATH) as config_file:
        config = yaml.safe_load(config_file)
    # Remove trailing slashes from URLs
    endpoint = config['kbase_endpoint']
    config['kbase_endpoint'] = endpoint.strip('/')
    return config
def get_package_version():
    """Return the Review Board version as a Python package version string.

    Returns:
        unicode:
        The Review Board package version.
    """
    major, minor, micro, patch, tag, release_num = VERSION[:6]
    version = '%s.%s' % (major, minor)
    if micro or patch:
        version += '.%s' % micro
        if patch:
            version += '.%s' % patch
    if tag != 'final':
        # Map pre-release tags to their PEP 440-style single letters.
        tag_letter = {'alpha': 'a', 'beta': 'b'}.get(tag, tag)
        version = '%s%s%s' % (version, tag_letter, release_num)
    return version
def create_state_representation(
    name: str, state_space: StateSpace
) -> StateRepresentation:
    """Factory function for state representations

    Returns:
        Representation:
    """
    # TODO: test
    factories = {
        'default': DefaultStateRepresentation,
        'no_overlap': NoOverlapStateRepresentation,
    }
    if name in factories:
        return factories[name](state_space)
    if name == 'compact':
        raise NotImplementedError
    raise ValueError(f'invalid name {name}')
def fit(x, y, sigy=None, error=False):
    """ Perform a linear fit on a range of x and y values.

    This function fits a set of data points x, y with individual standard
    deviations sigy for the y values to a straight line y = a + bx by
    minimizing chi-square.

    If the 'error' parameter is False the function returns only the found
    values for 'a' and 'b' as a tuple. If 'error' is True then a third tuple
    item is returned containing a dictionary of error statistics ('siga' the
    standard deviation of 'a', 'sigb' the standard deviation of 'b', 'chi2'
    the value of chi-square, and 'q' the goodness of fit probability).

    If 'sigy' is 'None' then no standard deviations are used, 'q' will be 1.0
    and the normalization of chi-square is to unit standard deviation for all
    points.

    Raises ValueError if 'x' and 'y' differ in length, if fewer than 2
    points are given, or if 'sigy' does not match 'y' in length.

    Example:
    >>> a, b, err = fit([1.1, 1.95, 3.05], [1, 2.01, 2.95], error=True)
    """
    # This routine is based on an algorithm from Numerical Recipes
    x = np.asarray(x).astype(float)
    y = np.asarray(y).astype(float)
    if x.size != y.size:
        # Bug fix: the original constructed this ValueError but never
        # raised it, silently proceeding with mismatched arrays.
        raise ValueError("Arrays 'x' and 'y' have different length")
    if x.size < 2:
        raise ValueError("Arrays 'x' and 'y' should have at least 2 elements")
    useWeights = False
    if sigy is not None:
        sigy = np.asarray(sigy)
        if sigy.size != y.size:
            raise ValueError("Arrays 'sigy' and 'y' have different length")
        useWeights = True
    # We need to minimize:
    #
    #   chi2 = sum( ((y[i] - a - b * x[i]) / sigy[i])^2 ; i=1..N)
    #
    # When taking derivatives with respect to a and b we get
    #
    #   dchi2/da = 0 = -2 * sum( (y[i] - a - b * x[i]) / sig[i]^2 ; i=1..N)
    #   dchi2/db = 0 = -2 * sum( x[i] * (y[i] - a - b * x[i]) / sig[i]^2 ; i=1..N)
    #
    # which provides us with a linear equation that we can use to solve a and b
    if useWeights:
        weights = 1 / (np.square(sigy.flat))
        S = np.sum(weights)
        Sx = np.dot(x.flat, weights)
        Sy = np.dot(y.flat, weights)
        t = x.flat - Sx / S
        tw = t * weights
        Stt = np.dot(tw, t)
        b = np.dot(tw, y.flat) / Stt
    else:
        S = x.size
        Sx = np.sum(x.flat)
        Sy = np.sum(y.flat)
        t = x.flat - Sx / S
        Stt = np.dot(t, t)
        b = np.dot(t, y.flat) / Stt
    a = (Sy - Sx * b) / S
    if not error:
        return (a, b)
    # Error statistics: standard deviations of a and b, chi-square, and the
    # goodness-of-fit probability q (only meaningful with weights and N > 2).
    siga = np.sqrt((1 + Sx * Sx / (S * Stt)) / S)
    sigb = np.sqrt(1 / Stt)
    chi2 = 0.0
    q = 1.0
    if useWeights:
        chi = (y - a - b * x.flat) / sigy.flat
        chi2 = np.dot(chi, chi)
        if x.size > 2:
            q = gammaq(0.5 * (x.size - 2), 0.5 * chi2)
    else:
        chi = y.flat - a - b * x.flat
        chi2 = np.dot(chi, chi)
        if x.size > 2:
            # Without weights, rescale the errors by the scatter of the data.
            sigdat = np.sqrt(chi2 / (x.size - 2))
            siga *= sigdat
            sigb *= sigdat
    return (a, b, {'siga': siga, 'sigb': sigb, 'chi2': chi2, 'q': float(q)})
def eval_metrics_offline(cfg,
                         pred_folder,
                         palette,
                         metrics=['mFscoreCD', 'mFscore']
                         ):
    """Evaluate offline prediction images against a dataset's ground truth.

    Args:
        cfg (Config): config dict, must have the necessary items to build a
            dataset
        pred_folder (str): directory of prediction images, named like the
            dataset's annotation maps
        palette (list | ndarray): palette of the label
        metrics ([str]): metrics to be calculated. Default:
            ['mFscoreCD', 'mFscore']

    Returns:
        returns of the evaluate method of the dataset class
    """
    assert osp.isdir(pred_folder)
    dataset = build_dataset(cfg)
    predictions = [
        read_s2looking_label(osp.join(pred_folder, info['ann']['seg_map']), palette)
        for info in dataset.img_infos
    ]
    return dataset.evaluate(predictions, metric=metrics)
def zero_pad_and_crop(img, amount=4):
    """Zero pad by `amount` zero pixels on each side then take a random crop.

    Args:
        img: numpy image (H, W, C) that will be zero padded and cropped.
        amount: amount of zeros to pad `img` with horizontally and vertically.

    Returns:
        The cropped zero padded img. The returned numpy array will be of the
        same shape as `img`.
    """
    padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2,
                           img.shape[2]))
    padded_img[amount:img.shape[0] + amount, amount:
               img.shape[1] + amount, :] = img
    # Off-by-one fix: randint's `high` bound is exclusive, so the original
    # range [0, 2*amount) could never select the bottom/right-most crop
    # position; the valid offset range is [0, 2*amount] inclusive.  This
    # also makes amount=0 a no-op crop instead of a randint ValueError.
    top = np.random.randint(low=0, high=2 * amount + 1)
    left = np.random.randint(low=0, high=2 * amount + 1)
    new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :]
    return new_img
def video_id(video_id_or_url):
    """
    Returns video id from given video id or url

    Parameters:
    -----------
    video_id_or_url: str - either a video id or url

    Returns:
    --------
    the video id
    """
    if 'watch?v=' in video_id_or_url:
        # Keep only the id itself: drop any further query parameters
        # (e.g. "...watch?v=abc&t=42") that follow the v= value, which the
        # original implementation leaked into the returned id.
        return video_id_or_url.split('watch?v=')[1].split('&')[0]
    else:
        # assume we already have a video id
        return video_id_or_url
def check_permission(resource: Resource, permission_name: str) -> dict:
    """
    Check if requester has sufficient permissions to do something on specific resource.

    Raises if not; otherwise returns the authorizer's restrictions.
    """
    guest_policy = resource.get_guest_authorization()
    auth_header = resource.request.headers.get("Authorization")
    # Anonymous access is only allowed when a guest policy exists.
    if auth_header is None and not guest_policy:
        raise Unauthorized("Authorization header missing or empty")
    authorizer = Authorizer(
        auth_jwt=auth_header,
        resource_name=resource.get_name(),
        permission_name=permission_name,
        base_permission_policy=guest_policy,
    )
    authorizer.check_access()
    return authorizer.restrictions
from datetime import datetime
import json
def makePalRecordsConsistent(pal_records, low_frequency, high_frequency,
                             user_id, fcc_channel_id="1",
                             start_date=None, end_date=None):
    """Make Pal object consistent with the inputs

    Args:
        pal_records: (list) A list of PAL Records in the form of dictionary.
        low_frequency: (number) The Primary Low Frequency in Hz for PAL.
        high_frequency: (number) The Primary High Frequency in Hz for PAL.
        user_id: (string) The userId to put in PAL Records.
        fcc_channel_id: (string) The FCC-supplied frequency channel identifier.
        start_date: (string) PAL license start date, generally set as one year
          before the current date
        end_date: (string) PAL license expiration date, generally set as more than
          one year after the current date
    Returns:
        A list containing individual PAL records in the form of dictionary
    Note: The PAL Dictionary must contain censusYear(number) and
          fipsCode(number)
    """
    # Default license window: one year back to one year ahead of "now".
    start_date = datetime.now().replace(year=datetime.now().year - 1) \
        if start_date is None else start_date
    end_date = datetime.now().replace(year=datetime.now().year + 1) \
        if end_date is None else end_date
    for index, pal_rec in enumerate(pal_records):
        pal_fips_code = pal_rec['fipsCode']
        pal_census_year = pal_rec['censusYear']
        del pal_rec['fipsCode'], pal_rec['censusYear']
        # NOTE(review): `defaultdict` is presumably imported from
        # `collections` elsewhere in this module — confirm.  It lets the
        # nested assignments below create intermediate dicts on demand.
        pal_rec = defaultdict(lambda: defaultdict(dict), pal_rec)
        # Change the FIPS Code and Registration Date-Year in Pal Id
        pal_rec['palId'] = '/'.join(['pal', '%s-%d' %
                                     ('{:02d}'.format(start_date.month),
                                      start_date.year),
                                     str(pal_fips_code), fcc_channel_id])
        pal_rec['userId'] = user_id
        # Make the date consistent in Pal Record for Registration and License
        pal_rec['registrationInformation']['registrationDate'] = \
            start_date.strftime('%Y-%m-%dT%H:%M:%SZ')
        # Change License Information in Pal
        pal_rec['license']['licenseAreaIdentifier'] = str(pal_fips_code)
        pal_rec['license']['licenseAreaExtent'] = \
            'zone/census_tract/census/{}/{}'.format(pal_census_year, pal_fips_code)
        pal_rec['license']['licenseDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%SZ')
        pal_rec['license']['licenseExpiration'] = end_date.strftime('%Y-%m-%dT%H:%M:%SZ')
        pal_rec['license']['licenseFrequencyChannelId'] = fcc_channel_id
        # Change Frequency Information in Pal
        pal_rec['channelAssignment']['primaryAssignment']['lowFrequency'] = low_frequency
        pal_rec['channelAssignment']['primaryAssignment']['highFrequency'] = high_frequency
        # Converting from defaultdict to dict (round-trip through JSON)
        pal_records[index] = json.loads(json.dumps(pal_rec))
    return pal_records
import yaml
def read_config():
    """Read from rotest.yml config elrados segment.

    Returns an AttrDict of the "elrados" section, or an empty AttrDict when
    no config file is found.
    """
    config_path = search_config_file()
    if config_path is not None:
        with open(config_path, "r") as config_file:
            configuration_content = config_file.read()
        # Use safe_load: plain yaml.load without an explicit Loader is
        # deprecated and can instantiate arbitrary Python objects from the
        # file (see PyYAML's loader documentation).
        yaml_configuration = yaml.safe_load(configuration_content)
    else:
        yaml_configuration = {}
    return AttrDict(yaml_configuration.get("elrados", {}))
import typing
def cast_to_str(some_value: typing.Any, from_type: typing.Any) -> typing.Any:
    """Just helper for creating suitable test assets.

    Bytes are decoded; everything else is passed through ``str``.
    """
    return some_value.decode() if from_type == bytes else str(some_value)
def downsample_by_group(df, min_distance=1., message_callback=print):
    """Group and down-sample a DataFrame of xrsd records.

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe containing xrsd samples
    min_distance : float
        the minimum allowed nearest-neighbor distance
        for continuing to downsample after 10 or more samples
        have been selected
    message_callback : callable or None
        progress reporter; pass None to silence.

    Returns
    -------
    data_sample : pandas.DataFrame
        DataFrame containing all of the down-sampled data from
        each group in the input dataframe.
        Features in this DataFrame are not scaled:
        the correct scaler should be applied before training models.
    """
    group_cols = ['experiment_id', 'system_class']
    all_groups = df.groupby(group_cols)
    sampled_frames = []
    # downsample each group independently
    for group_labels, grp in all_groups.groups.items():
        group_df = df.iloc[grp].copy()
        if message_callback:
            message_callback('Downsampling data for group: {}'.format(group_labels))
        dsamp = downsample(df.iloc[grp].copy(), min_distance)
        if message_callback:
            message_callback('Finished downsampling: kept {}/{}'.format(len(dsamp), len(group_df)))
        sampled_frames.append(dsamp)
    # Bug fix: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0; collect the per-group frames and concatenate them once.
    if not sampled_frames:
        return pd.DataFrame(columns=df.columns)
    return pd.concat(sampled_frames)
from typing import Union
def get_pure_ratings(
    ratings: Union[str, pd.Series, pd.DataFrame]
) -> Union[str, pd.Series, pd.DataFrame]:
    """Removes rating watches/outlooks.

    Parameters
    ----------
    ratings : str, pd.Series, or pd.DataFrame
        Rating may contain watch, such as `AA- *+`, `BBB+ (CwNegative)`.
        Outlook/watch should be seperated by a blank from the actual rating.

    Returns
    -------
    Union[str, pd.Series, pd.DataFrame]
        String, Series, or DataFrame with regular ratings stripped off of watches.
        The name of the resulting Series or the columns of the returning DataFrame will
        be suffixed with `_clean`.  The input Series/DataFrame is left
        unmodified (the original implementation mutated and renamed it
        in place).

    Examples
    --------
    Cleaning a single rating:

    >>> get_pure_ratings("AA- *+")
    'AA-'

    >>> get_pure_ratings("Au")
    'A'

    Cleaning a `pd.Series`:

    >>> import pandas as pd
    >>> rating_series=pd.Series(
    ...     data=[
    ...         "BB+ *-",
    ...         "BBB *+",
    ...         np.nan,
    ...         "AA- (Developing)",
    ...         np.nan,
    ...         "CCC+ (CwPositive)",
    ...         "BB+u",
    ...     ],
    ...     name="rtg_SP",
    ... )
    >>> get_pure_ratings(rating_series)
    0     BB+
    1     BBB
    2     NaN
    3     AA-
    4     NaN
    5    CCC+
    6     BB+
    Name: rtg_SP_clean, dtype: object

    Cleaning a `pd.DataFrame`:

    >>> rtg_df = pd.DataFrame(
    ...     data={
    ...         "rtg_SP": [
    ...             "BB+ *-",
    ...             "BBB *+",
    ...             np.nan,
    ...             "AA- (Developing)",
    ...             np.nan,
    ...             "CCC+ (CwPositive)",
    ...             "BB+u",
    ...         ],
    ...         "rtg_Fitch": [
    ...             "BB+ *-",
    ...             "BBB *+",
    ...             pd.NA,
    ...             "AA- (Developing)",
    ...             np.nan,
    ...             "CCC+ (CwPositive)",
    ...             "BB+u",
    ...         ],
    ...     },
    ... )
    >>> get_pure_ratings(rtg_df)
      rtg_SP_clean rtg_Fitch_clean
    0          BB+             BB+
    1          BBB             BBB
    2          NaN            <NA>
    3          AA-             AA-
    4          NaN             NaN
    5         CCC+            CCC+
    6          BB+             BB+
    """
    if isinstance(ratings, str):
        ratings = ratings.split()[0]
        ratings = ratings.rstrip("uU")
        return ratings
    elif isinstance(ratings, pd.Series):
        # Bug fix: operate on a copy so the caller's Series is not mutated
        # and renamed in place as a side effect.
        ratings = ratings.copy()
        # identify string occurrences
        isstring = ratings.apply(type).eq(str)
        # strip string after occurrence of very first blank and strip character 'u',
        # which has usually been added without a blank
        ratings[isstring] = ratings[isstring].str.split().str[0]
        ratings[isstring] = ratings[isstring].str.rstrip("uU")
        ratings.name = f"{ratings.name}_clean"
        return ratings
    elif isinstance(ratings, pd.DataFrame):
        # Recursive call of `get_pure_ratings`
        return pd.concat(
            [get_pure_ratings(ratings=ratings[col]) for col in ratings.columns], axis=1
        )
def export_grid(outname, resh, ignore=[255, 0]):
    """Export a labelled voxel grid to a point cloud file.

    Args:
        outname (str): Output file name.
        resh (np.array): 3D volume in grid representation (integer labels
            that index into the color map below).
        ignore (list, optional): label values to skip. Defaults to [255, 0].

    Returns:
        (pts, colors): N x 3 integer voxel coordinates and the N x 3 RGB
        colors of the kept voxels, in the same x, y, z scan order as the
        original nested loops.
    """
    colorMap = np.array([[255, 0, 0],
                         [0, 255, 0],
                         [0, 0, 255],
                         [80, 128, 255],
                         [255, 230, 180],
                         [255, 0, 255],
                         [0, 255, 255],
                         [100, 0, 0],
                         [0, 100, 0],
                         [255, 255, 0],
                         [50, 150, 0],
                         [200, 255, 255],
                         [255, 200, 255],
                         [128, 128, 80],
                         [0, 50, 128],
                         [0, 100, 100],
                         [0, 255, 128],
                         [0, 128, 255],
                         [255, 0, 128],
                         [128, 0, 255],
                         [255, 128, 0],
                         [128, 255, 0],
                         ])
    # Vectorized replacement for the original O(X*Y*Z) Python triple loop:
    # np.argwhere enumerates kept cells in C (x, y, z) order, matching the
    # original iteration order, and fancy indexing maps labels to colors.
    mask = ~np.isin(resh, ignore)
    pts = np.argwhere(mask)
    colors = colorMap[resh[mask]]
    trimesh.points.PointCloud(pts, colors).export(outname)
    return pts, colors
def sum_errors(dic):
    """Helper function to sum up number of failed jobs per host.

    Assumes that dic is in the form
    :param dict dic: {"error_code1":count1, "error_code2":count2, etc.}
    :return int: Sum of all values in dic
    """
    # Bug fix: dict.iteritems() only exists on Python 2; summing the values
    # view works on Python 3 (and is simpler).
    return sum(dic.values())
def checkFriends(new_user, usernameOfFriend):
    """ Check if users are friends or not.

    A pending (unapproved) friend request never counts as friendship, so
    only the confirmed-friends lookup decides the result.
    """
    # Check if users are friends first
    viewF = viewFriends(new_user, usernameOfFriend)
    # The pending-request lookup is kept for parity with the original flow;
    # its result cannot change the outcome (pending requests return False).
    searchPendingRequests(new_user, usernameOfFriend)
    # The original if/elif/else returned False in both non-friend branches.
    return bool(viewF)
def update(instance, **data):
    """Update instance with data directly by using ``update()``
    skipping calling ``save()`` method.

    Usage: ``instance = update(instance, some_field=some_value)``
    """
    manager = instance.__class__.objects
    manager.filter(pk=instance.pk).update(**data)
    return refresh(instance)
def cast_distance_matrix_to_optimal_integer_type(D_X):
    """
    Cast distance matrix to smallest signed integer type, sufficient
    to hold all its distances.

    Parameters
    -----------
    D_X: np.array (|X|×|X|)
        Distance matrix of a compact metric space X with integer
        distances.

    Returns
    --------
    D: np.array (|X|×|X|)
        Distance matrix of the metric space, cast to optimal type.
    """
    # Type is signed integer to allow subtractions.
    optimal_int_type = determine_optimal_int_type(np.max(D_X))
    return D_X.astype(optimal_int_type)
def getObject(name):
    """Get a reference to a single Rhino object, raising on failure."""
    rc, obRef = Rhino.Input.RhinoGet.GetOneObject(
        "Select " + name, True, Rhino.DocObjects.ObjectType.AnyObject)
    # Bail out when the pick failed or returned nothing.
    if rc != Rhino.Commands.Result.Success or not obRef:
        raise NameError(rc)
    return obRef
def get_falsecolor(input):
    """
    picks false color bands from the 12 Sentinel bands (for visual interpretation of vegetation)

    :param input: 12-band image tensor
    :return: 3-band NIR-RED-GREEN tensor
    """
    false_color_bands = ("S2B8", "S2B4", "S2B3")
    selected_idxs = [bands.index(b) for b in false_color_bands]
    return input[selected_idxs]
import sys
def CUnescape(text):
    """Unescape a text string with C-style escape sequences to UTF-8 bytes.

    Returns bytes on both Python 2 and Python 3 (the ##PY25 markers guard
    the Python-2-only 'string_escape' path).
    """
    def ReplaceHex(m):
        # Only replace the match if the number of leading back slashes is odd. i.e.
        # the slash itself is not escaped.
        if len(m.group(1)) & 1:
            return m.group(1) + 'x0' + m.group(2)
        return m.group(0)
    # This is required because the 'string_escape' encoding doesn't
    # allow single-digit hex escapes (like '\xf').
    # `_CUNESCAPE_HEX` is a module-level compiled regex — defined elsewhere.
    result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
    if sys.version_info[0] < 3: ##PY25
##!PY25    if str is bytes:  # PY2
        return result.decode('string_escape')
    # Python 3: map high characters through the project's escape table
    # (`_cescape_highbit_to_str`, defined elsewhere), then round-trip
    # through the codec pair to produce properly unescaped bytes.
    result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
    return (result.encode('ascii')  # Make it bytes to allow decode.
            .decode('unicode_escape')
            # Make it bytes again to return the proper type.
            .encode('raw_unicode_escape'))
import ast
def apply_bin_op(left, right, op):
    """
    Finds binary expression class suitable for combination of left and right
    expressions depending on whether their output is scalar or vector and
    creates instance of this expression with specified operation.
    """
    shape_key = (left.output_size > 1, right.output_size > 1)
    expr_class = BIN_EXPR_CLASSES.get(shape_key)
    if expr_class is None:
        # No direct class for this combination: swap the operands and fall
        # back to the vector/number expression.
        left, right = right, left
        expr_class = ast.BinVectorNumExpr
    return expr_class(left, right, op)
import sys
def create_module(dotted_name=None, filename=None, *, update_parent=True):
    """Create a new blank module at run time, insert it into `sys.modules`, and return it.
    This is a utility function that closely emulates what Python's standard
    importer does. It fills in some attributes of the module, and inserts the
    new module into `sys.modules`. Used by `run` when no module is given.
    However, this does not care whether a module by the given dotted name is already
    in `sys.modules`; if so, its entry will be overwritten.
    `dotted_name`: Fully qualified name of the module, for `sys.modules`. Optional.
                   Used as the `__name__` attribute of the module. If not provided,
                   a unique placeholder name will be auto-generated.
                   If `dotted_name` has at least one dot in it, the parent package
                   for the new module must already exist in `sys.modules`. The new
                   module's `__package__` attribute is set to the dotted name of
                   the parent.
                   If `dotted_name` has no dots in it, the new module is a top-level
                   module; its `__package__` attribute is set to `None`.
    `filename`: Full path to the `.py` file the module represents, if applicable.
                Otherwise some descriptive string is recommended. Optional.
                Used as the `__file__` attribute of the module. If not provided,
                a description will be auto-generated (based on `dotted_name` if
                that was provided).
    `update_parent`: bool, whether to honor Python's package semantics.
                     This parameter is used only when `dotted_name` has at least
                     one dot in it.
                     If `update_parent=True`, the new module is added to its
                     parent's namespace, like Python's importer would do.
                     Almost always, this is the right thing to do to achieve
                     least astonishment.
                     If `update_parent=False`, the parent module is not touched.
                     This is occasionally useful, to avoid causing any changes to
                     program state outside the new module object, while allowing
                     the execution of code that uses relative imports in the context
                     of the new module.
                     An example of when it is the right thing to **not** honor
                     package semantics can be found in the multi-phase compiler.
                     It must avoid updating parent modules when compiling a temporary
                     higher-phase module, so that any existing references in other
                     modules (to an old but complete version of the module being
                     compiled) will not be clobbered with ones pointing to the
                     temporary module (that is incomplete, because the module
                     being compiled hasn't reached phase 0 yet).
    """
    # --- validate arguments before touching any global state ---
    if dotted_name:
        if not isinstance(dotted_name, str):
            raise TypeError(f"`dotted_name` must be an `str`, got {type(dotted_name)} with value {repr(dotted_name)}")
        path = dotted_name.split(".")
        if not all(component.isidentifier() for component in path):
            raise TypeError(f"each component of `dotted_name` must be a valid identifier`, got {repr(dotted_name)}")
    if filename and not isinstance(filename, str):
        raise TypeError(f"`filename` must be an `str`, got {type(filename)} with value {repr(filename)}")
    # gensym(""): project helper — presumably returns a fresh unique id
    # string used for the auto-generated name/description; confirm at its def.
    uuid = gensym("")
    if not filename:
        if dotted_name:
            filename = f"<dynamically created module '{dotted_name}'>"
        else:
            filename = f"<dynamically created module {uuid}>"
    dotted_name = dotted_name or f"dynamically_created_module_{uuid}"
    # Look at the definition of `types.ModuleType` for available attributes.
    #
    # We always populate `__name__` and `__file__`, and when applicable, `__package__`.
    #
    # `__loader__` and `__spec__` are left to the default value `None`, because
    # those don't make sense for a dynamically created module.
    #
    # `__doc__` can be filled later (by `run`, if that is used); we don't have the AST yet.
    #
    # `__dict__` is left at the default value, the empty dictionary. It is filled later,
    # when some code is executed in this module.
    #
    module = ModuleType(dotted_name)
    module.__name__ = dotted_name
    module.__file__ = filename
    # Manage the package abstraction, like the importer does - with the difference that we
    # shouldn't import parent packages here. To keep things simple, we only allow creating
    # a module with dots in the name if its parent package already exists in `sys.modules`.
    if dotted_name.find(".") != -1:
        packagename, finalcomponent = dotted_name.rsplit(".", maxsplit=1)
        package = sys.modules.get(packagename, None)
        if not package:
            raise ModuleNotFoundError(f"while dynamically creating module '{dotted_name}': its parent package '{packagename}' not found in `sys.modules`")
        module.__package__ = packagename
        if update_parent:
            # The standard importer adds submodules to the package namespace, so we should too.
            # http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html
            setattr(package, finalcomponent, module)
    # Register (or overwrite) the entry in sys.modules — see docstring.
    sys.modules[dotted_name] = module
    return module | bf20dd906c757c7d7d16dc7cacd7744a96b77feb | 27,741
def pre_filter(image):
    """Apply a 3x3 morphological opening to the image (noise suppression)."""
    kernel = np.ones((3, 3))
    return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
def build_corpus(docs: DocumentSet, *, remove_words=None, min_word_length=3,
                 min_docs=5, max_docs_ratio=0.75, max_tokens=5000,
                 replace_words=None, custom_bigrams=None, ngram_threshold=None
                 ) -> Corpus:
    """ Build a `Corpus` object.
    Takes the words from the title/abstract of the given documents, runs
    them through a configurable preprocessing pipeline, and returns a
    corpus holding a word-frequency vector per document. The preprocessing
    stage is highly customizable, so it is advised to experiment with the
    many parameters.
    :param remove_words: words ignored while building the frequency vectors.
    :param min_word_length: words shorter than this are dropped.
    :param min_docs: words occurring in fewer than this many documents are
                     dropped.
    :param max_docs_ratio: words occurring in more than this fraction of
                           the documents (ratio between 0 and 1) are
                           dropped.
    :param max_tokens: only the top most common tokens are preserved.
    :param replace_words: `dict` mapping *original word* to *replacement
                          word*.
    :param custom_bigrams: `dict` whose keys are `(first, second)` tuples
                           and whose values are their merged replacement,
                           e.g. key `("Big", "Data")`, value `"BigData"`.
    :param ngram_threshold: threshold passed to
                            `gensim.models.phrases.Phrases` to detect
                            common n-grams.
    :returns: a `Corpus object`.
    """
    # Preprocessing steps run in this order; each takes and returns tokens.
    pipeline = []
    if custom_bigrams:
        pipeline.append(lambda tokens: preprocess_merge_bigrams(tokens, custom_bigrams))
    if remove_words:
        pipeline.append(lambda tokens: preprocess_remove_words(tokens, remove_words))
    if replace_words:
        pipeline.append(lambda tokens: preprocess_replace_words(tokens, replace_words))
    if min_word_length:
        pipeline.append(lambda tokens: preprocess_remove_short(tokens,
                                                               min_length=min_word_length))
    pipeline.append(preprocess_stopwords)
    if ngram_threshold is not None:
        pipeline.append(lambda tokens: preprocess_merge_ngrams(tokens, ngram_threshold))
    pipeline.append(preprocess_smart_stemming)
    if min_docs > 1 or max_docs_ratio < 1.0:
        max_docs = int(len(docs) * max_docs_ratio)
        pipeline.append(lambda tokens: preprocess_outliers(tokens, min_docs, max_docs))
    return Corpus(docs, pipeline, max_tokens)
def _Data_to_bytes(data: Data) -> bytes:
    """
    Coerce a websockets ``Data`` value (str or bytes) to bytes.

    Parameters
    ----------
    data : str | bytes

    Returns
    -------
    bytes
        The UTF-8 encoding of a str input, or the bytes input unchanged.
    """
    if isinstance(data, str):
        return data.encode()
    return data
import torch
import time
def run_pairs(genotype_df, variant_df, phenotype1_df, phenotype2_df, phenotype_pos_df,
              covariates1_df=None, covariates2_df=None, p1=1e-4, p2=1e-4, p12=1e-5, mode='beta',
              maf_threshold=0, window=1000000, batch_size=10000, logger=None, verbose=True):
    """Compute COLOC for all phenotype pairs.

    For each phenotype (rows shared by `phenotype1_df` and `phenotype2_df`),
    selects the cis-window genotypes, optionally residualizes each cohort's
    phenotype against its covariates, filters monomorphic (and optionally
    low-MAF) variants, and runs `coloc`, collecting the five posterior
    probabilities (PP H0..H4) per phenotype.

    NOTE(review): `batch_size` is accepted but never used in this body —
    confirm whether batching was intended here.

    :return: pandas.DataFrame indexed by phenotype with columns
        'pp_h0_abf'..'pp_h4_abf'.
    """
    assert np.all(phenotype1_df.index == phenotype2_df.index)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if logger is None:
        logger = SimpleLogger()
    logger.write('Computing COLOC for all pairs of phenotypes')
    logger.write(f' * {phenotype1_df.shape[0]} phenotypes')
    logger.write(f' * phenotype group 1: {phenotype1_df.shape[1]} samples')
    logger.write(f' * phenotype group 2: {phenotype2_df.shape[1]} samples')
    if covariates1_df is not None:
        assert np.all(phenotype1_df.columns == covariates1_df.index)
        logger.write(f' * phenotype group 1: {covariates1_df.shape[1]} covariates')
        residualizer1 = Residualizer(torch.tensor(covariates1_df.values, dtype=torch.float32).to(device))
    else:
        residualizer1 = None
    if covariates2_df is not None:
        assert np.all(phenotype2_df.columns == covariates2_df.index)
        logger.write(f' * phenotype group 2: {covariates2_df.shape[1]} covariates')
        residualizer2 = Residualizer(torch.tensor(covariates2_df.values, dtype=torch.float32).to(device))
    else:
        residualizer2 = None
    if maf_threshold > 0:
        logger.write(f' * applying in-sample {maf_threshold} MAF filter (in at least one cohort)')
    # Column indices mapping each cohort's samples into the genotype matrix.
    genotype1_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype1_df.columns])
    genotype1_ix_t = torch.from_numpy(genotype1_ix).to(device)
    genotype2_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype2_df.columns])
    genotype2_ix_t = torch.from_numpy(genotype2_ix).to(device)
    igc = genotypeio.InputGeneratorCis(genotype_df, variant_df, phenotype1_df, phenotype_pos_df, window=window)
    coloc_df = []
    start_time = time.time()
    logger.write(' * Computing pairwise colocalization')
    for phenotype1, genotypes, genotype_range, phenotype_id in igc.generate_data(verbose=verbose):
        phenotype2 = phenotype2_df.loc[phenotype_id]
        # copy to GPU
        phenotype1_t = torch.tensor(phenotype1, dtype=torch.float).to(device)
        phenotype2_t = torch.tensor(phenotype2, dtype=torch.float).to(device)
        genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
        genotypes1_t = genotypes_t[:,genotype1_ix_t]
        genotypes2_t = genotypes_t[:,genotype2_ix_t]
        del genotypes_t
        # filter monomorphic sites (all 0, all 1, or all 2 in either cohort)
        m = ((genotypes1_t==0).all(1) | (genotypes1_t==1).all(1) | (genotypes1_t==2).all(1) |
             (genotypes2_t==0).all(1) | (genotypes2_t==1).all(1) | (genotypes2_t==2).all(1))
        genotypes1_t = genotypes1_t[~m]
        genotypes2_t = genotypes2_t[~m]
        impute_mean(genotypes1_t)
        impute_mean(genotypes2_t)
        if maf_threshold > 0:
            # keep variants passing the MAF threshold in at least one cohort
            maf1_t = calculate_maf(genotypes1_t)
            maf2_t = calculate_maf(genotypes2_t)
            mask_t = (maf1_t >= maf_threshold) | (maf2_t >= maf_threshold)
            genotypes1_t = genotypes1_t[mask_t]
            genotypes2_t = genotypes2_t[mask_t]
        coloc_t = coloc(genotypes1_t, genotypes2_t, phenotype1_t, phenotype2_t,
                        residualizer1=residualizer1, residualizer2=residualizer2,
                        p1=p1, p2=p2, p12=p12, mode=mode)
        coloc_df.append(coloc_t.cpu().numpy())
    logger.write(' time elapsed: {:.2f} min'.format((time.time()-start_time)/60))
    coloc_df = pd.DataFrame(coloc_df, columns=[f'pp_h{i}_abf' for i in range(5)], index=phenotype1_df.index)
    logger.write('done.')
    return coloc_df | 3a38f2f38bd7351b93d3ede0f6c6d2e07427263d | 27,745
def main(local_argv):
    """
    Print and return the first n primes, where n is taken from the command
    line.

    :param local_argv: the argument list; local_argv[0] is the program name
        and local_argv[1] must be a positive integer n.
    :return: list of the first n primes, or None on bad arguments.
    """
    if len(local_argv) != 2:
        print("must add one and only one command argument, , exit ")
        return
    argument_n = int(local_argv[1])  # the 2nd entry on the command line
    if argument_n <= 0:
        print("please input an positive interger number, exit")
        return
    prime_gen = gen_eratosthenes()
    # A plain comprehension of the generated values, instead of the old
    # side-effecting `[retList.append(next(h)) for _ in ...]` idiom.
    ret_list = [next(prime_gen) for _ in range(argument_n)]
    print(ret_list)
    return ret_list
def alternate(*iterables):
    """
    Interleave the given iterables element-wise:
    [a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]

    Iteration stops at the shortest input (standard `zip` behavior).

    >>> alternate([1,4], [2,5], [3,6])
    [1, 2, 3, 4, 5, 6]
    """
    items = []
    for tup in zip(*iterables):
        # extend accepts the tuple directly; no need to copy it into a list
        items.extend(tup)
    return items
def softmax(x, axis=-1, t=-100.):
    """
    Numerically-stabilised softmax.

    Args:
        x (numpy.array): input X
        axis (int): axis to sum over
        t (float): floor; very negative shifted inputs are rescaled so
            their minimum equals t before exponentiation
    Return:
        **softmax** (numpy.array) - softmax(X)
    """
    shifted = x - np.max(x)
    if np.min(shifted) < t:
        shifted = shifted / np.min(shifted) * t
    exps = np.exp(shifted)
    return exps / exps.sum(axis, keepdims=True)
def format_params_in_str_format(format_string):
    """
    Get the "parameter" indices/names of the format_string
    Args:
        format_string: A format string (i.e. a string with {...} to mark parameter placement and formatting
    Returns:
        A lazy iterable of the parameter indices used in the format string,
        in order of appearance, with repetition. Indices are ints for
        numeric fields, strs for named fields, and None for automatic
        field numbering.
    >>> format_string = '{0} (no 1) {2}, and {0} is a duplicate, {} is unnamed and {name} is string-named'
    >>> list(format_params_in_str_format(format_string))
    [0, 2, 0, None, 'name']
    """
    def _coerce(field_name):
        if field_name == '':
            return None
        return int(field_name) if str.isnumeric(field_name) else field_name

    field_names = (parsed[1] for parsed in dflt_formatter.parse(format_string))
    return map(_coerce, filter(_is_not_none, field_names))
def AMO(df, M1=5, M2=10):
    """
    Turnover amount: the 'amount' column scaled to units of 10,000,
    together with its M1- and M2-period moving averages.

    :param df: DataFrame with an 'amount' column
    :param M1: window of the first moving average
    :param M2: window of the second moving average
    :return: DataFrame with columns 'AMOW', 'AMO1', 'AMO2'
    """
    amow = df['amount'] / 10000.0
    return pd.DataFrame({
        'AMOW': amow,
        'AMO1': MA(amow, M1),
        'AMO2': MA(amow, M2),
    })
def tex_initial_states(data):
    """Render each initial state in ``data['init']`` as a LaTeX label.

    :param data: dict with an 'init' sequence of initial-state labels
    :return: tuple ``(data, labels)`` — ``data`` passed through unchanged,
        ``labels`` a list of ``\\lstick{\\ket{...}}`` strings.
    """
    # Raw strings make the LaTeX backslashes explicit (the original relied
    # on '\l' and '\k' not being recognized escape sequences).
    initial_state = [r"\lstick{\ket{" + str(state) + r"}}"
                     for state in data['init']]
    return data, initial_state
import csv
def get_upgraded_dependencies_count(repo_path, django_dependency_sheet) -> tuple:
    """
    Read, parse and classify the repo's django dependencies.

    @param repo_path: path of the repo whose django deps are being checked
    @param django_dependency_sheet: csv containing the latest status of
        django deps (columns "Django Package Name" and "Django 3.2")
    @return: tuple of three de-duplicated (unordered) lists:
        all django deps found in the repo, those that support Django 3.2,
        and those already upgraded in the repo
    """
    deps = DjangoDependencyReader(repo_path).read()
    # Collect into sets so de-duplication happens up front, instead of the
    # old list-append + list(set(...)) dance at the end.
    django_deps = set()
    deps_support_django32 = set()
    upgraded_in_repo = set()
    with open(django_dependency_sheet, encoding="utf8") as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=',', quotechar='"')
        for line in csv_reader:
            package_name = line["Django Package Name"]
            if package_name in deps:  # dict membership; no .keys() needed
                django_deps.add(package_name)
                django32_version = line["Django 3.2"]
                # '-' marks packages with no Django 3.2 support recorded.
                if django32_version and django32_version != '-':
                    deps_support_django32.add(package_name)
                    if parse(deps[package_name]) >= parse(django32_version):
                        upgraded_in_repo.add(package_name)
    return list(django_deps), list(deps_support_django32), list(upgraded_in_repo)
from pygsti.modelmembers.operations import EmbeddedOp as _EmbeddedOp, StaticArbitraryOp as _StaticArbitraryOp
from pygsti.baseobjs.errorgenbasis import ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis
def first_order_gauge_action_matrix_for_prep(prep_superket_vec, target_sslbls, model_state_space,
                                             elemgen_gauge_basis, elemgen_row_basis):
    """
    First-order gauge-action matrix for a state preparation.

    For each elementary error generator in `elemgen_gauge_basis`, applies it
    (embedded on the joint support of the generator and `target_sslbls`) to
    `prep_superket_vec`, giving the first-order change of the prep under
    that gauge direction. The resulting matrix is then projected (via SVD)
    onto the subspace of gauge directions that actually affect the prep,
    re-expressed with rows indexed by `elemgen_row_basis`, and culled of
    all-zero rows.

    NOTE(review): the original docstring said "clifford_superop must be in
    the *std* basis!" — presumably `prep_superket_vec` must likewise be
    expressed in the std basis; confirm against callers.

    Returns
    -------
    culled_action_mx : scipy.sparse.csr_matrix
    updated_row_basis : ExplicitElementaryErrorgenBasis
    """
    #Utilize EmbeddedOp to perform superop embedding
    def _embed(mx, target_labels, state_space): # SAME as in fn above
        if mx.shape[0] == state_space.dim:
            return mx # no embedding needed
        else:
            dummy_op = _EmbeddedOp(state_space, target_labels,
                                   _StaticArbitraryOp(_np.identity(mx.shape[0], 'd'), 'densitymx_slow'))
            embeddedOp = _sps.identity(state_space.dim, mx.dtype, format='lil')
            scale = _np.sqrt(4**len(target_labels) / state_space.dim)  # is this correct??
            #fill in embedded_op contributions (always overwrites the diagonal
            # of finalOp where appropriate, so OK it starts as identity)
            for i, j, gi, gj in dummy_op._iter_matrix_elements('HilbertSchmidt'):
                embeddedOp[i, j] = mx[gi, gj] * scale
            #return embeddedOp.tocsr()  # Note: if sparse, assure that this is in CSR or CSC for fast products
            return embeddedOp.toarray()
    element_action_mx = _sps.lil_matrix((prep_superket_vec.shape[0], len(elemgen_gauge_basis)),
                                        dtype=prep_superket_vec.dtype)
    # Column j = first-order change of the prep under gauge elemgen j.
    for j, (gen_sslbls, gen) in enumerate(elemgen_gauge_basis.elemgen_supports_and_matrices):
        action_sslbls = tuple(sorted(set(gen_sslbls).union(target_sslbls)))  # (union) - joint support of ops
        action_space = model_state_space.create_subspace(action_sslbls)
        #Note: action_space is always (?) going to be the full model_state_space, since target_sslbls for a prep
        # should always be all the sslbls of the model (I think...)
        gen_expanded = _embed(gen, gen_sslbls, action_space)  # expand gen to shared action_space
        if _sps.issparse(gen_expanded):
            gauge_action_deriv = gen_expanded.dot(prep_superket_vec)  # sparse matrices
        else:
            gauge_action_deriv = _np.dot(gen_expanded, prep_superket_vec)
        element_action_mx[:, j] = gauge_action_deriv[:, None]
    #To identify set of vectors {v_i} such that {element_action_mx * v_i} span the range of element_action_mx,
    # we find the SVD of element_action_mx and use the columns of V:
    TOL = 1e-7
    U, s, Vh = _np.linalg.svd(element_action_mx.toarray(), full_matrices=False)  # DENSE - use sparse SVD here?
    n = _np.count_nonzero(s > TOL)
    relevant_basis = Vh[0:n, :].T.conjugate()
    for j in range(relevant_basis.shape[1]):  # normalize columns so largest element is +1.0
        i_max = _np.argmax(_np.abs(relevant_basis[:, j]))
        if abs(relevant_basis[i_max, j]) > 1e-6:
            relevant_basis[:, j] /= relevant_basis[i_max, j]
    relevant_basis = _mt.normalize_columns(relevant_basis)
    # "gauge action" matrix is just the identity on the *relevant* space of gauge transformations:
    action_mx_pre = _np.dot(relevant_basis, relevant_basis.T.conjugate())  # row basis == elemgen_gauge_basis
    action_mx = _sps.lil_matrix((len(elemgen_row_basis), len(elemgen_gauge_basis)),
                                dtype=prep_superket_vec.dtype)
    nonzero_rows = set()
    nonzero_row_labels = {}
    #Convert row-space to be over elemgen_row_basis instead of a elemgen_gauge_basis
    for i, glbl in enumerate(elemgen_gauge_basis.labels):
        new_i = elemgen_row_basis.label_index(glbl)
        if _np.linalg.norm(action_mx_pre[i, :]) > 1e-8:
            action_mx[new_i, :] = action_mx_pre[i, :]
            nonzero_rows.add(i)
            nonzero_row_labels[i] = glbl
    #Remove all all-zero rows and cull these elements out of the row_basis. Actually,
    # just construct a new matrix and basis
    nonzero_row_indices = list(sorted(nonzero_rows))
    labels = [nonzero_row_labels[i] for i in nonzero_row_indices]
    # NOTE(review): rows were written into action_mx at index `new_i` but are
    # read back below at index `i` (the gauge-basis index) — this is only
    # consistent when new_i == i for all nonzero rows; confirm.
    data = []; col_indices = []; rowptr = [0]  # build up a CSR matrix manually from nonzero rows
    for ii, i in enumerate(nonzero_row_indices):
        col_indices.extend(action_mx.rows[i])
        data.extend(action_mx.data[i])
        rowptr.append(len(data))
    culled_action_mx = _sps.csr_matrix((data, col_indices, rowptr),
                                       shape=(len(nonzero_rows), len(elemgen_gauge_basis)), dtype=action_mx.dtype)
    updated_row_basis = _ExplicitElementaryErrorgenBasis(elemgen_row_basis.state_space, labels)
    return culled_action_mx, updated_row_basis | 9c59db8d9555041a3a3ade2b0d92fb7104ee4a94 | 27,753
def FanOut(num):
    """Layer construction function for a fan-out layer.

    Returns the (init_fun, apply_fun) pair of a layer that replicates its
    input `num` times and carries no parameters.
    """
    def init_fun(rng, input_shape):
        return [input_shape] * num, ()

    def apply_fun(params, inputs, **kwargs):
        return [inputs] * num

    return init_fun, apply_fun
import urllib
import logging
def is_online(url="http://detectportal.firefox.com", expected=b"success\n"):
    """
    Check connectivity by fetching ``url`` and comparing the response body
    to ``expected``.

    :param url: URL to fetch; the default is Firefox's captive-portal probe.
    :param expected: exact response body that indicates connectivity.
    :returns: True when the response matches ``expected``; False on a
        mismatch or on any connection error (both are logged).
    """
    try:
        with urllib.request.urlopen(url) as response:
            response_data = response.read()
    except urllib.error.URLError as url_err:
        logging.error(
            "Unable to connect to %s", url, exc_info=url_err
        )
        return False
    if response_data == expected:
        return True
    logging.error(
        "Response from %s was not %s as expected. Received: %s",
        url, expected, response_data
    )
    # The original had a second, unreachable `return False` after this.
    return False
def get_Data_temblor():
    """Fetch rows from the verified earthquake-damage spreadsheet.

    Builds an authorized Sheets API v4 service object and reads range
    'Form Responses 1!A1:AH10000' of the "DAÑOS Y DERRUMBES VERIFICADOS"
    (verified damage and collapses) spreadsheet.

    :return: list of row-value lists, or (implicitly) None after printing
        'No data found.' when the sheet is empty.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = discovery.build('sheets',
                              'v4',
                              http=http,
                              discoveryServiceUrl=discoveryUrl)
    # VERIFIED DAMAGE AND COLLAPSES sheet.
    # To download other sheets, change the name in the `range` field.
    result = service.spreadsheets().values().get(
        spreadsheetId='1i__c44wIg760LmxZcM8oTjDR0cGFVdL9YrjbCcb9Op0',
        range='Form Responses 1!A1:AH10000').execute()
    values = result.get('values', [])
    if not values:
        print('No data found.')
    else:
        return values | 1dca23ae977dc4400b31e323d1c2aac5e6e685a3 | 27,756
def triplet_loss(y_true, y_pred, alpha = 0.2):
    """
    Triplet loss as defined by formula (3): pushes the anchor at least
    ``alpha`` closer to the positive than to the negative.

    Arguments:
    y_true -- true labels; required by the Keras loss signature, unused here.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor images, of shape (None, 128)
            positive -- the encodings for the positive images, of shape (None, 128)
            negative -- the encodings for the negative images, of shape (None, 128)
    alpha -- margin between the positive and negative distances.

    Returns:
    loss -- real number, value of the loss
    """
    anchor = y_pred[0]
    positive = y_pred[1]
    negative = y_pred[2]
    # Squared L2 distance between the anchor and each counterpart.
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
    # Margin hinge, then sum over the training examples.
    margin_term = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    loss = tf.reduce_sum(tf.maximum(margin_term, 0.0), axis=None)
    return loss
import inspect
def node_from_dict(node_dict):
    """Create a new node from its ``node_dict``.

    Effectively a shorthand for looking up the right node class by name and
    calling its ``from_dict`` method.

    Args:
        node_dict (dict): dict-representation of the node.

    Returns:
        New schema node with the specified type and configuration.
    """
    wanted_type = node_dict['node_type']
    for name, cls in globals().items():
        if (name == wanted_type and inspect.isclass(cls)
                and issubclass(cls, SchemaNode)):
            return cls.from_dict(node_dict)
    raise ValueError('Invalid node type specified.')
def _get_activation(upsample_activation):
"""get activation"""
nonlinear = getattr(nn, upsample_activation)
return nonlinear | 4423edf977ccd6db6fa6fdb5de426ecbaaed7e55 | 27,759 |
import time
def computeSensitivity(P, W):
    """
    Compute the sensitivity of each point using a reduction from
    L_infinity to L_1.

    :param P: numpy array of points, one row per point.
    :param W: weights, passed to the bi-criteria approximation.
    :return: numpy array with one sensitivity value per point of P.
    """
    # Append each point's original row index as a trailing column so the
    # per-cluster sensitivities can be scattered back to the right rows.
    P = np.hstack((P, np.arange(P.shape[0])[:, np.newaxis]))
    # Set of flats giving a 2^j approximation to the optimal solution.
    B, idxs = applyBiCriterea(P[:, :-1], W)
    sensitivity_additive_term = initializeSens(P, B, idxs)  # initialize the sensitivities
    unique_center_idxs = np.unique(idxs)  # unique cluster indices
    sensitivity = np.empty((P.shape[0], ))
    clusters = [np.where(idxs == idx)[0] for idx in unique_center_idxs]
    # Apply the L_infinity -> L_1 conversion on each cluster of points,
    # keeping the original-index column attached.
    Qs = [np.hstack(((P[clusters[idx], :-1] - B[idx][1]).dot(B[idx][0].T),
                     P[clusters[idx], -1][:, np.newaxis]))
          for idx in range(len(clusters))]
    for Q in Qs:
        s = computeSensitivityPerCluster(Q)
        # `int` instead of the deprecated/removed `np.int` alias.
        sensitivity[s[:, -1].astype(int)] = s[:, 0]
    sensitivity += 2 ** Utils.J * sensitivity_additive_term  # add the additive term
    return sensitivity
def num_bytes_needed_for_data(rom_dict):
    """
    Get the number of bytes to store the largest data in the rom.

    Args:
        rom_dict (dict(int:int)): Dictionary of address and data values
    Returns:
        (int): Number of bytes needed for largest piece of data
    """
    # dict.itervalues() is Python 2 only; values() works on both 2 and 3.
    largest_data = max(rom_dict.values())
    return num_bytes_for_value(largest_data)
def run_simulation(model, **kwargs):
    """Run the given model using KaSim and return the parsed results.

    Parameters
    ----------
    **kwargs : List of keyword arguments
        All keyword arguments specifying conditions for the simulation are
        passed through to :py:func:`run_kasim` (see that function's
        documentation for details).

    Returns
    -------
    numpy.ndarray
        The KaSim simulation data as a numpy ndarray, accessed with
        ``results[index_name]``: the 'time' index holds the simulation
        time coordinates and each observable's data is available under
        the observable's name.
    """
    kasim_output = run_kasim(model, **kwargs)
    return parse_kasim_outfile(kasim_output['out'])
import importlib
def _bot_exists(botname):
"""
Utility method to import a bot.
"""
module = None
try:
module = importlib.import_module('%s.%s' % (botname, botname))
except ImportError as e:
quit('Unable to import bot "%s.%s": %s' % (botname, botname, str(e)))
return module | c091be6d586faa8aacd48b30f4ce2f4fcc665e0b | 27,763 |
import functools
def validator(fn):
    """Decorator that constructs a scenario validator from given function.
    Decorated function should return ValidationResult on error.
    :param fn: function that performs validation
    :returns: rally scenario validator
    """
    # Three nesting levels: `wrap_given` receives the decorator's own
    # arguments, `wrap_validator` runs the deferred validation, and
    # `wrap_scenario` attaches the validator to the benchmark scenario.
    def wrap_given(*args, **kwargs):
        """Dynamic validation decorator for scenario.
        :param args: the arguments of the decorator of the benchmark scenario
        ex. @my_decorator("arg1"), then args = ("arg1",)
        :param kwargs: the keyword arguments of the decorator of the scenario
        ex. @my_decorator(kwarg1="kwarg1"), then kwargs = {"kwarg1": "kwarg1"}
        """
        @functools.wraps(fn)
        def wrap_validator(config, clients, deployment):
            # NOTE(amaretskiy): validator is successful by default
            return (fn(config, clients, deployment, *args, **kwargs) or
                    ValidationResult(True))
        def wrap_scenario(scenario):
            # TODO(boris-42): remove this in future.
            wrap_validator.permission = getattr(fn, "permission",
                                                consts.EndpointPermission.USER)
            scenario._meta_setdefault("validators", [])
            scenario._meta_get("validators").append(wrap_validator)
            return scenario
        return wrap_scenario
    return wrap_given | 0237711c6910eaf11eaa91df6bfe22283fdea080 | 27,764
def bootstrap_alert_message(msg, alert_type):
    """
    Wrap Ajax error message for display in a dismissable Bootstrap alert.

    :param msg: Message text (a default is substituted when falsy)
    :param alert_type: one of 'danger' (or its alias 'error'), 'success',
        'info', 'warning'; anything else falls back to 'warning'
    :return: html formatted message
    """
    if not msg:
        msg = _('An unknown error has occurred')
    if alert_type == 'error':
        alert_type = 'danger'
    # Proper membership test: the original checked substring containment in
    # 'danger, success, info, warning', so fragments like 'nger' or
    # 'info, success' were accepted as valid alert types.
    if alert_type not in ('danger', 'success', 'info', 'warning'):
        alert_type = 'warning'
    # 'danger' alerts are labelled "Error" for the user.
    alert_label = 'error' if alert_type == 'danger' else alert_type
    f_msg = """
    <div class="alert alert-{0} alert-dismissable fade show">
        <button type="button" class="close" data-dismiss="alert">&times;</button>
        <strong>{1}:</strong> {2}.
    </div>
    """.format(alert_type, alert_label.title(), msg)
    return f_msg
def get_message_dispatched(correlation_id, steps, primary=True):
    """
    Return the message-dispatched flag from cache (counterpart of the
    setter that records a message as dispatched).

    :param correlation_id: a str guid for the fsm
    :param steps: an integer corresponding to the step in the fsm execution
    :param primary: bool, True to read from the primary cache source,
        False for the secondary one
    :return: the cached flag value for a recognized cache backend;
        implicitly None when no cache source is configured, when the
        service is unrecognized, or for an unknown Elasticache engine
    """
    if primary:
        source_arn = get_primary_cache_source()
    else:
        source_arn = get_secondary_cache_source()
    service = get_arn_from_arn_string(source_arn).service
    if not service: # pragma: no cover
        log_once(logger.warning, "No cache source for primary=%s" % primary)
    elif service == AWS.ELASTICACHE:
        # Elasticache: dispatch on the engine behind the cluster.
        engine, _ = _get_elasticache_engine_and_connection(source_arn)
        if engine == AWS_ELASTICACHE.ENGINE.MEMCACHED:
            return _get_message_dispatched_memcache(source_arn, correlation_id, steps)
        elif engine == AWS_ELASTICACHE.ENGINE.REDIS:
            return _get_message_dispatched_redis(source_arn, correlation_id, steps)
    elif service == AWS.DYNAMODB:
        return _get_message_dispatched_dynamodb(source_arn, correlation_id, steps) | cd9a373f6b104617dfae2d488fc2348ffae61c5b | 27,766
def _format_exponent_notation(input_number, precision, num_exponent_digits):
"""
Format the exponent notation. Python's exponent notation doesn't allow
for a user-defined number of exponent digits.
Based on [Anurag Uniyal's answer][answer] to the StackOverflow
question ['Python - number of digits in exponent'][question]
[question]: http://stackoverflow.com/q/9910972/95592
[answer]: http://stackoverflow.com/a/9911741/95592
"""
python_exponent_notation = '{number:.{precision}e}'.format(
number=input_number,
precision=precision)
mantissa, exponent = python_exponent_notation.split('e')
# Add 1 to the desired number of exponenent digits to account for the sign
return '{mantissa}e{exponent:+0{exp_num}d}'.format(
mantissa=mantissa,
exponent=int(exponent),
exp_num=num_exponent_digits+1) | 59f61897c70ca1d9f95412b2892d5c9592e51561 | 27,767 |
import typing
def unicode_blackboard_activity_stream(
    activity_stream: typing.List[blackboard.ActivityItem]=None,
    indent: int=0,
    show_title: bool=True
):
    """
    Pretty-print the blackboard activity stream for the console.

    Args:
        activity_stream: the activity log; when None, the entire stream is used
        indent: number of characters to indent the blackboard output
        show_title: whether to include the title in the output
    """
    symbol_set = unicode_symbols if console.has_unicode() else ascii_symbols
    return _generate_text_activity(
        activity_stream=activity_stream,
        show_title=show_title,
        indent=indent,
        symbols=symbol_set
    )
import argparse
import logging
def parse_args(args):
    """Parse command line parameters.

    Args:
        args ([str]): command line parameters as list of strings

    Returns:
        :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(description="Snapcraft Jsonnet parser")
    parser.add_argument(
        "--version",
        action="version",
        version=f"sc-jsonnet {__version__}")
    parser.add_argument(
        "-o", "--output-file",
        dest="outfile",
        type=str,
        metavar="STR",
        help="set the output file. Should be set to 'snap/snapcraft.yaml' " +
             "for compatibility.")
    parser.add_argument(
        "-v", "--verbose",
        dest="loglevel",
        action="store_const",
        const=logging.INFO,
        help="set loglevel to INFO")
    parser.add_argument(
        "-vv", "--very-verbose",
        dest="loglevel",
        action="store_const",
        const=logging.DEBUG,
        help="set loglevel to DEBUG")
    return parser.parse_args(args)
def normalize(vector):
    """Normalize a vector to unit length.

    Args:
        vector (list/tuple/numpy.ndarray): Array to be normalized
    Return:
        vector (numpy.ndarray): Normalized vector; a zero-length vector is
        returned unchanged (as an ndarray).
    """
    arr = np.asarray(vector)
    length = np.sqrt((arr ** 2).sum())
    return arr if length == 0 else arr / length
def get_struc_qty(*args):
    """get_struc_qty() -> size_t"""
    # Thin auto-generated wrapper: forwards directly to the native IDA SDK
    # call, which returns the number of structure types currently defined.
    return _idaapi.get_struc_qty(*args)
from typing import Callable
def evaluate0(gametree_: Callable[[Board], Node],
              static_eval_: Callable[[Board], int],
              prune_: Callable[[Node], Node]) -> Callable[[Board], int]:
    """Build a tree-evaluation function from tree builder, evaluator and pruner."""
    def evaluate_(board: Board) -> int:
        # Grow the game tree, prune it, statically score every node,
        # then pick the best score for the maximizing player.
        pruned = prune_(gametree_(board))
        return maximize0(maptree(static_eval_, pruned))
    return evaluate_
import http
def fortune(inp):
    """
    .fortune -- returns one random card and it's fortune
    """
    # Draw one card from the tarot API; report failure gracefully.
    try:
        drawn = http.get_json("https://tarot-api.com/draw/1")
    except HTTPError:
        return "The spirits are displeased."
    card = drawn[0]
    keywords = ", ".join(card["keywords"])
    return card["name"] + ": " + keywords
def declin_0(x, obl):
    """
    Declination of a point on the ecliptic.

    :param x: longitude of the point in degrees
    :param obl: obliquity of the ecliptic in degrees
    :return: declination in degrees
    """
    # sin(dec) = sin(longitude) * sin(obliquity); convert degrees <-> radians
    # with the module-level RAD / DEG factors.
    sin_declination = m.sin(x * RAD) * m.sin(obl * RAD)
    return DEG * m.asin(sin_declination)
def network_data_structures(stream_names_tuple, agent_descriptor_dict):
    """Builds data structures for the network. These data
    structures are helpful for animating the network and
    for building networks of processes.

    Parameters
    ----------
    Same as for make_network.

    Return Values
    -------------
    (stream_to_agent_list_dict,
     agent_to_stream_dict,
     agent_to_agent_list_dict,
     agent_from_agent_list_dict)

    stream_to_agent_list_dict
        key: stream_name
        value: list of agent_name.
        The stream with name stream_name (the key) is an input stream of
        each agent whose name is in the list (the value). For example if
        key is 's' and value is ['u', 'v', 'w'] then the stream with name
        's' is an input stream of the agents with names 'u', 'v', and 'w'.
    agent_to_stream_dict
        key: stream_name
        value: str. A single agent_name.
        The stream with name stream_name (the key) is the unique output
        stream of the agent with name agent_name (the value). For example,
        if a key is 's' and the corresponding value is 'a', then the
        stream with name 's' is generated by the agent with name 'a'.
    agent_to_agent_list_dict
        key: agent_name
        value: list of agent names
        The agent with name agent_name (the key) has an output stream to
        each agent whose name is in value.
    agent_from_agent_list_dict
        key: agent_name
        value: list of agent names
        The agent with name agent_name (the key) has an input stream from
        each agent whose name is in value.

    Raises
    ------
    Exception
        If two agents both declare the same output stream.
    """
    # Map each stream to the agents that consume it, and each stream to the
    # single agent that produces it.
    # NOTE: iteritems() was Python-2 only; items() works on both.
    stream_to_agent_list_dict = {name: [] for name in stream_names_tuple}
    agent_to_stream_dict = {}
    for agent_name, descriptor in agent_descriptor_dict.items():
        input_stream_list = descriptor[0]
        output_stream_list = descriptor[1]
        for stream_name in input_stream_list:
            stream_to_agent_list_dict[stream_name].append(agent_name)
        for stream_name in output_stream_list:
            if stream_name in agent_to_stream_dict:
                # A stream must have exactly one producer.
                raise Exception(
                    '{} output by {} and {}'.format(
                        stream_name, agent_to_stream_dict[stream_name],
                        agent_name))
            agent_to_stream_dict[stream_name] = agent_name

    # If a stream is output of agent x and input to agents y, z
    # then agent x outputs to [y, z].
    agent_to_agent_list_dict = {name: [] for name in agent_descriptor_dict}
    for stream_name, agent_name in agent_to_stream_dict.items():
        agent_to_agent_list_dict[agent_name].extend(
            stream_to_agent_list_dict[stream_name])

    # If a stream is an input of agent x and is an output of agent y
    # then x receives input from y.
    agent_from_agent_list_dict = {name: [] for name in agent_descriptor_dict}
    for stream_name, agent_name_list in stream_to_agent_list_dict.items():
        for receiving_agent_name in agent_name_list:
            agent_from_agent_list_dict[receiving_agent_name].append(
                agent_to_stream_dict[stream_name])

    return (stream_to_agent_list_dict, agent_to_stream_dict,
            agent_to_agent_list_dict, agent_from_agent_list_dict)
import numpy
from typing import OrderedDict
def to_onnx(model, X=None, name=None, initial_types=None,
            target_opset=None, options=None, rewrite_ops=False,
            white_op=None, black_op=None, final_types=None):
    """
    Converts a model using :epkg:`sklearn-onnx`.

    @param model model to convert or a function
        wrapped into :epkg:`_PredictScorer` with
        function :epkg:`make_scorer`
    @param X training set (at least one row),
        can be None, it is used to infered the
        input types (*initial_types*)
    @param initial_types if *X* is None, then *initial_types* must be
        defined
    @param name name of the produced model
    @param target_opset to do it with a different target opset
    @param options additional parameters for the conversion
    @param rewrite_ops rewrites some existing converters,
        the changes are permanent
    @param white_op white list of ONNX nodes allowed
        while converting a pipeline, if empty,
        all are allowed
    @param black_op black list of ONNX nodes allowed
        while converting a pipeline, if empty,
        none are blacklisted
    @param final_types a python list. Works the same way as
        initial_types but not mandatory, it is used
        to overwrites the type (if type is not None)
        and the name of every output.
    @return converted model

    The function rewrites function *to_onnx* from :epkg:`sklearn-onnx`
    but may changes a few converters if *rewrite_ops* is True.
    For example, :epkg:`ONNX` only supports *TreeEnsembleRegressor*
    for float but not for double. It becomes available
    if ``rewrite_ops=True``.

    .. faqref::
        :title: How to deal with a dataframe as input?

        Each column of the dataframe is considered as an named input.
        The first step is to make sure that every column type is correct.
        :epkg:`pandas` tends to select the least generic type to
        hold the content of one column. :epkg:`ONNX` does not automatically
        cast the data it receives. The data must have the same type with
        the model is converted and when the converted model receives
        the data to predict.

        .. runpython::
            :showcode:

            from io import StringIO
            from textwrap import dedent
            import numpy
            import pandas
            from pyquickhelper.pycode import ExtTestCase
            from sklearn.preprocessing import OneHotEncoder
            from sklearn.pipeline import Pipeline
            from sklearn.compose import ColumnTransformer
            from mlprodict.onnx_conv import to_onnx
            from mlprodict.onnxrt import OnnxInference

            text = dedent('''
                __SCHEMA__
                7.4,0.7,0.0,1.9,0.076,11.0,34.0,0.9978,3.51,0.56,9.4,5,red
                7.8,0.88,0.0,2.6,0.098,25.0,67.0,0.9968,3.2,0.68,9.8,5,red
                7.8,0.76,0.04,2.3,0.092,15.0,54.0,0.997,3.26,0.65,9.8,5,red
                11.2,0.28,0.56,1.9,0.075,17.0,60.0,0.998,3.16,0.58,9.8,6,red
                ''')
            text = text.replace(
                "__SCHEMA__",
                "fixed_acidity,volatile_acidity,citric_acid,residual_sugar,chlorides,"
                "free_sulfur_dioxide,total_sulfur_dioxide,density,pH,sulphates,"
                "alcohol,quality,color")
            X_train = pandas.read_csv(StringIO(text))
            for c in X_train.columns:
                if c != 'color':
                    X_train[c] = X_train[c].astype(numpy.float32)
            numeric_features = [c for c in X_train if c != 'color']
            pipe = Pipeline([
                ("prep", ColumnTransformer([
                    ("color", Pipeline([
                        ('one', OneHotEncoder()),
                        ('select', ColumnTransformer(
                            [('sel1', 'passthrough', [0])]))
                    ]), ['color']),
                    ("others", "passthrough", numeric_features)
                ])),
            ])
            pipe.fit(X_train)
            pred = pipe.transform(X_train)
            print(pred)
            model_onnx = to_onnx(pipe, X_train, target_opset=12)
            oinf = OnnxInference(model_onnx)
            # The dataframe is converted into a dictionary,
            # each key is a column name, each value is a numpy array.
            inputs = {c: X_train[c].values for c in X_train.columns}
            inputs = {c: v.reshape((v.shape[0], 1)) for c, v in inputs.items()}
            onxp = oinf.run(inputs)
            print(onxp)
    """
    # Models built from mlprodict's own ONNX operators know how to convert
    # themselves; delegate immediately.
    if isinstance(model, OnnxOperatorMixin):
        if not hasattr(model, 'op_version'):
            raise RuntimeError(  # pragma: no cover
                "Missing attribute 'op_version' for type '{}'.".format(
                    type(model)))
        return model.to_onnx(
            X=X, name=name, options=options, black_op=black_op,
            white_op=white_op, final_types=final_types)
    # Optionally swap in mlprodict's rewritten converters; the originals are
    # restored at the end of this function via old_values.
    if rewrite_ops:
        old_values = register_rewritten_operators()
        register_converters()
    else:
        old_values = None

    def _guess_type_(X, itype, dtype):
        # Infer (initial_types, dtype, numpy dtype) from the sample data X
        # when they are not given explicitly.
        initial_types = guess_initial_types(X, itype)
        if dtype is None:
            if hasattr(X, 'dtypes'):  # DataFrame
                dtype = numpy.float32
            elif hasattr(X, 'dtype'):
                dtype = X.dtype
            elif hasattr(X, 'type'):
                dtype = guess_numpy_type(X.type)
            elif initial_types is not None:
                dtype = guess_numpy_type(initial_types[0][1])
            else:
                raise RuntimeError(  # pragma: no cover
                    "dtype cannot be guessed: {}".format(
                        type(X)))
            # Everything that is not explicitly double is coerced to float32.
            if dtype != numpy.float64:
                dtype = numpy.float32
        if dtype is None:
            raise RuntimeError("dtype cannot be None")  # pragma: no cover
        if isinstance(dtype, FloatTensorType):
            dtype = numpy.float32
        elif isinstance(dtype, DoubleTensorType):
            dtype = numpy.float64
        new_dtype = dtype
        if isinstance(dtype, numpy.ndarray):
            new_dtype = dtype.dtype
        elif isinstance(dtype, DataType):
            new_dtype = numpy.float32
        if new_dtype not in (numpy.float32, numpy.float64, numpy.int64,
                             numpy.int32):
            raise NotImplementedError(  # pragma: no cover
                "dtype should be real not {} ({})".format(new_dtype, dtype))
        return initial_types, dtype, new_dtype

    if isinstance(model, _PredictScorer):
        # A scorer expects its inputs as an OrderedDict of named arrays;
        # each key becomes one ONNX input.
        if X is not None and not isinstance(X, OrderedDict):
            raise ValueError("For a scorer, parameter X should be a OrderedDict not {}."
                             "".format(type(X)))
        if initial_types is None:
            dts = []
            initial_types = []
            for k, v in X.items():
                if hasattr(v, 'dtype'):
                    dtype = guess_numpy_type(v.dtype)
                else:
                    dtype = v
                if dtype != numpy.float64:
                    dtype = numpy.float32
                it, _, ndt = _guess_type_(v, None, dtype)
                # Rename each inferred input after its dictionary key.
                for i in range(len(it)):  # pylint: disable=C0200
                    it[i] = (k, it[i][1])  # pylint: disable=C0200
                initial_types.extend(it)
                dts.append(ndt)
            ndt = set(dts)
            if len(ndt) != 1:
                raise RuntimeError(  # pragma: no cover
                    "Multiple dtype is not efficient {}.".format(ndt))
        res = convert_scorer(model, initial_types, name=name,
                             target_opset=target_opset, options=options,
                             black_op=black_op, white_op=white_op,
                             final_types=final_types)
    else:
        # Regular scikit-learn estimator/pipeline path.
        if name is None:
            name = "mlprodict_ONNX(%s)" % model.__class__.__name__
        initial_types, dtype, _ = _guess_type_(X, initial_types, None)
        res = convert_sklearn(model, initial_types=initial_types, name=name,
                              target_opset=target_opset, options=options,
                              black_op=black_op, white_op=white_op,
                              final_types=final_types)
    # Undo the temporary converter rewrites, if any were installed.
    if old_values is not None:
        register_rewritten_operators(old_values)
    return res
def setRx(phi):
    """Batch of rotation matrices about the x axis.

    Args:
        phi (numpy.ndarray): rotation angles in radians, shape (N,).

    Returns:
        numpy.ndarray: array of shape (N, 3, 3) where entry i is
        [[1, 0, 0], [0, cos phi_i, -sin phi_i], [0, sin phi_i, cos phi_i]].
    """
    Rx = np.zeros((phi.size, 3, 3))
    Rx[:, 0, 0] = 1.
    Rx[:, 1, 1] = np.cos(phi)
    # BUGFIX: was Rx[:, 0, 0] (always 1); the (2, 2) entry must be cos(phi).
    Rx[:, 2, 2] = Rx[:, 1, 1]
    Rx[:, 1, 2] = -np.sin(phi)
    # BUGFIX: was -Rx[:, 0, 2], which is never set and therefore always 0;
    # the (2, 1) entry must be sin(phi) = -(-sin(phi)).
    Rx[:, 2, 1] = -Rx[:, 1, 2]
    return Rx
def discrete_resample(df, freq_code, agg_fun, remove_inter=False, **kwargs):
    """
    Resample discrete data, assuming a linear interpolation between points.

    Parameters
    ----------
    df: DataFrame or Series
        DataFrame or Series with a time index.
    freq_code: str
        Pandas frequency code. e.g. 'D'.
    agg_fun : str
        The aggregation function to be applied on the resampling object.
    remove_inter : bool
        If True, drop resampled rows whose period contains no original data.
    **kwargs
        Any keyword args passed to Pandas resample.

    Returns
    -------
    Pandas DataFrame or Series
    """
    if not isinstance(df, (pd.Series, pd.DataFrame)):
        raise TypeError('The object must be either a DataFrame or a Series')
    if not isinstance(df.index, pd.DatetimeIndex):
        raise ValueError('The index must be a datetimeindex')
    # Insert NaN rows at every regular period boundary missing from the index.
    regular = pd.date_range(df.index[0].ceil(freq_code),
                            df.index[-1].floor(freq_code), freq=freq_code)
    missing = regular[~regular.isin(df.index)]
    if isinstance(df, pd.Series):
        filler = pd.Series(np.nan, index=missing)
    else:
        filler = pd.DataFrame(np.nan, index=missing, columns=df.columns)
    # Linearly interpolate in time, then take midpoints of consecutive
    # samples so the aggregation sees trapezoid-style values.
    combined = pd.concat([df, filler]).sort_index().interpolate('time')
    midpoints = (combined + combined.shift(-1)) / 2
    resampled = midpoints.resample(freq_code, **kwargs).agg(agg_fun).dropna()
    if remove_inter:
        observed = df.index.floor(freq_code).unique()
        resampled = resampled[resampled.index.isin(observed)].copy()
    return resampled
def reflect(data, width):
    """Reflect a data word, i.e. reverse its bit order.

    Args:
        data (int): word whose bits are reversed.
        width (int): number of significant bits in *data*.

    Returns:
        int: *data* with its lowest *width* bits in reverse order.
    """
    # Peel bits off the low end of data and push them onto reflected,
    # which reverses their order.
    reflected = data & 0x01
    for _ in range(width - 1):
        data >>= 1
        reflected = (reflected << 1) | (data & 0x01)
    return reflected
def snell_angle_2(angle_1, n_1, n_2):
    """Angle of refraction of a ray crossing between two media (Snell's law).

    Args: angle_1 (array_like[float]): angle of incidence with respect to
    surface normal in radians. n_1 (float): index of refraction in first
    medium. n_2 (float): index of refraction in second medium. Returns:
    float: angle of refraction in radians.
    """
    # n1 sin(theta1) = n2 sin(theta2)  =>  theta2 = arcsin(n1/n2 sin(theta1))
    sin_angle_2 = n_1 / n_2 * np.sin(angle_1)
    return np.arcsin(sin_angle_2)
def npv(ico, nci, r, n):
    """Net present value of a cash-flow generating investment.

    ico = Initial Capital Outlay
    nci = net cash inflows per period
    r = discounted rate
    n = number of periods

    Example: npv(100000, 15000, .03, 10)
    """
    # Discount each period's inflow back to present value, then subtract
    # the initial outlay.
    discounted = [nci / ((1 + r) ** period) for period in range(1, n + 1)]
    return sum(discounted) - ico
import time
import functools
import threading
def instrument_endpoint(time_fn=time.time):
    """Decorator factory that instruments Cloud Endpoint methods.

    Records per-request status code and latency via http_metrics, and
    kicks off a background metrics flush when one is due.
    """
    def decorator(fn):
        method_name = fn.__name__
        assert method_name

        @functools.wraps(fn)
        def decorated(service, *args, **kwargs):
            endpoint_name = '/_ah/spi/%s.%s' % (
                service.__class__.__name__, method_name)
            start_time = time_fn()
            response_status = 0
            interface.state.store.initialize_context()
            # Flush metrics in a background thread so the request isn't
            # blocked; join it before reporting this request's metrics.
            flush_thread = None
            now = time_fn()
            if need_to_flush_metrics(now):
                flush_thread = threading.Thread(
                    target=_flush_metrics, args=(now,))
                flush_thread.start()
            try:
                result = fn(service, *args, **kwargs)
                response_status = 200
                return result
            except endpoints.ServiceException as e:
                response_status = e.http_status
                raise
            except Exception:
                response_status = 500
                raise
            finally:
                if flush_thread:
                    flush_thread.join()
                elapsed_ms = int((time_fn() - start_time) * 1000)
                http_metrics.update_http_server_metrics(
                    endpoint_name, response_status, elapsed_ms)
        return decorated
    return decorator
def manhattan_distances(X, Y):
    """Compute pairwise Manhattan distance between the rows of two matrices
    X (shape MxK) and Y (shape NxK). The output of this function is a matrix
    of shape MxN containing the Manhattan distance between two rows.

    Arguments:
        X {np.ndarray} -- First matrix, containing M examples with K features each.
        Y {np.ndarray} -- Second matrix, containing N examples with K features each.

    Returns:
        D {np.ndarray}: MxN matrix with Manhattan distances between rows of X
        and rows of Y.
    """
    # Delegate each pairwise computation to manhattan_distance; a nested
    # comprehension replaces the manual append loops.
    return np.array([[manhattan_distance(row_x, row_y) for row_y in Y]
                     for row_x in X])
def remove_access_group(request):
    """
    Remove an access group from the image.

    Expects a POST with a numeric image ``id`` and an ``access_group`` id;
    redirects back to the image management page (or the images list when
    the request is invalid).
    """
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        return redirect('images')
    # Validate POST payload before touching the API.
    if 'id' not in request.POST or not request.POST.get('id').isdigit() or 'access_group' not in request.POST:
        messages.error(request, "Invalid POST request.")
        return redirect('images')
    group_id = request.POST.get('access_group')
    img_id = request.POST.get('id')
    client = get_httpclient_instance(request)
    params = {"access_groups": [group_id]}
    try:
        client.containers.images(img_id).remove_access_groups.post(params)
        messages.success(request, "Group successfully removed from image.")
    except Exception as e:
        # Best effort: surface the API error to the user, then fall through
        # to the same redirect as the success path.
        messages.error(request, api_error_message(e, params))
    return redirect('image_manage', img_id)
import os
def convert_to_jpg_if_needed(src):
    """
    Convert the source image to JPEG when its format is not web-supported.

    :param src: the source file
    :returns: None if the image is already in an accepted web format,
        otherwise a new django File containing the JPEG data
    """
    image = Image.open(src)
    if image.format in settings.XGDS_IMAGE_ACCEPTED_WEB_FORMATS:
        return None
    # Re-encode the image in memory as JPEG.
    buffer = BytesIO()
    image.save(buffer, "JPEG")
    converted = File(buffer)
    base_name, _ = os.path.splitext(os.path.basename(src.name))
    converted.name = '%s.jpg' % base_name
    return File(converted)
def replace_strings_in_file(i):
    """
    Input:  {
              file                   - input file
              (file_out)             - if !='', use this file for output,
                                       otherwise overwrite original one!
              replacement_json_file  - replacement file with multiple strings
                                       to substitute
            }

    Output: {
              return    - return code = 0, if successful
                          > 0, if error
              (error)   - error text if return > 0
              (updated) - if 'yes', file was updated
            }
    """
    # NOTE: the original read i.get('out', '') into an unused local; removed.
    src_path = i['file']
    replacement_path = i['replacement_json_file']
    dst_path = i.get('file_out', '')
    if dst_path == '':
        dst_path = src_path
    loaded = ck.load_text_file({'text_file': src_path})
    if loaded['return'] > 0:
        return loaded
    original_text = loaded['string']
    loaded = ck.load_json_file({'json_file': replacement_path})
    if loaded['return'] > 0:
        return loaded
    replacements = loaded['dict']
    new_text = original_text
    for needle, value in replacements.items():
        new_text = new_text.replace(needle, value)
    result = {'return': 0, 'updated': 'no'}
    # Only write when the content changed or a different destination is used.
    if new_text != original_text or src_path != dst_path:
        result = ck.save_text_file({'text_file': dst_path, 'string': new_text})
        result['updated'] = 'yes'
    return result
def fill_polygon(points, im_shape):
    """Fill the polygon defined by convex or contour points.

    Parameters
    ----------
    points: array
        Coordinates of the points that define the convex or contour of the mask
    im_shape: array
        Array shape of the mask

    Returns
    -------
    im_cnt: array
        Filled contour or convex
    """
    # Single-channel uint8 canvas, filled in place by OpenCV.
    mask = np.zeros((im_shape[0], im_shape[1], 1), np.uint8)
    cv.fillPoly(mask, [points], (255, 255))
    return mask
def load(section, option, archive=_ConfigFile):
    """
    Load a configuration value.

    :param section: config section name
    :param option: option name within the section
    :param archive: path of the configuration file
    :returns: the option value, or None (with a message on stderr) on error
    """
    cfg = ConfigParser()
    # NOTE: rewritten from Python-2-only syntax ('except Exception, e',
    # file(), deprecated readfp()) which is a SyntaxError on Python 3.
    try:
        with open(archive) as config_file:
            cfg.read_file(config_file)
    except Exception as e:
        sys.stderr.write("%s, %s\n" % (archive, getattr(e, 'strerror', e)))
        return
    try:
        return cfg.get(section, option)
    except Exception:
        sys.stderr.write("Incorrect value for %s or %s parameter\n" %
                         (section, option))
        return
def merge_duplicates(model_name, keep_descriptors=False):
    """
    Identifies repeated experimental values and returns mean values for those
    data along with their standard deviation. Only aggregates experimental
    values that have been acquired at the same temperature and pressure.

    Parameters
    ----------
    model_name: dev_model
        the dev_model object to be interrogated
    keep_descriptors: boolean, default False
        if True descriptors will be included in the output DataFrame

    Returns
    -----------
    out: dataframe
        pandas DataFrame of the original data where repeated measurements
        have been averaged and their variance stored in a separate column
    """
    # Number of model output columns: Data_summary rows beyond the 6 fixed
    # metadata rows — presumably; TODO confirm against dev_model layout.
    model_outputs = -6 + model_name.Data_summary.shape[0]
    devmodel = model_name
    cols = devmodel.Data.columns
    # If every output column is below 700, the values are assumed to be
    # stored on a log scale and are exponentiated in place
    # (exp(~709) would overflow float64) — NOTE(review): confirm.
    if (devmodel.Data.iloc[:, -(4 + model_outputs):-4].max() < 700).all():
        for output_index in range(model_outputs):
            devmodel.Data.iloc[:, -(5 + output_index)] = \
                devmodel.Data.iloc[:, -(5 + output_index)].apply(
                    lambda x: exp(float(x)))
    # Per-salt (cation, anion) mean and standard deviation of every output.
    output_val = pd.DataFrame()
    output_xtd = pd.DataFrame()
    for output_index in range(model_outputs):
        val = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                    )[cols[-(5 + output_index)]].mean().\
            reset_index()
        xtd = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                    )[cols[-(5 + output_index)]].std().\
            reset_index()
        if output_index == 0:
            output_val = val
            output_xtd = xtd
        else:
            output_val = pd.merge(output_val, val)
            output_xtd = pd.merge(output_xtd, xtd)
    # Count of repeated measurements per salt; note this reuses the last
    # output_index from the loop above (any output column gives the same count).
    size = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                 )[cols[-(5 + output_index)]].count().\
        reset_index()
    cations = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                    )['name-cation'].first().reset_index()
    anions = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                   )['name-anion'].first().reset_index()
    size.columns.values[2] = "count"
    salts = (devmodel.Data["smiles-cation"] + "." + devmodel.
             Data["smiles-anion"]).unique()
    print("Identified {} unique salts in {} datapoints".
          format(len(salts), devmodel.Data.shape[0]))
    # Assemble the summary frame: means, stds, counts and component names.
    out = pd.merge(output_val, output_xtd,
                   on=['smiles-cation', 'smiles-anion'],
                   suffixes=['_mean', '_std'])
    out = pd.merge(out, size)
    out = pd.merge(out, cations)
    out = pd.merge(out, anions)
    if keep_descriptors:
        # Attach the per-ion descriptor tables, suffixing columns by ion kind.
        cationDescriptors = load_data("cationDescriptors.csv")
        cationDescriptors.columns = [str(col) + '-cation' for
                                     col in cationDescriptors.columns]
        anionDescriptors = load_data("anionDescriptors.csv")
        anionDescriptors.columns = [str(col) + '-anion' for
                                    col in anionDescriptors.columns]
        new_df = pd.merge(cationDescriptors, out,
                          on=["name-cation", "smiles-cation"], how="right")
        new_df = pd.merge(anionDescriptors, new_df,
                          on=["name-anion", "smiles-anion"], how="right")
        out = new_df
    return out
def autoencoder_cost_and_grad_sparse(theta, visible_size, hidden_size, lambda_, rho_, beta_, data):
    """
    Version of cost and grad that incorporates the hidden layer sparsity constraint

    rho_ : the target sparsity limit for each hidden node activation
    beta_ : controls the weight of the sparsity penalty term relative
        to other loss components

    The input theta is a 1-dimensional array because scipy.optimize.minimize
    expects the parameters being optimized to be a 1d array.
    First convert theta from a 1d array to the (W1, W2, b1, b2)
    matrix/vector format, so that this follows the notation convention of the
    lecture notes and tutorial.

    Returns:
        cost : scalar representing the overall cost J(theta)
        grad : array representing the corresponding gradient of each element of theta
    """
    m = data.shape[1]
    # Renamed from `len`, which shadowed the builtin.
    n_weights = visible_size * hidden_size
    w1 = theta[0:n_weights].reshape((hidden_size, visible_size))  # (h,v)
    w2 = theta[n_weights:2 * n_weights].reshape((visible_size, hidden_size))  # (v,h)
    b1 = theta[2 * n_weights:2 * n_weights + hidden_size].flatten()  # (h)
    b2 = theta[2 * n_weights + hidden_size:].flatten()  # (v)

    # FORWARD PROPAGATION (Lecture 24, Slides 11-13)
    # Vectorized implementation lives in autoencoder_feedforward; it returns
    # the stacked pre-activations and activations of both layers.
    tau = autoencoder_feedforward(theta, visible_size, hidden_size, data)
    z2 = tau[0:hidden_size]  # (h,m)
    a2 = tau[hidden_size:2 * hidden_size]  # (h,m)
    z3 = tau[2 * hidden_size:2 * hidden_size + visible_size]  # (v,m)
    h = tau[2 * hidden_size + visible_size:]  # (v,m)

    # COST FUNCTION (Lecture 24, Slide 15)
    # J(W,b) = squared error term + weight decay (regularization) term.
    cost = np.linalg.norm(h - data)**2/2./m + lambda_/2. * (np.linalg.norm(w1)**2 + np.linalg.norm(w2)**2)

    # SPARSITY PENALTY
    # http://ufldl.stanford.edu/wiki/index.php/Autoencoders_and_Sparsity
    # rho-hat_j: mean activation of hidden unit j over the batch.
    rhat = np.sum(a2, axis=1) / m
    # Kullback-Leibler (KL) divergence between the target sparsity rho_
    # and the observed mean activations, summed over hidden units.
    kl = np.sum(rho_ * np.log(rho_/rhat) + (1-rho_) * np.log((1-rho_)/(1-rhat)))
    cost += beta_ * kl

    # BACKPROPAGATION (Lecture 24, Slides 15-16)
    # Error terms; delta2 carries the extra sparsity derivative, broadcast
    # across the batch dimension.
    sparsity_der = beta_ * (-rho_/rhat + (1-rho_)/(1-rhat))
    delta3 = -(data - h) * derivative(z3)
    delta2 = (w2.T.dot(delta3) + np.repeat(sparsity_der, m).reshape((sparsity_der.shape[0], m))) * derivative(z2)

    # Partial derivatives (Slide 15), plus the weight-decay gradient.
    w1_grad = delta2.dot(data.T) / m + lambda_ * w1
    w2_grad = delta3.dot(a2.T) / m + lambda_ * w2
    b1_grad = np.sum(delta2, axis=1) / m
    b2_grad = np.sum(delta3, axis=1) / m
    grad = np.concatenate((w1_grad.flatten(), w2_grad.flatten(), b1_grad, b2_grad))
    return cost, grad
def isnpixok(npix):
    """Return :const:`True` if npix is a valid healpix map size.

    Parameters
    ----------
    npix : int, scalar or array-like
        integer value to be tested

    Returns
    -------
    ok : bool, scalar or array-like
        :const:`True` if given value is a valid number of pixels,
        :const:`False` otherwise

    Examples
    --------
    >>> import healpy as hp
    >>> hp.isnpixok(12)
    True
    >>> hp.isnpixok(768)
    True
    """
    # npix is valid iff npix == 12 * nside**2 for an integer nside.
    nside = np.sqrt(np.asarray(npix) / 12.0)
    return np.floor(nside) == nside
from cep_price_console.utils import config
import logging
import os
import yaml
def setup_logging(
        default_level=logging.DEBUG,
        env_key='LOG_CFG'
):
    """Setup logging configuration.

    Reads the YAML logging config (path overridable via the *env_key*
    environment variable), patches in the log file locations from config,
    and applies it; falls back to basicConfig with *default_level* when the
    config file is missing.

    Returns a list of diagnostic messages describing what happened.
    """
    # NOTE: the original declared `global debug_log/info_log/error_log` but
    # never assigned them; those no-op declarations were removed.
    from pathlib import Path  # local import: only needed for the env override
    config.LOGGING_PATH.mkdir(parents=True, exist_ok=True)
    message = []
    path = config.LOGGING_YAML
    value = os.getenv(env_key, None)
    message.append("Value: {}".format(value))
    if value:
        # BUGFIX: os.getenv returns a str, but path.exists() below requires
        # a pathlib.Path — wrap the override.
        path = Path(value)
        message.append("Value True. Path: {}".format(path))
    else:
        message.append("Value False. Path: {}".format(path))
    if path.exists():
        message.append("Path Exists")
        with open(path, 'rt') as f:
            message.append("Config file being interpreted")
            yaml_config = yaml.safe_load(f.read())
            message.append("Logging configuration being set")
            # Redirect the file handlers to the configured log locations.
            yaml_config['handlers']['debug_file_handler']['filename'] = config.DEBUG_LOG
            yaml_config['handlers']['info_file_handler']['filename'] = config.INFO_LOG
            yaml_config['handlers']['error_file_handler']['filename'] = config.ERROR_LOG
            # NOTE(review): relies on logging.config being importable via the
            # bare `import logging` — confirm `import logging.config` exists
            # at module level.
            logging.config.dictConfig(yaml_config)
    else:
        messagebox.showwarning(
            "Log Configuration File Missing",
            "Path does not exist. No configuration applied: {}".format(path)
        )
        logging.basicConfig(level=default_level)
    return message
import shutil
import subprocess
def solve_field(fname, timeout=15, solve_opts=None, *args, **kwargs):
    """ Plate solves an image.

    Note: This is a low-level wrapper around the underlying `solve-field`
    program. See `get_solve_field` for more typical usage and examples.

    Args:
        fname(str, required): Filename to solve in .fits extension.
        timeout(int, optional): Timeout for the solve-field command,
            defaults to 60 seconds.
        solve_opts(list, optional): List of options for solve-field.
    """
    solve_field_script = shutil.which('solve-field')
    if solve_field_script is None:  # pragma: no cover
        raise error.InvalidSystemCommand("Can't find solve-field, is astrometry.net installed?")
    # Use caller-provided options verbatim, otherwise the defaults below.
    if solve_opts is not None:
        options = solve_opts
    else:
        options = [
            '--guess-scale',
            '--cpulimit', str(timeout),
            '--no-verify',
            '--crpix-center',
            '--temp-axy',
            '--index-xyls', 'none',
            '--solved', 'none',
            '--match', 'none',
            '--rdls', 'none',
            '--corr', 'none',
            '--downsample', '4',
            '--no-plots',
        ]
    # Well-known positional hints, in a fixed order.
    for key in ('ra', 'dec', 'radius'):
        if key in kwargs:
            options.append('--' + key)
            options.append(str(kwargs.get(key)))
    logger.debug(f'Adding kwargs: {kwargs!r}')

    def _render_option(opt, val):
        # Booleans become bare flags; everything else is '--opt=value'.
        if isinstance(val, bool):
            return str(opt)
        return f'{opt}={val}'

    # Pass through any remaining '--'-prefixed kwargs not already present.
    options.extend(_render_option(opt, val)
                   for opt, val in kwargs.items()
                   if opt.startswith('--') and opt not in options)

    cmd = [solve_field_script] + options + [fname]
    logger.debug(f'Solving with: {cmd}')
    try:
        proc = subprocess.Popen(cmd,
                                universal_newlines=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except Exception as e:
        raise error.PanError(f"Problem plate-solving in solve_field: {e!r}")
    return proc
def get_team_metadata(convert_users_role_to_string=False):
    """Return the current team's metadata.

    :param convert_users_role_to_string: convert integer team users' roles
        to human comprehensible strings
    :type convert_users_role_to_string: bool

    :return: team metadata
    :rtype: dict
    """
    response = _api.send_request(req_type='GET', path=f'/team/{_api.team_id}')
    if not response.ok:
        raise SABaseException(
            response.status_code, "Couldn't get team metadata. " + response.text
        )
    metadata = response.json()
    if convert_users_role_to_string:
        # Replace each numeric role in place with its readable name.
        for user in metadata["users"]:
            user["user_role"] = user_role_int_to_str(user["user_role"])
    return metadata
def get_request_timestamp(req_type=None, timestamp=None):
    """
    :param req_type: YTD, QTD, MTD, WTD, TODAY or None
        if None return first unix timestamp
    :param timestamp: reference unix timestamp in milliseconds; defaults to
        the current time
    :return: unix timestamp
    """
    # BUGFIX: the original defined a local `@dataclass class YTD` here, which
    # shadowed the module-level YTD constant so `req_type == YTD` could never
    # match callers passing the real constant. It also evaluated
    # getCurrentMillis() as a default argument, freezing "now" at import time.
    if timestamp is None:
        timestamp = getCurrentMillis()
    bench_date = pd.to_datetime(getUTCBeginDay(timestamp), unit='ms')
    if req_type not in (YTD, QTD, MTD, WTD, TODAY):
        return DAY_OF_MILLISECONDS
    if req_type == YTD:
        # First day of the current year.
        date = bench_date + pd.tseries.offsets.DateOffset(months=1 - bench_date.month, days=1 - bench_date.day)
    if req_type == QTD:
        # First day of the current quarter.
        date = bench_date + pd.tseries.offsets.DateOffset(months=-((bench_date.month - 1) % 3),
                                                          days=1 - bench_date.day)
    if req_type == MTD:
        # First day of the current month.
        date = bench_date + pd.tseries.offsets.DateOffset(days=1 - bench_date.day)
    if req_type == WTD:
        # Monday of the current week.
        date = bench_date + pd.tseries.offsets.DateOffset(days=-bench_date.weekday())
    if req_type == TODAY:
        date = bench_date
    return getMillisSeconds(date) - DAY_OF_MILLISECONDS
def to_nest_placeholder(nested_tensor_specs,
                        default=None,
                        name_scope="",
                        outer_dims=()):
    """Converts a nest of TensorSpecs to a nest of matching placeholders.

    Args:
        nested_tensor_specs: A nest of tensor specs.
        default: Optional constant value to set as a default for the placeholder.
        name_scope: String name for the scope to create the placeholders in.
        outer_dims: Optional leading dimensions for the placeholder.

    Returns:
        A nest of placeholders matching the given tensor spec.

    Raises:
        ValueError: If a default is provided outside of the allowed types, or
            if default is a np.array that does not match the spec shape.
    """
    if default is None:
        def make_placeholder(spec):
            return to_placeholder(spec, outer_dims=outer_dims)
    else:
        if not isinstance(default, (int, float, np.ndarray)):
            raise ValueError("to_nest_placeholder default value must be an int, "
                             "float, or np.ndarray")

        def make_placeholder(spec):
            # Validate an array default against the full (outer + spec) shape.
            shape = list(outer_dims) + spec.shape.as_list()
            if isinstance(default, np.ndarray) and list(default.shape) != shape:
                raise ValueError("Shape mismatch between default value and spec. "
                                 "Got {}, expected {}".format(default.shape, shape))
            const = tf.constant(default, shape=shape, dtype=spec.dtype)
            return to_placeholder_with_default(const, spec, outer_dims=outer_dims)

    with tf.name_scope(name_scope):
        return tf.nest.map_structure(make_placeholder, nested_tensor_specs)
import asyncio
async def discover():
    """Discover and return devices on local network."""
    scanner = TuyaDiscovery()
    try:
        # Listen for broadcasts for the default window, then stop.
        await scanner.start()
        await asyncio.sleep(DEFAULT_TIMEOUT)
    finally:
        scanner.close()
    return scanner.devices
def update_firewall_policy(module, oneandone_conn):
    """
    Updates a firewall policy based on input arguments.
    Firewall rules and server ips can be added/removed to/from
    firewall policy. Firewall policy name and description can be
    updated as well.
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a (changed, firewall_policy) tuple; on any exception the
    module fails with the exception message instead of raising.
    """
    try:
        # Identifier may be either the policy's id or its name; the lookup
        # helper is expected to resolve both (TODO confirm against
        # get_firewall_policy's implementation).
        firewall_policy_id = module.params.get('firewall_policy')
        name = module.params.get('name')
        description = module.params.get('description')
        add_server_ips = module.params.get('add_server_ips')
        remove_server_ips = module.params.get('remove_server_ips')
        add_rules = module.params.get('add_rules')
        remove_rules = module.params.get('remove_rules')
        changed = False

        # Fetch the current policy (third arg presumably toggles a "full"
        # fetch — verify against get_firewall_policy).
        firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
        if firewall_policy is None:
            # NOTE(review): _check_mode(module, False) reports "no change"
            # in check mode; outside check mode execution falls through with
            # firewall_policy still None, and the branches below would raise
            # on firewall_policy['id'] — confirm whether _check_mode exits
            # unconditionally or this is a latent bug.
            _check_mode(module, False)

        if name or description:
            # In check mode, report that a change would occur and exit.
            _check_mode(module, True)
            firewall_policy = oneandone_conn.modify_firewall(
                firewall_id=firewall_policy['id'],
                name=name,
                description=description)
            changed = True

        if add_server_ips:
            if module.check_mode:
                # Dry run: compute whether the attach would change anything.
                _check_mode(module, _add_server_ips(module,
                                                   oneandone_conn,
                                                   firewall_policy['id'],
                                                   add_server_ips))

            firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
            changed = True

        if remove_server_ips:
            chk_changed = False
            for server_ip_id in remove_server_ips:
                if module.check_mode:
                    # Accumulate whether any removal would be a change.
                    chk_changed |= _remove_firewall_server(module,
                                                          oneandone_conn,
                                                          firewall_policy['id'],
                                                          server_ip_id)

                _remove_firewall_server(module,
                                        oneandone_conn,
                                        firewall_policy['id'],
                                        server_ip_id)
                _check_mode(module, chk_changed)
            # Re-fetch so the returned policy reflects the removals.
            firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
            changed = True

        if add_rules:
            firewall_policy = _add_firewall_rules(module,
                                                  oneandone_conn,
                                                  firewall_policy['id'],
                                                  add_rules)
            _check_mode(module, firewall_policy)
            changed = True

        if remove_rules:
            chk_changed = False
            for rule_id in remove_rules:
                if module.check_mode:
                    chk_changed |= _remove_firewall_rule(module,
                                                         oneandone_conn,
                                                         firewall_policy['id'],
                                                         rule_id)

                _remove_firewall_rule(module,
                                      oneandone_conn,
                                      firewall_policy['id'],
                                      rule_id)
                _check_mode(module, chk_changed)
            # Re-fetch so the returned policy reflects the rule removals.
            firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
            changed = True

        return (changed, firewall_policy)
    except Exception as e:
        # Ansible convention: surface any failure through fail_json rather
        # than letting the exception propagate.
        module.fail_json(msg=str(e))
def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors.

    Args:
        labels_dense: integer class labels, shape (num_labels,) (or any
            array whose first axis is the sample axis; values are raveled).
        num_classes: total number of classes, i.e. the width of each row.

    Returns:
        A float ndarray of shape (num_labels, num_classes) with a 1 at
        each sample's label index and 0 elsewhere.
    """
    num_labels = labels_dense.shape[0]
    # Flat index of the start of each row in the (num_labels, num_classes)
    # output; adding the label gives the flat position of the "hot" entry.
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    if DEBUG == 1:
        # Fixed: the original used the Python 2 print statement, a syntax
        # error under Python 3 (which the rest of this file targets).
        print('one_hot_vector:', labels_one_hot[0])
    return labels_one_hot
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.