def parse_contact_name(row, name_cols, strict=False, type='person'):
"""Parses a person's name with probablepeople library
Concatenates all the contact name columns into a single string and then attempts to parse it
into standardized name components and return a subset of the name parts that are useful for
comparing contacts. This process eliminates notes and other non-name text from dirty data.
Args:
row (pd.Series): A record
name_cols (list): A list of column names in the record, in order, that when concatenated
comprise a person's name
strict (boolean, optional): Whether or not to raise a RepeatedLabelError when parsing, if
False, the last value of the repeated labels will be used for the parse
type (str): Which probableparser to use: 'generic', 'person' or 'company'
Returns:
A subset (tuple of str, or np.nan) of the standardized name components, namely:
(title, first, last, full_name)
"""
row = row.fillna('')
concat = []
for col in name_cols:
concat.append(row.get(col, ''))
concat = ' '.join(concat)
cleaned = re.sub(r'(not\s*available|not\s*provided|n/a)', '', concat, flags=re.IGNORECASE)
try:
parsed = probablepeople.tag(cleaned, type)
except probablepeople.RepeatedLabelError as e:
if strict:
raise e
problem_key, problem_vals, parsed = find_repeated_label(cleaned)
parsed = (parsed, '')
title = parsed[0].get('PrefixOther', np.nan)
first = parsed[0].get('GivenName', np.nan)
last = parsed[0].get('Surname', np.nan)
try:
full_name = first + ' ' + last
except TypeError as e:
full_name = np.nan
return title, first, last, full_name
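# A hedged usage sketch (not from the original source): the Series keys, column list and
# values below are hypothetical, and probablepeople must be installed for this to run.
import pandas as pd

example_row = pd.Series({'name_1': 'Dr. Jane', 'name_2': 'Doe', 'name_3': 'not available'})
title, first, last, full_name = parse_contact_name(example_row, ['name_1', 'name_2', 'name_3'])
# Expected roughly ('Dr.', 'Jane', 'Doe', 'Jane Doe'), with np.nan for any component
# that probablepeople cannot tag.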
| 11,800
|
def test_userkeys_to_json():
"""
GIVEN a Settings model
    WHEN the to_json method is called
THEN check the id and more
"""
uk = UserKeysModel.by_id(1)
uk_json = UserKeysModel.to_json(uk)
assert uk.id == uk_json['id']
# FIXME : test that key exist, is a string and decrypt it via paramiko => assert uk.key == uk_json['key']
assert uk.user_id == uk_json['user_id']
assert uk.comment == uk_json['comment']
assert uk.authorized_key == uk_json['authorized_key']
assert uk_json['created_at'] == (uk.created_at.isoformat() if uk.created_at else uk.created_at)
assert uk_json['updated_at'] == (uk.updated_at.isoformat() if uk.updated_at else uk.updated_at)
assert uk_json['deleted_at'] == (uk.deleted_at.isoformat() if uk.deleted_at else uk.deleted_at)
| 11,801
|
def write_six_img_grid_w_embedded_names(
rgb_img: np.ndarray,
pred: np.ndarray,
label_img: np.ndarray,
id_to_class_name_map: Mapping[int,str],
save_fpath: str
) -> None:
"""
Create a 6-image tile grid with the following structure:
------------------------------------------------------------
RGB Image | Blended RGB+GT Label Map | GT Label Map
------------------------------------------------------------
RGB Image | Blended RGB+Pred Label Map | Predicted Label Map
------------------------------------------------------------
We embed classnames directly into the predicted and ground
truth label maps, instead of using a colorbar.
    Args:
        - rgb_img: RGB image of shape (H, W, 3)
        - pred: predicted label map
        - label_img: ground truth label map
        - id_to_class_name_map: mapping from class id to class name
        - save_fpath: path where the 6-image grid will be written
    Returns:
        - None
"""
assert label_img.ndim == 2
assert pred.ndim == 2
assert rgb_img.ndim == 3
label_hgrid = form_mask_triple_embedded_classnames(
rgb_img,
label_img,
id_to_class_name_map,
save_fpath='dummy.jpg',
save_to_disk=False
)
pred_hgrid = form_mask_triple_embedded_classnames(
rgb_img,
pred,
id_to_class_name_map,
save_fpath='dummy.jpg',
save_to_disk=False
)
vstack_img = form_vstacked_imgs(
img_list=[label_hgrid,pred_hgrid],
vstack_save_fpath=save_fpath,
save_to_disk=True
)
| 11,802
|
def build_operator_attribute_dicts(parameters, n_op, prefix="op_"):
"""
Extracts elements of parameters dict whose keys begin with prefix and generates a list of dicts.
The values of the relevant elements of parameters must be either single values or a list of length n_op, or else
an exception will be raised.
:param parameters: dict (or dict-like config object) containing a superset of operator parameters
:type parameters: dict
:param n_op: number of operators expected
:type n_op: int
:param prefix: prefix by which to filter out operator parameters
:type prefix: str
"""
list_op_dicts = [dict() for i in range(n_op)] # initialize list of empty dicts
for k in [x for x in parameters if x.startswith(prefix)]:
# if only a single value is given, use it for all operators
if type(parameters[k]) in [str, int, float, bool, type(None), dict]:
for di in list_op_dicts:
di[k] = parameters[k]
# if a list of values is given and the length matches the number of operators, use them respectively
elif len(parameters[k]) == n_op:
for i, op in enumerate(list_op_dicts):
op[k] = parameters[k][i]
elif k == G_OP_REPO_TH_DEF: # TODO # lists as inputs for op
for di in list_op_dicts:
di[k] = parameters[k]
# if parameter has invalid number of values, raise exception
else:
raise ValueError("Number of values for parameter", k, "equals neither n_op nor 1.", type(parameters[k]))
return list_op_dicts
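# A minimal usage sketch (hypothetical parameter names): scalar values are broadcast to
# every operator dict, while a list of length n_op is distributed element-wise.
params = {"op_fleet_size": [10, 20], "op_fare_per_km": 1.5, "other_setting": True}
op_dicts = build_operator_attribute_dicts(params, n_op=2, prefix="op_")
# -> [{'op_fleet_size': 10, 'op_fare_per_km': 1.5}, {'op_fleet_size': 20, 'op_fare_per_km': 1.5}]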
| 11,803
|
def stop_spark_standalone():
"""
Stop the Spark standalone cluster created from init_spark_standalone (master not specified).
"""
from zoo.util.spark import SparkRunner
SparkRunner.stop_spark_standalone()
| 11,804
|
def write_log(title, message=''):
"""Write formatted log message to stderr."""
sys.stderr.write(''.join([
title.center(40).center(60, '-'), '\n', message
]))
| 11,805
|
def mock_data(rootdir, data_dir):
"""Build mock functional data from available atlases"""
mock_dir = os.path.join(data_dir, 'mock')
if not os.path.exists(mock_dir):
subprocess.run("python setup_mock_data.py".split(), cwd=rootdir)
return mock_dir
| 11,806
|
def showapi():
"""Shows the official API available in jasyscript.py."""
from jasy.core.Inspect import generateApi
Console.info(generateApi(__api__))
| 11,807
|
def get_peer_addr(ifname):
"""Return the peer address of given peer interface.
    Returns None if the address does not exist or the interface is not a peer-to-peer interface.
"""
for addr in IP.get_addr(label=ifname):
attrs = dict(addr.get('attrs', []))
if 'IFA_ADDRESS' in attrs:
return attrs['IFA_ADDRESS']
| 11,808
|
def eiffel_artifact_created_event():
"""Eiffel artifact created event."""
return {
"meta": {
"id": "7c2b6c13-8dea-4c99-a337-0490269c374d",
"time": 1575981274307,
"type": "EiffelArtifactCreatedEvent",
"version": "3.0.0",
},
"links": [],
"data": {"identity": "pkg:artifact/created/test@1.0.0"},
}
| 11,809
|
def _hash(input_data, initVal=0):
"""
hash() -- hash a variable-length key into a 32-bit value
k : the key (the unaligned variable-length array of bytes)
len : the length of the key, counting by bytes
level : can be any 4-byte value
Returns a 32-bit value. Every bit of the key affects every bit of
the return value. Every 1-bit and 2-bit delta achieves avalanche.
About 36+6len instructions.
The best hash table sizes are powers of 2. There is no need to do
mod a prime (mod is so slow!). If you need less than 32 bits,
use a bitmask. For example, if you need only 10 bits, do
h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.
If you are hashing n strings (ub1 **)k, do it like this:
for (i=0, h=0; i<n; ++i) h = hash( k[i], len[i], h);
By Bob Jenkins, 1996. bob_jenkins@burtleburtle.net. You may use this
code any way you wish, private, educational, or commercial. It's free.
See http://burtleburtle.net/bob/hash/evahash.html
Use for hash table lookup, or anything where one collision in 2^32 is
acceptable. Do NOT use for cryptographic purposes.
"""
data = bytes(input_data, encoding='ascii')
len_pos = len(data)
length = len(data)
if length == 0:
return 0
a = 0x9e3779b9
b = 0x9e3779b9
c = initVal
p = 0
while len_pos >= 12:
a += ((data[p + 0]) + ((data[p + 1]) << 8) + ((data[p + 2]) << 16) + ((data[p + 3]) << 24))
b += ((data[p + 4]) + ((data[p + 5]) << 8) + ((data[p + 6]) << 16) + ((data[p + 7]) << 24))
c += ((data[p + 8]) + ((data[p + 9]) << 8) + ((data[p + 10]) << 16) + ((data[p + 11]) << 24))
q = _mix(a, b, c)
a = q[0]
b = q[1]
c = q[2]
p += 12
len_pos -= 12
c += length
if len_pos >= 11:
c += (data[p + 10]) << 24
if len_pos >= 10:
c += (data[p + 9]) << 16
if len_pos >= 9:
c += (data[p + 8]) << 8
# the first byte of c is reserved for the length
if len_pos >= 8:
b += (data[p + 7]) << 24
if len_pos >= 7:
b += (data[p + 6]) << 16
if len_pos >= 6:
b += (data[p + 5]) << 8
if len_pos >= 5:
b += (data[p + 4])
if len_pos >= 4:
a += (data[p + 3]) << 24
if len_pos >= 3:
a += (data[p + 2]) << 16
if len_pos >= 2:
a += (data[p + 1]) << 8
if len_pos >= 1:
a += (data[p + 0])
q = _mix(a, b, c)
a = q[0]
b = q[1]
c = q[2]
return rshift_zero_padded(c, 0)
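# Following the docstring's advice: when fewer than 32 bits are needed, mask the result
# down to a power-of-two table size (here 10 bits, i.e. 1024 buckets). Assumes the module's
# _mix and rshift_zero_padded helpers are defined; the key string is arbitrary.
h = _hash("example key")
bucket = h & ((1 << 10) - 1)   # the equivalent of hashmask(10) in Jenkins' C code
assert 0 <= bucket < 1024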
| 11,810
|
def main():
"""
Main entry function.
Parameters
----------
:return: -
"""
print("> Allergens analysis <")
# #################
# initial processing
# #################
load_undesirables()
# initial_processing()
df_open_food_facts = pd.read_csv(datasets_intermediate_path + "openfoodfacts_cols.csv", low_memory=False, sep=";")
df_allergens = pd.read_csv("../data/01_raw/custom_allergens.csv", low_memory=False, sep="\t")
# #################
# dataset analysis before clean
# #################
dataset_analysis_pre_clean(df_open_food_facts)
# #################
# dataset cleaning
# #################
start = time.time()
clean_dataset(df_open_food_facts, save_title="openfoodfacts_cleaned")
clean_allergen_set(df_allergens, save_title="custom_allergens")
end = time.time()
print(f'Cleaned in: {round((end - start) / 60, 2)}min')
# #################
# allergy test
# #################
# df_open_food_facts = pd.read_csv(openf_datasets_intermediate_path + "openfoodfacts_cleaned.csv", low_memory=False, sep=";")
# recipe_kaggle_df_cleaned = pd.read_csv(kaggle_datasets_processed_path + "train.csv")
# Safely evaluate an expression node or a Unicode or Latin-1 encoded string containing a Python expression. The string or node provided may only consist of the following
# Python literal structures: strings, numbers, tuples, lists, dicts, booleans, and None.
# in this case it is used to evaluate the lists for allergens and ingredients
# df_open_food_facts.allergens = df_open_food_facts.allergens.apply(ast.literal_eval)
# df_open_food_facts.ingredients_text = df_open_food_facts.ingredients_text.apply(ast.literal_eval)
# load_allergen_list()
# #################
# dataset analysis after clean
# #################
dataset_analysis_post_clean(df_open_food_facts)
#
# for index, row in recipe_kaggle_df_cleaned.head(n=50).iterrows():
# ingredient_list = row.ingredients.split(",")
# check_allergens(ingredient_list)
#
# for key in found_allergens.keys():
# print(f'{colors.positive}{key}: {", ".join(found_allergens[key])}')
# #################
# one hot encoding
# #################
df_open_food_facts = pd.read_csv(datasets_intermediate_path + "openfoodfacts_cleaned.csv", low_memory=False, sep="\t")
custom_allergens = pd.read_csv(datasets_intermediate_path + "custom_allergens.csv", low_memory=False, sep="\t")
custom_allergens.drop('allergens', axis=1, inplace=True)
df_hot_encoded = multi_label_binarize_dataframe(df_open_food_facts)
df_hot_encoded.drop('allergens', axis=1, inplace=True)
df_hot_encoded = df_hot_encoded.append(custom_allergens)
df_hot_encoded.to_csv(r"" + openf_datasets_processed_path + "openfoodfacts.csv", index=False, sep='\t', encoding='utf-8')
print("> Done <")
| 11,811
|
def read_config_option(key, expected_type=None, default_value=None):
"""Read the specified value from the configuration file.
Args:
key: the name of the key to read from the config file.
expected_type: read the config option as the specified type (if specified)
default_value: if the key doesn't exist, just return the default value.
If the default value is not specified, the function will throw whatever
error was raised by the configuration parser
"""
logging.info("Reading config option {} with expected type {}".format(key, expected_type))
try:
if not expected_type:
value = conf_parser.get("Settings", key)
if key is "password":
logging.info("Got configuration for key {}: ****".format(key))
else:
logging.info("Got configuration for key {}: {}".format(key, value))
return conf_parser.get("Settings", key)
elif expected_type is bool:
return conf_parser.getboolean("Settings", key)
except (ValueError, NoOptionError) as e:
if default_value:
return default_value
else:
raise
| 11,812
|
def simple_password(request):
"""
Checks a password
"""
if request.method == "POST":
form = PasswordForm(data=request.POST)
if form.is_valid():
# TODO: set session with better param
request.session["simple_auth"] = True
return redirect(form.cleaned_data["url"] or "/")
else:
form = PasswordForm()
return render(request, "simple_auth/password_form.html",
{"form": form})
| 11,813
|
def removeKeys(array: dict = None, remove: Any = None) -> dict:
"""
Removes keys from array by given remove value.
:param array: dict[Any: Any]
:param remove: Any
:return:
- sorted_dict - dict[Any: Any]
"""
if remove is None:
remove = []
try:
sorted_dict = {}
for item_key in array:
if array[item_key] != remove:
sorted_dict[item_key] = array[item_key]
return sorted_dict
except Exception as e:
logging.exception(e)
| 11,814
|
def affine2boxmesh(affines):
"""
:param affines: (n_parts, 6), range (0, 1)
:return:
"""
from trimesh.path.creation import box_outline
from trimesh.path.util import concatenate
n_parts = len(affines)
colors = [[0, 0, 255, 255], # blue
[0, 255, 0, 255], # green
[255, 0, 0, 255], # red
[255, 255, 0, 255], # yellow
[0, 255, 255, 255], # cyan
[255, 0, 255, 255], # Magenta
[160, 32, 240, 255], # purple
[255, 255, 240, 255]] # ivory
shape_box = []
for idx in range(n_parts):
part_trans = affines[idx, :3]
part_size = affines[idx, 3:]
trans_mat = np.eye(4)
# translate to center of axis aligned bounds
trans_mat[:3, 3] = part_trans
part_box = box_outline(transform=trans_mat,
extents=part_size
)
shape_box.append(part_box)
shape_box = concatenate(shape_box)
return shape_box
| 11,815
|
def _select_index_code(code):
"""
1 - sh
0 - sz
"""
code = str(code)
if code[0] == '3':
return 0
return 1
| 11,816
|
def get_install_agent_cmd():
"""Get OS specific command to install Telegraf agent."""
agent_pkg_deb = "https://packagecloud.io/install/repositories/" \
"wavefront/telegraf/script.deb.sh"
agent_pkg_rpm = "https://packagecloud.io/install/repositories/" \
"wavefront/telegraf/script.rpm.sh"
dist = system.check_os()
cmd = None
if not dist:
print("Error: Unsupported OS version. Please contact"
" support@wavefront.com.")
return cmd
if dist.strip().startswith(("Oracle Linux Server", "Fedora",
"Amazon Linux", "CentOS",
"Red Hat Enterprise Linux")):
cmd = "curl -s %s | bash" % (agent_pkg_rpm)
cmd += " && yum -y -q install telegraf"
elif dist.strip().startswith("Ubuntu"):
cmd = "curl -s %s | bash" % (agent_pkg_deb)
cmd += ' && apt-get -y -qq -o Dpkg::Options::="--force-confold"' \
' install telegraf'
elif dist.strip().lower().startswith("debian"):
cmd = "curl -s %s | bash" % (agent_pkg_deb)
cmd += ' && apt-get -o Dpkg::Options::="--force-confnew"' \
' -y install telegraf'
elif dist.strip().startswith(("openSUSE", "SUSE Linux Enterprise Server",
"SLES")):
cmd = "curl -s %s | bash" % (agent_pkg_rpm)
cmd += ' && zypper install telegraf'
else:
message.print_warn("Error: Unsupported OS version: %s." % (dist))
return cmd
| 11,817
|
def unpack(manifests: LocalManifestLists) -> Tuple[ServerManifests, bool]:
"""Convert `manifests` to `ServerManifests` for internal processing.
    The error flag in the returned tuple is `True` if two or more files define the
    same resource, for instance if two files define the same namespace or the same
    deployment, and `False` otherwise.
The primary use case is to convert the manifests we read from local files
into the format Square uses internally for the server manifests as well.
Inputs:
manifests: LocalManifestLists
Returns:
ServerManifests: flattened version of `manifests`.
"""
# Compile a dict that shows which meta manifest was defined in which file.
# We will shortly use this information to determine if all resources were
# defined exactly once across all files.
all_meta: DefaultDict[MetaManifest, list] = collections.defaultdict(list)
for fname in manifests:
for meta, _ in manifests[fname]:
all_meta[meta].append(fname)
# Find out if all meta manifests were unique. If not, log the culprits and
# return with an error.
unique = True
for meta, fnames in all_meta.items():
if len(fnames) > 1:
unique = False
tmp = [str(_) for _ in fnames]
logit.error(
f"Duplicate ({len(tmp)}x) manifest {meta}. "
f"Defined in {str.join(', ', tmp)}"
)
if not unique:
return ({}, True)
# Compile the input manifests into a new dict with the meta manifest as key.
out = {k: v for fname in manifests for k, v in manifests[fname]}
return (out, False)
| 11,818
|
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the switch from config."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
model = config.get(CONF_MODEL)
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
devices = []
unique_id = None
if model is None:
try:
miio_device = Device(host, token)
device_info = await hass.async_add_executor_job(miio_device.info)
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
except DeviceException:
raise PlatformNotReady
if model in ["090615.switch.switch01"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model in ["090615.switch.switch02"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
plug2 = PtxSwitch(host, token, model=model)
device2 = XiaomiPTXSwitch(name, plug2, model, unique_id, 2)
devices.append(device2)
hass.data[DATA_KEY][host] = device2
elif model in ["090615.switch.switch03"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
plug2 = PtxSwitch(host, token, model=model)
device2 = XiaomiPTXSwitch(name, plug2, model, unique_id, 2)
devices.append(device2)
hass.data[DATA_KEY][host] = device2
plug3 = PtxSwitch(host, token, model=model)
device3 = XiaomiPTXSwitch(name, plug3, model, unique_id, 3)
devices.append(device3)
hass.data[DATA_KEY][host] = device3
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/volshebniks/python-miio-ptx/issues "
"and provide the following data: %s",
model,
)
return False
async_add_entities(devices, update_before_add=True)
| 11,819
|
def process_generate_metric_alarms_event(event):
"""Handles a new event request
Placeholder copied from alert_controller implementation
"""
LOG.info(str(event))
return create_response(200, body="Response to HealthCheck")
| 11,820
|
def check_if_usl_forecast_exists(config, stid, run, forecast_date):
"""
Checks the parent USL directory to see if USL has run for specified stid and run time. This avoids the server
automatically returning a "close enough" date instead of an "Error 300: multiple choices."
"""
run_date = (forecast_date - timedelta(days=1)).replace(hour=int(run))
run_strtime = run_date.strftime('%Y%m%d_%H')
api_url = 'http://www.microclimates.org/forecast/{}/'.format(stid)
req = Request(api_url)
try:
response = urlopen(req)
except HTTPError:
if config['debug'] > 9:
print("usl: forecast for %s at run time %s doesn't exist" % (stid, run_date))
raise
page = response.read().decode('utf-8', 'ignore')
# Look for string of USL run time in the home menu for this station ID (equal to -1 if not found)
if page.find(run_strtime) == -1:
if config['debug'] > 9:
print("usl: forecast for %s at run time %s hasn't run yet" % (stid, run_date))
raise URLError("- usl: no correct date/time choice")
| 11,821
|
def get_query_segment_info(collection_name, timeout=None, using="default"):
"""
Notifies Proxy to return segments information from query nodes.
:param collection_name: A string representing the collection to get segments info.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: QuerySegmentInfo:
        QuerySegmentInfo is the growing segments' information in the query cluster.
:rtype: QuerySegmentInfo
:example:
>>> from pymilvus import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
>>> connections.connect(alias="default")
>>> _DIM = 128
>>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
>>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
>>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
>>> collection = Collection(name="test_get_segment_info", schema=schema)
>>> import pandas as pd
>>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))i
>>> float_vector_series = [[random.random() for _ in range _DIM] for _ in range (10)]
>>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
>>> collection.insert(data)
>>> collection.load() # load collection to memory
>>> res = utility.get_query_segment_info("test_get_segment_info")
"""
return _get_connection(using).get_query_segment_info(collection_name, timeout=timeout)
| 11,822
|
def IoU(pred, gt, n_classes, all_iou=False):
"""Computes the IoU by class and returns mean-IoU"""
# print("IoU")
iou = []
for i in range(n_classes):
if np.sum(gt == i) == 0:
iou.append(np.NaN)
continue
TP = np.sum(np.logical_and(pred == i, gt == i))
FP = np.sum(np.logical_and(pred == i, gt != i))
FN = np.sum(np.logical_and(pred != i, gt == i))
iou.append(TP / (TP + FP + FN))
# nanmean: if a class is not present in the image, it's a NaN
result = [np.nanmean(iou), iou] if all_iou else np.nanmean(iou)
return result
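# A small worked example with hypothetical arrays: class 0 has TP=1, FP=1, FN=0 (IoU=0.5)
# and class 1 has TP=2, FP=0, FN=1 (IoU=2/3), so the mean IoU is roughly 0.583.
import numpy as np

pred = np.array([0, 0, 1, 1])
gt = np.array([0, 1, 1, 1])
mean_iou, per_class_iou = IoU(pred, gt, n_classes=2, all_iou=True)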
| 11,823
|
def from_torchvision(vision_transform, p=1):
"""Takes in an arbitary torchvision tranform and wrap it such that it can be
applied to a list of images of shape HxWxC
Returns a callable class that takes in list of images and target as input
NOTE:
Due to implementation difficuities, in order to apply the same
randomized transform to EACH image, it is best to pass in
a deterministic transform like the functional transforms
in torchvision and then pass in a p value for the wrapper
to roll a number and apply the transform with that probability
Additionally, it's also possible to wrap a torchvision functional transform
as long as it's a function that takes in an image as it's only argument
i.e can write something like:
lambda x: some_functional_transform(x,...)
"""
return TorchvisionWrapper(vision_transform, p=p)
| 11,824
|
def is_valid(inputted):
"""
Essa função irá verificar o QueryDict trago pelo método POST do nosso frontend e fazer uma verificação dos campos an
tes de persistir os dados no banco de dados.
:param inputted: Query Dict trago pelo POST
:return: Um valor booleano
"""
for key in inputted.keys():
if key != 'complemento':
if inputted.get(key) is None:
return False
elif len(inputted.get(key)) == 0:
return False
else:
pass
if key == 'estado':
if inputted.get(key) not in country_uf.values():
return False
else:
pass
if key == 'cep':
if len(inputted.get(key)) != 8:
return False
else:
cep = inputted.get(key)
try:
int(cep)
except (TypeError, ValueError):
return False
else:
url = f"https://viacep.com.br/ws/{cep}/json/"
response = requests.get(url)
response = response.json()
if 'erro' in response.keys():
return False
return True
| 11,825
|
def get_ports(context, project_id=None):
"""Returns all ports of VMs in EOS-compatible format.
:param project_id: globally unique neutron tenant identifier
"""
session = context.session
model = db_models.AristaProvisionedVms
if project_id:
all_ports = (session.query(model).
filter(model.project_id == project_id,
model.host_id.isnot(None),
model.vm_id.isnot(None),
model.network_id.isnot(None),
model.port_id.isnot(None)))
else:
all_ports = (session.query(model).
filter(model.project_id.isnot(None),
model.host_id.isnot(None),
model.vm_id.isnot(None),
model.network_id.isnot(None),
model.port_id.isnot(None)))
ports = {}
for port in all_ports:
if port.port_id not in ports:
ports[port.port_id] = port.eos_port_representation()
ports[port.port_id]['hosts'].append(port.host_id)
return ports
| 11,826
|
def WriteSchemaMarkdown(fileName):
""" Iterate on the global `OrderedDict` of `EoCalib` sub-classes
and write description of each to a markdown file """
with open(fileName, 'w') as fout:
for key, val in EO_CALIB_CLASS_DICT.items():
fout.write("## %s\n" % key)
val.writeMarkdown(fout)
fout.write("\n\n")
| 11,827
|
def dispatcher3(request, route_table):
"""
return an instance of a second DispatcherActor with another name that is
not launched the teardown of this fixtures terminate the actor (in case it
was started and close its socket)
"""
dispatcher_actor = DispatcherActor(
'test_dispatcher2-',
lambda name, log: FakeFormulaActor(name, FORMULA_SOCKET_ADDR,
level_logger=log),
route_table,
level_logger=LOG_LEVEL)
yield dispatcher_actor
dispatcher_actor.socket_interface.close()
dispatcher_actor.terminate()
dispatcher_actor.join()
| 11,828
|
def _HMAC(K, C, Mode=hashlib.sha1):
"""
Generate an HMAC value.
The default mode is to generate an HMAC-SHA-1 value w/ the SHA-1 algorithm.
:param K: shared secret between client and server.
Each HOTP generator has a different and unique secret K.
:type K: bytes
:param C: 8-byte counter value, the moving factor.
This counter MUST be synchronized between the HOTP generator
(client) and the HOTP validator (server).
:type C: bytes
:param Mode: The algorithm to use when generating the HMAC value
:type Mode: hashlib.sha1, hashlib.sha256, hashlib.sha512, or hashlib.md5
:return: HMAC result. If HMAC-SHA-1, result is 160-bits (20-bytes) long.
:rtype: bytes
"""
return hmac.new(K, C, Mode).digest()
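# A hedged usage sketch in the HOTP spirit: a 20-byte shared secret (the RFC 4226 test
# value) and an 8-byte big-endian counter, matching the parameter descriptions above.
import hashlib

secret = b"12345678901234567890"            # shared secret K
counter = (0).to_bytes(8, byteorder="big")  # 8-byte moving factor C
mac = _HMAC(secret, counter, Mode=hashlib.sha1)
assert len(mac) == 20                       # HMAC-SHA-1 digests are 160 bits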
| 11,829
|
def get_reference(planet_name):
"""
Return reference for a given planet's orbit fit
Args:
planet_name (str): name of planet. no space
Returns:
reference (str): Reference of orbit fit
"""
planet_name = planet_name.lower()
if planet_name not in post_dict:
raise ValueError("Invalid planet name '{0}'".format(planet_name))
filename, reference = post_dict[planet_name]
return reference
| 11,830
|
def makedirs(name, *args, **kwargs):
"""Shim to transparently backport exist_ok to Python < 3.2"""
if "exist_ok" in kwargs and python_lt(3, 2):
if not os.path.exists(name):
os.makedirs(name, *args, **kwargs)
else:
os.makedirs(name, *args, **kwargs)
| 11,831
|
def DefaultPortIfAvailable():
"""Returns default port if available.
Raises:
EmulatorArgumentsError: if port is not available.
Returns:
int, default port
"""
if portpicker.is_port_free(_DEFAULT_PORT):
return _DEFAULT_PORT
else:
raise EmulatorArgumentsError(
'Default emulator port [{}] is already in use'.format(_DEFAULT_PORT))
| 11,832
|
def mock_hub(hass):
"""Mock hub."""
mock_integration(hass, MockModule(DOMAIN))
hub = mock.MagicMock()
hub.name = "hub"
hass.data[DOMAIN] = {DEFAULT_HUB: hub}
return hub
| 11,833
|
def deferred_setting(name, default):
"""
Returns a function that calls settings with (name, default)
"""
return lambda: setting(name, default)
| 11,834
|
def watch_cli(optimization_id):
"""Watch the status of a bespoke optimization."""
pretty.install()
console = rich.get_console()
print_header(console)
from openff.bespokefit.executor import wait_until_complete
wait_until_complete(optimization_id)
| 11,835
|
def draw_donut(display, coord, box_size, color, bg_color):
"""Draw a donut at the given location."""
left, top = coord
half = int(box_size * 0.5)
quarter = int(box_size * 0.25)
center = (left + half, top + half)
pygame.draw.circle(display, color, center, half - 5)
pygame.draw.circle(display, bg_color, center, quarter - 5)
return
| 11,836
|
def extends_dict(target, source):
""" Will copy every key and value of source in target if key is not present in target """
for key, value in source.items():
if key not in target:
target[key] = value
elif type(target[key]) is dict:
extends_dict(target[key], value)
elif type(target[key]) is list:
target[key] += value
return target
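# Quick illustration: missing keys are copied, nested dicts are merged recursively,
# and lists are extended in place.
target = {"a": 1, "nested": {"x": 1}, "items": [1]}
source = {"b": 2, "nested": {"y": 2}, "items": [2]}
extends_dict(target, source)
# target is now {'a': 1, 'nested': {'x': 1, 'y': 2}, 'items': [1, 2], 'b': 2}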
| 11,837
|
def get_maps_interface_class(zep_inp):
"""
Takes the input of zephyrus and return the maps of
interfaces to classes and viceversa
"""
interface_to_classes = {}
class_to_interfaces = {}
for i in zep_inp["components"].keys():
class_name = i.split(settings.SEPARATOR)[-1]
interfaces = zep_inp["components"][i]["provides"][0]["ports"]
class_to_interfaces[class_name] = interfaces
for k in interfaces:
if k in interface_to_classes:
interface_to_classes[k].append(class_name)
else:
interface_to_classes[k] = [class_name]
return (interface_to_classes,class_to_interfaces)
| 11,838
|
def plextv_resources_base_fixture():
"""Load base payload for plex.tv resources and return it."""
return load_fixture("plex/plextv_resources_base.xml")
| 11,839
|
def _auth(machine='desi.lbl.gov'):
"""Get authentication credentials.
"""
from netrc import netrc
from requests.auth import HTTPDigestAuth
n = netrc()
try:
u,foo,p = n.authenticators(machine)
except:
raise ValueError('Unable to get user/pass from $HOME/.netrc for {}'.format(machine))
return HTTPDigestAuth(u,p)
| 11,840
|
def test_handle_html_dynmamic_with_encoding():
"""Test the handler converting dynamic HTML to utf-8 encoding."""
with open(TEST_VALUE_SOURCES["html_dynamic"]) as f:
test_value = f.read()
test_bytes = bytes(test_value, ALT_ENCODING)
with open(TEST_VALUE_SOURCES["html_dynamic_bytes"], "rb") as f:
# Work around the end-of-file-fixer pre-commit hook
expected_bytes = f.read().rstrip()
hasher = hash_http_content.UrlHasher(HASH_ALGORITHM)
result = hasher._handle_html(test_bytes, ALT_ENCODING)
assert result.hash == EXPECTED_DIGESTS["html_dynamic"]
assert result.contents == expected_bytes
| 11,841
|
def relative_date_add(date_rule: str, strict: bool = False) -> float:
"""Change the string in date rule format to the number of days. E.g 1d to 1, 1y to 365, 1m to 30, -1w to -7"""
days = ''
if re.search(DateRuleReg, date_rule) is not None:
res = re.search(DateRuleReg, date_rule)
date_str = res.group(1)
if date_str[0] == '-':
num = float(date_str[1:-1])
days = '-'
else:
num = float(date_str[:-1])
rule = date_str[-1:]
if rule in DictDayRule:
scale = DictDayRule[rule]
days = days + str(num * scale)
d = float(days)
return d
else:
            raise MqValueError('There is no valid day rule for the point provided.')
if strict:
raise MqValueError(f'invalid date rule {date_rule}')
return 0
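# relative_date_add references DateRuleReg, DictDayRule and MqValueError, which are defined
# elsewhere in the library; a rough sketch of plausible stand-ins (assumptions, not the
# original definitions) that reproduces the docstring's examples:
import re

DateRuleReg = re.compile(r'^(-?\d+[dwmy])$')         # e.g. '1d', '-1w', '3m', '1y'
DictDayRule = {'d': 1, 'w': 7, 'm': 30, 'y': 365}    # rule letter -> days per unit

class MqValueError(ValueError):
    """Stand-in for the library's error type."""

# With these, relative_date_add('1y') == 365.0 and relative_date_add('-1w') == -7.0.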
| 11,842
|
def store_survey_elements(survey, recom, df_fri, df_frii):
"""Match returned recommentations ids with ObjectCatalog entries
Parameters
----------
survey: zooniverse_web.models.Survey
recom: acton.proto.wrappers.Recommendations
df_fri: pandas.DataFrame
df_frii: pandas.DataFrame
"""
#
for recommendation_id in recom.recommendations:
try:
first = df_fri.loc[df_fri['idx'] == recommendation_id, 'first'].item()
except ValueError:
first = df_frii.loc[df_frii['idx'] == recommendation_id, 'first'].item()
# Gather the one question we will use here.
try:
galaxy = Galaxy.objects.get(first=first)
except Galaxy.MultipleObjectsReturned:
galaxy = Galaxy.objects.filter(first=first)[0]
question = Question.objects.all()[0]
# Create survey element
survey_element, created = SurveyElement.objects.get_or_create(
galaxy=galaxy,
question=question,
)
# Create survey question for this survey element
SurveyQuestion.objects.create(survey_element=survey_element, survey=survey)
save_image(galaxy)
| 11,843
|
def to_frames_using_nptricks(src: np.ndarray, window_size: int, stride: int) -> np.ndarray:
"""
    A primitive implementation that splits an np.ndarray into frames; because the split uses
    `np.lib.stride_tricks.as_strided`, it is faster than `to_frames_using_index`, which relies on indexing.
Parameters
----------
src: np.ndarray
        source array to be split.
window_size: int
sliding window size.
stride: int,
stride is int more than 0.
Returns
-------
frames: np.ndarray
a shape of frames is `(num_frames, window_size, *src.shape[1:])`, where num_frames is `(src.shape[0] - window_size) // stride + 1`.
"""
assert stride > 0, 'ストライドは正の整数である必要がある. stride={}'.format(stride)
num_frames = (src.shape[0] - window_size) // stride + 1
ret_shape = (num_frames, window_size, *src.shape[1:])
strides = (stride * src.strides[0], *src.strides)
return np.lib.stride_tricks.as_strided(src, shape=ret_shape, strides=strides)
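# Example: a 1-D signal of length 10 with window_size=4 and stride=2 yields
# (10 - 4) // 2 + 1 = 4 overlapping frames of length 4.
import numpy as np

signal = np.arange(10)
frames = to_frames_using_nptricks(signal, window_size=4, stride=2)
assert frames.shape == (4, 4)
# frames[1] is array([2, 3, 4, 5])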
| 11,844
|
def global_fit(
model_constructor,
pdf_transform=False,
default_rtol=1e-10,
default_atol=1e-10,
default_max_iter=int(1e7),
learning_rate=1e-6,
):
"""
Wraps a series of functions that perform maximum likelihood fitting in the
`two_phase_solver` method found in the `fax` python module. This allows for
the calculation of gradients of the best-fit parameters with respect to upstream
parameters that control the underlying model, i.e. the event yields (which are
then parameterized by weights or similar).
Args:
model_constructor: Function that takes in the parameters of the observable,
and returns a model object (and background-only parameters)
Returns:
global_fitter: Callable function that performs global fits.
Differentiable :)
"""
adam_init, adam_update, adam_get_params = optimizers.adam(learning_rate)
def make_model(model_pars):
m, bonlypars = model_constructor(model_pars)
bounds = m.config.suggested_bounds()
exp_bonly_data = m.expected_data(bonlypars, include_auxdata=True)
def expected_logpdf(pars): # maps pars to bounded space if pdf_transform = True
return (
m.logpdf(to_bounded_vec(pars, bounds), exp_bonly_data)
if pdf_transform
else m.logpdf(pars, exp_bonly_data)
)
def global_fit_objective(pars): # NLL
return -expected_logpdf(pars)[0]
return global_fit_objective
def global_bestfit_minimized(hyper_param):
nll = make_model(hyper_param)
def bestfit_via_grad_descent(i, param): # gradient descent
g = jax.grad(nll)(param)
# param = param - g * learning_rate
param = adam_get_params(adam_update(i, g, adam_init(param)))
return param
return bestfit_via_grad_descent
global_solve = twophase.two_phase_solver(
param_func=global_bestfit_minimized,
default_rtol=default_rtol,
default_atol=default_atol,
default_max_iter=default_max_iter,
)
def global_fitter(init, hyper_pars):
solve = global_solve(init, hyper_pars)
return solve.value
return global_fitter
| 11,845
|
def get_connection_string(storage_account_name):
"""
Checks the environment for variable named AZ_<STORAGE_ACCOUNT_NAME> and
returns the corresponding connection string. Raises a
``ConnectionStringNotFound`` exception if environment variable is missing
"""
conn_string = os.environ.get("AZ_" + storage_account_name.upper(), None)
if conn_string is None:
raise ConnectionStringError(
"Environment variable AZ_" + storage_account_name.upper() + " not found!")
else:
return conn_string
| 11,846
|
def reorder_task(
token: 'auth.JWT',
task_id: 'typevars.ObjectID',
before_id: 'Optional[typevars.ObjectID]' = None,
after_id: 'Optional[typevars.ObjectID]' = None
) -> 'List[models.Task]':
"""Change the position of the task in the list."""
if before_id is None and after_id is None:
raise util_errors.APIError(
'One of before_id or after_id must be provided', 400)
if task_id == before_id or task_id == after_id:
raise util_errors.APIError(
'Task cannot be before or after itself', 400)
before = None
after = None
(task, before, after) = auth.load_owned_objects(
models.Task, token, 'get tasks', task_id, before_id, after_id)
if before is None:
before = after.before
if after is None:
after = before.after
if (
(before is not None and before.after is not after) or
(after is not None and after.before is not before)):
raise util_errors.APIError(
'Before and after tasks are not adjacent', 400)
mutated = [before, after, task, task.before, task.after]
if before is not None and task.parent is not before.parent:
mutated.extend([task.parent, before.parent])
check_reparent(task, before.parent)
elif after is not None and task.parent is not after.parent:
mutated.extend([task.parent, after.parent])
check_reparent(task, after.parent)
if task.before is not None:
task.before.after = task.after
elif task.after is not None:
task.after.before = None
task.before = before
task.after = after
db.DB.session.commit()
return [m for m in set(mutated) if m is not None]
| 11,847
|
def get_ldpc_code_params(ldpc_design_filename):
"""
Extract parameters from LDPC code design file.
Parameters
----------
ldpc_design_filename : string
Filename of the LDPC code design file.
Returns
-------
ldpc_code_params : dictionary
Parameters of the LDPC code.
"""
ldpc_design_file = open(ldpc_design_filename)
ldpc_code_params = {}
[n_vnodes, n_cnodes] = [int(x) for x in ldpc_design_file.readline().split(' ')]
[max_vnode_deg, max_cnode_deg] = [int(x) for x in ldpc_design_file.readline().split(' ')]
vnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_adj_list = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_adj_list = -np.ones([n_vnodes, max_vnode_deg], int)
for vnode_idx in range(n_vnodes):
vnode_adj_list[vnode_idx, 0:vnode_deg_list[vnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
for cnode_idx in range(n_cnodes):
cnode_adj_list[cnode_idx, 0:cnode_deg_list[cnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
cnode_vnode_map = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_cnode_map = -np.ones([n_vnodes, max_vnode_deg], int)
cnode_list = np.arange(n_cnodes)
vnode_list = np.arange(n_vnodes)
for cnode in range(n_cnodes):
for i, vnode in enumerate(cnode_adj_list[cnode, 0:cnode_deg_list[cnode]]):
cnode_vnode_map[cnode, i] = cnode_list[np.where(vnode_adj_list[vnode, :] == cnode)]
for vnode in range(n_vnodes):
for i, cnode in enumerate(vnode_adj_list[vnode, 0:vnode_deg_list[vnode]]):
vnode_cnode_map[vnode, i] = vnode_list[np.where(cnode_adj_list[cnode, :] == vnode)]
cnode_adj_list_1d = cnode_adj_list.flatten().astype(np.int32)
vnode_adj_list_1d = vnode_adj_list.flatten().astype(np.int32)
cnode_vnode_map_1d = cnode_vnode_map.flatten().astype(np.int32)
vnode_cnode_map_1d = vnode_cnode_map.flatten().astype(np.int32)
pmat = np.zeros([n_cnodes, n_vnodes], int)
for cnode_idx in range(n_cnodes):
pmat[cnode_idx, cnode_adj_list[cnode_idx, :]] = 1
ldpc_code_params['n_vnodes'] = n_vnodes
ldpc_code_params['n_cnodes'] = n_cnodes
ldpc_code_params['max_cnode_deg'] = max_cnode_deg
ldpc_code_params['max_vnode_deg'] = max_vnode_deg
ldpc_code_params['cnode_adj_list'] = cnode_adj_list_1d
ldpc_code_params['cnode_vnode_map'] = cnode_vnode_map_1d
ldpc_code_params['vnode_adj_list'] = vnode_adj_list_1d
ldpc_code_params['vnode_cnode_map'] = vnode_cnode_map_1d
ldpc_code_params['cnode_deg_list'] = cnode_deg_list
ldpc_code_params['vnode_deg_list'] = vnode_deg_list
ldpc_design_file.close()
return ldpc_code_params
| 11,848
|
def find_collection(*, collection, name):
"""
Looks through the pages of a collection for a resource with the specified name.
Returns it, or if not found, returns None
"""
if isinstance(collection, ProjectCollection):
try:
# try to use search if it is available
# call list() to collapse the iterator, otherwise the NotFound
# won't show up until collection_list is used
collection_list = list(collection.search(search_params={
"name": {
"value": name,
"search_method": "EXACT"
}
}))
except NotFound:
# Search must not be available yet
collection_list = collection.list()
else:
collection_list = collection.list()
matching_resources = [resource for resource in collection_list if resource.name == name]
if len(matching_resources) > 1:
raise ValueError("Found multiple collections with name '{}'".format(name))
if len(matching_resources) == 1:
result = matching_resources.pop()
print('Found existing: {}'.format(result))
return result
else:
return None
| 11,849
|
def terms_documents_matrix_ticcl_frequency(in_files):
"""Returns a terms document matrix and related objects of a corpus
A terms document matrix contains frequencies of wordforms, with wordforms
along one matrix axis (columns) and documents along the other (rows).
Inputs:
in_files: list of ticcl frequency files (one per document in the
corpus)
Returns:
corpus: a sparse terms documents matrix
vocabulary: the vectorizer object containing the vocabulary (i.e., all word forms
in the corpus)
"""
vocabulary = DictVectorizer()
corpus = vocabulary.fit_transform(ticcl_frequency(in_files))
return corpus, vocabulary
| 11,850
|
def cvt_continue_stmt(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
"""continue_stmt: 'continue'"""
#-# Continue
assert ctx.is_REF, [node]
return ast_cooked.ContinueStmt()
| 11,851
|
def nth_even(n):
"""Function I wrote that returns the nth even number."""
return (n * 2) - 2
| 11,852
|
def _has__of__(obj):
"""Check whether an object has an __of__ method for returning itself
in the context of a container."""
    # It is necessary to check both the type (or we get into cycles) and the
    # presence of the method, because either check alone can fail, e.g. for
    # mixins of Base applied pre- or post-class-creation (as done in
    # zopefoundation/Persistence).
return isinstance(obj, ExtensionClass.Base) and hasattr(type(obj), '__of__')
| 11,853
|
def diff_main(args, output):
"""
Different stats depending on the args
:param output:
:return:
"""
f = open(os.path.join(output, "avg_methylation_60.out"), "w")
all_cpg_format_file_paths = create_chr_paths_dict(args)
for chr in tqdm(all_cpg_format_file_paths):
v = methylation_diff(all_cpg_format_file_paths[chr])
f.write("chr%s:%s\n" % (chr, v))
print(v)
| 11,854
|
def grayscale(img):
"""
Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')
"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
| 11,855
|
def get_pageinfo(response, tracktype='recenttracks'):
"""Check how many pages of tracks the user have."""
xmlpage = ET.fromstring(response)
totalpages = xmlpage.find(tracktype).attrib.get('totalPages')
return int(totalpages)
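# Minimal illustration with a hand-written last.fm-style XML payload (hypothetical data);
# assumes xml.etree.ElementTree is imported as ET, as the function requires.
sample_response = '<lfm><recenttracks totalPages="12"></recenttracks></lfm>'
assert get_pageinfo(sample_response) == 12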
| 11,856
|
def test_delete_not_contained_2(multi_tree):
"""Test that delete leaves the list intact if value not in tree."""
multi_tree.instance.delete(9999999999999)
assert multi_tree.instance.size() == multi_tree.size
| 11,857
|
def _create_dispatcher_class(cls, classname, bases, dict_):
"""Create a :class:`._Dispatch` class corresponding to an
:class:`.Events` class."""
# there's all kinds of ways to do this,
# i.e. make a Dispatch class that shares the '_listen' method
# of the Event class, this is the straight monkeypatch.
dispatch_base = getattr(cls, 'dispatch', _Dispatch)
cls.dispatch = dispatch_cls = type("%sDispatch" % classname,
(dispatch_base, ), {})
dispatch_cls._listen = cls._listen
for k in dict_:
if _is_event_name(k):
setattr(dispatch_cls, k, _DispatchDescriptor(dict_[k]))
_registrars[k].append(cls)
| 11,858
|
def MIN(*args):
"""Return the minimum of a range or list of Number or datetime"""
return _group_function(min, *args)
| 11,859
|
def GF(order, irreducible_poly=None, primitive_element=None, verify_irreducible=True, verify_primitive=True, mode="auto", target="cpu"):
"""
Factory function to construct a Galois field array class of type :math:`\\mathrm{GF}(p^m)`.
The created class will be a subclass of :obj:`galois.FieldArray` with metaclass :obj:`galois.FieldMeta`.
The :obj:`galois.FieldArray` inheritance provides the :obj:`numpy.ndarray` functionality. The :obj:`galois.FieldMeta` metaclass
provides a variety of class attributes and methods relating to the finite field.
Parameters
----------
order : int
The order :math:`p^m` of the field :math:`\\mathrm{GF}(p^m)`. The order must be a prime power.
irreducible_poly : int, galois.Poly, optional
Optionally specify an irreducible polynomial of degree :math:`m` over :math:`\\mathrm{GF}(p)` that will
define the Galois field arithmetic. An integer may be provided, which is the integer representation of the
irreducible polynomial. Default is `None` which uses the Conway polynomial :math:`C_{p,m}` obtained from :func:`galois.conway_poly`.
primitive_element : int, galois.Poly, optional
Optionally specify a primitive element of the field :math:`\\mathrm{GF}(p^m)`. A primitive element is a generator of
the multiplicative group of the field. For prime fields :math:`\\mathrm{GF}(p)`, the primitive element must be an integer
and is a primitive root modulo :math:`p`. For extension fields :math:`\\mathrm{GF}(p^m)`, the primitive element is a polynomial
of degree less than :math:`m` over :math:`\\mathrm{GF}(p)` or its integer representation. The default is `None` which uses
:obj:`galois.primitive_root(p)` for prime fields and :obj:`galois.primitive_element(irreducible_poly)` for extension fields.
verify_irreducible : bool, optional
Indicates whether to verify that the specified irreducible polynomial is in fact irreducible. The default is
`True`. For large irreducible polynomials that are already known to be irreducible (and may take a long time to verify),
this argument can be set to `False`.
verify_primitive : bool, optional
Indicates whether to verify that the specified primitive element is in fact a generator of the multiplicative group.
The default is `True`.
mode : str, optional
The type of field computation, either `"auto"`, `"jit-lookup"`, or `"jit-calculate"`. The default is `"auto"`.
The "jit-lookup" mode will use Zech log, log, and anti-log lookup tables for efficient calculation. The "jit-calculate"
mode will not store any lookup tables, but instead perform field arithmetic on the fly. The "jit-calculate" mode is
designed for large fields that cannot or should not store lookup tables in RAM. Generally, "jit-calculate" mode will
be slower than "jit-lookup". The "auto" mode will determine whether to use "jit-lookup" or "jit-calculate" based on the field's
size. In "auto" mode, field's with `order <= 2**16` will use the "jit-lookup" mode.
target : str, optional
The `target` keyword argument from :func:`numba.vectorize`, either `"cpu"`, `"parallel"`, or `"cuda"`.
Returns
-------
galois.FieldMeta
A new Galois field array class that is a subclass of :obj:`galois.FieldArray` with :obj:`galois.FieldMeta` metaclass.
Examples
--------
Construct a Galois field array class with default irreducible polynomial and primitive element.
.. ipython:: python
# Construct a GF(2^m) class
GF256 = galois.GF(2**8)
# Notice the irreducible polynomial is primitive
print(GF256.properties)
poly = GF256.irreducible_poly
Construct a Galois field specifying a specific irreducible polynomial.
.. ipython:: python
# Field used in AES
GF256_AES = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]))
print(GF256_AES.properties)
# Construct a GF(p) class
GF571 = galois.GF(571); print(GF571.properties)
# Construct a very large GF(2^m) class
GF2m = galois.GF(2**100); print(GF2m.properties)
# Construct a very large GF(p) class
GFp = galois.GF(36893488147419103183); print(GFp.properties)
See :obj:`galois.FieldArray` for more examples of what Galois field arrays can do.
"""
if not isinstance(order, int):
raise TypeError(f"Argument `order` must be an integer, not {type(order)}.")
p, k = prime_factors(order)
if not len(p) == len(k) == 1:
s = " + ".join([f"{pp}**{kk}" for pp, kk in zip(p, k)])
raise ValueError(f"Argument `order` must be a prime power, not {order} = {s}.")
p, m = p[0], k[0]
if m == 1:
if not irreducible_poly is None:
raise ValueError(f"Argument `irreducible_poly` can only be specified for prime fields, not the extension field GF({p}^{m}).")
return GF_prime(p, primitive_element=primitive_element, verify_primitive=verify_primitive, target=target, mode=mode)
else:
return GF_extension(p, m, primitive_element=primitive_element, irreducible_poly=irreducible_poly, verify_primitive=verify_primitive, verify_irreducible=verify_irreducible, target=target, mode=mode)
| 11,860
|
def remove_fields_with_value_none(fields: typing.Dict) -> typing.Dict:
"""
Remove keys whose value is none
:param fields: the fields to clean
:return: a copy of fields, without the none values
"""
fields = dict((key, value) for key, value in fields.items() if
value is not None) # Strip out none values
return fields
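# Quick example: only keys whose value is None are dropped; falsy values such as 0 survive.
assert remove_fields_with_value_none({"a": 1, "b": None, "c": 0}) == {"a": 1, "c": 0}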
| 11,861
|
def _register_services(hass):
"""Register Mixergy services."""
async def mixergy_set_charge(call):
tasks = [
tank.set_target_charge(call.data)
for tank in hass.data[DOMAIN].values()
if isinstance(tank, Tank)
]
results = await asyncio.gather(*tasks)
# Note that we'll get a "None" value for a successful call
if None not in results:
_LOGGER.warning("The request to charge the tank did not succeed")
if not hass.services.has_service(DOMAIN, SERVICE_SET_CHARGE):
# Register a local handler for scene activation
hass.services.async_register(
DOMAIN,
SERVICE_SET_CHARGE,
verify_domain_control(hass, DOMAIN)(mixergy_set_charge),
schema=vol.Schema(
{
vol.Required(ATTR_CHARGE): cv.positive_int
}
),
)
| 11,862
|
def get_repository(repository_id):
"""
Get the repository service address from accounts service
for storing data.
:param repository_id: the repository ID
:return: url of the repository url
:raise: HTTPError
"""
client = API(options.url_accounts, ssl_options=ssl_server_options())
try:
response = yield client.accounts.repositories[repository_id].get()
logging.debug(response['data'])
raise Return(response['data'])
except KeyError:
error = 'Cannot find a repository'
raise exceptions.HTTPError(404, error)
| 11,863
|
def test_all_fields_exist_in_the_json_file():
"""
    Check the validity of the file itself.
"""
with open(SLOWO_NA_DZIS_PATH, 'r', encoding='UTF-8') as plik:
slowo_na_dzis = json.load(plik)
for cytat in slowo_na_dzis:
pprint(cytat)
assert_quote_slowo_na_dzis_json(cytat)
| 11,864
|
def make_values(params, point): #240 (line num in coconut source)
"""Return a dictionary with the values replaced by the values in point,
where point is a list of the values corresponding to the sorted params.""" #242 (line num in coconut source)
values = {} #243 (line num in coconut source)
for i, k in (enumerate)((sorted)(params)): #244 (line num in coconut source)
values[k] = point[i] #245 (line num in coconut source)
return values
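# Small example: the point values are assigned to the parameter names in sorted order.
assert make_values({"beta": None, "alpha": None}, [1, 2]) == {"alpha": 1, "beta": 2}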
| 11,865
|
def elbow_method(data):
"""
    This function computes the elbow method and generates an elbow visualization
:param data: 2 columns dataframe for cluster analysis
:return: Plotly Figures
"""
distortions = []
K = range(1, 10)
for k in K:
elbow_kmean = model_kmeans(data, k)
distortions.append(elbow_kmean.inertia_)
elbow = pd.DataFrame({'k': K,
'inertia': distortions})
fig = go.Figure(data=go.Scatter(x=elbow['k'], y=elbow['inertia']))
fig.update_layout(title='Elbows Methods for finding best K values in KMeans',
xaxis_title='K',
yaxis_title='Inertia')
return fig
| 11,866
|
def _configure_logging(verbose=False, debug=False):
"""Configure the log global, message format, and verbosity settings."""
overall_level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(
format=('{levelname[0]}{asctime}.{msecs:03.0f} {thread} '
'{filename}:{lineno}] {message}'),
datefmt='%m%d %H:%M:%S',
style='{',
level=overall_level)
global log
log = logging.getLogger('portserver')
# The verbosity controls our loggers logging level, not the global
# one above. This avoids debug messages from libraries such as asyncio.
log.setLevel(logging.DEBUG if verbose else overall_level)
| 11,867
|
def sa_middleware(key: str = DEFAULT_KEY) -> 'Callable':
""" SQLAlchemy asynchronous middleware factory. """
@middleware
async def sa_middleware_(request: 'Request', handler: 'Callable')\
-> 'StreamResponse':
if key in request:
raise DuplicateRequestKeyError(key)
Session = request.config_dict.get(key)
async with Session() as request[key]:
return await handler(request)
return sa_middleware_
| 11,868
|
def load_kmeans_dataset():
"""A dataset with two 'time slices'
- a slice with two clear clusters along the x axis _now_ timestamp and
- a slice with two clear clusters along the y axis with tomorrow's timestamp
    This will serve to test whether the when clause was applied correctly"""
kmeans_example = mldb.create_dataset({
"type": "sparse.mutable",
'id' : 'kmeans_example'
})
for i in range(1,3):
for j in range(0,100):
val_x = float(random.randint(-5, 5))
val_y = float(random.randint(-5, 5))
row = [
['x', val_x + 10**i, now], ['x', val_x, same_time_tomorrow],
['y', val_y, now], ['y', val_y + 10**i, same_time_tomorrow],
]
kmeans_example.record_row('row_%d_%d' % (i, j), row)
kmeans_example.commit()
| 11,869
|
def _check_id_format(feature_id):
"""
This function check the id format.
"""
id_format = "^(\w+).+\S$"
if not re.match(id_format, feature_id):
msg = "The id field cannot contains whitespaces."
raise exceptions.BadRequest({"message": msg})
| 11,870
|
def dice_loss(y_true, y_pred):
"""
dice_loss
"""
smooth = 1.
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
dice_coef = (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + \
K.sum(K.square(y_pred),-1) + smooth)
return 1 - dice_coef
| 11,871
|
def webdriver_init(mobile):
"""
Initialize a mobile/desktop web driver.
    This initializes a web driver with a default user agent chosen according to the mobile
    flag. Default user agents are defined by MOBILE_USER_AGENT and DESKTOP_USER_AGENT.
    :param mobile: The mobile flag
    :type mobile: bool
:return: A web driver
:rtype: WebDriver
"""
if mobile:
return webdriver_init_with_caps(MOBILE_USER_AGENT)
else:
return webdriver_init_with_caps(DESKTOP_USER_AGENT)
| 11,872
|
def calculate_file_hash(f, alg, buf_size):
"""BUF_SIZE - 64 kb
need for large file"""
h = hashlib.new(alg)
for chunk in iter(lambda: f.read(buf_size), b""):
h.update(chunk)
return h.hexdigest()
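# Usage sketch with the 64 KiB buffer the docstring suggests; the path is hypothetical and
# the file must be opened in binary mode because the chunks are compared against b"".
BUF_SIZE = 64 * 1024
with open("some_large_file.bin", "rb") as f:
    digest = calculate_file_hash(f, "sha256", BUF_SIZE)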
| 11,873
|
def update(A, B, DA, DB, f, k, delta_t):
"""Apply the Gray-Scott update formula"""
# compute the diffusion part of the update
diff_A = DA * apply_laplacian(A)
diff_B = DB * apply_laplacian(B)
# Apply chemical reaction
reaction = A*B**2
diff_A -= reaction
diff_B += reaction
# Apply birth/death
diff_A += f * (1-A)
diff_B -= (k+f) * B
A += diff_A * delta_t
B += diff_B * delta_t
return A, B
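# `update` relies on an apply_laplacian helper that is not shown above. A common choice is
# a 5-point stencil with periodic boundaries; this is an assumption, not necessarily the
# author's implementation.
import numpy as np

def apply_laplacian(M):
    """Discrete 5-point Laplacian of a 2-D grid with periodic boundary conditions."""
    return (np.roll(M, 1, axis=0) + np.roll(M, -1, axis=0)
            + np.roll(M, 1, axis=1) + np.roll(M, -1, axis=1)
            - 4.0 * M)

# With that in place, one Gray-Scott step on random 64x64 grids looks like:
# A, B = update(np.random.rand(64, 64), np.random.rand(64, 64),
#               DA=0.16, DB=0.08, f=0.035, k=0.065, delta_t=1.0)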
| 11,874
|
def sha512(data: typing.Optional[bytes] = None):
"""Returns a sha512 hash object; optionally initialized with a string."""
if data is None:
return hashlib.sha512()
return hashlib.sha512(data)
| 11,875
|
def delete_novel(
id_or_url: str,
novel_service: BaseNovelService = Provide[Application.services.novel_service],
path_service: BasePathService = Provide[Application.services.path_service],
):
"""delete all records of novel. this includes chapters, and assets"""
try:
novel = cli_helpers.get_novel(id_or_url)
except ValueError:
sys.exit(1)
logger.info(f"Deleting '{novel.title}' ({novel.id})…")
novel_dir = path_service.novel_data_path(novel)
if novel_dir.exists():
shutil.rmtree(novel_dir)
logger.info(
f"Deleted data of novel: {{data.dir}}/{path_service.relative_to_data_dir(novel_dir)}."
)
novel_service.delete_novel(novel)
logger.info("Deleted novel entry.")
| 11,876
|
def combine_small_file_to_big():
"""Merge multiple csv into one based on header
https://stackoverflow.com/questions/44791212/concatenating-multiple-csv-files-into-a-single-csv-with-the-same-header-python/44791368
"""
#import csv files from folder
path = r'./'
allFiles = glob.glob(path + "/*.csv")
allFiles.sort() # glob lacks reliable ordering, so impose your own if output order matters
with open('combined_data.csv', 'wb') as outfile:
for i, fname in enumerate(allFiles):
with open(fname, 'rb') as infile:
if i != 0:
infile.readline() # Throw away header on all but first file
# Block copy rest of file from input to output without parsing
shutil.copyfileobj(infile, outfile)
print(fname + " has been imported.")
| 11,877
|
def _circle_ci_pr():
"""Get the current CircleCI pull request (if any).
Returns:
Optional[int]: The current pull request ID.
"""
try:
return int(os.getenv(env.CIRCLE_CI_PR_NUM, ''))
except ValueError:
return None
| 11,878
|
def get_entropy_of_maxes():
"""
    Specialized code for retrieving the guesses and confidence of the largest model of each
    type on the images that give the largest entropy.
:return: dict containing the models predictions and confidence, as well as the correct label under "y".
"""
high_entropy_list = get_high_entropy_mnist_test()
d = {}
images = []
values = []
for i in high_entropy_list:
images.append(i[0])
values.append(i[1])
d["y"] = np.array(values)
d["d"] = []
d["f"] = []
model_paths = ["ffnn_models", "dropout_models"]
for model in model_paths:
pred = model_predictor(model + "/model_50000", np.array(images), np.array(values))[0]
for i in pred:
d[model[0]].append((np.argmax(i), i))
return d
| 11,879
|
def get_all_links() -> List[Dict[str, Any]]:
"""Returns all links as an iterator"""
return get_entire_collection(LINKS_COLLECTION)
| 11,880
|
def agent(states, actions):
"""
    Create a simple fully connected Q-network with Keras: a flattened state input,
    three hidden ReLU layers, and one linear output per action.
"""
model = Sequential()
model.add(Flatten(input_shape=(1, states)))
model.add(Dense(24, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(actions, activation='linear'))
return model
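The function assumes Sequential, Flatten and Dense are imported at module level; a sketch of those imports and of a call for CartPole-sized dimensions (the tensorflow.keras import paths and the 4-state/2-action shapes are assumptions):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten

model = agent(states=4, actions=2)
model.summary()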
| 11,881
|
def search_configs(mwdb, formatter, query, limit):
"""
Search configs
"""
recent = islice(mwdb.search_configs(query), limit)
formatter.print_list(recent, formatter.format_config_list)
| 11,882
|
def exercise_2():
"""Get Variable Value"""
price = 10
print(price)
| 11,883
|
def get_package_nvr_from_spec(spec_file):
"""
    Return the NVR (name-version-release) string for a given spec file,
    with the trailing dist tag stripped.
    :param spec_file: The path to a spec file
    :type spec_file: str
    :return: the NVR that should be built for that spec file
    :rtype: str
"""
# Get the dep name & version
spec = rpm.spec(spec_file)
package_nvr = spec.sourceHeader[rpm.RPMTAG_NVR]
# split the dist from the end of the nvr
package_nvr = package_nvr[:package_nvr.rfind('.')]
return package_nvr
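A hypothetical call (the spec path is made up); the dist suffix at the end of the NVR, e.g. ".fc38", is what gets stripped:

nvr = get_package_nvr_from_spec("/path/to/mypackage.spec")
print(nvr)  # e.g. "mypackage-1.2.3-4"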
| 11,884
|
def rbo_ext(S, T, p):
"""Extrapolated RBO value as defined in equation (30).
Implementation handles uneven lists but not ties.
"""
if len(S) > len(T):
L, S = S, T
else:
L, S = T, S
l, s = len(L), len(S)
xl = overlap(L, S, l)
xs = overlap(L, S, s)
sum1 = sum(overlap(L, S, d) / d * p ** d for d in range(1, l + 1))
sum2 = sum(xs * (d - s) / (s * d) * p ** d for d in range(s + 1, l + 1))
return (1 - p) / p * (sum1 + sum2) + ((xl - xs) / l + xs / s) * p ** l
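rbo_ext depends on an overlap helper that is not shown. Under the standard RBO definition (no ties), the overlap at depth d is the size of the intersection of the two depth-d prefixes; a minimal sketch under that assumption:

def overlap(L, S, d):
    # |L[:d] ∩ S[:d]|: how many items the two depth-d prefixes share.
    return len(set(L[:d]) & set(S[:d]))

# e.g. rbo_ext(["a", "b", "c", "d"], ["b", "a", "c"], p=0.9)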
| 11,885
|
def index(request):
""" View of index page """
title = _("Home")
posts = Post.objects.all().order_by('-timestamp')[:5]
    # locals() hands `title` and `posts` to the template as context variables
    return render(request, 'dashboard/index.html', locals())
| 11,886
|
def incremental_str_maker(str_format='{:03d}'):
    """Make a function that produces a new, incrementally numbered string on every call."""
i = 0
def mk_next_str():
nonlocal i
i += 1
return str_format.format(i)
return mk_next_str
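Usage sketch: each call returns the next zero-padded name.

mk_name = incremental_str_maker('file_{:03d}.txt')
print(mk_name())  # file_001.txt
print(mk_name())  # file_002.txt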
| 11,887
|
def get_time_slider_range(highlighted=True,
withinHighlighted=True,
highlightedOnly=False):
"""Return the time range from Maya's time slider.
    Arguments:
        highlighted (bool): When True, return the highlighted frame range
            (if more than one frame is selected); otherwise fall back to the
            min and max playback time.
        withinHighlighted (bool): Maya reports the highlighted range end as a
            plus-one value; when True, one is subtracted from the last number
            to correct for that.
        highlightedOnly (bool): When True, do not fall back to the playback
            range; None is returned if nothing is highlighted.
    Returns:
        list or None: Start and end frame numbers, or None when
            `highlightedOnly` is True and no range is highlighted.
"""
if highlighted is True:
gPlaybackSlider = mel.eval("global string $gPlayBackSlider; "
"$gPlayBackSlider = $gPlayBackSlider;")
if cmds.timeControl(gPlaybackSlider, query=True, rangeVisible=True):
highlightedRange = cmds.timeControl(gPlaybackSlider,
query=True,
rangeArray=True)
if withinHighlighted:
highlightedRange[-1] -= 1
return highlightedRange
if not highlightedOnly:
return [cmds.playbackOptions(query=True, minTime=True),
cmds.playbackOptions(query=True, maxTime=True)]
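A usage sketch meant to run inside a Maya session (the playbackOptions edit is illustrative, not part of the original):

start, end = get_time_slider_range()
cmds.playbackOptions(edit=True, minTime=start, maxTime=end)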
| 11,888
|
def update_IW(hyp_D_prev, xikk, xk, Pik_old):
"""
Do an update of Norm-IW conjugate in an exponential form.
"""
suff_D = get_suff_IW_conj(xikk, xk, Pik_old)
hyp_D = hyp_D_prev + suff_D
Dik = get_E_IW_hyp(hyp_D)
return Dik, hyp_D
| 11,889
|
def test_existing_path_FileLink():
"""FileLink: Calling _repr_html_ functions as expected on existing filepath
"""
tf = NamedTemporaryFile()
fl = display.FileLink(tf.name)
actual = fl._repr_html_()
expected = "<a href='files/%s' target='_blank'>%s</a><br>" % (tf.name,tf.name)
nt.assert_equal(actual,expected)
| 11,890
|
def compute_state(observations, configuration):
"""
:param observations:
:param configuration:
:return StateTensor:
"""
StateTensorType = configuration.STATE_TYPE
return StateTensorType([observations])
| 11,891
|
def supports_transfer_syntax(transfer_syntax: pydicom.uid.UID) -> bool:
"""Return ``True`` if the handler supports the `transfer_syntax`.
Parameters
----------
transfer_syntax : uid.UID
The Transfer Syntax UID of the *Pixel Data* that is to be used with
the handler.
"""
return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES
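A usage sketch with a standard pydicom UID; whether it returns True depends on the handler's SUPPORTED_TRANSFER_SYNTAXES list:

from pydicom.uid import ExplicitVRLittleEndian

if supports_transfer_syntax(ExplicitVRLittleEndian):
    print("handler can decode Explicit VR Little Endian pixel data")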
| 11,892
|
def test_item_many(testapp, amount):
"""reStructuredText processor has to work with stream."""
stream = restructuredtext.process(
testapp,
[
holocron.Item(
{
"content": "the key is **%d**" % i,
"destination": pathlib.Path("1.rst"),
}
)
for i in range(amount)
],
)
assert isinstance(stream, collections.abc.Iterable)
assert list(stream) == [
holocron.Item(
{
"content": "<p>the key is <strong>%d</strong></p>" % i,
"destination": pathlib.Path("1.html"),
}
)
for i in range(amount)
]
| 11,893
|
def enhance_user(user, json_safe=False):
"""
Adds computed attributes to AD user results
Args:
user: A dictionary of user attributes
        json_safe: If True, converts binary data into base64
            and datetimes into human-readable strings
Returns:
An enhanced dictionary of user attributes
"""
if "memberOf" in user.keys():
user["memberOf"] = sorted(user["memberOf"], key=lambda dn: dn.lower())
if "showInAddressBook" in user.keys():
user["showInAddressBook"] = sorted(user["showInAddressBook"], key=lambda dn: dn.lower())
if "lastLogonTimestamp" in user.keys():
user["lastLogonTimestamp"] = _get_last_logon(user["lastLogonTimestamp"])
if "lockoutTime" in user.keys():
user["lockoutTime"] = convert_ad_timestamp(user["lockoutTime"], json_safe=json_safe)
if "pwdLastSet" in user.keys():
user["pwdLastSet"] = convert_ad_timestamp(user["pwdLastSet"], json_safe=json_safe)
if "userAccountControl" in user.keys():
user["userAccountControl"] = int(user["userAccountControl"])
user["disabled"] = user["userAccountControl"] & 2 != 0
user["passwordExpired"] = user["userAccountControl"] & 8388608 != 0
user["passwordNeverExpires"] = user["userAccountControl"] & 65536 != 0
user["smartcardRequired"] = user["userAccountControl"] & 262144 != 0
if "whenCreated" in user.keys():
user["whenCreated"] = convert_ad_timestamp(user["whenCreated"], json_safe=json_safe)
if "msExchRecipientTypeDetails" in user.keys():
user["msExchRecipientTypeDetails"] = int(user["msExchRecipientTypeDetails"])
user["remoteExchangeMailbox"] = user["msExchRecipientTypeDetails"] in remote_exchange_mailbox_values
user["exchangeMailbox"] = user["msExchRecipientTypeDetails"] in exchange_mailbox_values.keys()
if user["exchangeMailbox"]:
user["exchangeMailboxType"] = exchange_mailbox_values[user["msExchRecipientTypeDetails"]]
return user
| 11,894
|
def create_data(namespace_id, ocs_client):
"""Creates sample data for the script to use"""
double_type = SdsType(id='doubleType', sdsTypeCode=SdsTypeCode.Double)
datetime_type = SdsType(
id='dateTimeType', sdsTypeCode=SdsTypeCode.DateTime)
pressure_property = SdsTypeProperty(id='pressure', sdsType=double_type)
temperature_property = SdsTypeProperty(id=SAMPLE_FIELD_TO_CONSOLIDATE_TO,
sdsType=double_type)
ambient_temperature_property = SdsTypeProperty(id=SAMPLE_FIELD_TO_CONSOLIDATE,
sdsType=double_type)
time_property = SdsTypeProperty(id='time', sdsType=datetime_type,
isKey=True)
sds_type_1 = SdsType(
id=SAMPLE_TYPE_ID_1,
description='This is a sample Sds type for storing Pressure type '
'events for Data Views',
sdsTypeCode=SdsTypeCode.Object,
properties=[pressure_property, temperature_property, time_property])
sds_type_2 = SdsType(
id=SAMPLE_TYPE_ID_2,
description='This is a new sample Sds type for storing Pressure type '
'events for Data Views',
sdsTypeCode=SdsTypeCode.Object,
properties=[pressure_property, ambient_temperature_property, time_property])
print('Creating SDS Types...')
ocs_client.Types.getOrCreateType(namespace_id, sds_type_1)
ocs_client.Types.getOrCreateType(namespace_id, sds_type_2)
stream1 = SdsStream(
id=SAMPLE_STREAM_ID_1,
name=SAMPLE_STREAM_NAME_1,
description='A Stream to store the sample Pressure events',
typeId=SAMPLE_TYPE_ID_1)
stream2 = SdsStream(
id=SAMPLE_STREAM_ID_2,
name=SAMPLE_STREAM_NAME_2,
description='A Stream to store the sample Pressure events',
typeId=SAMPLE_TYPE_ID_2)
print('Creating SDS Streams...')
ocs_client.Streams.createOrUpdateStream(namespace_id, stream1)
ocs_client.Streams.createOrUpdateStream(namespace_id, stream2)
sample_start_time = datetime.datetime.now() - datetime.timedelta(hours=1)
sample_end_time = datetime.datetime.now()
values1 = []
values2 = []
def value_with_time(timestamp, value, field_name, value2):
"""Formats a JSON data object"""
return f'{{"time": "{timestamp}", "pressure": {str(value)}, "{field_name}": {str(value2)}}}'
print('Generating values...')
for i in range(1, 30, 1):
timestamp = (sample_start_time + datetime.timedelta(minutes=i * 2)
).isoformat(timespec='seconds')
val1 = value_with_time(timestamp, random.uniform(
0, 100), SAMPLE_FIELD_TO_CONSOLIDATE_TO, random.uniform(50, 70))
val2 = value_with_time(timestamp, random.uniform(
0, 100), SAMPLE_FIELD_TO_CONSOLIDATE, random.uniform(50, 70))
values1.append(val1)
values2.append(val2)
    print('Sending values...')
    # values1/values2 already hold JSON object strings; str(list) wraps them in
    # single quotes, which are stripped here to form a valid JSON array payload.
ocs_client.Streams.insertValues(
namespace_id,
SAMPLE_STREAM_ID_1,
str(values1).replace("'", ""))
ocs_client.Streams.insertValues(
namespace_id,
SAMPLE_STREAM_ID_2,
str(values2).replace("'", ""))
return (sample_start_time, sample_end_time)
| 11,895
|
def bootstrap_aws(c):
"""Bootstrap AWS account for use with cdk."""
c.run("cdk bootstrap aws://$AWS_ACCOUNT/$AWS_DEFAULT_REGION")
| 11,896
|
def enhanced_feature_extractor_digit(datum):
"""Feature extraction playground for digits.
You should return a util.Counter() of features
for this datum (datum is of type samples.Datum).
## DESCRIBE YOUR ENHANCED FEATURES HERE...
"""
features = basic_feature_extractor_digit(datum)
"*** YOUR CODE HERE ***"
util.raise_not_defined()
return features
| 11,897
|
def train_8x8_digits(p=0.9, h=None, noise_strength=0, n_images=2, plot_cve=False):
"""
    Built-in autoencoder training and image display using the MNIST 8x8 digits.
    Set noise_strength to a value between 0 and 1 to add noise to the test samples.
    :param p: Total variance explained to be used
    :param h: Number of principal components to use. If left blank or None, it is derived from p.
    :param noise_strength: Strength of the noise added to the test signal. Use a value between 0 and 1;
        0.001 to 0.1 is recommended, anything greater will not be recognizable.
    :param n_images: Number of images to be shown
    :param plot_cve: If True, plots the cumulative explained variance vs. the number of principal components
"""
training_samples, training_y, test_samples, test_y, image_shape = load_8x8_digits()
auto = autoencoder.autoencoder()
auto.pca_train(samples=training_samples, p=p, h=h)
if noise_strength != 0:
test_samples = add_noise(test_samples, strength=noise_strength)
auto.encode(test_samples=test_samples)
auto.decode()
random_indexes = sample(range(0, len(test_samples)), n_images)
for i in random_indexes:
plt.subplot(1, 2, 1)
plt.title('Test Sample: ' + str(test_y[i]))
plt.imshow(test_samples[i].reshape(image_shape), cmap='gray')
plt.subplot(1, 2, 2)
plt.title('Decoded Sample: ' + str(test_y[i]))
plt.imshow(auto.decoded_data[i].reshape(image_shape), cmap='gray')
plt.show()
if plot_cve:
auto.plot_cve()
| 11,898
|
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TEST_SCRIPT))
| 11,899
|