content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import hashlib
def get_checksum(file_name: str) -> str:
    """Return the SHA-224 checksum (hex digest) of the file at *file_name*.

    Args:
        file_name: Path of the file to hash.

    Returns:
        Hexadecimal SHA-224 digest of the file's contents.

    Raises:
        OSError: If the file cannot be opened or read.
    """
    sha_hash = hashlib.sha224()
    # ``with`` guarantees the handle is closed even if read() raises;
    # the previous version leaked the file object on error.
    with open(file_name, "rb") as a_file:
        sha_hash.update(a_file.read())
    return sha_hash.hexdigest()
from sys import path
def find_tool(name, additional_paths = [], path_last = False):
    """ Attempts to locate the tool (binary) called 'name' in PATH and in
        'additional_paths'.  If found in PATH, returns 'name'; if found in
        the additional paths, returns the full path.  When the tool exists
        in several directories, the first hit wins.  Returns the empty
        string when nothing is found.  With 'path_last', PATH is consulted
        only after 'additional_paths'.
    """
    assert isinstance(name, basestring)
    assert is_iterable_typed(additional_paths, basestring)
    assert isinstance(path_last, (int, bool))
    candidates = [name, name + '.exe']
    # Search both PATH and the caller-supplied directories up front.
    match = path.glob(path.programs_path(), candidates)
    additional_match = path.glob(additional_paths, candidates)
    # An empty glob result is falsy, so ``or`` expresses the
    # "first non-empty result wins" precedence directly.
    if path_last:
        found = additional_match or match
    else:
        found = match or additional_match
    return path.native(found[0]) if found else ''
def sync_grains(name, **kwargs):
    """
    Performs the same task as saltutil.sync_grains module.

    name
        The state ID; forwarded unchanged to the single-module sync helper.
    Extra keyword arguments (e.g. ``refresh``) are passed straight through.

    See :mod:`saltutil module for full list of options <salt.modules.saltutil>`
    .. code-block:: yaml
        sync_everything:
          saltutil.sync_grains:
            - refresh: True
    """
    # Delegate to the shared helper with the sync category fixed to "grains".
    return _sync_single(name, "grains", **kwargs)
import torch
from typing import Union
from typing import Tuple
def masked_topk(
    input_: torch.FloatTensor,
    mask: torch.BoolTensor,
    k: Union[int, torch.LongTensor],
    dim: int = -1,
) -> Tuple[torch.LongTensor, torch.LongTensor, torch.FloatTensor]:
    """
    Extracts the top-k items along a certain dimension. This is similar to `torch.topk` except:
    (1) we allow of a `mask` that makes the function not consider certain elements;
    (2) the returned top input, mask, and indices are sorted in their original order in the input;
    (3) May use the same k for all dimensions, or different k for each.

    # Parameters

    input_ : `torch.FloatTensor`, required.
        A tensor containing the items that we want to prune.
    mask : `torch.BoolTensor`, required.
        A tensor with the same shape as `input_` that makes the function not consider masked out
        (i.e. False) elements.
    k : `Union[int, torch.LongTensor]`, required.
        If a tensor of shape as `input_` except without dimension `dim`, specifies the number of
        items to keep for each dimension.
        If an int, keep the same number of items for all dimensions.

    # Returns

    top_input : `torch.FloatTensor`
        The values of the top-k scoring items.
        Has the same shape as `input_` except dimension `dim` has value `k` when it's an `int`
        or `k.max()` when it's a tensor.
    top_mask : `torch.BoolTensor`
        The corresponding mask for `top_input`.
        Has the shape as `top_input`.
    top_indices : `torch.IntTensor`
        The indices of the top-k scoring items into the original `input_`
        tensor. This is returned because it can be useful to retain pointers to
        the original items, if each item is being scored by multiple distinct
        scorers, for instance.
        Has the shape as `top_input`.
    """
    # NOTE(review): relies on helpers defined elsewhere in this module:
    # replace_masked_values, min_value_of_dtype, get_mask_from_sequence_lengths.
    if input_.size() != mask.size():
        raise ValueError("`input_` and `mask` must have the same shape.")
    if not -input_.dim() <= dim < input_.dim():
        raise ValueError("`dim` must be in `[-input_.dim(), input_.dim())`")
    # Canonicalize a possibly-negative dim to a non-negative index.
    dim = (dim + input_.dim()) % input_.dim()
    max_k = k if isinstance(k, int) else k.max()
    # We put the dim in question to the last dimension by permutation, and squash all leading dims.
    # [0, 1, ..., dim - 1, dim + 1, ..., input.dim() - 1, dim]
    permutation = list(range(input_.dim()))
    permutation.pop(dim)
    permutation += [dim]
    # [0, 1, ..., dim - 1, -1, dim, ..., input.dim() - 2]; for restoration
    reverse_permutation = list(range(input_.dim() - 1))
    reverse_permutation.insert(dim, -1)
    other_dims_size = list(input_.size())
    other_dims_size.pop(dim)
    permuted_size = other_dims_size + [max_k] # for restoration
    # If an int was given for number of items to keep, construct tensor by repeating the value.
    if isinstance(k, int):
        # Put the tensor on same device as the mask.
        k = k * torch.ones(*other_dims_size, dtype=torch.long, device=mask.device)
    else:
        if list(k.size()) != other_dims_size:
            raise ValueError(
                "`k` must have the same shape as `input_` with dimension `dim` removed."
            )
    num_items = input_.size(dim)
    # (batch_size, num_items) -- "batch_size" refers to all other dimensions stacked together
    input_ = input_.permute(*permutation).reshape(-1, num_items)
    mask = mask.permute(*permutation).reshape(-1, num_items)
    k = k.reshape(-1)
    # Make sure that we don't select any masked items by setting their scores to be very
    # negative.
    input_ = replace_masked_values(input_, mask, min_value_of_dtype(input_.dtype))
    # Shape: (batch_size, max_k)
    _, top_indices = input_.topk(max_k, 1)
    # Mask based on number of items to keep for each sentence.
    # Shape: (batch_size, max_k)
    top_indices_mask = get_mask_from_sequence_lengths(k, max_k).bool()
    # Fill all masked indices with largest "top" index for that sentence, so that all masked
    # indices will be sorted to the end.
    # Shape: (batch_size, 1)
    fill_value, _ = top_indices.max(dim=1, keepdim=True)
    # Shape: (batch_size, max_num_items_to_keep)
    top_indices = torch.where(top_indices_mask, top_indices, fill_value)
    # Now we order the selected indices in increasing order with
    # respect to their indices (and hence, with respect to the
    # order they originally appeared in the `embeddings` tensor).
    top_indices, _ = top_indices.sort(1)
    # Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
    # the top k for each sentence.
    # Shape: (batch_size, max_k)
    sequence_mask = mask.gather(1, top_indices)
    top_mask = top_indices_mask & sequence_mask
    # Shape: (batch_size, max_k)
    top_input = input_.gather(1, top_indices)
    # Undo the flattening + permutation so every output matches input_'s layout.
    return (
        top_input.reshape(*permuted_size).permute(*reverse_permutation),
        top_mask.reshape(*permuted_size).permute(*reverse_permutation),
        top_indices.reshape(*permuted_size).permute(*reverse_permutation),
    )
def bessel_kve(v, z, name=None):
    """Computes exponentially scaled modified Bessel function of the 2nd kind.

    This function computes `Kve` which is an exponentially scaled version
    of the modified Bessel function of the second kind.

    `Kve(v, z) = Kv(v, z) * exp(abs(z))`

    Warning: Gradients with respect to the first parameter `v` are currently not
    defined.

    Args:
      v: Floating-point `Tensor` broadcastable with `z` for which `Kve(v, z)`
        should be computed. `v` is expected to be non-negative.
      z: Floating-point `Tensor` broadcastable with `v` for which `Kve(v, z)`
        should be computed. If `z` is negative, `v` is expected to be an integer.
      name: A name for the operation (optional).
        Default value: `None` (i.e., 'bessel_kve').

    Returns:
      bessel_kve: Exponentially modified Bessel Function of the 2nd kind.
    """
    with tf.name_scope(name or 'bessel_kve'):
        # Promote both arguments to a common floating dtype (float32 default).
        dtype = dtype_util.common_dtype([v, z], tf.float32)
        v = tf.convert_to_tensor(v, dtype=dtype)
        z = tf.convert_to_tensor(z, dtype=dtype)
        # Delegate to the custom-gradient implementation.
        return _bessel_kve_custom_gradient(v, z)
def cvReleaseConDensation(*args):
    """cvReleaseConDensation(PyObject obj)"""
    # Thin auto-generated (SWIG-style) wrapper: forwards all arguments
    # unchanged to the native OpenCV binding.
    return _cv.cvReleaseConDensation(*args)
def solution(num_buns, num_required):
    """Distribute keys to bunnies so any num_required of them can open all locks.

    Each choice of num_required-1 of the num_buns determines a missing key,
    so binom[num_buns, num_required-1] distinct keys are used, and each key
    is held by num_buns-num_required+1 bunnies.  Instead of distributing
    keys to bunnies, we distribute combinations of bunnies to keys:
    binom[num_buns, num_required-1] == binom[num_buns, num_buns-num_required+1],
    and the i-th combination names the bunnies that receive key i.

    Args:
        num_buns: total number of bunnies.
        num_required: minimum number of bunnies needed to open every lock.

    Returns:
        list: for each bunny, the sorted list of key numbers it holds.
    """
    # itertools.combinations yields the bunny subsets in lexicographic
    # order, which is the standard key-numbering for this construction
    # (replaces the non-stdlib combinationsIterative helper).
    from itertools import combinations
    copies_per_key = num_buns - num_required + 1
    result = [[] for _ in range(num_buns)]
    for key, holders in enumerate(combinations(range(num_buns), copies_per_key)):
        for bun in holders:
            result[bun].append(key)
    return result
import urllib
from datetime import datetime
import ssl
import socket
import json
def check_SSL_certificate(url, verbose):
    """Check the SSL certificate expiration date of a server hostname.

    Args:
        url: URL whose host certificate should be inspected.
        verbose: 1 to print progress and the full peer certificate.

    Returns:
        "valid" if the certificate's notAfter date is still in the future,
        otherwise "expired".

    Raises:
        OSError / ssl.SSLError: If the TLS connection cannot be established.
    """
    parsed = urllib.parse.urlparse(url)
    hostname = parsed.hostname
    # Bug fix: urlparse().port is None when the URL has no explicit port,
    # which made socket.create_connection() fail; default to HTTPS 443.
    port = parsed.port or 443
    if verbose == 1:
        print("- Checking SSL certificate in progres...")
    now = datetime.now()
    context = ssl.create_default_context()
    with socket.create_connection((hostname, port)) as sock:
        with context.wrap_socket(sock, server_hostname=hostname) as ssock:
            cert = ssock.getpeercert()
            if verbose == 1:
                print(json.dumps(cert))
            # notAfter looks like 'Jun  1 12:00:00 2030 GMT'; drop ' GMT'.
            # (The old json.dumps round-trip plus manual quote stripping
            # produced exactly this same substring.)
            expire_data = cert["notAfter"][:-4]
            date_time_obj = datetime.strptime(expire_data, "%b %d %H:%M:%S %Y")
            if date_time_obj > now:
                return "valid"
            else:
                return "expired"
from datetime import datetime
def isToday(date_str):
    """
    Check whether the last_checkt_time is today.

    :param date_str: timestamp string in '%Y-%m-%d %H:%M' format
    :return: True if date_str falls on today's date, else False
    :raises ValueError: if date_str does not match the expected format
    """
    # Bug fix: the module does ``from datetime import datetime``, so the
    # class is ``datetime`` -- the old ``datetime.datetime.today()`` raised
    # AttributeError at runtime.
    today = datetime.today()
    date = datetime.strptime(date_str, '%Y-%m-%d %H:%M')
    # Comparing the date parts is equivalent to the year/month/day chain.
    return today.date() == date.date()
from mpl_toolkits.mplot3d.axis3d import Axis
def mpl_3d_remove_margins():
    """
    Remove thin margins in matplotlib 3d plots.
    The Solution is from `Stackoverflow`_.
    .. _Stackoverflow:
       http://stackoverflow.com/questions/16488182/
    """
    # Guard: patch only once -- the presence of the backup attribute marks
    # that Axis._get_coord_info has already been replaced.
    if not hasattr(Axis, "_get_coord_info_old"):
        def _get_coord_info_new(self, renderer):
            # Shrink the reported axis extents by a quarter of the padding
            # delta on each side, removing the default whitespace margin.
            mins, maxs, centers, deltas, tc, highs = self._get_coord_info_old(renderer)
            mins += deltas / 4
            maxs -= deltas / 4
            return mins, maxs, centers, deltas, tc, highs
        # Keep a reference to the original method, then monkey-patch the
        # class so every 3d axis picks up the tightened extents.
        Axis._get_coord_info_old = Axis._get_coord_info
        Axis._get_coord_info = _get_coord_info_new
import os
import subprocess
import json
import requests
def try_compute_data(s3, webhook, old_data):
    """
    Try to run the scraper and return course data. If something goes
    wrong, raise `ScrapeError`. Otherwise, invoke the provided
    `Webhook`. `old_data` is the previous course data or `util.Unset`.
    """
    scraper_timeout = util.get_env("scraper_timeout")
    try:
        scraper_timeout = int(scraper_timeout)
        if scraper_timeout <= 0:
            raise ValueError
    except ValueError:
        # Bad or missing timeout from the environment: fall back to 60s and
        # persist the fallback for any child processes.
        util.warn("Illegal scraper timeout: {}".format(repr(scraper_timeout)))
        util.log("Resetting timeout to 60 seconds")
        os.environ["HYPERSCHEDULE_SCRAPER_TIMEOUT"] = "60"
        scraper_timeout = 60
    if old_data is util.Unset:
        # For JSON.
        old_data = None
    try:
        util.log("Running scraper")
        # Scraper runs as a subprocess; previous data goes in on stdin as
        # JSON, new data comes back on stdout.
        process = subprocess.Popen(
            ["python", "-m", "hyperschedule.scrapers.claremont"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
        output, _ = process.communicate(
            input=json.dumps(old_data).encode(), timeout=scraper_timeout
        )
        if process.returncode != 0:
            raise ScrapeError("scraper failed")
        try:
            output = output.decode()
        except UnicodeDecodeError as e:
            raise ScrapeError(
                "scraper emitted malformed output: {}".format(e)
            ) from None
        # Reject output containing the literal "$delete" marker; the check is
        # on the raw JSON text, so it also matches inside string values.
        if "$delete" in output:
            raise ScrapeError("scraper output contains '$delete'")
        data = json.loads(output)
        if util.get_env_boolean("snitch"):
            webhook.get()
        if util.get_env_boolean("cache"):
            cache_file_write(data)
        if util.get_env_boolean("s3_write"):
            s3_write(s3, data)
    except OSError as e:
        raise ScrapeError(
            "unexpected error while running scraper: {}".format(e)
        ) from None
    except subprocess.TimeoutExpired:
        process.kill()
        process.communicate()
        raise ScrapeError(
            "scraper timed out after {} seconds".format(scraper_timeout)
        ) from None
    except json.decoder.JSONDecodeError:
        raise ScrapeError("scraper did not return valid JSON") from None
    except requests.exceptions.RequestException as e:
        # Webhook failure is non-fatal: `data` was already parsed above,
        # so we only warn and fall through to return it.
        util.warn("failed to reach success webhook: {}".format(e))
    return data
def get_chinese_relation_name(request, user1, user2):
    """
    Gets what user1 called user2 in Chinese.

    Response:
        {'status': Http status,
         'title': string
        }
    Returns HTTP 404 when the relation lookup fails for any reason.
    """
    try:
        title = get_chinese_relation(user1, user2)
        return Response({
            'title': title
        })
    except Exception as e:
        # Bug fix: ``print e`` is Python 2 statement syntax and is a
        # SyntaxError on Python 3; the function form works on both.
        print(e)
        return Response(status=status.HTTP_404_NOT_FOUND)
import matplotlib.pyplot as plt
def projplot(theta, phi, fmt=None, **kwargs):
    """projplot is a wrapper around :func:`matplotlib.Axes.plot` to take into account the
    spherical projection.

    You can call this function as::
      projplot(theta, phi)       # plot a line going through points at coord (theta, phi)
      projplot(theta, phi, 'bo') # plot 'o' in blue at coord (theta, phi)

    Parameters
    ----------
    theta, phi : float, array-like
      Coordinates of point to plot in radians.
    fmt : str
      A format string (see :func:`matplotlib.Axes.plot` for details)

    Notes
    -----
    Other keywords are passed to :func:`matplotlib.Axes.plot`.

    See Also
    --------
    projscatter, projtext
    """
    # Convert spherical coordinates to the projected lon/lat frame, then
    # forward to matplotlib, appending the optional format string.
    longitude, latitude = lonlat(theta, phi)
    plot_args = [longitude, latitude]
    if fmt is not None:
        plot_args.append(fmt)
    return plt.plot(*plot_args, **kwargs)
def compress_vertex_list(individual_vertex: list) -> list:
    """
    Given a list of vertices that should not be fillet'd,
    search for runs of consecutive indexes and compress each run into a
    single (start, end) tuple.
    If the vertex is a point and not a line segment, the returned tuple's
    start and end are the same index.

    Args:
        individual_vertex (list): List of UNIQUE ints. Each int refers to an index of a LineString.

    Returns:
        list: A compressed list of tuples. So, it combines adjacent vertices into a longer one.
    """
    # Idiom: within a run of consecutive integers, value - position is
    # constant, so it serves as a grouping key (replaces ~30 lines of
    # manual start/end/size state tracking with identical results).
    from itertools import groupby
    reduced_idx = list()
    sorted_vertex = sorted(individual_vertex)
    for _, run in groupby(enumerate(sorted_vertex), key=lambda iv: iv[1] - iv[0]):
        run = list(run)
        reduced_idx.append((run[0][1], run[-1][1]))
    return reduced_idx
def update_context_with_user_data(update: Update, context: CallbackContext) -> tuple:
    """Ensure context.user_data carries a UserProfile entry for this chat."""
    # Nothing to do when user_data is already populated.
    if context.user_data:
        return update, context
    # Callback-query updates carry the chat on the nested message;
    # plain message updates carry it directly.
    if hasattr(update.callback_query, 'message'):
        chat = update.callback_query.message.chat
    else:
        chat = update.message.chat
    # Store the freshly created user under a random UUID key.
    context.user_data[str(uuid4())] = database.create_user(chat)
    return update, context
def multiply_images( images, normalize_result = False, color_mode = MODE ):
    """Multiply N images pixel-wise, channel by channel.

    Args:
        images: list of images accepted by load_image_data
        normalize_result: False truncates values, True normalizes them (default False)
        color_mode: color mode of the resulting image, default 'RGB'
    Returns:
        An image object containing the pixel-wise product of all the images.
    """
    new_image_width = find_min_image_width( images )
    new_image_height = find_min_image_height( images )
    baseImage = matrix_from_image( new_image_height, new_image_width )
    # Bug fix: the previous version overwrote baseImage with the product of
    # each *pair* (i, i+1), so only the last pair survived.  Seed the
    # accumulator with the first image, then multiply every remaining image
    # into it so the result is the product of all N images.
    first = load_image_data( images[0] )
    for position_x in range( new_image_height ):
        for position_y in range( new_image_width ):
            baseImage[position_x][position_y] = (
                first[position_y, position_x][R],
                first[position_y, position_x][G],
                first[position_y, position_x][B]
            )
    for image in images[1:]:
        image_data = load_image_data( image )
        for position_x in range( new_image_height ):
            for position_y in range( new_image_width ):
                accumulated = baseImage[position_x][position_y]
                baseImage[position_x][position_y] = (
                    accumulated[R] * image_data[position_y, position_x][R],
                    accumulated[G] * image_data[position_y, position_x][G],
                    accumulated[B] * image_data[position_y, position_x][B]
                )
    if( normalize_result ):
        baseImage = normalize_rgb_image( baseImage )
    return image_from_matrix( baseImage )
from torch.optim import lr_scheduler
def setup_harn(**kwargs):
    """
    Build and return a MatchingHarness configured from keyword arguments.

    CommandLine:
        python ~/code/netharn/netharn/examples/ggr_matching.py setup_harn

    Args:
        dbname (str): Name of IBEIS database to use
        nice (str): Custom tag for this run
        workdir (PathLike): path to dump all the intermedate results
        dim (int): Width and height of the network input
        batch_size (int): Base batch size. Number of examples in GPU at any time.
        bstep (int): Multiply by batch_size to simulate a larger batches.
        lr (float): Base learning rate
        decay (float): Weight decay (L2 regularization)
        workers (int): Number of parallel data loader workers
        xpu (str): Device to train on. Can be either `'cpu'`, `'gpu'`, a number
            indicating a GPU (e.g. `0`), or a list of numbers (e.g. `[0,1,2]`)
            indicating multiple GPUs
        triple (bool): if True uses triplet loss, otherwise contrastive loss
        norm_desc (bool): if True normalizes the descriptors
        pretrained (PathLike): path to a compatible pretrained model
        margin (float): margin for loss criterion
        soft (bool): use soft margin

    Example:
        >>> harn = setup_harn(dbname='PZ_MTEST')
        >>> harn.initialize()
    """
    config = parse_config(**kwargs)
    nh.configure_hacks(config)
    datasets, workdir = setup_datasets(config)
    # One loader per split; only the training split is shuffled, and the
    # validation/test splits get a tenth of the training batch count.
    loaders = {
        tag: dset.make_loader(
            shuffle=(tag == 'train'),
            batch_size=config['batch_size'],
            num_batches=(config['num_batches'] if tag == 'train' else config['num_batches'] // 10),
            k=config['k'],
            p=config['p'],
            num_workers=config['workers'],
        )
        for tag, dset in datasets.items()
    }
    if config['scheduler'] == 'steplr':
        scheduler_ = (lr_scheduler.StepLR,
                      dict(step_size=8, gamma=0.1, last_epoch=-1))
    else:
        scheduler_ = nh.Scheduler.coerce(config, scheduler='onecycle70')
    # Bundle model, data, and optimization settings into netharn hyperparams.
    hyper = nh.HyperParams(**{
        'nice': config['nice'],
        'workdir': config['workdir'],
        'datasets': datasets,
        'loaders': loaders,
        'xpu': nh.XPU.coerce(config['xpu']),
        'model': (nh.models.DescriptorNetwork, {
            'input_shape': (1, 3, config['dim'], config['dim']),
            'norm_desc': config['norm_desc'],
            # 'hidden_channels': [512, 256]
            'hidden_channels': [256],
            'desc_size': 128,
        }),
        'initializer': nh.Initializer.coerce(config),
        'optimizer': nh.Optimizer.coerce(config),
        'scheduler': scheduler_,
        'criterion': (nh.criterions.TripletLoss, {
            'margin': config['margin'],
            'soft': config['soft'],
        }),
        'monitor': nh.Monitor.coerce(
            config,
            minimize=['loss', 'pos_dist', 'brier'],
            maximize=['accuracy', 'neg_dist', 'mcc'],
            patience=100,
            max_epoch=100,
        ),
        'dynamics': nh.Dynamics.coerce(config),
        'other': {
            'n_classes': 2,
        },
    })
    harn = MatchingHarness(hyper=hyper)
    harn.preferences['prog_backend'] = 'progiter'
    # Log every training iteration; disable per-iteration logs for test/vali.
    harn.intervals['log_iter_train'] = 1
    harn.intervals['log_iter_test'] = None
    harn.intervals['log_iter_vali'] = None
    return harn
def sanitize_comment(comment):
    """Sanitize malicious tags from posted comments.

    Takes an HTML comment string and returns it with disallowed tags removed.
    Defaults to bleach's default set of allowed tags:
        ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'strong', 'ul']
    but can be overridden by adding to settings e.g.:
        BLEACH_ALLOWED_TAGS = ['hr', 'b', 'i']
    """
    # getattr-with-default collapses the hasattr branch into one lookup.
    allowed_tags = getattr(settings, 'BLEACH_ALLOWED_TAGS',
                           bleach.sanitizer.ALLOWED_TAGS)
    return bleach.clean(comment, tags=allowed_tags, strip=True)
import requests
def get_api_result(mode, extra_arguments=None):
    """ Build JSON request to SABnzbd.

    Args:
        mode: value for the SABnzbd ``mode`` API parameter.
        extra_arguments: optional dict of extra query parameters.
    Returns:
        Decoded JSON response from the SABnzbd API.
    """
    # ``None`` default instead of the mutable ``{}`` default argument
    # (a shared dict default is a classic Python pitfall).
    arguments = {'apikey': 'apikey', 'output': 'json', 'mode': mode}
    if extra_arguments:
        arguments.update(extra_arguments)
    r = requests.get('http://%s:%s/api' % (SAB_HOST, SAB_PORT), params=arguments)
    return r.json()
async def get_exchange_info(exchange):
    """ Fetches and returns relevant information about an exchange for historical data fetch.

    Args:
        exchange (str): The name of the exchange (a ccxt exchange id).
    Returns:
        dict: market exchange information (ex.describe() output) plus the
        exchange name and its list of symbols.
    """
    # Instantiate the ccxt async exchange class whose name matches `exchange`.
    ex = getattr(ccxt_async, exchange)()
    # Loads the market.
    await ex.load_markets()
    # Gathers market data.
    data = ex.describe()
    data["exchange"] = exchange
    data["symbols"] = ex.symbols
    # Closes the market connection due to async ccxt.
    await ex.close()
    # Returns the information.
    return data
def create_top_key_words_all(data_res, query, filter, filter_values):
    """Build the keywords figure and wrap it in a dcc.Graph component.

    Only displays it when all data is retrieved.
    """
    dff_res = pd.DataFrame(data_res['data'])
    dff_res['result'] = 'direct'
    dff_res = data_preprocess.filter_data_by_time(dff_res, filter_values)
    if filter == 'All':
        fig = plots.make_top_key_words(dff_res, query)
    else:
        # Keep only rows whose fieldsOfStudy list contains the filter value.
        matching_rows = [
            idx for idx, row in dff_res.iterrows()
            if isinstance(row.fieldsOfStudy, list) and filter in row.fieldsOfStudy
        ]
        fig = plots.make_top_key_words(dff_res.loc[matching_rows], query)
    return dcc.Graph(figure=fig, className="keywords-plotly")
import re
def proccess_grains(grains_data, model_code, host_name, ip=None):
    """Normalize one minion's Salt grains into a new_data record whose keys
    are prefixed with model_code (e.g. model_code + "_HOSTNAME")."""
    all_data = grains_data.get("data")
    data = all_data.get(host_name)
    selinux = data.get("selinux", None)
    dns = data.get("dns", None)
    if dns:
        dns = dns.get("nameservers")
    else:
        dns = ""
    if selinux:
        selinux = str(selinux.get("enabled")).lower()
    else:
        selinux = ""
    # Pick internal IPs from interfaces whose name matches common NIC prefixes.
    re_internal_ip_tmp_list = data.get("ip4_interfaces").keys()
    internal_ip = list()
    for each in re_internal_ip_tmp_list:
        reg_netcard = re.compile(r'^(eth|ens|enp|bond|Tencent VirtIO Ethernet Adapter)[\d]+', re.M)
        netcard = reg_netcard.search(each)
        if netcard:
            tmp_ip = data.get("ip4_interfaces").get(netcard.group())
            internal_ip.append(tmp_ip[0] if tmp_ip else '')
    # On Windows, collect the IP from fqdn_ip4 instead of the interface list.
    if data.get("os") == "Windows":
        print("fqdn_ip4", data.get("fqdn_ip4"), type(data.get("fqdn_ip4")))
        ip = data.get("fqdn_ip4")[-1] if data.get("fqdn_ip4") else ''
    # Matches loopback/private (RFC1918) addresses; a non-empty match means
    # `ip` is NOT public, so _PUBLIC_IP below stays empty.
    # NOTE(review): if `ip` is left as the default None on a non-Windows
    # host, re.findall raises TypeError -- callers appear to always pass ip.
    re_rule = "^(127\.0\.0\.1)|(localhost)|(10\.\d{1,3}\.\d{1,3}\.\d{1,3})|(172\.((1[6-9])|(2\d)|(3[01]))\.\d{1,3}\.\d{1,3})|(192\.168\.\d{1,3}\.\d{1,3})$"
    c = re.findall(re_rule, ip)
    new_data = {
        "data": {
            model_code + "_HOSTNAME": data.get("fqdn"),
            model_code + "_INTERNAL_IP": internal_ip[0] if internal_ip else ip,
            model_code + "_PUBLIC_IP": '' if len(c) > 0 else ip,
            model_code + "_MEMORY": data.get("mem_total"),
            model_code + "_SWAP": data.get("swap_total"),
            model_code + "_CPU_MODEL": data.get("cpu_model"),
            model_code + "_CPU_NUM": data.get("num_cpus"),
            model_code + "_CPU_ARCH": data.get("cpuarch"),
            model_code + "_OS": data.get("os"),
            model_code + "_OS_TYPE": data.get("kernel"),
            model_code + "_OS_FAMILY": data.get("os_family"),
            model_code + "_OS_ARCH": data.get("osarch"),
            model_code + "_OS_RELEASE": data.get("osrelease"),
            model_code + "_SELINUX": selinux,
            model_code + "_KERNEL_RELEASE": data.get("kernelrelease"),
            model_code + "_BIOS_RELEASEDATA": data.get("biosreleasedate"),
            model_code + "_BIOS_VERSION": data.get("biosversion"),
            model_code + "_NAME_SERVER": dns,
            model_code + "_CPU_FLAGS": data.get("cpu_flags"),
            model_code + "_AGENT_VERSION": data.get("saltversion"),
            model_code + "_AGENT_PATH": data.get("saltpath"),
            model_code + "_SERIAL_NUMBER": data.get("serialnumber"),
            model_code + "_PRODUCT_NAME": data.get("productname"),
            model_code + "_VIRTUAL": data.get("virtual"),
            model_code + "_MANUFACTURER": data.get("manufacturer"),
            model_code + "_AGENT_STATE": "Agent运行中",
        },
        "pk_name": model_code + "_name",
        "pk_value": host_name,
        "model_code": model_code,
        "import_type": "Agent采集",
        "position": "zc"
    }
    # Cloud hosts additionally record the instance id.
    if model_code == "CLOUD_SERVER":
        new_data["data"][model_code + "_INSTANCE_ID"] = host_name
    # Non-cloud hosts must store their unique identifier under the name field.
    if model_code != "CLOUD_SERVER":
        new_data["data"][model_code + "_name"] = host_name
    return new_data
def V2(params, fs, hs, ops, opsH, vector, shots=2**13, backend=Aer.get_backend('aer_simulator') ):
    """
    Assemble the vector V whose k-th entry is V_k evaluated at ``params``.
    """
    # One V_k evaluation per parameter, collected into a float array.
    num_params = params.shape[0]
    values = np.zeros(num_params)
    for idx in range(num_params):
        values[idx] = V_k(params, fs, hs, ops, opsH, vector, idx, shots, backend)
    return values
def cachedeterministic(parser, token):
    """
    This will cache the contents of a template fragment for a given amount of
    time, just like {% cache .. %} except that the key is deterministic and not
    mangled or run through MD5.
    Usage::
        {% cachedeterministic [expire_time] [key] %}
            .. some expensive processing ..
        {% endcachedeterministic %}
    """
    # Consume everything up to the closing tag, then drop the end-tag token.
    nodelist = parser.parse(("endcachedeterministic",))
    parser.delete_first_token()
    bits = token.contents.split()
    if len(bits) != 3:
        raise TemplateSyntaxError(u"'%r' tag requires 2 arguments." % bits[0])
    _tag_name, expire_time, key = bits
    return CacheNode(nodelist, expire_time, key)
from typing import List
async def generate_acl6(participant: Participant) -> List[str]:
    """Generate a Participant-Specific IPv6 ACL."""
    acl = sorted(DEFAULT6.copy())
    # Prepend the reset + declaration header lines for this ASN's ACL.
    header = (
        f"no ipv6 access-list ipv6-{participant.asn}-in",
        f"ipv6 access-list ipv6-{participant.asn}-in",
    )
    for offset, line in enumerate(header):
        acl.insert(offset, line)
    # One BGP permit per participant address, slotted in just before the
    # final two default entries.
    for addr in participant.ipv6:
        acl.insert(-2, f"900 permit tcp host {str(addr)} eq bgp any")
    return acl
import random
import time
def make_veth_signed_order(
    asset_infos,  # pylint: disable=redefined-outer-name
    pydex_client,  # pylint: disable=redefined-outer-name
    exchange_address,  # pylint: disable=redefined-outer-name
):
    """Convenience function for creating a new instance of a signed order"""
    def _make_veth_signed_order(  # pylint: disable=too-many-locals
        asset_type,
        qty,
        price,
        side,
        maker_address=pydex_client.account_address,
        expiration_time_seconds=600,
        maker_fee="0",
        taker_fee="0",
        salt="1234567890",
        taker_address=NULL_ADDRESS,
        fee_recipient_address=NULL_ADDRESS,
        sender_address=NULL_ADDRESS,
        exchange_address=exchange_address,  # pylint: disable=redefined-outer-name
        pydex_client=pydex_client,  # pylint: disable=redefined-outer-name
    ):
        """Convenience function for making valid orders to buy or sell
        SHORT or LONG assets against VETH.
        Keyword arguments:
        asset_type - - str from {'LONG', 'SHORT'} to index into `full_asset_set_data`
        qty - - how much of the ticker asset you want to buy against VETH
        price - - Always in units of LONG or SHORT asset per VETH
        side - - str from {'BUY', 'SELL'}
        maker_address - - your address(defaults to MY_ADDRESS)
        """
        asset_data = asset_infos.FULL_SET_ASSET_DATA[asset_type]
        # BUY: maker gives VETH, takes the asset; SELL is the mirror image.
        # Amounts are converted to base units, price scaling the VETH side.
        if side == 'BUY':
            maker_asset_data = asset_infos.VETH_ASSET_DATA
            taker_asset_data = asset_data
            maker_amount = to_base_unit_amount(qty * price)
            taker_amount = to_base_unit_amount(qty)
        elif side == 'SELL':
            maker_asset_data = asset_data
            taker_asset_data = asset_infos.VETH_ASSET_DATA
            maker_amount = to_base_unit_amount(qty)
            taker_amount = to_base_unit_amount(qty * price)
        else:
            raise Exception("side must be one of {'BUY', 'SELL'}")
        # salt=None requests a random salt (range matches int64 max).
        if salt is None:
            salt = "{:.0f}".format(Decimal(random.uniform(0, 9223372036854775807)))
        # Fees given as numbers are converted to base-unit strings.
        if not isinstance(maker_fee, str):
            maker_fee = to_base_unit_amount(maker_fee)
        if not isinstance(taker_fee, str):
            taker_fee = to_base_unit_amount(taker_fee)
        # Expiration is given as a relative offset and stored as a UNIX time.
        expiration_time_seconds = int(time.time() + expiration_time_seconds)
        order = SignedOrder()
        order.maker_address = maker_address
        order.taker_address = taker_address
        order.fee_recipient_address = fee_recipient_address
        order.sender_address = sender_address
        order.maker_asset_amount = maker_amount
        order.taker_asset_amount = taker_amount
        order.maker_fee = "{:.0f}".format(Decimal(maker_fee))
        order.taker_fee = "{:.0f}".format(Decimal(taker_fee))
        order.expiration_time_seconds = expiration_time_seconds
        order.salt = salt
        order.maker_asset_data = maker_asset_data
        order.taker_asset_data = taker_asset_data
        order.exchange_address = exchange_address
        # sign the hash
        order.signature = pydex_client.sign_hash_zx_compat(order.update().hash)
        # make sure the signed_order is valid
        assert_valid(order.to_json(), "/signedOrderSchema")
        return order
    return _make_veth_signed_order
def normalize(vec):
    """Return unit vector for parameter vec.
    >>> normalize(np.array([3, 4]))
    array([ 0.6, 0.8])
    """
    # A zero vector cannot be normalized; hand it back unchanged.
    if not np.any(vec):
        return vec
    return vec / np.linalg.norm(vec)
def get_x_coordinate(width, year_index):
    """
    Given the width of the canvas and the index of the current year
    in the YEARS list, returns the x coordinate of the vertical
    line associated with that year.
    Input:
        width (int): The width of the canvas
        year_index (int): The index of the current year in the YEARS list
    Returns:
        x_coordinate (int): The x coordinate of the vertical line associated
                            with the specified year.
    """
    # Usable plotting width excludes the margin on both sides; one column
    # of that width is reserved per year.
    spacing = (width - 2 * GRAPH_MARGIN_SIZE) / len(YEARS)
    return GRAPH_MARGIN_SIZE + spacing * year_index
import argparse
def add_rnaseq_args():
    """
    Build and return the argparse.ArgumentParser for the RNAseq pipeline.

    Inputs may be given either as a design JSON (-d), as explicit fastq
    files (--wt/--mut or -1/-2), or as sample directories (-c/-t).
    """
    parser = argparse.ArgumentParser(
        description='RNA-seq pipeline')
    # -- input specification: design file, fastq lists, or directories --
    parser.add_argument('-b', '--build-design', dest='build_design',
        action='store_true',
        help='Create design for fastq files')
    parser.add_argument('-d', '--design', default=None,
        help='design for RNAseq, json format, ignore fq1, fq2')
    parser.add_argument('--wt', nargs='+', default=None, dest='wildtype',
        help='read1 fq files for control sample')
    parser.add_argument('--wt-fq2', nargs='+', dest='wildtype_fq2',
        default=None, help='read2 fq files for control sample')
    parser.add_argument('--mut', nargs='+', default=None, dest='mutant',
        help='read2 fq files for treatment sample')
    parser.add_argument('--mut-fq2', nargs='+', dest='mutant_fq2',
        default=None, help='read2 fq files for treatment sample')
    parser.add_argument('-1', '--fq1', nargs='+', default=None,
        help='read1 files, (or read1 of PE reads)')
    parser.add_argument('-2', '--fq2', nargs='+', default=None,
        help='read2 of PE reads')
    parser.add_argument('-c', '--wt-dir', nargs='+', dest='wildtype_dir',
        default=None, help='path to the dirs of control samples')
    parser.add_argument('-t', '--mut-dir', nargs='+', dest='mutant_dir',
        default=None, help='path to the dirs of experiment samples')
    # -- output and reference annotation --
    parser.add_argument('-o', '--outdir', default=None,
        help='The directory to save results, default, \
        current working directory.')
    parser.add_argument('-g', '--genome', default=None,
        choices=['dm3', 'dm6', 'hg19', 'hg38', 'mm9', 'mm10'],
        help='Reference genome : dm3, dm6, hg19, hg38, mm10, default: hg38')
    parser.add_argument('--gtf', default=None,
        help='The gtf file for quantification, defaut: genome.gtf (None)')
    parser.add_argument('--gene-bed', dest='gene_bed', default=None,
        help='The BED or GTF of genes')
    # optional arguments - 0
    parser.add_argument('--trimmed', action='store_true',
        help='specify if input files are trimmed')
    parser.add_argument('--cut-to-length', dest='cut_to_length', default=0,
        type=int,
        help='cut the read to specific length, from right, default: [0], \
        not cut')
    parser.add_argument('--recursive', action='store_true',
        help='trim adapter recursively')
    # optional arguments - 1
    parser.add_argument('--overwrite', action='store_true',
        help='if spcified, overwrite exists file')
    ## extra: index
    parser.add_argument('-k', '--spikein', default=None,
        choices=[None, 'dm3', 'hg19', 'hg38', 'mm10'],
        help='Spike-in genome : dm3, hg19, hg38, mm10, default: None')
    parser.add_argument('-x', '--extra-index', dest="extra_index",
        help='Provide alignment index(es) for alignment, support multiple\
        indexes. if specified, ignore -g, -k')
    parser.add_argument('--align-to-rRNA', action='store_true',
        help='Align to rRNA')
    parser.add_argument('--aligner', default='STAR',
        choices=['STAR', 'bowtie', 'bowtie2', 'bwa', 'hisat2', 'kallisto',
        'salmon'],
        help='Aligner option: [STAR, bowtie, bowtie2, bwa], default: [STAR]')
    # -- resource usage --
    parser.add_argument('-p', '--threads', default=1, type=int,
        help='Number of threads for each job, default [1]')
    parser.add_argument('-j', '--parallel-jobs', default=1, type=int,
        dest='parallel_jobs',
        help='Number of jobs run in parallel, default: [1]')
    ## extra: para
    parser.add_argument('--extra-para', dest='extra_para', default=None,
        help='Extra parameters for aligner, eg: -X 2000 for bowtie2. \
        default: [None]')
    parser.add_argument('--norm-project', dest='norm_project', default=None,
        help='The RNAseq_Rx project, for parseing norm scale. eg: \
        RNAseq_gene/wt.vs.mut for RNAseq_te, default: [None]')
    return parser
def get_downloader(start_date,
                   end_date,
                   granularity='daily',):
    """Return a downloader closure for Yahoo Finance price data.

    :param start_date: the first day on which data are downloaded
    :param end_date: the last day on which data are downloaded
    :param granularity: the frequency of price data, e.g. 'daily'
    :type start_date: str in format YYYY-MM-DD
    :type end_date: str in format YYYY-MM-DD
    :type granularity: str
    """
    def downloader(symbol):
        """Download price data for one symbol via the Yahoo Financials API.

        :param symbol: the symbol name
        :type symbol: str
        :return: DataFrame indexed by date with open/close/low/high/volume,
            plus dividend and split columns.
        """
        yf = YahooFinancials(symbol)
        res = yf.get_historical_price_data(str(start_date), str(end_date), granularity)
        print("Yahoo Ingest: symbol={} start_date={} end_date={}".format(symbol, str(start_date), str(end_date)))
        if not res or symbol not in res or 'prices' not in res[symbol]:
            # BUG FIX: the ValueError was constructed but never raised, so a
            # failed fetch silently fell through to a KeyError below.
            raise ValueError('Fetching price data for "{}" failed.'.format(symbol))
        prices = res[symbol]['prices']
        df = pd.DataFrame({'open': [p['open'] for p in prices],
                           'close': [p['close'] for p in prices],
                           'low': [p['low'] for p in prices],
                           'high': [p['high'] for p in prices],
                           'volume': [p['volume'] for p in prices],
                           }, index=[pd.Timestamp(d['formatted_date']) for d in prices])
        # BUG FIX: `'dividend' in prices` tested membership in the *list* of
        # price dicts (always False); the keys live on each per-day dict.
        if prices and 'dividend' in prices[0]:
            df['dividend'] = [p.get('dividend', 0) for p in prices]
        else:
            df['dividend'] = 0
        if prices and 'split' in prices[0]:
            df['split'] = [p.get('split', 1) for p in prices]
        else:
            df['split'] = 1
        # BUG FIX: fillna returns a new frame; the result was being discarded.
        df = df.fillna(method='ffill')
        # ajjc: log the most recent row for visibility during ingestion.
        print(df.tail(1))
        return df
    return downloader
def stick_together(seg, factor, connectivity=1):
    """
    Merge pairs of adjacent segments that are highly connected.

    For every pair of segments that are immediate neighbors, the number of
    shared neighboring pixels is compared with the volume of the smaller of
    the two segments.  Whenever n_neighbors / volume**(2/3) exceeds
    ``factor``, the two labels are merged.  The heuristic: two segments that
    are that strongly connected are really one segment that was mistakenly
    divided in two.
    THIS IS DONE IN PLACE.
    seg: Segmented image
    factor: Cutoff factor for sticking two parts together
    connectivity: Connectivity for neighbor count. Default is set to consider
        a 1-connected neighborhood in xy, and no neighbors in z.
    """
    for label_a, label_b in get_potential_neighbors(seg):
        shared = get_nb_neighbors(seg == label_a, seg == label_b,
                                  connectivity=connectivity)
        if shared == 0:
            # Not actually touching; nothing to merge.
            continue
        smaller_volume = min((seg == label_a).sum(), (seg == label_b).sum())
        if shared / smaller_volume ** (2 / 3) > factor:
            # Relabel the second segment with the first segment's label.
            seg[seg == label_b] = label_a
    return seg
def spkacs(targ, et, arg3, arg4, obs):
    """spkacs(SpiceInt targ, SpiceDouble et, ConstSpiceChar * arg3, ConstSpiceChar * arg4, SpiceInt obs)"""
    # SWIG-style wrapper: all argument conversion and the actual computation
    # happen inside the compiled _cspyce0 extension module.  See the NAIF
    # CSPICE documentation for spkacs_c for the meaning of the arguments.
    return _cspyce0.spkacs(targ, et, arg3, arg4, obs)
from typing import Optional
def replicated_all_reduce_(t: Tensor,
                           op: CollectiveOperator = CollectiveOperator.Add,
                           group: Optional[CommGroup] = None) -> Tensor:
    """Reduces tensor `t` across replicas inplace on `t`.

    Args:
        t (Tensor): Tensor to be reduced
        op (CollectiveOperator, optional): Operation to reduce with. Defaults to CollectiveOperator.Add.
        group (Optional[CommGroup], optional): Replicas to reduce across. Defaults to All replicas.
    Returns:
        Tensor: Reduced tensor
    """
    ctx = get_current_context()
    g = ctx.graph
    pb_g = g._pb_graph
    # Fail fast if `t` does not belong to the current graph.
    check_in_graph(g, t)
    if group is None:
        # Default-constructed CommGroup — presumably "all replicas" per the
        # docstring; confirm against the CommGroup definition.
        group = CommGroup()
    settings = ctx._get_op_settings('replicated_all_reduce_inplace')
    opid = _ir.OperatorIdentifier("ai.graphcore", "ReplicatedAllReduceInplace",
                                  1, _ir.NumInputs(1, 1), 1)
    # NOTE: `op` is rebound here — from the CollectiveOperator argument to
    # the IR op object created in the underlying graph.
    op = pb_g.createConnectedOp_ReplicatedAllReduceInplaceOp(
        {0: t.id}, {0: g._create_tensor_id(t.name + "_all_reduce")}, opid, op,
        group, settings)
    return Tensor._from_pb_tensor(op.outTensor(0))
def main():
    """Entry point: build an IcePartialRunner and run it, returning its status."""
    return IcePartialRunner().start()
from typing import Dict
from typing import Any
import requests
def get_kip_main_page_body(kip_main_info: Dict[str, Any]) -> str:
    """Fetch and return the raw HTML body of the KIP main page."""
    page_id = kip_main_info["id"]
    response: requests.Response = requests.get(
        CONTENT_URL + "/" + page_id, params={"expand": "body.view"}
    )
    # Surface HTTP errors immediately rather than parsing a bad payload.
    response.raise_for_status()
    return response.json()["body"]["view"]["value"]
def load_model_tf(checkpoint_path):
    """
    Restore the custom Model class (which imitates keras' Model behaviour)
    from the given checkpoint path.
    """
    restored = Model()
    restored.load(checkpoint_path)
    return restored
def _get_project_folder(name: str) -> str:
    """
    Look up the full folder path of the named project in the registry.

    Args:
        name (str): The name of the project.

    Returns:
        (str): The path of the project folder.
    """
    registry = _get_registry_data()
    return registry[name]["location"]
def dcos_service_url(service):
    """Build the full URL of a service running on DC/OS, based on the value
    of shakedown.dcos.dcos_url() and the service name.

    :param service: the name of a registered DC/OS service, as a string
    :return: the full DC/OS service URL, as a string
    """
    service_path = "/service/{}/".format(service)
    return _gen_url(service_path)
def get_configuration(resource_type, resource_id, configuration_capture_time):
    """Fetch a configurationItem via the getResourceConfigHistory API.

    Used in the OversizedConfigurationItemChangeNotification case, where the
    configuration item is not embedded in the notification itself.
    """
    history = AWS_CONFIG_CLIENT.get_resource_config_history(
        resourceType=resource_type,
        resourceId=resource_id,
        laterTime=configuration_capture_time,
        limit=1,
    )
    # Only one item was requested (limit=1); convert and return it.
    return convert_api_configuration(history["configurationItems"][0])
def scenario_development_one_hot_encoded(sources, scenarios, territory="Europe"):
    """
    Create a dataframe encoding each production facility's activity in the
    given scenario/year combinations as boolean columns.

    :param sources: List or string of carbon sources
    :param scenarios: List of desired scenarios
    :param territory: List of desired territories
    :return: DataFrame with encoded scenarios
    """
    distribution = cost_potential_distribution(sources, territory, agg_nuts_level=-1)
    frame = distribution.data.reset_index().drop(columns=["id"])
    frame.index.rename("id", inplace=True)
    # Everything is assumed operating in the base year.
    frame["operating_2020"] = True
    for scenario in scenarios:
        for year in range(2025, 2051, 5):
            column = "{0}_{1}".format(year, scenario)
            frame[column] = False
            active = create_index_for_scenario_mapping(frame, year, scenario)
            frame.loc[active, column] = True
    return frame
def urlparse(d, keys=None):
    """Return a copy of the given dictionary with url values parsed."""
    parsed = d.copy()
    # When no keys are given, parse every value in the copy.
    targets = parsed.keys() if keys is None else keys
    for key in targets:
        parsed[key] = _urlparse(parsed[key])
    return parsed
def URFeaturizer(input_shape, hparams, **kwargs):
    """Auto-select an appropriate featurizer for the given input shape."""
    # Only 224x224 spatial inputs are supported (handled by URResNet).
    if input_shape[1:3] != (224, 224):
        raise NotImplementedError(f"Input shape {input_shape} is not supported")
    return URResNet(input_shape, hparams, **kwargs)
def week_of_year(datetime_col):
    """Return the ISO week number from a datetime column.

    ``Series.dt.week`` was deprecated and then removed in pandas 2.0;
    ``dt.isocalendar().week`` is the supported replacement and returns the
    same ISO week numbers.  Cast back to int64 to keep the old dtype.
    """
    return datetime_col.dt.isocalendar().week.astype("int64")
def _NamespaceKeyToString(key):
    """Extract namespace name from __namespace__ key.

    Raises an ApplicationError if the key is not of the form '__namespace__'/name
    or '__namespace__'/_EMPTY_NAMESPACE_ID.

    Args:
        key: a key for a __namespace__ instance.

    Returns:
        namespace specified by key.
    """
    key_path = key.to_path()
    if len(key_path) == 2 and key_path[0] == '__namespace__':
        if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID:
            # The sentinel id maps to the default (empty) namespace.
            return ''
        if isinstance(key_path[1], basestring):  # NOTE: Python 2 only (basestring).
            return key_path[1]
    # Check(False, ...) is expected to raise for malformed keys; if it ever
    # returns, this function implicitly returns None.
    Check(False, 'invalid Key for __namespace__ table')
import crypt
def novo_usuario(usuario, senha, root):
    """Create a user document (login, hashed password, root flag) and insert
    it into the database collection.  Returns True on success, False on error.
    """
    is_root = "True" in root
    dados = {"login": usuario, "senha": crypt.crypt(senha), "root": is_root}
    try:
        colecao.insert_one(dados)
        # User created successfully.
        return True
    except Exception as e:
        print("erro ao criar o usuário")
        print(e)
        return False
def fill_dict(_dict, **kwargs):
    """A helper to fill the dict passed with the items passed as keyword
    arguments if they are not yet in the dict. If the dict passed was
    `None` a new dict is created and returned.

    This can be used to prepopulate initial dicts in overriden constructors:

        class MyForm(forms.Form):
            foo = forms.TextField()
            bar = forms.TextField()

            def __init__(self, initial=None):
                forms.Form.__init__(self, forms.fill_dict(initial,
                    foo="nothing",
                    bar="nothing"
                ))
    """
    if _dict is None:
        return kwargs
    for key, value in kwargs.items():
        # BUG FIX: dict.iteritems() does not exist on Python 3; items()
        # behaves the same here (and also works on Python 2).
        # setdefault only inserts keys that are not yet present.
        _dict.setdefault(key, value)
    return _dict
def list_live_assessment_results(request_ctx, course_id, assessment_id, user_id=None, **request_kwargs):
    """
    Returns a list of live assessment results

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param course_id: (required) ID
    :type course_id: string
    :param assessment_id: (required) ID
    :type assessment_id: string
    :param user_id: (optional) If set, restrict results to those for this user
    :type user_id: integer or None
    :return: List live assessment results
    :rtype: requests.Response (with void data)
    """
    path_template = '/v1/courses/{course_id}/live_assessments/{assessment_id}/results'
    url = request_ctx.base_api_url + path_template.format(
        course_id=course_id, assessment_id=assessment_id)
    payload = {'user_id': user_id}
    return client.get(request_ctx, url, payload=payload, **request_kwargs)
def extract_format_data(matrix):
    """Extract format information from the upper-left corner.

    Parameters:
        matrix (ndarray): 2D array containing the QR matrix.

    Returns:
        Tuple (error_correction_level, mask_pattern).

    Raises:
        QRDecodeError: If the format information can not be decoded.
    """
    # Fixed XOR mask applied to the 15 raw format bits before decoding.
    format_mask = 0b101010000010010
    # Fetch format bits from matrix: read down column 8 (skipping the
    # timing-pattern row at index 6), across the corner, then leftwards
    # along row 8.
    format_bits = []
    for i in range(6):
        format_bits.append(matrix[i, 8])
    format_bits.append(matrix[7, 8])
    format_bits.append(matrix[8, 8])
    format_bits.append(matrix[8, 7])
    for i in range(6):
        format_bits.append(matrix[8, 5-i])
    # Convert bits to word and apply the format mask.
    format_word_raw = bits_to_word(format_bits)
    format_word_raw ^= format_mask
    # Decode error correction bits (this step may raise QRDecodeError).
    format_word = decode_format_word(format_word_raw)
    # Decode error correction level (bits 3-4) and mask pattern (bits 0-2).
    error_correction_idx = ((format_word >> 3) & 3)
    mask_pattern = (format_word & 7)
    error_correction_table = "MLHQ"
    error_correction_level = error_correction_table[error_correction_idx]
    return (error_correction_level, mask_pattern)
def decrypt_and_print_message(args):
    """Try every key / nounce combination; return 0 on the first successful
    decryption, 1 if none succeeds."""
    nounce_range = range(args.nounce_lower, args.nounce_upper)
    for key in args.keys:
        for nounce in nounce_range:
            if _decrypt_chacha20poly1305(args.message, nounce, key):
                return 0
    return 1
def readCylWFSRaw(fn):
    """
    Load in data from WFS measurement of cylindrical mirror.

    Assumes that data was processed using processHAS, and loaded into
    a .fits file.  Strips NaNs from the perimeter of the image.

    Distortion is bump positive looking at concave surface.
    Imshow will present distortion in proper orientation as if
    viewing the concave surface.

    NOTE(review): an earlier docstring described `rotate` and rescaling
    behavior that this function does not implement; removed to avoid
    confusion.
    """
    # Load the image and strip perimeter NaNs.
    d = pyfits.getdata(fn)
    d = man.stripnans(d)
    # Negate to make bump positive.
    d = -d
    return d
def normalize(df, df_ref=None):
    """
    Z-score normalize all numerical values in a dataframe.

    :param df: dataframe to normalize
    :param df_ref: reference dataframe supplying mean and std (defaults to df)
    """
    ref = df if df_ref is None else df_ref
    return (df - ref.mean()) / ref.std()
def get_shapes(ndim):
    """
    Produce a bunch of tensor shapes of order `ndim`, paired with
    partition shapes.

    Args:
        ndim: The tensor order.

    Returns:
        list[tuple[int]]: A list of (shape, partition-shape) pairs.
    """
    if ndim == 3:
        base = (8, 64, 128)
        partitions = ((1, 2, 4), (2, 2, 2), (1, 1, 8))
    elif ndim == 4:
        base = (8, 8, 64, 128)
        partitions = ((1, 1, 2, 4), (1, 2, 2, 2), (1, 1, 1, 8))
    else:
        raise ValueError(f"ndim={ndim} not implemented")
    shapes = unique_permutations(base)
    combos = []
    for pshape in partitions:
        combos.extend(zip(shapes, unique_permutations(pshape)))
    return combos
def gen_stimuli(M, N):
    """
    Generate random test stimuli: a 1xM vector, an MxN matrix, and their
    product as the expected result.
    """
    randn = np.random.randn
    a = (randn(1, M) * 100).astype(np.float32)
    B = (randn(M, N) * 100).astype(np.float32)
    return a, B, custom_vecmatmul(a, B)
def agentXML(request, identifier):
    """
    Return a representation of a given agent.

    If the request path contains 'premis', a PREMIS agent XML document is
    returned; otherwise the agent XML is wrapped in an Atom entry.
    """
    if 'premis' in request.path:
        # Strip an optional '.premis' suffix from the identifier.
        identifier = identifier.replace('.premis', '')
        try:
            agentObject = Agent.objects.get(agent_identifier=identifier)
        except Agent.DoesNotExist:
            return HttpResponseNotFound(
                "There is no agent with the identifier %s" % identifier
            )
        returnXML = objectToPremisAgentXML(
            agentObject,
            webRoot=request.get_host() + '/',
        )
        returnText = XML_HEADER % etree.tostring(returnXML, pretty_print=True)
        content_type = "application/xml"
    else:
        # NOTE(review): this lookup duplicates the branch above; kept as-is
        # since this is a documentation-only pass.
        try:
            agentObject = Agent.objects.get(agent_identifier=identifier)
        except Agent.DoesNotExist:
            return HttpResponseNotFound(
                "There is no agent with the identifier %s" % identifier
            )
        agent_obj_xml = objectToAgentXML(agentObject)
        # Absolute link used as the Atom entry's alternate href.
        althref = request.build_absolute_uri(
            reverse('agent-detail', args=[identifier, ])
        )
        return_atom = wrapAtom(
            agent_obj_xml, identifier, identifier,
            alt=althref
        )
        returnText = XML_HEADER % etree.tostring(return_atom, pretty_print=True)
        content_type = "application/atom+xml"
    return HttpResponse(returnText, content_type=content_type)
from dmlc_tracker import opts
def dmlc_opts(opts):
    """Convert mxnet's opts to dmlc's opts.

    :param opts: an object carrying num_workers, num_servers, launcher,
        hostfile, sync_dst_dir and command attributes.
    :return: the parsed dmlc_tracker options.
    :raises ImportError: if the dmlc_tracker submodule is not available.
    """
    args = ['--num-workers', str(opts.num_workers),
            '--num-servers', str(opts.num_servers),
            '--cluster', opts.launcher,
            '--host-file', opts.hostfile,
            '--sync-dst-dir', opts.sync_dst_dir]
    args += opts.command
    try:
        # BUG FIX: the import belongs inside the try block (the original had
        # an empty try body, a SyntaxError).  Imported under a distinct name
        # so it does not shadow the `opts` parameter.
        from dmlc_tracker import opts as dmlc_tracker_opts
    except ImportError:
        print("Can't load dmlc_tracker package. Perhaps you need to run")
        print("    git submodule update --init --recursive")
        raise
    return dmlc_tracker_opts.get_opts(args)
from typing import Optional
from typing import Sequence
def get_virtual_border_routers(filters: Optional[Sequence[pulumi.InputType['GetVirtualBorderRoutersFilterArgs']]] = None,
                               ids: Optional[Sequence[str]] = None,
                               name_regex: Optional[str] = None,
                               output_file: Optional[str] = None,
                               status: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualBorderRoutersResult:
    """
    This data source provides the Express Connect Virtual Border Routers of the current Alibaba Cloud user.

    > **NOTE:** Available in v1.134.0+.

    :param Sequence[pulumi.InputType['GetVirtualBorderRoutersFilterArgs']] filters: Custom filter block as described below.
    :param Sequence[str] ids: A list of Virtual Border Router IDs.
    :param str name_regex: A regex string to filter results by Virtual Border Router name.
    :param str status: The VBR state.
    """
    # Build the plain-dict payload expected by the engine invoke; keys use
    # the provider's camelCase naming.
    __args__ = dict()
    __args__['filters'] = filters
    __args__['ids'] = ids
    __args__['nameRegex'] = name_regex
    __args__['outputFile'] = output_file
    __args__['status'] = status
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the invoke to this SDK's own version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('alicloud:expressconnect/getVirtualBorderRouters:getVirtualBorderRouters', __args__, opts=opts, typ=GetVirtualBorderRoutersResult).value

    # Repackage the raw invoke result into the awaitable result type.
    return AwaitableGetVirtualBorderRoutersResult(
        filters=__ret__.filters,
        id=__ret__.id,
        ids=__ret__.ids,
        name_regex=__ret__.name_regex,
        names=__ret__.names,
        output_file=__ret__.output_file,
        routers=__ret__.routers,
        status=__ret__.status)
import math
def cal_angle(center, point):
    """
    Compute the angle, in degrees in [0, 360), between the vector
    center -> point and the positive x axis, with `center` as the vertex.

    :param center: vertex, tuple (only the first two components are used)
    :param point: a point off the x axis, tuple (first two components used)
    :return: angle in degrees, measured counter-clockwise from the +x axis

    Resolves the old TODO: math.atan2 handles every quadrant directly, so
    there is no acos domain risk and no division-by-zero clamp.  For the
    degenerate case center == point the result is 0.0 (atan2(0, 0) == 0).
    """
    cx, cy = center[:2]
    px, py = point[:2]
    angle = math.degrees(math.atan2(py - cy, px - cx))
    # atan2 returns (-180, 180]; map negatives into [0, 360).
    return angle % 360
def PermutationGroup(gens=None, gap_group=None, domain=None, canonicalize=True, category=None):
    """
    Return the permutation group associated to `x` (typically a
    list of generators).

    INPUT:

    - ``gens`` - list of generators (default: ``None``)

    - ``gap_group`` - a gap permutation group (default: ``None``)

    - ``canonicalize`` - bool (default: ``True``); if ``True``,
      sort generators and remove duplicates

    OUTPUT:

    - A permutation group.

    EXAMPLES::

        sage: G = PermutationGroup([[(1,2,3),(4,5)],[(3,4)]])
        sage: G
        Permutation Group with generators [(3,4), (1,2,3)(4,5)]

    We can also make permutation groups from PARI groups::

        sage: H = pari('x^4 - 2*x^3 - 2*x + 1').polgalois()
        sage: G = PariGroup(H, 4); G
        PARI group [8, -1, 3, "D(4)"] of degree 4
        sage: H = PermutationGroup(G); H          # optional - database_gap
        Transitive group number 3 of degree 4
        sage: H.gens()                            # optional - database_gap
        [(1,2,3,4), (1,3)]

    We can also create permutation groups whose generators are Gap
    permutation objects::

        sage: p = gap('(1,2)(3,7)(4,6)(5,8)'); p
        (1,2)(3,7)(4,6)(5,8)
        sage: PermutationGroup([p])
        Permutation Group with generators [(1,2)(3,7)(4,6)(5,8)]

    Permutation groups can work on any domain. In the following
    examples, the permutations are specified in list notation,
    according to the order of the elements of the domain::

        sage: list(PermutationGroup([['b','c','a']], domain=['a','b','c']))
        [(), ('a','b','c'), ('a','c','b')]
        sage: list(PermutationGroup([['b','c','a']], domain=['b','c','a']))
        [()]
        sage: list(PermutationGroup([['b','c','a']], domain=['a','c','b']))
        [(), ('a','b')]

    There is an underlying gap object that implements each
    permutation group::

        sage: G = PermutationGroup([[(1,2,3,4)]])
        sage: G._gap_()
        Group( [ (1,2,3,4) ] )
        sage: gap(G)
        Group( [ (1,2,3,4) ] )
        sage: gap(G) is G._gap_()
        True
        sage: G = PermutationGroup([[(1,2,3),(4,5)],[(3,4)]])
        sage: current_randstate().set_seed_gap()
        sage: G._gap_().DerivedSeries()
        [ Group( [ (3,4), (1,2,3)(4,5) ] ), Group( [ (1,5)(3,4), (1,5)(2,4), (1,5,3) ] ) ]

    TESTS::

        sage: r = Permutation("(1,7,9,3)(2,4,8,6)")
        sage: f = Permutation("(1,3)(4,6)(7,9)")
        sage: PermutationGroup([r,f]) #See Trac #12597
        Permutation Group with generators [(1,3)(4,6)(7,9), (1,7,9,3)(2,4,8,6)]

        sage: PermutationGroup(SymmetricGroup(5))
        Traceback (most recent call last):
        ...
        TypeError: gens must be a tuple, list, or GapElement
    """
    # Objects such as PariGroup know how to convert themselves into a
    # permutation group; delegate to their hook when present.
    if not is_ExpectElement(gens) and hasattr(gens, '_permgroup_'):
        return gens._permgroup_()
    if gens is not None and not isinstance(gens, (tuple, list, GapElement)):
        raise TypeError("gens must be a tuple, list, or GapElement")
    # All other cases are handled by the generic constructor.
    return PermutationGroup_generic(gens=gens, gap_group=gap_group, domain=domain,
                                    canonicalize=canonicalize, category=category)
def lunar_diameter(tee):
    """Geocentric apparent lunar diameter of the moon (in degrees) at
    moment `tee`.

    Adapted from 'Astronomical Algorithms' by Jean Meeus,
    Willmann_Bell, Inc., 2nd ed.
    """
    mean_angular_size = deg(1792367000/9)
    return mean_angular_size / lunar_distance(tee)
def findCongressPerson(name, nicknames_json):
    """
    Check the nicknames endpoint data of the NYT Congress API to determine
    if the inputted name is that of a member of Congress.

    :param name: the nickname to look up
    :param nicknames_json: list of dicts, each with a 'nickname' key
    :return: True if any entry's nickname matches, else False
    """
    # any() short-circuits on the first match instead of materializing the
    # whole filtered list and testing its length, as the old code did.
    return any(entry['nickname'] == name for entry in nicknames_json)
from typing import Tuple
def make_canonical_transform_np(
    n_xyz: np.ndarray,
    ca_xyz: np.ndarray,
    c_xyz: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Returns translation and rotation matrices to canonicalize residue atoms.

    Note that this method does not take care of symmetries. If you provide the
    atom positions in the non-standard way, the N atom will end up not at
    [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You
    need to take care of such cases in your code.

    Args:
        n_xyz: An array of shape [batch, 3] of nitrogen xyz coordinates.
        ca_xyz: An array of shape [batch, 3] of carbon alpha xyz coordinates.
        c_xyz: An array of shape [batch, 3] of carbon xyz coordinates.

    Returns:
        A tuple (translation, rotation) where:
            translation is an array of shape [batch, 3] defining the translation.
            rotation is an array of shape [batch, 3, 3] defining the rotation.
        After applying the translation and rotation to all atoms in a residue:
            * All atoms will be shifted so that CA is at the origin,
            * All atoms will be rotated so that C is at the x-axis,
            * All atoms will be shifted so that N is in the xy plane.
    """
    assert len(n_xyz.shape) == 2, n_xyz.shape
    assert n_xyz.shape[-1] == 3, n_xyz.shape
    assert n_xyz.shape == ca_xyz.shape == c_xyz.shape, (n_xyz.shape, ca_xyz.shape, c_xyz.shape)
    # Place CA at the origin.
    translation = -ca_xyz
    n_xyz = n_xyz + translation
    c_xyz = c_xyz + translation
    # Place C on the x-axis.
    c_x, c_y, c_z = [c_xyz[:, i] for i in range(3)]
    # Rotate by angle c1 in the x-y plane (around the z-axis).
    # The 1e-20 terms guard against division by zero for degenerate inputs.
    sin_c1 = -c_y / np.sqrt(1e-20 + c_x**2 + c_y**2)
    cos_c1 = c_x / np.sqrt(1e-20 + c_x**2 + c_y**2)
    zeros = np.zeros_like(sin_c1)
    ones = np.ones_like(sin_c1)
    # pylint: disable=bad-whitespace
    c1_rot_matrix = np.stack([np.array([cos_c1, -sin_c1, zeros]),
                              np.array([sin_c1, cos_c1, zeros]),
                              np.array([zeros, zeros, ones])])
    # Rotate by angle c2 in the x-z plane (around the y-axis).
    sin_c2 = c_z / np.sqrt(1e-20 + c_x**2 + c_y**2 + c_z**2)
    cos_c2 = np.sqrt(c_x**2 + c_y**2) / np.sqrt(
        1e-20 + c_x**2 + c_y**2 + c_z**2)
    c2_rot_matrix = np.stack([np.array([cos_c2, zeros, sin_c2]),
                              np.array([zeros, ones, zeros]),
                              np.array([-sin_c2, zeros, cos_c2])])
    # Compose the two rotations, then rotate N into the new frame.
    c_rot_matrix = _multiply_np(c2_rot_matrix, c1_rot_matrix)
    n_xyz = np.stack(apply_rot_to_vec_np(c_rot_matrix, n_xyz, unstack=True)).T
    # Place N in the x-y plane.
    _, n_y, n_z = [n_xyz[:, i] for i in range(3)]
    # Rotate by angle alpha in the y-z plane (around the x-axis).
    sin_n = -n_z / np.sqrt(1e-20 + n_y**2 + n_z**2)
    cos_n = n_y / np.sqrt(1e-20 + n_y**2 + n_z**2)
    n_rot_matrix = np.stack([np.array([ones, zeros, zeros]),
                             np.array([zeros, cos_n, -sin_n]),
                             np.array([zeros, sin_n, cos_n])])
    # Transpose so the per-batch rotation matrices come out as [batch, 3, 3].
    return (translation, np.transpose(_multiply_np(n_rot_matrix, c_rot_matrix), [2, 0, 1]))
def get_data_for_result_table(all_answers):
    """
    Generate simple data for result table (question text -> answer status).

    @param all_answers: dict mapping question_id to the list of answers given
        for that question.
    @return: dict mapping question text to its status string.
    """
    return {
        Question.objects.get(id=question_id).text:
            ('верно' if check_answer(question_id, answers_list) else 'ошибка')
        for question_id, answers_list in all_answers.items()
    }
def _validate_positive_int(value):
"""Validate value is a natural number."""
try:
value = int(value)
except ValueError as err:
raise ValueError("Could not convert to int") from err
if value > 0:
return value
else:
raise ValueError("Only positive values are valid") | ddc2087d69c96fa72594da62192df58555b25029 | 3,631,462 |
def transpose(table):
    """
    Returns a copy of table with rows and columns swapped

    Example:
        1 2     1 3 5
        3 4  => 2 4 6
        5 6

    Parameter table: the table to transpose
    Precondition: table is a rectangular 2d List of numbers
    """
    # BUG FIX: the original was an unimplemented stub that always returned [].
    if not table:
        # An empty table transposes to an empty table.
        return []
    # Column j of the input becomes row j of the result.
    return [[row[col] for row in table] for col in range(len(table[0]))]
def predict_posterior_marginals(
        F, features, mean, kernel, chol_fact, pred_mat, test_features,
        test_intermediates=None):
    """
    Computes posterior means and variances for test_features.

    If pred_mat is a matrix, so will be posterior_means, but not
    posterior_variances. Reflects the fact that for GP regression and fixed
    hyperparameters, the posterior mean depends on the targets y, but the
    posterior covariance does not.

    :param F: mx.sym or mx.nd
    :param features: Training inputs
    :param mean: Mean function
    :param kernel: Kernel function
    :param chol_fact: Part L of posterior state
    :param pred_mat: Part P of posterior state
    :param test_features: Test inputs
    :param test_intermediates: Optional dict; if given (and F is ndarray),
        intermediate arrays are stored into it for testing.
    :return: posterior_means, posterior_variances
    """
    # Cross-covariance between training and test inputs.
    k_tr_te = kernel(features, test_features)
    # Triangular solve with the Cholesky factor: L^{-1} K_tr_te.
    linv_k_tr_te = F.linalg.trsm(chol_fact, k_tr_te)
    # Posterior mean: (L^{-1} K_tr_te)^T P + mean(test_features).
    posterior_means = F.broadcast_add(
        F.linalg.gemm2(
            linv_k_tr_te, pred_mat, transpose_a=True, transpose_b=False),
        F.reshape(mean(test_features), shape=(-1, 1)))
    # Marginal (diagonal-only) predictive variances; no full test
    # covariance matrix is ever formed.
    posterior_variances = kernel.diagonal(F, test_features) - F.sum(
        F.square(linv_k_tr_te), axis=0)
    # For testing:
    if test_intermediates is not None and mxnet_is_ndarray(F):
        assert isinstance(test_intermediates, dict)
        test_intermediates.update({
            'k_tr_te': k_tr_te.asnumpy(),
            'linv_k_tr_te': linv_k_tr_te.asnumpy(),
            'test_features': test_features.asnumpy(),
            'pred_means': posterior_means.asnumpy(),
            'pred_vars': F.reshape(F.maximum(
                posterior_variances, MIN_POSTERIOR_VARIANCE),
                shape=(-1,)).asnumpy()})
    # Variances are clamped below by MIN_POSTERIOR_VARIANCE for numerical
    # safety before being returned as a flat vector.
    return posterior_means, F.reshape(F.maximum(
        posterior_variances, MIN_POSTERIOR_VARIANCE), shape=(-1,))
def rule_ContributesLight_possessions_can_light_person(x, world):  # maybe should handle concealment at some point?
    """A person contributes light if any of their possessions contribute light."""
    for possession in world[Contents(x)]:
        if world[ContributesLight(possession)]:
            return True
    raise NotHandled()
import os
def update_keras_bn_ops_trainable_flag(model: tf.keras.Model, trainable: bool, load_save_path: str) -> tf.keras.Model:
    """
    Helper to update the trainable state of all BatchNormalization ops in a
    given Keras model.

    :param model: Keras model to be updated with BN ops trainable flag
    :param trainable: bool flag to indicate trainable to be set to true or false
    :param load_save_path: temp folder to perform load/save; the file created
        there is cleaned up afterwards
    :return: updated keras model
    """
    if not os.path.exists(load_save_path):
        os.mkdir(load_save_path)
    checkpoint = os.path.join(load_save_path, 't.h5')
    # Flip the flag on every BatchNormalization layer.
    for layer in model.layers:
        if isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = trainable
    # Save, clear the session, and reload the model with the updated flags.
    save_model(model, checkpoint)
    tf.compat.v1.keras.backend.clear_session()
    reloaded = load_model(checkpoint)
    # Clean up the temporary checkpoint after use.
    if os.path.exists(checkpoint):
        os.remove(checkpoint)
    return reloaded
def xml():
    """Return an XML (sitemap) response with an HTTP 200 OK status."""
    sitemap: str = """<?xml version="1.0" encoding="UTF-8"?>
    <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
        <url>
            <loc>http://www.example.com/</loc>
            <lastmod>2005-01-01</lastmod>
            <changefreq>monthly</changefreq>
            <priority>0.8</priority>
        </url>
    </urlset>
    """
    return Responder.xml.ok(sitemap)
from typing import Iterable
import pathlib
def read_device_files(directory_paths: Iterable[pathlib.Path]) -> DeviceFileInfo:
    """Read data from files contained on an mbed enabled device's USB mass storage device.

    If details.txt exists and it contains a product code, then we will use that code. If not then we try to grep the
    code from the mbed.htm file. We extract an OnlineID from mbed.htm as we also make use of that information to find a
    board entry in Mbed OS's various target databases and JSON files.

    Args:
        directory_paths: Paths to the directories containing device files.
    """
    file_paths = _get_device_file_paths(directory_paths)
    if not file_paths:
        searched = "\n".join(str(p) for p in directory_paths)
        logger.warning(
            f"No files were found in the device's mass storage device. The following paths were searched:\n{searched}."
            "\nThis device may not be identifiable as Mbed enabled. Check the files exist, are not hidden and are not "
            "corrupted."
        )
        return DeviceFileInfo(None, None, {})
    htm_contents = _read_htm_file_contents(file_paths)
    details_txt = _read_first_details_txt_contents(file_paths)
    # details.txt is the "preferred" source of truth for the product_code
    product_code = details_txt.get("code")
    if product_code is None:
        # Fall back to grepping the code out of mbed.htm.
        product_code = _extract_product_code_from_htm(htm_contents)
    online_id = _extract_online_id_from_htm(htm_contents)
    if online_id is None:
        # No online ID in mbed.htm — the device may be a J-Link.
        online_id = _extract_online_id_jlink_html(file_paths)
        details_txt.update(_extract_version_jlink_html(file_paths))
    return DeviceFileInfo(product_code, online_id, details_txt)
def max_rl(din):
    """
    A MAX function should "go high" only when all of its inputs have
    arrived, so it is built from AND gates.

    Input: a list of 1-bit WireVectors
    Output: a 1-bit WireVector
    """
    # Iterative right-to-left fold, equivalent to the recursive form
    # din[0] & (din[1] & (... & din[-1])).
    dout = din[-1]
    for wire in din[-2::-1]:
        dout = wire & dout
    return dout
def compute_success(
    classifier: "CLASSIFIER_TYPE",
    x_clean: np.ndarray,
    labels: np.ndarray,
    x_adv: np.ndarray,
    targeted: bool = False,
    batch_size: int = 1,
) -> float:
    """
    Compute the success rate of an attack from clean samples, adversarial samples and labels.

    :param classifier: Classifier used for prediction.
    :param x_clean: Original clean samples.
    :param labels: Correct labels of `x_clean` if the attack is untargeted, or target labels of the attack otherwise.
    :param x_adv: Adversarial samples to be evaluated.
    :param targeted: `True` if the attack is targeted. In that case, `labels` are treated as target classes instead of
                     correct labels of the clean samples.
    :param batch_size: Batch size.
    :return: Percentage of successful adversarial samples.
    """
    per_sample = compute_success_array(classifier, x_clean, labels, x_adv, targeted, batch_size)
    # Fraction of samples for which the attack succeeded.
    return np.sum(per_sample) / x_adv.shape[0]
def set_up_basic_stubs(app_id):
    """Set up a basic set of stubs.

    Configures datastore, memcache and taskqueue stubs for testing.

    Args:
        app_id: Application ID to configure stubs with.

    Returns:
        Dictionary mapping stub name to stub.
    """
    apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
    stubs = {
        'datastore': datastore_file_stub.DatastoreFileStub(app_id, None),
        'memcache': memcache_stub.MemcacheServiceStub(),
        'taskqueue': taskqueue_stub.TaskQueueServiceStub(),
    }
    apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stubs['datastore'])
    apiproxy_stub_map.apiproxy.RegisterStub('memcache', stubs['memcache'])
    apiproxy_stub_map.apiproxy.RegisterStub('taskqueue', stubs['taskqueue'])
    return stubs
from typing import Callable
from typing import List
def _load_init_model_weights(
        model_fn: Callable[[],
                           tff.learning.Model]) -> List[tff.learning.ModelWeights]:
    """Load model weights to warm-start HypCluster.

    Loads the `FLAGS.num_clusters` most recent checkpoints found under
    `FLAGS.warmstart_root_dir` and returns their model weights.

    Raises:
        ValueError: if the checkpoint directory holds fewer checkpoints than
            `FLAGS.num_clusters`.
    """
    state_manager = tff.program.FileProgramStateManager(FLAGS.warmstart_root_dir)
    # NOTE(review): this process appears to exist only to provide the state
    # structure for loading and to extract model weights; its optimizers are
    # never stepped here — confirm against the training pipeline.
    learning_process_for_metedata = tff.learning.algorithms.build_weighted_fed_avg(
        model_fn=model_fn,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(1.0),
        server_optimizer_fn=lambda: tf.keras.optimizers.Adam(1.0),
        client_weighting=tff.learning.ClientWeighting.NUM_EXAMPLES,
        model_aggregator=tff.learning.robust_aggregator(
            zeroing=True, clipping=True, add_debug_measurements=True))
    init_state = learning_process_for_metedata.initialize()
    loaded_models = []
    versions_saved = state_manager.versions()
    if FLAGS.num_clusters >= len(versions_saved):
        raise ValueError(
            f'The checkpoint directory {FLAGS.warmstart_root_dir} only has '
            f'{len(versions_saved)-1} checkpoints, but expected to load '
            f'{FLAGS.num_clusters} models. Please use a smaller value for '
            'FLAGS.num_clusters, or use a different checkpoint directory.')
    # Walk backwards from the newest saved version so warm-start models come
    # from the most recent checkpoints.
    for i in range(1, FLAGS.num_clusters + 1):
        version = versions_saved[-i]
        state = state_manager.load(version=version, structure=init_state)
        loaded_models.append(learning_process_for_metedata.get_model_weights(state))
return loaded_models | 9abfb5e155ab6408da36d21a04556e6094866ff2 | 3,631,472 |
import requests
def process_request(url, auth, timeout=None):
    """Perform an HTTP GET request and return the decoded JSON body.

    :param url: full url to query
    :type url: ``str``
    :param auth: username, password credentials
    :type auth: ``tuple`` || ``None``
    :param timeout: seconds to wait for the server before giving up; ``None``
        (the default) preserves the previous wait-indefinitely behaviour
    :type timeout: ``float`` || ``tuple`` || ``None``
    :returns: ``dict``
    """
    content = requests.get(url, auth=auth, timeout=timeout)
    # Any redirect-or-worse status aborts with the raw response body.
    if content.status_code >= 300:
        raise SystemExit(content.content)
    return content.json()
def close_channel(sender_addr, receiver_addr, channel_name):
    """Settle a payment channel's balance on-chain.

    :param sender_addr: String, the sender address (currently unused: the
        endpoints are re-derived from ``channel_name``)
    :param receiver_addr: String, receiver's address (currently unused)
    :param channel_name: String, channel name encoding both endpoints
    :return: result of the on-chain settlement
    """
    endpoint_a, endpoint_b = split_channel_name(channel_name)
    channel = Channel(endpoint_a, endpoint_b)
    return channel.settle_balance_onchain()
from typing import Collection
from typing import Tuple
from typing import Iterator
from typing import Set
def get_proj_edges(edges: Collection[Tuple[int, int]]) -> Iterator[Tuple[int, int]]:
    """Obtain projective edges from a collection of edges of a dependency tree.

    An edge (u, v) is projective when every node strictly between u and v is
    reachable from u. Edges are yielded in their input order.
    """
    successors: dict = defaultdict(set)
    for head, dep in edges:
        successors[head].add(dep)

    def reachable(start: int) -> Set[int]:
        # Iterative DFS collecting every node reachable from ``start``
        # (including ``start`` itself).
        visited: Set[int] = set()
        frontier = [start]
        while frontier:
            node = frontier.pop()
            visited.add(node)
            frontier.extend(child for child in successors[node]
                            if child not in visited)
        return visited

    all_nodes = {node for edge in edges for node in edge}
    descendants = {node: reachable(node) for node in all_nodes}
    for head, dep in edges:
        lo, hi = min(head, dep), max(head, dep)
        if all(between in descendants[head] for between in range(lo + 1, hi)):
            yield (head, dep)
from typing import Union
from typing import Any
def convertClrs(clr: Union[dict[Any, Union[str, Color]], Color], conversion: str) -> Union[str, tuple, dict, None]:
    """
    Convert color values to HEX and vice-versa.

    @clr: Color value to convert. May be a dict (converted recursively,
        value by value), an HTML color name string, a "#rrggbb"/"#rgb"
        string, or an (r, g, b) tuple/list.
    @conversion: Type of conversion to do ('RGB' or 'HEX')

    Values that do not match the requested conversion's expected input are
    returned unchanged; an unknown conversion type yields "" (on the
    fall-through branch below this span).
    """
    if isinstance(clr, dict): # its a dict, call itself with the value
        return mapDict(clr, lambda x: convertClrs(x, conversion))
    elif isinstance(clr, str):
        color = clr.lower()
        # Resolve HTML color names (e.g. "red") to their stored value first.
        if color in _HTML_COLOR_NAMES: color = _HTML_COLOR_NAMES[color] # check if its a html color name
    else:
        color = clr
    if conversion == "HEX":
        # Only a 3-component sequence can be rendered as hex; anything else
        # passes through untouched.
        if not isinstance(color, (tuple, list)) or len(color) != 3: return color
        # capValue presumably clamps each channel into [0, 255] — confirm.
        capped = tuple(capValue(value, 255, 0) for value in color)
        return f"#{capped[0]:02x}{capped[1]:02x}{capped[2]:02x}"
    elif conversion == "RGB":
        if not isinstance(color, str) or not color.startswith("#"):
            return color
        clrs = color.lstrip("#")
        if len(clrs) == 3:
            clrs = "".join(c*2 for c in clrs) # if size of hex color is 3, just duplicate chars to make it 6
        try:
            return tuple(int(clrs[i:i+2], 16) for i in (0, 2, 4))
        except ValueError:
            # Malformed hex digits: hand back the original string.
            return color
else:
return "" | 70329d19984f970fcaee86ca9403615a2995e9a9 | 3,631,476 |
def fuzzy_op(x, a, y, b, op):
    """Operation of two fuzzy sets.

    Combine fuzzy set ``a`` (on universe ``x``) with fuzzy set ``b`` (on
    universe ``y``) through a pointwise binary operator ``op`` (+, *, min,
    max, ...), following Zadeh's Extension Principle (Ross, Fuzzy Logic with
    Engineering Applications (2010), pp. 414, Eq. 12.17).

    Parameters
    ----------
    x : 1d array, length N
        Universe variable for fuzzy set ``a``.
    a : 1d array, length N
        Fuzzy set for universe ``x``.
    y : 1d array, length M
        Universe variable for fuzzy set ``b``.
    b : 1d array, length M
        Fuzzy set for universe ``y``.
    op : callable
        Pointwise binary operator applied to two (broadcastable) matrices.

    Returns
    -------
    z : 1d array
        Output variable (unique values of ``op`` over the universe grid).
    mfz : 1d array
        Fuzzy membership set for variable ``z``.

    Notes
    -----
    If these results are unexpected and your membership functions are convex,
    consider the ``skfuzzy.dsw_*`` functions, which use interval arithmetic
    via the restricted Dong, Shah, and Wong method.
    """
    # Sparse meshgrids broadcast x against y (and a against b) without
    # materialising full (N, M) matrices up front.
    yy, xx = np.meshgrid(y, x, sparse=True)
    bb, aa = np.meshgrid(b, a, sparse=True)

    # Apply the crisp operator to every pair of universe points and sort the
    # flattened result so equal outputs become adjacent.
    combined = op(xx, yy).ravel()
    order = np.argsort(combined)
    combined = np.sort(combined)

    # Membership of each pair is the min of the two memberships, re-ordered
    # to stay aligned with ``combined``.
    memberships = np.fmin(aa, bb).ravel()[order]

    # Collapse runs of equal output values, keeping the max membership of
    # each run (the sup in the extension principle).
    z, mfz = np.zeros(0), np.zeros(0)
    pos = 0
    while pos < len(memberships):
        group = np.nonzero(combined == combined[pos])[0]
        z = np.hstack((z, combined[pos]))
        mfz = np.hstack((mfz, memberships[group].max()))
        pos = group[-1] + 1
    return z, mfz
def check_int(item):
    """Validate that a wx text control holds an integer, colouring it as feedback.

    :param item: txtcrtl containing a value
    :return: True if the control's value parses as an ``int``, else False
    """
    try:
        # Only the parse can legitimately fail; the original bare ``except:``
        # also swallowed KeyboardInterrupt/SystemExit and any wx error.
        int(item.GetValue())
    except (TypeError, ValueError):
        # Invalid input: flag the control in pink so the user notices.
        item.SetBackgroundColour("pink")
        item.Refresh()
        return False
    # Valid input: restore the normal background.
    item.SetBackgroundColour(wx.WHITE)
    item.Refresh()
    return True
def load_h5py(path):
    """Loads datasets from a file.

    params:
        path: A string, which is a path to the dataset
    return: A dictionary, which contains whichever of the splits
        ('train_x', 'train_y', 'validation_x', 'validation_y',
        'test_x', 'test_y') are present in the file
    """
    # Single loop replaces six copy-pasted if-blocks; behavior is identical.
    split_keys = ('train_x', 'train_y',
                  'validation_x', 'validation_y',
                  'test_x', 'test_y')
    dataset = {}
    with h5py.File(path, 'r') as hf:
        for key in split_keys:
            if key in hf:
                # `[:]` copies the HDF5 dataset into an in-memory ndarray.
                dataset[key] = hf[key][:]
    return dataset
def from_timedelta(val):
    """escape a python datetime.timedelta

    Splits the total seconds into hours/minutes/seconds and forwards them,
    together with the sub-second microseconds, to ``_time``.
    """
    total_seconds = int(val.total_seconds())
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return _time(hours, minutes, seconds, val.microseconds)
def stringify_column(df: DataFrame, column: str) -> DataFrame:
    """Takes dataframe and column that contains array structures. Stringify that column values."""
    # Wrap the plain-Python converter as a Spark UDF returning strings.
    to_string = udf(array_to_string, StringType())
    return df.withColumn(column, to_string(df[column]))
import platform
def get_os():
    """Return a human-readable description of the running operating system.

    :return: operating system identifier, e.g. ``'Linux-5.15...-x86_64...'``
    :rtype: str or unicode
    """
    return platform.platform()
def powerLaw(y, x):
    """
    Estimate the constants of a power-law relation y = c . x ^ alpha
    ('when the frequency of an event varies as a power of some attribute of
    that event the frequency is said to follow a power law' — wikipedia).

    Args
    --------
    y: array with frequency of events >0
    x: numpy array with attribute of events >0

    Output
    --------
    (c, alpha)
    c: the maximum frequency of any event
    alpha: maximum-likelihood exponent (see Newman, 2005):
        alpha = 1 + n * sum(ln( xi / xmin )) ^ -1
    """
    # Guard clause: empty or mismatched inputs yield the neutral estimate.
    if not len(y) or len(y) != len(x):
        return (0, .0)
    max_freq = max(y)
    xmin = float(min(x))
    alpha = 1 + len(x) * pow(sum(np.log(x / xmin)), -1)
    return (max_freq, alpha)
def _separate_talairach_levels(atlas_img, labels, verbose=1):
    """Separate the multiple annotation levels in talairach raw atlas.
    The Talairach atlas has five levels of annotation: hemisphere, lobe, gyrus,
    tissue, brodmann area. They are mixed up in the original atlas: each label
    in the atlas corresponds to a 5-tuple containing, for each of these levels,
    a value or the string '*' (meaning undefined, background).
    This function disentangles the levels, and stores each on an octet in an
    int64 image (the level with most labels, ba, has 72 labels).
    This way, any subset of these levels can be accessed by applying a bitwise
    mask.
    In the created image, the least significant octet contains the hemisphere,
    the next one the lobe, then gyrus, tissue, and ba. Background is 0.
    The labels contain
    [('level name', ['labels', 'for', 'this', 'level' ...]), ...],
    where the levels are in the order mentioned above.
    The label '*' is replaced by 'Background' for clarity.
    """
    labels = np.asarray(labels)
    if verbose:
        print(
            'Separating talairach atlas levels: {}'.format(_TALAIRACH_LEVELS))
    levels = []
    new_img = np.zeros(atlas_img.shape, dtype=np.int64)
    for pos, level in enumerate(_TALAIRACH_LEVELS):
        if verbose:
            print(level)
        # Per-level label image: each voxel holds the index of its region
        # name within this level's label list.
        level_img = np.zeros(atlas_img.shape, dtype=np.int64)
        level_labels = {'*': 0}
        for region_nb, region in enumerate(labels[:, pos]):
            # setdefault hands out the next free index the first time a
            # region name is seen, so indices follow order of first
            # appearance ('*' is pinned to 0).
            level_labels.setdefault(region, len(level_labels))
            level_img[get_data(atlas_img) == region_nb] = level_labels[
                region]
        # shift this level to its own octet and add it to the new image
        level_img <<= 8 * pos
        new_img |= level_img
        # order the labels so that image values are indices in the list of
        # labels for each level
        level_labels = list(list(
            zip(*sorted(level_labels.items(), key=lambda t: t[1])))[0])
        # rename '*' -> 'Background'
        level_labels[0] = 'Background'
        levels.append((level, level_labels))
    new_img = new_img_like(atlas_img, data=new_img)
return new_img, levels | 1d05eab354eada01322bfd2fb79bcdeaeaf4ab34 | 3,631,484 |
def merge(a, b):
    """
    Hierarchical merge of dictionaries, lists, tuples and sets.

    Dicts are merged recursively key by key; lists and tuples are
    concatenated; sets are unioned. If b is None, a is kept; in every other
    ambiguity b wins. The result is always a deepcopy, never a reference to
    the original objects.
    """
    if isinstance(a, dict) and isinstance(b, dict):
        shared = a.keys() & b.keys()
        return {
            key: merge(a[key], b[key]) if key in shared
            else deepcopy(a[key] if key in a else b[key])
            for key in a.keys() | b.keys()
        }
    # Same-kind sequences concatenate; sets union.
    for sequence_kind in (list, tuple):
        if isinstance(a, sequence_kind) and isinstance(b, sequence_kind):
            return deepcopy(a) + deepcopy(b)
    if isinstance(a, set) and isinstance(b, set):
        return deepcopy(a) | deepcopy(b)
    # Scalars / mixed kinds: b overrides a unless b is None.
    return deepcopy(a if b is None else b)
def block_inception_a(blk, net):
    """Builds Inception-A block for Inception v4 network.

    Args:
        blk: name prefix for every layer added by this block.
        net: network under construction; layers are appended in place.
    """
    # By default use stride=1 and SAME padding
    # Input fans out into the four Inception branches.
    s = net.add(Split('%s/Split' % blk, 4))
    # Branch 0: single 1x1 convolution.
    br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 96, 1, src=s)
    # Branch 1: 1x1 reduction followed by a 3x3 convolution.
    conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 64, 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_3x3' % blk, 96, 3)
    # Branch 2: 1x1 reduction followed by two stacked 3x3 convolutions.
    conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 64, 1, src=s)
    conv2d(net, '%s/Branch_2/Conv2d_0b_3x3' % blk, 96, 3)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_3x3' % blk, 96, 3)
    # Branch 3: 3x3 average pooling followed by a 1x1 convolution.
    net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, stride=1), s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 96, 1)
return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3]) | c09d1d0c3c2465f9cd273a611ea16871635a6bea | 3,631,486 |
def mgas(sg, sp, gpotential, potential, xv, dt, kappa=1.0, alpha=1.0):
    """
    Evolve satellite gas mass due to tidal stripping over one timestep.

    The gas outside the ram-pressure radius l_rp is removed at a rate set by
    the host's local dynamical time:
        dm = alpha * [m - m(l_rp)] * dt / t_dyn
    where m is the satellite gas mass, m(l_rp) the gas mass within l_rp, and
    t_dyn the host dynamical time at r = sqrt(xv[0]**2 + xv[2]**2).

    Syntax:
        mgas(sg, sp, gpotential, potential, xv, dt, kappa=1., alpha=1.)
    where
        sg: satellite gas profile (an object of one of the classes defined
            in profiles.py)
        sp: satellite potential (profiles.py object)
        gpotential: gas part of the host potential (a density profile
            object, or a list of such objects forming a composite potential)
        potential: host potential (profile object or list, as above)
        xv: phase-space coordinates [R,phi,z,VR,Vphi,Vz] in units of
            [kpc,radian,kpc,kpc/Gyr,kpc/Gyr,kpc/Gyr] (float array)
        dt: time interval [Gyr] (float)
        kappa: order-unity fudge factor in front of the gravitational
            restoring pressure (0.5-2 depending on assumptions, see
            Zinger+18) (default=1.)
        alpha: stripping efficiency parameter -- larger is more efficient
            (default=1.)

    Return:
        evolved gas mass m [M_sun] (float), ram-pressure radius l_rp [kpc]
        (float)
    """
    stripping_radius = lram(sg, sp, gpotential, xv, kappa)
    if stripping_radius >= sg.rh:
        # Ram pressure cannot reach inside the gas extent: nothing is lost.
        return sg.Mh, stripping_radius
    host_tdyn = pr.tdyn(potential, xv[0], xv[2])
    mass_lost = alpha * (sg.Mh - sg.M(stripping_radius)) * dt / host_tdyn
    # Never strip below the resolution floor cfg.Mres.
    return max(sg.Mh - mass_lost, cfg.Mres), stripping_radius
import secrets
def genpass(pwds_amount=1, paswd_length=8):
    """Return a list of ``pwds_amount`` random passwords of length ``paswd_length``.

    Characters are drawn from the full printable ASCII range (codepoints
    32-126), as before, but via the ``secrets`` module instead of ``random``
    so the output is suitable for security-sensitive use.
    """
    printable_ascii = [chr(code) for code in range(32, 127)]
    return [
        ''.join(secrets.choice(printable_ascii) for _ in range(paswd_length))
        for _ in range(pwds_amount)
    ]
import os
def collect_fastq_data_irma(fc_root, fc_proj_src, proj_root=None, pid=None):
    """Collect the fastq files that have to be removed from IRMA.

    Returns a tuple (file_list, size): the collected files grouped per
    flowcell (plus optional project data) and the total size of the
    flowcell fastq files in bytes.
    """
    flowcell_id = os.path.basename(fc_root)
    flowcell_path = os.path.join(fc_root, fc_proj_src)
    fq_files = collect_files_by_ext(flowcell_path, "*.fastq.gz")
    file_list = {'flowcells': defaultdict(dict)}
    file_list['flowcells'][flowcell_id] = {'proj_root': flowcell_path,
                                           'fq_files': fq_files}
    # Optionally collect the project-level data directory as well.
    if proj_root and pid:
        proj_path = os.path.join(proj_root, pid)
        if not os.path.exists(proj_path):
            proj_data = None
        elif os.path.exists(os.path.join(proj_path, "cleaned")):
            proj_data = "cleaned"
        else:
            proj_data = {'proj_data_root': proj_path,
                         'fastq_files': collect_files_by_ext(proj_path, "*.fastq.gz")}
        file_list['proj_data'] = proj_data
    # Only the flowcell fastq files count towards the reported size.
    total_size = sum(os.path.getsize(fq) for fq in fq_files)
    return (file_list, total_size)
def _gini(x):
"""
Memory efficient calculation of Gini coefficient in relative mean difference form
Parameters
----------
x : array-like
Attributes
----------
g : float
Gini coefficient
Notes
-----
Based on http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
"""
n = len(x)
try:
x_sum = x.sum()
except AttributeError:
x = np.asarray(x)
x_sum = x.sum()
n_x_sum = n * x_sum
r_x = (2. * np.arange(1, len(x)+1) * x[np.argsort(x)]).sum()
return (r_x - n_x_sum - x_sum) / n_x_sum | 581f12e46544df307b8f53f2f4261779b7069d67 | 3,631,490 |
def _AddListFieldsMethod(message_descriptor, cls):
    """Helper for _AddMessageMethods()."""
    # Ensure that we always list in ascending field-number order.
    # For non-extension fields, we can do the sort once, here, at import-time.
    # For extensions, we sort on each ListFields() call, though
    # we could do better if we have to.
    fields = sorted(message_descriptor.fields, key=lambda f: f.number)
    # BUGFIX: materialize the (has_name, value_name, descriptor) triples
    # eagerly. Under Python 3, zip() over generator expressions yields a
    # one-shot iterator, so every ListFields() call after the first would
    # iterate an exhausted `triplets` and silently skip all non-extension
    # fields.
    triplets = [(_HasFieldName(f.name), _ValueFieldName(f.name), f)
                for f in fields]
    def ListFields(self):
        # We need to list all extension and non-extension fields
        # together, in sorted order by field number.
        # Step 0: Get an iterator over all "set" non-extension fields,
        # sorted by field number.
        # This iterator yields (field_number, field_descriptor, value) tuples.
        def SortedSetFieldsIter():
            # Note that triplets is already sorted by field number.
            for has_field_name, value_field_name, field_descriptor in triplets:
                if field_descriptor.label == _FieldDescriptor.LABEL_REPEATED:
                    value = getattr(self, _ValueFieldName(field_descriptor.name))
                    if len(value) > 0:
                        yield (field_descriptor.number, field_descriptor, value)
                elif getattr(self, _HasFieldName(field_descriptor.name)):
                    value = getattr(self, _ValueFieldName(field_descriptor.name))
                    yield (field_descriptor.number, field_descriptor, value)
        sorted_fields = SortedSetFieldsIter()
        # Step 1: Get an iterator over all "set" extension fields,
        # sorted by field number.
        # This iterator ALSO yields (field_number, field_descriptor, value) tuples.
        # TODO(robinson): It's not necessary to repeat this with each
        # serialization call.  We can do better.
        sorted_extension_fields = sorted(
            [(f.number, f, v) for f, v in self.Extensions._ListSetExtensions()])
        # Step 2: Create a composite iterator that merges the extension-
        # and non-extension fields, and that still yields fields in
        # sorted order.
        all_set_fields = _ImergeSorted(sorted_fields, sorted_extension_fields)
        # Step 3: Strip off the field numbers and return.
        return [field[1:] for field in all_set_fields]
    cls.ListFields = ListFields
def render_edit_view(request, form_name, nid):
    """according resource primary key,render edit view

    Arguments:
        request {object} -- wsgi http request object
        form_name {str} -- resources type name (must be a key of
            ``register_form``)
        nid {int} -- resources id

    Returns:
        html -- html template
    """
    if form_name not in register_form:
        return render(request, 'admin/error.html', {'error_msg': 'illegal request!'})
    # The 'view' query param switches the required permission from change
    # to view (read-only rendering of the same template).
    perm_action_flag = 'change'
    view_subject = request.GET.get('view', 0)
    if view_subject == "1":
        perm_action_flag = 'view'
    perm = register_form[form_name]['perm'] % perm_action_flag
    model = register_form[form_name]['model']
    # permission verify
    if not request.user.has_perm(perm):
        return render(request, 'admin/error.html')
    temp_name = 'admin/add_or_edit_%s.html' % form_name
    edit_obj = get_object_or_404(model, id=nid)
    # get all foreigenkey data
    cluster_data = ClusterInfo.objects.filter(is_active=0)
    branch_data = Branch.objects.filter(isenable=1)
    esxi_data = None
    if form_name == 'vm':
        # VMs additionally need the ESXi hosts belonging to the same cluster.
        esxi_data = HostInfo.objects.filter(cluster_tag=edit_obj.host.cluster_tag)
    context = {
        'obj': edit_obj,
        'view': view_subject
    }
    # Per-form extra template context; only the entry matching form_name
    # is merged into the context below.
    extra_context = {
        'vm': {
            'esxi_list': esxi_data,
            'zabbix_api': settings.ZABBIX_API,
            'cluster_data': cluster_data
        },
        'host': {
            'cluster_data': cluster_data,
            'branch_data': branch_data
        },
        'lan_net': {
            'branch_data': branch_data
        },
        'wan_net': {
            'branch_data': branch_data
        },
        'net_devices': {
            'branch_data': branch_data
        },
        'monitor': {
            'branch_data': branch_data
        }
    }
    # append default object
    if form_name in extra_context:
        context.update(extra_context[form_name])
    return render(
        request,
        temp_name,
        context
) | d69d0f2f45351077253b750fe0e197951c1fe853 | 3,631,492 |
def decrypt_default_password(message):
    """
    You Can Use this for internal data (Aka Non-User controlled data) that needs to be encrypted.

    :param message: ciphertext to decrypt; must be a ``bytes`` Fernet token
    :return: decrypted plaintext bytes
    :raises TypeError: if ``message`` is not a bytes object
    """
    # Guard clause; isinstance (rather than `type(...) == bytes`) also
    # accepts bytes subclasses, which is backward compatible.
    if not isinstance(message, bytes):
        raise TypeError("Passed a string object to a function accepting only bytes objects as input.")
    fernet = Fernet(getkey(Settings.ENCRYPTION_PASSWORD))
    return fernet.decrypt(message)
def getfeed(user):
    """Test post

    :user: Stuff from the interwebs
    :returns: stuff to the interwebs
    """
    payload = {'result_count': 'This is ' + user + '\'s feed!'}
    response = jsonify(payload)
    # Allow any origin to consume this endpoint (CORS).
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Parameters
    ----------
    y_true : array-like of ground-truth labels.
    y_pred : array-like of predicted labels.
    classes : sequence of class names used as axis tick labels.
    normalize : bool, normalize each row to sum to 1 when True.
    title : str or None, optional axes title.
    cmap : matplotlib colormap for the heatmap.

    Returns
    -------
    The matplotlib Axes holding the rendered confusion matrix.
    """
    # if not title:
    #     if normalize:
    #         title = 'Normalized confusion matrix'
    #     else:
    #         title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    # NOTE: this mutates global rcParams, affecting later plots in the
    # same process.
    plt.rcParams.update({'font.size': 25, 'axes.labelsize': 25,'xtick.labelsize':25,'ytick.labelsize':25})
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots(figsize=(12,10))
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), ha="right",
             rotation_mode="anchor", rotation=45)
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light, for readability.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
return ax | c18270860e090810a1a7d7aabbd1d2f2957609a2 | 3,631,495 |
def variables_and_orphans(i, o):
    """
    Extract the list of variables between the ``i`` and ``o`` nodes via a
    DFS traversal, and single out the orphans among them (ownerless
    variables that are not inputs).

    Parameters
    ----------
    i : list
        Input variables.
    o : list
        Output variables.
    """
    def neighbours(var):
        # Expand through the owning Apply node, unless ``var`` is an input
        # (inputs terminate the search); returns None when there is nothing
        # to expand.
        if var.owner and var not in i:
            related = list(var.owner.inputs) + list(var.owner.outputs)
            related.reverse()
            return related
    variables = stack_search(deque(o), neighbours, "dfs")
    orphans = [var for var in variables if var.owner is None and var not in i]
    return variables, orphans
def get_collected_quotas():
    """Return the current user's collected (favorited) quotas, paginated."""
    form = PaginationForm().validate_for_api()
    page_size = current_app.config['COUNT_DEFAULT']
    current_user_obj = get_current_user()
    result = paginate_data(current_user_obj.not_deleted_collected_quotas,
                           form.page.data, page_size)
    return jsonify(result)
from typing import List
def main(
    gcal_calendar: str,
    google_secret: str,
    oauth_port: int,
    tw_tags: List[str],
    tw_project: str,
    resolution_strategy: str,
    verbose: int,
    combination_name: str,
    custom_combination_savename: str,
    do_list_combinations: bool,
):
    """Synchronize calendars from your Google Calendar with filters from Taskwarrior.
    The list of TW tasks is determined by a combination of TW tags and a TW project while the
    calendar in GCal should be provided by their name. if it doesn't exist it will be created
    """
    # setup logger ----------------------------------------------------------------------------
    loguru_tqdm_sink(verbosity=verbose)
    log_to_syslog(name="tw_gcal_sync")
    logger.debug("Initialising...")
    inform_about_config = False
    if do_list_combinations:
        list_named_combinations(config_fname="tw_gcal_configs")
        return 0
    # cli validation --------------------------------------------------------------------------
    check_optional_mutually_exclusive(combination_name, custom_combination_savename)
    combination_of_tw_project_tags_and_gcal_calendar = any(
        [
            tw_project,
            tw_tags,
            gcal_calendar,
        ]
    )
    check_optional_mutually_exclusive(
        combination_name, combination_of_tw_project_tags_and_gcal_calendar
    )
    # existing combination name is provided ---------------------------------------------------
    if combination_name is not None:
        app_config = fetch_app_configuration(
            config_fname="tw_gcal_configs", combination=combination_name
        )
        tw_tags = app_config["tw_tags"]
        tw_project = app_config["tw_project"]
        gcal_calendar = app_config["gcal_calendar"]
    # combination manually specified ----------------------------------------------------------
    else:
        inform_about_config = True
        combination_name = cache_or_reuse_cached_combination(
            config_args={
                "gcal_calendar": gcal_calendar,
                "tw_project": tw_project,
                "tw_tags": tw_tags,
            },
            config_fname="tw_gcal_configs",
            custom_combination_savename=custom_combination_savename,
        )
    # at least one of tw_tags, tw_project should be set ---------------------------------------
    if not tw_tags and not tw_project:
        raise RuntimeError(
            "You have to provide at least one valid tag or a valid project ID to use for"
            " the synchronization"
        )
    # announce configuration ------------------------------------------------------------------
    logger.info(
        format_dict(
            header="Configuration",
            items={
                "TW Tags": tw_tags,
                "TW Project": tw_project,
                "Google Calendar": gcal_calendar,
            },
            prefix="\n\n",
            suffix="\n",
        )
    )
    # initialize sides ------------------------------------------------------------------------
    tw_side = TaskWarriorSide(tags=tw_tags, project=tw_project)
    gcal_side = GCalSide(
        calendar_summary=gcal_calendar, oauth_port=oauth_port, client_secret=google_secret
    )
    # sync ------------------------------------------------------------------------------------
    try:
        with Aggregator(
            side_A=gcal_side,
            side_B=tw_side,
            converter_B_to_A=convert_tw_to_gcal,
            converter_A_to_B=convert_gcal_to_tw,
            resolution_strategy=get_resolution_strategy(
                resolution_strategy, side_A_type=type(gcal_side), side_B_type=type(tw_side)
            ),
            config_fname=combination_name,
            ignore_keys=(
                (),
                ("due", "end", "entry", "modified", "urgency"),
            ),
        ) as aggregator:
            aggregator.sync()
    except KeyboardInterrupt:
        logger.error("Exiting...")
        return 1
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # GeneratorExit; narrow to Exception so only real errors are reported.
    except Exception:
        report_toplevel_exception(is_verbose=verbose >= 1)
        return 1
    if inform_about_config:
        inform_about_combination_name_usage(combination_name)
    return 0
import requests
def image_from_url(url, timeout=None):
    """
    Download image from url.

    :param url: url of image
    :param timeout: optional seconds to wait for the server; ``None`` (the
        default) preserves the previous wait-indefinitely behaviour
    :return: image Pillow object
    :raises requests.HTTPError: if the server answers with an error status
    """
    response = requests.get(url, timeout=timeout)
    # Fail fast on HTTP errors instead of handing an error page to Pillow,
    # which would otherwise surface as a confusing "cannot identify image".
    response.raise_for_status()
    return Image.open(BytesIO(response.content))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.