| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
import requests
import json
def get_table_count(url, table_id):
"""
    Count the number of rows in an ActivityTable
    :param url:
    :param table_id: The ActivityTable ID whose row count to update and return
    :return: count : count of rows from the ActivityTable
"""
token = ActivitySites.objects.get(site_id=1)
if token.activity_tables_token:
headers = {'content-type': 'application/json',
'Authorization': 'Token ' + token.activity_tables_token}
else:
headers = {'content-type': 'application/json'}
print("Token Not Found")
response = requests.get(url, headers=headers, verify=True)
data = json.loads(response.content)
count = None
try:
count = data['data_count']
ActivityTable.objects.filter(table_id=table_id)\
.update(unique_count=count)
except KeyError:
pass
return count
|
d7243c202317f0302fb2515f09aa096f0c275619
| 3,639,100
|
def get_audio_mfcc_features(txt_files, wav_files, n_input, n_context, word_num_map, txt_labels=None):
"""
    Extract MFCC features from the audio data.
:param txt_files:
:param wav_files:
:param n_input:
:param n_context:
:param word_num_map:
:param txt_labels:
:return:
"""
audio_features = []
audio_features_len = []
text_vector = []
text_vector_len = []
    if txt_files is not None:
txt_labels = txt_files
for txt_obj, wav_file in zip(txt_labels, wav_files):
        # Load the audio data and convert it to feature values
audio_data = audiofile_to_input_vector(wav_file, n_input, n_context)
audio_data = audio_data.astype('float32')
audio_features.append(audio_data)
audio_features_len.append(np.int32(len(audio_data)))
        # Load the text corresponding to the audio
        target = []
        if txt_files is not None:  # txt_obj is a file
            target = trans_text_ch_to_vector(txt_obj, word_num_map)
        else:
            target = trans_text_ch_to_vector(None, word_num_map, txt_obj)  # txt_obj is a label
# target = text_to_char_array(target)
text_vector.append(target)
text_vector_len.append(len(target))
audio_features = np.asarray(audio_features)
audio_features_len = np.asarray(audio_features_len)
text_vector = np.asarray(text_vector)
text_vector_len = np.asarray(text_vector_len)
return audio_features, audio_features_len, text_vector, text_vector_len
|
bed03fb10944d00e27af400776a8efc894770e46
| 3,639,101
|
def getOffsetsFromPixelFractions(col, row):
"""
Determine just the fractional part (the intra-pixel part) of the col,row position.
For example, if (col, row) = (123.4, 987.6), then
(colFrac, rowFrac) = (.4, .6).
    The function then returns the offsets necessary for addressing the interleaved PRF array,
    to ensure you get the location appropriate for your sub-pixel values.
Inputs
------
col
(float) Column position
row
(float) Row position.
Returns
------
    (colOffset, rowOffset)
        (int, int) offsets necessary for addressing the interleaved PRF array.
"""
gridSize = 9
colFrac = np.remainder(float(col), 1)
rowFrac = np.remainder(float(row), 1)
colOffset = gridSize - np.round(gridSize * colFrac) - 1
rowOffset = gridSize - np.round(gridSize * rowFrac) - 1
return int(colOffset), int(rowOffset)
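# Worked example (a sketch using only the function above): for
# (col, row) = (123.4, 987.6), colFrac = 0.4 and rowFrac = 0.6, so
# colOffset = 9 - round(9 * 0.4) - 1 = 4 and rowOffset = 9 - round(9 * 0.6) - 1 = 3.
import numpy as np  # the function above relies on np being in scope

print(getOffsetsFromPixelFractions(123.4, 987.6))  # expected: (4, 3)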
|
4f5945f4e3e6e2dd71056b615dc3571f6ece42c6
| 3,639,102
|
def all_index(request):
"""
Inventory Index View
"""
# build changelist
item_changelist = HTSChangeList(request, Item,
list_filter=[],
search_fields=[],
list_per_page=200,
model_admin=ItemAdmin(Item, None)
)
context_dict = {
'item_changelist': item_changelist,
'page_name': 'Inventory Index'
}
context_dict.update(INVENTORY_CONTEXT_DEFAULTS)
return render(request,
'inventory/inventory_all_index.html',
context_dict)
|
7fdd0b5f278b55767a7918e2977315312e823e93
| 3,639,103
|
def calcSeason(ra, time):
"""Calculate the 'season' in the survey for a series of ra/dec/time values of an observation.
Based only on the RA of the point on the sky, it calculates the 'season' based on when this
point would be overhead. To convert to an integer season label, take np.floor of the returned
float season values.
Note that seasons should be calculated for a fixed point on the sky, not for each pointing that
overlaps a point on the sky. For example, bad things might happen if you compute the season
for observations that overlap RA=0, but were centered on RA=359.
Parameters
----------
ra : float
The RA (in degrees) of the point on the sky
time : np.ndarray
The times of the observations, in MJD
Returns
-------
np.ndarray
The season values
"""
# A reference RA and equinox to anchor ra/season calculation - RA = 0 is overhead at this (local) time.
# This time was chosen as it is close to the expected start of the survey.
# Generally speaking, this is the equinox (RA=0 is overhead at midnight)
Equinox = 60208.00106863426
# convert ra into 'days'
dayRA = ra / 360 * 365.25
firstSeasonBegan = Equinox + dayRA - 0.5 * 365.25
seasons = (time - firstSeasonBegan) / 365.25
# Set first season to 0
seasons = seasons - np.floor(np.min(seasons))
return seasons
# The value for the equinox above was calculated as follows:
#from astropy.time import Time
#from astropy.coordinates import EarthLocation
#loc = EarthLocation.of_site('Cerro Pachon')
#t = Time('2023-09-21T00:01:32.33', format='isot', scale='utc', location=loc)
#print(t.sidereal_time('apparent') - loc.lon, t.utc.mjd)
|
1309a302fac9d01d7b5567d5722bf8f04dc9b88e
| 3,639,104
|
def set_node_event_info(info: NodeEventInfo) -> Item:
"""Encaches an item.
:param info: Node event information.
:returns: Item to be cached.
"""
if info.event_type in (
EventType.MONIT_CONSENSUS_FINALITY_SIGNATURE,
EventType.MONIT_BLOCK_FINALIZED,
EventType.MONIT_BLOCK_ADDED,
):
names = [
info.block_hash,
]
elif info.event_type == EventType.MONIT_DEPLOY_PROCESSED:
names = [
info.block_hash,
info.deploy_hash,
]
else:
        names = []
return Item(
item_key=ItemKey(
paths=[
info.network,
COL_EVENT,
info.event_type.name[6:],
],
names=names,
),
data=info,
expiration=EXPIRATION_COL_EVENT
)
|
9ee50e73b1c50172ada1b6040b675cbda5aede44
| 3,639,105
|
def check_hashtarget(bible_hash, target):
""" tests if the biblepay hash is valid for the hashtarget, means that is it lower.
True = is lower and all is fine """
rs = False
try:
rs = int(bible_hash, 16) < int(target, 16)
    except (TypeError, ValueError):
pass
return rs
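# Usage sketch with made-up hex strings (not real biblepay values): a hash is
# "valid" when its integer value is strictly below the target.
print(check_hashtarget("00ff", "0fff"))     # True  (0x00ff < 0x0fff)
print(check_hashtarget("ffff", "0fff"))     # False
print(check_hashtarget("not-hex", "0fff"))  # False (parse error is swallowed)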
|
a0041d8834b2a0af0a08c2562ffed599925ed5a8
| 3,639,106
|
def assert_and_infer_cfg_fl(cfg_fl, args, make_immutable=True, train_mode=True):
"""
Calls /semantic-segmentation/config.assert_and_infer_cfg and adds additional assertions
"""
if args.manual_client_setup:
cfg_fl.CLIENT.MANUAL = args.manual_client_setup
if cfg_fl.CLIENT.MANUAL:
print('-------------------------')
print('> Clients manual settings')
print('-------------------------')
for i in cfg_fl.CLIENT.POPULATION:
print(i)
if args.replicate:
cfg_fl.REPLICATE = args.replicate
if args.seed:
cfg_fl.SEED = args.seed
cfg_fl.TORCH_SEED = args.seed
if args.task:
cfg_fl.TASK = args.task
if args.dataset:
cfg_fl.DATASET.DATASET_NAME = args.dataset
if args.clients_per_dist:
cfg_fl.FEDERATION.CLIENTS_PER_DIST = args.clients_per_dist
if cfg_fl.FEDERATION.CLIENTS_PER_DIST is not None and cfg_fl.FEDERATION.NUM_CLIENTS is None:
cfg_fl.FEDERATION.NUM_CLIENTS = cfg_fl.FEDERATION.CLIENTS_PER_DIST * cfg_fl.FEDERATION.NUM_DISTRIBUTIONS
if args.num_clients:
cfg_fl.FEDERATION.NUM_CLIENTS = args.num_clients
if args.print_logx:
cfg_fl.LOGX_STDOUT = True
if args.num_distributions:
cfg_fl.FEDERATION.NUM_DISTRIBUTIONS = args.num_distributions
assertion_num_clients = "Either 'clients_per_dist' or 'num_clients' needs to be specified"
assert cfg_fl.FEDERATION.CLIENTS_PER_DIST or cfg_fl.FEDERATION.NUM_CLIENTS, assertion_num_clients
# if args.dist_type:
# cfg.FEDERATION.DIST_TYPE = args.dist_type
if args.clustering_method:
        cfg_fl.FEDERATION.CLUSTERING_METHOD = args.clustering_method
if args.federation_method:
assert args.federation_method in ['fomo', 'embeddings', 'local', 'fedavg']
cfg_fl.FEDERATION.METHOD = args.federation_method
if args.federation_method == 'fedavg':
cfg_fl.FEDERATION.FED_AVERAGING = True
if args.random_distributions:
cfg_fl.FEDERATION.RANDOM_DISTS = args.random_distributions # True
if args.federated_averaging:
cfg_fl.FEDERATION.FED_AVERAGING = True
cfg_fl.FEDERATION.METHOD = 'fedavg'
if args.local_train_val_size:
cfg_fl.FEDERATION.LOCAL_TRAIN_VAL_SIZE = args.local_train_val_size
if args.federation_epoch:
cfg_fl.FEDERATION.EPOCH = args.federation_epoch
if args.num_update_clients:
cfg_fl.CLIENT_WEIGHT.NUM_UPDATE_CLIENTS = args.num_update_clients
if args.model_weight_delta:
cfg_fl.CLIENT_WEIGHT.WEIGHT_DELTA = args.model_weight_delta
if args.explicit_weight_delta:
cfg_fl.CLIENT_WEIGHT.WEIGHT_DELTA = args.explicit_weight_delta
cfg_fl.CLIENT_WEIGHT.LEAVE_ONE_OUT = False
if args.client_weight_epsilon:
cfg_fl.CLIENT_WEIGHT.EPSILON = args.client_weight_epsilon
if args.client_weight_epsilon_decay:
cfg_fl.CLIENT_WEIGHT.EPSILON_DECAY = args.client_weight_epsilon_decay
if args.client_weight_method:
cfg_fl.CLIENT_WEIGHT.METHOD = args.client_weight_method
if args.update_positive_delta_only:
cfg_fl.MODEL_WEIGHT.UPDATE_POSITIVE_ONLY = args.update_positive_delta_only
if args.leave_one_out:
cfg_fl.CLIENT_WEIGHT.LEAVE_ONE_OUT = args.leave_one_out
if args.baseline_model:
cfg_fl.CLIENT_WEIGHT.BASELINE = args.baseline_model
if args.train_split:
cfg_fl.CLIENT.TRAIN_SPLIT = args.train_split
cfg_fl.CLIENT.VAL_SPLIT = 1 - args.train_split
if args.dataset == 'cifar100':
args.num_classes = 100
elif args.dataset == 'cifar10':
args.num_classes = 10
elif args.dataset == 'mnist':
args.num_classes = 10
return cfg_fl
|
b779e0a5f06d06b9ebc542f3cd7c190efb70bca5
| 3,639,107
|
def replace_service(name,
metadata,
spec,
source,
template,
old_service,
saltenv,
namespace="default",
**kwargs):
"""
Replaces an existing service with a new one defined by name and namespace,
    having the specified metadata and spec.
"""
body = __create_object_body(
kind="Service",
obj_class=kubernetes.client.V1Service,
spec_creator=__dict_to_service_spec,
name=name,
namespace=namespace,
metadata=metadata,
spec=spec,
source=source,
template=template,
saltenv=saltenv,
)
# Some attributes have to be preserved
# otherwise exceptions will be thrown
body.spec.cluster_ip = old_service["spec"]["cluster_ip"]
body.metadata.resource_version = old_service["metadata"][
"resource_version"]
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.replace_namespaced_service(
name, namespace, body)
return api_response.to_dict()
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None
else:
log.exception("Exception when calling "
"CoreV1Api->replace_namespaced_service")
raise CommandExecutionError(exc)
finally:
_cleanup(**cfg)
|
e363ed9d9233ff6455963edba5bfa8109f6c7260
| 3,639,108
|
from typing import Optional
from typing import Dict
from typing import Any
from typing import List
from typing import Text
import datetime
import logging
def timesketch_add_manual_event(
data: Text, timestamp: Optional[int] = 0,
date_string: Optional[Text] = '',
timestamp_desc: Optional[Text] = '',
attributes: Optional[Dict[str, Any]] = None,
tags: Optional[List[str]] = None) -> Dict[str, str]:
"""Add a manually generated event to the sketch.
Args:
    data (str): The message string for the event to be generated.
timestamp (int): Optional timestamp in either seconds since Epoch or
microseconds since Epoch.
date_string (str): An optional date time as a human readable string. If
neither date_string nor timestamp is provided then the current timestamp
will be used as the time of the event.
timestamp_desc (str): Optional timestamp description field.
attributes (dict): Optional dict which contains extra attributes to add
to the manual event.
tags (list): Optional list of tags to add to the manual event.
Returns:
Dictionary with query results.
"""
connect()
state_obj = state.state()
sketch = state_obj.get_from_cache('timesketch_sketch')
if not sketch:
print('Not able to connect to a sketch.')
return {}
# Default timestamp.
date_obj = datetime.datetime.now(datetime.timezone.utc)
date = date_obj.isoformat()
if timestamp:
try:
date_obj = datetime.datetime.fromtimestamp(
timestamp, datetime.timezone.utc)
except ValueError:
date_obj = datetime.datetime.fromtimestamp(
timestamp / 1e6, datetime.timezone.utc)
date = date_obj.isoformat()
elif date_string:
elements = time_elements.TimeElements()
if 'T' in date_string:
try:
elements.CopyFromStringISO8601(date_string)
except ValueError:
logging.error(
'Unable to convert date string, is it really in ISO 8601 format?')
return {}
    else:
      try:
        elements.CopyFromString(date_string)
      except ValueError:
        try:
          elements.CopyFromStringRFC1123(date_string)
        except ValueError:
          logging.error(
              'Unable to convert date string, needs to be in ISO 8601, 1123 or '
              'in the format YYYY-MM-DD hh:mm:ss.######[+-]##:##')
          return {}
date = elements.CopyToDateTimeStringISO8601()
if not timestamp_desc:
timestamp_desc = 'Event Logged'
if not isinstance(tags, (tuple, list)):
tags = []
if not isinstance(attributes, dict):
attributes = {}
if not date:
logging.error('Unable to convert date string, please check it.')
return {}
return sketch.add_event(
data, date, timestamp_desc, attributes=attributes, tags=tags)
|
c84f04bbd3a9344c5797e6d79be141b05f6edae0
| 3,639,109
|
def filter_vcf_by_sex(vcf_file, data):
"""Post-filter a single sample VCF, handling sex chromosomes.
Handles sex chromosomes and mitochondrial. Does not try to resolve called
hets into potential homozygotes when converting diploid to haploid.
    Skips filtering on pooled samples; support for these still needs to be implemented.
"""
if len(vcfutils.get_samples(vcf_file)) > 1:
return vcf_file
_, sexes = _configured_ploidy_sex([data])
sex = sexes.pop()
out_file = "%s-ploidyfix%s" % utils.splitext_plus(vcf_file)
if not utils.file_exists(out_file):
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with utils.open_gzipsafe(vcf_file) as in_handle:
for line in in_handle:
if line.startswith("#"):
out_handle.write(line)
else:
line = _fix_line_ploidy(line, sex)
if line:
out_handle.write(line)
if orig_out_file.endswith(".gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
|
6eb6528ce4deb86b8c8ecd8746143cb0f6c82fde
| 3,639,110
|
def gen_spacer(spacer_char="-", nl=2):
"""
Returns a spacer string with 60 of designated character, "-" is default
It will generate two lines of 60 characters
"""
spacer = ""
for i in range(nl):
spacer += spacer_char * 60
spacer += "\n"
return spacer
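# Usage sketch:
print(gen_spacer())           # two lines of 60 dashes
print(gen_spacer("=", nl=1))  # one line of 60 equals signs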
|
7434f191dafdf500c2fc3e67373afc664e543ce0
| 3,639,111
|
def repo_config_factory(repo_type, repo_id, repo_label, **kwargs):
"""
Constructs a repository configuration in form of a
TTL structure utilizing the TTL templates from
./repo_types_template.
"""
# Check if the repo_type is a known template
if repo_type not in REPO_TYPES:
raise RepositoryTypeUnknown
# Get the path to the template
template_path = TEMPLATE_FOLDER / '{}{}'.format(repo_type, '.ttl')
# Open the template file and read it
with open(template_path) as template_file:
template = template_file.read()
# get the default values for the template
    params = dict(DEFAULTS)  # copy so the module-level defaults are not mutated
# Overwrite them with the given kwargs
params.update(kwargs)
# Fill the params in the template
ttl = template.format(repo_id=repo_id.replace('-', '_'), repo_label=repo_label, **params)
# return the final TTL
return ttl
|
3840d698691f226d56d25233c3fc00db23abd5d9
| 3,639,112
|
def oil_isothermal_density(rho: NDArrayOrFloat, p: NDArrayOrFloat) -> NDArrayOrFloat:
"""Calculates the oil density for a given pressure at 15.6 degC
B&W 1992 Equation 18
Args:
rho: The oil reference density (g/cc) at 15.6 degC
            can be compensated for dissolved gases by running `oil_rho_sat` first.
p: Pressure (MPa)
Returns:
The oil density (g/cc) at pressure p
"""
return (
rho
+ (0.00277 * p - 1.71e-7 * np.power(p, 3)) * np.power(rho - 1.15, 2)
+ 3.49e-4 * p
)
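# Usage sketch with hypothetical values: a 0.85 g/cc reference oil evaluated
# over a range of pressures in MPa (scalars or numpy arrays both work).
import numpy as np

pressures = np.array([10.0, 25.0, 50.0])
print(oil_isothermal_density(0.85, pressures))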
|
f8184f4820b5a19525b47f357b92ea7059e2bd74
| 3,639,113
|
def get_waveform_dataset(path):
"""Loads the waveform dataset from a given path.
Args:
path: The path to the .npz file containing the waveform data set.
Returns:
An array of waveform chunks loaded from the given path.
"""
dataset = np.load(path)['arr_0']
return dataset
|
3d8e13cddd7abdb3bc459b68761e4a6385208c77
| 3,639,114
|
import logging
def logger(filename: str, name: str) -> logging.Logger:
"""configure task logger
"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(filename)
formatter = logging.Formatter(
'%(asctime)s %(name)s %(levelname)s: %(message)s')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
|
60200abbd7a97204bf143694058ba87ff1ea7a2a
| 3,639,115
|
def verify_signature(pubkey_path, message, signature):
"""
Use Crypto.Signature.PKCS1_v1_5 to verify the signature on a message.
Returns True for valid signature.
"""
log.debug("salt.crypt.verify_signature: Loading public key")
pubkey = get_rsa_pub_key(pubkey_path)
log.debug("salt.crypt.verify_signature: Verifying signature")
if HAS_M2:
md = EVP.MessageDigest("sha1")
md.update(salt.utils.stringutils.to_bytes(message))
digest = md.final()
return pubkey.verify(digest, signature)
else:
verifier = PKCS1_v1_5.new(pubkey)
return verifier.verify(
SHA.new(salt.utils.stringutils.to_bytes(message)), signature
)
|
c0f6d9b36fd00eb7a17c656546bd685ccee97609
| 3,639,116
|
def untranslate_module_name(module):
"""Rename module names mention in JSON to names that we can import
This reverses the translation applied by translate_module_name() to
a module name available to the current version of Python.
"""
if PY3:
# remap `__builtin__` and `exceptions` to the `builtins` module
if module == '__builtin__':
module = 'builtins'
elif module == 'exceptions':
module = 'builtins'
return module
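# Usage sketch; PY3 is a module-level flag not shown in this snippet, assumed
# here to mirror sys.version_info:
import sys
PY3 = sys.version_info[0] >= 3
print(untranslate_module_name('__builtin__'))  # 'builtins'
print(untranslate_module_name('exceptions'))   # 'builtins'
print(untranslate_module_name('collections'))  # unchanged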
|
fae87c9fb852ff1b6b82e4ebccf9c058fb4a313f
| 3,639,117
|
def RGBRamp(size=256, upperValue=.6666666666666667):
"""Generate an RGB color ramp, values range from 0.0 to 1.0"""
assert size > 0
hsv = HSVRamp(size, upperValue)
rgb = Numeric.zeros( (hsv.shape[0], 3), viewerConst.FPRECISION )
for i in xrange(hsv.shape[0]):
rgb[i] = ToRGB(hsv[i])
return rgb
|
10be72b654ac9e36610bc4c08fd05edbba45de8a
| 3,639,118
|
def find_poly_ras_intersect(shape, raster_dir, extension='.tif'):
""" Finds all the tiles falling within raster object
the get shape geometry should be seperated from the intesect check,
currently causes a exit code 139 on unix box
:param polygon:
:param extension:
:param raster_dir:
"""
print 'starting shape: {}'.format(shape)
# get vector geometry
if not os.path.isfile(shape):
raise NotImplementedError('Shapefile not found')
polygon = ogr.Open(shape)
layer = polygon.GetLayer()
feature = layer.GetFeature(0)
vector_geo = feature.GetGeometryRef()
# print 'vector geometry: {}'.format(vector_geo)
tiles = [os.path.join(raster_dir, x) for x in
os.listdir(os.path.join(raster_dir)) if x.endswith(extension)]
raster_list = []
for tile in tiles:
        print(tile, srt.tif_proj4_spatial_reference(tile))
if srt.check_same_reference_system(shape, tile):
raster_geo = get_polygon_from_raster(tile)
if raster_geo.Intersect(vector_geo):
                print('tile: {} intersects {}'.format(os.path.basename(tile), os.path.basename(shape)))
raster_list.append(tile)
return raster_list
|
8f7ae23a2c442ff5b61bde46d8b42ac4c2c8eade
| 3,639,119
|
from typing import Iterable
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def Session(
retries: int = 10,
backoff_factor: float = 0.3,
allowed_methods: Iterable[str] = ('HEAD', 'TRACE', 'GET', 'POST', 'PUT', 'OPTIONS', 'DELETE'),
status_forcelist: Iterable[int] = (408, 429, 500, 502, 503, 504),
) -> requests.Session:
"""Return a Session object with full retry capabilities.
Args:
retries (int): number of retries
backoff_factor (float): speed factor for retries (in seconds)
allowed_methods (iterable): http methods to retry on
status_forcelist (iterable): http status codes to retry on
Returns:
:py:class:`requests.Session`: session object
"""
session = requests.Session()
retry = Retry(
total=retries,
connect=retries,
read=retries,
redirect=retries,
# status=retries,
allowed_methods=allowed_methods,
status_forcelist=status_forcelist,
backoff_factor=backoff_factor,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
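# Hypothetical usage: fetch a flaky endpoint with automatic retries/backoff.
session = Session(retries=5, backoff_factor=0.5)
response = session.get('https://example.com/api/health', timeout=10)
print(response.status_code)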
|
ca7d5f4d3f34e24c67eae47c01a6bd63796b03be
| 3,639,120
|
def tp53():
"""Create a TP53 gene fixture."""
params = {
'label': 'tumor protein p53',
'concept_id': 'hgnc:11998',
'symbol': 'TP53',
'location_annotations': [],
'strand': None,
'locations': [
{
'_id': 'ga4gh:VCL._Cl_XG2bfBUVG6uwi-jHtCHavOAyfPXN',
'chr': '17',
'interval': {
'end': 'p13.1',
'start': 'p13.1',
'type': 'CytobandInterval'
},
'species_id': 'taxonomy:9606',
'type': 'ChromosomeLocation'
}
],
'previous_symbols': [],
'aliases': [
'p53',
'LFS1'
],
'symbol_status': 'approved',
'associated_with': [
'vega:OTTHUMG00000162125',
'refseq:NM_000546',
'cosmic:TP53',
'omim:191170',
'ucsc:uc060aur.1',
'uniprot:P04637',
'orphanet:120204',
'ccds:CCDS73968',
'ccds:CCDS73971',
'ccds:CCDS73970',
'ccds:CCDS73969',
'ccds:CCDS73967',
'ccds:CCDS73966',
'ccds:CCDS73965',
'ccds:CCDS73964',
'ccds:CCDS73963',
'ccds:CCDS11118',
'ccds:CCDS45605',
'ccds:CCDS45606',
'ena.embl:AF307851',
'pubmed:6396087',
'pubmed:3456488',
'pubmed:2047879'
],
'xrefs': [
'ensembl:ENSG00000141510',
'ncbigene:7157'
]
}
return Gene(**params)
|
d1c41af9dce6b5eee3aa475c207a669529001b7d
| 3,639,121
|
def factorOrder(factors, varOrder):
"""Return an order of factors for sampling given a variable order for sampling"""
pri = [0 for x in varOrder]
for i,x in enumerate(varOrder): # first, find position of each var in sampling order
pri[x]=i
    factorOrder = [ Factor() for x in varOrder ] # fill order with blanks initially
    ent = [-1.0 for x in varOrder]               # entropy cache; negative means "not yet computed"
for f in factors:
f_pri = max([pri[x] for x in f.vars]) # get last-sampled variable for this factor
if factorOrder[f_pri].nvar == 0:
factorOrder[f_pri] = f # if first factor for this variable, save it
else: # o.w. take one with the lowest conditional entropy:
if ent[f_pri] < 0: # (compute previous' if necessary)
ent[f_pri] = factorOrder[f_pri].entropy() - factorOrder[f_pri].sum([f_pri]).entropy()
ent_new = f.entropy() - f.sum([f_pri]).entropy() # (and this factor's)
if ent_new < ent[f_pri]: # (keep whichever is lower)
factorOrder[f_pri] = f
ent[f_pri] = ent_new
return factorOrder
|
98ec337ab126d77b854be28f937eef392b9c8144
| 3,639,122
|
def boundary_nodes(graph, nodes):
# TODO: move to utils
#TODO: use networkx boundary nodes directly: does the same thing
""" returns nodes at boundary of G based on edge_boundary from networkx """
graph = unwrap_graph(graph)
nodes = list(nodes)
nbunch = list(unwrap_nodes(nodes))
# find boundary
b_edges = nx.edge_boundary(graph, nbunch) # boundary edges
internal_nodes = [s for (s, t) in b_edges]
assert(all(n in nbunch for n in internal_nodes)) # check internal
return wrap_nodes(graph, internal_nodes)
|
e498b74ce3d36c3fc7f4ef0913fa470e2cfa12bc
| 3,639,123
|
def home(request):
"""
rendering ui by template for homepage
this view never cache for delivering correct translation inside template
"""
template = loader.get_template('weather/home.html')
return HttpResponse(template.render({}, request))
|
b2fdf6facd633441da9d11a53a781e9e418b42de
| 3,639,124
|
def plot_histogram(df, path, col_x, ax=None, size=None, save=True, suffix=None,
show=False, **kwargs):
"""Geneate a histogram plot.
Args:
df (:class:`pandas.DataFrame`): Data frame to plot.
path (str): Path to data frame to use if ``df`` is None, also used
as the basis for output path.
col_x (str): Name of column with values to plot.
ax (:class:`matplotlib.axes.Axes`): Matplotlib axes; defaults to
None to generate a new figure with axes.
size (Sequence[float]): Sequence of ``width, height`` to size the
figure; defaults to None.
save (bool): True to save the plot; defaults to True.
suffix: String to append to output path before extension;
defaults to None to ignore.
show: True to display the image; otherwise, the figure will only
be saved to file, if :attr:``config.savefig`` is set.
Defaults to True.
kwargs (Any): Extra arguments to :meth:`decorate_plot`.
Returns:
:class:`matplotlib.axes.Axes`: Matplotlib axes.
"""
# load data frame from CSV unless already given and set up figure
if df is None:
df = pd.read_csv(path)
if ax is None:
fig, gs = plot_support.setup_fig(1, 1, size)
ax = plt.subplot(gs[0, 0])
# generate histogram
n, bins, patches = ax.hist(df[col_x])
decorate_plot(ax, **kwargs)
# save and display plot if indicated
if save:
out_path = libmag.make_out_path(path, suffix=suffix)
plot_support.save_fig(out_path, config.savefig)
if show: plt.show()
return ax
|
ec97358a9b7f8c3d20dd7a15d77b588fc2bffbe0
| 3,639,125
|
def __check_interface_state(duthost, interface, state='up'):
"""
Check interface status
Args:
duthost: DUT host object
interface: Interface of DUT
state: state of DUT's interface
Returns:
Bool value which confirm port state
"""
ports_down = duthost.interface_facts(up_ports=[interface])['ansible_facts']['ansible_interface_link_down_ports']
if 'down' in state:
return interface in ports_down
return interface not in ports_down
|
bc17d489064e9a81ec77dad5ab3682c9a96fa88d
| 3,639,126
|
from re import findall
def find_dateTime_in_html(text):
"""
find dateTime in html
"""
r = findall('<time dateTime="(.*?)">', text)
if r:
return r
return []
|
0ba36b69a52f421e303da4c10b70362d6d724c96
| 3,639,127
|
import torch
def get_number_of_voxels_per_class(labels: torch.Tensor) -> torch.Tensor:
"""
Computes the number of voxels for each class in a one-hot label map.
:param labels: one-hot label map in shape Batches x Classes x Z x Y x X or Classes x Z x Y x X
:return: A tensor of shape [Batches x Classes] containing the number of non-zero voxels along Z, Y, X
"""
    if len(labels.shape) not in (4, 5):
raise Exception("labels must have either 4 (Classes x Z x Y x X) "
"or 5 dimensions (Batches x Classes x Z x Y x X), found:{}"
.format(len(labels.shape)))
if len(labels.shape) == 4:
labels = labels[None, ...]
return torch.count_nonzero(labels, dim=(2, 3, 4))
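# Usage sketch: a one-hot label map with 2 classes over a 1 x 2 x 2 volume
# (Classes x Z x Y x X); each class occupies two voxels.
example_labels = torch.tensor([[[[1.0, 0.0], [1.0, 0.0]]],
                               [[[0.0, 1.0], [0.0, 1.0]]]])
print(get_number_of_voxels_per_class(example_labels))  # tensor([[2, 2]])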
|
568a91639a42cf3cd3debe365c5a963512d95dfc
| 3,639,128
|
def get_columns_width(user_width):
"""define width of the report columns"""
default_width = [30, 7, 60]
if not user_width:
return default_width
try:
return [7 if user_width[i] < 7 else user_width[i] for i in range(3)]
except (TypeError, IndexError):
_LOGGER.error(
"Invalid configuration for table column widths, default values" " used %s",
default_width,
)
return default_width
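# Usage sketch (the invalid-input branch additionally needs the module's
# _LOGGER, which is not part of this snippet):
print(get_columns_width(None))         # [30, 7, 60] (defaults)
print(get_columns_width([5, 40, 80]))  # [7, 40, 80] (minimum width of 7 enforced)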
|
96901c79ac7ba2cf6d5dc56fe26d63e81a2437d4
| 3,639,129
|
def tx_failure():
"""
Failed ```tx```.
"""
message = request.args.get('m')
protocol = request.args.get('p')
address = request.args.get('a')
command = request.args.get('c')
repeats = request.args.get('r')
bits = request.args.get('b')
response = make_response(
render_template(
"tx.html",
success=False,
message=message,
protocol=protocol,
address=address,
command=command,
repeats=repeats,
bits=bits
)
)
response.headers.set('Irbox-Success', 'false')
return response
|
f5938cf59207125030502113ce3b541301279b98
| 3,639,130
|
import pydoc
def read_docstring(object_):
"""
Returns object docstring without the FILE information.
"""
fmt = "```\n{}\n```\n"
docs = pydoc.plain(pydoc.render_doc(object_)).split("FILE")[0].rstrip()
return fmt.format(docs)
|
5c21f6eadf400ac9316e3f44d98464536b9b7536
| 3,639,131
|
def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Equiangular (Bernoulli's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
Kwargs:
exp_scale: growth rate of the exponential
"""
exp_scale = kwargs.pop('exp_scale', 0.1)
x, y = np.exp(exp_scale * theta) * np.cos(theta + theta_offset), np.exp(
exp_scale * theta) * np.sin(theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
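# Usage sketch: sample one and a half turns of the spiral; both coordinates
# come back normalized to a maximum absolute value of 1.0.
import numpy as np

theta = np.linspace(0.0, 3.0 * np.pi, 200)
x, y = _bernoulli_spiral(theta, exp_scale=0.15)
print(np.abs(x).max(), np.abs(y).max())  # 1.0 1.0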
|
3e889bc61ab8e93daefc2feeaad40ae86c167627
| 3,639,132
|
import httpx
import copy
import curlify2
def _redacted_to_curl(request: httpx.Request) -> str:
"""Pass through to curlify2.to_curl that redacts the authorization in the headers
"""
if (auth_header := request.headers.get('authorization')) is None:
return curlify2.to_curl(request)
req_copy = copy.copy(request)
req_copy.headers = copy.deepcopy(request.headers)
if "Bearer" in auth_header:
req_copy.headers['authorization'] = "Bearer [REDACTED]"
else:
req_copy.headers['authorization'] = "[REDACTED]"
return curlify2.to_curl(req_copy)
|
e3a713c3fcf6c875af4cae6ab4c5e696eb0bd432
| 3,639,133
|
def get_norm(norm):
"""
Args:
norm (str or callable):
Returns:
nn.Module or None: the normalization layer
"""
support_norm_type = ['BN', 'SyncBN', 'FrozenBN', 'GN', 'nnSyncBN']
    if isinstance(norm, str):
        if len(norm) == 0:
            return None
        assert norm in support_norm_type, 'Unknown norm type {}, support norm types are {}'.format(
            norm, support_norm_type)
norm = {
"BN": nn.BatchNorm2d,
"SyncBN": NaiveSyncBatchNorm,
"FrozenBN": FrozenBatchNorm2d,
"GN": groupNorm,
"nnSyncBN": nn.SyncBatchNorm, # keep for debugging
}[norm]
return norm
|
299525099ecb38b171a8bcddd2661f943a1514ec
| 3,639,134
|
def parse_scales_line(line):
"""
Args:
- line:
Returns:
- scales_dict
"""
    def advance_past_token(text, token):
        return text[text.find(token) + len(token):]
    scales_dict = {}
    line = advance_past_token(line, 'Scales:')
    pairs = line.split(',')
    for pair_str in pairs:
dname, scale = pair_str.split(':')
scales_dict[dname.strip()] = float(scale)
return scales_dict
|
b16e1f431b878aa6418beaed3f141fe928a229e1
| 3,639,135
|
import collections
def parse_remove_configuration(configuration):
"""
    Turns the splitting configuration line into a name and a set of params.
"""
if configuration is None:
return "None", None
print('conf', configuration)
conf_dict = collections.OrderedDict(configuration)
name = 'remove'
for key in conf_dict.keys():
if key != 'weights' and key != 'boost':
name += '_'
name += key
return name, conf_dict
|
40bf749c2e142cef534f945179b987fd3c7ba6d8
| 3,639,136
|
def _calc_cost_grad_first(data_input, w, label, features):
"""Calculate the partial cost and gradient."""
train_data = read_stage_file(data_input, features + [label])
size_train = train_data.shape[0]
labels = train_data[label].values
train_data = train_data[features].values
if size_train > 0:
dim = train_data.shape[1]
if dim != len(w):
w = np.zeros(dim, dtype=float) # initial
prediction = (labels * np.dot(train_data, w))
# hinge loss (select negative values)
idx = np.nonzero((prediction - 1) < 0)
loss = np.sum(1 - prediction[idx])
# -y * x for all values lesser than 1
grad = - np.dot(labels[idx], train_data[idx])
return [loss, grad, size_train], [labels, train_data]
else:
return [0, 0, size_train], [labels, train_data]
|
d7b62ac39f824f7598cc83a078bc0f5e4e49c4ea
| 3,639,137
|
def subtract_dbm(dbm1: float, dbm2: float):
"""Adds two decibel values"""
watt1 = dbm_to_watt(dbm1)
watt2 = dbm_to_watt(dbm2)
return watt_to_dbm(watt1 - watt2)
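# The dbm_to_watt / watt_to_dbm helpers are not included in this snippet; a
# minimal sketch of the standard conversions they are assumed to implement:
import math

def dbm_to_watt(dbm: float) -> float:
    """P[W] = 10 ** ((P[dBm] - 30) / 10)"""
    return 10 ** ((dbm - 30) / 10)

def watt_to_dbm(watt: float) -> float:
    """P[dBm] = 10 * log10(P[W]) + 30"""
    return 10 * math.log10(watt) + 30

# e.g. subtract_dbm(30.0, 27.0) is roughly 27.0 dBm (1 W - 0.5 W = 0.5 W)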
|
ea7c6f9372182a6a39d72265428e86b26b4da765
| 3,639,138
|
import os
import subprocess
def _validateConfigFile(configFilePath):
"""
Test a configuration file path to be sure it is usable in the plugin
Uses a binary included in the project to test a given configuration file, and will raise an exception
if something is not valid.
    The idea is to fail fast at startup for any configuration file issue.
:param configFilePath: absolute path to a yaml file
:raises ValueError if configFilePath is missing or is not a valid yaml file
"""
if not os.path.isfile(configFilePath):
raise ValueError('missing configuration file')
return subprocess.call(
[constants.LAYER_ALCHEMY_CONFIGTESTER_BIN, '--config', configFilePath, '--quiet']
)
|
bc79a36701a8b97f619245e06a2e190936e3ce64
| 3,639,139
|
def focused_evaluate(board):
"""
Given a board, return a numeric rating of how good
that board is for the current player.
A return value >= 1000 means that the current player has won;
a return value <= -1000 means that the current player has lost
"""
score = board.longest_chain(board.get_current_player_id()) * 10
# Prefer having your pieces in the center of the board.
for row in range(6):
for col in range(7):
if board.get_cell(row, col) == board.get_current_player_id():
score -= abs(3-col)
elif board.get_cell(row, col) == board.get_other_player_id():
score += abs(3-col)
if board.is_game_over():
if int(board.is_win()) == int(board.get_current_player_id()):
            score = 1000
score -= board.num_tokens_on_board()
elif int(board.is_win()) == int(board.get_other_player_id()):
score = -1000
return score
|
b2cbb91cdb048ef41a13532e400173daa05af4b8
| 3,639,140
|
def tanh(x, name=None):
"""
sparse tanh activation, requiring x to be a sparse coo or sparse csr tensor.
.. math::
out = tanh(x)
Parameters:
x (Tensor): The input Sparse Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Sparse Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.sparse.tanh(sparse_x)
"""
assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
if x.is_sparse_coo():
return _C_ops.final_state_sparse_coo_tanh(x)
elif x.is_sparse_csr():
return _C_ops.final_state_sparse_csr_tanh(x)
else:
raise ValueError(
"Currently, sparse.tanh only support the input of SparseCooTensor or SparseCsrTensor"
)
|
24bf0889c2e1ba642442e0d8f6b11eeeaf94bf6c
| 3,639,141
|
import os
import errno
import subprocess
import sys
import json
from datetime import datetime
def record(args, filename):
"""Record a snapshot in a json file, as specified by arguments in args.
Return 0 on success, 1 on failure."""
LOGGER.debug('In subcommand record.')
os.chdir(args.project)
projectpath = os.getcwd()
# parse addons.make into a list of addons
addons_list = []
try:
with open('addons.make', 'r') as addons_make:
for line in addons_make.readlines():
addons_list.append(line.rstrip())
except IOError as exc:
if exc.errno == errno.ENOENT:
LOGGER.debug('No addons.make file found.')
else: # pragma: no cover
raise
    if len(addons_list) == 0:
LOGGER.info('No addons found.')
# search config.make for OF location
with open('config.make', 'r') as config_make:
of_path = ''
for line in config_make.readlines():
if 'OF_ROOT =' in line:
of_path = line.split('=', 1)[-1].strip()
break
if len(of_path) == 0:
LOGGER.error('Did not find OF location in config.make in ' +
os.getcwd())
return 1
LOGGER.info('Processing OF at ' + of_path)
os.chdir(of_path)
core_dict = {'path': of_path}
if validate_git_repo() != 0:
LOGGER.error('OF git repo could not be validated successfully.')
return 1
LOGGER.debug('Recording commit SHA')
out = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
universal_newlines=True)
core_dict['sha'] = out.strip()
LOGGER.debug('OF commit SHA: ' + core_dict['sha'])
LOGGER.info('Processing addons')
addons_path = os.path.join(os.getcwd(), 'addons')
os.chdir(addons_path)
# get list of official addons
official_addons = []
with open('.gitignore', 'r') as gitignore_file:
for line in gitignore_file:
if line.startswith('!ofx'):
official_addons.append(line[1:].strip())
# prune official addons (which are in the OF repo already)
# not very efficient (better with sets),
# but irrelevant for the small lists we expect
addons_list = [{'name': x}
for x
in addons_list
if x
not in official_addons]
for addon in addons_list:
LOGGER.info('Processing addon ' + addon['name'])
try:
os.chdir(os.path.join(addons_path, addon['name']))
        except OSError as exc:
if exc.errno == errno.ENOENT:
LOGGER.error(addon['name'] + ' does not exist at ' +
addons_path + '.')
sys.exit('Aborting')
else: # pragma: no cover
raise
ret = validate_git_repo(strict=False)
if ret == 0:
out_string = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
universal_newlines=True)
addon['sha'] = out_string.strip()
elif ret == 2:
addon['sha'] = 'non-git'
else:
LOGGER.error(addon['name'] +
' git repo could not be validated successfully.')
return 1
LOGGER.info('Storing metadata')
os.chdir(projectpath)
# Open/initialise metadata file
try:
with open(filename, 'r') as metafile:
json_object = json.load(metafile)
LOGGER.info('loaded data from ' + filename)
LOGGER.debug(json_object)
except IOError as exc:
if exc.errno == errno.ENOENT:
LOGGER.info(filename + ' does not exist yet. Creating..')
open(filename, 'w').close()
# create new skeleton json_object
json_object = json.loads('{ "snapshots": [] }')
else: # pragma: no cover
raise
# Store/update metadata
# check if snapshot entry already exists
for entry in json_object['snapshots']:
if entry['name'] == args.name:
            if (args.update is False) and (args.name != 'latest'):
LOGGER.error(args.name +
': entry with the same name already exists. ' +
'Use -u option to overwrite.')
return 1
json_object['snapshots'].remove(entry)
# write updated entry
temp = {'name': args.name,
'date': datetime.now().isoformat(),
'description': args.description,
'core': core_dict,
'addons': addons_list}
json_object['snapshots'].append(temp)
LOGGER.info('Writing updated data to ' + filename)
with open(filename, 'w') as metafile:
json.dump(json_object, metafile, indent=1, sort_keys=True)
return 0
|
07393a4d7947914ed694b47badbf0aafc7348dc6
| 3,639,142
|
def collector(monkeypatch):
"""
Unit test: base case
"""
col = SunPowerPVSupervisorCollector(use_device_data_timestamp=False)
attrs = [
'connect',
'disconnect',
'info_metrics',
]
mocked = MagicMock()
mocked.connect.return_value = []
mocked.disconnect.return_value = []
mocked.info_metrics.return_value = []
for attr in attrs:
monkeypatch.setattr(col, attr, getattr(mocked, attr))
return col
|
f9e99071b2dde231b4a3fc7c89e00846d26efb12
| 3,639,143
|
def GetParents_old(con, cur, term):
"""
Get all the parents of the term in the ontology tree
input:
con,cur
term : str
The term for which to look for parents
output:
err : str
Error message or empty string if ok
parents : list of str
the parents of term
"""
# termid = dbidval.GetIdFromDescription(con, cur, 'OntologyTable', term)
err, termids = get_term_ids(con, cur, term)
if err:
debug(3, err)
return err, []
# if termid < 0:
if len(termids) == 0:
err, termid = GetSynonymTermId(con, cur, term)
if err:
debug(3, 'ontology term not found for %s' % term)
            return 'ontology term %s not found' % term, []
debug(2, 'converted synonym to termid')
termids = [termid]
# plist = [termid]
plist = termids
parents = [term]
parents_id_set = set()
while len(plist) > 0:
cid = plist.pop(0)
origid = cid
if cid in parents_id_set:
continue
err, cparentids = GetTreeParentsById(con, cur, cid)
if err:
continue
plist.extend(cparentids)
for cid in cparentids:
err, cparent = dbidval.GetDescriptionFromId(con, cur, 'OntologyTable', cid)
if err:
continue
parents.append(cparent)
parents_id_set.add(origid)
debug(2, 'found %d parents' % len(parents))
return '', parents
|
7e3cfcd821d746fc10e68a9ca94ef6f19a3ba7e3
| 3,639,144
|
def uploadResourceFileUsingSession(url, session, resourceName, fileName, fullPath, scannerId):
"""
upload a file for the resource - e.g. a custom lineage csv file
    works with either csv or zip files (.csv|.zip)
returns rc=200 (valid) & other rc's from the post
"""
print(
"uploading file for resource "
+ url
+ " resource="
+ resourceName
)
apiURL = url + "/access/1/catalog/resources/" + resourceName + "/files"
print("\turl=" + apiURL)
# header = {"accept": "*/*", }
params = {"scannerid": scannerId, "filename": fileName, "optionid": "File"}
print("\t" + str(params))
# files = {'file': fullPath}
mimeType = "text/csv"
readMode = "rt"
if fileName.endswith(".zip"):
mimeType = "application/zip"
readMode = "rb"
if fileName.endswith(".dsx"):
mimeType = "text/plain"
file = {"file": (fileName, open(fullPath, readMode), mimeType)}
# file = {"file": (fileName, open(fullPath, readMode), )}
print(f"\t{file}")
# print(f"session header:{session.headers}")
uploadResp = session.post(
apiURL,
data=params,
files=file,
)
print("\tresponse=" + str(uploadResp.status_code))
if uploadResp.status_code == 200:
# valid - return the json
return uploadResp.status_code
else:
# not valid
print("\tupload file failed")
print("\t" + str(uploadResp))
print("\t" + str(uploadResp.text))
return uploadResp.status_code
|
8a4a8c21563f1467db284f2e98dd1b48dbb65a3c
| 3,639,145
|
from typing import Literal
def read_inc_stmt(line: str) -> tuple[Literal["inc"], str] | None:
"""Attempt to read INCLUDE statement"""
inc_match = FRegex.INCLUDE.match(line)
if inc_match is None:
return None
inc_path: str = inc_match.group(1)
return "inc", inc_path
|
64ac4b53363a4aa5b9e2c4cf91b27f169ad0465c
| 3,639,146
|
import platform
import os
def _clear_screen():
""" http://stackoverflow.com/questions/18937058/python-clear-screen-in-shell """
    if platform.system() == "Windows":
        tmp = os.system('cls')  # for Windows
    else:
        tmp = os.system('clear')  # for Linux/macOS
return True
|
2958ef538e95d717d60c577c631ddd91240c48f9
| 3,639,147
|
def sent2vec(s, model):
"""
Transform a sentence to a vector.
Pre: No parameters may be None.
Args:
s: The sentence to transform.
model: A word2vec model.
Returns: A vector, representing the given sentence.
"""
words = word_tokenize(s.lower())
# Stopwords and numbers must be removed, as well as words that are not
# part of the model
M = [model[w] for w in words if w not in stop_words and w.isalpha() and w in model]
M = np.array(M)
if len(M) > 0:
v = M.sum(axis=0)
return v / np.sqrt((v ** 2).sum())
else:
        # When the sentence is empty after removing invalid tokens, the vector
# is equal to the null-vector
return model.get_vector('null')
|
1e61639cc27e3a430257ff3ac4b2a002a42cf177
| 3,639,148
|
def subnet_group_present(
name,
subnet_ids=None,
subnet_names=None,
description=None,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Ensure ElastiCache subnet group exists.
.. versionadded:: 2015.8.0
name
The name for the ElastiCache subnet group. This value is stored as a lowercase string.
subnet_ids
A list of VPC subnet IDs for the cache subnet group. Exclusive with subnet_names.
subnet_names
A list of VPC subnet names for the cache subnet group. Exclusive with subnet_ids.
description
Subnet group description.
tags
A list of tags.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
exists = __salt__["boto_elasticache.subnet_group_exists"](
name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile
)
if not exists:
if __opts__["test"]:
ret["comment"] = "Subnet group {} is set to be created.".format(name)
ret["result"] = None
return ret
created = __salt__["boto_elasticache.create_subnet_group"](
name=name,
subnet_ids=subnet_ids,
subnet_names=subnet_names,
description=description,
tags=tags,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not created:
ret["result"] = False
ret["comment"] = "Failed to create {} subnet group.".format(name)
return ret
ret["changes"]["old"] = None
ret["changes"]["new"] = name
ret["comment"] = "Subnet group {} created.".format(name)
return ret
ret["comment"] = "Subnet group present."
return ret
|
d7d441dcfacd92f33b4172e33299df398cfa3ba2
| 3,639,149
|
def GetTensorFlowVersion(vm):
"""Returns the version of tensorflow installed on the vm.
Args:
vm: the target vm on which to check the tensorflow version
Returns:
installed python tensorflow version as a string
"""
stdout, _ = vm.RemoteCommand(
('echo -e "import tensorflow\nprint(tensorflow.__version__)" | {0} python'
.format(GetEnvironmentVars(vm)))
)
return stdout.strip()
|
4380ec75f2b5713ab0ead31189cdd7b3f81c6b9b
| 3,639,150
|
def process_step_collect_parse(project, step, process_result, format_args=None):
"""
Function will parse the file from an output
:type step: structures.project_step.ProjectStep
:type project: structures.project.Project
:type process_result: proc.step.step_shell.ProcessStepResult
"""
logger.debug(f'parsing output artifacts')
if not process_result.output:
        logger.warning(f'Empty output received, make sure the field output is set to \n'
f'output: log+stdout \n'
f'in order to capture output of the shell step')
return []
index = 0
output = process_result.output
start = step.collect.parse.start
    stop = step.collect.parse.stop
ls, le = len(start), len(stop)
length = len(output)
reports = list()
while True:
# no more reports found
s = output.find(start, index, length)
if s == -1:
break
# broken report
e = output.find(stop, s + 1, length)
if e == -1:
logger.debug(f'could not find end of the report file while parsing the output')
break
index = e + le + 1
# try to parse the data or skip it
reports.append(output[s + ls:e].strip())
return reports
|
3f20af272635592bf682f38f29d48e227f631a24
| 3,639,151
|
import json
from collections import OrderedDict
def datetime_column_evrs():
"""hand-crafted EVRS for datetime columns"""
with open(
file_relative_path(__file__, "../fixtures/datetime_column_evrs.json")
) as infile:
return expectationSuiteValidationResultSchema.load(
json.load(infile, object_pairs_hook=OrderedDict)
)
|
c229f08250c51a805a15db653e3e70513a6f6e9a
| 3,639,152
|
from typing import List
from typing import Dict
def chat_header_args(panel_vars: List[PanelVariable], parsed_args: Dict) -> List:
"""Creates a list of tuples containing the passed in arguments from the chat command.
Args:
panel_vars (list(nautobot_plugin_chatops_grafana.models.PanelVariable)): A list of PanelVariable objects.
parsed_args (dict): Dictionary of parsed arguments from argparse.
Returns:
args (List): List of tuples containing the arguments passed into the chat command.
Examples:
>>> print(chat_header_args([PanelVariable(name="test")], {"test": "testing", "timespan": "P12M"}))
[("test", "testing"), ("timespan", "timespan=P12M")]
"""
args = []
# Check the parsed args to see if they match a panel variable. If so, and the
# value isn't the default value, then append it on as a passed in arg.
for panel_var in panel_vars:
arg = parsed_args.get(panel_var.name)
if arg and arg != panel_var.response:
args.append((panel_var.name, arg))
continue
# If we didn't find the parsed arg in the panel variable, look in the default variables.
# Do the same here, if it does not match the default value, append it on as a passed in arg.
for def_param, def_value in handler.default_params.items():
arg = parsed_args.get(def_param)
if arg and def_value != arg and def_param not in [a[0] for a in args]:
args.append((def_param, f"{def_param}={arg}"))
return args
|
645a550d098d71dda9bf21d18b3e98bb5b8f9aa0
| 3,639,153
|
def pd_df_timeseries():
"""Create a pandas dataframe for testing, with timeseries in one column"""
return pd.DataFrame(
{
"time": pd.date_range(start="1/1/2018", periods=100),
"A": np.random.randint(0, 100, size=100),
}
)
|
9b6b217e2a4bc80b5f54cecf56c55d5fb229d288
| 3,639,154
|
from typing import Union
def n_tokens(doc: Union[Doc, Span]):
"""Return number of words in the document."""
return len(doc._._filtered_tokens)
|
4b1f1cbb9cb6baf5cb70d6bd38a88d3e0e54610a
| 3,639,155
|
def getJobs(numJobs=1):
"""
Return a list of dictionary data as provided to the plugin `submit` method
"""
job = {'allowOpportunistic': False,
'bulkid': None,
'cache_dir': TEST_DIR + '/JobCollection_1_0/job_1',
'estimatedDiskUsage': 5000000,
'estimatedJobTime': 28800,
'estimatedMemoryUsage': 6000.0,
'gridid': None,
           'id': 1,
'inputDataset': '/HLTPhysics/Run2017B-PromptReco-v1/AOD',
'inputDatasetLocations': ['T2_CH_CERN_HLT', 'T2_CH_CERN'],
           'jobid': 1,
'location': 'T2_CH_CERN',
'name': '934a7f0d-2934-4939-b366-0a9efe0df15e-0',
'numberOfCores': 8,
'packageDir': TEST_DIR + '/batch_1-0',
'plugin': 'SimpleCondorPlugin',
'possibleSites': [u'T2_CH_CERN', u'T1_US_FNAL'],
'potentialSites': frozenset([u'T1_US_FNAL', u'T2_CH_CERN']),
'proxyPath': None,
'request_name': 'amaltaro_test_submission_180620_105409_2045',
           'retry_count': 0,
'sandbox': TEST_DIR + '/Blah-Sandbox.tar.bz2',
'scramArch': ['slc6_amd64_gcc630'],
'siteName': u'T2_CH_CERN',
'site_cms_name': 'T2_CH_CERN',
'status': None,
'status_time': None,
'swVersion': ['CMSSW_9_4_0'],
           'taskPriority': 0,
           'task_id': 383,
'task_name': '/amaltaro_test_submission_180620_105409_2045/Blah_Task',
'task_type': 'Processing',
'userdn': '/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=amaltaro/CN=718748/CN=Alan Malta Rodrigues',
'usergroup': 'unknown',
'userrole': 'unknown',
           'wf_priority': 420000}
jobs = []
for i in range(0, numJobs):
        job.update({'id': i, 'jobid': i, 'name': makeUUID()})
jobs.append(deepcopy(job))
return jobs
|
56543a5a6ef66ec7fdf9f3ef26594eafa3f7bb41
| 3,639,156
|
def create_test_user():
"""Creates a new user with random username for testing
If two randomly assigned usernames overlap, it will fail
"""
UserModel = get_user_model()
    username = '%s_%s' % ('test', uuid4().hex[:10],)
user = UserModel.objects.create(username=username)
return user
|
d20ecbdb07db886a526402c09d7d14d768329c2b
| 3,639,157
|
def make_logical_or_tests(options):
"""Make a set of tests to do logical_or."""
return _make_logical_tests(tf.logical_or)(options, expected_tf_failures=1)
|
b4c7f5c0d89139938881f7301930651c9a3e7d0a
| 3,639,158
|
def guess(key, values):
"""
Returns guess values for the parameters of this function class based on the input. Used for fitting using this
class.
:param key:
:param values:
:return:
"""
return [min(values)-max(values), (max(key)-min(key))/3, min(values)]
|
908868b150340b02ba61fcc6ccf5937ba31bfe30
| 3,639,159
|
from datetime import datetime
import time
def add_metadata_values_to_record(record_message, schema_message):
"""Populate metadata _sdc columns from incoming record message
The location of the required attributes are fixed in the stream
"""
extended_record = record_message['record']
extended_record['_sdc_batched_at'] = datetime.now().isoformat()
extended_record['_sdc_deleted_at'] = record_message.get('record', {}).get('_sdc_deleted_at')
extended_record['_sdc_extracted_at'] = record_message.get('time_extracted')
extended_record['_sdc_primary_key'] = schema_message.get('key_properties')
extended_record['_sdc_received_at'] = datetime.now().isoformat()
extended_record['_sdc_sequence'] = int(round(time.time() * 1000))
extended_record['_sdc_table_version'] = record_message.get('version')
return extended_record
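# Usage sketch with hypothetical Singer-style messages:
example_record_message = {
    'record': {'id': 1, 'name': 'alice'},
    'time_extracted': '2021-01-01T00:00:00Z',
    'version': 3,
}
example_schema_message = {'key_properties': ['id']}
print(add_metadata_values_to_record(example_record_message, example_schema_message))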
|
e85e2620b816907204443af1c014ca4d927cb20c
| 3,639,160
|
import datetime
def manipulate_reservation_action(request: HttpRequest, default_foreward_url: str):
"""
    This function is used to alter the reservation being built inside
    a cookie. This function automatically crafts the required response.
"""
js_string: str = ""
r: GroupReservation = None
u: Profile = get_current_user(request)
forward_url: str = default_foreward_url
if request.GET.get("redirect"):
forward_url = request.GET["redirect"]
if "srid" in request.GET:
if not request.GET.get("rid"):
return HttpResponseRedirect("/admin?error=missing%20primary%20reservation%20id")
srid: int = int(request.GET["srid"])
sr: SubReservation = None
if srid == 0:
sr = SubReservation()
else:
sr = SubReservation.objects.get(id=srid)
if request.POST.get("notes"):
sr.notes = escape(request.POST["notes"])
else:
sr.notes = " "
sr.primary_reservation = GroupReservation.objects.get(id=int(request.GET["rid"]))
sr.save()
print(request.POST)
print(sr.notes)
return HttpResponseRedirect("/admin/reservations/edit?rid=" + str(int(request.GET["rid"])) + "&srid=" + str(sr.id))
if "rid" in request.GET:
# update reservation
r = GroupReservation.objects.get(id=int(request.GET["rid"]))
elif u.number_of_allowed_reservations > GroupReservation.objects.all().filter(createdByUser=u).count():
r = GroupReservation()
r.createdByUser = u
r.ready = False
r.open = True
r.pickupDate = datetime.datetime.now()
else:
return HttpResponseRedirect("/admin?error=Too%20Many%20reservations")
if request.POST.get("notes"):
r.notes = escape(request.POST["notes"])
if request.POST.get("contact"):
r.responsiblePerson = escape(str(request.POST["contact"]))
    if (r.createdByUser == u or u.rights > 1) and not r.submitted:
r.save()
else:
return HttpResponseRedirect("/admin?error=noyb")
response: HttpResponseRedirect = HttpResponseRedirect(forward_url + "?rid=" + str(r.id))
return response
|
f93b8e2ed68daebdf04aa15898e52f41a5df1e49
| 3,639,161
|
def _dense_to_sparse(data):
"""Convert a numpy array to a tf.SparseTensor."""
indices = np.where(data)
return tf.SparseTensor(
np.stack(indices, axis=-1), data[indices], dense_shape=data.shape)
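# Usage sketch (assumes TF 2.x eager mode): round-trip a small dense array.
import numpy as np
import tensorflow as tf

dense = np.array([[0, 3], [4, 0]], dtype=np.int64)
sparse = _dense_to_sparse(dense)
print(tf.sparse.to_dense(sparse).numpy())  # recovers the original array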
|
b1fe24dd82eff2aa31e40f6b86e75f655e7141c7
| 3,639,162
|
def getflookup(facetid):
"""
find out if a facet with this id has been saved to the facet_files table
"""
found = FacetLookup.objects.all().values_list('graphdb', flat=True).get(id=facetid)
if found:
return True
else:
return False
|
a1c6b0ec7e8ab96eef16574e64ac1948f0fa8419
| 3,639,163
|
def numeric_to_string(year):
"""
Convert numeric year to string
"""
if year < 0 :
yearstring = "{}BC".format(year*-1)
elif year >= 0:
yearstring = "{}AD".format(year)
else:
        raise ValueError("year must be a number")
return yearstring
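# Usage sketch:
print(numeric_to_string(-44))   # '44BC'
print(numeric_to_string(1969))  # '1969AD'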
|
3469e2dd5e05c49b4861782da2dd88bac781c61d
| 3,639,164
|
def _get_num_ve_sve_and_max_num_cells(cell_fracs):
"""
Calculate the num_ve, num_sve and max_num_cells
Parameters
----------
    cell_fracs : structured array
A sorted, one dimensional array,
each entry containing the following fields:
:idx: int
The volume element index.
:cell: int
The geometry cell number.
:vol_frac: float
            The volume fraction of the cell within the mesh ve.
:rel_error: float
The relative error associated with the volume fraction.
Returns
-------
num_ve : int
Number of the total voxels
num_sve : int
        Number of the total subvoxels, equal to or greater than num_ve
max_num_cells : int
Max number of cells (subvoxels) in a voxel
"""
num_sve = len(cell_fracs)
num_ve = len(set(cell_fracs["idx"]))
max_num_cells = -1
for i in range(num_sve):
max_num_cells = max(max_num_cells, len(cell_fracs[cell_fracs["idx"] == i]))
return num_ve, num_sve, max_num_cells
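# Usage sketch with a hypothetical cell_fracs array: voxel 0 holds two cells,
# voxel 1 holds one, so (num_ve, num_sve, max_num_cells) == (2, 3, 2).
import numpy as np

example_cell_fracs = np.array(
    [(0, 11, 0.5, 0.0), (0, 12, 0.5, 0.0), (1, 11, 1.0, 0.0)],
    dtype=[('idx', int), ('cell', int), ('vol_frac', float), ('rel_error', float)],
)
print(_get_num_ve_sve_and_max_num_cells(example_cell_fracs))  # (2, 3, 2)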
|
c0d154898bbfeafd66d89a2741dda8c2aa885a9a
| 3,639,165
|
from datetime import datetime
def is_void(at):
"""Returns True if the given object is an ``adatetime`` with all of its
attributes equal to None.
"""
if isinstance(at, datetime):
return False
return all((getattr(at, attr) is None) for attr in adatetime.units)
|
49744c361177060b508d5537a1ace16da6aef37d
| 3,639,166
|
def _get_metric_fn(params):
"""Get the metrix fn used by model compile."""
batch_size = params["batch_size"]
def metric_fn(y_true, y_pred):
"""Returns the in_top_k metric."""
softmax_logits = y_pred
logits = tf.slice(softmax_logits, [0, 1], [batch_size, 1])
# The dup mask should be obtained from input data, but we did not yet find
# a good way of getting it with keras, so we set it to zeros to neglect the
# repetition correction
dup_mask = tf.zeros([batch_size, 1])
cross_entropy, metric_fn, in_top_k, ndcg, metric_weights = (
neumf_model.compute_eval_loss_and_metrics_helper(
logits,
softmax_logits,
dup_mask,
params["num_neg"],
params["match_mlperf"],
params["use_xla_for_gpu"]))
in_top_k = tf.cond(
tf.keras.backend.learning_phase(),
lambda: tf.zeros(shape=in_top_k.shape, dtype=in_top_k.dtype),
lambda: in_top_k)
return in_top_k
return metric_fn
|
2793975542241f36850aaaaef4256aa59ea4873f
| 3,639,167
|
def check():
"""Check if all required modules are present.
Returns 0 on success, non-zero on error.
"""
flag = 0
for package in import_list:
try:
exec( "import " + package )
except Exception:
log.error( "Missing module: %s", package )
flag = True
if flag:
return 1
return 0
|
027ae4346a642740ca4b1ef4ebec5a831688f850
| 3,639,168
|
def flip_nums(text):
""" flips numbers on string to the end (so 2019_est --> est_2019)"""
if not text:
return ''
i = 0
s = text + '_'
    while i < len(text) and text[i].isnumeric():
        s += text[i]
        i += 1
    if i < len(text) and text[i] == '_':
        i += 1
return s[i:]
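# Usage sketch:
print(flip_nums('2019_est'))  # 'est_2019'
print(flip_nums(''))          # ''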
|
e0534e25e95b72e1d6516111413e32a6dae207ef
| 3,639,169
|
def nnls(A, b, k=None, maxiter=None):
"""
Compute the least-squares solution to the equation ``A @ x = b`` subject to
the nonnegativity constraints ``x[:k] >= 0``.
Parameters
----------
A : array_like, shape (m, n)
Matrix `A` as shown above.
b : array_like, shape (m,)
Right-hand side vector `b` as shown above.
k : int, optional
Number of nonnegativity constraints. The first `k` components of the
solution vector are nonnegative (the default is ``A.shape[1]``).
maxiter : int, optional
Maximum number of inner iterations (the default is ``3 * A.shape[1]``).
Returns
-------
x : numpy.ndarray, shape (n,)
Solution vector ``x`` as shown above.
See Also
--------
bvtcg : Bounded variable truncated conjugate gradient
cpqp : Convex piecewise quadratic programming
lctcg : Linear constrained truncated conjugate gradient
Notes
-----
The method is adapted from the NNLS algorithm [1]_.
References
----------
.. [1] C. L. Lawson and R. J. Hanson. Solving Least Squares Problems.
Classics Appl. Math. Philadelphia, PA, US: SIAM, 1974.
"""
A = np.atleast_2d(A)
if A.dtype.kind in np.typecodes['AllInteger']:
A = np.asarray(A, dtype=float)
A = np.asfortranarray(A)
b = np.atleast_1d(b)
if b.dtype.kind in np.typecodes['AllInteger']:
b = np.asarray(b, dtype=float)
n = A.shape[1]
if k is None:
k = n
if k < 0 or k > n:
raise ValueError('Number of nonnegative constraints is invalid')
if maxiter is None:
maxiter = 3 * n
# Check the sizes of the inputs.
assert_(A.ndim == 2)
assert_(b.ndim == 1)
assert_(A.shape[0] == b.size)
x = _nnls(A, b, k, maxiter) # noqa
return np.array(x, dtype=float)
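# Hedged reference sketch (added, not part of the original record): for k == A.shape[1]
# the routine above targets the same problem as scipy.optimize.nnls, which can serve as
# an independent check; the compiled `_nnls` backend itself is not shown in this record.
import numpy as np
from scipy.optimize import nnls as scipy_nnls

A_demo = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
b_demo = np.array([1.0, -1.0, 0.5])
x_ref, _ = scipy_nnls(A_demo, b_demo)
print(x_ref)  # nonnegative least-squares reference solution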
|
4d6c7e7d53e570222b752c4bf2013100c15b7297
| 3,639,170
|
import os
def read_inputs(filename, height, padding, num_quant_levels, p_norm,
predict_semantics):
"""Reads inputs for scan completion.
Reads input_sdf, target_df/sem (if any), previous predicted df/sem (if any).
Args:
filename: TFRecord containing input_sdf.
height: height in voxels to be processed by model.
padding: amount of padding (in voxels) around test scene (height is cropped
by padding for processing).
num_quant_levels: amount of quantization (if applicable).
p_norm: which p-norm is used (0, 1, 2; 0 for none).
predict_semantics: whether semantics is predicted.
Returns:
input scan: input_scan as np array.
ground truth targets: target_scan/target_semantics as np arrays (if any).
previous resolution predictions: prediction_scan_low_resolution /
prediction_semantics_low_resolution as
np arrays (if any).
"""
for record in tf.python_io.tf_record_iterator(filename):
example = tf.train.Example()
example.ParseFromString(record)
feature_map = example.features
# Input scan as sdf.
input_scan = read_input_float_feature(feature_map, 'input_sdf', shape=None)
(scene_dim_z, scene_dim_y, scene_dim_x) = input_scan.shape
  # Target scan as df and target semantics, if present in the record.
  target_scan = None
  target_semantics = None
  if 'target_df' in feature_map.feature:
target_scan = read_input_float_feature(
feature_map, 'target_df', [scene_dim_z, scene_dim_y, scene_dim_x])
if 'target_sem' in feature_map.feature:
target_semantics = read_input_bytes_feature(
feature_map, 'target_sem', [scene_dim_z, scene_dim_y, scene_dim_x])
# Adjust dimensions for model (clamp height, make even for voxel groups).
height_y = min(height, scene_dim_y - padding)
scene_dim_x = (scene_dim_x // 2) * 2
scene_dim_y = (height_y // 2) * 2
scene_dim_z = (scene_dim_z // 2) * 2
input_scan = input_scan[:scene_dim_z, padding:padding + scene_dim_y, :
scene_dim_x]
input_scan = util.preprocess_sdf(input_scan, constants.TRUNCATION)
if target_scan is not None:
target_scan = target_scan[:scene_dim_z, padding:padding + scene_dim_y, :
scene_dim_x]
target_scan = util.preprocess_df(target_scan, constants.TRUNCATION)
if target_semantics is not None:
target_semantics = target_semantics[:scene_dim_z, padding:
padding + scene_dim_y, :scene_dim_x]
target_semantics = util.preprocess_target_sem(target_semantics)
# Default values for previous resolution inputs.
prediction_scan_low_resolution = np.zeros(
[scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2, 2])
prediction_semantics_low_resolution = np.zeros(
[scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2], dtype=np.uint8)
if target_semantics is None:
target_semantics = np.zeros([scene_dim_z, scene_dim_y, scene_dim_x])
# Load previous level prediction.
if not FLAGS.is_base_level:
previous_file = os.path.join(
FLAGS.output_dir_prev, 'level' + str(FLAGS.hierarchy_level - 1) + '_' +
os.path.splitext(os.path.basename(filename))[0] + 'pred.tfrecord')
    tf.logging.info('Reading previous predictions from file: %s',
previous_file)
assert os.path.isfile(previous_file)
for record in tf.python_io.tf_record_iterator(previous_file):
prev_example = tf.train.Example()
prev_example.ParseFromString(record)
prev_feature_map = prev_example.features
prediction_scan_low_resolution = read_input_float_feature(
prev_feature_map, 'prediction_df', None)
(prev_scene_dim_z, prev_scene_dim_y,
prev_scene_dim_x) = prediction_scan_low_resolution.shape
offset_z = (prev_scene_dim_z - scene_dim_z // 2) // 2
offset_x = (prev_scene_dim_x - scene_dim_x // 2) // 2
prediction_scan_low_resolution = prediction_scan_low_resolution[
offset_z:offset_z + scene_dim_z // 2, :scene_dim_y // 2, offset_x:
offset_x + scene_dim_x // 2]
prediction_scan_low_resolution = util.preprocess_target_sdf(
prediction_scan_low_resolution, num_quant_levels, constants.TRUNCATION,
p_norm == 0)
if predict_semantics:
prediction_semantics_low_resolution = read_input_bytes_feature(
prev_feature_map, 'prediction_sem',
[prev_scene_dim_z, prev_scene_dim_y, prev_scene_dim_x])
prediction_semantics_low_resolution = prediction_semantics_low_resolution[
offset_z:offset_z + scene_dim_z // 2, :scene_dim_y // 2, offset_x:
offset_x + scene_dim_x // 2]
return (input_scan, target_scan, target_semantics,
prediction_scan_low_resolution, prediction_semantics_low_resolution)
|
fac5ec6ae02bf930d881a75d483f4001aabbf9d4
| 3,639,171
|
import numpy as np
def f_elas_linear_tsswlc(x, t3, t2, e_b, gam, e_par, e_perp, eta):
"""Compute spring forces and torques on each bead of dsswlc."""
N, _ = x.shape
f = np.zeros(x.shape)
t = np.zeros(x.shape)
for i in range(0, N - 1):
dx = x[i+1] - x[i]
dx_par = dx @ t3[i]
dx_perp = dx - dx_par*t3[i]
cos_u1_u2 = t3[i+1]@t3[i]
Gi = t3[i+1] - cos_u1_u2*t3[i] - eta*dx_perp
Fi = -eta*e_b*Gi + e_par*(dx_par - gam)*t3[i] + e_perp*dx_perp
f[i] += Fi
f[i + 1] -= Fi
Gi = (t3[i+1] - t3[i]) - eta*dx_perp
t[i] += e_b*Gi - eta*e_b*dx_par*Gi + eta*e_b*(1 - cos_u1_u2)*dx \
- e_par*(dx_par - gam)*dx + e_perp*dx_par*dx_perp
t[i+1] -= e_b*Gi
# TODO: implement extra torque due to orientation differences
return f, t
|
b5a217521667e95b4ba7bafa74f2d1371e01dc34
| 3,639,172
|
def extent2(texture):
""" Returns the extent of the image data (0.0-1.0, 0.0-1.0) inside its texture owner.
    Textures have sizes that are powers of 2 (512, 1024, ...), but the actual image can be smaller.
For example: a 400x250 image will be loaded in a 512x256 texture.
Its extent is (0.78, 0.98), the remainder of the texture is transparent.
"""
return (texture.tex_coords[3], texture.tex_coords[7])
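# Hedged illustration (added, not part of the original record): tex_coords is assumed
# to follow the pyglet layout of 12 floats (4 corners x (s, t, r)), so index 3 holds the
# right edge and index 7 the top edge of the image inside its texture.
class _DemoTexture:
    # 400x250 image stored in a 512x256 texture
    tex_coords = (0.0, 0.0, 0.0,
                  400 / 512, 0.0, 0.0,
                  400 / 512, 250 / 256, 0.0,
                  0.0, 250 / 256, 0.0)

print(extent2(_DemoTexture()))  # (0.78125, 0.9765625)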
|
16c6d220ad48201fd133ed11c97452bf0831c0d8
| 3,639,173
|
def calculate_handlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
# Store the total length of the hand
hand_len = 0
# For every letter in the hand
for key in hand.keys():
# Add the number of times that letter appears in the hand
# to the variable storing hand length
hand_len += hand[key]
# Return the number of letters in the current hand
return hand_len
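# Hedged usage sketch (added, not part of the original record): the hand maps each
# letter to how many copies of it are held.
assert calculate_handlen({'a': 2, 'b': 1, 'q': 1}) == 4
assert calculate_handlen({}) == 0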
|
297f8af5943bf87bb7999a1212d54430857de12b
| 3,639,174
|
def add_fieldmap(fieldmap: BIDSFile, layout: BIDSLayout) -> dict:
"""
    Locates the fieldmap-related JSON file and adds it to a dictionary with keys
    that describe its directionality.
----------
fieldmap : BIDSFile
Fieldmap's NIfTI
layout : BIDSLayout
BIDSLayout instance for the queried bids directory.
Returns
-------
dict
Dictionary of fieldmap's NIfTI and json with appropriate keys.
"""
entities = fieldmap.get_entities()
entities.pop("fmap")
direction = entities.get("direction")
entities["extension"] = "json"
json = layout.get(**entities)
fieldmap_dict = {f"fmap_{direction}": fieldmap.path}
if json:
fieldmap_dict[f"fmap_{direction}_json"] = json[0].path
return fieldmap_dict
|
227fa27d9ecb2f260700debc6b2837d60018bd61
| 3,639,175
|
import numpy as np
def fit_plane_lstsq(XYZ):
"""
Fits a plane to a point cloud.
Where z=a.x+b.y+c; Rearranging: a.x+b.y-z+c=0
@type XYZ: list
@param XYZ: list of points
@rtype: np.array
@return: normalized normal vector of the plane in the form C{(a,b,-1)}
"""
[rows, cols] = XYZ.shape
G = np.ones((rows, 3))
G[:, 0] = XYZ[:, 0] # X
G[:, 1] = XYZ[:, 1] # Y
Z = XYZ[:, 2]
    (a, b, c), resid, rank, s = np.linalg.lstsq(G, Z, rcond=None)
normal = (a, b, -1)
nn = np.linalg.norm(normal)
normal = normal / nn
return normal
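# Hedged usage sketch (added, not part of the original record): points sampled from
# z = 2x + 3y + 5 should give a normal proportional to (2, 3, -1).
rng = np.random.default_rng(0)
xy = rng.uniform(-1.0, 1.0, size=(50, 2))
z = 2 * xy[:, 0] + 3 * xy[:, 1] + 5
normal = fit_plane_lstsq(np.column_stack([xy, z]))
print(normal)  # approximately (2, 3, -1) / sqrt(14)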
|
c734cb17462e72c40bb65464c42d298c21e4a922
| 3,639,176
|
import titlecase
def clean_name(name: str) -> str:
"""Clean a string by capitalizing and removing extra spaces.
Args:
name: the name to be cleaned
Returns:
str: the cleaned name
"""
name = " ".join(name.strip().split())
return str(titlecase.titlecase(name))
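# Hedged usage sketch (added, not part of the original record); relies on the
# third-party `titlecase` package imported above.
assert clean_name("  the  quick   brown fox ") == "The Quick Brown Fox"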
|
e19354767d38164004c984c76827b2882ef4c4fd
| 3,639,177
|
from typing import Callable
from re import T
from typing import List
def pull_list(buf: Buffer, capacity: int, func: Callable[[], T]) -> List[T]:
"""
Pull a list of items.
"""
items = []
with pull_block(buf, capacity) as length:
end = buf.tell() + length
while buf.tell() < end:
items.append(func())
return items
|
ab9833fdab157e05df00d65dee96080c98140bb2
| 3,639,178
|
def ResNet(
stack_fn, preact, use_bias, model_name='resnet', include_top=True, weights='imagenet',
input_tensor=None, input_shape=None, pooling=None, classes=1000,
classifier_activation='softmax', bottomright_maxpool_test=False,
use_group_norm=False, **kwargs):
"""Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Arguments:
stack_fn: a function that returns output tensor for the
stacked residual blocks.
preact: whether to use pre-activation or not
(True for ResNetV2, False for ResNet and ResNeXt).
use_bias: whether to use biases for convolutional layers or not
(True for ResNet and ResNetV2, False for ResNeXt).
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 inputs channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(),
require_flatten=include_top, weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
x = layers.Conv2D(
64, 7, strides=2, use_bias=use_bias and not use_group_norm, name='conv1_conv')(x)
if use_group_norm:
def norm_layer(name):
return tfa.layers.GroupNormalization(epsilon=batchnorm_epsilon, name=name)
else:
def norm_layer(name):
return layers.BatchNormalization(
axis=bn_axis, epsilon=batchnorm_epsilon, momentum=batchnorm_momentum,
name=name)
if not preact:
x = norm_layer(name='conv1_gn' if use_group_norm else 'conv1_bn')(x)
x = layers.Activation('relu', name='conv1_relu')(x)
padding_layer = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')
if bottomright_maxpool_test:
padding_test = layers.ZeroPadding2D(padding=((0, 2), (0, 2)), name='pool1_pad')
padding_layer = TrainTestSwitchLayer(padding_layer, padding_test)
x = padding_layer(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = stack_fn(x)
if preact:
x = norm_layer(name='post_gn' if use_group_norm else 'post_bn')(x)
x = layers.Activation('relu', name='post_relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
if use_group_norm:
model_name = model_name + '_groupnorm'
model = training.Model(inputs, x, name=model_name)
# Load weights.
if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
if include_top:
file_name = model_name + f'_weights_tf_dim_ordering_tf_kernels.h5'
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_name = model_name + f'_weights_tf_dim_ordering_tf_kernels_notop.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = data_utils.get_file(
file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir='models', file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
|
810b04481eb6ad5d8b3723b87581b3f2136cc80f
| 3,639,179
|
import yaml
def read_yaml(yaml_path):
"""
Read yaml file from the path
:param yaml_path:
:return:
"""
    with open(yaml_path, "r") as stream:
        # Materialize the generator before the file closes; SafeLoader is enough
        # for plain configuration files and avoids arbitrary object construction.
        docs = list(yaml.load_all(stream, Loader=yaml.SafeLoader))
result = dict()
for doc in docs:
for k, v in doc.items():
result[k] = v
return result
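# Hedged usage sketch (added, not part of the original record): write a throwaway
# YAML file and read it back.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as fh:
    fh.write("name: demo\nthreshold: 0.5\n")
print(read_yaml(fh.name))  # {'name': 'demo', 'threshold': 0.5}
os.remove(fh.name)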
|
a3f32d6f5c6cb5c8e94ad9b68a0540aa001f83b2
| 3,639,180
|
def _server_allow_run_on_save() -> bool:
"""Allows users to automatically rerun when app is updated.
Default: true
"""
return True
|
3a895abd8201ce97c8f2f928b841eb86bf6327d1
| 3,639,181
|
from urllib.parse import urlparse
def _strip_schema(url):
"""Returns the url without the s3:// part"""
result = urlparse(url)
return result.netloc + result.path
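# Hedged usage sketch (added, not part of the original record).
assert _strip_schema("s3://my-bucket/path/to/key.csv") == "my-bucket/path/to/key.csv"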
|
9e7dc96c23d799f202603109cd08b2fe049951a5
| 3,639,182
|
def simple_word_tokenize(text, _split=GROUPING_SPACE_REGEX.split):
"""
Split text into tokens. Don't split by a hyphen.
    Preserve punctuation, but not whitespace.
"""
return [t for t in _split(text) if t and not t.isspace()]
|
5b9e66d2a369340028b4ece2eee083511d0e9746
| 3,639,183
|
def merge_strategy(media_identifier, target_site, sdc_data, strategy):
"""
Check if the file already holds Structured Data, if so resolve what to do.
@param media_identifier: Mid of the file
@param target_site: pywikibot.Site object to which file should be uploaded
@param sdc_data: internally formatted Structured Data in json format
@param strategy: Strategy used for merging uploaded data with pre-existing
data. Allowed values are None, "New", "Blind", "Add" and "Nuke".
@return: dict of pids and caption languages removed from sdc_data due to
conflicts.
@raises: ValueError, SdcException
"""
prior_data = _get_existing_structured_data(media_identifier, target_site)
if not prior_data:
# even unknown strategies should pass if there is no prior data
return
if not strategy:
raise SdcException(
'warning', 'pre-existing sdc-data',
('Found pre-existing SDC data, no new data will be added. '
'Found data: {}'.format(prior_data))
)
strategy = strategy.lower()
if strategy in ('new', 'add'):
pre_pids = prior_data['statements'].keys()
pre_langs = prior_data['labels'].keys()
new_langs = sdc_data.get('caption', dict()).keys()
if strategy == 'add':
pid_clash = set(pre_pids).intersection(sdc_data.keys())
lang_clash = set(pre_langs).intersection(new_langs)
for pid in pid_clash:
sdc_data.pop(pid, None)
for lang in lang_clash:
sdc_data['caption'].pop(lang, None)
if (not any(is_prop_key(key) for key in sdc_data.keys())
and not sdc_data.get('caption')):
                # warn if no data is left to upload
raise SdcException(
'warning', 'all conflicting pre-existing sdc-data',
('Found pre-existing SDC data, no new non-conflicting '
'data could be added. Found data: {}'.format(
prior_data))
)
elif pid_clash or lang_clash:
return {'pids': pid_clash, 'langs': lang_clash}
elif (not set(pre_pids).isdisjoint(sdc_data.keys())
or not set(pre_langs).isdisjoint(new_langs)):
raise SdcException(
'warning', 'conflicting pre-existing sdc-data',
('Found pre-existing SDC data, no new data will be added. '
'Found data: {}'.format(prior_data))
)
elif strategy not in STRATEGIES:
raise ValueError(
'The `strategy` parameter must be None, "{0}" or "{1}" '
'but "{2}" was provided'.format(
'", "'.join([s.capitalize() for s in STRATEGIES[:-1]]),
STRATEGIES[-1].capitalize(),
strategy.capitalize()))
# pass if strategy is "Blind" or "Nuke"
|
0e59cc312e00cc7d492bfe725b0a9a297734a5e0
| 3,639,184
|
def convert_translations_to_dict(js_translations):
"""Convert a GNUTranslations object into a dict for jsonifying.
Args:
js_translations: GNUTranslations object to be converted.
Returns:
A dictionary representing the GNUTranslations object.
"""
plural, n_plural = _get_plural_forms(js_translations)
translations_dict = {'plural': plural, 'catalog': {}, 'fallback': None}
if js_translations._fallback is not None:
translations_dict['fallback'] = convert_translations_to_dict(
js_translations._fallback
)
for key, value in js_translations._catalog.items():
if key == '':
continue
if isinstance(key, basestring):
translations_dict['catalog'][key] = value
elif isinstance(key, tuple):
if key[0] not in translations_dict['catalog']:
translations_dict['catalog'][key[0]] = [''] * n_plural
translations_dict['catalog'][key[0]][int(key[1])] = value
return translations_dict
|
8db0fc022002504a943f46b429ca71b6e0e90b06
| 3,639,185
|
import asyncio
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None):
"""
Apply function of two arguments cumulatively to the items of sequence,
from left to right, so as to reduce the sequence to a single value.
Reduction will be executed sequentially without concurrency,
so passed values would be in order.
This function is the asynchronous coroutine equivalent to Python standard
`functools.reduce()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): reducer coroutine binary function.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
initializer (mixed): initial accumulator value used in
the first reduction call.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
right (bool): reduce iterable from right to left.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
mixed: accumulated final reduced value.
Usage::
async def reducer(acc, num):
return acc + num
await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
# => 15
"""
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
# Reduced accumulator value
acc = initializer
    # If the iterable is empty, just return the initializer value
if len(iterable) == 0:
return initializer
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
# Reducer partial function for deferred coroutine execution
def reducer(element):
@asyncio.coroutine
def wrapper():
nonlocal acc
acc = yield from coro(acc, element)
return wrapper
# Support right reduction
if right:
iterable.reverse()
# Iterate and attach coroutine for defer scheduling
for element in iterable:
pool.add(reducer(element))
# Wait until all coroutines finish
yield from pool.run(ignore_empty=True)
# Returns final reduced value
return acc
|
64b55a082df11fa9d6b7971ecd1508c1e4c9f1c9
| 3,639,186
|
def sigm_temp(base_sim_param, assumptions, t_base_type):
"""Calculate base temperature depending on sigmoid diff and location
Parameters
----------
base_sim_param : dict
Base simulation assumptions
assumptions : dict
Dictionary with assumptions
Return
------
t_base_cy : float
Base temperature of current year
Note
----
Depending on the base temperature in the base and end year
a sigmoid diffusion from the base temperature from the base year
to the end year is calculated
    This allows modelling changes, e.g. in thermal comfort
"""
# Base temperature of end year minus base temp of base year
t_base_diff = assumptions[t_base_type]['end_yr'] - assumptions[t_base_type]['base_yr']
# Sigmoid diffusion
t_base_frac = diffusion_technologies.sigmoid_diffusion(
base_sim_param['base_yr'],
base_sim_param['curr_yr'],
base_sim_param['end_yr'],
assumptions['smart_meter_diff_params']['sig_midpoint'],
assumptions['smart_meter_diff_params']['sig_steeppness']
)
# Temp diff until current year
t_diff_cy = t_base_diff * t_base_frac
# Add temp change to base year temp
t_base_cy = t_diff_cy + assumptions[t_base_type]['base_yr']
return t_base_cy
|
276af880050698a9f15dcd142aac952809807fdb
| 3,639,187
|
import socket
from select import select
try:
    from select import poll, POLLIN
except ImportError:  # poll is unavailable on some platforms (e.g. Windows)
    poll = None
def is_socket_closed(sock):
"""Check if socket ``sock`` is closed."""
if not sock:
return True
try:
if not poll: # pragma nocover
if not select:
return False
try:
return bool(select([sock], [], [], 0.0)[0])
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
    except Exception:
        return True
    return False
|
e89ddec6e7603b5636f6a6d87831d12f0a76e9d9
| 3,639,188
|
def _fit_ovo_binary(estimator, X, y, i, j):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
|
59325562549656d35b615a3274112357b0c4854c
| 3,639,189
|
def get_implicit_permissions_for_user(user: str, domain=None):
"""
GetImplicitPermissionsForUser gets implicit permissions for a user or role.
Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles.
For example:
p, admin, data1, read
p, alice, data2, read
g, alice, admin
GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]].
But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
"""
    return enforcer.get_implicit_permissions_for_user(user, domain=domain)
|
08477a3ac772597f66f36b7b04fc7d8a29f2522b
| 3,639,190
|
def Law_f(text):
"""
:param text: The "text" of this Law
"""
return '\\begin{block}{Law}\n' + text + '\n\\end{block}\n'
|
594b279c5971a9d379666179c4d0633fc02a8bd9
| 3,639,191
|
import operator
from typing import OrderedDict
def ordered_dict_intersection(first_dict, second_dict, compat=operator.eq):
"""Return the intersection of two dictionaries as a new OrderedDict.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equality.
Returns
-------
intersection : OrderedDict
Intersection of the contents.
"""
new_dict = OrderedDict(first_dict)
remove_incompatible_items(new_dict, second_dict, compat)
return new_dict
|
cfef1a1d5c3cc9fc5b792a68bae0fe8279b752da
| 3,639,192
|
import numpy as np
import scipy.special
def get_cl2cf_matrices(theta_bin_edges, lmin, lmax):
"""
Returns the set of matrices to go from one entire power spectrum to one binned correlation function.
Args:
theta_bin_edges (1D numpy array): Angular bin edges in radians.
lmin (int): Minimum l.
lmax (int): Maximum l.
Returns:
(2D numpy array, \
2D numpy array, \
2D numpy array): Tuple of matrices to each go from one entire power spectrum to one binned \
correlation function for different spins: (0-0, 2-2, 0-2). The spin-2-2 matrix is only for \
xi+, not xi-.
"""
# Calculate Legendre functions and their derivatives up to lmax
# pl and dpl indexed as [theta_idx, l]
cos_thetas = np.cos(theta_bin_edges)
pl_dpl = np.array([scipy.special.lpn(lmax + 1, cos_theta) for cos_theta in cos_thetas])
pl = pl_dpl[:, 0, :]
dpl = pl_dpl[:, 1, :]
# Calculate various offset combinations of Pl and dPl, and some other useful things
assert lmin >= 2
plplus1 = pl[:, (lmin + 1):] # first is l=lmin+1, last is lmax+1
plminus1 = pl[:, (lmin - 1):lmax] # first is l=lmin-1, last is lmax-1
xpl = cos_thetas[:, np.newaxis] * pl[:, lmin:(lmax + 1)]
xdpl = cos_thetas[:, np.newaxis] * dpl[:, lmin:(lmax + 1)]
dplminus1 = dpl[:, (lmin - 1):lmax]
xdplminus1 = cos_thetas[:, np.newaxis] * dplminus1
ell = np.arange(lmin, lmax + 1)
two_ell_plus1 = 2 * ell + 1
cos_theta_diff = np.diff(cos_thetas)
# Calculate bin-averaged Pl, Pl^2 and Gl+/- following Fang et al. eqs 5.6-5.8
# (Also Friedrich et al. DES Y3 covariance paper, which uses a different sign convention but this cancels out.)
# All of these vectorised equations have been validated against much slower loop implementations
# Pl
pl_bin_top_prediff = plplus1 - plminus1
pl_bin_top = np.diff(pl_bin_top_prediff, axis=0)
pl_bin_bottom = np.outer(cos_theta_diff, two_ell_plus1)
pl_bin = pl_bin_top / pl_bin_bottom
# Pl^2
plminus1_coeff = ell + 2 / two_ell_plus1
plminus1_term = plminus1_coeff[np.newaxis, :] * plminus1
xpl_coeff = 2 - ell
xpl_term = xpl_coeff[np.newaxis, :] * xpl
plplus1_coeff = 2 / two_ell_plus1
plplus1_term = plplus1_coeff[np.newaxis, :] * plplus1
pl2_bin_top_prediff = plminus1_term + xpl_term - plplus1_term
pl2_bin_top = np.diff(pl2_bin_top_prediff, axis=0)
pl2_bin_bottom = cos_theta_diff[:, np.newaxis]
pl2_bin = pl2_bin_top / pl2_bin_bottom
# Gl2+ + Gl2-
plminus1_coeff = - ell * (ell - 1) / 2 * (ell + 2 / two_ell_plus1) - (ell + 2)
plminus1_term = plminus1_coeff[np.newaxis, :] * plminus1
xpl_coeff = - ell * (ell - 1) * (2 - ell) / 2
xpl_term = xpl_coeff[np.newaxis, :] * xpl
plplus1_coeff = ell * (ell - 1) / two_ell_plus1
plplus1_term = plplus1_coeff[np.newaxis, :] * plplus1
dpl_coeff = 4 - ell
dpl_term = dpl_coeff * dpl[:, lmin:(lmax + 1)]
xdplminus1_coeff = ell + 2
xdplminus1_term = xdplminus1_coeff[np.newaxis, :] * xdplminus1
xdpl_coeff = 2 * (ell - 1)
xdpl_term = xdpl_coeff[np.newaxis, :] * xdpl
pl_coeff = - 2 * (ell - 1)
pl_term = pl_coeff[np.newaxis, :] * pl[:, lmin:(lmax + 1)]
dplminus1_coeff = - 2 * (ell + 2)
dplminus1_term = dplminus1_coeff[np.newaxis, :] * dplminus1
gplus_bin_top_prediff = (plminus1_term + xpl_term + plplus1_term + dpl_term + xdplminus1_term + xdpl_term + pl_term
+ dplminus1_term)
gplus_bin_top = np.diff(gplus_bin_top_prediff, axis=0)
gplus_bin_bottom = cos_theta_diff[:, np.newaxis]
gplus_bin = gplus_bin_top / gplus_bin_bottom
# Apply relevant prefactors to obtain bin-averaged Wigner d symbols
ell_ellplus1 = (ell * (ell + 1))[np.newaxis, :]
d00_bin = pl_bin
d22plus_bin = 2 / ell_ellplus1 ** 2 * gplus_bin
d02_bin = 1 / ell_ellplus1 * pl2_bin
# Apply final Wigner prefactor to obtain Cl->CF matrices
prefac = (two_ell_plus1 / (4 * np.pi))[np.newaxis, :]
cl2cf_00 = prefac * d00_bin
cl2cf_22plus = prefac * d22plus_bin
cl2cf_02 = prefac * d02_bin
return cl2cf_00, cl2cf_22plus, cl2cf_02
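# Hedged usage sketch (added, not part of the original record): five angular bins and
# multipoles 2..50 give matrices of shape (n_bins, lmax - lmin + 1).
theta_edges = np.radians(np.linspace(0.5, 3.0, 6))  # 6 edges -> 5 bins, in radians
m00, m22plus, m02 = get_cl2cf_matrices(theta_edges, lmin=2, lmax=50)
print(m00.shape, m22plus.shape, m02.shape)  # (5, 49) for each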
|
0231218c8501409e3660ed6c446b0c163229ab8a
| 3,639,193
|
from pandas import DataFrame, concat
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list or NumPy array.
n_in: Number of lag observations as input (X).
n_out: Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg.reset_index(drop=True)
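# Hedged usage sketch (added, not part of the original record): one lag input and one
# output step for a univariate series.
framed = series_to_supervised([10, 20, 30, 40, 50], n_in=1, n_out=1)
print(framed.columns.tolist())  # ['var1(t-1)', 'var1(t)']
print(len(framed))              # 4 rows; the first row is dropped because of the NaN lag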
|
1756380140dd74045880cc4501623c8b48ce5773
| 3,639,194
|
import torch
def valid_from_done(done):
"""Returns a float mask which is zero for all time-steps after a
`done=True` is signaled. This function operates on the leading dimension
of `done`, assumed to correspond to time [T,...], other dimensions are
preserved."""
done = done.type(torch.float)
valid = torch.ones_like(done)
valid[1:] = 1 - torch.clamp(torch.cumsum(done[:-1], dim=0), max=1)
return valid
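# Hedged usage sketch (added, not part of the original record): time runs along the
# leading dimension; every step after the first done=1 is masked out.
done = torch.tensor([[0.0], [0.0], [1.0], [0.0], [0.0]])
print(valid_from_done(done).squeeze(-1))  # tensor([1., 1., 1., 0., 0.])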
|
0ca2bd0f9e23605091b2f8d1bc15e67e1632b82b
| 3,639,195
|
import logging
def get_transfer_options(transfer_kind='upload', transfer_method=None):
"""Returns hostnames that the current host can upload or download to.
transfer_kind: 'upload' or 'download'
transfer_method: is specified and not None, return only hosts with which
we can work using this method (e.g. scp)
"""
try:
transfer_options = get_config(get_hostname())[
'%s_options' % transfer_kind]
except LookupError:
logging.info("Host %s has no known transfer options.",
get_hostname())
return []
if transfer_method is not None:
transfer_options = [to for to in transfer_options
                            if get_config(to['host'])['method'] == transfer_method]
return transfer_options
|
f5aea7498bf98d3be3fe9e97eda4e6eaa9181cea
| 3,639,196
|
def calc_utility_np(game, iter):
"""Calc utility of current position
Parameters
----------
game : camel up game
Camel up game class
iter : int
Iterations to run the monte carlo simulations
Returns
-------
np.array
Numpy structured array with expected utilities
"""
coins = coins_to_numpy(game)
if str(game.camel_dict) + str(game.tiles_dict) in CACHE.keys():
turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points = CACHE[
str(game.camel_dict) + str(game.tiles_dict)
][0]
game_prob_first, game_prob_last = CACHE[
str(game.camel_dict) + str(game.tiles_dict)
][1]
else:
turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points = turn_prob_numpy(
game, iter
)
game_prob_first, game_prob_last = game_prob_numpy(game, iter)
game_prob_first["prob"] = np.where(
game_prob_first["prob"] < 0.30, 0, game_prob_first["prob"]
)
game_prob_last["prob"] = np.where(
game_prob_last["prob"] < 0.30, 0, game_prob_last["prob"]
)
CACHE[str(game.camel_dict) + str(game.tiles_dict)] = [
(turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points),
(game_prob_first, game_prob_last),
]
winner_bets, loser_bets = winner_loser_bets_to_numpy(game)
bet_tiles = bet_tiles_to_numpy(game)
util.rename_np(turn_prob_first, ["counts", "prob"], "first")
util.rename_np(turn_prob_second, ["counts", "prob"], "second")
util.rename_np(turn_prob_other, ["counts", "prob"], "other")
bets = util.numpy_left_join(bet_tiles, turn_prob_first, "camel")
bets = util.numpy_left_join(bets, turn_prob_second, "camel")
bets = util.numpy_left_join(bets, turn_prob_other, "camel")
multiply_array = (
(bets["value"] * bets["prob_first"])
+ (bets["bets"] * bets["prob_second"])
- (bets["bets"] * bets["prob_other"])
)
bets = util.add_col_np(bets, "exp_value", multiply_array)
bets_groupby = util.numpy_group_by_sum(bets, "player", "exp_value")
final = util.numpy_left_join(coins, exp_tile_points, "player")
final = util.numpy_left_join(final, bets_groupby, "player")
game_first = util.numpy_left_join(winner_bets, game_prob_first, "camel")
game_last = util.numpy_left_join(loser_bets, game_prob_last, "camel")
game_winner_other = deepcopy(game_first)
game_winner_other["prob"] = 1 - game_first["prob"]
game_loser_other = deepcopy(game_last)
game_loser_other["prob"] = 1 - game_last["prob"]
game_first = util.add_col_np(
game_first, "points", config.BET_SCALING[0 : game_first.shape[0]]
)
game_last = util.add_col_np(
game_last, "points", config.BET_SCALING[0 : game_last.shape[0]]
)
game_winner_other = util.add_col_np(
game_winner_other, "points", [1] * game_winner_other.shape[0]
)
game_loser_other = util.add_col_np(
game_loser_other, "points", [1] * game_loser_other.shape[0]
)
final = util.numpy_left_join(
final, calc_exp_value_np(game_first, "exp_value_first"), "player"
)
final = util.numpy_left_join(
final, calc_exp_value_np(game_last, "exp_value_last"), "player"
)
final = util.numpy_left_join(
final, calc_exp_value_np(game_winner_other, "exp_value_winner_other"), "player"
)
final = util.numpy_left_join(
final, calc_exp_value_np(game_loser_other, "exp_value_loser_other"), "player"
)
multiply_array = (
final["coins"]
+ final["exp_points"]
+ final["exp_value"]
+ final["exp_value_first"]
+ final["exp_value_last"]
- final["exp_value_winner_other"]
- final["exp_value_loser_other"]
)
final = util.add_col_np(final, "utility", multiply_array)
return final
|
c69740652ea18d753c9a2a894f1ba36ab1eecff8
| 3,639,197
|
def add_masses(line, mass_light, mass_heavy):
"""
    Add m/z information to the output line
"""
new_line = "{} {} {}\n".format(round_masses(mass_light), round_masses(mass_heavy), line)
return new_line
|
d8e92acf43d17e9a00de1e985e6cecadec0fa4b4
| 3,639,198
|
def load_r_ind_sent_bars():
"""
Loads the random index-barcodes of the actual networks
"""
bars = []
for text in texts:
bars.append(np.load('Textbooks/{}/r_ind_sent_bars.npy'.format(text)))
return bars
|
331b217976bc5a03a4e3a20331f06ba33a7aaad1
| 3,639,199
|