content (string, lengths 22–815k) | id (int64, range 0–4.91M) |
|---|---|
def standardize_ants_data(ants_data, subject_ID_col):
    """Standardize ANTs output column names for both hemispheres.

    Keeps only the subject-ID column ('Structure Name') and the ROI columns
    whose names start with 'left' / 'right', renaming them to 'L_<roi>' /
    'R_<roi>' (spaces in the ROI name are removed).

    Args:
        ants_data: DataFrame produced by ANTs; must contain a
            'Structure Name' column holding '<site>_<subject>' style IDs.
        subject_ID_col: name to give the standardized subject-ID column.

    Returns:
        A new DataFrame with standardized column names and the site prefix
        stripped from the subject IDs.
    """
    ants_useful_cols = ['Structure Name']
    ants_to_std_naming_dict = {'Structure Name': subject_ID_col}
    for roi in ants_data.columns:
        prefix = None
        name_split = roi.split(' ')
        if name_split[0] == 'left':
            prefix = 'L'
        elif name_split[0] == 'right':
            prefix = 'R'
        if prefix is not None:
            ants_useful_cols.append(roi)
            ants_to_std_naming_dict[roi] = prefix + '_' + ''.join(name_split[1:])
    ants_data_std = ants_data[ants_useful_cols].copy()
    ants_data_std = ants_data_std.rename(columns=ants_to_std_naming_dict)
    # Keep only the part after the last '_' to drop the site prefix.
    # (The old `_, col = series.str.rsplit('_', 1).str` tuple-unpack idiom is
    # removed in modern pandas and failed on IDs without an underscore.)
    ants_data_std[subject_ID_col] = (
        ants_data_std[subject_ID_col].str.rsplit('_', n=1).str[-1]
    )
    return ants_data_std
def make_gridpoints(bbox, resolution=1, return_coords=False):
    """It constructs a grid of points regularly spaced.
    Parameters
    ----------
    bbox : str, GeoDataFrame or dict.
        Corresponds to the boundary box in which the grid will be formed.
        If a str is provided, it should be in '(S,W,N,E)' format. With a
        GeoDataFrame, we will use the coordinates of the extremities. Also
        one can provide a dict with 'south', 'north', 'east', 'west'.
    resolution : float, default is 1.
        Space between the arbitrary points of resulting grid.
    return_coords : bool
        If it is wanted to return the coordinate sequences.
    """
    # parse_bbox normalizes every accepted input into a '(S,W,N,E)' string.
    bbox_ = parse_bbox(bbox)
    b_s, b_w, b_n, b_e = map(float, bbox_[1:-1].split(','))
    # Step counts per axis; resolution is divided by 111.32 / 110.57, which are
    # presumably km-per-degree of longitude/latitude (i.e. resolution in km) —
    # TODO confirm the intended unit.
    nlon = int(ceil((b_e-b_w) / (resolution/111.32)))
    nlat = int(ceil((b_n-b_s) / (resolution/110.57)))
    lonv, latv = meshgrid(linspace(b_w, b_e, nlon), linspace(b_s, b_n, nlat))
    gridpoints = pd.DataFrame(vstack([lonv.ravel(), latv.ravel()]).T,
                              columns=['lon', 'lat'])
    # One shapely Point per grid cell, built row-by-row.
    gridpoints['geometry'] = gridpoints.apply(lambda x: Point([x['lon'], x['lat']]),
                                              axis=1)
    # NOTE(review): crs={'init': ...} and sjoin(op=...) are deprecated in newer
    # geopandas/pyproj (use crs='EPSG:4326' and predicate=) — confirm pinned versions.
    gridpoints = gpd.GeoDataFrame(gridpoints, crs={'init': 'epsg:4326'})
    if isinstance(bbox, gpd.GeoDataFrame):
        # When a geometry was supplied, keep only points intersecting it.
        grid_ix = gpd.sjoin(gridpoints, bbox, op='intersects').index.unique()
        gridpoints = gridpoints.loc[grid_ix]
    if return_coords:
        return gridpoints, lonv, latv
    return gridpoints
def import_certbot_cert_version(
    c, domain_certs_path, certificate_version, server_url_root=DEFAULT_SERVER_ROOT
):
    """
    !!! HEY THIS PROBABLY HAPPENS ON UNENCRYPTED TRAFFIC !!!
    imports a particular version of a certificate
    eg, if a folder has a family of certs numbered like this:
        /domain-certs/fullchain1.pem
        /domain-certs/fullchain2.pem
        /domain-certs/fullchain3.pem
    you can import a specific version, for example "3", with this command
    usage:
        invoke import-certbot-cert-version --server-url-root="http://127.0.0.1:7201/.well-known/admin" --domain-certs-path="/path/to/ssl/archive/example.com" --certificate-version=3
    usage:
        export PETER_SSLERS_SERVER_ROOT="http://127.0.0.1:7201/.well-known/admin"
        invoke import-certbot-cert-version --domain-certs-path="/path/to/ssl/archive/example.com" --certificate-version=3
    """
    # `c` is presumably the invoke task Context (unused here) — TODO confirm.
    # All real work is delegated to the peter_sslers commandline helper.
    peter_sslers.commandline.import_certbot_cert_version(
        domain_certs_path, certificate_version, server_url_root
    )
    return
def removeElement_2(nums, val):
    """Remove all occurrences of ``val`` from ``nums`` in place.

    Two-pointer variant that does not preserve element order: a matching
    element is overwritten by the last element of the shrinking window, so
    each element is inspected at most once.

    The previous version returned wrong lengths for inputs such as
    ``([3], 3)`` (returned 1, expected 0) and ``([2, 3], 3)`` (returned 2,
    expected 1) because its ``count == 0`` correction fired incorrectly;
    it also left debug ``print`` calls in.

    Args:
        nums: list to modify in place.
        val: value to remove.

    Returns:
        int: the number n of remaining elements; ``nums[:n]`` holds the
        kept elements in arbitrary order.
    """
    n = len(nums)
    i = 0
    while i < n:
        if nums[i] == val:
            # Overwrite with the last unprocessed element; do NOT advance i,
            # because the swapped-in value must be checked too.
            n -= 1
            nums[i] = nums[n]
        else:
            i += 1
    return n
def decrypt_ballot_shares(
    request: DecryptBallotSharesRequest = Body(...),
    scheduler: Scheduler = Depends(get_scheduler),
) -> DecryptBallotSharesResponse:
    """
    Decrypt this guardian's share of one or more ballots

    Deserializes the submitted ballots, the election context and this
    guardian's election key pair from the request body, computes one
    decryption share per ballot, and returns the serialized shares.
    """
    ballots = [
        SubmittedBallot.from_json_object(ballot) for ballot in request.encrypted_ballots
    ]
    context = CiphertextElectionContext.from_json_object(request.context)
    election_key_pair = read_json_object(
        request.guardian.election_keys, ElectionKeyPair
    )
    # One decryption share per ballot; the injected scheduler is passed
    # through — presumably to parallelize the computation (TODO confirm).
    shares = [
        compute_decryption_share_for_ballot(
            election_key_pair, ballot, context, scheduler
        )
        for ballot in ballots
    ]
    response = DecryptBallotSharesResponse(
        shares=[write_json_object(share) for share in shares]
    )
    return response
def returnHumidity(dd):
    """Return humidity data from ``dd`` if present, deriving it otherwise.

    Looks for an 'RH' entry first, then 'RH1'; when neither exists the
    relative humidity is computed from the dew-point temperature and the
    air pressure.
    """
    for key in ('RH', 'RH1'):
        if key in dd:
            return dd[key]
    # No measured humidity available: convert dew point to relative humidity.
    pressure_mb = dd['airpres'] / 10  # hPa to mb
    return airsea.relHumFromTdew(dd['airtemp'], dd['airdewpoint'], pressure_mb)
def fixture_penn_chime_raw_df_no_beta(penn_chime_setup) -> DataFrame:
    """Runs penn_chime SIR model for no social policies

    Re-runs sim_sir with a single constant-beta policy spanning the whole
    fixture horizon (i.e. no social-distancing change), then derives the
    dispositions, admits and census columns on the raw result.
    """
    p, simsir = penn_chime_setup
    # Cover the same day span as the fixture's own simulation.
    n_days = simsir.raw_df.day.max() - simsir.raw_df.day.min()
    # One (beta, duration) policy tuple -> beta never changes.
    policies = [(simsir.beta, n_days)]
    raw = sim_sir(
        simsir.susceptible,
        simsir.infected,
        p.recovered,
        simsir.gamma,
        -simsir.i_day,  # presumably the start-day offset before day 0 — TODO confirm
        policies,
    )
    calculate_dispositions(raw, simsir.rates, market_share=p.market_share)
    calculate_admits(raw, simsir.rates)
    calculate_census(raw, simsir.days)
    raw_df = DataFrame(raw)
    return raw_df
def _verify_manifest_signature(manifest, text, digest):
"""
Verify the manifest digest and signature
"""
format_length = None
format_tail = None
if 'signatures' in manifest:
for sig in manifest['signatures']:
protected_json = _jose_decode_base64(sig['protected'])
protected = json.loads(protected_json)
curr_tail = _jose_decode_base64(protected['formatTail'])
if format_tail is None:
format_tail = curr_tail
elif format_tail != curr_tail:
msg = 'formatTail did not match between signature blocks'
raise ValueError(msg)
if format_length is None:
format_length = protected['formatLength']
elif format_length != protected['formatLength']:
msg = 'formatLen did not match between signature blocks'
raise ValueError(msg)
message = text[0:format_length] + format_tail
if hashlib.sha256(message).hexdigest() != digest:
msg = 'Failed to match manifest digest to downloaded content'
raise ValueError(msg)
return True | 5,329,907 |
def create_hierarchy(
    src_assets: List[Asset],
    dst_assets: List[Asset],
    project_src: str,
    runtime: int,
    client: CogniteClient,
    subtree_ids: Optional[List[int]] = None,
    subtree_external_ids: Optional[List[str]] = None,
    subtree_max_depth: Optional[int] = None,
):
    """
    Creates/updates the asset hierarchy in batches by depth, starting with the root assets and then moving on to the
    children of those roots, etc.
    Args:
        src_assets: A list of the assets that are in the source.
        dst_assets: A list of the assets that are in the destination.
        project_src: The name of the project the object is being replicated from.
        runtime: The timestamp to be used in the new replicated metadata.
        client: The client corresponding to the destination project.
        subtree_ids: The id of the subtree root to replicate,
        subtree_external_ids: The external id of the subtree root to replicate,
        subtree_max_depth: The maximum tree depth to replicate,
    Returns:
        Mapping from source asset id to destination asset id.
    """
    depth = 0
    parents = [None]  # root nodes parent id is None
    if subtree_ids is not None or subtree_external_ids is not None:
        # Replicating only a subtree: detach the subtree root(s) from their parents.
        unlink_subtree_parents(src_assets, subtree_ids, subtree_external_ids)
    children = find_children(src_assets, parents)
    src_dst_ids: Dict[int, int] = {}
    src_id_dst_asset = replication.make_id_object_map(dst_assets)
    # Breadth-first over the asset tree: each pass handles one depth level.
    while children:
        logging.info(f"Starting depth {depth}, with {len(children)} assets.")
        create_assets, update_assets, unchanged_assets = replication.make_objects_batch(
            children,
            src_id_dst_asset,
            src_dst_ids,
            build_asset_create,
            build_asset_update,
            project_src,
            runtime,
            depth=depth,
        )
        logging.info(f"Attempting to create {len(create_assets)} assets.")
        created_assets = replication.retry(client.assets.create, create_assets)
        logging.info(f"Attempting to update {len(update_assets)} assets.")
        updated_assets = replication.retry(client.assets.update, update_assets)
        # Extend the id mapping with everything handled at this depth so the
        # next depth can resolve its parents' destination ids.
        src_dst_ids = replication.existing_mapping(*created_assets, *updated_assets, *unchanged_assets, ids=src_dst_ids)
        logging.debug(f"Dictionary of current asset mappings: {src_dst_ids}")
        num_assets = len(created_assets) + len(updated_assets)
        logging.info(
            f"Finished depth {depth}, updated {len(updated_assets)} and "
            f"posted {len(created_assets)} assets (total of {num_assets} assets)."
        )
        depth += 1
        if subtree_max_depth is not None and depth > subtree_max_depth:
            logging.info("Reached max depth")
            break
        children = find_children(src_assets, children)
    return src_dst_ids
def build_and_save_entity_inputs(
    save_entity_dataset_name,
    X_entity_storage,
    data_config,
    dataset_threads,
    tokenizer,
    entity_symbols,
):
    """Create entity features.

    Tokenizes every entity into fixed-length model inputs and writes them
    into a memmapped structured array indexed by entity id (eid), using a
    multiprocessing pool when more than one worker is configured.

    Args:
        save_entity_dataset_name: memmap filename to save the entity data
        X_entity_storage: storage type for memmap file
        data_config: data config
        dataset_threads: number of threads
        tokenizer: tokenizer
        entity_symbols: entity symbols
    """
    # Cap worker count at ~80% of available cores.
    num_processes = min(dataset_threads, int(0.8 * multiprocessing.cpu_count()))
    # IMPORTANT: for distributed writing to memmap files, you must create them in w+
    # mode before being opened in r+ mode by workers
    memfile = np.memmap(
        save_entity_dataset_name,
        dtype=X_entity_storage,
        mode="w+",
        shape=(entity_symbols.num_entities_with_pad_and_nocand,),
        order="C",
    )
    # We'll use the -1 to check that things were written correctly later because at
    # the end, there should be no -1
    memfile["entity_token_type_ids"][:] = -1
    # The memfile corresponds to eids. As eid 0 and -1 are reserved for UNK/PAD
    # we need to set the values. These get a single [SEP] for title [SEP] rest of entity
    empty_ent = tokenizer(
        "[SEP]",
        padding="max_length",
        add_special_tokens=True,
        truncation=True,
        max_length=data_config.max_ent_len,
    )
    memfile["entity_input_ids"][0] = empty_ent["input_ids"][:]
    memfile["entity_token_type_ids"][0] = empty_ent["token_type_ids"][:]
    memfile["entity_attention_mask"][0] = empty_ent["attention_mask"][:]
    memfile["entity_to_mask"][0] = [0 for _ in range(len(empty_ent["input_ids"]))]
    memfile["entity_input_ids"][-1] = empty_ent["input_ids"][:]
    memfile["entity_token_type_ids"][-1] = empty_ent["token_type_ids"][:]
    memfile["entity_attention_mask"][-1] = empty_ent["attention_mask"][:]
    memfile["entity_to_mask"][-1] = [0 for _ in range(len(empty_ent["input_ids"]))]
    # Feature-extraction settings shared by both code paths below.
    constants = {
        "train_in_candidates": data_config.train_in_candidates,
        "max_ent_len": data_config.max_ent_len,
        "max_ent_type_len": data_config.entity_type_data.max_ent_type_len,
        "max_ent_kg_len": data_config.entity_kg_data.max_ent_kg_len,
        "use_types": data_config.entity_type_data.use_entity_types,
        "use_kg": data_config.entity_kg_data.use_entity_kg,
        "use_desc": data_config.use_entity_desc,
        "print_examples_prep": data_config.print_examples_prep,
    }
    if num_processes == 1:
        # Single-process path: load symbol tables here and write directly.
        if data_config.entity_type_data.use_entity_types:
            type_symbols = TypeSymbols.load_from_cache(
                load_dir=os.path.join(
                    data_config.entity_dir,
                    data_config.entity_type_data.type_symbols_dir,
                )
            )
        else:
            type_symbols = None
        if data_config.entity_kg_data.use_entity_kg:
            kg_symbols = KGSymbols.load_from_cache(
                load_dir=os.path.join(
                    data_config.entity_dir, data_config.entity_kg_data.kg_symbols_dir
                )
            )
        else:
            kg_symbols = None
        input_qids = list(entity_symbols.get_all_qids())
        num_qids, overflowed = build_and_save_entity_inputs_single(
            input_qids,
            constants,
            memfile,
            type_symbols,
            kg_symbols,
            tokenizer,
            entity_symbols,
        )
    else:
        # Multiprocess path: split qids into one chunk per worker; each worker
        # opens the memmap itself (see initializer) and writes its slice.
        input_qids = list(entity_symbols.get_all_qids())
        chunk_size = int(np.ceil(len(input_qids) / num_processes))
        input_chunks = [
            input_qids[i : i + chunk_size]
            for i in range(0, len(input_qids), chunk_size)
        ]
        log_rank_0_debug(logger, f"Starting pool with {num_processes} processes")
        pool = multiprocessing.Pool(
            processes=num_processes,
            initializer=build_and_save_entity_inputs_initializer,
            initargs=[
                constants,
                data_config,
                save_entity_dataset_name,
                X_entity_storage,
                tokenizer,
            ],
        )
        cnt = 0
        overflowed = 0
        for res in tqdm(
            pool.imap_unordered(
                build_and_save_entity_inputs_hlp, input_chunks, chunksize=1
            ),
            total=len(input_chunks),
            desc="Building entity data",
        ):
            c, overfl = res
            cnt += c
            overflowed += overfl
        pool.close()
    log_rank_0_debug(
        logger,
        f"{overflowed} out of {len(input_qids)} were overflowed",
    )
    # Re-open read-only and verify that every row was written (no -1 left).
    memfile = np.memmap(save_entity_dataset_name, dtype=X_entity_storage, mode="r")
    for i in tqdm(
        range(entity_symbols.num_entities_with_pad_and_nocand),
        desc="Verifying entity data",
    ):
        assert all(memfile["entity_token_type_ids"][i] != -1), f"Memfile at {i} is -1."
    memfile = None
    return
def ETL_work():
    """Render the ETL page."""
    template = "ETL_work.html"
    return render_template(template)
def find_by_key(data, target):
    """Yield every value stored under ``target`` anywhere in a nested dict."""
    for k, v in data.items():
        if isinstance(v, dict):
            # Recurse into sub-dicts and re-yield whatever they find.
            for hit in find_by_key(v, target):
                yield hit
        elif k == target:
            yield v
async def test_setup_error_no_station(get_fuel_prices, hass):
    """Test the setup with specified station not existing."""
    # Two sensor platform configs are expected to be processed.
    with assert_setup_component(2, sensor.DOMAIN):
        assert await async_setup_component(
            hass,
            sensor.DOMAIN,
            {
                "sensor": [
                    {
                        "platform": "nsw_fuel_station",
                        "station_id": 350,
                        "fuel_types": ["E10"],
                    },
                    {
                        "platform": "nsw_fuel_station",
                        "station_id": 351,
                        "fuel_types": ["P95"],
                    },
                ]
            },
        )
    await hass.async_block_till_done()
    # Presumably station 350 exists in the mocked price data while 351 does
    # not (see get_fuel_prices fixture — TODO confirm), so only the first
    # config produces an entity.
    assert hass.states.get("sensor.my_fake_station_e10") is not None
    assert hass.states.get("sensor.my_fake_station_p95") is None
def advanced_perm_check_function(*rules_sets, restrictions=None):
    """
    Check channels and permissions, use -s -sudo or -a -admin to run it.
    Args:
        *rules_sets: list of rules, 1d or 2d,
        restrictions: Restrictions must be always met
    Returns:
        message object returned by calling given function with given params
    """
    def decorator(coro):
        async def wrapper(ctx, *args, **kwargs):
            allowed = _check_advanced_perm(ctx,
                                           *args,
                                           **kwargs,
                                           rule_sets=[*rules_sets], restrictions=restrictions)
            if not allowed:
                # Permission failures surface as a command error; the check
                # itself may raise more specific exceptions earlier.
                raise CommandError("Permission check failed.")
            return await coro(ctx, *args, **kwargs)
        # Preserve the wrapped coroutine's identity for help/introspection.
        wrapper.__name__ = coro.__name__
        wrapper.__doc__ = coro.__doc__
        return wrapper
    return decorator
def worker_id():
    """Return a predefined worker ID.

    Returns:
        int: The static worker id.
    """
    static_id = 123
    return static_id
def get_urls(spec):
    """Construct the host URL and full URL of the Jupyter server from ``spec``."""
    routing = spec["routing"]
    scheme = "https" if routing["tls"]["enabled"] else "http"
    host_url = f"{scheme}://{routing['host']}"
    # Join the (trailing-slash-stripped) path onto the host.
    full_url = urljoin(host_url, routing["path"].rstrip("/"))
    return host_url, full_url
def resubs(resubpairs, target):
    """Apply several regex find/replace pairs to a string, in order.

    The previous version returned on its first statement using an undefined
    loop variable (NameError) and left the real loop below unreachable,
    which in turn called an undefined ``resub`` helper.

    Args:
        resubpairs: iterable of (compiled_pattern, replacement) pairs,
            e.g. [(find1, replace1), (find2, replace2), ...].
        target: string the substitutions are applied to.

    Returns:
        The string after every substitution has been applied in order.
    """
    for pattern, replacement in resubpairs:
        target = pattern.sub(replacement, target)
    return target
def time_profile(func):
    """Decorator that prints the wall-clock runtime of each call.

    Notes:
        * Meant for optimisation work only — do not use this in production.
    """
    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = time.time()
        value = func(*args, **kwargs)
        print(f"{func.__name__} : {time.time() - started}")
        return value
    return timed
def visualize_net_layers(model_path, weights_path, snapshot_iter, out_dir_path):
    """
    Save PNG visualizations of a Caffe net's conv/pool filters and activations.

    :param model_path: is the full path of a mode.deploy_prototxt
    :param weights_path: is the full path of caffenet weights or some snapshot
    :param snapshot_iter: iteration tag embedded in the output file names
    :param out_dir_path: directory the images are written to (created if missing)
    """
    makedirs_ok(out_dir_path)
    caffe = import_caffe()
    net = caffe.Net(model_path, # defines the structure of the model (deploy....)
                    caffe.TEST,
                    weights=weights_path
                    )
    # conv1: learned filters (as HWC tiles) and first-image activations.
    filters = net.params['conv1'][0].data
    vis_square(filters.transpose(0, 2, 3, 1), filename=path.join(out_dir_path, 'conv1_squares_{}.png'.format(snapshot_iter)))
    #plt.show()
    feat = net.blobs['conv1'].data[0, :36]
    vis_square(feat, padval=1, filename=path.join(out_dir_path, 'conv1_squares_closeup_{}.png'.format(snapshot_iter)))
    #plt.show()
    # conv2: first 48 filters, each 5x5 kernel shown per input channel.
    filters = net.params['conv2'][0].data
    vis_square(filters[:48].reshape(48**2, 5, 5), filename=path.join(out_dir_path, 'conv2_squares_{}.png'.format(snapshot_iter)))
    #plt.show()
    feat = net.blobs['conv2'].data[0, 100:136]
    vis_square(feat, padval=1, filename=path.join(out_dir_path, 'conv2_squares_closeup_{}.png'.format(snapshot_iter)))
    #plt.show()
    # conv3-conv5 and pool5: activation maps of the first image.
    feat = net.blobs['conv3'].data[0]
    vis_square(feat, padval=0.5, filename=path.join(out_dir_path, 'conv3_squares_{}.png'.format(snapshot_iter)))
    #plt.show()
    feat = net.blobs['conv4'].data[0]
    vis_square(feat, padval=0.5, filename=path.join(out_dir_path, 'conv4_squares_{}.png'.format(snapshot_iter)))
    #plt.show()
    feat = net.blobs['conv5'].data[0]
    vis_square(feat, padval=0.5, filename=path.join(out_dir_path, 'conv5_squares_{}.png'.format(snapshot_iter)))
    #plt.show()
    feat = net.blobs['pool5'].data[0]
    vis_square(feat, padval=1, filename=path.join(out_dir_path, 'pool5_squares_{}.png'.format(snapshot_iter)))
    #plt.show()
    # fc6/fc7: value trace (top) and histogram of positive activations (bottom).
    plt.figure(figsize=(10,10))
    plt.axis('off')
    feat = net.blobs['fc6'].data[0]
    plt.subplot(2, 1, 1)
    plt.plot(feat.flat)
    plt.subplot(2, 1, 2)
    _ = plt.hist(feat.flat[feat.flat > 0], bins=100)
    plt.savefig(path.join(out_dir_path, 'fc6_{}.png'.format(snapshot_iter)), bbox_inches='tight', pad_inches=0)
    plt.close()
    #plt.show()
    plt.figure(figsize=(10,10))
    plt.axis('off')
    feat = net.blobs['fc7'].data[0]
    plt.subplot(2, 1, 1)
    plt.plot(feat.flat)
    plt.subplot(2, 1, 2)
    _ = plt.hist(feat.flat[feat.flat > 0], bins=100)
    plt.savefig(path.join(out_dir_path, 'fc7_{}.png'.format(snapshot_iter)), bbox_inches='tight', pad_inches=0)
    plt.close()
async def create_guid(data: GuidIn) -> Type[GuidOut]:
    """
    Create a record w/o specifying a guid.
    Also cleans up expired records & caches the new record.
    """
    # Server-generated identifier.
    guid = uuid.uuid4().hex
    validated = data.dict()
    try:
        await create_guid_record(guid, validated['name'], validated['expire'])
    except Exception as detail:
        # Surface any persistence failure as a 400 with the error text.
        raise HTTPException(status_code=400, detail=f'{detail}')
    # Build serialized response
    out = GuidOut(
        id=guid,
        expire=validated['expire'],
        name=validated['name'],
    )
    # Cache stuff: TTL is the remaining lifetime of the record.
    # NOTE(review): `.seconds` only keeps the sub-day component of the
    # timedelta — for expiries more than a day away the cache TTL is too
    # short. Confirm whether `int(ttl.total_seconds())` was intended.
    ttl = validated['expire'] - datetime.now(timezone.utc)
    await cache.set(guid, out, ttl=ttl.seconds)
    return out
def exp_for(iterable, filename, display=False):
    """
    Run an experiment for words in given iterable and save its
    results to PATH_RESULTS/filename. If display is set to True, also
    print output to screen.
    The output is formated as follows:
    [word]RESULT_SEP[size]RESULT_SEP[n+x]RESULT_SEP[diff with upperbound]

    Returns:
        int: the number of words processed.
    """
    count = 0
    # `with` guarantees the file is closed even if results_for raises
    # (the previous version leaked the handle on error).
    with open(os.path.join(PATH_RESULTS, 'mdfa_' + filename), 'w') as fp:
        for word, size in results_for(iterable):
            # `//` keeps the Python 2 integer-division semantics of the
            # original `len(word) / 2` (this block used py2 `print` syntax).
            output = '%s%s%d%s%d+%d%s%d' % (
                word, RESULT_SEP,
                size, RESULT_SEP, len(word), size - len(word),
                RESULT_SEP, size - (len(word) + len(word) // 2))
            fp.write(output + '\n')
            if display:
                print(output)
            count += 1
    return count
def arg_parse():
    """
    Parse arguments to the detect module.

    Returns:
        argparse.Namespace with batch size, confidence/NMS thresholds,
        config and weights paths, input resolution and video file.
    """
    parser = argparse.ArgumentParser(description='YOLO v3 Detection Module')
    # type= ensures command-line values arrive as numbers, matching the
    # numeric defaults (previously CLI-supplied values stayed strings).
    parser.add_argument("--bs", dest="bs", help="Batch size",
                        default=1, type=int)
    parser.add_argument("--confidence", dest="confidence",
                        help="Object Confidence to filter predictions",
                        default=0.5, type=float)
    parser.add_argument("--nms_thresh", dest="nms_thresh", help="NMS Threshhold",
                        default=0.4, type=float)
    parser.add_argument("--cfg", dest='cfgfile', help="Config file",
                        default="cfg/yolov3.cfg", type=str)
    parser.add_argument("--weights", dest='weightsfile', help="weightsfile",
                        default="cfg/yolov3.weights", type=str)
    parser.add_argument("--reso", dest='reso',
                        help="Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
                        default="416", type=str)
    parser.add_argument("--video", dest="videofile",
                        help="Video file to run detection on",
                        default="videos/drone2.mp4", type=str)
    return parser.parse_args()
def test_me_rec():
    """test_me_rec: exercise ``rec`` and check its result."""
    assert rec(10) == 0
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Securitas platform."""
    alarms = []
    # Alarm support is toggled by the CONF_ALARM option (enabled by default).
    if int(hub.config.get(CONF_ALARM, 1)):
        for item in hub.Installations:
            # Fetch the current alarm status for each installation, bypassing
            # the update throttle.
            current_state: CheckAlarmStatus = hub.update_overview(
                item, no_throttle=True
            )
            alarms.append(
                SecuritasAlarm(
                    item, state=current_state, digits=hub.config.get(CONF_CODE_DIGITS)
                )
            )
    add_entities(alarms)
def remove_useless_lines(text):
    """Removes lines that don't contain a word nor a number.

    Args:
        text (string): markdown text that is going to be processed.

    Returns:
        string: text once it is processed.
    """
    # A "useless" line consists solely of non-word, non-newline characters;
    # its content is blanked while the surrounding newlines are kept.
    pattern = re.compile(r'^[^\w\n]*$', re.MULTILINE | re.UNICODE)
    return pattern.sub('', text)
def get_candidate(word):
    """get candidate word set
    @word -- the given word
    @return -- a list of candidate words
    """
    raw = meanslike(word) | senselike(word)
    cleaned = []
    for cand in raw:
        # Normalize '_' and '-' separators to spaces.
        cand = cand.replace('_', ' ').replace('-', ' ')
        # Drop entries with special characters (e.g. "Ann's book").
        if not ''.join(cand.split()).isalpha():
            continue
        # Drop phrases of three or more words.
        if len(cand.split()) >= 3:
            continue
        cand = cand.lower()
        # Drop candidates that contain the word itself.
        if word in cand:
            continue
        cleaned.append(cand)
    return cleaned
def _build_proxy_response(response: RawResponse, error_handler: callable) -> dict:
    """Once the application completes the request, maps the results into the
    format required by AWS.
    """
    try:
        # A captured application exception takes precedence over the result.
        if response.caught_exception is not None:
            raise response.caught_exception
        body = ''.join(str(part) for part in response.result)
    except Exception as exc:
        return error_handler(exc)
    # Status line looks like "200 OK"; AWS wants only the numeric code.
    status_code = response.response_status.split(' ')[0]
    return {
        'statusCode': status_code,
        'headers': response.outbound_headers,
        'body': body,
    }
def parse_and_execute(config: dict) -> Dict[str, Any]:
    """Validate, parse and transform the config. Execute backends based
    on the transformed config to perform I/O operations.
    If `prompt` and/or `default` contains a macro, it will be expanded.
    :param config: The original config
    :type config: dict
    :return: A dict containing values collected during I/O operations for the
        corresponding key
    :rtype: dict
    """
    parsed_config = _parse(config)
    res: Dict[str, Any] = dict()
    for key, value in parsed_config.items():
        backend = value.get("backend")
        params = value.get("params")
        # TODO: Maybe use Jinja2 template rendering instead of macro expansion?
        # Macros may reference values already collected for earlier keys (res).
        _expand_macro(key=key, params=params, param_key="prompt", inputs=res)
        _expand_macro(key=key, params=params, param_key="default", inputs=res)
        res[key] = _execute(backend_module_name=backend, **params)
    return res
def reduce_any(input_tensor, axis=None, keepdims=None,
               name=None, reduction_indices=None):
    """
    Wrapper around the tf.reduce_any to handle argument keep_dims

    All arguments are forwarded unchanged to ``reduce_function``, which
    presumably reconciles the keep_dims/keepdims rename across TensorFlow
    versions — confirm in ``reduce_function``.
    """
    return reduce_function(tf.reduce_any, input_tensor, axis=axis,
                           keepdims=keepdims, name=name,
                           reduction_indices=reduction_indices)
def _dict_merge(
dct: MutableMapping[str, Any], merge_dct: MutableMapping[str, Any]
) -> None:
"""Recursive dictionary merge.
Inspired by :meth:``dict.update()``, instead of updating only top-level
keys, dict_merge recurses down into dicts nested to an arbitrary depth,
updating keys. The ``merge_dct`` is merged into ``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for key in merge_dct:
if (
key in dct
and isinstance(dct[key], dict)
and isinstance(merge_dct[key], MutableMapping)
):
_dict_merge(dct[key], merge_dct[key])
else:
dct[key] = merge_dct[key] | 5,329,929 |
def get_customer_profile_ids():
    """get customer profile IDs

    Queries the Authorize.Net getCustomerProfileIds API with the
    credentials from ``constants`` and prints a summary of the result.
    Returns the raw API response (or None if none was received).
    """
    merchantAuth = apicontractsv1.merchantAuthenticationType()
    merchantAuth.name = constants.apiLoginId
    merchantAuth.transactionKey = constants.transactionKey
    CustomerProfileIdsRequest = apicontractsv1.getCustomerProfileIdsRequest()
    CustomerProfileIdsRequest.merchantAuthentication = merchantAuth
    CustomerProfileIdsRequest.refId = "Sample"
    controller = getCustomerProfileIdsController(CustomerProfileIdsRequest)
    controller.execute()
    # Work on the response
    response = controller.getresponse()
    # if (response.messages.resultCode == "Ok"):
    #     print("Successfully retrieved customer ids:")
    #     for identity in response.ids.numericString:
    #         print(identity)
    # else:
    #     print("response code: %s" % response.messages.resultCode)
    if response is not None:
        if response.messages.resultCode == apicontractsv1.messageTypeEnum.Ok:
            if hasattr(response, 'ids'):
                if hasattr(response.ids, 'numericString'):
                    print('Successfully retrieved customer IDs.')
                    if response.messages is not None:
                        print('Message Code: %s' % response.messages.message[0]['code'].text)
                        print('Message Text: %s' % response.messages.message[0]['text'].text)
                        print('Total Number of IDs Returned in Results: %s'
                              % len(response.ids.numericString))
                    print()
                    # There's no paging options in this API request; the full list is returned every call.
                    # If the result set is going to be large, for this sample we'll break it down into smaller
                    # chunks so that we don't put 72,000 lines into a log file
                    # NOTE(review): range(0, 19) prints 19 entries, not the
                    # advertised "First 20" — confirm intent.
                    print('First 20 results:')
                    for profileId in range(0, 19):
                        print(response.ids.numericString[profileId])
                else:
                    if response.messages is not None:
                        print('Failed to get list.')
                        print('Code: %s' % (response.messages.message[0]['code'].text))
                        print('Text: %s' % (response.messages.message[0]['text'].text))
        else:
            if response.messages is not None:
                print('Failed to get list.')
                print('Code: %s' % (response.messages.message[0]['code'].text))
                print('Text: %s' % (response.messages.message[0]['text'].text))
    else:
        print('Error. No response received.')
    return response
def download_flexibility_data(
    es=None, fpath="", sz_path="C:/Program Files/7-Zip/7z.exe"
):
    """Downloads the NREL EFS flexibility data for the specified electrification
    scenarios.
    :param set/list es: The electrification scenarios that will be downloaded. Can
        choose any of: *'Reference'*, *'Medium'*, *'High'*, or *'All'*. Defaults to
        None.
    :param str fpath: The file path to which the NREL EFS data will be downloaded.
    :param str sz_path: The file path on Windows machines that points to the 7-Zip tool.
        Defaults to *'C:/Program Files/7-Zip/7z.exe'*.
    :raises TypeError: if sz_path is not input as a str.
    """
    # Account for the immutable default parameter
    if es is None:
        es = {"All"}
    # Check the inputs
    es = _check_electrification_scenarios_for_download(es)
    fpath = _check_path(fpath)
    if not isinstance(sz_path, str):
        raise TypeError("The 7-Zip path must be input as a str.")
    # Download each of the specified load profiles
    z = {}
    for i in es:
        # Assign path and file names
        zip_name = f"EFS Flexible Load Profiles - {i} Electrification.zip"
        url = f"https://data.nrel.gov/system/files/127/{zip_name}"
        # Store the data in memory to try extracting with Python's zipfile module
        z[i] = _download_data(zip_name, url, fpath)
    # Try to extract the .csv file from the .zip file
    # zf_works tracks whether the zipfile module works for these archives;
    # sz_path is presumably the 7-Zip fallback — confirm in _extract_data.
    zf_works = True
    for i in es:
        # Assign path and file names
        zip_name = f"EFS Flexible Load Profiles - {i} Electrification.zip"
        csv_name = f"EFSFlexLoadProfiles_{i}.csv"
        # Try to extract the .csv file from the .zip file
        zf_works = _extract_data(z[i], zf_works, zip_name, csv_name, fpath, sz_path)
def recip(curvelist):
    """
    Take the reciprocal of the y values of the curve or list of curves.
    >>> curves = pydvif.read('testData.txt')
    >>> pydvif.recip(curves[1])
    >>> pydvif.create_plot(curves, legend=True, stylename='ggplot')
    :param curvelist: The curve or list of curves
    :type curvelist: Curve or list
    """
    # Accept either a single curve or a list of curves.
    targets = curvelist if isinstance(curvelist, list) else [curvelist]
    for curve in targets:
        curve.y = np.reciprocal(curve.y)
def compare_alignments_(prediction: List[dict], ground_truth: List[dict], types: List[str]) -> (float, float, float):
    """
    Score predicted alignments against the ground truth.

    Parameters
    ----------
    prediction: List of dictionaries containing the predicted alignments
    ground_truth: List of dictionaries containing the ground truth alignments
    types: List of alignment types to consider for evaluation (e.g ['match', 'deletion', 'insertion']
    Returns
    -------
    precision, recall, f score
    """
    preds = [a for a in prediction if a['label'] in types]
    truths = [a for a in ground_truth if a['label'] in types]
    if not preds and not truths:
        # No prediction and no ground truth for the given types -> correct alignment.
        return 1., 1., 1.
    hits = sum(1 for a in preds if a in truths)
    precision = hits / len(preds) if preds else 0.
    recall = hits / len(truths) if truths else 0
    denom = precision + recall
    f_score = 2 * precision * recall / denom if denom > 0 else 0
    return precision, recall, f_score
def shim_unpack(
    unpack_fn,  # type: TShimmedFunc
    download_dir,  # type str
    tempdir_manager_provider,  # type: TShimmedFunc
    ireq=None,  # type: Optional[Any]
    link=None,  # type: Optional[Any]
    location=None,  # type Optional[str],
    hashes=None,  # type: Optional[Any]
    progress_bar="off",  # type: str
    only_download=None,  # type: Optional[bool]
    downloader_provider=None,  # type: Optional[TShimmedFunc]
    session=None,  # type: Optional[Any]
    verbosity=0,  # type: Optional[int]
):
    # (...) -> None
    """
    Accepts all parameters that have been valid to pass
    to :func:`pip._internal.download.unpack_url` and selects or
    drops parameters as needed before invoking the provided
    callable.
    :param unpack_fn: A callable or shim referring to the pip implementation
    :type unpack_fn: Callable
    :param str download_dir: The directory to download the file to
    :param TShimmedFunc tempdir_manager_provider: A callable or shim referring to
        `global_tempdir_manager` function from pip or a shimmed no-op context manager
    :param Optional[:class:`~pip._internal.req.req_install.InstallRequirement`] ireq:
        an Install Requirement instance, defaults to None
    :param Optional[:class:`~pip._internal.models.link.Link`] link: A Link instance,
        defaults to None.
    :param Optional[str] location: A location or source directory if the target is
        a VCS url, defaults to None.
    :param Optional[Any] hashes: A Hashes instance, defaults to None
    :param str progress_bar: Indicates progress par usage during download, defatuls to
        off.
    :param Optional[bool] only_download: Whether to skip install, defaults to None.
    :param Optional[ShimmedPathCollection] downloader_provider: A downloader class
        to instantiate, if applicable.
    :param Optional[`~requests.Session`] session: A PipSession instance, defaults to
        None.
    :param Optional[int] verbosity: 1 or 0 to indicate verbosity flag, defaults to 0.
    :return: The result of unpacking the url.
    :rtype: None
    """
    unpack_fn = resolve_possible_shim(unpack_fn)
    downloader_provider = resolve_possible_shim(downloader_provider)
    tempdir_manager_provider = resolve_possible_shim(tempdir_manager_provider)
    # Inspect the target's parameter names so we only pass arguments it
    # accepts — unpack_url's signature varies across pip versions.
    required_args = inspect.getargs(unpack_fn.__code__).args  # type: ignore
    unpack_kwargs = {"download_dir": download_dir}
    with tempdir_manager_provider():
        if ireq:
            # Derive missing values from the install requirement.
            if not link and ireq.link:
                link = ireq.link
            if only_download is None:
                only_download = ireq.is_wheel
            if hashes is None:
                hashes = ireq.hashes(True)
            if location is None and getattr(ireq, "source_dir", None):
                location = ireq.source_dir
        unpack_kwargs.update({"link": link, "location": location})
        if hashes is not None and "hashes" in required_args:
            unpack_kwargs["hashes"] = hashes
        if "progress_bar" in required_args:
            unpack_kwargs["progress_bar"] = progress_bar
        if only_download is not None and "only_download" in required_args:
            unpack_kwargs["only_download"] = only_download
        if session is not None and "session" in required_args:
            unpack_kwargs["session"] = session
        if (
            "download" in required_args or "downloader" in required_args
        ) and downloader_provider is not None:
            # Newer pip versions take a Downloader object instead of a session.
            arg_name = "download" if "download" in required_args else "downloader"
            assert session is not None
            assert progress_bar is not None
            unpack_kwargs[arg_name] = downloader_provider(session, progress_bar)
        if "verbosity" in required_args:
            unpack_kwargs["verbosity"] = verbosity
        return unpack_fn(**unpack_kwargs)
def xavier_uniform(x):
    """Wrapper for the ``torch.nn.init.xavier_uniform_`` method.

    Parameters
    ----------
    x : torch.tensor
        Input tensor to be initialized (modified in place).
        See torch.nn.init.py for more information.

    Returns
    -------
    torch.tensor
        The same tensor, after Xavier/Glorot uniform initialization.
    """
    initialized = init.xavier_uniform_(x)
    return initialized
def _get_interpreters_win():  # pyzo_in_leo.py
    """
    Monkey-patch pyzo/util/interpreters._get_interpreters_win.
    This patched code fixes an apparent pyzo bug.
    Unlike shutil.which, this function returns all plausible python executables.
    Copyright (C) 2013-2019 by Almar Klein.
    """
    import pyzo.util.interpreters as interps  # EKR
    candidates = []
    # Installations recorded in the Windows registry.
    for interp in interps.get_interpreters_in_reg():  # EKR
        candidates.append(interp.installPath())
    # Well-known installation roots on Windows.
    roots = (
        'C:/', '~/',
        'C:/program files/', 'C:/program files (x86)/', 'C:/ProgramData/',
        '~/appdata/local/programs/python/',
        '~/appdata/local/continuum/', '~/appdata/local/anaconda/',
    )
    for root in roots:
        root = os.path.expanduser(root)
        if not os.path.isdir(root):
            continue
        for entry in os.listdir(root):
            if entry.lower().startswith(('python', 'pypy', 'miniconda', 'anaconda')):
                candidates.append(os.path.join(root, entry))
    # Normalize paths, drop trailing backslashes, and skip None entries
    # (the None guard is the EKR fix).
    normalized = [
        os.path.normcase(os.path.abspath(p)).strip('\\')
        for p in candidates
        if p is not None
    ]
    # Keep only directories that actually contain an interpreter executable.
    executables = []
    for directory in normalized:
        for exe_name in ('python.exe', 'pypy.exe'):
            exe_path = os.path.join(directory, exe_name)
            if os.path.isfile(exe_path):
                executables.append(exe_path)
                break
    # Return as a set to remove duplicates.
    return set(executables)
def create_test_data(site, start=None, end="now", interval=5, units='minutes' , val=50, db='test_db', data={}):
    """
    data = {'R1':[0,0,0,..],'R2':[0,0,123,12,...]...} will not generate date but use fixed data set
    if val is not set random data will be generated if data is not existing

    Creates one Django Data_Point per timestamp between `start` and `end`
    (spaced `interval` `units` apart) and mirrors each point to InfluxDB.
    Returns the number of timestamps generated.

    NOTE(review): `data={}` is a mutable default argument; it is only read
    here, but sharing one dict across calls is a known Python pitfall.
    """
    _influx_db_name = db
    i = Influx(database=_influx_db_name)
    # One timestamp per data point over the requested range.
    data_point_dates = generate_date_array(start=start, end=end, interval=interval, units=units)
    voltage_in = 220
    voltage_out = 220
    # With a truthy `val`, every channel is pinned to that constant value.
    soc = val
    R1 = val
    R2 = val
    R3 = val
    R4 = val
    R5 = val
    count = 0
    print "creating %s test data points"%len(data_point_dates)
    print "between %s and %s "%(data_point_dates[0],data_point_dates[len(data_point_dates)-1:])
    # Simulate Grid outage
    for time_val in data_point_dates:
        if not val:
            # Prefer the user-supplied series; fall back to random values
            # when the series is missing or shorter than the date range.
            try:
                soc = data.get('soc',[])[count]
            except:
                soc = get_random_int()
            try:
                R1 = data.get('R1',[])[count]
            except:
                R1 = voltage_in * get_random_binary()
            try:
                R2 = data.get('R2',[])[count]
            except:
                R2 = get_random_interval(100,500)
            try:
                R3 = data.get('R3',[])[count]
            except:
                R3 = get_random_interval(22,28)
            try:
                R4 = data.get('R4',[])[count]
            except:
                R4 = get_random_interval(100,500)
            try:
                R5 = data.get('R5',[])[count]
            except:
                R5 = get_random_interval(100,500)
        dp = Data_Point.objects.create(
            site=site,
            soc = soc ,
            battery_voltage = R3,
            time=time_val,
            AC_Voltage_in = R1,
            AC_Voltage_out = voltage_out,
            AC_input = R4,
            AC_output = R5,
            AC_output_absolute = R2,
            AC_Load_in = R2,
            AC_Load_out = R4,
            pv_production = R5)
        # Also send ton influx
        dp_dict = model_to_dict(dp)
        # These fields are not sent to Influx; drop them from the payload.
        dp_dict.pop('time')
        dp_dict.pop('inverter_state')
        dp_dict.pop('id')
        i.send_object_measurements(dp_dict,timestamp=time_val.isoformat(),tags={"site_name":site.site_name})
        count = count + 1
    # Count number of outages
    return len(data_point_dates)
def make_album(artist_name, album_name, number_track=''):
    """Build and print a dict describing an album.

    Keys hold the title-cased artist and album names; the track count is
    included only when a non-empty value is provided.
    """
    album = {'artist_name': artist_name.title()}
    album['album_name'] = album_name.title()
    if number_track:
        album['number_track'] = number_track
    print(album)
def test_ghz():
    """Generates a GHZ state and checks that the correct correlations (noise reductions) are observed
    from the samples

    See Eq. 5 of https://advances.sciencemag.org/content/5/5/eaaw4530
    """
    # Set up the circuit
    np.random.seed(42)
    n = 10          # number of entangled modes
    vac_modes = 1   # leading vacuum modes prepended to the state
    copies = 1000   # number of sampled copies of the state
    sq_r = 5        # squeezing parameter
    # NOTE(review): with vac_modes <= 1, i never equals n + 1 inside
    # range(n + vac_modes), so the arccos branch always runs — confirm
    # the intended guard index.
    alpha = [np.arccos(np.sqrt(1 / (n - i + 1))) if i != n + 1 else 0 for i in range(n + vac_modes)]
    alpha[0] = 0.0
    phi = [0] * (n + vac_modes)
    phi[0] = np.pi / 2
    # Measuring X nullifier
    theta = [0] * (n + vac_modes)
    samples_X = singleloop(sq_r, alpha, phi, theta, copies)
    reshaped_samples_X = np.array(samples_X).reshape([copies, n + vac_modes])
    # We will check that the x of all the modes equal the x of the last one
    nullifier_X = lambda sample: (sample - sample[-1])[vac_modes:-1]
    val_nullifier_X = np.var([nullifier_X(x) for x in reshaped_samples_X], axis=0)
    # Each X-nullifier variance should match the squeezed-vacuum level.
    assert np.allclose(val_nullifier_X, 2 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(copies))
    # Measuring P nullifier
    theta = [np.pi / 2] * (n + vac_modes)
    samples_P = singleloop(sq_r, alpha, phi, theta, copies)
    # We will check that the sum of all the p is equal to zero
    reshaped_samples_P = np.array(samples_P).reshape([copies, n + vac_modes])
    nullifier_P = lambda sample: np.sum(sample[vac_modes:])
    val_nullifier_P = np.var([nullifier_P(p) for p in reshaped_samples_P], axis=0)
    assert np.allclose(val_nullifier_P, n * np.exp(-2 * sq_r), rtol=5 / np.sqrt(copies))
def delete_version(session, package_name, version):
    """
    Delete a single version of a package, then update the parent
    ``Package.latest_version`` or delete the Package when no versions remain.
    Commits the session.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.session.Session`
    package_name : str
        The NuGet name of the package - the "id" tag in the NuSpec file.
    version : str

    Notes
    -----
    ``Query.one()`` raises ``NoResultFound`` when the package/version
    pair does not exist.
    """
    msg = "db.delete_version({}, {})"
    logger.debug(msg.format(package_name, version))
    sql = (session.query(Version).join(Package)
           .filter(Package.name == package_name)
           .filter(Version.version == version)
           )
    # NOTE: rebinds the `version` parameter to the matching ORM row.
    version = sql.one()
    pkg = version.package
    session.delete(version)
    # update the Package.latest_version value, or delete the Package
    versions = (session.query(Version)
                .join(Package)
                .filter(Package.name == package_name)
                ).all()
    if len(versions) > 0:
        # NOTE(review): max() compares version strings lexicographically,
        # so e.g. "9.0" sorts above "10.0" — confirm whether semantic
        # version ordering is required here.
        pkg.latest_version = max(v.version for v in versions)
    else:
        logger.info("No more versions exist. Deleting package %s" % pkg)
        session.delete(pkg)
    session.commit()
def Nmin(e, dz, s, a, a_err):
    """Estimate the minimum number of independent structures needed to
    detect a difference in dN/dz relative to a field value given by
    dNdz|field = a +- a_err, at statistical significance s, using a
    redshift path of dz per structure."""
    e, dz, s, a, a_err = (
        np.array(arg).astype(float) for arg in (e, dz, s, a, a_err)
    )
    # Analytical expression derived by N.T.
    denominator = ((e - 1.) - s * a_err / a) ** 2
    return (e / dz / a) * (s ** 2) / denominator
def remove_punctuation(list_of_string, item_to_keep=""):
    """
    Strip punctuation from every string in a list.

    Parameters
    ----------
    - list_of_string : a dataframe column or variable containing the text stored as a list of string sentences
    - item_to_keep : a string of punctuation signs you want to keep in text (e.g., '!?.,:;')
    """
    # Build the set of punctuation characters to strip, preserving any
    # characters the caller wants to keep.
    if item_to_keep:
        punctuation = "".join(
            ch for ch in string.punctuation if ch not in item_to_keep
        )
    else:
        punctuation = string.punctuation
    # A translation table deletes every listed character in one C-level pass.
    table = str.maketrans("", "", punctuation)
    return [sentence.translate(table) for sentence in list_of_string]
def save_sp500_tickers(force_download=False):
    """Get the S&P 500 tickers from Wikipedia.

    The ticker list is cached in ``sp500tickers.pickle``; a usable cache
    is returned directly unless ``force_download`` is True.

    Parameters
    ----------
    force_download : bool
        if True, force redownload of data even when a cached copy exists

    Returns
    -------
    tickers : list of str
        The S&P 500 ticker symbols.
    """
    # Fix: `force_download` was previously accepted but ignored — the
    # cache file was written yet never read.
    if not force_download:
        try:
            with open("sp500tickers.pickle", "rb") as f:
                return pickle.load(f)
        except (OSError, pickle.UnpicklingError, EOFError):
            pass  # no usable cache -> fall through and download
    resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, "lxml")
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = []
    # fix for . and - tickers (e.g. BRK.B -> BRK-B); table is loop-invariant
    mapping = str.maketrans(".", "-")
    for row in table.findAll('tr')[1:]:
        ticker = row.findAll('td')[1].text
        tickers.append(ticker.translate(mapping))
    # cache the results for local access
    with open("sp500tickers.pickle", "wb") as f:
        pickle.dump(tickers, f)
    return tickers
def get_search_keywords(testcase):
    """Get search keywords for a testcase."""
    # Only the top two crash-state frames are relevant for searching.
    top_frames = testcase.crash_state.splitlines()[:2]
    return top_frames
def make_protein_index(proteins):
    """Map each accession token to the index of its protein.

    Accession strings are pipe-delimited; database prefixes such as
    'sp', 'tr', 'gi' and 'ref' (and empty tokens) are ignored.
    """
    ignored = {'sp', 'tr', 'gi', 'ref', ''}
    index = {}
    for position, protein in enumerate(proteins):
        for token in protein.accession.split('|'):
            if token not in ignored:
                index[token] = position
    return index
def jvp_solve_Hz(g, Hz, info_dict, eps_vec, source, iterative=False, method=DEFAULT_SOLVER):
    """ Gives jvp for solve_Hz with respect to eps_vec """
    # Rebuild the system matrix for the current permittivity.
    system_matrix = make_A_Hz(info_dict, eps_vec)
    # Diagonal matrix holding 1 / eps on its diagonal.
    inv_eps_diag = sp.spdiags(1 / eps_vec, [0], eps_vec.size, eps_vec.size)
    # Derivative fields along x and y; the tangent `g` enters between the
    # two inverse-permittivity factors (order matters — matmul).
    grad_x = spdot(info_dict['Dxb'], Hz) * inv_eps_diag * g * inv_eps_diag
    grad_y = spdot(info_dict['Dyb'], Hz) * inv_eps_diag * g * inv_eps_diag
    # Assemble the RHS from the forward-difference operators.
    rhs = spdot(info_dict['Dxf'], grad_x) + spdot(info_dict['Dyf'], grad_y)
    # Apply A^{-1} on the left and scale by the vacuum permittivity.
    Hz_forward = sparse_solve(system_matrix, rhs, iterative=iterative, method=method)
    return 1 / EPSILON_0 * Hz_forward
def questions(difficulty):
    """Branch to the desired difficulty.

    Takes a string specifying the difficulty as an argument and returns
    nothing; each difficulty maps to the number of questions asked.
    """
    # Number of questions per difficulty; anything else means hard.
    question_counts = {"easy": 4, "medium": 6}
    hard_count = 8
    ask(question_counts.get(difficulty, hard_count))
    return
def encode(X: Union[tf.Tensor, np.ndarray], encoder: keras.Model, **kwargs) -> tf.Tensor:
    """
    Encodes the input tensor.

    Parameters
    ----------
    X
        Input to be encoded.
    encoder
        Pretrained encoder network.

    Returns
    -------
    Input encoding.
    """
    # training=False selects the network's inference behaviour.
    encoding = encoder(X, training=False)
    return encoding
async def test_simulation_is_not_injected_when_not_simulating():
    """
    Make sure the simulation is not instantiated when running in non-simulation mode.
    """
    # Resolving the simulation runner must fail outside simulation mode.
    with pytest.raises(Exception):
        container = await _get_container(False, True, True)
        container.get('simulation_runner')
def _raiden_cleanup(request, raiden_apps):
    """ Helper to do cleanup a Raiden App. """
    def _stop_apps():
        for raiden_app in raiden_apps:
            try:
                raiden_app.stop(leave_channels=False)
            except RaidenShuttingDown:
                pass
        # Two tests in sequence could run a UDP server on the same port; a
        # hanging greenlet from the previous test could send packets to the
        # new server and corrupt its state.  Kill all greenlets so no
        # left-over state interferes with the next test.
        cleanup_tasks()
    request.addfinalizer(_stop_apps)
def _step2(input):
    """
    _step2 - function to apply step2 rules of the Porter stemmer.

    Each rule rewrites a derivational suffix and fires only when the
    measure m (count of v.c. sequences, computed by _compute_m) of the
    stem preceding the suffix is > 0.  At most one rule fires.

    Inputs:
        - input : str
    Outputs:
        - input : str
    """
    # ational -> ate
    if input.endswith('ational') and _compute_m(input[:-7]) > 0:
        return input[:-1 * len('ational')] + 'ate'
    # tional -> tion
    elif input.endswith('tional') and _compute_m(input[:-6]) > 0:
        return input[:-1*len('tional')] + 'tion'
    # enci -> ence
    elif input.endswith('enci') and _compute_m(input[:-4]) > 0:
        return input[:-1] + 'e'
    # anci -> ance
    elif input.endswith('anci') and _compute_m(input[:-4]) > 0:
        return input[:-1] + 'e'
    # izer -> ize
    elif input.endswith('izer') and _compute_m(input[:-4]) > 0:
        return input[:-1]
    # abli -> able
    elif input.endswith('abli') and _compute_m(input[:-4]) > 0:
        return input[:-1] + 'e'
    # alli -> al
    elif input.endswith('alli') and _compute_m(input[:-4]) > 0:
        return input[:-2]
    # entli -> ent
    # Fix: this rule previously lacked the m > 0 guard that the algorithm
    # requires and every sibling rule applies.
    elif input.endswith('entli') and _compute_m(input[:-5]) > 0:
        return input[:-2]
    # eli -> e
    elif input.endswith('eli') and _compute_m(input[:-3]) > 0:
        return input[:-2]
    # ousli -> ous
    # Fix: previously returned input[:-2] + 's', which yielded "...ouss".
    elif input.endswith('ousli') and _compute_m(input[:-5]) > 0:
        return input[:-2]
    # ization -> ize
    elif input.endswith('ization') and _compute_m(input[:-7]) > 0:
        return input[:-5] + 'e'
    # ation -> ate
    elif input.endswith('ation') and _compute_m(input[:-5]) > 0:
        return input[:-3] + 'e'
    # ator -> ate
    # Fix: the measure was computed on input[:-5] although 'ator' is only
    # four characters long.
    elif input.endswith('ator') and _compute_m(input[:-4]) > 0:
        return input[:-2] + 'e'
    # alism -> al
    elif input.endswith('alism') and _compute_m(input[:-5]) > 0:
        return input[:-3]
    # iveness -> ive
    elif input.endswith('iveness') and _compute_m(input[:-7]) > 0:
        return input[:-4]
    # fulness -> ful
    elif input.endswith('fulness') and _compute_m(input[:-7]) > 0:
        return input[:-4]
    # ousness -> ous
    elif input.endswith('ousness') and _compute_m(input[:-7]) > 0:
        return input[:-4]
    # aliti -> al
    elif input.endswith('aliti') and _compute_m(input[:-5]) > 0:
        return input[:-3]
    # iviti -> ive
    elif input.endswith('iviti') and _compute_m(input[:-5]) > 0:
        return input[:-3] + 'e'
    # biliti -> ble
    elif input.endswith('biliti') and _compute_m(input[:-6]) > 0:
        return input[:-5] + 'le'
    # No rule matched: the word is returned unchanged.
    return input
def test_check_duplicates():
    """Exercise check_duplicates() across keep/indicator/return options.

    Covers columns with no duplicates, a string column with repeated,
    blank and whitespace-only values, a missing column, and a numeric
    column with repeated and NaN values.
    """
    d = {
        "col1": [1, 2, 3, 4, 5, 6, 7],
        "col2": ["Value 1", "Value 1", "", " ", "Value 5", "Value 6", "Value 7"],
        "col4": [1, 2, 3, 4, 4, np.nan, np.nan],
    }
    df = pd.DataFrame(data=d)
    print(df)
    # col1 is unique -> nothing to report.
    dfdupes = check_duplicates(df, ["col1"])
    assert dfdupes is None
    dfdupes = check_duplicates(df, ["col2"])
    assert len(dfdupes) == 2
    # col3 does not exist -> treated as no duplicates.
    dfdupes = check_duplicates(df, ["col3"])
    assert dfdupes is None
    # The column may also be passed as a bare string instead of a list.
    dfdupes = check_duplicates(df, "col2")
    assert len(dfdupes) == 2
    dfdupes = check_duplicates(df, "col3")
    assert dfdupes is None
    dfdupes = check_duplicates(df, ["col2"], keep="first")
    assert len(dfdupes) == 1
    assert dfdupes["col1"].iloc[0] == 1  # the first record is kept
    dfdupes = check_duplicates(
        df, ["col2"], keep="first", indicator=True, return_non_duplicates=True
    )
    assert len(dfdupes) == 6
    assert dfdupes["_duplicates_keep"].any() == False
    dfdupes = check_duplicates(
        df, ["col2"], keep=False, indicator=True, return_non_duplicates=False
    )
    assert len(dfdupes) == 2
    assert dfdupes["_duplicates"].all() == True
    dfdupes = check_duplicates(
        df, ["col2"], keep=False, indicator=False, return_non_duplicates=True
    )
    assert len(dfdupes) == 7  # we should get all the population
    assert sum(dfdupes["_duplicates"]) == 2
    # total number of duplicates as we got a keep=False argument
    # we still get an indicator column , overriding the indicator=False otherwise what is the point
    # note that the col2 has a blank and a space value and gets reported as blanks both, but not
    # as duplicates as we are not stripping them for the duplicate count.
    # TODO: #26 TBC if we want to strip the blanks as an option, before looking for dupes.
    dfdupes = check_duplicates(
        df, ["col4"], keep=False, indicator=False, return_non_duplicates=False
    )
    assert len(dfdupes) == 4
def _toposort(data):
"""Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependences, each subsequent set consists of items that depend upon
items in the preceding sets.
"""
# Special case empty input.
if len(data) == 0:
return
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())
# Add empty dependences where needed.
data.update({item: set() for item in extra_items_in_deps})
while True:
ordered = sorted(set(item for item, dep in data.items() if len(dep) == 0))
if not ordered:
break
for item in ordered:
yield item
data.pop(item, None)
for dep in sorted(data.values()):
dep -= set(ordered)
if len(data) != 0:
from ..exceptions import CondaValueError
msg = 'Cyclic dependencies exist among these items: {}'
raise CondaValueError(msg.format(' -> '.join(repr(x) for x in data.keys()))) | 5,329,953 |
def compute_annualized_volatility(srs: pd.Series) -> float:
    """
    Annualize sample volatility.

    :param srs: series with datetimeindex with `freq`
    :return: annualized volatility (stdev)
    """
    # NaNs are replaced with zeros before measuring dispersion.
    filled = hdataf.apply_nan_mode(srs, mode="fill_with_zero")
    points_per_year = hdataf.infer_sampling_points_per_year(filled)
    # Scale the per-period stdev by sqrt(periods per year).
    result = np.sqrt(points_per_year) * filled.std()
    return cast(float, result)
async def on_ready():
    """
    Display a message once the bot has successfully connected to Discord.
    """
    message = f'{bot.user} has successfully connected to Discord'
    print(message)
def json_sanitized(value, stringify=stringified, dt=str, none=False):
    """
    Recursively convert `value` into a JSON-serializable object.

    Args:
        value: Value to sanitize
        stringify (callable | None): Function to use to stringify non-builtin types
        dt (callable | None): Function to use to stringify dates
        none (str | bool): States how to treat `None` keys/values
          - string: Replace `None` *keys* with given string (keep `None` *values* as-is)
          - False (default): Filter out `None` keys/values
          - True: No filtering, keep `None` keys/values as-is

    Returns:
        An object that should be json serializable
    """
    # None and builtin scalar types are already serializable.
    if value is None or is_basetype(value):
        return value
    if hasattr(value, "to_dict"):
        # Objects exposing to_dict() supply their own dict conversion.
        value = value.to_dict()
    elif isinstance(value, set):
        # Sets are unordered and not JSON serializable; emit a sorted list.
        value = sorted(value)
    if isinstance(value, dict):
        # Recurse into keys and values.  A string `none` substitutes for
        # None *keys* only; a falsy `none` drops None keys/values entirely.
        return dict(
            (
                json_sanitized(none if k is None and isinstance(none, str) else k, stringify=stringify, dt=dt, none=none),
                json_sanitized(v, stringify=stringify, dt=dt, none=none),
            )
            for k, v in value.items()
            if none or (k is not None and v is not None)
        )
    if is_iterable(value):
        return [json_sanitized(v, stringify=stringify, dt=dt, none=none) for v in value]
    if isinstance(value, datetime.date):
        # Dates pass through unchanged when dt is None, else are converted.
        return value if dt is None else dt(value)
    if stringify is None:
        return value
    # Fallback: stringify any remaining non-builtin object.
    return stringify(value)
def rescan_organization_task(task, org, allpr, dry_run, earliest, latest):
    """A bound Celery task to call rescan_organization."""
    progress_meta = {"org": org}
    # Mark the task as started so clients polling its state see progress.
    task.update_state(state="STARTED", meta=progress_meta)
    page_cb = PaginateCallback(task, progress_meta)
    return rescan_organization(org, allpr, dry_run, earliest, latest, page_callback=page_cb)
def python(cc):
    """Format the character for a Python string."""
    code = ord(cc)
    if 0x20 <= code <= 0x7f:
        # Printable ASCII range is emitted verbatim.
        return cc
    # Everything else becomes an escape sequence; characters beyond the
    # BMP need the 8-digit \U form.
    return "\\U%08x" % code if code > 0xFFFF else "\\u%04x" % code
def stop_execution(execution_id):
    """
    Stop the current workflow execution.
    swagger_from_file: docs/stop.yml
    """
    # The workflow custom object is addressed by its name.
    name = execution_id
    delete_options = kubernetes.client.V1DeleteOptions()
    # Grace period (seconds) before deletion; zero would mean immediate.
    grace_period_seconds = 56
    # Deprecated in favour of PropagationPolicy; orphans dependents when
    # True (mutually exclusive with propagation_policy).
    orphan_dependents = True
    # Garbage-collection policy; mutually exclusive with OrphanDependents.
    propagation_policy = 'propagation_policy_example'
    try:
        api_response = v1alpha1.delete_namespaced_workflow(
            namespace, name, body=delete_options,
            grace_period_seconds=grace_period_seconds,
            orphan_dependents=orphan_dependents,
            propagation_policy=propagation_policy)
        logging.info(api_response)
    except ApiException as e:
        print("Exception when calling CustomObjectsApi->delete_namespaced_custom_object: %s\n" % e)
    return 'Successfully delete', 200
def reshape_signal_batch(signal):
    """Convert the signal into a standard batch shape for use with cochleagram.py
    functions, where the first dimension is the batch dimension.

    Args:
      signal (array): The sound signal (waveform) in the time domain; either a
        flattened array (n_samples,), a row vector (1, n_samples), a column
        vector (n_samples, 1), or a 2D [batch, waveform] matrix.

    Returns:
      array: 2D version of the signal with the first dimension as the batch
        dimension.

    Raises:
      ValueError: If the input `signal` has an invalid shape.
    """
    if signal.ndim == 1:
        # Flat array -> single-row batch.
        return signal.reshape((1, -1))
    if signal.ndim == 2:
        if signal.shape[1] == 1:
            # Column vector -> single-row batch.
            return signal.reshape((1, -1))
        # Row vector or [batch, waveform] matrix: already batch-first.
        return signal
    raise ValueError('signal should be flat array, row or column vector, or a 2D matrix with dimensions [batch, waveform]; found %s' % signal.ndim)
def tryallmedoids(dmat, c, weights=None, potential_medoid_inds=None, fuzzy=True, fuzzyParams=('FCM', 2)):
    """Brute force optimization of k-medoids or fuzzy c-medoids clustering.
    To apply to points in euclidean space pass dmat using:
    dmat = sklearn.neighbors.DistanceMetric.get_metric('euclidean').pairwise(points_array)

    Parameters
    ----------
    dmat : array-like of floats, shape (n_samples, n_samples)
        Pairwise distance matrix of observations to cluster.
    c : int
        Number of clusters to form as well as the number of medoids to generate.
    weights : array-like of floats, shape (n_samples)
        Relative weights for each observation in inertia computation.
    potential_medoid_inds : array of indices
        If specified, then medoids are constrained to be chosen from this array.
    fuzzy : boolean
        If True, use fuzzy inertia function,
        otherwise use crisp cluster definition.
    fuzzyParams : tuple of (method str/int, param)
        Method and parameter for computing fuzzy membership matrix.

    Returns
    -------
    medoids : float ndarray with shape (c)
        Indices into dmat of the medoids with minimal inertia.
    membership or labels : float ndarray with shape (n_samples, c) or shape (n_samples,)
        Each row contains the membership of a point to each of the clusters
        OR with hard clusters, the medoid/cluster index of each point."""
    # Fuzzy inertia uses plain distances; crisp inertia uses squared ones.
    if fuzzy:
        wdmat = precomputeWeightedDmat(dmat, weights, squared=False)
    else:
        wdmat = precomputeWeightedDmat(dmat, weights, squared=True)
    N = dmat.shape[0]
    if potential_medoid_inds is None:
        potential_medoid_inds = np.arange(N)
    # NOTE(review): scipy.misc.comb was removed in SciPy >= 1.3; modern
    # SciPy provides scipy.special.comb — confirm the pinned version.
    combinations = scipy.misc.comb(len(potential_medoid_inds), c)
    if combinations > 1e7:
        print("Too many combinations to try: %1.1g > 10M" % combinations)
    bestInertia = None
    bestMedoids = None
    bestMembership = None
    for medInds in itertools.combinations(list(range(len(potential_medoid_inds))), c):
        medoids = potential_medoid_inds[np.array(medInds)]
        if fuzzy:
            membership = computeMembership(dmat, medoids, method=fuzzyParams[0], param=fuzzyParams[1])
        else:
            # Crisp assignment: one-hot row per sample for its nearest medoid.
            membership = np.zeros((N, c))
            membership[np.arange(N), np.argmin(dmat[:, medoids], axis=1)] = 1.
        inertia = (wdmat[:, medoids] * membership).sum()
        if bestInertia is None or inertia < bestInertia:
            bestMedoids = medoids
            bestInertia = inertia
            bestMembership = membership
    # Bug fix: previously returned the medoids/membership of the *last*
    # combination evaluated rather than the best one found.
    if not fuzzy:
        return bestMedoids, np.argmax(bestMembership, axis=1)
    return bestMedoids, bestMembership
def deleteFactoid(appStore, key, number):
    """
    Delete a factoid.

    @type appStore: C{axiom.store.Store}

    @type key: C{unicode}
    @param key: Factoid key

    @type number: C{int} or C{None}
    @param number: The factoid index to delete or C{None} to delete all
        factoids associated with C{key}
    """
    factoids = getFactoids(appStore, key)
    if number is not None:
        # Select a single factoid item by its index in the result list.
        factoids = list(factoids)[number]
    # NOTE(review): when `number` is None this calls deleteFromStore() on
    # the query object itself (bulk delete); otherwise on a single item.
    # Relies on Axiom exposing deleteFromStore on both — confirm.
    factoids.deleteFromStore()
def human_timedelta(s: Union[int, float]) -> str:
    """Convert a timedelta from seconds into a string using a more sensible unit.

    Args:
        s: Amount of seconds

    Returns:
        A string representing `s` seconds in an easily understandable way
    """
    # (threshold, divisor, label) triples, checked from largest to smallest;
    # durations between five minutes and one hour are reported in minutes.
    units = (
        (MONTH_SECONDS, MONTH_SECONDS, "month(s)"),
        (DAY_SEC, DAY_SEC, "day(s)"),
        (HOUR_SEC, HOUR_SEC, "hour(s)"),
        (_FIVE_MINUTE_SEC, MINUTE_SEC, "minute(s)"),
    )
    for threshold, divisor, label in units:
        if s >= threshold:
            return f"{round(s / divisor)} {label}"
    return f"{round(s)} second(s)"
def rate_cell(cell, board, snake, bloom_level=4):
    """ rates a cell based on proximity to other snakes, food, the edge of the board, etc """
    # Collect every cell in a square "bloom" neighbourhood around `cell`,
    # together with a distance-based discount (minimum 1 to avoid /0).
    neighbourhood = []
    for dx in range(-bloom_level, bloom_level + 1):
        for dy in range(-bloom_level, bloom_level + 1):
            pos = (cell[0] + dx, cell[1] + dy)
            discount = dist(pos, cell)
            if discount == 0:
                discount = 1
            neighbourhood.append((pos, discount))
    # Weighting per cell kind: EMPTY=0, SNAKE=1, FOOD=2, SPOILED=3,
    # own body=4, outside the board=5.
    weightings = [EMPTY_RATING, ENEMY_RATING, FOOD_RATING, SPOILED_RATING, BODY_RATING, OUT_SIDE_BOARD_RATING]
    total = 0
    for pos, discount in neighbourhood:
        kind = 5  # outside the board
        if board.inside(pos):
            kind = board.get_cell(pos)
        if pos in snake.coords:
            kind = 4  # our own body
        total += weightings[kind] / discount
    return total
def clean_tmp_data_from_device(device, remove_page_data=True, remove_pids_data=True):
    """Removes collected pagedata from connected android device

    :param device: adb serial of the target android device
    :param remove_page_data: True if page data has to be removed, False if not
    :param remove_pids_data: True if pids data has to be removed, False if not
    :return: None
    """
    targets = []
    if remove_page_data:
        targets.append(f'/data/local/testing/{"*_page_data"}')
    if remove_pids_data:
        targets.append(f'/data/local/testing/{"*.csv"}')
    # Best-effort removal: a failing adb rm (e.g. no matching files)
    # aborts the remaining deletions silently, as before.
    try:
        for target in targets:
            exec_command(f'adb -s {device}', 'shell', 'rm', target)
    except CalledProcessError:
        pass
def edges_are_same(a, b):
    """
    Return True when two (src, tgt, val) tuples describe the same
    directed edge, i.e. share both source and target.

    Args:
        a, b: tuple elements of the form (src, tgt, val)

    Returns:
        True or False
    """
    return a[0:2] == b[0:2]
def misc3d():
    """Miscellaneous 3D sizes."""
    for params in default_length_params("misc3d", lengths['misc3d'], 1):
        yield params
def conv_mrf(A, B):
    """
    Convolve the MRF pairwise prior with per-image heatmaps.

    :param A: conv kernel 1 x 120 x 180 x 1 (prior)
    :param B: input heatmaps: hps.batch_size x 60 x 90 x 1 (likelihood)
    :return: C is hps.batch_size x 60 x 90 x 1
    """
    # Move the batch axis into the filter position: [h, w, 1, batch].
    B = tf.transpose(B, [1, 2, 3, 0])
    B = tf.reverse(B, axis=[0, 1])  # [h, w, 1, b], we flip kernel to get convolution, and not cross-correlation
    # conv between 1 x 120 x 180 x 1 and 60 x 90 x 1 x ? => 1 x 61 x 91 x ?
    C = tf.nn.conv2d(A, B, strides=[1, 1, 1, 1], padding='VALID')  # 1 x 61 x 91 x ?
    # C = C[:, :hm_height, :hm_width, :] # 1 x 60 x 90 x ?
    # NOTE(review): resizing (interpolation) is used instead of the
    # cropping commented out above — confirm interpolation is intended.
    C = tf.image.resize_images(C, [hm_height, hm_width])
    # Restore the batch-major layout: [batch, h, w, 1].
    C = tf.transpose(C, [3, 1, 2, 0])
    return C
def get_alert_by_id_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
    """Get alert by id and return outputs in Demisto's format

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Outputs (human readable, context, raw response)
    """
    alert_id = args.get('id')
    headers = argToList(args.get('headers'))
    raw_response = client.get_alert_by_id(_id=alert_id)
    # Guard clause: nothing found for this id.
    if not raw_response:
        return f'{INTEGRATION_NAME} - Could not find any alerts.', {}, {}
    context_entry = create_context_result(raw_response, ALERTS_TRANS)
    context = {
        f'{INTEGRATION_CONTEXT_NAME}.Alert(val.ID && val.ID === obj.ID)': context_entry
    }
    title = f'{INTEGRATION_NAME} - Alert {alert_id}:'
    human_readable = tableToMarkdown(title, context_entry, headers=headers, removeNull=True)
    return human_readable, context, raw_response
def close_issues() -> list[res.Response]:
    """Batch close issues on GitHub."""
    settings = _get_connection_settings(CONFIG_MANAGER.config)
    try:
        service = ghs.GithubService(settings)
    except ghs.GithubServiceError as err:
        return [res.ResponseFailure(res.ResponseTypes.RESOURCE_ERROR, err)]
    obj = "issue"
    # Ask the user which issue titles to match across the selected repos.
    title_query = p.InquirerPrompter.query_by_title(
        CONFIG_MANAGER.config.github_selected_repos, obj
    )
    request = il.build_list_request(
        filters={
            "obj__eq": obj,
            "state__eq": "open",
            "title__contains": title_query,
        }
    )
    return ghcli.GhCloseIssueUseCase(CONFIG_MANAGER, service).execute(request)
def _invert_lambda(node: tn.Node) -> tn.Node:
    """Invert a diagonal lambda matrix. """
    lam = node.get_tensor()
    assert _is_diagonal_matrix(lam)
    # The inverse of a diagonal matrix is the element-wise reciprocal of
    # its diagonal.
    return tn.Node(np.diag(1 / lam.diagonal()))
def test_with_list_value():
    """Verify that urlencode works with list value."""
    payload = {'a': {"b": [1, 2, 3]}}
    # Each list element becomes its own `a[b][]=` pair.
    assert urllib.unquote(urlencode(payload)) == "a[b][]=1&a[b][]=2&a[b][]=3"
def map_field_name_to_label(form):
    """Build a mapping of field name -> field label for a form.

    :param django.forms.Form form: Instance of ``django.forms.Form``.
    :return dict:
    """
    return {name: field.label for name, field in form.base_fields.items()}
def teststrip(path1):
    """
    Compare the results from serial running to the results from parallel
    running to judge whether the results from parallel running is right
    or not. The results are gotten by stripe decomposition

    Parameters
    ----------
    path1: the path of the folder

    Returns
    ----------
    void
    """
    print("----------------Test stripe decomposition start-------------------")
    # (serial file, MPI file, label) triples; the backslashes are literal
    # Windows path separators, exactly as in the original data files.
    cases = (
        ('\output_Gradient_100x100.dat', '\Grid_MPI_Gradient_100x100.dat',
         "Test Neumann boundry size 100x100: "),
        ('\output_Gradient_100x50.dat', '\Strip_MPI_Gradient_100x50.dat',
         "Test Neumann boundry size 100x50: "),
        ('\output_Dir_100x100.dat', '\Strip_MPI_Dir_100x100.dat',
         "Test Dirichlet boundry size 100x100: "),
        ('\output_Dir_100x50.dat', '\Strip_MPI_Dir_100x50.dat',
         "Test Dirichlet boundry size 100x50: "),
    )
    for serial_file, mpi_file, label in cases:
        serial_result = np.loadtxt(path1 + serial_file)
        mpi_result = np.loadtxt(path1 + mpi_file)
        print(label, judgeSame(serial_result, mpi_result))
    print("----------------Test stripe decomposition end-------------------")
def solve_compound_rec(
    recurrence_func: Callable,
    parameter_list: List[float],
    std_of_compound_dist: float,
    max_mRNA_copy_number: int,
    recursion_length: int,
    index_compound_parameter: int = 3,
    compounding_distribution: str = "normal",
    decimal_precision: int = 100,
) -> List[float]:
    """Compound distribution.

    Calls solve_compound() to obtain recurrence coefficients h_i and
    computes the probability distribution using invgenfunc().

    Arguments:
        recurrence_func: the recurrence relation function over which to compound
        parameter_list: list of parameters accepted by solFunc
        std_of_compound_dist: standard deviation of the compounding distribution
        max_mRNA_copy_number: maximal mRNA copy number. The distribution is evaluated for n=0:N-1
        recursion_length: recursion length. The number of terms evaluated recursively
        index_compound_parameter: index of the parameter over which the solution is compunded
        compounding_distribution: string specifying the type of compounding distribution
        decimal_precision: integer specifying the precision used by the Decimal class

    Returns:
        probability distribution for mRNA copy numbers n=0:N-1.

    Raises:
        AssertionError: distribution given not supported
    """
    assert compounding_distribution in {"normal", "lognormal", "gamma"}
    recurrence_coeffs = solve_compound(
        recurrence_func,
        parameter_list,
        std_of_compound_dist,
        recursion_length,
        index_compound_parameter,
        compounding_distribution,
        compound_over_recurrence_terms=True,
        decimal_precision=decimal_precision,
    )
    # Invert the generating function term by term for each copy number.
    distribution = []
    for copy_number in range(max_mRNA_copy_number):
        distribution.append(
            invgenfunc(recurrence_coeffs, copy_number, precision=decimal_precision)
        )
    return distribution
def create_brand():
    """
    Create a new brand from the JSON payload of the current request.

    Rejects the request when the parameters are invalid or a brand with
    the same name already exists.
    :return: Status of the request
    """
    validation_error = check_brand_parameters(request)
    if validation_error is not None:
        return validation_error
    name = request.json[NAME]
    # Reject duplicates: brand names must be unique.
    if Brand.query.filter(Brand.name == name).first() is not None:
        return BadRequestResponse('Existing brand').make()
    # Build the new Brand, populate it from the request, and persist it.
    new_brand = Brand(name=name)
    assign_parameters_to_brand(new_brand, request)
    new_brand.save()
    return DataResponse({RESULTS: new_brand.to_json()}).make()
def test_trace_parent_wrong_version_255(caplog):
    """Version FF or 255 is explicitly forbidden"""
    header = "ff-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-03"
    with caplog.at_level("DEBUG", "elasticapm.utils"):
        parsed = TraceParent.from_string(header)
    # Parsing must fail and the rejection must be logged.
    first_record = caplog.records[0]
    assert parsed is None
    assert first_record.message == "Invalid version field, value 255"
def test_draw_arrow_uniform():
    """
    Draw arrows that all share one scalar radius.

    The radius is just an example property here, used to check that
    uniform values are properly arrayfied.
    """
    reset()
    start_points = [(0, 0, 0), (1, 0, 1)]
    directions = [(1, 0, 0), (1, 0, 1)]
    draw_arrows(start_points, directions, radius=0.1)
def print_board(board):
    """Print the state of the board.

    Each row is rendered as its squares (via get_square_representation)
    joined with ' | '; rows are separated by a dashed line. Works for the
    3x3 tic-tac-toe board and generalizes to any rectangular board.
    """
    rows = [
        ' | '.join(get_square_representation(square) for square in row)
        for row in board
    ]
    print('\n---------\n'.join(rows))
def purge_reports():
    """Delete unverified reports older than 24 hours."""
    expired = GeneralReport.objects.filter(
        is_verified=False, date_created__lte=time_threshold(hours=24)
    )
    expired.delete()
def update(args):
    """Traverse third-party repos / submodules and emit version-strings.

    Writes src/xnvme_3p_ver.c under args.repos containing a C string array
    of per-project versions (guarded by #ifdef where a guard is configured).
    Returns 0 on success, 1 if any project traversal failed.
    """
    failed_projects = []
    lines = [
        "///< This file is autogenerated by 'scripts/xnvme_3p.py'\n",
        "const char *xnvme_3p_ver[] = {\n",
    ]
    for project, err in traverse_projects(args):
        print("project: %s, success: %r" % (project["name"], not err))
        if err:
            failed_projects.append(project)
            continue
        guard = args.guards[project["name"]]
        if guard:
            # Guarded entry: emit the version when the guard is defined,
            # otherwise a ";NOSYS" placeholder.
            lines.append("#ifdef %s\n" % guard)
            lines.append('\t"%s",\n' % (project["ver"]))
            lines.append("#else\n")
            lines.append('\t"%s;NOSYS",\n' % (project["name"]))
            lines.append("#endif\n")
        else:
            lines.append('\t"%s",\n' % (project["ver"]))
    lines.append("\t0,\t///< For array-termination\n")
    lines.append("};\n")
    if failed_projects:
        print("Got failures -- not updating")
        return 1
    with open(os.path.join(args.repos, "src", "xnvme_3p_ver.c"), "wt") as vfd:
        vfd.write("".join(lines))
    return 0
def feature_contained(boundary: geo, **kwargs):
    """Analyse containment for all features within a single-layer vector file according to a Geometry
    and return multiple GeoJSON files."""
    geom = kwargs["geom"]
    prop = kwargs["prop"]
    # Only polygon geometries receive a containment verdict; everything
    # else passes through unchanged.
    if isinstance(geom, geo.Polygon):
        prop["valid"] = boundary.contains(geom)
    return geom, prop
def compute(n=26):
    """Benchmark helper: count up to 2**n one step at a time.

    Deliberately uses a Python-level loop so the elapsed wall-clock time
    reflects interpreter speed; returns the elapsed time in seconds.
    """
    t0 = time.time()
    total = 0
    for _ in range(2 ** n):
        total += 1
    elapsed = time.time() - t0
    print(f'Result {total} in {elapsed} seconds!')
    return elapsed
def _real_to_rational(expr, tolerance=None, rational_conversion='base10'):
    """
    Replace all reals in expr with rationals.
    Examples
    ========
    >>> from sympy import Rational
    >>> from sympy.simplify.simplify import _real_to_rational
    >>> from sympy.abc import x
    >>> _real_to_rational(.76 + .1*x**.5)
    sqrt(x)/10 + 19/25
    If rational_conversion='base10', this uses the base-10 string. If
    rational_conversion='exact', the exact, base-2 representation is used.
    >>> _real_to_rational(0.333333333333333, rational_conversion='exact')
    6004799503160655/18014398509481984
    >>> _real_to_rational(0.333333333333333)
    1/3
    """
    expr = _sympify(expr)
    inf = Float('inf')
    p = expr
    # Map from each Float atom to its rational replacement; applied in one
    # simultaneous substitution at the end.
    reps = {}
    reduce_num = None
    if tolerance is not None and tolerance < 1:
        # Sub-unit tolerance bounds the denominator of the approximation:
        # ceil(1/tolerance) is the largest denominator allowed.
        reduce_num = ceiling(1/tolerance)
    for fl in p.atoms(Float):
        key = fl
        if reduce_num is not None:
            r = Rational(fl).limit_denominator(reduce_num)
        elif (tolerance is not None and tolerance >= 1 and
                fl.is_Integer is False):
            # tolerance >= 1: snap the float to the nearest multiple of
            # tolerance before rationalizing.
            r = Rational(tolerance*round(fl/tolerance)
                ).limit_denominator(int(tolerance))
        else:
            if rational_conversion == 'exact':
                # Use the float's exact base-2 value as a rational.
                r = Rational(fl)
                reps[key] = r
                continue
            elif rational_conversion != 'base10':
                raise ValueError("rational_conversion must be 'base10' or 'exact'")
            r = nsimplify(fl, rational=False)
            # e.g. log(3).n() -> log(3) instead of a Rational
            if fl and not r:
                r = Rational(fl)
            elif not r.is_Rational:
                if fl == inf or fl == -inf:
                    r = S.ComplexInfinity
                elif fl < 0:
                    # Rationalize the magnitude scaled by its decimal
                    # order, then restore the sign.
                    fl = -fl
                    d = Pow(10, int((mpmath.log(fl)/mpmath.log(10))))
                    r = -Rational(str(fl/d))*d
                elif fl > 0:
                    d = Pow(10, int((mpmath.log(fl)/mpmath.log(10))))
                    r = Rational(str(fl/d))*d
                else:
                    r = Integer(0)
        reps[key] = r
    return p.subs(reps, simultaneous=True)
def insert_videos(video_lst):
    """Fetch statistics for each video ID and upsert them into d_videos.

    Target columns: id (auto) | video_id | name | views | likes | comments | date
    """
    crawler = ApiCrawler()
    db = DBConnector()
    values = []
    for video_id in video_lst:
        # get_stats returns (name, view_count, like_count, comment_count, date)
        name, view_count, like_count, comment_count, date = crawler.get_stats(video_id)
        value = tuple([video_id, name, view_count, like_count, comment_count, date])
        values.append(value)
    # str(values)[1:-1] strips the outer list brackets so the repr of the
    # tuples serves as the SQL VALUES list.
    # NOTE(review): this relies on Python repr matching SQL literal syntax and
    # performs no escaping -- a video name containing a quote breaks the
    # statement (SQL-injection risk). Prefer a parameterized executemany if
    # DBConnector supports it.
    formatted_values = str(values)[1:-1]
    db.execute_sql_query(f"INSERT INTO testdatabase.d_videos(video_id, name, views, likes, comments, date) VALUES {formatted_values} ON DUPLICATE KEY UPDATE id=id, name=values(name), views=values(views), comments=values(comments), likes=values(likes), date=values(date)")
    print("!Successfully loaded into d_videos!")
def run(block, epsilon, ratio, prng, alpha=2, beta=1.2, gamma=1.0, theta=None, verbose=False):
    """Run HDPView
    1st phase, divide blocks.
    2nd phase, perturbation.
    Prepare parameters and execute HDPView
    Args:
        block (CountTable): block
        epsilon (float): privacy budget
        ratio (float): budget ratio of block division and perturbation, 0 to 1 value
        prng (np.random.RandomState): random state
        alpha (float), beta(float), gamma(float)
        theta: noise threshold; defaults to 1/epsilon_p when None
        verbose (bool)
    Returns:
        (NoisedCountTable, list): perturbed table and per-block results
    """
    seed = prng.randint(0, 2949672950)
    block.set_random(seed)
    if verbose:
        print("seed: ", seed)
    n_dash = block.size()
    # Recursion-depth bound: kappa = ceil(beta * log2(|block|)).
    kappa = np.ceil(np.log2(n_dash)*beta)
    # Split the privacy budget between partitioning (epsilon_r) and
    # perturbation (epsilon_p).
    epsilon_r = epsilon * ratio
    epsilon_p = epsilon * (1 - ratio)
    if theta is None:
        theta = 1/epsilon_p
    # Per-cut budget and the noise-scale parameters used by the bisection.
    epsilon_cut = (1 - gamma) * epsilon_r / kappa
    lamb = ((2 * alpha - 1)/(alpha - 1) + 1) * (2 / (gamma * epsilon_r))
    delta = lamb*np.log(alpha)
    # prepare shared memories for parallelization
    manager = multiprocessing.Manager()
    block_queue = manager.Queue()
    block_queue.put(block)
    block_result_list = []
    MAX_PROCESS = multiprocessing.cpu_count()-1
    pool = multiprocessing.Pool(MAX_PROCESS)
    # Drain the work queue in rounds: each round dispatches every queued
    # block to recursive_bisection, which may enqueue further sub-blocks;
    # stop once a full round leaves the queue empty.
    while True:
        async_results = []
        while not block_queue.empty():
            result = pool.apply_async(
                recursive_bisection, (block_queue.get(), block_queue, epsilon_cut, kappa, theta, lamb, delta, verbose)
            )
            async_results.append(result)
        results = list(itertools.chain.from_iterable([ r.get() for r in async_results ]))
        block_result_list.extend(results)
        if block_queue.empty():
            break
    block_result_list.sort(key=functools.cmp_to_key(range__gt__))
    for block_result in block_result_list:
        mean, ae = calculate_mean_and_aggregation_error(block, block_result.domain_dict)
        block_result.mean = mean
        block_result.aggregation_error = ae
        # Laplace noise with scale 1/epsilon_p for each block.
        pe = prng.laplace(0.0, 1.0 / epsilon_p)
        block_result.perturbation_error = pe
    return NoisedCountTable.from_count_table(block, block_result_list), block_result_list
def test_qubit_state_bra():
    """Test sum_i alpha_i <i| for TLS"""
    i = IdxSym('i')
    alpha = IndexedBase('alpha')
    alpha_i = alpha[i]
    hs_tls = LocalSpace('tls', basis=('g', 'e'))
    # Single summand: the adjoint of alpha_i |i> on the two-level space.
    term = alpha_i * BasisKet(FockIndex(i), hs=hs_tls).dag()
    expr = KetIndexedSum.create(
        term, IndexOverFockSpace(i, hs=hs_tls))
    assert IndexOverFockSpace(i, hs=hs_tls) in expr.ket.args
    assert ascii(expr) == "Sum_{i in H_tls} alpha_i * <i|^(tls)"
    # The summand still has the bound index i free ...
    assert expr.ket.term.free_symbols == set([i, symbols('alpha')])
    # ... but the sum as a whole does not: i is bound by the summation.
    assert expr.free_symbols == set([symbols('alpha'), ])
    assert expr.ket.variables == [i]
    assert expr.space == hs_tls
    assert len(expr.ket.args) == 2
    assert len(expr.ket.operands) == 1
    assert expr.ket.args[0] == term.ket
    assert expr.ket.term == term.ket
    assert len(expr.kwargs) == 0
    # Expanding the sum over the basis and renaming the numeric-indexed
    # coefficients yields the explicit two-term superposition bra.
    expr_expand = Bra.create(expr.ket.doit().substitute(
        {alpha[0]: alpha['g'], alpha[1]: alpha['e']}))
    assert expr_expand == (
        alpha['g'] * BasisKet('g', hs=hs_tls).dag() +
        alpha['e'] * BasisKet('e', hs=hs_tls).dag())
    assert (
        ascii(expr_expand) == 'alpha_e * <e|^(tls) + alpha_g * <g|^(tls)')
def wait_for_view(class_name):
    """
    Wait for a View matching the specified class. Default timeout is 20 seconds.
    :param class_name: the {@link View} class to wait for
    :return: {@code true} if the {@link View} is displayed and {@code false} if it is not displayed before the timeout
    """
    solo = get_solo()
    return solo.wait_for_view(class_name)
def test_list_unsigned_int_min_length_4_nistxml_sv_iv_list_unsigned_int_min_length_5_2(mode, save_output, output_format):
    """
    Type list/unsignedInt is restricted by facet minLength with value 10.
    """
    data_dir = "nistData/list/unsignedInt/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-list-unsignedInt-minLength-5.xsd",
        instance=data_dir + "NISTXML-SV-IV-list-unsignedInt-minLength-5-2.xml",
        class_name="NistschemaSvIvListUnsignedIntMinLength5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def clear_session():
    """
    Clears all non-login, non-csrf security keys from the session.
    NOTE: this must be in the views.py even though it doesn't have
    a route, because it needs access to the session object.
    """
    keep = {'_user_id', '_fresh', '_flashes', '_id', 'csrf_token'}
    # Snapshot the removable keys first so we never mutate the session
    # while iterating over it.
    for key in [k for k in session.keys() if k not in keep]:
        session.pop(key, None)
def make_v2_environ(event: dict, environ: dict):
    """
    Populate a WSGI environ dict from an HTTP API Gateway (v2) event.
    Note: This function mutates the incoming environ object.
    """
    http = event['requestContext']['http']
    query = event['queryStringParameters']
    environ['REQUEST_METHOD'] = http['method']
    environ['PATH_INFO'] = http['path']
    # Absent/None query parameters map to an empty query string.
    environ['QUERY_STRING'] = urlencode(query) if query else ''
    environ['REMOTE_ADDR'] = http['sourceIp']
    environ['SERVER_PROTOCOL'] = http['protocol']
def _create_table():
"""helper for crc calculation"""
table = []
for i in range(256):
k = i
for _ in range(8):
if k & 1:
k = (k >> 1) ^ 0xEDB88320
else:
k >>= 1
table.append(k)
return table | 5,329,992 |
def average_syllables(verses):
    """
    Takes a list of verses
    Returns the mean number of syllables among input verses
    """
    total_syllables = sum(count_syllables(verse) for verse in verses)
    return total_syllables / len(verses)
def _create_subplots_if_needed(ntotal,
                               ncols=None,
                               default_ncols=1,
                               fieldorder='C',
                               avoid_single_column=False,
                               sharex=False,
                               sharey=False,
                               subfigsize=(12,3),
                               wspace=0.2,
                               hspace=0.2,
                               fig=None,
                               ax=None
                               ):
    """
    Auxiliary function to create fig and ax

    If fig and ax are None:
    - Set nrows and ncols based on ntotal and specified ncols,
      accounting for fieldorder and avoid_single_column
    - Create fig and ax with nrows and ncols, taking into account
      sharex, sharey, subfigsize, wspace, hspace

    If fig and ax are not None:
    - Try to determine nrows and ncols from ax
    - Check whether size of ax corresponds to ntotal

    Returns (fig, ax, nrows, ncols).
    """
    if ax is None:
        if not ncols is None:
            # Use ncols if specified and appropriate
            assert(ntotal%ncols==0), 'Error: Specified number of columns is not a true divisor of total number of subplots'
            nrows = int(ntotal/ncols)
        else:
            # Defaut number of columns
            ncols = default_ncols
            nrows = int(ntotal/ncols)
            if fieldorder=='F':
                # Swap number of rows and columns
                nrows, ncols = ncols, nrows
            if avoid_single_column and ncols==1:
                # Swap number of rows and columns
                nrows, ncols = ncols, nrows
        # Create fig and ax with nrows and ncols
        fig,ax = plt.subplots(nrows=nrows,ncols=ncols,sharex=sharex,sharey=sharey,figsize=(subfigsize[0]*ncols,subfigsize[1]*nrows))
        # Adjust subplot spacing
        fig.subplots_adjust(wspace=wspace,hspace=hspace)
    else:
        # Make sure user-specified axes has appropriate size
        assert(np.asarray(ax).size==ntotal), 'Specified axes does not have the right size'
        # Determine nrows and ncols in specified axes
        if isinstance(ax,mpl.axes.Axes):
            nrows, ncols = (1,1)
        else:
            try:
                nrows,ncols = np.asarray(ax).shape
            except ValueError:
                # ax array has only one dimension
                # Determine whether ax is single row or single column based
                # on individual ax positions x0 and y0
                x0s = [axi.get_position().x0 for axi in ax]
                y0s = [axi.get_position().y0 for axi in ax]
                if all(x0==x0s[0] for x0 in x0s):
                    # All axis have same relative x0 position
                    nrows = np.asarray(ax).size
                    ncols = 1
                elif all(y0==y0s[0] for y0 in y0s):
                    # All axis have same relative y0 position
                    nrows = 1
                    ncols = np.asarray(ax).size
                else:
                    # More complex axes configuration,
                    # currently not supported
                    # NOTE(review): InputError is not a Python builtin;
                    # confirm it is defined/imported elsewhere in this module.
                    raise InputError('could not determine nrows and ncols in specified axes, complex axes configuration currently not supported')
    return fig, ax, nrows, ncols
def template_file_counter(session, templates, fetch_count=False):
    """Create template file counter."""
    if fetch_count:
        counts = TemplatesDAO.query_file_counts(session=session, templates=templates)
        fallback = 0
    else:
        # Counting disabled: every lookup falls through to None.
        counts = {}
        fallback = None
    def counter(template: Template) -> int:
        """Get matched files count for the template."""
        return counts.get(template.id, fallback)
    return counter
def print_json(raw_output):
    """Serialize raw_output to JSON on stdout, then terminate with exit code 0."""
    sys.stdout.write(json.dumps(raw_output) + "\n")
    sys.exit(0)
def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
    """initialize score matrix and traceback matrix for global alignment

    Parameters
    ----------
    aln1 : list
        list of activities, which is the first sequence to be aligned
    aln2 : list
        list of activities, which is the second sequence to be aligned
    gap_open_penalty : int
    gap_extend_penalty : int

    Returns
    -------
    score_matrix: matrix
    traceback_matrix: matrix
    """
    shape = (len(aln2)+1, len(aln1)+1)
    score_matrix = np.zeros(shape)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and yields the same dtype.
    traceback_matrix = np.zeros(shape, dtype=int)
    traceback_matrix += _traceback_encoding['uninitialized']
    traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
    # cache some values for quicker access
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']
    # First column/row carry affine gap costs: one gap-open plus
    # (i-1) gap-extensions.
    for i in range(1, shape[0]):
        score_matrix[i, 0] = gap_open_penalty + ((i-1) * gap_extend_penalty)
        traceback_matrix[i, 0] = vgap
    for i in range(1, shape[1]):
        score_matrix[0, i] = gap_open_penalty + ((i-1) * gap_extend_penalty)
        traceback_matrix[0, i] = hgap
    return score_matrix, traceback_matrix
def parse_path_params(end_point_path):
    """Parse path parameters.

    Extracts each <placeholder> segment from the endpoint path and returns
    a list of (name, type) tuples, where type is 'integer' for the known
    numeric item types (Lnn, Zone, Port, Lin) and 'string' otherwise.
    """
    integer_item_types = ('Lnn', 'Zone', 'Port', 'Lin')
    parsed = []
    for segment in end_point_path.split('/'):
        # Only segments of the form <...> are parameters.
        if not (segment and segment.startswith('<') and segment.endswith('>')):
            continue
        # Title-case, then strip every non-alphanumeric character.
        name = re.sub('[^0-9a-zA-Z]+', '', segment.title())
        kind = 'integer' if name in integer_item_types else 'string'
        parsed.append((name, kind))
    return parsed
def read_pid_stat(pid="self"):
    """
    Returns system process stat information.

    :param pid: The process ID (or "self" for the current process).
    :returns: The CPU-time counters utime, stime, cutime, cstime
        (in clock ticks) from /proc/<pid>/stat.
    :rtype: dict
    """
    with open("/proc/%s/stat" % (pid,), "rb") as f:
        data = f.readline()
    # Field 2 (comm) is wrapped in parentheses and may itself contain
    # spaces, so a plain split() mis-indexes the later fields for such
    # processes. Split only the tail after the last ')', which starts at
    # field 3 (state).
    tail = data.rsplit(b")", 1)[1].split()
    # utime..cstime are fields 14-17 of the stat line; the tail begins at
    # field 3, so they sit at offsets 11-14.
    return {
        "utime": int(tail[11]),
        "stime": int(tail[12]),
        "cutime": int(tail[13]),
        "cstime": int(tail[14]),
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.