| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
def puzzles():
"""
    Return the non-empty TOP95 puzzle strings (one puzzle per line)
"""
return [l for l in TOP95.split("\n") if l]
|
def2fefe114fe2867f2d465dbe4b55ae74287e09
| 3,639,700
|
import pandas as pd
from datetime import datetime, timedelta
def test_declarative_sfc_obs_full(ccrs):
"""Test making a full surface observation plot."""
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 13)
obs.time_window = timedelta(minutes=15)
obs.level = None
obs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']
obs.locations = ['NW', 'SW', 'NE', 'C', 'W']
obs.colors = ['red', 'green', 'black', 'black', 'blue']
obs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',
'current_weather']
obs.vector_field = ('uwind', 'vwind')
obs.reduce_points = 1
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
    panel.area = (-124, -72, 20, 53)
    panel.area = 'il'  # overrides the CONUS extent above with the 'il' (Illinois) named area
panel.projection = ccrs.PlateCarree()
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [obs]
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
pc.draw()
return pc.figure
|
780b4462ba01ddcd20a1e87ef8637ca174293af8
| 3,639,701
|
def standardize_ants_data(ants_data, subject_ID_col):
""" Takes df from ANTs output and stadardizes column names for both left and right hemi
"""
ants_useful_cols = ['Structure Name']
ants_to_std_naming_dict = {}
ants_to_std_naming_dict['Structure Name'] = subject_ID_col #'SubjID'
for roi in ants_data.columns:
prefix = None
name_split = roi.split(' ')
if name_split[0] == 'left':
prefix = 'L'
if name_split[0] == 'right':
prefix = 'R'
if prefix is not None:
ants_useful_cols.append(roi)
std_name = prefix + '_' + ''.join(name_split[1:])
ants_to_std_naming_dict[roi] = std_name
ants_data_std = ants_data[ants_useful_cols].copy()
ants_data_std = ants_data_std.rename(columns=ants_to_std_naming_dict)
# Splitting SubjID column to ignore site name
_, ants_data_std[subject_ID_col] = ants_data_std[subject_ID_col].str.rsplit('_', 1).str
return ants_data_std
|
0f5216fd75244b0b9b60fdcdf05d63bfd02a2ed9
| 3,639,702
|
def make_gridpoints(bbox, resolution=1, return_coords=False):
"""It constructs a grid of points regularly spaced.
Parameters
----------
bbox : str, GeoDataFrame or dict.
Corresponds to the boundary box in which the grid will be formed.
If a str is provided, it should be in '(S,W,N,E)' format. With a
GeoDataFrame, we will use the coordinates of the extremities. Also
one can provide a dict with 'south', 'north', 'east', 'west'.
resolution : float, default is 1.
Space between the arbitrary points of resulting grid.
    return_coords : bool
        Whether to also return the coordinate meshgrids (lonv, latv).
"""
bbox_ = parse_bbox(bbox)
b_s, b_w, b_n, b_e = map(float, bbox_[1:-1].split(','))
nlon = int(ceil((b_e-b_w) / (resolution/111.32)))
nlat = int(ceil((b_n-b_s) / (resolution/110.57)))
lonv, latv = meshgrid(linspace(b_w, b_e, nlon), linspace(b_s, b_n, nlat))
gridpoints = pd.DataFrame(vstack([lonv.ravel(), latv.ravel()]).T,
columns=['lon', 'lat'])
gridpoints['geometry'] = gridpoints.apply(lambda x: Point([x['lon'], x['lat']]),
axis=1)
gridpoints = gpd.GeoDataFrame(gridpoints, crs={'init': 'epsg:4326'})
if isinstance(bbox, gpd.GeoDataFrame):
grid_ix = gpd.sjoin(gridpoints, bbox, op='intersects').index.unique()
gridpoints = gridpoints.loc[grid_ix]
if return_coords:
return gridpoints, lonv, latv
return gridpoints
|
8ccc5b257666cb8bd3a87662c6b021b4ed49ccb9
| 3,639,703
|
def removeElement_2(nums, val):
    """
    Remove all occurrences of val in-place using two pointers.
    Doesn't preserve the order of the remaining elements.
    Returns k, the number of remaining elements; they occupy nums[:k].
    """
    i = 0
    n = len(nums)
    while i < n:
        if nums[i] == val:
            # Overwrite nums[i] with the last element of the active range and
            # shrink the range; don't advance i, since the swapped-in value
            # still needs to be checked.
            nums[i] = nums[n - 1]
            n -= 1
        else:
            i += 1
    return n
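# Example (illustrative): remove the value 3 in-place.
nums = [3, 2, 2, 3]
k = removeElement_2(nums, 3)
print(k, nums[:k])  # 2 [2, 2]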
|
e1a836514a09fc925a49b144880960b057dfff80
| 3,639,704
|
def decrypt_ballot_shares(
request: DecryptBallotSharesRequest = Body(...),
scheduler: Scheduler = Depends(get_scheduler),
) -> DecryptBallotSharesResponse:
"""
Decrypt this guardian's share of one or more ballots
"""
ballots = [
SubmittedBallot.from_json_object(ballot) for ballot in request.encrypted_ballots
]
context = CiphertextElectionContext.from_json_object(request.context)
election_key_pair = read_json_object(
request.guardian.election_keys, ElectionKeyPair
)
shares = [
compute_decryption_share_for_ballot(
election_key_pair, ballot, context, scheduler
)
for ballot in ballots
]
response = DecryptBallotSharesResponse(
shares=[write_json_object(share) for share in shares]
)
return response
|
c10b9961c2f86e9d9cf26d75e276cd65b3dcfdc4
| 3,639,705
|
def returnHumidity(dd):
""" Returns humidity data if it exists in the dictionary"""
rh = []
if 'RH' in dd:
rh = dd['RH']
elif 'RH1' in dd:
rh = dd['RH1']
else:
# Convert the dew point temperature to relative humidity
Pmb = dd['airpres']/10 # hPa to mb
rh = airsea.relHumFromTdew(dd['airtemp'],dd['airdewpoint'],Pmb)
return rh
|
b51d5d23247780683d9d644f59e442b1c77210e8
| 3,639,706
|
def fixture_penn_chime_raw_df_no_beta(penn_chime_setup) -> DataFrame:
"""Runs penn_chime SIR model for no social policies
"""
p, simsir = penn_chime_setup
n_days = simsir.raw_df.day.max() - simsir.raw_df.day.min()
policies = [(simsir.beta, n_days)]
raw = sim_sir(
simsir.susceptible,
simsir.infected,
p.recovered,
simsir.gamma,
-simsir.i_day,
policies,
)
calculate_dispositions(raw, simsir.rates, market_share=p.market_share)
calculate_admits(raw, simsir.rates)
calculate_census(raw, simsir.days)
raw_df = DataFrame(raw)
return raw_df
|
8e1d654b4e171e8ab55023bfb55135d5067d7052
| 3,639,707
|
import json
import hashlib
def _verify_manifest_signature(manifest, text, digest):
"""
Verify the manifest digest and signature
"""
format_length = None
format_tail = None
if 'signatures' in manifest:
for sig in manifest['signatures']:
protected_json = _jose_decode_base64(sig['protected'])
protected = json.loads(protected_json)
curr_tail = _jose_decode_base64(protected['formatTail'])
if format_tail is None:
format_tail = curr_tail
elif format_tail != curr_tail:
msg = 'formatTail did not match between signature blocks'
raise ValueError(msg)
if format_length is None:
format_length = protected['formatLength']
elif format_length != protected['formatLength']:
msg = 'formatLen did not match between signature blocks'
raise ValueError(msg)
message = text[0:format_length] + format_tail
if hashlib.sha256(message).hexdigest() != digest:
msg = 'Failed to match manifest digest to downloaded content'
raise ValueError(msg)
return True
|
d3c5cebcb6f63723d7356be8def0824bb3cd2726
| 3,639,708
|
from typing import List
from typing import Optional
from typing import Dict
import logging
def create_hierarchy(
src_assets: List[Asset],
dst_assets: List[Asset],
project_src: str,
runtime: int,
client: CogniteClient,
subtree_ids: Optional[List[int]] = None,
subtree_external_ids: Optional[List[str]] = None,
subtree_max_depth: Optional[int] = None,
):
"""
Creates/updates the asset hierarchy in batches by depth, starting with the root assets and then moving on to the
children of those roots, etc.
Args:
src_assets: A list of the assets that are in the source.
dst_assets: A list of the assets that are in the destination.
project_src: The name of the project the object is being replicated from.
runtime: The timestamp to be used in the new replicated metadata.
client: The client corresponding to the destination project.
        subtree_ids: The id of the subtree root to replicate.
        subtree_external_ids: The external id of the subtree root to replicate.
        subtree_max_depth: The maximum tree depth to replicate.
"""
depth = 0
parents = [None] # root nodes parent id is None
if subtree_ids is not None or subtree_external_ids is not None:
unlink_subtree_parents(src_assets, subtree_ids, subtree_external_ids)
children = find_children(src_assets, parents)
src_dst_ids: Dict[int, int] = {}
src_id_dst_asset = replication.make_id_object_map(dst_assets)
while children:
logging.info(f"Starting depth {depth}, with {len(children)} assets.")
create_assets, update_assets, unchanged_assets = replication.make_objects_batch(
children,
src_id_dst_asset,
src_dst_ids,
build_asset_create,
build_asset_update,
project_src,
runtime,
depth=depth,
)
logging.info(f"Attempting to create {len(create_assets)} assets.")
created_assets = replication.retry(client.assets.create, create_assets)
logging.info(f"Attempting to update {len(update_assets)} assets.")
updated_assets = replication.retry(client.assets.update, update_assets)
src_dst_ids = replication.existing_mapping(*created_assets, *updated_assets, *unchanged_assets, ids=src_dst_ids)
logging.debug(f"Dictionary of current asset mappings: {src_dst_ids}")
num_assets = len(created_assets) + len(updated_assets)
logging.info(
f"Finished depth {depth}, updated {len(updated_assets)} and "
f"posted {len(created_assets)} assets (total of {num_assets} assets)."
)
depth += 1
if subtree_max_depth is not None and depth > subtree_max_depth:
logging.info("Reached max depth")
break
children = find_children(src_assets, children)
return src_dst_ids
|
54ec8460f6eaedee44c0cdeb423da2a757d018a7
| 3,639,709
|
def ETL_work():
""" ETL page"""
return render_template("ETL_work.html")
|
08806ed7154f4820db961b54a5c852bd0c275532
| 3,639,710
|
def advanced_perm_check_function(*rules_sets, restrictions=None):
"""
Check channels and permissions, use -s -sudo or -a -admin to run it.
Args:
*rules_sets: list of rules, 1d or 2d,
restrictions: Restrictions must be always met
Returns:
message object returned by calling given function with given params
"""
def decorator(coro):
async def f(ctx, *args, **kwargs):
valid = _check_advanced_perm(ctx,
*args,
**kwargs,
rule_sets=[*rules_sets], restrictions=restrictions)
if valid:
output = await coro(ctx, *args, **kwargs)
return output
else:
# logger.error(f"Permission check failed! Exceptions should be raised earlier!")
raise CommandError("Permission check failed.")
f.__name__ = coro.__name__
f.__doc__ = coro.__doc__
return f
return decorator
|
acf6f5494fcc632fec3bb665778a6bed3e58f19d
| 3,639,711
|
def worker_id():
"""Return a predefined worker ID.
Returns:
        int: The static worker id
"""
return 123
|
8c8e9c570a2355a15fd9a4d1d03d0159a33ffba0
| 3,639,712
|
def get_urls(spec):
"""Small convenience method to construct the URLs of the Jupyter server."""
host_url = f"http{'s' if spec['routing']['tls']['enabled'] else ''}://{spec['routing']['host']}"
full_url = urljoin(
host_url,
spec["routing"]["path"].rstrip("/"),
)
return host_url, full_url
|
059913ed12b021fce5964d54bf8b9b22132f914f
| 3,639,713
|
def resubs(resubpairs, target):
    """takes several regex find/replace pairs [(find1, replace1), (find2, replace2), ... ]
    and applies them to a target in the order given"""
    for resubpair in resubpairs:
        target = resubpair[0].sub(resubpair[1], target)
    return target
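# Example (illustrative): apply two substitutions in order.
import re
pairs = [(re.compile(r'\bfoo\b'), 'bar'), (re.compile(r'\s+'), ' ')]
print(resubs(pairs, 'foo   foo baz'))  # -> 'bar bar baz'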
|
49b371211de991c323fdec313801aba0ded8ff93
| 3,639,714
|
import functools
import time
def time_profile(func):
"""Time Profiled for optimisation
Notes:
* Do not use this in production
"""
@functools.wraps(func)
def profile(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
print(f"{func.__name__} : {time.time() - start}")
return result
return profile
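# Example usage (illustrative): decorate a function to print its wall-clock time.
@time_profile
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)  # prints something like: slow_add : 0.100...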
|
a4dde1d66f5987b4be1e9179da1570c252540363
| 3,639,715
|
import uuid
from datetime import datetime, timezone
async def create_guid(data: GuidIn) -> GuidOut:
"""
Create a record w/o specifying a guid.
Also cleans up expired records & caches the new record.
"""
guid = uuid.uuid4().hex
validated = data.dict()
try:
await create_guid_record(guid, validated['name'], validated['expire'])
except Exception as detail:
raise HTTPException(status_code=400, detail=f'{detail}')
# Build serialized response
out = GuidOut(
id=guid,
expire=validated['expire'],
name=validated['name'],
)
# Cache stuff
ttl = validated['expire'] - datetime.now(timezone.utc)
await cache.set(guid, out, ttl=ttl.seconds)
return out
|
0ff0f95acc6268e5ccd081156cc9355c8db520db
| 3,639,716
|
import os
def exp_for(iterable, filename, display=False):
"""
Run an experiment for words in given iterable and save its
results to PATH_RESULTS/filename. If display is set to True, also
print output to screen.
    The output is formatted as follows:
[word]RESULT_SEP[size]RESULT_SEP[n+x]RESULT_SEP[diff with upperbound]
"""
fp = open(os.path.join(PATH_RESULTS, 'mdfa_'+filename), 'w')
i = 0
for word, size in results_for(iterable):
output = '%s%s%d%s%d+%d%s%d' % (word, RESULT_SEP,
size, RESULT_SEP, len(word), size - len(word),
            RESULT_SEP, size - (len(word) + len(word) // 2))
fp.write(output+'\n')
if display:
            print(output)
i += 1
fp.close()
return i
|
11933d4b4c77dcae354b307cb27ab27af3e49311
| 3,639,717
|
import argparse
def arg_parse():
"""
Parse arguments to the detect module
"""
parser = argparse.ArgumentParser(description='YOLO v3 Detection Module')
parser.add_argument("--bs", dest="bs", help="Batch size", default=1)
parser.add_argument("--confidence", dest="confidence", help="Object Confidence to filter predictions", default=0.5)
parser.add_argument("--nms_thresh", dest="nms_thresh", help="NMS Threshhold", default=0.4)
parser.add_argument("--cfg", dest = 'cfgfile', help=
"Config file",
default="cfg/yolov3.cfg", type=str)
parser.add_argument("--weights", dest='weightsfile', help=
"weightsfile",
default="cfg/yolov3.weights", type=str)
parser.add_argument("--reso", dest='reso', help=
"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default="416", type=str)
parser.add_argument("--video", dest="videofile", help="Video file to run detection on", default="videos/drone2.mp4",
type=str)
return parser.parse_args()
|
24ec26a23278b59d09f12df3950faa9996828958
| 3,639,718
|
import re
def remove_useless_lines(text):
"""Removes lines that don't contain a word nor a number.
Args:
text (string): markdown text that is going to be processed.
Returns:
string: text once it is processed.
"""
# Useless lines
useless_line_regex = re.compile(r'^[^\w\n]*$', re.MULTILINE | re.UNICODE)
processed_text = useless_line_regex.sub(r'', text)
return processed_text
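# Example (illustrative): horizontal-rule lines contain no word characters, so they are blanked.
md = "# Title\n---\nSome text with 1 number.\n***\n"
print(remove_useless_lines(md))
# "# Title\n\nSome text with 1 number.\n\n"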
|
fd33cdb243b6887d11846736f922bb4e1332d549
| 3,639,719
|
def get_candidate(word):
"""get candidate word set
@word -- the given word
@return -- a set of candidate words
"""
candidates = set()
candidates |= meanslike(word)
candidates |= senselike(word)
# remove '_' and '-' between words --> candidates is a LIST now
candidates = [w.replace('_', ' ') for w in candidates]
candidates = [w.replace('-', ' ') for w in candidates]
    # remove words which contain special characters (e.g. Ann's book)
    candidates = [w for w in candidates if ''.join(w.split()).isalpha()]
    # remove phrases that have more than two words
    candidates = [w for w in candidates if len(w.split()) < 3]
    # turn all words into lowercase
    candidates = [w.lower() for w in candidates]
    # remove candidates that contain the word itself
    candidates = [w for w in candidates if not (word in w)]
return candidates
|
8e2b7359f681cd96bb1ddad68cbfe77c1ae2e79b
| 3,639,720
|
def _build_proxy_response(response: RawResponse, error_handler: callable) -> dict:
"""Once the application completes the request, maps the results into the format required by
AWS.
"""
try:
if response.caught_exception is not None:
raise response.caught_exception
message = ''.join([str(message) for message in response.result])
except Exception as e:
return error_handler(e)
return {'statusCode': response.response_status.split(' ')[0],
'headers': response.outbound_headers,
'body': message}
|
c07e52da6c6e952c35bda18b9d5600d756280f9b
| 3,639,721
|
from typing import Dict
from typing import Any
def parse_and_execute(config: dict) -> Dict[str, Any]:
"""Validate, parse and transform the config. Execute backends based
on the transformed config to perform I/O operations.
If `prompt` and/or `default` contains a macro, it will be expanded.
:param config: The original config
:type config: dict
:return: A dict containing values collected during I/O operations for the
corresponding key
:rtype: dict
"""
parsed_config = _parse(config)
res: Dict[str, Any] = dict()
for key, value in parsed_config.items():
backend = value.get("backend")
params = value.get("params")
# TODO: Maybe use Jinja2 template rendering instead of macro expansion?
_expand_macro(key=key, params=params, param_key="prompt", inputs=res)
_expand_macro(key=key, params=params, param_key="default", inputs=res)
res[key] = _execute(backend_module_name=backend, **params)
return res
|
dd350df8b282aa5fa459b89aedc000c326d4923d
| 3,639,722
|
def reduce_any(input_tensor, axis=None, keepdims=None,
name=None, reduction_indices=None):
"""
    Wrapper around tf.reduce_any to handle the keep_dims argument
"""
return reduce_function(tf.reduce_any, input_tensor, axis=axis,
keepdims=keepdims, name=name,
reduction_indices=reduction_indices)
|
bdf25f573caef2d9c0926c92d8ce4b4a3b682775
| 3,639,723
|
def get_customer_profile_ids():
"""get customer profile IDs"""
merchantAuth = apicontractsv1.merchantAuthenticationType()
merchantAuth.name = constants.apiLoginId
merchantAuth.transactionKey = constants.transactionKey
CustomerProfileIdsRequest = apicontractsv1.getCustomerProfileIdsRequest()
CustomerProfileIdsRequest.merchantAuthentication = merchantAuth
CustomerProfileIdsRequest.refId = "Sample"
controller = getCustomerProfileIdsController(CustomerProfileIdsRequest)
controller.execute()
# Work on the response
response = controller.getresponse()
# if (response.messages.resultCode == "Ok"):
# print("Successfully retrieved customer ids:")
# for identity in response.ids.numericString:
# print(identity)
# else:
# print("response code: %s" % response.messages.resultCode)
if response is not None:
if response.messages.resultCode == apicontractsv1.messageTypeEnum.Ok:
if hasattr(response, 'ids'):
if hasattr(response.ids, 'numericString'):
print('Successfully retrieved customer IDs.')
if response.messages is not None:
print('Message Code: %s' % response.messages.message[0]['code'].text)
print('Message Text: %s' % response.messages.message[0]['text'].text)
print('Total Number of IDs Returned in Results: %s'
% len(response.ids.numericString))
print()
# There's no paging options in this API request; the full list is returned every call.
# If the result set is going to be large, for this sample we'll break it down into smaller
# chunks so that we don't put 72,000 lines into a log file
                    print('First 20 results:')
                    for profileId in range(0, 20):
                        print(response.ids.numericString[profileId])
else:
if response.messages is not None:
print('Failed to get list.')
print('Code: %s' % (response.messages.message[0]['code'].text))
print('Text: %s' % (response.messages.message[0]['text'].text))
else:
if response.messages is not None:
print('Failed to get list.')
print('Code: %s' % (response.messages.message[0]['code'].text))
print('Text: %s' % (response.messages.message[0]['text'].text))
else:
print('Error. No response received.')
return response
|
ba6cb076870961b4ab226b2aeb3ab07b1b5eb848
| 3,639,724
|
from typing import List
from typing import Tuple
def compare_alignments_(prediction: List[dict], ground_truth: List[dict], types: List[str]) -> Tuple[float, float, float]:
"""
Parameters
----------
prediction: List of dictionaries containing the predicted alignments
ground_truth: List of dictionaries containing the ground truth alignments
    types: List of alignment types to consider for evaluation (e.g. ['match', 'deletion', 'insertion'])
Returns
-------
precision, recall, f score
"""
pred_filtered = list(filter(lambda x: x['label'] in types, prediction))
gt_filtered = list(filter(lambda x: x['label'] in types, ground_truth))
filtered_correct = [pred for pred in pred_filtered if pred in gt_filtered]
n_pred_filtered = len(pred_filtered)
n_gt_filtered = len(gt_filtered)
n_correct = len(filtered_correct)
if n_pred_filtered > 0 or n_gt_filtered > 0:
precision = n_correct / n_pred_filtered if n_pred_filtered > 0 else 0.
recall = n_correct / n_gt_filtered if n_gt_filtered > 0 else 0
f_score = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
else:
# no prediction and no ground truth for a given type -> correct alignment
precision, recall, f_score = 1., 1., 1.
return precision, recall, f_score
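# Example (illustrative): one of two predicted alignments matches the ground truth exactly.
pred = [{"label": "match", "t": 0}, {"label": "insertion", "t": 1}]
gt = [{"label": "match", "t": 0}, {"label": "deletion", "t": 1}]
print(compare_alignments_(pred, gt, ["match", "deletion", "insertion"]))  # (0.5, 0.5, 0.5)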
|
57648544c152bff0acc52271d453b58a0d8e8cad
| 3,639,725
|
import inspect
def shim_unpack(
unpack_fn, # type: TShimmedFunc
    download_dir,  # type: str
tempdir_manager_provider, # type: TShimmedFunc
ireq=None, # type: Optional[Any]
link=None, # type: Optional[Any]
    location=None,  # type: Optional[str]
hashes=None, # type: Optional[Any]
progress_bar="off", # type: str
only_download=None, # type: Optional[bool]
downloader_provider=None, # type: Optional[TShimmedFunc]
session=None, # type: Optional[Any]
verbosity=0, # type: Optional[int]
):
# (...) -> None
"""
Accepts all parameters that have been valid to pass
to :func:`pip._internal.download.unpack_url` and selects or
drops parameters as needed before invoking the provided
callable.
:param unpack_fn: A callable or shim referring to the pip implementation
:type unpack_fn: Callable
:param str download_dir: The directory to download the file to
:param TShimmedFunc tempdir_manager_provider: A callable or shim referring to
`global_tempdir_manager` function from pip or a shimmed no-op context manager
:param Optional[:class:`~pip._internal.req.req_install.InstallRequirement`] ireq:
an Install Requirement instance, defaults to None
:param Optional[:class:`~pip._internal.models.link.Link`] link: A Link instance,
defaults to None.
:param Optional[str] location: A location or source directory if the target is
a VCS url, defaults to None.
:param Optional[Any] hashes: A Hashes instance, defaults to None
    :param str progress_bar: Indicates progress bar usage during download, defaults to
off.
:param Optional[bool] only_download: Whether to skip install, defaults to None.
:param Optional[ShimmedPathCollection] downloader_provider: A downloader class
to instantiate, if applicable.
:param Optional[`~requests.Session`] session: A PipSession instance, defaults to
None.
:param Optional[int] verbosity: 1 or 0 to indicate verbosity flag, defaults to 0.
:return: The result of unpacking the url.
:rtype: None
"""
unpack_fn = resolve_possible_shim(unpack_fn)
downloader_provider = resolve_possible_shim(downloader_provider)
tempdir_manager_provider = resolve_possible_shim(tempdir_manager_provider)
required_args = inspect.getargs(unpack_fn.__code__).args # type: ignore
unpack_kwargs = {"download_dir": download_dir}
with tempdir_manager_provider():
if ireq:
if not link and ireq.link:
link = ireq.link
if only_download is None:
only_download = ireq.is_wheel
if hashes is None:
hashes = ireq.hashes(True)
if location is None and getattr(ireq, "source_dir", None):
location = ireq.source_dir
unpack_kwargs.update({"link": link, "location": location})
if hashes is not None and "hashes" in required_args:
unpack_kwargs["hashes"] = hashes
if "progress_bar" in required_args:
unpack_kwargs["progress_bar"] = progress_bar
if only_download is not None and "only_download" in required_args:
unpack_kwargs["only_download"] = only_download
if session is not None and "session" in required_args:
unpack_kwargs["session"] = session
if (
"download" in required_args or "downloader" in required_args
) and downloader_provider is not None:
arg_name = "download" if "download" in required_args else "downloader"
assert session is not None
assert progress_bar is not None
unpack_kwargs[arg_name] = downloader_provider(session, progress_bar)
if "verbosity" in required_args:
unpack_kwargs["verbosity"] = verbosity
return unpack_fn(**unpack_kwargs)
|
561d473584da5f96cbffadb543cee129c9c6e0ef
| 3,639,726
|
from torch.nn import init
def xavier_uniform(x):
"""Wrapper for torch.nn.init.xavier_uniform method.
Parameters
----------
x : torch.tensor
Input tensor to be initialized. See torch.nn.init.py for more information
Returns
-------
torch.tensor
Initialized tensor
"""
return init.xavier_uniform_(x)
|
f407fa3e35d1bd708e6cfe4b003a788bed4eb443
| 3,639,727
|
import pyzo.util.interpreters as interps ### EKR
import os
def _get_interpreters_win(): # pyzo_in_leo.py
"""
Monkey-patch pyzo/util/interpreters._get_interpreters_win.
This patched code fixes an apparent pyzo bug.
Unlike shutil.which, this function returns all plausible python executables.
Copyright (C) 2013-2019 by Almar Klein.
"""
found = []
# Query from registry
for v in interps.get_interpreters_in_reg(): ### EKR
found.append(v.installPath() )
# Check common locations
for rootname in ['C:/', '~/',
'C:/program files/', 'C:/program files (x86)/', 'C:/ProgramData/',
'~/appdata/local/programs/python/',
'~/appdata/local/continuum/', '~/appdata/local/anaconda/',
]:
rootname = os.path.expanduser(rootname)
if not os.path.isdir(rootname):
continue
for dname in os.listdir(rootname):
if dname.lower().startswith(('python', 'pypy', 'miniconda', 'anaconda')):
found.append(os.path.join(rootname, dname))
# Normalize all paths, and remove trailing backslashes
### found = [os.path.normcase(os.path.abspath(v)).strip('\\') for v in found]
found = [
os.path.normcase(os.path.abspath(v)).strip('\\') for v in found
if v is not None ### EKR: Add guard.
]
# Append "python.exe" and check if that file exists
found2 = []
for dname in found:
for fname in ('python.exe', 'pypy.exe'):
exename = os.path.join(dname, fname)
if os.path.isfile(exename):
found2.append(exename)
break
    # Return as a set (remove duplicates)
return set(found2)
|
35b64945c0531b34f5733b9587a9000e70a9e4e3
| 3,639,728
|
def create_test_data(site, start=None, end="now", interval=5, units='minutes', val=50, db='test_db', data={}):
"""
    data = {'R1':[0,0,0,...], 'R2':[0,0,123,12,...], ...} will not generate data but use the fixed data set
    if val is not set, random data will be generated for any register missing from data
    """
_influx_db_name = db
i = Influx(database=_influx_db_name)
data_point_dates = generate_date_array(start=start, end=end, interval=interval, units=units)
voltage_in = 220
voltage_out = 220
soc = val
R1 = val
R2 = val
R3 = val
R4 = val
R5 = val
count = 0
print "creating %s test data points"%len(data_point_dates)
print "between %s and %s "%(data_point_dates[0],data_point_dates[len(data_point_dates)-1:])
# Simulate Grid outage
for time_val in data_point_dates:
if not val:
try:
soc = data.get('soc',[])[count]
except:
soc = get_random_int()
try:
R1 = data.get('R1',[])[count]
except:
R1 = voltage_in * get_random_binary()
try:
R2 = data.get('R2',[])[count]
except:
R2 = get_random_interval(100,500)
try:
R3 = data.get('R3',[])[count]
except:
R3 = get_random_interval(22,28)
try:
R4 = data.get('R4',[])[count]
except:
R4 = get_random_interval(100,500)
try:
R5 = data.get('R5',[])[count]
except:
R5 = get_random_interval(100,500)
dp = Data_Point.objects.create(
site=site,
soc = soc ,
battery_voltage = R3,
time=time_val,
AC_Voltage_in = R1,
AC_Voltage_out = voltage_out,
AC_input = R4,
AC_output = R5,
AC_output_absolute = R2,
AC_Load_in = R2,
AC_Load_out = R4,
pv_production = R5)
        # Also send to influx
dp_dict = model_to_dict(dp)
dp_dict.pop('time')
dp_dict.pop('inverter_state')
dp_dict.pop('id')
i.send_object_measurements(dp_dict,timestamp=time_val.isoformat(),tags={"site_name":site.site_name})
count = count + 1
    # Return the number of data points created
return len(data_point_dates)
|
56e5f65650a2fb1eb2a829d9443cd0a588402c3a
| 3,639,729
|
def Nmin(e, dz, s, a, a_err):
"""Estimates the minimum number of independent structures
to detect a difference in dN/dz w/r to a field value given
by dNdz|field = a +- a_err, at a statistical significance s,
using a redshift path of dz per structure"""
e = np.array(e).astype(float)
dz = np.array(dz).astype(float)
s = np.array(s).astype(float)
a = np.array(a).astype(float)
a_err = np.array(a_err).astype(float)
    # this analytical expression was derived by N.T.
return (e / dz / a) * (s ** 2) / ((e - 1.) - s * a_err / a) ** 2
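# Worked example (illustrative numbers): expected overdensity e=3, dz=0.1 per
# structure, 3-sigma significance, field dN/dz = 1.0 +/- 0.2:
#   Nmin(3, 0.1, 3, 1.0, 0.2) = (3/0.1/1.0) * 9 / ((3-1) - 3*0.2/1.0)**2 ~= 137.8
import numpy as np
print(Nmin(3, 0.1, 3, 1.0, 0.2))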
|
c603f90e35802b6b7401c14abda3bb350d0e6941
| 3,639,730
|
import string
def remove_punctuation(list_of_string, item_to_keep=""):
"""
Remove punctuation from a list of strings.
Parameters
----------
- list_of_string : a dataframe column or variable containing the text stored as a list of string sentences
- item_to_keep : a string of punctuation signs you want to keep in text (e.g., '!?.,:;')
"""
# Update string of punctuation signs
if len(item_to_keep) > 0:
punctuation_list = "".join(
c for c in string.punctuation if c not in item_to_keep
)
else:
punctuation_list = string.punctuation
# Remove punctuation from each sentence
transtable = str.maketrans("", "", punctuation_list)
return [sent.translate(transtable) for sent in list_of_string]
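# Example (illustrative): keep exclamation marks, strip all other punctuation.
print(remove_punctuation(["Hello, world!", "Good-bye; see you soon."], item_to_keep="!"))
# ['Hello world!', 'Goodbye see you soon']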
|
cb9190bc160f8e725479b531afab383c6857ceac
| 3,639,731
|
import requests
import pickle
import bs4 as bs
def save_sp500_tickers(force_download=False):
"""Get the S&P 500 tickers from Wikipedia
Parameters
----------
force_download : bool
if True, force redownload of data
Returns
-------
    tickers : list of str
        The S&P 500 ticker symbols
"""
resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup = bs.BeautifulSoup(resp.text, "lxml")
table = soup.find('table', {'class':'wikitable sortable'})
tickers = []
for row in table.findAll('tr')[1:]:
ticker = row.findAll('td')[1].text
# fix for . and - tickers
mapping = str.maketrans(".","-")
ticker = ticker.translate(mapping)
tickers.append(ticker)
# cache the results for local access
with open("sp500tickers.pickle", "wb") as f:
pickle.dump(tickers, f)
#print(tickers)
return tickers
|
94ddf34acbda542fe039f988887b904bb2ac9da4
| 3,639,732
|
def get_search_keywords(testcase):
"""Get search keywords for a testcase."""
crash_state_lines = testcase.crash_state.splitlines()
# Use top 2 frames for searching.
return crash_state_lines[:2]
|
15c1611aeff33f9d8bba843f076b31abfb4023ba
| 3,639,733
|
def make_protein_index(proteins):
"""Indexes proteins
"""
prot_index = {}
skip = set(['sp', 'tr', 'gi', 'ref', ''])
for i, p in enumerate(proteins):
accs = p.accession.split('|')
for acc in accs:
if acc in skip:
continue
prot_index[acc] = i
return prot_index
|
be54ca3a123fe13efbb8c694187dd34d944fd654
| 3,639,734
|
def jvp_solve_Hz(g, Hz, info_dict, eps_vec, source, iterative=False, method=DEFAULT_SOLVER):
""" Gives jvp for solve_Hz with respect to eps_vec """
    # construct the system matrix again and the RHS of the gradient expression
A = make_A_Hz(info_dict, eps_vec)
ux = spdot(info_dict['Dxb'], Hz)
uy = spdot(info_dict['Dyb'], Hz)
diag = sp.spdiags(1 / eps_vec, [0], eps_vec.size, eps_vec.size)
# the g gets multiplied in at the middle of the expression
ux = ux * diag * g * diag
uy = uy * diag * g * diag
ux = spdot(info_dict['Dxf'], ux)
uy = spdot(info_dict['Dyf'], uy)
# add the x and y components and multiply by A_inv on the left
u = (ux + uy)
Hz_for = sparse_solve(A, u, iterative=iterative, method=method)
return 1 / EPSILON_0 * Hz_for
|
0d861f9c6a899c70da7d095cb6ac436586e75bdd
| 3,639,735
|
from typing import Union
def encode(X: Union[tf.Tensor, np.ndarray], encoder: keras.Model, **kwargs) -> tf.Tensor:
"""
Encodes the input tensor.
Parameters
----------
X
Input to be encoded.
encoder
Pretrained encoder network.
Returns
-------
Input encoding.
"""
return encoder(X, training=False)
|
a97abdc611643e4cd1ed944ff27ef7402e824acb
| 3,639,736
|
def _step2(input):
"""
_step2 - function to apply step2 rules
Inputs:
- input : str
- m : int
Measurement m of c.v.c. sequences
Outputs:
- input : str
"""
# ational -> ate
if input.endswith('ational') and _compute_m(input[:-7]) > 0:
return input[:-1 * len('ational')] + 'ate'
# tional -> tion
elif input.endswith('tional') and _compute_m(input[:-6]) > 0:
return input[:-1*len('tional')] + 'tion'
# enci -> ence
elif input.endswith('enci') and _compute_m(input[:-4]) > 0:
return input[:-1] + 'e'
# anci -> ance
elif input.endswith('anci') and _compute_m(input[:-4]) > 0:
return input[:-1] + 'e'
# izer -> ize
elif input.endswith('izer') and _compute_m(input[:-4]) > 0:
return input[:-1]
# abli -> able
elif input.endswith('abli') and _compute_m(input[:-4]) > 0:
return input[:-1] + 'e'
# alli -> al
elif input.endswith('alli') and _compute_m(input[:-4]) > 0:
return input[:-2]
# entli -> ent
    elif input.endswith('entli') and _compute_m(input[:-5]) > 0:
return input[:-2]
# eli -> e
elif input.endswith('eli') and _compute_m(input[:-3]) > 0:
return input[:-2]
# ousli -> ous
elif input.endswith('ousli') and _compute_m(input[:-5]) > 0:
        return input[:-2]
# ization -> ize
elif input.endswith('ization') and _compute_m(input[:-7]) > 0:
return input[:-5] + 'e'
# ation -> ate
elif input.endswith('ation') and _compute_m(input[:-5]) > 0:
return input[:-3] + 'e'
# ator -> ate
    elif input.endswith('ator') and _compute_m(input[:-4]) > 0:
return input[:-2] + 'e'
# alism -> al
elif input.endswith('alism') and _compute_m(input[:-5]) > 0:
return input[:-3]
# iveness -> ive
elif input.endswith('iveness') and _compute_m(input[:-7]) > 0:
return input[:-4]
# fulness -> ful
elif input.endswith('fulness') and _compute_m(input[:-7]) > 0:
return input[:-4]
# ousness -> ous
elif input.endswith('ousness') and _compute_m(input[:-7]) > 0:
return input[:-4]
    # aliti -> al
elif input.endswith('aliti') and _compute_m(input[:-5]) > 0:
return input[:-3]
# iviti -> ive
elif input.endswith('iviti') and _compute_m(input[:-5]) > 0:
return input[:-3] + 'e'
# biliti -> ble
elif input.endswith('biliti') and _compute_m(input[:-6]) > 0:
return input[:-5] + 'le'
return input
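# Examples (from Porter's original paper) of what the step 2 rules produce,
# assuming _compute_m implements the standard measure:
#   _step2('relational')  -> 'relate'
#   _step2('conditional') -> 'condition'
#   _step2('analogousli') -> 'analogous'
#   _step2('operator')    -> 'operate'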
|
5181d55de4ef7c33778dfbe80707e4e621018d5c
| 3,639,737
|
from typing import cast
def compute_annualized_volatility(srs: pd.Series) -> float:
"""
Annualize sample volatility.
:param srs: series with datetimeindex with `freq`
:return: annualized volatility (stdev)
"""
srs = hdataf.apply_nan_mode(srs, mode="fill_with_zero")
ppy = hdataf.infer_sampling_points_per_year(srs)
std = srs.std()
annualized_volatility = np.sqrt(ppy) * std
annualized_volatility = cast(float, annualized_volatility)
return annualized_volatility
|
517d56e53885fdcb5eee3ed0fa3acae766d9c7e2
| 3,639,738
|
import datetime
def json_sanitized(value, stringify=stringified, dt=str, none=False):
"""
Args:
value: Value to sanitize
stringify (callable | None): Function to use to stringify non-builtin types
dt (callable | None): Function to use to stringify dates
none (str | bool): States how to treat `None` keys/values
- string: Replace `None` *keys* with given string (keep `None` *values* as-is)
- False (default): Filter out `None` keys/values
- True: No filtering, keep `None` keys/values as-is
Returns:
An object that should be json serializable
"""
if value is None or is_basetype(value):
return value
if hasattr(value, "to_dict"):
value = value.to_dict()
elif isinstance(value, set):
value = sorted(value)
if isinstance(value, dict):
return dict(
(
json_sanitized(none if k is None and isinstance(none, str) else k, stringify=stringify, dt=dt, none=none),
json_sanitized(v, stringify=stringify, dt=dt, none=none),
)
for k, v in value.items()
if none or (k is not None and v is not None)
)
if is_iterable(value):
return [json_sanitized(v, stringify=stringify, dt=dt, none=none) for v in value]
if isinstance(value, datetime.date):
return value if dt is None else dt(value)
if stringify is None:
return value
return stringify(value)
|
6f50e3bb5a07417b05cb95813d2cc5a89ad12a2b
| 3,639,739
|
def rescan_organization_task(task, org, allpr, dry_run, earliest, latest):
"""A bound Celery task to call rescan_organization."""
meta = {"org": org}
task.update_state(state="STARTED", meta=meta)
callback = PaginateCallback(task, meta)
return rescan_organization(org, allpr, dry_run, earliest, latest, page_callback=callback)
|
2241bf6630bdd63c231f011708ac392c3d1a8234
| 3,639,740
|
def python(cc):
"""Format the character for a Python string."""
codepoint = ord(cc)
if 0x20 <= codepoint <= 0x7f:
return cc
if codepoint > 0xFFFF:
return "\\U%08x" % codepoint
return "\\u%04x" % codepoint
|
b0c2042c653043c0831a35ffc13d73850e29af2f
| 3,639,741
|
import logging
import kubernetes.client
from kubernetes.client.rest import ApiException
def stop_execution(execution_id):
"""
Stop the current workflow execution.
swagger_from_file: docs/stop.yml
"""
name = execution_id # str | the custom object's name
body = kubernetes.client.V1DeleteOptions() # V1DeleteOptions |
grace_period_seconds = 56 # int | The duration in seconds before the object should be
# deleted. Value must be non-negative integer. The value zero indicates delete immediately.
# If this value is nil, the default grace period for the specified type will be used.
# Defaults to a per object value if not specified. zero means delete immediately. (optional)
orphan_dependents = True # bool | Deprecated: please use the PropagationPolicy, this field
# will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false,
# the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either
# this field or PropagationPolicy may be set, but not both. (optional)
propagation_policy = 'propagation_policy_example' # str | Whether and how garbage collection
# will be performed. Either this field or OrphanDependents may be set, but not both. The
# default policy is decided by the existing finalizer set in the metadata.finalizers and the
# resource-specific default policy. (optional)
try:
api_response = v1alpha1.delete_namespaced_workflow(namespace, name, body=body,
grace_period_seconds=grace_period_seconds,
orphan_dependents=orphan_dependents,
propagation_policy=propagation_policy)
logging.info(api_response)
except ApiException as e:
print("Exception when calling CustomObjectsApi->delete_namespaced_custom_object: %s\n" % e)
    return 'Successfully deleted', 200
|
d7d4264a81f106de31c49d5825de114a20f79966
| 3,639,742
|
def reshape_signal_batch(signal):
"""Convert the signal into a standard batch shape for use with cochleagram.py
functions. The first dimension is the batch dimension.
Args:
signal (array): The sound signal (waveform) in the time domain. Should be
either a flattened array with shape (n_samples,), a row vector with shape
(1, n_samples), a column vector with shape (n_samples, 1), or a 2D
matrix of the form [batch, waveform].
Returns:
array:
**out_signal**: If the input `signal` has a valid shape, returns a
2D version of the signal with the first dimension as the batch
dimension.
Raises:
ValueError: Raises an error of the input `signal` has invalid shape.
"""
if signal.ndim == 1: # signal is a flattened array
out_signal = signal.reshape((1, -1))
elif signal.ndim == 2: # signal is a row or column vector
if signal.shape[0] == 1:
out_signal = signal
elif signal.shape[1] == 1:
out_signal = signal.reshape((1, -1))
else: # first dim is batch dim
out_signal = signal
else:
raise ValueError('signal should be flat array, row or column vector, or a 2D matrix with dimensions [batch, waveform]; found %s' % signal.ndim)
return out_signal
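# Examples (illustrative): every accepted shape is normalized to [batch, waveform].
import numpy as np
print(reshape_signal_batch(np.zeros(100)).shape)       # (1, 100)
print(reshape_signal_batch(np.zeros((100, 1))).shape)  # (1, 100)
print(reshape_signal_batch(np.zeros((4, 100))).shape)  # (4, 100)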
|
344ce1a9a695e99fa470a5d849afb40bc381c9df
| 3,639,743
|
import scipy.special
import itertools
import numpy as np
def tryallmedoids(dmat, c, weights=None, potential_medoid_inds=None, fuzzy=True, fuzzyParams=('FCM', 2)):
"""Brute force optimization of k-medoids or fuzzy c-medoids clustering.
To apply to points in euclidean space pass dmat using:
dmat = sklearn.neighbors.DistanceMetric.get_metric('euclidean').pairwise(points_array)
Parameters
----------
dmat : array-like of floats, shape (n_samples, n_samples)
Pairwise distance matrix of observations to cluster.
c : int
Number of clusters to form as well as the number of medoids to generate.
weights : array-like of floats, shape (n_samples)
Relative weights for each observation in inertia computation.
potential_medoid_inds : array of indices
If specified, then medoids are constrained to be chosen from this array.
fuzzy : boolean
If True, use fuzzy inertia function,
        otherwise use a crisp cluster definition.
fuzzyParams : tuple of (method str/int, param)
Method and parameter for computing fuzzy membership matrix.
Returns
-------
medoids : float ndarray with shape (c)
Indices into dmat that indicate optimal medoids.
membership or labels: float ndarray with shape (n_samples, c) or shape (n_samples,)
Each row contains the membership of a point to each of the clusters
OR with hard clusters, the medoid/cluster index of each point."""
if fuzzy:
wdmat = precomputeWeightedDmat(dmat, weights, squared=False)
else:
wdmat = precomputeWeightedDmat(dmat, weights, squared=True)
N = dmat.shape[0]
if potential_medoid_inds is None:
potential_medoid_inds = np.arange(N)
    combinations = scipy.special.comb(len(potential_medoid_inds), c)
if combinations > 1e7:
print("Too many combinations to try: %1.1g > 10M" % combinations)
bestInertia = None
for medInds in itertools.combinations(list(range(len(potential_medoid_inds))), c):
medoids = potential_medoid_inds[np.array(medInds)]
if fuzzy:
membership = computeMembership(dmat, medoids, method=fuzzyParams[0], param=fuzzyParams[1])
else:
membership = np.zeros((N, c))
membership[np.arange(N), np.argmin(dmat[:, medoids], axis=1)] = 1.
inertia = (wdmat[:, medoids] * membership).sum()
if bestInertia is None or inertia < bestInertia:
bestMedoids = medoids
bestInertia = inertia
bestMembership = membership
    if not fuzzy:
        bestMembership = np.argmax(bestMembership, axis=1)
    return bestMedoids, bestMembership
|
34ba39d57bdce6b52b3c5e8eef085ef928d02038
| 3,639,744
|
from typing import Union
def human_timedelta(s: Union[int, float]) -> str:
"""Convert a timedelta from seconds into a string using a more sensible unit.
Args:
s: Amount of seconds
Returns:
A string representing `s` seconds in an easily understandable way
"""
if s >= MONTH_SECONDS:
return f"{round(s / MONTH_SECONDS)} month(s)"
if s >= DAY_SEC:
return f"{round(s / DAY_SEC)} day(s)"
if s >= HOUR_SEC:
return f"{round(s / HOUR_SEC)} hour(s)"
elif s >= _FIVE_MINUTE_SEC:
return f"{round(s / MINUTE_SEC)} minute(s)"
else:
return f"{round(s)} second(s)"
|
84725ecf4e4d59d423505978b8255f96a6483cd0
| 3,639,745
|
from functools import reduce
def rate_cell(cell, board, snake, bloom_level=4):
""" rates a cell based on proximity to other snakes, food, the edge of the board, etc """
cells = []
# Get all the cells of "bloom_level" number of circles surrounding the given cell.
for x in range(-bloom_level, bloom_level+1):
for y in range(-bloom_level, bloom_level+1):
division_factor = dist((cell[0]+x, cell[1]+y), cell)
if division_factor == 0:
division_factor = 1
cells.append(((cell[0]+x, cell[1]+y), division_factor))
# EMPTY = 0
# SNAKE = 1
# FOOD = 2
# SPOILED = 3
cell_weightings = [EMPTY_RATING, ENEMY_RATING, FOOD_RATING, SPOILED_RATING, BODY_RATING, OUT_SIDE_BOARD_RATING]
cell_values = []
for m_cell in cells:
weight_key = 5 # Outside the board
if board.inside(m_cell[0]):
weight_key = board.get_cell(m_cell[0])
if m_cell[0] in snake.coords:
weight_key = 4
cell_values.append((weight_key, m_cell[1]))
return reduce(lambda carry, m_cell: carry + cell_weightings[m_cell[0]]/m_cell[1], cell_values, 0)
|
ab78b4b822789b32e2a768dcb9d55f5398b34a13
| 3,639,746
|
def edges_are_same(a, b):
"""
Function to check if two tuple elements (src, tgt, val) correspond
to the same directed edge (src, tgt).
Args:
        tuple_elements : a = (src, tgt, val) and b = (src, tgt, val)
Returns:
True or False
"""
if a[0:2] == b[0:2]:
return True
else:
return False
|
04c4d414402a57cafa0028d0ecd140bedd2539d7
| 3,639,747
|
def conv_mrf(A, B):
"""
:param A: conv kernel 1 x 120 x 180 x 1 (prior)
:param B: input heatmaps: hps.batch_size x 60 x 90 x 1 (likelihood)
:return: C is hps.batch_size x 60 x 90 x 1
"""
B = tf.transpose(B, [1, 2, 3, 0])
B = tf.reverse(B, axis=[0, 1]) # [h, w, 1, b], we flip kernel to get convolution, and not cross-correlation
# conv between 1 x 120 x 180 x 1 and 60 x 90 x 1 x ? => 1 x 61 x 91 x ?
C = tf.nn.conv2d(A, B, strides=[1, 1, 1, 1], padding='VALID') # 1 x 61 x 91 x ?
# C = C[:, :hm_height, :hm_width, :] # 1 x 60 x 90 x ?
C = tf.image.resize_images(C, [hm_height, hm_width])
C = tf.transpose(C, [3, 1, 2, 0])
return C
|
5bca01b656b135f20325441ebcbcfac883627565
| 3,639,748
|
from typing import Dict
from typing import Tuple
def get_alert_by_id_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Get alert by id and return outputs in Demisto's format
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs
"""
_id = args.get('id')
headers = argToList(args.get('headers'))
raw_response = client.get_alert_by_id(_id=_id)
if raw_response:
title = f'{INTEGRATION_NAME} - Alert {_id}:'
context_entry = create_context_result(raw_response, ALERTS_TRANS)
context = {
f'{INTEGRATION_CONTEXT_NAME}.Alert(val.ID && val.ID === obj.ID)': context_entry
}
human_readable = tableToMarkdown(title, context_entry, headers=headers, removeNull=True)
return human_readable, context, raw_response
else:
return f'{INTEGRATION_NAME} - Could not find any alerts.', {}, {}
|
74208c66627e2441ff62ad6b4207844241ab7cd6
| 3,639,749
|
def close_issues() -> list[res.Response]:
"""Batch close issues on GitHub."""
settings = _get_connection_settings(CONFIG_MANAGER.config)
try:
github_service = ghs.GithubService(settings)
except ghs.GithubServiceError as gse:
return [res.ResponseFailure(res.ResponseTypes.RESOURCE_ERROR, gse)]
list_object = "issue"
title_query = p.InquirerPrompter.query_by_title(
CONFIG_MANAGER.config.github_selected_repos, list_object
)
list_request = il.build_list_request(
filters={
"obj__eq": list_object,
"state__eq": "open",
"title__contains": title_query,
}
)
return ghcli.GhCloseIssueUseCase(CONFIG_MANAGER, github_service).execute(
list_request
)
|
ac59a72b893ebc91090d3ba38a1cbf6cb8844be1
| 3,639,750
|
def _invert_lambda(node: tn.Node) -> tn.Node:
"""Invert a diagonal lambda matrix. """
tensor = node.get_tensor()
assert _is_diagonal_matrix(tensor)
diagonal = tensor.diagonal()
return tn.Node(np.diag(1/diagonal))
|
b5c06728b1e88bedec7d19591f0aa4823e90f412
| 3,639,751
|
def map_field_name_to_label(form):
"""Takes a form and creates label to field name map.
:param django.forms.Form form: Instance of ``django.forms.Form``.
:return dict:
"""
return dict([(field_name, field.label)
for (field_name, field)
in form.base_fields.items()])
|
dfc2779f498fb479553602a72d9520d398746302
| 3,639,752
|
from typing import Callable
from typing import List
def solve_compound_rec(
recurrence_func: Callable,
parameter_list: List[float],
std_of_compound_dist: float,
max_mRNA_copy_number: int,
recursion_length: int,
index_compound_parameter: int = 3,
compounding_distribution: str = "normal",
decimal_precision: int = 100,
) -> List[float]:
"""Compound distribution.
Calls solve_compound() to obtain recurrence coefficients h_i and computes probability distribution using invgenfunc()
Arguments:
recurrence_func: the recurrence relation function over which to compound
parameter_list: list of parameters accepted by solFunc
std_of_compound_dist: standard deviation of the compounding distribution
max_mRNA_copy_number: maximal mRNA copy number. The distribution is evaluated for n=0:N-1
recursion_length: recursion length. The number of terms evaluated recursively
        index_compound_parameter: index of the parameter over which the solution is compounded
compounding_distribution: string specifying the type of compounding distribution
decimal_precision: integer specifying the precision used by the Decimal class
Returns:
        probability distribution for mRNA copy numbers n=0:N-1.
Raises:
AssertionError: distribution given not supported
"""
assert compounding_distribution in ["normal", "lognormal", "gamma"]
H = solve_compound(
recurrence_func,
parameter_list,
std_of_compound_dist,
recursion_length,
index_compound_parameter,
compounding_distribution,
compound_over_recurrence_terms=True,
decimal_precision=decimal_precision,
)
return [
invgenfunc(H, n, precision=decimal_precision)
for n in range(0, max_mRNA_copy_number)
]
|
22e9106872ed185cf635fe958707da4244eee60c
| 3,639,753
|
def create_brand():
"""
Creates a new brand with the given info
:return: Status of the request
"""
check = check_brand_parameters(request)
if check is not None:
return check
name = request.json[NAME]
brand = Brand.query.filter(Brand.name == name).first()
if brand is not None:
return BadRequestResponse('Existing brand').make()
# retrieve the brand name from the request, create a new Brand object and save it
brand = Brand(name=name)
assign_parameters_to_brand(brand, request)
brand.save()
return DataResponse({RESULTS: brand.to_json()}).make()
|
5c745aa1050b574cc659cf17bc06d1bdeb424b13
| 3,639,754
|
import os
def update(args):
"""Traverse third-party repos / submodules and emit version-strings"""
failures = []
ver = "///< This file is autogenerated by 'scripts/xnvme_3p.py'\n"
ver += "const char *xnvme_3p_ver[] = {\n"
for project, err in traverse_projects(args):
print("project: %s, success: %r" % (project["name"], not err))
if err:
failures.append(project)
continue
guard = args.guards[project["name"]]
if not guard:
ver += '\t"%s",\n' % (project["ver"])
continue
ver += "#ifdef %s\n" % guard
ver += '\t"%s",\n' % (project["ver"])
ver += "#else\n"
ver += '\t"%s;NOSYS",\n' % (project["name"])
ver += "#endif\n"
ver += "\t0,\t///< For array-termination\n"
ver += "};\n"
if failures:
print("Got failures -- not updating")
return 1
with open(os.path.join(args.repos, "src", "xnvme_3p_ver.c"), "wt") as vfd:
vfd.write(ver)
return 0
|
bb66e6d49f8c255d68bac75c7ead8bb53b6753c7
| 3,639,755
|
def feature_contained(boundary: geo, **kwargs):
"""Analyse containment for all features within a single-layer vector file according to a Geometry
and return multiple GeoJSON files."""
geom, prop = kwargs["geom"], kwargs["prop"]
if isinstance(geom, geo.Polygon):
prop["valid"] = boundary.contains(geom)
return geom, prop
|
5fbad0dbb915dfa5e422bfeb356a63d4290df07d
| 3,639,756
|
import time
def compute(n=26):
""" Computes 2 to the power of n and returns elapsed time"""
start = time.time()
res = 0
for i in range(2**n):
res += 1
end = time.time()
dt = end - start
print(f'Result {res} in {dt} seconds!')
return dt
|
d816c587302830f0acd20a59905c8634fcf20b49
| 3,639,757
|
from sympy.core.singleton import S
import mpmath
def _real_to_rational(expr, tolerance=None, rational_conversion='base10'):
"""
Replace all reals in expr with rationals.
Examples
========
>>> from sympy import Rational
>>> from sympy.simplify.simplify import _real_to_rational
>>> from sympy.abc import x
>>> _real_to_rational(.76 + .1*x**.5)
sqrt(x)/10 + 19/25
If rational_conversion='base10', this uses the base-10 string. If
rational_conversion='exact', the exact, base-2 representation is used.
>>> _real_to_rational(0.333333333333333, rational_conversion='exact')
6004799503160655/18014398509481984
>>> _real_to_rational(0.333333333333333)
1/3
"""
expr = _sympify(expr)
inf = Float('inf')
p = expr
reps = {}
reduce_num = None
if tolerance is not None and tolerance < 1:
reduce_num = ceiling(1/tolerance)
for fl in p.atoms(Float):
key = fl
if reduce_num is not None:
r = Rational(fl).limit_denominator(reduce_num)
elif (tolerance is not None and tolerance >= 1 and
fl.is_Integer is False):
r = Rational(tolerance*round(fl/tolerance)
).limit_denominator(int(tolerance))
else:
if rational_conversion == 'exact':
r = Rational(fl)
reps[key] = r
continue
elif rational_conversion != 'base10':
raise ValueError("rational_conversion must be 'base10' or 'exact'")
r = nsimplify(fl, rational=False)
# e.g. log(3).n() -> log(3) instead of a Rational
if fl and not r:
r = Rational(fl)
elif not r.is_Rational:
if fl == inf or fl == -inf:
r = S.ComplexInfinity
elif fl < 0:
fl = -fl
d = Pow(10, int((mpmath.log(fl)/mpmath.log(10))))
r = -Rational(str(fl/d))*d
elif fl > 0:
d = Pow(10, int((mpmath.log(fl)/mpmath.log(10))))
r = Rational(str(fl/d))*d
else:
r = Integer(0)
reps[key] = r
return p.subs(reps, simultaneous=True)
|
ce685079459e3bc6e19decc2f22c3bc8198411c0
| 3,639,758
|
import multiprocessing
import itertools
import functools
import numpy as np
def run(block, epsilon, ratio, prng, alpha=2, beta=1.2, gamma=1.0, theta=None, verbose=False):
"""Run HDPView
1st phase, divide blocks.
2nd phase, perturbation.
Prepare parameters and execute HDPView
Args:
block (CountTable): block
epsilon (float): privacy budget
        ratio (float): budget ratio of block division and perturbation, a 0 to 1 value
prng (np.random.RandomState): random state
alpha (float), beta(float), gamma(float)
verbose (bool)
"""
seed = prng.randint(0, 2949672950)
block.set_random(seed)
if verbose:
print("seed: ", seed)
n_dash = block.size()
kappa = np.ceil(np.log2(n_dash)*beta)
epsilon_r = epsilon * ratio
epsilon_p = epsilon * (1 - ratio)
if theta is None:
theta = 1/epsilon_p
epsilon_cut = (1 - gamma) * epsilon_r / kappa
lamb = ((2 * alpha - 1)/(alpha - 1) + 1) * (2 / (gamma * epsilon_r))
delta = lamb*np.log(alpha)
# prepare shared memories for parallelization
manager = multiprocessing.Manager()
block_queue = manager.Queue()
block_queue.put(block)
block_result_list = []
MAX_PROCESS = multiprocessing.cpu_count()-1
pool = multiprocessing.Pool(MAX_PROCESS)
while True:
async_results = []
while not block_queue.empty():
result = pool.apply_async(
recursive_bisection, (block_queue.get(), block_queue, epsilon_cut, kappa, theta, lamb, delta, verbose)
)
async_results.append(result)
results = list(itertools.chain.from_iterable([ r.get() for r in async_results ]))
block_result_list.extend(results)
if block_queue.empty():
break
block_result_list.sort(key=functools.cmp_to_key(range__gt__))
for block_result in block_result_list:
mean, ae = calculate_mean_and_aggregation_error(block, block_result.domain_dict)
block_result.mean = mean
block_result.aggregation_error = ae
pe = prng.laplace(0.0, 1.0 / epsilon_p)
block_result.perturbation_error = pe
return NoisedCountTable.from_count_table(block, block_result_list), block_result_list
|
51a7f9f8a739de76b3c8b188a2d0495f42be1cbe
| 3,639,759
|
def wait_for_view(class_name):
"""
Waits for a View matching the specified class. Default timeout is 20 seconds.
:param class_name:the {@link View} class to wait for
:return:{@code true} if the {@link View} is displayed and {@code false} if it is not displayed before the timeout
"""
return get_solo().wait_for_view(class_name)
|
8829f1af924540d92923c1ce44742f8d4d223ca8
| 3,639,760
|
def _create_table():
"""helper for crc calculation"""
table = []
for i in range(256):
k = i
for _ in range(8):
if k & 1:
k = (k >> 1) ^ 0xEDB88320
else:
k >>= 1
table.append(k)
return table
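# Sanity check (illustrative): with the reflected polynomial 0xEDB88320 this is
# the standard CRC-32 lookup table, so the first entries are well-known constants.
table = _create_table()
assert table[0] == 0x00000000 and table[1] == 0x77073096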
|
830317e62dcfb7bca63f1186b46a2882e0bb399f
| 3,639,761
|
def average_syllables(verses):
"""
Takes a list of verses
Returns the mean number of syllables among input verses
"""
verse_count = len(verses)
syll_counts = list(map(count_syllables, verses))
syll_count = sum(syll_counts)
return syll_count / verse_count
|
4cb4d53431b5ccaa2c4ca08083242089feb61be5
| 3,639,762
|
def _create_subplots_if_needed(ntotal,
ncols=None,
default_ncols=1,
fieldorder='C',
avoid_single_column=False,
sharex=False,
sharey=False,
subfigsize=(12,3),
wspace=0.2,
hspace=0.2,
fig=None,
ax=None
):
"""
Auxiliary function to create fig and ax
If fig and ax are None:
- Set nrows and ncols based on ntotal and specified ncols,
accounting for fieldorder and avoid_single_column
- Create fig and ax with nrows and ncols, taking into account
sharex, sharey, subfigsize, wspace, hspace
If fig and ax are not None:
- Try to determine nrows and ncols from ax
- Check whether size of ax corresponds to ntotal
"""
if ax is None:
if not ncols is None:
# Use ncols if specified and appropriate
assert(ntotal%ncols==0), 'Error: Specified number of columns is not a true divisor of total number of subplots'
nrows = int(ntotal/ncols)
else:
            # Default number of columns
ncols = default_ncols
nrows = int(ntotal/ncols)
if fieldorder=='F':
# Swap number of rows and columns
nrows, ncols = ncols, nrows
if avoid_single_column and ncols==1:
# Swap number of rows and columns
nrows, ncols = ncols, nrows
# Create fig and ax with nrows and ncols
fig,ax = plt.subplots(nrows=nrows,ncols=ncols,sharex=sharex,sharey=sharey,figsize=(subfigsize[0]*ncols,subfigsize[1]*nrows))
# Adjust subplot spacing
fig.subplots_adjust(wspace=wspace,hspace=hspace)
else:
# Make sure user-specified axes has appropriate size
assert(np.asarray(ax).size==ntotal), 'Specified axes does not have the right size'
# Determine nrows and ncols in specified axes
if isinstance(ax,mpl.axes.Axes):
nrows, ncols = (1,1)
else:
try:
nrows,ncols = np.asarray(ax).shape
except ValueError:
# ax array has only one dimension
# Determine whether ax is single row or single column based
# on individual ax positions x0 and y0
x0s = [axi.get_position().x0 for axi in ax]
y0s = [axi.get_position().y0 for axi in ax]
if all(x0==x0s[0] for x0 in x0s):
# All axis have same relative x0 position
nrows = np.asarray(ax).size
ncols = 1
elif all(y0==y0s[0] for y0 in y0s):
# All axis have same relative y0 position
nrows = 1
ncols = np.asarray(ax).size
else:
# More complex axes configuration,
# currently not supported
raise InputError('could not determine nrows and ncols in specified axes, complex axes configuration currently not supported')
return fig, ax, nrows, ncols
|
d164cd0d73632fcbcb38face930e2aa5c7300728
| 3,639,763
|
def template_file_counter(session, templates, fetch_count=False):
"""Create template file counter."""
file_counts = {}
default_count = None
if fetch_count:
file_counts = TemplatesDAO.query_file_counts(session=session, templates=templates)
default_count = 0
def counter(template: Template) -> int:
"""Get matched files count for the template."""
return file_counts.get(template.id, default_count)
return counter
|
9a931e11385cba0c7f2968740cc8059da341cd50
| 3,639,764
|
def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
"""initialize score matrix and traceback matrix for global alignment
Parameters
----------
aln1 : list
list of activities, which is the first sequence to be aligned
aln2 : list
list of activities, which is the second sequence to be aligned
gap_open_penalty : int
gap_extend_penalty : int
Returns
-------
score_matrix: matrix
traceback_matrix: matrix
"""
shape = (len(aln2)+1, len(aln1)+1)
score_matrix = np.zeros(shape)
    traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
# cache some values for quicker access
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
for i in range(1, shape[0]):
score_matrix[i, 0] = +gap_open_penalty + ((i-1) * gap_extend_penalty)
traceback_matrix[i, 0] = vgap
for i in range(1, shape[1]):
score_matrix[0, i] = +gap_open_penalty + ((i-1) * gap_extend_penalty)
traceback_matrix[0, i] = hgap
return score_matrix, traceback_matrix
|
46c4426a5570ed9dceb0409b5bd4ac2ccb8efb10
| 3,639,765
|
import re
def parse_path_params(end_point_path):
"""Parse path parameters."""
numeric_item_types = ['Lnn', 'Zone', 'Port', 'Lin']
params = []
for partial_path in end_point_path.split('/'):
if (not partial_path or partial_path[0] != '<' or
partial_path[-1] != '>'):
continue
# remove all non alphanumeric characters
param_name = re.sub('[^0-9a-zA-Z]+', '', partial_path.title())
if param_name in numeric_item_types:
param_type = 'integer'
else:
param_type = 'string'
params.append((param_name, param_type))
return params
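# Minimal usage sketch with a hypothetical endpoint path: angle-bracketed
# segments become parameters, and known numeric names map to 'integer'.
print(parse_path_params('/platform/3/cluster/<lnn>/drives/<driveid>'))
# expected: [('Lnn', 'integer'), ('Driveid', 'string')]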
|
895c3b3663c33a6883ba34d7bbfb20de1491910d
| 3,639,766
|
def read_pid_stat(pid="self"):
"""
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
"""
with open("/proc/%s/stat" % (pid,), "rb") as f:
stat = f.readline().split()
return {
"utime": int(stat[13]),
"stime": int(stat[14]),
"cutime": int(stat[15]),
"cstime": int(stat[16]),
}
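# Minimal usage sketch (Linux only; requires a mounted /proc filesystem):
times = read_pid_stat("self")
print(times["utime"], times["stime"])   # clock ticks spent in user / kernel mode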
|
5ec6b21b09372e71e6dcf8c60f418bcbc4beee64
| 3,639,767
|
import numpy as np
def simple_split_with_list(x, y, train_fraction=0.8, seed=None):
"""Splits data stored in a list.
The data x and y are list of arrays with shape [batch, ...].
These are split in two sets randomly using train_fraction over the number of
element of the list. Then these sets are returned with
the arrays concatenated along the first dimension
"""
n_subjects = len(x)
n_train = int(n_subjects * train_fraction)
print('Split: Total %d -- Training %d' % (n_subjects, n_train))
random_idx = np.random.RandomState(seed=seed).permutation(n_subjects)
train_idx = random_idx[:n_train]
test_idx = random_idx[n_train:]
x_train = np.concatenate([x[i] for i in train_idx], axis=0)
y_train = np.concatenate([y[i] for i in train_idx], axis=0)
x_test = np.concatenate([x[i] for i in test_idx], axis=0)
y_test = np.concatenate([y[i] for i in test_idx], axis=0)
return x_train, y_train, x_test, y_test
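# Minimal usage sketch: three "subjects" with different numbers of samples,
# split 2/1 into train/test and concatenated along the first axis.
x = [np.random.rand(4, 2), np.random.rand(3, 2), np.random.rand(5, 2)]
y = [np.zeros(4), np.ones(3), np.zeros(5)]
x_train, y_train, x_test, y_test = simple_split_with_list(x, y, train_fraction=0.67, seed=0)
print(x_train.shape[0] + x_test.shape[0])   # 12 samples in total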
|
c99ae6507b934b42577949ee3a9226e68e870da9
| 3,639,768
|
from typing import Tuple
def save_correlation_heatmap_results(
correlations: pd.DataFrame, intensity_label: str = "Intensity", show_suptitle: bool = True,
close_plots: str = "all", exp_has_techrep: bool = False, **kwargs
) -> Tuple[plt.Figure, plt.Axes]:
"""
Saves the plot with prefix: {{name}}
Parameters
----------
correlations
DataFrame containing the correlations to be plotted
intensity_label
label of the dataframe
show_suptitle
should the figure title be shown
close_plots
which plots should be closed when creating the plot, if None no plots will be closed
exp_has_techrep
whether technical replicates were aggregated for the plot
kwargs
{kwargs}
"""
# TODO save csv
if close_plots is not None:
plt.close(close_plots)
num_cols, a = correlations.shape
assert num_cols == a, "Dataframe needs to be quadratic"
mask = np.zeros_like(correlations).astype(bool)
mask[np.triu_indices_from(mask)] = True
wid_hei = 4 + 0.5 * num_cols
fig, ax = plt.subplots(1, 1, figsize=(wid_hei, wid_hei))
if show_suptitle:
fig.suptitle(f"Correlation Heatmap {intensity_label}" + (TECHREP_SUFFIX if exp_has_techrep else ""))
mesh = ax.pcolormesh(np.ma.masked_where(mask, correlations.values), cmap="coolwarm")
ax.figure.colorbar(mesh, ax=ax)
ax.invert_yaxis()
# set x and y ticks
ax.set_xticks(np.linspace(start=0.5, stop=num_cols - 0.5, num=num_cols))
ax.set_xticklabels(correlations.index, rotation=90)
ax.set_yticks(np.linspace(start=0.5, stop=num_cols - 0.5, num=num_cols))
ax.set_yticklabels(correlations.index)
# annotate values
for x, col in enumerate(correlations.columns):
for y, idx in enumerate(correlations.index):
if not mask[y, x]:
ax.text(x + 0.5, y + 0.5, f"{correlations.loc[idx, col]:.4f}", ha="center", va="center")
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
return fig, ax
|
78f2bffcd9fc200ad152aa95177ba3a3bb6f3c3c
| 3,639,769
|
def get_star(star_path, verbose=False, recreate=False):
"""Return a varconlib.star.Star object based on its name.
Parameters
----------
star_path : str
A string representing the name of the directory where the HDF5 file
containing a `star.Star`'s data can be found.
Optional
--------
verbose : bool, Default: False
If *True*, write out additional information.
recreate : bool, Default: False
If *True*, first recreate the star from observations before returning
it. This will only work on stars which already have an HDF5 file saved,
and will not create new ones.
Returns
-------
`star.Star`
A Star object from the directory. Note that this will only use already-
existing stars, it will not create ones which do not already exist from
their observations.
"""
assert star_path.exists(), FileNotFoundError('Star directory'
f' {star_path}'
' not found.')
# Flip boolean value, since to recreate (True) the star requires setting its
# load_data argument to False.
recreate = not recreate
try:
return Star(star_path.stem, star_path, load_data=recreate)
except IndexError:
vprint(f'Excluded {star_path.stem}.')
pass
except HDF5FileNotFoundError:
vprint(f'No HDF5 file for {star_path.stem}.')
pass
except AttributeError:
vprint(f'Affected star is {star_path.stem}.')
raise
except PickleFilesNotFoundError:
vprint(f'No pickle files found for {star_path.stem}')
pass
|
f6ccd804e5998e42a0fb4d1d4368e2d65244d855
| 3,639,770
|
import glob
import os
def get_terminal_map():
"""Get a map of device-id -> path as a dict.
Used by Process.terminal()
"""
ret = {}
ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
for name in ls:
assert name not in ret, name
try:
ret[os.stat(name).st_rdev] = name
except FileNotFoundError:
pass
return ret
|
50a4f56e3e2db87a620ab97f485b776c6ac35b6c
| 3,639,771
|
import os
def targets(inventory="/etc/ansible/hosts", **kwargs):
"""
Return the targets from the ansible inventory_file
Default: /etc/salt/roster
"""
if not os.path.isfile(inventory):
raise CommandExecutionError("Inventory file not found: {}".format(inventory))
extra_cmd = []
if "export" in kwargs:
extra_cmd.append("--export")
if "yaml" in kwargs:
extra_cmd.append("--yaml")
inv = salt.modules.cmdmod.run(
"ansible-inventory -i {} --list {}".format(inventory, " ".join(extra_cmd))
)
if kwargs.get("yaml", False):
return salt.utils.stringutils.to_str(inv)
else:
return salt.utils.json.loads(salt.utils.stringutils.to_str(inv))
|
630d525ef671dcc62c1cd7ae600bb95e3f8c97d5
| 3,639,772
|
import datetime
from django.utils import timezone  # assumed: Django's timezone utilities provide now() and utc
def get_relative_days(days):
"""Calculates a relative date/time in the past without any time offsets.
This is useful when a service wants to have a default value of, for example 7 days back. If an ISO duration format
is used, such as P7D then the current time will be factored in which results in the earliest day being incomplete
when computing an absolute time stamp.
:param days: The number of days back to calculate from now.
:type days: int
:returns: An absolute time stamp that is the complete range of relative days back.
:rtype: datetime.datetime
"""
base_date = (timezone.now() - datetime.timedelta(days=days)).date()
return datetime.datetime.combine(base_date, datetime.time.min).replace(tzinfo=timezone.utc)
|
58e429708e7d1c3cbda88c09bfb978f32cb1892a
| 3,639,773
|
def find_pending_trade(df):
""" Find the trade value according to its sign like negative number means Sell type
or positive number means Buy """
p_df = pd.DataFrame()
p_df['Type'] = df['Buy_Qty'] - df['Sell_Qty']
return p_df['Type'].map(lambda val: trade_type_conversion(val))
|
1e764929cb047b6d8314732902dcc273176c924b
| 3,639,774
|
def rtri(x, a, b):
"""Convolution of rect(ax) with tri(bx)."""
assert a > 0
assert b > 0
    return b*(step2(x + 1/(2*a) + 1/b) - 2*step2(x + 1/(2*a)) + step2(x + 1/(2*a) - 1/b)
              - step2(x - 1/(2*a) + 1/b) + 2*step2(x - 1/(2*a)) - step2(x - 1/(2*a) - 1/b))
|
74745bf680507a2d3627c31c14ccc24db4b2d2d1
| 3,639,775
|
def make_xgboost_predict_extractor(
eval_shared_model: tfma.EvalSharedModel,
eval_config: tfma.EvalConfig,
) -> extractor.Extractor:
"""Creates an extractor for performing predictions using a xgboost model.
The extractor's PTransform loads and runs the serving pickle against
every extract yielding a copy of the incoming extracts with an additional
extract added for the predictions keyed by tfma.PREDICTIONS_KEY. The model
inputs are searched for under tfma.FEATURES_KEY.
Args:
    eval_shared_model: Shared model (single-model evaluation).
    eval_config: Eval config for the evaluation run.
  Returns:
Extractor for extracting predictions.
"""
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
return extractor.Extractor(
stage_name=_PREDICT_EXTRACTOR_STAGE_NAME,
ptransform=_ExtractPredictions( # pylint: disable=no-value-for-parameter
eval_shared_models={m.model_name: m
for m in eval_shared_models},
eval_config=eval_config))
|
01c44814023e2c960ec695ee8a7282f2a9d0b21f
| 3,639,776
|
def training_dataset() -> Dataset:
"""Creating the dataframe."""
data = {
"record1": [
{"@first_name": "Hans", "@last_name": "Peter"},
{"@first_name": "Heinrich", "@last_name": "Meier"},
{"@first_name": "Hans", "@last_name": "Peter"},
],
"record2": [
{"@first_name": "Hans", "@last_name": "Petre"},
{"@first_name": "Heinz", "@last_name": "Meier"},
{"@first_name": "Hansel", "@last_name": "Peter"},
],
"label": ["duplicate", "not_duplicate", "duplicate"],
}
return Dataset.from_dict(data)
|
1d3ecc780044036aa8f8425abf8dba556649af98
| 3,639,777
|
def getWCSForcamera(cameraname, crpix1, crpix2):
""" Return SIP non-linear coordiante correction object intialized for a camera from a lookup table.
If the camera is not in the lookup table, an identify transformation is returned.
TODO: variable order, so far limit ouselves to second order
TODO: Time-constraint lookup.
:param cameraname: Name of camera, e.g., ak01
:param crpix1: CRPIX1 for camera, as that my have changed over time
:param crpix2: CRPIX2 for camera, as that my have changed over time
:return:
"""
m = 2
sip_a = np.zeros((m + 1, m + 1), np.double)
sip_b = np.zeros((m + 1, m + 1), np.double)
if cameraname in akwcslookup:
sip_a[1][1] = akwcslookup[cameraname]['SIPA_1_1']
sip_a[2][0] = akwcslookup[cameraname]['SIPA_2_0']
sip_a[0][2] = akwcslookup[cameraname]['SIPA_0_2']
sip_b[1][1] = akwcslookup[cameraname]['SIPB_1_1']
sip_b[2][0] = akwcslookup[cameraname]['SIPB_2_0']
sip_b[0][2] = akwcslookup[cameraname]['SIPB_0_2']
sip = Sip(sip_a, sip_b, None, None, [crpix1, crpix2])
return sip
|
f2905e16eada7b3f4806b9a5db7fba065283de6c
| 3,639,778
|
import numpy as np
def get_full_frac_val(r_recalc,fs,diff_frac=0,bypass_correction=0):
"""
Compute total offset in number of samples, and also fractional sample correction.
Parameters
----------
r_recalc : float
delay.
fs : float
sampling frequency.
    diff_frac : int, optional
        [unused] 0 by default.
bypass_correction : int
| if 0: corrects the fractional sample correction to be between -0.5 and +0.5.
| if 1: returns the fractional sample correction between 0 and 1.
Returns
-------
full_fractional_recalc : float
total offset in number of samples, including fractional part.
fractional_recalc : float
fractional sample correction.
Notes
-----
Bypass correction used in get_frac_over for simplicity.
"""
fractional_recalc=((r_recalc)*fs)
full_fractional_recalc=fractional_recalc
fractional_recalc=np.mod(fractional_recalc,1)
if bypass_correction==0:
fractional_recalc_out=fractional_recalc-(fractional_recalc>0.5).astype(np.float64)
else:
fractional_recalc_out=fractional_recalc
return([full_fractional_recalc,fractional_recalc_out])
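# Minimal usage sketch: a 2.3e-7 s delay sampled at 64 MHz.
full, frac = get_full_frac_val(2.3e-7, 64e6)
print(full)   # ~14.72  (total offset in samples)
print(frac)   # ~-0.28  (fractional sample correction, mapped into (-0.5, 0.5])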
|
23bf8326472844b16c87ac28e1065156bf20ce8b
| 3,639,779
|
import inspect
def get_code():
"""
returns the code for the min cost path function
"""
return inspect.getsource(calculate_path)
|
bad3e06b11b0897b9225ffc9ab6dc81972f4442b
| 3,639,780
|
import functools
def box_net(images,
level,
num_anchors,
num_filters,
is_training,
act_type,
repeats=4,
separable_conv=True,
survival_prob=None,
strategy=None,
data_format='channels_last'):
"""Box regression network."""
if separable_conv:
conv_op = functools.partial(
tf.layers.separable_conv2d, depth_multiplier=1,
data_format=data_format,
pointwise_initializer=tf.initializers.variance_scaling(),
depthwise_initializer=tf.initializers.variance_scaling())
else:
conv_op = functools.partial(
tf.layers.conv2d,
data_format=data_format,
kernel_initializer=tf.random_normal_initializer(stddev=0.01))
for i in range(repeats):
orig_images = images
images = conv_op(
images,
num_filters,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i)
images = utils.batch_norm_act(
images,
is_training,
act_type=act_type,
init_zero=False,
strategy=strategy,
data_format=data_format,
name='box-%d-bn-%d' % (i, level))
if i > 0 and survival_prob:
images = utils.drop_connect(images, is_training, survival_prob)
images = images + orig_images
boxes = conv_op(
images,
4 * num_anchors,
kernel_size=3,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
return boxes
|
8caab72f716c0f754efb82467556a5b016d2f72e
| 3,639,781
|
from typing import Set
def get_distance_to_center(
element: object, centers: "Set[object]", distance_function: "function"
) -> float:
"""
Returns the distance from the given point to its center
:param element: a point to get the distance for
    :param centers: an iterable of the center points
:param distance_function: a function that will compare two datapoints
:return: the distance from the element to its center
"""
return distance_function(
element, get_nearest_center(centers, element, distance_function)
)
|
bd4a400e5a98711d00c5c236ce02945bd2014719
| 3,639,782
|
def auto_delete_file_on_change(sender, instance, **kwargs):
"""
Deletes old file from filesystem when corresponding
`Worksheet` object is updated with a new file.
"""
if not instance.pk:
return False
db_obj = Worksheet.objects.get(pk=instance.pk)
exists = True
try:
old_file = db_obj.worksheet_file
except Worksheet.DoesNotExist:
exists = False
if exists:
new_file = instance.worksheet_file
if old_file != new_file:
db_obj.worksheet_file.delete(save=False)
exists = True
try:
old_file = db_obj.solution_file
except Worksheet.DoesNotExist:
exists = False
if exists:
new_file = instance.solution_file
if old_file != new_file:
db_obj.solution_file.delete(save=False)
|
2ab584fffbe2224109c4d4ea0446c22650df493f
| 3,639,783
|
def clean_immigration_data(validPorts: dict, immigration_usa_df: psd.DataFrame, spark: pss.DataFrame) -> psd.DataFrame:
"""[This cleans immigration data in USA. It casts date of immigrant entry, city of destination, and port entry.]
Args:
        validPorts (dict): [dictionary that includes valid entry ports in USA]
immigration_usa_df (psd.DataFrame): [Spark dataframe that includes immigration data in USA]
spark (pss.DataFrame): [Spark session that is used for executing queries]
Returns:
psd.DataFrame: [spark dataframe that includes clean data of immigration movement in usa]
"""
# cast cities that are valid
valid_city_code = list(set(city_code.keys()))
str_valid_city_code = str(valid_city_code).replace('[', '(').replace(']', ')')
# cast ports that are valid
valid_port_code = list(set(validPorts.keys()))
str_valid_port_code = str(valid_port_code).replace('[', '(').replace(']', ')')
# clean immigration data
clean_immigration_usa_df = spark.sql(f'''
select date(concat(cast(i94yr as int), "-", cast(i94mon as int), "-01")) as dt, cast(i94addr as varchar(2)), cast(i94port as varchar(3))
from immigration_usa_table
where i94yr is not null and i94mon is not null and i94addr is not null and i94port is not null and
i94addr in {str_valid_city_code} and i94port in {str_valid_port_code}
''')
return clean_immigration_usa_df
|
8746f0f4e35745d653549c32c5a804422d22c588
| 3,639,784
|
from pathlib import Path
def prepare(args: dict, overwriting: bool) -> Path:
"""Load config and key file,create output directories and setup log files.
Args:
args (dict): argparser dictionary
Returns:
Path: output directory path
"""
output_dir = make_dir(args, "results_tmp", "aggregation", overwriting)
create_log_files(output_dir)
return output_dir
|
dca02f4180e91423fa61e8da36e9ac095dbe3ca4
| 3,639,785
|
def dd_wave_function_array(x, u_array, Lx):
"""Returns numpy array of all second derivatives
of waves in Fourier sum"""
coeff = 2 * np.pi / Lx
f_array = wave_function_array(x, u_array, Lx)
return - coeff ** 2 * u_array ** 2 * f_array
|
d815afe12916643c46fcc7b8f108ce5aad840e3b
| 3,639,786
|
from typing import Iterable
import os
def plot_energy_fluxes(solver, fsrs, group_bounds=None, norm=True,
loglog=True, get_figure=False):
"""Plot the scalar flux vs. energy for one or more FSRs.
The Solver must have converged the FSR sources before calling this routine.
The routine will generate a step plot of the flux across each energy group.
An optional parameter for the energy group bounds may be input. The group
bounds should be input in increasing order of energy. If group bounds are
not specified, the routine will use equal width steps for each energy group.
Parameters
----------
solver : openmoc.Solver
An OpenMOC solver used to compute the flux
fsrs : Iterable of Integral
The FSRs for which to plot the flux
group_bounds : Iterable of Real or None, optional
The bounds of the energy groups
norm : bool, optional
Whether to normalize the fluxes to a unity flux sum (True by default)
loglog : bool
Whether to use a log scale on the x- and y-axes (True by default)
get_figure : bool
Whether to return the Matplotlib figure
Returns
-------
fig : list of matplotlib.Figure or None
The Matplotlib figures are returned if get_figure is True
Examples
--------
A user may invoke this function from an OpenMOC Python file as follows:
>>> openmoc.plotter.plot_energy_fluxes(solver, fsrs=[1,5,20], \
group_bounds=[0., 0.625, 2e7])
"""
global solver_types
cv.check_type('solver', solver, solver_types)
cv.check_type('fsrs', fsrs, Iterable, Integral)
cv.check_type('norm', norm, bool)
cv.check_type('loglog', loglog, bool)
cv.check_type('get_figure', get_figure, bool)
geometry = solver.getGeometry()
num_groups = geometry.getNumEnergyGroups()
if group_bounds:
cv.check_type('group_bounds', group_bounds, Iterable, Real)
if not all(low < up for low, up in zip(group_bounds, group_bounds[1:])):
py_printf('ERROR', 'Unable to plot the flux vs. energy since ' +
'the group bounds are not monotonically increasing')
else:
        group_bounds = np.arange(num_groups+1, dtype=int)
loglog = False
py_printf('NORMAL', 'Plotting the scalar fluxes vs. energy...')
global subdirectory, matplotlib_rcparams
directory = openmoc.get_output_directory() + subdirectory
# Ensure that normal settings are used even if called from ipython
curr_rc = matplotlib.rcParams.copy()
update_rc_param(curr_rc)
# Make directory if it does not exist
try:
os.makedirs(directory)
except OSError:
pass
# Compute difference in energy bounds for each group
group_deltas = np.ediff1d(group_bounds)
group_bounds = np.flipud(group_bounds)
group_deltas = np.flipud(group_deltas)
    # Initialize an empty list of Matplotlib figures if requested by the user
figures = []
# Iterate over all source regions
for fsr in fsrs:
# Allocate memory for an array of this FSR's fluxes
        fluxes = np.zeros(num_groups, dtype=float)
# Extract the flux in each energy group
for group in range(num_groups):
fluxes[group] = solver.getFlux(fsr, group+1)
# Normalize fluxes to the total integrated flux
if norm:
fluxes /= np.sum(group_deltas * fluxes)
# Initialize a separate plot for this FSR's fluxes
fig = plt.figure()
fig.patch.set_facecolor('none')
# Draw horizontal/vertical lines on the plot for each energy group
for group in range(num_groups):
# Horizontal line
if loglog:
plt.loglog(group_bounds[group:group+2], [fluxes[group]]*2,
linewidth=3, c='b', label='openmoc', linestyle='-')
else:
plt.plot(group_bounds[group:group+2], [fluxes[group]]*2,
linewidth=3, c='b', label='openmoc', linestyle='-')
# Vertical lines
if group < num_groups - 1:
if loglog:
plt.loglog([group_bounds[group+1]]*2, fluxes[group:group+2],
c='b', linestyle='--')
else:
plt.plot([group_bounds[group+1]]*2, fluxes[group:group+2],
c='b', linestyle='--')
plt.xlabel('Energy')
plt.ylabel('Flux')
plt.xlim((min(group_bounds), max(group_bounds)))
plt.grid()
plt.title('FSR {0} Flux ({1} groups)'.format(fsr, num_groups))
# Save the figure to a file or return to user if requested
if geometry.isRootDomain():
if get_figure:
figures.append(fig)
else:
filename = 'flux-fsr-{0}.png'.format(fsr)
plt.savefig(directory+filename, bbox_inches='tight')
plt.close(fig)
# Restore settings if called from ipython
update_rc_param(curr_rc)
# Return the figures if requested by user
if get_figure:
return figures
|
e640a70b9f002f41755bc6dfbec45121f237f27f
| 3,639,787
|
def handle_health_check():
"""Return response 200 for successful health check"""
return Response(status=200)
|
3ff055a7dc5e1318dd0e283ace87399c54e361b2
| 3,639,788
|
def pow(x, n):
""" pow(x, n)
Power function.
"""
return x**n
|
09d62a68607bf0dab8b380a0c3ee58c6ed4497d6
| 3,639,789
|
def download_cad_model():
"""Download cad dataset."""
return _download_and_read('42400-IDGH.stl')
|
1f7aad5ed9c8f62ffef16cb3f44207b83aced204
| 3,639,790
|
def dot_product(u, v):
"""Computes dot product of two vectors u and v, each represented as a tuple
or list of coordinates. Assume the two vectors are the same length."""
output = 0
for i in range(len(u)):
output += (u[i]*v[i])
return output
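# Minimal usage sketch:
print(dot_product((1, 2, 3), (4, 5, 6)))   # 1*4 + 2*5 + 3*6 = 32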
|
6362776bef32870d3b380aecbb2037483e049092
| 3,639,791
|
import datetime
def precise_diff(
d1, d2
): # type: (typing.Union[datetime.datetime, datetime.date], typing.Union[datetime.datetime, datetime.date]) -> PreciseDiff
"""
Calculate a precise difference between two datetimes.
:param d1: The first datetime
:type d1: datetime.datetime or datetime.date
:param d2: The second datetime
:type d2: datetime.datetime or datetime.date
:rtype: PreciseDiff
"""
sign = 1
if d1 == d2:
return PreciseDiff(0, 0, 0, 0, 0, 0, 0, 0)
tzinfo1 = d1.tzinfo if isinstance(d1, datetime.datetime) else None
tzinfo2 = d2.tzinfo if isinstance(d2, datetime.datetime) else None
if (
tzinfo1 is None
and tzinfo2 is not None
or tzinfo2 is None
and tzinfo1 is not None
):
raise ValueError(
"Comparison between naive and aware datetimes is not supported"
)
if d1 > d2:
d1, d2 = d2, d1
sign = -1
d_diff = 0
hour_diff = 0
min_diff = 0
sec_diff = 0
mic_diff = 0
total_days = _day_number(d2.year, d2.month, d2.day) - _day_number(
d1.year, d1.month, d1.day
)
in_same_tz = False
tz1 = None
tz2 = None
# Trying to figure out the timezone names
# If we can't find them, we assume different timezones
if tzinfo1 and tzinfo2:
if hasattr(tzinfo1, "name"):
# Pendulum timezone
tz1 = tzinfo1.name
elif hasattr(tzinfo1, "zone"):
# pytz timezone
tz1 = tzinfo1.zone
if hasattr(tzinfo2, "name"):
tz2 = tzinfo2.name
elif hasattr(tzinfo2, "zone"):
tz2 = tzinfo2.zone
in_same_tz = tz1 == tz2 and tz1 is not None
if isinstance(d2, datetime.datetime):
if isinstance(d1, datetime.datetime):
# If we are not in the same timezone
# we need to adjust
#
# We also need to adjust if we do not
# have variable-length units
if not in_same_tz or total_days == 0:
offset1 = d1.utcoffset()
offset2 = d2.utcoffset()
if offset1:
d1 = d1 - offset1
if offset2:
d2 = d2 - offset2
hour_diff = d2.hour - d1.hour
min_diff = d2.minute - d1.minute
sec_diff = d2.second - d1.second
mic_diff = d2.microsecond - d1.microsecond
else:
hour_diff = d2.hour
min_diff = d2.minute
sec_diff = d2.second
mic_diff = d2.microsecond
if mic_diff < 0:
mic_diff += 1000000
sec_diff -= 1
if sec_diff < 0:
sec_diff += 60
min_diff -= 1
if min_diff < 0:
min_diff += 60
hour_diff -= 1
if hour_diff < 0:
hour_diff += 24
d_diff -= 1
y_diff = d2.year - d1.year
m_diff = d2.month - d1.month
d_diff += d2.day - d1.day
if d_diff < 0:
year = d2.year
month = d2.month
if month == 1:
month = 12
year -= 1
else:
month -= 1
leap = int(is_leap(year))
days_in_last_month = DAYS_PER_MONTHS[leap][month]
days_in_month = DAYS_PER_MONTHS[int(is_leap(d2.year))][d2.month]
if d_diff < days_in_month - days_in_last_month:
# We don't have a full month, we calculate days
if days_in_last_month < d1.day:
d_diff += d1.day
else:
d_diff += days_in_last_month
elif d_diff == days_in_month - days_in_last_month:
# We have exactly a full month
# We remove the days difference
# and add one to the months difference
d_diff = 0
m_diff += 1
else:
# We have a full month
d_diff += days_in_last_month
m_diff -= 1
if m_diff < 0:
m_diff += 12
y_diff -= 1
return PreciseDiff(
sign * y_diff,
sign * m_diff,
sign * d_diff,
sign * hour_diff,
sign * min_diff,
sign * sec_diff,
sign * mic_diff,
sign * total_days,
)
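# Minimal usage sketch (assumes the module-level helpers used above, namely
# PreciseDiff, _day_number, is_leap and DAYS_PER_MONTHS, are available):
d1 = datetime.datetime(2020, 1, 31, 12, 0, tzinfo=datetime.timezone.utc)
d2 = datetime.datetime(2021, 3, 1, 18, 30, tzinfo=datetime.timezone.utc)
print(precise_diff(d1, d2))   # signed, field-by-field difference (years, months, days, ...)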
|
21c2a2a275ce23e8282c0563218d4aacc1f0accd
| 3,639,792
|
import utool as ut # NOQA
import importlib
from xdoctest import docscrape_google
from xdoctest import core as xdoc_core
from xdoctest import static_analysis as static
import types
import sys
import inspect
def get_module_doctest_tup(
testable_list=None,
check_flags=True,
module=None,
allexamples=None,
needs_enable=None,
N=0,
verbose=True,
testslow=False,
):
"""
Parses module for testable doctesttups
enabled_testtup_list (list): a list of testtup
testtup (tuple): (name, num, src, want, flag) describes a valid doctest in the module
name (str): test name
num (str): test number of the module / function / class / method
src (str): test source code
want (str): expected test result
flag (str): a valid commandline flag to enable this test
frame_fpath (str): module fpath that will be tested
all_testflags (list): the command line arguments that will enable different tests
module (module): the actual module that will be tested
exclude_inherited (bool): does not included tests defined in other modules
Args:
testable_list (list): a list of functions (default = None)
check_flags (bool): (default = True)
module (None): (default = None)
allexamples (None): (default = None)
needs_enable (None): (default = None)
N (int): (default = 0)
verbose (bool): verbosity flag(default = True)
testslow (bool): (default = False)
Returns:
ModuleDoctestTup : (enabled_testtup_list, frame_fpath, all_testflags, module)
CommandLine:
python -m utool.util_tests --exec-get_module_doctest_tup
Example:
>>> from utool.util_tests import * # NOQA
>>> import utool as ut
>>> #testable_list = [ut.util_import.package_contents]
>>> testable_list = None
>>> check_flags = False
>>> module = ut.util_cplat
>>> allexamples = False
>>> needs_enable = None
>>> N = 0
>>> verbose = True
>>> testslow = False
>>> mod_doctest_tup = get_module_doctest_tup(testable_list, check_flags,
>>> module, allexamples,
>>> needs_enable, N, verbose,
>>> testslow)
>>> result = ('mod_doctest_tup = %s' % (ut.repr4(mod_doctest_tup, nl=4),))
>>> print(result)
"""
# +------------------------
if VERBOSE_TEST:
        print('[util_test.get_module_doctest_tup][DEPTH 2] get_module_doctest_tup()')
if needs_enable is None:
needs_enable = not ut.get_argflag('--enableall')
# needs_enable = True
TEST_ALL_EXAMPLES = allexamples or ut.get_argflag(('--allexamples', '--all-examples'))
parse_testables = True
if isinstance(testable_list, types.ModuleType):
# hack
module = testable_list
testable_list = []
testable_name_list = []
elif testable_list is None:
testable_list = []
testable_name_list = []
else:
testable_name_list = [ut.get_funcname(func) for func in testable_list]
parse_testables = False
# L________________________
# +------------------------
# GET_MODULE_DOCTEST_TUP Step 1:
# Inspect caller module for testable names
if module is None:
frame_fpath = '???'
try:
            # This is a bit finicky. Need to be exactly N frames under the main
# module
frame = ut.get_parent_frame(N=N)
main_modname = '__main__'
frame_name = frame.f_globals['__name__']
frame_fpath = frame.f_globals['__file__']
if frame_name == main_modname:
module = sys.modules[main_modname]
entry_modname = ut.get_modname_from_modpath(module.__file__)
if entry_modname in ['kernprof', 'kernprof-script']:
# kernprof clobbers the __main__ variable.
# workaround by reimporting the module name
modname = ut.get_modname_from_modpath(frame_fpath)
module = importlib.import_module(modname)
except Exception as ex:
print(frame.f_globals)
ut.printex(ex, keys=['frame', 'module'])
raise
allexamples = False
else:
frame_fpath = module.__file__
allexamples = True
# L________________________
# +------------------------
# GET_MODULE_DOCTEST_TUP Step 2:
# --- PARSE TESTABLE FUNCTIONS ---
# Get testable functions
if parse_testables:
try:
if verbose or VERBOSE_TEST and ut.NOT_QUIET:
print('[ut.test] Iterating over module funcs')
print('[ut.test] module =%r' % (module,))
_testableiter = ut.iter_module_doctestable(module, include_inherited=False)
for key, val in _testableiter:
if isinstance(val, staticmethod):
docstr = inspect.getdoc(val.__func__)
else:
docstr = inspect.getdoc(val)
docstr = ut.ensure_unicode(docstr)
if docstr is not None and (
docstr.find('Example') >= 0 or docstr.find('Doctest') >= 0
):
testable_name_list.append(key)
testable_list.append(val)
if VERBOSE_TEST and ut.NOT_QUIET:
print('[ut.test] Testable: %s' % (key,))
else:
if VERBOSE_TEST and ut.NOT_QUIET:
if docstr.find('Example') >= 0 or docstr.find('Doctest') >= 0:
print('[ut.test] Ignoring (disabled) : %s' % key)
else:
print('[ut.test] Ignoring (no Example) : %s' % key)
except Exception as ex:
print('FAILED')
print(docstr)
ut.printex(ex, keys=['frame'])
raise
# OUTPUTS: testable_list
# L________________________
# +------------------------
# GET_MODULE_DOCTEST_TUP Step 3:
# --- FILTER TESTABLES_---
# Get testable function examples
test_sentinals = [
'ENABLE_DOCTEST',
'ENABLE_GRID_DOCTEST',
]
if testslow or ut.get_argflag(('--testall', '--testslow', '--test-slow')):
test_sentinals.append('SLOW_DOCTEST')
if testslow or ut.get_argflag(('--testall', '--testunstable')):
test_sentinals.append('UNSTABLE_DOCTEST')
# FIND THE TEST NAMES REQUESTED
# Grab sys.argv enabled tests
cmdline_varargs = ut.get_cmdline_varargs()
force_enable_testnames_ = cmdline_varargs[:]
valid_prefix_list = ['--test-', '--exec-', '--dump-']
# if False:
for arg in sys.argv:
for prefix in valid_prefix_list:
if arg.startswith(prefix):
testname = arg[len(prefix) :]
# testname = testname.split(':')[0].replace('-', '_')
force_enable_testnames_.append(testname)
# break
# PartA: Fixup names
# TODO: parse out requested test number here
# instead of later in the code. See PartB
force_enable_testnames = []
for testname in force_enable_testnames_:
        testname = testname.split(':')[0].replace('-', '_')
force_enable_testnames.append(testname)
def _get_testable_name(testable):
if isinstance(testable, staticmethod):
testable = testable.__func__
try:
testable_name = testable.func_name
except AttributeError as ex1:
try:
testable_name = testable.__name__
except AttributeError as ex2:
ut.printex(ex1, ut.repr4(dir(testable)))
ut.printex(ex2, ut.repr4(dir(testable)))
raise
return testable_name
sorted_testable = sorted(list(set(testable_list)), key=_get_testable_name)
# Append each testable example
if VERBOSE_TEST:
print('Vars:')
print(' * needs_enable = %r' % (needs_enable,))
print(' * force_enable_testnames = %r' % (force_enable_testnames,))
print(' * len(sorted_testable) = %r' % (len(sorted_testable),))
print(' * cmdline_varargs = %r' % (cmdline_varargs,))
indenter = ut.Indenter('[FIND_AVAIL]')
indenter.start()
# PARSE OUT THE AVAILABLE TESTS FOR EACH REQUEST
local_testtup_list = []
for testable in sorted_testable:
short_testname = _get_testable_name(testable)
full_testname = None # Namespaced classname (within module)
if isinstance(testable, staticmethod):
testable = testable.__func__
if hasattr(testable, '__ut_parent_class__'):
# HACK for getting classname.funcname
test_namespace = testable.__ut_parent_class__.__name__
full_testname = test_namespace + '.' + short_testname
else:
test_namespace = None
full_testname = short_testname
nametup = tuple(ut.unique([full_testname, short_testname]))
# modpath = ut.get_modpath(module)
examptup = get_doctest_examples(testable)
examples, wants, linenums, func_lineno, docstr = examptup
total_examples = len(examples)
if total_examples > 0:
for testno, srcwant_tup in enumerate(zip(examples, wants)):
src, want = srcwant_tup
src_ = ut.regex_replace('from __future__ import.*$', '', src)
test_disabled = not any([src_.find(s) >= 0 for s in test_sentinals])
skip = (
needs_enable
and test_disabled
and ut.isdisjoint(nametup, force_enable_testnames)
)
if not skip:
if VERBOSE_TEST:
print(
' * HACK adding testname=%r to local_testtup_list'
% (full_testname,)
)
local_testtup = (
nametup,
testno,
src_,
want,
test_namespace,
short_testname,
total_examples,
)
local_testtup_list.append(local_testtup)
else:
if VERBOSE_TEST:
# print('force_enable_testnames = %r' % (force_enable_testnames,))
# print('nametup = %r' % (nametup,))
# print('needs_enable = %r' % (needs_enable,))
# print('test_disabled = %r' % (test_disabled,))
print(' * skipping: %r / %r' % (short_testname, full_testname))
else:
print(
'WARNING: no examples in %r for testname=%r'
% (frame_fpath, full_testname)
)
if verbose:
print(testable)
print(examples)
print(wants)
print(docstr)
if VERBOSE_TEST:
print(' --')
if VERBOSE_TEST:
indenter.stop()
# L________________________
# +------------------------
# Get enabled (requested) examples
if VERBOSE_TEST:
print('\n-----\n')
indenter = ut.Indenter('[IS_ENABLED]')
indenter.start()
print('Finished parsing available doctests.')
print('Now we need to find which examples are enabled')
print('len(local_testtup_list) = %r' % (len(local_testtup_list),))
print(
'local_testtup_list.T[0:2].T = %s'
% ut.repr4(ut.take_column(local_testtup_list, [0, 1]))
)
print('sys.argv = %r' % (sys.argv,))
all_testflags = []
enabled_testtup_list = []
distabled_testflags = []
subx = ut.get_argval(
'--subx', type_=int, default=None, help_='Only tests the subxth example'
)
def make_valid_testnames(name, num, total):
return [
name + ':' + str(num),
name,
name + ':' + str(num - total), # allow negative indices
# prefix + name.replace('_', '-') + ':' + str(num),
# prefix + name.replace('_', '-')
]
def make_valid_test_argflags(prefix, name, num, total):
valid_testnames = make_valid_testnames(name, num, total)
return [prefix + testname for testname in valid_testnames]
def check_if_test_requested(nametup, num, total, valid_prefix_list):
# cmdline_varargs
if VERBOSE_TEST:
print('Checking cmdline for %r %r' % (nametup, num))
valid_argflags = []
# FIXME: PartB
# should parse out test number above instead of here
# See PartA
mode = None
veryverb = 0
# First check positional args
testflag = None
for name in nametup:
valid_testnames = make_valid_test_argflags('', name, num, total)
if veryverb:
print('Checking if positional* %r' % (valid_testnames[0:1],))
print('name = %r' % (name,))
if any([x in cmdline_varargs for x in valid_testnames]):
# hack
mode = 'exec'
testflag = name
flag1 = '--exec-' + name + ':' + str(num)
if testflag is not None:
if veryverb:
print('FOUND POSARG')
print(' * testflag = %r' % (testflag,))
print(' * num = %r' % (num,))
break
# Then check keyword-ish args
if mode is None:
for prefix, name in reversed(list(ut.iprod(valid_prefix_list, nametup))):
valid_argflags = make_valid_test_argflags(prefix, name, num, total)
if veryverb:
print('Checking for flags*: %r' % (valid_argflags[0],))
flag1 = valid_argflags[0]
testflag = ut.get_argflag(valid_argflags)
mode = prefix.replace('-', '')
if testflag:
if veryverb:
print('FOUND VARARG')
break
else:
# print('WARNING NO TEST IS ENABLED %r ' % (nametup,))
pass
checktup = flag1, mode, name, testflag
return checktup
for local_testtup in local_testtup_list:
        (nametup, num, src, want, test_namespace, shortname, total) = local_testtup
checktup = check_if_test_requested(nametup, num, total, valid_prefix_list)
flag1, mode, name, testflag = checktup
testenabled = TEST_ALL_EXAMPLES or not check_flags or testflag
if subx is not None and subx != num:
continue
all_testflags.append(flag1)
if testenabled:
if VERBOSE_TEST:
print('... enabling test')
testtup = TestTuple(
name,
num,
src,
want,
flag1,
frame_fpath=frame_fpath,
mode=mode,
total=total,
nametup=nametup,
shortname=shortname,
test_namespace=test_namespace,
)
if VERBOSE_TEST:
print('... ' + str(testtup))
enabled_testtup_list.append(testtup)
else:
if VERBOSE_TEST:
print('... disabling test')
distabled_testflags.append(flag1)
# Attempt to run test without any context
# This will only work if the function exist and is self contained
if len(force_enable_testnames_) > 0 and len(enabled_testtup_list) == 0:
if VERBOSE_TEST:
print('Forced test did not have a doctest example')
print('Maybe it can be run without any context')
# assert len(force_enable_testnames) == 1
test_funcname_ = force_enable_testnames[0]
if test_funcname_.find('.') != -1:
test_classname, test_funcname = test_funcname_.split('.')
class_ = getattr(module, test_classname, None)
assert class_ is not None
func_ = getattr(class_, test_funcname, None)
else:
test_funcname = test_funcname_
func_ = getattr(module, test_funcname, None)
if VERBOSE_TEST:
print('test_funcname = %r' % (test_funcname,))
print('func_ = %r' % (func_,))
if func_ is not None:
testno = 0
modname = ut.get_modname_from_modpath(module.__file__)
want = None
try:
if VERBOSE_TEST:
print('attempting xdoctest hack')
# hack to get classmethods to read their example using
# the xdoctest port
if func_.__doc__ is None:
raise TypeError
blocks = docscrape_google.split_google_docblocks(func_.__doc__)
example_blocks = []
for type_, block in blocks:
if type_.startswith('Example') or type_.startswith('Doctest'):
example_blocks.append((type_, block))
if len(example_blocks) == 0:
if VERBOSE_TEST:
print('xdoctest found no blocks')
raise KeyError
callname = test_funcname_
hack_testtups = []
for num, (type_, block) in enumerate(example_blocks):
# print('modname = %r' % (modname,))
# print('callname = %r' % (callname,))
# print('num = %r' % (num,))
modpath = static.modname_to_modpath(modname)
example = xdoc_core.DocTest(
modpath=modpath, callname=callname, docsrc=block, num=num
)
src = example.format_src(colored=False, want=False, linenos=False)
want = '\n'.join(list(example.wants()))
testtup = TestTuple(
test_funcname_,
num,
src,
want=want,
flag='--exec-' + test_funcname_,
frame_fpath=frame_fpath,
mode='exec',
total=len(example_blocks),
nametup=[test_funcname_],
)
hack_testtups.append(testtup)
if VERBOSE_TEST:
print('hack_testtups = %r' % (hack_testtups,))
enabled_testtup_list.extend(hack_testtups)
# src = '\n'.join([line[4:] for line in src.split('\n')])
except (ImportError, KeyError, TypeError):
if VERBOSE_TEST:
print('xdoctest hack failed')
# varargs = ut.get_cmdline_varargs()
varargs = force_enable_testnames[1:]
# Create dummy doctest
src = ut.codeblock(
"""
# DUMMY_DOCTEST
from {modname} import * # NOQA
args = {varargs}
result = {test_funcname_}(*args)
print(result)
"""
).format(
modname=modname, test_funcname_=test_funcname_, varargs=repr(varargs)
)
testtup = TestTuple(
test_funcname_,
testno,
src,
want=want,
flag='--exec-' + test_funcname_,
frame_fpath=frame_fpath,
mode='exec',
total=1,
nametup=[test_funcname_],
)
enabled_testtup_list.append(testtup)
else:
print('function %r was not found in %r' % (test_funcname_, module))
if VERBOSE_TEST:
indenter.stop()
if ut.get_argflag('--list'):
# HACK: Should probably just return a richer structure
print('testable_name_list = %s' % (ut.repr4(testable_name_list),))
mod_doctest_tup = ModuleDoctestTup(
enabled_testtup_list, frame_fpath, all_testflags, module
)
# L________________________
return mod_doctest_tup
|
ac7f2fd69180ce7ca24651ae585a7bf55c624399
| 3,639,793
|
def subset_lists(L, min_size=0, max_size=None):
"""Strategy to generate a subset of a `list`.
This should be built in to hypothesis (see hypothesis issue #1115), but was rejected.
Parameters
----------
L : list
List of elements we want to get a subset of.
min_size : int
Minimum size of the resulting subset list.
max_size : int or None
Maximum size of the resulting subset list.
Returns
-------
L : list
List that is subset of `L` with all unique elements.
"""
_check_valid_size_interval(min_size, max_size, "subset list size")
uniq_len = len(set(L))
order_check("input list size", 0, min_size, uniq_len)
max_size = uniq_len if max_size is None else min(uniq_len, max_size)
# Avoid deprecation warning HypothesisDeprecationWarning: sampled_from()
elements_st = nothing() if uniq_len == 0 else sampled_from(L)
S = lists(elements=elements_st, min_size=min_size, max_size=max_size, unique=True)
return S
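# Minimal usage sketch (assumes hypothesis is installed and the validation
# helpers used above are importable from this module):
from hypothesis import given

@given(subset_lists([1, 2, 3, 2], min_size=1))
def test_subset_comes_from_input(sub):
    assert 1 <= len(sub) <= 3
    assert set(sub) <= {1, 2, 3}

test_subset_comes_from_input()   # hypothesis drives the generated examples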
|
1ad343ed6459c12b6c454c71505e8cfa04e9e36e
| 3,639,794
|
def DCNPack(x, extra_feat, out_channels, kernel_size=(3, 3), strides=(1, 1), padding='same', dilations=(1, 1),
use_bias=True, num_groups=1, num_deform_groups=1, trainable=True, dcn_version='v2', name='DCN'):
"""Deformable convolution encapsulation that acts as normal convolution layers."""
with tf.variable_scope(name):
x = tf.cast(x, tf.float32)
if dcn_version == 'v1':
offset = Conv2D(extra_feat, num_deform_groups * 2 * kernel_size[0] * kernel_size[1],
kernel_size=kernel_size, strides=strides, padding=padding, dilations=dilations,
use_bias=use_bias, trainable=trainable, name='conv_offset')
offset = tf.cast(offset, tf.float32)
mask = None
elif dcn_version == 'v2':
conv_offset = Conv2D(extra_feat, num_deform_groups * 3 * kernel_size[0] * kernel_size[1],
kernel_size=kernel_size, strides=strides, padding=padding, dilations=dilations,
use_bias=use_bias, trainable=trainable, name='conv_offset')
conv_offset = tf.cast(conv_offset, tf.float32)
offset = conv_offset[:, :, :, :num_deform_groups * 2 * kernel_size[0] * kernel_size[1]]
mask = conv_offset[:, :, :, num_deform_groups * 2 * kernel_size[0] * kernel_size[1]:]
mask = tf.nn.sigmoid(mask)
else:
raise NotImplementedError
out = DeformableConvLayer(
in_channels=int(x.shape[-1]), out_channels=out_channels,
kernel_size=kernel_size, strides=strides, padding=padding, dilations=dilations,
use_bias=use_bias, num_groups=num_groups, num_deform_groups=num_deform_groups,
trainable=trainable)(x, offset, mask)
return out
|
091669ff8608c2783916d042bac2b2756ca25973
| 3,639,795
|
def endtiming(fn):
"""
Decorator used to end timing.
Keeps track of the count for the first and second calls.
"""
NITER = 10000
def new(*args, **kw):
ret = fn(*args, **kw)
obj = args[0]
if obj.firststoptime == 0:
obj.firststoptime = time.time()
elif obj.secondstoptime == 0:
obj.secondstoptime = time.time()
elif obj.count == NITER:
now = time.time()
total = now - obj.secondstarttime
perrequest = total/NITER
filename = "output/%s-%s" % (str(len(obj.groups[NODE_REPLICA])+1),
str(len(obj.groups[NODE_ACCEPTOR])))
outputfile = open("./"+filename, "a")
# numreplicas #numacceptors #perrequest #total
outputfile.write("%s\t%s\t%s\t%s\n" % (str(len(obj.groups[NODE_REPLICA])+1),
str(len(obj.groups[NODE_ACCEPTOR])),
str(perrequest), str(total)))
outputfile.close()
obj.count += 1
sys.stdout.flush()
profile_off()
profilerdict = get_profile_stats()
            for key, value in sorted(profilerdict.items(),
                                     key=lambda item: (item[1][2], item[0])):
                print("%s: %s" % (key, value))
time.sleep(10)
sys.stdout.flush()
os._exit(0)
else:
obj.count += 1
return ret
return new
|
493fd06b0c28ef1c8c4f3c38c555cf5e52013d80
| 3,639,796
|
def CreateRailFrames(thisNurbsCurve, parameters, multiple=False):
"""
Computes relatively parallel rail sweep frames at specified parameters.
Args:
parameters (IEnumerable<double>): A collection of curve parameters.
Returns:
Plane[]: An array of planes if successful, or an empty array on failure.
"""
url = "rhino/geometry/nurbscurve/createrailframes-nurbscurve_doublearray"
if multiple: url += "?multiple=true"
args = [thisNurbsCurve, parameters]
if multiple: args = list(zip(thisNurbsCurve, parameters))
response = Util.ComputeFetch(url, args)
return response
|
bf62e197d6b7cb83453b43ce441eb062000ca069
| 3,639,797
|
def news(stock):
"""analyzes analyst recommendations using keywords and assigns values to them
:param stock: stock that will be analyzed
:return recommendations value"""
stock = yf.Ticker(str(stock))
reco = str(stock.recommendations) # Stands for recomend
reco = reco.split()
reco.reverse()
del reco[15 :-1]
#### KEY WORDS ###
buy = reco.count("Buy") #Means price is going up = Good
sell = reco.count("Sell") #Means price is going down = Bad
hold = reco.count("Hold") #Means price is going to increase = Good
neutral = reco.count("Neutral") #Means price is not going to drastically change = Neutral
overweight = reco.count("Overweight") #Means stock is better value for money than others = Good
equalweight = reco.count("Equal-Weight") #Means stock is about the same value compared to others = Neutral
underweight = reco.count("Underweight") #Means stock is worse value than what it is assesed to be = Bad
perform = reco.count("Perform") #Means stock performance is on par with the industry average = Neutral
outperform = reco.count("Outperform") #Means stock performance will be slightly better than industry = Good
underperform = reco.count("Underperform") #Means stock performance will be slightly worse than industry = Bad
if (buy + hold + neutral + equalweight + overweight + outperform) == 0:
news = .95 / (sell + underweight + perform + underperform)
elif (sell + underweight + perform + underperform) == 0:
news = 1.05 * (buy + .5 * hold + .1 * neutral + .1 * equalweight +
overweight + outperform)
else:
news = (buy + .5 * hold + .1 * neutral + .1 * equalweight + overweight +
outperform)/(sell + underweight + perform + underperform)
if news < 1:
if news < .5:
news = 1 - news
return news
|
2385f5c212c8802e6572b6efe69c1c791a68c261
| 3,639,798
|
def get_pulse_coefficient(pulse_profile_dictionary, tt):
"""
This function generates an envelope that smoothly goes from 0 to 1, and back down to 0.
It follows the nomenclature introduced to me by working with oscilloscopes.
The pulse profile dictionary will contain a rise time, flat time, and fall time.
The rise time is the time over which the pulse goes from 0 to 1
The flat time is the duration of the flat-top of the pulse
The fall time is the time over which the pulse goes from 1 to 0.
:param pulse_profile_dictionary:
:param tt:
:return:
"""
start_time = pulse_profile_dictionary["start_time"]
end_time = (
start_time
+ pulse_profile_dictionary["rise_time"]
+ pulse_profile_dictionary["flat_time"]
+ pulse_profile_dictionary["fall_time"]
)
this_pulse = 0
if (tt > start_time) and (tt < end_time):
rise_time = pulse_profile_dictionary["rise_time"]
flat_time = pulse_profile_dictionary["flat_time"]
fall_time = pulse_profile_dictionary["fall_time"]
end_rise = start_time + rise_time
end_flat = start_time + rise_time + flat_time
if tt <= end_rise:
normalized_time = (tt - start_time) / rise_time
this_pulse = _get_rise_fall_coeff_(normalized_time)
elif (tt > end_rise) and (tt < end_flat):
this_pulse = 1.0
elif tt >= end_flat:
normalized_time = (tt - end_flat) / fall_time
this_pulse = 1.0 - _get_rise_fall_coeff_(normalized_time)
this_pulse *= pulse_profile_dictionary["a0"]
return this_pulse
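# Minimal usage sketch (assumes _get_rise_fall_coeff_ is defined elsewhere in
# this module; the dictionary keys follow the docstring above):
pulse = {"start_time": 0.0, "rise_time": 1.0, "flat_time": 2.0,
         "fall_time": 1.0, "a0": 5.0}
for t in (0.5, 2.0, 3.5, 10.0):
    print(t, get_pulse_coefficient(pulse, t))   # ramps up, holds at a0, ramps down, 0 outside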
|
1db21359bbbcec44214752ecb5c08a6ee0592593
| 3,639,799
|