| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def hex2int(s: str):
"""Convert a hex-octets (a sequence of octets) to an integer"""
return int(s, 16)
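A couple of illustrative calls (the input strings are arbitrary examples):
```python
assert hex2int("ff") == 255
assert hex2int("0a1b") == 2587  # 0x0A1B
```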
| 5,339,300
|
def add_gender_data(candidates, wd_session, gdata):
"""Add gender data (P21) for Wikidata items if humans (P31:Q5)"""
qids = '|'.join([c['pageprops']['wikibase_item'] for c in candidates if c.get('pageprops', {}).get('wikibase_item')])
GENDER_QUERY_BASE = {
'action': 'wbgetentities',
'props': 'claims',
'format': 'json',
'formatversion': 2,
'ids': qids
}
gender_data = wd_session.get(**GENDER_QUERY_BASE)
    for c in candidates:
qid = c.get('pageprops', {}).get('wikibase_item')
if qid and qid in gender_data['entities']:
entity = gender_data['entities'][qid]['claims']
is_human = False
gender = None
for iof in entity.get('P31', []):
if iof.get('mainsnak', {}).get('datavalue', {}).get('value', {}).get('id') == 'Q5':
is_human = True
break
if is_human:
gdata['humans'] = gdata.get('humans', 0) + 1
if entity.get('P21'):
gender = entity['P21'][0].get('mainsnak', {}).get('datavalue', {}).get('value', {}).get('id')
if gender:
c['gender'] = gender
gdata[gender] = gdata.get(gender, 0) + 1
else:
print("Missing from gender data:", c)
| 5,339,301
|
def into_two(lhs, ctx):
"""Element I
(num) -> push a spaces
    (str) -> equivalent to `qp`
(lst) -> split a list into two halves
"""
ts = vy_type(lhs, simple=True)
return {
NUMBER_TYPE: lambda: " " * int(lhs),
str: lambda: quotify(lhs, ctx) + lhs,
list: lambda: [
index(lhs, [None, int(len(lhs) / 2)], ctx),
index(lhs, [int(len(lhs) / 2), None], ctx),
],
}.get(ts)()
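For the list branch, the element splits at the midpoint. A standalone sketch of that behaviour, assuming `index(lhs, [a, b], ctx)` slices like `lhs[a:b]` (no Vyxal context object needed here):
```python
lhs = [1, 2, 3, 4, 5]
half = int(len(lhs) / 2)
print(lhs[:half], lhs[half:])  # [1, 2] [3, 4, 5]
```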
| 5,339,302
|
def get_subgroup_df_row_generator(csv_path, subgroup_path):
"""
    Generates rows of the benchmark sub-group dataframe containing "file" (str), "gender" (int), "race" (int), "expression_id" (int).
    :param csv_path: path to the sub-group .csv file.
    :param subgroup_path: directory joined with each "file" entry to build the full image path.
    :return: generator yielding (index, file_path, gender, race, expression_id) per row.
"""
col_names = ["file", "gender", "race", "expression_id"]
df = pd.read_csv(csv_path)
for index, row in df.iterrows():
file_path = os.path.join(subgroup_path, row[col_names[0]])
yield index, file_path, row[col_names[1]], row[col_names[2]], row[col_names[3]]
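A hedged usage sketch; the CSV path and image directory below are placeholders, and `pandas`/`os` are assumed to be imported at module level as the body implies:
```python
rows = get_subgroup_df_row_generator("subgroup.csv", "/data/subgroup_images")
for index, file_path, gender, race, expression_id in rows:
    print(index, file_path, gender, race, expression_id)
```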
| 5,339,303
|
def closelog(log, runspyder=True):
"""
Close the log machinery used by the main counting routines by removing the
handlers
.. rubric :: Parameters
log : logging object
The log object
runspyder : bool. Default=True
If ``runspyder=True``, remove the extra handler for stdout added when
running under a Spyder console
"""
# this only works if handler[0] is a file handler
if runspyder: log.removeHandler(log.handlers[1]) #remove only under Spyder
log.handlers[0].close()
log.removeHandler(log.handlers[0])
| 5,339,304
|
def find_renter_choice(par,sol,t,i_beta,i_ht_lag,i_p,a_lag,
inv_v,inv_mu,v,mu,p,valid,do_mu=True):
""" find renter choice - used in both solution and simulation """
v_agg = np.zeros(2)
p_agg = np.zeros(2)
# a. x
iota_lag = -1
i_h_lag = -1
LTV_lag = np.nan
_m,x,_LTV = misc.mx_func(t,iota_lag,i_h_lag,i_p,LTV_lag,a_lag,par)
i_x = linear_interp.binary_search(0,par.Nx,par.grid_x,x)
wx = (x-par.grid_x[i_x])/(par.grid_x[i_x+1]-par.grid_x[i_x])
# b. choices
# 1. renter
i = 0
j = i + par.Nrt
inv_v0 = sol.rt_inv_v[t,i_beta,i_ht_lag,i_p,i_x,:].ravel()
inv_v1 = sol.rt_inv_v[t,i_beta,i_ht_lag,i_p,i_x+1,:].ravel()
inv_mu0 = sol.rt_inv_mu[t,i_beta,i_ht_lag,i_p,i_x,:]
inv_mu1 = sol.rt_inv_mu[t,i_beta,i_ht_lag,i_p,i_x+1,:]
v_agg[0] = update(par,i,j,inv_v0,inv_v1,inv_mu0,inv_mu1,inv_v,inv_mu,wx,valid,v,p,mu,do_mu)
i_rt = i
j_rt = j
# 2. buyer
i = j
j = i + par.Nbt # = par.Ncr
inv_v0 = sol.bt_inv_v[t,i_beta,i_p,i_x,:,:,:].ravel()
inv_v1 = sol.bt_inv_v[t,i_beta,i_p,i_x+1,:,:,:].ravel()
inv_mu0 = sol.bt_inv_mu[t,i_beta,i_p,i_x,:,:,:].ravel()
inv_mu1 = sol.bt_inv_mu[t,i_beta,i_p,i_x+1,:,:,:].ravel()
v_agg[1] = update(par,i,j,inv_v0,inv_v1,inv_mu0,inv_mu1,inv_v,inv_mu,wx,valid,v,p,mu,do_mu)
i_bt = i
j_bt = j
# c. aggregate
if np.any(~np.isinf(v_agg)):
_logsum = logsum_and_choice_probabilities(v_agg,par.sigma_agg,p_agg)
p[i_rt:j_rt] *= p_agg[0]
p[i_bt:j_bt] *= p_agg[1]
Ev = np.nansum(p*v)
if do_mu:
Emu = np.nansum(p*mu)
else:
Emu = np.nan
else:
p[:] = np.nan
Ev = np.nan
Emu = np.nan
return Ev,Emu
| 5,339,305
|
def main():
""" EgoisticLily クライアントモジュール
:return:
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-p', '--port', help='server port number', default='50055')
args = arg_parser.parse_args()
port_str = '[::]:' + args.port
with grpc.insecure_channel(port_str) as channel:
stub = egoisticlily.proto.egoisticlily_pb2_grpc.EgoisticLilyServiceStub(channel)
print('--EgoisticLily Client--')
while True:
kana = input("かな > ")
to_server(stub, kana)
| 5,339,306
|
def met_zhengkl_gh(p, rx, cond_source, n, r):
"""
Zheng 2000 test implemented with Gauss Hermite quadrature.
"""
X, Y = sample_xy(rx, cond_source, n, r)
rate = (cond_source.dx() + cond_source.dy()) * 4./5
# start timing
with util.ContextTimer() as t:
# the test
zheng_gh = cgof.ZhengKLTestGaussHerm(p, alpha, rate=rate)
result = zheng_gh.perform_test(X, Y)
return {
# 'test': zheng_test,
'test_result': result, 'time_secs': t.secs}
| 5,339,307
|
def get_domains(admin_managed: Optional[bool] = None,
include_unverified: Optional[bool] = None,
only_default: Optional[bool] = None,
only_initial: Optional[bool] = None,
only_root: Optional[bool] = None,
supports_services: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainsResult:
"""
Use this data source to access information about existing Domains within Azure Active Directory.
## API Permissions
The following API permissions are required in order to use this data source.
When authenticated with a service principal, this data source requires one of the following application roles: `Domain.Read.All` or `Directory.Read.All`
When authenticated with a user principal, this data source does not require any additional roles.
## Example Usage
```python
import pulumi
import pulumi_azuread as azuread
aad_domains = azuread.get_domains()
pulumi.export("domainNames", [__item.domain_name for __item in [aad_domains.domains]])
```
:param bool admin_managed: Set to `true` to only return domains whose DNS is managed by Microsoft 365. Defaults to `false`.
:param bool include_unverified: Set to `true` if unverified Azure AD domains should be included. Defaults to `false`.
:param bool only_default: Set to `true` to only return the default domain.
:param bool only_initial: Set to `true` to only return the initial domain, which is your primary Azure Active Directory tenant domain. Defaults to `false`.
:param bool only_root: Set to `true` to only return verified root domains. Excludes subdomains and unverified domains.
:param Sequence[str] supports_services: A list of supported services that must be supported by a domain. Possible values include `Email`, `Sharepoint`, `EmailInternalRelayOnly`, `OfficeCommunicationsOnline`, `SharePointDefaultDomain`, `FullRedelegation`, `SharePointPublic`, `OrgIdAuthentication`, `Yammer` and `Intune`.
"""
__args__ = dict()
__args__['adminManaged'] = admin_managed
__args__['includeUnverified'] = include_unverified
__args__['onlyDefault'] = only_default
__args__['onlyInitial'] = only_initial
__args__['onlyRoot'] = only_root
__args__['supportsServices'] = supports_services
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azuread:index/getDomains:getDomains', __args__, opts=opts, typ=GetDomainsResult).value
return AwaitableGetDomainsResult(
admin_managed=__ret__.admin_managed,
domains=__ret__.domains,
id=__ret__.id,
include_unverified=__ret__.include_unverified,
only_default=__ret__.only_default,
only_initial=__ret__.only_initial,
only_root=__ret__.only_root,
supports_services=__ret__.supports_services)
| 5,339,308
|
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if 'strip_whitespace' not in kwargs:
kwargs['strip_whitespace'] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader))
and not isinstance(kwargs.get('comment', ''), six.string_types)):
raise ValueError("for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing.")
return writer
| 5,339,309
|
def find_last_service(obj):
"""Identify last service event for instrument"""
return Service.objects.filter(equipment=obj).order_by('-date').first()
| 5,339,310
|
def swap_tree(tree):
""" Swaps the left and right branches of a tree. """
if tree is None:
return
tree.left, tree.right = tree.right, tree.left
swap_tree(tree.left)
swap_tree(tree.right)
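A minimal sketch with an assumed Node class (the original tree type is not shown); swap_tree mirrors the tree in place:
```python
class Node:
    """Tiny stand-in node with left/right children."""
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

root = Node(1, Node(2), Node(3))
swap_tree(root)  # mirror the tree in place
assert root.left.value == 3 and root.right.value == 2
```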
| 5,339,311
|
def SectionsMenu(base_title=_("Sections"), section_items_key="all", ignore_options=True):
"""
displays the menu for all sections
:return:
"""
items = get_all_items("sections")
return dig_tree(SubFolderObjectContainer(title2=_("Sections"), no_cache=True, no_history=True), items, None,
menu_determination_callback=determine_section_display, pass_kwargs={"base_title": base_title,
"section_items_key": section_items_key,
"ignore_options": ignore_options},
fill_args={"title": "section_title"})
| 5,339,312
|
def perform_save_or_create_role(is_professor, created_user, req_main, is_creating):
"""Performs update or create Student or Professor for user"""
response_verb = 'created' if is_creating else 'updated'
if is_professor is True:
professor_data = None
if 'professor' in req_main.keys():
professor_data = req_main['professor']
if professor_data is not None:
serialized_prof = CreateUpdateProfessorSerializer(data=professor_data)
if serialized_prof.is_valid():
save_or_create_data_in_role(professor_data,
True, is_creating, 'Professor',
created_user)
return 'success'
else:
return Response({"message": f"Professor account could not not be {response_verb}."},
status=status.HTTP_400_BAD_REQUEST)
else:
student_data = None
if 'student' in req_main.keys():
student_data = req_main['student']
if student_data is not None:
serialized_student = CreateUpdateStudentSerializer(data=student_data)
if serialized_student.is_valid():
save_or_create_data_in_role(student_data,
False,
is_creating,
'Student',
created_user)
return 'success'
else:
return Response({"message": f"Student account could not not be {response_verb}."},
status=status.HTTP_400_BAD_REQUEST)
return 'success'
| 5,339,313
|
def ecg_rsp(ecg_rate, sampling_rate=1000, method="vangent2019"):
"""Extract ECG Derived Respiration (EDR).
This implementation is far from being complete, as the information in the related papers
prevents me from getting a full understanding of the procedure. Help is required!
Parameters
----------
ecg_rate : array
The heart rate signal as obtained via `ecg_rate()`.
sampling_rate : int
The sampling frequency of the signal that contains the R-peaks (in Hz,
i.e., samples/second). Defaults to 1000Hz.
method : str
Can be one of 'vangent2019' (default), 'soni2019', 'charlton2016' or 'sarkar2015'.
Returns
-------
array
        A Numpy array containing the ECG-derived respiratory signal (EDR).
Examples
--------
>>> import neurokit2 as nk
>>> import pandas as pd
>>>
>>> # Get heart rate
>>> data = nk.data("bio_eventrelated_100hz")
>>> rpeaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)
>>> ecg_rate = nk.signal_rate(rpeaks, sampling_rate=100, desired_length=len(rpeaks))
>>>
>>>
>>> # Get ECG Derived Respiration (EDR)
>>> edr = nk.ecg_rsp(ecg_rate, sampling_rate=100)
>>> nk.standardize(pd.DataFrame({"EDR": edr, "RSP": data["RSP"]})).plot() #doctest: +ELLIPSIS
<AxesSubplot:>
>>>
>>> # Method comparison (the closer to 0 the better)
>>> nk.standardize(pd.DataFrame({"True RSP": data["RSP"],
... "vangent2019": nk.ecg_rsp(ecg_rate, sampling_rate=100, method="vangent2019"),
... "sarkar2015": nk.ecg_rsp(ecg_rate, sampling_rate=100, method="sarkar2015"),
... "charlton2016": nk.ecg_rsp(ecg_rate, sampling_rate=100, method="charlton2016"),
... "soni2019": nk.ecg_rsp(ecg_rate, sampling_rate=100,
... method="soni2019")})).plot() #doctest: +ELLIPSIS
<AxesSubplot:>
References
----------
- van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2019). HeartPy: A novel heart rate algorithm
for the analysis of noisy signals. Transportation research part F: traffic psychology and behaviour,
66, 368-378.
- Sarkar, S., Bhattacherjee, S., & Pal, S. (2015). Extraction of respiration signal from ECG for
respiratory rate estimation.
- Charlton, P. H., Bonnici, T., Tarassenko, L., Clifton, D. A., Beale, R., & Watkinson, P. J. (2016).
An assessment of algorithms to estimate respiratory rate from the electrocardiogram and photoplethysmogram.
Physiological measurement, 37(4), 610.
- Soni, R., & Muniyandi, M. (2019). Breath rate variability: a novel measure to study the meditation
effects. International Journal of Yoga, 12(1), 45.
"""
method = method.lower()
if method in [
"sarkar2015"
]: # https://www.researchgate.net/publication/304221962_Extraction_of_respiration_signal_from_ECG_for_respiratory_rate_estimation # noqa: E501
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=0.1, highcut=0.7, order=6)
elif method in ["charlton2016"]: # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5390977/#__ffn_sectitle
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=4 / 60, highcut=60 / 60, order=6)
elif method in ["soni2019"]: # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6329220/
rsp = signal_filter(ecg_rate, sampling_rate, highcut=0.5, order=6)
elif method in [
"vangent2019"
]: # https://github.com/paulvangentcom/heartrate_analysis_python/blob/1597e8c0b2602829428b22d8be88420cd335e939/heartpy/analysis.py#L541 # noqa: E501
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=0.1, highcut=0.4, order=2)
else:
raise ValueError(
"NeuroKit error: ecg_rsp(): 'method' should be "
"one of 'sarkar2015', 'charlton2016', 'soni2019' or "
"'vangent2019'."
)
return rsp
| 5,339,314
|
def bootstrap(request):
"""Concatenates bootstrap.js files from all installed Hue apps."""
    # Has some None's for apps that don't have bootstraps.
all_bootstraps = [(app, app.get_bootstrap_file()) for app in appmanager.DESKTOP_APPS if request.user.has_hue_permission(action="access", app=app.name)]
# Iterator over the streams.
concatenated = ["\n/* %s */\n%s" % (app.name, b.read()) for app, b in all_bootstraps if b is not None]
    # HttpResponse can take an iterable as the first argument, which
# is what happens here.
return HttpResponse(concatenated, content_type='text/javascript')
| 5,339,315
|
def get_selection(selection):
"""Return a valid model selection."""
if not isinstance(selection, str) and not isinstance(selection, list):
raise TypeError('The selection setting must be a string or a list.')
if isinstance(selection, str):
if selection.lower() == 'all' or selection == '':
selection = None
elif selection.startswith('topics'):
selection = [selection]
return selection
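The function is self-contained, so its normalisation rules can be illustrated directly:
```python
assert get_selection("all") is None                 # 'all' (any case) means no filtering
assert get_selection("") is None                    # empty string also means no filtering
assert get_selection("topics") == ["topics"]        # topic selections are wrapped in a list
assert get_selection(["lda", "nmf"]) == ["lda", "nmf"]  # lists pass through unchanged
```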
| 5,339,316
|
def getAllImageFilesInHierarchy(path):
"""
Returns a list of file paths relative to 'path' for all images under the given directory,
recursively looking in subdirectories
"""
    return list(scan_tree(path))
| 5,339,317
|
def list_package(connection, args):
"""List information about package contents"""
package = sap.adt.Package(connection, args.name)
for pkg, subpackages, objects in sap.adt.package.walk(package):
basedir = '/'.join(pkg)
if basedir:
basedir += '/'
if not args.recursive:
for subpkg in subpackages:
print(f'{basedir}{subpkg}')
for obj in objects:
print(f'{basedir}{obj.name}')
if not args.recursive:
break
if not subpackages and not objects:
print(f'{basedir}')
return 0
| 5,339,318
|
def calc_hebrew_bias(probs):
"""
:param probs: list of negative log likelihoods for a Hebrew corpus
:return: gender bias in corpus
"""
bias = 0
for idx in range(0, len(probs), 16):
bias -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
bias += probs[idx + 2] + probs[idx + 6] + probs[idx + 10] + probs[idx + 14]
return bias / 4
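A synthetic illustration using one 16-entry block of stand-in values (real inputs would be negative log likelihoods):
```python
probs = list(range(16))  # stand-in values, one stride of the loop
# bias = -(probs[1]+probs[5]+probs[9]+probs[13]) + (probs[2]+probs[6]+probs[10]+probs[14]), then / 4
expected = (-(1 + 5 + 9 + 13) + (2 + 6 + 10 + 14)) / 4
assert calc_hebrew_bias(probs) == expected
```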
| 5,339,319
|
def load_wiki(size = 128, validate = True):
"""
Return malaya pretrained wikipedia ELMO size N.
Parameters
----------
size: int, (default=128)
validate: bool, (default=True)
Returns
-------
dictionary: dictionary of dictionary, reverse dictionary and vectors
"""
if not isinstance(size, int):
raise ValueError('size must be an integer')
if size not in [128, 256]:
raise ValueError('size only support [128,256]')
if validate:
check_file(PATH_ELMO[size], S3_PATH_ELMO[size])
else:
if not check_available(PATH_ELMO[size]):
raise Exception(
'elmo-wiki is not available, please `validate = True`'
)
with open(PATH_ELMO[size]['setting'], 'rb') as fopen:
setting = pickle.load(fopen)
g = load_graph(PATH_ELMO[size]['model'])
return ELMO(
g.get_tensor_by_name('import/tokens_characters:0'),
g.get_tensor_by_name('import/tokens_characters_reverse:0'),
g.get_tensor_by_name('import/softmax_score:0'),
generate_session(graph = g),
setting['dictionary'],
setting['char_maxlen'],
setting['steps'],
setting['softmax_weight'],
)
| 5,339,320
|
def check_sentence(rc, s, annotations, is_foreign=False, ignore_warnings=False):
""" Check whether a given single sentence gets the
specified annotations when checked """
def check_sent(sent):
assert sent is not None
if sent.tree is None and not is_foreign:
# If the sentence should not parse, call
# check_sentence with annotations=None
assert annotations is None
return
assert annotations is not None
if not is_foreign:
assert sent.tree is not None
# Compile a list of error annotations, omitting warnings
if not hasattr(sent, "annotations"):
sent_errors = []
else:
sent_errors = [a for a in sent.annotations if not a.code.endswith("/w")]
if not annotations:
# This sentence is not supposed to have any annotations
if ignore_warnings:
assert len(sent_errors) == 0
else:
assert (not hasattr(sent, "annotations")) or len(sent.annotations) == 0
return
assert hasattr(sent, "annotations")
if ignore_warnings:
assert len(sent_errors) == len(annotations)
for a, (start, end, code) in zip(sent_errors, annotations):
assert a.start == start
assert a.end == end
assert a.code == code
else:
assert len(sent.annotations) == len(annotations)
for a, (start, end, code) in zip(sent.annotations, annotations):
assert a.start == start
assert a.end == end
assert a.code == code
# Test check_single()
check_sent(rc.parse_single(s))
# Test check()
for pg in reynir_correct.check(s):
for sent in pg:
check_sent(sent)
# Test check_with_stats()
for pg in reynir_correct.check_with_stats(s)["paragraphs"]:
for sent in pg:
check_sent(sent)
| 5,339,321
|
def generate_pibindex_rois_fs(aparc_aseg):
""" given an aparc aseg in pet space:
generate wm, gm and pibindex rois
make sure they are non-overlapping
return 3 rois"""
wm = mask_from_aseg(aparc_aseg, wm_aseg())
gm = mask_from_aseg(aparc_aseg, gm_aseg())
pibi = mask_from_aseg(aparc_aseg, pibindex_aseg())
# make non-overlapping
wm[pibi==1] = 0
gm[pibi ==1] = 0
gm[wm==1] = 0
return wm, gm, pibi
| 5,339,322
|
def get_log_name(GLASNOST_ROOT, start_time, client_ip, mlab_server):
"""Helper method that given a test key, finds the logfile"""
log_glob = "%s/%s.measurement-lab.org/%s_%s_*" % (start_time.strftime('%Y/%m/%d'), mlab_server, start_time.strftime('%Y-%m-%dT%H:%M:%S'), client_ip)
if start_time < datetime(2010,1,8,5,7,0):
# before this time, the days are +1 in the filenames
dy = start_time.day + 1
log_glob = log_glob[:8] + '%02d'%dy + log_glob[10:]
log_glob = log_glob[:51] + '%02d'%dy + log_glob[53:]
if not sys.platform.startswith("linux"):
log_glob = log_glob.replace(':','_')
logs = glob.glob(os.path.join(GLASNOST_ROOT, log_glob))
if not logs:
# sometimes filename seconds differs by +/-1! change to wildcard
log_glob = log_glob[:61] + '?' + log_glob[62:]
logs = glob.glob(os.path.join(GLASNOST_ROOT, log_glob))
if not logs:
log_glob = log_glob[:60] + '?' + log_glob[61:]
logs = glob.glob(os.path.join(GLASNOST_ROOT, log_glob))
#endif
if len(logs)!=1:
raise Exception('!! log file not found (=%d): %s' % (len(logs),log_glob))
return logs[0]
| 5,339,323
|
def generate_mpc_imitate(dataset, data_params, nn_params, train_params):
"""
Will be used for imitative control of the model predictive controller.
    Could try adding noise to the sampled actions...
"""
class ImitativePolicy(nn.Module):
def __init__(self, nn_params):
super(ImitativePolicy, self).__init__()
# Store the parameters:
self.hidden_w = nn_params['hid_width']
self.depth = nn_params['hid_depth']
self.n_in_input = nn_params['dx']
self.n_out = nn_params['du']
self.activation = nn_params['activation']
self.d = nn_params['dropout']
self.loss_fnc = nn.MSELoss()
# super(ImitativePolicy, self).__init__()
# Takes objects from the training parameters
layers = []
layers.append(nn.Linear(self.n_in_input, self.hidden_w)
) # input layer
layers.append(self.activation)
layers.append(nn.Dropout(p=self.d))
for d in range(self.depth):
# add modules
# input layer
layers.append(nn.Linear(self.hidden_w, self.hidden_w))
layers.append(self.activation)
layers.append(nn.Dropout(p=self.d))
# output layer
layers.append(nn.Linear(self.hidden_w, self.n_out))
self.features = nn.Sequential(*layers)
# Need to scale the state variables again etc
# inputs state, output an action (PWMs)
self.scalarX = StandardScaler() # MinMaxScaler(feature_range=(-1, 1))
self.scalarU = MinMaxScaler(feature_range=(-1, 1))
def forward(self, x):
            # Pass the normalized state through the fully connected feature stack (no pooling involved)
x = self.features(x)
return x
def preprocess(self, dataset): # X, U):
"""
            Preprocess X and U for passing into the neural network. For simplicity, takes in X and U as they are output from generate data, but is only passed the dimensions we want to prepare for real testing. This removes a lot of potential questions that were bugging me in the general implementation. The cosine and sine conversions are done externally.
"""
# Already done is the transformation from
# [yaw, pitch, roll, x_ddot, y_ddot, z_ddot] to
# [sin(yaw), sin(pitch), sin(roll), cos(pitch), cos(yaw), cos(roll), x_ddot, y_ddot, z_ddot]
# dX = np.array([utils_data.states2delta(val) for val in X])
if len(dataset) == 2:
X = dataset[0]
U = dataset[1]
else:
raise ValueError("Improper data shape for training")
self.scalarX.fit(X)
self.scalarU.fit(U)
#Normalizing to zero mean and unit variance
normX = self.scalarX.transform(X)
normU = self.scalarU.transform(U)
inputs = torch.Tensor(normX)
outputs = torch.Tensor(normU)
return list(zip(inputs, outputs))
def postprocess(self, U):
"""
Given the raw output from the neural network, post process it by rescaling by the mean and variance of the dataset
"""
# de-normalize so to say
            U = self.scalarU.inverse_transform(U.reshape(1, -1))
U = U.ravel()
return np.array(U)
def train_cust(self, dataset, train_params, gradoff=False):
"""
Train the neural network.
if preprocess = False
dataset is a list of tuples to train on, where the first value in the tuple is the training data (should be implemented as a torch tensor), and the second value in the tuple
is the label/action taken
if preprocess = True
dataset is simply the raw output of generate data (X, U)
Epochs is number of times to train on given training data,
batch_size is hyperparameter dicating how large of a batch to use for training,
optim is the optimizer to use (options are "Adam", "SGD")
split is train/test split ratio
"""
epochs = train_params['epochs']
batch_size = train_params['batch_size']
optim = train_params['optim']
split = train_params['split']
lr = train_params['lr']
lr_step_eps = train_params['lr_schedule'][0]
lr_step_ratio = train_params['lr_schedule'][1]
preprocess = train_params['preprocess']
if preprocess:
dataset = self.preprocess(dataset) # [0], dataset[1])
trainLoader = DataLoader(
dataset[:int(split*len(dataset))], batch_size=batch_size, shuffle=True)
testLoader = DataLoader(
dataset[int(split*len(dataset)):], batch_size=batch_size)
# Papers seem to say ADAM works better
if(optim == "Adam"):
optimizer = torch.optim.Adam(
super(ImitativePolicy, self).parameters(), lr=lr)
elif(optim == "SGD"):
optimizer = torch.optim.SGD(
super(ImitativePolicy, self).parameters(), lr=lr)
else:
raise ValueError(optim + " is not a valid optimizer type")
# most results at .6 gamma, tried .33 when got NaN
            if lr_step_eps != []:
                scheduler = torch.optim.lr_scheduler.StepLR(
                    optimizer, step_size=lr_step_eps, gamma=lr_step_ratio)
            else:
                scheduler = None  # no LR schedule requested; stepping is skipped below
testloss, trainloss = self._optimize(
self.loss_fnc, optimizer, split, scheduler, epochs, batch_size, dataset) # trainLoader, testLoader)
return testloss, trainloss
def predict(self, X):
"""
Given a state X, predict the desired action U. This function is used when simulating, so it does all pre and post processing for the neural net
"""
#normalizing and converting to single sample
normX = self.scalarX.transform(X.reshape(1, -1))
input = torch.Tensor(normX)
NNout = self.forward(input).data[0]
return NNout
# trainLoader, testLoader):
def _optimize(self, loss_fn, optim, split, scheduler, epochs, batch_size, dataset, gradoff=False):
errors = []
error_train = []
split = split
testLoader = DataLoader(
dataset[int(split*len(dataset)):], batch_size=batch_size)
trainLoader = DataLoader(
dataset[:int(split*len(dataset))], batch_size=batch_size, shuffle=True)
for epoch in range(epochs):
            if scheduler is not None:
                scheduler.step()
avg_loss = torch.zeros(1)
num_batches = len(trainLoader)/batch_size
for i, (input, target) in enumerate(trainLoader):
# Add noise to the batch
if False:
if self.prob:
n_out = int(self.n_out/2)
else:
n_out = self.n_out
noise_in = torch.tensor(np.random.normal(
0, .01, (input.size())), dtype=torch.float)
noise_targ = torch.tensor(np.random.normal(
0, .01, (target.size())), dtype=torch.float)
input.add_(noise_in)
target.add_(noise_targ)
optim.zero_grad() # zero the gradient buffers
# compute the output
output = self.forward(input)
loss = loss_fn(output, target)
                    # add a small loss term on the max and min log-variance if using a probabilistic network
                    # note: adding this term will backprop the values properly
if loss.data.numpy() == loss.data.numpy():
# print(self.max_logvar, self.min_logvar)
if not gradoff:
# backpropagate from the loss to fill the gradient buffers
loss.backward()
optim.step() # do a gradient descent step
# print('tain: ', loss.item())
# if not loss.data.numpy() == loss.data.numpy(): # Some errors make the loss NaN. this is a problem.
else:
# This is helpful: it'll catch that when it happens,
print("loss is NaN")
# print("Output: ", output, "\nInput: ", input, "\nLoss: ", loss)
errors.append(np.nan)
error_train.append(np.nan)
# and give the output and input that made the loss NaN
return errors, error_train
# update the overall average loss with this batch's loss
avg_loss += loss.item()/(len(trainLoader)*batch_size)
# self.features.eval()
test_error = torch.zeros(1)
for i, (input, target) in enumerate(testLoader):
output = self.forward(input)
loss = loss_fn(output, target)
test_error += loss.item()/(len(testLoader)*batch_size)
test_error = test_error
#print("Epoch:", '%04d' % (epoch + 1), "loss=", "{:.9f}".format(avg_loss.data[0]),
# "test_error={:.9f}".format(test_error))
if (epoch % 1 == 0):
print("Epoch:", '%04d' % (epoch + 1), "train loss=", "{:.6f}".format(
avg_loss.data[0]), "test loss=", "{:.6f}".format(test_error.data[0]))
# if (epoch % 50 == 0) & self.prob: print(self.max_logvar, self.min_logvar)
error_train.append(avg_loss.data[0].numpy())
errors.append(test_error.data[0].numpy())
#loss_fn.print_mmlogvars()
return errors, error_train
# create policy object
policy = ImitativePolicy(nn_params)
# train policy
# X, U, _ = df_to_training(df, data_params)
X = dataset[0]
U = dataset[1]
acctest, acctrain = policy.train_cust((X, U), train_params)
if True:
ax1 = plt.subplot(211)
# ax1.set_yscale('log')
ax1.plot(acctest, label='Test Loss')
plt.title('Test Loss')
ax2 = plt.subplot(212)
# ax2.set_yscale('log')
ax2.plot(acctrain, label='Train Loss')
plt.title('Training Loss')
ax1.legend()
plt.show()
# return policy!
return policy
| 5,339,324
|
def rescale(img, mask, factor):
"""Rescale image and mask."""
logging.info('Scaling: %s', array_info(img))
info = img.info
img = ndimage.interpolation.zoom(img, factor + (1,), order=0)
info['spacing'] = [s/f for s, f in zip(info['spacing'], factor)]
mask = rescale_mask(mask, factor)
assert img[..., 0].shape == mask.shape, (img.shape, mask.shape)
img = dwi.image.Image(img, info=info)
return img, mask
| 5,339,325
|
def get_data_all(path):
"""
Get all data of Nest and reorder them.
:param path: the path of the Nest folder
:return:
"""
nb = count_number_of_label(path+ 'labels.csv')
data_pop = {}
for i in range(nb):
label, type = get_label_and_type(path + 'labels.csv', i)
field, data = get_data(label, path)
if type == 'spikes':
data_pop[label]=reorder_data_spike_detector(data)
else:
data_pop[label]=reorder_data_multimeter(data)
return data_pop
| 5,339,326
|
def from_tfrecord_parse(
record,
pre_process_func=None,
jpeg_encoded=False):
"""
This function is made to work with the prepare_data.TFRecordWriter class.
It parses a single tf.Example records.
Arguments:
record : the tf.Example record with the features of
prepare_data.TFRecordWriter
pre_process_func: if not None, must be a pre-processing function that will be applied on the data.
jpeg_encoded : is the data encoded in jpeg format?
Returns:
image: a properly shaped and encoded 2D image.
label: its corresponding label.
"""
features = tf.io.parse_single_example(record, features={
'shape': tf.io.FixedLenFeature([3], tf.int64),
'image': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([1], tf.int64)})
data = tf.io.decode_jpeg(features['image']) if jpeg_encoded else tf.io.decode_raw(features['image'], tf.uint8)
data = tf.reshape(data, features['shape'])
labels = features['label']
# data pre_processing
if pre_process_func:
data, labels = pre_process_func(data, labels)
return data, labels
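A hedged usage sketch wiring the parser into a tf.data pipeline ("train.tfrecord" is a placeholder path; records must carry the shape/image/label features parsed above):
```python
import tensorflow as tf

dataset = (tf.data.TFRecordDataset("train.tfrecord")
           .map(lambda rec: from_tfrecord_parse(rec, jpeg_encoded=True))
           .batch(32))
```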
| 5,339,327
|
def load_shapes_coords(annotation_path):
"""
> TODO: Ensure and correct the clockwise order of the coords of a QUAD
"""
quads_coords = pd.read_csv(annotation_path, header=None)
quads_coords = quads_coords.iloc[:,:-1].values # [n_box, 8]
quads_coords = quads_coords.reshape(-1, 4, 2)
if geometry == "QUAD":
shapes_coords = quads_coords
elif geometry == "RBOX":
        shapes_coords = quads_to_rboxes(quads_coords)
else:
raise ValueError("Invalid Geometry")
return shapes_coords
| 5,339,328
|
def _GenerateAggregatorReduction(emitter, registers, aggregators,
output_address, multiplicative_sum_offset,
additive_sum_offset):
"""Reduce 4 lane sum aggregators to 1 value and store the sums."""
emitter.EmitNewline()
emitter.EmitComment('Aggregator Reduction.')
multiplier = registers.DoubleRegister()
emitter.EmitVMov('32',
emitter.Lane(32, multiplier, 0), multiplicative_sum_offset)
offset = registers.QuadRegister()
emitter.EmitVDup('32', offset, additive_sum_offset)
for aggregator in aggregators:
emitter.EmitVPaddl('u16', aggregator, aggregator)
  reduced_count = (len(aggregators) + 3) // 4
reduced = aggregators[:reduced_count]
emitter.EmitVSumReduce('u32', len(aggregators), 4, reduced, aggregators)
for temp in reduced:
emitter.EmitVMulScalar('i32', temp, temp, emitter.Lane(32, multiplier, 0))
for temp in reduced:
emitter.EmitVAdd('i32', temp, temp, offset)
emitter.EmitVStoreA(1, 32, reduced,
emitter.Dereference(output_address,
_AlignForSums(len(aggregators))))
| 5,339,329
|
def caller_path(steps=1, names=None):
"""Return the path to the file of the current frames' caller."""
frame = sys._getframe(steps + 1)
try:
path = os.path.dirname(frame.f_code.co_filename)
finally:
del frame
if not path:
path = os.getcwd()
if names is not None:
path = os.path.join(path, *names)
return os.path.realpath(path)
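A minimal usage sketch; with steps=0 the frame inspected is the immediate caller, so the calls below resolve relative to the calling module's directory:
```python
here = caller_path(steps=0)                                 # directory of the calling module
cfg = caller_path(steps=0, names=["data", "config.json"])   # .../data/config.json under it
print(here, cfg)
```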
| 5,339,330
|
def reader_json_totals(list_filenames):
"""
This reads the json files with totals and returns them as a list of dicts.
    It only reads files whose names contain "totals.json".
    This way, we can just send the function all the files in the directory and it will take care
    of selecting the appropriate ones.
Returns
----------
list_totals_dict: list dicts
list of dictionaries with the totals
"""
list_totals_dict = []
for file in list_filenames:
# if it is a json results file, we process it.
if "totals.json" in file:
with open(file, 'r') as fp:
data = json.load(fp)
try:
data['1st_react_temp'] = float(re.findall(r"(\d+)C", file)[0])
except IndexError:
data['1st_react_temp'] = np.nan
try:
data['2nd_react_temp'] = float(re.findall(r"(\d+)C", file)[1])
except IndexError:
data['2nd_react_temp'] = np.nan
try:
data['mass ug'] = float(re.findall(r"(\d+) ug", file)[0])
except IndexError:
data['mass ug'] = np.nan
list_totals_dict.append(data)
return list_totals_dict
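The filename parsing can be illustrated on its own (the filename below is made up):
```python
import re

fname = "pyrolysis_350C_450C_500 ug_totals.json"
print(re.findall(r"(\d+)C", fname))    # ['350', '450'] -> 1st and 2nd reactor temperatures
print(re.findall(r"(\d+) ug", fname))  # ['500']        -> sample mass in micrograms
```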
| 5,339,331
|
def test_cdf(dist, grid):
"""
Validate cumulative distribution function.
"""
cdf = dist.cdf
for x, y in grid:
assert_allclose(cdf(x), y, atol=1e-3, err_msg=f'{dist} CDF, x={x}')
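A hedged usage sketch with a SciPy frozen normal distribution, assuming `assert_allclose` (numpy.testing) is imported at module level as the body implies:
```python
from scipy.stats import norm

dist = norm()
grid = [(0.0, 0.5), (1.0, 0.8413), (-1.0, 0.1587)]  # (x, expected CDF) pairs
test_cdf(dist, grid)  # passes within atol=1e-3
```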
| 5,339,332
|
def main(argv=None):
"""
"""
    if argv is None:
argv = sys.argv[1:]
try:
ancestor1_file = argv[0]
ancestor2_file = argv[1]
except IndexError:
err = "Incorrect number of arguments!\n\n%s\n\n" % __usage__
raise CompareAncestorError(err)
out = compareAncestors(ancestor1_file,ancestor2_file)
    print(out)
| 5,339,333
|
def list_scripts(zap_helper):
"""List scripts currently loaded into ZAP."""
scripts = zap_helper.zap.script.list_scripts
output = []
for s in scripts:
if 'enabled' not in s:
s['enabled'] = 'N/A'
output.append([s['name'], s['type'], s['engine'], s['enabled']])
click.echo(tabulate(output, headers=['Name', 'Type', 'Engine', 'Enabled'], tablefmt='grid'))
| 5,339,334
|
def support_message(bot, update):
"""
Receives a message from the user.
If the message is a reply to the user, the bot speaks with the user
sending the message content. If the message is a request from the user,
the bot forwards the message to the support group.
"""
if update.message.reply_to_message and \
update.message.reply_to_message.forward_from:
# If it is a reply to the user, the bot replies the user
bot.send_message(chat_id=update.message.reply_to_message
.forward_from.id,
text=update.message.text)
else:
# If it is a request from the user, the bot forwards the message
# to the group
bot.forward_message(chat_id=int(config['DEFAULT']['support_chat_id']),
from_chat_id=update.message.chat_id,
message_id=update.message.message_id)
bot.send_message(chat_id=update.message.chat_id,
text=_("Give me some time to think. Soon I will return to you with an answer."))
| 5,339,335
|
def is_hitachi(dicom_input):
"""
Use this function to detect if a dicom series is a hitachi dataset
    :param dicom_input: list of dicom headers for one scan (the parsed files of a single directory)
"""
# read dicom header
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
# check if manufacturer is hitachi
if 'HITACHI' not in header.Manufacturer.upper():
return False
return True
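A hedged usage sketch assuming the series was read with pydicom (the file paths are placeholders):
```python
import pydicom

dicom_input = [pydicom.dcmread(p) for p in ["slice_0001.dcm", "slice_0002.dcm"]]
if is_hitachi(dicom_input):
    print("Hitachi MR series detected")
```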
| 5,339,336
|
def index_of_first_signal(evt_index, d, qsets, MAXT3):
""" Check the evt_index of the last signal triplet (MC truth).
Args:
Returns:
"""
first_index = -1
k = 0
for tset in qsets:
for ind in tset: # Pick first of alternatives and break
#[HERE ADD THE OPTION TO CHOOSE e.g. THE BEST RECONSTRUCTION QUALITY !!]
y = np.asarray(d['_BToKEE_is_signal'][evt_index])[ind]
break
if y == 1:
first_index = k
break
k += 1
return first_index
| 5,339,337
|
def before_scenario(request, feature, scenario):
"""Create scenario report for the item."""
request.node.__scenario_report__ = ScenarioReport(scenario=scenario, node=request.node)
| 5,339,338
|
def readcrd(filename, REAL):
"""
It reads the crd file, file that contains the charges information.
Arguments
----------
    filename : name of the crd file that contains the charges information.
REAL : data type.
Returns
-------
pos : (Nqx3) array, positions of the charges.
q : (Nqx1) array, value of the charges.
Nq : int, number of charges.
"""
pos = []
q = []
start = 0
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.split()
if len(line) > 8 and line[0] != '*': # and start==2:
x = line[4]
y = line[5]
z = line[6]
q.append(REAL(line[9]))
pos.append([REAL(x), REAL(y), REAL(z)])
pos = numpy.array(pos)
q = numpy.array(q)
return pos, q
| 5,339,339
|
def load_and_initialize_hub_module(module_path, signature='default'):
"""Loads graph of a TF-Hub module and initializes it into a session.
Args:
module_path: string Path to TF-Hub module.
signature: string Signature to use when creating the apply graph.
Return:
graph: tf.Graph Graph of the module.
session: tf.Session Session with initialized variables and tables.
inputs: dict Dictionary of input tensors.
outputs: dict Dictionary of output tensors.
Raises:
ValueError: If signature contains a SparseTensor on input or output.
"""
graph = tf.Graph()
with graph.as_default():
tf.compat.v1.logging.info('Importing %s', module_path)
module = hub.Module(module_path)
signature_inputs = module.get_input_info_dict(signature)
signature_outputs = module.get_output_info_dict(signature)
# First check there are no SparseTensors in input or output.
for key, info in list(signature_inputs.items()) + list(
signature_outputs.items()):
if info.is_sparse:
raise ValueError(
'Signature "%s" has a SparseTensor on input/output "%s".'
' SparseTensors are not supported.' % (signature, key))
# Create placeholders to represent the input of the provided signature.
inputs = {}
for input_key, input_info in signature_inputs.items():
inputs[input_key] = tf.compat.v1.placeholder(
shape=input_info.get_shape(), dtype=input_info.dtype, name=input_key)
outputs = module(inputs=inputs, signature=signature, as_dict=True)
session = tf.compat.v1.Session(graph=graph)
session.run(tf.compat.v1.global_variables_initializer())
session.run(tf.compat.v1.tables_initializer())
return graph, session, inputs, outputs
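A hedged usage sketch; the module path is a placeholder and the input key depends on the chosen module:
```python
graph, session, inputs, outputs = load_and_initialize_hub_module("/path/to/hub_module")
input_key = next(iter(inputs))
print(input_key, inputs[input_key].shape)  # inspect the expected input placeholder
# result = session.run(outputs, feed_dict={inputs[input_key]: some_batch})
```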
| 5,339,340
|
def get_mention_token_dist(m1, m2):
""" Returns distance in tokens between two mentions """
succ = m1.tokens[0].doc_index < m2.tokens[0].doc_index
first = m1 if succ else m2
second = m2 if succ else m1
return max(0, second.tokens[0].doc_index - first.tokens[-1].doc_index)
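A minimal sketch with namedtuple stand-ins for the mention/token objects (the assumed shape is `.tokens`, each token carrying `.doc_index`):
```python
from collections import namedtuple

Token = namedtuple("Token", "doc_index")
Mention = namedtuple("Mention", "tokens")

m1 = Mention([Token(3), Token(4)])  # spans document tokens 3-4
m2 = Mention([Token(7), Token(8)])  # spans document tokens 7-8
assert get_mention_token_dist(m1, m2) == 3  # 7 - 4
assert get_mention_token_dist(m2, m1) == 3  # order does not matter
```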
| 5,339,341
|
def shlcar3x3(x,y,z, ps):
"""
This subroutine returns the shielding field for the earth's dipole, represented by
2x3x3=18 "cartesian" harmonics, tilted with respect to the z=0 plane (nb#4, p.74)
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
    :param ps: geo-dipole tilt angle in radians.
:return: bx,by,bz. Field components in GSM system, in nT.
"""
# The 36 coefficients enter in pairs in the amplitudes of the "cartesian" harmonics (A(1)-A(36).
# The 14 nonlinear parameters (A(37)-A(50) are the scales Pi,Ri,Qi,and Si entering the arguments of exponents, sines, and cosines in each of the
# 18 "cartesian" harmonics plus two tilt angles for the cartesian harmonics (one for the psi=0 mode and another for the psi=90 mode)
a = np.array([
-901.2327248,895.8011176,817.6208321,-845.5880889,-83.73539535,
86.58542841,336.8781402,-329.3619944,-311.2947120,308.6011161,
31.94469304,-31.30824526,125.8739681,-372.3384278,-235.4720434,
286.7594095,21.86305585,-27.42344605,-150.4874688,2.669338538,
1.395023949,-.5540427503,-56.85224007,3.681827033,-43.48705106,
5.103131905,1.073551279,-.6673083508,12.21404266,4.177465543,
5.799964188,-.3977802319,-1.044652977,.5703560010,3.536082962,
-3.222069852,9.620648151,6.082014949,27.75216226,12.44199571,
5.122226936,6.982039615,20.12149582,6.150973118,4.663639687,
15.73319647,2.303504968,5.840511214,.8385953499E-01,.3477844929])
p1,p2,p3, r1,r2,r3, q1,q2,q3, s1,s2,s3 = a[36:48]
t1,t2 = a[48:50]
cps=np.cos(ps)
sps=np.sin(ps)
s2ps=2*cps # modified here (sin(2*ps) instead of sin(3*ps))
st1=np.sin(ps*t1)
ct1=np.cos(ps*t1)
st2=np.sin(ps*t2)
ct2=np.cos(ps*t2)
x1=x*ct1-z*st1
z1=x*st1+z*ct1
x2=x*ct2-z*st2
z2=x*st2+z*ct2
# make the terms in the 1st sum ("perpendicular" symmetry):
# i=1:
sqpr= np.sqrt(1/p1**2+1/r1**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx1 =-sqpr*expr*cyp*szr
hy1 = expr/p1*syp*szr
fz1 =-expr*cyp/r1*czr
hx1 = fx1*ct1+fz1*st1
hz1 =-fx1*st1+fz1*ct1
sqpr= np.sqrt(1/p1**2+1/r2**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx2 =-sqpr*expr*cyp*szr
hy2 = expr/p1*syp*szr
fz2 =-expr*cyp/r2*czr
hx2 = fx2*ct1+fz2*st1
hz2 =-fx2*st1+fz2*ct1
sqpr= np.sqrt(1/p1**2+1/r3**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx3 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy3 = expr/p1*syp*(z1*czr+x1/r3*szr/sqpr)
fz3 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx3 = fx3*ct1+fz3*st1
hz3 =-fx3*st1+fz3*ct1
# i=2:
sqpr= np.sqrt(1/p2**2+1/r1**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx4 =-sqpr*expr*cyp*szr
hy4 = expr/p2*syp*szr
fz4 =-expr*cyp/r1*czr
hx4 = fx4*ct1+fz4*st1
hz4 =-fx4*st1+fz4*ct1
sqpr= np.sqrt(1/p2**2+1/r2**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx5 =-sqpr*expr*cyp*szr
hy5 = expr/p2*syp*szr
fz5 =-expr*cyp/r2*czr
hx5 = fx5*ct1+fz5*st1
hz5 =-fx5*st1+fz5*ct1
sqpr= np.sqrt(1/p2**2+1/r3**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx6 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy6 = expr/p2*syp*(z1*czr+x1/r3*szr/sqpr)
fz6 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx6 = fx6*ct1+fz6*st1
hz6 =-fx6*st1+fz6*ct1
# i=3:
sqpr= np.sqrt(1/p3**2+1/r1**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx7 =-sqpr*expr*cyp*szr
hy7 = expr/p3*syp*szr
fz7 =-expr*cyp/r1*czr
hx7 = fx7*ct1+fz7*st1
hz7 =-fx7*st1+fz7*ct1
sqpr= np.sqrt(1/p3**2+1/r2**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx8 =-sqpr*expr*cyp*szr
hy8 = expr/p3*syp*szr
fz8 =-expr*cyp/r2*czr
hx8 = fx8*ct1+fz8*st1
hz8 =-fx8*st1+fz8*ct1
sqpr= np.sqrt(1/p3**2+1/r3**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx9 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy9 = expr/p3*syp*(z1*czr+x1/r3*szr/sqpr)
fz9 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx9 = fx9*ct1+fz9*st1
hz9 =-fx9*st1+fz9*ct1
a1=a[0]+a[1]*cps
a2=a[2]+a[3]*cps
a3=a[4]+a[5]*cps
a4=a[6]+a[7]*cps
a5=a[8]+a[9]*cps
a6=a[10]+a[11]*cps
a7=a[12]+a[13]*cps
a8=a[14]+a[15]*cps
a9=a[16]+a[17]*cps
bx=a1*hx1+a2*hx2+a3*hx3+a4*hx4+a5*hx5+a6*hx6+a7*hx7+a8*hx8+a9*hx9
by=a1*hy1+a2*hy2+a3*hy3+a4*hy4+a5*hy5+a6*hy6+a7*hy7+a8*hy8+a9*hy9
bz=a1*hz1+a2*hz2+a3*hz3+a4*hz4+a5*hz5+a6*hz6+a7*hz7+a8*hz8+a9*hz9
# make the terms in the 2nd sum ("parallel" symmetry):
# i=1
sqqs= np.sqrt(1/q1**2+1/s1**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx1 =-sqqs*exqs*cyq*czs *sps
hy1 = exqs/q1*syq*czs *sps
fz1 = exqs*cyq/s1*szs *sps
hx1 = fx1*ct2+fz1*st2
hz1 =-fx1*st2+fz1*ct2
sqqs= np.sqrt(1/q1**2+1/s2**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx2 =-sqqs*exqs*cyq*czs *sps
hy2 = exqs/q1*syq*czs *sps
fz2 = exqs*cyq/s2*szs *sps
hx2 = fx2*ct2+fz2*st2
hz2 =-fx2*st2+fz2*ct2
sqqs= np.sqrt(1/q1**2+1/s3**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx3 =-sqqs*exqs*cyq*czs *sps
hy3 = exqs/q1*syq*czs *sps
fz3 = exqs*cyq/s3*szs *sps
hx3 = fx3*ct2+fz3*st2
hz3 =-fx3*st2+fz3*ct2
# i=2:
sqqs= np.sqrt(1/q2**2+1/s1**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx4 =-sqqs*exqs*cyq*czs *sps
hy4 = exqs/q2*syq*czs *sps
fz4 = exqs*cyq/s1*szs *sps
hx4 = fx4*ct2+fz4*st2
hz4 =-fx4*st2+fz4*ct2
sqqs= np.sqrt(1/q2**2+1/s2**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx5 =-sqqs*exqs*cyq*czs *sps
hy5 = exqs/q2*syq*czs *sps
fz5 = exqs*cyq/s2*szs *sps
hx5 = fx5*ct2+fz5*st2
hz5 =-fx5*st2+fz5*ct2
sqqs= np.sqrt(1/q2**2+1/s3**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx6 =-sqqs*exqs*cyq*czs *sps
hy6 = exqs/q2*syq*czs *sps
fz6 = exqs*cyq/s3*szs *sps
hx6 = fx6*ct2+fz6*st2
hz6 =-fx6*st2+fz6*ct2
# i=3:
sqqs= np.sqrt(1/q3**2+1/s1**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx7 =-sqqs*exqs*cyq*czs *sps
hy7 = exqs/q3*syq*czs *sps
fz7 = exqs*cyq/s1*szs *sps
hx7 = fx7*ct2+fz7*st2
hz7 =-fx7*st2+fz7*ct2
sqqs= np.sqrt(1/q3**2+1/s2**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx8 =-sqqs*exqs*cyq*czs *sps
hy8 = exqs/q3*syq*czs *sps
fz8 = exqs*cyq/s2*szs *sps
hx8 = fx8*ct2+fz8*st2
hz8 =-fx8*st2+fz8*ct2
sqqs= np.sqrt(1/q3**2+1/s3**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx9 =-sqqs*exqs*cyq*czs *sps
hy9 = exqs/q3*syq*czs *sps
fz9 = exqs*cyq/s3*szs *sps
hx9 = fx9*ct2+fz9*st2
hz9 =-fx9*st2+fz9*ct2
a1=a[18]+a[19]*s2ps
a2=a[20]+a[21]*s2ps
a3=a[22]+a[23]*s2ps
a4=a[24]+a[25]*s2ps
a5=a[26]+a[27]*s2ps
a6=a[28]+a[29]*s2ps
a7=a[30]+a[31]*s2ps
a8=a[32]+a[33]*s2ps
a9=a[34]+a[35]*s2ps
bx=bx+a1*hx1+a2*hx2+a3*hx3+a4*hx4+a5*hx5+a6*hx6+a7*hx7+a8*hx8+a9*hx9
by=by+a1*hy1+a2*hy2+a3*hy3+a4*hy4+a5*hy5+a6*hy6+a7*hy7+a8*hy8+a9*hy9
bz=bz+a1*hz1+a2*hz2+a3*hz3+a4*hz4+a5*hz5+a6*hz6+a7*hz7+a8*hz8+a9*hz9
return bx, by, bz
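A hedged usage sketch evaluating the shielding field at one GSM point (the coordinates and tilt are arbitrary example values):
```python
import numpy as np

bx, by, bz = shlcar3x3(x=-5.0, y=0.0, z=2.0, ps=0.17)  # ps ~ 10 deg dipole tilt, in radians
print(bx, by, bz)  # GSM field components in nT
```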
| 5,339,342
|
def raan2ltan(date, raan, type="mean"):
"""Conversion to True Local Time at Ascending Node (LTAN)
Args:
date (Date) : Date of the conversion
raan (float) : RAAN in radians, in EME2000
type (str) : either "mean" or "true"
Return:
float : LTAN in hours
"""
if type == "mean":
mean_solar_angle = raan - _mean_sun_raan(date)
ltan = (12 + mean_solar_angle * 12 / np.pi) % 24
elif type == "true":
theta_sun = (
get_body("Sun")
.propagate(date)
.copy(frame="EME2000", form="spherical")
.theta
)
ltan = ((24 * (raan - theta_sun) / (2 * np.pi)) + 12) % 24
else: # pragma: no cover
raise ValueError("Unknwon Local Time type : {}".format(type))
return ltan
| 5,339,343
|
def _extract_bbox_annotation(prediction, b, obj_i):
"""Constructs COCO format bounding box annotation."""
height = prediction['eval_height'][b]
width = prediction['eval_width'][b]
bbox = _denormalize_to_coco_bbox(
prediction['groundtruth_boxes'][b][obj_i, :], height, width)
if 'groundtruth_area' in prediction:
area = float(prediction['groundtruth_area'][b][obj_i])
else:
# Using the box area to replace the polygon area. This value will not affect
# real evaluation but may fail the unit test.
area = bbox[2] * bbox[3]
annotation = {
'id': b * 1000 + obj_i, # place holder of annotation id.
'image_id': int(prediction['source_id'][b]), # source_id,
'category_id': int(prediction['groundtruth_classes'][b][obj_i]),
'bbox': bbox,
'iscrowd': int(prediction['groundtruth_is_crowd'][b][obj_i]),
'area': area,
'segmentation': [],
}
return annotation
| 5,339,344
|
def main(argv):
"""
Script main method, which loads two machine learning models, performing stance detection and subsequent veracity
determination for a dataset of branches using these models, and printing the results.
See project README for more in-depth description of command-line interfaces.
:param argv: user-specified arguments parsed from command line.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-smp', '--stance_model_path', default=stance_lstm_model,
help='Path to pre-trained stance detection model')
parser.add_argument('-vmp', '--veracity_model_path', default=None,
help='Path to pre-trained veracity prediction model')
parser.add_argument('-ts', '--timestamps', default=True,
help='Include normalized timestamps of comments as features?')
    subparsers = parser.add_subparsers(help='Choose whether to use new or stored data for veracity prediction')
# Create parser for using new data for veracity prediction
new_parser = subparsers.add_parser('new', help='Using new data for veracity prediction')
new_parser.add_argument('id', help='The ID of a tweet from the conversation, for which veracity will be determined')
new_parser.set_defaults(func=veracity_new)
# Create parser for using stored data for veracity prediction
    stored_parser = subparsers.add_parser('stored', help='Using stored data for veracity prediction. Defaults are '
                                                         'supplied for all parameters')
stored_parser.add_argument('-dt', '--data_type', default='twitter',
help='Type of data used for veracity prediction, either \'twitter\' or \'dast\'')
stored_parser.add_argument('-dp', '--data_path', default=None, help='Path to data')
stored_parser.set_defaults(func=veracity_stored)
args = parser.parse_args(argv)
if args.veracity_model_path is None:
if args.timestamps:
args.veracity_model_path = veracity_hmm_model_timestamps
else:
args.veracity_model_path = veracity_hmm_model_no_timestamps
features = dict(text=False, lexicon=False, sentiment=False, pos=False, wembs=False, lstm_wembs=True)
dataset, feature_vectors = args.func(args, features)
for veracity_prediction in predict_veracity(args, dataset, feature_vectors):
print(veracity_prediction)
| 5,339,345
|
def value_and_entropy(emax, F, bw, grid_size=1000):
"""
Compute the value function and entropy levels for a θ path
increasing until it reaches the specified target entropy value.
Parameters
==========
emax: scalar
The target entropy value
F: array_like
The policy function to be evaluated
bw: str
A string specifying whether the implied shock path follows best
or worst assumptions. The only acceptable values are 'best' and
        'worst'.
    grid_size: int, optional
        Number of θ grid points used when tracing out the path (default 1000).
Returns
=======
df: pd.DataFrame
A pandas DataFrame containing the value function and entropy
values up to the emax parameter. The columns are 'value' and
'entropy'.
"""
if bw == 'worst':
θs = 1 / np.linspace(1e-8, 1000, grid_size)
else:
θs = -1 / np.linspace(1e-8, 1000, grid_size)
df = pd.DataFrame(index=θs, columns=('value', 'entropy'))
for θ in θs:
df.loc[θ] = evaluate_policy(θ, F)
if df.loc[θ, 'entropy'] >= emax:
break
df = df.dropna(how='any')
return df
| 5,339,346
|
def browse_directory():
"""
Browse the local file system starting at the given path and provide the following information:
- project_name_unique: If the given project name is not yet registered in the projects list
- project_path_prefix: The given path with a final separator, e.g. /data/
- project_dir: Name of the project directory generated from the project name
- project_dir_exists: If the project directory already exists in the given path
- path_exists: If the given path exists
- path_unique: If the given path is not yet registered for another project
- subdirs: The list of sub-directories at the given path
"""
data = request.json
path = data['path']
project = data['project']
subdirs = [d for d in glob.glob(f'{path}*') if os.path.isdir(d)] if os.path.isabs(path) else []
project_dir = project_utils.get_folder_name_for_project(project)
full_path = os.path.join(path, project_dir)
video_files = [f for f in glob.glob(f'{path}*{VIDEO_EXT}')]
projects = project_utils.load_project_overview_config()
return jsonify(
project_name_unique=project not in projects,
project_path_prefix=os.path.join(path, ''), # Append a separator
project_dir=project_dir,
project_dir_exists=os.path.exists(full_path),
path_exists=os.path.exists(path),
path_unique=path not in [p['path'] for p in projects.values()],
subdirs=subdirs,
video_files=video_files,
)
| 5,339,347
|
def test_laplace_obs_derivatives():
"""
The AdjointDoubleLayer kernel should be equal to the stress from the displacement from
    the SingleLayer kernel. The same should be true of the Hypersingular kernel with
respect to the DoubleLayer kernel.
"""
t = sp.var("t")
line = refine_surfaces(
[(t, 1 * t, 0.0 * t)],
gauss_rule(10),
control_points=np.array([[0, 0, 100, 0.5]]),
)
delta = 0.0001
obs_pt = np.array([[0.5, 0.5]])
obs_pts_fd = np.array(
[
[obs_pt[0, 0] - delta, obs_pt[0, 1]],
[obs_pt[0, 0] + delta, obs_pt[0, 1]],
[obs_pt[0, 0], obs_pt[0, 1] - delta],
[obs_pt[0, 0], obs_pt[0, 1] + delta],
]
)
for K_base, K_deriv in [
(SingleLayer, AdjointDoubleLayer),
(DoubleLayer, Hypersingular),
]:
Iv = integrate_term(K_base(), obs_pts_fd, line)
vs = Iv[:, 0, :, 0].sum(axis=1)
fd_deriv_vx = (vs[1] - vs[0]) / (2 * delta)
fd_deriv_vy = (vs[3] - vs[2]) / (2 * delta)
Id = integrate_term(K_deriv(), obs_pt, line)
deriv_vs = Id[0, :, :, 0].sum(axis=1)
np.testing.assert_allclose(deriv_vs, [fd_deriv_vx, fd_deriv_vy], atol=1e-3)
| 5,339,348
|
def _biorthogonal_window_loopy(analysis_window, shift):
"""
This version of the synthesis calculation is as close as possible to the
Matlab implementation in terms of variable names.
The results are equal.
The implementation follows equation A.92 in
Krueger, A. Modellbasierte Merkmalsverbesserung zur robusten automatischen
Spracherkennung in Gegenwart von Nachhall und Hintergrundstoerungen
Paderborn, Universitaet Paderborn, Diss., 2011, 2011
"""
fft_size = len(analysis_window)
assert np.mod(fft_size, shift) == 0
number_of_shifts = len(analysis_window) // shift
sum_of_squares = np.zeros(shift)
for synthesis_index in range(0, shift):
for sample_index in range(0, number_of_shifts+1):
analysis_index = synthesis_index + sample_index * shift
if analysis_index + 1 < fft_size:
sum_of_squares[synthesis_index] \
+= analysis_window[analysis_index] ** 2
sum_of_squares = np.kron(np.ones(number_of_shifts), sum_of_squares)
synthesis_window = analysis_window / sum_of_squares / fft_size
    # Why? Line created by Hai; Lukas does not know why it exists.
synthesis_window *= fft_size
return synthesis_window
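A hedged usage sketch with a Hann analysis window whose length is an integer multiple of the shift:
```python
import numpy as np

analysis_window = np.hanning(1024)
synthesis_window = _biorthogonal_window_loopy(analysis_window, shift=256)
print(synthesis_window.shape)  # (1024,), same length as the analysis window
```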
| 5,339,349
|
def bool_exprs():
"""
Generate all boolean expressions:
- Boolean operators: and([Var]), or([Var]), xor([Var]) (CPMpy class 'Operator', is_bool())
- Boolean equality: Var == Var (CPMpy class 'Comparison')
"""
if SOLVER_CLASS is None:
return
names = [(name, arity) for name, (arity, is_bool) in Operator.allowed.items() if is_bool]
names = [(name, arity) for name, arity in names if name not in EXCLUDE_OPERATORS[SOLVER_CLASS]]
for name, arity in names:
if arity != 0:
operator_args = BOOL_ARGS[:arity]
else:
operator_args = BOOL_ARGS
yield Operator(name, operator_args)
# Negated boolean values
yield Operator(name, [~ arg for arg in operator_args])
for eq_name in ["==", "!="]:
yield Comparison(eq_name, *BOOL_ARGS[:2])
for cpm_cons in global_constraints():
if cpm_cons.is_bool():
yield cpm_cons
| 5,339,350
|
def depthwise(data, N, H, W, CI, k_ch, KH, KW, PAD_H, PAD_W, SH, SW, block_size, use_bias=False):
"""
    Depthwise 5-D convolution; every channel has its own filter kernel.
Args:
data (list):a list,the size is 3 if use_bias else the size is 2;
data[0] tvm.tensor.Tensor of type float16 ,shape 5D(N, CI//C0, C0, H, W)
data[1] tvm.tensor.Tensor of type float16 ,shape 6D(CI//(CI//C0)//C0, KH, KW, k_ch*CI//C0, C0, C0)
data[2] tvm.tensor.Tensor of type float16 ,shape 5D(N, CI*k_ch//C0, OH, OW, C0)
N (int): batchsize
H (int): height of featureMap
W (int): width of featureMap
CI (int): channel of featureMap
k_ch (int): channel of Filter
KH (int): height of Filter
KW (int): width of Filter
PAD_H (int): padding pixels in vertical direction
PAD_W (int): padding pixels in horizontal direction
SH (int): stride in vertical direction
SW (int): stride in horizontal direction
        block_size (int): an int var, also called "C0"
use_bias (bool ): If True need add bias, else bias equal to zero.
Returns:
akg.tvm.Tensor of same type as data, shape is 5D(N, CI*k_ch//C0, OH, OW, C0)
"""
check_list = ["float16"]
dtype = data[0].dtype
if not (dtype in check_list):
raise RuntimeError("depthwise only support %s while dtype is %s" % (",".join(check_list), dtype))
for i in range(len(data)):
shape = data[i].shape
utils.check_shape(shape)
conv_dtype = 'float16'
group = CI // block_size
CO = CI * k_ch
assert k_ch == 1
assert CO % group == 0 and CI % group == 0
assert CO % block_size == 0 and (CI // group) % block_size == 0
clear = False # if clear, use auto tiling
# (N, CI, H, W) -> (N, C0, H, W, C1)
A = data[0]
# (CO, CI // group, KH, KW) -> (CI // group // block * KH * KW, CO // block, block, block)
B = data[1]
if use_bias:
bias = data[2]
bias_name = bias.op.name
else:
bias = None
bias_name = "bias_name"
key = [N, H, W, CI, k_ch, KH, KW, PAD_H, PAD_W, SH, SW]
hash_key = str((tuple(key)))
if hash_key in depthwise_set_dim_map:
cutH, cutCo, cutM, cutK, cutN = depthwise_set_dim_map[hash_key]
else:
# raise RuntimeError("other can not find cutH, cutCo, cutM, cutK, cutN")
cutH = (KH - 1) * KH + 1
cutCo = 16
cutM = 16
cutK = 16 * KH * KW
cutN = 16
clear = True # use auto tiling
OH = (H + 2 * PAD_H - KH) // SH + 1
OW = (W + 2 * PAD_W - KW) // SW + 1
kc1 = akg.tvm.reduce_axis((0, CI // block_size // group), name="kc1")
kh = akg.tvm.reduce_axis((0, KH), name="kh")
kw = akg.tvm.reduce_axis((0, KW), name="kw")
kc0 = akg.tvm.reduce_axis((0, block_size), name="kc0")
p_top, p_bottom, p_left, p_right = PAD_H, PAD_H, PAD_W, PAD_W
output_name = "output"
output_bias_name = "output_bias"
attr = {
"pragma_conv_kernel_n": CO,
"pragma_conv_kernel_h": KH,
"pragma_conv_kernel_w": KW,
"pragma_conv_padding_top": p_top,
"pragma_conv_padding_bottom": p_bottom,
"pragma_conv_padding_left": p_left,
"pragma_conv_padding_right": p_right,
"pragma_conv_bypass_l1": 1,
"pragma_conv_stride_h": SH,
"pragma_conv_stride_w": SW,
"pragma_conv_fm_n": N,
"pragma_conv_fm_c": CI,
"pragma_conv_fm_h": H,
"pragma_conv_fm_w": W,
"pragma_conv_dilation_h": 1,
"pragma_conv_dilation_w": 1,
"feature": A.op.name,
"filter": B.op.name,
"bias": bias_name,
"res": output_name,
"res_bias": output_bias_name
}
if not clear:
attr["pragma_conv_h_cut"] = cutH
attr["pragma_conv_w_cut"] = W + 2 * PAD_W
attr["pragma_conv_co_cut"] = cutCo
attr["pragma_conv_m_cut"] = cutM
attr["pragma_conv_k_cut"] = cutK
attr["pragma_conv_n_cut"] = cutN
C = akg.tvm.compute((N, CO // block_size, OH, OW, block_size),
lambda n, c1, h, w, c0: akg.lang.ascend.mmad(
akg.tvm.if_then_else(akg.tvm.any((h * SH + kh) < p_top, (h * SH + kh) > (H + p_top - 1),
(w * SW + kw) < p_left, (w * SW + kw) > (W + p_left - 1)),
akg.tvm.const(0.0, conv_dtype),
A[n, c1 // ((CO // block_size) // group) * (
(CI // block_size) // group) + kc1, (
h * SH + kh - p_top), (w * SW + kw - p_left), kc0])
# A[n, kc1, (h * SH + kh - p_top), (w * SW + kw - p_left), kc0])
* B[(kc1 * KH + kh) * KW + kw, c1, c0, kc0], axis=[kc1, kh, kw, kc0]),
attrs=attr, name=output_name)
if use_bias:
out = akg.tvm.compute(C.shape, lambda n, c1, h, w, c0: C[n, c1, h, w, c0] + bias[0, c1, 0, 0, c0],
name=output_bias_name)
else:
out = C
return out
| 5,339,351
|
def init_full_x_new_cli(init_full_x):
"""Change commandline call"""
name = "full_x"
branch = "full_x_new_cli"
orion.core.cli.main(
(
"hunt --init-only -n {branch} --branch-from {name} --cli-change-type noeffect "
"--enable-evc "
"./black_box_new.py -x~uniform(-10,10) --a-new argument"
)
.format(name=name, branch=branch)
.split(" ")
)
orion.core.cli.main(
"insert -n {branch} script -x=1.2".format(branch=branch).split(" ")
)
orion.core.cli.main(
"insert -n {branch} script -x=-1.2".format(branch=branch).split(" ")
)
| 5,339,352
|
async def get_last_recipe_json():
""" Doc Str """
with open(DEBUG_DIR.joinpath("last_recipe.json"), "r") as f:
return json.loads(f.read())
| 5,339,353
|
def FindDescendantComponents(config, component_def):
"""Return a list of all nested components under the given component."""
path_plus_delim = component_def.path.lower() + '>'
return [cd for cd in config.component_defs
if cd.path.lower().startswith(path_plus_delim)]
| 5,339,354
|
def overlapbatch(args):
"""
%prog overlapbatch ctgfasta poolfasta
Fish out the sequences in `poolfasta` that overlap with `ctgfasta`.
Mix and combine using `minimus2`.
"""
p = OptionParser(overlap.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ctgfasta, poolfasta = args
f = Fasta(ctgfasta)
for k, rec in f.iteritems_ordered():
fastafile = k + ".fasta"
fw = open(fastafile, "w")
SeqIO.write([rec], fw, "fasta")
fw.close()
overlap([fastafile, poolfasta])
| 5,339,355
|
def queue_job(script, submit=True):
"""Queue a pipeline script given as a string."""
import os
with tempfile.NamedTemporaryFile("w+") as fh:
fh.write(script)
fh.flush()
# TODO: do this in a better way
if submit:
cmd = "caput-pipeline queue %s"
else:
cmd = "caput-pipeline queue --nosubmit %s"
os.system(cmd % fh.name)
| 5,339,356
|
def _get_source(loader, fullname):
"""
This method is here as a replacement for SourceLoader.get_source. That
method returns unicode, but we prefer bytes.
"""
path = loader.get_filename(fullname)
try:
return loader.get_data(path)
except OSError:
raise ImportError('source not available through get_data()',
name=fullname)
| 5,339,357
|
def get_source_files(sf: Path) -> list:
"""
Search for files ending in .FLAC/.flac and add them to a list.
Args:
sf (str/pathlib.Path): Folder location to search for files.
Returns:
        list: List of file locations found to match .FLAC/.flac.
"""
return re_file_search.get_list(sf, r".+\.[fF][lL][aA][cC]$")
| 5,339,358
|
def extract_features(clip):
"""
Feature extraction from an audio clip
Args:
        clip (bytes): a WAV audio clip as raw bytes
Returns: A list of feature vectors
"""
sr, clip_array = wav_read(io.BytesIO(clip))
if clip_array.ndim > 1:
clip_array = clip_array[:, 0]
segments = frame_breaker.get_frames(clip_array, sample_rate=sr)
segments_encoded = [np2base64(s, sr) for s in segments]
segment_features = [
[f.feature_value for f in extract_feats_for_segment(s).features]
for s in segments_encoded
]
# extracted_feats = speech_feat_client.extract_speech_features(
# clip,
# opensmile_config=emorec_pytorch_config.ModelBaseline.opensmile_config,
# response_format='list'
# )
# feats = np.array([f.feature_value for f in extracted_feats])
return segment_features
| 5,339,359
|
def summarize(vocab, write_to_file=True):
"""
>>> summarize({'var', '{', '/regexp/'}, write_to_file=False)
The size of vocabulary is 5
"""
# Add /*start*/ and /*end*/ tokens to the count.
size = len(vocab) + 2
if size < 128:
size = t.green(str(size))
elif 128 <= size < 256:
size = t.yellow(str(size))
else:
size = t.red(str(size))
print("The size of vocabulary is", size)
total_vocab = [START_TOKEN] + sorted(list(vocab)) + [END_TOKEN]
if not write_to_file:
return
filename = 'autogenerated_vocabulary.py'
with open(filename, 'wt', encoding='utf-8') as vocab_file:
vocab_file.write('VOCAB = ')
pprint(total_vocab, stream=vocab_file)
| 5,339,360
|
def warn(msg, file=sys.stderr):
"""Log warning message ``msg`` to stderr."""
msg = 'WARNING: %s' %msg
if six.PY2:
msg = msg.encode('utf-8')
print(msg, file=file)
| 5,339,361
|
def _attribute_tester(message, attribute_name: str, attribute_type: str, num_different_values=2, num_edges_of_interest=1):
"""
Tests attributes of a message
message: returned from _do_arax_query
attribute_name: the attribute name to test (eg. 'jaccard_index')
attribute_type: the attribute type (eg. 'EDAM:data_1234')
num_different_values: the number of distinct values you wish to see have been added as attributes
num_edges_of_interest: the minimum number of edges in the KG you wish to see have the attribute of interest
"""
edges_of_interest = []
values = set()
for edge in message.knowledge_graph.edges.values():
if hasattr(edge, 'attributes') and edge.attributes:
for attr in edge.attributes:
if attr.original_attribute_name == attribute_name:
edges_of_interest.append(edge)
assert attr.attribute_type_id == attribute_type
values.add(attr.value)
assert len(edges_of_interest) >= num_edges_of_interest
if edges_of_interest:
assert len(values) >= num_different_values
| 5,339,362
|
def comparison_func(target: TwoQubitWeylDecomposition,
basis: TwoQubitBasisDecomposer,
base_fid: float,
comp_method: str):
"""
Decompose traces for arbitrary angle rotations.
This assumes that the tq angles go from highest to lowest.
"""
dep_param = (4 * base_fid - 1)/3
if comp_method == 'fid':
traces = fixed_traces(target, basis)
values = [((abs(tr)**2 - 1) * dep_param**i + 1)/ 16
for i, tr in enumerate(traces)]
elif comp_method == 'arb_fid':
traces = arb_traces(target)
values = [((abs(tr)**2 - 1) * dep_param**i + 1)/ 16
for i, tr in enumerate(traces)]
elif comp_method == 'arb_total':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**(a/np.pi) + 1)/ 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'arb_total_quad':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**((a/np.pi)**2) + 1) / 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'arb_total_sqrt':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**(np.sqrt(a/np.pi)) + 1) / 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'total_angle':
traces = arb_traces(target)
# negate to find smallest total angle (uses max later)
values = [-10, -10, -10, -abs(target.a) - abs(target.b) - abs(target.c)]
return values
| 5,339,363
|
def values_graph(my_path, calced, my_experimental, graph_name):
"""X axis -> residue numbers, Y axis -> values
"calced" is a dict containing values for residues (as keys)
"experimental" is a list containing STR record objects"""
experimental = copy.deepcopy(my_experimental)
exp_line, calc_line = [], []
for k in range(min(calced.keys()) - 1, max(calced.keys()) + 1):
if k in list(calced.keys()):
calc = calced[k]
exp = experimental.pop(0).value
exp_line.append(exp)
calc_line.append(calc)
else:
exp_line.append(None) # append 'None' where data is missing
calc_line.append(None)
# connect line over missing (None) values, more info at ->
# http://stackoverflow.com/questions/14399689/
# matplotlib-drawing-lines-between-points-ignoring-missing-data
exp_line = np.array(exp_line).astype(np.double)
exp_mask = np.where(np.isfinite(exp_line))
calc_line = np.array(calc_line).astype(np.double)
calc_mask = np.where(np.isfinite(calc_line))
# x axis values as numpy array
xs = np.arange(min(calced.keys())-1, max(calced.keys())+2)
plt.figure(figsize=(10, 5), dpi=80)
# experimental values with 'None' values masked
plt.plot(
xs[exp_mask], exp_line[exp_mask],
linewidth=2.0, color='#FD6C6C', marker='o', label='exp', alpha=.7
)
# calculated values with 'None' values masked
plt.plot(
xs[calc_mask], calc_line[calc_mask],
linewidth=2.0, color='#027A8B', marker='o', label='calc', alpha=.7
)
plt.legend(loc='lower left')
plt.xlabel('residue number')
plt.ylabel('value')
plt.grid(axis="y")
plt.tight_layout(pad=1.08)
plt.savefig(my_path + "/" + graph_name, format="svg", transparent=True)
plt.close()
| 5,339,364
|
def create_bs4_obj(connection):
"""Creates a beautiful Soup object"""
soup = BeautifulSoup(connection, 'html.parser')
return soup
| 5,339,365
|
def __create_dataframe_from_cassandra(query,con):
"""
Function to query into Cassandra and Create Pandas DataFrame
Parameter
---------
query : String - Cassandra Query
con : cassandra connection object
Return
------
df : pd.DataFrame - DataFrame created using the cassandra query output
"""
all_records = list(con.execute(query))
df = pd.DataFrame(all_records)
return df
| 5,339,366
|
def get_onehot_attributes(attr_dict, attr2idx, split):
"""get the labels in onehot format
Args:
        attr_dict (dict): the dictionary mapping each image_id to its top 5 attributes
        attr2idx (dict): the dictionary mapping each attribute to its index
        split (str): the split of the dataset (train, val, test)
    Returns:
        dict: the dictionary mapping every image to the one-hot encoding of its top 5 attributes
"""
# print("Getting the onehot labels of images...")
attr_label_file_name = os.path.join(
WORKING_PATH, "finetune", split + "_onehot_attribute.pickle"
)
if os.path.exists(attr_label_file_name):
# print(
# "The {} has already existed...".format(split + "_onehot_attribute.pickle")
# )
attr_label_file = open(attr_label_file_name, "rb")
attr_label = pickle.load(attr_label_file)
return attr_label
attr_label = defaultdict()
def generate_onehot(attr):
onehot = [0] * 1000
for idx in attr:
onehot[idx] = 1
return tf.stack(onehot)
for img_id in tqdm(attr_dict.keys()):
attr_index = [attr2idx[word] for word in attr_dict[img_id]]
attr_label[img_id] = generate_onehot(attr_index)
attr_label_file = open(attr_label_file_name, "wb")
pickle.dump(attr_label, attr_label_file)
return attr_label
| 5,339,367
|
def create_worker_snapshot(compute, project, zone, worker):
"""Creates a snapshot for the worker if it does not already exists."""
try:
compute.snapshots().get(project=project, snapshot=f"{worker}-worker-boot").execute()
print("snaphost is already present won\'t be created")
return
except googleapiclient.errors.HttpError:
print("snaphost will be created")
except:
print("error calling the API will exit")
return
stop_worker(compute=compute, project=project, zone=zone, worker=worker)
operations = []
body = {
"name": f"{worker}-worker-boot"
}
disk = f"{worker}-boot"
op = compute.disks().createSnapshot(
project=project,
body=body,
zone=zone,
disk=disk
).execute()
operations.append(op)
body = {
"name": f"{worker}-worker-pdssd"
}
disk = f"{worker}-pdssd"
op = compute.disks().createSnapshot(
project=project,
body=body,
zone=zone,
disk=disk
).execute()
operations.append(op)
for op in operations:
wait_for_operation(
compute=compute,
project=project,
zone=zone,
operation=op["name"]
)
start_worker(compute=compute, project=project, zone=zone, worker=worker)
print("disks copied")
| 5,339,368
|
def test_re_b37_re_b37_v(mode, save_output, output_format):
"""
TEST :branch : base='string', pattern='abc*', value='abc',
type='valid', RULE='2,3,4'
"""
assert_bindings(
schema="msData/regex/reB37.xsd",
instance="msData/regex/reB37.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,339,369
|
def generate_activity_histogram(messages, filename):
"""
Save a graph to filename of the times messages were sent.
"""
times = range(24)
fig, ax = plt.subplots()
ax.hist([message.time.hour for message in messages], times, density=True)
ax.set_xlabel("Time")
ax.set_xlim(min(times), max(times))
ax.set_xticks(times)
ax.set_xticklabels(f"{item}" for item in times)
ax.set_ylabel("Messages / Total Messages")
ax.set_ylim(0, 0.2)
fig.savefig(filename)
plt.close(fig)
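# Example usage (a sketch with assumed inputs): the function only needs objects
# exposing a `time` attribute with an `hour` field, so a hypothetical Message
# namedtuple holding datetime values is enough for illustration.
from collections import namedtuple
from datetime import datetime

Message = namedtuple("Message", ["time"])
messages = [Message(datetime(2021, 5, 1, hour)) for hour in (8, 9, 9, 17, 22)]
generate_activity_histogram(messages, "activity_histogram.png")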
| 5,339,370
|
async def post(
url: str,
content: bytes,
*,
headers: List[Tuple[bytes, bytes]] = None,
loop: Optional[AbstractEventLoop] = None,
cafile: Optional[str] = None,
capath: Optional[str] = None,
cadata: Optional[str] = None,
ssl_context: Optional[ssl.SSLContext] = None,
protocols: Iterable[str] = DEFAULT_PROTOCOLS,
ciphers: Iterable[str] = DEFAULT_CIPHERS,
options: Iterable[int] = DEFAULT_OPTIONS,
chunk_size: int = -1,
connect_timeout: Optional[Union[int, float]] = None,
middleware: Optional[List[HttpClientMiddlewareCallback]] = None
) -> Optional[bytes]:
"""Issues a POST request
Args:
url (str): The url
content (bytes): The body content
headers (List[Tuple[bytes, bytes]], optional): Any extra headers required. Defaults to
None.
loop (Optional[AbstractEventLoop], optional): The optional asyncio event
loop.. Defaults to None.
cafile (Optional[str], optional): The path to a file of concatenated CA
certificates in PEM format. Defaults to None.
capath (Optional[str], optional): The path to a directory containing
several CA certificates in PEM format. Defaults to None.
cadata (Optional[str], optional): Either an ASCII string of one or more
PEM-encoded certificates or a bytes-like object of DER-encoded
certificates. Defaults to None.
ssl_context (Optional[SSLContext], optional): An ssl context to be
used instead of generating one from the certificates.
protocols (Iterable[str], optional): The supported protocols. Defaults
to DEFAULT_PROTOCOLS.
ciphers (Iterable[str], optional): The supported ciphers. Defaults
to DEFAULT_CIPHERS.
options (Iterable[int], optional): The ssl.SSLContext.options. Defaults
to DEFAULT_OPTIONS.
chunk_size (int, optional): The size of each chunk to send or -1 to send
as a single chunk.. Defaults to -1.
connect_timeout (Optional[Union[int, float]], optional): The number
of seconds to wait for the connection. Defaults to None.
middleware (Optional[List[HttpClientMiddlewareCallback]], optional):
Optional middleware. Defaults to None.
Raises:
HTTPError: Is the status code is not ok.
asyncio.TimeoutError: If the connect times out.
Returns:
Optional[bytes]: The response body
"""
data = bytes_writer(content, chunk_size) if content else None
async with HttpClient(
url,
method='POST',
headers=headers,
body=data,
loop=loop,
cafile=cafile,
capath=capath,
cadata=cadata,
ssl_context=ssl_context,
protocols=protocols,
ciphers=ciphers,
options=options,
connect_timeout=connect_timeout,
middleware=middleware
) as response:
await response.raise_for_status()
return await response.raw()
| 5,339,371
|
def parse_config_file(path):
"""Parse TOML config file and return dictionary"""
try:
with open(path, 'r') as f:
return toml.loads(f.read())
    except Exception:
        # file missing or unparsable: create it and fall back to an empty config
        open(path, 'a').close()
        return {}
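# Example usage (a minimal sketch): load a TOML config, falling back to an
# empty dict (and creating the file) when it is missing or unreadable. The
# path and key are illustrative assumptions.
config = parse_config_file("settings.toml")
timeout = config.get("timeout", 30)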
| 5,339,372
|
def DPP2607_Write_CcaC7r1Coefficient(ccac7r1):
"""
Writes: CCA C7R1 Coefficient.
DPP2607_Write_CcaC7r1Coefficient(DWORD CCAC7R1).
:type ccac7r1: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CcaC7r1Coefficient(%r)', ccac7r1)
payload = [0x71]
payload.extend(list(bytearray(struct.pack(">I", ccac7r1 & 0x1ff))))
i2c.write(payload)
| 5,339,373
|
def _write_labelbuddy_part(
all_docs: Iterator[Tuple[pd.Series, pd.Series, pd.DataFrame]],
part_nb: int,
part_size: Optional[int],
output_dir: Path,
) -> None:
"""Write labelbuddy documents to jsonl file.
Writes at most `part_size` documents (or all documents if `part_size` is
`None`) taken from `all_docs` to the appropriate jsonl file. Raises
`StopIteration` if the `all_docs` iterator runs out.
"""
pagination = {"part": part_nb, "chapter": 1, "page": 1}
# get the first document so we don't create the file if the generator is
# exhausted.
doc_info = next(all_docs)
with open(
output_dir.joinpath(f"documents_{part_nb:0>5}.jsonl"),
"w",
encoding="utf-8",
) as out_f:
out_f.write(json.dumps(_prepare_document(*doc_info, **pagination)))
out_f.write("\n")
pagination["page"] += 1
n_written = 1
while part_size is None or n_written != part_size:
doc_info = next(all_docs)
out_f.write(json.dumps(_prepare_document(*doc_info, **pagination)))
out_f.write("\n")
n_written += 1
if not pagination["page"] % _CHAPTER_SIZE:
pagination["chapter"] += 1
pagination["page"] = 1
else:
pagination["page"] += 1
| 5,339,374
|
def wordnet_pos(tag):
"""
Transforms nltk part-of-speech tag strings to wordnet part-of-speech tag string.
:param tag: nltk part-of-speech tag string
:type: str
:return: the corresponding wordnet tag
:type: wordnet part-of-speech tag string
"""
    # look up by the first letter of the NLTK tag; unknown tags default to noun
    return nltk_wordnet_pos_dict.get(tag[0], nltk_wordnet_pos_dict["N"])
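# Example of the mapping this helper relies on (a sketch; the real
# `nltk_wordnet_pos_dict` is defined elsewhere in the module): first letter of
# the NLTK tag -> WordNet POS constant, defaulting to nouns.
from nltk.corpus import wordnet

nltk_wordnet_pos_dict = {
    "J": wordnet.ADJ,
    "V": wordnet.VERB,
    "N": wordnet.NOUN,
    "R": wordnet.ADV,
}
assert wordnet_pos("VBD") == wordnet.VERB   # verb, past tense
assert wordnet_pos("XYZ") == wordnet.NOUN   # unknown tags fall back to noun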
| 5,339,375
|
def plot_featurelist_learning_curve(df, data_subset='allBacktests', metric= None):
"""
Plot the featurelist length and error metric to generate a learning curve
df: Pandas df
Contains information on feature lists, and accuracy for iterations on a model. output of test_feature_selection()
data_subset: str
        desired backtest to plot. Inputs are: 'backtest_1', 'allBacktests', 'holdout'
metric: str
error metric to plot. Inputs are: 'RMSE', 'MASE', 'Theils_U', 'SMAPE', 'R_Squared'
Returns:
--------
Plotly lineplot
"""
assert data_subset.lower() in [
'backtest_1',
'allbacktests',
'holdout',
    ], "data_subset must be 'backtest_1', 'allBacktests' or 'holdout'"
assert metric.upper() in [
'RMSE',
'MASE',
'Theils_U',
'SMAPE',
'R_Squared'
], "metric must be 'RMSE','MASE', 'Theils_U', 'SMAPE', 'R_Squared'"
    df = df[df['Featurelist'].str.contains(r'(?<=Top )\d*(?= features)', regex=True)].copy()
    df['Feature_length'] = df['Featurelist'].apply(lambda x: re.search(r'(?<=Top )\d*(?= features)', x).group(0))
if data_subset == 'allBacktests':
data_subset = data_subset.title().replace('b','_B')
metric_column = data_subset + "_" + metric
df[metric_column] = df[metric_column].apply(lambda x: mean([v for v in x if v != None]))
df = df[['Feature_length', metric_column]].drop_duplicates()
else:
data_subset = data_subset.title()
metric_column = data_subset + "_" + metric
df = df[['Feature_length', metric_column]].drop_duplicates()
df.drop_duplicates(inplace= True)
df = df[['Feature_length', metric_column]].sort_values('Feature_length', ascending= True)
fig = px.scatter(df, x='Feature_length', y=metric_column)
    fig.update_layout(title_text='Featurelist Length Learning Curve')
fig.show()
| 5,339,376
|
def call_telegram_api(function: str, data: dict):
"""Make a raw call to Telegram API."""
return requests.post(
f'https://api.telegram.org/bot{TELEGRAM_TOKEN}/{function}', data=data)
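# Example usage (a sketch): sending a plain text message through the Bot API's
# `sendMessage` method. The chat id is an illustrative placeholder.
response = call_telegram_api("sendMessage", {"chat_id": 123456789, "text": "Deploy finished"})
response.raise_for_status()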
| 5,339,377
|
def test_POMDP(POMDP, policy, test_data, status):
"""simulation"""
# Basic settings
p = POMDP
ind_iter = 0
horizon = len(test_data)
state = status
action = p.actions[0]
belief = p.init_belief
reward = 0
state_set = [state]
action_set = []
observation_set = ["null"]
alpha_length = len(p.states)
while True:
# make an action
ind_key = np.argmax([
np.dot(
policy[key][:alpha_length],
belief
)
for key in policy.keys()
])
action = policy[list(policy.keys())[ind_key]][alpha_length]
action_set.append(action)
# get a reward
reward = reward + p.reward_func(state=state, action=action)
# check stop condition
ind_iter = ind_iter + 1
if ind_iter >= horizon:
break
# state doesn't change
state = state
state_set.append(state)
# make an observation
observation = test_data.iloc[ind_iter]
observation_set.append(observation)
# update belief
belief = [
p.observ_func(observation, s_new, action) *
np.sum([
p.trans_func(s_new, s_old, action) *
belief[p.states.index(s_old)]
for s_old in p.states
])
for s_new in p.states
]
normalize_const = 1 / sum(belief)
belief = np.multiply(belief, normalize_const)
return action_set
| 5,339,378
|
def set_testcase_with_impacts(testcase, impacts):
"""Set testcase's impact-related fields given impacts."""
testcase.impact_stable_version = impacts.stable.version
testcase.impact_stable_version_likely = impacts.stable.likely
testcase.impact_beta_version = impacts.beta.version
testcase.impact_beta_version_likely = impacts.beta.likely
testcase.is_impact_set_flag = True
| 5,339,379
|
def compute_bkr_collection(myCollection,percentile=10,make_images=False,image_name=''):
""" Computes a synthetic background value for a given collection, based on
    the lowest values at each point for each image. It treats rows (long axis of the laser line)
    and wavenumbers separately.
Notes:
-Does NOT consider bleach! If you want to include the 'bleach' in this analysis, you should run the 'use_bleach' code
inputs:
myCollection: This is your collection data. Should be Collection class
percentile: this is the lower percentile you wish to treat as the 'background'.
default:10%
make_images: this tells this code to dump images of the BKR.
default: False
        image_name: this is prepended onto the image filenames if make_images
        is set to True.
default: ''
outputs:
        bkr_values: this is an n x m matrix where n is the number of pixels along the
        long axis of the laser and m is the number of bins in the wavenumber dimension.
        These values correspond to the "synthetic" background values.
"""
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
    # Get a dircube. Currently only uses the 'replicates'
#
# There was an issue here but it is fixed now.
#
dc00,dx,dz,t,fl,ft,fm = collection_process(myCollection,method='avg')
num_rows,num_wns,num_images = dc00.shape
pctl = np.nanpercentile(dc00,percentile,axis=2)
nudc_pctl = deepcopy(dc00)
for j in range(0,num_images):
#Make all values here equal to NaN
myv = nudc_pctl[:,:,j]
myv[np.where(myv > pctl)] = np.NaN
bkr_values = np.nanmean(nudc_pctl,axis=2)
#Should we output figures?
if make_images==False:
return bkr_values
else:
for myn in range(0,num_rows):
#Name to use to save the data
savename_fign = image_name + '_' + str(myn)
mydcv = dc00[myn,:,:]
#####
##
            ## This shows a list of sorted values at various locations
##
#####
            # This could be used to test different values
plt.subplot(2,1,2)
plt.plot(np.diff(np.sort(mydcv[111,:]))) #420...this is* in the realm of montmorillonite.
plt.plot(np.diff(np.sort(mydcv[272,:])))#INorganicSpike
plt.plot(np.diff(np.sort(mydcv[367,:])))#D/G
plt.plot(np.diff(np.sort(mydcv[445,:])))#D/G
plt.plot(np.diff(np.sort(mydcv[909,:])))#CH
plt.plot(np.diff(np.sort(mydcv[600,:])))
plt.plot(np.diff(np.sort(mydcv[700,:])))
plt.plot(np.diff(np.sort(mydcv[1000,:])))
plt.legend(('420','1000','D','G','CH','Test1','Test2','end'),loc=9,ncol=4,prop={'size':12})
plt.ylim([0,0.002])
plt.subplot(2,1,1)
plt.plot(np.sort(mydcv[111,:])) #420...this is* in the realm of montmorillonite.
plt.plot(np.sort(mydcv[272,:]))#INorganicSpike
plt.plot(np.sort(mydcv[367,:]))#D/G
plt.plot(np.sort(mydcv[445,:]))#D/G
plt.plot(np.sort(mydcv[909,:]))#CH
plt.plot(np.sort(mydcv[600,:]))
plt.plot(np.sort(mydcv[700,:]))
plt.plot(np.sort(mydcv[1000,:]))
#plt.legend(('420','1000','D','G','CH','Test1','Test2','end'),loc=9,ncol=2)
plt.savefig(savename_fign + '_sorted.png',transparent=True)
plt.close()
#Plot raw values (gray)
for j in range(0,num_images):
plt.plot(dc00[myn,:,j],color=[0.6,0.6,0.6])
#Plot 10th percentile data (blue)
plt.plot(pctl[myn,:],color='magenta')
plt.plot(bkr_values[myn,:],color='cyan')
savename_fign = image_name + '_' + str(j)
savedata = savename_fign + 'allcomp.jpg'
plt.savefig(savedata)
plt.close()
return bkr_values
| 5,339,380
|
def inverse_cell_center(structure):
"""
make an inversion against the cell center
:param structure: an instance of pymatflow.structure.crystal.Crystal()
"""
# first transfer to fractional coordinate and inverse against [0.5, 0.5, 0.5]
structure.natom = len(structure.atoms)
frac = structure.get_fractional()
for atom in frac:
atom[1] = 0.5 * 2 - atom[1]
atom[2] = 0.5 * 2 - atom[2]
atom[3] = 0.5 * 2 - atom[3]
# convert frac to cartesian again
latcell = np.array(structure.cell)
convmat = latcell.T
from pymatflow.base.atom import Atom
structure.atoms = []
for atom in frac:
cartesian = list(convmat.dot(np.array([atom[1], atom[2], atom[3]])))
structure.atoms.append(Atom(name=atom[0], x=cartesian[0], y=cartesian[1], z=cartesian[2]))
#
| 5,339,381
|
def courses_to_take(input):
"""
Time complexity: O(n) (we process each course only once)
Space complexity: O(n) (array to store the result)
"""
# Normalize the dependencies, using a set to track the
# dependencies more efficiently
course_with_deps = {}
to_take = []
for course, deps in input.items():
if not deps:
# Course with no dependencies:
# candidate to start the search
to_take.append(course)
else:
course_with_deps[course] = set(deps)
result = []
while to_take:
course = to_take.pop()
# Add course to journey
result.append(course)
        # Iterate over a snapshot of the remaining courses and remove this
        # course from their dependencies (a snapshot is needed because the
        # dict is mutated inside the loop)
        for prereq_course, prereq_deps in list(course_with_deps.items()):
if course in prereq_deps:
prereq_deps.remove(course)
if not prereq_deps:
# Course has all the dependencies solved:
# add to the "to_take" queue
to_take.append(prereq_course)
del course_with_deps[prereq_course]
return result if len(result) == len(input) else None
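# Example usage (a minimal sketch): courses mapped to their prerequisites. The
# returned order satisfies every dependency; None is returned when a cycle
# makes the plan impossible.
curriculum = {
    "intro": [],
    "data structures": ["intro"],
    "algorithms": ["data structures"],
    "databases": ["intro"],
}
order = courses_to_take(curriculum)
assert order is not None
assert order.index("intro") < order.index("data structures") < order.index("algorithms")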
| 5,339,382
|
def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
"""Takes font and list of glyph lists (must be sorted by glyph id), and returns
two things:
- Combined glyph list,
- If values_lst is None, return input glyph lists, but padded with None when a glyph
was missing in a list. Otherwise, return values_lst list-of-list, padded with None
to match combined glyph lists.
"""
if values_lst is None:
dict_sets = [set(l) for l in lst]
else:
dict_sets = [{g:v for g,v in zip(l,vs)} for l,vs in zip(lst,values_lst)]
combined = set()
combined.update(*dict_sets)
sortKey = font.getReverseGlyphMap().__getitem__
order = sorted(combined, key=sortKey)
# Make sure all input glyphsets were in proper order
assert all(sorted(vs, key=sortKey) == vs for vs in lst)
del combined
paddedValues = None
if values_lst is None:
padded = [[glyph if glyph in dict_set else default
for glyph in order]
for dict_set in dict_sets]
else:
assert len(lst) == len(values_lst)
padded = [[dict_set[glyph] if glyph in dict_set else default
for glyph in order]
for dict_set in dict_sets]
return order, padded
| 5,339,383
|
def complete_multipart_upload(bucket, key, credentials, uploadId, parts):
"""
Complete multipart upload.
Raise exception if something wrong happens; otherwise success
Args:
bucket(str): bucket name
key(str): object key or `GUID/filename`
credentials(dict): aws credentials
uploadId(str): upload id of the current upload
parts(list(set)): List of part infos
[{"Etag": "1234567", "PartNumber": 1}, {"Etag": "4321234", "PartNumber": 2}]
Return:
None
"""
session = boto3.Session(
aws_access_key_id=credentials["aws_access_key_id"],
aws_secret_access_key=credentials["aws_secret_access_key"],
aws_session_token=credentials.get("aws_session_token"),
)
s3client = session.client("s3")
try:
retry_call(
s3client.complete_multipart_upload,
fkwargs={
"Bucket": bucket,
"Key": key,
"MultipartUpload": {"Parts": parts},
"UploadId": uploadId,
},
tries=MAX_TRIES,
jitter=10,
)
except botocore.exceptions.ClientError as error:
logger.error(
"Error when completing multiple part upload for object with uuid {}. Detail {}".format(
key, error
)
)
raise InternalError(
"Can not complete multipart upload for {}. Detail {}".format(key, error)
)
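# Example usage (a sketch with placeholder credentials and ETags): each entry
# in `parts` carries the ETag returned by the corresponding upload_part call
# together with its 1-based part number.
parts = [
    {"ETag": '"etag-of-part-1"', "PartNumber": 1},
    {"ETag": '"etag-of-part-2"', "PartNumber": 2},
]
complete_multipart_upload(
    bucket="my-bucket",
    key="GUID/filename.bam",
    credentials={"aws_access_key_id": "...", "aws_secret_access_key": "..."},
    uploadId="example-upload-id",
    parts=parts,
)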
| 5,339,384
|
def sig_for_ops(opname):
"""sig_for_ops(opname : str) -> List[str]
Returns signatures for operator special functions (__add__ etc.)"""
# we have to do this by hand, because they are hand-bound in Python
assert opname.endswith('__') and opname.startswith('__'), "Unexpected op {}".format(opname)
name = opname[2:-2]
if name in binary_ops:
return ['def {}(self, other: Any) -> Tensor: ...'.format(opname)]
elif name in comparison_ops:
# unsafe override https://github.com/python/mypy/issues/5704
return ['def {}(self, other: Any) -> Tensor: ... # type: ignore'.format(opname)]
elif name in unary_ops:
return ['def {}(self) -> Tensor: ...'.format(opname)]
elif name in to_py_type_ops:
if name in {'bool', 'float', 'complex'}:
tname = name
elif name == 'nonzero':
tname = 'bool'
else:
tname = 'int'
if tname in {'float', 'int', 'bool', 'complex'}:
tname = 'builtins.' + tname
return ['def {}(self) -> {}: ...'.format(opname, tname)]
else:
raise Exception("unknown op", opname)
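# Example usage (a sketch, assuming 'add' is registered in binary_ops and 'eq'
# in comparison_ops as in the stub generator):
print(sig_for_ops('__add__'))  # ['def __add__(self, other: Any) -> Tensor: ...']
print(sig_for_ops('__eq__'))   # same shape, but suffixed with "# type: ignore"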
| 5,339,385
|
def substr(source, start_index, count):
"""
    substr(source, start_index, count) -> str
    Return a subset of a string `source`, starting at `start_index` and
    of length `count`.
    """
    # straightforward slice: characters start_index .. start_index + count - 1
    return source[start_index:start_index + count]
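# Example usage of the slice-based sketch above:
assert substr("hello world", 6, 5) == "world"
assert substr("hello world", 0, 5) == "hello"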
| 5,339,386
|
def add_qos(tenant_id, qos_name, qos_desc):
"""Adds a qos to tenant association."""
LOG.debug(_("add_qos() called"))
session = db.get_session()
try:
qos = (session.query(network_models_v2.QoS).
filter_by(tenant_id=tenant_id).
filter_by(qos_name=qos_name).one())
raise c_exc.QosNameAlreadyExists(qos_name=qos_name,
tenant_id=tenant_id)
except exc.NoResultFound:
qos = network_models_v2.QoS(qos_id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
qos_name=qos_name,
qos_desc=qos_desc)
session.add(qos)
session.flush()
return qos
| 5,339,387
|
def lstm2(hidden_nodes, steps_in=5, steps_out=1, features=1):
"""
A custom LSTM model.
:param hidden_nodes: number of hidden nodes
:param steps_in: number of (look back) time steps for each sample input
:param steps_out: number of (look front) time steps for each sample output
:param features: number of features for each sample input (e.g. 1 for univariate or 2+ for multivariate time series)
:return: simple LSTM model
"""
model = Sequential()
model.add(LSTM(hidden_nodes, input_shape=(steps_in, features), return_sequences=True)) # default activation: tanh
model.add(LSTM(hidden_nodes)) # default activation: tanh
model.add(Dense(steps_out)) # default activation: None
model.compile(optimizer='adam', loss='mse')
return model
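# Example usage (a sketch, assuming TensorFlow/Keras is available): a stacked
# LSTM for univariate series with a look-back of 5 steps predicting 1 step.
model = lstm2(hidden_nodes=50, steps_in=5, steps_out=1, features=1)
model.summary()
# X should then have shape (n_samples, 5, 1) and y shape (n_samples, 1):
# model.fit(X, y, epochs=20, batch_size=32)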
| 5,339,388
|
def build_carousel_scroller(items):
"""
Usage:
item_layout = widgets.Layout(height='120px', min_width='40px')
items = [pn.Row(a_widget, layout=item_layout, margin=0, background='black') for a_widget in single_pf_output_panels]
# items = [widgets.Button(layout=item_layout, description=str(i), button_style='success') for i in range(40)]
# build_carousel_scroller(items)
build_carousel_scroller(single_pf_output_panels)
"""
box_layout = pn.widgets.Layout(overflow_x='scroll', border='3px solid black',
width='1024px',
height='',
flex_flow='row',
display='flex')
carousel = pn.widgets.Box(children=items, layout=box_layout)
return pn.widgets.VBox([pn.widgets.Label('Scroll horizontally:'), carousel])
| 5,339,389
|
def smolsolve(x, xBound, f0, t, K_A, source, Nt):
""" solve Smoluchowski equations
Input: x, initial condition, time, kernel, # timestep
Output: solution f(t,x)
"""
dx = xBound[1] - xBound[0]
Nx = x.size
dt = t / Nt
g = x * f0
for t in range(Nt):
JL = 0*x
fsrc = 0*x
# source term for f
if source == 'none':
fsrc = 0*x
elif source == 'myGaussian':
fsrc = 0.05*np.exp(-((x-8.)**2))
else:
sys.exit("source incorrectly specified")
# Flux term
for i in range(1,Nx):
for p in range(0,i):
# K_A = 1
# this is analytic expression for int_{x_j}^{x_j+1} K_A(x_mid(i),y)/y \, dy
if K_A == '1':
kernBndry = np.log(xBound[i-p]/x[i-p-1])
kern = np.log(xBound[i-p+1:-1]/xBound[i-p:-2])
# K_A = x*y
elif K_A == 'x*y':
xA = x[i-p-1]
xB = xBound[i-p]
kernBndry = (xB - xA) * x[p]
xA = xBound[i-p:-2]
xB = xBound[i-p+1:-1]
kern = (xB - xA) * x[p]
elif K_A == '2+(x/y)^2+(y/x)^2':
xA = x[i-p-1]
xB = xBound[i-p]
kernBndry = (-xA**2 + xB**2 + x[p]**4 * (1./xA**2-1./xB**2)) / (2.*x[p]**2) + 2.*np.log(xB/xA)
xA = xBound[i-p:-2]
xB = xBound[i-p+1:-1]
kern = (-xA**2 + xB**2 + x[p]**4 * (1./xA**2-1./xB**2)) / (2.*x[p]**2) + 2.*np.log(xB/xA)
elif K_A == '(x*y)^(15/14)*(x+y)^(9/14)': # https://arxiv.org/pdf/astro-ph/0201102.pdf
normConst = 0.001 # make physically meaningful!
xA = x[i-p-1]
xB = xBound[i-p]
kernBndry = normConst*-(7./120.) * x[p]**(15./14.) * (x[p] * (9. * x[p] * (xA/(xA+x[p])**5.)**(1./14.)+19. * (xA**15./(xA+x[p])**5.)**(1./14.)-9. * x[p] * (xB/(xB+x[p])**5.)**(1./14.)-19.*(xB**15./(xB+x[p])**5.)**(1./14.))+10.*((xA**29./(xA+x[p])**5.)**(1./14.)-(xB**29./(xB+x[p])**5.)**(1./14.))-9.*(xA * x[p]**23.)**(1./14.) * sps.hyp2f1(1./14.,5./14.,15./14.,-(xA/x[p]))+9.*(xB * x[p]**23.)**(1./14.)*sps.hyp2f1(1./14.,5./14.,15./14.,-(xB/x[p])))
xA = xBound[i-p:-2]
xB = xBound[i-p+1:-1]
kern = normConst*-(7./120.) * x[p]**(15./14.) * (x[p] * (9. * x[p] * (xA/(xA+x[p])**5.)**(1./14.)+19. * (xA**15./(xA+x[p])**5.)**(1./14.)-9. * x[p] * (xB/(xB+x[p])**5.)**(1./14.)-19.*(xB**15./(xB+x[p])**5.)**(1./14.))+10.*((xA**29./(xA+x[p])**5.)**(1./14.)-(xB**29./(xB+x[p])**5.)**(1./14.))-9.*(xA * x[p]**23.)**(1./14.) * sps.hyp2f1(1./14.,5./14.,15./14.,-(xA/x[p]))+9.*(xB * x[p]**23.)**(1./14.)*sps.hyp2f1(1./14.,5./14.,15./14.,-(xB/x[p])))
else:
sys.exit("kernel incorrectly specified")
JL[i] = JL[i] + dx*g[p] * (kernBndry*g[i-p-1] + np.sum(kern*g[i-p:-1]))
JR = np.roll(JL,-1)
JR[-1]= 0
g = g - dt / dx * ( JR - JL ) + dt*fsrc*x
f = g / x
return f
| 5,339,390
|
def on_stickerpack_removed(bot, user_id, stickerpack_name):
"""Called whenever a stickerpack of a user gets removed"""
bot.send_message(user_id, "The sticker pack %s has been removed, sorry for the inconvenience" % stickerpack_name)
with storage.session_scope() as session:
storage.remove_every_pack_mention(session, user_id, stickerpack_name)
| 5,339,391
|
def base_round(x, base):
"""
This function takes in a value 'x' and rounds it to the nearest multiple
of the value 'base'.
Parameters
----------
x : int
Value to be rounded
base : int
        Base for x to be rounded to
Returns
-------
int
The rounded value
"""
return base*round(x/base)
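# Quick sanity check of the rounding behaviour:
assert base_round(37, 5) == 35   # 37 is closer to 35 than to 40
assert base_round(38, 5) == 40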
| 5,339,392
|
def numbers_basics():
"""
Here we will be seeing different numbers data type in Python.
:return: none
"""
log_debug(2 + 2)
log_debug(2 * 4)
log_debug(14 / 3)
log_debug(14 % 3)
log_debug(2 ** 3)
# BODMAS rule is applied
log_debug(2 + 10 * 10)
# The below code will not give 0.0 because of floating point accuracy.
# Check https://docs.python.org/3/tutorial/floatingpoint.html
log_debug(0.1 + 0.2 - 0.3)
| 5,339,393
|
def _update_schema_1_to_2(table_metadata, table_path):
"""
Given a `table_metadata` of version 1, update it to version 2.
:param table_metadata: Table Metadata
:param table_path: [String, ...]
:return: Table Metadata
"""
table_metadata['path'] = tuple(table_path)
table_metadata['schema_version'] = 2
table_metadata.pop('table_mappings', None)
return table_metadata
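# Example usage (a sketch with made-up metadata): migrating a version-1 record
# in place; the legacy 'table_mappings' key is dropped if present.
meta = {"schema_version": 1, "table_mappings": {}, "columns": ["a", "b"]}
meta = _update_schema_1_to_2(meta, ["db", "public", "events"])
assert meta["schema_version"] == 2
assert meta["path"] == ("db", "public", "events")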
| 5,339,394
|
def train_cnn_7layer(data, file_name, params, num_epochs=10, batch_size=256, train_temp=1, init=None, lr=0.01, decay=1e-5, momentum=0.9, activation="relu", optimizer_name="sgd"):
"""
Train a 7-layer cnn network for MNIST and CIFAR (same as the cnn model in Clever)
mnist: 32 32 64 64 200 200
cifar: 64 64 128 128 256 256
"""
# create a Keras sequential model
model = Sequential()
print("training data shape = {}".format(data.train_data.shape))
params = [int(p) for p in params]
# define model structure
model.add(Conv2D(params[0], (3, 3),
input_shape=data.train_data.shape[1:]))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(Conv2D(params[1], (3, 3)))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(params[2], (3, 3)))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(Conv2D(params[3], (3, 3)))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(params[4]))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(Dropout(0.5))
model.add(Dense(params[5]))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(Dense(200))
# load initial weights when given
if init != None:
model.load_weights(init)
# define the loss function which is the cross entropy between prediction and true label
def fn(correct, predicted):
return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
logits=predicted/train_temp)
if optimizer_name == "sgd":
# initiate the SGD optimizer with given hyper parameters
optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
elif optimizer_name == "adam":
optimizer = Adam(lr=lr, beta_1 = 0.9, beta_2 = 0.999, epsilon = None, decay=decay, amsgrad=False)
# compile the Keras model, given the specified loss and optimizer
model.compile(loss=fn,
optimizer=optimizer,
metrics=['accuracy'])
model.summary()
print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))
# run training with given dataset, and print progress
history = model.fit(data.train_data, data.train_labels,
batch_size=batch_size,
validation_data=(data.validation_data, data.validation_labels),
epochs=num_epochs,
shuffle=True)
# save model to a file
if file_name != None:
model.save(file_name)
print('model saved to ', file_name)
return {'model':model, 'history':history}
| 5,339,395
|
def validate():
"""
    Goes over all seasons, makes sure they are all there, and checks that each has length 52, except the last one
"""
input_file = processedDatafileName
clusters_file = 'data/SeasonClustersFinal'
seasonDic = {}
allSeasons = {}
for line in open(clusters_file):
arr = line.strip().split()
year = int(arr[0])
season = int(arr[1])
seasonDic[year] = season
allSeasons[season] = True
# indexed by region
all_data = {}
in_f = open(input_file)
in_f.readline()
in_f.readline()
for line in in_f:
raw = line.strip().split(',')
region = raw[1].strip()
year = int(raw[2].strip())
week = int(raw[3].strip())
        ## up to the 20th week belongs to last year's cycle
if(week <= 20):
year -= 1
infection = raw[4].strip()
inf = 0
if is_number(infection):
inf = float(infection)
if region not in all_data:
all_data[region]={}
if year not in all_data[region]:
all_data[region][year] = []
all_data[region][year].append(inf)
isValid = True
region_order = []
for region, raw in all_data.items():
region_order.append(region)
keylist = list(raw.keys())
keylist.sort()
for year in keylist:
if year>=1998 and year<=2018 and len(raw[year]) != 52:
print(region, year)
isValid = False
return isValid
| 5,339,396
|
def get_registry(): # noqa: E501
"""Get registry information
Get information about the registry # noqa: E501
:rtype: Registry
"""
try:
res = Registry(
name="Challenge Registry",
description="A great challenge registry",
user_count=DbUser.objects.count(),
org_count=DbOrg.objects.count(),
challenge_count=DbChallenge.objects.count(),
)
status = 200
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status
| 5,339,397
|
def test_deformation_grid():
""" Test deformation grid, there is only so much that we can do ...
"""
im0, c0, dfield1, weight1, dfield2, weight2, pp1, pp2, pp3 = get_data_small_deform()
gridsampling = 6
# Create identity deform
d1 = DeformationGridBackward(im0, gridsampling)
assert d1.ndim == 2
assert d1.field_sampling == im0.sampling
for grid in d1.grids:
assert grid.field_sampling == im0.sampling
assert grid.grid_sampling == gridsampling
#
im1 = d1.apply_deformation(im0)
assert np.all(im1 == im0)
# Deform from field
# Create a deform - wrong weight, so is unit
d2 = DeformationGridBackward.from_field(dfield1, gridsampling, weight2, fd=im0, injective=False, frozenedge=False)
im2 = d2.apply_deformation(im0)
assert np.all(im2 == im0)
for grid in d2.grids:
assert np.all(grid._knots == 0)
# Create a deform - now get it right
d2 = DeformationGridBackward.from_field(dfield1, gridsampling, weight1, fd=im0, injective=False, frozenedge=False)
assert d2.field_sampling == im0.sampling
for grid in d2.grids:
assert grid.field_sampling == im0.sampling
assert grid.grid_sampling == gridsampling
im2 = d2.apply_deformation(im0)
c2 = cog(im2)
# Assert that we shifted down, if only by a bit
assert abs(c2[0,0] - c0[0,0]) < 1 and c2[0,1] > c0[0,1] + 1
# Deform from points, single-step
# Create a deform - now get it right
d3 = DeformationGridBackward.from_points(im0, gridsampling, pp1, pp2, injective=False, frozenedge=False)
assert d3.field_sampling == im0.sampling
im3 = d3.apply_deformation(im0)
c3 = cog(im3)
assert np.all(im2 == im3)
assert c2.distance(c3)[0] == 0
# vv.figure(1); vv.clf(); vv.subplot(221); vv.imshow(im0); vv.subplot(222); vv.imshow(im2); vv.subplot(223); vv.imshow(im3);
# vv.figure(2); vv.clf(); vv.subplot(211); d2.show(); vv.subplot(212); d3.show()
print('deformation_grid ok')
| 5,339,398
|
def get_fraction_vaccinated(model, trajectories, area=None, include_recovered=True):
"""Get fraction of individuals that are vaccinated or immune (by area) by
state.
Parameters
----------
model : amici.model
Amici model which should be evaluated.
trajectories : pd.DataFrame
Trajectories of the model simulation.
    area : str, optional
        Name of the area to restrict the computation to; None uses all areas.
include_recovered : bool
If True, recovered individuals are counted as well.
Returns
-------
percentage_vaccinated: pd.Series
Trajectories of the fraction that is vaccinated or immune.
"""
vaccinated = get_vaccinated_model(model, area=area)
sus_inf = get_alive_model(model, area=area)
df_vaccinated = trajectories[vaccinated]
df_sus_inf = trajectories[sus_inf]
total_vaccinated = df_vaccinated.sum(axis=1)
    total_alive = df_sus_inf.sum(axis=1)
    percentage_vaccinated = total_vaccinated / total_alive
return percentage_vaccinated
| 5,339,399
|