content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def logger():
    """
    Set up the root logger and the 'proarc' logger.

    Levels come from the environment: ``logLevelDefault`` (default INFO)
    for the root logger and its stdout handler, ``logLevelZeep``
    (default CRITICAL) for the 'proarc' logger.

    :return: root logger object
    """
    root_logger = logging.getLogger()
    # getLevelName maps a level *name* string (e.g. "INFO") to its numeric value.
    level = logging.getLevelName(os.environ.get('logLevelDefault', 'INFO'))
    root_logger.setLevel(level)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    stream_handler.setFormatter(formatter)
    root_logger.addHandler(stream_handler)
    # NOTE(review): despite the variable names, this configures the logger
    # named 'proarc', not zeep's own logger -- confirm this is intended.
    zeep_logger = logging.getLogger('proarc')
    zeep = logging.getLevelName(os.environ.get('logLevelZeep', 'CRITICAL'))
    zeep_logger.setLevel(zeep)
    return root_logger
def some_function(t):
    """Return *t* with the word " python" appended."""
    suffix = " python"
    return t + suffix
def build_model(args):
    """
    Function: Build a deep learning model
    Input:
        args: input parameters saved in the type of parser.parse_args,
              read as a mapping; uses 'debug_mode' and 'keep_prob'
    Output:
        model: the constructed Keras Sequential model
    """
    if args['debug_mode'] is True:
        print("BUILDING MODEL......")
    model = Sequential()
    # Normalize pixel values to the [-0.5, 0.5] range
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
    # Add a layer to crop images (70 rows off the top, 25 off the bottom)
    model.add(Cropping2D(cropping=((70, 25), (0, 0)))) # remaining size: 65,320,3
    # Add three 5x5 convolution layers
    # NOTE(review): Convolution2D(..., subsample=...) is the Keras 1.x API;
    # newer Keras uses Conv2D(..., strides=...) -- confirm the targeted version.
    model.add(Convolution2D(24, 5, 5, activation='elu', subsample=(2, 2)))
    model.add(Convolution2D(36, 5, 5, activation='elu', subsample=(2, 2)))
    model.add(Convolution2D(48, 5, 5, activation='elu', subsample=(2, 2)))
    # Add two 3x3 convolution layers
    model.add(Convolution2D(64, 3, 3, activation='elu'))
    model.add(Convolution2D(64, 3, 3, activation='elu'))
    # Add a flatten layer
    model.add(Flatten())
    # Add a dropout to overcome overfitting
    model.add(Dropout(args['keep_prob']))
    # Add three fully connected layers
    model.add(Dense(100, activation='elu'))
    model.add(Dense(50, activation='elu'))
    model.add(Dense(10, activation='elu'))
    # Add a fully connected output layer (single regression output)
    model.add(Dense(1))
    # Summary
    model.summary()
    return model
def add_context_for_join_form(context, request):
    """ Helper function used by view functions below.

    If the session shows a previously joined market, flag the session when
    the trader is gone (removed from the market or deleted from the
    database) and expose the market on the context for the template.
    """
    # If the client has already joined a market
    if 'trader_id' in request.session:
        # If trader is in database
        if Trader.objects.filter(id=request.session['trader_id']).exists():
            trader = Trader.objects.get(id=request.session['trader_id'])
            # If trader has been removed from market
            if trader.removed_from_market:
                request.session['removed_from_market'] = True
        # If trader has been deleted from database
        else:
            # Deletion is treated the same as removal from the market.
            request.session['removed_from_market'] = True
        # We add this market to the context to notify the client
        market = get_object_or_404(
            Market, market_id=request.session['market_id'])
        context['market'] = market
    return context
def create_list(inner_type_info: CLTypeInfo) -> CLTypeInfoForList:
    """Build CL type information describing a list type.

    :param CLTypeInfo inner_type_info: Type information pertaining to each element within list.
    """
    list_type_info = CLTypeInfoForList(
        typeof=CLType.LIST,
        inner_type_info=inner_type_info,
    )
    return list_type_info
def get_baseline(baseline_filename, plugin_filenames=None):
    """Load a secrets baseline from a file, verifying it is staged first.

    :type baseline_filename: string
    :param baseline_filename: name of the baseline file
    :type plugin_filenames: tuple
    :param plugin_filenames: list of plugins to import
    :raises: IOError
    :raises: ValueError
    """
    # Nothing to load without a filename.
    if not baseline_filename:
        return
    raise_exception_if_baseline_file_is_unstaged(baseline_filename)
    baseline_string = _get_baseline_string_from_file(baseline_filename)
    return SecretsCollection.load_baseline_from_string(
        baseline_string,
        plugin_filenames=plugin_filenames,
    )
def del_ind_purged(*args):
    """
    del_ind_purged(ea)

    Thin SWIG-style wrapper that forwards all arguments directly to
    ``_ida_nalt.del_ind_purged`` and returns its result.
    """
    return _ida_nalt.del_ind_purged(*args)
def hash_password(password, salthex=None, reps=1000):
    """Compute secure (hash, salthex, reps) triplet for password.

    The password string is required. The returned salthex and reps
    must be saved and reused to hash any comparison password in
    order for it to match the returned hash.

    :param password: password string to hash (required).
    :param salthex: even-length string of hexadecimal digits,
        recommended length 16 or greater; chosen randomly when not
        provided. E.g. salt="([0-9a-z][0-9a-z])*"
    :param reps: iteration count, clamped to a minimum of 1; should be
        a relatively large number (default 1000) to slow down
        brute-force attacks.
    :return: tuple of (hash hex string, salthex, reps).
    """
    if not salthex:
        # Fix: use a cryptographically secure RNG for salt generation;
        # the module-level Mersenne Twister in ``random`` is predictable.
        sysrand = random.SystemRandom()
        salthex = ''.join("%02x" % sysrand.randint(0, 0xFF)
                          for _ in range(8))
    # Parse the even-length hex string into raw salt bytes.
    salt = bytes.fromhex(salthex)
    if reps < 1:
        reps = 1
    msg = password.encode()
    # NOTE: MD5 is retained for compatibility with previously stored
    # hashes; it is weak -- prefer hashlib.pbkdf2_hmac for new data.
    for _ in range(reps):
        msg = hmac.HMAC(salt, msg, digestmod='MD5').hexdigest().encode()
    return (msg.decode(), salthex, reps)
def create_scheduled_job_yaml_spec(
    descriptor_contents: Dict, executor_config: ExecutorConfig, job_id: str, event: BenchmarkEvent
) -> str:
    """
    Transpiles a parsed descriptor into the YAML spec of a scheduled benchmark job.

    :param event: event that triggered this execution
    :param descriptor_contents: dict containing the parsed descriptor
    :param executor_config: configuration for the transpiler
    :param job_id: str
    :return: yaml string for the given descriptor
    """
    descriptor = BenchmarkDescriptor.from_dict(descriptor_contents, executor_config.descriptor_config)
    builder = create_scheduled_benchmark_bai_k8s_builder(
        descriptor, executor_config.bai_config, job_id, event=event
    )
    return builder.dump_yaml_string()
def isCameraSet(cameraSet):
    """
    Returns true if the object is a camera set. This is simply
    a wrapper objectType -isa

    NOTE(review): stub body -- the docstring describes the intended
    behavior, but the function currently does nothing and returns None.
    """
    pass
def _parse_args():
"""
Parse arguments for the CLI
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--fovs',
type=str,
required=True,
help="Path to the fov data",
)
parser.add_argument(
'--exp',
type=str,
required=True,
help="Path to experiment file",
)
return parser.parse_args() | 29,810 |
def ArgMin(iterable, key=None, default=None, retvalue=False):
    """
    iterable >> ArgMin(key=None, default=None, retvalue=True)
    Return index of first minimum element (and minimum) in input
    (transformed or extracted by key function).
    >>> [1, 2, 0, 2] >> ArgMin()
    2
    >>> ['12', '1', '123'] >> ArgMin(key=len, retvalue=True)
    (1, '1')
    >>> ['12', '1', '123'] >> ArgMin(key=len)
    1
    >>> [] >> ArgMin(default=0)
    0
    >>> [] >> ArgMin(default=(None, 0), retvalue=True)
    (None, 0)
    >>> data = [(3, 10), (2, 20), (1, 30)]
    >>> data >> ArgMin(key=0)
    2
    >>> data >> ArgMin(1)
    0
    :param iterable iterable: Iterable over numbers
    :param int|tuple|function|None key: Key function to extract or
        transform elements. None = identity function.
    :param object default: Value returned if iterable is empty.
    :param bool retvalue: If True the index and the value of the
        minimum element is returned.
    :return: index of smallest element according to key function
        and the smallest element itself if retvalue==True.
    :rtype: object | tuple
    """
    try:
        # colfunc turns key (None | int | tuple | callable) into a callable.
        f = colfunc(key)
        # Pair each element with its index; compare by the key of the element.
        i, v = min(enumerate(iterable), key=lambda i_e1: f(i_e1[1]))
        return (i, v) if retvalue else i
    except Exception:
        # NOTE(review): this broad except also masks errors raised by the
        # key function, not only the empty-iterable ValueError from min();
        # confirm that is intended before narrowing it.
        return default
def localize_peaks_monopolar_triangulation(traces, local_peak, contact_locations, neighbours_mask, nbefore, nafter, max_distance_um):
    """
    This method is from Julien Boussard see spikeinterface.toolki.postprocessing.unit_localization

    For each detected peak, fits a monopolar source position by
    least-squares over the peak-to-peak amplitudes of the channels
    neighbouring the peak channel.
    """
    peak_locations = np.zeros(local_peak.size, dtype=dtype_localize_by_method['monopolar_triangulation'])
    for i, peak in enumerate(local_peak):
        # Boolean mask of channels considered neighbours of the peak channel.
        chan_mask = neighbours_mask[peak['channel_ind'], :]
        chan_inds, = np.nonzero(chan_mask)
        local_contact_locations = contact_locations[chan_inds, :]
        # wf is (nsample, nchan) - restricted to the neighbour channels only
        wf = traces[peak['sample_ind']-nbefore:peak['sample_ind']+nafter, :][:, chan_inds]
        # Peak-to-peak amplitude per channel is the signal fed to the fit.
        wf_ptp = wf.ptp(axis=0)
        x0, bounds = make_initial_guess_and_bounds(wf_ptp, local_contact_locations, max_distance_um)
        args = (wf_ptp, local_contact_locations)
        output = scipy.optimize.least_squares(estimate_distance_error, x0=x0, bounds=bounds, args = args)
        peak_locations[i] = tuple(output['x'])
    return peak_locations
def test_broadcast_receive_short(feeder):
    """Test the reception of a normal broadcast message.

    For this test we receive the GFI1 (Fuel Information 1 (Gaseous)) PGN 65202 (FEB2).
    Its length is 8 Bytes. The contained values are bogus, of course.
    """
    feeder.accept_all_messages()
    # A single raw CAN frame: PGN 0xFEB2 from source address 0x01.
    feeder.can_messages = [
        (Feeder.MsgType.CANRX, 0x00FEB201, [1, 2, 3, 4, 5, 6, 7, 8], 0.0),
    ]
    # Expected decoding: exactly one PDU with the same 8-byte payload.
    feeder.pdus = [(Feeder.MsgType.PDU, 65202, [1, 2, 3, 4, 5, 6, 7, 8])]
    feeder.receive()
def format_dict_with_indention(data):
    """Serialize a dict to a 4-space-indented JSON string.

    :param data: a dict, or None
    :rtype: a JSON string, or None when data is None
    """
    return None if data is None else jsonutils.dumps(data, indent=4)
def get_logger(name, info_file, error_file, raw=False):
    """
    Build a logger forwarding messages to designated places.

    :param name: The name of the logger, or an existing logger object
    :param info_file: File receiving records below the error level;
        falls back to standard output when falsy
    :param error_file: File receiving error and fatal records;
        falls back to standard output when falsy
    :param raw: If True, emit records in the raw format
    :return: The configured logger
    """
    # Accept either a logger name or a ready-made logger object.
    logger = logging.getLogger(name) if isinstance(name, str) else name
    logger.setLevel(logging.DEBUG)
    # Info-level handler: rotate nightly when backed by a file.
    if info_file:
        info_handler = logging.handlers.TimedRotatingFileHandler(
            info_file, when='midnight', interval=1)
    else:
        info_handler = logging.StreamHandler()
    info_handler.setLevel(logging.DEBUG)
    info_handler.setFormatter(logging.Formatter(RAW_FORMAT if raw else LOG_FORMAT))
    # Error-level handler.
    if error_file:
        error_handler = logging.FileHandler(error_file)
    else:
        error_handler = logging.StreamHandler()
    error_handler.setLevel(logging.ERROR)
    error_handler.setFormatter(logging.Formatter(RAW_FORMAT if raw else LOG_FORMAT))
    # Attach both handlers to the logger.
    logger.addHandler(info_handler)
    logger.addHandler(error_handler)
    return logger
def _Rx(c, s):
    """Construct a rotation matrix around X-axis given cos and sin.

    The `c` and `s` MUST satisfy c^2 + s^2 = 1 and have the same shape.
    See https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations.
    """
    zero = np.zeros_like(c)
    one = np.ones_like(zero)
    rows = [
        [one, zero, zero],
        [zero, c, -s],
        [zero, s, c],
    ]
    return _tailstack2(rows)
def is_fraud(data):
    """
    Report whether the transaction was fraudulent.

    :param data: the data in the transaction; index 1 holds the fraud flag
    :return: True when the flag equals 1, False otherwise
    """
    fraud_flag = data[1]
    return fraud_flag == 1
def EvalBinomialPmf(k, n, p):
    """Evaluates the binomial pmf.

    Returns the probability of k successes in n trials with probability p.
    """
    dist = scipy.stats.binom(n, p)
    return dist.pmf(k)
def add_prospect(site_id, fname, lname, byear, bmonth, bday, p_type, id_type='all'):
    """
    Looks up a prospect's prospect_id given their first name (fname) last name (lname), site id (site_id), site (p_type), and birthdate (byear, bmonth, bday).
    If no prospect is found, adds the player to the professional_prospects table and returns the newly created prospect_id.

    NOTE(review): Python 2 code (print statements). SQL is built by string
    interpolation, so inputs must be trusted/sanitized upstream.
    """
    fname_search = fname_lookup(fname)
    # Build the id-match predicate: either match against every known id
    # column, or only the caller-specified one.
    if id_type == 'all':
        qry_add = """((mlb_id = "%s" AND mlb_id != 0)
        OR (mlb_draft_id = "%s" AND mlb_draft_id IS NOT NULL)
        OR (mlb_international_id = "%s" AND mlb_international_id IS NOT NULL)
        OR (fg_minor_id = "%s" AND fg_minor_id IS NOT NULL)
        OR (fg_major_id = "%s" AND fg_major_id IS NOT NULL)
        OR (fg_temp_id = "%s" AND fg_temp_id IS NOT NULL))""" % (site_id, site_id, site_id, site_id, site_id, site_id)
    else:
        qry_add = """(%s = "%s" AND (%s != 0 OR %s IS NOT NULL))""" % (id_type, site_id, id_type, id_type)
    # First pass: look the prospect up by site id alone.
    check_qry = """SELECT prospect_id
    FROM professional_prospects
    WHERE 1
    AND %s
    ;
    """
    check_query = check_qry % (qry_add)
    check_val = db.query(check_query)
    if check_val != ():
        prospect_id = check_val[0][0]
        return prospect_id
    else:
        # Second pass: match on birthdate plus punctuation-insensitive
        # first/last name comparison against both MLB and FanGraphs names.
        check_other_qry = """SELECT prospect_id
        FROM professional_prospects
        WHERE birth_year = %s
        AND birth_month = %s
        AND birth_day = %s
        AND (
        ( REPLACE(REPLACE(REPLACE(REPLACE(mlb_lname, ".", ""),"'",""),"-","")," ","")
        LIKE REPLACE(REPLACE(REPLACE(REPLACE("%%%s%%", ".", ""),"'",""),"-","")," ","") ) OR
        ( REPLACE(REPLACE(REPLACE(REPLACE("%s",".",""),"'",""),"-","")," ","")
        LIKE REPLACE(REPLACE(REPLACE(REPLACE(CONCAT("%%",mlb_lname,"%%"), ".", ""),"'",""),"-","")," ","") ) OR
        ( REPLACE(REPLACE(REPLACE(REPLACE(fg_lname, ".", ""),"'",""),"-","")," ","")
        LIKE REPLACE(REPLACE(REPLACE(REPLACE("%%%s%%", ".", ""),"'",""),"-","")," ","") ) OR
        ( REPLACE(REPLACE(REPLACE(REPLACE("%s", ".", ""),"'",""),"-","")," ","")
        LIKE REPLACE(REPLACE(REPLACE(REPLACE(CONCAT("%%",fg_lname,"%%"), ".", ""),"'",""),"-","")," ","") )
        )
        AND (
        ( REPLACE(REPLACE(REPLACE(REPLACE(mlb_fname, ".", ""),"'",""),"-","")," ","")
        LIKE REPLACE(REPLACE(REPLACE(REPLACE("%%%s%%", ".", ""),"'",""),"-","")," ","") ) OR
        ( REPLACE(REPLACE(REPLACE(REPLACE("%s",".",""),"'",""),"-","")," ","")
        LIKE REPLACE(REPLACE(REPLACE(REPLACE(CONCAT("%%",mlb_fname,"%%"), ".", ""),"'",""),"-","")," ","") ) OR
        ( REPLACE(REPLACE(REPLACE(REPLACE(fg_fname, ".", ""),"'",""),"-","")," ","")
        LIKE REPLACE(REPLACE(REPLACE(REPLACE("%%%s%%", ".", ""),"'",""),"-","")," ","") ) OR
        ( REPLACE(REPLACE(REPLACE(REPLACE("%s", ".", ""),"'",""),"-","")," ","")
        LIKE REPLACE(REPLACE(REPLACE(REPLACE(CONCAT("%%",fg_fname,"%%"), ".", ""),"'",""),"-","")," ","") )
        )
        ;"""
        check_other_query = check_other_qry % (byear, bmonth, bday, lname, lname, lname, lname, fname_search, fname_search, fname_search, fname_search)
        check_other_val = db.query(check_other_query)
        if check_other_val != ():
            # Found by name+birthdate: back-fill the missing id/name columns.
            prospect_id = check_other_val[0][0]
            f_name = "mlb_fname"
            l_name = "mlb_lname"
            if p_type == "professional":
                id_column = "mlb_id"
            elif p_type == "draft":
                id_column = "mlb_draft_id"
            elif p_type in ("int", "international"):
                id_column = "mlb_international_id"
            elif p_type == "fg":
                # FanGraphs ids encode their kind in the id string itself.
                if "_" in site_id:
                    id_column = "fg_temp_id"
                elif site_id[0] == "s":
                    id_column = "fg_minor_id"
                else:
                    id_column = "fg_major_id"
                f_name = "fg_fname"
                l_name = "fg_lname"
            print "\n\n\t\t\tadding", fname, lname, id_column, site_id, '\n\n'
            for col, val in {f_name:fname, l_name:lname, id_column:site_id}.items():
                set_str = 'SET %s = "%s"' % (col,val)
                # NOTE(review): set_str2 renders as a SQL comment ("#..."),
                # so the IS NULL guard is effectively disabled -- confirm.
                set_str2 = "# AND (%s IS NULL OR %s IS NULL)" % (col, col)
                update_qry = """UPDATE professional_prospects
                %s
                WHERE prospect_id = %s
                %s;"""
                update_query = update_qry % (set_str, prospect_id, set_str2)
                print update_query
                db.query(update_query)
            db.conn.commit()
            return prospect_id
        else:
            # No match at all: insert a fresh row, then re-run the id query
            # to fetch the newly assigned prospect_id.
            entry = {"birth_year":int(byear), "birth_month":int(bmonth), "birth_day":int(bday)}
            if p_type == "fg":
                if "_" in site_id:
                    entry["fg_temp_id"] = site_id
                elif site_id[0] == "s":
                    entry["fg_minor_id"] = site_id
                else:
                    entry["fg_major_id"] = site_id
                entry["fg_fname"] = fname
                entry["fg_lname"] = lname
            else:
                entry["mlb_fname"] = fname
                entry["mlb_lname"] = lname
                if p_type == "professional":
                    entry["mlb_id"] = site_id
                elif p_type == "draft":
                    entry["mlb_draft_id"] = site_id
                elif p_type in ("int", "international"):
                    entry["mlb_international_id"] = site_id
            db.insertRowDict(entry, "professional_prospects", debug=1)
            db.conn.commit()
            print '\n\n\n\n', check_other_query, '\n\n\n\n\n', check_query, '\n\n\n\n'
            recheck_val = db.query(check_query)
            prospect_id = recheck_val[0][0]
            return prospect_id
def reformat_icd_code(icd_code: str, is_diag: bool = True) -> str:
    """Put a period in the right place because the MIMIC-III data files exclude them.

    Procedure ICD codes get the period after the first two digits;
    diagnosis codes after the first three digits (four for E-codes).
    Adopted from: https://github.com/jamesmullenbach/caml-mimic
    """
    # Remove any periods already present before re-inserting one.
    code = icd_code.replace(".", "")
    if not is_diag:
        return code[:2] + "." + code[2:]
    if code.startswith("E"):
        if len(code) > 4:
            code = code[:4] + "." + code[4:]
        return code
    if len(code) > 3:
        code = code[:3] + "." + code[3:]
    return code
def parse_parent(self):
    """Parse enclosing arglist of self.

    Walks the token streams outwards from this arglist's boundaries in
    both directions and returns the completed enclosing Arglist, or None
    when either stream is exhausted before the enclosing list closes.
    """
    gtor_left = tokens_leftwards(self.begin)
    gtor_right = tokens_rightwards(self.end)
    enc = Arglist()
    enc.append_subarglist_right(self) # _left could have worked equally well
    try:
        parse_left(enc, gtor_left)
        parse_right(enc, gtor_right)
    except StopIteration:
        # Ran off the start/end of the token stream: no enclosing arglist.
        return None
    return enc.complete()
def gradthickellipserot(bmp: array,
        x: int, y: int, b: int, a: int,
        degrot: float, penradius: int,
        lumrange: list[int, int],
        RGBfactors: list[float, float, float]):
    """Draws a thick ellipse
    with a gradient fill
    and a defined pen radius
    with centerpoint at (x, y)
    and major and minor axis (b, a)
    rotated by degrot degrees
    Args:
        bmp       : unsigned byte array
                    with bmp format
        x, y      : center of ellipse
        b, a      : major and minor axis
        degrot    : rotation of
                    the ellipse
                    in degrees
        penradius : defines the
                    thickness
                    of the pen
        lumrange  : [byte:byte] sets
                    the range of the
                    luminosity gradient
        rgbfactors: [r, g, b] range are
                    from 0.0 min to
                    1.0 max
    Returns:
        byref modified
        unsigned byte array
    """
    lum1, lumrang = _rng2bsndel(lumrange)
    # Draw concentric thick ellipses from the outermost radius inwards,
    # stepping the gradient luminosity at each radius.
    for i in range(penradius, 0, -1):
        c = colormix(int(
            lum1 + (lumrang * i / penradius)),
            RGBfactors)
        # Non-24-bit bitmaps need the colour matched to the palette.
        if bmp[_bmclrbits] != 24:
            c = matchRGBtopal(
                int2RGBarr(c),
                getallRGBpal(bmp))
        thickellipserot(bmp,
            x, y, b, a, degrot, i, c)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Zigbee Home Automation cover from config entry."""
    entities_to_create = hass.data[DATA_ZHA][Platform.COVER]
    # Entity creation is deferred until discovery fires SIGNAL_ADD_ENTITIES.
    unsub = async_dispatcher_connect(
        hass,
        SIGNAL_ADD_ENTITIES,
        functools.partial(
            discovery.async_add_entities, async_add_entities, entities_to_create
        ),
    )
    # Disconnect the dispatcher listener when the config entry unloads.
    config_entry.async_on_unload(unsub)
def dump_annotation(ribo_handle):
    """
    Render the annotation of a ribo file as a BED-formatted string.

    Parameters
    ----------
    ribo_handle : h5py.File
        hdf5 handle for the ribo file

    Returns
    -------
    A string that can be output directly as a bed file.
    """
    boundaries = get_region_boundaries(ribo_handle)
    names = get_reference_names(ribo_handle)
    bed_rows = []
    for ref_name, ref_boundaries in zip(names, boundaries):
        for region_name, region_boundaries in zip(REGION_names, ref_boundaries):
            # Skip empty (or inverted) regions.
            if region_boundaries[1] <= region_boundaries[0]:
                continue
            columns = (ref_name,
                       region_boundaries[0], region_boundaries[1],
                       region_name, 0, "+")
            bed_rows.append("\t".join(str(col) for col in columns))
    return "\n".join(bed_rows)
def save_stl10(db):
    """Save STL-10 to HDF5 dataset.

    The train, test and unlabeled image batches are appended, in that
    order, to a single resizable "STL10" dataset.

    Parameters
    ----------
    db : h5py.File
        file object for HDF5 dataset.
    """
    folder_name = "stl10_binary"
    stl10_ds = db.create_dataset("STL10", (10, 32, 32, 3),
                                 maxshape=(None, 32, 32, 3),
                                 dtype="uint8")

    # Train split; the first resize replaces the placeholder first dimension.
    data = load_stl10_batch(folder_name, "train_X.bin")
    stl10_ds.resize(data.shape[0], axis=0)
    stl10_ds[:, :, :, :] = data

    # Test split, appended after the train images.
    data = load_stl10_batch(folder_name, "test_X.bin")
    stl10_ds.resize(stl10_ds.shape[0]+data.shape[0], axis=0)
    stl10_ds[-data.shape[0]:, :, :, :] = data

    # Unlabeled split, appended last.
    data = load_stl10_batch(folder_name, "unlabeled_X.bin")
    stl10_ds.resize(stl10_ds.shape[0]+data.shape[0], axis=0)
    stl10_ds[-data.shape[0]:, :, :, :] = data

    # Fix: completion message previously referred to CIFAR-100.
    print ("[MESSAGE] STL-10 dataset is saved.")
    print ("[MESSAGE] The dataset size is ", stl10_ds.shape)
def encode_snippets_with_states(snippets, states):
    """ Encodes snippets using the hidden states of a previous query.

    Inputs:
        snippets (list of Snippet): Input snippets.
        states (list of dy.Expression): Previous hidden states to use.
    """
    for snippet in snippets:
        embedding = dy.concatenate(
            [states[snippet.startpos], states[snippet.endpos]])
        snippet.set_embedding(embedding)
    return snippets
def get_openshift_console_url(namespace: str) -> str:
    """Get the openshift console url for a namespace.

    :param namespace: namespace used to build the PipelineRun console path
    :raises UMBNotificationError: when the console route cannot be queried
    :return: console url for the namespace's PipelineRuns
    """
    # Fix: with shell=True the command must be a plain string; it was
    # previously wrapped in a one-element tuple.
    cmd = "oc get route -n openshift-console console -o jsonpath='{.spec.host}'"
    # Fix: check=False so failures reach the domain-specific exception
    # below; check=True raised CalledProcessError first, making the
    # returncode branch unreachable.
    ret = subprocess.run(cmd, shell=True, check=False, capture_output=True)
    if ret.returncode != 0:
        # Fix: the message was missing its f-prefix, so nothing was
        # interpolated into the braces.
        raise UMBNotificationError(
            f"Could not detect the location of openshift console url: {ret.stdout.decode()}"
        )
    return f"https://{ret.stdout.decode()}/k8s/ns/{namespace}/tekton.dev~v1beta1~PipelineRun/"
def extract_phrases(line, pron):
    """Finds candidate phrases in iambic pentameter from a given line and its
    pronunciation.

    Yields (phrase words, phrase phonemes) pairs; a match may be yielded
    twice when it suits both masculine and feminine rhyme.
    """
    # First, extract the stress pattern and scan for iambic pentameter
    stress, bound, wordidx = get_stress_and_boundaries(pron)
    # Lazily computed only when the first usable match is found.
    words, phon_by_word = None, None
    for match in iambic_pentameter.finditer(stress):
        idx = match.start()
        # Distinguish two possible types of rhyme: masculine (ends in stressed
        # vowel) and feminine (ends in an additional unstressed vowel);
        # depending on where the word boundaries fall, a candidate phrase can be
        # suitable for only one of these or both
        has_masc, has_fem = False, False
        if bound[idx] != "1":
            continue  # start of the pattern does not coincide with word
            # boundary; discard
        if len(bound) <= (idx + 10):  # pattern is at the end of the line; can
            # use for masc rhyme but not fem
            has_masc = True
        else:
            if bound[idx + 10] == "1":
                has_masc = True  # pattern ends at word boundary, can use for masc rhyme
            if stress[idx + 10] == "0" and (
                len(bound) <= (idx + 11) or bound[idx + 11] == "1"
            ):
                has_fem = True  # there is another unstressed vowel after the
                # pattern and it ends at word boundary, can use
                # for fem rhyme
        if not (has_masc or has_fem):
            continue
        # The rest of this function just reconstructs the matched section from
        # `line` and `pron`. We also return the pronunciation in order to check
        # for rhyme later.
        if words is None:
            words = g2p_preprocess(line)
            phon_by_word = "÷".join(pron).split(" ")
        if has_masc:
            phrase = words[wordidx[idx] : wordidx[idx + 10]]
            if phrase[-1] in string.punctuation:
                phrase = phrase[:-1]
            phrase_phon = (
                "".join(phon_by_word[wordidx[idx] : wordidx[idx + 10]])
                .replace("÷", " ")
                .split()
            )
            log.debug(" ".join(phrase))
            yield (phrase, phrase_phon)
        if has_fem:
            phrase = words[wordidx[idx] : wordidx[idx + 11]]
            if phrase[-1] in string.punctuation:
                phrase = phrase[:-1]
            phrase_phon = (
                "".join(phon_by_word[wordidx[idx] : wordidx[idx + 11]])
                .replace("÷", " ")
                .split()
            )
            log.debug(" ".join(phrase))
            yield (phrase, phrase_phon)
def calculate_ani(blast_results, fragment_length):
    """
    Takes the blast results and tallies ANI statistics versus the
    reference genome.

    Fragments with >70% identity aligned over >70% of their length count
    as hits; the rest are tallied as unaligned.

    Returns (sum of identities, hit count, aligned bases,
    unaligned fragment count, unaligned bases).
    """
    sum_identity = float(0)
    number_hits = 0
    total_aligned_bases = 0
    total_unaligned_fragments = 0
    total_unaligned_bases = 0
    # NOTE(review): computed but not part of the returned tuple.
    conserved_dna_bases = 0
    for query, hit in blast_results.items():
        identity = float(hit[2])
        query_start = float(hit[6])
        query_end = float(hit[7])
        length = fragment_length[query]
        perc_aln_length = (query_end - query_start) / length
        if identity > float(69.9999) and perc_aln_length > float(0.69999):
            sum_identity += identity
            number_hits += 1
            total_aligned_bases += length
        else:
            total_unaligned_fragments += 1
            total_unaligned_bases += length
        if identity > float(89.999):
            conserved_dna_bases += length
    return sum_identity, number_hits, total_aligned_bases, total_unaligned_fragments, total_unaligned_bases
def _save_state_from_in_memory_checkpointer(
    save_path, experiment_class: experiment.AbstractExperiment):
  """Saves experiment state to a checkpoint.

  For every in-memory checkpoint with history, the latest snapshot's
  CHECKPOINT_ATTRS are pickled with dill into
  <save_path>/<checkpoint_name>/<step-date label>/checkpoint.dill.
  """
  logging.info('Saving model.')
  for (checkpoint_name,
       checkpoint) in pipeline_utils.GLOBAL_CHECKPOINT_DICT.items():
    if not checkpoint.history:
      logging.info('Nothing to save in "%s"', checkpoint_name)
      continue
    # Only the most recent snapshot is persisted.
    pickle_nest = checkpoint.history[-1].pickle_nest
    global_step = pickle_nest['global_step']
    state_dict = {'global_step': global_step}
    for attribute, key in experiment_class.CHECKPOINT_ATTRS.items():
      # get_first drops the leading device axis of replicated values.
      state_dict[key] = pipeline_utils.get_first(
          getattr(pickle_nest['experiment_module'], attribute))
    save_dir = os.path.join(
        save_path, checkpoint_name, _get_step_date_label(global_step))
    python_state_path = os.path.join(save_dir, 'checkpoint.dill')
    os.makedirs(save_dir, exist_ok=True)
    with open(python_state_path, 'wb') as f:
      dill.dump(state_dict, f)
    logging.info(
        'Saved "%s" checkpoint to %s', checkpoint_name, python_state_path)
def process_line(line: str, conditional_chain: List[str],
                 fields: Dict[str, str]):
    """ Processes a line in the template, i.e. returns the output html code
    after evaluating all if statements and filling the fields. Since we
    oftentimes are in the middle of several if statements, we need to pass
    the current conditional_chain (i.e. the list of if statments the following
    line will be subject to) on (and also need to use it).
    Args:
        line: Line we are processing
        conditional_chain: In which conditionals are we currently enclosed?
        fields: field values
    Returns:
        (html output, conditional_chain)
    """
    after = line
    out = ""
    while after:
        # Split off the text before the next {...} token and the token itself.
        before, enclosed, after = next_braces(after)
        # Plain text is emitted only while every enclosing conditional holds.
        if evaluate_conditional_chain(conditional_chain, fields):
            out += before
        if is_pos_conditional(enclosed) or is_neg_conditional(enclosed):
            conditional_chain.append(enclosed)
        elif is_close_conditional(enclosed):
            if not len(conditional_chain) >= 1:
                _ = "Closing conditional '{}' found, but we didn't encounter" \
                    " a conditional before.".format(enclosed)
                logger.error(_)
            else:
                field_name = get_field_name(enclosed)
                # Mismatched closings are reported and ignored rather than popped.
                if field_name not in conditional_chain[-1]:
                    _ = "Closing conditional '{}' found, but the last opened" \
                        " conditional was {}. I will " \
                        "ignore this.".format(enclosed, field_name)
                    logger.error(_)
                else:
                    conditional_chain.pop()
        elif is_field(enclosed):
            field_name = get_field_name(enclosed)
            # NOTE(review): field values are substituted even when inside a
            # currently-false conditional (only `before` text is gated above)
            # -- confirm this is intended.
            if field_name in fields:
                out += fields[field_name]
            else:
                _ = "Could not find value for field '{}'".format(field_name)
                logger.error(_)
    return out, conditional_chain
def aasgui(ctx, name):
    """
    Display AAS graph from AWR

    Resolves ``name`` to a list of databases, then renders the AAS graph
    of each one in GUI mode.
    """
    dblist = get_db_list(name)
    for db in dblist:
        dbobj = get_db_obj(ctx, db)
        # Each database's entry from the shared context supplies its config.
        dbobj.getAAS((db, ctx.obj["database_list"][db]), GUI=True)
def get_groundstation_code(gsi):
    """
    Translate a GSI code into an EODS domain code.
    Domain codes are used in dataset_ids.
    It will also translate common gsi aliases if needed.
    :type gsi: str
    :rtype: str
    >>> get_groundstation_code('ASA')
    '002'
    >>> get_groundstation_code('HOA')
    '011'
    >>> # Aliases should work too.
    >>> get_groundstation_code('ALSP')
    '002'
    """
    station = metadata.get_groundstation(gsi)
    if station:
        return station.eods_domain_code
    # Unknown GSI: no domain code available.
    return None
def checkmember(value, values, msg=""):
    """Check value for membership; raise ValueError if fails."""
    if value in values:
        return
    raise ValueError(f"ERROR {msg}: {value} should be in {values}")
def concatenate(*data_frames: DataFrame,
                **data_points_or_frames: Union[DataFrame, DataPoint]) \
        -> DataFrame:
    """
    Concatenate DataFrame's objects or DataPoint's into one DataFrame.
    Example:
    if one DataFrame represents as:
        df1 -> {'a': 1, 'b': 2}
    another as:
        df2 -> {'c': 3, 'd': 4}
    you can concatenate simple keys into one DataFrame:
        df3 = concatenate(b=df1.get('b'), c=df2.get('c'))
    It's not performance friendly operation for long DataFrames (DataFrames
    which has a lot of function). Because data will be duplicated and merged
    in the end of first pipeline graph.
    :param data_frames: list of DataFrames to concatenate together, if
    some keys overlapped - it will replaced by latest DataFrame.
    :param data_points_or_frames: mapping of key->DataPoint or key->DataFrame
    which will store in new DataFrame. If value is a DataFrame it will be wrapped
    in new key.
    :return: New DataFrame which represents concatenation of provided values.
    """
    ensure_concatenate_allowed(data_frames, data_points_or_frames)
    # Any input may supply the base pipeline; keyword inputs take precedence.
    base_pipeline = None
    if data_frames:
        base_pipeline = data_frames[0].data_pipeline
    if data_points_or_frames:
        base_pipeline = list(data_points_or_frames.values())[0].data_pipeline
    keys = sorted(data_points_or_frames.keys())
    connector_name = "%s:%s" % ("concatenate", "/".join(keys))
    # Single connector component that all inputs feed their results into.
    last_p_component = PipelineConnectorComponent(
        pipeline=base_pipeline,
        name=connector_name)
    # TODO: check if all data_pipelines_transformations actually transformations
    # NOTE(review): built only from keyword inputs -- calling with positional
    # data_frames alone would fail at data_pipeline_items[0] below; confirm.
    data_pipeline_items = [dp.data_pipeline for dp in data_points_or_frames.values()]
    # generate's result transformation object
    for key, data_point_or_frame in data_points_or_frames.items():
        p = data_point_or_frame.data_pipeline
        leaves = pipeline_graph.get_leaves(p.graph)
        assert len(leaves) == 1
        if isinstance(data_point_or_frame, DataPoint):
            # Single value: rename its key to the target key.
            data_point = data_point_or_frame
            keys_to_transform = {data_point.get_key(): key}
            transformation_func = transformations_types.KeyToKey(keys_to_transform)
            leaves[0].p_component.add_context(last_p_component,
                                              transformation_func)
        else:
            # Whole frame: nest its keys under the target key.
            data_frame = data_point_or_frame
            keys_to_transform = data_frame.transformation
            transformation_func = transformations_types.KeysToDict(key,
                                                                   keys_to_transform)
            leaves[0].p_component.add_context(last_p_component,
                                              transformation_func)
    for data_frame in data_frames:
        p = data_frame.data_pipeline
        leaves = pipeline_graph.get_leaves(p.graph)
        assert len(leaves) == 1
        keys_to_transform = data_frame.transformation
        transformation_func = transformations_types.AllKeys(keys_to_transform)
        leaves[0].p_component.add_context(last_p_component,
                                          transformation_func)
    base_data_pipeline = copy.copy(data_pipeline_items[0])
    base_graph = base_data_pipeline.graph
    base_worker_info = data_pipeline_items[0].worker_info
    # Chain the remaining pipeline graphs sequentially onto the base graph.
    for data_pipeline in data_pipeline_items[1:]:
        if data_pipeline.worker_info is not base_worker_info:
            raise RuntimeError("Not possible to use different worker configs, "
                               "for pipeline")
        graph, base_g_item, handler_item = \
            concatenate_sequentially(base_graph, data_pipeline.graph)
        base_graph = graph
    base_graph.add_pipeline_component(last_p_component)
    return DataFrame(base_data_pipeline)
def mixed_social_welfare(game, mix):
    """Returns the social welfare of a mixed strategy profile.

    Computed as the expected payoffs under `mix` dotted with the number
    of players in each role.
    """
    payoffs = game.expected_payoffs(mix)
    return payoffs.dot(game.num_role_players)
def bdh(
    tickers, flds=None, start_date=None, end_date='today', adjust=None, **kwargs
) -> pd.DataFrame:
    """
    Bloomberg historical data
    Args:
        tickers: ticker(s)
        flds: field(s); defaults to ['Last_Price']
        start_date: start date; defaults to 3 months before end_date
        end_date: end date - default today
        adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
                exact match of above words will adjust for corresponding events
            Case 0: `-` no adjustment for dividend or split
            Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
            Case 2: `adjust` will adjust for splits and ignore all dividends
            Case 3: `all` == `dvd|split` == adjust for all
            Case 4: None == Bloomberg default OR use kwargs
        **kwargs: overrides
    Returns:
        pd.DataFrame
    Examples:
        >>> res = bdh(
        ...     tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],
        ...     start_date='2018-02-05', end_date='2018-02-07',
        ... ).round(2).transpose()
        >>> res.index.name = None
        >>> res.columns.name = None
        >>> res
                              2018-02-05  2018-02-06  2018-02-07
        VIX Index High             38.80       50.30       31.64
                  Low              16.80       22.42       21.17
                  Last_Price       37.32       29.98       27.73
        >>> bdh(
        ...     tickers='AAPL US Equity', flds='Px_Last',
        ...     start_date='20140605', end_date='20140610', adjust='-'
        ... ).round(2)
        ticker     AAPL US Equity
        field             Px_Last
        2014-06-05         647.35
        2014-06-06         645.57
        2014-06-09          93.70
        2014-06-10          94.25
        >>> bdh(
        ...     tickers='AAPL US Equity', flds='Px_Last',
        ...     start_date='20140606', end_date='20140609',
        ...     CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,
        ... ).round(2)
        ticker     AAPL US Equity
        field             Px_Last
        2014-06-06         645.57
        2014-06-09          93.70
    """
    logger = logs.get_logger(bdh, level=kwargs.pop('log', logs.LOG_LEVEL))
    # Dividend adjustments: translate the `adjust` shorthand into the
    # Bloomberg element overrides.
    if isinstance(adjust, str) and adjust:
        if adjust == 'all':
            kwargs['CshAdjNormal'] = True
            kwargs['CshAdjAbnormal'] = True
            kwargs['CapChg'] = True
        else:
            kwargs['CshAdjNormal'] = 'normal' in adjust or 'dvd' in adjust
            kwargs['CshAdjAbnormal'] = 'abn' in adjust or 'dvd' in adjust
            kwargs['CapChg'] = 'split' in adjust
    con, _ = create_connection()
    elms = assist.proc_elms(**kwargs)
    ovrds = assist.proc_ovrds(**kwargs)
    # Normalize scalar inputs to lists and apply the default field.
    if isinstance(tickers, str): tickers = [tickers]
    if flds is None: flds = ['Last_Price']
    if isinstance(flds, str): flds = [flds]
    e_dt = utils.fmt_dt(end_date, fmt='%Y%m%d')
    if start_date is None:
        # Default window: 3 months before the end date.
        start_date = pd.Timestamp(e_dt) - relativedelta(months=3)
    s_dt = utils.fmt_dt(start_date, fmt='%Y%m%d')
    logger.info(
        f'loading historical data from Bloomberg:\n'
        f'{assist.info_qry(tickers=tickers, flds=flds)}'
    )
    logger.debug(
        f'\nflds={flds}\nelms={elms}\novrds={ovrds}\nstart_date={s_dt}\nend_date={e_dt}'
    )
    res = con.bdh(
        tickers=tickers, flds=flds, elms=elms, ovrds=ovrds, start_date=s_dt, end_date=e_dt
    )
    res.index.name = None
    # Optionally flatten the column MultiIndex when only one field was asked.
    if (len(flds) == 1) and kwargs.get('keep_one', False):
        return res.xs(flds[0], axis=1, level=1)
    return res
def isValid(text):
    """
    "Play Blackjack"
    """
    # Case-insensitive whole-word match for "blackjack"; the docstring phrase
    # above is kept verbatim (it reads like a voice-command example).
    return re.search(r'\bblackjack\b', text, re.IGNORECASE) is not None
def init_app(app):
    """Initialize the Flask application.

    Currently a no-op hook: the app is returned unchanged so callers can
    chain it; extension setup can be added here later.

    :param app: the Flask application instance
    :return: the same application instance, unmodified
    """
    return app
def bounds(geometry, **kwargs):
    """Compute the bounds (extent) of a geometry.

    For each geometry, four numbers are returned: min x, min y, max x, max y.
    Empty or missing geometries yield four NaNs.

    Parameters
    ----------
    geometry : Geometry or array_like
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> bounds(Geometry("POINT (2 3)")).tolist()
    [2.0, 3.0, 2.0, 3.0]
    >>> bounds(Geometry("LINESTRING (0 0, 0 2, 3 2)")).tolist()
    [0.0, 0.0, 3.0, 2.0]
    >>> bounds(None).tolist()
    [nan, nan, nan, nan]
    """
    geoms = np.asarray(geometry, dtype=np.object_)
    # An explicit `out` buffer is required for compatibility with
    # numpy < 1.16 (https://github.com/numpy/numpy/issues/14949).
    result = np.empty(geoms.shape + (4,), dtype="float64")
    return lib.bounds(geoms, out=result, **kwargs)
def test_build_mechfile_entry_ftp_location_with_other_values():
    """Test if mechfile_entry is filled out."""
    actual = mech.utils.build_mechfile_entry(location='ftp://foo', name='aaa',
                                             box='bbb', box_version='ccc',
                                             provider='vmware')
    expected = {
        'box': 'bbb',
        'box_version': 'ccc',
        'name': 'aaa',
        'provider': 'vmware',
        'windows': False,
        'shared_folders': [{'host_path': '.', 'share_name': 'mech'}],
        'url': 'ftp://foo',
    }
    assert actual == expected
def getRidgeEdge(distComponent, maxCoord, direction):
    """Find the ridge points farthest from and nearest to the camera.

    Among the points whose value lies within 1 of the maximum of
    ``distComponent`` (the "ridge" of near-maximal distance from the green
    region), return the point with the largest and the point with the
    smallest projection onto ``direction`` relative to ``maxCoord``.  These
    approximate the centers of a stone's top and bottom faces.

    Args:
        distComponent: 2-D array of distance values.
        maxCoord: index tuple (row, col) of the maximum of ``distComponent``.
        direction: 2-vector used to rank ridge points by dot product.

    Returns:
        (maxEdgePoint, minEdgePoint): coordinate arrays of the ridge points
        with the largest and smallest projection onto ``direction``.
    """
    maxValue = distComponent[maxCoord]
    # Candidate ridge: every point whose value is within 1 of the maximum.
    ridge = np.array(np.where(distComponent >= maxValue - 1)).T
    offsets = ridge - np.asarray(maxCoord)
    # Constrain candidates to within maxValue of maxCoord so the ridge of a
    # neighboring stone is not picked up (vectorized; replaces the previous
    # per-row np.apply_along_axis lambdas with identical results).
    ridge = ridge[np.linalg.norm(offsets, axis=1) <= maxValue]
    # Projection of each remaining ridge point onto the given direction.
    dotValue = (ridge - np.asarray(maxCoord)) @ np.asarray(direction)
    # Points with maximal / minimal projection.
    maxEdgePoint = np.array(ridge[np.argmax(dotValue)])
    minEdgePoint = np.array(ridge[np.argmin(dotValue)])
    return maxEdgePoint, minEdgePoint
def map_vL(X, w):
    """
    Map a sample X drawn from a vector Langevin distribution with orientation
    u = [0, ..., 0, 1] to one with orientation w, using the eigenbasis of the
    Householder reflector built from w.
    """
    assert w.shape[0] == X.shape[0]
    dim = w.shape[0]
    w = w.reshape(dim, 1)
    # Householder reflector H = I - 2 w w^T.
    H = np.eye(dim) - 2 * np.dot(w, w.T)
    _, vecs = np.linalg.eigh(H)
    # Reverse eigenvector columns (eigh returns ascending eigenvalues).
    V = vecs[:, ::-1]
    # Ensure the last basis vector points along w rather than against it.
    if np.sum(w.flatten() * V[:, -1]) < 0:
        V[:, -1] = -V[:, -1].copy()
    return np.dot(V, X)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """
    Build a schedule whose learning rate increases linearly from 0 to the
    initial lr set in the optimizer over a warmup period, then stays constant.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def warmup_then_constant(step: int):
        # Flat multiplier of 1.0 once warmup is done; linear ramp before.
        if step >= num_warmup_steps:
            return 1.0
        return float(step) / float(max(1.0, num_warmup_steps))

    return LambdaLR(optimizer, warmup_then_constant, last_epoch=last_epoch)
def slid_window_avg(a, wi):
    """ a simple window-averaging function, centerd on the current point """
    # TODO: replace with pandas rolling average. - rth
    arr = np.array(a)
    smoothed = np.zeros(arr.shape)
    left = wi // 2
    right = wi - left
    n = arr.shape[0]
    for idx in range(n):
        # Window is clamped to the array bounds at both edges.
        lo = max(0, idx - left)
        hi = min(idx + right, n)
        smoothed[idx] = arr[lo:hi].mean(axis=0)
    return smoothed
def sepg(env, policy,
         horizon,
         batchsize = 100,
         iterations = 1000,
         gamma = 0.99,
         rmax = 1.,
         phimax = 1.,
         safety_requirement = 'mi',
         delta = 1.,
         confidence_schedule = None,
         clip_at = 100,
         test_batchsize = False,
         render = False,
         seed = None,
         baseline = 'peters',
         shallow = True,
         action_filter = None,
         parallel = False,
         logger = Logger(name='SEPG'),
         save_params = 1000,
         log_params = True,
         verbose = True):
    """Safely-Exploring Policy Gradient (SEPG) training loop.

    Alternates between a variance (std / omega) update on even iterations and
    a mean (theta) update on odd iterations, choosing step sizes from
    safety-constrained bounds (monotonic improvement, budget, or a fixed
    threshold, per ``safety_requirement``).

    Only for SIMPLE Gaussian policy w/ scalar variance
    Policy must have learn_std = False, as std is META-learned
    """
    #Defaults
    # NOTE(review): the docstring says learn_std must be False, yet this
    # assert requires policy.learn_std to be truthy -- reconcile the two.
    assert policy.learn_std
    if action_filter is None:
        action_filter = clip(env)
    #Seed agent
    if seed is not None:
        seed_all_agent(seed)
    #Prepare logger
    algo_info = {'Algorithm': 'SEPG',
                   'Environment': str(env),
                   'BatchSize': batchsize,
                   'Max horizon': horizon,
                   'Iterations': iterations,
                   'gamma': gamma,
                   'actionFilter': action_filter,
                   'rmax': rmax,
                   'phimax': phimax}
    logger.write_info({**algo_info, **policy.info()})
    log_keys = ['Perf', 'UPerf', 'AvgHorizon',
                'Alpha', 'BatchSize', 'Exploration', 'Eta',
                'ThetaGradNorm', 'OmegaGrad', 'OmegaMetagrad',
                'Penalty', 'MetaPenalty',
                'IterationKind',
                'ThetaGradNorm', 'Eps', 'Up', 'Down', 'C', 'Cmax', 'Delta'] #0: theta, 1: omega
    if log_params:
        log_keys += ['param%d' % i for i in range(policy.num_params())]
    if test_batchsize:
        log_keys.append('DetPerf')
    log_row = dict.fromkeys(log_keys)
    logger.open(log_row.keys())
    #Safety requirements
    if safety_requirement == 'mi':
        # Monotonic improvement: never allow expected performance to drop.
        thresholder = MonotonicImprovement()
    elif safety_requirement == 'budget':
        # Budget: an initial batch fixes the performance budget to preserve.
        batch = generate_batch(env, policy, horizon, batchsize, action_filter)
        thresholder = Budget(performance(batch, gamma))
    else:
        # Otherwise interpret the requirement as a fixed numeric threshold.
        thresholder = FixedThreshold(float(safety_requirement))
    #Learning loop
    omega_grad = float('nan')
    omega_metagrad = float('nan')
    metapenalty = float('nan')
    eta = float('nan')
    it = 0
    while(it < iterations):
        #Begin iteration
        if verbose:
            print('\nIteration ', it)
        if verbose:
            print('Params: ', policy.get_flat())
        #Test mean parameters on deterministic policy
        if test_batchsize:
            test_batch = generate_batch(env, policy, horizon, test_batchsize,
                                        action_filter=action_filter,
                                        seed=seed,
                                        njobs=parallel,
                                        deterministic=True)
            log_row['DetPerf'] = performance(test_batch, gamma)
        #Render behavior
        if render:
            generate_batch(env, policy, horizon, 1, action_filter, render=True)
        #
        if it % 2 == 0:
            #Std update (even iterations: adapt the exploration scale omega)
            omega = policy.get_scale_params()
            sigma = torch.exp(omega).item()
            batch = generate_batch(env, policy, horizon, batchsize,
                                   action_filter=action_filter,
                                   njobs=parallel,
                                   seed=seed)
            if confidence_schedule is not None:
                delta = confidence_schedule.next(it)
            log_row['Delta'] = delta
            if delta <1:
                # High-confidence branch: widen/narrow gradients by a
                # t-distribution confidence interval before bounding the step.
                grad, grad_var = simple_gpomdp_estimator(batch, gamma, policy, baseline, result='moments')
                omega_grad = grad[0]
                omega_grad_var = grad_var[0]
                # NOTE(review): `alpha` is only assigned in the odd-iteration
                # branch below; on the very first iteration (it == 0) this
                # call would hit an unbound name -- confirm intended ordering.
                omega_metagrad, omega_metagrad_var = metagrad(batch, gamma, policy, alpha, clip_at, baseline, result='moments')
                quant = 2 * sts.t.interval(1 - delta, batchsize-1,loc=0.,scale=1.)[1]
                eps = torch.tensor(quant * torch.sqrt(omega_grad_var / batchsize), dtype=torch.float)
                log_row['Eps'] = torch.norm(eps).item()
                metaeps = torch.tensor(quant * torch.sqrt(omega_metagrad_var / batchsize), dtype=torch.float)
                # Pessimistic product of the two confidence-adjusted gradients,
                # case-split on their signs.
                if torch.sign(omega_grad).item() >= 0 and torch.sign(omega_metagrad).item() >= 0:
                    up = torch.clamp(torch.abs(omega_grad - eps), min=0.) * torch.clamp(torch.abs(omega_metagrad - metaeps), min=0.)
                elif torch.sign(omega_grad).item() >= 0 and torch.sign(omega_metagrad).item() < 0:
                    up = (omega_grad + eps) * (omega_metagrad - metaeps)
                elif torch.sign(omega_grad).item() < 0 and torch.sign(omega_metagrad).item() >=0:
                    # NOTE(review): this case uses `eps` where the symmetric
                    # cases use `metaeps` for the metagradient term -- verify.
                    up = (omega_grad - eps) * (omega_metagrad + eps)
                else:
                    up = torch.abs(omega_grad + eps) * torch.abs(omega_metagrad + metaeps)
                down = omega_metagrad + metaeps * torch.sign(omega_metagrad)
                log_row['Up'] = up.item()
                log_row['Down'] = down.item()
                # NOTE(review): `avol` is not defined in this function --
                # presumably a module-level constant (action-space volume?).
                metapenalty = rmax / (1 - gamma)**2 * (0.53 * avol / (2 * sigma) + gamma / (1 - gamma))
                eta_star = (up / (2 * metapenalty * down**2 + 1e-12)).item()
                Cmax = up**2 / (4 * metapenalty * down**2).item()
            else:
                log_row['Eps'] = 0
                grad = gpomdp_estimator(batch, gamma, policy,
                                        baselinekind=baseline,
                                        shallow=shallow)
                theta_grad = grad[1:]
                omega_grad = grad[0]
                #->
                mixed, _ = mixed_estimator(batch, gamma, policy, baseline, theta_grad)
                norm_grad = 2 * theta_grad.dot(mixed)
                A = omega_grad
                B = 2 * alpha * torch.norm(theta_grad)**2
                # Clip the mixed term to keep the metagradient bounded.
                C = sigma * alpha * norm_grad
                C = torch.clamp(C, min=-clip_at, max=clip_at)
                omega_metagrad = A + B + C
                # NOTE(review): `avol` undefined locally (see note above).
                metapenalty = rmax / (1 - gamma)**2 * (0.53 * avol / (2 * sigma) + gamma / (1 - gamma))
                eta_star = (omega_grad / (2 * metapenalty * omega_metagrad) + 1e-12).item()
                Cmax = (omega_grad ** 2 / (4 * metapenalty)).item()
                log_row['Up'] = torch.tensor(omega_grad).item()
                log_row['Down'] = torch.tensor(omega_metagrad).item()
            perf = performance(batch, gamma)
            # Required improvement Co, capped by the maximum achievable Cmax.
            Co = thresholder.next(perf)
            Co = min(Co, Cmax)
            log_row['C'] = Co
            log_row['Cmax'] = Cmax
            eta = eta_star + abs(eta_star) * math.sqrt(1 - Co / (Cmax + 1e-12) + 1e-12)
            new_omega = omega + eta * omega_metagrad
            policy.set_scale_params(new_omega)
            ###
        else:
            #Mean update (odd iterations: adapt the location parameters theta)
            omega = policy.get_scale_params()
            sigma = torch.exp(omega).item()
            # NOTE(review): keyword here is `n_jobs` but the std-update branch
            # above passes `njobs` -- one of the two spellings is likely wrong.
            batch = generate_batch(env, policy, horizon, batchsize,
                                   action_filter=action_filter,
                                   n_jobs=parallel,
                                   seed=seed)
            if confidence_schedule is not None:
                delta = confidence_schedule.next(it)
            log_row['Delta'] = delta
            if delta < 1:
                grad, grad_var = simple_gpomdp_estimator(batch, gamma, policy, baseline, result='moments')
                theta_grad = grad[1:]
                theta_grad_var = grad_var[1:]
                quant = 2*sts.t.interval(1 - delta, batchsize-1,loc=0.,scale=1.)[1]
                eps = quant * torch.sqrt(theta_grad_var / batchsize)
                log_row['Eps'] = torch.norm(eps).item()
                # Pessimistic 2-norm (shrunk by eps) and optimistic 1-norm.
                norm2 = torch.norm(torch.clamp(torch.abs(theta_grad) - eps, min=0.))
                norm1 = torch.sum(torch.abs(theta_grad) + eps)
                log_row['Up'] = norm1.item()
                log_row['Down'] = norm2.item()
            else:
                log_row['Eps'] = 0
                grad = simple_gpomdp_estimator(batch, gamma, policy, baseline)
                theta_grad = grad[1:]
                norm2 = torch.norm(theta_grad)
                norm1 = torch.sum(torch.abs(theta_grad))
                log_row['Up'] = norm1.item()
                log_row['Down'] = norm2.item()
            # NOTE(review): `avol` undefined locally (see note above).
            penalty = rmax * phimax**2 / (1-gamma)**2 * (avol / (sigma * math.sqrt(2*math.pi)) + gamma / (2*(1-gamma)))
            alpha_star = sigma ** 2 * norm2 ** 2 / (2 * penalty * norm1 ** 2 + 1e-12)
            Cmax = (alpha_star * norm2**2 / 2).item()
            perf = performance(batch, gamma)
            Co = thresholder.next(perf)
            Co = min(Co, Cmax)
            log_row['C'] = Co
            log_row['Cmax'] = Cmax
            alpha = alpha_star * (1 + math.sqrt(1 - Co / (Cmax + 1e-12) + 1e-12))
            theta = policy.get_loc_params()
            new_theta = theta + alpha * theta_grad
            policy.set_loc_params(new_theta)
            ###
        # Log
        log_row['IterationKind'] = it % 2
        log_row['ThetaGradNorm'] = torch.norm(theta_grad).item()
        log_row['Alpha'] = alpha
        log_row['Eta'] = eta
        log_row['Penalty'] = penalty
        log_row['MetaPenalty'] = metapenalty
        log_row['OmegaGrad'] = torch.tensor(omega_grad).item()
        log_row['OmegaMetagrad'] = torch.tensor(omega_metagrad).item()
        log_row['ThetaGradNorm'] = torch.norm(theta_grad).item()
        log_row['BatchSize'] = batchsize
        log_row['Exploration'] = policy.exploration()
        log_row['Alpha'] = alpha.item()
        log_row['Perf'] = perf
        log_row['UPerf'] = performance(batch, 1.)
        log_row['AvgHorizon'] = avg_horizon(batch)
        params = policy.get_flat()
        if log_params:
            for i in range(policy.num_params()):
                log_row['param%d' % i] = params[i].item()
        logger.write_row(log_row, it)
        if save_params and it % save_params == 0:
            logger.save_params(params, it)
        # Next iteration
        it += 1
    # Final policy
    if save_params:
        logger.save_params(params, it)
def find_bigrams(textdict, threshold=0.1):
    """
    Identify bigrams occurring in the texts.

    Input:
        - textdict: a dict with {docid: preprocessed_text}
        - threshold: minimum score for a bigram to be kept
    Returns:
        - a list of "word1 word2" bigrams
    """
    # Texts are assumed preprocessed, so a plain split gives the word lists.
    tokenized_docs = [text.split() for text in textdict.values()]
    scores = get_bigram_scores(tokenized_docs)
    return [bg for bg, score in scores.items() if score > threshold]
def test_thresholdOn():
    """
    Testing export movie with different values of mv_thresholdOn
    """
    threshold_on_vals = {"foto1": [("a1000", "a400"), ("r50", "r30")],
                         "raw1": [("a1000", "a400"), ("r60", "r10")],
                         "sig1": [("a0.75", "a0.65"), ("r60", "r10")]}
    for within_area in (True, False):
        for threshold_on, pairs in threshold_on_vals.items():
            for pos, neg in pairs:
                flags = {
                    'mv_withinArea': within_area,
                    'mv_thresholdOn': threshold_on,
                    'mv_lowerThreshPositiveResps': pos,
                    'mv_upperThreshNegativeResps': neg,
                    'mv_individualScale': 3,
                    "mv_indiScale3factor": 0.25,
                }
                label = (f"mv_thresholdOn_{threshold_on}"
                         f"_vals_{pos}{neg}_withinArea_{within_area}")
                export_fake_data_movie(flags, label)
def test_not_existing_inv_file(monkeypatch):
    """Test if the inventory path is valid
    """
    monkeypatch.setenv('SQ_CONTROLLER_POLLER_CRED', 'dummy_key')
    monkeypatch.setenv('SQ_INVENTORY_PATH', 'not_existing')
    # A missing inventory path must surface as an InventorySourceError.
    expected = r'No inventory found at *'
    with pytest.raises(InventorySourceError, match=expected):
        StaticManagerInventory(MagicMock())
def _rec_filter_to_info(line):
"""Move a DKFZBias filter to the INFO field, for a record.
"""
parts = line.rstrip().split("\t")
move_filters = {"bSeq": "strand", "bPcr": "damage"}
new_filters = []
bias_info = []
for f in parts[6].split(";"):
if f in move_filters:
bias_info.append(move_filters[f])
elif f not in ["."]:
new_filters.append(f)
if bias_info:
parts[7] += ";DKFZBias=%s" % ",".join(bias_info)
parts[6] = ";".join(new_filters or ["PASS"])
return "\t".join(parts) + "\n" | 29,850 |
def token_mapper(token):
    """Dispatch a parsed token to its type-specific ``parse_*`` handler.

    Replaces the previous ~50-arm ``elif`` chain (each arm wrapped in an
    identical try/except) with a single exact-type dispatch table and one
    shared error handler; behavior is otherwise unchanged:

    - ``m.EmptyDeclaration`` tokens are only logged at debug level;
    - unmapped token types are logged at debug level;
    - any exception raised by a handler is logged, never propagated.

    :param token: an AST model node (``m.*``) or a plain bool/int/str/None.
    :return: None
    """
    try:
        # Exact-type dispatch: keys mirror the original `type(token) is X`
        # checks, so subclasses are deliberately NOT matched.
        parsers = {
            m.FieldDeclaration: parse_field_declaration,
            m.MethodInvocation: parse_method_invocation,
            m.VariableDeclaration: parse_variable_declaration,
            m.VariableDeclarator: parse_variable_declarator,
            m.MethodDeclaration: parse_method_declaration,
            m.ClassInitializer: parse_class_initializer,
            m.ConstructorDeclaration: parse_constructor_declaration,
            m.EnumDeclaration: parse_enum_declaration,
            m.InterfaceDeclaration: parse_interface_declaration,
            m.ExpressionStatement: parse_expression_statement,
            m.IfThenElse: parse_if_then_else,
            m.Return: parse_return,
            m.Try: parse_try,
            m.Catch: parse_catch,
            m.ForEach: parse_for_each,
            m.For: parse_for,
            m.Switch: parse_switch,
            m.SwitchCase: parse_switch_case,
            m.Block: parse_block,
            m.Synchronized: parse_synchronized,
            m.Variable: parse_variable,
            m.ClassDeclaration: parse_class_declaration,
            m.Assignment: parse_assignment,
            m.Literal: parse_literal,
            m.ClassLiteral: parse_class_literal,
            m.InstanceCreation: parse_instance_creation,
            m.Cast: parse_cast,
            m.Unary: parse_unary,
            m.Additive: parse_additive,
            m.Type: parse_type,
            m.Equality: parse_equality,
            m.ConditionalAnd: parse_conditional_and,
            m.Conditional: parse_conditional,
            m.ArrayCreation: parse_array_creation,
            m.While: parse_while,
            m.Break: parse_break,
            m.ArrayInitializer: parse_array_initializer,
            m.Annotation: parse_annotation,
            m.FormalParameter: parse_formal_parameter,
            m.ConditionalOr: parse_conditional_or,
            m.Wildcard: parse_wildcard,
            m.Relational: parse_relational,
            m.Name: parse_name,
            m.FieldAccess: parse_field_access,
            # Leaf/plain-value tokens.
            bool: parse_bool,
            str: parse_string,
            int: parse_int,
            # parse_none takes no argument; adapt it to the uniform signature.
            type(None): lambda _tok: parse_none(),
        }
        token_type = type(token)
        if token_type is m.EmptyDeclaration:
            common.logger.debug("OOPS - EmptyDeclaration: " + str(type(token)))
            return
        handler = parsers.get(token_type)
        if handler is None:
            common.logger.debug("NOT COVERED IN TOKEN MAPPER: ")
            common.logger.debug(type(token))
            common.logger.debug(str(token))
            # NOTE(review): `tracker` is not defined in this function;
            # assumed to be a module-level object -- confirm.
            common.logger.debug(str(tracker))
            return
        try:
            handler(token)
        except Exception as e:
            common.logger.error("Problem in findMethods.py parsing token type: " + str(type(token)) + ". " + str(e))
    except Exception as e:
        common.logger.error("Problem with token_mapper in findMethods.py: " + str(e))
    return
def find_files(config, file_to_find, exact_filename=False):
    """finds all the files in config.diag_dir that matches the prefix or will use
    the config.files string (split on ,) if present and not use a prefix but a full
    file name match.

    Example:
        files = [my.log], diag_dir = "" => only matches my.log NOT my.log.1
        file_to_find = "my.log", files = [], diag_dir = "mydir" => matches my.log, my.log.1, my.log.2, etc
    """
    # Explicit --files list wins: exact names, comma-separated.
    if config.files:
        return config.files.split(",")
    if config.diag_dir:
        prefix_match = not exact_filename
        try:
            return find_logs(config.diag_dir, file_to_find, prefix_match)
        except Exception as ex:
            if env.DEBUG:
                print(ex)
            raise UnableToReadDiagException(config.diag_dir, ex)
    print("no --diagdir or --files flag used")
    return None
def ast_node_to_source(ast_node: ast.AST) -> str:
    """
    Render an AST (or subtree) back to source code using the astor package.

    Handles the low-level details: non-module nodes are wrapped in an
    ``ast.Module``, and line numbers of modified/extracted trees are repaired
    before rendering.

    Args:
        ast_node: the tree or subtree to render.
    Returns:
        The generated source code as a string.
    """
    # astor can only render a Module, so wrap anything else.
    node = ast_node if isinstance(ast_node, ast.Module) else ast.Module([ast_node])
    ast.fix_missing_locations(node)
    return astor.to_source(node)
def read(*parts):
    """
    Return the text contents (UTF-8) of the file at PROJECT/<parts>.
    """
    path = os.path.join(PROJECT, *parts)
    with codecs.open(path, "rb", "utf-8") as handle:
        return handle.read()
def _get_base(*, name: str, schemas: oa_types.Schemas) -> typing.Type:
    """
    Retrieve the base class of a schema considering inheritance.

    If x-inherits is True, retrieve the parent. If it is a string, verify that the
    parent is valid. In either case, the model for that schema is used as the base
    instead of the usual base.

    If x-inherits is not present or False, return the usual base.

    Raise InheritanceConstructionOrderError if the parent of the schema has not been
    constructed when attempting to construct the child.

    Args:
        name: The name of the schema to determine the base for.
        schemas: All the schemas.

    Returns:
        The base of the model. Either the usual base or the model parent in the case of
        inheritance.

    """
    schema = schemas.get(name)
    if schema is None:
        # Fixed typo in the error message ("fund" -> "find").
        raise exceptions.SchemaNotFoundError(f"Could not find schema {name}.")
    if _schema_helper.inherits(schema=schema, schemas=schemas):
        parent = _inheritance.retrieve_parent(schema=schema, schemas=schemas)
        try:
            return getattr(models, parent)
        except AttributeError as exc:
            raise exceptions.InheritanceError(
                "Any parents of a schema must be constructed before the schema can be "
                "constructed."
            ) from exc
    return getattr(models, "Base")
def stopUuidAdvertise(INTERFACE = 'hci0'):
    """Stop BLE advertising on the given HCI interface.

    Issues the HCI LE Set Advertising Enable command (OGF 0x08, OCF 0x000a)
    with parameter 00 (disable) via ``hcitool``.

    :param INTERFACE: name of the HCI interface, e.g. ``'hci0'``.
    """
    print("Stopping advertising")
    # Use an argument list with shell=False so the interface name is passed
    # verbatim and can never be interpreted by a shell (injection-safe).
    subprocess.call(
        ["sudo", "hcitool", "-i", INTERFACE, "cmd", "0x08", "0x000a", "00"],
        stdout=DEVNULL,
    )
def classNumber(A):
    """ Returns the number of transition classes in the matrix A """
    # A list-of-matrices (first element's element is itself a list) holds one
    # matrix per class; a plain 2-D matrix counts as a single class.
    return len(A) if type(A[0][0]) is list else 1
def lz4_decompress_c(src, dlen, dst=None):
    """
    Decompress `src`, a bytearray of compressed data.

    Args:
        src: compressed input (bytes-like).
        dlen: expected decompressed length in bytes.
        dst: optional output bytearray to receive the data; a new bytearray
            is created when None.

    Returns:
        The output bytearray containing the decompressed data.

    Raises:
        RuntimeError: if the decompressed size does not match `dlen`.
    """
    if dst is None:
        dst = bytearray()
    # Removed leftover debug prints that dumped the whole src/dst buffers.
    decompressed = lz4zfs.decompress(bytes(src), dlen)
    actual = len(decompressed)
    if actual != dlen:
        print("[-] decompress size differ from %d, got %d" % (dlen, actual))
        raise RuntimeError("decompress size differ from %d, got %d" % (dlen, actual))
    # Here actual == dlen, so a single slice assignment suffices (the old
    # `dlen < l` sub-branch was unreachable after the size check above).
    dst[0:actual] = decompressed
    return dst
def format_coordinates(obj, no_seconds=True, wgs_link=True):
    """Format WGS84 coordinates as HTML.

    :param obj: object with ``latitude`` and ``longitude`` attributes; both
        must be floats, otherwise an empty string is returned.
    :param no_seconds: if True, round to whole minutes and omit seconds.
    :param wgs_link: if True, the "WGS84" label links to Wikipedia.
    :return: an HTML div containing a table, or '' for non-float coordinates.

    .. seealso:: https://en.wikipedia.org/wiki/ISO_6709#Order.2C_sign.2C_and_units
    """
    def degminsec(dec, hemispheres):
        # Convert a signed decimal-degree value to a degree/minute(/second)
        # string suffixed with the proper hemisphere letter
        # (first letter for positive values, second for negative).
        _dec = abs(dec)
        degrees = int(floor(_dec))
        _dec = (_dec - int(floor(_dec))) * 60
        minutes = int(floor(_dec))
        _dec = (_dec - int(floor(_dec))) * 60
        seconds = _dec
        if no_seconds:
            # Round to the nearest minute, carrying into degrees at 59'.
            if seconds > 30:
                if minutes < 59:
                    minutes += 1
                else:
                    minutes = 0
                    degrees += 1
        fmt = "{0}\xb0"
        # NOTE: a minutes value of 0 is omitted from the output entirely.
        if minutes:
            fmt += "{1:0>2d}'"
        if not no_seconds and seconds:
            fmt += '{2:0>2f}"'
        fmt += hemispheres[0] if dec > 0 else hemispheres[1]
        return str(fmt).format(degrees, minutes, seconds)
    if not isinstance(obj.latitude, float) or not isinstance(obj.longitude, float):
        return ''
    # Render a two-cell row: a labelled "Coordinates" cell and a cell with
    # both the deg/min representation and a plain decimal version (class
    # "geo" for the geo microformat).
    return HTML.div(
        HTML.table(
            HTML.tr(
                HTML.td(
                    'Coordinates ',
                    external_link(
                        'https://en.wikipedia.org/wiki/World_Geodetic_System_1984',
                        label="WGS84") if wgs_link else ''),
                HTML.td(
                    HTML.span('%s, %s' % (
                        degminsec(obj.latitude, 'NS'), degminsec(obj.longitude, 'EW'))),
                    HTML.br(),
                    HTML.span(
                        '{0.latitude:.2f}, {0.longitude:.2f}'.format(obj),
                        class_='geo'))),
            class_="table table-condensed"))
def batch_info(test_x, args):
    """display some relevant infos about the dataset format and statistics"""
    if args.model_type == "diagnosis":
        # Diagnosis batches are keyed by perspective; inspect the first one.
        view = test_x[args.perspectives[0]]
        print(view.shape)
        print(torch.min(view))
        print(torch.max(view))
    else:
        print(f"Input Dimension: {test_x.shape}")
        print(f"Minimum Value in Batch: {torch.min(test_x)}")
        print(f"Maximum Value in Batch: {torch.max(test_x)}")
def _defaultGromacsIncludeDir():
"""Find the location where gromacs #include files are referenced from, by
searching for (1) gromacs environment variables, (2) for the gromacs binary
'pdb2gmx' or 'gmx' in the PATH, or (3) just using the default gromacs
install location, /usr/local/gromacs/share/gromacs/top """
if 'GMXDATA' in os.environ:
return os.path.join(os.environ['GMXDATA'], 'top')
if 'GMXBIN' in os.environ:
return os.path.abspath(os.path.join(os.environ['GMXBIN'], '..', 'share', 'gromacs', 'top'))
pdb2gmx_path = distutils.spawn.find_executable('pdb2gmx')
if pdb2gmx_path is not None:
return os.path.abspath(os.path.join(os.path.dirname(pdb2gmx_path), '..', 'share', 'gromacs', 'top'))
else:
gmx_path = distutils.spawn.find_executable('gmx')
if gmx_path is not None:
return os.path.abspath(os.path.join(os.path.dirname(gmx_path), '..', 'share', 'gromacs', 'top'))
return '/usr/local/gromacs/share/gromacs/top' | 29,861 |
def test_get_starfile_metadata_names():
    """Check if the names returned have the right instance."""
    class Config:
        """Class to instantiate the config object."""
        shift = True
        ctf = True

    for name in get_starfile_metadata_names(Config):
        # Every metadata name must be a string with the RELION "__rln" prefix.
        assert isinstance(name, str)
        assert name.startswith("__rln")
def delete_keys_on_selected():
    """
    deletes set driven keys from selected controllers.
    :return: <bool> True for success.
    """
    selection = object_utils.get_selected_node(single=False)
    if not selection:
        raise IndexError("[DeleteKeysOnSelectedError] :: No controllers are selected.")
    # The last selected node acts as the interface control; the others are
    # the driven controllers.
    *driven_ctrls, interface_ctrl = selection
    for ctrl in driven_ctrls:
        if not check_if_object_is_control(ctrl):
            continue
        print('[DeleteKeysOnSelected] :: Deleting keys on {}.'.format(ctrl))
        delete_keys_on_controller(ctrl, interface_ctrl)
    return True
def scan_continuation(curr, prompt_tag, look_for=None, escape=False):
    """
    Segment a continuation based on a given continuation-prompt-tag.

    The head of the continuation, up to and including the desired continuation
    prompt, is reversed (in place), and the tail is returned un-altered.

    The hint value |look_for| is used to determine when the continuation being
    installed is a prefix of the extant continuation.
    In this case, installing the continuation is much simpler, as the expensive
    merge operation needed to find common substructure in the two continuations
    is not needed.
    """
    handlers = False  # True once any dynamic-wind frame is seen in the head
    xs = []
    # Walk frames from innermost outward, collecting the head in reverse.
    while isinstance(curr, Cont):
        if curr is look_for:
            # Prefix case: the continuation to install is a prefix of the
            # extant one, so no segment needs to be captured.
            return None, handlers
        handlers |= isinstance(curr, DynamicWindValueCont)
        xs.append(curr)
        if isinstance(curr, Prompt) and curr.tag is prompt_tag:
            # Stop after including the prompt with the requested tag.
            break
        curr = curr.prev
    # NOTE(review): when the remaining frame is not virtual to the JIT (and we
    # are not escaping), defer to the helper -- presumably the slow path that
    # continues the scan outside traced code; confirm.
    if not escape and not jit.isvirtual(curr):
        return _scan_continuation(curr, prompt_tag, look_for, xs, handlers)
    return xs, handlers
def reloadATSConfigs(conf:Configuration) -> bool:
    """
    This function will reload configuration files for the Apache Trafficserver caching HTTP
    proxy. It does this by calling ``traffic_ctl config reload``

    :param conf: An object representing the configuration of :program:`traffic_ops_ort`
    :returns: whether or not the reload succeeded (as indicated by the exit code of
        ``traffic_ctl``)
    :raises OSError: when something goes wrong executing the child process
    """
    # First of all, ATS must be running for this to work
    if not setATSStatus(True, conf):
        logging.error("Cannot reload configs, ATS not running!")
        return False
    cmd = [os.path.join(conf.tsroot, "bin", "traffic_ctl"), "config", "reload"]
    cmdStr = ' '.join(cmd)
    # In interactive mode the operator may decline the reload; a skipped reload
    # is deliberately reported as success even though changes are not applied.
    if ( conf.mode is Configuration.Modes.INTERACTIVE and
         not getYN("Run command '%s' to reload configuration?" % cmdStr, default='Y')):
        logging.warning("Configuration will not be reloaded for Apache Trafficserver!")
        logging.warning("Changes will NOT be applied!")
        return True
    logging.info("Apache Trafficserver configuration reload will be done via: %s", cmdStr)
    # REPORT mode only logs what would be done, without running the command.
    if conf.mode is Configuration.Modes.REPORT:
        return True
    sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = sub.communicate()
    # Non-zero exit code: dump the child's output at debug level and fail.
    if sub.returncode:
        logging.debug("========== PROCESS STDOUT ==========")
        logging.debug("%s", out.decode())
        logging.debug("========== PROCESS STDERR ==========")
        logging.debug("%s", err.decode())
        logging.debug("====================================")
        return False
    return True
def run_for_duration(func, duration, num_runs, sleep_after_count):
    """
    Repeatedly call ``func`` until ``duration`` seconds elapse or it has been
    called ``num_runs`` times, whichever comes first.

    Args:
        func (callable): Zero-argument callable invoked once per repetition.
        duration (float): Maximum wall-clock run time in seconds.
        num_runs (int): Maximum number of calls to ``func``.
        sleep_after_count (int, int): Tuple of (count to sleep at, time to
            sleep), or a falsy value to never sleep between repetitions.
    """
    count = 0
    start = time.time()
    elapsed = 0
    if sleep_after_count:
        sleep_after_count_random = randomize_sleep_after_count(sleep_after_count[0])
        print(f"First sleep_after_count: {sleep_after_count_random} for {sleep_after_count[1]} seconds")
    completed_time = datetime.fromtimestamp(start) + timedelta(seconds=duration)
    print(f"Starting {func.__name__}. Will finish at: {completed_time.strftime('%I:%M:%S%p')}")
    # BUG FIX: the loop previously compared against the module-level constant
    # DURATION, silently ignoring the caller-supplied `duration` argument.
    while elapsed < duration and count < num_runs:
        count += 1
        if count % 10 == 0:
            print(f"Repetition: {count} Elapsed: {round(elapsed/60)}m")
        func()
        elapsed = time.time() - start
        # Periodically sleep after a randomized number of repetitions.
        if sleep_after_count and count % sleep_after_count_random == 0:
            print(f"sleep_after_count: {sleep_after_count_random} for {sleep_after_count[1]} seconds'ish")
            rsleep(sleep_after_count[1], factor=0.5)
            sleep_after_count_random = sleep_after_count_random + randomize_sleep_after_count(sleep_after_count[0])
def test_check_package_global():
    """Ensure a package known to be installed is reported as installed."""
    first_distribution = next(iter(pkg_resources.working_set))
    assert package.is_installed(first_distribution.project_name)
def solar_wcs_frame_mapping(wcs):
    """
    Map a FITS-WCS object to its solar coordinate frame.

    This function registers the coordinates frames to their FITS-WCS
    coordinate type values in the
    `astropy.wcs.utils.wcs_to_celestial_frame` registry.
    """
    dateobs = wcs.wcs.dateobs or None
    # SunPy Map attaches 'heliographic_observer' and 'rsun' attributes to the
    # WCS object; default each to None when absent.
    observer = getattr(wcs, 'heliographic_observer', None)
    rsun = getattr(wcs, 'rsun', None)
    # Try the celestial sub-WCS first, which rectifies the axis order
    # (matches ??LN*, ??LT*); adopt it only when it yields two axes.
    celestial = wcs.sub([WCSSUB_CELESTIAL])
    if celestial.naxis == 2:
        wcs = celestial
    ctype_pair = (wcs.wcs.ctype[0][:4], wcs.wcs.ctype[1][:4])
    if ctype_pair == ('HPLN', 'HPLT'):
        return Helioprojective(obstime=dateobs, observer=observer, rsun=rsun)
    if ctype_pair == ('HGLN', 'HGLT'):
        return HeliographicStonyhurst(obstime=dateobs)
    if ctype_pair == ('CRLN', 'CRLT'):
        return HeliographicCarrington(obstime=dateobs)
    if ctype_pair == ('SOLX', 'SOLY'):
        return Heliocentric(obstime=dateobs, observer=observer)
def toa_error_peak_detection(snr):
    """
    Expected variance of a time-of-arrival estimate produced by a peak
    detection algorithm at the given SNR.

    Ported from MATLAB Code
    Nicholas O'Donoughue
    11 March 2021

    :param snr: Signal-to-Noise Ratio [dB]
    :return: expected time of arrival error variance [s^2]
    """
    # Work in linear SNR units; the variance is inversely proportional to
    # twice the linear SNR.
    snr_linear = utils.unit_conversions.db_to_lin(snr)
    return 1 / (2 * snr_linear)
def scrape_random_contracts(data_dir: str, max_contracts=10000,
                            verbose: bool = True, filtering: bool = True, stop_words: Set[str] = None) -> List[LabeledProvision]:
    """Randomly sample contracts and collect their labeled provisions."""
    if verbose:
        print('Fetching contracts from', data_dir)
    contract_paths = glob.glob(os.path.join(data_dir, '*/*/*/*.htm'))
    if verbose:
        print(len(contract_paths), 'contracts found, sampling', max_contracts)
    random.shuffle(contract_paths)
    collected: List[LabeledProvision] = []
    num_scraped = 0
    for contract_path in contract_paths:
        if verbose:
            print('Scraping', num_scraped, contract_path)
        doc_provisions = scrape_exhibit_10(contract_path, filtering=filtering, stop_words=stop_words)
        # Only contracts that yielded provisions count toward the sample size.
        if doc_provisions:
            collected.extend(doc_provisions)
            num_scraped += 1
            if num_scraped == max_contracts:
                break
    return collected
def statCellFraction(gridLimit, gridSpace, valueFile):
    """
    Calculate the fractional value of each grid cell, based on the
    values stored in valueFile.

    :param dict gridLimit: Dictionary of bounds of the grid.
    :param dict gridSpace: Resolution of the grid to calculate values.
    :param str valueFile: Path to the ascii grid file containing values to sample.

    :returns: :class:`numpy.ndarray` of fractional values, with length equal to the number
        of cells

    Notes: Still need to include bounds checking to ensure the valueFile
    data actually covers the gridLimits.
    """
    gLon, gLat, gData = grdRead(valueFile)
    nCells = maxCellNum(gridLimit, gridSpace) + 1
    output = np.zeros(nCells)
    for cellNum in range(nCells):
        # Cell bounds (wLon, eLon) x (sLat, nLat); the cell's anchor point is
        # its north-west corner.
        cellLon, cellLat = getCellLonLat(cellNum, gridLimit, gridSpace)
        wLon = cellLon
        eLon = cellLon + gridSpace['x']
        nLat = cellLat
        sLat = cellLat - gridSpace['y']
        # Source-grid column/row indices falling inside this cell.
        ii = np.where((gLon <= eLon) & (gLon >= wLon))
        jj = np.where((gLat <= nLat) & (gLat >= sLat))
        # NOTE(review): indexing with np.meshgrid(jj[0], ii[0]) selects the
        # lat x lon sub-block of gData — confirm the axis order matches the
        # layout returned by grdRead.
        cellValues = gData[np.meshgrid(jj[0], ii[0])]
        # Normalise by the cell's max absolute value; when all sampled values
        # are zero, the plain average (0) avoids a division by zero.
        if abs(cellValues).max() == 0:
            output[cellNum] = np.average(cellValues)
        else:
            output[cellNum] = np.average(cellValues) / abs(cellValues).max()
    return output
def tabulate_metaclonotype(
    file,
    metaclonotype_source_path,
    metaclonotype_file,
    source_path,
    ncpus =1,
    max_radius = 36,
    write = False,
    project_path = "counts"):
    """
    Tabulate a set of meta-clonotypes in a single bulk repertoire.

    Parameters
    ----------
    metaclonotype_source_path : str
        filepath to metaclonotype file
    metaclonotype_file : str
        filename containing meta-clonotype definitions
    source_path : str
        filepath to bulk files
    file: str
        filename of the bulk repertoire to tabulate
    ncpus : int
        maximum number of cpus to use in meta-clonotype vs. bulk distance computation
    max_radius : int
        maximum radius to store
    write : bool
        if True, write the per-feature counts to
        ``<project_path>/<file>.counts.tsv``
    project_path : str
        output folder used when ``write`` is True

    Returns
    -------
    (df_join, df_result) : tuple of pd.DataFrame
        ``df_join`` is the per-match join of meta-clonotypes and bulk clones;
        ``df_result`` holds per-feature RADIUS/MOTIF/EXACT template counts.
    """
    # Never request more workers than the machine actually has.
    ncpus = min(multiprocessing.cpu_count(), ncpus)
    df_search = pd.read_csv(os.path.join(metaclonotype_source_path, metaclonotype_file), sep = "\t")
    df_bulk = pd.read_csv(os.path.join(source_path, file), sep = "\t")
    # When one want to track each clone indivually regardless of identical TRBV-CDR3-TRBJ
    df_bulk = df_bulk.sort_values('count').reset_index(drop = True)
    df_bulk['rank'] = df_bulk.index.to_list()
    from tcrdist.repertoire import TCRrep
    # Meta-clonotype search repertoire; distances are computed sparsely below.
    tr = TCRrep(
        cell_df = df_search,
        organism = "human",
        chains = ['beta'],
        compute_distances= False)
    tr.cpus = ncpus
    # Bulk repertoire to search against.
    tr_bulk = TCRrep(
        cell_df = df_bulk,
        organism = "human",
        chains = ['beta'],
        compute_distances= False)
    # Chunk the (search x bulk) rectangular distance computation to bound memory.
    chunk_size = get_safe_chunk(tr.clone_df.shape[0], tr_bulk.clone_df.shape[0])
    tr.compute_sparse_rect_distances(
        df = tr.clone_df,
        df2 = tr_bulk.clone_df,
        radius = max_radius ,
        chunk_size = chunk_size)
    # Inner-join bulk clones to meta-clonotypes within max_radius.
    df_join = join_by_dist(
        how = 'inner',
        csrmat = tr.rw_beta,
        left_df = tr.clone_df,
        right_df = tr_bulk.clone_df,
        left_cols = tr.clone_df.columns.to_list(),
        right_cols = tr_bulk.clone_df.columns.to_list(),
        left_suffix = '_search',
        right_suffix = '_bulk',
        max_n= 1000,
        radius = max_radius )
    # df_join has more results
    # Flag matches inside each meta-clonotype's own radius ...
    df_join['RADIUS'] = df_join.apply(lambda x: x['dist'] <= x['radius_search'], axis = 1)
    import re
    # ... matches whose bulk CDR3 also satisfies the meta-clonotype regex ...
    df_join['MOTIF'] = df_join.apply(lambda x: re.search(string = x['cdr3_b_aa_bulk'],
                                                         pattern = x['regex_search']) is not None, axis = 1)
    df_join['RADIUSANDMOTIF'] = df_join['RADIUS'] & df_join['MOTIF']
    # ... and exact (distance zero) matches.
    df_join['EXACT'] = df_join.apply(lambda x: x['dist'] <= 0, axis = 1)
    df_join['unique_clones'] = 1
    # Feature key: V gene + CDR3 + radius + regex, uniquely naming each meta-clonotype.
    df_join['feature'] = df_join['v_b_gene_search'] + "+" \
        + df_join['cdr3_b_aa_search'] + "+" \
        + df_join['radius_search'].apply(lambda x : str(x)) + "+"\
        + df_join['regex_search']
    # mc_file = 'mira_epitope_55_524_ALRKVPTDNYITTY_KVPTDNYITTY.tcrdist3.csv.ranked_centers_bkgd_ctlr_1E6_manuscript.tsv'
    df_mc = pd.read_csv(os.path.join(metaclonotype_source_path, metaclonotype_file), sep = "\t")
    df_mc['feature'] = df_mc['v_b_gene'] + "+" \
        + df_mc['cdr3_b_aa'] + "+" \
        + df_mc['radius'].apply(lambda x : str(x)) + "+" \
        + df_mc['regex']
    # Sum bulk template counts per feature under each match criterion.
    radius = df_join.query('RADIUS').groupby('feature')['templates_bulk'].sum()
    motif = df_join.query('RADIUSANDMOTIF').groupby('feature')['templates_bulk'].sum()
    exact = df_join.query('EXACT').groupby('feature')['templates_bulk'].sum()
    # Reindex on the full meta-clonotype feature list so unmatched features
    # appear with zero counts after fillna.
    df_result = pd.concat([pd.DataFrame(index = df_mc['feature'] ), radius, motif, exact], axis = 1)
    df_result.columns = ['RADIUS','MOTIF','EXACT']
    df_result['file'] = file
    df_result = df_result.fillna(0)
    if write:
        outname = os.path.join(project_path, f"{file}.counts.tsv")
        df_result.reset_index(drop = False).to_csv(outname, sep = "\t", index = False)
    return (df_join, df_result)
def _return_ids_to_df(temp_trip_stack, origin_activity, destination_activity, spts, tpls, trip_id_counter):
"""
Write trip ids into the staypoint and tripleg GeoDataFrames.
Parameters
----------
temp_trip_stack : list
list of dictionary like elements (either pandas series or python dictionary). Contains all elements
that will be aggregated into a trip
origin_activity : dictionary like
Either dictionary or pandas series
destination_activity : dictionary like
Either dictionary or pandas series
spts : GeoDataFrame
Staypoints
tpls :
Triplegs
trip_id_counter : int
current trip id
Returns
-------
None
Function alters the staypoint and tripleg GeoDataFrames inplace
"""
spts.loc[spts.index == origin_activity['id'], ['next_trip_id']] = trip_id_counter
spts.loc[spts.index == destination_activity['id'], ['prev_trip_id']] = trip_id_counter
for row in temp_trip_stack:
if row['type'] == 'tripleg':
tpls.loc[tpls.index == row['id'], ['trip_id']] = trip_id_counter
elif row['type'] == 'staypoint':
spts.loc[spts.index == row['id'], ['trip_id']] = trip_id_counter | 29,873 |
def relative_url_functions(current_url, course, lesson):
    """Build URL helpers that emit links relative to the current page.

    Returns a triple of callables: (lesson_url, subpage_url, static_url).
    """
    def _relativize(absolute):
        # All helpers share the same absolute -> relative conversion.
        return get_relative_url(current_url, absolute)

    def lesson_url(lesson, *args, **kwargs):
        if not isinstance(lesson, str):
            lesson = lesson.slug
        if course is not None:
            return _relativize(url_for('course_page', course=course, lesson=lesson, *args, **kwargs))
        return _relativize(url_for('lesson', lesson=lesson, *args, **kwargs))

    def subpage_url(page_slug):
        if course is not None:
            return _relativize(url_for('course_page', course=course, lesson=lesson, page=page_slug))
        return _relativize(url_for('lesson', lesson=lesson, page=page_slug))

    def static_url(path):
        return _relativize(url_for('lesson_static', lesson=lesson, path=path, course=course))

    return lesson_url, subpage_url, static_url
def fix_CompanySize(r):
    """
    Fill in a missing ``CompanySize`` value on a survey row based on its
    ``Employment`` status.

    Args:
        r: Row-like object (e.g. a pandas Series) with ``CompanySize`` and
            ``Employment`` attributes; mutated in place.

    Returns:
        The same row, with ``CompanySize`` imputed when it was missing.
    """
    # A missing CompanySize shows up as a non-string value (e.g. NaN).
    if not isinstance(r.CompanySize, str):
        if r.Employment == "Independent contractor, freelancer, or self-employed":
            r.CompanySize = "0 to 1 Employees"
        elif r.Employment in {
            "Not employed, but looking for work",
            "full-time",
            "Not employed, and not looking for work",
            "part-time",
            "Retired",
        }:
            r.CompanySize = "Not Applicable"
    return r
def write_thermo_yaml(phases=None, species=None, reactions=None,
                      lateral_interactions=None, units=None,
                      filename=None, T=300., P=1., newline='\n',
                      ads_act_method='get_H_act',
                      yaml_options=None):
    """Writes the units, phases, species, lateral interactions, reactions and
    additional options in the CTI format for OpenMKM

    Parameters
    ----------
        phases : list of :class:`~pmutt.omkm.phase.Phase` objects
            Phases to write in YAML file. The species should already be assigned.
        species : list of :class:`~pmutt.empirical.nasa.Nasa`, :class:`~pmutt.empirical.nasa.Nasa9` or :class:`~pmutt.empirical.shomate.Shomate`
            Species to write in YAML file.
        reactions : list of :class:`~pmutt.omkm.reaction.SurfaceReaction`
            Reactions to write in YAML file.
        lateral_interactions : list of :class:`~pmutt.mixture.cov.PiecewiseCovEffect` objects, optional
            Lateral interactions to include in YAML file. Default is None.
        units : dict or :class:`~pmutt.omkm.units.Unit` object, optional
            Units to write file. If a dict is inputted, the key is the quantity
            and the value is the unit. If not specified, uses the default units
            of :class:`~pmutt.omkm.units.Unit`.
        filename: str, optional
            Filename for the input.yaml file. If not specified, returns file
            as str.
        T : float, optional
            Temperature in K. Default is 300 K.
        P : float, optional
            Pressure in atm. Default is 1 atm.
        newline : str, optional
            Type of newline to use. Default is Linux newline ('\\n')
        ads_act_method : str, optional
            Activation method to use for adsorption reactions. Accepted
            options include 'get_H_act' and 'get_G_act'. Default is
            'get_H_act'. Note: currently not referenced within this function.
        yaml_options : dict, optional
            Options forwarded to ``yaml.dump``. Default is
            ``{'default_flow_style': None, 'indent': 2, 'sort_keys': False,
            'width': 79}``.
    Returns
    -------
        lines_out : str
            If ``filename`` is None, CTI file is returned.
    """
    # Avoid a shared mutable default argument for the YAML dump options.
    if yaml_options is None:
        yaml_options = {'default_flow_style': None, 'indent': 2,
                        'sort_keys': False, 'width': 79}
    lines = [
        _get_file_timestamp(comment_char='# '),
        '# See documentation for OpenMKM YAML file here:',
        '# https://vlachosgroup.github.io/openmkm/input',
    ]
    '''Organize units'''
    if units is None:
        units = Units()
    elif isinstance(units, dict):
        units = Units(**units)
    units_out = units.to_omkm_yaml()
    '''Pre-assign IDs for lateral interactions so phases can be written'''
    if lateral_interactions is not None:
        interactions_out = []
        i = 0
        for lat_interaction in lateral_interactions:
            if lat_interaction.name is None:
                lat_interaction.name = 'i_{:04d}'.format(i)
                i += 1
            interaction_dict = lat_interaction.to_omkm_yaml(units=units)
            interactions_out.append(interaction_dict)
    '''Pre-assign IDs for reactions so phases can be written'''
    beps = []
    if reactions is not None:
        reactions_out = []
        i = 0
        for reaction in reactions:
            # Assign reaction ID if not present
            if reaction.id is None:
                reaction.id = 'r_{:04d}'.format(i)
                i += 1
            # Write reaction
            reaction_dict = reaction.to_omkm_yaml(units=units, T=T)
            reactions_out.append(reaction_dict)
            # Add unique BEP relationship if any
            try:
                bep = reaction.bep
            except AttributeError:
                pass
            else:
                if bep is not None and bep not in beps:
                    beps.append(bep)
    '''Write phases'''
    if phases is not None:
        phases_out = []
        for phase in phases:
            phase_dict = _force_pass_arguments(phase.to_omkm_yaml, units=units)
            phases_out.append(phase_dict)
    '''Write species'''
    if species is not None:
        species_out = []
        for ind_species in species:
            ind_species_dict = _force_pass_arguments(ind_species.to_omkm_yaml,
                                                     units=units)
            species_out.append(ind_species_dict)
    '''Organize BEPs'''
    if len(beps) > 0:
        beps_out = []
        i = 0
        for bep in beps:
            # Assign name if necessary
            if bep.name is None:
                bep.name = 'b_{:04d}'.format(i)
                i += 1
            bep_dict = _force_pass_arguments(bep.to_omkm_yaml, units=units)
            beps_out.append(bep_dict)
    '''Organize fields'''
    fields = ('units', 'phases', 'species', 'reactions',
              'beps', 'interactions',)
    for field in fields:
        try:
            # Each section above stores its output in a local named
            # '<field>_out'; absent locals mean the section was skipped.
            val = locals()['{}_out'.format(field)]
        except KeyError:
            # BUG FIX: previously a bare `except:` which could mask
            # unrelated errors; only a missing local is expected here.
            pass
        else:
            # Create a YAML string for this section with a banner header
            yaml_str = yaml.dump(data={field: val},
                                 stream=None,
                                 **yaml_options)
            lines.extend(
                ['',
                 '#' + '-' * 79,
                 '# {}'.format(field.upper()),
                 '#' + '-' * 79,
                 yaml_str])
    lines_out = '\n'.join(lines)
    # Remove redundant quotes introduced by the YAML dump
    lines_out = lines_out.replace('\'', '')
    # Add spacing between list elements
    lines_out = lines_out.replace('\n-', '\n\n-')
    if filename is not None:
        filename = Path(filename)
        with open(filename, 'w', newline=newline) as f_ptr:
            f_ptr.write(lines_out)
    else:
        # Or return as string
        return lines_out
def move_weighted(state: State, nnet: NNet) -> tuple:
    """
    Draw one move at random, weighted by the neural network's policy output.

    :param state: State to evaluate
    :param nnet: Neural network used for evaluation
    :return: Move as ((origin_row, origin_column),(target_row,target_column)
    """
    policy = nnet.prediction(state)[0]
    candidate_moves = list(policy)
    move_weights = [policy[move] for move in candidate_moves]
    return random.choices(candidate_moves, weights=move_weights)[0]
def build_get301_request(**kwargs: Any) -> HttpRequest:
    """Return 301 status code and redirect to /http/success/200.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Construct headers
    request_headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    request_headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    # Construct URL
    target_url = kwargs.pop("template_url", "/http/redirect/301")
    return HttpRequest(method="GET", url=target_url, headers=request_headers, **kwargs)
def error_function_latticeparameters(varying_parameters_values_array,
                                    varying_parameters_keys,
                                    Miller_indices,
                                    allparameters,
                                    absolutespotsindices,
                                    Xexp,
                                    Yexp,
                                    initrot=IDENTITYMATRIX,
                                    pureRotation=0,
                                    verbose=0,
                                    pixelsize=165.0 / 2048,
                                    dim=(2048, 2048),
                                    weights=None,
                                    kf_direction="Z>0",
                                    returnalldata=False,
                                    additional_expression="none"):
    """
    q = UzUyUz Ustart B0 G*

    Interface error function returning the array of exp. - model pair
    distances: sqrt((Xmodel_i-Xexp_i)**2+(Ymodel_i-Yexp_i)**2),
    optionally weighted.

    Xmodel,Ymodel comes from G*=ha*+kb*+lc*
    q = refinedUzUyUz Ustart refinedB0 G*

    B0 reference structure reciprocal space frame (a*,b*,c*) a* // ki b* perp to a* and perp to z (z belongs to the plane of ki and detector normal vector n)
    i.e. columns of B0 are components of a*,b* and c* expressed in x,y,z LT frame
    refinedB0 is obtained by refining the 5 /6 lattice parameters

    possible keys for parameters to be refined are:
    three misorientation angles with respect to LT orthonormal frame (x, y, z) matrices Ux, Uy,Uz:
    anglex,angley,anglez
    lattice parameters among (a,b,c,alpha,beta,gamma)

    :param weights: optional per-spot weights applied to the distances
    :param returnalldata: if True, also return Uxyz, UB, B0 and lattice parameters
    :param additional_expression: "a==b" constrains b to equal a
    :return: array of distances, or (distances, Uxyz, newmatrix, newB0matrix,
        latticeparameters) when returnalldata is True
    """
    # reading default parameters:
    # first five elements are the CCD plane calibration parameters
    if isinstance(allparameters, np.ndarray):
        calibrationparameters = (allparameters.tolist())[:5]
    else:
        calibrationparameters = allparameters[:5]

    Uy, Ux, Uz = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
    latticeparameters = np.array(allparameters[8:14])

    nb_varying_parameters = len(varying_parameters_keys)

    for varying_parameter_index, parameter_name in enumerate(varying_parameters_keys):
        if parameter_name in ("anglex", "angley", "anglez"):
            # When a single parameter is refined, its value is the first
            # (and only) element of the values array.
            if nb_varying_parameters > 1:
                anglevalue = varying_parameters_values_array[varying_parameter_index] * DEG
            else:
                anglevalue = varying_parameters_values_array[0] * DEG
            ca = np.cos(anglevalue)
            sa = np.sin(anglevalue)
            # BUG FIX: these branches previously compared strings with `is`
            # (object identity), which is unreliable; use `==` instead.
            if parameter_name == "angley":
                Uy = np.array([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])
            elif parameter_name == "anglex":
                Ux = np.array([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])
            elif parameter_name == "anglez":
                Uz = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])
        elif parameter_name in ("a", "b", "c", "alpha", "beta", "gamma"):
            # direct refinement of a unit-cell parameter
            indparam = dict_lattice_parameters[parameter_name]
            if nb_varying_parameters > 1:
                latticeparameters[indparam] = varying_parameters_values_array[varying_parameter_index]
            else:
                latticeparameters[indparam] = varying_parameters_values_array[0]

    # combined misorientation: Uz Ux Uy
    Uxyz = np.dot(Uz, np.dot(Ux, Uy))

    if additional_expression == "a==b":
        # constrain the b lattice parameter to equal a
        indparam = dict_lattice_parameters["b"]
        indparam1 = dict_lattice_parameters["a"]
        latticeparameters[indparam] = latticeparameters[indparam1]

    newB0matrix = CP.calc_B_RR(latticeparameters, directspace=1, setvolume=False)

    # refined orientation applied to the starting orientation matrix
    newmatrix = np.dot(Uxyz, initrot)

    Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
                                                Miller_indices,
                                                absolutespotsindices,
                                                UBmatrix=newmatrix,
                                                B0matrix=newB0matrix,
                                                pureRotation=0,
                                                labXMAS=0,
                                                verbose=0,
                                                pixelsize=pixelsize,
                                                dim=dim,
                                                kf_direction=kf_direction)

    distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)
    if weights is not None:
        # normalise the weights so the residues stay comparable
        allweights = np.sum(weights)
        distanceterm = distanceterm * weights / allweights

    alldistances_array = distanceterm

    if returnalldata:
        # distances plus the refined matrices and lattice parameters
        return alldistances_array, Uxyz, newmatrix, newB0matrix, latticeparameters
    else:
        return alldistances_array
def run_forward_model(z_in):
    """
    Run forward model and return approximate measured values.

    Relies on module-level state: ``x_dummy`` (scratch full-state vector,
    mutated in place), ``prm`` (provides ``nn`` and ``compute_velocity``),
    ``t0`` (evaluation time) and ``H_meas`` (measurement operator).
    NOTE(review): assumes len(x_dummy) == 2 * prm.nn — confirm at call site.

    :param z_in: first ``prm.nn`` entries of the full state vector
    :return: ``H_meas`` applied to the assembled full state
    """
    # First half of the state is z itself; second half is its velocity at t0.
    x_dummy[:prm.nn]=z_in
    x_dummy[prm.nn:]=prm.compute_velocity(z_in,t0)
    x_meas = H_meas.dot(x_dummy)
    return x_meas
def test_pn_file():
    """## test_pn_file
    Manual test that
    [x] A path to a markdown file can be specified and shown
    """
    # The docstring above is rendered at runtime as the column header,
    # so its text must stay unchanged.
    markdown_pane = pnx.Markdown(path=TEST_MD_FILE, name="test")
    column = pn.Column(test_pn_file.__doc__, markdown_pane)
    column.servable("test_pn_file")
def optimize(name: str, circuit: cirq.Circuit) -> cirq.Circuit:
    """Applies sycamore circuit decompositions/optimizations and reports timing.

    Args:
        name: the name of the circuit for printing messages
        circuit: the circuit to optimize_for_sycamore
    """
    print(f'optimizing: {name}', flush=True)
    start_time = timer()
    optimized_circuit = cirq.google.optimized_for_sycamore(circuit)
    elapsed = timer() - start_time
    print_stats(elapsed, optimized_circuit)
    return optimized_circuit
def download_sbr(destination=None):
    """Download an example of SBR+ Array and return the def path.

    Examples files are downloaded to a persistent cache to avoid
    re-downloading the same file twice.

    Parameters
    ----------
    destination : str, optional
        Path where files will be downloaded. Optional. Default is user temp folder.

    Returns
    -------
    str
        Path to the example file.

    Examples
    --------
    Download an example project file and return the path of the file

    >>> from pyaedt import examples
    >>> path = examples.download_sbr()
    >>> path
    'C:/Users/user/AppData/local/temp/pyaedtexamples/sbr/Cassegrain.aedt'
    """
    return _download_file("sbr", "Cassegrain.aedt", destination)
def isvalid_sequence(
    level: str, time_series: Tuple[Union[HSScoring, CollegeScoring]]
) -> bool:
    """Checks if entire sequence is valid.

    Walks the events in order while tracking the current wrestling position
    ('neutral', 'top', or 'bottom'); each event is validated against the
    moves legal from that position and may transition the position.

    Args:
        level: 'high school' or 'college' level for sequence analysis.
        time_series: Tuple of sorted match time_series events.

    Raises:
        ValueError: Invalid level.
        ValueError: Not sorted time_series.
        ValueError: Invalid position.

    Returns:
        bool: True if sequence is valid, otherwise raises ValueError.
    """
    if level not in {"college", "high school"}:
        raise ValueError(
            f"Expected `level` to be one of "
            f"'college' or 'high school', "
            f"got {level!r}."
        )
    # aliases sequences based on level
    sequences = COLLEGE_SEQUENCES if level == "college" else HS_SEQUENCES
    # matches start in the neutral position
    position = "neutral"
    # skips iteration the last value because we check the next
    for i, score in enumerate(time_series[:-1]):
        # current time can't be larger than next time
        if time_series[i].time_stamp > time_series[i + 1].time_stamp:
            raise ValueError(
                f"Values in `time_series` appear to be sorted incorrectly."
            )
        if position == "neutral":
            check_neutral(score, sequences["neutral"])
            # labels that move the focus wrestler to top / to bottom
            if score.formatted_label == "fT2" or score.formatted_label == "oBOT" or score.formatted_label == 'fTOP':
                position = "top"
            elif score.formatted_label == "oT2" or score.formatted_label == "fBOT" or score.formatted_label == 'oTOP':
                position = "bottom"
        elif position == "top":
            check_top(score, sequences["top"])
            # labels that return the match to neutral
            if (
                score.formatted_label == "oE1"
                or score.formatted_label == "fNEU"
                or score.formatted_label == "oNEU"
            ):
                position = "neutral"
            # labels that flip the focus wrestler to bottom
            elif (
                score.formatted_label == "oR2"
                or score.formatted_label == "fBOT"
                or score.formatted_label == "oTOP"
            ):
                position = "bottom"
        elif position == "bottom":
            check_bottom(score, sequences["bottom"])
            # labels that return the match to neutral
            if (
                score.formatted_label == "fE1"
                or score.formatted_label == "fNEU"
                or score.formatted_label == "oNEU"
            ):
                position = "neutral"
            # labels that flip the focus wrestler to top
            elif (
                score.formatted_label == "fR2"
                or score.formatted_label == "oBOT"
                or score.formatted_label == "fTOP"
            ):
                position = "top"
        else:
            raise ValueError(
                f"Invalid `position`, expected one of ['neutral', "
                f"'top', 'bottom'], got {position!r}."
            )
    return True
def create_backend_app():  # pragma: no cover
    """Returns WSGI app for backend."""
    all_routes = handlers.get_backend_routes() + swarming.get_backend_routes()
    backend_app = webapp2.WSGIApplication(all_routes, debug=utils.is_local_dev_server())
    # Wire up time-series monitoring for the cron module.
    gae_ts_mon.initialize(backend_app, cron_module='backend')
    gae_ts_mon.register_global_metrics(metrics.GLOBAL_METRICS)
    gae_ts_mon.register_global_metrics_callback(
        'buildbucket_global', metrics.update_global_metrics)
    return backend_app
def non_linear_relationships():
    """Plot logarithmic and exponential data along with correlation coefficients."""
    fig, axes = plt.subplots(1, 2, figsize=(12, 3))
    # logarithmic panel data
    x_log = np.linspace(0.01, 10)
    y_log = np.log(x_log)
    # exponential panel data
    x_exp = np.linspace(0, 10)
    y_exp = np.exp(x_exp)
    # draw each panel with its Pearson correlation in the title
    for ax, (x, y) in zip(axes, ((x_log, y_log), (x_exp, y_exp))):
        ax.scatter(x, y)
        ax.set_title(f'ρ = {np.round(np.corrcoef(x, y)[0][1], 2):.2f}')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
    return axes
def label_samples(annotation, atlas, atlas_info=None, tolerance=2):
    """
    Assigns each microarray sample in `annotation` to a parcel in `atlas`.

    Samples are first matched to the parcel whose voxel they fall in. Samples
    landing outside every parcel are retried with a search radius growing one
    millimeter at a time (up to `tolerance` mm), taking the closest parcel
    (by centroid) when several are in range. Samples that still cannot be
    matched receive the label 0.

    Parameters
    ----------
    annotation : (S, 13) pandas.DataFrame
        Pre-loaded annotation information for a given AHBA donor
    atlas : niimg-like object
        A parcellation image in MNI space, where each parcel is identified
        by a unique integer ID
    atlas_info : pandas.DataFrame, optional
        Filepath to or pre-loaded dataframe containing information about
        `atlas`. Must have *at least* columns 'id', 'hemisphere', and
        'structure' mapping atlas IDs to hemisphere and broad structural
        class (i.e., "cortex", "subcortex", "cerebellum"). Default: None
    tolerance : int, optional
        Distance (in mm) that a sample must be from a parcel for it to be
        matched to that parcel. Only considered when the sample is not
        directly within a parcel. Default: 2

    Returns
    -------
    labels : (S, 1) pandas.DataFrame
        Dataframe with parcel labels for each of `S` samples
    """
    annotation = io.read_annotation(annotation)
    atlas = utils.check_img(atlas)
    label_data, affine = np.asarray(atlas.dataobj), atlas.affine

    if atlas_info is not None:
        atlas_info = utils.check_atlas_info(atlas, atlas_info)

    # convert sample MNI coordinates to voxel indices and read parcel IDs
    sample_ijk = utils.xyz_to_ijk(annotation[['mni_x', 'mni_y', 'mni_z']], affine)
    labelled = label_data[sample_ijk[:, 0], sample_ijk[:, 1], sample_ijk[:, 2]]

    # retry unmatched samples with an expanding search radius, stopping as
    # soon as a parcel is found; label stays 0 when nothing is within reach
    for unmatched in np.where(labelled == 0)[0]:
        label = 0
        for tol in range(1, tolerance + 1):
            label = _assign_sample(sample_ijk[[unmatched]], atlas,
                                   sample_info=annotation.iloc[unmatched],
                                   atlas_info=atlas_info,
                                   tolerance=tol)
            if label != 0:
                break
        labelled[unmatched] = label

    return pd.DataFrame(labelled, dtype=int,
                        columns=['label'], index=annotation.index)
def main():
    """Encrypt payload."""
    print()
    print("====== Test encode -----------------------------------------")
    temperature = 25.06
    humidity = 50.55
    print("Temperature:", temperature, "Humidity:", humidity)
    print()
    # HA BLE advertising payload (plaintext, not yet encrypted)
    payload = bytes(bytearray.fromhex('2302CA090303BF13'))
    # rolling counter used as part of the nonce
    counter = bytes(bytearray.fromhex('00112233'))
    address = binascii.unhexlify('5448E68F80A5')  # device MAC
    service_uuid = b"\x1E\x18"
    key = binascii.unhexlify('231d39c1d7cc1ab1aee224cd096db932')
    print("MAC:", address.hex(), "Binkey:", key.hex())
    # nonce layout: MAC (6) + UUID16 (2) + counter (4) = 12 bytes
    nonce = address + service_uuid + counter
    cipher = AES.new(key, AES.MODE_CCM, nonce=nonce, mac_len=4)
    cipher.update(b"\x11")
    encrypted, tag = cipher.encrypt_and_digest(payload)
    print("Data:", payload.hex())
    print("Nonce:", nonce.hex())
    print("CryptData:", encrypted.hex(), "Mic:", tag.hex())
    # assembled AD structure: UUID16 + ciphertext + counter + MIC
    packet = service_uuid + encrypted + counter + tag
    print()
    print("Encrypted data:", packet.hex())
    print()
    print("====== Test decode -----------------------------------------")
    decrypt_aes_ccm(key, address, packet)
def group_by(x, group_by_fields='Event', return_group_indices=False):
    """Split `x` into a LIST of arrays whose rows share the same
    `group_by_fields` value(s).

    Gotchas:
        Assumes `x` is already sorted by `group_by_fields` (either
        ascending or descending order works).
        Does NOT insert empty lists when index values are skipped
        (e.g. events without peaks).
        With return_group_indices=True, returns arrays of the positions
        of the group members in `x` instead of the rows themselves.
    """
    # A single index stays as-is; a non-indexable collection of indices
    # is normalized to a tuple.
    try:
        group_by_fields[0]
    except TypeError:
        group_by_fields = tuple(group_by_fields)
    # Choose what gets split: row positions or the rows themselves.
    splittable = np.arange(len(x)) if return_group_indices else x
    keys = fields_view(x, group_by_fields)
    if keys[0] == keys[-1]:
        # Every row carries the same key: one single group.
        return [splittable]
    # Group boundaries are the positions where the key differs from its
    # predecessor (np.roll compares each element with the previous one).
    boundaries = np.where(np.roll(keys, 1) != keys)[0]
    # Position 0 always shows up due to the roll wrap-around (since
    # keys[0] != keys[-1]); it is not a real split point, so drop it.
    boundaries = boundaries[1:]
    return np.split(splittable, boundaries)
def unlock_file(fd):
"""unlock file. """
try:
fcntl.flock(fd, fcntl.LOCK_UN)
return (True, 0)
except IOError, ex_value:
return (False, ex_value[0]) | 29,890 |
def DsseTrad(nodes_num, measurements, Gmatrix, Bmatrix, Yabs_matrix, Yphase_matrix):
    """
    Traditional WLS state estimator.

    It performs state estimation using rectangular node voltage state
    variables (real and imaginary parts) and it is customized to work
    without PMU measurements.  The non-linear measurement model is solved
    iteratively with the Newton-Raphson method; power measurements are
    converted to equivalent current measurements at every iteration.

    @param nodes_num: number of nodes of the grid
    @param measurements: vector of measurements in input (voltages, currents, powers)
    @param Gmatrix: conductance matrix
    @param Bmatrix: susceptance matrix
    @param Yabs_matrix: magnitude of the admittance matrix
    @param Yphase_matrix: phase of the admittance matrix
    @return: np.array V - estimated complex node voltages
    """
    # calculate weights matrix (obtained as standard_deviations^-2)
    weights = measurements.getWeightsMatrix()
    W = np.diag(weights)
    inj_code = 0
    # Jacobian for power injection measurements
    H2, H3 = calculateJacobiMatrixSinj(measurements, nodes_num, Gmatrix, Bmatrix, inj_code, type=2)
    # Jacobian for branch power measurements
    H4, H5 = calculateJacobiBranchPower(measurements, nodes_num, Gmatrix, Bmatrix, inj_code, type=2)
    # get array which contains the index of measurements type V_mag and I_mag
    vidx = measurements.getIndexOfMeasurements(type=MeasType.V_mag)
    iidx = measurements.getIndexOfMeasurements(type=MeasType.I_mag)
    nvi = len(vidx)
    nii = len(iidx)
    # get array which contains the index of measurements type MeasType.Sinj_real, MeasType.Sinj_imag in the array measurements.measurements
    pidx = measurements.getIndexOfMeasurements(type=MeasType.Sinj_real)
    qidx = measurements.getIndexOfMeasurements(type=MeasType.Sinj_imag)
    # get array which contains the index of measurements type MeasType.S_real, MeasType.S_imag in the array measurements.measurements
    p1br = measurements.getIndexOfMeasurements(type=MeasType.S1_real)
    p2br = measurements.getIndexOfMeasurements(type=MeasType.S2_real)
    q1br = measurements.getIndexOfMeasurements(type=MeasType.S1_imag)
    q2br = measurements.getIndexOfMeasurements(type=MeasType.S2_imag)
    # get an array with all measured values (affected by uncertainty)
    z = measurements.getMeasValues()
    # flat start: all node voltages initialized to 1 + j0
    V = np.ones(nodes_num) + 1j * np.zeros(nodes_num)
    # state vector layout: [V_real(node 1..N), V_imag(node 2..N)] - the
    # imaginary part of the first (slack) node is fixed to 0, hence N-1 entries
    State = np.concatenate((np.ones(nodes_num), np.zeros(nodes_num-1)), axis=0)
    epsilon = 5  # initialized above the threshold so the loop runs at least once
    num_iter = 0
    # Iteration of Newton-Raphson method: needed to solve the non-linear system of equations
    while epsilon > 10 ** (-6):
        """ Computation of equivalent current measurements in place of the power measurements """
        # in every iteration the input power measurements are converted into currents by dividing by the voltage estimated at the previous iteration
        z = convertSinjMeasIntoCurrents(measurements, V, z, pidx, qidx)
        z = convertSbranchMeasIntoCurrents(measurements, V, z, p1br, q1br, p2br, q2br)
        """ Voltage Magnitude Measurements """
        h1, H1 = update_h1_vector(measurements, V, vidx, nvi, nodes_num, inj_code, type=2)
        """ Power Injection Measurements """
        # h(x) vector where power injections are present
        h2 = np.inner(H2, State)
        h3 = np.inner(H3, State)
        """ Power Flow Measurements """
        # h(x) vector where power flows are present
        h4 = np.inner(H4, State)
        h5 = np.inner(H5, State)
        """ Current Magnitude Measurements """
        h6, H6 = update_h6_vector(measurements, V, iidx, nii, Yabs_matrix, Yphase_matrix, nodes_num, num_iter, inj_code, type=2)
        """ WLS computation """
        # all the sub-matrices of H calculated so far are merged in a unique matrix
        H = np.concatenate((H1, H2, H3, H4, H5, H6), axis=0)
        # h(x) sub-vectors are concatenated in the same order as the rows of H
        y = np.concatenate((h1, h2, h3, h4, h5, h6), axis=0)
        # "res" is the residual vector: the difference between input measurements and h(x)
        res = np.subtract(z, y)
        # g = transpose(H) * W * res
        g = np.inner(H.transpose(), np.inner(W, res))
        WH = np.inner(W, H.transpose())
        # G is the gain matrix, that will have to be inverted at each iteration
        G = np.inner(H.transpose(), WH.transpose())
        # pseudo-inverse of G (robust to a singular/ill-conditioned gain matrix)
        Ginv = np.linalg.pinv(G)
        # Delta includes the updates of the states for the current Newton-Raphson iteration
        Delta_State = np.inner(Ginv, g)
        # state is updated
        State = State + Delta_State
        # convergence criterion: largest absolute state update (for the next while check)
        epsilon = np.amax(np.absolute(Delta_State))
        # update the voltages (slack node imaginary part stays fixed at 0)
        V.real = State[:nodes_num]
        V.imag = np.concatenate(([0], State[nodes_num:]), axis=0)
        num_iter = num_iter + 1
    return V
def _get_specs(layout, surfs, array_name, cbar_range, nvals=256):
    """Get array specifications.

    Parameters
    ----------
    layout : ndarray, shape = (n_rows, n_cols)
        Array of surface keys in `surfs`. Specifies how window is arranged.
    surfs : dict[str, BSPolyData]
        Dictionary of surfaces.
    array_name : ndarray
        Names of point data array to plot for each layout entry.
    cbar_range : {'sym'} or tuple,
        Range for each array. If 'sym', uses a symmetric range. Only used if
        array has positive and negative values.
    nvals : int, optional
        Number of lookup table values for continuous arrays.
        Default is 256.

    Returns
    -------
    specs : ndarray
        Structured array of shape (n_overlays, n_rows, n_cols) with fields
        ('min', 'max', 'nval', 'val', 'disc') for each array entry.
    """
    nrow, ncol = layout.shape
    # largest number of overlays requested for any single layout cell
    n_overlays = max([len(a) for a in array_name.ravel()])
    def _set_spec(x, rg):
        # Build one spec tuple for data array `x` given requested range `rg`.
        if rg is None or rg == 'sym':
            a, b = np.nanmin(x), np.nanmax(x)
            if rg == 'sym' and np.sign(a) != np.sign(b):
                # symmetric range around zero only when data crosses zero
                b = max(np.abs(a), b)
                a = -b
            rg = (a, b)
        if np.issubdtype(x.dtype, np.floating):
            # continuous (float) data: no explicit value list, not discrete
            return (*rg, nvals, np.array([]), False)
        # non-float data: treated as discrete, with its unique values listed
        vals = np.unique(x)
        return (*rg, vals.size, vals, True)
    dt = np.dtype([('min', 'f8'), ('max', 'f8'), ('nval', 'i8'),
                   ('val', 'O'), ('disc', '?')])
    specs = np.zeros((n_overlays, nrow, ncol), dtype=dt)
    # default spec for missing entries: NaN range, continuous
    specs[:] = (np.nan, np.nan, nvals, np.array([]), False)
    # per-surface cache so a (surface, array name) spec is computed only once
    map_sp = {k: {} for k in surfs.keys()}
    for idx, k in np.ndenumerate(layout):
        if k is None:
            continue
        for ia, name in enumerate(array_name[idx]):
            if name not in surfs[k].point_keys:
                continue
            if name not in map_sp[k]:
                arr = surfs[k].PointData[name]
                map_sp[k][name] = _set_spec(arr, cbar_range[idx][ia])
            specs[(ia,) + idx] = map_sp[k][name]
    return specs
def get_pool_data(index, val, field):
    """
    Return val for volume based on index.

    Parameters
    ----------
    index: str
        base field name.
    val: str
        base field value.
    field: str
        requested field value.

    Returns
    -------
    str: the requested value, None if absent.
    """
    cmd = [oci_kvm_path, 'list-pool', '--output-mode', 'parsable']
    all_pool_data = subprocess.check_output(cmd).decode('utf-8').splitlines()
    # Validate the field names and resolve their positions once, before
    # scanning the rows; the original re-checked and re-searched them on
    # every iteration even though they never change.
    if index not in pool_fields or field not in pool_fields:
        return None
    index_pos = pool_fields.index(index)
    field_pos = pool_fields.index(field)
    for pool in all_pool_data:
        # rows are '#'-separated parsable output, one pool per line
        pool_list = pool.split('#')
        if pool_list[index_pos] == val:
            return pool_list[field_pos]
    return None
def test_modular_apicast(set_gateway_image, api_client):
    """
    Sends a request through the gateway built with the example policy.
    Asserts the request succeeds and the header added by the policy is present.
    """
    resp = get(api_client(), "/")
    assert resp.status_code == 200
    assert 'X-Example-Policy-Response' in resp.headers
def upper_camel_to_lower_camel(upper_camel: str) -> str:
    """convert upper camel case to lower camel case

    Example:
        CamelCase -> camelCase

    :param upper_camel: string in UpperCamelCase; may be empty
    :return: the same string with its first character lower-cased
    """
    # Slicing ([:1]) instead of indexing ([0]) makes the empty string a
    # no-op rather than raising IndexError.
    return upper_camel[:1].lower() + upper_camel[1:]
def test_custom_link_marshaler():
    """Test that json marshaler use custom marshaler to marshal a link.

    1. Create a sub-class of JSONMarshaler with redefined create_link_marshaler factory.
    2. Create json marshaler from the sub-class.
    3. Marshal a link.
    4. Check that custom marshaler is used to marshal a link.
    """
    class _LinkMarshalerOverride(JSONMarshaler):
        create_link_marshaler = _CustomMarshaler("Custom marshaled link")

    link = Link(relations=(), target="/target")
    actual = _LinkMarshalerOverride().marshal_link(link=link)
    assert actual == "Custom marshaled link", "Wrong link data"
def get(isamAppliance, cert_dbase_id, check_mode=False, force=False):
    """
    Get details of a certificate database.

    :param isamAppliance: appliance connection object providing invoke_get()
    :param cert_dbase_id: ID of the certificate database to retrieve
    :param check_mode: unused; kept for interface consistency with sibling functions
    :param force: unused; kept for interface consistency with sibling functions
    :return: result of isamAppliance.invoke_get()
    """
    # Description string corrected: this endpoint retrieves the details of a
    # single certificate database, not the list of all database names.
    return isamAppliance.invoke_get("Retrieving details of a certificate database",
                                    "/isam/ssl_certificates/{0}/details".format(cert_dbase_id))
def read_environment_file(envfile=None):
    """
    Read a .env file into os.environ.

    If not given a path to a envfile path, does filthy magic stack backtracking
    to find manage.py and then find the envfile.
    """
    if envfile is None:
        # Locate the '.env' next to the file that called us by walking
        # one frame up the interpreter stack.
        caller = sys._getframe(1)
        envfile = os.path.join(os.path.dirname(caller.f_code.co_filename), '.env')
    if not os.path.exists(envfile):
        warnings.warn("not reading %s - it doesn't exist." % envfile)
        return
    # setdefault: values already present in the environment win over the file
    for key, value in parse_environment_file(envfile):
        os.environ.setdefault(key, value)
def model_check(func):
    """Checks if the model is referenced as a valid model. If the model is
    valid, the API will be ready to find the correct endpoint for the given
    model.

    The model is taken from the ``model`` keyword argument if present,
    otherwise from the second positional argument of the decorated call.

    :param func: The function to decorate
    :type func: function
    :raises ValueError: if the model is not a key of ``constants.TRANSLATION``
    """
    import functools

    # functools.wraps preserves the decorated function's name, docstring
    # and metadata (the original wrapper discarded them).
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        model = None
        if kwargs:
            model = kwargs.get("model", None)
        if not model:
            if len(args) > 1:
                # model is expected as the second positional argument
                model = args[1]
        if not constants.TRANSLATION.get(model, None):
            raise ValueError(
                "'{model}' doesn't exists. Allowed models: {allowed_models}".format(
                    model=model,
                    allowed_models=",\n".join(
                        list(constants.TRANSLATION.keys())
                    ),
                )
            )
        return func(*args, **kwargs)
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.