def binary_get_bucket_for_node(buckets: List[KBucket], node: Node) -> KBucket:
"""Given a list of ordered buckets, returns the bucket for a given node."""
bucket_ends = [bucket.end for bucket in buckets]
bucket_position = bisect.bisect_left(bucket_ends, node.id)
# Prevents edge cases where bisect_left returns an out of range index
try:
bucket = buckets[bucket_position]
assert bucket.start <= node.id <= bucket.end
return bucket
except (IndexError, AssertionError):
raise ValueError("No bucket found for node with id {}".format(node.id))
| 5,336,000
|
def trajectory(identifier,x,y,path_save):
""" Returns the trajectory of Identifier"""
plt.figure()
plt.plot(x, y, '.-')
plt.plot(x[0], y[0], 'ro')
plt.plot(x[-1], y[-1], 'go')
plt.grid()
plt.xlabel("x meters")
plt.ylabel("y meters")
    plt.legend(['Trajectory', 'Initial Point', 'Final Point'])
plt.title(f'Trajectory subject {identifier}')
plt.savefig(f'{path_save}/trajectory_subject_{identifier}.png')
plt.close('all')
| 5,336,001
|
def darken(color, factor=0.7):
"""Return darkened color as a ReportLab RGB color.
Take a passed color and returns a Reportlab color that is darker by the
factor indicated in the parameter.
"""
newcol = color_to_reportlab(color)
for a in ["red", "green", "blue"]:
setattr(newcol, a, factor * getattr(newcol, a))
return newcol
| 5,336,002
|
def load_experiments(uuid_list, db_root, dbid): # pragma: io
"""Generator to load the results of the experiments.
Parameters
----------
uuid_list : list(uuid.UUID)
List of UUIDs corresponding to experiments to load.
db_root : str
Root location for data store as requested by the serializer used.
dbid : str
Name of the data store as requested by the serializer used.
Yields
------
meta_data : (str, str, str)
The `meta_data` contains a `tuple` of `str` with ``test_case, optimizer, uuid``.
    data : (:class:`xarray:xarray.Dataset`, :class:`xarray:xarray.Dataset`, :class:`xarray:xarray.Dataset`, list(float))
The `data` contains a tuple of ``(perf_ds, time_ds, suggest_ds, sig)``. The `perf_ds` is a
:class:`xarray:xarray.Dataset` containing the evaluation results with dimensions ``(ITER, SUGGEST)``, each
variable is an objective. The `time_ds` is an :class:`xarray:xarray.Dataset` containing the timing results of
        the form accepted by `summarize_time`. The coordinates must be compatible with `perf_ds`. The `suggest_ds` is an
:class:`xarray:xarray.Dataset` containing the inputs to the function evaluations. Each variable is a function
input. Finally, `sig` contains the `test_case` signature and must be `list(float)`.
"""
uuids_seen = set()
for uuid_ in uuid_list:
logger.info(uuid_.hex)
# Load perf and timing data
perf_ds, meta = XRSerializer.load(db_root, db=dbid, key=cc.EVAL, uuid_=uuid_)
time_ds, meta_t = XRSerializer.load(db_root, db=dbid, key=cc.TIME, uuid_=uuid_)
assert meta == meta_t, "meta data should between time and eval files"
suggest_ds, meta_t = XRSerializer.load(db_root, db=dbid, key=cc.SUGGEST_LOG, uuid_=uuid_)
assert meta == meta_t, "meta data should between suggest and eval files"
# Get signature to pass out as well
_, sig = meta["signature"]
logger.info(meta)
logger.info(sig)
# Build the new indices for combined data, this could be put in function for easier testing
eval_args = unserializable_dict(meta["args"]) # Unpack meta-data
test_case = SklearnModel.test_case_str(
eval_args[CmdArgs.classifier], eval_args[CmdArgs.data], eval_args[CmdArgs.metric]
)
optimizer = str_join_safe(
ARG_DELIM, (eval_args[CmdArgs.optimizer], eval_args[CmdArgs.opt_rev], eval_args[CmdArgs.rev])
)
args_uuid = eval_args[CmdArgs.uuid]
# Check UUID sanity
assert isinstance(args_uuid, str)
assert args_uuid == uuid_.hex, "UUID meta-data does not match filename"
assert args_uuid not in uuids_seen, "uuids being reused between studies"
uuids_seen.add(args_uuid)
# Return key -> data so this generator can be iterated over in dict like manner
meta_data = (test_case, optimizer, args_uuid)
data = (perf_ds, time_ds, suggest_ds, sig)
yield meta_data, data
| 5,336,003
|
def fetch_all_tiles(session):
"""Fetch all tiles."""
return session.query(Tile).all()
| 5,336,004
|
def transaction_update_spents(txs, address):
"""
    Update spent information for a list of transactions for a specific address. This method assumes the list of
    transactions is complete and up-to-date.
    This method loops through all the transactions, updates all transaction outputs for the given address, checks
    whether each output is spent and adds the spending transaction ID and index number to the outputs.
    The same list of transactions, with updated outputs, will be returned.
:param txs: Complete list of transactions for given address
:type txs: list of Transaction
:param address: Address string
:type address: str
:return list of Transaction:
"""
spend_list = {}
for t in txs:
for inp in t.inputs:
if inp.address == address:
spend_list.update({(inp.prev_txid.hex(), inp.output_n_int): t})
address_inputs = list(spend_list.keys())
for t in txs:
for to in t.outputs:
if to.address != address:
continue
            spent = (t.txid, to.output_n) in address_inputs
            txs[txs.index(t)].outputs[to.output_n].spent = spent
if spent:
spending_tx = spend_list[(t.txid, to.output_n)]
spending_index_n = \
[inp for inp in txs[txs.index(spending_tx)].inputs
if inp.prev_txid.hex() == t.txid and inp.output_n_int == to.output_n][0].index_n
txs[txs.index(t)].outputs[to.output_n].spending_txid = spending_tx.txid
txs[txs.index(t)].outputs[to.output_n].spending_index_n = spending_index_n
return txs
| 5,336,005
|
def count_tilings(n: int) -> int:
"""Returns the number of unique ways to tile a row of length n >= 1."""
if n < 5:
# handle recursive base case
return 2**(n - 1)
else:
# place each tile at end of row and recurse on remainder
return (count_tilings(n - 1) +
count_tilings(n - 2) +
count_tilings(n - 3) +
count_tilings(n - 4))
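# A hedged sketch (not from the original source): the plain recursion above is
# exponential in n, so a memoised variant such as this one may be preferable for
# long rows; both agree on small inputs, e.g. count_tilings(5) == 15.
from functools import lru_cache

@lru_cache(maxsize=None)
def count_tilings_cached(n: int) -> int:
    if n < 5:
        return 2 ** (n - 1)
    return sum(count_tilings_cached(n - k) for k in range(1, 5))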
| 5,336,006
|
def _meters_per_pixel(zoom, lat=0.0, tilesize=256):
"""
    Return the pixel resolution for a given mercator tile zoom and latitude.
Parameters
----------
zoom: int
Mercator zoom level
lat: float, optional
Latitude in decimal degree (default: 0)
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
Pixel resolution in meters
"""
return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / (
tilesize * 2 ** zoom
)
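# Hedged usage sketch (assumes `import math` as the function itself does): at zoom 0
# and the equator a 256-px Web Mercator tile spans the full circumference, so the
# resolution is 2 * pi * 6378137 / 256 (about 156543.03 m/px) and halves per zoom level.
assert abs(_meters_per_pixel(0) - 2 * math.pi * 6378137 / 256) < 1e-9
assert abs(_meters_per_pixel(1) - _meters_per_pixel(0) / 2) < 1e-9
assert _meters_per_pixel(10, lat=60.0) < _meters_per_pixel(10)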
| 5,336,007
|
def _generate_submit_id():
"""Generates a submit id in form of <timestamp>-##### where ##### are 5 random digits."""
timestamp = int(time())
return "%d-%05d" % (timestamp, random.randint(0, 99999))
| 5,336,008
|
def draw_from_simplex(ndim: int, nsample: int = 1) -> np.ndarray:
"""Draw uniformly from an n-dimensional simplex.
Args:
ndim: Dimensionality of simplex to draw from.
nsample: Number of samples to draw from the simplex.
Returns:
A matrix of shape (nsample, ndim) that sums to one along axis 1.
"""
if ndim < 1:
raise ValueError("Cannot generate less than 1D samples")
if nsample < 1:
raise ValueError("Generating less than one sample doesn't make sense")
rand = np.random.uniform(size=(nsample, ndim-1))
unsorted = np.concatenate(
[np.zeros(shape=(nsample,1)), rand, np.ones(shape=(nsample,1))],
axis=1
)
    sorted_arr = np.sort(unsorted, axis=1)  # avoid shadowing the builtin `sorted`
    diff_arr = np.concatenate([[-1., 1.], np.zeros(ndim-1)])
    diff_mat = np.array([np.roll(diff_arr, i) for i in range(ndim)]).T
    res = sorted_arr @ diff_mat
return res
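# Hedged usage sketch (assumes `import numpy as np` as the function itself does):
# every row is a point on the (ndim-1)-simplex, i.e. non-negative and summing to one.
samples = draw_from_simplex(ndim=4, nsample=3)
assert samples.shape == (3, 4)
assert np.allclose(samples.sum(axis=1), 1.0)
assert np.all(samples >= 0.0)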
| 5,336,009
|
def py2melProc(function, returnType=None, procName=None, evaluateInputs=True, argTypes=None):
"""
This is a work in progress. It generates and sources a mel procedure which wraps the passed
python function. Theoretically useful for calling your python scripts in scenarios where Maya
does not yet support python callbacks.
The function is inspected in order to generate a MEL procedure which relays its
arguments on to the python function. However, Python features a very versatile argument structure whereas
MEL does not.
- python args with default values (keyword args) will be set to their MEL analogue, if it exists.
    - normal python args without default values default to strings. If 'evaluateInputs' is True, string arguments passed to the
MEL wrapper proc will be evaluated as python code before being passed to your wrapped python
function. This allows you to include a typecast in the string representing your arg::
myWrapperProc( "Transform('persp')" );
- *args : not yet implemented
- **kwargs : not likely to be implemented
function
This can be a callable python object or the full, dotted path to the callable object as a string.
If passed as a python object, the object's __name__ and __module__ attribute must point to a valid module
where __name__ can be found.
If a string representing the python object is passed, it should include all packages and sub-modules, along
with the function's name: 'path.to.myFunc'
procName
Optional name of the mel procedure to be created. If None, the name of the function will be used.
evaluateInputs
If True (default), string arguments passed to the generated mel procedure will be evaluated as python code, allowing
    you to pass more complex python objects as arguments. For example:
In python:
>>> import pymel.tools.py2mel as py2mel
>>> def myFunc( arg ):
... for x in arg:
... print x
>>> py2mel.py2melProc( myFunc, procName='myFuncWrapper', evaluateInputs=True )
Then, in mel::
// execute the mel-to-python wrapper procedure
myFuncWrapper("[ 1, 2, 3]");
the string "[1,2,3]" will be converted to a python list [1,2,3] before it is executed by the python function myFunc
"""
pass
| 5,336,010
|
def test_download_tile():
""" Tests the download_tile function.
Tests the download_tile function which is supposed to return
a base64 string when the as_base64 argument is set to true.
We don't actually test the values of this function because it
scales down dynamically based on the availability of the tile
on this zoom level. It's dependent on the get_zoom_level_image
function which has this inherent property.
"""
res = ARCGIS.download_tile(1, 1, as_base64=True)
assert 'bounds' in res
assert 'coordinate_pixel' in res
assert 'zoom' in res
assert 'base64' in res
| 5,336,011
|
def manhattanDistance( xy1, xy2 ):
"""Returns the Manhattan distance between points xy1 and xy2"""
return abs( xy1[0] - xy2[0] ) + abs( xy1[1] - xy2[1] )
| 5,336,012
|
def delete_policy_command():
"""
Command to delete an existing policy in Symantec MC
:return: An entry indicating whether the deletion was successful
"""
uuid = demisto.args()['uuid']
force = demisto.args().get('force')
delete_policy_request(uuid, force)
return_outputs('Policy deleted successfully', {}, {})
| 5,336,013
|
def Linear(in_features, out_features, dropout=0.0, bias=True):
"""Weight-normalized Linear layer (input: B x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
    if bias:
        m.bias.data.zero_()
return nn.utils.weight_norm(m)
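# Hedged usage sketch (assumes the torch / torch.nn imports the snippet relies on):
# the classic weight_norm wrapper splits the weight into magnitude ("weight_g") and
# direction ("weight_v") parameters.
layer = Linear(in_features=16, out_features=8, dropout=0.1)
assert hasattr(layer, "weight_g") and hasattr(layer, "weight_v")
assert layer(torch.zeros(2, 4, 16)).shape == (2, 4, 8)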
| 5,336,014
|
def download_and_save_DistilBERT_model(name):
"""Download and save DistilBERT transformer model to MODELS_DIR"""
print(f"Downloading: {name}")
    os.makedirs(MODELS_DIR + f"{name}", exist_ok=True)
model = DistilBertForQuestionAnswering.from_pretrained(f"{name}")
tokenizer = DistilBertTokenizer.from_pretrained(f"{name}")
model.save_pretrained(MODELS_DIR + f"{name}")
tokenizer.save_pretrained(MODELS_DIR + f"{name}")
return
| 5,336,015
|
def homogeneous_type(obj):
"""
Checks that the type is "homogeneous" in that all lists are of objects of the same type, etc.
"""
return same_types(obj, obj)
| 5,336,016
|
def crosscorr(f, g):
"""
Takes two vectors of the same size, subtracts the vector elements by their
respective means, and passes one over the other to construct a
cross-correlation vector
"""
N = len(f)
r = np.array([], dtype=np.single)
r1 = np.array([], dtype=np.single)
r2 = np.array([], dtype=np.single)
f = f - np.mean(f)
g = g - np.mean(g)
for i in range(N-1):
r1i = np.dot(f[N-i-1:N], g[0:i+1])
r2i = np.dot(f[0:N-i-1], g[i+1:N])
r1 = np.append(r1, r1i)
r2 = np.append(r2, r2i)
r = np.append(r, r1)
r = np.append(r, np.dot(f, g))
r = np.append(r, r2)
return r/N
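# Hedged usage sketch (assumes `import numpy as np`): for inputs of length N the
# result has length 2*N - 1 and its centre element equals dot(f - mean, g - mean) / N.
f_sig = np.array([1.0, 2.0, 3.0, 4.0])
g_sig = np.array([2.0, 1.0, 0.0, -1.0])
r_fg = crosscorr(f_sig, g_sig)
assert len(r_fg) == 2 * len(f_sig) - 1
fc, gc = f_sig - f_sig.mean(), g_sig - g_sig.mean()
assert np.isclose(r_fg[len(f_sig) - 1], np.dot(fc, gc) / len(f_sig))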
| 5,336,017
|
def serialize_event(event):
"""
Serialization of Weboob ``BaseCalendarEvent`` object to schema.org
representation.
:param event: The Weboob ``BaseCalendarEvent`` object to serialize.
"""
serialized = {
'@type': 'Event',
'@context': 'http://schema.org/',
'identifier': event.id,
'about': event.summary,
'description': event.description,
}
if event.start_date:
serialized['startDate'] = event.start_date
if event.end_date:
serialized['endDate'] = event.end_date
if event.start_date and event.end_date:
serialized['duration'] = event.end_date - event.start_date
location = ''
if event.location:
location = event.location
if event.city:
location += ' %s' % (event.city)
if location:
        serialized['location'] = {
'@type': 'Place',
'description': location.strip()
}
if event.event_planner:
serialized['organizer'] = {
'@type': 'Organization',
'name': event.event_planner,
}
if not empty(event.price):
serialized['isAccessibleForFree'] = (event.price == 0)
serialized['offers'] = {
'@type': 'Offer',
'price': event.price,
}
    if not empty(event.status):
        if event.status == 'CONFIRMED':
            serialized['eventStatus'] = 'EventScheduled'
        elif event.status == 'CANCELLED':
            serialized['eventStatus'] = 'EventCancelled'
    return serialized
| 5,336,018
|
def nearest_neighbors(point_cloud_A, point_cloud_B, alg='knn'):
"""Find the nearest (Euclidean) neighbor in point_cloud_B (model) for each
point in point_cloud_A (data).
Parameters
----------
point_cloud_A: Nx3 numpy array
data points
point_cloud_B: Mx3 numpy array
model points
Returns
-------
distances: (N, ) numpy array
Euclidean distances from each point in
point_cloud_A to its nearest neighbor in point_cloud_B.
indices: (N, ) numpy array
indices in point_cloud_B of each
point_cloud_A point's nearest neighbor - these are the c_i's
"""
assert 3 == point_cloud_A.shape[1] and 3 == point_cloud_B.shape[1]
n, m = point_cloud_A.shape[0], point_cloud_B.shape[0]
assert n == m
distances = np.zeros(n)
indices = np.zeros(n)
if alg == 'knn':
nbrs = NearestNeighbors(n_neighbors=1).fit(point_cloud_B)
d, ids = nbrs.kneighbors(point_cloud_A)
distances = np.array(d).flatten()
indices = np.array(ids).flatten()
elif alg == 'hungarian':
cost = np.zeros((n, m))
for i, j in product(range(n), range(m)):
cost[i,j] = norm(point_cloud_A[i,:]- point_cloud_B[j,:])
row_ids, indices = linear_sum_assignment(cost)
distances = cost[row_ids, indices]
else:
raise NotImplementedError('NN algorithm must be one of: {}'.format(NN_ALGS))
return distances, indices
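# Hedged usage sketch (assumes numpy plus the sklearn/scipy helpers the function
# relies on): with identical clouds every point is its own nearest neighbour.
cloud = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
dists, ids = nearest_neighbors(cloud, cloud, alg='knn')
assert np.allclose(dists, 0.0)
assert list(ids) == [0, 1, 2]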
| 5,336,019
|
def ialign(images, reference=None, mask=None, fill_value=0.0, fast=True):
"""
Generator of aligned diffraction images.
Parameters
----------
images : iterable
Iterable of ndarrays of shape (N,M)
reference : `~numpy.ndarray`, shape (M,N)
Images in `images` will be aligned onto the `reference` image. If
'reference' is None (default), the first image in the 'images' stream
is used as a reference
mask : `~numpy.ndarray` or None, optional
Mask that evaluates to True on valid pixels.
fill_value : float, optional
Edges will be filled with `fill_value` after alignment.
fast : bool, optional
If True (default), alignment is done on images cropped to half
(one quarter area). Disable for small images, e.g. 256x256.
Yields
------
aligned : `~numpy.ndarray`
Aligned image. If `reference` is None, the first aligned image is the reference.
See Also
--------
skued.align : align a single diffraction pattern onto a reference.
"""
images = iter(images)
if reference is None:
reference = next(images)
yield reference
yield from map(
partial(
align, reference=reference, mask=mask, fill_value=fill_value, fast=fast
),
images,
)
| 5,336,020
|
def us_1040(form_values, year="latest"):
"""Compute US federal tax return."""
_dispatch = {
"latest": (ots_2020.us_main, data.US_1040_2020),
"2020": (ots_2020.us_main, data.US_1040_2020),
"2019": (ots_2019.us_main, data.US_1040_2019),
"2018": (ots_2018.us_main, data.US_1040_2018),
"2017": (ots_2017.us_main, data.US_1040_2017),
}
main_fn, schema = _dispatch[str(year)]
return helpers.parse_ots_return(
main_fn(helpers.generate_ots_return(form_values, schema["input_wrap"])),
schema["output_wrap"],
)
| 5,336,021
|
def resolve_service_deps(services: dict) -> dict:
    """Loop through the services and propagate their needed_by requirements."""
needed_by = {}
for name in services:
service = services.get(name)
needs = service.get_tasks_needed_by()
for need, provides in needs.items():
needed_by[need] = list(set(needed_by.get(need, []) + provides))
for name in services:
service = services.get(name)
service.update_task_requires(needed_by)
return services
| 5,336,022
|
def rolling_window(series, window_size):
"""
Transforms an array of series into an array of sliding window arrays. If
the passed in series is a matrix, each column will be transformed into an
array of sliding windows.
"""
return np.array(
[
series[i : (i + window_size)]
for i in range(0, series.shape[0] - window_size + 1)
]
)
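# Hedged usage sketch (assumes `import numpy as np`): a length-5 series with a
# window of 3 yields 5 - 3 + 1 = 3 overlapping windows stacked along axis 0.
windows = rolling_window(np.arange(5), 3)
assert windows.shape == (3, 3)
assert (windows[0] == np.array([0, 1, 2])).all()
assert (windows[-1] == np.array([2, 3, 4])).all()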
| 5,336,023
|
def reply_published_cb(sender, user, reply, trivial, **kwargs):
"""Send e-mail when a review reply is published.
Listens to the :py:data:`~reviewboard.reviews.signals.reply_published`
signal and sends an e-mail if this type of notification is enabled (through
``mail_send_review_mail`` site configuration).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get('mail_send_review_mail') and not trivial:
mail_reply(reply, user)
| 5,336,024
|
def ldns_key_set_inception(*args):
"""LDNS buffer."""
return _ldns.ldns_key_set_inception(*args)
| 5,336,025
|
def verifyIP(ip):
"""Verifies an IP is valid"""
try:
#Split ip and integer-ize it
octets = [int(x) for x in ip.split('.')]
except ValueError:
return False
#First verify length
if len(octets) != 4:
return False
#Then check octet values
for octet in octets:
if octet < 0 or octet > 255:
return False
return True
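# Hedged usage sketch: both the octet count and the 0-255 range are enforced.
assert verifyIP("192.168.0.1") is True
assert verifyIP("256.1.1.1") is False
assert verifyIP("10.0.0") is False
assert verifyIP("not.an.ip.address") is False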
| 5,336,026
|
def test_fixtures1(testapp):
""" This test is not really exhaustive.
Still need to inspect the sql log to verify fixture correctness.
"""
res = testapp.get('/awards').maybe_follow()
items = res.json['@graph']
assert len(items) == 1
# Trigger an error
item = {'foo': 'bar'}
res = testapp.post_json('/awards', item, status=422)
assert res.json['errors']
res = testapp.get('/awards').maybe_follow()
items = res.json['@graph']
assert len(items) == 1
item = {
'name': 'NIS39339',
'title': 'Grant to make snow',
'project': 'ENCODE',
'rfa': 'ENCODE3',
}
testapp.post_json('/awards', item, status=201)
res = testapp.get('/awards').maybe_follow()
items = res.json['@graph']
assert len(items) == 2
# Trigger an error
item = {'foo': 'bar'}
res = testapp.post_json('/awards', item, status=422)
assert res.json['errors']
res = testapp.get('/awards').maybe_follow()
items = res.json['@graph']
assert len(items) == 2
| 5,336,027
|
def bayesdb_deregister_backend(bdb, backend):
"""Deregister `backend`, which must have been registered in `bdb`."""
name = backend.name()
assert name in bdb.backends
assert bdb.backends[name] == backend
del bdb.backends[name]
| 5,336,028
|
def get_datetime_now(t=None, fmt='%Y_%m%d_%H%M_%S'):
"""Return timestamp as a string; default: current time, format: YYYY_DDMM_hhmm_ss."""
if t is None:
t = datetime.now()
return t.strftime(fmt)
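# Hedged usage sketch (assumes `from datetime import datetime` as used above):
# a fixed datetime makes the default YYYY_MMDD_hhmm_ss format explicit.
assert get_datetime_now(datetime(2021, 3, 5, 14, 30, 15)) == "2021_0305_1430_15"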
| 5,336,029
|
def is_firstline(text, medicine, disease):
"""Detect if first-line treatment is mentioned with a medicine in a sentence.
Use keyword matching to detect if the keywords "first-line treatment" or "first-or second-line treatment", medicine name, and disease name all appear in the sentence.
Parameters
----------
text : str
A single sentence.
medicine : str
        A medicine's name.
    disease : str
        A disease's name.
    Returns
-------
bool
Return True if the medicine and first-line treatment are mentioned in the sentence, False otherwise.
Examples
--------
Import the module
>>> from biomarker_nlp import biomarker_extraction
Example
>>> txt = "TECENTRIQ, in combination with carboplatin and etoposide, is indicated for the first-line treatment of adult patients with extensive-stage small cell lung cancer (ES-SCLC)."
>>> medicine = "TECENTRIQ"
>>> disease = "small cell lung cancer"
>>> biomarker_extraction.is_firstline(text = txt, medicine = medicine, disease = disease)
True
"""
text = text.lower()
medicine = medicine.lower()
disease = disease.lower()
    return (medicine in text
            and ('first-line treatment' in text or 'first-or second-line treatment' in text)
            and disease in text)
| 5,336,030
|
def mac_address(addr):
""" mac_address checks that a given string is in MAC address format """
mac = addr.upper()
if not _mac_address_pattern.fullmatch(mac):
raise TypeError('{} does not match a MAC address pattern'.format(addr))
return mac
| 5,336,031
|
def savez(d,filepath):
"""
Save a sparse matrix to file in numpy binary format.
Parameters
----------
    d : scipy.sparse.coo_matrix
        The sparse matrix to save; it must be in COO format, since its ``row``,
        ``col``, ``data`` and ``shape`` attributes are written to the archive.
filepath : str
The filepath to write to.
"""
np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape)
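# Hedged usage sketch (assumes numpy as np and scipy.sparse as sp are importable):
# the matrix must be in COO format so that .row/.col/.data/.shape exist, and the
# saved arrays are enough to rebuild it.
import tempfile, os
import scipy.sparse as sp
path = os.path.join(tempfile.mkdtemp(), "demo.npz")
m = sp.coo_matrix(np.array([[0, 1], [2, 0]]))
savez(m, path)
loaded = np.load(path)
restored = sp.coo_matrix((loaded["data"], (loaded["row"], loaded["col"])),
                         shape=tuple(loaded["shape"]))
assert (restored.toarray() == m.toarray()).all()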
| 5,336,032
|
def py3_classifiers():
"""Fetch the Python 3-related trove classifiers."""
url = 'https://pypi.python.org/pypi?%3Aaction=list_classifiers'
response = urllib_request.urlopen(url)
try:
try:
status = response.status
except AttributeError: #pragma: no cover
status = response.code
if status != 200: #pragma: no cover
msg = 'PyPI responded with status {0} for {1}'.format(status, url)
raise ValueError(msg)
data = response.read()
finally:
response.close()
classifiers = data.decode('utf-8').splitlines()
base_classifier = 'Programming Language :: Python :: 3'
return (classifier for classifier in classifiers
if classifier.startswith(base_classifier))
| 5,336,033
|
def match(i, j):
"""
returns (red, white) count,
where red is matches in color and position,
and white is a match in color but not position
"""
red_count = 0
# these are counts only of the items that are not exact matches
i_colors = [0]*6
j_colors = [0]*6
for i_c, j_c in zip(color_inds(i), color_inds(j)):
if i_c == j_c:
red_count += 1
else:
i_colors[i_c] += 1
j_colors[j_c] += 1
white_count = 0
for i_c, j_c in zip(i_colors, j_colors):
white_count += min(i_c, j_c)
return (red_count, white_count)
| 5,336,034
|
def main():
"""Do Something"""
form = cgi.FieldStorage()
vote = form.getfirst('vote', 'missing')
ssw("Content-type: application/json\n")
j = do(vote)
ssw("\n") # Finalize headers
ssw(json.dumps(j))
| 5,336,035
|
def time_delay_runge_kutta_4(fun, t_0, y_0, tau, history=None, steps=1000,
width=1):
"""
apply the classic Runge Kutta method to a time delay differential equation
f: t, y(t), y(t-tau) -> y'(t)
"""
width = float(width)
if not isinstance(y_0, np.ndarray):
        y_0 = np.ones((1,), dtype=float)*y_0
dim = len(y_0)
hist_steps = np.floor(tau/width)
assert tau/width == hist_steps, "tau must be a multiple of width"
hist_steps = int(hist_steps)
if history is None:
        history = np.zeros((hist_steps, dim), dtype=float)
else:
assert len(history) == hist_steps
fun_eval = np.zeros((steps+1+hist_steps, dim), dtype=y_0.dtype)
fun_eval[:hist_steps] = history
fun_eval[hist_steps] = y_0
for step in range(steps):
k_1 = fun(t_0, y_0, fun_eval[step])
k_2 = fun(t_0 + width/2, y_0 + width/2*k_1, fun_eval[step])
k_3 = fun(t_0 + width/2, y_0 + width/2*k_2, fun_eval[step])
k_4 = fun(t_0 + width, y_0 + width*k_3, fun_eval[step])
t_0 += width
y_0 += width*(k_1 + 2*k_2 + 2*k_3 + k_4)/6
fun_eval[step+1+hist_steps] = y_0
return fun_eval[hist_steps:]
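# Hedged usage sketch (assumes `import numpy as np`): integrate the scalar delay
# equation y'(t) = -y(t - 1) with constant history y = 1 on [-1, 0], using a step
# width that divides tau exactly as the assertion above requires.
delay_rhs = lambda t, y, y_delayed: -y_delayed
traj = time_delay_runge_kutta_4(delay_rhs, t_0=0.0, y_0=1.0, tau=1.0,
                                history=np.ones((2, 1)), steps=20, width=0.5)
assert traj.shape == (21, 1)
assert np.isclose(traj[0, 0], 1.0)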
| 5,336,036
|
def Vstagger_to_mass(V):
"""
V are the data on the top and bottom of a grid box
A simple conversion of the V stagger grid to the mass points.
Calculates the average of the top and bottom value of a grid box. Looping
over all rows reduces the staggered grid to the same dimensions as the
mass point.
Useful for converting V, XLAT_V, and XLONG_V to masspoints
    Difference between XLAT_V and XLAT is usually small, on the order of 10e-5
(row_j1+row_j2)/2 = masspoint_inrow
Input:
Vgrid with size (##+1, ##)
Output:
V on mass points with size (##,##)
"""
    # create the first row manually to initialize the array with correct dimensions
    V_masspoint = (V[0,:]+V[1,:])/2.  # average of the first and second rows
V_num_rows = int(V.shape[0])-1 # we want one less row than we have
# Loop through the rest of the rows
# We want the same number of rows as we have columns.
# Take the first and second row, average them, and store in first row in V_masspoint
for row in range(1,V_num_rows):
row_avg = (V[row,:]+V[row+1,:])/2.
# Stack those onto the previous for the final array
V_masspoint = np.row_stack((V_masspoint,row_avg))
return V_masspoint
| 5,336,037
|
def verify_l4_block_pow(hash_type: SupportedHashes, block: "l4_block_model.L4BlockModel", complexity: int = 8) -> bool:
"""Verify a level 4 block with proof of work scheme
Args:
hash_type: SupportedHashes enum type
block: L4BlockModel with appropriate data to verify
Returns:
Boolean if valid hashed block with appropriate nonce
"""
# Get hash for PoW calculation to compare
hash_bytes = hash_l4_block(hash_type, block, block.nonce)
# Make sure it matches complexity requirements
if not check_complexity(hash_bytes, complexity):
return False
# Check that the hash bytes match what the block provided
return hash_bytes == base64.b64decode(block.proof)
| 5,336,038
|
def test_add_theme(test_client, sample_data):
"""
GIVEN a project
WHEN add theme is called
    THEN the theme should appear in the get tasks response for that project
"""
theme_data = {
'project_id': 1,
'title': 'test theme'
}
r = test_client.post('/add_theme', json=theme_data)
r2 = test_client.get(f'/get_tasks?project_id={1}')
r2_body = r2.json
assert r2_body['themes'][0]['title'] == 'test theme 11'
| 5,336,039
|
def Run():
"""Does coverage operations based on command line args."""
# TODO: do we want to support combining coverage for a single target
try:
parser = optparse.OptionParser(usage="usage: %prog --combine-coverage")
parser.add_option(
"-c", "--combine-coverage", dest="combine_coverage", default=False,
action="store_true", help="Combine coverage results stored given "
"android root path")
parser.add_option(
"-t", "--tidy", dest="tidy", default=False, action="store_true",
help="Run tidy on all generated html files")
options, args = parser.parse_args()
coverage = CoverageGenerator(None)
if options.combine_coverage:
coverage.CombineCoverage()
if options.tidy:
coverage.TidyOutput()
except errors.AbortError:
logger.SilentLog("Exiting due to AbortError")
| 5,336,040
|
def report_reply(report_id):
"""
Replies to an existing report. The email reply is constructed and sent
    to the email address that originally reported the phish.
Args:
report_id - str - The urlsafe key for the EmailReport
TODO: Make this a nice template or something
"""
report = EmailReport.get_by_id(report_id)
if not report:
return json_error(404, 'Report not found', {})
sender_address = g.user.email()
response = EmailResponse.from_dict(request.get_json())
if not response:
return json_error(400, 'Invalid JSON', {})
response.responder = sender_address
response.subject = render_template_string(response.subject, report=report)
response.content = render_template_string(response.content, report=report)
try:
response_key = response.put()
report.responses.append(response_key)
if not report.date_responded:
report.date_responded = datetime.now()
event_key = EventReportResponded(
response=response, report=report).put()
report.events.append(event_key)
report.put()
email_provider.send(
to=report.reported_by,
sender=g.user.email(),
subject=response.subject,
body=response.content)
except Exception as e:
return json_error(400, str(e), {})
return jsonify(report.to_dict())
| 5,336,041
|
def file_reader(file_name):
"""file_reader"""
data = None
with open(file_name, "r") as f:
for line in f.readlines():
data = eval(line)
f.close()
return data
| 5,336,042
|
def train(model, data, params):
""" Trains a model.
Inputs:
model (ATISModel): The model to train.
data (ATISData): The data that is used to train.
params (namespace): Training parameters.
"""
# Get the training batches.
log = Logger(os.path.join(params.logdir, params.logfile), "w")
num_train_original = atis_data.num_utterances(data.train_data)
log.put("Original number of training utterances:\t"
+ str(num_train_original))
eval_fn = evaluate_utterance_sample
trainbatch_fn = data.get_utterance_batches
trainsample_fn = data.get_random_utterances
validsample_fn = data.get_all_utterances
batch_size = params.batch_size
if params.interaction_level:
batch_size = 1
eval_fn = evaluate_interaction_sample
trainbatch_fn = data.get_interaction_batches
trainsample_fn = data.get_random_interactions
validsample_fn = data.get_all_interactions
maximum_output_length = params.train_maximum_sql_length
train_batches = trainbatch_fn(batch_size,
max_output_length=maximum_output_length,
randomize=not params.deterministic)
if params.num_train >= 0:
train_batches = train_batches[:params.num_train]
training_sample = trainsample_fn(params.train_evaluation_size,
max_output_length=maximum_output_length)
valid_examples = validsample_fn(data.valid_data,
max_output_length=maximum_output_length)
num_train_examples = sum([len(batch) for batch in train_batches])
num_steps_per_epoch = len(train_batches)
log.put(
"Actual number of used training examples:\t" +
str(num_train_examples))
log.put("(Shortened by output limit of " +
str(maximum_output_length) +
")")
log.put("Number of steps per epoch:\t" + str(num_steps_per_epoch))
log.put("Batch size:\t" + str(batch_size))
print(
"Kept " +
str(num_train_examples) +
"/" +
str(num_train_original) +
" examples")
print(
"Batch size of " +
str(batch_size) +
" gives " +
str(num_steps_per_epoch) +
" steps per epoch")
# Keeping track of things during training.
epochs = 0
patience = params.initial_patience
learning_rate_coefficient = 1.
previous_epoch_loss = float('inf')
previous_valid_acc = 0.
maximum_validation_accuracy = 0.
maximum_string_accuracy = 0.
countdown = int(patience)
if params.scheduler:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model.trainer, mode='min', )
keep_training = True
step = 0
while keep_training:
log.put("Epoch:\t" + str(epochs))
model.set_dropout(params.dropout_amount)
model.train()
if not params.scheduler:
model.set_learning_rate(learning_rate_coefficient * params.initial_learning_rate)
# Run a training step.
if params.interaction_level:
epoch_loss, step = train_epoch_with_interactions(
train_batches,
params,
model,
randomize=not params.deterministic,
db2id=data.db2id,
id2db=data.id2db,
step=step)
else:
epoch_loss = train_epoch_with_utterances(
train_batches,
model,
randomize=not params.deterministic)
log.put("train epoch loss:\t" + str(epoch_loss))
model.set_dropout(0.)
model.eval()
# Run an evaluation step on a sample of the training data.
train_eval_results = eval_fn(training_sample,
model,
params.train_maximum_sql_length,
name=os.path.join(params.logdir, "train-eval"),
write_results=True,
gold_forcing=True,
metrics=TRAIN_EVAL_METRICS)[0]
for name, value in train_eval_results.items():
log.put(
"train final gold-passing " +
name.name +
":\t" +
"%.2f" %
value)
# Run an evaluation step on the validation set.
valid_eval_results = eval_fn(valid_examples,
model,
params.eval_maximum_sql_length,
name=os.path.join(params.logdir, "valid-eval"),
write_results=True,
gold_forcing=True,
metrics=VALID_EVAL_METRICS)[0]
for name, value in valid_eval_results.items():
log.put("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
valid_loss = valid_eval_results[Metrics.LOSS]
valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]
string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]
if params.scheduler:
scheduler.step(valid_loss)
if valid_loss > previous_epoch_loss and valid_token_accuracy < previous_valid_acc and step >= params.warmup_step:
learning_rate_coefficient *= params.learning_rate_ratio
log.put(
"learning rate coefficient:\t" +
str(learning_rate_coefficient))
previous_epoch_loss = valid_loss
previous_valid_acc = valid_token_accuracy
saved = False
if not saved and string_accuracy > maximum_string_accuracy:
maximum_string_accuracy = string_accuracy
patience = patience * params.patience_ratio
countdown = int(patience)
last_save_file = os.path.join(params.logdir, "save_" + str(epochs))
model.save(last_save_file)
log.put(
"maximum string accuracy:\t" +
str(maximum_string_accuracy))
log.put("patience:\t" + str(patience))
log.put("save file:\t" + str(last_save_file))
else:
log.put("still saved")
last_save_file = os.path.join(params.logdir, "save_" + str(epochs))
model.save(last_save_file)
if countdown <= 0:
keep_training = False
countdown -= 1
log.put("countdown:\t" + str(countdown))
log.put("")
epochs += 1
log.put("Finished training!")
log.close()
return last_save_file
| 5,336,043
|
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
account = model.authenticate(username, password)
if account is None:
return AuthResponse.no_account
if not model.hasAssignedBlock(account):
return AuthResponse.no_block
return AuthResponse.success
| 5,336,044
|
def plot_energy_ratio(
reference_power_baseline,
test_power_baseline,
wind_speed_array_baseline,
wind_direction_array_baseline,
reference_power_controlled,
test_power_controlled,
wind_speed_array_controlled,
wind_direction_array_controlled,
wind_direction_bins,
confidence=95,
n_boostrap=None,
wind_direction_bin_p_overlap=None,
axarr=None,
base_color="b",
con_color="g",
label_array=None,
label_pchange=None,
plot_simple=False,
plot_ratio_scatter=False,
marker_scale=1.0,
show_count=True,
hide_controlled_case=False,
ls="--",
marker=None,
):
"""
Plot the balanced energy ratio.
Function mainly acts as a wrapper to call
calculate_balanced_energy_ratio and plot the results.
Args:
reference_power_baseline (np.array): Array of power
of reference turbine in baseline conditions.
test_power_baseline (np.array): Array of power of
test turbine in baseline conditions.
wind_speed_array_baseline (np.array): Array of wind
speeds in baseline conditions.
wind_direction_array_baseline (np.array): Array of
wind directions in baseline case.
reference_power_controlled (np.array): Array of power
of reference turbine in controlled conditions.
test_power_controlled (np.array): Array of power of
test turbine in controlled conditions.
wind_speed_array_controlled (np.array): Array of wind
speeds in controlled conditions.
wind_direction_array_controlled (np.array): Array of
wind directions in controlled case.
wind_direction_bins (np.array): Wind directions bins.
confidence (int, optional): Confidence level to use.
Defaults to 95.
        n_boostrap (int, optional): Number of bootstraps; if
            None, _calculate_bootstrap_iterations is called. Defaults
            to None.
wind_direction_bin_p_overlap (np.array, optional):
Percentage overlap between wind direction bin. Defaults to
None.
axarr ([axes], optional): list of axes to plot to.
Defaults to None.
base_color (str, optional): Color of baseline in
plots. Defaults to 'b'.
con_color (str, optional): Color of controlled in
plots. Defaults to 'g'.
label_array ([str], optional): List of labels to
apply Defaults to None.
        label_pchange (str, optional): Label for
            percentage change. Defaults to None.
plot_simple (bool, optional): Plot only the ratio, no
confidence. Defaults to False.
plot_ratio_scatter (bool, optional): Include scatter
plot of values, sized to indicate counts. Defaults to False.
        marker_scale (float, optional): Marker scale.
            Defaults to 1.
show_count (bool, optional): Show the counts as scatter plot
hide_controlled_case (bool, optional): Option to hide the control case from plots, for demonstration
"""
if axarr is None:
fig, axarr = plt.subplots(3, 1, sharex=True)
if label_array is None:
label_array = ["Baseline", "Controlled"]
if label_pchange is None:
label_pchange = "Energy Gain"
(
ratio_array_base,
lower_ratio_array_base,
upper_ratio_array_base,
counts_ratio_array_base,
ratio_array_con,
lower_ratio_array_con,
upper_ratio_array_con,
counts_ratio_array_con,
diff_array,
lower_diff_array,
upper_diff_array,
counts_diff_array,
p_change_array,
lower_p_change_array,
upper_p_change_array,
counts_p_change_array,
) = calculate_balanced_energy_ratio(
reference_power_baseline,
test_power_baseline,
wind_speed_array_baseline,
wind_direction_array_baseline,
reference_power_controlled,
test_power_controlled,
wind_speed_array_controlled,
wind_direction_array_controlled,
wind_direction_bins,
        confidence=confidence,
n_boostrap=n_boostrap,
wind_direction_bin_p_overlap=wind_direction_bin_p_overlap,
)
if plot_simple:
ax = axarr[0]
ax.plot(
wind_direction_bins,
ratio_array_base,
label=label_array[0],
color=base_color,
ls=ls,
marker=marker,
)
if not hide_controlled_case:
ax.plot(
wind_direction_bins,
ratio_array_con,
label=label_array[1],
color=con_color,
ls=ls,
marker=marker,
)
ax.axhline(1, color="k")
ax.set_ylabel("Energy Ratio (-)")
ax = axarr[1]
ax.plot(
wind_direction_bins,
diff_array,
label=label_pchange,
color=con_color,
ls=ls,
marker=marker,
)
ax.axhline(0, color="k")
ax.set_ylabel("Change in Energy Ratio (-)")
ax = axarr[2]
ax.plot(
wind_direction_bins,
p_change_array,
label=label_pchange,
color=con_color,
ls=ls,
marker=marker,
)
ax.axhline(0, color="k")
ax.set_ylabel("% Change in Energy Ratio (-)")
else:
ax = axarr[0]
ax.plot(
wind_direction_bins,
ratio_array_base,
label=label_array[0],
color=base_color,
ls="-",
marker=".",
)
ax.fill_between(
wind_direction_bins,
lower_ratio_array_base,
upper_ratio_array_base,
alpha=0.3,
color=base_color,
label="_nolegend_",
)
if show_count:
ax.scatter(
wind_direction_bins,
ratio_array_base,
s=counts_ratio_array_base * marker_scale,
label="_nolegend_",
color=base_color,
marker="o",
alpha=0.2,
)
if not hide_controlled_case:
ax.plot(
wind_direction_bins,
ratio_array_con,
label=label_array[1],
color=con_color,
ls="-",
marker=".",
)
ax.fill_between(
wind_direction_bins,
lower_ratio_array_con,
upper_ratio_array_con,
alpha=0.3,
color=con_color,
label="_nolegend_",
)
if show_count:
ax.scatter(
wind_direction_bins,
ratio_array_con,
s=counts_ratio_array_con * marker_scale,
label="_nolegend_",
color=con_color,
marker="o",
alpha=0.2,
)
ax.axhline(1, color="k")
ax.set_ylabel("Energy Ratio (-)")
ax = axarr[1]
ax.plot(
wind_direction_bins,
diff_array,
label=label_pchange,
color=con_color,
ls="-",
marker=".",
)
ax.fill_between(
wind_direction_bins,
lower_diff_array,
upper_diff_array,
alpha=0.3,
color=con_color,
label="_nolegend_",
)
if show_count:
ax.scatter(
wind_direction_bins,
diff_array,
s=counts_diff_array * marker_scale,
label="_nolegend_",
color=con_color,
marker="o",
alpha=0.2,
)
ax.axhline(0, color="k")
ax.set_ylabel("Change in Energy Ratio (-)")
ax = axarr[2]
ax.plot(
wind_direction_bins,
p_change_array,
label=label_pchange,
color=con_color,
ls="-",
marker=".",
)
ax.fill_between(
wind_direction_bins,
lower_p_change_array,
upper_p_change_array,
alpha=0.3,
color=con_color,
label="_nolegend_",
)
if show_count:
ax.scatter(
wind_direction_bins,
p_change_array,
s=counts_p_change_array * marker_scale,
label="_nolegend_",
color=con_color,
marker="o",
alpha=0.2,
)
ax.axhline(0, color="k")
ax.set_ylabel("% Change in Energy Ratio (-)")
for ax in axarr:
ax.grid(True)
ax.set_xlabel("Wind Direction (Deg)")
return diff_array
| 5,336,045
|
def dir_is_cachedir(path):
"""Determines whether the specified path is a cache directory (and
therefore should potentially be excluded from the backup) according to
the CACHEDIR.TAG protocol
(http://www.brynosaurus.com/cachedir/spec.html).
"""
tag_contents = b'Signature: 8a477f597d28d172789f06886806bc55'
tag_path = os.path.join(path, 'CACHEDIR.TAG')
try:
if os.path.exists(tag_path):
with open(tag_path, 'rb') as tag_file:
tag_data = tag_file.read(len(tag_contents))
if tag_data == tag_contents:
return True
except OSError:
pass
return False
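# Hedged usage sketch (assumes the `os` import used above plus `tempfile`): a
# directory is flagged only once CACHEDIR.TAG starts with the well-known signature.
import tempfile
cache_dir = tempfile.mkdtemp()
assert dir_is_cachedir(cache_dir) is False
with open(os.path.join(cache_dir, 'CACHEDIR.TAG'), 'wb') as tag:
    tag.write(b'Signature: 8a477f597d28d172789f06886806bc55\n')
assert dir_is_cachedir(cache_dir) is True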
| 5,336,046
|
def update_programmer_menu(arduino_info):
"""."""
programmer_names = arduino_info['programmers'].get('names', [])
text = '\t' * 0 + '[\n'
text += '\t' * 1 + '{\n'
text += '\t' * 2 + '"caption": "Arduino",\n'
text += '\t' * 2 + '"mnemonic": "A",\n'
text += '\t' * 2 + '"id": "arduino",\n'
text += '\t' * 2 + '"children":\n'
text += '\t' * 2 + '[\n'
text += '\t' * 3 + '{\n'
text += '\t' * 4 + '"caption": "Programmer",\n'
text += '\t' * 4 + '"id": "stino_programmer",\n'
text += '\t' * 4 + '"children":\n'
text += '\t' * 4 + '[\n'
text += '\t' * 5 + '{\n'
text += '\t' * 6 + '"caption": "Refresh",\n'
text += '\t' * 6 + '"id": "stino_refresh_programmers",\n'
text += '\t' * 6 + '"command": "stino_refresh_programmers"\n'
text += '\t' * 5 + '},\n'
text += '\t' * 5 + '{"caption": "-"}'
for programmer_name in programmer_names:
text += ',\n'
text += '\t' * 5 + '{\n'
text += '\t' * 6 + '"caption": "%s",\n' % programmer_name
text += '\t' * 6 + '"id": "stino_programmer_%s",\n' % programmer_name
text += '\t' * 6 + '"command": "stino_select_programmer",\n'
text += '\t' * 6
text += '"args": {"programmer_name": "%s"},\n' % programmer_name
text += '\t' * 6 + '"checkbox": true\n'
text += '\t' * 5 + '}'
text += '\n' + '\t' * 4 + ']\n'
text += '\t' * 3 + '}\n'
text += '\t' * 2 + ']\n'
text += '\t' * 1 + '}\n'
text += '\t' * 0 + ']\n'
write_menu('programmer', text)
| 5,336,047
|
def first_position():
"""Sets up two positions in the
Upper left
.X.Xo.
X.Xoo.
XXX...
......
Lower right
......
..oooo
.oooXX
.oXXX.
(X = black, o = white)
They do not overlap as the Positions are size_limit 9 or greater.
"""
def position_moves(s):
rest_of_row = '.'*(s-5)
first_three = rest_of_row.join([
'.X.Xo',
'X.Xoo',
'XXX..',''])
last_three = rest_of_row.join(['',
'.oooo',
'oooXX',
'oXXX.',])
board = first_three + '.'*s*(s-6) + last_three
position = go.Position(size=s)
moves_played = defaultdict()
for pt, symbol in enumerate(board):
if symbol == 'X':
position.move(move_pt=pt, colour=go.BLACK)
moves_played[pt] = go.BLACK
elif symbol == 'o':
position.move(move_pt=pt, colour=go.WHITE)
moves_played[pt] = go.WHITE
return position, moves_played
return position_moves
| 5,336,048
|
def _create_teams(
pool: pd.DataFrame,
n_iterations: int = 500,
n_teams: int = 10,
n_players: int = 10,
probcol: str = 'probs'
) -> np.ndarray:
"""Creates initial set of teams
Returns:
np.ndarray of shape
axis 0 - number of iterations
axis 1 - number of teams in league
axis 2 - number of players on team
"""
# get the teams, which are represented as 3D array
# axis 0 = number of iterations (leagues)
# axis 1 = number of teams in league
# axis 2 = number of players on team
arr = _multidimensional_shifting(
elements=pool.index.values,
num_samples=n_iterations,
sample_size=n_teams * n_players,
probs=pool[probcol]
)
return arr.reshape(n_iterations, n_teams, n_players)
| 5,336,049
|
def add_logger_filehandler(logger, filepath):
"""Adds additional file handler to the logger
Args:
logger: logger from `logging` module
filepath: output logging file
"""
# create file handler which logs even debug messages
fh = logging.FileHandler(filepath)
fh.setLevel(logger.level)
fh.setFormatter(LOGGING_FORMATTER)
logger.addHandler(fh)
| 5,336,050
|
def calculate_magnitude(data: np.ndarray) -> np.ndarray:
"""Calculates the magnitude for given (x,y,z) axes stored in numpy array"""
assert data.shape[1] == 3, f"Numpy array should have 3 axes, got {data.shape[1]}"
return np.sqrt(np.square(data).sum(axis=1))
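# Hedged usage sketch (assumes `import numpy as np`): a single (3, 4, 12) axis
# reading has magnitude sqrt(9 + 16 + 144) = 13.
reading = np.array([[3.0, 4.0, 12.0]])
assert np.allclose(calculate_magnitude(reading), [13.0])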
| 5,336,051
|
def clean_str(string: str) -> str:
""" Cleans strings for SQL insertion """
return string.replace('\n', ' ').replace("'", "’")
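# Hedged usage sketch: newlines collapse to spaces and straight apostrophes are
# swapped for typographic ones before SQL insertion.
assert clean_str("O'Brien\nwrote this") == "O’Brien wrote this"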
| 5,336,052
|
def _main() -> None:
"""urlretriveの進捗表示にtqdmを利用する。
Note:
- 参考文献: tqdm – PythonとCLIの高速で拡張できるプログレスバー: `https://githubja.com/tqdm/tqdm`
"""
URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/00264/EEG%20Eye%20State.arff"
with TqdmUpTo(unit="B", unit_scale=True, miniters=1, desc=URL.split("/")[-1]) as t:
request.urlretrieve(URL, filename=os.devnull, reporthook=t.update_to, data=None)
| 5,336,053
|
def zeros(shape, name=None):
"""All zeros."""
return tf.get_variable(name=name, shape=shape, dtype=tf.float32,
initializer=tf.zeros_initializer())
| 5,336,054
|
def test_julia_single_output_cpu_reducesum():
"""
Feature: custom julia operator, multiple inputs, single output, CPU, GRAPH_MODE
Description: pre-write xxx.jl, custom operator launches xxx.jl
Expectation: nn result matches numpy result
"""
    if platform.system() == 'Linux':
        context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
        julia_reducesum_test("reducesum.jl:ReduceSum:foo!", reducesum)
| 5,336,055
|
def test_wf_ndstate_cachelocations_updatespl(plugin, tmpdir):
"""
Two wfs with identical inputs and node state (that is set after adding the node!);
the second wf has cache_locations and should not recompute the results
"""
cache_dir1 = tmpdir.mkdir("test_wf_cache3")
cache_dir2 = tmpdir.mkdir("test_wf_cache4")
wf1 = Workflow(name="wf", input_spec=["x", "y"], cache_dir=cache_dir1)
wf1.add(
multiply(name="mult", x=wf1.lzin.x, y=wf1.lzin.y).split(splitter=("x", "y"))
)
wf1.add(add2_wait(name="add2", x=wf1.mult.lzout.out))
wf1.set_output([("out", wf1.add2.lzout.out)])
wf1.inputs.x = [2, 20]
wf1.inputs.y = [3, 4]
wf1.plugin = plugin
t0 = time.time()
with Submitter(plugin=plugin) as sub:
sub(wf1)
t1 = time.time() - t0
results1 = wf1.result()
assert results1.output.out == [8, 82]
wf2 = Workflow(
name="wf",
input_spec=["x", "y"],
cache_dir=cache_dir2,
cache_locations=cache_dir1,
)
wf2.add(multiply(name="mult", x=wf2.lzin.x, y=wf2.lzin.y))
wf2.add(add2_wait(name="add2", x=wf2.mult.lzout.out))
wf2.mult.split(splitter=("x", "y"))
wf2.set_output([("out", wf2.add2.lzout.out)])
wf2.inputs.x = [2, 20]
wf2.inputs.y = [3, 4]
wf2.plugin = plugin
t0 = time.time()
with Submitter(plugin=plugin) as sub:
sub(wf2)
t2 = time.time() - t0
results2 = wf2.result()
assert results2.output.out == [8, 82]
# for win and dask/slurm the time for dir creation etc. might take much longer
if not sys.platform.startswith("win") and plugin == "cf":
# checking the execution time
assert t1 > 2
assert t2 < 1
# checking all directories
assert wf1.output_dir.exists()
# checking if the second wf didn't run again
# checking all directories
assert not wf2.output_dir.exists()
| 5,336,056
|
def parseTemplate(bStream):
"""Parse the Template in current byte stream, it terminates when meets an object.
:param bStream: Byte stream
:return: The template.
"""
template = Template()
eof = endPos(bStream)
while True:
currPos = bStream.tell()
if currPos <eof:
desc = '{0:08b}'.format(readUSHORT(bStream))
bStream.seek(currPos, io.SEEK_SET)
if ComponentRole[desc[:3]] == OBJECT:
return template
else:
assert(int(desc[3])) # all components in Template must have label.
template._attrList.append(parseAttributeInTemplate(bStream))
else:
logger.warning("Encounter a Set without Objects")
break
| 5,336,057
|
def explode(req: str):
"""Returns the exploded dependency list for a requirements file.
As requirements files can include other requirements files with the -r directive, it can be
useful to see a flattened version of all the constraints. This method unrolls a requirement file
and produces a list of strings for each constraint line in the order of inclusion.
Args:
req: path to a requirements file.
Returns:
list of lines of requirements
"""
res = []
d = os.path.dirname(req)
with open(req) as f:
for l in f.readlines():
l = l.rstrip("\n")
l = l.lstrip(" ")
if l.startswith("-r"):
include = l.lstrip(" ").lstrip("-r").lstrip(" ")
# assuming relative includes always
res += explode(os.path.join(d, include))
elif l:
res += [l]
return res
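# Hedged usage sketch (assumes the `os` import used above plus `tempfile`):
# nested "-r" includes are flattened in inclusion order.
import tempfile
req_dir = tempfile.mkdtemp()
with open(os.path.join(req_dir, "base.txt"), "w") as fh:
    fh.write("numpy==1.24.0\n")
with open(os.path.join(req_dir, "reqs.txt"), "w") as fh:
    fh.write("-r base.txt\nrequests>=2.0\n")
assert explode(os.path.join(req_dir, "reqs.txt")) == ["numpy==1.24.0", "requests>=2.0"]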
| 5,336,058
|
def test_account_purchase_history_default_list(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
# Create 11 payments
for i in range(11):
rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request()), headers=headers)
invoice: Invoice = Invoice.find_by_id(rv.json.get('id'))
pay_account: PaymentAccount = PaymentAccount.find_by_id(invoice.payment_account_id)
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/payments/queries',
data=json.dumps({}),
headers=headers)
assert rv.status_code == 200
# Assert the total is coming as 10 which is the value of default TRANSACTION_REPORT_DEFAULT_TOTAL
assert rv.json.get('total') == 10
| 5,336,059
|
def plot_word_wpm_distribution(word_speeds, filter_func=lambda c: True):
"""Plots a distribution over average speeds of unique words."""
df = pd.read_csv(
word_speeds, header=None, names=["word", "duration", "wpm", "timestamp"]
)
gdf = list(filter(lambda t: filter_func(t[0]), df.groupby(["word"])))
wpms = [df["wpm"].median() for word, df in gdf]
ax = sns.histplot(wpms, kde=True, stat="probability")
ax.set_title("percentage of words typed at a certain speed")
ax.set_xlabel("typing speed in wpm")
ax.set_ylabel("percentage of words")
show_diagram()
| 5,336,060
|
def run1b():
"""Run the Task2B program"""
# Initialize
stations = build_station_list() # stations: [(name, ...), (), ...]
centre = (52.2053, 0.1218)
distance_sort_list = stations_by_distance(stations, centre)
# Build dictionary that maps station to town
name_town_dict = {}
for station in stations:
name_town_dict[station.name] = station.town
# Build list of tuples (name, town, distance)
item_list = []
for item in distance_sort_list: # item: (station.name, d)
key = item[0]
new_item = (key, name_town_dict[key], item[1])
item_list.append(new_item)
# Print first and last ten items in list
closest = item_list[:10]
farthest = item_list[-10:]
print(closest)
print(farthest)
| 5,336,061
|
def draw_lidar(
pc,
color=None,
fig=None,
bgcolor=(0, 0, 0),
pts_scale=0.3,
pts_mode="sphere",
pts_color=None,
color_by_intensity=False,
pc_label=False,
pc_range=[],
):
""" Draw lidar points
Args:
pc: numpy array (n,3) of XYZ
color: numpy array (n) of intensity or whatever
fig: mayavi figure handler, if None create new one otherwise will use it
Returns:
fig: created or used fig
"""
xmin, xmax, ymin, ymax, zmin, zmax = pc_range
pts_mode = "point"
print("====================", pc.shape)
if fig is None:
fig = mlab.figure(
figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(1600, 1000)
)
if color is None:
color = pc[:, 2]
mlab.points3d(
pc[:, 0],
pc[:, 1],
pc[:, 2],
color,
color=pts_color,
mode=pts_mode,
colormap="gnuplot",
scale_factor=pts_scale,
figure=fig,
vmax=zmax,
vmin=zmin,
)
# draw origin
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode="sphere", scale_factor=0.2)
# draw axis
axes = np.array(
[[2.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 2.0, 0.0]],
dtype=np.float64,
)
mlab.plot3d(
[0, axes[0, 0]],
[0, axes[0, 1]],
[0, axes[0, 2]],
color=(1, 0, 0),
tube_radius=None,
figure=fig,
)
mlab.plot3d(
[0, axes[1, 0]],
[0, axes[1, 1]],
[0, axes[1, 2]],
color=(0, 1, 0),
tube_radius=None,
figure=fig,
)
mlab.plot3d(
[0, axes[2, 0]],
[0, axes[2, 1]],
[0, axes[2, 2]],
color=(0, 0, 1),
tube_radius=None,
figure=fig,
)
# draw fov (todo: update to real sensor spec.)
a_ymin = abs(ymin)
a_ymax = abs(ymax)
fov = np.array(
[[a_ymax, a_ymax, 0.0, 0.0], [a_ymin, -a_ymin, 0.0, 0.0]], dtype=np.float64 # 45 degree
)
mlab.plot3d(
[0, fov[0, 0]],
[0, fov[0, 1]],
[0, fov[0, 2]],
color=(1, 1, 1),
tube_radius=None,
line_width=1,
figure=fig,
)
mlab.plot3d(
[0, fov[1, 0]],
[0, fov[1, 1]],
[0, fov[1, 2]],
color=(1, 1, 1),
tube_radius=None,
line_width=1,
figure=fig,
)
# draw square region
TOP_Y_MIN = ymin
TOP_Y_MAX = ymax
TOP_X_MIN = xmin
TOP_X_MAX = xmax
#TOP_Z_MIN = -2.0
#TOP_Z_MAX = 0.4
x1 = TOP_X_MIN
x2 = TOP_X_MAX
y1 = TOP_Y_MIN
y2 = TOP_Y_MAX
mlab.plot3d(
[x1, x1],
[y1, y2],
[0, 0],
color=(0.5, 0.5, 0.5),
tube_radius=0.1,
line_width=1,
figure=fig,
)
mlab.plot3d(
[x2, x2],
[y1, y2],
[0, 0],
color=(0.5, 0.5, 0.5),
tube_radius=0.1,
line_width=1,
figure=fig,
)
mlab.plot3d(
[x1, x2],
[y1, y1],
[0, 0],
color=(0.5, 0.5, 0.5),
tube_radius=0.1,
line_width=1,
figure=fig,
)
mlab.plot3d(
[x1, x2],
[y2, y2],
[0, 0],
color=(0.5, 0.5, 0.5),
tube_radius=0.1,
line_width=1,
figure=fig,
)
# mlab.orientation_axes()
mlab.view(
azimuth=180,
elevation=70,
focalpoint=[12.0909996, -1.04700089, -2.03249991],
distance=62.0,
figure=fig,
)
return fig
| 5,336,062
|
def load_circuit(filename:str):
""" Reads a MNSensitivity cicuit file (.mc) and returns a Circuit list
(format is 1D array of tuples, the first element contains a Component
object, the 2nd a SER/PAL string).
Format of the .mc file is:
* each line contains a Component object init string (See Component class
doc string to see format) after an orientation string (SER or PAL,
specifies if the component is series or parallel to ground).
* Comments can be specified by '#'
* Blank lines are skipped
* Components with earliest line number is assumed closest to source,
last line number closest to load, and progressively inbetween.
"""
circuit = []
lnum = 0
#Open file...
with open(filename) as file:
#For each line...
while True:
#Read line...
line = file.readline()
            lnum += 1
            if not line:
                break
#Break into tokens...
words = line.split()
if len(words) == 0:
continue
#Skip comments
if words[0] == "#" or words[0][0] == '#':
continue
if len(words) < 5:
print(f"ERROR: Fewer than 5 words on line {lnum}.")
print(words)
return []
try:
idx = line.find(" ")
new_comp = Component(line[idx+1:])
except:
print(f"Failed to interpret component string on line {lnum}.")
return []
if words[0].upper() == "SER":
circuit.append( (new_comp, "SER") )
elif words[0].upper() == "PAL":
circuit.append( (new_comp, "PAL") )
else:
unrectok = words[0]
print(f"ERROR: Unrecognized orientation token '{unrectok}' on line {lnum}. Acceptable tokens are 'SER' and 'PAL'.")
return []
return circuit
| 5,336,063
|
def get_output_attribute(out, attribute_name, cuda_device, reduction="sum"):
"""
This function handles processing/reduction of output for both
DataParallel or non-DataParallel situations.
For the case of multiple GPUs, This function will
sum all values for a certain output attribute in various batches
together.
Parameters
---------------------
:param out: Dictionary, output of model during forward pass,
:param attribute_name: str,
:param cuda_device: list or int
:param reduction: (string, optional) reduction to apply to the output. Default: 'sum'.
"""
if isinstance(cuda_device, list):
if reduction == "sum":
return out[attribute_name].sum()
elif reduction == "mean":
return out[attribute_name].sum() / float(len(out[attribute_name]))
else:
raise ValueError("invalid reduction type argument")
else:
return out[attribute_name]
| 5,336,064
|
def get_ref_aidxs(df_fs):
"""Part of the hotfix for redundant FCGs.
I did not record the occurrence id in the graphs, which was stupid.
So now I need to use the df_fs to get the information instead.
Needs to be used with fid col, which is defined in filter_out_fcgs_ffs_all.
"""
return {k: v for k, v in zip(df_fs['fid'], df_fs['_aidxf'])}
| 5,336,065
|
def set_clear_color(color='black'):
"""Set the screen clear color
This is a wrapper for gl.glClearColor.
Parameters
----------
color : str | tuple | instance of Color
Color to use. See vispy.color.Color for options.
"""
gl.glClearColor(*Color(color).rgba)
| 5,336,066
|
def format_info(info):
""" Print info neatly """
sec_width = 64
eq = ' = '
# find key width
key_widths = []
for section, properties in info.items():
for prop_key, prop_val in properties.items():
if type(prop_val) is dict:
key_widths.append(len(max(list(prop_val.keys()), key=len)) + 4)
else:
key_widths.append(len(prop_key))
key_width = max(key_widths)
# format items
msg = []
for section, properties in info.items():
n0 = (sec_width - 2 - len(section)) // 2
n1 = n0 if n0 * 2 + 2 + len(section) == sec_width else n0 + 1
msg.append('\n' + '=' * n0 + f' {section} ' + '=' * n1)
for prop_key, prop_val in properties.items():
if type(prop_val) is dict:
msg.append((prop_key + ' ').ljust(sec_width, '_'))
for sub_key, sub_val in prop_val.items():
msg.append(' ' * 4 + sub_key.ljust(key_width - 4) +
eq + str(sub_val))
else:
msg.append(prop_key.ljust(key_width) + eq + str(prop_val))
msg.append('=' * (n0 + n1 + 2 + len(section)))
return '\n'.join(msg)
| 5,336,067
|
def server_handle_hallu_message(
msg_output, controller, mi_info, options, curr_iter):
"""
Petridish server handles the return message of a forked
process that watches over a halluciniation job.
"""
log_dir_root = logger.get_logger_dir()
q_child = controller.q_child
model_str, model_iter, _parent_iter, search_depth = msg_output
# Record performance in the main log
jr = parse_remote_stop_file(_mi_to_dn(log_dir_root, model_iter))
if jr is None:
# job failure: reap the virtual resource and move on.
logger.info('Failed mi={}'.format(model_iter))
return curr_iter
(fp, ve, te, hallu_stats, l_op_indices, l_op_omega) = (
jr['fp'], jr['ve'], jr['te'], jr['l_stats'],
jr['l_op_indices'], jr['l_op_omega']
)
logger.info(
("HALLU : mi={} val_err={} test_err={} "
"Gflops={} hallu_stats={}").format(
model_iter, ve, te, fp * 1e-9, hallu_stats))
mi_info[model_iter].ve = ve
mi_info[model_iter].fp = fp
## compute hallucination related info in net_info
net_info = net_info_from_str(model_str)
hallu_locs = net_info.contained_hallucination() # contained
hallu_indices = net_info.sorted_hallu_indices(hallu_locs)
# feature selection based on params
l_fs_ops, l_fs_omega = feature_selection_cutoff(
l_op_indices, l_op_omega, options)
separated_hallu_info = net_info.separate_hallu_info_by_cname(
hallu_locs, hallu_indices, l_fs_ops, l_fs_omega)
## Select a subset of hallucination to add to child model
l_selected = []
# sort by -cos(grad, hallu) for the indices, 0,1,2,...,n_hallu-1.
processed_stats = [process_hallu_stats_for_critic_feat([stats]) \
for stats in hallu_stats]
logger.info('processed_stats={}'.format(processed_stats))
logger.info('separated_hallu_info={}'.format(separated_hallu_info))
# greedy select with gradient boosting
l_greedy_selected = []
if options.n_greed_select_per_init:
greedy_order = sorted(
range(len(hallu_indices)),
key=lambda i : - processed_stats[i][0])
min_select = options.n_hallus_per_select
max_select = max(min_select, len(hallu_indices) // 2)
for selected_len in range(min_select, max_select + 1):
selected = greedy_order[:selected_len]
l_greedy_selected.append(selected)
n_greedy_select = len(l_greedy_selected)
if n_greedy_select > options.n_greed_select_per_init:
# random choose
l_greedy_selected = list(np.random.choice(
l_greedy_selected,
options.n_greed_select_per_init,
replace=False))
# random select a subset
l_random_selected = []
if options.n_rand_select_per_init:
# also try some random samples
l_random_selected = online_sampling(
itertools.combinations(
range(len(hallu_indices)),
options.n_hallus_per_select
),
options.n_rand_select_per_init)
np.random.shuffle(l_random_selected)
l_selected = l_greedy_selected + l_random_selected
## for each selected subset of hallu, make a model for q_child
# since more recent ones tend to be better,
# we insert in reverse order, so greedy are inserted later.
for selected in reversed(l_selected):
# new model description
child_info = copy.deepcopy(net_info)
l_hi = [ hallu_indices[s] for s in selected ]
child_info = child_info.select_hallucination(
l_hi, separated_hallu_info)
# Compute initialization stat
stat = process_hallu_stats_for_critic_feat(
[hallu_stats[s] for s in selected])
# update mi_info
curr_iter += 1
child_str = child_info.to_str()
mi_info.append(ModelSearchInfo(
curr_iter, model_iter, search_depth+1,
None, None, child_str, stat))
controller.add_one_to_queue(
q_child, mi_info, curr_iter, child_info)
return curr_iter
| 5,336,068
|
def logger(status=False, perf_time=None):
"""Show the log of the app
:param status: show status of app.
:param perf_time : show the time passed for generate files
"""
file = open("build_log.txt", "a")
if not status:
file.write("Failed " + str(datetime.datetime.now()) + "\n")
else:
file.write("Success " + str(datetime.datetime.now()) + "\n")
file.write("Generation Time: " + str(perf_time) + "\n")
file.close()
| 5,336,069
|
def pBottleneckSparse_model(inputs, train=True, norm=True, **kwargs):
"""
    A pooled shallow bottleneck convolutional autoencoder model.
"""
# propagate input targets
outputs = inputs
# dropout = .5 if train else None
input_to_network = inputs['images']
shape = input_to_network.get_shape().as_list()
stride = 16
    hidden_size = 2   # np.ceil(shape[1] / stride)
    deconv_size = 12  # (shape[1] / hidden_size).astype(int)
### YOUR CODE HERE
with tf.variable_scope('conv1') as scope:
convweights = tf.get_variable(shape=[7, 7, 3, 64], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(), name='weights')
conv = tf.nn.conv2d(input_to_network, convweights,[1, 4, 4, 1], padding='SAME')
biases = tf.get_variable(initializer=tf.constant_initializer(0),
shape=[64], dtype=tf.float32, trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
relu = tf.nn.relu(bias, name='relu')
pool = tf.nn.max_pool(value=relu, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME', name='pool')
# assign layers to output
outputs['input'] = input_to_network
outputs['conv1_kernel'] = convweights
outputs['conv1'] = relu
outputs['pool1'] = pool
outputs['convweights'] = convweights
print(outputs['input'].shape)
print(outputs['conv1'].shape)
print(outputs['pool1'].shape)
with tf.variable_scope('deconv2') as scope:
deconvweights = tf.get_variable(shape=[deconv_size, deconv_size, 3, 64], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(), name='weights')
deconvRegularizer = tf.nn.l2_loss(deconvweights)
deconv = tf.nn.conv2d_transpose(outputs['pool1'], deconvweights,
outputs['input'].shape, [1, 12, 12, 1], padding='VALID', name=None)
# assign layers to output
outputs['deconv2'] = deconv
outputs['deconvweights'] = deconvweights
### END OF YOUR CODE
for k in ['input','conv1', 'deconv2']:
assert k in outputs, '%s was not found in outputs' % k
return outputs, {}
| 5,336,070
|
def CoarseDropout(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None, deterministic=False,
random_state=None, mask=None):
"""
Augmenter that sets rectangular areas within images to zero.
In contrast to Dropout, these areas can have larger sizes.
(E.g. you might end up with three large black rectangles in an image.)
Note that the current implementation leads to correlated sizes,
so when there is one large area that is dropped, there is a high likelihood
that all other dropped areas are also large.
This method is implemented by generating the dropout mask at a
lower resolution (than the image has) and then upsampling the mask
before dropping the pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all pixels. A value
of 1.0 would mean, that all pixels will be dropped. A value of
0.0 would lead to no pixels being dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
          width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
          upsampled to ``HxW``, where ``H`` is the image height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
          interval ``(a, b)`` and used as the percentages, i.e. the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a ``1x1`` low resolution mask, leading easily
to the whole image being dropped.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)
    drops 2 percent of all pixels on a lower-resolution image that has
50 percent of the original image's size, leading to dropped areas that
have roughly 2x2 pixels size.
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))
generates a dropout mask at 5 to 50 percent of image's size. In that mask,
0 to 5 percent of all pixels are dropped (random per image).
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))
same as previous example, but the lower resolution image has 2 to 16 pixels
size.
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)
drops 2 percent of all pixels at 50 percent resolution (2x2 sizes)
in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if size_px is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_px=size_px, min_size=min_size)
elif size_percent is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p3, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state, mask=mask)
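
# A minimal NumPy sketch (not the imgaug implementation) of the idea described in
# the docstring: sample a small Bernoulli keep/drop mask, upsample it by
# nearest-neighbour repetition, and multiply it into the image. The helper name
# and the 4x4 default mask size are illustrative assumptions.
import numpy as np

def coarse_dropout_sketch(image, p=0.02, size_px=4, rng=None):
    """Drop rectangular blocks by upsampling a low-resolution keep/drop mask."""
    rng = rng if rng is not None else np.random.default_rng()
    h, w = image.shape[:2]
    # Low-resolution mask: 1 = keep, 0 = drop, one sample per coarse cell.
    small = (rng.random((size_px, size_px)) >= p).astype(image.dtype)
    # Nearest-neighbour upsample to at least HxW, then crop.
    reps = (int(np.ceil(h / size_px)), int(np.ceil(w / size_px)))
    mask = np.kron(small, np.ones(reps, dtype=image.dtype))[:h, :w]
    return image * (mask[..., None] if image.ndim == 3 else mask)

dropped = coarse_dropout_sketch(np.ones((32, 32, 3), dtype=np.float32), p=0.1)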
| 5,336,071
|
def main():
"""
    The whole set-up. Here everything is called.
"""
wn = turtle.Screen()
ourTurtle = turtle.Turtle()
size = 600
# width is 90% of screen, height is 80% of screen. Screen appears at the center of screen
wn.setup(width=0.9, height=0.9, startx=None, starty=None)
ourTurtle.shape("turtle")
function_2(size) # Function call to function_2
function_1(ourTurtle, size) # Function call to function_1
wn.exitonclick()
| 5,336,072
|
def chenneling(x):
"""
    This function makes the dataset suitable for training.
    Grayscale images carry no channel dimension, so this function adds a
    channel axis for them; all images are then transposed to NCHW and cast to float.
"""
# if grayscale image
if(len(x.shape) == 3):
C = 1
N, H, W = x.shape
x = np.asarray(x).reshape((N, H, W, C))
else: # color image
pass
x = x.transpose(0, 3, 1, 2)
x = x.astype(float)
return x
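
# Hedged example of the resulting shape change: a grayscale batch (N, H, W) gains
# a channel axis and a colour batch (N, H, W, C) is transposed, both ending up in
# NCHW float format. The shapes below are made up.
import numpy as np

gray = np.zeros((10, 28, 28), dtype=np.uint8)
color = np.zeros((10, 32, 32, 3), dtype=np.uint8)
print(chenneling(gray).shape)   # (10, 1, 28, 28)
print(chenneling(color).shape)  # (10, 3, 32, 32)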
| 5,336,073
|
def _get_ordered_label_map(label_map):
"""Gets label_map as an OrderedDict instance with ids sorted."""
if not label_map:
return label_map
ordered_label_map = collections.OrderedDict()
for idx in sorted(label_map.keys()):
ordered_label_map[idx] = label_map[idx]
return ordered_label_map
| 5,336,074
|
def eight_interp(x, a0, a1, a2, a3, a4, a5, a6, a7):
"""``Approximation degree = 8``
"""
return (
a0
+ a1 * x
+ a2 * (x ** 2)
+ a3 * (x ** 3)
+ a4 * (x ** 4)
+ a5 * (x ** 5)
+ a6 * (x ** 6)
+ a7 * (x ** 7)
)
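
# Hedged fitting sketch: scipy.optimize.curve_fit infers the eight coefficients
# from the function signature; the target curve below is arbitrary.
import numpy as np
from scipy.optimize import curve_fit

x = np.linspace(-1, 1, 200)
y = np.sin(3 * x)
coeffs, _ = curve_fit(eight_interp, x, y)
print(np.max(np.abs(eight_interp(x, *coeffs) - y)))  # small residual for a degree-8 fit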
| 5,336,075
|
def create_ec2_instance(image_id, instance_type, keypair_name, user_data):
"""Provision and launch an EC2 instance
The method returns without waiting for the instance to reach
a running state.
:param image_id: ID of AMI to launch, such as 'ami-XXXX'
:param instance_type: string, such as 't2.micro'
:param keypair_name: string, name of the key pair
:return Dictionary containing information about the instance. If error,
returns None.
"""
# Provision and launch the EC2 instance
ec2_client = boto3.client('ec2')
try:
response = ec2_client.run_instances(ImageId=image_id,
InstanceType=instance_type,
KeyName=keypair_name,
MinCount=1,
MaxCount=1,
UserData=user_data,
SecurityGroups=[
'AllowSSHandOSB',
]
)
instance = response['Instances'][0]
except ClientError as e:
logging.error(e)
return None
    return instance
| 5,336,076
|
def get_pop(state):
"""Returns the population of the passed in state
Args:
- state: state in which to get the population
"""
abbrev = get_abbrev(state)
return int(us_areas[abbrev][1]) if abbrev != '' else -1
| 5,336,077
|
def GitHub_post(data, url, *, headers):
"""
POST the data ``data`` to GitHub.
Returns the json response from the server, or raises on error status.
"""
r = requests.post(url, headers=headers, data=json.dumps(data))
GitHub_raise_for_status(r)
return r.json()
| 5,336,078
|
def subsample(inputs, factor, scope=None):
"""Subsample the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels]
with the input, either intact (if factor == 1) or subsampled
(if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
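
# Since a 1x1 max pool with stride `factor` just keeps every factor-th row and
# column, the operation is equivalent to strided slicing; a hedged NumPy
# illustration with made-up shapes.
import numpy as np

x = np.arange(2 * 8 * 8 * 3).reshape(2, 8, 8, 3)  # [batch, height, width, channels]
subsampled = x[:, ::2, ::2, :]                    # what subsample(inputs, factor=2) computes
print(subsampled.shape)                           # (2, 4, 4, 3)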
| 5,336,079
|
def password_reset(*args, **kwargs):
"""
Override view to use a custom Form
"""
kwargs['password_reset_form'] = PasswordResetFormAccounts
return password_reset_base(*args, **kwargs)
| 5,336,080
|
def update_tab_six_two(
var,
time_filter,
month,
hour,
data_filter,
filter_var,
min_val,
max_val,
normalize,
global_local,
df,
):
"""Update the contents of tab size. Passing in the info from the dropdown and the general info."""
df = pd.read_json(df, orient="split")
time_filter_info = [time_filter, month, hour]
data_filter_info = [data_filter, filter_var, min_val, max_val]
heat_map = custom_heatmap(df, global_local, var, time_filter_info, data_filter_info)
no_display = {"display": "none"}
if data_filter:
return (
heat_map,
{},
barchart(df, var, time_filter_info, data_filter_info, normalize),
{},
)
return heat_map, no_display, {"data": [], "layout": {}, "frames": []}, no_display
| 5,336,081
|
async def blog_api(request: Request, year: int, month: int, day: int,
title: str) -> json:
"""Handle blog."""
blog_date = {"year": year, "month": month, "day": day}
req_blog = app.blog.get(xxh64(unquote(title)).hexdigest())
if req_blog:
if all(
map(lambda x: req_blog["date"][x] == blog_date[x],
req_blog["date"])):
return json(
{
"message": f"Hope you enjoy \"{unquote(title)}\"",
"status": request.headers,
"error": None,
"results": req_blog
},
status = 200)
else:
return redirect(f"/{req_blog['blog_path']}")
else:
raise BlogNotFound(f"Blog \"{unquote(title)}\" Not Found!")
| 5,336,082
|
def coherence_score_umass(X, inv_vocabulary, top_words, normalized=False):
"""
    Extrinsic UMass coherence measure.
    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        Document word matrix.
    inv_vocabulary: dict
        Mapping from word to column index, as produced by the vectorizer.
    top_words: list
        List of top words for each topic-sentiment pair.
    normalized: bool
        If True, return the normalized score (NPMI) instead of PMI.
    Returns
    -------
    score: float
"""
wordoccurances = (X > 0).astype(int)
N = X.shape[0]
totalcnt = 0
PMI = 0
NPMI = 0
for allwords in top_words:
for word1 in allwords:
for word2 in allwords:
if word1 != word2:
ind1 = inv_vocabulary[word1]
ind2 = inv_vocabulary[word2]
if ind1 > ind2:
denominator = (np.count_nonzero(wordoccurances > 0, axis=0)[
ind1]/N) * (np.count_nonzero(wordoccurances > 0, axis=0)[ind2]/N)
numerator = (
(np.matmul(wordoccurances[:, ind1], wordoccurances[:, ind2])) + 1) / N
PMI += np.log(numerator) - np.log(denominator)
NPMI += (np.log(denominator) / np.log(numerator)) - 1
totalcnt += 1
if normalized:
score = NPMI / totalcnt
else:
score = PMI / totalcnt
return score
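
# Hedged usage sketch with scikit-learn's CountVectorizer: its word-to-index
# mapping plays the role of `inv_vocabulary`; the corpus and the topic word
# lists are made up for illustration.
from sklearn.feature_extraction.text import CountVectorizer

docs = [
    "the cat sat on the mat",
    "the dog sat on the log",
    "cats and dogs are pets",
]
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(docs).toarray()   # document-word count matrix
word_to_index = vectorizer.vocabulary_         # word -> column index
top_words = [["cat", "mat"], ["dog", "log"]]   # one word list per topic
print(coherence_score_umass(X, word_to_index, top_words))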
| 5,336,083
|
def quiz_f(teq):
"""
    Run a fill-in-the-blank quiz
Params:
teq: dict
"""
# Init LED happy image
display.show(Image.HAPPY)
# This is an advanced topic as well however this little function
# cleans out the unnecessary global objects or variables on what
# we call the heap area in memory
gc.collect()
# Init score object
score = 0
# Here we iterate through our quiz database
for key in teq:
print(key)
say(str(key), speed=SPEED)
response = input('ANSWER: ')
response = response.lower()
correct_answer = teq[key].lower()
if response == correct_answer:
display.show(Image.SURPRISED)
print('CORRECT!')
say('CORRECT!', speed=SPEED)
display.show(Image.HAPPY)
score += 1
else:
display.show(Image.SURPRISED)
print('The correct answer is {0}.'.format(teq[key]))
say('The correct answer is', speed=SPEED)
say(str(teq[key]), speed=SPEED)
display.show(Image.HAPPY)
time.sleep(1)
gc.collect()
# Here we reply to the student their score
display.show(Image.SURPRISED)
print('You got {0} out of {1} correct!'.format(score, len(teq)))
say('You got', speed=SPEED)
say(str(score), speed=SPEED)
say('out of', speed=SPEED)
say(str(len(teq)), speed=SPEED)
say('correct!', speed=SPEED)
# If student got a perfect score respond appropriately
    # or provide an encouraging message to retry the quiz
if score == len(teq):
print('You got a perfect score!')
say('You got a perfect score!', speed=SPEED)
print('Well done!')
say('Well done!, ', speed=SPEED)
print('I am so proud of you!')
say('I am so proud of you!', speed=SPEED)
play(POWER_UP)
else:
print('You are doing a great job!')
say('You are doing a great job!', speed=SPEED)
print('I would LOVE for you to try again!')
say('I would LOVE for you to try again!', speed=SPEED)
# Display happy response at the end of the quiz
display.show(Image.HAPPY)
gc.collect()
| 5,336,084
|
def _splitaddr(addr):
"""
    splits a cell address into its column letters and row number
    :param addr: cell address string, e.g. 'B12'
    :return: (column letters, row number)
"""
col='';rown=0
for i in range(len(addr)):
if addr[i].isdigit():
col = addr[:i]
rown = int(addr[i:])
break
elif i==len(addr)-1:
col=addr
return col,rown
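
# Hedged behaviour examples on spreadsheet-style addresses:
print(_splitaddr("B12"))    # ('B', 12)
print(_splitaddr("AA101"))  # ('AA', 101)
print(_splitaddr("XFD"))    # ('XFD', 0) -- no digits, so the row defaults to 0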
| 5,336,085
|
def checksum(data):
"""
    Compute the one-byte checksum of the payload (256 minus the byte sum modulo 256).
    :return: bytes
"""
assert isinstance(data, bytes)
assert len(data) >= MINIMUM_MESSAGE_SIZE - 2
assert len(data) <= MAXIMUM_MESSAGE_SIZE - 2
__checksum = 0
for data_byte in data:
__checksum += data_byte
__checksum = -(__checksum % 256) + 256
try:
__checksum = bytes([__checksum])
except ValueError:
__checksum = bytes([0])
return __checksum
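
# A short worked example of the arithmetic (hedged; a real payload must also
# satisfy the MINIMUM/MAXIMUM size asserts above):
payload_sum = 515                   # hypothetical sum of the message bytes
value = -(payload_sum % 256) + 256  # 515 % 256 = 3, so value = 253
print(bytes([value]))               # b'\xfd'
# When the sum is an exact multiple of 256 the intermediate value is 256,
# bytes([256]) raises ValueError, and the except branch returns b'\x00'.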
| 5,336,086
|
def as_character(
x,
str_dtype=str,
_na=np.nan,
):
"""Convert an object or elements of an iterable into string
Aliases `as_str` and `as_string`
Args:
x: The object
str_dtype: The string dtype to convert to
        _na: How NAs should be cast. Specifying np.nan keeps them unchanged,
            but the dtype will then be object.
Returns:
When x is an array or a series, return x.astype(str).
When x is iterable, convert elements of it into strings
Otherwise, convert x to string.
"""
return _as_type(x, str_dtype, na=_na)
| 5,336,087
|
def test_logger(request: HttpRequest) -> HttpResponse:
"""
Generate a log to test logging setup.
Use a GET parameter to specify level, default to INFO if absent. Value can be INFO, WARNING, ERROR,
EXCEPTION, UNCATCHED_EXCEPTION.
Use a GET parameter to specify message, default to "Test logger"
Example: test_logger?level=INFO&message=Test1
:param request: HttpRequest request
:return: HttpResponse web response
"""
message = request.GET.get("message", "Test logger")
level = request.GET.get("level", "INFO")
if level not in ("INFO", "WARNING", "ERROR", "EXCEPTION", "UNCATCHED_EXCEPTION"):
level = "INFO"
if level == "INFO":
logger.info(message)
elif level == "WARNING":
logger.warning(message)
elif level == "ERROR":
logger.error(message)
elif level == "EXCEPTION":
try:
raise Exception(message)
except Exception:
logger.exception("test_logger")
else:
assert level == "UNCATCHED_EXCEPTION", "should never happen"
raise Exception(message)
return HttpResponse("ok")
| 5,336,088
|
def cvm_informes (year: int, mth: int) -> pd.DataFrame:
"""Downloads the daily report (informe diario) from CVM for a given month and year\n
<b>Parameters:</b>\n
year (int): The year of the report the function should download\n
mth (int): The month of the report the function should download\n
<b>Returns:</b>\n
pd.DataFrame: Pandas dataframe with the report for the given month and year. If the year is previous to 2017, will contain data regarding the whole year
"""
if int(year) >= 2017: #uses download process from reports after the year of 2017
try:
mth = f"{mth:02d}"
year = str(year)
#creates url using the parameters provided to the function
url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/inf_diario_fi_'+year+mth+'.csv'
#reads the csv returned by the link
cotas = pd.read_csv(url, sep =';')
cotas['DT_COMPTC'] = pd.to_datetime(cotas['DT_COMPTC']) #casts date column to datetime
try:
#removes column present in only a few reports to avoid inconsistency when making the union of reports
cotas.drop(columns = ['TP_FUNDO'], inplace = True)
except KeyError:
pass
return cotas
except HTTPError:
            print("there's no report for this date yet!\n")
if int(year) < 2017:
try:
year = str(year)
url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/HIST/inf_diario_fi_' + year + '.zip'
#sends request to the url
r = requests.get(url, stream=True, allow_redirects=True)
with open('informe' + year + '.zip', 'wb') as fd: #writes the .zip file downloaded
fd.write(r.content)
zip_inf = zipfile.ZipFile('informe' + year + '.zip') #opens the .zip file
            # reads the csv files inside the zip archive
informes = [pd.read_csv(zip_inf.open(f), sep=";") for f in zip_inf.namelist()]
cotas = pd.concat(informes,ignore_index=True)
cotas['DT_COMPTC'] = pd.to_datetime(cotas['DT_COMPTC']) #casts date column to datetime
            zip_inf.close() # closes the zip file
os.remove('informe' + year + '.zip') #deletes .zip file
return cotas
except Exception as E:
print(E)
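
# Hedged usage sketch (both calls hit the CVM servers, so they need network access):
may_2021 = cvm_informes(2021, 5)   # single-month CSV endpoint (2017 onwards)
hist_2015 = cvm_informes(2015, 1)  # zipped full-year history (before 2017)
print(may_2021.head())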
| 5,336,089
|
def remoteness(N):
"""
Compute the remoteness of N.
Parameters
----------
N : Nimber
The nimber of interest.
Returns
-------
remote : int
The remoteness of N.
"""
if N.n == 0:
return 0
remotes = {remoteness(n) for n in N.left}
if all(remote % 2 == 1 for remote in remotes):
return 1 + max(remotes)
else:
return 1 + min(remote for remote in remotes if remote % 2 == 0)
| 5,336,090
|
def breakfast_analysis_variability(in_path,identifier, date_col, time_col, min_log_num=2, min_separation=4, plot=True):
"""
Description:\n
    This function calculates the variability of loggings on good logging days by subtracting the 50% (median) breakfast time from the 5%, 10%, 25%, 50%, 75%, 90% and 95% quantiles of breakfast time. It can also make a histogram of the 90%-10% interval across all subjects.\n
Input:\n
- in_path (str, pandas df): input path, file in pickle, csv or panda dataframe format.
- identitfier(str) : participants' unique identifier such as id, name, etc.
- date_col(str) : the column that represents the dates.
- time_col(str) : the column that represents the float time.
- min_log_num (count,int): filtration criteria on the minimum number of loggings each day.
    - min_separation(hours,int): filtration criteria on the minimum separation between the earliest and latest loggings each day.
    - plot(bool) : whether to generate a histogram of breakfast variability. Default = True.
Return:\n
- A dataframe that contains 5%,10%,25%,50%,75%,90%,95% quantile of breakfast time minus 50% time for each subjects from the in_path file.\n
Requirements:\n
in_path file must have the following columns:\n
- unique_code\n
- date\n
- local_time\n
"""
df = universal_key(in_path)
# leave only the loggings in a good logging day
df['in_good_logging_day'] = in_good_logging_day(df, identifier, time_col, min_log_num, min_separation)
df = df[df['in_good_logging_day']==True]
breakfast_series = df.groupby(['unique_code', 'date'])['local_time'].min().groupby('unique_code').quantile([0.05, 0.10, 0.25, 0.5, 0.75, 0.90, 0.95])
breakfast_df = pd.DataFrame(breakfast_series)
all_rows = []
for index in breakfast_df.index:
tmp_dict = dict(breakfast_series[index[0]])
tmp_dict['id'] = index[0]
all_rows.append(tmp_dict)
breakfast_summary_df = pd.DataFrame(all_rows, columns = ['id', 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95])\
.rename(columns = {0.05: '5%', 0.1: '10%', 0.25: '25%', 0.5: '50%', 0.75: '75%', 0.9: '90%', 0.95: '95%'})\
.drop_duplicates().reset_index(drop = True)
breakfast_variability_df = breakfast_summary_df.copy()
for col in breakfast_variability_df.columns:
if col == 'id' or col == '50%':
continue
breakfast_variability_df[col] = breakfast_variability_df[col] - breakfast_variability_df['50%']
breakfast_variability_df['50%'] = breakfast_variability_df['50%'] - breakfast_variability_df['50%']
if plot == True:
fig, ax = plt.subplots(1, 1, figsize = (10, 10), dpi=80)
sns_plot = sns.distplot( breakfast_variability_df['90%'] - breakfast_variability_df['10%'] )
ax.set(xlabel='Variation Distribution for Breakfast (90% - 10%)', ylabel='Kernel Density Estimation')
return breakfast_variability_df
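
# Hedged call sketch; 'food_logs.csv' is a hypothetical file that must contain the
# unique_code, date and local_time columns listed in the docstring.
variability = breakfast_analysis_variability(
    'food_logs.csv',
    identifier='unique_code',
    date_col='date',
    time_col='local_time',
    min_log_num=2,
    min_separation=4,
    plot=False,
)
print(variability.head())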
| 5,336,091
|
def _sdss_wcs_to_log_wcs(old_wcs):
"""
The WCS in the SDSS files does not appear to follow the WCS standard - it
claims to be linear, but is logarithmic in base-10.
The wavelength is given by:
λ = 10^(w0 + w1 * i)
with i being the pixel index starting from 0.
    The FITS standard uses a natural log with a slightly different formulation,
see WCS Paper 3 (which discusses spectral WCS).
This function does the conversion from the SDSS WCS to FITS WCS.
"""
w0 = old_wcs.wcs.crval[0]
w1 = old_wcs.wcs.cd[0,0]
crval = 10 ** w0
cdelt = crval * w1 * np.log(10)
cunit = old_wcs.wcs.cunit[0] or Unit('Angstrom')
ctype = "WAVE-LOG"
w = WCS(naxis=1)
w.wcs.crval[0] = crval
w.wcs.cdelt[0] = cdelt
w.wcs.ctype[0] = ctype
w.wcs.cunit[0] = cunit
w.wcs.set()
return w
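
# Quick NumPy check of the conversion (hedged, with illustrative header values):
# the linear-in-log10 SDSS grid 10**(w0 + w1*i) should coincide with the
# natural-log FITS form crval * exp(cdelt / crval * i).
import numpy as np

w0, w1 = 3.5796, 1e-4
i = np.arange(5)
sdss = 10 ** (w0 + w1 * i)
crval = 10 ** w0
cdelt = crval * w1 * np.log(10)
fits = crval * np.exp(cdelt / crval * i)
print(np.allclose(sdss, fits))  # True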
| 5,336,092
|
def request_records(request):
"""show the datacap request records"""
address = request.POST.get('address')
page_index = request.POST.get('page_index', '1')
page_size = request.POST.get('page_size', '5')
page_size = interface.handle_page(page_size, 5)
page_index = interface.handle_page(page_index, 1)
msg_code, msg_data = interface.request_record(address=address)
obj = Page(msg_data, page_size).page(page_index)
data_list = []
for i in obj.get('objects'):
msg_cid = i.msg_cid
assignee = i.assignee
comments_url = i.comments_url
data_list.append({
'assignee': assignee,
'created_at': i.created_at.strftime('%Y-%m-%d %H:%M:%S') if i.created_at else i.created_at,
'region': i.region,
'request_datacap': i.request_datacap,
'status': i.status,
'allocated_datacap': i.allocated_datacap,
'msg_cid': msg_cid,
'url': interface.get_req_url(i.comments_url),
'height': get_height(msg_cid),
'name': i.name,
'media': i.media,
'github_url': get_github_url(comments_url),
'issue_id': get_api_issue_id(comments_url),
'notary': get_notary_by_github_account(assignee),
})
return format_return(0, data={"objs": data_list, "total_page": obj.get('total_page'),
"total_count": obj.get('total_count')})
| 5,336,093
|
def custom_db(app, CustomMetadata):
"""Database fixture."""
InvenioDB(app)
if not database_exists(str(db_.engine.url)):
create_database(str(db_.engine.url))
db_.create_all()
yield db_
db_.session.remove()
db_.drop_all()
| 5,336,094
|
def extendCorrespondingAtomsDictionary(names, str1, str2):
"""
    extends the atom-name pairs in `names` based on the whitespace-split tokens of str1 & str2
"""
list1 = str1.split()
list2 = str2.split()
for i in range(1, len(list1)):
names[list1[0]][list2[0]].append([list1[i], list2[i]])
names[list2[0]][list1[0]].append([list2[i], list1[i]])
return None
| 5,336,095
|
def _device_name(data):
"""Return name of device tracker."""
if ATTR_BEACON_ID in data:
return "{}_{}".format(BEACON_DEV_PREFIX, data['name'])
return data['device']
| 5,336,096
|
def get_share_path(
storage_server: StorageServer, storage_index: bytes, sharenum: int
) -> FilePath:
"""
Get the path to the given storage server's storage for the given share.
"""
return (
FilePath(storage_server.sharedir)
.preauthChild(storage_index_to_dir(storage_index))
.child("{}".format(sharenum))
)
| 5,336,097
|
def focal_loss_with_prob(prob,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""A variant of Focal Loss used in TOOD."""
target_one_hot = prob.new_zeros(len(prob), len(prob[0]) + 1)
target_one_hot = target_one_hot.scatter_(1, target.unsqueeze(1), 1)[:, :-1]
flatten_alpha = torch.empty_like(prob).fill_(1 - alpha)
flatten_alpha[target_one_hot == 1] = alpha
pt = torch.where(target_one_hot == 1, prob, 1 - prob)
ce_loss = F.binary_cross_entropy(prob, target_one_hot, reduction='none')
loss = flatten_alpha * torch.pow(1 - pt, gamma) * ce_loss
if weight is not None:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
| 5,336,098
|
def root_key_from_seed(seed):
"""This derives your master key the given seed.
Implemented in ripple-lib as ``Seed.prototype.get_key``, and further
is described here:
https://ripple.com/wiki/Account_Family#Root_Key_.28GenerateRootDeterministicKey.29
"""
seq = 0
while True:
private_gen = from_bytes(first_half_of_sha512(
b''.join([seed, to_bytes(seq, 4)])))
seq += 1
if curves.SECP256k1.order >= private_gen:
break
public_gen = curves.SECP256k1.generator * private_gen
# Now that we have the private and public generators, we apparently
# have to calculate a secret from them that can be used as a ECDSA
# signing key.
secret = i = 0
public_gen_compressed = ecc_point_to_bytes_compressed(public_gen)
while True:
secret = from_bytes(first_half_of_sha512(
b"".join([
public_gen_compressed, to_bytes(0, 4), to_bytes(i, 4)])))
i += 1
if curves.SECP256k1.order >= secret:
break
secret = (secret + private_gen) % curves.SECP256k1.order
# The ECDSA signing key object will, given this secret, then expose
# the actual private and public key we are supposed to work with.
key = SigningKey.from_secret_exponent(secret, curves.SECP256k1)
# Attach the generators as supplemental data
key.private_gen = private_gen
key.public_gen = public_gen
return key
| 5,336,099
|