| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import logging
import urllib.request
import urllib.error
import re
def proxyFromPacFiles(pacURLs=None, URL=None, log=True):
"""Attempts to locate and setup a valid proxy server from pac file URLs
:Parameters:
- pacURLs : list
List of locations (URLs) to look for a pac file. This might
come from :func:`~psychopy.web.getPacFiles` or
:func:`~psychopy.web.getWpadFiles`.
- URL : string
The URL to use when testing the potential proxies within the files
:Returns:
- A urllib.request.ProxyHandler if successful (and this will have
been added as an opener to the urllib)
- False if no proxy was found in the files that allowed successful
connection
"""
    if pacURLs is None:  # if given none try to find some
pacURLs = getPacFiles()
if pacURLs == []: # if still empty search for wpad files
pacURLs = getWpadFiles()
# for each file search for valid urls and test them as proxies
for thisPacURL in pacURLs:
if log:
msg = 'proxyFromPacFiles is searching file:\n %s'
logging.debug(msg % thisPacURL)
try:
response = urllib.request.urlopen(thisPacURL, timeout=2)
except urllib.error.URLError:
if log:
logging.debug("Failed to find PAC URL '%s' " % thisPacURL)
continue
pacStr = response.read().decode('utf-8')
# find the candidate PROXY strings (valid URLS), numeric and
# non-numeric:
pattern = r"PROXY\s([^\s;,:]+:[0-9]{1,5})[^0-9]"
possProxies = re.findall(pattern, pacStr + '\n')
for thisPoss in possProxies:
proxUrl = 'http://' + thisPoss
handler = urllib.request.ProxyHandler({'http': proxUrl})
            if tryProxy(handler):
if log:
logging.debug('successfully loaded: %s' % proxUrl)
opener = urllib.request.build_opener(handler)
urllib.request.install_opener(opener)
return handler
return False
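# Minimal usage sketch (the PAC and test URLs are placeholders; getPacFiles,
# getWpadFiles and tryProxy are assumed to come from the surrounding module):
handler = proxyFromPacFiles(pacURLs=['http://wpad.example.com/wpad.dat'],
                            URL='http://example.com')
print('proxy installed' if handler else 'no working proxy found')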
|
90d61afcd7473c43e257bc92cb205b1870273845
| 3,642,500
|
import numpy as np
import matplotlib.pyplot as plt
def subplots(times,nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
gridspec_kw=None, **fig_kw):
""" create figure and subplot axes with same time (x) axis
Non-Market hours will not be included in the plot.
Notably a custom projection is used for the time axis, and the time values
are expected to be the same in all subplots. The Axes that are returned
are not of the usual matplotlib Axes format, but of our custom TSeriesAxes
    type. The usual Axes methods are available, in addition to some methods
specific to TSeriesAxes.
Args:
- times: the time series for all subplots. Pandas DatetimeIndex object
expected.
- following arguments follow matplotlib pyplot.subplots format
Returns:
- (fig, axes) tuple:
* fig: matplotlib.figure.Figure object
* axes: array of systrade.plotting.utils.TSeriesAxes objects
"""
fig, axes = plt.subplots(nrows, ncols,
sharex=sharex,
sharey=sharey,
squeeze=squeeze,
gridspec_kw=gridspec_kw,
subplot_kw=dict(projection='time_series'),
**fig_kw)
if nrows*ncols>1:
for ax in axes:
ax.set_xaxis_markettime(times)
else:
axes_arr=np.empty((1,),dtype=utils.TSeriesAxes)
axes_arr[0] = axes
axes_arr[0].set_xaxis_markettime(times)
axes = axes_arr
return fig,axes
|
dd10ae6939587cac36e9b4bff67e2426357c6635
| 3,642,501
|
def test_env(testenv, agent, config):
"""
    Test of a GYM environment with an agent deciding on actions based
    on the environment state. The test is repeated for the indicated
    number of iterations. The status of the environment at each step is
    displayed if verbose is activated. The status history is returned.
    Parameters
    ----------
    testenv : gym.Env
        Instance of the GYM environment to test.
    agent
        Trained agent that will decide over actions.
    config : dict
        Test configuration with keys 'iter' (number of test iterations),
        'verbose' (whether the status of the environment is displayed at
        each step) and 'init_freqs' (list of initial frequencies passed
        to ``testenv.reset``; may be empty).
Returns
-------
history : dict(str, str, dict)
Status history of the environment during test, for each iteration
and step.
"""
iterations = config['iter']
verbose = config['verbose']
init_freqs = config['init_freqs']
numfreqs = len(init_freqs)
freqcount = 0
history = {}
# TEST ITERATIONS
for i in range(iterations):
# INITIAL STATUS
if numfreqs != 0:
state = testenv.reset( init_freqs[freqcount] )
freqcount += 1
freqcount %= numfreqs
else:
state = testenv.reset()
status = testenv._info.copy()
history[f"Iteration {i + 1}"] = {}
history[f"Iteration {i + 1}"][0] = status
if verbose:
print(f"Iteration {i + 1}")
print("---------------")
display_status("Step 0", status)
# STEPS TO GOAL (OR MAXSTEPS)
for s in range(testenv.MAXSTEPS):
action = agent.compute_action(state)
state, _, done, info = testenv.step(action)
status = info.copy()
history[f"Iteration {i + 1}"][s + 1] = status
if verbose:
display_status(f"Step {s + 1}", status)
if done:
break
return history
|
7a32e942f22f983e6377568be6e2019a72574eac
| 3,642,502
|
import numpy as np
def kldivergence(p, q):
"""Kullback-Leibler divergence D(P || Q) for discrete distributions
Parameters
----------
p, q : array-like, dtype=float, shape=n
Discrete probability distributions.
"""
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
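# Minimal numerical check (uses the numpy import above): D(P || Q) in nats
# for two simple two-outcome distributions.
print(kldivergence([0.5, 0.5], [0.9, 0.1]))  # ~0.511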
|
0c0bff8e1f4d9c5ca57b555cffe5d827350106b5
| 3,642,503
|
import os
def read_file(file_dir: str, filename: str) -> bytes:
""" Read file contents to bytes """
with open(os.path.join(file_dir, filename), "rb") as f:
data = f.read()
return data
|
b0e8207a25d6dd85fcd4701696d40146282a6095
| 3,642,504
|
import numpy as np
def sortPermutations(perms, index_return=False):
    """
    Sort perms with respect to (1) their length and (2) their lexical order
    @param perms: iterable of permutations (sequences) to sort
    @param index_return: if True, also return the indices of the sorted
        permutations in the original list
    """
ans = [None] * len(perms)
indices = np.ndarray(len(perms), dtype="int")
ix = 0
for n in np.sort(np.unique([len(key) for key in perms])):
# load subset of perms with length n
nperms = {}
for i, perm in enumerate(perms):
if len(perm) == n:
tperm = tuple(perm)
nperms[tperm] = i
for perm in sorted(nperms.keys()):
ans[ix] = perm
indices[ix] = nperms[perm]
ix += 1
if index_return:
return ans, indices
else:
return ans
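# Minimal usage sketch: shorter permutations come first, ties broken lexically.
perms = [(2, 1), (1,), (1, 2)]
print(sortPermutations(perms))                        # [(1,), (1, 2), (2, 1)]
print(sortPermutations(perms, index_return=True)[1])  # [1 2 0] (original indices)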
|
d20833936756f3617e394fdb62f45e798c5cc416
| 3,642,505
|
import numpy as np
def get_laplacian(Dx,Dy):
    """
    Return the discrete Laplacian computed from the x- and y-gradient fields Dx and Dy.
    """
[H,W] = Dx.shape
Dxx, Dyy = np.zeros((H,W)), np.zeros((H,W))
j,k = np.atleast_2d(np.arange(0,H-1)).T, np.arange(0,W-1)
Dxx[j,k+1] = Dx[j,k+1] - Dx[j,k]
Dyy[j+1,k] = Dy[j+1,k] - Dy[j,k]
return Dxx+Dyy
|
77dce6adecdf1effd4922f18dc0ec1d20f4e69f4
| 3,642,506
|
def recipes_ending(language: StrictStr, ending: StrictStr):
"""
Show the recipe for a word-ending.
Given an input language and an ending, present the user with
the recipe that will be used to build grammatical cases
for that specific ending.
And this path operation will:
    * return a single recipe for the ending specified in the path
"""
try:
recipes = Recipes(language=language.lower())
recipes.load()
if ending not in recipes._dict.keys():
raise HTTPException(
status_code=HTTP_404_NOT_FOUND, detail="Ending not found"
)
return {"language": language, "ending": ending, "recipe": recipes._dict[ending]}
except LanguageNotFoundError:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND, detail=f"Language: {language} not found."
)
|
5584f4005d76a0ac13a73cfffd35b7e1f5d5d538
| 3,642,507
|
def blur(grid, blurring):
"""
Spreads probability out on a grid using a 3x3 blurring window.
The blurring parameter controls how much of a belief spills out
into adjacent cells. If blurring is 0 this function will have
no effect.
"""
height = len(grid)
width = len(grid[0])
center_prob = 1.0-blurring
corner_prob = blurring / 12.0
adjacent_prob = blurring / 6.0
window = [
[corner_prob, adjacent_prob, corner_prob],
[adjacent_prob, center_prob, adjacent_prob],
[corner_prob, adjacent_prob, corner_prob]
]
new = [[0.0 for i in range(width)] for j in range(height)]
for i in range(height):
for j in range(width):
grid_val = grid[i][j]
for dx in range(-1,2):
for dy in range(-1,2):
mult = window[dx+1][dy+1]
new_i = (i + dy) % height
new_j = (j + dx) % width
new[new_i][new_j] += mult * grid_val
return normalize(new)
|
dec94ad3d2f14d4ac12b6799d8ee581afe73f53d
| 3,642,508
|
def _row_adress(addr='1'):
"""returns the rown number for a column adress"""
return _cell_address(''.join(['A', addr]))[1]
|
eba4cc5a30a539b6bf021279698070c4ae10ee20
| 3,642,509
|
import tensorflow as tf
def split(x, num, axis):
"""
Splits a tensor into a list of tensors.
:param x: [Tensor] A TensorFlow tensor object to be split.
:param num: [int] Number of splits.
:param axis: [int] Axis along which to be split.
:return: [list] A list of TensorFlow tensor objects.
"""
if tf.__version__.startswith('0'): # 0.12 compatibility.
return tf.split(axis, num, x)
else:
return tf.split(x, num, axis)
|
6461e346261cc01e5b2ddc0bee164b49ea03035b
| 3,642,510
|
def delete_sensor_values(request):
"""Delete values from a sensor
"""
params = request.GET
action = params.get('action')
sensor_id = params.get('sensor')
delete_where = params.get('delete_where')
where_value = params.get('value')
where_start_date = params.get('start_date')
where_end_date = params.get('end_date')
db = bmsdata.BMSdata()
# qs = models.Sensor.objects.filter(sensor_id=params['sensor_from'])[0]
where_clause = ''
if delete_where == 'all_values':
pass
elif delete_where == 'value_equals':
where_clause = f'WHERE val = {where_value}'
elif delete_where == 'values_gt':
where_clause = f'WHERE val > {where_value}'
elif delete_where == 'values_lt':
where_clause = f'WHERE val < {where_value}'
elif delete_where == 'dates_between':
where_clause = f'WHERE ts > {bmsapp.data_util.datestr_to_ts(where_start_date)} and ts < {bmsapp.data_util.datestr_to_ts(where_end_date)}'
else:
return HttpResponse(f'Invalid parameter: {delete_where}', status=406)
if action == 'query':
try:
db.cursor.execute(
f'SELECT COUNT(*) FROM [{sensor_id}] {where_clause}')
rec_ct = db.cursor.fetchone()[0]
except Exception as e:
return HttpResponse(e, status=500)
if rec_ct == 0:
return HttpResponse('No records found that meet the criteria!', status=406)
else:
return HttpResponse(f'Do you really want to delete {rec_ct:,} records from {sensor_id}?')
else:
try:
db.cursor.execute(
f'DELETE FROM [{sensor_id}] {where_clause}')
if delete_where == 'all_values':
db.cursor.execute(f'DROP TABLE [{sensor_id}]')
qs = models.Sensor.objects.filter(
sensor_id=sensor_id)
if len(qs) > 0:
qs[0].delete()
db.conn.commit()
except Exception as e:
return HttpResponse(repr(e), status=500)
return HttpResponse('Records Deleted')
|
049119e50ea2613c8f8c2ccaa674fd99a35c07e8
| 3,642,511
|
import collections
def _parse_voc_xml(node):
"""
Extracted from torchvision
"""
voc_dict = {}
children = list(node)
if children:
def_dic = collections.defaultdict(list)
for dc in map(_parse_voc_xml, children):
for ind, v in dc.items():
def_dic[ind].append(v)
if node.tag == 'annotation':
def_dic['object'] = [def_dic['object']]
voc_dict = {
node.tag:
{ind: v[0] if len(v) == 1 else v
for ind, v in def_dic.items()}
}
if node.text:
text = node.text.strip()
if not children:
voc_dict[node.tag] = text
return voc_dict
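# Minimal usage sketch: parse a tiny VOC-style annotation with ElementTree.
import xml.etree.ElementTree as ET
xml_str = '<annotation><filename>img.jpg</filename><object><name>dog</name></object></annotation>'
print(_parse_voc_xml(ET.fromstring(xml_str)))
# {'annotation': {'filename': 'img.jpg', 'object': [{'name': 'dog'}]}}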
|
58ef998cdf36ce4620042736ff27fc06d4c277a6
| 3,642,512
|
from ostap.math.integral import integral as _integral
def sp_integrate_3Dy ( func ,
x , z ,
ymin , ymax , *args , **kwargs ) :
"""Make 1D numerical integration over y-axis
>>> func = ... ## func ( x , y , z )
## x , z , ymin , ymax
>>> print func.sp_integrate_y ( 0.5 , 0.1 , -20 , 20 )
"""
def _func_ ( p , *args ) :
return func ( x , p , z , *args )
return _integral ( _func_ ,
ymin , ymax ,
*args , **kwargs )
|
8e641911a7cbdf215cb5a0e8c12d122a14c83638
| 3,642,513
|
import numpy as np
import gnumpy as gnp
def log_exp_sum_1d(x):
"""
This computes log(exp(x_1) + exp(x_2) + ... + exp(x_n)) as
x* + log(exp(x_1-x*) + exp(x_2-x*) + ... + exp(x_n-x*)), where x* is the
max over all x_i. This can avoid numerical problems.
"""
x_max = x.max()
if isinstance(x, gnp.garray):
return x_max + gnp.log(gnp.exp(x - x_max).sum())
else:
return x_max + np.log(np.exp(x - x_max).sum())
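# Minimal numerical check (numpy input; the imports above must be available):
# the max-subtraction trick avoids overflow for large values.
print(log_exp_sum_1d(np.array([1000.0, 1000.0])))  # ~1000.6931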
|
4e0fcf4831e052e1704394e783bd0a76157a123f
| 3,642,514
|
from scipy.ndimage import binary_closing
from skimage.morphology import disk
def connect_intense_cells(int_cells, conv_buffer):
"""Merge nearby intense cells if they are within a given
convective region search radius.
Parameters
----------
int_cells: (N, M) ndarray
Pixels associated with intense cells.
conv_buffer: integer
Distance to search for nearby intense cells.
Returns
-------
labeled_image1: (N, M) ndarray
Binary image of merged intense cells. Same dimensions as int_cells.
"""
return binary_closing(int_cells>0, structure=disk(3), iterations=conv_buffer)
|
079ab9f1dad2ee4b45e5e96923bb05b3e3e59c01
| 3,642,515
|
from decimal import Decimal
from typing import Union
def _encode_decimal(value: Union[Decimal, int, str]):
"""
Encodes decimal into internal format
"""
value = Decimal(value)
exponent = value.as_tuple().exponent
mantissa = int(value.scaleb(-exponent))
return {
'mantissa': mantissa,
'exponent': exponent
}
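# Minimal usage sketch: 12.5 is stored as mantissa 125 with exponent -1.
print(_encode_decimal('12.5'))  # {'mantissa': 125, 'exponent': -1}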
|
089bd0dd79d6d75dfb4278c46c801df32f5be608
| 3,642,516
|
def getPristineStore(testCase, creator):
"""
Get an Axiom Store which has been created and initialized by C{creator} but
which has been otherwise untouched. If necessary, C{creator} will be
called to make one.
@type testCase: L{twisted.trial.unittest.TestCase}
@type creator: one-argument callable
@param creator: A factory for the Store configuration desired. Will be
invoked with the testCase instance if necessary.
@rtype: L{axiom.store.Store}
"""
dbdir = FilePath(testCase.mktemp())
basePath = _getBaseStorePath(testCase, creator)
basePath.copyTo(dbdir)
return Store(dbdir)
|
f3be2f5bfff30af298de250d7c2ecf1664cd503f
| 3,642,517
|
def data_to_CCA(dic, CCA):
"""
Returns a dictionary of ranking details of each CCA
    {name: {placeholder: rank}}
"""
final_dic = {}
dic_CCA = dic[CCA][0] #the cca sheet
for key, value in dic_CCA.items():
try: #delete all the useless info
del value["Class"]
except KeyError:
del value["CLASS"]
try:
del value["Category"]
        except KeyError:
pass
final_dic[key] = value
try:
del final_dic["Name"]
except KeyError:
pass
return final_dic
|
fedd8a55e4310c024ede4f474c89463f71a0ecb6
| 3,642,518
|
import os
def quick_nve(mol, confId=0, step=2000, time_step=None, limit=0.0, shake=False, idx=None, tmp_clear=False,
solver='lammps', solver_path=None, work_dir=None, omp=1, mpi=0, gpu=0, **kwargs):
"""
MD.quick_nve
MD simulation with NVE ensemble
Args:
mol: RDKit Mol object
Optional args:
confId: Target conformer ID (int)
step: Number of MD steps (int)
time_step: Set timestep of MD (float or None, fs)
limit: NVE limit (float)
shake: Use SHAKE (boolean)
solver: lammps (str)
solver_path: File path of solver (str)
work_dir: Path of work directory (str)
Returns:
        Tuple of (copied RDKit Mol object with updated conformer and velocities,
        unwrapped coordinates (numpy.ndarray, angstrom))
"""
mol_copy = utils.deepcopy_mol(mol)
if solver == 'lammps':
sol = LAMMPS(work_dir=work_dir, solver_path=solver_path, idx=idx)
#elif solver == 'gromacs':
# sol = Gromacs(work_dir=work_dir, solver_path=solver_path)
md = MD(idx=idx)
if not hasattr(mol_copy, 'cell'):
md.pbc = False
calc.centering_mol(mol_copy, confId=confId)
md.add_md('nve', step, time_step=time_step, shake=shake, nve_limit=limit, **kwargs)
sol.make_dat(mol_copy, confId=confId, file_name=md.dat_file)
sol.make_input(md)
cp = sol.exec(omp=omp, mpi=mpi, gpu=gpu)
if cp.returncode != 0 and (
(md.write_data is not None and not os.path.exists(os.path.join(work_dir, md.write_data)))
or (md.outstr is not None and not os.path.exists(os.path.join(work_dir, md.outstr)))
):
utils.radon_print('Error termination of %s. Return code = %i' % (sol.get_name, cp.returncode), level=3)
return None
uwstr, wstr, _, vel, _ = sol.read_traj_simple(os.path.join(sol.work_dir, md.outstr))
for i in range(mol_copy.GetNumAtoms()):
mol_copy.GetConformer(confId).SetAtomPosition(i, Geom.Point3D(uwstr[i, 0], uwstr[i, 1], uwstr[i, 2]))
mol_copy.GetAtomWithIdx(i).SetDoubleProp('vx', vel[i, 0])
mol_copy.GetAtomWithIdx(i).SetDoubleProp('vy', vel[i, 1])
mol_copy.GetAtomWithIdx(i).SetDoubleProp('vz', vel[i, 2])
if hasattr(mol_copy, 'cell'):
mol_copy = calc.mol_trans_in_cell(mol_copy, confId=confId)
if tmp_clear: md.clear(work_dir)
return mol_copy, uwstr
|
9b59f09bd86ba2dc93bb51cd4338f26b948de858
| 3,642,519
|
from astropy.coordinates import Angle
def deg2hms(x):
"""Transform degrees to *hours:minutes:seconds* strings.
Parameters
----------
x : float
The degree value to be written as a sexagesimal string.
Returns
-------
out : str
The input angle written as a sexagesimal string, in the
form, hours:minutes:seconds.
"""
ac = Angle(x, unit='degree')
hms = ac.to_string(unit='hour', sep=':', pad=True)
return str(hms)
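# Quick check (requires astropy): 180 degrees corresponds to 12 hours.
print(deg2hms(180.0))  # '12:00:00'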
|
d4172d8cfe5b71115b6fde93469019a4644edce6
| 3,642,520
|
import numpy as np
def get_waveforms_scales(we, templates, channel_locations):
"""
Return scales and x_vector for templates plotting
"""
wf_max = np.max(templates)
    wf_min = np.min(templates)
x_chans = np.unique(channel_locations[:, 0])
if x_chans.size > 1:
delta_x = np.min(np.diff(x_chans))
else:
delta_x = 40.
y_chans = np.unique(channel_locations[:, 1])
if y_chans.size > 1:
delta_y = np.min(np.diff(y_chans))
else:
delta_y = 40.
m = max(np.abs(wf_max), np.abs(wf_min))
y_scale = delta_y / m * 0.7
y_offset = channel_locations[:, 1][None, :]
xvect = delta_x * (np.arange(we.nsamples) - we.nbefore) / we.nsamples * 0.7
xvectors = channel_locations[:, 0][None, :] + xvect[:, None]
# put nan for discontinuity
xvectors[-1, :] = np.nan
return xvectors, y_scale, y_offset
|
98ece821ab449d2cbdfe5c81635b7948de88083e
| 3,642,521
|
import time
import multiprocessing as mp
import numpy as np
def rec_findsc(positions, davraddi, davradius='dav', adatom_radius=1.1,
ssamples=1000, return_expositions=True,
print_surf_properties=False, remove_is=True, procs=1):
"""It return the atom site surface(True)/core(Flase) for each atoms in
for each structure pandas dataframe. See more in the function
quandarium.analy.mols.findsurfatons.
Parameters
----------
    positions: Pandas.Series
        The name of the feature (bag type) in pd_df with the
        cartesian positions of the atoms.
    davraddi: Pandas.Series
        The name of the feature in pd_df with atomic radii or dav
        information (bag of floats).
    davradius: str ['dav','radii'] (optional, default='dav')
        If 'radii', the atomic radii will be the davraddi feature values.
        If 'dav', the atomic radii will be half of the davraddi feature
        values.
adatom_radius: float (optional, default=1.1).
Radius of the dummy adatom, in angstroms.
    ssamples: integer (optional, default=1000).
Quantity of samplings over the touched sphere surface of each
atom.
Return
------
list_of_new_features_name: list with strings.
['bag_issurface', 'bag_surfaceexposition']
list_of_new_features_data: list with data.
        issurface: bag of integers
            1 indicates a surface atom, 0 indicates a core atom.
        surfaceexposition: bag of floats.
            The percentage of surface exposure of each atom.
"""
print("Initializing analysis: rec_findsc")
inputs_list = []
list_is_surface = []
list_exposition = []
for index, (poitionsi, davraddii) in enumerate(zip(positions, davraddi)):
#print(type(poitionsi),poitionsi)
        positionsi = np.array(poitionsi)  # keep np.array and enable bags
if davradius == 'dav':
            atomic_radii = np.array(davraddii)/2  # keep np.array and enable bags
if davradius == 'radii':
atomic_radii = np.array(davraddii)
inputs_list.append([positionsi, atomic_radii, adatom_radius,
remove_is, ssamples, False, return_expositions,
print_surf_properties, "surface_points.xyz"])
pool = mp.Pool(procs)
result = pool.map_async(findsc_wrap, inputs_list, chunksize=1)
while not result.ready():
remaining = result._number_left # pylint: disable=W0212
print('Remaining: ', remaining)
time.sleep(5.0)
print('Finished')
outputs = result.get()
for index, _ in enumerate(outputs):
list_is_surface.append(outputs[index][0])
list_exposition.append(outputs[index][1])
list_of_new_features_data = [list_is_surface, list_exposition]
list_of_new_features_name = ['bag_issurf', 'bag_exposition']
return list_of_new_features_name, list_of_new_features_data
|
7f48d6c7732a23827879f22fba97c109456d01f8
| 3,642,522
|
from typing import Optional
async def accept_taa(
controller: AcaPyClient, taa: TAARecord, mechanism: Optional[str] = None
):
"""
Accept the TAA
Parameters:
-----------
controller: AcaPyClient
The aries_cloudcontroller object
TAA:
The TAA object we want to agree to
Returns:
--------
accept_taa_response: {}
        The response from letting the ledger know we accepted the TAA
"""
accept_taa_response = await controller.ledger.accept_taa(
body=TAAAccept(**taa.dict(), mechanism=mechanism)
)
logger.info("accept_taa_response: %s", accept_taa_response)
if accept_taa_response != {}:
logger.error("Failed to accept TAA.\n %s", accept_taa_response)
raise HTTPException(
status_code=404,
detail=f"Something went wrong. Could not accept TAA. {accept_taa_response}",
)
return accept_taa_response
|
40557b41aa2f43a3174dacf20252b11be4b6679d
| 3,642,523
|
def discoverYadis(uri):
"""Discover OpenID services for a URI. Tries Yadis and falls back
on old-style <link rel='...'> discovery if Yadis fails.
@param uri: normalized identity URL
@type uri: six.text_type, six.binary_type is deprecated
@return: (claimed_id, services)
@rtype: (six.text_type, list(OpenIDServiceEndpoint))
@raises DiscoveryFailure: when discovery fails.
"""
uri = string_to_text(uri, "Binary values for discoverYadis are deprecated. Use text input instead.")
# Might raise a yadis.discover.DiscoveryFailure if no document
# came back for that URI at all. I don't think falling back
# to OpenID 1.0 discovery on the same URL will help, so don't
# bother to catch it.
response = yadisDiscover(uri)
yadis_url = response.normalized_uri
body = response.response_text
try:
openid_services = OpenIDServiceEndpoint.fromXRDS(yadis_url, body)
except XRDSError:
# Does not parse as a Yadis XRDS file
openid_services = []
if not openid_services:
# Either not an XRDS or there are no OpenID services.
if response.isXRDS():
# if we got the Yadis content-type or followed the Yadis
# header, re-fetch the document without following the Yadis
# header, with no Accept header.
return discoverNoYadis(uri)
# Try to parse the response as HTML.
# <link rel="...">
openid_services = OpenIDServiceEndpoint.fromHTML(yadis_url, body)
return (yadis_url, getOPOrUserServices(openid_services))
|
11ec5cb250f331e17fb84a4bc699c4396d4040ad
| 3,642,524
|
import pandas as pd
def mapper_to_func(map_obj):
"""Converts an object providing a mapping to a callable function"""
map_func = map_obj
if isinstance(map_obj, dict):
map_func = map_obj.get
elif isinstance(map_obj, pd.core.series.Series):
map_func = lambda x: map_obj.loc[x]
return map_func
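# Minimal usage sketch: dicts, pandas Series and plain callables all become callables.
print(mapper_to_func({'a': 1})('a'))             # 1
print(mapper_to_func(pd.Series({'a': 2}))('a'))  # 2
print(mapper_to_func(str.upper)('a'))            # 'A'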
|
5da5497b68dc71aec10231f80580fcbaab86c00e
| 3,642,525
|
def int_finder(input_v, tol=1e-6, order='all', tol1=1e-6):
"""
The function computes the scaling factor required to multiply the
given input array to obtain an integer array. The integer array is
returned.
Parameters
----------
    input_v: numpy.array
input array
tol: float
tolerance with Default = 1e-06
order: str
choices are 'rows', 'columns', 'col', 'all'.
If order = 'all', the input array is flattened and then scaled. This is default value.
If order = 'rows', elements in each row are scaled
If order = 'columns' or 'cols'', elements in each column are scaled
tol1: float
tolerance with Default = 1e-06
Returns
-------
output: numpy.array
An array of integers obtained by scaling input
"""
input1 = np.array(input_v)
Sz = input1.shape
if np.ndim(input1) == 1:
input1 = np.reshape(input1, (1, input1.shape[0]))
if int_check(input1, 15).all():
input1 = np.around(input1)
# Divide by LCM (rows, cols, all) <--- To Do
tmult = gcd_array(input1.astype(dtype='int64'), order)
if (order == 'all'):
input1 = input1 / tmult
elif (order == 'rows'):
tmult = np.tile(tmult, (np.shape(input1[1])))
input1 = input1 / tmult
elif (order == 'col' or order == 'cols' or order == 'columns'):
tmult = np.tile(tmult, (np.shape(input1[0])[0], 1))
input1 = input1 / tmult
output_v = input1
if len(Sz) == 1:
output_v = np.reshape(output_v, (np.size(output_v),))
return output_v.astype(int)
else:
# By default it flattens the array (if nargin < 3)
if order.lower() == 'all':
if len(Sz) != 1:
input1.shape = (1, Sz[0]*Sz[1])
else:
Switch = 0
err_msg = "Not a valid input. For the third argument please"+ \
" choose either \"rows\" or \"columns\" keys for this function."
order_options = ('rows', 'columns', 'col')
try:
Keys = (order_options.index(order.lower()))
except:
raise Exception(err_msg)
if (Keys == 1) or (Keys == 2):
if input1.shape[0] != 1:
# Handling the case of asking a row vector
# with the 'column' key by mistake.
input1 = input1.T
Switch = 1
# Handling the case of asking a column
# vector with the 'row' key by mistake.
if (Keys == 0) and (input1.shape[1] == 1):
input1 = input1.T
Switch = 1
if (abs(input1) < tol).all():
excep1 = 'All the input components cannot' \
+ 'be smaller than tolerance.'
raise Exception(excep1)
tmp = np.array((abs(input1) > tol1))
Vec = 2 * abs(input1[::]).max() * np.ones(
(input1.shape[0], input1.shape[1]))
Vec[tmp] = input1[tmp]
MIN = abs(Vec).min(axis=1)
# Transposing a row to a column
MIN.shape = (len(MIN), 1)
input1 = input1 / np.tile(MIN, (1, input1.shape[1]))
N, D = rat(input1, tol)
N[~tmp] = 0 # <---- added
D[~tmp] = 1 # <---- added
lcm_rows = lcm_array(D, 'rows')
lcm_mat = np.tile(lcm_rows, (1, input1.shape[1]))
Rounded = (N * lcm_mat) / D
output_v = Rounded
# --------------------------
if order.lower() == 'all':
if len(Sz) != 1:
output_v.shape = (Sz[0], Sz[1])
else:
if (Keys) == 1 or (Keys) == 2:
output_v = output_v.T
if Keys == 0 and Switch == 1:
output_v = output_v.T
if len(Sz) == 1:
output_v = np.reshape(output_v, (np.size(output_v), ))
return output_v.astype(int)
|
c7168a9146def4990174be56c7811663b62e82a2
| 3,642,526
|
def cell_info_for_active_cells(self, porosity_model="MATRIX_MODEL"):
"""Get list of cell info objects for current case
Arguments:
porosity_model(str): String representing an enum.
must be 'MATRIX_MODEL' or 'FRACTURE_MODEL'.
Returns:
List of **CellInfo** objects
**CellInfo class description**::
Parameter | Description | Type
------------------------- | --------------------------------------------- | -----
grid_index | Index to grid | Integer
parent_grid_index | Index to parent grid | Integer
coarsening_box_index | Index to coarsening box | Integer
local_ijk | Cell index in IJK directions of local grid | Vec3i
parent_ijk | Cell index in IJK directions of parent grid | Vec3i
**Vec3i class description**::
Parameter | Description | Type
---------------- | -------------------------------------------- | -----
i | I grid index | Integer
j | J grid index | Integer
k | K grid index | Integer
"""
active_cell_info_chunks = self.cell_info_for_active_cells_async(
porosity_model=porosity_model
)
received_active_cells = []
for active_cell_chunk in active_cell_info_chunks:
for active_cell in active_cell_chunk.data:
received_active_cells.append(active_cell)
return received_active_cells
|
f2b211e72bc5c2f651d67fa383dc750b6b9f5c5a
| 3,642,527
|
import multiprocessing as mp
import numpy as np
def features(x, encoded):
"""
Given the original images or the encoded images, generate the
features to use for the patch similarity function.
"""
print('start shape',x.shape)
if len(x.shape) == 3:
x = x - np.mean(x,axis=0,keepdims=True)
else:
# count x 100 x 256 x 768
print(x[0].shape)
x = x - np.mean(x,axis=1,keepdims=True)
# remove per-neural-network dimension
x = x.reshape((x.shape[0] * x.shape[1],) + x.shape[2:])
p = mp.Pool(96)
B = len(x) // 96
print(1)
bs = [x[i:i+B] for i in range(0,len(x),B)]
print(2)
r = p.map(features_, bs)
#r = features_(bs[0][:100])
print(3)
p.close()
#r = np.array(r)
#print('finish',r.shape)
return np.concatenate(r, axis=0)
|
1f3e1514dc67908207c38d75914954cb1af8178b
| 3,642,528
|
from datetime import datetime
def create_processing_log(s_url,s_status=settings.Status.PENDING):
"""
Creates a new crawler processing status. Default status PENDING
:param s_url: str - URL/name of the site
:param s_status: str - The chosen processing status
:return: SiteProcessingLog - The new processing status log
"""
# is the status valid?
assert isinstance(s_status, settings.Status), 'Not valid type of status'
# Gets the chosen status
new_status = entities.SiteStatus.get(type=s_status.name)
# Creates the new processing status
return entities.SiteProcessingLog(site=get_site(s_url=s_url), status=new_status, timestamp=datetime.today())
|
05fd2e5b01289f982789b1f7d88b401b25d445c3
| 3,642,529
|
import torch
def pick_action(action_distribution):
"""action selection by sampling from a multinomial.
Parameters
----------
action_distribution : 1d torch.tensor
action distribution, pi(a|s)
Returns
-------
    torch.tensor(int)
        sampled action
"""
m = torch.distributions.Categorical(action_distribution)
a_t = m.sample()
return a_t
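# Minimal usage sketch: sample an action index from a 3-way distribution.
probs = torch.tensor([0.2, 0.5, 0.3])
print(int(pick_action(probs)))  # 0, 1 or 2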
|
ac7ceb0df860876ec209563eaa6bdd3f8bd09189
| 3,642,530
|
from typing import List
def word_tokenizer(text: str) -> List[str]:
"""Tokenize input text splitting into words
Args:
text : Input text
Returns:
Tokenized text
"""
return text.split()
|
dc6e4736d7a1f564bcfc6fed081a1869db38eea5
| 3,642,531
|
from google.cloud import datacatalog_v1beta1
def lookup_bigquery_dataset(project_id, dataset_id):
"""Retrieves Data Catalog entry for the given BigQuery Dataset."""
datacatalog = datacatalog_v1beta1.DataCatalogClient()
resource_name = '//bigquery.googleapis.com/projects/{}/datasets/{}'\
.format(project_id, dataset_id)
return datacatalog.lookup_entry(linked_resource=resource_name)
|
847ce43ccea5f462ccf696bc5a098c0e26f27852
| 3,642,532
|
def indicator_structure(Template, LC_instance):
""" given a Template A and a LC instance Sigma
builds the indicator structure of Sigma over A
and passes the identification object """
in_vars, in_cons = LC_instance
# Construct the domain of the indicator by factoring
arities = dict(in_vars)
domain = ((f, x)
for f, arity in arities.items()
for x in product(Template.domain, repeat=arity))
identify = Components(domain)
for scope, relation in in_cons:
f, g = scope
pi = {x: y for x, y in relation}
for x in product(Template.domain, repeat=arities[g]):
x_pi = tuple(x[pi[i]] for i in range(arities[f]))
identify.add((f, x_pi), (g, x))
variables = iter(identify)
    # impose constraints that cover all that's necessary
important_fs = tuple(cover(arities, (scope for scope, rel in in_cons)))
def indicator_relation(template_relation):
return set(
tuple(identify((f, x)) for x in xs)
for f in important_fs
for xs in product_relation(template_relation, repeat=arities[f]))
rels = (indicator_relation(relation) for relation in Template.relations)
def decode(homomorphism):
polymorphisms = dict()
for f, arity in arities.items():
polymorphisms[f] = {
x: homomorphism[identify((f, x))]
for x in product(Template.domain, repeat=arity)}
return polymorphisms
return DelayDecode(Structure(variables, *rels), decode)
|
0652fd3af1a16892496b232e42c2fbdb2525fc78
| 3,642,533
|
import tensorflow as tf
def add_depth_dim(X, y):
"""
Add extra dimension at tail for x only. This is trivial to do in-line.
    This is slightly more convenient than writing a lambda.
Args:
X (tf.tensor):
y (tf.tensor):
Returns:
tf.tensor, tf.tensor: X, y tuple, with X having a new trailing dimension.
"""
x_dat = tf.expand_dims(X, -1) # Prepare as an image, with only 1 colour-depth channel.
return x_dat, y
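# Minimal usage sketch: a (28, 28) image gains a trailing colour-depth channel.
x_img, y_lab = add_depth_dim(tf.zeros((28, 28)), tf.constant(0))
print(x_img.shape)  # (28, 28, 1)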
|
972bcc5df0e333186b39d306c9cec46c23117c9a
| 3,642,534
|
import functools
import warnings
import traceback
def catch_exceptions(warning_msg="An exception was caught and ignored.", should_catch=True):
"""Decorator that catches exceptions."""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not should_catch:
return func(*args, **kwargs)
try:
return func(*args, **kwargs)
            except Exception:
warnings.warn(warning_msg + "\nDetailed error log: " + traceback.format_exc())
return wrapper
return decorator
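# Minimal usage sketch: the wrapped function's exception becomes a warning and
# the call returns None instead of raising.
@catch_exceptions("divide() failed; returning None.")
def divide(a, b):
    return a / b
print(divide(1, 0))  # None (a UserWarning carrying the traceback is emitted)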
|
4242512d6416ecd97ef0c241d0d719fcdaedd797
| 3,642,535
|
def setSC2p5DAccNodes(lattice, sc_path_length_min, space_charge_calculator, boundary = None):
"""
It will put a set of a space charge SC2p5D_AccNode into the lattice as child nodes of the first level accelerator nodes.
The SC nodes will be inserted at the beginning of a particular part of the first level AccNode element.
The distance between SC nodes should be more than sc_path_length_min, and the boundary is optional.
The function will return the array of SC nodes as a convenience for the user.
"""
scNodes_arr = setSC_General_AccNodes(lattice, sc_path_length_min, space_charge_calculator, SC2p5D_AccNode)
for scNode in scNodes_arr:
scNode.setName(scNode.getName()+"SC2p5D")
scNode.setBoundary(boundary)
# initialize the lattice
lattice.initialize()
return scNodes_arr
|
f40c21f625da435ff7b42b157dbce803ebd0bf82
| 3,642,536
|
from collections import defaultdict
def transmuting_ring_sizes_score(mapping: LigandAtomMapping):
"""Checks if mapping alters a ring size"""
molA = mapping.molA.to_rdkit()
molB = mapping.molB.to_rdkit()
molA_to_molB = mapping.molA_to_molB
def gen_ringdict(mol):
# maps atom idx to ring sizes
ringinfo = mol.GetRingInfo()
idx_to_ringsizes = defaultdict(list)
for r in ringinfo.AtomRings():
for idx in r:
idx_to_ringsizes[idx].append(len(r))
return idx_to_ringsizes
# generate ring size dicts
ringdictA = gen_ringdict(molA)
ringdictB = gen_ringdict(molB)
is_bad = False
# check first degree neighbours of core atoms to see if their ring
# sizes are the same
for i, j in molA_to_molB.items():
atomA = molA.GetAtomWithIdx(i)
for bA in atomA.GetBonds():
otherA = bA.GetOtherAtom(atomA)
if otherA.GetIdx() in molA_to_molB:
# if other end of bond in core, ignore
continue
# otherA is an atom not in the mapping, but bonded to an
# atom in the mapping
if not otherA.IsInRing():
continue
# try and find the corresponding atom in molecule B
atomB = molB.GetAtomWithIdx(j)
for bB in atomB.GetBonds():
otherB = bB.GetOtherAtom(atomB)
if otherB.GetIdx() in molA_to_molB.values():
continue
if not otherB.IsInRing():
continue
# ringdict[idx] will give the list of ringsizes for an atom
if set(ringdictA[otherA.GetIdx()]) != set(
ringdictB[otherB.GetIdx()]):
is_bad = True
return 1 - 0.1 if is_bad else 0
|
fb09a123eea6eb155c6c8aecccedf82838ee40ff
| 3,642,537
|
def test_translate_six_frames(seq_record):
"""
Given a Biopython sequence record with a DNA (or RNA?) sequence,
translate into amino acid (protein) sequences in six frames.
Returns translations as list of strings.
"""
translation_list = []
for strand, nuc in [(+1, seq_record.seq), (-1, seq_record.seq.reverse_complement())]:
print("Strand: %s\nNuc: %s" % (strand, nuc))
for frame in range(3):
print("Frame: %s" % frame)
length = 3 * ((len(seq_record)-frame) // 3)
print("Length: %s" % length)
print("Possible translations: %s" % nuc[frame:frame+length].translate())
for pro in nuc[frame:frame+length].translate().split("*"):
translation_list.append(pro)
    return translation_list
|
ce230ee2d8c48d55b269b89e828782456389fc39
| 3,642,538
|
import math
from dask.base import tokenize
from dask.bag import Bag
from toolz import partition_all
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Examples
--------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See Also
--------
from_filenames: Specialized bag creation function for textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = 'from_sequence-' + tokenize(seq, partition_size)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
|
d408f73211af6713f5bf621daf262d3d217419a1
| 3,642,539
|
import os
import numpy as np
from scipy.signal import savgol_filter
def smooth_trajectory(
df,
bodyparts,
filter_window=3,
order=1,
deriv=0,
save=False,
output_filename=None,
destfolder=None,
):
"""
Smooths the input data which is a multiindex pandas array generated by DeepLabCut as a result of analyzing a video.
Parameters
----------
df: Pandas multiindex dataframe
bodyparts: List
List of bodyparts to smooth. To smooth all the bodyparts use bodyparts=['all']
filter_window: int
The length of filter window which needs to be a positive odd integer
order: int
Order of the polynomial to fit the data. The order must be less than the filter_window
deriv: int
        Optional. Computes the derivative. If deriv=1, it computes the velocity on the smoothed data; if deriv=2, it computes the acceleration on the smoothed data.
Outputs
-------
df: smoothed dataframe
Example
-------
    >>> df_smooth = kinematics.smooth_trajectory(df,bodyparts=['nose','shoulder'],filter_window=11,order=3)
    To smooth all the bodyparts in the dataframe, use
    >>> df_smooth = kinematics.smooth_trajectory(df,bodyparts=['all'],filter_window=11,order=3)
"""
df = df.copy()
xy = df.columns.get_level_values("coords") != "likelihood"
if bodyparts[0] == "all":
mask = np.ones(df.shape[1], dtype=bool)
else:
mask = df.columns.get_level_values("bodyparts").isin(bodyparts)
to_smooth = xy & mask
df.loc[:, to_smooth] = savgol_filter(
df.loc[:, to_smooth], filter_window, order, deriv, axis=0
)
df_cut = df.loc[:, mask]
if not destfolder:
destfolder = os.getcwd()
if not output_filename:
output_filename = (
"dataFrame_smooth_" + df.columns.get_level_values("scorer").unique()[0]
)
if save:
print("Saving the smoothed data as a pandas array in %s " % destfolder)
df_cut.to_hdf(
os.path.join(destfolder, output_filename + ".h5"),
"df_with_missing",
format="table",
mode="w",
)
return df_cut
|
aef633d09e0e22de662ddd5fb79f57d8fd56291f
| 3,642,540
|
from functools import partial
from consul import Consul
def _get_instances(consul_host, user):
"""Get all deployed component instances for a given user
Sourced from multiple places to ensure we get a complete list of all
component instances no matter what state they are in.
Args
----
consul_host: (string) host string of Consul
user: (string) user id
Returns
-------
List of unique component instance names
"""
cons = Consul(consul_host)
get_instances_from_kv = partial(_get_instances_from_kv, cons.kv.get)
get_instances_from_catalog = partial(_get_instances_from_catalog, cons.catalog.services)
return _merge_instances(user, get_instances_from_kv, get_instances_from_catalog)
|
fa8b85d0c917b20676b74b2123d00f0d4dab41f5
| 3,642,541
|
from typing import Sequence
import glob
async def pool_help(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
"""Show information of all documented pool commands the player can access."""
prefix = glob.config.command_prefix
cmds = []
for cmd in pool_commands.commands:
if not cmd.doc or not p.priv & cmd.priv:
# no doc, or insufficient permissions.
continue
cmds.append(f'{prefix}pool {cmd.triggers[0]}: {cmd.doc}')
return '\n'.join(cmds)
|
a21f894585995cfc80717c301906da9634641b30
| 3,642,542
|
def filter_params(params):
"""Filter the dictionary of params for a Bountysource account.
This is so that the Bountysource access token doesn't float
around in a user_info hash (considering nothing else does that).
"""
whitelist = ['id', 'display_name', 'first_name', 'last_name', 'email', 'avatar_url']
filtered_params = {}
for key in params:
if key in whitelist:
filtered_params[key] = params[key]
return filtered_params
|
d471ecfa413f6a6821202f14a2506a89e55353b2
| 3,642,543
|
from typing import Counter
def build_vocab(tokens, glove_vocab, min_freq):
""" build vocab from tokens and glove words. """
counter = Counter(t for t in tokens)
# if min_freq > 0, use min_freq, otherwise keep all glove words
if min_freq > 0:
v = sorted([t for t in counter if counter.get(t) >= min_freq], key=counter.get, reverse=True)
else:
v = sorted([t for t in counter if t in glove_vocab], key=counter.get, reverse=True)
# add special tokens and entity mask tokens
v = constant.VOCAB_PREFIX + v
print("vocab built with {}/{} words.".format(len(v), len(counter)))
return v
|
797718d6c5d91ac1318b318ca0c254903383304a
| 3,642,544
|
def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
    Checks the equality of two masked arrays, up to a given number of decimals.
The equality is checked elementwise.
"""
def compare(x, y):
"Returns the result of the loose comparison between x and y)."
return approx(x, y, rtol=10. ** -decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header='Arrays are not almost equal')
|
1551ef5b723718644805901fdd041594900cffd2
| 3,642,545
|
def to_bits_string(value: int) -> str:
"""Converts unsigned value to a bit string with _ separators every nibble."""
if value < 0:
raise ValueError(f'Value is not unsigned: {value!r}')
bits = bin(value)[2:]
rev = bits[::-1]
pieces = []
i = 0
while i < len(rev):
pieces.append(rev[i:i + 4])
i += 4
return '0b' + '_'.join(pieces)[::-1]
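# Quick check: nibble separators are inserted from the least-significant end.
print(to_bits_string(0xAF0))  # '0b1010_1111_0000'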
|
07dea253378686a1c65c97fad3d0b706e02335c4
| 3,642,546
|
import collections
import numpy as np
def comp_days_centered(ndays, offset=0):
"""Return days for pre/onset/post composites centered on onset.
Parameters
----------
ndays : int
Number of days to average in each composite.
offset : int, optional
Number of offset days between pre/onset and onset/post
day ranges.
Returns
-------
reldays : dict of arrays
Components are 'pre', 'onset', and 'post', arrays of days
of the year relative to onset day, for each composite.
"""
ndays = int(ndays)
n1 = int(ndays // 2)
n2 = ndays - n1
reldays = collections.OrderedDict()
reldays['pre'] = np.arange(-offset - n1 - ndays, -offset - n1)
reldays['onset'] = np.arange(-n1, n2)
reldays['post'] = np.arange(offset + n2, offset + n2 + ndays)
return reldays
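# Quick check (uses the numpy import above): 3-day composites centred on onset,
# with no offset between the day ranges.
print(comp_days_centered(3))
# pre: [-4 -3 -2], onset: [-1 0 1], post: [2 3 4]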
|
afe62411e5540c9088e929ecc502994a66a4d272
| 3,642,547
|
import os
def createDatabase(name: str) -> bool:
"""
    Creates a database in the Python format. If the name is only spaces (after trim()), a DatabaseNameError is raised. Returns True if the database is successfully created.
    >>> import coconut
    # The database format must always be Python; names without a .py extension are also supported.
>>> coconut.database.createDatabase("Example.py")
True
"""
if backend.trim(name) == '':
raise DatabaseNameError(f'Name {name} is invalid and is just spaces')
else:
if not name.lower().endswith('.py'):
if len(name.split('.')) > 1:
raise InvalidTypeDatabaseError('Extension type {} is invalid'.format(name.split('.')[-1]))
else:
if name.endswith('.py'):
pass
else:
name = name + '.py'
os.chdir(os.path.join(backend.throwback(), 'databases'))
f = open(f'{name}', 'w+')
f.close()
return True
else:
os.chdir(os.path.join(backend.throwback(), 'databases'))
f = open(f'{name}', 'w+')
f.close()
return True
|
723be430b04c48536423cb8d825af591b9ddf9ce
| 3,642,548
|
def linreg_fit_bayes(X, y, **kwargs):
"""
Fit a Bayesian linear regression model.
This is a port of linregFit.m from pmtk3.
:param X: N*D design matrix
:param y: N*1 response vector
"""
pp = preprocessor_create(add_ones=True, standardize_X=False) # default
prior = kwargs['prior'] if 'prior' in kwargs else 'uninf'
preproc = kwargs['preproc'] if 'preproc' in kwargs else pp
beta = kwargs['beta'] if 'beta' in kwargs else None
alpha = kwargs['alpha'] if 'alpha' in kwargs else None
g = kwargs['g'] if 'g' in kwargs else None
use_ARD = kwargs['use_ARD'] if 'use_ARD' in kwargs else False
verbose = kwargs['verbose'] if 'verbose' in kwargs else False
if prior.lower() == 'eb':
prior = 'ebnetlab'
if prior.lower() == 'uninf':
raise NotImplementedError
elif prior.lower() == 'gauss':
raise NotImplementedError
elif prior.lower() == 'zellner':
raise NotImplementedError
elif prior.lower() == 'vb':
raise NotImplementedError
elif prior.lower() == 'ebnetlab':
model, logev = linreg_fit_eb_netlab(X, y, preproc)
elif prior.lower() == 'ebchen':
raise NotImplementedError
else:
raise ValueError('Invalid prior')
model['model_type'] = 'linreg_bayes'
model['prior'] = prior
return model, logev
|
0ba43ff80a4dbc96a8111beadb3efcb7adb7c8eb
| 3,642,549
|
import numpy as np
def getrv(objwave, objflam, refwave, refflam, maxrv=[-200.,200.], waverange=[-np.inf,np.inf]):
"""
Calculates the rv shift of an object relative to some reference spectrum.
Inputs:
objwave - obj wavelengths, 1d array
objflam - obj flux, 1d array
refwave - ref wavelengths, 1d array
refflam - ref flux, 1d array
maxrv - min and max rv shift in km/s, 2-element array
Output:
rv shift in km/s
"""
ow = (objwave >= np.nanmin(refwave)) & (objwave <= np.nanmax(refwave)) \
& (objwave >= waverange[0]) & (objwave <= waverange[1])
rw = (refwave >= np.nanmin(objwave)) & (refwave <= np.nanmax(objwave)) \
& (refwave >= waverange[0]) & (refwave <= waverange[1])
oscl = 1.0/np.nanmedian(objflam[ow])
rscl = 1.0/np.nanmedian(refflam[rw])
iflam = np.interp(refwave[rw], objwave[np.isfinite(objflam)], objflam[np.isfinite(objflam)], left=np.nan, right=np.nan)
drv = np.nanmedian(2.*(refwave[rw][1:]-refwave[rw][:-1])/(refwave[rw][1:]+refwave[rw][:-1]))
maxshift = maxrv / (drv*3e5)
ssd = []
ss = np.arange(int(maxshift[0]),int(maxshift[1]+1))
for s in ss:
if s > 0:
shiftflam = np.append(np.repeat(np.nan, s), iflam[0:-s])
elif s < 0:
shiftflam = np.append(iflam[-s:], np.repeat(np.nan, -s))
else:
shiftflam = iflam
ssd.append(np.nansum((rscl*refflam[rw] - oscl*shiftflam)**2)/np.sum(~np.isnan(shiftflam)))
ssd = np.array(ssd)
s = ss[np.nanargmin(ssd)]
return s*drv*3e5
|
4bf9bea90da74476e38e465d142fa83092e5c3c7
| 3,642,550
|
from os import system as system_call
from platform import system as system_name
def ping(host):
"""
Returns True if host (str) responds to a ping request.
Remember that some hosts may not respond to a ping request even if the host name is valid.
"""
# Ping parameters as function of OS
ping_param = "-n 1" if system_name().lower()=="windows" else "-c 1"
# Pinging
return system_call("ping " + ping_param + " " + host) == 0
|
1d19f5f01593099c5a534c0afa4d7bde85ba9d47
| 3,642,551
|
from typing import Counter
def remove_unpopulated_classes(_df, target_column, threshold):
"""
Removes any row of the df for which the label in target_column appears less
than threshold times in the whole frame (not enough populated classes)
    :param _df: The dataframe to filter
:param target_column: The target column with labels
:param threshold: the number of appearances a label must respect
:return: The filtered dataframe
"""
count = Counter(_df[target_column])
valid = [k for k in count.keys() if count[k] >= threshold]
_df = _df[_df[target_column].isin(valid)]
return _df
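# Minimal usage sketch (assumes pandas): labels appearing fewer than 2 times are dropped.
import pandas as pd
df = pd.DataFrame({'label': ['a', 'a', 'b'], 'x': [1, 2, 3]})
print(remove_unpopulated_classes(df, 'label', 2))  # keeps only the two 'a' rows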
|
2ed31cfd3883a3856501dabff935028824141181
| 3,642,552
|
from itertools import combinations
def get_longest_substrings(full_strings):
"""Return a dict of top substrings with hits from a given list of strings.
Args:
full_strings (list[str]): List of strings to test against each other.
Returns:
dict: substrings with their respective frequencies in full_strings.
"""
combos = list(combinations(full_strings, 2))
all_substrings = {}
for string_a, string_b in combos:
substring = get_longest_substring(string_a, string_b)
# Set entry to 2, matches with string_a and string_b, else + 1.
all_substrings.update(
{
substring: all_substrings.get(substring, 1) + 1,
},
)
return all_substrings
|
1eed90129d286988c0ff7e5e5e1229bf871b48bd
| 3,642,553
|
def num_weekdays():
"""
Creates a function which returns the number of weekdays in a pandas.Period, typically for use as the average_weight parameter for other functions.
Returns:
callable: Function accepting a single parameter of type pandas.Period and returning the number of weekdays within this
period as a float.
"""
return num_business_days([])
|
bbcb2f8eca0398d46d72cc784f6ec1575159e36d
| 3,642,554
|
import os
def reader_from_file(load_dir: str, **kwargs):
"""
Load a reader from a checkpoint.
Args:
load_dir: folder containing the reader being loaded.
Returns: a reader.
"""
shared_resources = create_shared_resources()
shared_resources.load(os.path.join(load_dir, "shared_resources"))
if kwargs:
shared_resources.config.update(kwargs)
reader = readers[shared_resources.config["reader"]](shared_resources)
reader.load_and_setup_modules(load_dir)
return reader
|
810cd414b99e289f989555b73597e511dfddea1b
| 3,642,555
|
import inspect
def add_special_param_to_dependency(
*,
dependency_param: inspect.Parameter,
dependant: Dependant,
) -> bool:
"""Check if param is non field object that should be passed into callable.
Arguments:
dependency_param: param that should be checked.
dependant: dependency which field would be filled with required param name.
Returns:
Result of check.
"""
if lenient_issubclass(dependency_param.annotation, bots.Bot):
dependant.bot_param_name = dependency_param.name
return True
elif lenient_issubclass(dependency_param.annotation, Message):
dependant.message_param_name = dependency_param.name
return True
elif lenient_issubclass(dependency_param.annotation, async_client.AsyncClient):
dependant.async_client_param_name = dependency_param.name
return True
elif lenient_issubclass(dependency_param.annotation, sync_client.Client):
dependant.sync_client_param_name = dependency_param.name
return True
return False
|
d4fa82c6fca8d79ca8b19e8b2aa3447ad38dbd48
| 3,642,556
|
import torch
def mtask_forone_advacc(val_loader, model, criterion, task_name, args, info, epoch=0, writer=None,
comet=None, test_flag=False, test_vis=False, norm='Linf'):
"""
NOTE: test_flag is for the case when we are testing for multiple models, need to return something to be able to plot and analyse
"""
assert len(task_name) > 0
avg_losses = {}
num_classes = args.classes
hist = np.zeros((num_classes, num_classes))
for c_name, criterion_fun in criterion.items():
avg_losses[c_name] = AverageMeter()
seg_accuracy = AverageMeter()
seg_clean_accuracy = AverageMeter()
    model.eval()  # this is super important: eval mode handles batchnorm correctly
print("using norm type", norm)
for i, (input, target, mask) in enumerate(val_loader):
if test_vis:
clean_output = model(Variable(input.cuda(), requires_grad=False))
seg_clean_accuracy.update(accuracy(clean_output['segmentsemantic'], target['segmentsemantic'].long().cuda()),
input.size(0))
if args.steps == 0 or args.step_size == 0:
args.epsilon = 0
if norm == 'Linf':
if args.dataset == 'taskonomy':
adv_img = PGD_attack_mtask(input, target, mask, model, criterion, task_name, args.epsilon, args.steps, args.dataset,
args.step_size, info, args, using_noise=True)
elif args.dataset == 'cityscape':
adv_img = PGD_attack_mtask_city(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
args.dataset,
args.step_size, info, args, using_noise=True)
elif norm == 'l2':
adv_img = PGD_attack_mtask_L2(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
args.dataset,
args.step_size)
# image_var = Variable(adv_img.data, requires_grad=False)
image_var = adv_img.data
# image_var = input
if torch.cuda.is_available():
image_var = image_var.cuda()
for keys, m in mask.items():
mask[keys] = m.cuda()
for keys, tar in target.items():
target[keys] = tar.cuda()
# print("diff", torch.sum(torch.abs(raw_input-image_var)))
with torch.no_grad():
output = model(image_var)
sum_loss = None
loss_dict = {}
for c_name, criterion_fun in criterion.items():
this_loss = criterion_fun(output[c_name].float(), target[c_name],
mask[c_name])
if sum_loss is None:
sum_loss = this_loss
else:
sum_loss = sum_loss + this_loss
loss_dict[c_name] = this_loss
avg_losses[c_name].update(loss_dict[c_name].data.item(), input.size(0))
if 'segmentsemantic' in criterion.keys():
# this is accuracy for segmentation
seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
#TODO: also mIOU here
class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target['segmentsemantic'].data.numpy()
class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
if i % 500 == 0:
class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
# print(target['segmentsemantic'].shape)
decoded_target = decode_segmap(
target['segmentsemantic'][0][0].cpu().data.numpy() if torch.cuda.is_available() else
target['segmentsemantic'][0][0].data.numpy(),
args.dataset)
decoded_target = np.moveaxis(decoded_target, 2, 0)
decoded_class_prediction = decode_segmap(
class_prediction[0].cpu().data.numpy() if torch.cuda.is_available() else class_prediction[
0].data.numpy(), args.dataset)
decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
if not test_flag:
writer.add_image('Val/image clean ', back_transform(input, info)[0])
writer.add_image('Val/image adv ', back_transform(adv_img, info)[0])
writer.add_image('Val/image gt for adv ', decoded_target)
writer.add_image('Val/image adv prediction ', decoded_class_prediction)
# if comet is not None: comet.log_image(back_transform(input, info)[0].cpu(), name='Val/image clean ', image_channels='first')
# if comet is not None: comet.log_image(back_transform(adv_img, info)[0].cpu(), name='Val/image adv ', image_channels='first')
# if comet is not None: comet.log_image(decoded_target, name='Val/image gt for adv ', image_channels='first')
# if comet is not None: comet.log_image(decoded_class_prediction, name='Val/image adv prediction ', image_channels='first')
if 'segmentsemantic' in criterion.keys():
# this is accuracy for segmentation
seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
#TODO: also mIOU here
class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target['segmentsemantic'].data.numpy()
class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
if args.debug:
if i>1:
break
if test_vis:
print("clean seg accuracy: {}".format(seg_clean_accuracy.avg))
str_attack_result = ''
str_not_attacked_task_result = ''
for keys, loss_term in criterion.items():
if keys in task_name:
str_attack_result += 'Attacked Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys, loss=avg_losses[keys])
else:
str_not_attacked_task_result += 'Not att Task Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys, loss=avg_losses[keys])
# Tensorboard logger
if not test_flag:
for keys, _ in criterion.items():
if keys in task_name:
writer.add_scalar('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg, epoch)
if comet is not None: comet.log_metric('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
else:
writer.add_scalar('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
if comet is not None: comet.log_metric('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
if 'segmentsemantic' in criterion.keys() or 'segmentsemantic' in criterion.keys():
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
mIoU = round(np.nanmean(ious), 2)
str_attack_result += '\n Segment Score ({score.avg:.3f}) \t'.format(score=seg_accuracy)
str_attack_result += ' Segment ===> mAP {}\n'.format(mIoU)
if comet is not None: comet.log_metric('segmentsemantic Attacked IOU', mIoU)
if comet is not None: comet.log_metric('segmentsemantic Attacked Score', seg_accuracy)
print('clean task')
print(str_not_attacked_task_result)
if test_flag:
dict_losses = {}
for key, loss_term in criterion.items():
dict_losses[key] = avg_losses[key].avg
# print(str_attack_result, "\nnew", avg_losses[keys].avg, "\n")
if 'segmentsemantic' in criterion.keys():
dict_losses['segmentsemantic'] = {'iou' : mIoU,
'loss' : avg_losses['segmentsemantic'].avg,
'seg_acc': seg_accuracy.avg}
print("These losses are returned", dict_losses)
        # Compute the dictionary of losses that we want. Desired: {'segmentsemantic': [mIoU, cel], 'keypoints2d': acc, ...}
return dict_losses
|
17b980f93ff596b0f1c694bcef533ba5a95e09da
| 3,642,557
|
from typing import Dict
async def ga4gh_info(host: str) -> Dict:
"""Construct the `Beacon` app information dict in GA4GH Discovery format.
:return beacon_info: A dict that contain information about the ``Beacon`` endpoint.
"""
beacon_info = {
# TO DO implement some fallback mechanism for ID
"id": ".".join(reversed(host.split("."))),
"name": __title__,
"type": __service_type__,
"description": __description__,
"organization": {
"name": __org_name__,
"url": __org_welcomeUrl__,
},
"contactUrl": __org_contactUrl__,
"documentationUrl": __docs_url__,
"createdAt": __createtime__,
"updatedAt": __updatetime__,
"environment": __service_env__,
"version": __version__,
}
return beacon_info
|
ceb223ff97313bcd0a15b39b3ecf1afa5c16ac8b
| 3,642,558
|
def Random_Forest_Classifier_Circoscrizione(X, y, num_features, cat_features):
"""
    Function that builds, fits, tests and returns a pipeline with the Random Forest
    classifier (this time for the problem of classifying circoscrizioni),
    with some characteristics that are self-evident from the code.
    Input: X is the data, y the targets.
    num_features and cat_features are the numerical and categorical features (as lists of strings).
    Prints the test accuracy.
"""
X=X[num_features+cat_features]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
transf=make_column_transformer(
(StandardScaler(), num_features),
(OneHotEncoder(handle_unknown="ignore"), cat_features),
remainder='drop')
pipe_RFC = Pipeline([
#('encoder', OneHotEncoder(sparse=False, handle_unknown='ignore')),
('transformer', transf),
('Regressor', RandomForestClassifier(bootstrap=False))
])
    # Try a grid search CV
    CV_parameters = {'Regressor__n_estimators': [50, 100, 200, 500], # Larger values slow the algorithm down
                     'Regressor__max_depth': [5, 10, 20, 50, 70, 100], # Occam's razor to avoid overfitting
                     'Regressor__min_samples_leaf': [1, 2, 4], # Occam's razor again
                     'Regressor__min_samples_split': [2, 5, 10, 15, 20],
                     }
    # Tuning parameters for our RFC
RFC_CV = GridSearchCV(estimator=pipe_RFC,
param_grid=CV_parameters,
n_jobs=-1,
cv=2)
RFC_CV.fit(X_train, y_train)
y_RFC_pred = RFC_CV.predict(X_test)
print("Random forest classifier accuracy score:", accuracy_score(y_test, y_RFC_pred))
#Gonna leave these here, more convenient than return the test values as in "return RFC_CV, (X_test, y_test)"
plot_confusion_matrix(RFC_CV, X_test, y_test)
plot_precision_recall_curve(RFC_CV, X_test, y_test)
plot_roc_curve(RFC_CV, X_test, y_test)
return RFC_CV
|
a23253d8fcb724dc3456c7e3506cd2ecf975ba0f
| 3,642,559
|
def convert_data_set(data_set, specific_character_set):
""" Convert a DICOM data set to its NIfTI+JSON representation.
"""
result = {}
if odil.registry.SpecificCharacterSet in data_set:
specific_character_set = data_set[odil.registry.SpecificCharacterSet]
for tag, element in data_set.items():
name = get_tag_name(tag)
value = convert_element(element, specific_character_set)
result[name] = value
return result
|
68d9eaa54ba244b5b1c31e0d4ffb0af82b564174
| 3,642,560
|
import math
def calc_distance(
p1: Location,
p2: Location
) -> float:
"""
Args:
p1 (Location): planet 1 of interest
        p2 (Location): planet 2 of interest
"""
    if p1.coords.galaxy != p2.coords.galaxy:
        distance = 20000 * math.fabs(p2.coords.galaxy - p1.coords.galaxy)
else:
if p1.coords.system != p2.coords.system:
distance = 2700 + 95 * math.fabs(p2.coords.system - p1.coords.system)
else:
if p1.coords.planet != p2.coords.planet:
distance = 1000 + 5 * math.fabs(p2.coords.planet - p1.coords.planet)
else:
raise ValueError
return distance
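# Hedged usage sketch: the real Location/coords classes live elsewhere in the
# project (and must be importable for the annotations above to resolve); the
# namedtuples below merely duck-type the galaxy/system/planet fields.
from collections import namedtuple

_Coords = namedtuple("_Coords", "galaxy system planet")
_Loc = namedtuple("_Loc", "coords")

p_a = _Loc(_Coords(galaxy=1, system=5, planet=3))
p_b = _Loc(_Coords(galaxy=1, system=9, planet=3))
print(calc_distance(p_a, p_b))  # 2700 + 95 * |9 - 5| = 3080.0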
|
725d7401fee19925b8b154495dc17368b19534fc
| 3,642,561
|
def reset_noise_model():
"""Return test reset noise model"""
noise_model = NoiseModel()
error1 = thermal_relaxation_error(50, 50, 0.1)
noise_model.add_all_qubit_quantum_error(error1, ['u1', 'u2', 'u3'])
error2 = error1.tensor(error1)
noise_model.add_all_qubit_quantum_error(error2, ['cx'])
return NoiseWithDescription(noise_model, "Reset Noise")
|
967ce6da9328f4ed4635d72f67036befde661bcb
| 3,642,562
|
import pandas as pd
def parse_to_timestamp(dt_string):
"""Attempts to parse to Timestamp.
Parameters
----------
dt_string: str
Returns
-------
pandas.Timestamp
Raises
------
ValueError
If the string cannot be parsed to timestamp, or parses to null
"""
timestamp = pd.Timestamp(dt_string)
if pd.isnull(timestamp):
raise ValueError
if timestamp.tzinfo is None:
        # consistent with schema ISODateTime
timestamp = timestamp.tz_localize('UTC')
return timestamp
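# Quick usage sketch: naive inputs are localized to UTC, timezone-aware inputs
# keep their offset, and unparseable or null inputs raise ValueError.
print(parse_to_timestamp("2020-01-01 12:00"))        # 2020-01-01 12:00:00+00:00
print(parse_to_timestamp("2020-01-01T12:00+02:00"))  # 2020-01-01 12:00:00+02:00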
|
766a1028b97e3a5c96430c9fcc5da9610315767a
| 3,642,563
|
def __factor(score, items_sum, item_count):
"""Helper method for the pearson correlation coefficient algorithm."""
return score - items_sum/item_count
|
2f92b4a5be4375e3083ace9b3855a170f7372460
| 3,642,564
|
from typing import Callable
from typing import Tuple
import numpy as np
def cross_validate(estimator: BaseEstimator, X: np.ndarray, y: np.ndarray,
                   scoring: Callable[..., float], cv: int = 5) -> Tuple[float, float]:
"""
Evaluate metric by cross-validation for given estimator
Parameters
----------
estimator: BaseEstimator
Initialized estimator to use for fitting the data
X: ndarray of shape (n_samples, n_features)
Input data to fit
y: ndarray of shape (n_samples, )
Responses of input data to fit to
scoring: Callable[[np.ndarray, np.ndarray, ...], float]
Callable to use for evaluating the performance of the cross-validated model.
When called, the scoring function receives the true- and predicted values for each sample
and potentially additional arguments. The function returns the score for given input.
cv: int
Specify the number of folds.
Returns
-------
train_score: float
Average train score over folds
validation_score: float
Average validation score over folds
"""
rows, cols = X.shape
group_length = int(rows / cv)
train_error = []
test_error = []
for i in range(cv):
fold_delimiter = (i + 1) * group_length
S_divided_by_S_i_X = np.concatenate((X[:(fold_delimiter - group_length)], X[fold_delimiter:]))
S_divided_by_S_i_y = np.concatenate((y[:(fold_delimiter - group_length)], y[fold_delimiter:]))
estimator.fit(S_divided_by_S_i_X, S_divided_by_S_i_y)
train_pred = estimator.predict(S_divided_by_S_i_X)
test_pred = estimator.predict(X[(fold_delimiter - group_length):fold_delimiter])
train_error.append(scoring(S_divided_by_S_i_y, train_pred))
test_error.append(scoring(y[(fold_delimiter - group_length):fold_delimiter], test_pred))
return np.average(train_error), np.average(test_error)
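# Hedged usage sketch: the project's BaseEstimator is assumed to be importable
# so the annotation above resolves; scikit-learn objects stand in here because
# anything exposing fit/predict works, and mean_squared_error matches the
# scoring(y_true, y_pred) signature.
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(100, 3))
y_demo = X_demo @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=100)
train_mse, val_mse = cross_validate(LinearRegression(), X_demo, y_demo,
                                    scoring=mean_squared_error, cv=5)
print(train_mse, val_mse)  # both should be close to the noise variance (~0.01)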
|
2d656171a2043cdc7fd07ad199c6e0598de43900
| 3,642,565
|
from pathlib import Path
import sys
def get_img_path() -> Path:
"""
Gets the path of the Mac installation image from the command line
arguments. Fails with an error if the argument is not present or the given
file doesn't exist.
"""
args = sys.argv
if len(args) < 2:
sys.exit(
"Please provide the path to the Fuzzlecheck image as argument."
)
if len(args) > 2:
sys.exit("More arguments provided than needed")
rsl = Path(args[1])
if not rsl.exists():
sys.exit("Given dmg image ({}) doesn't exist.".format(args[1]))
return rsl
|
2ba4ac7f7954cbe2ed76824da41a13b8e6d0b4d9
| 3,642,566
|
def global_info(request):
"""存放用户,会话信息等."""
loginUser = request.session.get('login_username', None)
if loginUser is not None:
user = users.objects.get(username=loginUser)
# audit_users_list_info = WorkflowAuditSetting.objects.filter().values('audit_users').distinct()
audit_users_list_info = WorkflowAuditSetting.objects.filter(Q(workflow_type=1) | Q(workflow_type=2)).values('audit_users').distinct()
project_leaders_list = [ leaders['group_leader'] for leaders in Group.objects.all().values('group_leader').distinct() ]
audit_users_list = []
for i in range(len(audit_users_list_info)):
if ',' in audit_users_list_info[i]['audit_users']:
audit_users_list += audit_users_list_info[i]['audit_users'].split(',')
else:
audit_users_list.append(audit_users_list_info[i]['audit_users'])
UserDisplay = user.display
leftMenuBtns = leftMenuBtnsCommon
if UserDisplay == '':
UserDisplay = loginUser
if user.is_superuser:
leftMenuBtns = leftMenuBtns + leftMenuBtnsProject + leftMenuBtnsAuditor + leftMenuBtnsSuper + leftMenuBtnsDoc
if loginUser in audit_users_list:
leftMenuBtns = leftMenuBtns + leftMenuBtnsAuditor
if loginUser in project_leaders_list:
leftMenuBtns = leftMenuBtns + leftMenuBtnsProject
else:
leftMenuBtns = ()
UserDisplay = ''
return {
'loginUser': loginUser,
'leftMenuBtns': leftMenuBtns,
'UserDisplay': UserDisplay,
'ACCESS_ITOM_ADDR': ACCESS_ITOM_ADDR
}
|
12ac1b296fed2a89fbefd8630da00c585e531e8d
| 3,642,567
|
def arcsech(val):
"""Inverse hyperbolic secant"""
return np.arccosh(1. / val)
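# Tiny sanity check: arcsech should invert the hyperbolic secant, so
# arcsech(1 / cosh(x)) recovers x for x > 0. numpy is imported here because
# the snippet assumes np is already available.
import numpy as np

x = 1.5
print(arcsech(1.0 / np.cosh(x)))  # ~1.5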
|
e1a90e1dec3ad7a2e427dc0e93421fd9b9f2b061
| 3,642,568
|
def addEachAtomAutocorrelationMeasures(coordinates, numAtoms):
"""
Computes sum ri*rj, and ri = coords for a single trajectory. Results are stored in different array indexes for different atoms.
"""
rirj = np.zeros((1, 3*numAtoms), dtype=np.float64)
ri = np.zeros((1, 3*numAtoms), dtype=np.float64)
rirjMeasures = np.zeros((1, 3*numAtoms))
riMeasures = np.zeros((1, 3*numAtoms))
#convert to 2d matrix
coordinates = coordinates.reshape((coordinates.shape[0], 3*coordinates.shape[1]))
#add rows if necessary
rirj.resize((coordinates.shape[0], rirj.shape[1]))
ri.resize((coordinates.shape[0], ri.shape[1]))
rirjMeasures.resize((coordinates.shape[0], rirjMeasures.shape[1]))
riMeasures.resize((coordinates.shape[0], riMeasures.shape[1]))
for i in range(3*numAtoms):
#resize vector and don't initialise up
result, measures = correlate(coordinates[:,i])
rirj[:,i] += result
ri[:,i] += coordinates[:,i]
rirjMeasures[:,i] += measures
riMeasures[:,i] += np.ones(len(measures))
return rirj, ri, rirjMeasures, riMeasures
|
7413cad78c159af961784f7d2d221ea16c9c9c32
| 3,642,569
|
def api_posts_suggest(request):
"""サジェスト候補の記事をJSONで返す。"""
keyword = request.GET.get('keyword')
if keyword:
post_list = [{'pk': post.pk, 'title': post.title} for post in Post.objects.filter(title__icontains=keyword)]
else:
post_list = []
return JsonResponse({'post_list': post_list})
|
328ec626a6ed477b29ff7f31c82321643a551004
| 3,642,570
|
import os
import configparser
def parse_config(args): # pylint: disable=R0912
"""parses the gitconfig file."""
configpath = os.path.join(HOME, ".gitconfig_2")
# config = configobj.ConfigObj(configpath, interpolation=None, indent_type="\t")
config = configparser.ConfigParser(interpolation=None)
config.read(configpath)
if "core" not in config:
config["core"] = {}
if "attributesfile" not in config["core"]:
config["core"]["attributesfile"] = "~/.gitattributes"
attributespath = config["core"]["attributesfile"]
if 'diff "kettle"' not in config:
config['diff "kettle"'] = {}
if "textconv" not in config['diff "kettle"'] and \
"xfuncname" not in config['diff "kettle"']:
if args.exe:
config['diff "kettle"']["textconv"] = "\"'{}'\"".format(
os.path.join(os.getcwd(), "kettlediff.exe").replace("\\", "/"))
else:
config['diff "kettle"']["textconv"] = "python \"'{}'\"".format(
os.path.join(os.getcwd(), "kettlediff.py").replace("\\", "/"))
config['diff "kettle"']["xfuncname"] = (
"< name > (.*) < /name > | < order > | < hops > )")
else:
print("already diff for kettle in .gitconfig!")
if 'diff "prpt"' not in config:
config['diff "prpt"'] = {}
if "textconv" not in config['diff "prpt"'] and \
"xfuncname" not in config['diff "prpt"']:
if args.exe:
config['diff "prpt"']["textconv"] = "\"'{}'\"".format(
os.path.join(os.getcwd(), "kettlediff.exe").replace("\\", "/"))
else:
config['diff "prpt"']["textconv"] = "python \"'{}'\"".format(
os.path.join(os.getcwd(), "kettlediff.py").replace("\\", "/"))
config['diff "prpt"']["xfuncname"] = ".*name=.*"
else:
print("already diff for prpt in .gitconfig!")
if not args.write:
print("Template for .gitconfig:\n---------------------------")
        for section in [section for section in config if section != "DEFAULT"]:
print("[{}]".format(section))
for key, item in config[section].items():
print("\t{} = {}".format(key, item))
print("---------------------------")
else:
with open(configpath, "w") as file:
for section in config:
print("[{}]".format(section), file=file)
for key, item in config[section].items():
print("\t{} = {}".format(key, item), file=file)
return attributespath
|
a1fdb77939e7b9092226e8e631085cfbf90bba07
| 3,642,571
|
from os.path import expanduser
import configparser
def _get_config_from_ini_file(f):
"""Load params from the specified filename and return the params as a
dictionary"""
filename = expanduser(f)
_log.debug('Loading parms from {0}'.format(filename))
config = configparser.ConfigParser()
config.optionxform = str
config.read(filename)
object_list = config.sections()
params = {}
for config_object in object_list:
o = ConfigObject(name=config_object)
all_attributes = config.options(config_object)
for attribute in all_attributes:
value = config.get(config_object, attribute)
# Replace the AWSH_ROOT variable with the current value if present
value = value.replace('$AWSH_ROOT', CONST_AWSH_ROOT)
_log.debug('ConfigObject[{0}] {1}: {2}'.format(config_object, attribute, value))
o.add_property(attribute, value)
params[o.name] = o
return params
|
94c99efdc38e75ed03ced9822271551b3343e8c3
| 3,642,572
|
from typing import Tuple
from typing import List
def read_task_values(task_result: TaskResult) -> Tuple[bool, str, List[float], int]:
"""Reads unitary and federated accuracy from results.json.
    Args:
        task_result (TaskResult): Result object containing the required fields.
    Example:
        >>> print(read_task_values(task_result))
        (False, "VisionTask", [0.12, 0.33], 5)
Returns:
~typing.Tuple[bool, str, ~typing.List[float], int]: Tuple consisting of information,
if the task is unitary or not, the task label, a list of accuracies and the epochs.
"""
return (
task_result.is_unitary(),
task_result.get_label(),
task_result.get_accuracies(),
task_result.get_E(),
)
|
3b562709492c4bfad834236aba1ed97ee3dcc393
| 3,642,573
|
def get_two_dots(full=1):
""" return all posible simple two-dots """
bg= bottomGates()
two_dots = [dict({'gates':bg[0:3]+bg[2:5]})] # two dot case
for td in two_dots:
td['name'] = '-'.join(td['gates'])
return two_dots
|
3529e4b6f1056930f0cc864d86ebe8ec5bfbd9b6
| 3,642,574
|
def read_values_of_line(line):
"""Read values in line. Line is splitted by INPUT_FILE_VALUE_DELIMITER."""
if INPUT_FILE_VALUE_DELIMITER == INPUT_FILE_DECIMAL_DELIMITER:
exit_on_error(f"Input file value delimiter and decimal delimiter are equal. Please set INPUT_FILE_VALUE_DELIMITER and INPUT_FILE_DECIMAL_DELIMITER.")
# Clean line
line = line.rstrip('\n').rstrip('\r')
# Split line by value delimiter
values = line.split(INPUT_FILE_VALUE_DELIMITER)
return values
|
5450211b5aa10bbf7cca208ddce511b688abeb85
| 3,642,575
|
def find_allergens(ingredients):
"""Return ingredients with cooresponding allergen."""
by_allergens_count = sorted(ingredients, key=lambda i: len(ingredients[i]))
for ingredient in by_allergens_count:
if len(ingredients[ingredient]) == 1:
for other_ingredient, allergens in ingredients.items():
if ingredient == other_ingredient:
continue
ingredients[other_ingredient] = (allergens
- ingredients[ingredient])
return {
ingredient: allergen.pop()
for ingredient, allergen in ingredients.items()
}
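# Illustrative run on a toy mapping from ingredient -> set of candidate
# allergens (the shape this function expects); the names are invented.
toy_ingredients = {
    "mxmxvkd": {"dairy", "fish"},
    "sqjhc": {"fish", "soy"},
    "fvjkl": {"soy"},
}
print(find_allergens(toy_ingredients))
# {'mxmxvkd': 'dairy', 'sqjhc': 'fish', 'fvjkl': 'soy'}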
|
b5fde42cae0138f3bd819eb60629bb2d7ddf2f38
| 3,642,576
|
def send_calendar_events():
"""Sends calendar events."""
error_msg = None
try:
with benchmark("Send calendar events"):
builder = calendar_event_builder.CalendarEventBuilder()
builder.build_cycle_tasks()
sync = calendar_event_sync.CalendarEventsSync()
sync.sync_cycle_tasks_events()
except Exception as exp: # pylint: disable=broad-except
logger.error(exp.message)
error_msg = exp.message
return utils.make_simple_response(error_msg)
|
501941c968e67178cd2f1d7c6eac293e1617e4a5
| 3,642,577
|
import os
import json
def custom_precision(labels: np.array, predictions: np.array, exp_path: str, exp_name: str):
"""
Calculate custom precision value.
Parameters
----------
exp_name : str
experiment name
exp_path : str
path to experiment folder
labels : np.array
predictions : np.array
Returns
-------
float:
average precision
"""
label_list = []
for label in labels:
for lab in label:
label_list.append(lab)
num_labels = len(set(label_list))
# https://www.researchgate.net/figure/Confusion-matrix-for-multi-class-classification-The-confusion-matrix-of-a_fig7_314116591
confusion_matrix = _confusion_matrix(labels, predictions, num_labels)
precisions = []
for label in range(num_labels):
true_positive = confusion_matrix[label, label]
false_positive = np.sum(confusion_matrix[:, label]) - confusion_matrix[label, label]
if (true_positive + false_positive) != 0:
precisions.append(true_positive / (true_positive + false_positive))
else:
precisions.append(0)
print('Precision:', label)
if exp_path:
file_name = 'result_precison_single' + exp_name + '.json'
with open(os.path.join(exp_path, file_name), "w") as fp:
json.dump(precisions, fp)
return np.mean(precisions)
|
8d699f48efd762f3d010e22d964ed3c342425589
| 3,642,578
|
import os
def detect_or_create_config(config_file, output_root, theseargs,
newname=None, logger=None):
""" return path to config file, or die trying.
if a config file exists, just return the path. If not,
create it using "make_riboSeed_config.py"
"""
assert logger is not None, "must use logging"
if not os.path.isfile(config_file):
logger.debug("creating config file")
make_config_args = Namespace(
outdir=output_root,
name=newname)
config_file = mrc.main(make_config_args)
add_these_params_to_config(config_file=config_file,
args=theseargs)
else:
logger.info("using provided config file! ignoring any other args" +
"provided via commandline")
return config_file
|
75167be82d65014749354697fdd4a7c42689ed0a
| 3,642,579
|
def gaussian(x, p):
"""
Gaussian function
@param x : variable
@param p : parameters [height, center, sigma]
"""
    return p[0] * (1/np.sqrt(2*np.pi*(p[2]**2))) * np.exp(-(x-p[1])**2/(2*p[2]**2))
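# Quick check of the normalisation: at the centre the value is
# p[0] / (p[2] * sqrt(2*pi)), and the function also accepts numpy arrays.
# numpy is imported here because the snippet assumes np is already available.
import numpy as np

p = [2.0, 0.0, 1.0]   # height, center, sigma
print(gaussian(0.0, p))                          # ~0.7979 (= 2 / sqrt(2*pi))
print(gaussian(np.array([-1.0, 0.0, 1.0]), p))   # vectorised evaluation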
|
1a42fabe572d9be2794ccf6e06c8ba2c0cc162fc
| 3,642,580
|
def get_exploration_ids_subscribed_to(user_id):
"""Returns a list with ids of all explorations that the given user
subscribes to.
WARNING: Callers of this function should ensure that the user_id is valid.
Args:
user_id: str. The user ID of the subscriber.
Returns:
list(str). IDs of all explorations that the given user
subscribes to.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.exploration_ids
if subscriptions_model else [])
|
2d8ff3570705ffd59fbf5a83ae23a4b07f49c6a7
| 3,642,581
|
def generate_trials(olh_samples: np.array, roi_space: Space) -> list[dict]:
"""
Generates trials from the given normalized orthogonal Latin hypercube samples
Parameters
----------
olh_samples: `numpy.array`
Samples from the orthogonal Latin hypercube
roi_space: orion.algo.space.Space
Parameter space region-of-interest
Returns
-------
A list of trials as `dict` objects, each a list of parameter values in the
original search space
"""
trials = []
for sample in olh_samples:
trial_dict = {}
for j, param_name in enumerate(roi_space.keys()):
interval_min, interval_max = roi_space[param_name].interval()
# TODO: deal with categoricals
trial_dict[param_name] = (
sample[j] * (interval_max - interval_min) + interval_min
)
trials.append(trial_dict)
return trials
|
db58954fffc4b4dfdf57a43eb4b41bee4b87fb33
| 3,642,582
|
def check_all_flash(matrix_2d):
"""
    Check whether any octopus has yet to flash.
    :param matrix_2d: 2D matrix
    :return: True if at least one value is non-zero (someone has not flashed yet),
        False once every value is zero (everyone flashed).
"""
for line in matrix_2d:
for digit in line:
if digit != 0:
return True
return False
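# Small demonstration of the return convention documented above: True while
# some octopus still has a non-zero value, False once the grid is all zeros.
print(check_all_flash([[0, 3], [0, 0]]))  # True  (a non-zero value remains)
print(check_all_flash([[0, 0], [0, 0]]))  # False (everyone flashed)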
|
9dca0174cd0272773e9b9330977bd3fac86f413a
| 3,642,583
|
import os
import torch
def train(request, pretrained):
"""
Train a model, and save it to disk.
"""
if pretrained:
return dash.no_update
dataset = get_dataset(request["dataset_name"])
train_set = dataset["train_set"]
val_set = dataset["val_set"]
lake_set = dataset["lake_set"]
n_classes = dataset["n_classes"]
# TODO: allow multiple examples in query set?
# TODO: allow query examples that aren't in train set (from lake, or uploaded)
trainloader = DataLoader(train_set, batch_size=TRAIN_BATCH_SIZE, shuffle=True, pin_memory=True)
valloader = DataLoader(val_set, batch_size=VAL_BATCH_SIZE, shuffle=False, pin_memory=True)
print(f"Number of labeled examples: {len(train_set)}. Number of unlabeled examples: {len(lake_set)}")
result_dict = {}
model = get_model(request["model_name"], n_classes, DEVICE, EMBEDDING_TYPE)
optimizer = get_optimizer(model)
criterion = nn.CrossEntropyLoss()
# Get the initial model by training
# In the future, we may choose to save the initial model to disk.
print("Beginning training")
for i_epoch in range(EPOCHS_PER_ROUND):
model.train()
for inputs, targets in trainloader:
inputs, targets = inputs.to(DEVICE), targets.to(DEVICE, non_blocking=True)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# TODO: Add some kind of progress bar or accuracy display for responsiveness
# TODO: Report initial train/val accuracy for plotting
print("Training done")
# Save trained model to disk
model_path = os.path.join("models/", request["dataset_name"], request["model_name"])
os.makedirs(model_path, exist_ok=True)
model_path = os.path.join(model_path, "saved.pth")
torch.save(model, model_path)
# Compute values for logging (train/val accuracy)
with torch.no_grad():
model.eval()
for loader, name in [(trainloader, "train"), (valloader, "val")]:
examples_total = 0
examples_correct = 0
for inputs, targets in loader:
inputs, targets = inputs.to(DEVICE), targets.to(DEVICE, non_blocking=True)
outputs = model(inputs)
_, predicted = outputs.max(1)
examples_total += targets.size(0)
examples_correct += predicted.eq(targets).sum().item()
result_dict[name + "_accuracy"] = examples_correct/examples_total
return dash.no_update
|
82126fcc69719f43b041fe3e1620af54ffd615ad
| 3,642,584
|
def cache_get(cache, key, fcn, force=False):
"""Get key from cache, or compute one."""
if cache is None:
cache = {}
if force or (key not in cache):
cache[key] = fcn()
return cache[key]
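# Small demo: the computation runs only on the first lookup (or when
# force=True); later calls reuse the cached value. Note that passing
# cache=None gives a throwaway dict, so nothing is memoised across calls.
call_count = []

def _expensive():
    call_count.append(1)
    return 42

shared_cache = {}
print(cache_get(shared_cache, "answer", _expensive))  # 42 (computed)
print(cache_get(shared_cache, "answer", _expensive))  # 42 (cached)
print(len(call_count))                                # 1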
|
b358bf01dc657d8cd983830d18ef5a85a48d69ec
| 3,642,585
|
def _BreadthFirstSearch(to_visit, children, visited_key=lambda x: x):
"""Runs breadth first search starting from the nodes in |to_visit|
Args:
to_visit: the starting nodes
children: a function which takes a node and returns the nodes adjacent to it
visited_key: a function for deduplicating node visits. Defaults to the
identity function (lambda x: x)
Returns:
A list of nodes which are reachable from any node in |to_visit| by calling
|children| any number of times.
"""
to_visit = list(to_visit)
seen = set(map(visited_key, to_visit))
for node in to_visit:
for child in children(node):
key = visited_key(child)
if key not in seen:
seen.add(key)
to_visit.append(child)
return to_visit
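# Example on a small adjacency mapping; the children callback just looks up
# a node's neighbours (names here are invented).
demo_graph = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}
print(_BreadthFirstSearch(["a"], lambda node: demo_graph.get(node, [])))
# ['a', 'b', 'c', 'd']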
|
1c7153f61af81bb4bd9a06e0213bfcee4aab5cb8
| 3,642,586
|
def get_receptor_from_receptor_ligand_model(receptor_ligand_model):
"""
This function obtains the name of receptor based on receptor_ligand_model
Example of input: compl_ns3pro_dm_0_-_NuBBE_485_obabel_3D+----+20
"""
separator_model = get_separator_filename_mode()
separator_receptor = "_-_"
string_ref = receptor_ligand_model
receptor_name = string_ref.split(separator_receptor)[0] #Removing all, except receptor name
return receptor_name
|
58b83ad160181bb04c533feb7fa3d37de44303ef
| 3,642,587
|
import tokenize
def build_model():
"""
Selects the best model with optimal parameters
"""
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('svd', TruncatedSVD()),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier(class_weight='balanced')))
])
parameters = {'clf__estimator__n_estimators': [25, 50],
'clf__estimator__max_depth': [10, 25]}
cv_rf = GridSearchCV(pipeline, parameters)
return cv_rf
|
99aea3019fd55b84f7252e0c72cc7bf2bdb8d4e6
| 3,642,588
|
import copy
def monthly_expenses(costs, saleprice, propvalue, boyprincipal, rent):
"""Calculate monthly expenses
costs list of MonthlyCost objects
saleprice sale price of the property
propvalue actual value of the property
boyprincipal principal at beginning of year to calculate for
rent projected monthly rent for the property
"""
expenses = []
if not costs:
return expenses
# deepcopy the costs during *every* iteration,
# or else we keep doing expenses.append(cost) on the same cost each time
for cost in copy.deepcopy(costs):
# First, check our inputs
if cost.calctype == costconfig.CostCalculationType.DOLLAR_AMOUNT and cost.value is None:
raise Exception(
f"The {cost.label} MonthlyCost calctype is DOLLAR_AMOUNT, "
"but with an empty value property")
elif (
cost.calctype is not costconfig.CostCalculationType.DOLLAR_AMOUNT and
cost.calc is None):
raise Exception(
f"The {cost.label} MonthlyCost calctype is {cost.calctype}, "
"but with an empty calc property")
# Now calculate what can be calculated now
# Don't calculate LOAN_FRACTION or INTEREST_MONTHS calctypes here,
# because any PRINCIPAL paytypes will affect their value
if cost.calctype is costconfig.CostCalculationType.DOLLAR_AMOUNT:
pass
elif cost.calctype is costconfig.CostCalculationType.YEARLY_PRINCIPAL_FRACTION:
cost.value = boyprincipal * cost.calc / mmath.MONTHS_IN_YEAR
elif cost.calctype is costconfig.CostCalculationType.SALE_FRACTION:
cost.value = saleprice * cost.calc
elif cost.calctype is costconfig.CostCalculationType.VALUE_FRACTION:
cost.value = propvalue * cost.calc
elif cost.calctype is costconfig.CostCalculationType.MONTHLY_RENT_FRACTION:
cost.value = rent * cost.calc
elif cost.calctype is costconfig.CostCalculationType.CAPEX:
cost.value = cost.calc.monthly
else:
raise NotImplementedError(
f"Cannot process a cost with a calctype of {cost.calctype}")
# logger.info(f"Calculating monthy expense: {cost}")
expenses.append(cost)
return expenses
|
92887f624ce8d87d2f787087f6bb23f80c8186c7
| 3,642,589
|
def get_spitfire_template_class(prefer_c_extension=True):
"""Returns an appropriate SpitfireTemplate class.
Args:
prefer_c_extension: If set True and _template loaded properly, use the
C extension's baseclass implementation.
Returns:
A SpitfireTemplate class with an appropriate base class.
"""
if prefer_c_extension and _template is not None:
baseclass = _template.BaseSpitfireTemplate
else:
baseclass = _BaseSpitfireTemplate
class _SpitfireTemplate(baseclass):
# store a reference to the filter function - this is tricky because of
# some python stuff. filter functions look like this:
#
# def filter_function(template_instance, value):
#
# when this is assigned to a template instance, accessing this name
# binds the function to the current instance. using the name
# 'template_instance' to indicate that these functions aren't really
# related to the template.
_filter_function = staticmethod(filters.simple_str_filter)
repeat = None
placeholder_cache = None
def __init__(self,
search_list=None,
default_filter=None,
use_placeholder_cache=False):
# use_placeholder_cache - cache the values returned from the
# search_list? The cached values will live for the lifetime of
# this object.
self.search_list = search_list
if use_placeholder_cache:
self.placeholder_cache = {}
if default_filter is not None:
self._filter_function = default_filter
# FIXME: repeater support is not needed most of the time, just
# disable it for the time being
# self.repeat = spitfire.runtime.repeater.RepeatTracker()
def get_var(self, name, default=None):
return udn.resolve_from_search_list(self.search_list, name, default)
def has_var(self, name):
var = self.get_var(name, default=runtime.UnresolvedPlaceholder)
return var is not runtime.UnresolvedPlaceholder
@staticmethod
def new_buffer():
return BufferIO()
return _SpitfireTemplate
|
f5e4c8e285d070b3b0c5716335fef42aef57e845
| 3,642,590
|
def _create_player_points(
pool: pd.DataFrame,
teams: np.ndarray,
n_iterations: int,
n_teams: int,
n_players: int,
team_points: np.ndarray
) -> np.ndarray:
"""Calculates playerpoints
Args:
pool (pd.DataFrame): the player pool
statscols (Iterable[str]): the statistics columns
teams (np.ndarray): the teams
Returns:
np.ndarray
"""
# now need to link back to players
players = pool.index.values
# once we've calculated stats, can remove league dimension from teams
# is just a 2D array of teams
# if you flatten teampoints, get 1D array lines up with 2D teams
teams2d = teams.reshape(n_iterations * n_teams, n_players)
team_points1d = team_points.ravel()
# creates array of shape (len(teams2d), len(players))
# is effectively one hot encoder for player indexes
# if player index 3 is on team 0, then on_team[0, 3] == 1
on_team = (players[...,None]==teams2d[:,None,:]).any(-1).astype(int)
# now we can calculate player points by multiplying
# matrix of zeroes and ones with team points
return on_team * team_points1d[:, np.newaxis]
|
c6b7d55ce5041552a113436faf450ca96c84a15e
| 3,642,591
|
def get_SolverSettings(instance):
""" get solver settings """
instance.sSolver = ""
instance.dicPyomoOption = {}
instance.dicSolverOption = {}
file_setting = "../Input/1_model_config/01_SolverConfig.csv"
dt_data = genfromtxt(file_setting, dtype = str, skip_header=0, delimiter=',')
for sSetting in dt_data:
if sSetting[0] == "Solver":
instance.sSolver = sSetting[2]
elif sSetting[0] == "Pyomo options" and sSetting[3] == "1":
instance.dicPyomoOption[sSetting[1]] = sSetting[2]
elif sSetting[0] == "Solver options" and sSetting[3] == "1":
instance.dicSolverOption[sSetting[1]] = sSetting[2]
return instance
|
aa9a1299d3a1a1ea7c3f2315117b097641ed22be
| 3,642,592
|
import signal
def demod_from_array(mod_array, faraday_sampling_rate = 5e6, start_time = 0, end_time = 'max', reference_frequency = 736089.8, reference_phase_deg = 0, lowpas_freq = 10000, plot_demod = False, decimate_factor = 4, time_stamp = '', label = '', save = False):
"""
    Demodulates Faraday-rotation data supplied as a numpy array. In summary it
    multiplies the data with a reference waveform, then low-pass filters and
    decimates the result. Demodulation requires a phase and frequency to be
    entered. The decimation is done to fit the waveform into the limited memory of
    an arbitrary waveform generator. Along the way there are options to plot graphs
    in order to check outputs. It returns the demodulated waveform in a numpy array.
    Arguments;
    mod_array - Numpy array of modulated Faraday data to demodulate
faraday_sampling_rate - Sampling rate the data was recorded at
start_time - Selecting the start time of the data in the h5 file after which data will
be data
end_time - Selecting the end time of the data before which data will be included. Enter 'max' for
the entire array
reference_frequency - Frequency of the reference waveform used for demodulation
reference_phase_deg - Phase of the reference waveform used for demodulation.
lowpas_freq - frequency of the lowpass filter. May be the 6db point, don't know.
plot_demod - Plots a figure of the demodulated data
decimate_factor - Multiple for decimating the data.
"""
""" Creating the reference """
Faraday_clipped = mod_array
dt = 1/faraday_sampling_rate
time_axis = np.arange(len(Faraday_clipped))*dt
# Decimate input data to help filter behave better
# Faraday_clipped = signal.decimate(Faraday_clipped, 100, n=None, ftype='iir', axis=-1, zero_phase=True)
# time_axis = signal.decimate(time_axis, 100, n=None, ftype='iir', axis=-1, zero_phase=True)
# dt = 1/faraday_sampling_rate
reference = np.sin(2*np.pi*reference_frequency*time_axis + reference_phase_deg*np.pi/180)
decimate_factor = 10
""" Multiplying Faraday with reference and lowpassing """
multiplied_waves = np.multiply(Faraday_clipped, reference)
# Decimate input data for LPFilter
# NOTE: decimated fector must be under a factor of 13, so to get 100 we do 10 twice
dec_data = signal.decimate(multiplied_waves, decimate_factor, n=None, ftype='iir', axis=-1, zero_phase=True)
dec_dec_data = signal.decimate(dec_data, decimate_factor, n=None, ftype='iir', axis=-1, zero_phase=True)
# dec_time = signal.decimate(time_axis, decimate_factor, n=None, ftype='iir', axis=-1, zero_phase=True)
# dec_dec_time = signal.decimate(dec_time, decimate_factor, n=None, ftype='iir', axis=-1, zero_phase=True)
demod_time = np.arange(len(dec_dec_data))*dt*decimate_factor*decimate_factor
# Feed into Low pass filter
demodulated = LPFilter(dec_dec_data, lowpas_freq, dt*decimate_factor*decimate_factor)
""" Figures to check demodulation """
fNameDemod = str(time_stamp) + '_Fx_demodulated'
if plot_demod == True:
plt.figure(4, figsize = (10,7.5))
# plt.plot(time_axis, multiplied_waves, label = label)
plt.plot(demod_time, demodulated, label = label)
plt.xlabel('Time (s)')
plt.ylabel('Demodulated <Fx>')
# plt.xlim(time_axis[0], time_axis[-1])
plt.title(fNameDemod)
plt.grid()
if save is True:
path = 'C:/Users/Boundsy/Desktop/Uni Work/PHS2360/Sim Results/' + str(fNameDemod) + '.png'
print('Demodulated Fx plot saved to Sim Results folder')
plt.savefig(path)
fNamePeriodogram = str(time_stamp) + '_demod_periodogram'
freq, amp = make_periodogram(demodulated, faraday_sampling_rate/100, fNamePeriodogram, saveFig = save, plot_graph = plot_demod, start_freq = 0, end_freq = 20000, label = label)
""" decimating data """
# decimated_demod = signal.decimate(demodulated, 100, n=None, ftype='iir', axis=-1, zero_phase=True)
# return decimated_demod
return demodulated
|
7661ba4dbb39a5a3bd2336e386605b1361a6b287
| 3,642,593
|
import numpy
import os
def find_many_files_gridrad(
top_directory_name, radar_field_names, radar_heights_m_agl,
start_time_unix_sec, end_time_unix_sec, one_file_per_time_step=True,
raise_error_if_all_missing=True):
"""Finds many files with storm-centered images from GridRad data.
T = number of "file times"
If `one_file_per_time_step = True`, T = number of time steps
Else, T = number of SPC dates
F = number of radar fields
H = number of radar heights
:param top_directory_name: Name of top-level directory for storm-centered
images.
:param radar_field_names: length-F list with names of radar fields.
:param radar_heights_m_agl: length-H numpy array of radar heights (metres
above ground level).
:param start_time_unix_sec: See doc for `find_many_files_myrorss_or_mrms`.
:param end_time_unix_sec: Same.
:param one_file_per_time_step: Same.
:param raise_error_if_all_missing: Same.
:return: file_dict: Dictionary with the following keys.
file_dict['image_file_name_matrix']: T-by-F-by-H numpy array of paths to
image files.
file_dict['valid_times_unix_sec']: length-T numpy array of valid times. If
`one_file_per_time_step = False`, valid_times_unix_sec[i] is just a time
within the [i]th SPC date.
file_dict['radar_field_names']: Same as input.
file_dict['radar_heights_m_agl']: Same as input.
"""
error_checking.assert_is_numpy_array(
numpy.array(radar_field_names), num_dimensions=1
)
for this_field_name in radar_field_names:
radar_utils.check_field_name(this_field_name)
error_checking.assert_is_numpy_array(radar_heights_m_agl, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(radar_heights_m_agl, 0)
radar_heights_m_agl = numpy.round(radar_heights_m_agl).astype(int)
error_checking.assert_is_boolean(one_file_per_time_step)
error_checking.assert_is_boolean(raise_error_if_all_missing)
if one_file_per_time_step:
all_times_unix_sec = time_periods.range_and_interval_to_list(
start_time_unix_sec=start_time_unix_sec,
end_time_unix_sec=end_time_unix_sec,
time_interval_sec=GRIDRAD_TIME_INTERVAL_SEC, include_endpoint=True)
good_indices = numpy.where(numpy.logical_and(
all_times_unix_sec >= start_time_unix_sec,
all_times_unix_sec <= end_time_unix_sec
))[0]
all_times_unix_sec = all_times_unix_sec[good_indices]
all_spc_date_strings = [
time_conversion.time_to_spc_date_string(t)
for t in all_times_unix_sec
]
else:
first_spc_date_string = time_conversion.time_to_spc_date_string(
start_time_unix_sec)
last_spc_date_string = time_conversion.time_to_spc_date_string(
end_time_unix_sec)
all_spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
all_times_unix_sec = numpy.array([
time_conversion.spc_date_string_to_unix_sec(s)
for s in all_spc_date_strings
], dtype=int)
file_dict = {
RADAR_FIELD_NAMES_KEY: radar_field_names,
RADAR_HEIGHTS_KEY: radar_heights_m_agl
}
image_file_name_matrix = None
valid_times_unix_sec = None
valid_spc_date_strings = None
num_fields = len(radar_field_names)
num_heights = len(radar_heights_m_agl)
for j in range(num_fields):
for k in range(num_heights):
print((
'Finding storm-image files for "{0:s}" at {1:d} metres AGL...'
).format(
radar_field_names[j], radar_heights_m_agl[k]
))
if j == 0 and k == 0:
image_file_names = []
valid_times_unix_sec = []
valid_spc_date_strings = []
for i in range(len(all_times_unix_sec)):
if one_file_per_time_step:
this_time_unix_sec = all_times_unix_sec[i]
else:
this_time_unix_sec = None
this_file_name = find_storm_image_file(
top_directory_name=top_directory_name,
unix_time_sec=this_time_unix_sec,
spc_date_string=all_spc_date_strings[i],
radar_source=radar_utils.GRIDRAD_SOURCE_ID,
radar_field_name=radar_field_names[j],
radar_height_m_agl=radar_heights_m_agl[k],
raise_error_if_missing=False)
if not os.path.isfile(this_file_name):
continue
image_file_names.append(this_file_name)
valid_times_unix_sec.append(all_times_unix_sec[i])
valid_spc_date_strings.append(all_spc_date_strings[i])
num_times = len(image_file_names)
if num_times == 0:
if raise_error_if_all_missing:
if one_file_per_time_step:
start_time_string = (
time_conversion.unix_sec_to_string(
start_time_unix_sec, TIME_FORMAT)
)
end_time_string = (
time_conversion.unix_sec_to_string(
end_time_unix_sec, TIME_FORMAT)
)
error_string = (
'Cannot find any files from {0:s} to {1:s}.'
).format(start_time_string, end_time_string)
raise ValueError(error_string)
error_string = (
'Cannot find any files from SPC dates "{0:s}" to '
'"{1:s}".'
).format(
all_spc_date_strings[0], all_spc_date_strings[-1]
)
raise ValueError(error_string)
file_dict.update({
IMAGE_FILE_NAMES_KEY: None, VALID_TIMES_KEY: None
})
return file_dict
image_file_name_matrix = numpy.full(
(num_times, num_fields, num_heights), '', dtype=object
)
image_file_name_matrix[:, j, k] = numpy.array(
image_file_names, dtype=object)
valid_times_unix_sec = numpy.array(
valid_times_unix_sec, dtype=int)
else:
for i in range(len(valid_times_unix_sec)):
if one_file_per_time_step:
this_time_unix_sec = valid_times_unix_sec[i]
else:
this_time_unix_sec = None
image_file_name_matrix[i, j, k] = find_storm_image_file(
top_directory_name=top_directory_name,
unix_time_sec=this_time_unix_sec,
spc_date_string=valid_spc_date_strings[i],
radar_source=radar_utils.GRIDRAD_SOURCE_ID,
radar_field_name=radar_field_names[j],
radar_height_m_agl=radar_heights_m_agl[k],
raise_error_if_missing=True)
file_dict.update({
IMAGE_FILE_NAMES_KEY: image_file_name_matrix,
VALID_TIMES_KEY: valid_times_unix_sec
})
return file_dict
|
0e240dbcd5a4f1a2edf129fd48c692fb4ac76e85
| 3,642,594
|
import click
def cli_num_postproc_workers(
usage_help: str = "Number of workers to post-process the network output.",
default: int = 0,
) -> callable:
"""Enables --num-postproc-workers option for cli."""
return click.option(
"--num-postproc-workers",
help=add_default_to_usage_help(usage_help, default),
type=int,
default=default,
)
|
a34c1f75e26bfe8aca9569dd7e3102d20315deab
| 3,642,595
|
import json
def create_cluster(redshift, iam, ec2, cluster_config, wait_status=False):
""" Create publicly available redshift cluster per provided cluster configuration.
:param redshift: boto.redshift object to use
:param iam: boto.iam object to use
:param ec2: boto.ec2 object to use
:param cluster_config: configparser cluster configuration, from manage_cluster.cfg file
:param wait_status: bool, default is False. Should function wait and repeatedly check if cluster has
reached its desired state.
:return: Returns JSON, if successful, otherwise displays error and returns integer 1
"""
print("Attempting to create a new IAM Role")
iam_role_name = cluster_config['iam_role_name']
try:
iam.create_role(
Path='/',
RoleName=iam_role_name,
Description="Allows Redshift clusters to call AWS services on your behalf.",
AssumeRolePolicyDocument=json.dumps({
'Statement': [
{
'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {
'Service': 'redshift.amazonaws.com'
}
}
],
'Version': '2012-10-17'
})
)
print(f"Role '{iam_role_name}' created")
except iam.exceptions.EntityAlreadyExistsException:
print("Role already exists")
print("Attaching AmazonS3ReadOnlyAccess policy to the role")
iam.attach_role_policy(
RoleName=iam_role_name,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
)
print("Retrieving role ARN")
aws_role_arn = iam.get_role(RoleName=cluster_config['iam_role_name'])['Role']['Arn']
print(f"Role ARN: {aws_role_arn}")
try:
redshift.create_cluster(
# HW
ClusterType=cluster_config['cluster_type'],
NodeType=cluster_config['node_type'],
NumberOfNodes=int(cluster_config['num_nodes']),
# Identifiers & Credentials
DBName=cluster_config['db_name'],
ClusterIdentifier=cluster_config['cluster_identifier'],
MasterUsername=cluster_config['db_user'],
MasterUserPassword=cluster_config['db_password'],
# Roles (for s3 access)
IamRoles=[aws_role_arn]
)
except Exception as e:
print(f"ERROR: {e}")
return 1
if wait_status:
expected_status = 'available'
else:
expected_status = None
cluster_info = get_cluster_status(
redshift,
cluster_config['cluster_identifier'],
expected_status=expected_status
)
print(f"DWH_ENDPOINT :: {cluster_info['Endpoint']['Address']}")
print(f"DWH_ROLE_ARN :: {cluster_info['IamRoles'][0]['IamRoleArn']}")
vpc_id = cluster_info['VpcId']
vpc_cidr_ip = '0.0.0.0/0'
vpc_ip_proto = 'TCP'
vpc_port = int(cluster_config['db_port'])
try:
vpc = ec2.Vpc(id=vpc_id)
default_sec_group = list(vpc.security_groups.all())[0]
print(default_sec_group)
default_sec_group.authorize_ingress(
GroupName=default_sec_group.group_name,
CidrIp=vpc_cidr_ip,
IpProtocol=vpc_ip_proto,
FromPort=vpc_port,
ToPort=vpc_port
)
print(f"VPC {vpc_id} access has been granted to {vpc_ip_proto} {vpc_cidr_ip} "
f"for port {vpc_port}")
    except Exception as e:
        print(f"ERROR: {e}")
        return 1
    return cluster_info
|
2f93e8bcf3c2f9a43a370fb34f513df56dc312e7
| 3,642,596
|
def index():
""" Application entry point. """
return render_template("index.html")
|
fa84b6a2f6ae8c0ef7f976a6e58419913e8cbc1a
| 3,642,597
|
def add_filepath(parent, filename, definition=''):
"""returns the path to filename under `parent`."""
if filename is None:
raise ValueError("filename cannot be None")
# FIXME implementation specifics! only works with FileSystemDatabase
parent_path = parent._repr
file_path = parent_path / filename
parent.attrs[f'{file_path.name}/{DEFINITION_KEY}'] = definition
parent.attrs[f'{file_path.name}/{TYPE_KEY}'] = FILE_TYPE
parent.attrs.commit()
return file_path
|
615dc918a01c4c7f5dc216959f5b3ebe7e1a7b38
| 3,642,598
|
from typing import Callable
import numpy as np
def silhouette_to_prediction_function(
silhouette: np.ndarray
) -> Callable[[np.ndarray], bool]:
"""
Takes a silhouette and returns a function.
    The returned function takes an x,y point and
    returns whether it is inside the silhouette.
    Args:
        silhouette: 2D boolean array marking the pixels inside the silhouette.
    Returns:
        A function mapping a point to True/False.
"""
def prediction_function(point: np.ndarray) -> bool:
try:
return silhouette[int(point[0]), int(point[1])]
except:
return False
return prediction_function
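# Usage sketch: a boolean mask plays the silhouette role, and points are
# (row, col) coordinates; out-of-bounds points fall back to False.
demo_mask = np.zeros((4, 4), dtype=bool)
demo_mask[1:3, 1:3] = True
inside = silhouette_to_prediction_function(demo_mask)
print(inside(np.array([1.2, 2.7])))   # True  (lands in the filled block)
print(inside(np.array([10.0, 0.0])))  # False (index error is caught)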
|
802bf4dc83739fff171178f0b2b95e6900c1f725
| 3,642,599
|