content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def test_user_missing_username():
    """Constructing a User with only a password must raise
    MissingCredentialsException mentioning what is missing."""
    with pytest.raises(exceptions.MissingCredentialsException) as excinfo:
        user.User(password="test")
    assert "missing" in str(excinfo.value)
def sp_integrate_3Dy(func, x, z, ymin, ymax, *args, **kwargs):
    """Perform 1D numerical integration of ``func(x, y, z)`` along the
    y-axis over [ymin, ymax], with x and z held fixed.

    >>> func = ... ## func ( x , y , z )
    ## x , z , ymin , ymax
    >>> print func.sp_integrate_y ( 0.5 , 0.1 , -20 , 20 )
    """
    from ostap.math.integral import integral as _integral

    def _y_slice(y, *extra):
        # Freeze x and z; only y varies during the integration.
        return func(x, y, z, *extra)

    return _integral(_y_slice, ymin, ymax, *args, **kwargs)
def log_exp_sum_1d(x):
    """
    Numerically stable log-sum-exp: computes
    log(exp(x_1) + ... + exp(x_n)) as
    x* + log(exp(x_1 - x*) + ... + exp(x_n - x*)), with x* = max_i x_i,
    which avoids overflow in exp for large inputs.
    """
    shift = x.max()
    # Dispatch to the matching backend (GPU garray vs numpy array).
    backend = gnp if isinstance(x, gnp.garray) else np
    return shift + backend.log(backend.exp(x - shift).sum())
def connect_intense_cells(int_cells, conv_buffer):
    """Merge nearby intense cells if they are within a given
    convective region search radius.

    Parameters
    ----------
    int_cells: (N, M) ndarray
        Pixels associated with intense cells.
    conv_buffer: integer
        Distance to search for nearby intense cells.
        NOTE(review): used as the *iteration count* of a binary closing
        with a fixed disk(3) structuring element, not as a radius —
        confirm this matches the intended search-radius semantics.

    Returns
    -------
    labeled_image1: (N, M) ndarray
        Binary image of merged intense cells. Same dimensions as int_cells.
    """
    cell_mask = int_cells > 0
    return binary_closing(cell_mask, structure=disk(3), iterations=conv_buffer)
def _encode_decimal(value: Union[Decimal, int, str]):
"""
Encodes decimal into internal format
"""
value = Decimal(value)
exponent = value.as_tuple().exponent
mantissa = int(value.scaleb(-exponent))
return {
'mantissa': mantissa,
'exponent': exponent
} | 5,334,104 |
def add_columns(header, c):
    """
    Add any missing columns to the ``samples`` table.

    Parameters
    ----------
    header : iterable of str
        Column definitions (name, optionally followed by a type) to add.
        NOTE: interpolated directly into the SQL statement — callers must
        not pass untrusted input here.
    c : DB-API cursor
        Cursor on the database containing the ``samples`` table.

    Columns that already exist make ALTER TABLE raise; that error is
    deliberately ignored so the call is idempotent.
    """
    for column in header:
        try:
            c.execute('ALTER TABLE samples ADD COLUMN {0}'.format(column))
        except Exception:
            # Was a bare `except:` — narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed. Failure here just means
            # the column already exists (or is otherwise un-addable).
            pass
def test_valid_incremental_read_with_no_interval(mocker, logger):
    """Tests that an incremental read which doesn't specify a checkpoint interval outputs a STATE message only after fully reading the stream and does
    not output any STATE messages during syncing the stream."""
    # Two identical mock streams, each emitting the same two records.
    stream_output = [{"k1": "v1"}, {"k2": "v2"}]
    s1 = MockStream([({"sync_mode": SyncMode.incremental, "stream_state": {}}, stream_output)], name="s1")
    s2 = MockStream([({"sync_mode": SyncMode.incremental, "stream_state": {}}, stream_output)], name="s2")
    state = {"cursor": "value"}
    # Force every MockStream to report the same updated state and to
    # advertise incremental support with a trivial schema.
    mocker.patch.object(MockStream, "get_updated_state", return_value=state)
    mocker.patch.object(MockStream, "supports_incremental", return_value=True)
    mocker.patch.object(MockStream, "get_json_schema", return_value={})
    src = MockSource(streams=[s1, s2])
    catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s1, SyncMode.incremental), _configured_stream(s2, SyncMode.incremental)])
    # Expected ordering: all of a stream's records first, then exactly ONE
    # state message for that stream — no intermediate checkpoints, since no
    # checkpoint interval was configured.
    expected = [
        *_as_records("s1", stream_output),
        _state({"s1": state}),
        *_as_records("s2", stream_output),
        _state({"s1": state, "s2": state}),
    ]
    messages = _fix_emitted_at(list(src.read(logger, {}, catalog, state=defaultdict(dict))))
    assert expected == messages
def plot_diode_fold(dio_cross,bothfeeds=True,feedtype='l',min_samp=-500,max_samp=7000,legend=True,**kwargs):
    """
    Plots the calculated average power and time sampling of ON (red) and
    OFF (blue) for a noise diode measurement over the observation time series.

    Parameters
    ----------
    dio_cross : str
        Path to the noise diode measurement file (loaded via Waterfall).
    bothfeeds : bool
        If True, plot the two feeds separately; if False, plot total power
        with the ON/OFF averages overlaid.
    feedtype : str
        'l' for linear feeds (XX/YY) or 'c' for circular feeds (LL/RR).
    min_samp, max_samp : int
        X-axis limits in time-sample numbers.
    legend : bool
        Whether to draw a legend.
    **kwargs
        Forwarded to foldcal().
    """
    #Get full stokes data of ND measurement
    obs = Waterfall(dio_cross,max_load=150)
    tsamp = obs.header['tsamp']
    data = obs.data
    # Drop the Waterfall object so the (potentially large) file buffer
    # can be garbage-collected; only `data` is needed from here on.
    obs = None
    I,Q,U,V = get_stokes(data,feedtype)
    #Calculate time series, OFF and ON averages, and time samples for each
    tseriesI = np.squeeze(np.mean(I,axis=2))
    I_OFF,I_ON,OFFints,ONints = foldcal(I,tsamp,inds=True,**kwargs)
    if bothfeeds==True:
        if feedtype=='l':
            # Linear feeds: X = (I+Q)/2, Y = (I-Q)/2
            tseriesQ = np.squeeze(np.mean(Q,axis=2))
            tseriesX = (tseriesI+tseriesQ)/2
            tseriesY = (tseriesI-tseriesQ)/2
        if feedtype=='c':
            # Circular feeds: R = (I+V)/2, L = (I-V)/2
            tseriesV = np.squeeze(np.mean(V,axis=2))
            tseriesR = (tseriesI+tseriesV)/2
            tseriesL = (tseriesI-tseriesV)/2
    # Plot only up to the end of the last ON interval.
    stop = ONints[-1,1]
    #Plot time series and calculated average for ON and OFF
    if bothfeeds==False:
        plt.plot(tseriesI[0:stop],'k-',label='Total Power')
        for i in ONints:
            plt.plot(np.arange(i[0],i[1]),np.full((i[1]-i[0]),np.mean(I_ON)),'r-')
        for i in OFFints:
            plt.plot(np.arange(i[0],i[1]),np.full((i[1]-i[0]),np.mean(I_OFF)),'b-')
    else:
        if feedtype=='l':
            # Shift the second feed by the mean offset so both overlay.
            diff = np.mean(tseriesX)-np.mean(tseriesY)
            plt.plot(tseriesX[0:stop],'b-',label='XX')
            plt.plot(tseriesY[0:stop]+diff,'r-',label='YY (shifted)')
        if feedtype=='c':
            diff = np.mean(tseriesL)-np.mean(tseriesR)
            plt.plot(tseriesL[0:stop],'b-',label='LL')
            plt.plot(tseriesR[0:stop]+diff,'r-',label='RR (shifted)')
    #Calculate plotting limits
    if bothfeeds==False:
        # Pad the y-range by half the ON-OFF separation on each side.
        lowlim = np.mean(I_OFF)-(np.mean(I_ON)-np.mean(I_OFF))/2
        hilim = np.mean(I_ON)+(np.mean(I_ON)-np.mean(I_OFF))/2
        plt.ylim((lowlim,hilim))
    plt.xlim((min_samp,max_samp))
    plt.xlabel('Time Sample Number')
    plt.ylabel('Power (Counts)')
    plt.title('Noise Diode Fold')
    if legend==True:
        plt.legend()
def jobcard(partcode, path, folder):
    """Handle the jobcard shortcut command for the given part code."""
    parse_options(nav.Jobcard(partcode), path, folder)
def getPristineStore(testCase, creator):
    """
    Get an Axiom Store which has been created and initialized by C{creator} but
    which has been otherwise untouched. If necessary, C{creator} will be
    called to make one.

    @type testCase: L{twisted.trial.unittest.TestCase}
    @type creator: one-argument callable
    @param creator: A factory for the Store configuration desired. Will be
    invoked with the testCase instance if necessary.
    @rtype: L{axiom.store.Store}
    """
    destination = FilePath(testCase.mktemp())
    # Copy the memoized base store into a fresh temp dir so each caller
    # gets an untouched working copy.
    _getBaseStorePath(testCase, creator).copyTo(destination)
    return Store(destination)
def update_origins(origin_list_, champions_list_, origin_counters_):
    """For every origin, count the champions in the pool whose counter is
    nonzero and whose primary or secondary origin matches, then push each
    total into the corresponding origin counter widget."""
    logging.debug("Function update_origins() called")
    totals = [0] * len(origin_list_)
    for idx, origin_ in enumerate(origin_list_):
        logging.info("Current origin: %s", origin_)
        # Count champions with a nonzero counter belonging to this origin.
        for champ in champions_list_:
            if champ.ChampCounter.get() >= 1:
                logging.info("Current champ with counter >=1: %s", champ.name)
                if origin_ in (champ.origin_prim, champ.origin_sec):
                    logging.info(
                        "Current champ with counter >=1 match origin Prim or Sec \
                    : %s or %s",
                        champ.origin_prim,
                        champ.origin_sec,
                    )
                    totals[idx] += 1
        logging.info(
            "Number of nonzero champions in this origin: %s",
            totals[idx],
        )
        origin_counters_[idx].set(totals[idx])
    logging.debug("Function update_origins() end")
def data_to_CCA(dic, CCA):
    """
    Return a dictionary of ranking details of each CCA member:
    {name: {placeholder: rank}}.

    Strips the bookkeeping keys "Class"/"CLASS" and "Category" from each
    record, and drops the header row keyed "Name".

    Improvements over the original: input records are copied instead of
    mutated in place, and records missing "Class"/"CLASS" no longer raise
    KeyError.
    """
    final_dic = {}
    dic_CCA = dic[CCA][0]  # the cca sheet
    for key, value in dic_CCA.items():
        if key == "Name":
            continue  # header row, not a member
        # Copy so the caller's data is left intact, then drop the
        # non-ranking bookkeeping fields.
        cleaned = dict(value)
        for junk in ("Class", "CLASS", "Category"):
            cleaned.pop(junk, None)
        final_dic[key] = cleaned
    return final_dic
def classify_document(Text=None, EndpointArn=None):
    """
    Creates a new document classification request to analyze a single
    document in real-time, using a previously created and trained custom
    model and an endpoint.

    See also: AWS API Documentation

    :example: response = client.classify_document(
        Text='string',
        EndpointArn='string'
    )

    :type Text: string
    :param Text: [REQUIRED]\nThe document text to be analyzed.\n

    :type EndpointArn: string
    :param EndpointArn: [REQUIRED]\nThe Amazon Resource Number (ARN) of the endpoint.\n

    :rtype: dict
    :return: {
        'Classes': [{'Name': 'string', 'Score': ...}, ...],
        'Labels':  [{'Name': 'string', 'Score': ...}, ...]
    }
        Classes: results for multi-class trained models; classes are
        mutually exclusive (each document gets a single class).
        Labels: results for multi-label trained models; labels are not
        mutually exclusive (a document may carry several).
        Score is the confidence Amazon Comprehend assigns to the
        attribution.

    :returns: raises on failure:
        Comprehend.Client.exceptions.InvalidRequestException
        Comprehend.Client.exceptions.ResourceUnavailableException
        Comprehend.Client.exceptions.TextSizeLimitExceededException
        Comprehend.Client.exceptions.InternalServerException
    """
    # Auto-generated stub: documentation only, no implementation.
    pass
def quick_nve(mol, confId=0, step=2000, time_step=None, limit=0.0, shake=False, idx=None, tmp_clear=False,
              solver='lammps', solver_path=None, work_dir=None, omp=1, mpi=0, gpu=0, **kwargs):
    """
    MD.quick_nve
    MD simulation with NVE ensemble

    Args:
        mol: RDKit Mol object

    Optional args:
        confId: Target conformer ID (int)
        step: Number of MD steps (int)
        time_step: Set timestep of MD (float or None, fs)
        limit: NVE limit (float)
        shake: Use SHAKE (boolean)
        idx: Run index passed to the solver and MD objects
        tmp_clear: Remove temporary solver files on success (boolean)
        solver: lammps (str)
        solver_path: File path of solver (str)
        work_dir: Path of work directory (str)
        omp, mpi, gpu: Parallelization settings forwarded to the solver

    Returns:
        (mol_copy, uwstr) tuple — a deep copy of ``mol`` updated with the
        final unwrapped coordinates and per-atom velocities, and the
        unwrapped coordinate array (float, numpy.ndarray, angstrom);
        or None on abnormal solver termination.
    """
    # Work on a copy so the caller's molecule is never mutated.
    mol_copy = utils.deepcopy_mol(mol)
    if solver == 'lammps':
        sol = LAMMPS(work_dir=work_dir, solver_path=solver_path, idx=idx)
    #elif solver == 'gromacs':
    #    sol = Gromacs(work_dir=work_dir, solver_path=solver_path)
    md = MD(idx=idx)
    # Molecules without a cell attribute run without periodic boundaries.
    if not hasattr(mol_copy, 'cell'):
        md.pbc = False
        calc.centering_mol(mol_copy, confId=confId)
    md.add_md('nve', step, time_step=time_step, shake=shake, nve_limit=limit, **kwargs)
    sol.make_dat(mol_copy, confId=confId, file_name=md.dat_file)
    sol.make_input(md)
    cp = sol.exec(omp=omp, mpi=mpi, gpu=gpu)
    # A nonzero return code is only fatal when the expected output files
    # were not produced (some solvers exit nonzero after writing results).
    if cp.returncode != 0 and (
            (md.write_data is not None and not os.path.exists(os.path.join(work_dir, md.write_data)))
            or (md.outstr is not None and not os.path.exists(os.path.join(work_dir, md.outstr)))
            ):
        utils.radon_print('Error termination of %s. Return code = %i' % (sol.get_name, cp.returncode), level=3)
        return None
    uwstr, wstr, _, vel, _ = sol.read_traj_simple(os.path.join(sol.work_dir, md.outstr))
    # Copy final coordinates and velocities back onto the RDKit molecule.
    for i in range(mol_copy.GetNumAtoms()):
        mol_copy.GetConformer(confId).SetAtomPosition(i, Geom.Point3D(uwstr[i, 0], uwstr[i, 1], uwstr[i, 2]))
        mol_copy.GetAtomWithIdx(i).SetDoubleProp('vx', vel[i, 0])
        mol_copy.GetAtomWithIdx(i).SetDoubleProp('vy', vel[i, 1])
        mol_copy.GetAtomWithIdx(i).SetDoubleProp('vz', vel[i, 2])
    if hasattr(mol_copy, 'cell'):
        mol_copy = calc.mol_trans_in_cell(mol_copy, confId=confId)
    if tmp_clear: md.clear(work_dir)
    return mol_copy, uwstr
def deg2hms(x):
    """Transform degrees to *hours:minutes:seconds* strings.

    Parameters
    ----------
    x : float
        The degree value to be written as a sexagesimal string.

    Returns
    -------
    out : str
        The input angle written as a sexagesimal string, in the
        form, hours:minutes:seconds.
    """
    from astropy.coordinates import Angle
    sexagesimal = Angle(x, unit='degree').to_string(unit='hour', sep=':', pad=True)
    return str(sexagesimal)
async def test_proxy_binding21():
    """
    14 None
    24 None
    24 24
    ----------
    14
    ? JsComponentA
    undefined
    ? JsComponentA
    undefined
    """
    # NOTE(review): the docstring above appears to serve as the reference
    # output the test harness compares printed/logged lines against —
    # confirm before editing it.
    # Test multiple sessions, and sharing objects
    c1, s1 = launch(JsComponentB)
    c2, s2 = launch(JsComponentB)
    with c1:
        c11 = JsComponentA()  # JsComponent that has local JsComponent
        c1.set_sub1(c11)
    with c2:
        c22 = JsComponentA()  # JsComponent that has local JsComponent
        c2.set_sub1(c22)
    await roundtrip(s1, s2)
    c11.set_foo(14)
    c22.set_foo(24)
    await roundtrip(s1, s2)
    print(c1.sub1 and c1.sub1.foo, c1.sub2 and c1.sub2.foo)
    s1.send_command('EVAL', c1.id, 'sub1.foo')
    await roundtrip(s1, s2)
    # So far, not much news, now break the universe ...
    # Assign a component owned by session 2 into session 1.
    c1.set_sub1(c2.sub1)
    await roundtrip(s1, s2)
    print(c1.sub1 and c1.sub1.foo, c1.sub2 and c1.sub2.foo)
    # In JS, c1.sub1 will be a stub
    s1.send_command('EVAL', c1.id, 'sub1.id')
    s1.send_command('EVAL', c1.id, 'sub1.foo')
    await roundtrip(s1, s2)
    # But we can still "handle" it
    c1.sub1_to_sub2()
    await roundtrip(s1, s2)
    # And now c1.sub2.foo has the value of c2.sub1.foo
    print(c1.sub1 and c1.sub1.foo, c1.sub2 and c1.sub2.foo)
    s1.send_command('EVAL', c1.id, 'sub1.id')
    s1.send_command('EVAL', c1.id, 'sub1.foo')
    await roundtrip(s1, s2)
def get_waveforms_scales(we, templates, channel_locations):
    """
    Return x-vectors, y-scale and y-offset for plotting templates laid out
    over the probe geometry.

    Parameters
    ----------
    we : object with ``nsamples`` and ``nbefore`` attributes
        Waveform extractor describing the sample window.
    templates : ndarray
        Template waveforms; only the global min/max are used for scaling.
    channel_locations : (num_channels, 2) ndarray
        x/y position of each channel.

    Returns
    -------
    (xvectors, y_scale, y_offset)
    """
    wf_max = np.max(templates)
    # BUG FIX: was np.max(templates); min and max were identical, so the
    # amplitude scale ignored the negative excursion of the waveforms.
    wf_min = np.min(templates)
    x_chans = np.unique(channel_locations[:, 0])
    # Fall back to a 40 um pitch when all channels share a column/row.
    if x_chans.size > 1:
        delta_x = np.min(np.diff(x_chans))
    else:
        delta_x = 40.
    y_chans = np.unique(channel_locations[:, 1])
    if y_chans.size > 1:
        delta_y = np.min(np.diff(y_chans))
    else:
        delta_y = 40.
    # Scale so the largest excursion fills 70% of the inter-channel pitch.
    m = max(np.abs(wf_max), np.abs(wf_min))
    y_scale = delta_y / m * 0.7
    y_offset = channel_locations[:, 1][None, :]
    xvect = delta_x * (np.arange(we.nsamples) - we.nbefore) / we.nsamples * 0.7
    xvectors = channel_locations[:, 0][None, :] + xvect[:, None]
    # put nan for discontinuity so concatenated traces are not joined
    xvectors[-1, :] = np.nan
    return xvectors, y_scale, y_offset
def chunk_it(lst, n):
    """Yield successive n-sized chunks from lst (last chunk may be shorter)."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
def rec_findsc(positions, davraddi, davradius='dav', adatom_radius=1.1,
               ssamples=1000, return_expositions=True,
               print_surf_properties=False, remove_is=True, procs=1):
    """It return the atom site surface(True)/core(Flase) for each atoms in
    for each structure pandas dataframe. See more in the function
    quandarium.analy.mols.findsurfatons.

    Parameters
    ----------
    positions: Pandas.Series
        The name of the fuature (bag type) in pd_df with
        cartezian positions of the atoms.
    davraddi: Pandas.Series
        The name of the fuature in pd_df with atomic radii or dav
        information (bag of floats).
    davradius: str ['dav','radii'] (optional, default='dav')
        If radii, atomic radius will be the feature davraddiifeature
        values. If dav the values in atomic radius will be half of the
        feature davraddiifeature values.
    adatom_radius: float (optional, default=1.1).
        Radius of the dummy adatom, in angstroms.
    ssamples: intiger (optional, default=1000).
        Quantity of samplings over the touched sphere surface of each
        atom.
    procs: int (optional, default=1).
        Number of worker processes for the multiprocessing pool.

    Return
    ------
    list_of_new_features_name: list with strings.
        ['bag_issurf', 'bag_exposition']
    list_of_new_features_data: list with data.
        issurface: bag of intiger
            The number indicate 1 to surface
            atoms, 0 to core atoms.
        surfaceexposition: bag of floats.
            The percentual of surface
            exposition of each atom.
    """
    print("Initializing analysis: rec_findsc")
    inputs_list = []
    list_is_surface = []
    list_exposition = []
    # Build one argument tuple per structure for the parallel map below.
    for index, (poitionsi, davraddii) in enumerate(zip(positions, davraddi)):
        #print(type(poitionsi),poitionsi)
        positionsi = np.array(poitionsi)  # manter np.array e ativar bags
        if davradius == 'dav':
            # dav is a diameter-like quantity: halve it to get a radius.
            atomic_radii = np.array(davraddii)/2  # manter np.array e ativar bags
        if davradius == 'radii':
            atomic_radii = np.array(davraddii)
        inputs_list.append([positionsi, atomic_radii, adatom_radius,
                            remove_is, ssamples, False, return_expositions,
                            print_surf_properties, "surface_points.xyz"])
    pool = mp.Pool(procs)
    result = pool.map_async(findsc_wrap, inputs_list, chunksize=1)
    # Poll every 5 s and report how many structures are still queued.
    while not result.ready():
        remaining = result._number_left  # pylint: disable=W0212
        print('Remaining: ', remaining)
        time.sleep(5.0)
    print('Finished')
    outputs = result.get()
    for index, _ in enumerate(outputs):
        list_is_surface.append(outputs[index][0])
        list_exposition.append(outputs[index][1])
    list_of_new_features_data = [list_is_surface, list_exposition]
    list_of_new_features_name = ['bag_issurf', 'bag_exposition']
    return list_of_new_features_name, list_of_new_features_data
async def accept_taa(
    controller: AcaPyClient, taa: TAARecord, mechanism: Optional[str] = None
):
    """
    Accept the ledger's Transaction Author Agreement (TAA).

    Parameters:
    -----------
    controller: AcaPyClient
        The aries_cloudcontroller object
    taa: TAARecord
        The TAA object we want to agree to
    mechanism: Optional[str]
        Acceptance mechanism recorded alongside the agreement

    Returns:
    --------
    accept_taa_response: {}
        The response from letting the ledger know we accepted the response
    """
    request_body = TAAAccept(**taa.dict(), mechanism=mechanism)
    accept_taa_response = await controller.ledger.accept_taa(body=request_body)
    logger.info("accept_taa_response: %s", accept_taa_response)
    # An empty dict signals success; any other payload is a failure report.
    if accept_taa_response != {}:
        logger.error("Failed to accept TAA.\n %s", accept_taa_response)
        raise HTTPException(
            status_code=404,
            detail=f"Something went wrong. Could not accept TAA. {accept_taa_response}",
        )
    return accept_taa_response
def setdefault(self, other):
    """
    Merge ``other`` into ``self`` like ``dict.update`` but without
    overwriting values already present.

    :param dict self: updated dict
    :param dict other: default values to be inserted
    """
    for key in other:
        self.setdefault(key, other[key])
def discoverYadis(uri):
    """Discover OpenID services for a URI. Tries Yadis and falls back
    on old-style <link rel='...'> discovery if Yadis fails.

    @param uri: normalized identity URL
    @type uri: six.text_type, six.binary_type is deprecated

    @return: (claimed_id, services)
    @rtype: (six.text_type, list(OpenIDServiceEndpoint))

    @raises DiscoveryFailure: when discovery fails.
    """
    uri = string_to_text(uri, "Binary values for discoverYadis are deprecated. Use text input instead.")
    # Might raise a yadis.discover.DiscoveryFailure if no document
    # came back for that URI at all. I don't think falling back
    # to OpenID 1.0 discovery on the same URL will help, so don't
    # bother to catch it.
    response = yadisDiscover(uri)
    yadis_url = response.normalized_uri
    body = response.response_text
    try:
        openid_services = OpenIDServiceEndpoint.fromXRDS(yadis_url, body)
    except XRDSError:
        # Does not parse as a Yadis XRDS file
        openid_services = []
    if not openid_services:
        # Either not an XRDS or there are no OpenID services.
        if response.isXRDS():
            # if we got the Yadis content-type or followed the Yadis
            # header, re-fetch the document without following the Yadis
            # header, with no Accept header.
            return discoverNoYadis(uri)
        # Try to parse the response as HTML.
        # <link rel="...">
        openid_services = OpenIDServiceEndpoint.fromHTML(yadis_url, body)
    # Prefer OP identifier services over user services when both exist.
    return (yadis_url, getOPOrUserServices(openid_services))
def mapper_to_func(map_obj):
    """Convert a mapping-like object into a callable lookup function.

    dicts become ``dict.get`` (missing keys yield None), pandas Series
    become label-based ``.loc`` lookups, and anything else is assumed to
    be callable already and returned unchanged.
    """
    if isinstance(map_obj, dict):
        return map_obj.get
    if isinstance(map_obj, pd.core.series.Series):
        return lambda key: map_obj.loc[key]
    return map_obj
def int_finder(input_v, tol=1e-6, order='all', tol1=1e-6):
    """
    The function computes the scaling factor required to multiply the
    given input array to obtain an integer array. The integer array is
    returned.

    Parameters
    ----------
    input_v: numpy.array
        input array
    tol: float
        tolerance with Default = 1e-06 (used for the rational approximation)
    order: str
        choices are 'rows', 'columns', 'col', 'all'.
        If order = 'all', the input array is flattened and then scaled. This is default value.
        If order = 'rows', elements in each row are scaled
        If order = 'columns' or 'cols'', elements in each column are scaled
    tol1: float
        tolerance with Default = 1e-06 (elements below this magnitude are
        treated as zero)

    Returns
    -------
    output: numpy.array
        An array of integers obtained by scaling input
    """
    input1 = np.array(input_v)
    Sz = input1.shape
    # Promote 1-D input to a single-row 2-D array for uniform handling.
    if np.ndim(input1) == 1:
        input1 = np.reshape(input1, (1, input1.shape[0]))
    # Fast path: input is already (near-)integer — just divide out the GCD.
    if int_check(input1, 15).all():
        input1 = np.around(input1)
        # Divide by LCM (rows, cols, all) <--- To Do
        tmult = gcd_array(input1.astype(dtype='int64'), order)
        if (order == 'all'):
            input1 = input1 / tmult
        elif (order == 'rows'):
            # NOTE(review): np.shape(input1[1]) tiles by the *second row's*
            # shape — looks suspect for per-row scaling; confirm intended.
            tmult = np.tile(tmult, (np.shape(input1[1])))
            input1 = input1 / tmult
        elif (order == 'col' or order == 'cols' or order == 'columns'):
            tmult = np.tile(tmult, (np.shape(input1[0])[0], 1))
            input1 = input1 / tmult
        output_v = input1
        if len(Sz) == 1:
            output_v = np.reshape(output_v, (np.size(output_v),))
        return output_v.astype(int)
    else:
        # By default it flattens the array (if nargin < 3)
        if order.lower() == 'all':
            if len(Sz) != 1:
                input1.shape = (1, Sz[0]*Sz[1])
        else:
            Switch = 0
            err_msg = "Not a valid input. For the third argument please"+ \
                " choose either \"rows\" or \"columns\" keys for this function."
            order_options = ('rows', 'columns', 'col')
            try:
                Keys = (order_options.index(order.lower()))
            except:
                raise Exception(err_msg)
            if (Keys == 1) or (Keys == 2):
                if input1.shape[0] != 1:
                    # Handling the case of asking a row vector
                    # with the 'column' key by mistake.
                    input1 = input1.T
                    Switch = 1
            # Handling the case of asking a column
            # vector with the 'row' key by mistake.
            if (Keys == 0) and (input1.shape[1] == 1):
                input1 = input1.T
                Switch = 1
        if (abs(input1) < tol).all():
            excep1 = 'All the input components cannot' \
                + 'be smaller than tolerance.'
            raise Exception(excep1)
        # tmp marks significant elements; insignificant ones are replaced
        # below so they cannot dominate the per-row minimum.
        tmp = np.array((abs(input1) > tol1))
        Vec = 2 * abs(input1[::]).max() * np.ones(
            (input1.shape[0], input1.shape[1]))
        Vec[tmp] = input1[tmp]
        MIN = abs(Vec).min(axis=1)
        # Transposing a row to a column
        MIN.shape = (len(MIN), 1)
        # Normalize each row by its smallest significant magnitude, then
        # approximate entries as rationals N/D and clear denominators.
        input1 = input1 / np.tile(MIN, (1, input1.shape[1]))
        N, D = rat(input1, tol)
        N[~tmp] = 0  # <---- added
        D[~tmp] = 1  # <---- added
        lcm_rows = lcm_array(D, 'rows')
        lcm_mat = np.tile(lcm_rows, (1, input1.shape[1]))
        Rounded = (N * lcm_mat) / D
        output_v = Rounded
        # --------------------------
        # Restore the caller's original shape/orientation.
        if order.lower() == 'all':
            if len(Sz) != 1:
                output_v.shape = (Sz[0], Sz[1])
        else:
            if (Keys) == 1 or (Keys) == 2:
                output_v = output_v.T
            if Keys == 0 and Switch == 1:
                output_v = output_v.T
        if len(Sz) == 1:
            output_v = np.reshape(output_v, (np.size(output_v), ))
        return output_v.astype(int)
def cell_info_for_active_cells(self, porosity_model="MATRIX_MODEL"):
    """Get list of cell info objects for current case

    Arguments:
        porosity_model(str): String representing an enum.
            must be 'MATRIX_MODEL' or 'FRACTURE_MODEL'.

    Returns:
        List of **CellInfo** objects

    **CellInfo class description**::

        Parameter                 | Description                                   | Type
        ------------------------- | --------------------------------------------- | -----
        grid_index                | Index to grid                                 | Integer
        parent_grid_index         | Index to parent grid                          | Integer
        coarsening_box_index      | Index to coarsening box                       | Integer
        local_ijk                 | Cell index in IJK directions of local grid    | Vec3i
        parent_ijk                | Cell index in IJK directions of parent grid   | Vec3i

    **Vec3i class description**::

        Parameter        | Description                                  | Type
        ---------------- | -------------------------------------------- | -----
        i                | I grid index                                 | Integer
        j                | J grid index                                 | Integer
        k                | K grid index                                 | Integer
    """
    # Drain the async chunk stream and flatten all chunks into one list.
    chunks = self.cell_info_for_active_cells_async(porosity_model=porosity_model)
    return [cell for chunk in chunks for cell in chunk.data]
def features(x, encoded):
    """
    Given the original images or the encoded images, generate the
    features to use for the patch similarity function.

    NOTE(review): the ``encoded`` parameter is never used in this body —
    confirm whether it was meant to select a code path.
    """
    print('start shape',x.shape)
    if len(x.shape) == 3:
        # 3-D input: center across the first axis.
        x = x - np.mean(x,axis=0,keepdims=True)
    else:
        # count x 100 x 256 x 768
        print(x[0].shape)
        x = x - np.mean(x,axis=1,keepdims=True)
        # remove per-neural-network dimension
        x = x.reshape((x.shape[0] * x.shape[1],) + x.shape[2:])
    # NOTE(review): pool size and batch count are hard-coded to 96; this
    # assumes len(x) >= 96 (B would be 0 otherwise) — confirm.
    p = mp.Pool(96)
    B = len(x) // 96
    print(1)
    bs = [x[i:i+B] for i in range(0,len(x),B)]
    print(2)
    r = p.map(features_, bs)
    #r = features_(bs[0][:100])
    print(3)
    p.close()
    #r = np.array(r)
    #print('finish',r.shape)
    return np.concatenate(r, axis=0)
def create_processing_log(s_url, s_status=settings.Status.PENDING):
    """
    Create a new crawler processing status log entry. Default status PENDING.

    :param s_url: str - URL/name of the site
    :param s_status: settings.Status - the chosen processing status
    :return: SiteProcessingLog - the new processing status log
    """
    # Reject anything that is not a settings.Status enum member.
    assert isinstance(s_status, settings.Status), 'Not valid type of status'
    status_entity = entities.SiteStatus.get(type=s_status.name)
    return entities.SiteProcessingLog(
        site=get_site(s_url=s_url), status=status_entity, timestamp=datetime.today())
def pick_action(action_distribution):
    """action selection by sampling from a multinomial.

    Parameters
    ----------
    action_distribution : 1d torch.tensor
        action distribution, pi(a|s)

    Returns
    -------
    torch.tensor(int)
        sampled action
        (NOTE: unlike an earlier description, the log-probability of the
        sampled action is NOT returned; callers needing it can recompute
        it via torch.distributions.Categorical(...).log_prob(a_t).)
    """
    m = torch.distributions.Categorical(action_distribution)
    a_t = m.sample()
    return a_t
def setMonitoringQuerys():
    """Promote all 'Accepted' queries to 'Monitoring' status and report
    how many rows were updated."""
    with engine.begin() as conn:
        stmt = userQuery.update().where(userQuery.c.status=='Accepted').values(
            status='Monitoring', message_text='CoWin Hawk is monitoring.',
            timestamp=datetime.now())
        result = conn.execute(stmt)
    if result.rowcount > 0:
        print("%d Query(s) added for monitoring.." %(result.rowcount))
def word_tokenizer(text: str) -> List[str]:
    """Split input text into whitespace-delimited word tokens.

    Args:
        text : Input text

    Returns:
        Tokenized text
    """
    tokens = text.split()
    return tokens
def event_loop():
    """Create an instance of the default event loop for each test case.

    Yields a fresh loop and guarantees it is closed afterwards — including
    when the consuming fixture/generator is closed early or the test body
    raises — so loops never leak between test cases.
    """
    loop = asyncio.new_event_loop()
    try:
        yield loop
    finally:
        # Previously `loop.close()` ran only on normal resumption; an early
        # generator close (GeneratorExit) leaked the loop.
        loop.close()
def lookup_bigquery_dataset(project_id, dataset_id):
    """Retrieve the Data Catalog entry for the given BigQuery dataset."""
    from google.cloud import datacatalog_v1beta1
    # Linked-resource name in the canonical //bigquery.googleapis.com form.
    resource_name = (
        '//bigquery.googleapis.com/projects/{}/datasets/{}'.format(
            project_id, dataset_id))
    client = datacatalog_v1beta1.DataCatalogClient()
    return client.lookup_entry(linked_resource=resource_name)
def indicator_structure(Template, LC_instance):
    """ given a Template A and a LC instance Sigma
    builds the indicator structure of Sigma over A
    and passes the identification object """
    in_vars, in_cons = LC_instance
    # Construct the domain of the indicator by factoring: one element per
    # (function symbol, tuple over the template domain) pair.
    arities = dict(in_vars)
    domain = ((f, x)
              for f, arity in arities.items()
              for x in product(Template.domain, repeat=arity))
    identify = Components(domain)
    # Each constraint (f, g, relation) identifies (f, x_pi) with (g, x)
    # under the projection pi given by the relation.
    for scope, relation in in_cons:
        f, g = scope
        pi = {x: y for x, y in relation}
        for x in product(Template.domain, repeat=arities[g]):
            x_pi = tuple(x[pi[i]] for i in range(arities[f]))
            identify.add((f, x_pi), (g, x))
    variables = iter(identify)
    # impose constraints that cover all thats necessary
    important_fs = tuple(cover(arities, (scope for scope, rel in in_cons)))
    def indicator_relation(template_relation):
        # Lift a template relation to the indicator's identified variables.
        return set(
            tuple(identify((f, x)) for x in xs)
            for f in important_fs
            for xs in product_relation(template_relation, repeat=arities[f]))
    rels = (indicator_relation(relation) for relation in Template.relations)
    def decode(homomorphism):
        # Translate a homomorphism on identified variables back into one
        # polymorphism table per function symbol.
        polymorphisms = dict()
        for f, arity in arities.items():
            polymorphisms[f] = {
                x: homomorphism[identify((f, x))]
                for x in product(Template.domain, repeat=arity)}
        return polymorphisms
    return DelayDecode(Structure(variables, *rels), decode)
def add_depth_dim(X, y):
    """
    Append a trailing (depth/channel) dimension to X, leaving y untouched.
    Slightly more convenient than writing a lambda when mapping datasets.

    Args:
        X (tf.tensor): features
        y (tf.tensor): labels

    Returns:
        tf.tensor, tf.tensor: X, y tuple, with X having a new trailing dimension.
    """
    return tf.expand_dims(X, -1), y
def catch_exceptions(warning_msg="An exception was caught and ignored.", should_catch=True):
    """Decorator factory that converts exceptions raised by the wrapped
    function into warnings.

    Args:
        warning_msg: Prefix for the warning emitted when an exception is caught.
        should_catch: When False, exceptions propagate normally (the wrapper
            becomes a transparent pass-through).

    Returns:
        A decorator. When an exception is swallowed, the wrapped call
        returns None.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if not should_catch:
                return func(*args, **kwargs)
            try:
                return func(*args, **kwargs)
            except Exception:
                # Was a bare `except:` — that also swallowed
                # KeyboardInterrupt and SystemExit. Limit the net to
                # ordinary exceptions.
                warnings.warn(warning_msg + "\nDetailed error log: " + traceback.format_exc())
        return wrapper
    return decorator
def setSC2p5DAccNodes(lattice, sc_path_length_min, space_charge_calculator, boundary = None):
    """
    Insert a set of SC2p5D_AccNode space-charge nodes into the lattice as
    child nodes of the first-level accelerator nodes. Each SC node is placed
    at the beginning of a part of a first-level AccNode element; spacing
    between SC nodes exceeds sc_path_length_min, and the boundary is
    optional. Returns the list of created SC nodes for convenience.
    """
    sc_nodes = setSC_General_AccNodes(lattice, sc_path_length_min, space_charge_calculator, SC2p5D_AccNode)
    for node in sc_nodes:
        node.setName(node.getName() + "SC2p5D")
        node.setBoundary(boundary)
    # Re-initialize so the newly attached child nodes take effect.
    lattice.initialize()
    return sc_nodes
def transmuting_ring_sizes_score(mapping: LigandAtomMapping):
    """Checks if mapping alters a ring size.

    Inspects first-degree ring neighbours of mapped (core) atoms in both
    molecules; if the corresponding neighbours belong to rings of different
    sizes, the mapping is flagged.
    """
    molA = mapping.molA.to_rdkit()
    molB = mapping.molB.to_rdkit()
    molA_to_molB = mapping.molA_to_molB
    def gen_ringdict(mol):
        # maps atom idx to ring sizes
        ringinfo = mol.GetRingInfo()
        idx_to_ringsizes = defaultdict(list)
        for r in ringinfo.AtomRings():
            for idx in r:
                idx_to_ringsizes[idx].append(len(r))
        return idx_to_ringsizes
    # generate ring size dicts
    ringdictA = gen_ringdict(molA)
    ringdictB = gen_ringdict(molB)
    is_bad = False
    # check first degree neighbours of core atoms to see if their ring
    # sizes are the same
    for i, j in molA_to_molB.items():
        atomA = molA.GetAtomWithIdx(i)
        for bA in atomA.GetBonds():
            otherA = bA.GetOtherAtom(atomA)
            if otherA.GetIdx() in molA_to_molB:
                # if other end of bond in core, ignore
                continue
            # otherA is an atom not in the mapping, but bonded to an
            # atom in the mapping
            if not otherA.IsInRing():
                continue
            # try and find the corresponding atom in molecule B
            atomB = molB.GetAtomWithIdx(j)
            for bB in atomB.GetBonds():
                otherB = bB.GetOtherAtom(atomB)
                if otherB.GetIdx() in molA_to_molB.values():
                    continue
                if not otherB.IsInRing():
                    continue
                # ringdict[idx] will give the list of ringsizes for an atom
                if set(ringdictA[otherA.GetIdx()]) != set(
                        ringdictB[otherB.GetIdx()]):
                    is_bad = True
    # NOTE(review): by precedence this parses as (1 - 0.1) if is_bad else 0,
    # i.e. returns 0.9 for a bad mapping and 0 otherwise — confirm the 0.9
    # penalty value is intended.
    return 1 - 0.1 if is_bad else 0
def cmp_policy_objs(pol1, pol2, year_range=None, exclude=None):
    """
    Compare parameter values of two policy objects and assert they are
    numerically close.

    year_range: years over which to compare values.
    exclude: list of parameters to exclude from comparison.
    """
    if year_range is None:
        pol1.clear_state()
        pol2.clear_state()
    else:
        years = list(year_range)
        pol1.set_state(year=years)
        pol2.set_state(year=years)
    for param in pol1._data:
        if exclude and param in exclude:
            continue
        np.testing.assert_allclose(getattr(pol1, param), getattr(pol2, param))
def test_translate_six_frames(seq_record):
    """
    Translate a Biopython DNA (or RNA?) sequence record into amino acid
    (protein) sequences in all six reading frames.
    Returns the translations as a list of strings, split on stop codons.
    """
    translations = []
    strands = [(+1, seq_record.seq), (-1, seq_record.seq.reverse_complement())]
    for strand, nuc in strands:
        print("Strand: %s\nNuc: %s" % (strand, nuc))
        for frame in range(3):
            print("Frame: %s" % frame)
            # Trim to a multiple of 3 so the translation is codon-aligned.
            length = 3 * ((len(seq_record)-frame) // 3)
            print("Length: %s" % length)
            coding = nuc[frame:frame+length]
            print("Possible translations: %s" % coding.translate())
            translations.extend(coding.translate().split("*"))
    return(translations)
def from_sequence(seq, partition_size=None, npartitions=None):
    """Create a dask Bag from an in-memory Python sequence.

    The sequence should be relatively small; Dask Bag works best when it
    loads data itself (commonly a Bag of filenames plus ``.map`` to open
    them).

    Parameters
    ----------
    seq: Iterable
        Elements to put into the bag.
    partition_size: int (optional)
        The length of each partition.
    npartitions: int (optional)
        The number of desired partitions.

    It is best to provide either ``partition_size`` or ``npartitions``
    (though not both.)

    Examples
    --------
    >>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)

    See Also
    --------
    from_filenames: Specialized bag creation function for textfiles
    """
    seq = list(seq)
    if npartitions and not partition_size:
        partition_size = int(math.ceil(len(seq) / npartitions))
    if npartitions is None and partition_size is None:
        # Default heuristic: aim for roughly 100 partitions.
        partition_size = 1 if len(seq) < 100 else int(len(seq) / 100)
    parts = list(partition_all(partition_size, seq))
    name = 'from_sequence-' + tokenize(seq, partition_size)
    graph = {(name, index): chunk for index, chunk in enumerate(parts)}
    return Bag(graph, name, len(graph))
def smooth_trajectory(
    df,
    bodyparts,
    filter_window=3,
    order=1,
    deriv=0,
    save=False,
    output_filename=None,
    destfolder=None,
):
    """
    Smooths the input data which is a multiindex pandas array generated by DeepLabCut as a result of analyzing a video.

    Parameters
    ----------
    df: Pandas multiindex dataframe

    bodyparts: List
        List of bodyparts to smooth. To smooth all the bodyparts use bodyparts=['all']

    filter_window: int
        The length of filter window which needs to be a positive odd integer

    order: int
        Order of the polynomial to fit the data. The order must be less than the filter_window

    deriv: int
        Optional. Computes the derivative. If order=1, it computes the velocity on the smoothed data, if order=2 it computes the acceleration on the smoothed data.

    Outputs
    -------
    df: smoothed dataframe

    Example
    -------
    >>> df_smooth = kinematics.smooth_trajectory(df,bodyparts=['nose','shoulder'],filter_window=11,order=3)

    To smooth all the bodyparts in the dataframe, use
    >>> df_smooth = kinematics.smooth_trajectory(df,bodyparts=['all'],filter_window=11,order=3)
    """
    # Work on a copy so the caller's dataframe is left untouched.
    df = df.copy()
    # Boolean mask over columns: True for x/y coordinate columns,
    # False for 'likelihood' columns (those are never smoothed).
    xy = df.columns.get_level_values("coords") != "likelihood"
    if bodyparts[0] == "all":
        mask = np.ones(df.shape[1], dtype=bool)
    else:
        mask = df.columns.get_level_values("bodyparts").isin(bodyparts)
    to_smooth = xy & mask
    # Savitzky-Golay filtering; deriv > 0 yields velocity/acceleration
    # of the smoothed signal instead of the smoothed positions.
    df.loc[:, to_smooth] = savgol_filter(
        df.loc[:, to_smooth], filter_window, order, deriv, axis=0
    )
    # Keep only the requested bodyparts (their likelihood columns included).
    df_cut = df.loc[:, mask]
    if not destfolder:
        destfolder = os.getcwd()
    if not output_filename:
        output_filename = (
            "dataFrame_smooth_" + df.columns.get_level_values("scorer").unique()[0]
        )
    if save:
        print("Saving the smoothed data as a pandas array in %s " % destfolder)
        df_cut.to_hdf(
            os.path.join(destfolder, output_filename + ".h5"),
            "df_with_missing",
            format="table",
            mode="w",
        )
    return df_cut
def write_certificate(crt, filename):
    """Serialize a certificate to disk using the usual PEM encoding.

    Args:
        crt: X509 certificate object.
        filename: str path where the PEM file is written.
    """
    pem_bytes = crypto.dump_certificate(crypto.FILETYPE_PEM, crt)
    with open(filename, 'wb') as out:
        out.write(pem_bytes)
def _get_instances(consul_host, user):
    """Get all deployed component instances for a given user.

    Instances are sourced from both the Consul KV store and the service
    catalog to ensure a complete list regardless of instance state.

    Args
    ----
    consul_host: (string) host string of Consul
    user: (string) user id

    Returns
    -------
    List of unique component instance names
    """
    consul = Consul(consul_host)
    from_kv = partial(_get_instances_from_kv, consul.kv.get)
    from_catalog = partial(_get_instances_from_catalog, consul.catalog.services)
    return _merge_instances(user, from_kv, from_catalog)
async def pool_help(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Show information of all documented pool commands the player can access."""
    prefix = glob.config.command_prefix
    # Keep only documented commands the player has sufficient privileges for.
    lines = [
        f'{prefix}pool {cmd.triggers[0]}: {cmd.doc}'
        for cmd in pool_commands.commands
        if cmd.doc and p.priv & cmd.priv
    ]
    return '\n'.join(lines)
def filter_params(params):
    """Filter the dictionary of params for a Bountysource account.

    This is so that the Bountysource access token doesn't float around in
    a user_info hash (considering nothing else does that).

    Args:
        params: mapping of account fields.

    Returns:
        dict containing only the whitelisted keys present in ``params``.
    """
    whitelist = ('id', 'display_name', 'first_name', 'last_name', 'email', 'avatar_url')
    # Dict comprehension replaces the manual accumulate-into-dict loop.
    return {key: value for key, value in params.items() if key in whitelist}
def build_vocab(tokens, glove_vocab, min_freq):
    """Build a vocabulary from corpus tokens and GloVe words.

    When min_freq > 0, keep tokens at or above that frequency; otherwise
    keep every token that has a GloVe embedding. Special/entity-mask
    tokens are prepended in either case.
    """
    counter = Counter(tokens)
    if min_freq > 0:
        kept = [tok for tok in counter if counter[tok] >= min_freq]
    else:
        kept = [tok for tok in counter if tok in glove_vocab]
    # Most frequent tokens first (stable sort, same ordering as sorted()).
    kept.sort(key=counter.get, reverse=True)
    vocab = constant.VOCAB_PREFIX + kept
    print("vocab built with {}/{} words.".format(len(vocab), len(counter)))
    return vocab
def test046():
    """
    check that None instead of dict generates a warning
    """
    meta = ObjectMeta()
    duplicate: ObjectMeta = meta.dup()
    duplicate.annotations = None
    warnings = duplicate.get_type_warnings()
    assert len(warnings) == 1, f"{len(warnings)} warnings"
    first = warnings[0]
    assert first.cls == ObjectMeta
    assert first.attrname == "annotations"
    assert "empty dict" in first.warning
def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
    """
    Checks the equality of two masked arrays, up to a given number of decimals.
    The equality is checked elementwise.
    """
    def compare(x, y):
        "Returns the result of the loose comparison between x and y."
        # Relative tolerance derived from the requested decimal precision.
        return approx(x, y, rtol=10. ** -decimal)
    # Delegate elementwise comparison and failure reporting.
    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not almost equal')
def to_bits_string(value: int) -> str:
    """Converts unsigned value to a bit string with _ separators every nibble.

    Args:
        value: non-negative integer to format.

    Returns:
        Binary literal string, e.g. ``to_bits_string(255) == '0b1111_1111'``.

    Raises:
        ValueError: if ``value`` is negative.
    """
    if value < 0:
        raise ValueError(f'Value is not unsigned: {value!r}')
    # PEP 515: the '_b' format spec groups binary digits in fours,
    # replacing the manual reverse/chunk/reverse dance.
    return '0b' + format(value, '_b')
def enumerate_names_countries():
    """Outputs:
    1. Julian     Australia
    2. Bob        Spain
    3. PyBites    Global
    4. Dante      Argentina
    5. Martin     USA
    6. Rodolfo    Mexico"""
    # Unpack name/country pairs directly; numbering starts at 1.
    for index, (name, country) in enumerate(zip(names, countries), start=1):
        print("{}. {:<10} {}".format(index, name, country))
def test_channel_opened_notification(node_factory):
    """
    Test the 'channel_opened' notification sent at channel funding success.
    """
    # Only l2 loads the plugin that subscribes to the notification.
    opts = [{}, {"plugin": os.path.join(os.getcwd(), "tests/plugins/misc_notifications.py")}]
    amount = 10**6
    l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=amount,
                                     opts=opts)
    # The plugin logs this line when it receives channel_opened.
    l2.daemon.wait_for_log(r"A channel was opened to us by {}, "
                           "with an amount of {}*"
                           .format(l1.info["id"], amount))
def comp_days_centered(ndays, offset=0):
    """Return days for pre/onset/post composites centered on onset.

    Parameters
    ----------
    ndays : int
        Number of days to average in each composite.
    offset : int, optional
        Number of offset days between pre/onset and onset/post day ranges.

    Returns
    -------
    reldays : dict of arrays
        Components are 'pre', 'onset', and 'post', arrays of days of the
        year relative to onset day, for each composite.
    """
    ndays = int(ndays)
    half = int(ndays // 2)
    rest = ndays - half
    # Onset window straddles day 0; pre/post windows are shifted by offset.
    return collections.OrderedDict(
        [('pre', np.arange(-offset - half - ndays, -offset - half)),
         ('onset', np.arange(-half, rest)),
         ('post', np.arange(offset + rest, offset + rest + ndays))])
def createDatabase(name: str) -> bool:
    """
    Creates a database in the format of python. If spaces are in the name, it will be cut off through trim(). Returns true if the database is successfully created.

    >>> import coconut
    #The database format must always be python, it still supports if name has no .py
    >>> coconut.database.createDatabase("Example.py")
    True

    Raises DatabaseNameError for blank names and InvalidTypeDatabaseError
    for non-python extensions.
    """
    if backend.trim(name) == '':
        raise DatabaseNameError(f'Name {name} is invalid and is just spaces')
    if not name.lower().endswith('.py'):
        parts = name.split('.')
        if len(parts) > 1:
            # An explicit non-python extension was supplied.
            raise InvalidTypeDatabaseError('Extension type {} is invalid'.format(parts[-1]))
        # Bare name: default to the .py extension.
        name = name + '.py'
    # Single creation path replaces the original's duplicated branches
    # (both of which opened and immediately closed the same file).
    os.chdir(os.path.join(backend.throwback(), 'databases'))
    with open(name, 'w+'):
        pass
    return True
def bedgraph_per_gene_ss(genes, bg_plus, bg_minus, bgfile):
    """
    bedtools intersect genes with each of bg_plus and bg_minus.
    Run separately so that gene coverage is consecutive by strand.

    Writes the concatenated +/- strand intersections to ``bgfile`` and
    removes the intermediate files afterwards.
    """
    # === split annotation ===
    # Column 6 of the BED line is the strand.
    plus_bed = bgfile + '.genes.plus'
    minus_bed = bgfile + '.genes.minus'
    p = open(plus_bed, 'w')
    m = open(minus_bed, 'w')
    with open(genes, 'r') as f:
        for line in f:
            if not line.startswith('track'):
                strand = line.rstrip().split('\t')[5]
                if strand == '+':
                    p.write(line)
                elif strand == '-':
                    m.write(line)
                else:
                    logger.error('do not recognize strand: ' + strand)
                    logger.error(line)
                    sys.exit(1)
    p.close()
    m.close()
    # === bedtools intersect: concatenate + & - strands ===
    # sort_bedfile sorts in place (input and output paths are the same).
    sort_bedfile(bg_plus, bg_plus)
    pb.BedTool(plus_bed).intersect(bg_plus, wo=True, sorted=True).saveas(bgfile + '.plus')
    # os.remove(bg_plus)
    sort_bedfile(bg_minus, bg_minus)
    pb.BedTool(minus_bed).intersect(bg_minus, wo=True, sorted=True).saveas(bgfile + '.minus')
    # os.remove(bg_minus + ".sorted")
    # Concatenate the per-strand intersections: + strand first, then -.
    t = open(bgfile, 'w')
    t.write(open(bgfile + '.plus').read())
    t.write(open(bgfile + '.minus').read())
    t.close()
    # Clean up intermediates.
    for file in [bgfile + '.plus', bgfile + '.minus', plus_bed, minus_bed]:
        os.remove(file)
def sort_numbers(numbers: str) -> str:
    """ Input is a space-delimited string of numberals from 'zero' to 'nine'.
    Valid choices are 'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight' and 'nine'.
    Return the string with numbers sorted from smallest to largest
    >>> sort_numbers('three one five')
    'one three five'

    Example solution:
    # line 1
    value_map = {
        'zero': 0,
        'one': 1,
        'two': 2,
        'three': 3,
        'four': 4,
        'five': 5,
        'six': 6,
        'seven': 7,
        'eight': 8,
        'nine': 9
    }
    # line 2
    ret = []
    # line 3
    for x in numbers.split():
        # line 4
        ret.append([x, value_map[x]])
    # line 5
    ret.sort(key=lambda x: x[1])
    # line 6
    ret = [x for x, y in ret]
    # line 7
    ret = ' '.join(ret)
    # line 8
    return ret
    """
    # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4
    # END OF CONTEXT
    # NOTE(review): this is a prompt-style dataset block; the hard-coded
    # answer "7" was not independently verified against the example above.
    print("7")
    # END OF SOLUTION
def linreg_fit_bayes(X, y, **kwargs):
    """
    Fit a Bayesian linear regression model.

    This is a port of linregFit.m from pmtk3.

    :param X: N*D design matrix
    :param y: N*1 response vector
    :returns: (model, logev) — fitted model dict and log evidence.
    :raises NotImplementedError: for priors not yet ported.
    :raises ValueError: for unknown priors.
    """
    pp = preprocessor_create(add_ones=True, standardize_X=False)  # default
    # dict.get replaces the repetitive "x if 'x' in kwargs else default" ternaries.
    prior = kwargs.get('prior', 'uninf')
    preproc = kwargs.get('preproc', pp)
    beta = kwargs.get('beta')
    alpha = kwargs.get('alpha')
    g = kwargs.get('g')
    use_ARD = kwargs.get('use_ARD', False)
    verbose = kwargs.get('verbose', False)
    if prior.lower() == 'eb':
        prior = 'ebnetlab'
    prior_key = prior.lower()
    if prior_key == 'ebnetlab':
        model, logev = linreg_fit_eb_netlab(X, y, preproc)
    elif prior_key in ('uninf', 'gauss', 'zellner', 'vb', 'ebchen'):
        # Recognized priors whose fitting routines have not been ported yet.
        raise NotImplementedError
    else:
        raise ValueError('Invalid prior')
    model['model_type'] = 'linreg_bayes'
    model['prior'] = prior
    return model, logev
def getrv(objwave, objflam, refwave, refflam, maxrv=[-200.,200.], waverange=[-np.inf,np.inf]):
    """
    Calculates the rv shift of an object relative to some reference spectrum.

    Inputs:
        objwave - obj wavelengths, 1d array
        objflam - obj flux, 1d array
        refwave - ref wavelengths, 1d array
        refflam - ref flux, 1d array
        maxrv - min and max rv shift in km/s, 2-element array
        waverange - min and max wavelength to consider, 2-element array
    Output:
        rv shift in km/s
    """
    # Bug fix: the default maxrv is a plain list but is divided by a float
    # below (maxrv / (drv*3e5) would raise TypeError); coerce to ndarray.
    maxrv = np.asarray(maxrv, dtype=float)
    # Restrict both spectra to their mutual wavelength overlap and window.
    ow = (objwave >= np.nanmin(refwave)) & (objwave <= np.nanmax(refwave)) \
        & (objwave >= waverange[0]) & (objwave <= waverange[1])
    rw = (refwave >= np.nanmin(objwave)) & (refwave <= np.nanmax(objwave)) \
        & (refwave >= waverange[0]) & (refwave <= waverange[1])
    # Normalize each spectrum by its median flux.
    oscl = 1.0/np.nanmedian(objflam[ow])
    rscl = 1.0/np.nanmedian(refflam[rw])
    # Interpolate the object flux onto the reference wavelength grid.
    iflam = np.interp(refwave[rw], objwave[np.isfinite(objflam)], objflam[np.isfinite(objflam)], left=np.nan, right=np.nan)
    # Median fractional wavelength step per pixel (velocity step / c).
    drv = np.nanmedian(2.*(refwave[rw][1:]-refwave[rw][:-1])/(refwave[rw][1:]+refwave[rw][:-1]))
    maxshift = maxrv / (drv*3e5)
    ssd = []
    ss = np.arange(int(maxshift[0]), int(maxshift[1]+1))
    for s in ss:
        # Shift the interpolated flux by s pixels, NaN-padding the ends.
        if s > 0:
            shiftflam = np.append(np.repeat(np.nan, s), iflam[0:-s])
        elif s < 0:
            shiftflam = np.append(iflam[-s:], np.repeat(np.nan, -s))
        else:
            shiftflam = iflam
        # Mean squared residual over the valid (non-NaN) overlap.
        ssd.append(np.nansum((rscl*refflam[rw] - oscl*shiftflam)**2)/np.sum(~np.isnan(shiftflam)))
    ssd = np.array(ssd)
    # Best pixel shift -> radial velocity in km/s (c ~ 3e5 km/s).
    s = ss[np.nanargmin(ssd)]
    return s*drv*3e5
def me_show(ctx, verbose):
    """Show my own user information"""
    me = ctx.obj['nc'].me()[0]
    if verbose >= 1:
        print_object(me)
    else:
        # Hide the API key and restrict to the configured field set.
        print_object(me, exclude=['APIKey'], only=ctx.obj['show_only'])
def ping(host):
    """
    Returns True if host (str) responds to a single ping request.

    Remember that some hosts may not respond to a ping request even if the
    host name is valid.
    """
    # Windows uses -n for the packet count; POSIX systems use -c.
    count_flag = "-n 1" if system_name().lower() == "windows" else "-c 1"
    return system_call("ping {} {}".format(count_flag, host)) == 0
def remove_unpopulated_classes(_df, target_column, threshold):
    """
    Drop every row whose label in target_column appears fewer than
    threshold times in the whole frame (not enough populated classes).

    :param _df: The dataframe to filter
    :param target_column: The target column with labels
    :param threshold: the number of appearances a label must respect
    :return: The filtered dataframe
    """
    counts = Counter(_df[target_column])
    populated = {label for label, occurrences in counts.items() if occurrences >= threshold}
    return _df[_df[target_column].isin(populated)]
def get_longest_substrings(full_strings):
    """Return a dict of top substrings with hits from a given list of strings.

    Args:
        full_strings (list[str]): List of strings to test against each other.

    Returns:
        dict: substrings with their respective frequencies in full_strings.
    """
    hits = {}
    for first, second in combinations(full_strings, 2):
        common = get_longest_substring(first, second)
        # First sighting counts both strings of the pair (2); later pairs add 1.
        hits[common] = hits.get(common, 1) + 1
    return hits
def num_weekdays():
    """
    Creates a function which returns the number of weekdays in a pandas.Period, typically for use as the average_weight parameter for other functions.

    Returns:
        callable: Function accepting a single parameter of type pandas.Period and returning the number of weekdays within this
        period as a float.
    """
    # Weekdays are business days with an empty holiday calendar.
    return num_business_days([])
def setup_blast_args(args):
    """Normalize the blast args in place.

    When filtering is disabled, zero out the score/length thresholds so
    nothing is excluded downstream.
    """
    if args['no_filter']:
        args.update(bit_score=0, contig_length=0)
def largestSumSubArray(arr, size):
    """
    Kadane's algorithm: find the largest possible sum of a contiguous
    subarray of ``arr[:size]`` and print the sum with its start/end indices.

    Every positive running segment is a candidate for the best sum; the
    running sum is reset whenever it drops below zero.
    """
    best = -sys.maxsize - 1
    running = 0
    start = 0
    end = 0
    candidate_start = 0
    for idx in range(size):
        running += arr[idx]
        if best < running:
            best = running
            start = candidate_start
            end = idx
        if running < 0:
            # A negative prefix can never help; restart after it.
            running = 0
            candidate_start = idx + 1
    print("Largest contiguous sum = ", end="")
    print(best)
    print("Start Index = ", end="")
    print(start)
    print("End Index = ", end="")
    print(end)
def print_tohu_version():  # pragma: no cover
    """
    Convenience helper function that prints the current tohu version.
    """
    version = get_versions()['version']
    print(f"Tohu version: {version}")
def reader_from_file(load_dir: str, **kwargs):
    """
    Load a reader from a checkpoint directory.

    Args:
        load_dir: folder containing the reader being loaded.
        **kwargs: configuration overrides applied on top of the saved config.

    Returns: a reader.
    """
    shared_resources = create_shared_resources()
    shared_resources.load(os.path.join(load_dir, "shared_resources"))
    # Updating with an empty mapping is a no-op, so no guard is needed.
    shared_resources.config.update(kwargs)
    reader_factory = readers[shared_resources.config["reader"]]
    reader = reader_factory(shared_resources)
    reader.load_and_setup_modules(load_dir)
    return reader
def add_special_param_to_dependency(
    *,
    dependency_param: inspect.Parameter,
    dependant: Dependant,
) -> bool:
    """Check if param is non field object that should be passed into callable.

    Arguments:
        dependency_param: param that should be checked.
        dependant: dependency which field would be filled with required param name.

    Returns:
        Result of check.
    """
    # Map each special annotation type to the dependant attribute that
    # records the parameter name; order matches the original checks.
    special_params = (
        (bots.Bot, "bot_param_name"),
        (Message, "message_param_name"),
        (async_client.AsyncClient, "async_client_param_name"),
        (sync_client.Client, "sync_client_param_name"),
    )
    for special_type, attribute in special_params:
        if lenient_issubclass(dependency_param.annotation, special_type):
            setattr(dependant, attribute, dependency_param.name)
            return True
    return False
def getBestStyleFit(c_path, styles_path):
    """
    Apply every style to the content image, keep the style-transfer result
    with the lowest final content loss, and save it to disk.
    """
    candidates = []
    # Create the save path if it doesn't exist.
    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)
    # Run style transfer for every candidate style.
    for s_path in styles_path:
        print(s_path)
        image, data_list = style_transfer(
            c_path, s_path, ITERATIONS)
        # Final content loss for this style.
        content_loss = data_list['content_loss'][-1]
        print(s_path + " " + str(content_loss))
        candidates.append(
            {'image': image, 'name': s_path, 'content_loss': content_loss})
    print("Content losses for all applied styles")
    for entry in candidates:
        print(entry['name'] + " : " + str(entry['content_loss']))
    # min() keeps the earliest minimum, matching the original strict-< scan.
    best = min(candidates, key=lambda entry: entry['content_loss'])
    print("Best style: " + best['name'])
    # Save the image with the lowest content loss.
    filename = args.save_path + '/' + c_path + "-" + best['name'] + '.png'
    save_image(filename, best['image'])
def mtask_forone_advacc(val_loader, model, criterion, task_name, args, info, epoch=0, writer=None,
                        comet=None, test_flag=False, test_vis=False, norm='Linf'):
    """
    Evaluate a multi-task model under PGD adversarial attack.

    NOTE: test_flag is for the case when we are testing for multiple models, need to return something to be able to plot and analyse

    Returns a dict of per-task average losses (plus segmentation IoU/accuracy)
    when test_flag is True; otherwise returns None and logs to tensorboard/comet.
    """
    assert len(task_name) > 0
    avg_losses = {}
    num_classes = args.classes
    # Confusion-matrix accumulator for segmentation IoU.
    hist = np.zeros((num_classes, num_classes))
    for c_name, criterion_fun in criterion.items():
        avg_losses[c_name] = AverageMeter()
    seg_accuracy = AverageMeter()
    seg_clean_accuracy = AverageMeter()
    model.eval()  # this is super important for correct including the batchnorm
    print("using norm type", norm)
    for i, (input, target, mask) in enumerate(val_loader):
        if test_vis:
            # Also track clean (unattacked) segmentation accuracy.
            clean_output = model(Variable(input.cuda(), requires_grad=False))
            seg_clean_accuracy.update(accuracy(clean_output['segmentsemantic'], target['segmentsemantic'].long().cuda()),
                                      input.size(0))
        if args.steps == 0 or args.step_size == 0:
            # Degenerate attack settings mean "no attack".
            args.epsilon = 0
        if norm == 'Linf':
            if args.dataset == 'taskonomy':
                adv_img = PGD_attack_mtask(input, target, mask, model, criterion, task_name, args.epsilon, args.steps, args.dataset,
                                           args.step_size, info, args, using_noise=True)
            elif args.dataset == 'cityscape':
                adv_img = PGD_attack_mtask_city(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
                                                args.dataset,
                                                args.step_size, info, args, using_noise=True)
        elif norm == 'l2':
            adv_img = PGD_attack_mtask_L2(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
                                          args.dataset,
                                          args.step_size)
        # image_var = Variable(adv_img.data, requires_grad=False)
        image_var = adv_img.data
        # image_var = input
        if torch.cuda.is_available():
            image_var = image_var.cuda()
            for keys, m in mask.items():
                mask[keys] = m.cuda()
            for keys, tar in target.items():
                target[keys] = tar.cuda()
        # print("diff", torch.sum(torch.abs(raw_input-image_var)))
        with torch.no_grad():
            output = model(image_var)
        # Accumulate per-task losses on the adversarial batch.
        sum_loss = None
        loss_dict = {}
        for c_name, criterion_fun in criterion.items():
            this_loss = criterion_fun(output[c_name].float(), target[c_name],
                                      mask[c_name])
            if sum_loss is None:
                sum_loss = this_loss
            else:
                sum_loss = sum_loss + this_loss
            loss_dict[c_name] = this_loss
            avg_losses[c_name].update(loss_dict[c_name].data.item(), input.size(0))
        if 'segmentsemantic' in criterion.keys():
            # this is accuracy for segmentation
            seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
            # TODO: also mIOU here
            class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
            target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target['segmentsemantic'].data.numpy()
            class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
            hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
        if i % 500 == 0:
            # Periodically log qualitative segmentation images.
            class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
            # print(target['segmentsemantic'].shape)
            decoded_target = decode_segmap(
                target['segmentsemantic'][0][0].cpu().data.numpy() if torch.cuda.is_available() else
                target['segmentsemantic'][0][0].data.numpy(),
                args.dataset)
            decoded_target = np.moveaxis(decoded_target, 2, 0)
            decoded_class_prediction = decode_segmap(
                class_prediction[0].cpu().data.numpy() if torch.cuda.is_available() else class_prediction[
                    0].data.numpy(), args.dataset)
            decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
            if not test_flag:
                writer.add_image('Val/image clean ', back_transform(input, info)[0])
                writer.add_image('Val/image adv ', back_transform(adv_img, info)[0])
                writer.add_image('Val/image gt for adv ', decoded_target)
                writer.add_image('Val/image adv prediction ', decoded_class_prediction)
                # if comet is not None: comet.log_image(back_transform(input, info)[0].cpu(), name='Val/image clean ', image_channels='first')
                # if comet is not None: comet.log_image(back_transform(adv_img, info)[0].cpu(), name='Val/image adv ', image_channels='first')
                # if comet is not None: comet.log_image(decoded_target, name='Val/image gt for adv ', image_channels='first')
                # if comet is not None: comet.log_image(decoded_class_prediction, name='Val/image adv prediction ', image_channels='first')
        # NOTE(review): this block duplicates the segmentation-accuracy/hist
        # accumulation above, double-counting each batch — confirm intent.
        if 'segmentsemantic' in criterion.keys():
            # this is accuracy for segmentation
            seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
            # TODO: also mIOU here
            class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
            target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target['segmentsemantic'].data.numpy()
            class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
            hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
        if args.debug:
            # Short-circuit after a few batches in debug mode.
            if i > 1:
                break
    if test_vis:
        print("clean seg accuracy: {}".format(seg_clean_accuracy.avg))
    str_attack_result = ''
    str_not_attacked_task_result = ''
    for keys, loss_term in criterion.items():
        if keys in task_name:
            str_attack_result += 'Attacked Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys, loss=avg_losses[keys])
        else:
            str_not_attacked_task_result += 'Not att Task Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys, loss=avg_losses[keys])
    # Tensorboard logger
    if not test_flag:
        for keys, _ in criterion.items():
            if keys in task_name:
                writer.add_scalar('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg, epoch)
                if comet is not None: comet.log_metric('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
            else:
                writer.add_scalar('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
                if comet is not None: comet.log_metric('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
    # NOTE(review): the 'or' below repeats the same condition — likely meant
    # to check a second task key; confirm against the original intent.
    if 'segmentsemantic' in criterion.keys() or 'segmentsemantic' in criterion.keys():
        ious = per_class_iu(hist) * 100
        logger.info(' '.join('{:.03f}'.format(i) for i in ious))
        mIoU = round(np.nanmean(ious), 2)
        str_attack_result += '\n Segment Score ({score.avg:.3f}) \t'.format(score=seg_accuracy)
        str_attack_result += ' Segment ===> mAP {}\n'.format(mIoU)
        if comet is not None: comet.log_metric('segmentsemantic Attacked IOU', mIoU)
        if comet is not None: comet.log_metric('segmentsemantic Attacked Score', seg_accuracy)
    print('clean task')
    print(str_not_attacked_task_result)
    if test_flag:
        dict_losses = {}
        for key, loss_term in criterion.items():
            dict_losses[key] = avg_losses[key].avg
        # print(str_attack_result, "\nnew", avg_losses[keys].avg, "\n")
        if 'segmentsemantic' in criterion.keys():
            dict_losses['segmentsemantic'] = {'iou': mIoU,
                                              'loss': avg_losses['segmentsemantic'].avg,
                                              'seg_acc': seg_accuracy.avg}
        print("These losses are returned", dict_losses)
        # Compute the dictionary of losses that we want. Desired: {'segmentsemantic:[mIoU, cel],'keypoints2d':acc,'}
        return dict_losses
async def ga4gh_info(host: str) -> Dict:
    """Construct the `Beacon` app information dict in GA4GH Discovery format.

    :return beacon_info: A dict that contain information about the ``Beacon`` endpoint.
    """
    # TO DO implement some fallback mechanism for ID
    reversed_domain = ".".join(reversed(host.split(".")))
    return {
        "id": reversed_domain,
        "name": __title__,
        "type": __service_type__,
        "description": __description__,
        "organization": {
            "name": __org_name__,
            "url": __org_welcomeUrl__,
        },
        "contactUrl": __org_contactUrl__,
        "documentationUrl": __docs_url__,
        "createdAt": __createtime__,
        "updatedAt": __updatetime__,
        "environment": __service_env__,
        "version": __version__,
    }
def p_declaracao_funcao(p):
    """declaracao_funcao : tipo cabecalho
                         | cabecalho
    """
    # NOTE: the docstring above is the PLY grammar rule for this production —
    # it is read by the parser generator and must not be edited casually.
    # Build the AST node and attach the children parsed for this rule.
    pai = MyNode(name='declaracao_funcao', type='DECLARACAO_FUNCAO')
    p[0] = pai
    p[1].parent = pai
    # len(p) == 3 means the optional 'tipo' was present, so p[2] is cabecalho.
    if len(p) == 3:
        p[2].parent = pai
def Random_Forest_Classifier_Circoscrizione(X, y, num_features, cat_features):
    """
    Builds, fits, tests and returns a pipeline with a Random Forest
    classifier (here applied to the district — "circoscrizione" —
    classification problem), with some characteristics that are
    self-evident from the code.

    Input: X is the data, y the targets;
    num_features and cat_features are the numeric and categorical
    features (as lists of strings).

    Prints the test accuracy.
    """
    X = X[num_features+cat_features]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
    # Scale numeric features, one-hot encode categorical ones, drop the rest.
    transf = make_column_transformer(
        (StandardScaler(), num_features),
        (OneHotEncoder(handle_unknown="ignore"), cat_features),
        remainder='drop')
    pipe_RFC = Pipeline([
        #('encoder', OneHotEncoder(sparse=False, handle_unknown='ignore')),
        ('transformer', transf),
        ('Regressor', RandomForestClassifier(bootstrap=False))
    ])
    # Try a grid search with cross-validation.
    CV_parameters = {'Regressor__n_estimators': [50, 100, 200, 500],  # higher values slow the algorithm down
                     'Regressor__max_depth': [5, 10, 20, 50, 70, 100],  # Occam's razor to avoid overfitting
                     'Regressor__min_samples_leaf': [1, 2, 4],  # Occam's razor again
                     'Regressor__min_samples_split': [2, 5, 10, 15, 20],
                     }
    # Tuning parameters of our random forest.
    RFC_CV = GridSearchCV(estimator=pipe_RFC,
                          param_grid=CV_parameters,
                          n_jobs=-1,
                          cv=2)
    RFC_CV.fit(X_train, y_train)
    y_RFC_pred = RFC_CV.predict(X_test)
    print("Random forest classifier accuracy score:", accuracy_score(y_test, y_RFC_pred))
    # Gonna leave these here, more convenient than return the test values as in "return RFC_CV, (X_test, y_test)"
    plot_confusion_matrix(RFC_CV, X_test, y_test)
    plot_precision_recall_curve(RFC_CV, X_test, y_test)
    plot_roc_curve(RFC_CV, X_test, y_test)
    return RFC_CV
def get_open_id_connection_providers_data():
    """Generate the OpenID connection providers' data."""
    providers = client.list_open_id_connect_providers()['OpenIDConnectProviderList']
    iam_output['OpenIDConnectProviderList'] = providers
def test_jdbc_producer_multischema_multitable(sdc_builder, sdc_executor, database):
    """Test a JDBC Producer in a multischema scenario with different destination tables for each schema. We create 3
    schemas with one table for each, with different names. Then we use an EL expressions to insert records according to
    the /schema and /table record fields.

    There were a limitation in previous versions that affected to MySQL and MemSQL. These RDBMs do not differentiate
    between schema and database. SDC used the database configured in the JDBC connection string, and looked for database
    metadata filtering by database+schema. If the schema were other than the database of the connection string, metadata
    could not be retrieved. This was a problem in a multischema scenario, where several schemas are employed.

    Pipeline:
        dev_raw_data_source >> record_deduplicator >> jdbc_producer
                               record_deduplicator >> trash
    """
    # One schema per record, each with its own destination table.
    schema1_name = _get_random_name(database, prefix='stf_schema_')
    schema2_name = _get_random_name(database, prefix='stf_schema_')
    schema3_name = _get_random_name(database, prefix='stf_schema_')
    table1_name = _get_random_name(database, prefix='stf_table_')
    table2_name = _get_random_name(database, prefix='stf_table_')
    table3_name = _get_random_name(database, prefix='stf_table_')
    _create_schema(schema1_name, database)
    _create_schema(schema2_name, database)
    _create_schema(schema3_name, database)
    table1 = _create_table(table1_name, database, schema_name=schema1_name)
    table2 = _create_table(table2_name, database, schema_name=schema2_name)
    table3 = _create_table(table3_name, database, schema_name=schema3_name)
    # Each record routes itself via its /schema and /table fields.
    ROWS = [{'schema': schema1_name, 'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
            {'schema': schema2_name, 'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
            {'schema': schema3_name, 'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
    INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema and Multitable Insert',
                                              INPUT_DATA, "${record:value('/table')}", 'INSERT')
    # JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
    # (e.g. Sql Server database). We overwrite it afterwards for the test.
    pipeline.configure_for_environment(database)
    pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
    # JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
    # when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
    # error.
    pipeline[2].set_attributes(table_name="${record:value('/table')}")
    sdc_executor.add_pipeline(pipeline)
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
        sdc_executor.stop_pipeline(pipeline)
        # Each record must have landed in its own schema's table.
        result1 = database.engine.execute(table1.select())
        result2 = database.engine.execute(table2.select())
        result3 = database.engine.execute(table3.select())
        data1 = result1.fetchall()
        data2 = result2.fetchall()
        data3 = result3.fetchall()
        # NOTE(review): column order here is (name, id) — presumably matching
        # _create_table's column definition order; confirm against the helper.
        assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
        assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
        assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
        result1.close()
        result2.close()
        result3.close()
    finally:
        # Always clean up tables and schemas, even on assertion failure.
        logger.info('Dropping tables %s, %s, %s...', table1_name, table2_name, table3_name)
        table1.drop(database.engine)
        table2.drop(database.engine)
        table3.drop(database.engine)
        logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
        _drop_schema(schema1_name, database)
        _drop_schema(schema2_name, database)
        _drop_schema(schema3_name, database)
def convert_data_set(data_set, specific_character_set):
    """ Convert a DICOM data set to its NIfTI+JSON representation.

    If the data set carries its own Specific Character Set, that one
    overrides the caller-supplied default before elements are converted.
    Returns a dict mapping human-readable tag names to converted values.
    """
    if odil.registry.SpecificCharacterSet in data_set:
        specific_character_set = data_set[odil.registry.SpecificCharacterSet]
    return {
        get_tag_name(tag): convert_element(element, specific_character_set)
        for tag, element in data_set.items()
    }
def test_slice_1():
    """ Slice on index, <int> """
    dataset = AstroData(dataset=TESTFILE)
    single_ext = dataset[1]
    assert len(single_ext) == 1
def log(do):
    """Record an event in the CSV log.

    Logging events matters because it ensures that repeat signals don't
    get blasted from the feed constantly.
    """
def calc_distance(
        p1: Location,
        p2: Location
) -> float:
    """Compute the travel distance between two planet locations.

    Different galaxies dominate the distance, then different systems,
    then different planet slots within the same system.

    Args:
        p1 (Location): origin planet of interest
        p2 (Location): destination planet of interest

    Returns:
        float: the distance between ``p1`` and ``p2``.

    Raises:
        ValueError: if both locations refer to the exact same planet.
    """
    # BUG FIX: the original compared p1.coords.galaxy to itself (always
    # False, so the galaxy branch was unreachable) and, inside that branch,
    # subtracted p1.coords.planet instead of p1.coords.galaxy.
    if p1.coords.galaxy != p2.coords.galaxy:
        distance = 20000 * math.fabs(p2.coords.galaxy - p1.coords.galaxy)
    elif p1.coords.system != p2.coords.system:
        distance = 2700 + 95 * math.fabs(p2.coords.system - p1.coords.system)
    elif p1.coords.planet != p2.coords.planet:
        distance = 1000 + 5 * math.fabs(p2.coords.planet - p1.coords.planet)
    else:
        # Same galaxy, system and planet: no meaningful distance exists.
        raise ValueError
    return distance
def reset_noise_model():
    """Return test reset noise model"""
    # Single-qubit thermal relaxation on the u-gates; its tensor square on CX.
    single_qubit_error = thermal_relaxation_error(50, 50, 0.1)
    two_qubit_error = single_qubit_error.tensor(single_qubit_error)
    model = NoiseModel()
    model.add_all_qubit_quantum_error(single_qubit_error, ['u1', 'u2', 'u3'])
    model.add_all_qubit_quantum_error(two_qubit_error, ['cx'])
    return NoiseWithDescription(model, "Reset Noise")
def parse_to_timestamp(dt_string):
    """Attempt to parse a string into a timezone-aware Timestamp.

    Parameters
    ----------
    dt_string: str

    Returns
    -------
    pandas.Timestamp

    Raises
    ------
    ValueError
        If the string cannot be parsed to timestamp, or parses to null
    """
    parsed = pd.Timestamp(dt_string)
    if pd.isnull(parsed):
        raise ValueError
    if parsed.tzinfo is not None:
        return parsed
    # Naive timestamps are localized to UTC, consistent with schema ISODateTime.
    return parsed.tz_localize('UTC')
def listen():
    """Start listening to slack channels the Testimonials Turtle bot is in."""
    # STOPSHIP: better error handling so an exception doesn't crash the module
    client = slackclient.SlackClient(
        secrets.slack_testimonials_turtle_api_token)
    if not client.rtm_connect():
        logging.critical("Failed to connect to Slack RTM API, bailing.")
        return
    # Poll forever, handing each batch of messages to the handler and then
    # sleeping briefly so we don't hammer the RTM API.
    while True:
        handle_messages(client.rtm_read())
        time.sleep(1)
def __factor(score, items_sum, item_count):
    """Helper method for the pearson correlation coefficient algorithm."""
    mean_value = items_sum / item_count
    return score - mean_value
def cross_validate(estimator: BaseEstimator, X: np.ndarray, y: np.ndarray,
                   scoring: Callable[[np.ndarray, np.ndarray, ...], float], cv: int = 5) -> Tuple[float, float]:
    """
    Evaluate metric by cross-validation for given estimator

    Parameters
    ----------
    estimator: BaseEstimator
        Initialized estimator to use for fitting the data

    X: ndarray of shape (n_samples, n_features)
        Input data to fit

    y: ndarray of shape (n_samples, )
        Responses of input data to fit to

    scoring: Callable[[np.ndarray, np.ndarray, ...], float]
        Callable receiving the true- and predicted values for each sample
        and returning the score for that input.

    cv: int
        Specify the number of folds.

    Returns
    -------
    train_score: float
        Average train score over folds

    validation_score: float
        Average validation score over folds
    """
    n_samples = X.shape[0]
    fold_size = int(n_samples / cv)
    train_scores, validation_scores = [], []
    for fold in range(cv):
        # Fold `fold` covers rows [start, stop); everything else is training
        # data (including any tail rows left over when cv does not divide n).
        start, stop = fold * fold_size, (fold + 1) * fold_size
        train_X = np.concatenate((X[:start], X[stop:]))
        train_y = np.concatenate((y[:start], y[stop:]))
        estimator.fit(train_X, train_y)
        train_scores.append(scoring(train_y, estimator.predict(train_X)))
        validation_scores.append(scoring(y[start:stop], estimator.predict(X[start:stop])))
    return np.average(train_scores), np.average(validation_scores)
def get_img_path() -> Path:
    """
    Gets the path of the Mac installation image from the command line
    arguments. Fails with an error if the argument is not present or the given
    file doesn't exist.
    """
    argv = sys.argv
    if len(argv) < 2:
        sys.exit(
            "Please provide the path to the Fuzzlecheck image as argument."
        )
    if len(argv) > 2:
        sys.exit("More arguments provided than needed")
    image_path = Path(argv[1])
    if not image_path.exists():
        sys.exit("Given dmg image ({}) doesn't exist.".format(argv[1]))
    return image_path
def test_sslscan_package_installed(host):
    """
    Tests if sslscan is installed.
    """
    package = host.package(PACKAGE)
    assert package.is_installed
def global_info(request):
    """Context processor holding user and session information (login user,
    left-menu button groups, display name, ITOM address)."""
    loginUser = request.session.get('login_username', None)
    if loginUser is not None:
        user = users.objects.get(username=loginUser)
        # audit_users_list_info = WorkflowAuditSetting.objects.filter().values('audit_users').distinct()
        # Distinct audit-user strings for workflow types 1 and 2 only.
        audit_users_list_info = WorkflowAuditSetting.objects.filter(Q(workflow_type=1) | Q(workflow_type=2)).values('audit_users').distinct()
        project_leaders_list = [ leaders['group_leader'] for leaders in Group.objects.all().values('group_leader').distinct() ]
        audit_users_list = []
        # Flatten comma-separated audit-user entries into one flat list.
        for i in range(len(audit_users_list_info)):
            if ',' in audit_users_list_info[i]['audit_users']:
                audit_users_list += audit_users_list_info[i]['audit_users'].split(',')
            else:
                audit_users_list.append(audit_users_list_info[i]['audit_users'])
        UserDisplay = user.display
        leftMenuBtns = leftMenuBtnsCommon
        if UserDisplay == '':
            UserDisplay = loginUser
        # NOTE(review): a superuser who is also listed as an audit user or
        # project leader gets the auditor/project button groups appended
        # twice — confirm whether duplicates are intended.
        if user.is_superuser:
            leftMenuBtns = leftMenuBtns + leftMenuBtnsProject + leftMenuBtnsAuditor + leftMenuBtnsSuper + leftMenuBtnsDoc
        if loginUser in audit_users_list:
            leftMenuBtns = leftMenuBtns + leftMenuBtnsAuditor
        if loginUser in project_leaders_list:
            leftMenuBtns = leftMenuBtns + leftMenuBtnsProject
    else:
        # Anonymous session: no menu buttons and no display name.
        leftMenuBtns = ()
        UserDisplay = ''
    return {
        'loginUser': loginUser,
        'leftMenuBtns': leftMenuBtns,
        'UserDisplay': UserDisplay,
        'ACCESS_ITOM_ADDR': ACCESS_ITOM_ADDR
    }
def arcsech(val):
    """Inverse hyperbolic secant: arccosh(1 / val)."""
    reciprocal = 1. / val
    return np.arccosh(reciprocal)
def addEachAtomAutocorrelationMeasures(coordinates, numAtoms):
    """
    Computes sum ri*rj, and ri = coords for a single trajectory. Results are stored in different array indexes for different atoms.

    Parameters
    ----------
    coordinates : np.ndarray
        Trajectory coordinates; reshaped below to (n_frames, 3*numAtoms), so
        it is assumed to arrive as (n_frames, numAtoms, 3) — TODO confirm.
    numAtoms : int
        Number of atoms per frame.

    Returns
    -------
    tuple of np.ndarray
        (rirj, ri, rirjMeasures, riMeasures), each of shape
        (n_frames, 3*numAtoms): autocorrelation products, raw coordinates,
        and the per-lag sample counts.
    """
    rirj = np.zeros((1, 3*numAtoms), dtype=np.float64)
    ri = np.zeros((1, 3*numAtoms), dtype=np.float64)
    rirjMeasures = np.zeros((1, 3*numAtoms))
    riMeasures = np.zeros((1, 3*numAtoms))
    # Flatten the per-atom xyz components into a 2-D (n_frames, 3*numAtoms) matrix.
    coordinates = coordinates.reshape((coordinates.shape[0], 3*coordinates.shape[1]))
    # Grow the accumulators to one row per frame; ndarray.resize zero-fills
    # the newly added rows in place.
    rirj.resize((coordinates.shape[0], rirj.shape[1]))
    ri.resize((coordinates.shape[0], ri.shape[1]))
    rirjMeasures.resize((coordinates.shape[0], rirjMeasures.shape[1]))
    riMeasures.resize((coordinates.shape[0], riMeasures.shape[1]))
    for i in range(3*numAtoms):
        # `correlate` is a module-level helper; it appears to return the
        # autocorrelation of one coordinate column plus the number of samples
        # contributing at each lag — verify against its definition.
        result, measures = correlate(coordinates[:,i])
        rirj[:,i] += result
        ri[:,i] += coordinates[:,i]
        rirjMeasures[:,i] += measures
        riMeasures[:,i] += np.ones(len(measures))
    return rirj, ri, rirjMeasures, riMeasures
def api_posts_suggest(request):
    """Return suggestion-candidate posts as JSON."""
    keyword = request.GET.get('keyword')
    if not keyword:
        post_list = []
    else:
        # Case-insensitive substring match against post titles.
        post_list = [
            {'pk': post.pk, 'title': post.title}
            for post in Post.objects.filter(title__icontains=keyword)
        ]
    return JsonResponse({'post_list': post_list})
def parse_config(args):  # pylint: disable=R0912
    """Parse and update the gitconfig file.

    Ensures ``~/.gitconfig_2`` defines the ``diff "kettle"`` and
    ``diff "prpt"`` drivers (both invoking the kettlediff converter), then
    either prints the resulting template (``args.write`` false) or rewrites
    the file in place (``args.write`` true).

    Args:
        args: parsed CLI namespace with boolean attributes ``exe`` (use the
            compiled kettlediff.exe instead of the Python script) and
            ``write`` (write the file instead of printing a template).

    Returns:
        str: the configured ``core.attributesfile`` path.
    """
    configpath = os.path.join(HOME, ".gitconfig_2")
    config = configparser.ConfigParser(interpolation=None)
    config.read(configpath)
    if "core" not in config:
        config["core"] = {}
    if "attributesfile" not in config["core"]:
        config["core"]["attributesfile"] = "~/.gitattributes"
    attributespath = config["core"]["attributesfile"]
    # Both custom diff drivers share the same textconv command; only the
    # xfuncname regex differs, so compute the command once.
    if args.exe:
        textconv = "\"'{}'\"".format(
            os.path.join(os.getcwd(), "kettlediff.exe").replace("\\", "/"))
    else:
        textconv = "python \"'{}'\"".format(
            os.path.join(os.getcwd(), "kettlediff.py").replace("\\", "/"))
    if 'diff "kettle"' not in config:
        config['diff "kettle"'] = {}
    if "textconv" not in config['diff "kettle"'] and \
            "xfuncname" not in config['diff "kettle"']:
        config['diff "kettle"']["textconv"] = textconv
        config['diff "kettle"']["xfuncname"] = (
            "< name > (.*) < /name > | < order > | < hops > )")
    else:
        print("already diff for kettle in .gitconfig!")
    if 'diff "prpt"' not in config:
        config['diff "prpt"'] = {}
    if "textconv" not in config['diff "prpt"'] and \
            "xfuncname" not in config['diff "prpt"']:
        config['diff "prpt"']["textconv"] = textconv
        config['diff "prpt"']["xfuncname"] = ".*name=.*"
    else:
        print("already diff for prpt in .gitconfig!")
    if not args.write:
        print("Template for .gitconfig:\n---------------------------")
        # BUG FIX: the original filtered with `config[section] != "DEFAULT"`,
        # comparing a SectionProxy to a string (always True), so the empty
        # DEFAULT section was always printed. Compare the section NAME.
        for section in [section for section in config if section != "DEFAULT"]:
            print("[{}]".format(section))
            for key, item in config[section].items():
                print("\t{} = {}".format(key, item))
        print("---------------------------")
    else:
        with open(configpath, "w") as file:
            for section in config:
                print("[{}]".format(section), file=file)
                for key, item in config[section].items():
                    print("\t{} = {}".format(key, item), file=file)
    return attributespath
def _get_config_from_ini_file(f):
    """Load params from the specified filename and return the params as a
    dictionary"""
    from os.path import expanduser
    import configparser
    filename = expanduser(f)
    _log.debug('Loading parms from {0}'.format(filename))
    parser = configparser.ConfigParser()
    parser.optionxform = str
    parser.read(filename)
    params = {}
    for section_name in parser.sections():
        section_object = ConfigObject(name=section_name)
        for attribute in parser.options(section_name):
            value = parser.get(section_name, attribute)
            # Replace the AWSH_ROOT variable with the current value if present
            value = value.replace('$AWSH_ROOT', CONST_AWSH_ROOT)
            _log.debug('ConfigObject[{0}] {1}: {2}'.format(section_name, attribute, value))
            section_object.add_property(attribute, value)
        params[section_object.name] = section_object
    return params
def test_launch_with_none_or_empty_oauth_version_value():
    """
    Does the launch request work with an empty or None oauth_version value?
    """
    oauth_consumer_key = 'my_consumer_key'
    oauth_consumer_secret = 'my_shared_secret'
    launch_url = 'http://jupyterhub/hub/lti/launch'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    args = factory_lti11_basic_launch_args(oauth_consumer_key, oauth_consumer_secret,)
    validator = LTI11LaunchValidator({oauth_consumer_key: oauth_consumer_secret})
    # Both a missing (None) and an empty oauth_version must be rejected.
    for bad_version in (None, ''):
        args['oauth_version'] = bad_version
        with pytest.raises(HTTPError):
            validator.validate_launch_request(launch_url, headers, args)
def read_task_values(task_result: TaskResult) -> Tuple[bool, str, List[float], int]:
    """Reads unitary and federated accuracy from results.json.

    Args:
        task_result (TaskResult): Wrapper around a results.json file
            containing the required fields.

    Example:
        >>> print(read_task_values(task_result))
        (false, "VisionTask", [0.12, 0.33], 5)

    Returns:
        ~typing.Tuple[bool, str, ~typing.List[float], int]: Tuple consisting of information,
        if the task is unitary or not, the task label, a list of accuracies and the epochs.
    """
    return (
        task_result.is_unitary(),
        task_result.get_label(),
        task_result.get_accuracies(),
        task_result.get_E(),
    )
def get_two_dots(full=1):
    """ return all posible simple two-dots """
    # NOTE(review): the `full` parameter is currently unused; kept for
    # backward compatibility with existing callers.
    gates = bottomGates()
    # Single two-dot configuration built from two overlapping gate windows.
    two_dots = [{'gates': gates[0:3] + gates[2:5]}]  # two dot case
    for dot in two_dots:
        dot['name'] = '-'.join(dot['gates'])
    return two_dots
def read_values_of_line(line):
    """Read values in line. Line is splitted by INPUT_FILE_VALUE_DELIMITER."""
    # A shared value/decimal delimiter would make parsing ambiguous.
    if INPUT_FILE_VALUE_DELIMITER == INPUT_FILE_DECIMAL_DELIMITER:
        exit_on_error(f"Input file value delimiter and decimal delimiter are equal. Please set INPUT_FILE_VALUE_DELIMITER and INPUT_FILE_DECIMAL_DELIMITER.")
    # Strip a trailing newline and carriage return before splitting.
    cleaned = line.rstrip('\n').rstrip('\r')
    return cleaned.split(INPUT_FILE_VALUE_DELIMITER)
def find_allergens(ingredients):
    """Return ingredients with corresponding allergen.

    Args:
        ingredients: mapping of ingredient -> set of candidate allergens.
            The mapping is reduced in place.

    Returns:
        dict mapping each ingredient to its single resolved allergen. If
        the input is ambiguous (no unique assignment exists), remaining
        allergens are popped arbitrarily, matching the previous behaviour.
    """
    # BUG FIX: a single elimination pass (in ascending candidate-count
    # order) could leave an already-visited ingredient unreduced when a
    # later ingredient only became a singleton after the earlier one was
    # visited. Repeat elimination until everything is resolved or no
    # further progress is possible.
    while True:
        remaining = sum(len(allergens) for allergens in ingredients.values())
        if remaining == len(ingredients):
            break  # every ingredient has exactly one candidate left
        for ingredient in sorted(ingredients, key=lambda i: len(ingredients[i])):
            if len(ingredients[ingredient]) == 1:
                # Remove the resolved allergen from every other candidate set.
                for other_ingredient, allergens in ingredients.items():
                    if ingredient == other_ingredient:
                        continue
                    ingredients[other_ingredient] = (allergens
                                                     - ingredients[ingredient])
        if sum(len(allergens) for allergens in ingredients.values()) == remaining:
            break  # no progress: input is ambiguous
    return {
        ingredient: allergen.pop()
        for ingredient, allergen in ingredients.items()
    }
def test_second_apply(record_xml_attribute, app_path):
    """Test that we can run kfctl apply again with error.

    Args:
      app_path: The app dir of kubeflow deployment.
    """
    _, kfctl_path = kfctl_util.get_kfctl_go_build_dir_binary_path()
    if not os.path.exists(kfctl_path):
        message = "kfctl Go binary not found: {path}".format(path=kfctl_path)
        logging.error(message)
        raise RuntimeError(message)
    config_file = os.path.join(app_path, "tmp.yaml")
    util.run([kfctl_path, "apply", "-V", "-f=" + config_file], cwd=app_path)
def write_lhc_ascii(output_path: Union[str, Path], tbt_data: TbtData) -> None:
    """
    Write a ``TbtData`` object's data to file, in the ASCII **SDDS** format.

    One file is written per bunch: for multi-bunch data the bunch id is
    appended to the file name, for single-bunch data the file is written
    at ``output_path`` unchanged.

    Args:
        output_path (Union[str, Path]): path to the disk location where to write the data.
        tbt_data (TbtData): the ``TbtData`` object to write to disk.
    """
    output_path = Path(output_path)
    LOGGER.info(f"Writing TbTdata in ASCII SDDS (LHC) format at '{output_path.absolute()}'")
    for bunch_id in range(tbt_data.nbunches):
        LOGGER.debug(f"Writing data for bunch {bunch_id}")
        suffix = f"_{tbt_data.bunch_ids[bunch_id]}" if tbt_data.nbunches > 1 else ""
        # BUG FIX: Path.with_suffix() raises ValueError for a suffix that
        # does not start with '.' (e.g. "_1") and silently strips the file
        # extension when given "". Append the bunch id to the name instead.
        bunch_path = output_path.with_name(output_path.name + suffix)
        with bunch_path.open("w") as output_file:
            _write_header(tbt_data, bunch_id, output_file)
            _write_tbt_data(tbt_data, bunch_id, output_file)
def send_calendar_events():
    """Sends calendar events.

    Builds calendar events for cycle tasks and syncs them; any failure is
    captured and reported in the response rather than propagated.
    """
    error_msg = None
    try:
        with benchmark("Send calendar events"):
            # Build cycle-task calendar events, then push them to the calendar.
            builder = calendar_event_builder.CalendarEventBuilder()
            builder.build_cycle_tasks()
            sync = calendar_event_sync.CalendarEventsSync()
            sync.sync_cycle_tasks_events()
    except Exception as exp:  # pylint: disable=broad-except
        # NOTE(review): `exp.message` exists only on Python 2 exceptions; on
        # Python 3 this line itself raises AttributeError — confirm the
        # runtime, or switch to str(exp).
        logger.error(exp.message)
        error_msg = exp.message
    return utils.make_simple_response(error_msg)
def custom_precision(labels: np.array, predictions: np.array, exp_path: str, exp_name: str):
    """
    Calculate custom precision value.

    Per-class precision is computed from the confusion matrix and the
    per-class values are optionally dumped to JSON in the experiment folder.

    Parameters
    ----------
    exp_name : str
        experiment name
    exp_path : str
        path to experiment folder
    labels : np.array
    predictions : np.array

    Returns
    -------
    float:
        average precision
    """
    flattened = [lab for label in labels for lab in label]
    num_labels = len(set(flattened))
    # https://www.researchgate.net/figure/Confusion-matrix-for-multi-class-classification-The-confusion-matrix-of-a_fig7_314116591
    confusion_matrix = _confusion_matrix(labels, predictions, num_labels)
    precisions = []
    for label in range(num_labels):
        true_positive = confusion_matrix[label, label]
        false_positive = np.sum(confusion_matrix[:, label]) - confusion_matrix[label, label]
        denominator = true_positive + false_positive
        precisions.append(true_positive / denominator if denominator != 0 else 0)
        print('Precision:', label)
    if exp_path:
        file_name = 'result_precison_single' + exp_name + '.json'
        with open(os.path.join(exp_path, file_name), "w") as fp:
            json.dump(precisions, fp)
    return np.mean(precisions)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.