content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_all_listening_ports() -> List[int]:
    """Return all TCP port numbers currently in LISTEN state (any address).

    Parses /proc/net/tcp (Linux). In each data row the second field is
    ``local_address:port`` with the port in hex, and the fourth field is
    the socket state.

    Returns:
        List[int]: listening port numbers (may contain duplicates when the
        same port is bound on several addresses).
    """
    LISTEN_STATE = '0A'  # TCP_LISTEN
    res = []
    with open('/proc/net/tcp', 'r') as file:
        next(file, None)  # skip the header row; default guards an empty file
        for line in file:
            # split() collapses the variable-width space padding; split(' ')
            # would produce empty fields and shift the column indices.
            fields = line.split()
            if len(fields) < 4:
                continue  # malformed row: skip rather than crash
            hex_port = fields[1].split(':')[1]
            if fields[3] == LISTEN_STATE:
                res.append(int(hex_port, 16))
    return res
async def mock_race_result() -> dict:
    """Build and return a fixed mock race-result payload for tests."""
    mock = dict(
        id="race_result_1",
        race_id="190e70d5-0933-4af0-bb53-1d705ba7eb95",
        timing_point="Finish",
        no_of_contestants=2,
        ranking_sequence=["time_event_1", "time_event_2"],
        status=0,
    )
    return mock
def qr_to_install_code(qr_code: str) -> tuple[zigpy.types.EUI64, bytes]:
    """Parse a QR code into a device address and install code.

    Tries every pattern in QR_CODES; on the first match returns the
    (EUI64 address, converted install code) pair.

    Raises:
        vol.Invalid: if no known pattern matches the QR code.
    """
    for pattern in QR_CODES:
        found = re.search(pattern, qr_code, re.VERBOSE)
        if not found:
            continue
        # The matched hex address is byte-reversed before building the EUI64.
        raw_ieee = binascii.unhexlify(found[1])
        ieee = zigpy.types.EUI64(raw_ieee[::-1])
        # convert_install_code also acts as a sanity check on the code.
        return ieee, convert_install_code(found[2])
    raise vol.Invalid(f"couldn't convert qr code: {qr_code}")
def vwr(scene, analyzer, test_number, workbook=None, sheet_format=None, agg_dict=None):
    """
    Calculates Variability Weighted Return (VWR).
    :param scene: Scenario settings dict; keys "save_db" and "save_excel"
        select which outputs are produced.
    :param analyzer: Backtest analyzer.
    :param test_number: Identifier of the current test run; used to key the
        results DataFrame via add_key_to_df.
    :param workbook: Excel workbook to be saved to disk.
    :param sheet_format: Dictionary holding formatting information such as col width, font etc.
    :param agg_dict: Collects the dictionary outputs from backtrader for using in platting.
    :return: Tuple (workbook, agg_dict) — the Excel workbook to be saved to
        disk and the updated aggregation dict.
    """
    # Get the drawdowns auto ordered nested dictionary
    vwr_dict = analyzer.get_analysis()
    columns = [
        "vwr",
    ]
    if scene["save_db"]:
        # Transpose so each analyzer key becomes a column of a one-row frame.
        df = pd.DataFrame(vwr_dict.values(), index=vwr_dict.keys()).T
        df = add_key_to_df(df, test_number)
        agg_dict["vwr"] = df
    if scene["save_excel"]:
        worksheet = workbook.add_worksheet("vwr")
        worksheet.write_row(0, 0, columns)
        worksheet.set_row(0, None, sheet_format["header_format"])
        worksheet.set_column("A:A", sheet_format["x_wide"], sheet_format["align_left"])
        worksheet.set_column("B:B", sheet_format["medium"], sheet_format["align_left"])
        # One row per analyzer entry: key in column A, value in column B.
        for i, (k, v) in enumerate(vwr_dict.items()):
            worksheet.write_row(i + 1, 0, [k])
            worksheet.write_row(i + 1, 1, [v])
    return workbook, agg_dict
def StepCommandContains(check, step_odict, step, argument_sequence):
    """Assert that a step's command contained the given sequence of arguments.
    Args:
      check (callable) - Check function supplied by the recipe test framework,
        called with a description and a boolean result.
      step_odict (dict) - Mapping from step name to step result objects.
      step (str) - The name of the step to check the command of.
      argument_sequence (list of (str|regex)) - The expected sequence of
        arguments. Strings will be compared for equality, while regex patterns
        will be matched using the search method. The check will pass if the step's
        command contains a subsequence where the elements are matched by the
        corresponding elements of argument_sequence.
    """
    # NOTE(review): the `in` below relies on step_odict[step].cmd implementing
    # subsequence/regex containment (not plain list membership) -- confirm
    # against the recipe engine's command object type.
    check('command line for step %s contained %s' % (step, argument_sequence),
          argument_sequence in step_odict[step].cmd)
def dec2hms(dec):
    """Convert decimal degrees to an (hour, minute, second) tuple.

    360 degrees map to 24 hours (15 degrees per hour); minutes and seconds
    are base-60 subdivisions of the hour.

    ADW: This should really be replaced by astropy

    :param dec: angle in decimal degrees (numeric or numeric string)
    :return: (hour, minute, second); hour and minute are ints truncated
        toward zero, second is a float.
    """
    DEGREE = 360.
    HOUR = 24.
    MINUTE = 60.
    fhour = float(dec) * (HOUR / DEGREE)
    hour = int(fhour)
    fminute = (fhour - hour) * MINUTE
    minute = int(fminute)
    second = (fminute - minute) * MINUTE
    return (hour, minute, second)
def soft_update(target, source, tau):
    """Polyak-average the source network's parameters into the target.

    Each target parameter is set in place to
    ``(1 - tau) * target + tau * source`` (DDPG-style soft update).

    Inputs:
        target (torch.nn.Module): Net whose parameters are updated
        source (torch.nn.Module): Net providing the new parameter values
        tau (float, 0 < x < 1): Interpolation weight toward the source
    """
    for tgt_p, src_p in zip(target.parameters(), source.parameters()):
        blended = tgt_p.data * (1.0 - tau) + src_p.data * tau
        tgt_p.data.copy_(blended)
def logfpsd(data, rate, window, noverlap, fmin, bins_per_octave):
    """Computes ordinary linear-frequency power spectral density, then multiplies by a matrix
    that converts to log-frequency space.
    Returns the log-frequency PSD, the centers of the frequency bins,
    and the time points.
    Adapted from Matlab code by Dan Ellis (Columbia):
    http://www.ee.columbia.edu/ln/rosa/matlab/sgram/logfsgram.m

    :param data: input signal samples
    :param rate: sample rate (Hz)
    :param window: FFT window length in samples
    :param noverlap: number of overlapping samples between frames
    :param fmin: lowest log-frequency bin center (Hz)
    :param bins_per_octave: number of log-frequency bins per octave
    """
    stft, linfreqs, times = specgram(data, window, Fs=rate, noverlap=noverlap)
    # construct matrix for mapping to log-frequency space
    fratio = 2**(1/bins_per_octave)  # ratio between adjacent frequencies
    # Number of log bins needed to span fmin .. Nyquist (rate/2).
    nbins = np.floor(np.log((rate/2)/fmin)/np.log(fratio))
    #fftfreqs = (rate/window)*np.arange(window/2+1)
    # NOTE(review): window/2+1 is a float under Python 3 true division and is
    # later used as a repeat count -- confirm this runs on the intended
    # NumPy version (newer NumPy rejects float repeats).
    nfftbins = window/2+1
    # Log-spaced bin centers: fmin * 2**(k / bins_per_octave).
    logffreqs = fmin*np.exp(np.log(2)*np.arange(nbins)/bins_per_octave)
    # Per-bin bandwidth, floored at one FFT bin width.
    logfbws = logffreqs*(fratio-1)
    logfbws = np.maximum(logfbws, rate/window)
    bandoverlapconstant = 0.5475  # controls adjacent band overlap. set by hand by Dan Ellis
    # Gaussian weighting of each linear bin's distance to each log bin center.
    freqdiff = (np.repeat(logffreqs[:,np.newaxis],nfftbins,axis=1) - np.repeat(linfreqs[np.newaxis,:],nbins,axis=0))
    freqdiff = freqdiff / np.repeat(bandoverlapconstant*logfbws[:,np.newaxis],nfftbins,axis=1)
    mapping = np.exp(-0.5*freqdiff**2)
    # Normalize rows so each log bin has unit energy weighting.
    rowEs = np.sqrt(2*np.sum(mapping**2,axis=1))
    mapping = mapping/np.repeat(rowEs[:,np.newaxis],nfftbins,axis=1)
    # perform mapping
    logfpsd = np.sqrt(np.dot(mapping,(np.abs(stft)**2)))
    return logfpsd.T, logffreqs, times
def ppg_dual_double_frequency_template(width):
    """
    EXPOSE
    Generate a PPG template by summing 2 sine waveforms.
    The first waveform has double the second waveform's frequency.
    :param width: the sample size of the generated waveform
    :return: a 1-D numpy array of `width` samples min-max scaled to [0, 1],
        having diastolic peak at the low position
    """
    t = np.linspace(0, 1, width, False)  # 1 second
    sig = np.sin(2 * np.pi * 2 * t - np.pi / 2) + \
        np.sin(2 * np.pi * 1 * t - np.pi / 6)
    # Min-max scale to [0, 1] with plain numpy; equivalent to sklearn's
    # MinMaxScaler but without the extra dependency.
    rng = sig.max() - sig.min()
    if rng == 0:
        # Degenerate (constant) signal: match MinMaxScaler's zero-range
        # handling by returning all zeros.
        return np.zeros_like(sig)
    return (sig - sig.min()) / rng
def init_lg36(init_conf=None):
    """Optionally trigger lg36 initialization eagerly.

    lg36 initializes itself lazily on first use; calling this runs the init
    procedure immediately instead. init_conf is optional: when None, the
    call is purely a trigger with no configuration applied.
    """
    # TODO set global opts to whats in init_conf
    _ = init_conf  # unused until the TODO above is implemented
    _lg36_internal_init()
def _run_fast_scandir(dir, fn_glob):
"""
Quickly scan nested directories to get a list of filenames that match the fn_glob string.
Modified from https://stackoverflow.com/a/59803793/2441026
(faster than os.walk or glob methods, and allows filename matching in subdirectories).
Parameters
----------
dir : str
full path to the input directory
fn_glob : str
glob-style filename pattern
Outputs
-------
subfolders : list
list of strings of all nested subdirectories
files : list
list of strings containing full paths to each file matching the filename pattern
"""
subfolders, files = [], []
for f in os.scandir(dir):
if any(f.name.startswith(s) for s in ["__", "."]):
continue
if f.is_dir():
subfolders.append(f.path)
if f.is_file():
if fnmatch.fnmatch(f.name, fn_glob):
files.append(f.path)
for dir in list(subfolders):
sf, f = _run_fast_scandir(dir, fn_glob)
subfolders.extend(sf)
files.extend(f)
return subfolders, files | 33,510 |
def wait_for_element_to_be_clickable(
    self, timeout=None, poll_frequency=None, select_type=None, element=None
):
    """Wait until an element is visible and enabled, i.e. clickable.

    select_type - option that follows after SelectBy. (Examples: CSS, ID, XPATH, NAME)
    element - locator in string format(Example: "organizationId").
    Defaults: 30 second timeout, polling once per second.
    """
    effective_timeout = 30 if timeout is None else timeout
    effective_poll = 1 if poll_frequency is None else poll_frequency
    waiter = WebDriverWait(self.context.driver, effective_timeout, effective_poll)
    waiter.until(EC.element_to_be_clickable((select_type, element)))
def str_input(prompt: str) -> str:
    """Ask the user for a free-form string response.

    Args:
        prompt (str): Prompt to display.
    Returns:
        str: User string response.
    """
    # A single trailing space separates the prompt from the user's typing.
    return input(prompt + " ")
def action(ra_deg, dec_deg, d_kpc, pm_ra_masyr, pm_dec_masyr, v_los_kms,
           verbose=False):
    """
    Convert observed (equatorial) coordinates and velocities to
    Galactocentric cylindrical phase space, then compute orbital actions.
    parameters:
    ----------
    ra_deg: (float)
        RA in degrees.
    dec_deg: (float)
        Dec in degrees.
    d_kpc: (float)
        Distance in kpc.
    pm_ra_masyr: (float)
        RA proper motion in mas/yr.
    pm_dec_masyr: (float)
        Dec proper motion in mas/yr.
    v_los_kms: (float)
        RV in kms.
    verbose: (bool)
        If True, print the intermediate cylindrical coordinates/velocities.
    returns:
    ------
    R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms
    jR: (float)
        Radial action.
    lz: (float)
        Vertical ang mom.
    jz: (float)
        Vertical action.
    """
    ra_rad = ra_deg * (np.pi / 180.)  # RA [rad]
    dec_rad = dec_deg * (np.pi / 180.)  # dec [rad]
    # Galactocentric position of the Sun:
    X_gc_sun_kpc = 8.  # [kpc]
    Z_gc_sun_kpc = 0.025  # [kpc]
    # Galactocentric velocity of the Sun:
    vX_gc_sun_kms = -9.58  # = -U [kms]
    vY_gc_sun_kms = 10.52 + 220.  # = V+v_circ(R_Sun) [kms]
    vZ_gc_sun_kms = 7.01  # = W [kms]
    # a. convert spatial coordinates (ra,dec,d) to (R,z,phi)
    # (ra,dec) --> Galactic coordinates (l,b):
    lb = bovy_coords.radec_to_lb(ra_rad, dec_rad, degree=False, epoch=2000.0)
    # l_rad = lb[:, 0]
    # b_rad = lb[:, 1]
    l_rad = lb[0]
    b_rad = lb[1]
    # (l,b,d) --> Galactocentric cartesian coordinates (x,y,z):
    xyz = bovy_coords.lbd_to_XYZ(l_rad, b_rad, d_kpc, degree=False)
    # x_kpc = xyz[:, 0]
    # y_kpc = xyz[:, 1]
    # z_kpc = xyz[:, 2]
    x_kpc = xyz[0]
    y_kpc = xyz[1]
    z_kpc = xyz[2]
    # (x,y,z) --> Galactocentric cylindrical coordinates (R,z,phi):
    Rzphi = bovy_coords.XYZ_to_galcencyl(x_kpc, y_kpc, z_kpc,
                                         Xsun=X_gc_sun_kpc, Zsun=Z_gc_sun_kpc)
    # R_kpc = Rzphi[:, 0]
    # phi_rad = Rzphi[:, 1]
    # z_kpc = Rzphi[:, 2]
    R_kpc = Rzphi[0]
    phi_rad = Rzphi[1]
    z_kpc = Rzphi[2]
    # b. convert velocities (pm_ra,pm_dec,vlos) to (vR,vz,vT)
    # (pm_ra,pm_dec) --> (pm_l,pm_b):
    pmlpmb = bovy_coords.pmrapmdec_to_pmllpmbb(pm_ra_masyr, pm_dec_masyr,
                                               ra_rad, dec_rad, degree=False,
                                               epoch=2000.0)
    # pml_masyr = pmlpmb[:, 0]
    # pmb_masyr = pmlpmb[:, 1]
    pml_masyr = pmlpmb[0]
    pmb_masyr = pmlpmb[1]
    # (v_los,pm_l,pm_b) & (l,b,d) --> (vx,vy,vz):
    vxvyvz = bovy_coords.vrpmllpmbb_to_vxvyvz(v_los_kms, pml_masyr, pmb_masyr,
                                              l_rad, b_rad, d_kpc, XYZ=False,
                                              degree=False)
    # vx_kms = vxvyvz[:, 0]
    # vy_kms = vxvyvz[:, 1]
    # vz_kms = vxvyvz[:, 2]
    vx_kms = vxvyvz[0]
    vy_kms = vxvyvz[1]
    vz_kms = vxvyvz[2]
    # (vx,vy,vz) & (x,y,z) --> (vR,vT,vz):
    vRvTvZ = bovy_coords.vxvyvz_to_galcencyl(vx_kms, vy_kms, vz_kms, R_kpc,
                                             phi_rad, z_kpc,
                                             vsun=[vX_gc_sun_kms,
                                                   vY_gc_sun_kms,
                                                   vZ_gc_sun_kms],
                                             galcen=True)
    # vR_kms = vRvTvZ[:, 0]
    # vT_kms = vRvTvZ[:, 1]
    # vz_kms = vRvTvZ[:, 2]
    vR_kms = vRvTvZ[0]
    vT_kms = vRvTvZ[1]
    vz_kms = vRvTvZ[2]
    if verbose:
        print("R = ", R_kpc, "\t kpc")
        print("phi = ", phi_rad, "\t rad")
        print("z = ", z_kpc, "\t kpc")
        print("v_R = ", vR_kms, "\t km/s")
        print("v_T = ", vT_kms, "\t km/s")
        print("v_z = ", vz_kms, "\t km/s")
    # Finally compute the actions from the cylindrical phase-space point.
    jR, lz, jz = calc_actions(R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms)
    return R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms, jR, lz, jz
def getProcWithParent(host,targetParentPID,procname):
    """ returns (parentPID,procPID) tuple for the procname with the specified parent

    Runs `ps -ef | grep procname` on the remote host and scans each output
    line for a process whose parent PID equals targetParentPID.
    Returns (0,0) when no matching process is found or output is unusable.
    """
    cmdStr="ps -ef | grep '%s' | grep -v grep" % (procname)
    cmd=Command("ps",cmdStr,ctxt=REMOTE,remoteHost=host)
    cmd.run(validateAfter=True)
    sout=cmd.get_results().stdout
    logger.info(cmd.get_results().printResult())
    if sout is None:
        return (0,0)
    lines=sout.split('\n')
    for line in lines:
        if line == '':
            continue
        fields=line.lstrip(' ').split()
        # NOTE(review): a single short/malformed line aborts the whole scan
        # with (0,0) instead of being skipped -- confirm this is intended.
        if len(fields) < 3:
            logger.info("not enough fields line: '%s'" % line)
            return (0,0)
        # ps -ef column layout: UID PID PPID ...
        procPID=int(line.split()[1])
        parentPID=int(line.split()[2])
        if parentPID == targetParentPID:
            return (parentPID,procPID)
    logger.info("couldn't find process with name: %s which is a child of PID: %s" % (procname,targetParentPID))
    return (0,0)
def temporal_autocorrelation(array):
    """Computes temporal autocorrelation of array.

    Expects an xarray object holding variable ``u`` with dims
    ('time', 'sample', 'x', 'model') and a 'time' coordinate; returns an
    xarray.Dataset with variable 't_corr' over the same dims, evaluated at
    subsampled lags.
    """
    dt = array['time'][1] - array['time'][0]
    length = array.sizes['time']
    # Evaluate roughly one lag per unit time (at least every step).
    subsample = max(1, int(1. / dt))
    def _autocorrelation(array):
        # Correlate the series with itself shifted by lag d (via jnp.roll);
        # the where() mask zeroes out the wrapped-around entries.
        def _corr(x, d):
            del x  # carry is unused; scan is used only for the lag sweep
            arr1 = jnp.roll(array, d, 0)
            ans = arr1 * array
            ans = jnp.sum(
                jnp.where(
                    jnp.arange(length).reshape(-1, 1, 1, 1) >= d, ans / length, 0),
                axis=0)
            return d, ans
        _, full_result = jax.lax.scan(_corr, 0, jnp.arange(0, length, subsample))
        return full_result
    full_result = jax.jit(_autocorrelation)(
        jnp.array(array.transpose('time', 'sample', 'x', 'model').u))
    # Repackage the jax array as an xarray Dataset; 'dt' carries the lag
    # times corresponding to the subsampled lag indices.
    full_result = xarray.Dataset(
        data_vars=dict(t_corr=(['time', 'sample', 'x', 'model'], full_result)),
        coords={
            'dt': np.array(array.time[slice(None, None, subsample)]),
            'sample': array.sample,
            'x': array.x,
            'model': array.model
        })
    return full_result
def kv_detail(request, kv_class, kv_pk):
    """
    GET to:
    /core/keyvalue/api/<kv_class>/<kv_pk>/detail/
    Returns a single KV instance as JSON (404 with a failure body when the
    pk does not exist).
    """
    Klass = resolve_class(kv_class)
    KVKlass = Klass.keyvalue_set.related.model
    try:
        kv_instance = KVKlass.objects.get(pk=kv_pk)
    except KVKlass.DoesNotExist:
        body = json.dumps({'success': False})
        return HttpResponse(status=404, content=body)
    return HttpResponse(json.dumps(kv_instance.get_bundle()))
def opentrons_protocol(protocol_id):
    """Get OpenTrons representation of a protocol.

    Looks up the protocol by id, enforces ownership/visibility, converts the
    stored JSON protocol with the OpenTrons converter and returns it as a
    downloadable .py attachment. Redirects with a flash message on failure.
    """
    current_protocol = Protocol.query.filter_by(id=protocol_id).first()
    if not current_protocol:
        flash('No such specification!', 'danger')
        return redirect('.')
    # Private protocols may only be fetched by their owner.
    if current_protocol.user != current_user and not current_protocol.public:
        flash('Not your project!', 'danger')
        return redirect('.')
    # Protocol record exists but has no stored body: return an empty page.
    if not current_protocol.protocol:
        return ""
    protocol_object = json.loads(current_protocol.protocol)
    converter = OpenTrons()
    resp = make_response(converter.convert(protocol_object, current_protocol.name, current_protocol.description))
    # Force a file download named after the protocol.
    resp.headers['Content-Type'] = "text"
    resp.headers['Content-Disposition'] = "attachment; filename=" + current_protocol.name + "-opentrons.py"
    return resp
def get_link_external():
    """ Return True if we should link to system BLAS / LAPACK

    If True, attempt to link to system BLAS / LAPACK. Otherwise, compile
    lapack_lite, and link to that.
    The decision comes from ``setup.cfg`` section ``[lapack]`` key
    ``external``; when that is unavailable, from the environment variable
    NIPY_EXTERNAL_LAPACK. A value other than 'False'/'0' means True;
    no value at all means False.
    """
    parser = ConfigParser()
    try:
        parser.read(SETUP_FILE)
        choice = parser.get(SECTION, KEY)
    except (IOError, KeyError, NoOptionError, NoSectionError):
        # Fall back to the environment when the config lookup fails.
        choice = os.environ.get(EXTERNAL_LAPACK_VAR)
    if choice is None:
        return False
    return choice.lower() not in ('0', 'false')
def report_strategy(strategy):
    """
    Reports the assembly strategy that will be used to output.
    ARGUMENTS
        strategy (AssemblyStrategy): the assembly strategy that will be used for assembling
    POST
        The assembly strategy (and a failure notice when it cannot proceed)
        is written to output.
    """
    click.echo(strategy.report)
    if strategy.proceed:
        return
    click.echo("The assembly was unable to proceed.\n")
def flag(request, comment_id, next=None):
    """
    Flags a comment. Confirmation on GET, action on POST.
    Templates: `comments/flag.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    # Restrict lookup to comments belonging to the current site.
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
    # Flag on POST
    if request.method == 'POST':
        # get_or_create makes repeated flagging by the same user idempotent.
        flag, created = comments.models.CommentFlag.objects.get_or_create(
            comment = comment,
            user    = request.user,
            flag    = comments.models.CommentFlag.SUGGEST_REMOVAL
        )
        # Notify listeners (e.g. moderation) that a flag was recorded.
        signals.comment_was_flagged.send(
            sender  = comment.__class__,
            comment = comment,
            flag    = flag,
            created = created,
            request = request,
        )
        return next_redirect(request.POST.copy(), next, flag_done, c=comment.pk)
    # Render a form on GET
    else:
        return render_to_response('comments/flag.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )
def get_nodes_for_homek8s_group(inventory, group_name) -> List[str]:
    """Return the nodes' names of the given group from the inventory as a list."""
    group = inventory['all']['children']['homek8s']['children'][group_name]
    hosts = group['hosts']
    # 'hosts' may be None/empty in the inventory; normalize that to [].
    return list(hosts) if hosts else []
def test_find_by_id_cs_dt(session):
    """Assert that find an change registration DT by ID contains all expected elements.

    Verifies both the ORM attributes and the JSON serialization: a DT
    (debtor transfer) change must carry exactly one added and one deleted
    debtor and no party/collateral/document sections.
    """
    registration = Registration.find_by_id(200000009)
    assert registration
    assert registration.registration_id == 200000009
    assert registration.registration_num
    assert registration.registration_type_cd == 'DT'
    assert registration.financing_id
    json_data = registration.json
    assert json_data['changeType'] == 'DT'
    assert json_data['addDebtors']
    assert len(json_data['addDebtors']) == 1
    assert json_data['deleteDebtors']
    assert len(json_data['deleteDebtors']) == 1
    # A DT change must not expose any other add/delete sections.
    assert 'addSecuredParties' not in json_data
    assert 'addGeneralCollateral' not in json_data
    assert 'addVehicleCollateral' not in json_data
    assert 'deleteSecuredParties' not in json_data
    assert 'deleteGeneralCollateral' not in json_data
    assert 'deleteVehicleCollateral' not in json_data
    assert 'documentId' not in json_data
def is_blank_or_none(value: str):
    """
    Returns True if the specified string is whitespace, empty or None.

    Always returns a real bool. (The previous ``"".__eq__(...)`` form
    evaluated to the truthy ``NotImplemented`` singleton for non-str
    operands such as bytes.)

    :param value: the string to check
    :return: True if the specified string is whitespace, empty or None
    """
    if value is None:
        return True
    try:
        # Only genuinely empty/whitespace *strings* count as blank.
        return value.strip() == ""
    except AttributeError:
        # Non-string, non-None values (ints, lists, ...) are not blank.
        return False
def __get_play_widget(function: typing.Any) -> typing.Any:
    """Generate play widget.

    :param function: Function to associate with Play.
    :return: Play widget.
    """
    # The Play control drives parameter `i` from 0 to 500, stepping every
    # 5000 ms once the user presses play.
    player = widgets.Play(
        value=0,
        min=0,
        max=500,
        step=1,
        interval=5000,
        description="Press play",
        disabled=False,
    )
    return widgets.interactive(function, i=player)
def add_data_to_profile(id, profile_id, read_only, tree_identifier, folder_path=None, web_session=None):
    """Shares data to user group
    Args:
        id (int): The id of the data
        profile_id (int): The id of profile
        read_only (int): The flag that specifies whether the data is read only
        tree_identifier (str): The identifier of the tree
        folder_path (str): The folder path f.e. "/scripts/server1"
        web_session (object): The webserver session object, optional. Will be
            passed in my the webserver automatically
    Returns:
        The id of the folder to which the data was shared.
    Raises:
        MSGException: when tree_identifier is empty, when the caller does not
            own the profile, or when the requested permission is higher than
            the caller's own privilege on the data.
    """
    if tree_identifier.strip() == "":
        raise MSGException(Error.CORE_INVALID_PARAMETER, "Parameter 'tree_identifier' cannot be empty.")
    # folder_path defaults to None; guard before stripping (the previous code
    # raised AttributeError when folder_path was omitted). Blank or "/" means
    # "share at the tree root".
    if folder_path is not None and folder_path.strip() in ("", "/"):
        folder_path = None
    with BackendDatabase(web_session) as db:
        with BackendTransaction(db):
            privileges = backend.get_user_privileges_for_data(db, id, web_session.user_id)
            # Lowest read_only value == most permissive privilege the user holds.
            max_privilege = min([p['read_only'] for p in privileges])
            # We check if the user is owner of given profile
            if backend.get_profile_owner(db, profile_id) != web_session.user_id:
                raise MSGException(Error.MODULES_USER_HAVE_NO_PRIVILEGES, "User have no privileges to perform operation.")
            if max_privilege > read_only:
                raise MSGException(Error.MODULES_SHARING_WITH_HIGHER_PERMISSIONS, "Cannot assign data to profile with higher permission than user has.")
            profile_root_folder_id = backend.get_root_folder_id(db, tree_identifier, 'profile', profile_id)
            if profile_root_folder_id is None:
                profile_root_folder_id = backend.create_profile_data_tree(db, tree_identifier, profile_id)
            if folder_path:
                folder_profile_id, _ = backend.create_folder(db, folder_path, profile_root_folder_id)
            else:
                folder_profile_id = profile_root_folder_id
            backend.add_data_to_folder(db, id, folder_profile_id, read_only)
    return folder_profile_id
def get_activation_function(activation):
    """
    Gets an activation function module given the name of the activation.

    :param activation: The name of the activation function.
    :return: The activation function module.
    :raises ValueError: if the activation name is not supported.
    """
    # Map each supported name to a zero-argument factory so a fresh module
    # instance is constructed on every call.
    factories = {
        'ReLU': nn.ReLU,
        'LeakyReLU': lambda: nn.LeakyReLU(0.1),
        'PReLU': nn.PReLU,
        'tanh': nn.Tanh,
        'SELU': nn.SELU,
        'ELU': nn.ELU,
    }
    if activation not in factories:
        raise ValueError('Activation "{}" not supported.'.format(activation))
    return factories[activation]()
def plot_pairwise_analysis(data_mat, feature_columns, dependent_column, column_names):
    """
    Does a basic pairwise correlation analysis between features and a dependent variable,
    meaning it plots a scatter plot with a linear curve fit through it, with the R^2.
    Then it plots a correlation matrix for all features and the dependent variable.
    data_mat: an NxM matrix, where there are N samples, M-1 features, and 1 dependent variable.
    feature_columns: the column indices of the features in data_mat that are being examined
    dependent_column: the column index of the dependent variable in data_mat
    column_names: a list of len(feature_columns)+1 feature/variable names. The last element is
        the name of the dependent variable.
    """
    # Build one scatter-plot spec per feature against the dependent variable.
    plot_data = list()
    for k,fname in enumerate(column_names[:-1]):
        fi = feature_columns[k]
        pdata = dict()
        pdata['x'] = data_mat[:, fi]
        pdata['y'] = data_mat[:, dependent_column]
        pdata['xlabel'] = column_names[fi]
        pdata['ylabel'] = column_names[-1]
        pdata['R2'] = compute_R2(pdata['x'], pdata['y'])
        plot_data.append(pdata)
    #sort by R^2
    plot_data.sort(key=operator.itemgetter('R2'), reverse=True)
    multi_plot(plot_data, plot_pairwise_scatter, title=None, nrows=3, ncols=3)
    # Correlation matrix over all features plus the dependent variable.
    all_columns = copy.copy(feature_columns)
    all_columns.append(dependent_column)
    C = np.corrcoef(data_mat[:, all_columns].transpose())
    # Last column of C = correlation of each variable with the dependent one.
    Cy = C[:, -1]
    corr_list = [(column_names[k], np.abs(Cy[k]), Cy[k]) for k in range(len(column_names)-1)]
    corr_list.sort(key=operator.itemgetter(1), reverse=True)
    print('Correlations with %s' % column_names[-1])
    for cname,abscorr,corr in corr_list:
        print('\t%s: %0.6f' % (cname, corr))
    # Heatmap of the full correlation matrix, fixed to the [-1, 1] range.
    fig = plt.figure()
    plt.subplots_adjust(top=0.99, bottom=0.15, left=0.15)
    ax = fig.add_subplot(1, 1, 1)
    fig.autofmt_xdate(rotation=45)
    im = ax.imshow(C, interpolation='nearest', aspect='auto', vmin=-1.0, vmax=1.0, origin='lower')
    plt.colorbar(im)
    ax.set_yticks(range(len(column_names)))
    ax.set_yticklabels(column_names)
    ax.set_xticks(range(len(column_names)))
    ax.set_xticklabels(column_names)
def get_value_from_time(a_node="", idx=0):
    """
    gets the value from the time supplied.
    :param a_node: MFn.kAnimCurve node.
    :param idx: <int> the time index.
    :return: <tuple> (time in seconds at the key, curve value at the key).
    """
    # The trailing comma makes this return a 2-tuple: the key's time
    # interpreted in seconds, and the curve value at that key index.
    return OpenMaya.MTime(a_node.time(idx).value(), OpenMaya.MTime.kSeconds).value(), a_node.value(idx),
def getrinputs(rtyper, graph):
    """Return the list of reprs of the input arguments to the 'graph'."""
    args = graph.getargs()
    return list(map(rtyper.bindingrepr, args))
def check_cell_content(filename, sheet, identifier, column, value) :
    """ Test if an excel file test content match the hypothesis
    ---
    filename (str) : Xlsx filename to analyze
    sheet (str) : Excel sheet in which the cell is located
    identifier(str) : identifier identifying the line to analyse (in the Identifier column)
    column (str) : Header of the column to analyze
    value (str) : Value to look for in the selected cell

    Raises an Exception when the column header, the identifier, or the
    expected value is not found. If several rows share the identifier, the
    last matching row is the one checked.
    """
    full_filename = filename
    # Load workbook
    wbook = load_workbook(full_filename)
    # Select sheet
    content_sheet = wbook[sheet]
    # Associate header to column
    # Walk row 1 left to right until the first empty header cell.
    i_column = 1
    column_to_header = {}
    header_to_column = {}
    content = content_sheet.cell(1,i_column).value
    while content is not None :
        column_to_header[i_column] = content
        header_to_column[content] = i_column
        i_column = i_column + 1
        content = content_sheet.cell(1,i_column).value
    if not column in header_to_column : raise Exception('Column not found')
    # Look for the line in which identifier is located
    selected_row = -1
    for i_row in range(1,content_sheet.max_row + 1) :
        # NOTE(review): logs every scanned cell -- quite verbose for large sheets.
        logger.info(content_sheet.cell(i_row,header_to_column['Identifier']).value)
        logger.info(identifier)
        if str(content_sheet.cell(i_row,header_to_column['Identifier']).value) == identifier :
            logger.info("found")
            selected_row = i_row
    if selected_row == -1 : raise Exception('Identifier not found')
    if not content_sheet.cell(selected_row,header_to_column[column]).value == value :
        raise Exception('Value does not match')
def _apply_mask(head_file, mask_file, write_dir=None,
                caching=False, terminal_output='allatonce'):
    """
    Parameters
    ----------
    head_file : str
        Path to the image to mask.
    mask_file : str
        Path to the image mask to apply.
    write_dir : str or None, optional
        Path to the folder to save the output file to. If None, the folder
        of the head file is used.
    caching : bool, optional
        Wether or not to use caching.
    terminal_output : one of {'stream', 'allatonce', 'file', 'none'}
        Control terminal output :
            'stream' : displays to terminal immediately,
            'allatonce' : waits till command is finished to display output,
            'file' : writes output to file
            'none' : output is ignored
    Returns
    -------
    path to brain extracted image.

    Raises
    ------
    ValueError
        If the mask is not strictly binary (exactly two values including 0),
        or if mask and head image affines differ.
    """
    if write_dir is None:
        write_dir = os.path.dirname(head_file)
    if caching:
        memory = Memory(write_dir)
        apply_mask = memory.cache(fsl.ApplyMask)
        apply_mask.interface().set_default_terminal_output(terminal_output)
    else:
        apply_mask = fsl.ApplyMask(terminal_output=terminal_output).run
    # Check mask is binary
    # NOTE(review): nibabel's get_data() is deprecated in favor of
    # get_fdata() -- confirm the pinned nibabel version still provides it.
    mask_img = nibabel.load(mask_file)
    mask = mask_img.get_data()
    values = np.unique(mask)
    if len(values) == 2:
        # If there are 2 different values, one of them must be 0 (background)
        if not 0 in values:
            raise ValueError('Background of the mask must be represented with'
                             '0. Given mask contains: %s.' % values)
    elif len(values) != 2:
        # If there are more than 2 values, the mask is invalid
        # (a single-valued mask, e.g. all zeros, also lands here).
        raise ValueError('Given mask is not made of 2 values: %s'
                         '. Cannot interpret as true or false' % values)
    # The mask must live in the same space as the head image.
    try:
        np.testing.assert_array_equal(nibabel.load(mask_file).affine,
                                      nibabel.load(head_file).affine)
    except AssertionError:
        raise ValueError('Given mask {0} and file {1} do not have the same '
                         'affine'.format(mask_file, head_file))
    out_apply_mask = apply_mask(in_file=head_file,
                                mask_file=mask_file,
                                out_file=fname_presuffix(head_file,
                                                         suffix='_masked',
                                                         newpath=write_dir))
    return out_apply_mask.outputs.out_file
def mars_reshape(x_i):
    """
    Reshape (n_stacks, 3, 16, 112, 112) into (n_stacks * 16, 112, 112, 3)
    """
    # Move the channel axis last, then flatten the stack/frame axes together.
    channels_last = x_i.transpose(0, 2, 3, 4, 1)
    return channels_last.reshape(-1, 112, 112, 3)
def Rz_to_coshucosv(R,z,delta=1.):
    """
    NAME:
       Rz_to_coshucosv
    PURPOSE:
       calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta
    INPUT:
       R - radius
       z - height
       delta= focus
    OUTPUT:
       (cosh(u),cos(v))
    HISTORY:
       2012-11-27 - Written - Bovy (IAS)
    """
    import numpy
    # d1, d2: distances from (R, z) to the two foci at (0, -delta), (0, +delta).
    d12 = (z + delta)**2. + R**2.
    d22 = (z - delta)**2. + R**2.
    # scipy.sqrt (the old `sc.sqrt` alias) was removed from modern SciPy;
    # numpy.sqrt has identical semantics for these scalar/array inputs.
    coshu = 0.5 / delta * (numpy.sqrt(d12) + numpy.sqrt(d22))
    cosv = 0.5 / delta * (numpy.sqrt(d12) - numpy.sqrt(d22))
    return (coshu, cosv)
def rpFFNET_createdict(cf,ds,series):
    """ Creates a dictionary in ds to hold information about the FFNET data used
    to gap fill the tower data.

    Returns the ffnet_info dict on success, or None (after logging an error)
    when the series is missing from the control file or a driver contains
    missing data. Also registers the series in ds.merge["standard"].
    """
    # get the section of the control file containing the series
    section = pfp_utils.get_cfsection(cf,series=series,mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section)==0:
        logger.error("ERUsingFFNET: Series "+series+" not found in control file, skipping ...")
        return
    # check that none of the drivers have missing data
    driver_list = ast.literal_eval(cf[section][series]["ERUsingFFNET"]["drivers"])
    target = cf[section][series]["ERUsingFFNET"]["target"]
    for label in driver_list:
        data,flag,attr = pfp_utils.GetSeriesasMA(ds,label)
        if numpy.ma.count_masked(data)!=0:
            logger.error("ERUsingFFNET: driver "+label+" contains missing data, skipping target "+target)
            return
    # create the dictionary keys for this series
    ffnet_info = {}
    # site name
    ffnet_info["site_name"] = ds.globalattributes["site_name"]
    # source series for ER
    opt = pfp_utils.get_keyvaluefromcf(cf, [section,series,"ERUsingFFNET"], "source", default="Fc")
    ffnet_info["source"] = opt
    # target series name
    ffnet_info["target"] = cf[section][series]["ERUsingFFNET"]["target"]
    # list of drivers
    ffnet_info["drivers"] = ast.literal_eval(cf[section][series]["ERUsingFFNET"]["drivers"])
    # name of ffnet output series in ds
    ffnet_info["output"] = cf[section][series]["ERUsingFFNET"]["output"]
    # results of best fit for plotting later on
    ffnet_info["results"] = {"startdate":[],"enddate":[],"No. points":[],"r":[],
                             "Bias":[],"RMSE":[],"Frac Bias":[],"NMSE":[],
                             "Avg (obs)":[],"Avg (FFNET)":[],
                             "Var (obs)":[],"Var (FFNET)":[],"Var ratio":[],
                             "m_ols":[],"b_ols":[]}
    # create an empty series in ds if the SOLO output series doesn't exist yet
    if ffnet_info["output"] not in ds.series.keys():
        data,flag,attr = pfp_utils.MakeEmptySeries(ds,ffnet_info["output"])
        pfp_utils.CreateSeries(ds,ffnet_info["output"],data,flag,attr)
    # create the merge directory in the data structure
    if "merge" not in dir(ds): ds.merge = {}
    if "standard" not in ds.merge.keys(): ds.merge["standard"] = {}
    # create the dictionary keys for this series
    ds.merge["standard"][series] = {}
    # output series name
    ds.merge["standard"][series]["output"] = series
    # source
    ds.merge["standard"][series]["source"] = ast.literal_eval(cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.merge["standard"][series]["output"] not in ds.series.keys():
        data,flag,attr = pfp_utils.MakeEmptySeries(ds,ds.merge["standard"][series]["output"])
        pfp_utils.CreateSeries(ds,ds.merge["standard"][series]["output"],data,flag,attr)
    return ffnet_info
def test_twins_names():
    """Test twins with names"""
    # Full game: one named role, 3 players, strategies 'a'/'b', zero payoffs.
    base = rsgame.empty(3, 2)
    game = paygame.game_names(
        ['role'], 3, [['a', 'b']], base.all_profiles(),
        np.zeros((base.num_all_profiles, base.num_strats)))
    redgame = tr.reduce_game(game)
    # The reduction should keep role/strategy names while dropping the
    # player count to 2 (twins).
    expected = paygame.game_names(
        ['role'], 2, [['a', 'b']], redgame.all_profiles(),
        np.zeros((redgame.num_all_profiles, base.num_strats)))
    assert redgame == expected
def test_checkout_data(has_paid):
    """
    Test checkout data serializer

    has_paid parametrizes whether a fulfilled payment line exists; the
    serializer's "payments" list must reflect it.
    """
    application = BootcampApplicationFactory.create()
    user = application.user
    run = application.bootcamp_run
    if has_paid:
        # A fulfilled order line represents a completed payment on this run.
        line = LineFactory.create(
            order__status=Order.FULFILLED,
            order__application=application,
            order__user=user,
            bootcamp_run=run,
        )
    InstallmentFactory.create(bootcamp_run=run)
    assert CheckoutDataSerializer(instance=application).data == {
        "id": application.id,
        "bootcamp_run": BootcampRunSerializer(application.bootcamp_run).data,
        "installments": [
            InstallmentSerializer(installment).data
            for installment in run.installment_set.all()
        ],
        "payments": [LineSerializer(line).data] if has_paid else [],
        "total_paid": application.total_paid,
        "total_price": application.price,
    }
def svn_client_relocate(*args):
    """
    svn_client_relocate(char dir, char from_prefix, char to_prefix, svn_boolean_t recurse,
        svn_client_ctx_t ctx, apr_pool_t pool) -> svn_error_t
    """
    # SWIG-generated thin wrapper: forwards all arguments to the C binding.
    return _client.svn_client_relocate(*args)
def get_pairs(image1, image2, global_shift, current_objects, record, params):
    """ Given two images, this function identifies the matching objects and
    pairs them appropriately. See disparity function.

    Returns an array mapping objects in image1 to matches in image2 (zeros
    when image2 has no objects).
    NOTE(review): when image1 has no objects this returns None implicitly,
    while the empty-image2 case returns an array -- callers must handle both.
    """
    # Objects are labelled 1..n, so the max label is the object count.
    nobj1 = np.max(image1)
    nobj2 = np.max(image2)
    if nobj1 == 0:
        print('No echoes found in the first scan.')
        return
    elif nobj2 == 0:
        # Nothing to match against: every image1 object pairs to 0.
        zero_pairs = np.zeros(nobj1)
        return zero_pairs
    obj_match = locate_all_objects(image1,
                                   image2,
                                   global_shift,
                                   current_objects,
                                   record,
                                   params)
    pairs = match_pairs(obj_match, params)
    return pairs
def write(path, **kwargs):
    """Writes the options to the file.

    Each keyword becomes a "--- name ---" header followed by the option's
    string form (assumes each option renders usefully via str()).
    """
    with open(path, 'w') as out:
        for name, option in kwargs.items():
            out.write("--- {} ---\n\n".format(name))
            out.write(str(option) + '\n\n')
def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    Safe to call when no listener is active (no-op).
    """
    global _listener
    # NOTE(review): uses logging's private module-level lock helpers; these
    # were removed in Python 3.13 -- confirm the supported interpreter range.
    logging._acquireLock()
    try:
        if _listener:
            # Setting abort makes the listener thread exit its serve loop.
            _listener.abort = 1
            _listener = None
    finally:
        logging._releaseLock()
def plot_prediction_det_animate2(save_dir, target, prediction, epoch, index, i_plot,
                                 plot_fn='imshow', cmap='jet', same_scale=False,
                                 vmax=None, vmin=None, vmax_err=None, vmin_err=None):
    """Plot prediction for one input (`index`-th at epoch `epoch`)

    Draws a 3-row panel grid: row 1 the simulation target, row 2 the
    prediction, row 3 the absolute error, with one column per output field.

    Args:
        save_dir: directory to save predictions
        target (np.ndarray): (3, 65, 65)
        prediction (np.ndarray): (3, 65, 65)
        epoch (int): which epoch
        index (int): i-th prediction
        i_plot: suffix appended to the saved file name
        plot_fn (str): choices=['contourf', 'imshow']
        cmap: matplotlib colormap name
        same_scale (bool): if True, error panels reuse the data-row color limits
        vmax, vmin: per-field color limits for the data rows; computed from the
            data when vmax is None
        vmax_err, vmin_err: optional per-field color limits for the error row

    Note:
        Relies on module-level names `to_numpy`, `ext` (file extension) and
        `dpi` defined elsewhere in this file.
    """
    target, prediction = to_numpy(target), to_numpy(prediction)
    rows = ['Simulation', 'Prediction', 'Abs Error']
    cols = ['Pressure', 'Horizontal Flux', 'Vertical Flux']
    # 3 x 65 x 65
    n_fields = target.shape[0]
    # Stack so axes 0..n-1 are target, n..2n-1 prediction, 2n..3n-1 abs error.
    samples = np.concatenate((target, prediction, abs(target - prediction)), axis=0)
    # print(samples.shape)
    interp = None
    if vmax is None:
        # Shared per-field limits computed over target AND prediction panels.
        vmin, vmax = [], []
        for i in range(n_fields):
            vmin.append(np.amin(samples[[i, i+n_fields]]))
            vmax.append(np.amax(samples[[i, i+n_fields]]))
    fig, axes = plt.subplots(3, n_fields, figsize=(3.5 * n_fields, 9))
    for j, ax in enumerate(fig.axes):
        ax.set_aspect('equal')
        # ax.set_axis_off()
        ax.set_xticks([])
        ax.set_yticks([])
        if j < 2 * n_fields:
            # Target and prediction panels: use the shared per-field limits.
            if plot_fn == 'contourf':
                cax = ax.contourf(samples[j], 50, cmap=cmap,
                                  vmin=vmin[j % n_fields], vmax=vmax[j % n_fields])
            elif plot_fn =='imshow':
                cax = ax.imshow(samples[j], cmap=cmap, origin='upper',
                                interpolation=interp,
                                vmin=vmin[j % n_fields], vmax=vmax[j % n_fields])
        else:
            # Error panels: limits depend on same_scale / vmax_err overrides.
            if same_scale:
                vmin_error, vmax_error = vmin[j % n_fields], vmax[j % n_fields]
            else:
                vmin_error, vmax_error = None, None
            if vmax_err is not None:
                vmin_error, vmax_error = vmin_err[j % n_fields], vmax_err[j % n_fields]
            # if j == 8:
            #     vmin_error = None
            if plot_fn == 'contourf':
                cax = ax.contourf(samples[j], 50, cmap=cmap)
            elif plot_fn =='imshow':
                cax = ax.imshow(samples[j], cmap=cmap, origin='upper',
                                interpolation=interp, vmin=vmin_error, vmax=vmax_error)
        if plot_fn == 'contourf':
            # Hide contour seam artifacts in vector output.
            for c in cax.collections:
                c.set_edgecolor("face")
                c.set_linewidth(0.000000000001)
        cbar = plt.colorbar(cax, ax=ax, fraction=0.046, pad=0.04,
                            format=ticker.ScalarFormatter(useMathText=True))
        cbar.formatter.set_powerlimits((-2, 2))
        cbar.ax.yaxis.set_offset_position('left')
        # cbar.ax.tick_params(labelsize=5)
        cbar.update_ticks()
    for ax, col in zip(axes[0], cols):
        ax.set_title(col)
    for ax, row in zip(axes[:, 0], rows):
        ax.set_ylabel(row, rotation=90)
    # plt.suptitle(f'Epoch {epoch}')
    plt.tight_layout(pad=0.05, w_pad=0.05, h_pad=0.05)
    plt.subplots_adjust(top=0.93)
    plt.savefig(save_dir + '/pred_{}_{}.{}'.format(index, i_plot, ext),
                dpi=dpi, bbox_inches='tight')
    # plt.savefig(save_dir + '/pred_epoch{}_{}.{}'.format(epoch, index, ext),
    #             dpi=dpi, bbox_inches='tight')
    plt.close(fig)
def getComparedVotes(request):
    """
    * @api {get} /getComparedVotes/?people_same={people_same_ids}&parties_same={parties_same_ids}&people_different={people_different_ids}&parties_different={parties_different_ids} List all votes where selected MPs/PGs voted the same/differently
    * @apiName getComparedVotes
    * @apiGroup Session
    * @apiParam {people_same_ids} Comma separated list of Parladata ids for MPs who voted the same
    * @apiParam {parties_same_ids} Comma separated list of Parladata ids for PGs who voted the same
    * @apiParam {people_different_ids} Comma separated list of Parladata ids for MPs who voted differently
    * @apiParam {parties_different_ids} Comma separated list of Parladata ids for PGs who voted the differently
    * @apiSuccess {Integer} total Total number of votes so far
    * @apiSuccess {Object[]} results List of votes that satisfy the supplied criteria
    * @apiSuccess {Object} results.session Session data for this vote
    * @apiSuccess {String} results.session.name Name of session.
    * @apiSuccess {Date} results.session.date_ts Date and time of session.
    * @apiSuccess {Date} results.session.date Date of session.
    * @apiSuccess {Integer} results.session.id Id of session.
    * @apiSuccess {Boolean} results.session.in_review Return true or false if session is in review.
    * @apiSuccess {Object[]} results.session.orgs Organization object
    * @apiSuccess {String} results.session.orgs.acronym Organization acronym
    * @apiSuccess {Boolean} results.session.orgs.is_coalition True of False if organization is in coalition
    * @apiSuccess {Integer} results.session.orgs.id Id of organization
    * @apiSuccess {Integer} results.session.orgs.name Name of organization
    * @apiSuccess {Object} results.results Results for this vote
    * @apiSuccess {Integer} results.results.abstain Number of abstentions
    * @apiSuccess {Integer} results.results.against Number of MPs who voted against the motion
    * @apiSuccess {Integer} results.results.not_present Number of MPs who weren't present at the vote
    * @apiSuccess {Integer} results.results.votes_for Number of MPs who voted for the motion
    * @apiSuccess {date} results.results.date The date of the vote
    * @apiSuccess {String} results.results.text The text of the motion which was voted upon
    * @apiSuccess {String[]} results.results.tags List of tags that belong to this motion
    * @apiSuccess {Boolean} results.results.is_outlier Is this vote a weird one (flame icon)?
    * @apiSuccess {Boolean} results.results.result Did the motion pass?
    * @apiExample {curl} Example:
        curl -i https://analize.parlameter.si/v1/s/getComparedVotes/?people_same=&parties_same=1&people_different=&parties_different=2
    * @apiSuccessExample {json} Example response (truncated):
    {
        "total": 2155,
        "results": [{
            "session": {
                "name": "44. izredna seja",
                "date_ts": "2017-05-30T02:00:00",
                "orgs": [{
                    "acronym": "DZ",
                    "is_coalition": false,
                    "id": 95,
                    "name": "Dr\u017eavni zbor"
                }],
                "date": "30. 5. 2017",
                "org": {
                    "acronym": "DZ",
                    "is_coalition": false,
                    "id": 95,
                    "name": "Dr\u017eavni zbor"
                },
                "id": 9587,
                "in_review": false
            },
            "results": {
                "abstain": 0,
                "against": 0,
                "motion_id": 7260,
                "date": "09.06.2017",
                "text": "Dnevni red v celoti",
                "tags": ["Proceduralna glasovanja"],
                "is_outlier": false,
                "not_present": 34,
                "votes_for": 56,
                "result": true
            }
        }]
    }
    """
    # SECURITY NOTE(review): the raw SQL below is assembled by interpolating
    # request GET parameters directly into the query string. This is a SQL
    # injection risk and should be rewritten with parameterized queries.
    # NOTE: this view is Python 2 code (print statements) left for debugging.
    people_same = request.GET.get('people_same')
    parties_same = request.GET.get('parties_same')
    people_different = request.GET.get('people_different')
    parties_different = request.GET.get('parties_different')
    # Split each comma-separated id list; empty parameter -> empty list.
    if people_same != '':
        people_same_list = people_same.split(',')
    else:
        people_same_list = []
    if parties_same != '':
        parties_same_list = parties_same.split(',')
    else:
        parties_same_list = []
    if people_different != '':
        people_different_list = people_different.split(',')
    else:
        people_different_list = []
    if parties_different != '':
        parties_different_list = parties_different.split(',')
    else:
        parties_different_list = []
    # Input validation: at least one "same" entity, and either two "same"
    # entities or one "same" plus one "different" entity to compare.
    if len(people_same_list) + len(parties_same_list) == 0:
        return HttpResponse('Need at least one same to compare.')
    if len(people_same_list) + len(parties_same_list) < 2 and len(people_different_list) + len(parties_different_list) < 1:
        return HttpResponse('Not enough to compare.')
    beginning = 'SELECT * FROM '
    # SQL fragments built below; each stays '' when its input list is empty.
    select_same_people = ''
    select_same_parties = ''
    match_same_people_ballots = ''
    match_same_people_persons = ''
    match_same_people_options = ''
    match_same_parties_ballots = ''
    match_same_parties_organizations = ''
    match_same_parties_options = ''
    select_different_people = ''
    select_different_parties = ''
    match_different_people_ballots = ''
    match_different_people_persons = ''
    match_different_people_options = ''
    match_different_parties_ballots = ''
    match_different_parties_organizations = ''
    match_different_parties_options = ''
    # select for same people DONE
    for i, e in enumerate(people_same_list):
        if i < len(people_same_list) - 1:
            select_same_people = '%s parlaseje_ballot b%s, parlaseje_activity a%s, parlaposlanci_person p%s, ' % (select_same_people, str(i), str(i), str(i))
        else:
            select_same_people = '%s parlaseje_ballot b%s, parlaseje_activity a%s, parlaposlanci_person p%s' % (select_same_people, str(i), str(i), str(i))
    # select for same parties DONE
    for i, e in enumerate(parties_same_list):
        if i < len(parties_same_list) - 1:
            select_same_parties = '%s parlaseje_ballot pb%s, parlaskupine_organization o%s, ' % (select_same_parties, str(i), str(i))
        else:
            select_same_parties = '%s parlaseje_ballot pb%s, parlaskupine_organization o%s' % (select_same_parties, str(i), str(i))
    # select for different people DONE
    for i, e in enumerate(people_different_list):
        if i < len(people_different_list) - 1:
            select_different_people = '%s parlaseje_ballot db%s, parlaseje_activity da%s, parlaposlanci_person dp%s, ' % (select_different_people, str(i), str(i), str(i))
        else:
            select_different_people = '%s parlaseje_ballot db%s, parlaseje_activity da%s, parlaposlanci_person dp%s' % (select_different_people, str(i), str(i), str(i))
    # select for different parties DONE
    for i, e in enumerate(parties_different_list):
        if i < len(parties_different_list) - 1:
            select_different_parties = '%s parlaseje_ballot dpb%s, parlaskupine_organization do%s, ' % (select_different_parties, str(i), str(i))
        else:
            select_different_parties = '%s parlaseje_ballot dpb%s, parlaskupine_organization do%s' % (select_different_parties, str(i), str(i))
    # match same people ballots by vote id DONE
    # if only one person was passed, match_same_people_ballots will remain an empty string
    for i, e in enumerate(people_same_list):
        if i != 0:
            if i < len(people_same_list) - 1:
                match_same_people_ballots = '%s b0.vote_id = b%s.vote_id AND ' % (match_same_people_ballots, str(i))
            else:
                match_same_people_ballots = '%s b0.vote_id = b%s.vote_id' % (match_same_people_ballots, str(i))
    # match same parties ballots by vote id DONE
    # if only one same party was passed match_same_parties_ballots will remain an empty string
    if len(people_same_list) == 0:
        # no same people were passed to the API
        pass
        if len(parties_same_list) == 0:
            # no same parties were passed
            return HttpResponse('You need to pass at least one "same" person or party.')
        elif len(parties_same_list) == 1:
            # only one same party was passed, there is nothing to match yet
            match_same_parties_ballots = ''
        else:
            # more than one same party was passed
            for i, e in enumerate(parties_same_list):
                if i != 0:
                    # ignore the first one, because all others will be compared with it
                    if i < len(parties_same_list) - 1:
                        # not last
                        match_same_parties_ballots = '%s pb0.vote_id = pb%s.vote_id AND ' % (match_same_parties_ballots, str(i))
                    else:
                        # last
                        match_same_parties_ballots = '%s pb0.vote_id = pb%s.vote_id' % (match_same_parties_ballots, str(i))
    elif len(people_same_list) > 0:
        # one or more same people were passed
        for i, e in enumerate(parties_same_list):
            # do not ignore the first one, because all will be compared to the first person ballot
            if i < len(parties_same_list) - 1:
                # not last
                match_same_parties_ballots = '%s b0.vote_id = pb%s.vote_id AND ' % (match_same_parties_ballots, str(i))
            else:
                # last
                match_same_parties_ballots = '%s b0.vote_id = pb%s.vote_id' % (match_same_parties_ballots, str(i))
    # match same people with persons DONE
    for i, e in enumerate(people_same_list):
        if i < len(people_same_list) - 1:
            match_same_people_persons = '%s b%s.activity_ptr_id = a%s.id AND a%s.person_id = p%s.id AND p%s.id_parladata = %s AND ' % (match_same_people_persons, str(i), str(i), str(i), str(i), str(i), e)
        else:
            match_same_people_persons = '%s b%s.activity_ptr_id = a%s.id AND a%s.person_id = p%s.id AND p%s.id_parladata = %s' % (match_same_people_persons, str(i), str(i), str(i), str(i), str(i), e)
    # match same parties with organizations DONE
    for i, e in enumerate(parties_same_list):
        if i < len(parties_same_list) -1:
            match_same_parties_organizations = '%s pb%s.org_voter_id = o%s.id AND o%s.id_parladata = %s AND ' % (match_same_parties_organizations, str(i), str(i), str(i), e)
        else:
            match_same_parties_organizations = '%s pb%s.org_voter_id = o%s.id AND o%s.id_parladata = %s' % (match_same_parties_organizations, str(i), str(i), str(i), e)
    # match same people based on options DONE
    for i, e in enumerate(people_same_list):
        if i != 0:
            if i != len(people_same_list) - 1:
                match_same_people_options = '%s b0.option = b%s.option AND ' % (match_same_people_options, str(i))
            else:
                match_same_people_options = '%s b0.option = b%s.option' % (match_same_people_options, str(i))
    # match same parties based on options
    for i, e in enumerate(parties_same_list):
        if i == 0:
            if select_same_people != '':
                if len(parties_same_list) > 1:
                    match_same_parties_options = '%s b0.option = pb0.option AND ' % (match_same_parties_options)
                else:
                    match_same_parties_options = '%s b0.option = pb0.option ' % (match_same_parties_options)
        else:
            if i != len(parties_same_list) - 1:
                match_same_parties_options = '%s pb0.option = pb%s.option AND ' % (match_same_parties_options, str(i))
            else:
                match_same_parties_options = '%s pb0.option = pb%s.option' % (match_same_parties_options, str(i))
    # compare different people and parties
    if len(people_same_list) > 0:
        # we compare with same people
        # match different people ballots by vote id
        for i, e in enumerate(people_different_list):
            if i < len(people_different_list) - 1:
                match_different_people_ballots = '%s b0.vote_id = db%s.vote_id AND ' % (match_different_people_ballots, str(i))
            else:
                match_different_people_ballots = '%s b0.vote_id = db%s.vote_id' % (match_different_people_ballots, str(i))
        # match different parties ballots by vote id
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                match_different_parties_ballots = '%s b0.vote_id = dpb%s.vote_id AND ' % (match_different_parties_ballots, str(i))
            else:
                match_different_parties_ballots = '%s b0.vote_id = dpb%s.vote_id' % (match_different_parties_ballots, str(i))
        # match different people based on options
        for i, e in enumerate(people_different_list):
            if i != len(people_different_list) - 1:
                match_different_people_options = '%s b0.option != db%s.option AND ' % (match_different_people_options, str(i))
            else:
                match_different_people_options = '%s b0.option != db%s.option' % (match_different_people_options, str(i))
        # match different parties based on options
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                match_different_parties_options = '%s b0.option != dpb%s.option AND ' % (match_different_parties_options, str(i))
            else:
                match_different_parties_options = '%s b0.option != dpb%s.option ' % (match_different_parties_options, str(i))
    else:
        # we compare with same parties
        # match different people ballots by vote id
        for i, e in enumerate(people_different_list):
            if i < len(people_different_list) - 1:
                match_different_people_ballots = '%s pb0.vote_id = db%s.vote_id AND ' % (match_different_people_ballots, str(i))
            else:
                match_different_people_ballots = '%s pb0.vote_id = db%s.vote_id' % (match_different_people_ballots, str(i))
        # match different parties ballots by vote id
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                match_different_parties_ballots = '%s pb0.vote_id = dpb%s.vote_id AND ' % (match_different_parties_ballots, str(i))
            else:
                match_different_parties_ballots = '%s pb0.vote_id = dpb%s.vote_id' % (match_different_parties_ballots, str(i))
        # match different people based on options
        for i, e in enumerate(people_different_list):
            if i != len(people_different_list) - 1:
                match_different_people_options = '%s pb0.option != db%s.option AND ' % (match_different_people_options, str(i))
            else:
                match_different_people_options = '%s pb0.option != db%s.option' % (match_different_people_options, str(i))
        # match different parties based on options
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                match_different_parties_options = '%s pb0.option != dpb%s.option AND ' % (match_different_parties_options, str(i))
            else:
                match_different_parties_options = '%s pb0.option != dpb%s.option ' % (match_different_parties_options, str(i))
    # match different people with person
    for i, e in enumerate(people_different_list):
        if i < len(people_different_list) - 1:
            match_different_people_persons = '%s db%s.activity_ptr_id = da%s.id AND da%s.person_id = dp%s.id AND dp%s.id_parladata = %s AND ' % (match_different_people_persons, str(i), str(i), str(i), str(i), str(i), e)
        else:
            match_different_people_persons = '%s db%s.activity_ptr_id = da%s.id AND da%s.person_id = dp%s.id AND dp%s.id_parladata = %s' % (match_different_people_persons, str(i), str(i), str(i), str(i), str(i), e)
    # match different parties with organizations
    for i, e in enumerate(parties_different_list):
        if i < len(parties_different_list) - 1:
            match_different_parties_organizations = '%s dpb%s.org_voter_id = do%s.id AND do%s.id_parladata = %s AND ' % (match_different_parties_organizations, str(i), str(i), str(i), e)
        else:
            match_different_parties_organizations = '%s dpb%s.org_voter_id = do%s.id AND do%s.id_parladata = %s' % (match_different_parties_organizations, str(i), str(i), str(i), e)
    # Assemble the final query: FROM clause, then WHERE with all non-empty
    # fragment groups joined by AND.
    query = beginning
    q_selectors_list = [select_same_people, select_same_parties, select_different_people, select_different_parties]
    q_selectors_list_clean = [s for s in q_selectors_list if s != '']
    q_selectors = ', '.join(q_selectors_list_clean)
    print 'q_selectors ' + q_selectors
    query = query + ' ' + q_selectors + ' WHERE'
    q_match_ballots_list = [match_same_people_ballots, match_same_parties_ballots, match_different_people_ballots, match_different_parties_ballots]
    q_match_ballots_list_clean = [s for s in q_match_ballots_list if s != '']
    q_match_ballots = ' AND '.join(q_match_ballots_list_clean)
    print 'q_match_ballots ' + q_match_ballots
    # query = query + ' ' + q_match_ballots + ' AND'
    q_match_options_list = [match_same_people_options, match_same_parties_options, match_different_people_options, match_different_parties_options]
    q_match_options_list_clean = [s for s in q_match_options_list if s != '']
    q_match_options = ' AND '.join(q_match_options_list_clean)
    print 'q_match_options ' + q_match_options
    # query = query + ' ' + q_match_options + ' AND'
    q_match_persons_list = [match_same_people_persons, match_different_people_persons]
    q_match_persons_list_clean = [s for s in q_match_persons_list if s != '']
    q_match_persons = ' AND '.join(q_match_persons_list_clean)
    print 'q_match_persons ' + q_match_persons
    # query = query + ' ' + q_match_persons + ' AND'
    q_match_organizations_list = [match_same_parties_organizations, match_different_parties_organizations]
    q_match_organizations_list_clean = [s for s in q_match_organizations_list if s != '']
    q_match_organizations = ' AND '.join(q_match_organizations_list_clean)
    print 'q_match_organizations ' + q_match_organizations
    # query = query + ' ' + q_match_organizations
    after_where_list = [q_match_ballots, q_match_options, q_match_persons, q_match_organizations]
    after_where_list_clean = [s for s in after_where_list if s != '']
    after_where = ' AND '.join(after_where_list_clean)
    query = query + after_where
    if request.GET.get('special'):
        # exclude 'ni'
        exclude_ni_people_same = ''
        exclude_ni_parties_same = ''
        exclude_ni_people_different = ''
        exclude_ni_parties_different = ''
        for i, e in enumerate(people_same_list):
            if i < len(people_same_list) - 1:
                exclude_ni_people_same = '%s b%s.option != \'ni\' AND ' % (exclude_ni_people_same, i)
            else:
                exclude_ni_people_same = '%s b%s.option != \'ni\'' % (exclude_ni_people_same, i)
        for i, e in enumerate(parties_same_list):
            if i < len(parties_same_list) - 1:
                exclude_ni_parties_same = '%s pb%s.option != \'ni\' AND ' % (exclude_ni_parties_same, i)
            else:
                exclude_ni_parties_same = '%s pb%s.option != \'ni\'' % (exclude_ni_parties_same, i)
        for i, e in enumerate(people_different_list):
            if i < len(people_different_list) - 1:
                exclude_ni_people_different = '%s db%s.option != \'ni\' AND ' % (exclude_ni_people_different, i)
            else:
                exclude_ni_people_different = '%s db%s.option != \'ni\'' % (exclude_ni_people_different, i)
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                exclude_ni_parties_different = '%s dpb%s.option != \'ni\' AND ' % (exclude_ni_parties_different, i)
            else:
                exclude_ni_parties_different = '%s dpb%s.option != \'ni\'' % (exclude_ni_parties_different, i)
        exclude_ni_list = [exclude_ni_people_same, exclude_ni_parties_same, exclude_ni_people_different, exclude_ni_parties_different]
        exclude_ni_list_clean = [s for s in exclude_ni_list if s != '']
        exclude_ni = ' AND '.join(exclude_ni_list_clean)
        query = query + ' AND ' + exclude_ni
    # return HttpResponse(query)
    print query
    print 'STATEMENT PARTS:'
    print 'select_same_people ' + select_same_people
    print 'select_same_parties ' + select_same_parties
    print 'match_same_people_ballots ' + match_same_people_ballots
    print 'match_same_people_persons ' + match_same_people_persons
    print 'match_same_people_options ' + match_same_people_options
    print 'match_same_parties_ballots ' + match_same_parties_ballots
    print 'match_same_parties_organizations ' + match_same_parties_organizations
    print 'match_same_parties_options ' + match_same_parties_options
    print 'select_different_people ' + select_different_people
    print 'select_different_parties ' + select_different_parties
    print 'match_different_people_ballots ' + match_different_people_ballots
    print 'match_different_people_persons ' + match_different_people_persons
    print 'match_different_people_options ' + match_different_people_options
    print 'match_different_parties_ballots ' + match_different_parties_ballots
    print 'match_different_parties_organizations ' + match_different_parties_organizations
    print 'match_different_parties_options ' + match_different_parties_options
    # Execute the assembled raw SQL and serialize the matching ballots.
    ballots = Ballot.objects.raw(query)
    session_ids = set([b.vote.session.id for b in ballots])
    sessions = {}
    for s in session_ids:
        sessions[s] = Session.objects.get(id=s).getSessionData()
    print '[SESSION IDS:]'
    print set(session_ids)
    out = {
        'total': Vote.objects.all().count(),
        'results': []
    }
    for ballot in ballots:
        out['results'].append({
            'session': sessions[ballot.vote.session.id],
            'results': {
                'motion_id': ballot.vote.id_parladata,
                'text': ballot.vote.motion,
                'votes_for': ballot.vote.votes_for,
                'against': ballot.vote.against,
                'abstain': ballot.vote.abstain,
                'not_present': ballot.vote.not_present,
                'result': ballot.vote.result,
                'is_outlier': ballot.vote.is_outlier,
                'tags': ballot.vote.tags,
                'date': ballot.start_time.strftime(API_DATE_FORMAT)
            }
        })
    return JsonResponse(out, safe=False)
def find_switch():
    """Find switch-like boxes"""
    switch_pattern = [[0, 1, 0, 0],
                      [0, 0, 1, 0]]
    find_pattern(switch_pattern)
def get_bits(register, index, length=1):
    """
    Extract selected bit(s) from a register value, masking out the rest.
    When length == 1 the single bit is returned as a boolean.

    :param register: Register value
    :type register: int
    :param index: Start index (from right)
    :type index: int
    :param length: Number of bits (default 1)
    :type length: int
    :return: Selected bit(s)
    :rtype: Union[int, bool]
    """
    mask = (1 << length) - 1
    bits = (register >> index) & mask
    return bits == 1 if length == 1 else bits
def remove(path):
    """
    Remove the file or directory
    """
    if os.path.isdir(path):
        try:
            os.rmdir(path)
        except OSError:
            logger.fatal("Unable to remove the folder: {}".format(path), exc_info=True)
        return
    try:
        # Files are checked for existence first so a missing path is a no-op.
        if os.path.exists(path):
            os.remove(path)
    except OSError:
        logger.fatal("Unable to remove the file: {}".format(path), exc_info=True)
def _dir_travel(
    path: Path,
    excludes: List[Callable],
    handler: Callable,
    logger: Optional[logging.Logger] = default_logger,
):
    """Travels the path recursively, calling the handler on each subpath.

    Respects excludes, which will be called to check if this path is skipped.

    Args:
        path: file or directory currently being visited.
        excludes: stack of predicates; the path is skipped when any returns
            True. Mutated in place: a per-directory .gitignore matcher is
            pushed before descending and popped afterwards.
        handler: callable invoked with each non-excluded path; exceptions it
            raises are logged and re-raised.
        logger: optional logger for reporting the failing path; may be None.
    """
    # Push this directory's .gitignore matcher (if any) for the subtree.
    # Distinct name avoids the previous shadowing with the exception variable.
    gitignore_matcher = _get_gitignore(path)
    if gitignore_matcher is not None:
        excludes.append(gitignore_matcher)
    skip = any(exclude(path) for exclude in excludes)
    if not skip:
        try:
            handler(path)
        except Exception:
            # ``logger`` is Optional, so guard before dereferencing it.
            if logger is not None:
                logger.error(f"Issue with path: {path}")
            # Bare re-raise preserves the original traceback.
            raise
        if path.is_dir():
            for sub_path in path.iterdir():
                _dir_travel(sub_path, excludes, handler, logger=logger)
    # Pop the matcher pushed above so sibling directories are unaffected.
    if gitignore_matcher is not None:
        excludes.pop()
def plot_acceptance_ratio(
    plotter: Plotter,
    mcmc_tables: List[pd.DataFrame],
    burn_in: int,
    label_font_size=6,
    dpi_request=300,
):
    """
    Plot the progressive acceptance ratio over iterations, one line per chain.

    Args:
        plotter: project Plotter used to create and save the figure.
        mcmc_tables: per-chain MCMC output tables; must provide "chain" and
            "accept" columns once concatenated.
        burn_in: iteration index marked with a dotted vertical line when > 0.
        label_font_size: font size for the title, axis and tick labels.
        dpi_request: DPI passed through when saving the figure.
    """
    fig, axis, _, _, _, _ = plotter.get_figure()
    full_df = db.load.append_tables(mcmc_tables)
    # Chain index starts at 0, so the chain count is max index + 1.
    n_chains = max(full_df["chain"]) + 1
    for chain in range(n_chains):
        chain_mask = full_df["chain"] == chain
        chain_df = full_df[chain_mask]
        ratios = collate_acceptance_ratios(chain_df["accept"])
        # Plot
        axis.plot(ratios, alpha=0.8, linewidth=0.7)
    # Add vertical line for burn-in point
    if burn_in > 0:
        axis.axvline(x=burn_in, color=COLOR_THEME[1], linestyle="dotted")
    axis.set_title("acceptance ratio", fontsize=label_font_size)
    axis.set_xlabel("iterations", fontsize=label_font_size)
    axis.set_ylim(bottom=0.0)
    pyplot.setp(axis.get_yticklabels(), fontsize=label_font_size)
    pyplot.setp(axis.get_xticklabels(), fontsize=label_font_size)
    # Plain string here: the previous f-string had no placeholders (F541).
    plotter.save_figure(fig, filename="acceptance_ratio", dpi_request=dpi_request)
def addactual():
    """Add actual spendings for one month.

    GET renders the entry form; POST validates and stores one payments row.
    Fixes over the previous version: the duplicate-month check now runs
    BEFORE inserting (it previously ran after, so duplicates were stored
    anyway), it compares against every previously entered month instead of
    only the first row (which also crashed with IndexError for users with
    no payments yet), and the apology message typo is corrected.
    """
    if request.method == "POST":
        # Current user that is logged-in
        userId = session["user_id"]
        month = request.form.get("month")
        # Reject a duplicate month before writing anything to the database.
        rows = db.execute("SELECT month FROM payments WHERE userid = :userId", userId=userId)
        entered_months = {row["month"] for row in rows}
        if month in entered_months:
            return apology("Month already entered!")
        # Parse each spending category from the form; same fields as before.
        categories = [
            "housing", "pensionIns", "food", "health", "transport",
            "debt", "utilities", "clothing", "vacation", "unexpected",
        ]
        amounts = {category: float(request.form.get(category)) for category in categories}
        total = sum(amounts.values())
        db.execute(
            "INSERT INTO payments(userId, month, housing, pensionIns, food, health, transport, debt, utilities, clothing, vacation, unexpected, total)"
            " VALUES(:userId, :month, :housing, :pensionIns, :food, :health, :transport, :debt, :utilities, :clothing, :vacation, :unexpected, :total)",
            userId=userId, month=month, total=total, **amounts)
        # Flash message to confirm that the user added the payments
        flash("Payments added")
        return redirect("/actual")
    else:
        return render_template("addactual.html")
def parse_move(line):
    """ Parse steps from a move string """
    tokens = line.split()
    if not tokens:
        raise ValueError("No steps in move given to parse. %s" % (repr(line)))
    # Board offset for each direction character; 'x' marks a capture token.
    offsets = {'n': 8, 's': -8, 'e': 1, 'w': -1}
    steps = []
    for token in tokens:
        start = alg_to_index(token[1:3])
        if len(token) <= 3:
            raise ValueError("Can't represent placing step")
        direction = token[3]
        if direction == 'x':
            # Capture tokens carry no movement.
            continue
        if direction not in offsets:
            raise ValueError("Invalid step direction.")
        steps.append((start, start + offsets[direction]))
    return steps
def test_levensthein_dist() -> None:
    """
    Test our implemented levensthein distance function for measuring string similarity.

    Each case is asserted separately so that a failure pinpoints the exact
    input pair (the previous single chained-boolean assert gave no detail).
    """
    assert levensthein_dist("horse", "ros") == 3
    assert levensthein_dist("", "hello") == 5
    assert levensthein_dist("lululul", "") == 7
    assert levensthein_dist("intention", "execution") == 5
    assert levensthein_dist("", "") == 0
    assert levensthein_dist("hello", "") == 5
    # The distance is symmetric.
    assert levensthein_dist("cookietemple", "cookiejar") == 6
    assert levensthein_dist("cookiejar", "cookietemple") == 6
    assert levensthein_dist("cli", "cliiiiiiiiiii") == 10
    assert levensthein_dist("wep", "web") == 1
    assert levensthein_dist("mycommand", "mycommand") == 0
def get_conf_path(run_id):
    """
    Build the path used for storing/loading a run's configuration file.

    :param run_id (str): run ID to be used
    :return: relative path ``conf/<run_id>.ini``
    """
    filename = "{}.ini".format(run_id)
    return os.path.join('conf', filename)
def get_tensor_name(node_name, output_slot):
    """Compose the canonical tensor name ``<node_name>:<output_slot>``.

    Args:
        node_name: Name of the node that outputs the tensor, as a string.
        output_slot: Output slot index of the tensor, as an integer.

    Returns:
        Name of the tensor, as a string.
    """
    slot_text = "%d" % output_slot
    return ":".join([node_name, slot_text])
def EstimateMarriageSurvival(resp):
    """Estimates the survival curve.

    resp: DataFrame of respondents

    returns: pair of HazardFunction, SurvivalFunction
    """
    # NOTE: Filling missing values would be better than dropping them.
    ever_married = resp[resp.evrmarry == 1]
    never_married = resp[resp.evrmarry == 0]
    hf = EstimateHazardFunction(ever_married.agemarry.dropna(),
                                never_married.age)
    return hf, hf.MakeSurvival()
def generate_all_RGB_combinations(pixel_bits_count: int):
    """Yield every way to split ``pixel_bits_count`` bits across the R, G
    and B colour channels.

    Tuples are produced in the same order as before: red share ascending,
    then green; blue takes whatever bits remain.

    :param pixel_bits_count: total number of bits to distribute
    :yield: ``(r, g, b)`` tuples with ``r + g + b == pixel_bits_count``
    """
    # Enumerate (r, g) directly and derive b instead of scanning every
    # (r, g, b) triple: O(n^2) candidates instead of O(n^3), same output.
    for r in range(0, pixel_bits_count + 1):
        for g in range(0, pixel_bits_count + 1 - r):
            yield (r, g, pixel_bits_count - r - g)
def shipengine_error_with_no_error_type() -> ShipEngineError:
    """Raise a ShipEngineError whose ``error_type`` is deliberately ``None``.

    Test helper: despite the annotated return type, this function never
    returns — it always raises.
    """
    raise ShipEngineError(
        request_id="req_a523b1b19bd54054b7eb953f000e7f15",
        message="The is a test exception",
        error_source="shipengine",
        error_type=None,
        error_code="invalid_address",
    )
def get_cowell_data(n=10000):
    """
    Gets Cowell data: samples from the Gaussian chain Y -> X -> Z, where
    each variable is normally distributed around its parent.

    :param n: number of samples to draw. Defaults to 10000 (the previously
        hard-coded size), so existing zero-argument callers are unaffected.
    :return: tuple of an (n, 3) data matrix and headers ``['Y', 'X', 'Z']``.
    """
    Y = np.random.normal(0, 1, n)
    X = np.random.normal(Y, 1, n)
    Z = np.random.normal(X, 1, n)
    D = np.vstack([Y, X, Z]).T
    return D, ['Y', 'X', 'Z']
def is_str_str_tuple(t):
    """Is this object a tuple of exactly two strings?

    Note: this previously tested against Python 2's ``basestring``, which
    does not exist on Python 3 and made every call raise NameError there;
    ``str`` restores the intended check.
    """
    return (isinstance(t, tuple) and len(t) == 2
            and isinstance(t[0], str)
            and isinstance(t[1], str))
def test_beer_lambert(fname, fmt, tmp_path):
    """Test converting NIRX files."""
    assert fmt in ('nirx', 'fif')
    raw = read_raw_nirx(fname)
    if fmt == 'fif':
        # Round-trip through FIF so the conversion also survives disk I/O.
        fif_fname = tmp_path / 'test_raw.fif'
        raw.save(fif_fname)
        raw = read_raw_fif(fif_fname)

    def _check_channel_types(raw_obj, present, absent):
        # Assert which fNIRS channel types exist at this pipeline stage.
        for ch_type in present:
            assert ch_type in raw_obj
        for ch_type in absent:
            assert ch_type not in raw_obj

    _check_channel_types(raw, present=('fnirs_cw_amplitude',),
                         absent=('fnirs_od',))
    raw = optical_density(raw)
    _validate_type(raw, BaseRaw, 'raw')
    _check_channel_types(raw, present=('fnirs_od',),
                         absent=('fnirs_cw_amplitude', 'hbo'))
    raw = beer_lambert_law(raw)
    _validate_type(raw, BaseRaw, 'raw')
    _check_channel_types(raw, present=('hbo', 'hbr'),
                         absent=('fnirs_cw_amplitude', 'fnirs_od'))
def g(dist, aq):
    """
    Compute function g (Lemma 5) for a given full parent instantiation.

    Parameters
    ----------
    dist: list of ints
        Counts of the child variable for a given full parent instantiation.
        (The docstring previously called this parameter ``dists``.)
    aq: float
        Equivalent sample size divided by the product of parents' arities.

    Returns
    -------
    float
        ``log(2*min(dist)/aq + 1) - sum(log(2*d/aq + 1) for d in dist)``.
    """
    minimum_term = log(2 * min(dist) / aq + 1)
    return minimum_term - sum(log(2 * d / aq + 1) for d in dist)
def save_2D_animation(embeddings, target_optimizers, emb_space_sizes,
                      total_train_losses, total_test_losses,
                      n_bins=100, cmap_name='jet', **plotting_kwargs):
    """Utility function for visualizing the changes in weights over time in
    UMAP space. The visualization is in 2D for better appreciating the global
    loss surface.

    Writes one PNG per optimization step to ``results\\2D_<optimizer>\\``.

    Args:
        - embeddings: list of embeddings, result of alligned UMAP
        - target_optimizers: list of strings, name of the optimizers
            considered.
        - emb_space_sizes: list of arrays, define the limits of the
            embedding space for the three layers of the MLP.
            NOTE(review): this argument is never read in this body —
            confirm whether it is still needed.
        - total_train_losses: list, training losses history.
        - total_test_losses: list, test losses.
        - n_bins: int, number of bins for discretizing the training loss.
        - cmap_name: string, name of the colormap used for representing
            the change in train losses.
        - **plotting_kwargs: keyword arguments, keyword arguments for the
            plotting function.
            NOTE(review): also unused in this body.

    Returns:
        - None
    """
    # One panel per MLP layer (assumes exactly three embeddings — TODO confirm).
    fig, axs = plt.subplots(1, 3, figsize=(15, 5))
    axs = axs.flatten()
    # Flatten all training losses into one vector used as the Z surface.
    Z = np.array(total_train_losses).flatten()
    for layer, emb in enumerate(embeddings):
        x = emb[:, 0]
        y = emb[:, 1]
        xi = np.linspace(
            x.min(),
            x.max(),
            1000
        )
        yi = np.linspace(
            y.min(),
            y.max(),
            1000
        )
        x_grid, Y_grid = np.meshgrid(xi, yi)
        # Interpolate the scattered loss values onto a regular grid.
        zi = griddata(
            (x, y),
            Z,
            (xi[None, :], yi[:, None]),
            method='linear'
        )
        # Fill interpolation holes with the mean loss so contourf is smooth.
        zi = np.nan_to_num(zi, nan=Z.mean())
        cont = axs[layer].contourf(
            x_grid,
            Y_grid,
            zi,
            cmap=cmap_name,
            levels=n_bins,
            vmin=Z.min(),
            vmax=Z.max()
        )
    # Shared colorbar on the right, driven by the last layer's contour set.
    fig.subplots_adjust(right=0.85)
    cbar_ax = fig.add_axes([0.88, 0.15, 0.04, 0.7])
    fig.colorbar(
        cont,
        cax=cbar_ax,
        label='Training Loss'
    )
    for index, opt_name in enumerate(target_optimizers):
        print(f'Saving Optimizer {opt_name}')
        # Each optimizer owns a contiguous slice of the aligned embeddings.
        emb_size = len(total_test_losses[index])
        start = emb_size * index
        stop = start + emb_size
        embs = [emb[start:stop] for emb in embeddings]
        for ax_idx, ax in enumerate(axs):
            ax.set_title(
                f'Layer {ax_idx + 1} \
                \nOptimizer: {opt_name}'
            )
            if ax_idx == 0:
                ax.set_ylabel('Weights Space \n UMAP 2')
                ax.set_xlabel('Weights Space \n UMAP 1')
            else:
                ax.set_xlabel('Weights Space \n UMAP 1')
        # One saved frame per training step: draw the current weight
        # position as a star on each layer panel, save, then remove it.
        for i in tqdm(range(embs[0].shape[0])):
            point_1 = axs[0].scatter(
                embs[0][i, 0],
                embs[0][i, 1],
                marker="*",
                c='white',
                edgecolor='k',
                s=60
            )
            point_2 = axs[1].scatter(
                embs[1][i, 0],
                embs[1][i, 1],
                c='white',
                marker="*",
                edgecolor='k',
                s=60
            )
            point_3 = axs[2].scatter(
                embs[2][i, 0],
                embs[2][i, 1],
                c='white',
                marker="*",
                edgecolor='k',
                s=60
            )
            # NOTE(review): Windows-style path separators — not portable.
            if not os.path.exists(f'results\\2D_{opt_name}'):
                os.makedirs(f'results\\2D_{opt_name}')
            plt.savefig(
                f'results\\2D_{opt_name}\\{i}.png',
                bbox_inches='tight'
            )
            point_1.remove()
            point_2.remove()
            point_3.remove()
    return None
def plot_scatter(ax: Axes,
                 eps: array,
                 gnt: array,
                 labels: Labels,
                 leg: Labels,
                 pst: Styles,
                 psm: Styles,
                 txtopts: Options,
                 legopts: Options):
    """
    Plot all data and legend

    Parameters
    ==========
    ax
        axes object for plotting on
    eps
        distortions
    gnt
        guarantees
    labels
        list of axes labels [x, y]
    leg
        legend text associated with corresponding datum, or ``None``
    pst
        list of plot styles associated with `thetas`
    psm
        list of plot styles associated with `proj_dims`
    txtopts
        text style options for axes labels
    legopts
        style options for legend
    """
    # Match tick label size to the legend font size when one is given.
    if 'prop' in legopts and 'size' in legopts['prop']:
        ax.tick_params(axis='both', which='major',
                       labelsize=legopts['prop']['size'])
    lhs = []
    # One marker per (theta, proj_dim) cell; styles combine row + column.
    for t, pt in enumerate(pst[:eps.shape[0]]):
        for m, pm in enumerate(psm[:eps.shape[1]]):
            lhs.append(ax.plot(eps[t, m], gnt[t, m],
                               **pt, **pm, linestyle='none')[0])
        # extra element at end of each row: label with value of theta
        lhs.append(ax.plot(eps[t, 0], gnt[t, 0],
                           **pt, **psm[0], linestyle='none')[0])
    ax.set_xlabel(labels[0], **txtopts)
    ax.set_ylabel(labels[1], **txtopts)
    # ax.yaxis.set_label_coords(-0.1, 0.5)
    if leg is not None:
        # Keep the first row (one entry per proj_dim) plus the per-theta
        # extras, which sit every (hstep + 1) entries thereafter.
        hstep = gnt.shape[1]
        lhs = lhs[0:hstep] + lhs[hstep::(hstep + 1)]
        leg = leg[0:hstep] + leg[hstep::(hstep + 1)]
        ax.legend(lhs, leg, numpoints=1, **legopts)
    plot_equality(ax)
def order_tweets_by_polarity(tweets, positive_highest=True):
    """Sort tweets by polarity.

    :param tweets: iterable of objects exposing a ``polarity`` attribute
    :param positive_highest: when True (default), the most positive tweets
        come first; when False, the most negative come first
    :return: a new list of tweets in the requested order
    """
    # ``True if positive_highest else False`` was a redundant identity:
    # the flag *is* the ``reverse`` setting.
    return sorted(tweets, key=lambda tweet: tweet.polarity,
                  reverse=positive_highest)
def text_file_md5(filename, exclude_lines=None, exclude_re=None,
                  prepend_lines=None, append_lines=None):
    """Get a MD5 (check) sum of a text file.

    Works in the same way as `file_md5()` function but ignores newlines
    characters and excludes lines from the file as well as prepend or
    append them if requested.

    :param exclude_lines: list of strings to be excluded
        (newline characters should not be part of the strings)
    :param exclude_re: regular expression string;
        lines matching this regular expression will not be considered
    :param prepend_lines: list of lines to be prepended to the file
        before computing the sum
    :param append_lines: list of lines to be appended to the file
        before computing the sum
    """
    digest = hashlib.md5()
    pattern = re.compile(exclude_re) if exclude_re else None

    def _feed(text):
        # Python 2 hashes the str directly; Python 3 must encode first.
        digest.update(text if sys.version_info[0] == 2 else encode(text))

    for extra in prepend_lines or ():
        _feed(extra)
    with open(filename, 'r') as f:
        for line in f:
            # replace platform newlines by standard newline
            if os.linesep != '\n':
                line = line.rstrip(os.linesep) + '\n'
            if exclude_lines and line in exclude_lines:
                continue
            if pattern and pattern.match(line):
                continue
            _feed(line)
    for extra in append_lines or ():
        _feed(extra)
    return digest.hexdigest()
def read_in_Reff_file(file_date, VoC_flag=None, scenario=''):
    """
    Read in Reff h5 file produced by generate_RL_forecast.

    Args:
        file_date: (date as string) date of data file
        VoC_date: (date as string) date from which to increase Reff by VoC
        VoC_flag: (str or None) 'Alpha' or 'Delta' to scale Reff for the
            corresponding variant; '' or None applies no scaling
        scenario: (str) suffix of the results file to read

    Returns:
        pandas.DataFrame of Reff forecasts; columns from index 8 onwards
        are the sampled Reff trajectories (2000 samples — TODO confirm).
    """
    from scipy.stats import beta
    from params import VoC_start_date, use_vaccine_effect
    if file_date is None:
        raise Exception('Need to provide file date to Reff read.')
    file_date = pd.to_datetime(file_date).strftime("%Y-%m-%d")
    df_forecast = pd.read_hdf('results/soc_mob_R'+file_date+scenario+'.h5', key='Reff')
    if (VoC_flag != '') and (VoC_flag is not None):
        VoC_start_date = pd.to_datetime(VoC_start_date)
        if VoC_flag == 'Alpha':
            print('This VoC will be deprecated in future.')
            # Here we apply the beta(6,14)+1 scaling from VoC to the Reff.
            # We do so by editing a slice of the data frame. Forgive me for my sins.
            row_bool_to_apply_VoC = (df_forecast.type == 'R_L') & (pd.to_datetime(df_forecast.date, format='%Y-%m-%d') >= VoC_start_date)
            index_map = df_forecast.index[row_bool_to_apply_VoC]
            # Index 9 and onwards are the 2000 Reff samples.
            df_slice_after_VoC = df_forecast.iloc[index_map, 8:]
            multiplier = beta.rvs(6,14, size = df_slice_after_VoC.shape) + 1
        if VoC_flag == 'Delta':  # Increase from Delta
            # Here we apply the beta(2,2)+3 scaling from VoC to the Reff based on CDC results.
            # We do so by editing a slice of the data frame. Forgive me for my sins.
            row_bool_to_apply_VoC = (df_forecast.type == 'R_L') & (pd.to_datetime(df_forecast.date, format='%Y-%m-%d') >= VoC_start_date)
            index_map = df_forecast.index[row_bool_to_apply_VoC]
            # Index 9 and onwards are the 2000 Reff samples.
            df_slice_after_VoC = df_forecast.iloc[index_map, 8:]
            multiplier = beta.rvs(3,3, size = df_slice_after_VoC.shape) + 2.1 - 0.5  # Mean 2.1 Delta
        # NOTE(review): this assignment relies on ``multiplier`` /
        # ``df_slice_after_VoC`` being set by one of the branches above;
        # an unknown VoC_flag value would raise NameError here — confirm
        # intended placement against the original indentation.
        df_forecast.iloc[index_map , 8:] = df_slice_after_VoC*multiplier
    if use_vaccine_effect:
        # Load in vaccination effect data
        vaccination_by_state = pd.read_csv('data/vaccination_by_state.csv', parse_dates=['date'])
        vaccination_by_state = vaccination_by_state[['state', 'date','overall_transmission_effect']]
        # Make datetime objs into strings
        vaccination_by_state['date_str'] = pd.to_datetime(vaccination_by_state['date'], format='%Y-%m-%d').dt.strftime('%Y-%m-%d')
        df_forecast['date_str'] = pd.to_datetime(df_forecast['date'], format='%Y-%m-%d').dt.strftime('%Y-%m-%d')
        # Filling in future days will the same vaccination level as current.
        for state, forecast_df_state in df_forecast.groupby('state'):
            latest_Reff_data_date = max(forecast_df_state.date_str)
            latest_vaccination_data_date = max(vaccination_by_state.groupby('state').get_group(state)['date'])
            latest_vaccination_date_effect = vaccination_by_state.groupby(['state', 'date']).get_group((state, latest_vaccination_data_date))['overall_transmission_effect'].iloc[0]
            # Fill in the future dates with the same level of vaccination.
            vaccination_by_state = vaccination_by_state.append(pd.DataFrame([(state, pd.to_datetime(date), latest_vaccination_date_effect, date.strftime('%Y-%m-%d')) for date in pd.date_range(latest_vaccination_data_date, latest_Reff_data_date)], columns = ['state', 'date', 'overall_transmission_effect', 'date_str']))
        # Create a (state,date) indexed map of transmission effect
        overall_transmission_effect = vaccination_by_state.set_index(['state', 'date_str'])['overall_transmission_effect'].to_dict()
        # Apply this effect to the forecast; non-R_L rows are left unscaled.
        vaccination_multiplier = df_forecast.apply(lambda row: 1 if row['type']!='R_L' else overall_transmission_effect.get((row['state'], row['date_str']),1), axis=1)
        df_forecast = df_forecast.drop('date_str', axis='columns')
        # Apply the vaccine effect to the forecast. The 8:onwards columns are all the Reff paths.
        df_forecast.iloc[: , 8:] = df_forecast.iloc[: , 8:].multiply(vaccination_multiplier.to_numpy(), axis='rows')
    return df_forecast
def main(argv):
    """Main Compute Demo

    When invoked from the command line, it will connect using secrets.py
    (see secrets.py-dist for instructions and examples), and perform the
    following tasks:

    - List current nodes
    - List available images (up to 10)
    - List available sizes (up to 10)

    Returns 1 on failure; returns None after a successful run.
    """
    try:
        driver = get_demo_driver()
    except InvalidCredsError as e:
        print("Invalid Credentials: " + e.value)
        return 1

    try:
        print(">> Loading nodes...")
        pprint(driver.list_nodes())
        print(">> Loading images... (showing up to 10)")
        pprint(driver.list_images()[:10])
        print(">> Loading sizes... (showing up to 10)")
        pprint(driver.list_sizes()[:10])
    except Exception as e:
        # BUG FIX: ``"..." + e`` concatenated str with an Exception object,
        # which itself raises TypeError and masked the real error.
        print("A fatal error occurred: " + str(e))
        return 1
def unregister_domain(domain):
    """Unregisters a domain from reporting.

    Unregistering a domain that isn't registered is a no-op.
    """
    # Remove every report-related key stored for this domain.
    for prefix in ('registered_for_reporting',
                   'resolution_report',
                   'resolution_report_updated',
                   'delta_report',
                   'delta_report_updated',
                   'delta_report_read'):
        db.delete(':'.join((prefix, domain)))
def string_with_fixed_length(s="", l=30):
    """
    Return ``s`` padded with trailing spaces (or cropped) to exactly ``l``
    characters.

    :param s: input string
    :param l: total length of the string (will crop original string if longer than l)
    :return: string of length ``l`` (empty string when ``l`` <= 0)
    """
    if l <= 0:
        # Mirror the original character loop, which produced "" here.
        return ""
    # str.ljust pads in C — avoids the quadratic char-by-char concatenation.
    return s[:l].ljust(l)
def fetch_ref_proteomes():
    """
    This method returns a list of all reference proteome accessions available
    from Uniprot, one accession per response line.
    """
    response = urllib2.urlopen(REF_PROT_LIST_URL)
    return [accession.strip() for accession in response]
def test_create_without_description(location):
    """Creating a Location without a description yields a Location instance."""
    created = Location.objects.create(name=location.name, is_demo=False)
    assert isinstance(created, Location)
def load(fp: BinaryIO, *, fmt=None, **kwargs) -> TextPlistTypes:
    """Read a .plist file (forwarding all arguments)."""
    if fmt is None:
        # Sniff the format from the first bytes, then rewind the stream.
        header = fp.read(32)
        fp.seek(0)
        if FMT_TEXT_HANDLER["detect"](header):
            fmt = PF.FMT_TEXT
    if fmt != PF.FMT_TEXT:
        # Delegate non-text formats to plistlib; this one can fail a bit
        # more violently, like the original.
        return pl.load(fp, fmt=translation[fmt], **kwargs)
    return FMT_TEXT_HANDLER["parser"](**kwargs).parse(fp)
def test_get_points_within_radius_of_cameras_no_points():
    """Catch degenerate input."""
    poses = [
        Pose3(Rot3(), np.zeros(3)),
        Pose3(Rot3(), np.array([10.0, 0, 0])),
    ]
    empty_points = np.zeros((0, 3))
    result = geometry_comparisons.get_points_within_radius_of_cameras(
        poses, empty_points, 10.0)
    assert result is None, "At least one 3d point must be provided"
def load_network_interface_instance_to_subnet_relations(neo4j_session: neo4j.Session, update_tag: int) -> None:
    """
    Creates (:EC2Instance)-[:PART_OF_SUBNET]->(:EC2Subnet) if
    (:EC2Instance)--(:NetworkInterface)--(:EC2Subnet).
    """
    # Shortcut relationship: instance -> subnet, derived via the interface.
    query = """
    MATCH (i:EC2Instance)-[:NETWORK_INTERFACE]-(interface:NetworkInterface)-[:PART_OF_SUBNET]-(s:EC2Subnet)
    MERGE (i)-[r:PART_OF_SUBNET]->(s)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = $update_tag
    """
    logger.debug("-> Instance to subnet")
    neo4j_session.run(query, update_tag=update_tag)
def get_rixs(header, light_ROI=[0, np.inf, 0, np.inf],
             curvature=np.array([0., 0., 0.]), bins=1, ADU_per_photon=None,
             detector='rixscam_centroids',
             min_threshold=-np.inf, max_threshold=np.inf,
             background=None):
    """
    Create rixs spectra according to procces_dict
    and return data as generator with similar behavior to
    header.data()

    Parameters
    ----------
    header : databroker header object
        A dictionary-like object summarizing metadata for a run.
    light_ROI : [minx, maxx, miny, maxy]
        Define the region of the sensor to use.
        Events are chosen with minx <= x < maxx and miny <= y < maxy
    curvature : array
        The polynominal coeffcients describing the image curvature.
        These are in decreasing order e.g.
        .. code-block:: python

           curvature[0]*x**2 + curvature[1]*x**1 + curvature[2]*x**0

        The order of polynominal used is set by len(curvature) - 1
    bins : float or array like
        Binning in the y direction.
        If `bins` is a sequence, it defines the bin edges,
        including the rightmost edge.
        If `bins' is a single number this defines the step
        in the bins sequence, which is created using the min/max
        of the input data. Half a bin may be discarded in order
        to avoid errors at the edge. (Default 1.)
    ADU_per_photon : float
        Conversion factor between the input intensity values in table
        and photons. (Default is 1)
    detector : string
        name of the detector passed on header.data
        At SIX
        'rixscam_centroids' is the centroided data, which is the default
        'rixscam_image' is the image data
    min_threshold : float
        fliter events below this threshold
        defaults to -infinity to include all events
    max_threshold : float
        fliter events above this threshold
        defaults to +infinity to include all events
    background : array
        2D array for background subtraction
        Only used for image data.

    Yields
    -------
    spectra : generator
        RIXS spectra are returned as a generator
    """
    # BUG FIX: copy before any mutation. The default argument is a shared
    # mutable list, and the centroid branch below may assign light_ROI[3];
    # without this copy the *default* was permanently overwritten by the
    # first call that hit that branch.
    light_ROI = list(light_ROI)
    if ADU_per_photon is None:
        # Estimate the photon conversion from the monochromator energy.
        pgm_en = header.table(stream_name='baseline',
                              fields=['pgm_en']).mean().values.mean()
        if np.isnan(pgm_en):
            pgm_en = header.table(fields=['pgm_en']).mean().values.mean()
        ADU_per_photon = pgm_en * 1.12
    if detector == 'rixscam_centroids':
        try:
            iter(bins)
        except TypeError:
            # Scalar ``bins``: derive bin edges from the (possibly
            # data-determined) ROI extent and the requested step.
            if np.isinf(light_ROI[3]):
                total_table = pd.concat(t for event in header.data(detector)
                                        for t in event)
                light_ROI[3] = total_table['y_eta'].max()
            bins = step_to_bins(light_ROI[2], light_ROI[3], bins)
        for event in header.data(detector):
            yield [centroids_to_spectrum(table, light_ROI=light_ROI,
                                         curvature=curvature, bins=bins,
                                         min_threshold=min_threshold,
                                         max_threshold=max_threshold,
                                         ADU_per_photon=ADU_per_photon)
                   for table in event]
    elif detector == 'rixscam_image':
        for ImageStack in header.data(detector):
            yield image_to_spectrum(ImageStack, light_ROI=light_ROI,
                                    curvature=curvature, bins=bins,
                                    ADU_per_photon=ADU_per_photon,
                                    min_threshold=min_threshold,
                                    max_threshold=max_threshold,
                                    background=background)
    else:
        # BUG FIX: emit a real warning instead of ``raise Warning(...)``.
        # Raising aborted the generator here, which made the fallback loop
        # below unreachable and contradicted the message's promise to try
        # returning data anyway.
        import warnings
        warnings.warn("detector {} not reconized, but we will try to"
                      "return data in any case.".format(detector))
        for ImageStack in header.data(detector):
            yield image_to_spectrum(ImageStack, light_ROI=light_ROI,
                                    curvature=curvature, bins=bins,
                                    min_threshold=min_threshold,
                                    max_threshold=max_threshold,
                                    background=background)
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper | 33,574 |
def create_affected_entities_description(security_data: SecurityData, limit: int = 5) -> str:
    """Create a description of the entities which are affected by a security problem.

    :param security_data: the security details for which to create the description
    :param limit: the maximum number of entities to list in the description
    :return: the description (one line for entities, one for hostnames)
    """
    def _stringify(entity_list: Set[str], label: str, the_limit: int) -> str:
        # Above the limit, link to the details page instead of listing all.
        if len(entity_list) > the_limit:
            return f"{len(entity_list)} {label} affected ([details|{security_data.url}])\n"
        # Sort for a deterministic message: joining a set directly made the
        # rendered order vary between runs.
        return f"Affected {label}: {', '.join(sorted(entity_list))}\n"

    desc = _stringify(security_data.affected_entity_names, 'entities', limit)
    desc += _stringify(security_data.related_hostnames, 'hostnames', limit)
    return desc
def amac_person_org_list_ext():
    """
    AMAC (Asset Management Association of China) public disclosure:
    practitioner information — externally disclosed fund-practitioner
    qualification registrations, one row per organisation.
    http://gs.amac.org.cn/amac-infodisc/res/pof/extperson/extPersonOrgList.html

    :return: headcount and qualification columns per organisation
    :rtype: pandas.DataFrame
    """
    data = get_data(url=amac_person_org_list_ext_url, payload=amac_person_org_list_ext_payload)
    need_data = data["content"]
    keys_list = [
        "orgName",
        "orgType",
        "extWorkerTotalNum",
        "extOperNum",
        "extSalesmanNum",
        "extInvestmentManagerNum",
        "extFundManagerNum",
    ]  # keys whose values are kept from the payload
    manager_data_out = pd.DataFrame(need_data)
    manager_data_out = manager_data_out[keys_list]
    # NOTE(review): the last two Chinese labels look swapped relative to the
    # JSON keys (extInvestmentManagerNum -> "基金经理" = fund manager,
    # extFundManagerNum -> "投资经理" = investment manager) — confirm
    # against the source page before relying on these columns.
    manager_data_out.columns = [
        "机构名称",
        "机构性质",
        "员工人数",
        "基金从业资格",
        "基金销售业务资格",
        "基金经理",
        "投资经理",
    ]
    return manager_data_out
def concatenate(arrays, axis=0, _no_check=False, align=False, **kwargs):
    """ concatenate several DimArrays

    Parameters
    -----------
    arrays : list of DimArrays
        arrays to concatenate
    axis : int or str
        axis along which to concatenate (must exist)
    align : bool, optional
        align secondary axes before joining on the primary
        axis `axis`. Default to False.
    **kwargs : optional key-word arguments passed to align, if align is True

    Returns
    -------
    concatenated DimArray

    See Also
    --------
    stack: join arrays along a new dimension
    align: align arrays

    Examples
    --------

    1-D

    >>> from dimarray import DimArray
    >>> a = DimArray([1,2,3], axes=[['a','b','c']])
    >>> b = DimArray([4,5,6], axes=[['d','e','f']])
    >>> concatenate((a, b))
    dimarray: 6 non-null elements (0 null)
    0 / x0 (6): 'a' to 'f'
    array([1, 2, 3, 4, 5, 6])

    2-D

    >>> a = DimArray([[1,2,3],[11,22,33]])
    >>> b = DimArray([[4,5,6],[44,55,66]])
    >>> concatenate((a, b), axis=0)
    dimarray: 12 non-null elements (0 null)
    0 / x0 (4): 0 to 1
    1 / x1 (3): 0 to 2
    array([[ 1,  2,  3],
           [11, 22, 33],
           [ 4,  5,  6],
           [44, 55, 66]])
    >>> concatenate((a, b), axis='x1')
    dimarray: 12 non-null elements (0 null)
    0 / x0 (2): 0 to 1
    1 / x1 (6): 0 to 2
    array([[ 1,  2,  3,  4,  5,  6],
           [11, 22, 33, 44, 55, 66]])
    """
    # input argument check
    if not type(arrays) in (list, tuple):
        raise ValueError("arrays must be list or tuple, got {}:{}".format(type(arrays), arrays))
    # Work on a fresh list so scalar promotion below can't mutate the input.
    arrays = [a for a in arrays]
    from dimarray import DimArray, Dataset
    for i, a in enumerate(arrays):
        if isinstance(a, Dataset):
            msg = "\n==>Note: you may use `concatenate_ds` for Datasets"
            raise ValueError("concatenate: expected DimArray. Got {}".format(type(a))+msg)
        elif np.isscalar(a):
            arrays[i] = DimArray(a)
        # NOTE(review): this checks the *original* ``a``, so a scalar just
        # promoted above still appears to trip this error — confirm the
        # intended control flow (possibly meant to be ``elif``).
        if not isinstance(a, DimArray):
            raise ValueError("concatenate: expected DimArray. Got {}".format(type(a)))
    # Resolve a named axis to its positional index.
    if type(axis) is not int:
        axis = arrays[0].dims.index(axis)
    dim = arrays[0].dims[axis]
    # align secondary axes prior to concatenate
    # TODO: just encourage user to use align outside this function
    # and remove argument passing
    if align:
        kwargs['strict'] = True
        for ax in arrays[0].axes:
            if ax.name != dim:
                arrays = align_(arrays, axis=ax.name, **kwargs)
    values = np.concatenate([a.values for a in arrays], axis=axis)
    # NOTE(review): the lambda ignores its argument ``x`` and always reads
    # arrays[0] — confirm this is intentional.
    _get_subaxes = lambda x: [ax for i, ax in enumerate(arrays[0].axes) if i != axis]
    subaxes = _get_subaxes(arrays[0])
    # concatenate axis values
    newaxis = _concatenate_axes([a.axes[axis] for a in arrays])
    if not align and not _no_check:
        # check that other axes match
        for ax in subaxes:
            for a in arrays:
                if not np.all(a.axes[ax.name].values == ax.values):
                    raise ValueError("contatenate: secondary axes do not match. Align first? (`align=True`)")
    # print arrays[0]
    # for i,a in enumerate(arrays[1:]):
    #     if not _get_subaxes(a) == subaxes:
    #         msg = "First array:\n{}\n".format(subaxes)
    #         msg += "{}th array:\n{}\n".format(i,_get_subaxes(a))
    #         raise ValueError("contatenate: secondary axes do not match. Align first? (`align=True`)")
    #     print a
    # print '==> arrays look ok'
    # Rebuild the full axis list with the concatenated axis in place.
    newaxes = subaxes[:axis] + [newaxis] + subaxes[axis:]
    return arrays[0]._constructor(values, newaxes)
def ele_types(eles):
    """
    Returns a list of unique types in eles
    """
    unique_types = {e['type'] for e in eles}
    return list(unique_types)
def colorful_subgraph(G, colors, k, s, subgraph_type, get_detail=True, verbose=False):
    """Detect if colorful path exists fom s to any node by dynamic programming.

    Args:
        G (nx.Graph): with n nodes and m edges
        colors (list): list of integers represents node colors
        k (int): number of colors
        s (int): starting node
        subgraph_type (str): 'path' or 'cycle'
        get_detail (bool): when False, return the reachable targets only;
            when True, backtrack and count the colorful paths
        verbose (bool): print a per-source summary of the count

    Return:
        When ``get_detail`` is False: nodes connected to ``s`` by at least
        one colorful path. Otherwise: total number of colorful paths found
        (int).
    """
    n = G.number_of_nodes()
    # One DP column per color subset (bitmask over k colors).
    col = 2**k
    dp_mat = np.zeros((n, col))
    # Base case: the start node reached with exactly its own color used.
    dp_mat[s][power2(colors[s])] = 1
    targets = dp_helper(dp_mat, 2, G, colors, k, set([s]), s, subgraph_type)
    if not get_detail:
        return targets
    else:
        empty_color = 0
        total_count = 0
        # Reconstruct paths from each reachable target back to s and count.
        for target in targets:
            total_count += backtrack(dp_mat, G, colors, target, s, [str(target)], set_bit(empty_color, colors[target]), verbose)
        if verbose:
            print('from node', s, 'find in total', total_count, 'colorful paths of length', k)
        return total_count
def find_all_combos(
        conformer,
        delta=float(120),
        cistrans=True,
        chiral_centers=True):
    """
    A function to find all possible conformer combinations for a given conformer

    Params:
    - conformer (`Conformer`) an AutoTST `Conformer` object of interest
    - delta (int or float): a number between 0 and 180 or how many conformers to generate per dihedral
    - cistrans (bool): indication of if one wants to consider cistrans bonds
    - chiral_centers (bool): indication of if one wants to consider chiral centers bonds

    Returns:
    - all_combos (list): a list corresponding to the number of unique conformers created.
    """
    conformer.get_geometries()
    _, torsions = find_terminal_torsions(conformer)

    torsion_angles = np.arange(0, 360, delta)
    torsion_combos = list(itertools.product(
        torsion_angles, repeat=len(torsions)))

    if cistrans:
        usable_cistrans = []
        cistrans_options = ["E", "Z"]
        try:
            ring_info = conformer._pseudo_geometry.GetRingInfo()
        except AttributeError:
            ring_info = conformer.rdkit_molecule.GetRingInfo()
        # FIX: the loop variable used to be named ``cistrans`` too,
        # shadowing the boolean parameter inside this branch.
        for ct_bond in conformer.cistrans:
            i, _, k, _ = ct_bond.atom_indices
            # Skip bonds whose central atoms sit in a ring: their E/Z
            # label cannot be toggled independently.
            if (ring_info.NumAtomRings(i) != 0) or (ring_info.NumAtomRings(k) != 0):
                continue
            usable_cistrans.append(ct_bond)
        cistrans_combos = list(itertools.product(
            cistrans_options, repeat=len(usable_cistrans)))
    else:
        cistrans_combos = [()]

    if chiral_centers:
        usable_centers = []
        chiral_options = ["R", "S"]
        try:
            ring_info = conformer._pseudo_geometry.GetRingInfo()
        except AttributeError:
            ring_info = conformer.rdkit_molecule.GetRingInfo()
        for center in conformer.chiral_centers:
            # Ring-membered chiral centers are likewise left untouched.
            if ring_info.NumAtomRings(center.atom_index) != 0:
                continue
            usable_centers.append(center)
        chiral_combos = list(itertools.product(
            chiral_options, repeat=len(usable_centers)))
    else:
        chiral_combos = [()]

    all_combos = list(
        itertools.product(
            torsion_combos,
            cistrans_combos,
            chiral_combos))
    return all_combos
def hmc2(f, x, options, gradf, *args, **kargs):
    """
    SAMPLES = HMC2(F, X, OPTIONS, GRADF)

    Description
    SAMPLES = HMC2(F, X, OPTIONS, GRADF) uses a hybrid Monte Carlo
    algorithm to sample from the distribution P ~ EXP(-F), where F is the
    first argument to HMC2. The Markov chain starts at the point X, and
    the function GRADF is the gradient of the `energy' function F.

    HMC2(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional arguments to
    be passed to F() and GRADF().

    [SAMPLES, ENERGIES, DIAGN] = HMC2(F, X, OPTIONS, GRADF) also returns a
    log of the energy values (i.e. negative log probabilities) for the
    samples in ENERGIES and DIAGN, a structure containing diagnostic
    information (position, momentum and acceptance threshold) for each
    step of the chain in DIAGN.POS, DIAGN.MOM and DIAGN.ACC respectively.
    All candidate states (including rejected ones) are stored in
    DIAGN.POS. The DIAGN structure contains fields:

    pos
        the position vectors of the dynamic process
    mom
        the momentum vectors of the dynamic process
    acc
        the acceptance thresholds
    rej
        the number of rejections
    stp
        the step size vectors

    NOTE(review): this is Python 2 code (print statements); it will not
    run unmodified on Python 3.
    """
    global HMC_MOM
    # Reference to structures is much slower, so...
    opt_nsamples = options.nsamples
    opt_nomit = options.nomit
    opt_window = options.window
    opt_steps = options.steps
    opt_display = options.display
    opt_persistence = options.persistence
    # Persistent momentum: partial refresh with decay ``alpha``.
    if opt_persistence:
        alpha = options.decay
        salpha = np.sqrt(1-alpha**2);
    else:
        alpha = salpha = 0.
    # TODO: not implemented yet. Haven't figured out how this is supposed to work...
    if options.stepsf is not None:
        # Stepsizes, varargin gives the opt.stepsf arguments (net, x ,y)
        # where x is input data and y is a target data.
        # epsilon = feval(opt.stepsf,varargin{:}).*opt.stepadj;
        raise NotImplementedError
    else:
        epsilon = options.stepadj
    nparams = len(x)
    # Check the gradient evaluation.
    if options.checkgrad:
        # Check gradients
        error = check_grad(f, gradf, x, *args)
        print "Energy gradient error: %f"%error
    # Initialize matrix of returned samples
    samples = np.zeros((opt_nsamples, nparams))
    # Check all keyword arguments
    known_keyargs = ['return_energies','return_diagnostics']
    for key in kargs.keys():
        assert key in known_keyargs, 'unknown option %s'%key
    # Return energies?
    return_energies = kargs.get('return_energies',False)
    if return_energies:
        energies = np.zeros(opt_nsamples)
    else:
        energies = np.zeros(0)
    # Return diagnostics?
    return_diagnostics = kargs.get('return_diagnostics',False)
    if return_diagnostics:
        # NOTE(review): np.zeros is being called with two positional
        # arguments here (shape, dtype); a 2-D array needs a shape tuple
        # ``np.zeros((opt_nsamples, nparams))`` — confirm and fix.
        diagn_pos = np.zeros(opt_nsamples, nparams)
        diagn_mom = np.zeros(opt_nsamples, nparams)
        diagn_acc = np.zeros(opt_nsamples)
    else:
        diagn_pos = np.zeros((0,0))
        diagn_mom = np.zeros((0,0))
        diagn_acc = np.zeros(0)
    if not opt_persistence or HMC_MOM is None or nparams != len(HMC_MOM):
        # Initialise momenta at random
        p = np.random.randn(nparams)
    else:
        # Initialise momenta from stored state
        p = HMC_MOM
    # Main loop.
    all_args = [f,
                x,
                gradf,
                args,
                p,
                samples,
                energies,
                diagn_pos,
                diagn_mom,
                diagn_acc,
                opt_nsamples,
                opt_nomit,
                opt_window,
                opt_steps,
                opt_display,
                opt_persistence,
                return_energies,
                return_diagnostics,
                alpha,
                salpha,
                epsilon]
    # Prefer the Cython-compiled main loop; fall back to pure Python on
    # any failure (bare except is deliberate best-effort here).
    try:
        os.environ['C_INCLUDE_PATH']=np.get_include()
        import pyximport; pyximport.install()
        from hmc2c import hmc_main_loop as c_hmc_main_loop
        print "Using compiled code"
        c_hmc_main_loop(*all_args)
    except:
        print "Using pure python code"
        hmc_main_loop(*all_args)
    if opt_display:
        # NOTE(review): ``nreject`` is never assigned in this function —
        # this line would raise NameError; presumably the main loop was
        # meant to return it. Same issue below in diagn['rej'].
        print '\nFraction of samples rejected: %g\n'%(nreject/float(opt_nsamples))
    # Store diagnostics
    if return_diagnostics:
        diagn = dict()
        diagn['pos'] = diagn_pos # positions matrix
        diagn['mom'] = diagn_mom # momentum matrix
        diagn['acc'] = diagn_acc # acceptance treshold matrix
        diagn['rej'] = nreject/float(opt_nsamples) # rejection rate
        diagn['stps'] = epsilon # stepsize vector
    # Store final momentum value in global so that it can be retrieved later
    if opt_persistence:
        HMC_MOM = p
    else:
        HMC_MOM = None
    # Assemble the return value: samples alone, or a tuple with the
    # requested extras appended in a fixed order.
    if return_energies or return_diagnostics:
        out = (samples,)
    else:
        return samples
    if return_energies: out += (energies,)
    if return_diagnostics: out += (diagn,)
    return out
def test_version(decider_mock, command_line_args, capsys):
    """Ensure version is displayed."""
    # Run function: argparse exits with code 0 after printing the version.
    parser = seddy_main.build_parser()
    with pytest.raises(SystemExit) as excinfo:
        parser.parse_args(command_line_args)
    assert excinfo.value.code == 0
    # Check output matches the installed distribution version.
    printed = capsys.readouterr().out.strip()
    expected = pkg_resources.get_distribution("seddy").version
    assert printed == expected
def generate_abbreviations(
        labels: tp.Iterable[str],
        max_abbreviation_len: int = 3,
        dictionary: tp.Union[tp.Tuple[str], str] = "cdfghjklmnpqrstvxz"):
    """
    Compute a unique abbreviation for each given label with
    :func:`beatsearch.utils.generate_unique_abbreviation`, feeding the
    abbreviations chosen so far back in so no two labels collide.

    :param labels: labels to abbreviate
    :param max_abbreviation_len: maximum length of each abbreviation
    :param dictionary: characteristic characters (defaults to consonants)
    :return: abbreviations of the given labels, in label order
    """
    chosen = []
    for current_label in labels:
        abbreviation = generate_unique_abbreviation(
            current_label,
            max_len=max_abbreviation_len,
            taken_abbreviations=chosen,
            dictionary=dictionary,
        )
        chosen.append(abbreviation)
    return chosen
def test_address__DeletePostalAddressForm__1(person_data, browser):
    """`DeletePostalAddressForm` allows to delete a postal address."""
    # Editors are allowed to delete address entries.
    browser.login('editor')
    browser.open(browser.PERSON_DELETE_ENTRY_URL)
    # Select the "RST-Software" postal address and request its deletion.
    browser.getControl('RST-Software').click()
    browser.getControl('Delete entry').click()
    # The user is first sent to a confirmation view.
    assert browser.POSTAL_ADDRESS_DELETE_URL == browser.url
    browser.getControl('Yes').click()
    # A success message names the deleted address ...
    assert (u'"RST-Software, Forsterstra\xdfe 302a, 98344, Erfurt, Germany" '
            u'deleted.' == browser.message)
    assert browser.PERSON_EDIT_URL
    # ... and only the remaining address is still offered as main address.
    assert (['c/o Mama, Demoweg 23, 88888, Testhausen, Austria'] ==
            browser.getControl('main postal address').displayOptions)
def test_view_change_done_delayed(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    A node is slow so is behind other nodes, after view change, it catches up
    but it also gets view change message as delayed, a node should start
    participating only when caught up and ViewChangeCone quorum received.
    """
    # Pick one non-primary node to act as the "slow" node.
    nprs = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)]
    slow_node = nprs[-1]
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    delay_3pc = 10   # seconds to delay 3-phase-commit messages to slow_node
    delay_vcd = 25   # seconds to delay new-view messages to slow_node
    delay_3pc_messages([slow_node], 0, delay_3pc)
    slow_node.nodeIbStasher.delay(nv_delay(delay_vcd))
    def chk(node):
        # A node is healthy when it participates and every replica of it
        # has decided whether it is primary or not.
        assert node.isParticipating
        assert None not in {r.isPrimary for r in node.replicas.values()}
    # Generate traffic so slow_node actually falls behind.
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 5 * 4, 4)
    ensure_view_change(looper, nodes=txnPoolNodeSet)
    # After view change, the slow node successfully completes catchup
    waitNodeDataEquality(looper, slow_node, *other_nodes)
    # Other nodes complete view change, select primary and participate
    for node in other_nodes:
        looper.run(eventually(chk, node, retryWait=1))
    # Since slow node catches up successfully, it catch last primary
    assert slow_node.isParticipating
    assert {r.isPrimary for r in slow_node.replicas.values()} != {None}
    assert all(slow_node.viewNo == node.viewNo for node in other_nodes)
    # Send requests to make sure pool is functional
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5)
    # Repair network
    slow_node.reset_delays_and_process_delayeds()
    # `slow_node` selects primary and participate
    looper.run(eventually(chk, slow_node, retryWait=1))
    # Processes requests received during lack of primary
    waitNodeDataEquality(looper, slow_node, *other_nodes)
    # Send more requests and compare data of all nodes
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def best_hand(hand):
    """Return the best 5-card hand out of a 7-card hand.

    All 5-card combinations are ranked with ``hand_rank`` and compared
    with ``compare``; the highest-ranked combination wins.
    """
    best_rank = 0, 0, 0
    best_combination = None
    # combinations() already yields lazily, no explicit iterator needed.
    for candidate in combinations(hand, 5):
        candidate_rank = hand_rank(candidate)
        if compare(candidate_rank, best_rank):
            best_rank = candidate_rank
            best_combination = candidate
    return best_combination
def calculate_label_counts(examples):
    """Count how many examples carry each label.

    Assumes that the examples each have ONE label (exposed as
    ``example.label``), and not a distribution over labels.

    :param examples: iterable of objects with a ``label`` attribute.
    :returns: dict mapping each label to its number of occurrences.
    """
    # stdlib Counter replaces the hand-rolled dict/get counting loop.
    from collections import Counter
    return dict(Counter(example.label for example in examples))
def add_custom_tax(
    df,
    segment_income,
    w,
    base_income,
    incidence,
    name,
    total=None,
    ratio=None,
    verbose=True,
):
    """Add a custom tax based on incidence analysis driven by percentiles.
    :param df: DataFrame.
    :param segment_income: Income measure used to segment tax units into
        quantiles.
    :param w: Weight used to segment into quantiles (either s006 or XTOT_m).
    :param base_income: Income measure by which incidence is multiplied to
        estimate liability.
    :param incidence: pandas Series indexed on the floor of an income
        percentile, with values for the tax rate.
    :param name: Name of the column to add.
    :param total: Total amount the tax should generate. If not provided,
        liabilities are calculated only based on the incidence schedule.
        (Default value = None)
    :param ratio: Ratio to adjust the tax by, compared to the original tax.
        This acts as a multiplier for the incidence argument.
        (Default value = None)
    :param verbose: Whether to print the tax adjustment factor if needed.
        Defaults to True.
    :returns: Nothing. Adds the column name to df representing the tax
        liability. df is also sorted by segment_income.
    """
    if ratio is not None:
        # Scale the whole incidence schedule up-front.
        incidence = incidence * ratio
        assert total is None, "ratio and total cannot both be provided."
    # NOTE: sorts df in place (side effect documented in the docstring).
    df.sort_values(segment_income, inplace=True)
    # Cumulative weight share expressed as a percentile in (0, 100].
    income_percentile = 100 * df[w].cumsum() / df[w].sum()
    # Map each unit's percentile to the rate of its incidence bracket.
    tu_incidence = incidence.iloc[
        pd.cut(
            income_percentile,
            # Add a right endpoint. Should be 100 but sometimes a decimal
            # gets added.
            bins=incidence.index.tolist() + [101],
            labels=False,
        )
    ].values
    # Liability is the bracket rate applied to base income, floored at 0.
    df[name] = np.maximum(0, tu_incidence * df[base_income])
    if total is not None:
        # Rescale so the aggregate weighted liability matches `total`.
        initial_total = mdf.weighted_sum(df, name, "s006")
        if verbose:
            print(
                "Multiplying tax by "
                + str(round(total / initial_total, 2))
                + "."
            )
        df[name] *= total / initial_total
def printer(arg1):
    """Return a closure that prints ``arg1`` three times.

    Even though ``times`` goes out of scope once ``printer`` returns,
    the returned function still remembers it — and ``arg1`` — through
    its closure.
    """
    times = 3

    def emit():
        for _ in range(times):
            print(arg1)

    return emit
def from_get_proxy():
    """Scrape proxies from "http://www.getproxy.jp".

    Pages for the CN and US areas are fetched in turn. An empty result is
    retried after a pause (the API appears to be rate limited — TODO
    confirm); after several consecutive empty responses scraping stops.

    :return: list of proxy strings taken from the API's ``<ip>`` elements.
    """
    base = 'http://www.getproxy.jp/proxyapi?' \
           'ApiKey=659eb61dd7a5fc509bef01f2e8b15669dfdb0f54' \
           '&area={:s}&sort=requesttime&orderby=asc&page={:d}'
    urls = [base.format('CN', i) for i in range(1, 25)]
    urls += [base.format('US', i) for i in range(1, 25)]
    urls += [base.format('CN', i) for i in range(25, 50)]
    urls += [base.format('US', i) for i in range(25, 50)]
    proxies = []
    i = 0
    retry = 0
    length = len(urls)
    while i < length:
        res = _safe_http(urls[i])
        try:
            soup = BeautifulSoup(res, 'lxml')
        except Exception:
            # Unparseable response: skip this page. (Was a bare except,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            i += 1
            continue
        data = soup.find_all('ip')
        if len(data) == 0:
            retry += 1
            if retry == 4:
                # Too many consecutive empty pages; give up.
                break
            else:
                # Wait before retrying the same page.
                sleep(62)
        else:
            retry = 0
            proxies += [pro.text for pro in data]
            i += 1
    return proxies
def KORL(a, kappa=None):
    """Compute the k-ary OR of ``a`` with logarithmic recursion depth.

    :param a: non-empty sequence of bit-valued elements supporting
        ``+``, ``-`` and ``bit_and``.
    :param kappa: security parameter, threaded through the recursion.
    :return: a single element equal to the OR of all inputs.
    """
    if len(a) == 1:
        return a[0]
    mid = len(a) // 2
    left = KORL(a[:mid], kappa)
    right = KORL(a[mid:], kappa)
    # For bits: a OR b == a + b - (a AND b).
    return left + right - left.bit_and(right)
def verify(token):
    """Verifies a JWS token, returning the parsed token if the token has a
    valid signature by the key provided by the key of the OpenID
    Connect server stated in the ISS claim of the token. If the
    signature does not match that key, None is returned.
    """
    # The issuer is read from *unverified* claims only to locate the JWKS
    # endpoint; trust comes solely from the signature check below.
    unverified_token_data = json.loads(jose.jws.get_unverified_claims(token))
    jwks_uri = requests.get("%s/.well-known/openid-configuration" % unverified_token_data["iss"]).json()["jwks_uri"]
    keys = requests.get(jwks_uri).json()["keys"]
    for key in keys:
        try:
            verified_token_data = json.loads(
                jose.jws.verify(token, key, [key["alg"]]))
        except Exception:
            # Signature check failed with this key; try the next one.
            # (Was a bare except, which also swallowed KeyboardInterrupt.)
            pass
        else:
            return verified_token_data
    return None
def get_listing_panel(tool, ghidra):
    """Return the code listing UI element.

    Used to read up-to-date location/highlight/selection state from the
    Ghidra code viewer.
    """
    viewer_service = tool.getService(ghidra.app.services.CodeViewerService)
    return viewer_service.getListingPanel()
def template_data(environment, template_name="report_html.tpl", **kwds):
    """Build an arbitrary templated page.

    :param environment: mapping of template variables.
    :param template_name: name of the template loaded from the module-level
        ``env``.
    :param kwds: extra template variables; entries here override duplicates
        from ``environment``.
    :return: the rendered template.
    """
    template = env.get_template(template_name)
    # Bug fix: kwds were previously accepted but silently ignored.
    return template.render(**environment, **kwds)
def resnet152_ibn_a(**kwargs):
    """Construct a ResNet-152-IBN-a model.

    :param kwargs: forwarded unchanged to ``ResNet_IBN``.
    :return: the constructed model.
    """
    return ResNet_IBN(block=Bottleneck_IBN,
                      layers=[3, 8, 36, 3],
                      ibn_cfg=('a', 'a', 'a', None),
                      **kwargs)
def test_update(session):
    """Assert that updating a user persists the new phone number."""
    user = create_user(session)
    new_phone = '123456897'
    user.update({'phone': new_phone})
    # Re-fetch by OAuth id to confirm the change was stored.
    fetched = user.find_by_oauth_id(user.oauth_id)
    assert fetched.phone == new_phone
def calibratePose(pts3, pts2, cam, params_init):
    """
    Calibrates the camera by updating R,t so that pts3 projects as close
    as possible to pts2.

    :param pts3: Coordinates of N points stored in an array of shape (3,N)
    :param pts2: Coordinates of N points stored in an array of shape (2,N)
    :param cam: Initial estimate of camera
    :param params_init: Initial R,t parameter vector for the optimizer
    :return: Refined estimate of camera with updated R,t parameters
    """
    def objective(rt):
        # Reprojection residuals for a candidate R,t vector.
        return residuals(pts3, pts2, cam, rt)

    best_params = scipy.optimize.leastsq(objective, params_init)[0]
    cam.update_extrinsics(best_params)
    return cam
def test_wait_for_task_interval_custom(index_with_documents, small_movies):
    """Tests call to wait for an update with custom interval."""
    index = index_with_documents()
    response = index.add_documents(small_movies)
    assert 'uid' in response
    started = datetime.now()
    result = index.wait_for_task(
        response['uid'],
        interval_in_ms=1000,
        timeout_in_ms=6000
    )
    elapsed = datetime.now() - started
    # The task must have finished (neither queued nor in progress) ...
    assert isinstance(result, dict)
    assert 'status' in result
    assert result['status'] != 'enqueued'
    assert result['status'] != 'processing'
    # ... and the custom 1s polling interval implies at least one second passed.
    assert elapsed.seconds >= 1
def image_to_base64(file_image):
    """Convert an image file on disk (e.g. PNG) to its Base64 form.

    # Arguments
        file_image - Required : Path of the image file (String)
    # Returns
        built_base64 - Required : Base64-encoded file content (bytes),
                                  or None if reading/encoding failed
    """
    # Value returned on failure.
    built_base64 = None
    try:
        # Bug fix: the file handle was previously never closed; a context
        # manager guarantees it is released even on error.
        with open(file_image, 'rb') as image_handle:
            built_base64 = base64.b64encode(image_handle.read())
    except Exception as ex:
        execute_log.error("ERRO NA FUNÇÃO {} - {}".format(stack()[0][3], ex))
    return built_base64
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.