content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def vec_moderates(vec, minv, maxv, inclusive=1):
    """Return status and a 0/1 mask of which values lie inside [minv, maxv].

    If `inclusive` is truthy, values equal to a bound also count as inside;
    otherwise the bounds are exclusive.

    Returns:
        success: 0, list of ints (1 = inside bounds, 0 = outside)
        error  : 1, None  (empty/None input, or minv > maxv)
    """
    if not vec:
        return 1, None
    if minv > maxv:
        # Bad bounds: report and fail. BUG FIX: the original used a
        # Python 2 print statement, a SyntaxError on Python 3.
        print('** moderates: minv > maxv (', minv, maxv, ')')
        return 1, None
    if inclusive:
        elist = [1 * (minv <= val <= maxv) for val in vec]
    else:
        elist = [1 * (minv < val < maxv) for val in vec]
    return 0, elist
def SideInfo(version_index, channel_mode, raw_data=None):
    """SideInfo(version_index, channel_mode, raw_data) -> object

    Build an object representing MPEG layer 3 side info from the given
    parameters. The concrete class depends on the MPEG version (LSF or
    not) and on whether the stream is mono, since only the applicable
    fields are present and field sizes vary.
    """
    is_lsf = version_index != 3    # low-sampling-frequency extension?
    is_mono = channel_mode == 3    # single-channel mode?
    return _si_classes[is_lsf][is_mono](raw_data)
def datetimeConvertor(date, month, year, time, timezone):
    """
    Converts raw date/time fields into a timezone-aware datetime object.
    """
    date_part = "/".join((date, monthnumberSwap(month), year))
    time_part = " ".join((time, timezone))
    stamp = date_part + " " + time_part
    return dt.datetime.strptime(stamp, "%d/%m/%Y %H:%M:%S %z")
import pathlib
def dump_gone(aspect_store: dict, indent=False) -> bool:
    """Not too dry ... serialize the aspect store into gone.json."""
    target = pathlib.Path('gone.json')
    return _dump(aspect_store, target, indent)
def loss_fn(params, model, data):
    """Mean-squared-error loss.

    Keep this exact signature: this is the function that gets
    differentiated, so `params` must appear among its inputs. The model
    is assumed to handle batching internally, so no extra vectorization
    is needed here.

    Args:
        params: pytree of trainable parameters
        model: callable ``model(params, x)`` producing predictions
        data: tuple of training data ``(x_train, y_train)``
    """
    inputs, targets = data
    residual = model(params, inputs) - targets
    return jnp.mean(residual ** 2)
import copy
def lufact(A):
    """
    lufact(A)

    Compute the LU factorization of square matrix A via Gaussian
    elimination without pivoting, returning the factors.

    Args:
        A: (n, n) array-like; no pivoting is performed, so leading
           principal minors must be nonzero (a zero pivot yields inf/nan).

    Returns:
        L: unit lower-triangular (n, n) ndarray
        U: upper-triangular (n, n) ndarray, such that L @ U == A
    """
    import numpy as np  # local import: original relied on names from elsewhere

    A = np.asarray(A, dtype=float)
    n = A.shape[0]
    L = np.eye(n)  # puts ones on diagonal
    # BUG FIX: the original did `U = copy(A)` after `import copy`, which
    # calls the module object (TypeError). Take a real array copy instead.
    U = A.copy()
    # Gaussian elimination
    for j in range(n - 1):
        for i in range(j + 1, n):
            L[i, j] = U[i, j] / U[j, j]  # row multiplier
            U[i, j:] = U[i, j:] - L[i, j] * U[j, j:]
    return L, np.triu(U)
def get_city(msg):
    """Extract the first place name mentioned in a message.

    Segments the text with part-of-speech tagging and returns the first
    token tagged 'ns' (place name), or None when no place name occurs.
    """
    for token in posseg.lcut(msg):
        # Each token is a pair exposing .word and .flag (word / POS tag);
        # the 'ns' tag marks geographic names.
        if token.flag == 'ns':
            return token.word
    return None
from IPython.display import SVG
import os
def render_model(cobra_model, background_template=None, custom_css=None,
                 figure_id=None, hide_unused=None, hide_unused_cofactors=None,
                 inactive_alpha=1., figsize=None, label=None, fontsize=None,
                 default_flux_width=2.5, flux_dict=None, metabolite_dict=None,
                 svg_scale=100, flowLayout=False):
    """ Render a cobra.Model object in the current window
    Parameters:
    background_template:
        filename for an SVG to render behind the flux figure. Useful for
        compartments or layout guides.
    custom_css:
        Additional CSS to embed in the figure. Use HTML inspector to show
        labels and classes applied to reactions and metabolites.
    figure_id:
        Each figure in the page requires a unique ID, which can be passed or
        generated automatically.
    hide_unused:
        whether or not to show metabolites and reactions with zero flux.
    hide_unused_cofactors:
        similar to hide_unused, but only hide cofactor nodes for reactions with
        0 flux.
    inactive_alpha:
        Alpha value with which to color reactions and nodes without any carried
        flux. Defaults to 1.
    figsize:
        size, in pixels, of the generated SVG window. Defaults to 1024x768.
    fontsize:
        text size, in pt. Defaults to 12
    default_flux_width:
        If reaction fluxes are missing, the default thickness to use for
        connecting arrows.
    flux_dict:
        A dictionary-like object containing the desired fluxes for each
        reaction in the model
    metabolite_dict:
        A dictionary-like object containing the desired carried fluxes for each
        metabolite in the model
    svg_scale:
        scale factor passed through to the JS template. Defaults to 100.
    flowLayout:
        passed through to the JS template to select the flow layout.
    label:
        NOTE(review): appears unused in this function body — confirm intent.
    """
    # Increment figure counter
    # Get figure name and JSON string for the cobra model
    if not figure_id:
        render_model._fignum += 1
        figure_id = 'd3flux{:0>3d}'.format(render_model._fignum)
    if not figsize:
        # NOTE(review): docstring says 1024x768 but the default here is 1028.
        figsize = (1028, 768)
    modeljson = create_model_json(cobra_model, flux_dict, metabolite_dict)
    # Serialize the boolean flags as lowercase strings for the JS template.
    if not hide_unused:
        hide_unused = "false"
    else:
        hide_unused = "true"
    if not hide_unused_cofactors:
        hide_unused_cofactors = "false"
    else:
        hide_unused_cofactors = "true"
    # Handle custom CSS
    if not custom_css:
        custom_css = ''
    if not fontsize:
        fontsize = 12
    # Handle background template
    if not background_template:
        background_svg = ''
        no_background = "true"
    else:
        background_svg = SVG(background_template).data
        no_background = "false"
    # Initialize the jinja templates (shipped in the d3flux package)
    env = Environment(loader=FileSystemLoader(
        os.path.join(os.path.dirname(d3flux.__file__), 'templates')))
    template_css = env.get_template('network_style.css')
    template_html = env.get_template('output_template.html')
    template_js = env.get_template('d3flux.js')
    # Render the jinja templates with the given variables
    css = template_css.render(inactive_alpha=inactive_alpha,
                              fontsize=fontsize, cf_fontsize=0.8 * fontsize)
    js = template_js.render(figure_id=figure_id, modeljson=modeljson,
                            no_background=no_background,
                            hide_unused=hide_unused,
                            hide_unused_cofactors=hide_unused_cofactors,
                            figwidth=figsize[0], figheight=figsize[1],
                            css=compress(css + custom_css),
                            default_flux_width=default_flux_width,
                            svg_scale=svg_scale, flowLayout=flowLayout)
    html = template_html.render(figure_id=figure_id,
                                background_svg=background_svg,
                                javascript_source=js)
    # compile and return HTML
    return HTML(html)
import logging
def handle_config_defaults(config, num_params_fn):
    """Resolve dependencies within `config`.

    In particular, set hidden_size (if -1) according to num_params and make the
    embedding sizes default to the hidden size. Also, handle budgeting: if
    hidden_size is not provided (it is -1), but num_params is, then compute the
    largest possible hidden_size with which the total number of trainable
    parameters does not exceed num_params.

    Args:
      config: The base config. Must have num_params set.
      num_params_fn: A function of one argument a config object. The config passed
        to it is constructed by setting the hidden_size and performing the usual
        defaulting.

    Returns:
      The mutated config.
    """
    # TODO(melisgl): Move this to the tuner code.
    # For ease of specification, tuning ranges are weird. Let's fix them up here.
    if config.sparsity_ratio >= 1.0:
        config.sparsity_ratio = -1.0
    if config.input_embedding_ratio >= 1.0:
        config.input_embedding_ratio = 1.0
    if config.output_embedding_ratio >= 1.0:
        config.output_embedding_ratio = 1.0
    if config.output_embedding_ratio < 0.0:
        # Negative means "default to the input embedding ratio".
        config.output_embedding_ratio = config.input_embedding_ratio
    if config.learning_rate_decay > 1.0:
        config.learning_rate_decay = 1.0
    if config.feature_mask_rank < 0:
        config.feature_mask_rank = 0
    if config.inter_layer_dropout < 0.0:
        config.inter_layer_dropout = config.input_dropout
    if config.downprojected_output_dropout < 0.0:
        config.downprojected_output_dropout = config.output_dropout
    # Handle deprecated feature_mask flag.
    if config.feature_mask:
        config.feature_mask_rounds = 2
        config.feature_mask_rank = 0
    # Handle the num_param budget.
    if config.hidden_size in [-1, [-1]]:
        assert config.num_params > -1, (
            'Neither hidden_size nor num_params is specified.')
        config.hidden_size = [_budget_hidden_size(config, num_params_fn)]
    config = _handle_hidden_size_defaults(config)
    # Perform some sanity checks.
    if config.output_embedding_size > config.hidden_size[-1]:
        # BUG FIX: logging.warn is deprecated (removed in Python 3.13);
        # use logging.warning.
        logging.warning('output_embedding_size %s is greater than '
                        'the hidden size %s', config.output_embedding_size,
                        config.hidden_size[-1])
    if config.share_input_and_output_embeddings:
        assert config.input_embedding_size == config.output_embedding_size
    return config
def get_cycle_time(string):
    """Return the cycle-time text found in `string`, or None if absent."""
    # Group 1 of the pattern holds the cycle-time text.
    match_text = _search_in_pattern(string, CYCLE_TIME_PATTERN, 1)
    return match_text
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # Tear down every platform that was set up for this entry.
    unloaded = await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    )
    # Drop this entry's stored data; remove the domain bucket once empty.
    domain_data = hass.data[DOMAIN]
    domain_data.pop(config_entry.entry_id, None)
    if not domain_data:
        hass.data.pop(DOMAIN)
    return unloaded
import os
def showFileOrFolder(pathUrl):
    """
    Send the binary content of a file on the X drive; for directories or
    missing paths, return a plain-text description instead.
    """
    pathLocal = abfBrowse.getLocalPath("X/"+pathUrl)
    print(" serving", os.path.basename(pathLocal))
    if os.path.isfile(pathLocal):
        return flask.send_file(pathLocal)
    if os.path.isdir(pathLocal):
        return f"directory index of [{pathLocal}]"
    return f"ERROR: path does not exist [{pathLocal}]"
from typing import Union
def get_scale_factor(unit: Union[str, float]) -> float:
    """
    Get how many pts are in a unit.

    :param unit: A unit name accepted by fpdf.FPDF, or a numeric
        pt-multiplier which is returned unchanged (as float)
    :return: The number of points in that unit
    :raises ValueError: if the unit name is unknown
        (DOC FIX: the original docstring claimed FPDFException,
        but the code raises ValueError)
    """
    # Numeric input is already a points multiplier.
    if isinstance(unit, (int, float)):
        return float(unit)
    k = FPDF_UNITS.get(unit)
    if k is None:
        raise ValueError(f"Unit does not exist: {unit}")
    return k
def r2_score(y_true, y_pred):
    """Calculate the coefficient of determination (R^2)."""
    assert len(y_true) == len(y_pred)
    residual_ss = sum_square_residuals(y_true, y_pred)
    total_ss = total_sum_squares(y_true)
    return 1 - residual_ss / total_ss
def give_name(fname):
    """
    Return `fname` normalized to a .csv filename.

    Auxiliary/hidden files (those starting with AUX_FILE) are returned
    unchanged; otherwise any existing extension is replaced with '.csv'.
    """
    if fname[:len(AUX_FILE)] == AUX_FILE:  # hidden/auxiliary file: keep as-is
        return fname
    if fname[-4:] == '.csv':
        # BUG FIX: the original appended another '.csv' here, turning an
        # already-correct 'name.csv' into 'name.csv.csv'.
        return fname
    # Renaming with correct extension: replace anything after the first
    # dot, or append the extension when there is no dot at all.
    dot = fname.find('.')
    if dot > -1:
        return fname[:dot] + '.csv'
    return fname + '.csv'
def categorize(document):
    """Categorize a document.

    Assigns one of [business, entertainment, politics, sport, tech].
    Takes a string object as input and returns the category name string.
    """
    cleaned = clean(document)
    embedding = doc2vec_model.infer_vector(cleaned.split(' '))
    label_index = svm.predict(embedding.reshape(1, -1))[0]
    return news_categories[label_index]
def delete_intent(token, aiid, intent_name):
    """Delete an Intent"""
    # NOTE(review): the path looks like a template that fetch_api fills in
    # from the keyword arguments — confirm against fetch_api's contract.
    path = '/intent/{aiid}?intent_name={intent_name}'
    return fetch_api(
        path,
        token=token,
        aiid=aiid,
        intent_name=intent_name,
        method='delete',
    )
def report_all(df_select):
    """
    Build a report of past similar events from the selected media rows,
    using the fixed template header, or a short notice when empty.
    """
    # No matching rows: short notice (original skipped the loop anyway).
    if len(df_select) == 0:
        return 'No similar events were reported on in online media'
    # Local renamed from `report_all`, which shadowed the function itself.
    text = """
    Similar events were reported on in online media.
    Below we provide a tabulated set of impacts from these media sources. We also
    provide the links to these media sources so that you can read more about these
    past events. You can use these reports to have a better understanding what may
    happen with the forecast event by TMA.
    ==============================================================================
    """
    for _, row in df_select.iterrows():
        text += fill_report(row)
    return text
import urllib
def get_filename_from_headers(response):
    """Extract filename from content-disposition headers if available."""
    disposition = response.headers.get("content-disposition", None)
    if not disposition:
        return None
    parts = [p.strip() for p in disposition.split(";")]
    # Prefer the RFC 5987 extended form: filename*=<enc>'<lang>'<pct-encoded>
    extended = next((p for p in parts if p.lower().startswith("filename*=")), None)
    if extended:
        value = extended.split("=", 1)[1].strip()
        encoding, _, value = value.split("'")
        return urllib.parse.unquote(value, encoding, errors="strict")
    plain = next((p for p in parts if p.lower().startswith("filename=")), None)
    if not plain:
        return None
    filename = plain.split("=", 1)[1].strip()
    # Strip surrounding double quotes, if any.
    if filename.startswith('"'):
        filename = filename[1:-1]
    return filename
def names(as_object=False, p5_connection=None):
    """
    Syntax: ArchiveIndex names
    Description: Returns the list of names of archive indexes.
    Return Values:
    -On Success: a list of names. If no archive indexes are configured,
    the command returns the string "<empty>"
    """
    result = exec_nsdchat([module_name, "names"], p5_connection)
    if as_object:
        # Wrap the raw names into ArchiveIndex resource objects.
        return resourcelist(result, ArchiveIndex, p5_connection)
    return result
def regionvit_base_w14_224(pretrained=False, progress=True, **kwargs):
    """
    Constructs the RegionViT-Base-w14-224 model.

    RegionViT-Base-w14-224 model from `"RegionViT: Regional-to-Local Attention
    for Vision Transformers" <https://arxiv.org/pdf/2106.02689.pdf>`_.
    The required input size of the model is 224x224.

    Args:
        pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
        progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
    """
    arch = "regionvit_base_w14"
    return _regionvit(arch, pretrained, progress, **kwargs)
import collections
def create_executor_list(suites):
    """
    Look up which other resmoke suites run the tests in `suites`.
    Returns a dict keyed by suite name / executor whose values are the
    tests to run under that executor.
    """
    test_membership = resmokelib.parser.create_test_membership_map()
    memberships = collections.defaultdict(list)
    for suite in suites:
        for group in suite.test_groups:
            for test in group.tests:
                # Record this test under every executor that runs it.
                for executor in test_membership[test]:
                    memberships[executor].append(test)
    return memberships
def calculate_great_circle(args):
    """One step of the great-circle distance calculation.

    Args:
        args: tuple (lon1, lat1, lon2, lat2) in degrees.

    Returns:
        Great-circle distance in miles (Earth radius taken as 3956 mi).
    """
    lon1, lat1, lon2, lat2 = args
    earth_radius = 3956.0
    deg2rad = np.pi / 180.0
    # Work with colatitudes (angle from the pole) in radians.
    colat1 = (90.0 - lat1) * deg2rad
    colat2 = (90.0 - lat2) * deg2rad
    dlon = (lon2 - lon1) * deg2rad
    # Spherical law of cosines for the central angle.
    central_angle = np.arccos(
        np.cos(colat1) * np.cos(colat2)
        + np.sin(colat1) * np.sin(colat2) * np.cos(dlon)
    )
    return earth_radius * central_angle
def divide(x, y):
    """Return x divided by y (true division)."""
    return x / y
def execute(compile_state: CompileState, string: StringResource) -> NullResource:
    """Executes the string at runtime and returns Null."""
    # Emit the command into the IR stream; the result carries no value.
    command = CommandNode(string.static_value)
    compile_state.ir.append(command)
    return NullResource()
import torch
def fakeLabels(lth):
    """Generate `lth` random one-hot label vectors of length c_dims.

    lth (int): no of labels required
    """
    label = torch.tensor([])
    for _ in range(lth):
        # One-hot row: a single 1 shuffled to a random position.
        one_hot = np.zeros(c_dims)
        one_hot[0] = 1
        np.random.shuffle(one_hot)
        row = torch.tensor(one_hot).float().unsqueeze(0)
        label = torch.cat((label, row), dim=0)
    return label
def load_canadian_senators(**kwargs):
    """
    A history of Canadian senators in office. Size: (933, 10).

    Columns include the senator's name, political affiliation at
    appointment, province/territory, appointing prime minister, the raw
    term text (yyyy.mm.dd), parsed start_date/end_date, the reason the
    term ended (e.g. Death), term length in days (diff_days), and an
    `observed` flag.
    """
    return _load_dataset("canadian_senators.csv", **kwargs)
def _get_md5(filename):
"""Return the MD5 checksum of the passed file"""
data = open(filename, "rb").read()
r = md5(data)
return r.hexdigest() | c86943841a1f8f8e296d82818c668c197f824373 | 32,327 |
def implements(numpy_func_string, func_type):
    """Register an __array_function__/__array_ufunc__ implementation for Quantity
    objects.

    func_type must be "function" or "ufunc"; anything else raises
    ValueError when the returned decorator is applied.
    """
    def register(func):
        if func_type == "ufunc":
            HANDLED_UFUNCS[numpy_func_string] = func
        elif func_type == "function":
            HANDLED_FUNCTIONS[numpy_func_string] = func
        else:
            raise ValueError(f"Invalid func_type {func_type}")
        return func
    return register
def r2(data1, data2):
    """Return the r-squared difference between data1 and data2.

    Parameters
    ----------
    data1 : 1D array
    data2 : 1D array

    Returns
    -------
    output: scalar (float)
        difference in the input data
    """
    mean1 = sum(data1) / len(data1)
    # Residual and total sums of squares, indexed over data1's length.
    ss_res = sum((data1[i] - data2[i]) ** 2 for i in range(len(data1)))
    ss_tot = sum((data1[i] - mean1) ** 2 for i in range(len(data1)))
    return 1 - ss_res / ss_tot
from typing import IO
def fio_color_hist_fio(image_fio):
    """Generate a fileIO with the color histogram of an image fileIO

    :param image_fio: input image in fileIO format
    :type image_fio: fileIO
    :return: color histogram of the input image in fileIO format
    :rtype: fileIO
    """
    # BUG FIX: the original called IO.BytesIO() where IO is typing.IO,
    # which has no BytesIO attribute; use the io module instead.
    import io

    image_fio.seek(0)
    # Keep a byte backup because Image.open consumes the stream.
    bkp = fio_to_b(image_fio)
    img_pil = Image.open(image_fio).convert('RGB')
    r, g, b = img_pil.split()
    bins = list(range(256))
    # Plot one per-channel histogram curve, colored by channel.
    plt.plot(bins, r.histogram(), 'r')
    plt.plot(bins, g.histogram(), 'g')
    plt.plot(bins, b.histogram(), 'b')
    plt.xlabel('Pixel value')
    plt.ylabel('Frequency')
    plt.grid(True)
    out_img_fio = io.BytesIO()
    plt.savefig(out_img_fio)
    plt.close()
    out_img_fio.seek(0)
    # NOTE(review): this rebinds only the local name — the caller's stream
    # is not restored by this line; confirm the intended behavior.
    image_fio = b_to_fio(bkp)
    return out_img_fio
def store_list(request, user_id):
    """
    Verify user has the access to enlist store.
    """
    logger.debug('calling store.views.store_list()')
    context = {
        'menu': MenuService.new_user_menu(request.user),
        'page_title': 'Profile',
        'user_name': request.user.username,
    }
    return render_to_response('profile.html', context)
def lattice2d_fixed_env():
    """Lattice2DEnv built from the fixed sequence 'HHHH'."""
    return Lattice2DEnv('HHHH')
import time
import logging
def expected_full(
    clr,
    view_df=None,
    smooth_cis=False,
    aggregate_smoothed=False,
    smooth_sigma=0.1,
    aggregate_trans=False,
    expected_column_name="expected",
    ignore_diags=2,
    clr_weight_name='weight',
    chunksize=10_000_000,
    nproc=4,
):
    """
    Generate a DataFrame with expected for *all* 2D regions
    tiling entire heatmap in clr.
    Such 2D regions are defined as all pairwise combinations
    of the regions in view_df. Average distance decay is calculated
    for every cis-region (e.g. inter- and intra-arms), and
    a "simple" average over each block is caculated for trans-
    regions.
    When sub-chromosomal view is provided, trans averages
    can be aggregated back to the level of full chromosomes.
    Parameters
    ----------
    clr : cooler.Cooler
        Cooler object
    view_df : viewframe
        expected is calculated for all pairwise combinations of regions
        in view_df. Distance dependent expected is calculated for cis
        regions, and block-level average is calculated for trans regions.
    smooth_cis: bool
        Apply smoothing to cis-expected. Will be stored in an additional column
    aggregate_smoothed: bool
        When smoothing cis expected, average over all regions, ignored without smoothing.
    smooth_sigma: float
        Control smoothing with the standard deviation of the smoothing Gaussian kernel.
        Ignored without smoothing.
    aggregate_trans : bool
        Aggregate trans-expected at the inter-chromosomal level.
    expected_column_name : str
        Name of the column where to store combined expected
    ignore_diags : int, optional
        Number of intial diagonals to exclude for calculation of distance dependent
        expected.
    clr_weight_name : str or None
        Name of balancing weight column from the cooler to use.
        Use raw unbalanced data, when None.
    chunksize : int, optional
        Size of pixel table chunks to process
    nproc : int, optional
        How many processes to use for calculation
    Returns
    -------
    expected_df: pd.DataFrame
        cis and trans expected combined together
    """
    # contacs vs distance - i.e. intra/cis expected
    time_start = time.perf_counter()
    cvd = expected_cis(
        clr,
        view_df=view_df,
        intra_only=False, # get cvd for all 2D regions
        smooth=smooth_cis,
        smooth_sigma=smooth_sigma,
        aggregate_smoothed=aggregate_smoothed,
        clr_weight_name=clr_weight_name,
        ignore_diags=ignore_diags,
        chunksize=chunksize,
        nproc=nproc,
    )
    time_elapsed = time.perf_counter() - time_start
    logging.info(f"Done calculating cis expected in {time_elapsed:.3f} sec ...")
    # contacts per block - i.e. inter/trans expected
    time_start = time.perf_counter()
    cpb = expected_trans(
        clr,
        view_df=view_df,
        clr_weight_name=clr_weight_name,
        chunksize=chunksize,
        nproc=nproc,
    )
    # pretend that they also have a "dist"
    # to make them mergeable with cvd
    cpb["dist"] = 0
    time_elapsed = time.perf_counter() - time_start
    logging.info(f"Done calculating trans expected in {time_elapsed:.3f} sec ...")
    # annotate expected_df with the region index and chromosomes
    # (view_label maps region name -> original row index "r" and chrom)
    view_label = view_df \
        .reset_index() \
        .rename(columns={"index":"r"}) \
        .set_index("name")
    # which expected column to use, based on requested "modifications":
    cis_expected_name = "balanced.avg" if clr_weight_name else "count.avg"
    if smooth_cis:
        cis_expected_name = f"{cis_expected_name}.smoothed"
        if aggregate_smoothed:
            cis_expected_name = f"{cis_expected_name}.agg"
    # copy to the prescribed column for the final output:
    cvd[expected_column_name] = cvd[cis_expected_name].copy()
    # aggregate trans if requested and deide which trans-expected column to use:
    trans_expected_name = "balanced.avg" if clr_weight_name else "count.avg"
    if aggregate_trans:
        trans_expected_name = f"{trans_expected_name}.agg"
        additive_cols = ["n_valid","count.sum"]
        if clr_weight_name:
            additive_cols.append("balanced.sum")
        # groupby chrom1, chrom2 and aggregate additive fields (sums and n_valid):
        _cpb_agg = cpb.groupby(
            [
                view_label["chrom"].loc[cpb["region1"]].to_numpy(),  # chrom1
                view_label["chrom"].loc[cpb["region2"]].to_numpy(),  # chrom2
            ]
        )[additive_cols].transform("sum")
        # recalculate aggregated averages:
        cpb["count.avg.agg"] = _cpb_agg["count.sum"]/_cpb_agg["n_valid"]
        if clr_weight_name:
            cpb["balanced.avg.agg"] = _cpb_agg["balanced.sum"]/_cpb_agg["n_valid"]
    # copy to the prescribed column for the final output:
    cpb[expected_column_name] = cpb[trans_expected_name].copy()
    # concatenate cvd and cpb (cis and trans):
    expected_df = pd.concat([cvd, cpb], ignore_index=True)
    # add r1 r2 labels to the final dataframe for obs/exp merging
    expected_df["r1"] = view_label["r"].loc[expected_df["region1"]].to_numpy()
    expected_df["r2"] = view_label["r"].loc[expected_df["region2"]].to_numpy()
    # and return joined cis/trans expected in the same format
    logging.info(f"Returning combined expected DataFrame.")
    # consider purging unneccessary columns here
    return expected_df
def nmgy2abmag(flux, flux_ivar=None):
    """
    Convert fluxes in nanomaggies to AB magnitudes as used in the DECALS
    survey (m = 22.5 - 2.5*log10(flux)).

    flux      : array-like of fluxes in nanomaggies (len() must work)
    flux_ivar : inverse variance of DECAM_FLUX (1/nanomaggies^2), optional

    Returns `mag` when flux_ivar is None, otherwise `(mag, err)`.
    NOTE(review): callers must handle both return shapes — confirm this
    asymmetry is intentional.
    """
    lenf = len(flux)
    if lenf > 1:
        ii = np.where(flux>0)
        # Sentinel magnitude 99.99 for non-positive fluxes.
        mag = 99.99 + np.zeros_like(flux)
        mag[ii] = 22.5 - 2.5*np.log10(flux[ii])
    else:
        # Single-element path: no positivity guard here — a non-positive
        # flux produces a warning/inf from log10.
        mag = 22.5 - 2.5*np.log10(flux)
    if flux_ivar is None:
        return mag
    elif lenf>1:
        err = np.zeros_like(mag)
        # 1-sigma flux error from the inverse variance.
        df = np.sqrt(1./flux_ivar)
        err[ii] = mag_err(df[ii]/flux[ii], verbose=False)
    else:
        df = np.sqrt(1./flux_ivar)
        err = mag_err(df/flux, verbose=False)
    return mag,err
def rnn_decoder(dec_input, init_state, cell, infer, dnn_hidden_units, num_feat):
    """Decoder for RNN cell.

    Given list of LSTM hidden units and list of LSTM dropout output keep
    probabilities.

    Args:
        dec_input: List of tf.float64 current batch size by number of features
            matrix tensors input to the decoder.
        init_state: Initial state of the decoder cell. Final state from the
            encoder cell.
        cell: RNN cell (e.g. stacked LSTM) applied at each decoder timestep.
        infer: bool; when True the decoder feeds its own prediction (logits)
            back in as the next timestep's input instead of the provided one.
        dnn_hidden_units: list of ints, hidden-layer sizes of the DNN that
            maps a decoder output back to a num_feat-sized prediction.
        num_feat: int, number of output features per timestep.

    Returns:
        outputs: List of decoder outputs of length number of timesteps of tf.float64
            current batch size by number of features matrix tensors.
        state: Final cell state of the decoder.
    """
    # Create the decoder variable scope
    with tf.variable_scope("decoder"):
        # Load in our initial state from our encoder
        # Tuple of final encoder c_state and h_state of final encoder layer
        state = init_state
        # Create an empty list to store our hidden state output for every timestep
        outputs = []
        # Begin with no previous output
        previous_output = None
        # Loop over all of our dec_input which will be seq_len long
        for index, decoder_input in enumerate(dec_input):
            # If there has been a previous output, we will determine the next input
            if previous_output is not None:
                # Create the input layer to our DNN
                # shape = (cur_batch_size, lstm_hidden_units[-1])
                network = previous_output
                # Create our dnn variable scope
                with tf.variable_scope(name_or_scope="dnn", reuse=tf.AUTO_REUSE):
                    # Add hidden layers with the given number of units/neurons per layer
                    # shape = (cur_batch_size, dnn_hidden_units[i])
                    for units in dnn_hidden_units:
                        network = tf.layers.dense(
                            inputs=network,
                            units=units,
                            activation=tf.nn.relu)
                    # Connect final hidden layer to linear layer to get the logits
                    # shape = (cur_batch_size, num_feat)
                    logits = tf.layers.dense(
                        inputs=network,
                        units=num_feat,
                        activation=None)
                # If we are in inference then we will overwrite our next decoder_input
                # with the logits we just calculated. Otherwise, we leave the decoder
                # input input as it was from the enumerated list. We have to calculate
                # the logits even when not using them so that the correct DNN subgraph
                # will be generated here and after the encoder-decoder for both
                # training and inference
                if infer:
                    # shape = (cur_batch_size, num_feat)
                    decoder_input = logits
            # If this isn"t our first time through the loop, just reuse(share) the
            # same variables for each iteration within the current variable scope
            if index > 0:
                tf.get_variable_scope().reuse_variables()
            # Run the decoder input through the decoder stack picking up from the
            # previous state
            # output_shape = (cur_batch_size, lstm_hidden_units[-1])
            # state_shape = # tuple of final decoder c_state and h_state
            output, state = cell(decoder_input, state)
            # Append the current decoder hidden state output to the outputs list
            # List seq_len long of shape = (cur_batch_size, lstm_hidden_units[-1])
            outputs.append(output)
            # Set the previous output to the output just calculated
            # shape = (cur_batch_size, lstm_hidden_units[-1])
            previous_output = output
    return outputs, state
import torch
def l1_loss(pre, gt):
    """Mean absolute error (L1) between prediction and ground truth."""
    return torch.nn.functional.l1_loss(pre, gt)
def run_tnscope(align_bams, items, ref_file, assoc_files,
                region=None, out_file=None):
    """Call variants with Sentieon's TNscope somatic caller.

    Args:
        align_bams: tumor/normal BAM paths (tumor must be paired with a
            normal; asserted below).
        items: bcbio sample dictionaries.
        ref_file: reference FASTA.
        assoc_files: associated resources (may provide "dbsnp").
        region: optional region restriction for calling.
        out_file: optional output VCF path (derived from the first BAM
            when not given).

    Returns:
        Path to the (possibly pre-existing) output VCF.
    """
    if out_file is None:
        out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
    if not utils.file_exists(out_file):
        variant_regions = bedutils.merge_overlaps(dd.get_variant_regions(items[0]), items[0])
        interval = _get_interval(variant_regions, region, out_file, items)
        with file_transaction(items[0], out_file) as tx_out_file:
            paired = vcfutils.get_paired_bams(align_bams, items)
            assert paired and paired.normal_bam, "Require normal BAM for Sentieon TNscope"
            dbsnp = "--dbsnp %s" % (assoc_files.get("dbsnp")) if "dbsnp" in assoc_files else ""
            license = license_export(items[0])
            cmd = ("{license}sentieon driver -t 1 -r {ref_file} "
                   "-i {paired.tumor_bam} -i {paired.normal_bam} {interval} "
                   "--algo TNscope "
                   "--tumor_sample {paired.tumor_name} --normal_sample {paired.normal_name} "
                   "{dbsnp} {tx_out_file}")
            # BUG FIX: the log description said "Sentieon TNhaplotyper"
            # (copy-paste from the sibling caller) although this runs TNscope.
            do.run(cmd.format(**locals()), "Sentieon TNscope")
    return out_file
def get_product(product_id):
    """
    Read a single Product

    This endpoint will return a product based on it's id, raising
    NotFound when no such product exists.
    """
    app.logger.info("Request for product with id: %s", product_id)
    product = Product.find(product_id)
    if not product:
        message = "product with id '{}' was not found.".format(product_id)
        raise NotFound(message)
    return make_response(jsonify(product.serialize()), status.HTTP_200_OK)
import re
import string
def aips_bintable_fortran_fields_to_dtype_conversion(aips_type):
    """Given AIPS fortran format of binary table (BT) fields, returns
    corresponding numpy dtype format and shape. Examples:
    4J => array of 4 32bit integers,
    E(4,32) => two dimensional array with 4 columns and 32 rows.

    Returns:
        (dtype_char, shape): numpy dtype string and an int repeat count or
        a tuple shape for parenthesised multi-dimensional specs.

    Raises:
        Exception: when no known format letter occurs in `aips_type`.
    """
    format_dict = {'L': 'bool', 'I': '>i2', 'J': '>i4', 'A': 'S', 'E': '>f4',
                   'D': '>f8'}
    # Pick the format letter present in the spec (last match wins, as in
    # the original loop, which did not break).
    aips_char = None
    for key in format_dict:
        if key in aips_type:
            aips_char = key
    if not aips_char:
        raise Exception("aips data format reading problem " + str(aips_type))
    dtype_char = format_dict[aips_char]
    try:
        repeat = int(re.search(r"^(\d+)" + aips_char,
                               aips_type).groups()[0])
        # Strings fold the repeat count into the dtype (e.g. "5A" -> "5S").
        # BUG FIX: original used `aips_char is 'A'` — identity comparison
        # with a literal; use equality.
        if aips_char == 'A':
            dtype_char = str(repeat) + dtype_char
            repeat = 1
    except AttributeError:
        # No leading count: either a bare letter or a parenthesised shape.
        repeat = None
    if repeat is None:
        # Parenthesised multi-dimensional spec like E(4,32).
        # BUG FIX: original used string.split(...), removed in Python 3.
        dims = re.search(r"^" + aips_char + r"\((.+)\)$",
                         aips_type).groups()[0]
        _shape = tuple(int(d) for d in dims.split(','))
    else:
        _shape = repeat
    return dtype_char, _shape
def account_list():
    """Fetch the account list and return it as a JSON response."""
    accounts = query_account_list(db)
    if accounts:
        payload = {"status": True, "data": accounts}
    else:
        # Empty list: report failure with an explanatory message.
        payload = {"status": False, "data": "账户列表为空"}
    return jsonify(payload)
def sample_points_in_range(min_range, max_range, origin, directions, n_points):
    """Sample uniformly depth planes in a depth range set to [min_range,
    max_range]

    Arguments
    ---------
    min_range: int, The minimum depth range
    max_range: int, The maximum depth range
    origin: tensor(shape=(4, 1), float32), The origin of the rays
    directions: tensor(shape=(4, N), float32), The direction vectors defining
        the rays
    n_points: int, The number of points to be sampled

    Returns
    -------
    tensor(shape=(4, N, n_points), float32): sampled points along each ray.
    """
    # How many rays do we have?
    N = K.shape(directions)[1]
    # Normalize each direction column to unit length (rebinds the local
    # name to a new tensor).
    directions /= K.sqrt(K.sum(directions**2, axis=0))
    # Sample points uniformly on the ray in the bbox:
    # per ray i, origin + dir_i * linspace(min_range, max_range, n_points).
    points = K.map_fn(
        lambda i: origin + directions[:, i:i+1] * K.tf.linspace(min_range, max_range, n_points),
        K.tf.range(N),
        dtype="float32"
    )
    # map_fn yields shape (N, 4, n_points); reorder to (4, N, n_points).
    return K.permute_dimensions(
        K.reshape(points, (N, 4, n_points)),
        (1, 0, 2)
    )
from typing import Counter
def normalize(vectorOrCounter):
    """
    normalize a vector or counter by dividing each value by the sum of all values
    """
    normalizedCounter = Counter()
    if type(vectorOrCounter) == type(normalizedCounter):
        # Counter input: scale each entry by the total count.
        # NOTE(review): totalCount() is a custom Counter method defined
        # elsewhere — confirm against that class.
        counter = vectorOrCounter
        total = float(counter.totalCount())
        if total == 0:
            return counter
        for key in counter.keys():
            normalizedCounter[key] = counter[key] / total
        return normalizedCounter
    # Plain sequence input: divide each element by the sum.
    vector = vectorOrCounter
    total = float(sum(vector))
    if total == 0:
        return vector
    return [element / total for element in vector]
def IsStringInt(string_to_check):
    """Checks whether or not the given string can be converted to an int."""
    try:
        int(string_to_check)
    except ValueError:
        return False
    return True
import os
import codecs
def read(*parts):
    """
    Build an absolute path from *parts* and return the contents of the
    resulting file. Assume UTF-8 encoding.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    target = os.path.join(here, *parts)
    with codecs.open(target, 'rb', 'utf-8') as f:
        return f.read()
import torch
import math
def build_ewc_posterior(data_handlers, mnet, device, config, shared, logger,
                        writer, num_trained, task_id=None):
    """Build a normal posterior after having trained using EWC.

    The posterior is constructed as described in function :func:`test`:
    means come from the stored EWC weight buffers, and standard deviations
    from the inverse square root of the accumulated Fisher values (the EWC
    Fisher approximates the inverse variance).

    Args:
        (....): See docstring of function :func:`probailistic.train_vi.test`.
        num_trained (int): The number of output heads that already have been
            trained.
        task_id (int, optional): If training from scratch, only a specific head
            has been trained, that has to be specified via this argument.
    Note:
        If training from scratch, it is assumed that the correct
        ``mnet`` (corresponding to ``task_id``) has already been loaded
        to memory. This function will not load any checkpoints!
    Returns:
        ``(post_means, post_stds)`` lists (one tensor per parameter) in the
        multihead case, otherwise ``None``.
    """
    n = num_trained
    # Build posterior from Fisher approximations.
    is_regression = 'regression' in shared.experiment_type
    is_multihead = None
    if is_regression:
        is_multihead = config.multi_head
    else:
        # CL1 always uses a multihead; CL3 only with a split head.
        is_multihead = config.cl_scenario == 1 or \
            config.cl_scenario == 3 and config.split_head_cl3
    if is_multihead:
        post_means = [None] * len(mnet.internal_params)
        post_stds = [None] * len(mnet.internal_params)
        # Output units / weight masks per already-trained task head.
        out_inds = [pmutils.out_units_of_task(config, data_handlers[i], i,
            None) for i in range(n)]
        out_masks = [mnet.get_output_weight_mask(out_inds=out_inds[i], \
            device=device) for i in range(n)]
        for ii, mask in enumerate(out_masks[0]):
            pind = mnet.param_shapes_meta[ii]['index']
            # Buffer names of the stored EWC weights / Fisher values.
            buff_w_name, buff_f_name = ewc._ewc_buffer_names(None, pind, True)
            if mask is None: # Shared parameters.
                post_means[pind] = getattr(mnet, buff_w_name)
                # The hessian that is approximated in EWC is corresponds to the
                # inverse variance.
                post_stds[pind] = getattr(mnet, buff_f_name).pow(-.5)
            else:
                # Initialize head weights to prior.
                curr_m = torch.zeros_like(getattr(mnet, buff_w_name)).to(device)
                curr_s = torch.ones_like(getattr(mnet, buff_w_name)).\
                    to(device) * math.sqrt(config.prior_variance)
                # Update head weights for trained output heads.
                for jj, t_mask in enumerate(out_masks):
                    # Note, if we train from scratch, then also all previous
                    # output heads are not trained, thus we let those weights
                    # follow the prior.
                    if not config.train_from_scratch or jj == task_id:
                        m = t_mask[ii]
                        curr_m[m] = getattr(mnet, buff_w_name)[m]
                        curr_s[m] = getattr(mnet, buff_f_name)[m].pow(-.5)
                post_means[pind] = curr_m
                post_stds[pind] = curr_s
        # Quick and dirty solution. Note, that a Pytorch `Normal` object with
        # zero std will just return the mean.
        if hasattr(config, 'det_multi_head') and config.det_multi_head:
            post_stds = [torch.zeros_like(t) for t in post_stds]
        return post_means, post_stds
    # Non-multihead settings have no per-head posterior to assemble.
    return None
def get_roc_curve(y_gold_standard,y_predicted):
    """
    Computes the Receiver Operating Characteristic.

    Thin wrapper around ``roc_curve`` (imported elsewhere in this module;
    presumably ``sklearn.metrics.roc_curve`` -- TODO confirm), returning its
    result unchanged.

    Keyword arguments:
    y_gold_standard -- Expected labels.
    y_predicted -- Predicted labels
    """
    return roc_curve(y_gold_standard, y_predicted)
def get_image_unixtime2(ibs, gid_list):
    """ alias for get_image_unixtime_asfloat

    Delegates to ``ibs.get_image_unixtime_asfloat(gid_list)`` unchanged;
    ``ibs`` is the controller object and ``gid_list`` the image ids
    (presumably IBEIS image rowids -- TODO confirm against callers).
    """
    return ibs.get_image_unixtime_asfloat(gid_list)
def create_sql_query(mogrify, data_set_id, user_query):
    """
    Build a SQL query string plus a function that transforms the raw result
    rows into a list of dictionaries with the correct field names.

    >>> from tests.support.test_helpers import mock_mogrify
    >>> query, fn = create_sql_query(mock_mogrify, 'some-collection', Query.create())
    >>> query
    "SELECT record FROM mongo WHERE collection='some-collection'"
    >>> fn([({"foo":"bar"},)])
    [{'foo': 'bar'}]
    >>> query, fn = create_sql_query(mock_mogrify, 'some-collection', Query.create(group_by=['foo']))
    >>> query
    "SELECT count(*), record->'foo' FROM mongo WHERE collection='some-collection' AND record->'foo' IS NOT NULL GROUP BY record->'foo'"
    >>> fn([[123, 'some-foo-value'], [456, 'other-foo-value']])
    [{'_count': 123, 'foo': 'some-foo-value'}, {'_count': 456, 'foo': 'other-foo-value'}]
    """
    # Grouped and ungrouped queries need different SQL and row transforms.
    if not user_query.is_grouped:
        return _create_basic_sql_query(mogrify, data_set_id, user_query)
    return _create_grouped_sql_query(mogrify, data_set_id, user_query)
def patch_hass():
    """
    Patch the Hass API and returns a tuple of:
    - The patched functions (as Dict, keyed by function name)
    - A callback to un-patch all functions
    """
    class MockInfo:
        """Holds information about a function that will be mocked"""
        def __init__(self, object_to_patch, function_name, autospec=False):
            self.object_to_patch = object_to_patch
            self.function_name = function_name
            # Autospec will include `self` in the mock signature.
            # Useful if you want a sideeffect that modifies the actual object instance.
            self.autospec = autospec

    # Bug fix: the original list registered `run_at_sunrise` / `run_at_sunset`
    # (and the "Listener callback" comment block) twice, which double-patched
    # those functions.  Each function is now listed exactly once.
    actionable_functions_to_patch = [
        # Meta
        MockInfo(Hass, '__init__', autospec=True),  # Patch the __init__ method to skip Hass initialization

        # Logging
        MockInfo(Hass, 'log'),
        MockInfo(Hass, 'error'),

        # Scheduler callback registrations functions
        MockInfo(Hass, 'run_in'),
        MockInfo(Hass, 'run_once'),
        MockInfo(Hass, 'run_at'),
        MockInfo(Hass, 'run_daily'),
        MockInfo(Hass, 'run_hourly'),
        MockInfo(Hass, 'run_minutely'),
        MockInfo(Hass, 'run_every'),
        MockInfo(Hass, 'cancel_timer'),

        # Sunrise and sunset functions
        MockInfo(Hass, 'run_at_sunrise'),
        MockInfo(Hass, 'run_at_sunset'),

        # Listener callback registrations functions
        MockInfo(Hass, 'listen_event'),
        MockInfo(Hass, 'listen_state'),

        # State functions / attr
        MockInfo(Hass, 'set_state'),
        MockInfo(Hass, 'get_state'),
        MockInfo(Hass, 'time'),
        MockInfo(Hass, 'args'),  # Not a function, attribute. But same patching logic

        # Interactions functions
        MockInfo(Hass, 'call_service'),
        MockInfo(Hass, 'turn_on'),
        MockInfo(Hass, 'turn_off'),

        # Custom callback functions
        MockInfo(Hass, 'register_constraint'),
        MockInfo(Hass, 'now_is_between'),
        MockInfo(Hass, 'notify'),

        # Miscellaneous Helper Functions
        MockInfo(Hass, 'entity_exists')
    ]

    patches = []
    hass_functions = {}
    for mock_info in actionable_functions_to_patch:
        patch_function = mock.patch.object(mock_info.object_to_patch, mock_info.function_name, create=True,
                                           autospec=mock_info.autospec)
        patches.append(patch_function)
        patched_function = patch_function.start()
        patched_function.return_value = None
        hass_functions[mock_info.function_name] = patched_function

    def unpatch_callback():
        # Stop every patch we started, restoring the original Hass API.
        for patch in patches:
            patch.stop()

    _ensure_compatibility_with_previous_versions(hass_functions)
    _mock_logging(hass_functions)
    _mock_hass_init(hass_functions)

    return hass_functions, unpatch_callback
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
    """
    Compute c_v coherence for various number of topics

    Parameters:
    ----------
    dictionary : Gensim dictionary
    corpus : Gensim corpus (bag-of-words document-term matrix)
    texts : List of input texts
    limit : Max num of topics
    start : First topic count to try (default 2)
    step : Increment between successive topic counts (default 3)

    Returns:
    -------
    model_list : List of LDA topic models
    coherence_values : Coherence values corresponding to the LDA model with respective number of topics
    """
    coherence_values = []
    model_list = []
    for num_topics in range(start, limit, step):
        # Bug fix: train on the `corpus` argument.  The original referenced a
        # global `doc_term_matrix`, silently ignoring the corpus the caller
        # passed in (and raising NameError if no such global existed).
        model = gensim.models.ldamodel.LdaModel(corpus, num_topics = num_topics, random_state = 2, id2word = dictionary, iterations = 10)
        model_list.append(model)
        coherence_model = CoherenceModel(model = model ,texts = texts, dictionary = dictionary, coherence = 'c_v')
        coherence_values.append(coherence_model.get_coherence())
    return (model_list, coherence_values)
def parallax_angle(sc, **kw) -> DEG:
    """Compute the parallax angle from a skycoord.

    Parameters
    ----------
    sc: SkyCoord
        ** warning: check if skycoord frame centered on Earth

    Returns
    -------
    p: deg
        parallax angle
    """
    # Parallax: arctan of one astronomical unit over the object's distance.
    distance = sc.spherical.distance
    return np.arctan(1 * AU / distance)
def formatting_dates(dates_list):
    """ Formatting of both the start and end dates of a historical period.
    dates = [period_start_date, period_end_date]

    Normalizes " BC" suffixes to " BCE", replaces "present" with the current
    year, and appends missing " BCE"/" CE" era suffixes by comparing the two
    numeric years (BCE years decrease over time, CE years increase).
    """
    # NOTE(review): this aliases (does not copy) the input, so the caller's
    # list is mutated in place -- confirm that is intended.
    new_dates = dates_list
    # Change all "BCE" into "BC":
    for index1 in range(len(new_dates)):
        if " BCE" not in new_dates[index1]:
            if " BC" in new_dates[index1]:
                new_dates[index1] = str(new_dates[index1]) + "E"
    # `counter` is never reassigned, so the code below always operates on
    # indices 0 (start date) and 1 (end date).
    counter = 0
    # Change "present" into today's year:
    if "present" in new_dates[counter +1]:
        # str(date.today()) is "YYYY-MM-DD"; slicing off 6 chars keeps "YYYY".
        new_dates[counter +1] = str(date.today())[:-6]
    if "th century" in new_dates[counter]:
        pass
    # Adding Missing " BCE" and " CE":
    elif " CE" not in new_dates[counter] and " CE" not in new_dates[counter +1]:
        # (" CE" is a substring of " BCE", so this branch means neither date
        # carries any era suffix yet, or only explicit "BCE" ones.)
        # Both dates "Before Common Era" - Add "BCE" if start date higher number than end date:
        if "BCE" not in new_dates[counter] and "BCE" in new_dates[counter +1]:
            if int(new_dates[counter]) >= int(new_dates[counter+1][:-3]):
                new_dates[counter] = str(new_dates[counter]) + " BCE"
            else:
                print("Not a valid date.") # PRINT ERROR
        # Both dates "Before Common Era" - Add "BCE" if start date higher number than end date:
        elif "BCE" in new_dates[counter] and "BCE" not in new_dates[counter +1]:
            if int(new_dates[counter][:-3]) >= int(new_dates[counter+1]):
                new_dates[counter +1] = str(new_dates[counter +1]) + " BCE"
            else:
                print("Not a valid date, except if end date is CE.") # PRINT ERROR
        elif "BCE" not in new_dates[counter] and "BCE" not in new_dates[counter +1]:
            # Both dates "Before Common Era" - Add "BCE" if start date higher number than end date:
            if int(new_dates[counter]) >= int(new_dates[counter+1]):
                new_dates[counter] = str(new_dates[counter]) + " BCE"
                new_dates[counter+1] = str(new_dates[counter+1]) + " BCE"
            # Both dates "Common Era"
            else:
                new_dates[counter] = str(new_dates[counter]) + " CE"
                new_dates[counter+1] = str(new_dates[counter+1]) + " CE"
    # One date "Before Common Era" and one date "Common Era"
    elif " BCE" in new_dates[counter] and " CE" in new_dates[counter +1]:
        pass
    return new_dates
from datetime import datetime
def exp_days_f(cppm_class, current_user):
"""
User's password expiration and check force change password function.
1. Calculates days to expiry password for particular user
2. Checks change password force checkbox.
Returns:
exp_days: Number of days until a password expired
change_pwd_next_login: status for force change password checkbox (boolean)
"""
now = datetime.now()
# Get User ID, date of password changing and user attributes
uid, pwd_dt, attr, change_pwd_next_login = pg_sql('user', cppm_class,
current_user)
# print (cppm_class.db_password, current_user)
exp_days = int(cppm_connect_main.days_to_passw_exp) - (now - pwd_dt).days
# print(exp_days, change_pwd_next_login)
return exp_days, change_pwd_next_login | e0f014fe4813dd70fd733aa2ed2fa4f06105c2f0 | 32,353 |
def isinteger(x):
    """
    Determine whether *x* can be converted to an integer.

    Accepts anything ``int()`` accepts (numeric strings, floats, bools, ...);
    returns False instead of raising on conversion failure.
    """
    # Combine the two except clauses and drop the unused local binding the
    # original assigned; the try/else dance collapses to a plain return.
    try:
        int(x)
    except (ValueError, TypeError):
        return False
    return True
import os
def get_economic_parameters():
    """
    Extracts and returns the parameters for the economic model
    This function returns a dictionary with all parameters needed to run the economic model.
    Returns
    -------
    pars_dict : dictionary
        contains the values of all economic parameters
    Parameters
    ------------
    IO: input-output matrix
    x_0 : sectoral output during business-as-usual
    c_0 : household demand during business-as-usual
    f_0 : other final demand during business-as-usual
    n : desired stock
    c_s : consumer demand shock during lockdown
    f_s : other final demand shock during lockdown
    l_0 : sectoral employees during business-as-usual
    l_s : sectoral employees during lockdown
    C : matrix of crictical inputs
    Example use
    -----------
    parameters = get_economic_parameters()
    """
    # Resolve the data directory relative to this module's location.
    abs_dir = os.path.dirname(__file__)
    par_interim_path = os.path.join(abs_dir, "../../../data/interim/economical/")
    # Initialize parameters dictionary
    pars_dict = {}
    # IO_NACE64.csv
    df = pd.read_csv(os.path.join(par_interim_path,"IO_NACE64.csv"), sep=',',header=[0],index_col=[0])
    pars_dict['IO'] = df.values
    # Others.csv
    # Each column becomes an (n_sectors, 1) column vector via expand_dims.
    df = pd.read_csv(os.path.join(par_interim_path,"others.csv"), sep=',',header=[1],index_col=[0])
    pars_dict['x_0'] = np.expand_dims(np.array(df['Sectoral output (M€)'].values), axis=1)
    pars_dict['c_0'] = np.expand_dims(np.array(df['Household demand (M€)'].values), axis=1)
    pars_dict['f_0'] = np.expand_dims(np.array(df['Other demand (M€)'].values), axis=1)
    pars_dict['n'] = np.expand_dims(np.array(df['Desired stock (days)'].values), axis=1)
    pars_dict['c_s'] = np.expand_dims(np.array(df['Consumer demand shock (%)'].values), axis=1)
    pars_dict['f_s'] = np.expand_dims(np.array(df['Other demand shock (%)'].values), axis=1)
    # Employees are reported in thousands; convert to absolute head counts.
    pars_dict['l_0'] = np.expand_dims(np.array(df['Employees (x1000)'].values), axis=1)*1000
    # Lockdown workforce: head count scaled by the share still able to work
    # (telework + mixed + on-site percentages).
    pars_dict['l_s'] = np.expand_dims(np.array(df['Employees (x1000)'].values), axis=1)*1000*np.expand_dims(np.array((df['Telework (%)'].values+df['Mix (%)'].values+df['Workplace (%)'].values)/100), axis = 1)
    # IHS_critical_NACE64.csv
    df = pd.read_csv(os.path.join(par_interim_path,"IHS_critical_NACE64.csv"), sep=',',header=[0],index_col=[0])
    pars_dict['C'] = df.values
    return pars_dict
import sys
import json
import math
def recommend(docs_path, dict_path, use_fos_annot=False, pp_dict_path=None,
              np_dict_path=None, lda_preselect=False,
              combine_train_contexts=True):
    """ Recommend

    Offline evaluation of citation recommendation models over a citation
    context file.  Reads `docs_path` (one context per line, fields separated
    by the U+241E control picture, sub-fields by U+241F / U+241D), splits
    contexts into train/test per cited document, builds TF-IDF / BM25 and
    optional FoS, noun-phrase and claim (PredPatt) models, then ranks the
    train documents for every test context and accumulates NDCG / MAP / MRR /
    Recall at cutoffs up to AT_K.

    Returns a tuple ``(eval_models, num_lines, len(test))`` where
    ``eval_models`` is a list of per-model metric dictionaries.
    """
    test = []
    train_mids = []
    train_texts = []
    train_foss = []
    train_ppann = []
    train_nps = []
    foss = []
    tmp_bag = []
    adjacent_cit_map = {}
    # NOTE(review): `and False` makes this branch dead code -- the PredPatt
    # dictionary is never loaded here; confirm whether this was disabled on
    # purpose.
    if pp_dict_path and False:
        prind('loading predpatt dictionary')
        pp_dictionary = corpora.Dictionary.load(pp_dict_path)
        pp_num_unique_tokens = len(pp_dictionary.keys())
        use_predpatt_model = True
        if not combine_train_contexts:
            prind(('usage of predpatt model is not implemented for not'
                   'combining train contexts.\nexiting.'))
            sys.exit()
    else:
        use_predpatt_model = False
        pp_dictionary = None
    if np_dict_path:
        prind('loading noun phrase dictionary')
        np_dictionary = corpora.Dictionary.load(np_dict_path)
        np_num_unique_tokens = len(np_dictionary.keys())
        use_noun_phrase_model = True
    else:
        use_noun_phrase_model = False
        np_dictionary = None
    prind('checking file length')
    num_lines = sum(1 for line in open(docs_path))
    # # for MAG eval
    # mag_id2year = {}
    # with open('MAG_CS_en_year_map.csv') as f:
    #     for line in f:
    #         pid, year = line.strip().split(',')
    #         mag_id2year[pid] = int(year)
    # # /for MAG eval
    # ------------------------------------------------------------------
    # Pass 1: parse the context file and split contexts into train/test
    # per cited document (~80% of citing docs go to train).
    # ------------------------------------------------------------------
    prind('train/test splitting')
    with open(docs_path) as f:
        for idx, line in enumerate(f):
            if idx == 0:
                tmp_bag_current_mid = line.split('\u241E')[0]
            if idx%10000 == 0:
                prind('{}/{} lines'.format(idx, num_lines))
            cntxt_foss = []
            cntxt_ppann = []
            cntxt_nps = []
            # handle varying CSV formats
            vals = line.split('\u241E')
            if use_noun_phrase_model:
                cntxt_nps = vals[-1]
                if '\u241D' in cntxt_nps:  # includes NP<marker> variant
                    np_all, np_marker = cntxt_nps.split('\u241D')
                    cntxt_nps = np_marker  # mby use both for final eval
                cntxt_nps = [np for np in cntxt_nps.strip().split('\u241F')]
                vals = vals[:-1]
            if len(vals) == 4:
                mid, adjacent, in_doc, text = vals
            elif len(vals) == 5:
                if use_predpatt_model:
                    mid, adjacent, in_doc, text, pp_annot_json = vals
                else:
                    mid, adjacent, in_doc, text, fos_annot = vals
            elif len(vals) == 6:
                mid, adjacent, in_doc, text, fos_annot, pp_annot_json = vals
            else:
                prind('input file format can not be parsed\nexiting')
                sys.exit()
            if len(vals) in [5, 6] and use_fos_annot:
                cntxt_foss = [f.strip() for f in fos_annot.split('\u241F')
                              if len(f.strip()) > 0]
                foss.extend(cntxt_foss)
            if use_predpatt_model:
                if '\u241F' in pp_annot_json:  # includes alternative version
                    ppann, ppann_alt = pp_annot_json.split('\u241F')
                    pp_annot_json = ppann
                cntxt_ppann = json.loads(pp_annot_json)
            # create adjacent map for later use in eval
            if mid not in adjacent_cit_map:
                adjacent_cit_map[mid] = []
            if len(adjacent) > 0:
                adj_cits = adjacent.split('\u241F')
                for adj_cit in adj_cits:
                    if adj_cit not in adjacent_cit_map[mid]:
                        adjacent_cit_map[mid].append(adj_cit)
            # fill texts
            if mid != tmp_bag_current_mid or idx == num_lines-1:
                # tmp_bag now contains all lines sharing ID tmp_bag_current_mid
                num_contexts = len(tmp_bag)
                sub_bags_dict = {}
                # Group the cited doc's contexts by the citing document.
                for item in tmp_bag:
                    item_in_doc = item[0]
                    item_text = item[1]
                    item_foss = item[2]
                    item_ppann = item[3]
                    item_nps = item[4]
                    if item_in_doc not in sub_bags_dict:
                        sub_bags_dict[item_in_doc] = []
                    sub_bags_dict[item_in_doc].append(
                        [item_text, item_foss, item_ppann, item_nps]
                        )
                if len(sub_bags_dict) < 2:
                    # can't split, reset bag, next
                    tmp_bag = []
                    tmp_bag_current_mid = mid
                    continue
                order = sorted(sub_bags_dict,
                               key=lambda k: len(sub_bags_dict[k]),
                               reverse=True)
                # ↑ keys for sub_bags_dict, ordered for largest bag to smallest
                min_num_train = math.floor(num_contexts * 0.8)
                train_tups = []
                test_tups = []
                # Fill train first (largest citing docs), rest goes to test.
                for jdx, sub_bag_key in enumerate(order):
                    sb_tup = sub_bags_dict[sub_bag_key]
                    # if sub_bag_key[1:3] == '06':  # time split ACL
                    # if mag_id2year[sub_bag_key] > 2017:  # time split MAG
                    # if sub_bag_key[:2] == '17':  # time split arXiv
                    if len(train_tups) > min_num_train or jdx == len(order)-1:
                        test_tups.extend(sb_tup)
                    else:
                        train_tups.extend(sb_tup)
                test.extend(
                    [
                        [tmp_bag_current_mid,  # mid
                         tup[0],               # text
                         tup[1],               # fos
                         sum_weighted_term_lists(tup[2], pp_dictionary),  # pp
                         tup[3]                # nps
                         ]
                        for tup in test_tups
                    ])
                if combine_train_contexts:
                    # combine train contexts per cited doc
                    train_text_combined = ' '.join(tup[0] for tup in train_tups)
                    train_mids.append(tmp_bag_current_mid)
                    train_texts.append(train_text_combined.split())
                    train_foss.append(
                        [fos for tup in train_tups for fos in tup[1]]
                        )
                    train_ppann.append(
                        sum_weighted_term_lists(
                            sum([tup[2] for tup in train_tups], []),
                            pp_dictionary
                            )
                        )
                    train_nps.append(
                        [np for tup in train_tups for np in tup[3]]
                        )
                else:
                    # don't combine train contexts per cited doc
                    for tup in train_tups:
                        train_mids.append(tmp_bag_current_mid)
                        train_texts.append(tup[0].split())
                        train_foss.append([fos for fos in tup[1]])
                        # NOTE(review): this reads tup[1] (FoS list), not
                        # tup[3] (noun phrases) as the combined branch does
                        # -- looks like a copy-paste slip; confirm.
                        train_nps.append([np for np in tup[1]])
                # reset bag
                tmp_bag = []
                tmp_bag_current_mid = mid
            tmp_bag.append([in_doc, text, cntxt_foss, cntxt_ppann, cntxt_nps])
    # ------------------------------------------------------------------
    # Pass 2: build the retrieval models over the train split.
    # ------------------------------------------------------------------
    prind('loading dictionary')
    dictionary = corpora.Dictionary.load(dict_path)
    num_unique_tokens = len(dictionary.keys())
    prind('building corpus')
    corpus = [dictionary.doc2bow(text) for text in train_texts]
    if use_fos_annot:
        prind('preparing FoS model')
        mlb = MultiLabelBinarizer()
        mlb.fit([foss])
        train_foss_matrix = mlb.transform(train_foss)
        train_foss_set_sizes = np.sum(train_foss_matrix, 1)
    prind('generating TFIDF model')
    tfidf = models.TfidfModel(corpus)
    prind('preparing similarities')
    index = similarities.SparseMatrixSimilarity(
        tfidf[corpus],
        num_features=num_unique_tokens)
    bm25 = BM25(corpus)
    average_idf = sum(
        map(lambda k: float(bm25.idf[k]),
            bm25.idf.keys())
        ) / len(bm25.idf.keys())
    if lda_preselect:
        orig_index = index.index.copy()
        prind('generating LDA/LSI model')
        lda = LsiModel(tfidf[corpus], id2word=dictionary, num_topics=100)
        prind('preparing similarities')
        lda_index = similarities.SparseMatrixSimilarity(
            lda[tfidf[corpus]],
            num_features=num_unique_tokens)
    if use_predpatt_model:
        prind('preparing claim similarities')
        pp_tfidf = models.TfidfModel(train_ppann)
        pp_index = similarities.SparseMatrixSimilarity(
            pp_tfidf[train_ppann],
            num_features=pp_num_unique_tokens)
    if use_noun_phrase_model:
        prind('preparing noun phrase similarities')
        np_corpus = [np_dictionary.doc2bow(nps) for nps in train_nps]
        np_index = similarities.SparseMatrixSimilarity(
            np_corpus,
            num_features=np_num_unique_tokens)
    # models: BoW, NP<marker>, Claim, Claim+BoW
    eval_models = [
        {'name':'bow'},
        {'name':'np'},
        {'name':'claim'},
        {'name':'claim+bow'}
        ]
    # Per-model metric accumulators (sums, normalized at the end).
    for mi in range(len(eval_models)):
        eval_models[mi]['num_cur'] = 0
        eval_models[mi]['num_top'] = 0
        eval_models[mi]['num_top_5'] = 0
        eval_models[mi]['num_top_10'] = 0
        eval_models[mi]['ndcg_sums'] = [0]*AT_K
        eval_models[mi]['map_sums'] = [0]*AT_K
        eval_models[mi]['mrr_sums'] = [0]*AT_K
        eval_models[mi]['recall_sums'] = [0]*AT_K
    # ------------------------------------------------------------------
    # Pass 3: rank train docs for every test context and accumulate
    # the ranking metrics.
    # ------------------------------------------------------------------
    prind('test set size: {}\n- - - - - - - -'.format(len(test)))
    for test_item_idx, tpl in enumerate(test):
        if test_item_idx > 0 and test_item_idx%10000 == 0:
            # Periodic checkpoint of intermediate results.
            save_results(
                docs_path, num_lines, len(test), eval_models, suffix='_tmp'
                )
        test_mid = tpl[0]
        # if test_mid not in train_mids:
        #     # not testable
        #     continue
        test_text = bow_preprocess_string(tpl[1])
        if use_fos_annot:
            # FoS similarity: overlap of fields-of-study label sets.
            test_foss_vec = mlb.transform([tpl[2]])
            dot_prods = train_foss_matrix.dot(
                test_foss_vec.transpose()
                ).transpose()[0]
            with np.errstate(divide='ignore',invalid='ignore'):
                fos_sims = np.nan_to_num(dot_prods/train_foss_set_sizes)
            fos_sims_list = list(enumerate(fos_sims))
            fos_sims_list.sort(key=lambda tup: tup[1], reverse=True)
            fos_ranking = [s[0] for s in fos_sims_list]
            fos_boost = np.where(
                dot_prods >= dot_prods.max()-1
                )[0].tolist()
            top_dot_prod = dot_prods[-1]
        if use_predpatt_model:
            pp_sims = pp_index[pp_tfidf[tpl[3]]]
            pp_sims_list = list(enumerate(pp_sims))
            pp_sims_list.sort(key=lambda tup: tup[1], reverse=True)
            pp_ranking = [s[0] for s in pp_sims_list]
        if use_noun_phrase_model:
            np_sims = np_index[np_dictionary.doc2bow(tpl[4])]
            np_sims_list = list(enumerate(np_sims))
            np_sims_list.sort(key=lambda tup: tup[1], reverse=True)
            np_ranking = [s[0] for s in np_sims_list]
        test_bow = dictionary.doc2bow(test_text)
        if lda_preselect:
            # pre select in LDA/LSI space
            lda_sims = lda_index[lda[tfidf[test_bow]]]
            lda_sims_list = list(enumerate(lda_sims))
            lda_sims_list.sort(key=lambda tup: tup[1], reverse=True)
            lda_ranking = [s[0] for s in lda_sims_list]
            lda_picks = lda_ranking[:1000]
            index.index = orig_index[lda_picks]
        sims = index[tfidf[test_bow]]
        sims_list = list(enumerate(sims))
        sims_list.sort(key=lambda tup: tup[1], reverse=True)
        bow_ranking = [s[0] for s in sims_list]
        bm25_scores = list(enumerate(bm25.get_scores(test_bow, average_idf)))
        bm25_scores.sort(key=lambda tup: tup[1], reverse=True)
        bm25_ranking = [s[0] for s in bm25_scores]
        if lda_preselect:
            # translate back from listing in LDA/LSI pick subset to global listing
            bow_ranking = [lda_picks[r] for r in bow_ranking]
        if use_fos_annot:
            boost_ranking = fos_boost_ranking(
                bow_ranking, fos_boost, top_dot_prod)
        # NOTE(review): in this branch `final_ranking` is read before any
        # assignment in this iteration (it is only set further below), so
        # running with combine_train_contexts=False would raise NameError
        # here -- confirm.
        if not combine_train_contexts:
            seen = set()
            seen_add = seen.add
            final_ranking = [x for x in final_ranking
                             if not (train_mids[x] in seen or seen_add(train_mids[x]))]
        if use_predpatt_model:
            sims_comb = combine_simlists(sims, pp_sims, [2, 1])
            comb_sims_list = list(enumerate(sims_comb))
            comb_sims_list.sort(key=lambda tup: tup[1], reverse=True)
            comb_ranking = [s[0] for s in comb_sims_list]
        # Evaluate each model's ranking for this test context.
        for mi in range(len(eval_models)):
            if mi == 0:
                final_ranking = bow_ranking
            elif mi == 1:
                final_ranking = np_ranking
            elif mi == 2:
                final_ranking = pp_ranking
            elif mi == 3:
                final_ranking = comb_ranking
            rank = len(bow_ranking)  # assign worst possible
            for idx, doc_id in enumerate(final_ranking):
                if train_mids[doc_id] == test_mid:
                    rank = idx+1
                    break
                if idx >= 10:
                    break
            dcgs = [0]*AT_K
            idcgs = [0]*AT_K
            precs = [0]*AT_K
            num_rel_at = [0]*AT_K
            # Cited doc is fully relevant (1); adjacent citations half (0.5).
            num_rel = 1 + len(adjacent_cit_map[test_mid])
            num_rel_at_k = 0
            for i in range(AT_K):
                relevant = False
                placement = i+1
                doc_id = final_ranking[i]
                result_mid = train_mids[doc_id]
                if result_mid == test_mid:
                    relevance = 1
                    num_rel_at_k += 1
                    relevant = True
                elif result_mid in adjacent_cit_map[test_mid]:
                    relevance = .5
                    num_rel_at_k += 1
                    relevant = True
                else:
                    relevance = 0
                num_rel_at[i] = num_rel_at_k
                if relevant:
                    precs[i] = num_rel_at_k / placement
                denom = math.log2(placement + 1)
                dcg_numer = math.pow(2, relevance) - 1
                for j in range(i, AT_K):
                    dcgs[j] += dcg_numer / denom
                if placement == 1:
                    ideal_rel = 1
                elif placement <= num_rel:
                    ideal_rel = .5
                else:
                    ideal_rel = 0
                idcg_numer = math.pow(2, ideal_rel) - 1
                for j in range(i, AT_K):
                    # note this^ we go 0~9, 1~9, 2~9, ..., 9
                    idcgs[j] += idcg_numer / denom
            for i in range(AT_K):
                eval_models[mi]['ndcg_sums'][i] += dcgs[i] / idcgs[i]
                eval_models[mi]['map_sums'][i] += sum(precs[:i+1])/max(num_rel_at[i], 1)
                if rank <= i+1:
                    eval_models[mi]['mrr_sums'][i] += 1 / rank
                    eval_models[mi]['recall_sums'][i] += 1
            if rank == 1:
                eval_models[mi]['num_top'] += 1
            if rank <= 5:
                eval_models[mi]['num_top_5'] += 1
            if rank <= 10:
                eval_models[mi]['num_top_10'] += 1
            eval_models[mi]['num_cur'] += 1
        prind('- - - - - {}/{} - - - - -'.format(
            eval_models[0]['num_cur'], len(test))
            )
        prind('#1: {}'.format(eval_models[0]['num_top']))
        prind('in top 5: {}'.format(eval_models[0]['num_top_5']))
        prind('in top 10: {}'.format(eval_models[0]['num_top_10']))
        prind('ndcg@5: {}'.format(
            eval_models[0]['ndcg_sums'][4]/eval_models[0]['num_cur'])
            )
        prind('map@5: {}'.format(
            eval_models[0]['map_sums'][4]/eval_models[0]['num_cur'])
            )
        prind('mrr@5: {}'.format(
            eval_models[0]['mrr_sums'][4]/eval_models[0]['num_cur'])
            )
        prind('recall@5: {}'.format(
            eval_models[0]['recall_sums'][4]/eval_models[0]['num_cur'])
            )
    # Normalize the accumulated sums into final per-cutoff metric values.
    for mi in range(len(eval_models)):
        eval_models[mi]['num_applicable'] = eval_models[mi]['num_cur']
        eval_models[mi]['ndcg_results'] = [
            sm/eval_models[mi]['num_cur'] for sm in eval_models[mi]['ndcg_sums']
            ]
        eval_models[mi]['map_results'] = [
            sm/eval_models[mi]['num_cur'] for sm in eval_models[mi]['map_sums']
            ]
        eval_models[mi]['mrr_results'] = [
            sm/eval_models[mi]['num_cur'] for sm in eval_models[mi]['mrr_sums']
            ]
        eval_models[mi]['recall_results'] = [
            sm/eval_models[mi]['num_cur'] for sm in eval_models[mi]['recall_sums']
            ]
    return eval_models, num_lines, len(test)
import hashlib
def _get_hash(x):
"""Generate a hash from a string, or dictionary."""
if isinstance(x, dict):
x = tuple(sorted(pair for pair in x.items()))
return hashlib.md5(bytes(repr(x), "utf-8")).hexdigest() | c47f96c1e7bfc5fd9e7952b471516fbf40470799 | 32,357 |
def wrap_arr(arr, wrapLow=-90.0, wrapHigh=90.0):
    """Wrap the values in an array (e.g., angles) into [wrapLow, wrapHigh)."""
    # Shift into [0, span), take the modulus, then shift back.
    span = wrapHigh - wrapLow
    return ((arr - wrapLow) % span) + wrapLow
def task_fail_slack_alert(context):
    """
    Callback task that can be used in DAG to alert of failure task completion
    Args:
        context (dict): Context variable passed in from Airflow
    Returns:
        None: Calls the SlackWebhookOperator execute method internally
        (returns early without alerting outside the "data" environment,
        for manually triggered runs, and for paused DAGs)
    """
    # Only alert from the "data" environment.
    if ENV != "data":
        return
    # Manually triggered runs should not page anyone.
    if context["dag_run"].external_trigger is True:
        return
    # Nor should failures of a DAG that has been paused.
    if context["dag"].is_paused is True:
        return
    slack_webhook_token = BaseHook.get_connection(SLACK_CONN_ID).password
    slack_msg = """
            :red_circle: Task Failed.
            *Task*: {task}
            *Dag*: {dag}
            *Execution Time*: {exec_date}
            *Running For*: {run_time} secs
            *Log Url*: {log_url}
            """.format(
        task=context["task_instance"].task_id,
        dag=context["task_instance"].dag_id,
        ti=context["task_instance"],
        exec_date=context["execution_date"],
        run_time=get_task_run_time(context["task_instance"]),
        log_url=context["task_instance"].log_url,
    )
    failed_alert = SlackWebhookOperator(
        task_id=context["task_instance"].task_id,
        http_conn_id=SLACK_CONN_ID,
        webhook_token=slack_webhook_token,
        message=slack_msg,
        username="airflow",
    )
    return failed_alert.execute(context=context)
def largest_negative_number(seq_seq):
    """
    Returns the largest NEGATIVE number in the given sequence of
    sequences of numbers. Returns None if there are no negative numbers
    in the sequence of sequences.
    For example, if the given argument is:
        [(30, -5, 8, -20),
         (100, -2.6, 88, -40, -5),
         (400, 500)
        ]
    then this function returns -2.6.
    As another example, if the given argument is:
        [(200, 2, 20), (500, 400)]
    then this function returns None.
    Preconditions:
      :type seq_seq: (list, tuple)
      and the given argument is a sequence of sequences,
      where each subsequence contains only numbers.
    """
    # Track the best negative seen so far; None means "none found yet",
    # which doubles as the documented no-negatives return value.
    best = None
    for subsequence in seq_seq:
        for number in subsequence:
            if number < 0 and (best is None or number > best):
                best = number
    return best
def align_quaternion_frames(target_skeleton, frames):
    """align quaternions for blending
    src: http://physicsforgames.blogspot.de/2010/02/quaternions.html

    Flips the sign of any joint quaternion whose dot product with the
    corresponding quaternion of the FIRST frame is negative (q and -q encode
    the same rotation, but blending requires them on the same hemisphere).

    NOTE(review): assumes each frame is a numpy array laid out as
    [root_translation(3), q_joint0(4), q_joint1(4), ...] in the order of
    ``target_skeleton.animated_joints`` -- TODO confirm.  The sign flip is
    written back into the frame (``frame[offset:offset+4] = -q``), so the
    input frames are modified in place.
    """
    # First frame acts as the alignment reference for all later frames.
    ref_frame = None
    new_frames = []
    for frame in frames:
        if ref_frame is None:
            ref_frame = frame
        else:
            # Skip the 3 root-translation channels.
            offset = 3
            for joint in target_skeleton.animated_joints:
                q = frame[offset:offset + 4]
                ref_q = ref_frame[offset:offset + 4]
                dot = np.dot(ref_q, q)
                if dot < 0:
                    # Negative dot => opposite hemisphere; flip to align.
                    frame[offset:offset + 4] = -q
                offset += 4
        new_frames.append(frame)
    return new_frames
import math
def do_round(precision=0, method='common'):
    """
    Round the number to a given precision. The first
    parameter specifies the precision (default is ``0``), the
    second the rounding method:

    - ``'common'`` rounds either up or down
    - ``'ceil'`` always rounds up
    - ``'floor'`` always rounds down

    If you don't specify a method ``'common'`` is used.

    .. sourcecode:: jinja

        {{ 42.55|round }}
            -> 43
        {{ 42.55|round(1, 'floor') }}
            -> 42.5

    *new in Jinja 1.1*
    """
    if method not in ('common', 'ceil', 'floor'):
        raise FilterArgumentError('method must be common, ceil or floor')
    if precision < 0:
        raise FilterArgumentError('precision must be a postive integer '
                                  'or zero.')

    def wrapped(env, context, value):
        if method == 'common':
            return round(value, precision)
        func = getattr(math, method)
        if precision:
            # Bug fix: scale by 10 ** precision, not 10 * precision.  The
            # original multiplication only gave correct results for
            # precision == 1 (where 10 * 1 == 10 ** 1).
            return func(value * 10 ** precision) / (10 ** precision)
        else:
            return func(value)
    return wrapped
import torch
def scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
    """Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general
    considered an overall measure of how good a source sound.

    Args:
        preds:
            shape ``[...,time]``
        target:
            shape ``[...,time]``
        zero_mean:
            If to zero mean target and preds or not

    Returns:
        si-sdr value of shape [...]

    Example:
        >>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio
        >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
        >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
        >>> scale_invariant_signal_distortion_ratio(preds, target)
        tensor(18.4030)

    References:
        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
        and Signal Processing (ICASSP) 2019.
    """
    _check_same_shape(preds, target)
    # Smallest representable epsilon of the prediction dtype guards the
    # divisions and the log below.
    eps = torch.finfo(preds.dtype).eps

    if zero_mean:
        target = target - torch.mean(target, dim=-1, keepdim=True)
        preds = preds - torch.mean(preds, dim=-1, keepdim=True)

    # Optimal scaling factor projecting preds onto target.
    alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + eps) / (
        torch.sum(target ** 2, dim=-1, keepdim=True) + eps
    )
    scaled_target = alpha * target
    distortion = scaled_target - preds

    ratio = (torch.sum(scaled_target ** 2, dim=-1) + eps) / (
        torch.sum(distortion ** 2, dim=-1) + eps
    )
    return 10 * torch.log10(ratio)
import json
from datetime import datetime, timedelta
def is_token_valid():
    """Check whether the stored token is still valid.

    Reads the token payload from /tmp/tngcli.txt (one JSON object per line;
    the last line wins) and compares the stored expiry time for the current
    service platform against now.

    :returns: A bool.
    """
    try:
        with open('/tmp/tngcli.txt', 'r') as file:
            for line in file:
                payload = json.loads(line)
    except (OSError, ValueError):
        # No token file / unreadable / malformed JSON.  Bug fix: the
        # original returned the tuple (False, 'no token file found') here,
        # which is TRUTHY and contradicts the documented bool return; also
        # the bare `except:` masked unrelated errors, and `timedelta` was
        # never imported.  (json.JSONDecodeError subclasses ValueError.)
        return False
    exp_t = payload[env.get_sp_path()]['exp_t']
    exp_t_datetime = datetime.strptime(exp_t, '%Y-%m-%d %H:%M')
    return (datetime.now() - exp_t_datetime) < timedelta(minutes=58)
def DB_getQanswer(question):
    """
    Calls the function in the database that gets the question answer to the
    input question.

    Thin wrapper around ``DB.get_question_answer`` (module-level DB handle
    defined elsewhere in this module); returns its result unchanged.
    """
    return DB.get_question_answer(question)
def _state_senate_slide_preview(slug):
    """
    Render a single state senate slide on its own, outside of the stack.

    Returns the preview page on success, otherwise a plain 404 response.
    """
    context = make_context()
    slide_response = _state_senate_slide(slug)
    if slide_response.status_code != 200:
        return "404", 404
    context['body'] = slide_response.data
    return render_template('slide_preview.html', **context)
def tensor_network_tt_einsum(inputs, states, output_size, rank_vals, bias, bias_start=0.0):
    # print("Using Einsum Tensor-Train decomposition.")
    """Tensor-Train decomposition for the full transition tensor.

    Computes ``res = inputs @ U + contract(W, H)`` where ``H`` is the
    num_orders-way outer product of the concatenated lagged states (with a
    constant 1 appended so lower-order terms are included), and ``W`` is
    stored in Tensor-Train form with ranks ``rank_vals``.

    Args:
        inputs: [batch, input_size] input tensor.
        states: list of [batch, state_size] lagged hidden-state tensors.
        output_size: output dimensionality (becomes the last TT "rank").
        rank_vals: list of TT-ranks; num_orders = len(rank_vals) + 1.
        bias: if True, add a learned bias vector to the result.
        bias_start: NOTE(review): not used below — presumably intended to
            seed the bias initializer; confirm.

    Returns:
        [batch, output_size] pre-activation output.
    """
    num_orders = len(rank_vals)+1#alpha_1 to alpha_{K-1}
    num_lags = len(states)
    batch_size = tf.shape(inputs)[0]
    state_size = states[0].get_shape()[1].value #hidden layer size
    input_size= inputs.get_shape()[1].value
    # +1 accounts for the constant 1 appended to the state vector below.
    total_state_size = (state_size * num_lags + 1 )
    # These bookkeeping variables hold the dimension information that we'll
    # use to store and access the transition tensor W efficiently.
    mat_dims = np.ones((num_orders,)) * total_state_size
    # The latent dimensions used in our tensor-train decomposition.
    # Each factor A^i is a 3-tensor with dimensions
    # [mat_ranks[i], total_state_size, mat_ranks[i+1]].
    # The last entry is the output dimension, output_size: that dimension
    # will be the output.
    mat_ranks = np.concatenate(([1], rank_vals, [output_size]))
    # This stores the boundary indices for the factors A. Starting from 0,
    # each index i is computed by adding the number of weights in the i'th
    # factor A^i.
    mat_ps = np.cumsum(np.concatenate(([0], mat_ranks[:-1] * mat_dims * mat_ranks[1:])),dtype=np.int32)
    mat_size = mat_ps[-1]
    # Compute U * x
    weights_x = vs.get_variable("weights_x", [input_size, output_size] )
    out_x = tf.matmul(inputs, weights_x)
    # Get a variable that holds all the weights of the factors A^i of the
    # transition tensor W. All weights are stored serially, so we need to do
    # some bookkeeping to keep track of where each factor is stored.
    mat = vs.get_variable("weights_h", mat_size) # h_z x h_z... x output_size
    #mat = tf.Variable(mat, name="weights")
    states_vector = tf.concat(states, 1)
    # Append a constant 1 so the contraction also covers lower-order terms.
    states_vector = tf.concat( [states_vector, tf.ones([batch_size, 1])], 1)
    """form high order state tensor"""
    # H is built by repeatedly taking the outer product of the concatenated
    # state vector with itself (num_orders - 1 times).
    states_tensor = states_vector
    for order in range(num_orders-1):
        states_tensor = _outer_product(batch_size, states_tensor, states_vector)
    # print("tensor product", states_tensor.name, states_tensor.get_shape().as_list())
    cores = []
    for i in range(num_orders):
        # Fetch the weights of factor A^i from our big serialized variable weights_h.
        mat_core = tf.slice(mat, [mat_ps[i]], [mat_ps[i + 1] - mat_ps[i]])
        mat_core = tf.reshape(mat_core, [mat_ranks[i], total_state_size, mat_ranks[i + 1]])
        cores.append(mat_core)
    out_h = tensor_train_contraction(states_tensor, cores)
    # Compute h_t = U*x_t + W*H_{t-1}
    res = tf.add(out_x, out_h)
    # print "END OF CELL CONSTRUCTION"
    # print "========================"
    # print ""
    if not bias:
        return res
    # NOTE(review): bias_start is not passed to the initializer here.
    biases = vs.get_variable("biases", [output_size])
    return nn_ops.bias_add(res,biases)
from .observable.case import case_
from typing import Callable
from typing import Mapping
from typing import Optional
from typing import Union
def case(
    mapper: Callable[[], _TKey],
    sources: Mapping[_TKey, Observable[_T]],
    default_source: Optional[Union[Observable[_T], "Future[_T]"]] = None,
) -> Observable[_T]:
    """Select which observable in *sources* to run, keyed by *mapper*.

    ``mapper()`` is evaluated to obtain a key; the observable registered
    under that key in *sources* becomes the result. When no entry matches,
    *default_source* is used, falling back to an empty sequence when it is
    not provided.

    Examples:
        >>> res = reactivex.case(mapper, { '1': obs1, '2': obs2 })
        >>> res = reactivex.case(mapper, { '1': obs1, '2': obs2 }, obs0)

    Args:
        mapper: Function that extracts the value to test in the case
            statement.
        sources: Mapping from case labels to observable sequences.
        default_source: [Optional] Observable sequence or Future to run when
            no source matches; defaults to an empty sequence.

    Returns:
        An observable sequence determined by the case statement.
    """
    fallback = default_source
    return case_(mapper, sources, fallback)
from typing import Tuple
import json
def group_to_stats(request, project_id) -> Tuple:
    """Collect request statistics and the grouping strategy for a chart.

    Reads ``filters`` (JSON-encoded; e.g. datetime range, issue type,
    method) and ``groupBy`` (defaults to ``hours``) from the query params.

    :returns: tuple of (ordered RequestStat queryset, group type)
    """
    params = request.query_params
    filters = json.loads(params.get('filters', '{}'))
    group_by = params.get('groupBy', 'hours')
    stats_qs = RequestStat.objects.filter(project_id=project_id, **filters).order_by('created')
    return stats_qs, group_types[group_by]
def build_delete(table, where):
    """
    Build a parameterized DELETE statement.

    Parameters
    ----------
    table : str
        Table the statement targets. The name is double-quoted but
        interpolated directly, so it must come from trusted code, never
        from user input.
    where : iterable
        Column names constraining the deletion. Each entry ``col`` becomes
        a named bind parameter condition ``col = :col``, joined with AND.
        When empty, the WHERE clause is omitted entirely (previously an
        empty iterable produced a dangling ``WHERE``).

    Returns
    -------
    str
        Built query.
    """
    sql_q = 'DELETE FROM "' + table + '"'
    conditions = ['{0} = :{0}'.format(col) for col in where]
    if conditions:
        sql_q += ' WHERE ' + ' AND '.join(conditions)
    return sql_q
def filter_resources_sets(used_resources_sets, resources_sets, expand_resources_set, reduce_resources_set):
    """Return the resources_sets not touched by any used resources_set.

    A resources_set is considered "in use" (and therefore filtered out) as
    soon as its expansion shares at least one element with the expansion of
    any used_resources_set.
    """
    expanded_candidates = [expand_resources_set(rs) for rs in resources_sets]
    expanded_used = [expand_resources_set(rs) for rs in used_resources_sets]
    blocked = set()
    for candidate in expanded_candidates:
        if any(candidate.intersection(used) for used in expanded_used):
            blocked.add(reduce_resources_set(candidate))
    return list(set(resources_sets) - blocked)
def get_index_base():
    """Fetch the code/name table for Shanghai and Shenzhen stock indexes."""
    url_fmt = 'http://quotes.money.163.com/hs/service/hsindexrank.php?host=/hs/service/'
    url_fmt += 'hsindexrank.php&page={page}&query=IS_INDEX:true;EXCHANGE:CNSE{ex}&fields=no,SYMBOL,NAME&'
    url_fmt += 'sort=SYMBOL&order=asc&count={count}&type=query'
    huge_count = 10000  # large enough to pull every index in a single page

    def fetch_exchange(exchange):
        """Download the SYMBOL/NAME listing for one exchange ('SH' or 'SZ')."""
        url = url_fmt.format_map({'page': 0, 'ex': exchange, 'count': huge_count})
        response = get_page_response(url, method='post')
        frame = pd.DataFrame(response.json()['list'])
        return frame.loc[:, ['SYMBOL', 'NAME']]

    # Query Shanghai first, then Shenzhen, and stack the results.
    df = pd.concat([fetch_exchange('SH'), fetch_exchange('SZ')])
    df.columns = df.columns.str.lower()
    df.rename(columns={'symbol': 'code'}, inplace=True)
    df.set_index('code', inplace=True, drop=True)
    return df
def merge_list_of_dicts(old, new, key):
    """
    Merge two lists of dictionaries on a shared key.

    Dicts whose value for ``key`` matches are merged together via
    ``object_merge``; old entries with no counterpart in ``new`` are
    appended to it. A value for the given key is assumed to be unique
    and appear only once per list.

    Example:
        list1 = [{"name": "one", "data": "stuff"}, {"name": "two", "data": "stuff2"}]
        list2 = [{"name": "one", "data": "newstuff"}]
        merge_list_of_dicts(list1, list2) returns:
        [{"name": "one", "data": "newstuff"}, {"name": "two", "data": "stuff2"}]
    """
    # Walk `old` back-to-front, mirroring the original ordering of appends.
    for old_item in reversed(old):
        match = next((item for item in new if item[key] == old_item[key]), None)
        if match is not None:
            object_merge(old_item, match)
        else:
            new.append(old_item)
    return new
from masci_tools.util.schema_dict_util import read_constants, eval_simple_xpath
from masci_tools.util.schema_dict_util import evaluate_text, evaluate_attribute
from masci_tools.util.xml.common_functions import clear_xml
def get_kpoints_data_max4(xmltree, schema_dict, logger=None, convert_to_angstroem=True):
    """
    Extract the kpoint sets defined in the given fleur xml file.

    .. note::
        Specific to file versions up to and including the Max4 release
        of fleur.

    :param xmltree: etree representing the fleur xml file
    :param schema_dict: schema dictionary corresponding to the file version
                        of the xmltree
    :param logger: logger object for logging warnings, errors
    :param convert_to_angstroem: bool, if True the bravais matrix is
                                 converted to angstroem

    :returns: tuple ``(kpoints, weights, cell, pbc)`` where

        1. :kpoints: list of kpoint coordinates
        2. :weights: list of kpoint weights
        3. :cell: numpy array, bravais matrix of the given system
        4. :pbc: list of booleans flagging periodic boundary conditions
    """
    # Accept either a full ElementTree (which gets cleaned first) or an
    # already-prepared root element.
    if isinstance(xmltree, etree._ElementTree):
        cleaned_tree, _ = clear_xml(xmltree)
        root = cleaned_tree.getroot()
    else:
        root = xmltree
    constants = read_constants(root, schema_dict, logger=logger)
    cell, pbc = get_cell(root, schema_dict, logger=logger, convert_to_angstroem=convert_to_angstroem)
    found_lists = eval_simple_xpath(root,
                                    schema_dict,
                                    'kPointList',
                                    list_return=True,
                                    not_contains='altKPoint',
                                    logger=logger)
    if not found_lists:
        raise ValueError('No Kpoint lists found in the given inp.xml')
    # Only the first (non-alternative) kpoint list is evaluated.
    first_list = found_lists[0]
    kpoints = evaluate_text(first_list,
                            schema_dict,
                            'kPoint',
                            constants=constants,
                            not_contains='altKPoint',
                            list_return=True,
                            logger=logger)
    weights = evaluate_attribute(first_list,
                                 schema_dict,
                                 'weight',
                                 constants=constants,
                                 not_contains='altKPoint',
                                 list_return=True,
                                 logger=logger)
    return kpoints, weights, cell, pbc
def permission_required_on_object(
    perm, object_getter, login_url=None, handle_access_error=None, raise_exception=False
):
    """
    View decorator that verifies the requesting user holds the given
    permission(s) on the object resolved by ``object_getter``.

    Users lacking the permission are redirected to the log-in page; when
    ``raise_exception`` is set, ``PermissionDenied`` is raised instead
    (handled inside ``_user_passes_test_with_object_getter``).
    """
    def check_perms(user, obj):
        perms = perm if isinstance(perm, (list, tuple)) else (perm,)
        # Anonymous users are checked too: object-level permissions may
        # legitimately grant them access.
        if user.has_perms(perms, obj):
            logger.debug(
                "[permissions][check] User %s ** has ** permissions %s", user, perms
            )
            return True
        logger.debug(
            "[permissions][check] User %s ** has not ** permissions %s", user, perms
        )
        # Raising PermissionDenied (when requested) and showing the login
        # form are both delegated to _user_passes_test_with_object_getter.
        return False

    return _user_passes_test_with_object_getter(
        check_perms,
        object_getter,
        login_url,
        handle_access_error=handle_access_error,
        raise_exception=raise_exception,
    )
def contains_badwords(string):
    """
    Return True if *string* contains any of the configured bad words.

    Membership is a plain substring test against the module-level
    ``bad_words`` sequence.
    """
    # Generator instead of a list so any() can short-circuit on first hit.
    return any(bad_word in string for bad_word in bad_words)
from os.path import join
def get_cluster_info(metadata, consensus_subjects=None):
    """
    Construct cluster centroids and tractograms in MNI space from test session
    so can run `match_clusters`
    Identify `consensus_subject` for the test session n_clusters using `metadata['algorithm']`.
    The consensus subject is used to align clusters across test-retest.
    Parameters
    ----------
    metadata : dict
    consensus_subjects : dict, optional
        Precomputed consensus subjects keyed by n_clusters; when given,
        the (expensive) consensus computation is skipped.
    Returns
    -------
    cluster_info : dict `n_clusters` key
    └── values dict with following keys :
        `consensus_subject` and `session_name` as keys
        └── `consensus_subject` : `subject_id`
        └── `session_name` : dict with centriods`, `tractograms_filenames`, `tractograms` keys
            └── `centriods` : dict with `subject_id` key
                └── values centroid `StatefulTratogram` list of length `n_clusters`
            └── `tractograms_filenames` : dict with `subject_id` key
                └── tractogram filename list of length `n_clusters`
            └── `tractograms` : dict with `subject_id` key
                └── `StatefulTractogram` list of length n_clusters
    """
    cluster_info = {}
    base_dir = join(metadata['experiment_output_dir'], metadata['bundle_name'])
    for n_clusters in metadata['experiment_range_n_clusters']:
        cluster_info[n_clusters] = {}
        for session_name in metadata['experiment_sessions']:
            cluster_info[n_clusters][session_name] = {}
            # (test session) centroids from cleaned cluster tractograms
            # for each subject and cluster_label find the cluster centroid
            prealign_centroids = _prealignment_centroids(
                base_dir,
                session_name,
                metadata['model_name'],
                metadata['experiment_subjects'],
                metadata['bundle_name'],
                n_clusters
            )
            # (test session) MNI centroids from cleaned cluster tractograms
            # move the centroids into MNI space
            #
            # used for:
            # • visualization and
            # • matching test clusters with Algorithm.CENTROID
            cluster_info[n_clusters][session_name]['centroids'] = _move_centroids_to_MNI(
                session_name,
                metadata['experiment_subjects'],
                prealign_centroids
            )
            # (test session) MNI tractograms
            # for each subject move all cleaned cluster tractograms into the MNI space
            # allows for easier comparision using weighted dice
            #
            # used for:
            # • matching test clusters with using Algorithm.MAXDICE or Algorithm.MUNKRES
            tractogram_dict, tractograms_filename_dicts = _load_MNI_cluster_tractograms(
                base_dir,
                session_name,
                metadata['model_name'],
                metadata['experiment_subjects'],
                metadata['bundle_name'],
                n_clusters
            )
            cluster_info[n_clusters][session_name]['tractograms_filenames'] = tractograms_filename_dicts
            cluster_info[n_clusters][session_name]['tractograms'] = tractogram_dict
        # consensus_subject
        # once we have the centroids and tractograms we can calculate the consensus subject
        # using the specified algorithm
        #
        # this could be extracted into a separate step:
        # then could determine if consensus subjects are consistent across algorithms.
        # right now just run one algorithm at a time.
        if consensus_subjects is None:
            cluster_info[n_clusters]['consensus_subject'] = _find_consensus_subject(
                base_dir,
                metadata['experiment_test_session'],
                metadata['model_name'],
                metadata['experiment_subjects'],
                cluster_info,
                metadata['bundle_name'],
                n_clusters,
                metadata['algorithm']
            )
        else:
            # Reuse the caller-supplied consensus subject for this n_clusters.
            cluster_info[n_clusters]['consensus_subject'] = consensus_subjects[n_clusters]['consensus_subject']
    return cluster_info
import logging
def probe_all_devices(driver_config_fname):
    """
    Probe every device defined in the driver config and sort them by state.

    States:
        alive             - working in normal mode, answers modbus commands
        in_bootloader     - device could not boot its rom
        disconnected      - a dummy-record in config (no answer at all)
        too_old_to_update - alive, but firmware updates are unsupported
    """
    alive = []
    in_bootloader = []
    disconnected = []
    too_old_to_update = []

    def store_device(name, slaveid, port, uart_params):
        return DeviceInfo(name, slaveid, port, uart_settings=uart_params)

    logging.info('Will probe all devices defined in %s' % driver_config_fname)
    for port, port_params in get_devices_on_driver(driver_config_fname).items():
        uart_params = port_params['uart_params']
        for device_name, device_slaveid in port_params['devices']:
            logging.debug('Probing device %s (port: %s, slaveid: %d)...' % (device_name, port, device_slaveid))
            modbus_connection = bindings.WBModbusDeviceBase(device_slaveid, port, *uart_params)
            if modbus_connection.is_in_bootloader():
                in_bootloader.append(store_device(device_name, device_slaveid, port, uart_params))
                continue
            try:
                modbus_connection.get_slave_addr()
            except ModbusError:
                # Device is really disconnected
                disconnected.append(store_device(device_name, device_slaveid, port, uart_params))
                continue
            try:
                # Old devices have no fw_signature and raise TooOldDeviceError.
                db.save(modbus_connection.slaveid, modbus_connection.port, modbus_connection.get_fw_signature())
                alive.append(store_device(device_name, device_slaveid, port, uart_params))
            except TooOldDeviceError:
                logging.error('%s (slaveid: %d; port: %s) is too old and does not support firmware updates!' % (device_name, device_slaveid, port))
                too_old_to_update.append(store_device(device_name, device_slaveid, port, uart_params))
    return alive, in_bootloader, disconnected, too_old_to_update
def _learn_individual_mixture_weights(n_users, alpha, multinomials, max_iter, tol, val_mat, prior_strength, num_proc):
    """
    Learns the mixing weights for each individual user, uses multiple-processes to make it faster.
    :param n_users: Int, total number of users.
    :param alpha: prior (learned through global weights) for the pi's.
        NOTE(review): this array is scaled *in place* below
        (``alpha *= prior_strength``), so the caller's array is mutated —
        confirm this is intended.
    :param multinomials: List of components (Arrays of vectors).
    :param max_iter: max number of em iterations
    :param tol: convergence threshold
    :param val_mat: validation data to optimize on. U x C matrix.
    :param prior_strength: float, how much to increase the strength of the prior.
    :param num_proc: number of processes to be used.
    :return: 1. Matrix of mixing weights (Users x Components)
             2. Event log likelihood for validation data.
    """
    # lls entries stay exactly 1.0 until a worker reports a result for that
    # user; the sentinel value 1 is filtered out with `mask` below.
    lls = np.ones(n_users)
    pis = np.tile(alpha, n_users).reshape(n_users, len(multinomials))
    pis = normalize(pis, 'l1', axis=1)  # pi's for each user.
    log.info('Doing individual weights with %d proc' % num_proc)
    mix_weights = []
    alpha *= prior_strength
    # Keep every pseudo-count at least 1 so the Dirichlet prior is proper.
    if any(alpha < 1):
        alpha += 1
    # multi-process. Essentially calls _mp_learn_user_mix for a set of users.
    batch_size = int(np.ceil(1. * n_users / num_proc))  # how many users per process
    args = (alpha, multinomials, val_mat, max_iter, tol)
    uids = range(n_users)
    queue = Queue()
    num_eof = 0
    proc_pool = []
    # set-up the processes
    for i in range(num_proc):
        p_uids = uids[i * batch_size:(i + 1) * batch_size]  # define which users this process will handle.
        if len(p_uids) == 0:
            break
        proc = Process(target=_mp_learn_user_mix, args=(queue, p_uids, args))
        proc_pool.append(proc)
    # start the processes
    [proc.start() for proc in proc_pool]
    # collect end tokens: each worker posts per-user tuples, then a final
    # string token signalling it is done. Drain the queue BEFORE joining,
    # otherwise large results could deadlock the workers.
    while num_eof < len(proc_pool):
        resp = queue.get()
        if type(resp) == str:
            num_eof += 1
        else:
            mix_weights.append(resp)
    [proc.join() for proc in proc_pool]
    queue.close()
    # end multi-process
    for id, u_mix_weights, u_ll in mix_weights:
        pis[id] = np.array(u_mix_weights)
        lls[id] = u_ll
    # Keep only users that actually got a result (sentinel 1.0 excluded).
    mask = np.where(lls != 1)
    lls = lls[mask] * np.squeeze(np.array(val_mat.sum(axis=1)))[mask]
    event_ll = np.sum(lls) / np.sum(val_mat)
    return pis, event_ll
def banana(cls):
    """
    A decorator for a class that adds the ability to create Permissions and
    Handlers from their Checks.

    Collects every annotated attribute that resolves to a Check, records it
    in ``cls.__checks``, and attaches a ``from_checks`` classmethod when the
    class directly derives from Permission or PermissionHandler.
    """
    cls.__checks = set()
    # Basically tell checks that we are the class, not a medium to pass things through
    cls.__banana = True
    cls_annotations = cls.__dict__.get("__annotations__", {})
    for name in cls_annotations.keys():
        check = get_check(cls, name)
        if check is None:
            continue
        cls.__checks.add(name)
        setattr(cls, name, check)
    for base in cls.__bases__:
        # BUG FIX: the original condition `base is Permission or PermissionHandler`
        # parsed as `(base is Permission) or bool(PermissionHandler)`, which is
        # always true, so from_checks was attached for ANY first base class.
        if base is Permission or base is PermissionHandler:
            setattr(cls, "from_checks", classmethod(from_checks(cls)))
            break
    return cls
def residual_v2_conv(
    kernel_size: int,
    stride: int,
    depth: int,
    is_deconv: bool,
    add_max_pool: bool,
    add_bias: bool,
    is_train: bool,
    input_op: tf.Tensor,
    name: str = None,
) -> tf.Tensor:
  """Creates a residual convolution in the style of He et al. April 2016.
  This is the second version of their proposed residual structure, where the
  order of operations is batch_norm -> activation -> convolution.
  We use RELU and TANH activations, and we optionally add a max pool.
  Args:
    kernel_size: The size of the kernel.
    stride: The stride of the convolution.
    depth: The depth of the reduction layer.
    is_deconv: Whether this is a deconvolution.
    add_max_pool: Whether to add a parallel max pool with the same parameters
      as the convolution.
    add_bias: Whether to add bias to the convolution.
    is_train: Whether we're training this graph.
    input_op: The input.
    name: An optional op name.
  Returns:
    The tensor output of residual convolution.
  """
  with tf.variable_scope(name, 'residual_v2_conv', [input_op]) as scope:
    [_, num_rows, num_columns, _] = input_op.shape.as_list()
    if not is_deconv:
      assert num_rows >= kernel_size
      assert num_columns >= kernel_size
      # Make sure we can do a valid convolution.
      assert (num_rows - kernel_size) % stride == 0
      assert (num_columns - kernel_size) % stride == 0
    # In the future it may be necessary to set epsilon to a larger value
    # than the default here.
    bn_op = slim.batch_norm(input_op, is_training=is_train, scale=True)
    # Doubles the channel count: both RELU and TANH views of the normalized
    # input are kept and fed to the convolution.
    concat_op = tf.concat([tf.nn.relu(bn_op), tf.nn.tanh(bn_op)], 3)
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose, slim.max_pool2d],
        kernel_size=kernel_size,
        stride=stride,
        padding='VALID'):
      if add_bias:
        biases_initializer = tf.zeros_initializer()
      else:
        # None disables the bias variable entirely in slim layers.
        biases_initializer = None
      with slim.arg_scope(
          [slim.conv2d, slim.conv2d_transpose],
          inputs=concat_op,
          num_outputs=depth,
          activation_fn=None,
          biases_initializer=biases_initializer):
        # All layer arguments are supplied via the arg_scopes above, so the
        # calls below are intentionally argument-free.
        if is_deconv:
          conv_op = slim.conv2d_transpose()
        else:
          conv_op = slim.conv2d()
        if add_max_pool:
          assert not is_deconv
          assert kernel_size > 1
          # Max pool runs on the raw input (pre-batchnorm) in parallel.
          return tf.concat(
              [conv_op, slim.max_pool2d(input_op)], 3, name=scope.name)
        else:
          return tf.identity(conv_op, name=scope.name)
def dump_cups_with_first(cups: list[int]) -> str:
    """Render the cups as a string, highlighting the first one.

    The first cup is wrapped in parentheses; every other cup is padded with
    spaces, e.g. ``[3, 8, 9] -> "(3) 8  9 "``.

    :param cups: list of digits
    :return: list of cups in string format
    """
    # PEP 8 (E731): a named formatting rule reads better as a def than an
    # assigned lambda.
    def render(position: int, cup: int) -> str:
        return f'({cup})' if position == 0 else f' {cup} '

    return ''.join(render(i, cup) for i, cup in enumerate(cups))
def init(strKernel, iKernelPar=1, iALDth=1e-4, iMaxDict=1e3):
    """
    Initialize the krls dictionary. |br|

    Args:
        strKernel (string): Type of the kernel
        iKernelPar (float): Kernel parameter [default = 1]
        iALDth (float): ALD threshold [default = 1e-4]
        iMaxDict (int): Max size of the dictionary [default = 1e3]

    Returns:
        dAldKRLS (dictionary): All the data of the current KRLS algorithm:

        - a. **iALDth** (*int*): ALD threshold
        - b. **iMaxDt** (*float*): Max size of the dictionary
        - c. **strKernel** (*string*): Type of the kernel
        - d. **iKernelPar** (*float*): Kernel parameter
        - e. **bInit** (*int*): Initialization flag = 1. This flag is
          cleared with a first call to the 'train' function.
    """
    return {
        'iALDth': iALDth,        # ALD threshold
        'iMaxDt': iMaxDict,      # maximum dictionary size
        'strKernel': strKernel,  # kernel type
        'iKernelPar': iKernelPar,  # kernel parameter
        'bInit': 0,              # 'initialization done' flag, cleared here
    }
def prune_scope():
  """Provides a scope in which Pruned layers and models can be deserialized.

  For TF 2.X: not needed for SavedModel or TF checkpoints, which are the
  recommended serialization formats.
  For TF 1.X: a pruned tf.keras h5 model or layer must be deserialized
  within this scope; loading just keras weights does not need it.

  Returns:
    Object of type `CustomObjectScope` with pruning objects included.

  Example:

  ```python
  pruned_model = prune_low_magnitude(model, **self.params)
  keras.models.save_model(pruned_model, keras_file)

  with prune_scope():
    loaded_model = keras.models.load_model(keras_file)
  ```
  """
  pruning_objects = {'PruneLowMagnitude': pruning_wrapper.PruneLowMagnitude}
  return tf.keras.utils.custom_object_scope(pruning_objects)
import platform
import subprocess
import os
import shutil
def _find_chrome(user_given_executable=None):
    """ Finds a Chrome executable.

    Search Chrome on a given path. If no path given,
    try to find Chrome or Chromium-browser on a Windows or Unix system.

    Lookup order: environment variable, then the user-given executable,
    then OS-specific well-known locations/commands.

    Parameters
    ----------
    - `user_given_executable`: str (optional)
        + A filepath leading to a Chrome/ Chromium executable
        + Or a filename found in the current working directory
        + Or a keyword that executes Chrome/ Chromium, ex:
            - 'chromium' on linux systems
            - 'chrome' on windows (if typing `start chrome` in a cmd works)

    Raises
    ------
    - `FileNotFoundError`
        + If a suitable chrome executable could not be found.

    Returns
    -------
    - str
        + Path of the chrome executable on the current machine.
    """
    # try to find a chrome bin/exe in ENV
    path_from_env = find_first_defined_env_var(
        env_var_list=CHROME_EXECUTABLE_ENV_VAR_CANDIDATES,
        toggle=ENV_VAR_LOOKUP_TOGGLE
    )
    if path_from_env:
        # NOTE(review): both f-string slots interpolate the *path*; the first
        # was presumably meant to be the env var name — confirm and fix.
        print(
            f'Found a potential chrome executable in the {path_from_env} '
            f'environment variable:\n{path_from_env}\n'
        )
        return path_from_env

    # if an executable is given, try to use it
    if user_given_executable is not None:
        # On Windows, we cannot "safely" validate that user_given_executable
        # seems to be a chrome executable, as we cannot run it with
        # the --version flag.
        # https://bugs.chromium.org/p/chromium/issues/detail?id=158372
        #
        # We thus do the "bare minimum" and check if user_given_executable
        # is a file, a filepath, or corresponds to a keyword that can be used
        # with the start command, like so: `start user_given_executable`
        if platform.system() == 'Windows':
            command_origin = get_command_origin(user_given_executable)
            if command_origin:
                return command_origin

            # cannot validate user_given_executable
            # NOTE(review): raised without a message, unlike the other
            # failure paths below.
            raise FileNotFoundError()

        # On a non-Windows OS, we can validate in a basic way that
        # user_given_executable leads to a Chrome / Chromium executable,
        # or is a command, using the --version flag
        else:
            try:
                if 'chrom' in subprocess.check_output(
                    [user_given_executable, '--version']
                ).decode('utf-8').lower():
                    return user_given_executable
            except Exception:
                # Deliberate best-effort: any failure (missing binary,
                # non-zero exit, decode error) means "not validated".
                pass

            # We got a user_given_executable but couldn't validate it
            raise FileNotFoundError(
                'Failed to find a seemingly valid chrome executable '
                'in the given path.'
            )

    # Executable not in ENV or given by the user, try to find it
    # Search for executable on a Windows OS
    if platform.system() == 'Windows':
        prefixes = [
            os.getenv('PROGRAMFILES(X86)'),
            os.getenv('PROGRAMFILES'),
            os.getenv('LOCALAPPDATA'),
        ]

        suffix = "Google\\Chrome\\Application\\chrome.exe"

        for prefix in prefixes:
            path_candidate = os.path.join(prefix, suffix)
            if os.path.isfile(path_candidate):
                return path_candidate

    # Search for executable on a Linux OS
    elif platform.system() == "Linux":

        chrome_commands = [
            'chromium',
            'chromium-browser',
            'chrome',
            'google-chrome'
        ]

        for chrome_command in chrome_commands:
            if shutil.which(chrome_command):
                # check the --version for "chrom" ?
                return chrome_command

        # snap seems to be a special case?
        # see https://stackoverflow.com/q/63375327/12182226
        try:
            version_result = subprocess.check_output(
                ["chromium-browser", "--version"]
            )
            if 'snap' in str(version_result):
                chrome_snap = (
                    '/snap/chromium/current/usr/lib/chromium-browser/chrome'
                )
                if os.path.isfile(chrome_snap):
                    return chrome_snap
        except Exception:
            # Best-effort snap detection; fall through to the final error.
            pass

    # Search for executable on MacOS
    elif platform.system() == "Darwin":
        # MacOS system
        chrome_app = (
            '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
        )
        try:
            version_result = subprocess.check_output(
                [chrome_app, "--version"]
            )
            if "Google Chrome" in str(version_result):
                return chrome_app
        except Exception:
            # Best-effort app-bundle probe; fall through to the final error.
            pass

    # Couldn't find an executable (or OS not in Windows, Linux or Mac)
    raise FileNotFoundError(
        'Could not find a Chrome executable on this '
        'machine, please specify it yourself.'
    )
import re
def is_valid_br_cnpj(cnpj):
    """
    Check whether *cnpj* is a valid brazilian CNPJ number.

    Accepts the value with or without formatting characters (dots, dash,
    slash); both check digits are verified against the first 12 digits.
    Return True or False.
    """
    # Strip formatting characters (dot, pipe, dash, slash). Raw string
    # avoids the invalid '\-' escape warning of the original pattern.
    cnpj = re.sub(r'[.|\-/|/]', '', str(cnpj))
    # Must be exactly 14 digits and not a known-invalid sequence.
    if not re.match(r'^\d{14}$', cnpj) or cnpj in _INVALID_CNPJ:
        return False
    # CNPJs consisting of a single repeated digit are never valid.
    if any(cnpj == str(i) * 14 for i in range(10)):
        return False
    # Rebuild both check digits from the 12 base digits and compare.
    assemble_cnpj = cnpj[:12]
    assemble_cnpj += str(_cnpj_check_digit(assemble_cnpj, 5))
    assemble_cnpj += str(_cnpj_check_digit(assemble_cnpj, 6))
    return cnpj == assemble_cnpj


def _cnpj_check_digit(digits, first_weight):
    """Compute one CNPJ check digit (modulo-11 weighted sum).

    Weights start at *first_weight*, decrease to 2, then restart at 9.
    """
    weight = first_weight
    total = 0
    for ch in digits:
        total += int(ch) * weight
        weight -= 1
        if weight < 2:
            weight = 9
    remainder = total % 11
    return 0 if remainder < 2 else 11 - remainder
def get_supported_locales():
    """
    Return a list of Locale objects that the Web Interface supports,
    sorted by language code.
    """
    locales = BABEL.list_translations()
    # "en" has no translation catalog (presumably the untranslated default
    # language — confirm), so it is appended explicitly.
    locales.append(Locale("en"))
    return sorted(locales, key=lambda loc: loc.language)
from typing import Optional
from pathlib import Path
import site
def get_pipx_user_bin_path() -> Optional[Path]:
    """Locate the bin directory of a `pip install --user` pipx, if any.

    Returns None when pipx was not pip user-installed. NOTE: a user-mode
    editable install (`pip install --user -e`) is also reported as None,
    since this module then lives outside the userbase.

    Layout (https://docs.python.org/3/install/index.html#inst-alt-install-user):
        Linux + Mac: scripts in <userbase>/bin
        Windows:     scripts in <userbase>/Python<XY>/Scripts,
                     modules in <userbase>/Python<XY>/site-packages
    """
    script_path = Path(__file__).resolve()
    userbase_path = Path(site.getuserbase()).resolve()
    try:
        # Raises ValueError when this module is NOT under the userbase,
        # i.e. pipx was not installed via `pip --user`.
        script_path.relative_to(userbase_path)
    except ValueError:
        return None
    candidate_bins = (
        userbase_path / "bin" / "pipx",
        Path(site.getusersitepackages()).resolve().parent / "Scripts" / "pipx.exe",
    )
    for candidate in candidate_bins:
        if candidate.exists():
            return candidate.parent
    return None
def _normalize_int_key(key, length, axis_name=None):
"""
Normalizes an integer signal key.
Leaves a nonnegative key as it is, but converts a negative key to
the equivalent nonnegative one.
"""
axis_text = '' if axis_name is None else axis_name + ' '
if key < -length or key >= length:
raise IndexError(
f'Index {key} is out of bounds for signal {axis_text}axis with '
f'length {length}.')
return key if key >= 0 else key + length | 9b58b09e70c20c9ac5ee0be059333dd5058802ef | 32,389 |
def create_interview_in_jobma(interview):
    """
    Create a new interview on Jobma and store the returned link and token.

    Args:
        interview (Interview): An interview object

    Returns:
        The Jobma interview link, or None when the reply lacked one.
    """
    client = get_jobma_client()
    url = urljoin(settings.JOBMA_BASE_URL, "interviews")
    job = interview.job
    first_name, last_name = get_first_and_last_names(interview.applicant)
    payload = {
        "interview_template_id": str(job.interview_template_id),
        "job_id": str(job.job_id),
        "job_code": job.job_code,
        "job_title": job.job_title,
        # Jobma calls back here when the interview status changes.
        "callback_url": urljoin(
            settings.SITE_BASE_URL,
            reverse("jobma-webhook", kwargs={"pk": interview.id}),
        ),
        "candidate": {
            "first_name": first_name,
            "last_name": last_name,
            "phone": "",
            "email": interview.applicant.email,
        },
    }
    response = client.post(url, json=payload)
    response.raise_for_status()
    result = response.json()
    interview_link = result.get("interview_link")
    if interview_link is None:
        log.error("Interview link not found in payload - %s", result)
    else:
        interview.interview_url = interview_link
    interview_token = result.get("interview_token")
    if interview_token is not None:
        interview.interview_token = interview_token
    interview.save_and_log(None)
    return interview_link
def get_solutions(N, K, W_hat, x):
    """
    Find binary vectors of length N with exactly K ones whose selected
    indices sum to S = scalar(W_hat @ x).
    """
    target = scalar(W_hat @ x)
    index_sets = []
    for partition in sum_to_S(target, K):
        # Keep only partitions of K *distinct* indices that fit in [0, N).
        if len(set(partition)) == len(partition) and max(partition) < N:
            candidate = sorted(partition)
            if candidate not in index_sets:
                index_sets.append(candidate)
    vectors = []
    for indices in index_sets:
        vec = np.zeros(N)
        vec[indices] = 1
        vectors.append(vec)
    return vectors
def get_actual_order(geometry, order):
    """
    Return the actual integration order for given geometry.

    Parameters
    ----------
    geometry : str
        The geometry key describing the integration domain,
        see the keys of `quadrature_tables`.
    order : int
        The requested integration order.

    Returns
    -------
    order : int
        If `order` is in quadrature tables it is this
        value. Otherwise it is the closest higher order. If no
        higher order is available, a warning is printed and the
        highest available order is used.
    """
    table = quadrature_tables[geometry]
    if order not in table:
        # nm.searchsorted() requires a sorted sequence - sort explicitly
        # instead of relying on the dict's key insertion order.
        orders = sorted(table.keys())
        ii = nm.searchsorted(orders, order)
        if ii >= len(orders):
            # Requested order exceeds all available ones - warn and clamp
            # to the highest available order.
            omax = orders[-1]
            output(_msg1 % (order, geometry))
            output(_msg2 % omax)
            order = omax

        else:
            order = orders[ii]

    return order
def getcwd(*args,**kw):
    """getcwd() -> path
    Return a unicode string representing the current working directory."""
    # Brython stub: the browser has no real filesystem, so the configured
    # Brython path stands in for the current directory.  Arguments are
    # accepted and ignored for signature compatibility with os.getcwd().
    return __BRYTHON__.brython_path
def sample_graph(B, logvars, n_samp):
    """
    Sample observations from the linear model defined by ``B``.

    Draws ``n_samp`` independent Gaussian noise vectors with per-variable
    standard deviations ``sqrt(exp(logvars))`` and maps them through
    ``X = (I - B^T)^{-1} N`` using an explicit matrix inverse.

    Args:
        B: (p, p) weight matrix.
        logvars: length-p array of log noise variances.
        n_samp: number of samples to draw.

    Returns:
        (n_samp, p) array of sampled data.
    """
    num_vars = len(logvars)
    noise_scale = np.sqrt(np.exp(logvars))
    noise = np.random.normal(0, noise_scale, size=(n_samp, num_vars))
    mixing = np.linalg.inv(np.eye(num_vars) - B.T)
    return (mixing @ noise.T).T
def rotate_ne_rt(n, e, ba):
    """
    Rotates horizontal components of a seismogram.

    The North- and East-Component of a seismogram will be rotated in Radial
    and Transversal Component. The angle is given as the back-azimuth, that is
    defined as the angle measured between the vector pointing from the station
    to the source and the vector pointing from the station to the North.

    :type n: :class:`~numpy.ndarray`
    :param n: Data of the North component of the seismogram.
    :type e: :class:`~numpy.ndarray`
    :param e: Data of the East component of the seismogram.
    :type ba: float
    :param ba: The back azimuth from station to source in degrees.
    :return: Radial and Transversal component of seismogram.
    """
    if len(n) != len(e):
        raise TypeError("North and East component have different length.")
    if ba < 0 or ba > 360:
        raise ValueError("Back Azimuth should be between 0 and 360 degrees.")
    # Hoist the trigonometric terms; they are constant over the traces.
    ba_rad = radians(ba)
    sin_ba = sin(ba_rad)
    cos_ba = cos(ba_rad)
    radial = -e * sin_ba - n * cos_ba
    transversal = -e * cos_ba + n * sin_ba
    return radial, transversal
def pwgen(pw_len=16):
    """ Generate a random password with the given length.

    Allowed chars does not have "I" or "O" or letters and
    digits that look similar -- just to avoid confusion.
    """
    # Confusables (I, O, l, 0, 1) are deliberately absent from the alphabet.
    allowed_chars = (
        'abcdefghjkmnpqrstuvwxyz'
        'ABCDEFGHJKLMNPQRSTUVWXYZ'
        '23456789'
    )
    return get_random_string(pw_len, allowed_chars)
import os
import subprocess
import warnings
def run_zeopp(structure: Structure) -> dict:
    """Run zeopp with network -ha -res (http://www.zeoplusplus.org/examples.html)
    to find the pore diameters

    Args:
        structure (Structure): pymatgen Structure object

    Returns:
        dict: pore analysis results
    """
    if not is_tool("network"):
        # zeo++ binary unavailable: warn and return NaN placeholders.
        warnings.warn(NO_ZEOPP_WARNING)
        return {
            "lis": np.nan,  # largest included sphere
            "lifs": np.nan,  # largest free sphere
            "lifsp": np.nan,  # largest included sphere along free sphere path
        }

    with TemporaryDirectory() as tempdir:
        structure_path = os.path.join(tempdir, "structure.cif")
        result_path = os.path.join(tempdir, "result.res")
        # Write the structure as CIF so the zeo++ "network" tool can read it.
        structure.to("cif", structure_path)
        cmd = ZEOPP_BASE_COMMAND + [str(result_path), str(structure_path)]
        _ = subprocess.run(
            cmd,
            universal_newlines=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            check=True,
        )
        with open(result_path, "r") as handle:
            raw_results = handle.read()
        return _parse_zeopp(raw_results)
import requests
from bs4 import BeautifulSoup
def get_property_data(sch=""):
    """Get property id and return dictionary with data

    Scrapes the Jefferson County assessor page (ats.jeffco.us) for the
    given schedule number, collecting both single-value fields (from
    ``<td>`` cells) and tabular data (via the ``parse_*_table`` helpers).

    Attributes:
        sch: property id

    Returns:
        dict: parsed fields such as "PIN", "Status", "owners",
        "sales_history", "property_address", etc. Keys are only present
        when the corresponding data was found on the page.
    """
    property_url = "http://ats.jeffco.us/ats/displaygeneral.do?sch={0}".format(sch)
    r = requests.get(property_url)
    property_page = BeautifulSoup(r.text, "lxml")
    property_dict = {}
    # Get data from the single data fields
    # Each field is identified by a substring of the cell text; the value is
    # everything after the first ":" (re-joined in case the value itself
    # contains colons).
    # NOTE(review): the loop index ``i`` is unused.
    data_cells = property_page.find_all("td")
    for i, data_cell in enumerate(data_cells):
        try:
            cell_text = data_cell.text.strip()
            if "PIN/Schedule" in cell_text:
                property_dict["PIN"] = ":".join(cell_text.split(":")[1:]).strip()
            elif "status:" in cell_text:
                property_dict["Status"] = ":".join(cell_text.split(":")[1:]).strip()
            elif "AIN/Parcel ID:" in cell_text:
                property_dict["AIN"] = ":".join(cell_text.split(":")[1:]).strip()
            elif "Property Type:" in cell_text:
                property_dict["property_type"] = ":".join(cell_text.split(":")[1:]).strip()
            elif "Neighborhood:" in cell_text:
                # Extra split()/join collapses runs of internal whitespace.
                property_dict["neighborhood"] = " ".join(":".join(cell_text.split(":")[1:]).strip().split())
            elif "Subdivision Name:" in cell_text:
                property_dict["subdivision_name"] = ":".join(cell_text.split(":")[1:]).strip()
            # Order matters: "Adjusted Year Built:" contains "Year Built:",
            # so it must be tested first.
            elif "Adjusted Year Built:" in cell_text:
                property_dict["adjusted_year_built"] = ":".join(cell_text.split(":")[1:]).strip()
            elif "Year Built:" in cell_text:
                property_dict["year_built"] = ":".join(cell_text.split(":")[1:]).strip()
        except (AttributeError, IndexError):
            continue
    # Define data from tables
    # Tables are recognized by the text of their "tableheaders" row; tables
    # without such a row raise AttributeError and are skipped by the except.
    data_tables = property_page.find_all("table")
    for data_table in data_tables:
        try:
            table_header = data_table.find("tr", class_="tableheaders").text
            if "Owner Name(s)" in table_header:
                property_dict["owners"] = parse_one_column_table(data_table)
            elif "Assessor Parcel Maps Associated" in table_header:
                property_dict["Assessor Parcel Maps Associated with Schedule"] = parse_one_column_table(data_table)
            elif "Land Characteristics" in table_header:
                property_dict["land_characteristics"] = parse_one_column_table(data_table)
            elif (
                "Block" in table_header and
                "Lot" in table_header and
                "Key" in table_header
            ):
                property_dict["property_description"] = parse_many_columns_table(data_table, name="property_description")
            elif (
                "Item" in table_header and
                "Quality" in table_header
            ):
                property_dict["property_inventory_1"] = parse_many_columns_table(data_table)
            elif (
                "Areas" in table_header and
                "Quality" in table_header
            ):
                property_dict["property_inventory_2"] = parse_many_columns_table(data_table)
            elif (
                "Adjustment Code" in table_header and
                "Adjustment SqFt" in table_header
            ):
                property_dict["property_inventory_3"] = parse_many_columns_table(data_table)
            elif (
                "Sale Date" in table_header and
                "Sale Amount" in table_header
            ):
                property_dict["sales_history"] = parse_many_columns_table(data_table)
            # "not data_table.table" skips outer tables that merely contain
            # the matching table nested inside them.
            elif (
                "Payable" in table_header and not data_table.table
            ):
                property_dict["tax_information"] = parse_many_columns_table(data_table, name="tax_info")
            elif (
                "Mill Levy" in table_header and not data_table.table
            ):
                property_dict["mill_levy_information"] = parse_many_columns_table(data_table, name="mill_levy_information")
        except AttributeError:
            pass
        # The address block has no "tableheaders" row, so it is matched on
        # the full table text instead (outside the try/except above).
        if "Property Address:" in data_table.text and not data_table.table:
            address_data = parse_address_table(data_table)
            property_dict["property_address"] = address_data[0]
            property_dict["mailing_address"] = address_data[1]
    return property_dict
import os
def sanitize_fname(fname):
    """
    Ensures that fname is a path under the current working directory.

    The (bytes) filename is treated as if rooted at ``/``, normalized to
    collapse any ``..``/``.`` components at that root, and then joined onto
    the current working directory, so traversal sequences in ``fname``
    cannot escape it.

    Args:
        fname (bytes): untrusted relative file name.

    Returns:
        bytes: a path guaranteed to lie under the current working directory.
    """
    # os.getcwdb() yields the cwd as bytes in the filesystem encoding;
    # the previous bytes(..., encoding='ascii') conversion raised
    # UnicodeEncodeError for non-ASCII working directories.
    root_dir = os.getcwdb()
    # Anchoring at b'/' before normpath() makes leading b'..' components
    # collapse at the root instead of walking above it.
    return opath.join(
        root_dir,
        opath.normpath(
            b'/' + fname).lstrip(b'/'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.