content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def Chat_(request):
    """Handle a chat query posted by an API client.

    Expected request body:
        { "value": "Your query" }

    Returns:
        Response: the chatbot's answer, or an error payload when the
        request body does not match the expected shape.
    """
    print(request.data)
    serializer = PatternSerializer(request.data)
    try:
        response = ChatBot(serializer.data["value"])
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). Any failure while reading the
        # payload or running the bot is reported in the response body.
        response = {
            "error": "Data is in wrong format use { 'value' : 'Your query' }",
            "response": None,
            "tag": None
        }
    return Response(response)
def get_solubility(molecular_weight, density):
"""
Estimate the solubility of each oil pseudo-component
Estimate the solubility (mol/L) of each oil pseudo-component using the
method from Huibers and Lehr given in the huibers_lehr.py module of
py_gnome in the directory gnome/utilities/weathering/. This method is from
Huibers & Katrisky in a 2012 EPA report and was further modified by Lehr
to better match measured values. The equation used here is adapted to
return results in mol/L.
Parameters
----------
molecular_weight : np.array
Molecular weights of each pseudo-component as recorded in the NOAA
Oil Library (g/mol)
density : np.array
Density of each pseudo-component as recorded in the NOAA Oil Library
(kg/m^3)
Returns
-------
solubility : np.array
Array of solubilities (mol/L) for each pseudo-component of the oil.
"""
return 46.4 * 10. ** (-36.7 * molecular_weight / density) | 34,701 |
def build_1d_frp_matrix(func, x, sigma, B=1):
    """Build a quadratic FRP matrix that respects periodic boundary conditions.

    func: kernel function, called as func(distance, width)
    x: positions of the points
    sigma: kernel width per point
    B: period length of the periodic box
    """
    n_points = len(x)
    matrix = np.zeros((n_points, n_points))
    # Sum the kernel over 11 periodic images (-5B ... +5B) of each pair.
    periodic_shifts = np.arange(-5, 6) * B
    for row in range(n_points):
        for col in range(n_points):
            separation = x[row] - x[col]
            matrix[row, col] = sum(
                func(separation + shift, sigma[row]) for shift in periodic_shifts
            )
    return matrix
def invalid_item(item_key, valid_flag=False):
    """Set the valid_flag of the registry entry matching *item_key*.

    Raises:
        RequiredError: if *item_key* is empty.
    """
    if kind.str_is_empty(item_key):
        raise RequiredError("item_key")
    registry_query = Registry.all()
    registry_query.filter("item_key =", item_key)
    registry_query.set("valid_flag", valid_flag)
    return registry_query.update(context.get_user_id())
def ret_str() -> str:
    """Return an empty string placeholder."""
    # blahs
    # blahs
    # blahs
    return ''
def delete_column(table, name):
    """Drop the column named *name* from *table*.

    Uses the column's own drop() operation (migrate-style column API).
    """
    table.c[name].drop(table)
def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Generates music from a model specified by command line arguments
    ----------
    """
    args = parse_generate_args()
    print_generate_args(args)
    if(args.force_cpu):
        use_cuda(False)
        print("WARNING: Forced CPU usage, expect model to perform slower")
        print("")
    os.makedirs(args.output_dir, exist_ok=True)
    # Grabbing dataset if needed
    _, _, dataset = create_epiano_datasets(args.midi_root, args.num_prime, random_seq=False)
    # Primer selection: can be None (random index), an integer index into
    # the dataset, or a file path to a MIDI file.
    if(args.primer_file is None):
        f = str(random.randrange(len(dataset)))
    else:
        f = args.primer_file
    if(f.isdigit()):
        idx = int(f)
        primer, _ = dataset[idx]
        primer = primer.to(get_device())
        print("Using primer index:", idx, "(", dataset.data_files[idx], ")")
    else:
        raw_mid = encode_midi(f)
        if(len(raw_mid) == 0):
            print("Error: No midi messages in primer file:", f)
            return
        primer, _ = process_midi(raw_mid, args.num_prime, random_seq=False)
        primer = torch.tensor(primer, dtype=TORCH_LABEL_TYPE, device=get_device())
        print("Using primer file:", f)
    model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                             d_model=args.d_model, dim_feedforward=args.dim_feedforward,
                             max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
    model.load_state_dict(torch.load(args.model_weights))
    # Saving primer first (converted through TMIDI text form rather than
    # the original decode_midi path, which is kept commented out).
    f_path = os.path.join(args.output_dir, "primer")
    #decode_midi(primer[:args.num_prime].cpu().numpy(), file_path=f_path)
    x = primer[:args.num_prime].cpu().numpy()
    y = x.tolist()
    z = TMIDI.Tegridy_INT_to_TXT_Converter(y)
    SONG = TMIDI.Tegridy_Reduced_TXT_to_Notes_Converter(z, has_MIDI_channels=False, has_velocities=False)
    stats = TMIDI.Tegridy_SONG_to_MIDI_Converter(SONG=SONG[0], output_file_name=f_path)
    # GENERATION: beam search when args.beam > 0, otherwise sampling from
    # the output distribution.
    model.eval()
    with torch.set_grad_enabled(False):
        if(args.beam > 0):
            print("BEAM:", args.beam)
            beam_seq = model.generate(primer[:args.num_prime], args.target_seq_length, beam=args.beam)
            f_path = os.path.join(args.output_dir, "beam")
            # NOTE(review): this branch both decode_midi()s and re-converts
            # through TMIDI to the same f_path — the TMIDI result wins.
            decode_midi(beam_seq[0].cpu().numpy(), file_path=f_path)
            x = beam_seq[0].cpu().numpy()
            y = x.tolist()
            z = TMIDI.Tegridy_INT_to_TXT_Converter(y)
            SONG, song_name = TMIDI.Tegridy_Optimus_TXT_to_Notes_Converter(z,
                                                                           has_MIDI_channels=False,
                                                                           simulate_velocity=False,
                                                                           char_encoding_offset=33,
                                                                           save_only_first_composition=True,
                                                                           dataset_MIDI_events_time_denominator=10,
                                                                           has_velocities=True
                                                                           )
            stats = TMIDI.Tegridy_SONG_to_MIDI_Converter(SONG=SONG, output_file_name=f_path)
            print(stats)
        else:
            print("RAND DIST")
            rand_seq = model.generate(primer[:args.num_prime], args.target_seq_length, beam=0)
            f_path = os.path.join(args.output_dir, "rand")
            #decode_midi(rand_seq[0].cpu().numpy(), file_path=f_path)
            #print('Seq =', rand_seq[0].cpu().numpy())
            x = rand_seq[0].cpu().numpy()
            y = x.tolist()
            z = TMIDI.Tegridy_INT_to_TXT_Converter(y)
            #SONG = TMIDI.Tegridy_Reduced_TXT_to_Notes_Converter(z, has_MIDI_channels=False, has_velocities=False)
            SONG, song_name = TMIDI.Tegridy_Optimus_TXT_to_Notes_Converter(z,
                                                                           has_MIDI_channels=False,
                                                                           simulate_velocity=False,
                                                                           char_encoding_offset=33,
                                                                           save_only_first_composition=True,
                                                                           dataset_MIDI_events_time_denominator=10,
                                                                           has_velocities=True
                                                                           )
            stats = TMIDI.Tegridy_SONG_to_MIDI_Converter(SONG=SONG, output_file_name=f_path)
            print(stats)
def dump_pb(dir_or_filename):
    """Dump TensorProto data from a .pb file or every .pb file in a directory.

    All files must contain a serialized TensorProto.
    """
    if not os.path.isdir(dir_or_filename):
        dump_tensorproto_pb_file(dir_or_filename)
        return
    for pb_path in glob.glob(os.path.join(dir_or_filename, "*.pb")):
        print(pb_path)
        dump_tensorproto_pb_file(pb_path)
def get_wh_words(document: Union[Doc, Span]):
    """
    Get the list of WH-words\n
    - when, where, why\n
    - whence, whereby, wherein, whereupon\n
    - how\n
    - what, which, whose\n
    - who, whose, which, what\n
    Resources:\n
    - https://grammar.collinsdictionary.com/easy-learning/wh-words\n
    - https://www.ling.upenn.edu/hist-corpora/annotation/pos-wh.htm
    :param document: The parsed document
    :return: The list of WH-words
    """
    # Penn Treebank WH tags; a set makes membership checks O(1) and drops
    # the redundant list() wrapper around the comprehension.
    wh_tags = {'WDT', 'WP', 'WP$', 'WRB'}
    return [token for token in document if token.tag_ in wh_tags]
def sample_mixture_gaussian(batch_size, p_array, mu_list, sig_list, k=K, d=DIM):
    """
    Draw samples from a mixture of normal distributions.

    :param batch_size: sample size
    :param p_array: np array which includes probability for each component of mix
    :param mu_list: list of means of each component
    :param sig_list: list of covariance matrices of each component
    :return: samples from mixture, shape (batch_size, d)
    """
    # NOTE(review): the k/d parameters are always overwritten below, so the
    # K/DIM defaults are effectively ignored — confirm this is intended.
    if hasattr(mu_list[0], "__len__"):
        d = len(mu_list[0])  # dimension of the distribution
    else:
        d = 1
    k = len(mu_list)  # number of mixture components
    samples = np.zeros([batch_size, d])
    # Pick a component for every sample according to p_array.
    component_ids = np.random.choice(range(k), p=p_array, size=batch_size)
    for row, comp in enumerate(component_ids):
        if d > 1:
            samples[row, :] = np.random.multivariate_normal(mean=mu_list[comp],
                                                            cov=sig_list[comp])
        else:
            samples[row, :] = np.random.randn() * sig_list[comp] + mu_list[comp]
    return samples
def sign(x: float) -> float:
    """Return 1.0 for positive input, -1.0 for negative, 0.0 otherwise."""
    if x > 0:
        return 1.0
    if x < 0:
        return -1.0
    # Zero (and NaN, which fails both comparisons) falls through to 0.0.
    return 0.0
def profile_genome_plot(bar_width,l_genome,allinsertionsites_list,allcounts_binnedlist,summed_chr_length_dict,
                        middle_chr_position,chrom_list,variable,genes_currentchrom_pos_list,gene_pos_dict):
    """Plot function to show the whole insertion map throughout the genome

    Parameters
    ----------
    bar_width : int
        The width for the histogram of the plot, by default None , which means internally the length of the genome over 1000
    l_genome : int
        The length of the genome in bp
    allinsertionsites_list : list
        List of insertions sites
    allcounts_binnedlist : list
        List of binned counts
    summed_chr_length_dict : dict
        The cumulative sum of the length of every chromosome
    middle_chr_position : dict
        Middle chromosome position per chromosome
    chrom_list : list
        A list of all the chromosomes
    variable : str
        It could be "transposons" or "reads"
    genes_currentchrom_pos_list : list
        List of genes per chromosome
    gene_pos_dict : dict
        Postion along the genome of every gene

    Returns
    -------
    None; draws the figure on the current matplotlib state.
    """
    # Known-essential genes are used to color the bottom annotation track.
    _,essential_file,_=load_default_files()
    genes_essential_list=list_known_essentials(essential_file)
    plt.figure(figsize=(19.0,9.0))
    # 20-row grid: rows 0-18 hold the histogram, row 19 the gene track.
    grid = plt.GridSpec(20, 1, wspace=0.0, hspace=0.0)
    textsize = 12
    textcolor = "#000000"
    barcolor= "#333333"
    chrom_color=(0.9,0.9,0.9,1.0)
    essential_face_color="#00F28E"
    non_essential_face_color="#F20064"
    alpha=0.8
    binsize = bar_width
    ax = plt.subplot(grid[0:19,0])
    ax.grid(False)
    ax.tick_params(axis='x', which='major', pad=30)
    # Bottom strip: gene annotation track without any ticks or labels.
    axc = plt.subplot(grid[19,0])
    axc.set_xlim(0,l_genome)
    axc.tick_params(
        axis='x',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom=False,      # ticks along the bottom edge are off
        top=False,         # ticks along the top edge are off
        labelbottom=False) # labels along the bottom edge are off
    axc.tick_params(
        axis='y',          # changes apply to the y-axis
        which='both',      # both major and minor ticks are affected
        left=False,        # ticks along the bottom edge are off
        right=False,       # ticks along the top edge are off
        labelleft=False)   # labels along the bottom edge are off
    ax.set_xlim(0,l_genome)
    # bar lines
    ax.bar(allinsertionsites_list,allcounts_binnedlist,width=binsize,color=barcolor)
    # chromosome lines
    for chrom in summed_chr_length_dict:
        ax.axvline(x = summed_chr_length_dict.get(chrom), linestyle='-', color=chrom_color)
    ax.set_xticks(middle_chr_position)
    ax.set_xticklabels(chrom_list, fontsize=textsize)
    # Axis labels
    if variable == "transposons":
        ax.set_ylabel('Transposon Count', fontsize=textsize, color=textcolor)
    elif variable == "reads":
        ax.set_ylabel('Read Count', fontsize=textsize, color=textcolor)
    # colored bars in the bottom: green for essential genes, pink otherwise.
    # Mitochondrial genes are skipped since they are not on the linear map.
    for gene in genes_currentchrom_pos_list:
        if not gene_pos_dict.get(gene)[0] == 'Mito':
            gene_start_pos = summed_chr_length_dict.get(gene_pos_dict.get(gene)[0]) + int(gene_pos_dict.get(gene)[1])
            gene_end_pos = summed_chr_length_dict.get(gene_pos_dict.get(gene)[0]) + int(gene_pos_dict.get(gene)[2])
            if gene in genes_essential_list:
                axc.axvspan(gene_start_pos,gene_end_pos,facecolor=essential_face_color,alpha=alpha)
            else:
                axc.axvspan(gene_start_pos,gene_end_pos,facecolor=non_essential_face_color,alpha=alpha)
def CalculateLocalDipoleIndex(mol):
    """
    Calculation of local dipole index (D): the mean absolute
    Gasteiger-charge difference over all bonds, rounded to 3 decimals.
    """
    GMCharge.ComputeGasteigerCharges(mol, iter_step)
    charges = [float(atom.GetProp('_GasteigerCharge')) for atom in mol.GetAtoms()]
    bond_deltas = [
        numpy.absolute(charges[bond.GetBeginAtom().GetIdx()] -
                       charges[bond.GetEndAtom().GetIdx()])
        for bond in mol.GetBonds()
    ]
    B = len(mol.GetBonds())
    return round(sum(bond_deltas) / B, 3)
def controller_login_fixture(event_loop, auth_token, controller_json):
    """Return an aresponses server for an authenticated remote client."""
    client = aresponses.ResponsesMockServer(loop=event_loop)
    # 1) Authentication endpoint returning a token pair + user id.
    client.add(
        TEST_HOST,
        "/v1/authenticate",
        "post",
        aresponses.Response(
            text=json.dumps(
                {
                    "data": {
                        "token": "login_token",
                        "refresh_token": "login_refresh_token",
                        "user_id": "login_userid",
                    }
                }
            ),
            status=200,
        ),
    )
    # 2) Controller listing (payload provided by the controller_json fixture).
    # NOTE(review): method case differs from the "post" above — aresponses
    # appears to accept both; confirm if normalization is expected.
    client.add(
        TEST_HOST,
        "/v1/controllers",
        "GET",
        aresponses.Response(status=200, text=json.dumps(controller_json)),
    )
    # 3) Single-page history payload for controller 1.
    client.add(
        TEST_HOST,
        "/v1/controllers/1/history",
        "GET",
        aresponses.Response(
            status=200,
            text=json.dumps(
                {"data": [{"dummy": "history"}], "meta": {"page": 1, "count": 1}}
            ),
        ),
    )
    yield client
    # Teardown: drain async generators and close the test event loop.
    event_loop.run_until_complete(event_loop.shutdown_asyncgens())
    event_loop.close()
def tile1(icon="", **kw):
    """Build a tile div containing the tile label and an icon.

    The icon can be a font-icon class or an image; the label comes from the
    required 'tile_label' keyword argument.
    """
    icon_span = span(cls="icon %s" % icon)
    contents = [kw['tile_label'], icon_span]
    return div(ctx=contents, cls="tile-content iconic")
def config_server(sender_email:str, sender_autorization_code:str, smtp_host: Optional[str] = None, smtp_port: Optional[int] = None, timeout=10):
    """
    smtp server configuration

    :param sender_email: sender's email
    :param sender_autorization_code: sender's smtp authorization code
    :param smtp_host: smtp host address
    :param smtp_port: smtp host port
    :param timeout: timeout
    :return: smtp server object
    :raises TypeError: if email or authorization code is not a string
    :raises SMTPConfigException: if the smtp connectivity check fails
    """
    # Real exceptions instead of assert: asserts are stripped when Python
    # runs with -O, which would silently skip this validation.
    if not isinstance(sender_email, str):
        raise TypeError("sender_email should be given a string")
    if not isinstance(sender_autorization_code, str):
        raise TypeError("sender_authorization_code should be given a string")
    s = server(sender_email, sender_autorization_code, smtp_host=smtp_host,
               smtp_port=smtp_port, timeout=timeout)
    if s.smtp_able():
        print("server config success")
        return s
    else:
        raise SMTPConfigException
def test_Conv3DTranspose_dilation_2_9_10_11_12():
    """
    api: paddle.Conv3DTranspose
    op version: 9,10,11,12
    Checks Conv3DTranspose with dilation=3 against ONNX opsets 9-12.
    """
    net = Net(in_channels=16, out_channels=16, dilation=3)
    net.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    checker = APIOnnx(net, 'nn_Conv3DTranspose', [9, 10, 11, 12])
    input_tensor = paddle.to_tensor(
        randtool("float", -1, 1, [3, 16, 10, 10, 10]).astype('float32'))
    checker.set_input_data("input_data", input_tensor)
    checker.run()
def try_to_import_file(file_name):
    """
    Try to import the file as a Python module.

    First calls import_file_as_package() and falls back to
    import_file_as_module(). Keeps silent on any errors and returns the
    exceptions that occurred instead of raising.

    :param file_name: The path to import.
    :return: The loaded module or a tuple of length 2 with the exceptions.
    """
    try:
        return import_file_as_package(file_name)
    except Exception as exc:
        # Keep a reference: the `as` name is unbound after the except block.
        package_error = exc
    try:
        return import_file_as_module(file_name)
    except Exception as module_error:
        return package_error, module_error
def main():
    """Render CLI help texts into README by replacing __CLI_COMMANDS__."""
    sections = []
    for name, help_diag in sorted(_iterhelps()):
        sections.extend((f"*{name}*", "```", help_diag, "```", "---"))
    rendered = "\n".join(sections)
    with README.open("r+") as readme_file:
        contents = readme_file.read()
        readme_file.seek(0)
        # NOTE(review): no truncate() — relies on the replacement being at
        # least as long as the placeholder text.
        readme_file.write(contents.replace("__CLI_COMMANDS__", rendered))
        readme_file.flush()
def is_uppervowel(char: str) -> bool:
    """
    Checks if the character is an uppercase Irish vowel (aeiouáéíóú).

    :param char: the character to check
    :return: true if the input is a single character, is uppercase, and is an Irish vowel
    """
    return len(char) == 1 and char in "AEIOUÁÉÍÓÚ"
def _resolve_command(lexer, match) -> Iterator[Tuple[int, Token, str]]:
    """Pygments lexer callback that marks a command as real or nonexistent.

    Yielded values take the form: (index, tokentype, value).
    See:
        https://pygments.org/docs/lexerdevelopment/#callbacks
    """
    command_engine: CommandEngine = lexer.app.command_engine
    command_name = match.group(0)
    is_known = command_name.strip() in command_engine.keys()
    token_type = Name.RealCommand if is_known else Name.NonexistentCommand
    yield match.start(), token_type, command_name
def onerror(func, path, exc_info):
    """
    Error handler for ``shutil.rmtree``.

    If the error is due to an access error (read only file) it attempts to
    add write permission and then retries. If the error is for another
    reason it re-raises the error.

    Usage : ``shutil.rmtree(path, onerror=onerror)``
    """
    import stat
    if os.access(path, os.W_OK):
        # Not a permission problem: re-raise the exception being handled.
        raise
    os.chmod(path, stat.S_IWUSR)
    func(path)
async def test_incorrect_modes(
    hass: HomeAssistant,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test incorrect values are handled correctly."""
    patched_mode = patch(
        "pyatag.entities.Climate.hvac_mode",
        new_callable=PropertyMock(return_value="bug"),
    )
    # An unrecognized hvac mode from the device must map to STATE_UNKNOWN.
    with patched_mode:
        await init_integration(hass, aioclient_mock)
        state = hass.states.get(CLIMATE_ID).state
        assert state == STATE_UNKNOWN
def visualize_permutation_results(
    obs_r2: float,
    permuted_r2: np.ndarray,
    verbose: bool = True,
    permutation_color: str = "#a6bddb",
    output_path: Optional[str] = None,
    show: bool = True,
    close: bool = False,
) -> float:
    """Plot the permutation distribution of r2 against the observed r2.

    Parameters
    ----------
    obs_r2 : float
        The r2 value obtained using `x2_array` to predict `x1_array`,
        given `z_array` if it was not None.
    permuted_r2 : 1D np.ndarray
        One r2 per permutation, each attained using a permuted version of
        `x2_array` to predict `x1_array`, given `z_array` if not None.
    verbose : optional, bool.
        Whether to print the permutation-test p-value to stdout.
        Default == True.
    permutation_color : optional, str.
        Color of the kernel density estimate of the permuted r2 values.
        Default == '#a6bddb'.
    output_path : optional, str or None.
        Where to save the figure; not saved when None. Default is None.
    show : optional, bool.
        Whether to show the matplotlib figure. Default == True.
    close : optional, bool.
        Whether to close the matplotlib figure. Default == False.

    Returns
    -------
    p_value : float.
        Share of permutations whose r2 exceeded the observed r2.
    """
    fig, axis = plt.subplots(figsize=(10, 6))
    p_value = (obs_r2 < permuted_r2).mean()
    if verbose:
        msg = "The p-value of the permutation independence test is {:.2f}."
        print(msg.format(p_value))
    # KDE of the permuted r2 distribution.
    sbn.kdeplot(permuted_r2, ax=axis, color=permutation_color, label="Simulated")
    v_line_label = "Observed\np-val: {:0.3f}".format(  # noqa: F522
        p_value, precision=1
    )
    # Dashed vertical marker at the observed r2, spanning the full y range.
    y_low, y_high = axis.get_ylim()
    axis.vlines(
        obs_r2,
        y_low,
        y_high,
        linestyle="dashed",
        color="black",
        label=v_line_label,
    )
    axis.set_xlabel(r"$r^2$", fontsize=13)
    axis.set_ylabel(
        "Density", fontdict={"fontsize": 13, "rotation": 0}, labelpad=40
    )
    axis.legend(loc="best")
    sbn.despine()
    if output_path is not None:
        fig.savefig(output_path, dpi=500, bbox_inches="tight")
    if show:
        plt.show()
    if close:
        plt.close(fig=fig)
    return p_value
def htmlmovie(html_index_fname,pngfile,framenos,figno):
#=====================================
    """
    Build an html page that animates a sequence of png frames as a movie.

    Input:
    pngfile: a dictionary indexed by (frameno,figno) with value the
    corresponding png file for this figure.
    framenos: a list of frame numbers to include in movie
    figno: integer with the figure number for this movie.

    Returns:
    text for an html file that incorporates javascript to loop through the
    plots one after another.

    New 6/7/10: The html page also has buttons for controlling the movie.
    The parameter iterval below is the time interval between loading
    successive images and is in milliseconds.
    The img_width and img_height parameters do not seem to have any effect.
    """
    # Page header + start of the javascript; num_images is substituted in.
    text = """
<html>
<head>
<script language="Javascript">
<!---
var num_images = %s; """ % len(framenos)
    text += """
var img_width = 800;
var img_height = 600;
var interval = 300;
var images = new Array();
function preload_images()
{
t = document.getElementById("progress");
"""
    # One preload statement per frame so the browser caches every image
    # before playback starts; indices are 1-based on the JS side.
    i = 0
    for frameno in framenos:
        i = i+1
        text += """
t.innerHTML = "Preloading image ";
images[%s] = new Image(img_width, img_height);
images[%s].src = "%s";
""" % (i,i,pngfile[frameno,figno])
    # Playback control functions (tick/rewind/start/pause/faster/slower)
    # and the button form; the final %s pair fills in the index link and
    # the first frame shown on load.
    text += """
t.innerHTML = "";
}
function tick()
{
frame += 1;
if (frame > num_images+1)
frame = 1;
document.movie.src = images[frame].src;
tt = setTimeout("tick()", interval);
}
function startup()
{
preload_images();
frame = 1;
document.movie.src = images[frame].src;
}
function rewind()
{
frame = 1;
document.movie.src = images[frame].src;
}
function start()
{
tt = setTimeout("tick()", interval);
}
function pause()
{
clearTimeout(tt);
}
function restart()
{
tt = setTimeout("tick()", interval);
}
function slower()
{
interval = interval / 0.7;
}
function faster()
{
interval = interval * 0.7;
}
// --->
</script>
</head>
<body onLoad="startup();">
<form>
<input type="button" value="Start movie" onClick="start()">
<input type="button" value="Pause" onClick="pause()">
<input type="button" value="Rewind" onClick="rewind()">
<input type="button" value="Slower" onClick="slower()">
<input type="button" value="Faster" onClick="faster()">
<a href="%s">Plot Index</a>
</form>
<p><div ID="progress"></div></p>
<img src="%s" name="movie"/>
</body>
</html>
""" % (html_index_fname,pngfile[framenos[0],figno])
    return text
    # end of htmlmovie
def readin_q3d_matrix_m(path: str) -> pd.DataFrame:
    """Read in Q3D cap matrix from a .m file exported by Ansys Q3d.

    Args:
        path (str): Path to .m file

    Returns:
        pd.DataFrame of cap matrix, with no names of columns.
        NOTE(review): implicitly returns None when no capMatrix block is
        found in the file — confirm callers handle that case.
    """
    file_text = Path(path).read_text()
    blocks = re.findall(r'capMatrix (.*?)]', file_text, re.DOTALL)
    if blocks:
        # Strip the leading "= [" and any trailing bracket/newline, then
        # parse the remaining comma-separated rows as CSV.
        raw_rows = blocks[0].strip('= [').strip(']').strip('\n')
        return pd.read_csv(io.StringIO(raw_rows),
                           skipinitialspace=True,
                           header=None)
def get_DB(type='mysql'):
    """Return the database adapter class for the requested backend.

    Parameters
    ----------
    type : str
        Backend identifier: 'mysql' or 'mongodb'.

    Returns
    -------
    The adapter class, or None for an unrecognized backend name.
    """
    if type == 'mysql':
        return MySQLAdapter
    if type == 'mongodb':
        return MongoAdapter
    return None
def movstd(x, window):
    """Compute the moving standard deviation for a 1D array.

    The input is processed in consecutive, non-overlapping windows of
    length *window* (odd values are reduced by one); every sample inside a
    window is assigned that window's standard deviation. Edge samples not
    covered by a full window are left as NaN.

    Small window lengths provide a finer description of deviation; longer
    windows are coarser (and faster to compute).

    Parameters
    ----------
    x : array-like (1D)
        Input data.
    window : int
        Length of the evaluation window.

    Returns
    -------
    np.ndarray
        1D vector of standard deviations, same length as *x*.
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    if window % 2:
        window = window - 1
    win2 = int(np.floor(window / 2))
    N = len(x)
    # Bug fix: the original filled with the undefined name `medx`
    # (NameError); NaN clearly marks samples not covered by any window.
    y = np.full(N, np.nan)
    for ii in np.arange(win2, N - win2 + 1, window):
        # `np.int` was removed in NumPy 1.24; use the builtin int.
        idx = (np.arange(-win2, win2) + ii).astype(int)
        if 0 <= idx[0] and idx[-1] < N:
            y[idx] = np.nanstd(x[idx])
    return y
def test_load_dict(testdata_dir, tmp_trestle_dir):
    """Test loading of distributed dict."""
    # prepare trestle project dir with the file
    test_utils.ensure_trestle_config_dir(tmp_trestle_dir)
    test_data_source = testdata_dir / 'split_merge/step4_split_groups_array/catalogs'
    catalogs_dir = Path('catalogs/')
    mycatalog_dir = catalogs_dir / 'mycatalog'
    catalog_dir = mycatalog_dir / 'catalog'
    # Copy files from test/data/split_merge/step4 (replace whatever the
    # fixture created so the split layout matches the test data exactly).
    shutil.rmtree(catalogs_dir)
    shutil.copytree(test_data_source, catalogs_dir)
    # Load the distributed dict from the responsible-parties directory.
    actual_model_type, actual_model_alias, actual_model_instance = _load_dict(
        catalog_dir / 'metadata/responsible-parties')
    # Expected dict: one entry per *__responsible-party.json file, keyed by
    # the name before the double underscore.
    expexted_model_instance = {
        'contact': ResponsibleParty.oscal_read(
            catalog_dir / 'metadata/responsible-parties/contact__responsible-party.json'
        ),
        'creator': ResponsibleParty.oscal_read(
            catalog_dir / 'metadata/responsible-parties/creator__responsible-party.json'
        )
    }
    # dictdiffer yields one entry per difference; empty means equal.
    assert len(list(dictdiffer.diff(actual_model_instance, expexted_model_instance))) == 0
    assert actual_model_alias == 'catalog.metadata.responsible-parties'
    expected_model_type, _ = fs.get_contextual_model_type((catalog_dir / 'metadata/responsible-parties/').absolute())
    assert actual_model_type.__fields__['__root__'].outer_type_ == expected_model_type
def get_attribute(parent, selector, attribute, index=0):
    """Get the attribute value for the child element of parent matching the given CSS selector.

    With a zero-based *index*, returns the value from the index-th matching
    child element (negative indices count from the end); an out-of-range
    index yields None. If *selector* is None, the attribute value is read
    from *parent* itself.
    """
    if selector is None:
        return parent.get(attribute)
    values = get_attributes(parent, selector, attribute)
    if -len(values) <= index < len(values):
        return values[index]
def rdd_plot(
    data, x_variable, y_variable, nbins=20, ylimits=None, frac=None, width=20.1, deg=1
):
    """ Plots a Regression Discontinouity Design graph. For this, binned
    observations are portrayed in a scatter plot.
    Uses non-parametric regression (local polynomial estimation) to fit a curve
    on the original observations.

    Args:
        data: contains DataFrame that contains the data to plot (DataFrame)
        x_var: determines variable on x-axis, passed as the column name (string)
        y_var: determines variable on y_axis, passed as the column name (string)
        nbins: defines number of equally sized bins (int)
        ylimits: A tuple specifying the limits of the y axis (tuple)
        width: Bandwidth for the local polynomial estimation
        deg: degree of the polynomial to be estimated

    Returns:
        Returns the RDD Plot
    """
    # Find min and max of the running variable
    x_var, y_var = data.loc[:, x_variable], data.loc[:, y_variable]
    x_min = int(round(x_var.min()))
    x_max = int(round(x_var.max()))
    x_width = int(round(((abs(x_min) + abs(x_max)) / nbins)))
    # Get a list of tuples with borders of each bin.
    bins = []
    for b in range(x_min, x_max, x_width):
        bins.append((b, b + x_width))
    # Find bin for a given value
    def find_bin(value, bins):
        for count, b in enumerate(bins):
            # Bins generally follow the structure [lower_bound, upper_bound),
            # thus do not include the upper bound.
            if count < len(bins) - 1:
                if (value >= bins[count][0]) & (value < bins[count][1]):
                    bin_number = count
            # The last bin, however, includes its upper bound.
            else:
                if (value >= bins[count][0]) & (value <= bins[count][1]):
                    bin_number = count
        return bin_number
    # Sort running data into bins
    x_bin = np.zeros(len(x_var))
    i = 0
    for value in x_var.values:
        x_bin[i] = find_bin(value, bins)
        i += 1
    # Write data needed for the plot into a DataFrame
    df = pd.DataFrame(data={"x_variable": x_var, "y_variable": y_var, "x_bin": x_bin})
    # For each bin calculate the mean of affiliated values on the y-axis.
    y_bin_mean = np.zeros(len(bins))
    for n, b in enumerate(bins):
        affiliated_y_values = df.loc[x_bin == n]
        y_bin_mean[n] = affiliated_y_values.y_variable.mean()
    # For the x-axis take the mean of the bounds of each bin.
    x_bin_mean = np.zeros(len(bins))
    i = 0
    for e, t in enumerate(bins):
        x_bin_mean[i] = (bins[e][0] + bins[e][1]) / 2
        i += 1
    # Draw the actual plot for all bins of the running variable and their
    # affiliated mean in the y-variable.
    plt.scatter(x=x_bin_mean, y=y_bin_mean, s=50, c="black", alpha=1)
    plt.axvline(x=0)
    # NOTE(review): `~(ylimits == None)` is always truthy in Python
    # (~False == -1, ~True == -2), so plt.ylim is called even when ylimits
    # is None — likely intended to be `ylimits is not None`; confirm.
    if ~(ylimits == None):
        plt.ylim(ylimits)
    plt.grid()
    # Implement local polynomial regression, calculate fitted values as well as
    # estimated betas.
    # This is estimated seperatly for the untreadted state 0 and the treated state 1
    df0 = pd.DataFrame(
        data={
            "x0": data.loc[data[x_variable] < 0][x_variable],
            "y0": data.loc[data[x_variable] < 0][y_variable],
        }
    ).sort_values(by="x0")
    df0["y0_hat"] = localreg(
        x=df0["x0"].to_numpy(),
        y=df0["y0"].to_numpy(),
        degree=deg,
        kernel=tricube,
        frac=frac,
        width=width,
    )["y"]
    for i in range(deg + 1):
        df0["beta_hat_" + str(i)] = localreg(
            x=df0["x0"].to_numpy(),
            y=df0["y0"].to_numpy(),
            degree=deg,
            kernel=tricube,
            frac=frac,
            width=width,
        )["beta"][:, i]
    df1 = pd.DataFrame(
        data={
            "x1": data.loc[data[x_variable] > 0][x_variable],
            "y1": data.loc[data[x_variable] > 0][y_variable],
        }
    ).sort_values(by="x1")
    df1["y1_hat"] = localreg(
        x=df1["x1"].to_numpy(),
        y=df1["y1"].to_numpy(),
        degree=deg,
        kernel=tricube,
        frac=frac,
        width=width,
    )["y"]
    for i in range(deg + 1):
        df1["beta_hat_" + str(i)] = localreg(
            x=df1["x1"].to_numpy(),
            y=df1["y1"].to_numpy(),
            degree=deg,
            kernel=tricube,
            frac=frac,
            width=width,
        )["beta"][:, i]
    # Calculate local standard errors
    y0_se = local_se(df=df0, kernel=tricube, deg=deg, width=width)
    y1_se = local_se(df=df1, kernel=tricube, deg=deg, width=width)
    # Calculate confidence intervals
    # TODO: This certainly would be faster if I would not use dictionaries!
    y0_upper_ci = np.empty(len(df0["y0"]))
    y0_lower_ci = np.empty(len(df0["y0"]))
    y1_upper_ci = np.empty(len(df1["y1"]))
    y1_lower_ci = np.empty(len(df1["y1"]))
    for count, element in enumerate(df0["x0"].array):
        y0_upper_ci[count] = df0["y0_hat"].iloc[count] + 1.96 * y0_se[str(element)]
    for count, element in enumerate(df0["x0"].array):
        y0_lower_ci[count] = df0["y0_hat"].iloc[count] - 1.96 * y0_se[str(element)]
    for count, element in enumerate(df1["x1"].array):
        y1_upper_ci[count] = df1["y1_hat"].iloc[count] + 1.96 * y1_se[str(element)]
    for count, element in enumerate(df1["x1"].array):
        y1_lower_ci[count] = df1["y1_hat"].iloc[count] - 1.96 * y1_se[str(element)]
    # Plot the RDD-Graph
    # fittet lines
    plt.plot(df0.x0, df0.y0_hat, color="r")
    plt.plot(df1.x1, df1.y1_hat, color="r")
    plt.plot(df0.x0, y0_upper_ci, color="black")
    plt.plot(df0.x0, y0_lower_ci, color="black")
    plt.plot(df1.x1, y1_upper_ci, color="black")
    plt.plot(df1.x1, y1_lower_ci, color="black")
    # NOTE(review): the next two plot calls duplicate the fitted lines
    # already drawn above — harmless but redundant.
    # Plot the RDD-Graph
    # fittet lines
    plt.plot(df0.x0, df0.y0_hat, color="r")
    plt.plot(df1.x1, df1.y1_hat, color="r")
    # labels
    plt.title(label="Figure 5: Regression Discontinuity Design Plot")
    plt.xlabel("Binned margin of victory")
    plt.ylabel("Normalized rank improvement")
    # NOTE(review): `plt.show` is referenced without calling it (missing
    # parentheses) — the figure is only displayed if a caller shows it.
    plt.show
    return
def collect() -> None:
    """Run one garbage-collection pass."""
    ...
def cal_head_bbox(kps, image_size):
    """Compute per-sample head bounding boxes from cocoplus keypoints.

    Args:
        kps (torch.Tensor): (N, 19, 2) keypoints in [-1, 1] coordinates.
        image_size (int): pixel scale of the output boxes.

    Returns:
        torch.Tensor: (N, 4) integer boxes as (min_x, max_x, min_y, max_y).
    """
    NECK_IDS = 12  # in cocoplus
    normalized = (kps + 1) / 2.0  # map [-1, 1] -> [0, 1]
    necks = normalized[:, NECK_IDS, 0]
    zeros = torch.zeros_like(necks)
    ones = torch.ones_like(necks)
    head_x = normalized[:, NECK_IDS:, 0]
    head_y = normalized[:, NECK_IDS:, 1]
    # Pad by 0.05 in normalized units, clamped to [0, 1].
    min_x, _ = torch.min(head_x - 0.05, dim=1)
    min_x = torch.max(min_x, zeros)
    max_x, _ = torch.max(head_x + 0.05, dim=1)
    max_x = torch.min(max_x, ones)
    min_y, _ = torch.min(head_y - 0.05, dim=1)
    min_y = torch.max(min_y, zeros)
    # Note: no +0.05 padding on max_y in the original formulation.
    max_y, _ = torch.max(head_y, dim=1)
    max_y = torch.min(max_y, ones)
    pixel_coords = [(c * image_size).long()
                    for c in (min_x, max_x, min_y, max_y)]  # each (N,)
    return torch.stack(pixel_coords, dim=1)
def my_json_render(docs, style="dep", options=None, manual=False) -> list:
    """
    Render nlp visualisation data as a list of parsed dicts.

    Args:
        docs (list or Doc): Document(s) to visualise.
        style (unicode): Visualisation style, 'dep' or 'ent'.
        options (dict): Visualiser-specific options, e.g. colors.
        manual (bool): Don't parse `Doc` and
            instead expect a dict/list of dicts.

    Returns:
        [{'text': '...',
          'ents': [{'start': 20, 'end': 24, 'label': 'ZZ'}, ...],
          'title': None, 'settings': {'lang': 'zh', 'direction': 'ltr'}}]
    """
    if options is None:
        options = {}
    factories = {
        "dep": (DependencyRenderer, parse_deps),
        "ent": (EntityRenderer, parse_ents),
    }
    if style not in factories:
        raise ValueError(Errors.E087.format(style=style))
    # Normalize the input to a list of Doc/dict objects.
    if isinstance(docs, (Doc, Span, dict)):
        docs = [docs]
    docs = [item.as_doc() if isinstance(item, Span) else item for item in docs]
    if not all(isinstance(item, (Doc, Span, dict)) for item in docs):
        raise ValueError(Errors.E096)
    renderer_cls, converter = factories[style]
    renderer = renderer_cls(options=options)
    # With manual=True the caller already supplies parsed dicts.
    if manual:
        return docs
    return [converter(doc, options) for doc in docs]
def draw_raw_data_first_try(files_location):
    """First attempt at visualising raw drawing data (drawings from Magda)."""
    import ast
    import matplotlib.pyplot as plt
    test_raw = pd.read_csv(files_location + "/test_raw.csv", index_col="key_id", nrows=100)
    # First 10 drawings; each 'drawing' cell is a stringified list of strokes.
    first_ids = test_raw.iloc[:10].index
    drawings = [ast.literal_eval(raw) for raw in test_raw.loc[first_ids, 'drawing']]
    for drawing in drawings:
        for x, y, t in drawing:
            plt.figure(figsize=(2, 1))
            plt.subplot(1, 2, 1)
            plt.plot(x, y, marker=".")
            # Image coordinates grow downward, so flip the y axis.
            plt.gca().invert_yaxis()
def org_office_duplicate(job):
    """
    Deduplication callback used when importing office records.

    @param job: An S3ImportJob object which includes all the details
        of the record being imported

    If the incoming record duplicates an existing one, the job method is
    switched to update. Rules for finding a duplicate:
    - Look for a record with the same name, ignoring case
    - and the same location, if provided
    """
    # ignore this processing if the id is set
    if job.id:
        return
    if job.tablename != "org_office":
        return
    table = job.table
    if "name" not in job.data:
        return
    name = job.data.name
    query = (table.name.lower() == name.lower())
    if "location_id" in job.data:
        query = query & \
                (table.location_id == job.data.location_id)
    _duplicate = db(query).select(table.id,
                                  limitby=(0, 1)).first()
    if _duplicate:
        job.id = _duplicate.id
        job.data.id = _duplicate.id
        job.method = job.METHOD.UPDATE
def get_testcase_chain(testcase_id, case_type, chain_list=None, with_intf_system_name=None, with_extract=None,
                       only_first=False, main_case_flow_id=None, childless=False):
    """
    Get the call chain for testcase_id; includes interface cases (case_type 1)
    and full-link / end-to-end cases (case_type 2).
    return example:
    [
        {
            "preCaseId": 1,
            "preCaseName": "指定手机获取验证码",
            "preCaseType": "接口用例",
            "preIntfName": "接口描述-/url/api"
        },
        {
            "preCaseId": 27,
            "preCaseName": "新户申请钱包",
            "preCaseType": "全链路用例"
        },
        {
            "preCaseId": 2,
            "preCaseName": "登录",
            "preCaseType": "接口用例"
        }
    ]
    """
    if not chain_list:
        chain_list = []
    # Guard: cap the call-chain length to stop runaway/cyclic recursion.
    if len(chain_list) >= 100:
        return chain_list
    if case_type == 1:
        tc_obj = ApiTestcaseInfoManager.get_testcase(id=testcase_id)
        if tc_obj:
            if with_intf_system_name:
                # Enrich the row with interface and system display names.
                intf_obj = ApiIntfInfoManager.get_intf(id=tc_obj.api_intf_id)
                system_obj = ApiSystemInfoManager.get_system(id=intf_obj.api_system_id)
                chain_row_dic = {
                    "preCaseName": '{0}__{1}'.format(tc_obj.testcase_name, tc_obj.expect_result),
                    "preCaseId": tc_obj.id,
                    "preCaseType": get_desc_by_case_type(case_type),
                    "preIntfName": '{0}-{1}'.format(intf_obj.intf_desc, intf_obj.intf_name),
                    "preSystemName": system_obj.system_name
                }
                if with_extract:
                    # Collect the variable names this case extracts plus its public variables.
                    extract_v_names = get_extract_v_names(testcase_id)
                    public_v_names = get_public_v_names(tc_obj)
                    chain_row_dic.update({"extract_v_names": extract_v_names, "public_v_names": public_v_names})
                chain_list.insert(0, chain_row_dic)
            else:
                chain_row_dic = {
                    "preCaseName": '{0}__{1}'.format(tc_obj.testcase_name, tc_obj.expect_result),
                    "preCaseId": tc_obj.id,
                    "preCaseType": get_desc_by_case_type(case_type),
                }
                if with_extract:
                    # Collect the variable names this case extracts plus its public variables.
                    extract_v_names = get_extract_v_names(testcase_id)
                    public_v_names = get_public_v_names(tc_obj)
                    chain_row_dic.update({"extract_v_names": extract_v_names, "public_v_names": public_v_names})
                chain_list.insert(0, chain_row_dic)
            if childless:
                # Caller asked to stop here: mark the node as a leaf.
                chain_list[0]['hasChildren'] = False
                return chain_list
            setup_case_list = json.loads(tc_obj.setup_case_list) if tc_obj.setup_case_list else []
            setup_case_list.reverse()
            if setup_case_list:
                if only_first:
                    chain_list[0]['hasChildren'] = True
                    return chain_list
                else:
                    # Keep recursing into setup (precondition) cases.
                    for setup_case_str in setup_case_list:
                        setup_case_type, setup_case_id, option = parse_setup_case_str(setup_case_str)
                        kwargs = {
                            'chain_list': chain_list,
                            'with_intf_system_name': with_intf_system_name,
                            'with_extract': with_extract
                        }
                        if setup_case_type == 1:
                            if option == 'self':
                                kwargs['childless'] = True
                        elif setup_case_type == 2:
                            kwargs['main_case_flow_id'] = option
                        chain_list = get_testcase_chain(setup_case_id, setup_case_type, **kwargs)
                        # setup_case_type, setup_case_id, setup_case_flow_id = parse_setup_case_str(setup_case_str)
                        # chain_list = get_testcase_chain(
                        #     setup_case_id, setup_case_type, chain_list=chain_list,
                        #     with_intf_system_name=with_intf_system_name, with_extract=with_extract,
                        #     main_case_flow_id=setup_case_flow_id
                        # )
            else:
                if only_first:
                    chain_list[0]['hasChildren'] = False
                    return chain_list
        return chain_list
    elif case_type == 2:
        tm_obj = ApiTestcaseMainManager.get_testcase_main(id=testcase_id)
        if tm_obj:
            chain_list.insert(
                0,
                {
                    "preCaseName": '{0}__{1}'.format(tm_obj.testcase_name, tm_obj.expect_result),
                    "preCaseId": tm_obj.id,
                    "preCaseType": get_desc_by_case_type(case_type),
                    "preIntfName": '',
                    "preSystemName": '',
                    "customFlowId": None,
                    "customFlowName": ''
                }
            )
            if only_first:
                # Main (end-to-end) cases are always leaves in this view.
                chain_list[0]['hasChildren'] = False
            if main_case_flow_id:
                flow_obj = ApiTestcaseMainCustomFlowManager.get_flow(id=main_case_flow_id)
                if flow_obj:
                    chain_list[0]['customFlowName'] = flow_obj.flow_name
                    chain_list[0]['customFlowId'] = flow_obj.id
        return chain_list
def approximate_parameter_profile(
        problem: Problem,
        result: Result,
        profile_index: Iterable[int] = None,
        profile_list: int = None,
        result_index: int = 0,
        n_steps: int = 100,
) -> Result:
    """
    Calculate profiles based on an approximation via a normal likelihood
    centered at the chosen optimal parameter value, with the covariance matrix
    being the Hessian or FIM.

    Parameters
    ----------
    problem:
        The problem to be solved.
    result:
        A result object to initialize profiling and to append the profiling
        results to. For example, one might append more profiling runs to a
        previous profile, in order to merge these.
        The existence of an optimization result is obligatory.
    profile_index:
        List with the profile indices to be computed
        (by default all of the free parameters).
    profile_list:
        Integer which specifies whether a call to the profiler should create
        a new list of profiles (default) or should be added to a specific
        profile list.
    result_index:
        Index from which optimization result profiling should be started
        (default: global optimum, i.e., index = 0).
    n_steps:
        Number of profile steps in each dimension.

    Returns
    -------
    result:
        The profile results are filled into `result.profile_result`.
    """
    # Handling defaults
    # profiling indices
    if profile_index is None:
        profile_index = problem.x_free_indices
    # create the profile result object (retrieve global optimum) or append to
    # existing list of profiles
    global_opt = initialize_profile(problem, result, result_index,
                                    profile_index, profile_list)
    # extract optimization result
    optimizer_result = result.optimize_result.list[result_index]
    # extract values of interest
    x = optimizer_result.x
    fval = optimizer_result.fval
    hess = problem.get_reduced_matrix(optimizer_result.hess)
    # ratio scaling factor
    ratio_scaling = np.exp(global_opt - fval)
    # we need the hessian - compute if not provided or fishy
    if hess is None or np.isnan(hess).any():
        logger.info("Computing Hessian/FIM as not available in result.")
        hess = problem.objective(
            problem.get_reduced_vector(x), sensi_orders=(2,))
    # inverse of the hessian
    sigma = np.linalg.inv(hess)
    # the steps: row i of xs spans [lb, ub] for full parameter i in n_steps
    xs = np.linspace(problem.lb_full, problem.ub_full, n_steps).T
    # loop over parameters for profiling
    for i_par in profile_index:
        # not requested or fixed -> compute no profile
        if i_par in problem.x_fixed_indices:
            continue
        i_free_par = problem.full_index_to_free_index(i_par)
        # 1D normal density along this parameter; sigma[i, i] is the scalar
        # marginal variance taken from the inverse Hessian
        ys = multivariate_normal.pdf(xs[i_par], mean=x[i_par],
                                     cov=sigma[i_free_par, i_free_par])
        # negative log-density as surrogate objective values
        fvals = - np.log(ys)
        # likelihood ratios, rescaled relative to the global optimum
        ratios = ys / ys.max() * ratio_scaling
        profiler_result = ProfilerResult(
            x_path=xs,
            fval_path=fvals,
            ratio_path=ratios
        )
        result.profile_result.set_profiler_result(
            profiler_result=profiler_result,
            i_par=i_par, profile_list=profile_list)
    return result
def _process_input(data, context):
""" pre-process request input before it is sent to
TensorFlow Serving REST API
Args:
data (obj): the request data, in format of dict or string
context (Context): object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
"""
if context.request_content_type == 'application/json':
data = data.read().decode("utf-8")
return data if len(data) else ''
raise ValueError('{{"error": "unsupported content type {}"}}'.format(
context.request_content_type or "unknown"
)) | 34,738 |
def update_policies(isamAppliance, name, policies, action, check_mode=False, force=False):
    """
    Update a specified policy set's policies (add/remove/set)

    Note: Please input policies as an array of policy names (it will be converted to id's)

    :param isamAppliance: appliance object used to issue the REST calls
    :param name: name of the policy set (resolved internally to its id)
    :param policies: list of policy names
    :param action: 'add', 'remove' or 'set'; passed through as a query-string parameter
    :param check_mode: when True, only report whether a change would occur
    :param force: when True, issue the update even if no change is detected
    """
    pol_id, update_required, json_data = _check_policies(isamAppliance, name, policies, action)
    if pol_id is None:
        from ibmsecurity.appliance.ibmappliance import IBMError
        raise IBMError("999", "Cannot update data for unknown policy set: {0}".format(name))
    if force is True or update_required is True:
        if check_mode is True:
            # report "changed" without touching the appliance
            return isamAppliance.create_return_object(changed=True)
        else:
            return isamAppliance.invoke_put(
                "Update a specified policy set",
                "{0}/{1}/policies{2}".format(uri, pol_id, tools.create_query_string(action=action)), json_data)
    # nothing to do: return an unchanged result object
    return isamAppliance.create_return_object()
def is_bullish_engulfing(previous: Candlestick, current: Candlestick) -> bool:
    """Engulfs previous candle body. Wick and tail not included."""
    # A bullish engulfing needs a bearish candle followed by a bullish one.
    if not (previous.is_bearish and current.is_bullish):
        return False
    # The current body must open at/below the prior close and close above
    # the prior open (body-only engulfment; wicks/tails are ignored).
    return current.open <= previous.close and current.close > previous.open
def yaw_cov_to_quaternion_cov(yaw, yaw_covariance):
    """Calculate the quaternion covariance based on the yaw and yaw covariance.

    Performs :math:`C_q = R C_{\\theta} R^T`, where :math:`C_{\\theta}` is the
    yaw covariance, :math:`C_q` the quaternion covariance and :math:`R` the
    4x1 Jacobian of the yaw -> quaternion mapping (x, y, z, w order):

    .. math::
        \\frac{dx}{d\\theta} = 0, \\quad
        \\frac{dy}{d\\theta} = 0, \\quad
        \\frac{dz}{d\\theta} = \\frac{1}{2} \\cos \\frac{\\theta}{2}, \\quad
        \\frac{dw}{d\\theta} = -\\frac{1}{2} \\sin \\frac{\\theta}{2}

    :param yaw: Yaw of the vehicle in radians
    :type yaw: float
    :return: The yaw covariance transformed to quaternion coordinates.
    :rtype: 4x4 numpy array
    """
    half_yaw = 0.5 * yaw
    # Column-vector Jacobian of the yaw -> quaternion transform.
    jacobian = np.array([[0.0],
                         [0.0],
                         [0.5 * math.cos(half_yaw)],
                         [-0.5 * math.sin(half_yaw)]])
    return jacobian.dot(yaw_covariance).dot(jacobian.T)
async def token(req: web.Request) -> web.Response:
    """Auth endpoint.

    Builds a mock OIDC token response: a static access token plus an
    id_token JWT signed with the module's key pair, embedding the
    module-level user fixtures and nonce.
    """
    # Module-level test fixtures; only read here despite the `global` statement.
    global nonce, user_eppn, user_family_name, user_given_name
    id_token = {
        "at_hash": "fSi3VUa5i2o2SgY5gPJZgg",
        "sub": "smth",
        "eduPersonAffiliation": "member;staff",
        "eppn": user_eppn,
        "displayName": f"{user_given_name} {user_family_name}",
        "iss": "http://mockauth:8000",
        "schacHomeOrganizationType": "urn:schac:homeOrganizationType:test:other",
        "given_name": user_given_name,
        "nonce": nonce,
        "aud": "aud2",
        "acr": "http://mockauth:8000/LoginHaka",
        "nsAccountLock": "false",
        "eduPersonScopedAffiliation": "staff@test.what;member@test.what",
        "auth_time": 1606579533,
        "name": f"{user_given_name} {user_family_name}",
        "schacHomeOrganization": "test.what",
        # far-future expiry so mock tokens never expire during tests
        "exp": 9999999999,
        "iat": 1561621913,
        "family_name": user_family_name,
        "email": user_eppn,
    }
    data = {"access_token": "test", "id_token": jwt.encode(header, id_token, jwk_pair[1]).decode("utf-8")}
    logging.info(data)
    return web.json_response(data)
def split_train_test(observations, train_percentage):
    """Splits observations into a train and test set.

    Args:
        observations: Array of shape (num_dimensions, num_points); the split
            is performed over the points (second axis).
        train_percentage: Fraction of observations to be used for training.
            The train size is rounded up (ceil).

    Returns:
        observations_train: Observations to be used for training.
        observations_test: Observations to be used for testing.
    """
    total_points = observations.shape[1]
    n_train = int(np.ceil(total_points * train_percentage))
    n_test = total_points - n_train
    train_split = observations[:, :n_train]
    test_split = observations[:, n_train:]
    assert test_split.shape[1] == n_test, \
        "Wrong size of the test set."
    return train_split, test_split
def InitF11(frame):
    """F11 gives keyboard focus to the shell

    :param frame: see InitShorcuts->param
    :type frame: idem
    :return: entrie(here tuple) for AcceleratorTable
    :rtype: tuple(int, int, int)
    """
    # Route the accelerator's menu event to the shell's SetFocus handler.
    frame.Bind(wx.EVT_MENU, frame.shell.SetFocus, id=wx.ID_SHELL_FOCUS)
    return (wx.ACCEL_NORMAL, wx.WXK_F11, wx.ID_SHELL_FOCUS)
def ReadExactly(from_stream, num_bytes):
    """Reads exactly num_bytes from a stream.

    Args:
        from_stream: a file-like object supporting read(size).
        num_bytes: number of bytes (characters for text streams) to read.

    Returns:
        The concatenated data read from the stream.

    Raises:
        EOFError: if the stream is exhausted before num_bytes are read.
            (Previously a truncated stream made this loop spin forever,
            because read() keeps returning an empty result at EOF.)
    """
    pieces = []
    bytes_read = 0
    while bytes_read < num_bytes:
        data = from_stream.read(min(MAX_READ, num_bytes - bytes_read))
        if not data:
            # Empty read means EOF; bail out instead of looping forever.
            raise EOFError(
                'Stream ended after %d of %d bytes' % (bytes_read, num_bytes))
        bytes_read += len(data)
        pieces.append(data)
    return ''.join(pieces)
def indicator_collect(container=None, artifact_ids_include=None, indicator_types_include=None, indicator_types_exclude=None, indicator_tags_include=None, indicator_tags_exclude=None, **kwargs):
    """
    Collect all indicators in a container and separate them by data type. Additional output data paths are created for each data type. Artifact scope is ignored.

    Args:
        container (CEF type: phantom container id): The current container
        artifact_ids_include (CEF type: phantom artifact id): Optional parameter to only look for indicator values that occur in the artifacts with these IDs. Must be one of: json serializable list, comma separated integers, or a single integer.
        indicator_types_include: Optional parameter to only include indicators with at least one of the provided types in the output. If left empty, all indicator types will be included except those that are explicitly excluded. Accepts a comma-separated list.
        indicator_types_exclude: Optional parameter to exclude indicators with any of the provided types from the output. Accepts a comma-separated list.
        indicator_tags_include: Optional parameter to only include indicators with at least one of the provided tags in the output. If left empty, tags will be ignored except when they are excluded. Accepts a comma-separated list.
        indicator_tags_exclude: Optional parameter to exclude indicators with any of the provided tags from the output. Accepts a comma-separated list.

    Returns a JSON-serializable object that implements the configured data paths:
        all_indicators.*.cef_key
        all_indicators.*.cef_value
        all_indicators.*.data_types
        all_indicators.*.artifact_id
        domain.*.cef_key
        domain.*.cef_value (CEF type: domain)
        domain.*.artifact_id
        file_name.*.cef_key (CEF type: file name)
        file_name.*.cef_value (CEF type: file name)
        file_name.*.artifact_id
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom
    from hashlib import sha256
    outputs = {'all_indicators': []}
    # Yield successive size-length slices of seq (used to batch REST queries).
    def grouper(seq, size):
        return (seq[pos:pos + size] for pos in range(0, len(seq), size))
    # Fetch indicator records for a set of values, keyed by their sha256 hash.
    def get_indicator_json(value_set):
        value_list = list(value_set)
        indicator_url = phantom.build_phantom_rest_url('indicator') + '?page_size=0&timerange=all'
        hashed_list = [sha256(item.encode('utf-8')).hexdigest() for item in value_list]
        indicator_dictionary = {}
        # Query in batches of 100 hashes to keep URLs a manageable length.
        for group in grouper(hashed_list, 100):
            query_url = indicator_url + f'&_filter_value_hash__in={group}'
            indicator_response = phantom.requests.get(query_url, verify=False)
            indicator_json = indicator_response.json() if indicator_response.status_code == 200 else {}
            for data in indicator_json.get('data', []):
                indicator_dictionary[data['value_hash']] = data
        return indicator_dictionary
    # True when every element is an int or a numeric string.
    def check_numeric_list(input_list):
        return (all(isinstance(x, int) for x in input_list) or all(x.isnumeric() for x in input_list))
    # Include/exclude filter: list_1 holds the filter terms, list_2 the item's values.
    def is_valid_indicator(list_1=None, list_2=None, check_type="include"):
        list_1 = [] if not list_1 else list_1
        list_2 = [] if not list_2 else list_2
        if check_type == 'exclude':
            if list_1 and any(item in list_1 for item in list_2):
                return False
        elif check_type == 'include':
            if list_1 and not any(item in list_1 for item in list_2):
                return False
        return True
    # validate container and get ID
    if isinstance(container, dict) and container['id']:
        container_dict = container
        container_id = container['id']
    elif isinstance(container, int):
        rest_container = phantom.requests.get(uri=phantom.build_phantom_rest_url('container', container), verify=False).json()
        if 'id' not in rest_container:
            raise RuntimeError('Failed to find container with id {container}')
        container_dict = rest_container
        container_id = container
    else:
        raise TypeError("The input 'container' is neither a container dictionary nor a valid container id, so it cannot be used")
    # Normalise the comma-separated filter inputs into lists.
    if indicator_types_include:
        indicator_types_include = [item.strip(' ') for item in indicator_types_include.split(',')]
    if indicator_types_exclude:
        indicator_types_exclude = [item.strip(' ') for item in indicator_types_exclude.split(',')]
    if indicator_tags_include:
        indicator_tags_include = [item.strip(' ').replace(' ', '_') for item in indicator_tags_include.split(',')]
    if indicator_tags_exclude:
        indicator_tags_exclude = [item.strip(' ').replace(' ', '_') for item in indicator_tags_exclude.split(',')]
    if artifact_ids_include:
        # Try to convert to a valid list
        if isinstance(artifact_ids_include, str) and artifact_ids_include.startswith('[') and artifact_ids_include.endswith(']'):
            artifact_ids_include = json.loads(artifact_ids_include)
        elif isinstance(artifact_ids_include, str):
            artifact_ids_include = artifact_ids_include.replace(' ','').split(',')
        elif isinstance(artifact_ids_include, int):
            artifact_ids_include = [artifact_ids_include]
        # Check validity of list
        if isinstance(artifact_ids_include, list) and not check_numeric_list(artifact_ids_include):
            raise ValueError(
                f"Invalid artifact_ids_include entered: '{artifact_ids_include}'. Must be a list of integers."
            )
        artifact_ids_include = [int(art_id) for art_id in artifact_ids_include]
    indicator_set = set()
    # fetch all artifacts in the container
    container_artifact_url = phantom.build_phantom_rest_url('artifact')
    container_artifact_url += f'?_filter_container={container_id}&page_size=0&include_all_cef_types'
    artifacts = phantom.requests.get(container_artifact_url, verify=False).json()['data']
    # First pass: gather candidate indicator values that pass the type filters.
    for artifact in artifacts:
        artifact_id = artifact['id']
        if (artifact_ids_include and artifact_id in artifact_ids_include) or not artifact_ids_include:
            for cef_key in artifact['cef']:
                cef_value = artifact['cef'][cef_key]
                data_types = artifact['cef_types'].get(cef_key, [])
                # get indicator details if valid type
                if (
                    (
                        is_valid_indicator(indicator_types_exclude, data_types, check_type='exclude')
                        and is_valid_indicator(indicator_types_include, data_types, check_type='include')
                    )
                    and
                    (
                        isinstance(cef_value, str) or isinstance(cef_value, bool) or isinstance(cef_value, int) or isinstance(cef_value, float)
                    )
                ):
                    indicator_set.add(str(cef_value))
    indicator_dictionary = get_indicator_json(indicator_set)
    # Second pass: emit output rows for every value with a matching indicator record.
    for artifact in artifacts:
        artifact_id = artifact['id']
        if (artifact_ids_include and artifact_id in artifact_ids_include) or not artifact_ids_include:
            for cef_key in artifact['cef']:
                cef_value = artifact['cef'][cef_key]
                cef_value_hash = sha256(str(cef_value).encode('utf-8')).hexdigest()
                data_types = artifact['cef_types'].get(cef_key, [])
                if indicator_dictionary.get(cef_value_hash):
                    tags = indicator_dictionary[cef_value_hash]['tags']
                    if (
                        is_valid_indicator(indicator_tags_exclude, tags, check_type='exclude')
                        and is_valid_indicator(indicator_tags_include, tags, check_type='include')
                    ):
                        outputs['all_indicators'].append({
                            'cef_key': cef_key,
                            'cef_value': cef_value,
                            'artifact_id': artifact_id,
                            'data_types': data_types,
                            'tags': tags
                        })
                        for data_type in data_types:
                            # outputs will have underscores instead of spaces
                            data_type_escaped = data_type.replace(' ', '_')
                            if data_type_escaped not in outputs:
                                outputs[data_type_escaped] = []
                            outputs[data_type_escaped].append(
                                {'cef_key': cef_key, 'cef_value': cef_value, 'artifact_id': artifact_id, 'tags': tags}
                            )
    if outputs.get('all_indicators'):
        # sort the all_indicators outputs to make them more consistent
        outputs['all_indicators'].sort(key=lambda indicator: str(indicator['cef_value']))
    # Return a JSON-serializable object
    assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
def test_allow_multiple_creation():
    """Test method

    Creating blobs twice on the same factory must still yield exactly
    `size` individuals (re-creation replaces rather than appends).
    """
    message = "Population must be re-creatable"
    size = 6
    test_factory = BlobFactory(n=size, scatter=12)
    test_factory.create_blobs()
    test_factory.create_blobs()
    # clean up the on-disk population directory the factory created
    rmtree(test_factory.get_population_string())
    assert len(test_factory.get_population()["names"])==size, message
def remove_duplicates(iterable):
    """Return a list of *iterable*'s items with duplicates removed,
    preserving the order of first occurrence.

    Items must be hashable.
    """
    unique = []
    seen = set()
    for item in iterable:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def pytest_configure(config):
    """Disable verbose output when running tests."""
    log.init(debug=True)
    # Hide the file-system path column in pytest's terminal reporter.
    terminal = config.pluginmanager.getplugin("terminal")
    terminal.TerminalReporter.showfspath = False
def verify_my_token(user: User = Depends(auth_user)):
    """
    Verify a token, and get basic user information

    Returns a dict holding a (re-issued) token together with the
    authenticated user's email, admin flag and restricted-job flag.
    """
    return {"token": get_token(user),
            "email": user.email,
            "is_admin": user.is_admin,
            "restricted_job": user.restricted_job}
def inv_partition_spline_curve(x):
    """The inverse of partition_spline_curve().

    Requires x >= 0 (enforced with a tf.Assert control dependency).
    """
    # Helper to cast python constants to x's dtype.
    c = lambda z: tf.cast(z, x.dtype)
    assert_ops = [tf.Assert(tf.reduce_all(x >= 0.), [x])]
    with tf.control_dependencies(assert_ops):
        # Piecewise inverse: two quadratic branches for x < 8 (split at x <= 4)
        # and an exponential branch for x >= 8.
        alpha = tf.where(
            x < 8,
            c(0.5) * x + tf.where(
                x <= 4,
                c(1.25) - tf.sqrt(c(1.5625) - x + c(.25) * tf.square(x)),
                c(-1.25) + tf.sqrt(c(9.5625) - c(3) * x + c(.25) * tf.square(x))),
            c(3.75) + c(0.25) * util.exp_safe(x * c(3.6) - c(28.8)))
    return alpha
def _orbit_bbox(partitions):
""" Takes a granule's partitions 'partitions' and returns the bounding box
containing all of them. Bounding box is ll, ur format
[[lon, lat], [lon, lat]]. """
lon_min = partitions[0]['lon_min']
lat_min = partitions[0]['lat_min']
lon_max = partitions[0]['lon_max']
lat_max = partitions[0]['lat_max']
for p in partitions[1:]:
if p['lon_min'] < lon_min:
lon_min = p['lon_min']
if p['lat_min'] < lat_min:
lat_min = p['lat_min']
if p['lon_max'] > lon_max:
lon_max = p['lon_max']
if p['lat_max'] > lat_max:
lat_max = p['lat_max']
return [[lon_min, lat_min], [lon_max, lat_max]] | 34,752 |
def test_paragraph_series_e_rh():
    """
    Test case:  Paragraph with raw HTML with newline inside
    was:        test_paragraph_extra_44
    """
    # Arrange
    source_markdown = """a<raw
html='cool'>a"""
    # Expected token stream: the raw-html token spans the newline inside the tag.
    expected_tokens = [
        "[para(1,1):\n]",
        "[text(1,1):a:]",
        "[raw-html(1,2):raw\nhtml='cool']",
        "[text(2,13):a:]",
        "[end-para:::True]",
    ]
    expected_gfm = """<p>a<raw\nhtml='cool'>a</p>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
def clear_ip_mroute_vrf(device, vrf_name):
    """ clear all ip mroute entries for a vrf
        Args:
            device (`obj`): Device object
            vrf_name (`str`): name of the vrf whose multicast route table is cleared
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    try:
        device.execute('clear ip mroute vrf {vrf_name} *'.format(vrf_name=vrf_name))
    except SubCommandFailure as e:
        # wrap the device error with context about which device failed
        raise SubCommandFailure(
            "Could not clear ip mroute vrf on {device}. Error:\n{error}"
                .format(device=device, error=e)
        )
def GetModel(name: str) -> None:
    """
    Returns model from model pool that coresponds
    to the given name. Raises GraphicsException
    if certain model cannot be found.

    param name: Name of a model.
    """
    try:
        return _models[name]
    except KeyError:
        # Translate the missing key into the module's own exception type.
        raise GraphicsException(f"No such model '{name}'.")
def db_to_dict(s_str, i=0, d=None):
    """ Converts a dotbracket string to a dictionary of indices and their pairs

    Args:
        s_str -- str: secondary_structure in dotbracket notation
    KWargs:
        i -- int: start index (used internally by the recursion)
        d -- dict<index1, index2>: the dictionary so far; a fresh dict is
             created when omitted
    Returns:
        dictionary mapping each index to its paired index (None if unpaired)
    """
    if d is None:
        # Fresh dict per top-level call. The old `d={}` default was created
        # once at definition time and leaked pairings across calls.
        d = {}
    j = i
    while j < len(s_str):
        c = s_str[j]
        if c == "(":
            # Recurse past the opening bracket; the recursion records the
            # match for index j, so we can jump straight to it afterwards.
            d = db_to_dict(s_str, j + 1, d)
            j = d[j]
        elif c == ")":
            # i - 1 is the index of the "(" that opened this nesting level.
            d[i - 1] = j
            d[j] = i - 1
            if i != 0:
                return d  # Don't return from the first iteration yet
        else:
            d[j] = None  # unpaired position
        j = j + 1
    return d
def identify_event_type(event):
    """Look at event to determine type of device.

    Async friendly.
    """
    # Command keys take precedence over sensor keys, matching the original
    # check order.
    for key in (EVENT_KEY_COMMAND, EVENT_KEY_SENSOR):
        if key in event:
            return key
    return "unknown"
def run_update_department_task(depart_id):
    """
    Updates Department performance score and NFIRS counts when it is instantiated. Updates ERF and Service area

    Inspects the live celery queues first and skips scheduling when a task
    for the same department id is already queued or running.
    """
    from firecares.tasks import update
    from celery.task.control import inspect
    # Check for status of the current celery queue
    taskinspector = inspect()
    notaduplicatetask = True
    # print(taskinspector.reserved())
    # print(taskinspector.active())
    try:
        # NOTE(review): `.values()[0]` is Python 2 style; on Python 3 dict
        # views are not indexable -- confirm the runtime this targets.
        active_tasks = taskinspector.active().values()[0]
        p(0)
        queue_tasks = taskinspector.reserved().values()[0]
        p(1)
        # Scan reserved (queued) tasks for this department id.
        for q_task in queue_tasks:
            q_departmentid = q_task['args']
            p(q_departmentid)
            if str(depart_id) in str(q_departmentid):
                notaduplicatetask = False
        p(2)
        # Scan active (running) tasks only if nothing was found in the queue.
        if notaduplicatetask:
            for a_task in active_tasks:
                a_departmentid = a_task['args']
                p(a_departmentid)
                if str(depart_id) in str(a_departmentid):
                    notaduplicatetask = False
    except Exception as e:
        # best-effort: if the broker/inspector is unreachable, fall through
        # and schedule anyway
        p('run_update_department_task error')
        p(e)
        # return 'No Data in celery queue'
    if notaduplicatetask:
        # delay for 50 seconds
        p('Running dept update for ' + str(depart_id))
        update.update_parcel_department_effectivefirefighting_rollup.apply_async((depart_id,), countdown=50, task_id=str(depart_id) + 'efff')
        update.get_parcel_department_hazard_level_rollup.apply_async((depart_id,), countdown=50, task_id=str(depart_id) + 'servicearea')
        update.update_department.apply_async((depart_id,), countdown=50, task_id=str(depart_id) + 'nfirs')
    else:
        p('this is a duplicate task')
def stopTiming( startTime ):
    """
        Prints the elapsed time from 'startTime' to now.

        Useful for measuring code execution times.

        :param startTime: a datetime.datetime, e.g. as returned by now()
    """
    import datetime
    Any.requireIsInstance( startTime, datetime.datetime )
    stopTime = now()
    # logged at debug level rather than printed to stdout
    logging.debug( 'elapsed time: %s', stopTime - startTime )
def export(group, bucket, prefix, start, end, role, poll_period=120,
           session=None, name="", region=None):
    """export a given log group to s3

    Args:
        group: CloudWatch log group name to export.
        bucket: destination S3 bucket.
        prefix: key prefix inside the bucket (log group name is appended).
        start: start of the export window (datetime or parseable string).
        end: end of the window (datetime or parseable string); defaults to now.
        role: IAM role used to build a session when `session` is not given.
        poll_period: seconds to wait between create_export_task retries when
            the per-account concurrent-export limit is hit.
        session: optional pre-built boto session.
        name: label used in log messages.
        region: region for the session built from `role`.
    """
    # Normalise start/end to datetimes. Fix: the end expression previously
    # tested isinstance(start, str) instead of isinstance(end, str), so a
    # string `end` was not parsed unless `start` also happened to be a string.
    start = parse(start) if start and isinstance(start, str) else start
    end = parse(end) if end and isinstance(end, str) else end
    end = end or datetime.now()
    start = start.replace(tzinfo=tzlocal()).astimezone(tzutc())
    end = end.replace(tzinfo=tzlocal()).astimezone(tzutc())
    if session is None:
        session = get_session(role, region)
    client = session.client('logs')
    # Resolve the log group record (needed for its stored size).
    paginator = client.get_paginator('describe_log_groups')
    for p in paginator.paginate():
        found = False
        for _group in p['logGroups']:
            if _group['logGroupName'] == group:
                group = _group
                found = True
                break
        if found:
            break
    if not found:
        raise ValueError("Log group %s not found." % group)
    if prefix:
        prefix = "%s/%s" % (prefix.rstrip('/'), group['logGroupName'].strip('/'))
    else:
        prefix = group['logGroupName']
    named_group = "%s:%s" % (name, group['logGroupName'])
    log.info(
        "Log exporting group:%s start:%s end:%s bucket:%s prefix:%s size:%s",
        named_group,
        start.strftime('%Y/%m/%d'),
        end.strftime('%Y/%m/%d'),
        bucket,
        prefix,
        group['storedBytes'])
    # One export task per whole day in the window.
    days = [(
        start + timedelta(i)).replace(minute=0, hour=0, second=0, microsecond=0)
        for i in range((end - start).days)]
    day_count = len(days)
    s3 = boto3.Session().client('s3')
    # Skip days that already have exported objects in the bucket.
    days = filter_extant_exports(s3, bucket, prefix, days, start, end)
    log.info("Group:%s filtering s3 extant keys from %d to %d start:%s end:%s",
             named_group, day_count, len(days),
             days[0] if days else '', days[-1] if days else '')
    t = time.time()
    retry = get_retry(('SlowDown',))
    for idx, d in enumerate(days):
        date = d.replace(minute=0, microsecond=0, hour=0)
        export_prefix = "%s%s" % (prefix, date.strftime("/%Y/%m/%d"))
        params = {
            'taskName': "%s-%s" % ("c7n-log-exporter",
                                   date.strftime("%Y-%m-%d")),
            'logGroupName': group['logGroupName'],
            'fromTime': int(time.mktime(
                date.replace(
                    minute=0, microsecond=0, hour=0).timetuple()) * 1000),
            'to': int(time.mktime(
                date.replace(
                    minute=59, hour=23, microsecond=0).timetuple()) * 1000),
            'destination': bucket,
            'destinationPrefix': export_prefix
        }
        # if stream_prefix:
        #    params['logStreamPrefix'] = stream_prefix
        # Ensure a marker object exists at `prefix` to carry the LastExport tag.
        try:
            s3.head_object(Bucket=bucket, Key=prefix)
        except ClientError as e:
            if e.response['Error']['Code'] != '404':  # Not Found
                raise
            s3.put_object(
                Bucket=bucket,
                Key=prefix,
                Body=json.dumps({}),
                ACL="bucket-owner-full-control",
                ServerSideEncryption="AES256")
        t = time.time()
        counter = 0
        while True:
            counter += 1
            try:
                result = client.create_export_task(**params)
            except ClientError as e:
                if e.response['Error']['Code'] == 'LimitExceededException':
                    # Only one export task may run per account; wait and retry.
                    time.sleep(poll_period)
                    # log every 30m of export waiting
                    if counter % 6 == 0:
                        log.debug(
                            "group:%s day:%s waiting for %0.2f minutes",
                            named_group, d.strftime('%Y-%m-%d'),
                            (counter * poll_period) / 60.0)
                    continue
                raise
            retry(
                s3.put_object_tagging,
                Bucket=bucket, Key=prefix,
                Tagging={
                    'TagSet': [{
                        'Key': 'LastExport',
                        'Value': d.isoformat()}]})
            break
        log.info(
            "Log export time:%0.2f group:%s day:%s bucket:%s prefix:%s task:%s",
            time.time() - t,
            named_group,
            d.strftime("%Y-%m-%d"),
            bucket,
            params['destinationPrefix'],
            result['taskId'])
    log.info(
        ("Exported log group:%s time:%0.2f days:%d start:%s"
         " end:%s bucket:%s prefix:%s"),
        named_group,
        time.time() - t,
        len(days),
        start.strftime('%Y/%m/%d'),
        end.strftime('%Y/%m/%d'),
        bucket,
        prefix)
def pcolormesh_nan(x: np.ndarray, y: np.ndarray, c: np.ndarray, cmap=None, axis=None):
    """handles NaN in x and y by smearing last valid value in column or row out,
    which doesn't affect plot because "c" will be masked too

    Note: x and y are modified in place.
    """
    mask = np.isfinite(x) & np.isfinite(y)
    top = None     # index of the first row containing finite coordinates
    bottom = None  # index of the last such row seen so far
    for i, m in enumerate(mask):
        good = m.nonzero()[0]
        if good.size == 0:
            continue
        elif top is None:
            top = i
        else:
            bottom = i
        # extend the last/first finite coordinate across the row's NaN tails
        x[i, good[-1] :] = x[i, good[-1]]
        y[i, good[-1] :] = y[i, good[-1]]
        x[i, : good[0]] = x[i, good[0]]
        y[i, : good[0]] = y[i, good[0]]
    # fill fully-NaN leading/trailing rows with the mean of the nearest valid row
    # NOTE(review): if no row has finite data (top stays None) or only one does
    # (bottom stays None), the indexing below raises -- confirm callers
    # guarantee at least two rows with finite coordinates.
    x[:top, :] = np.nanmean(x[top, :])
    y[:top, :] = np.nanmean(y[top, :])
    x[bottom:, :] = np.nanmean(x[bottom, :])
    y[bottom:, :] = np.nanmean(y[bottom, :])
    if axis is None:
        axis = figure().gca()
    # mask c wherever the coordinates were non-finite
    return axis.pcolormesh(x, y, np.ma.masked_where(~mask, c), cmap=cmap)
def send_message(message):
    """Sends a message"""
    # Title the message with this host's name so the source is identifiable.
    pushover.send_message(settings, message, '[{}] Message'.format(HOSTNAME))
def scan_patch(project, patch_file, binary_list, file_audit_list,
               file_audit_project_list, master_list,
               ignore_list, licence_ext, file_ignore, licence_ignore):
    """ Scan actions for each commited file in patch set

    Binary files are checked against a per-project hash whitelist; text files
    are checked for blacklisted names and blacklisted content, then handed to
    the licence checker. Violations set the module-level `failure` flag and
    are appended to per-project report files.

    NOTE(review): `master_list.iteritems()` and the shared module-level
    `hasher` suggest this targets Python 2 -- confirm before porting.
    """
    global failure
    if is_binary(patch_file):
        hashlist = get_lists.GetLists()
        binary_hash = hashlist.binary_hash(project, patch_file)
        if not binary_list.search(patch_file):
            with open(patch_file, 'rb') as afile:
                buf = afile.read()
                hasher.update(buf)
            if hasher.hexdigest() in binary_hash:
                logger.info('Found matching file hash for file: %s',
                            patch_file)
            else:
                # binary neither name-whitelisted nor hash-whitelisted
                logger.error('Non Whitelisted Binary file: %s',
                             patch_file)
                logger.error('Submit patch with the following hash: %s',
                             hasher.hexdigest())
                failure = True
                with open(reports_dir + "binaries-" + project + ".log", "a") \
                        as gate_report:
                    gate_report.write('Non Whitelisted Binary file: {0}\n'.
                                      format(patch_file))
    else:
        # Check file names / extensions
        if file_audit_list.search(patch_file) and not \
                file_audit_project_list.search(patch_file):
            match = file_audit_list.search(patch_file)
            logger.error('Blacklisted file: %s', patch_file)
            logger.error('Matched String: %s', match.group())
            failure = True
            with open(reports_dir + "file-names_" + project + ".log", "a") \
                    as gate_report:
                gate_report.write('Blacklisted file: {0}\n'.
                                  format(patch_file))
                gate_report.write('Matched String: {0}'.
                                  format(match.group()))
        # Open file to check for blacklisted content
        try:
            fo = open(patch_file, 'r')
            lines = fo.readlines()
            file_exists = True
        except IOError:
            # file was deleted/renamed in the patch; nothing to scan
            file_exists = False
        if file_exists and not patch_file.endswith(tuple(file_ignore)):
            for line in lines:
                # each master_list entry carries a regex and its rationale
                for key, value in master_list.iteritems():
                    regex = value['regex']
                    desc = value['desc']
                    if re.search(regex, line) and not re.search(
                            ignore_list, line):
                        logger.error('File contains violation: %s', patch_file)
                        logger.error('Flagged Content: %s', line.rstrip())
                        logger.error('Matched Regular Exp: %s', regex)
                        logger.error('Rationale: %s', desc.rstrip())
                        failure = True
                        with open(reports_dir + "contents_" + project + ".log",
                                  "a") as gate_report:
                            gate_report.write('File contains violation: {0}\n'.
                                              format(patch_file))
                            gate_report.write('Flagged Content: {0}'.
                                              format(line))
                            gate_report.write('Matched Regular Exp: {0}\n'.
                                              format(regex))
                            gate_report.write('Rationale: {0}\n'.
                                              format(desc.rstrip()))
        # Run license check
        licence_check(project, licence_ext, licence_ignore, patch_file)
def delete_post(post_id):
    """Delete a post.

    :param post_id: id of the post object
    :return: redirect to the post list on success, 404 page otherwise
    """
    if not Post.delete_post(post_id):
        return render_template('page_not_found.html'), 404
    logger.warning('post %d has been deleted', post_id)
    return redirect(url_for('.posts'))
def log_arguments(func: Callable) -> Callable:
    """
    decorate a function to log its arguments and result
    :param func: the function to be decorated
    :return: the decorator
    """
    @functools.wraps(func)
    def wrapper_args(*args, **kwargs) -> Any:  # type: ignore
        # call first, then log both the inputs and the outcome together
        outcome = func(*args, **kwargs)
        log_args_kwargs_results(func, outcome, -1, None, *args, **kwargs)
        return outcome
    return wrapper_args
def huggingface_from_pretrained_custom(
    source: Union[Path, str], tok_config: Dict, trf_config: Dict
) -> HFObjects:
    """Create a Huggingface transformer model from pretrained weights. Will
    download the model if it is not already downloaded.
    source (Union[str, Path]): The name of the model or a path to it, such as
        'bert-base-cased'.
    tok_config (dict): Settings to pass to the tokenizer.
    trf_config (dict): Settings to pass to the transformer.
    RETURNS (HFObjects): the tokenizer, the transformer, and the raw bytes
        of the tokenizer's vocab file (or None if it has none).
    """
    # A Path becomes an absolute path string; a plain model name is used as-is.
    if hasattr(source, "absolute"):
        str_path = str(source.absolute())
    else:
        str_path = source
    try:
        tokenizer = AutoTokenizer.from_pretrained(str_path, **tok_config)
    except ValueError as e:
        # AutoTokenizer could not infer the tokenizer class; fall back to an
        # explicitly configured dotted class path, if one was provided.
        if "tokenizer_class" not in tok_config:
            raise e
        tokenizer_class_name = tok_config["tokenizer_class"].split(".")
        from importlib import import_module
        tokenizer_module = import_module(".".join(tokenizer_class_name[:-1]))
        tokenizer_class = getattr(tokenizer_module, tokenizer_class_name[-1])
        # NOTE(review): assumes a wordpiece-style vocab.txt sits next to the
        # model weights -- confirm for non-BERT tokenizers.
        tokenizer = tokenizer_class(vocab_file=str_path + "/vocab.txt", **tok_config)
    vocab_file_contents = None
    if hasattr(tokenizer, "vocab_file"):
        # Keep the raw vocab bytes so the objects can be serialized later.
        with open(tokenizer.vocab_file, "rb") as fileh:
            vocab_file_contents = fileh.read()
    try:
        trf_config["return_dict"] = True
        config = AutoConfig.from_pretrained(str_path, **trf_config)
        transformer = AutoModel.from_pretrained(str_path, config=config)
    except OSError as e:
        # Loading with the explicit config failed; first retry from the local
        # cache only, then fall back to downloading from the hub by name.
        try:
            transformer = AutoModel.from_pretrained(str_path, local_files_only=True)
        except OSError as e2:
            model_name = str(source)
            print("trying to download model from huggingface hub:", model_name, "...", file=sys.stderr)
            transformer = AutoModel.from_pretrained(model_name)
            print("succeded", file=sys.stderr)
    ops = get_current_ops()
    # Move the model to the GPU when thinc is running on CUDA.
    if isinstance(ops, CupyOps):
        transformer.cuda()
    return HFObjects(tokenizer, transformer, vocab_file_contents)
def recv_categorical_matrix(socket):
    """
    Receives a matrix of type string from the getml engine.

    The engine always sends numeric data in network byte order (big
    endian), so the shape header is decoded with the explicit '>i4'
    dtype. This makes the code independent of the host byte order and
    removes the original sys.byteorder branch, whose two arms produced
    identical results.

    Parameters
    ----------
    socket : socket.socket
        Connected socket to the getml engine.

    Returns
    -------
    numpy.ndarray
        2-D array of strings with the shape announced by the engine.
    """
    # -------------------------------------------------------------------------
    # Receive shape: two 32-bit integers in network byte order (big endian).
    shape = np.frombuffer(
        socket.recv(np.nbytes[np.int32] * 2),
        dtype='>i4'
    ).astype(np.uint64)
    size = shape[0] * shape[1]
    # -------------------------------------------------------------------------
    # Receive the actual strings, one at a time.
    mat = [recv_string(socket) for _ in range(size)]
    # -------------------------------------------------------------------------
    # Cast as numpy.array and reshape to the announced dimensions.
    return np.asarray(mat).reshape(shape[0], shape[1])
def collect_gsso_dict(gsso):
    """ Export gsso as a dict: keys are cls, ind, all (ie cls+ind)"""
    print('Importing gsso as dict')
    start = time.time()
    cls_dict, ind_dict = _create_gsso_dict(gsso)
    all_dict = _create_gsso_dict_all(gsso)
    print("Executed in %s seconds." % str(time.time()-start))
    return cls_dict, ind_dict, all_dict
def H_squared(omega):
    """Square magnitude of the frequency filter function."""
    # product of the two Lorentzian low-pass terms
    denominator = (1 + (omega * tau_a) ** 2) * (1 + (omega * tau_r) ** 2)
    return 1 / denominator * H_squared_heaviside(omega)
def export_to_cvs(export_df: pd.DataFrame, name):
    """
    Export DataFrame into csv file with given name.

    :param export_df: frame to write
    :param name: file name inside EXPORTS_DIR
    """
    # makedirs with exist_ok avoids the check-then-create race of the
    # original exists()+mkdir() pair and also creates missing parents.
    os.makedirs(EXPORTS_DIR, exist_ok=True)
    file = os.path.join(EXPORTS_DIR, name)
    export_df.to_csv(file, index=False)
    print("Output file: ", os.path.abspath(file))
def test_block_verify_work_difficulty(block_factory):
    """
    Load a block with work, and verify it different work difficulties
    making it either pass or fail
    """
    send_block = block_factory("send")
    # The attached work satisfies the default difficulty
    send_block.verify_work()
    # Raising the threshold via the parameter invalidates the same work
    with pytest.raises(InvalidWork):
        send_block.verify_work(difficulty="fffffffeb1249487")
    # The threshold can also be raised on the block itself
    send_block.difficulty = "fffffffeb1249487"
    with pytest.raises(InvalidWork):
        send_block.verify_work()
    send_block.difficulty = "fffffffeb1249486"
def docs(c):
    """Run if docstrings have changed"""
    build_cmd = "poetry run sphinx-build -M html ./docs/sphinx ./docs/dist -v"
    c.run(build_cmd)
def get_neighbor_distances(ntw, v0, l):
    """Get distances to the nearest vertex neighbors along
    connecting arcs.

    Parameters
    ----------
    ntw : spaghetti.Network
        spaghetti Network object.
    v0 : int
        vertex id
    l : dict
        key is tuple (start vertex, end vertex); value is ``float``.
        Cost per arc to travel, e.g. distance.

    Returns
    -------
    neighbors : dict
        key is int (vertex id); value is ``float`` (distance)
    """
    # every arc incident to v0; each arc is a (start, end) vertex tuple
    incident_arcs = ntw.enum_links_vertex(v0)
    # the neighbor is whichever endpoint of the arc is not v0 itself
    neighbors = {
        (arc[1] if arc[0] == v0 else arc[0]): l[arc]
        for arc in incident_arcs
    }
    return neighbors
def _cwt_gen(X, Ws, *, fsize=0, mode="same", decim=1, use_fft=True):
    """Compute cwt with fft based convolutions or temporal convolutions.
    Parameters
    ----------
    X : array of shape (n_signals, n_times)
        The data.
    Ws : list of array
        Wavelets time series.
    fsize : int
        FFT length.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note:: Decimation may create aliasing artifacts.
    use_fft : bool, default True
        Use the FFT for convolutions or not.
    Yields
    ------
    out : array, shape (n_freqs, n_time_decim)
        The time-frequency transform of one signal per iteration.
        NOTE(review): the same ``tfr`` buffer is yielded each time, so the
        consumer must copy it before advancing the generator.
    """
    fft, ifft = _import_fft(('fft', 'ifft'))
    _check_option('mode', mode, ['same', 'valid', 'full'])
    decim = _check_decim(decim)
    X = np.asarray(X)
    # Precompute wavelets for given frequency range to save time
    _, n_times = X.shape
    # number of output samples after decimation
    n_times_out = X[:, decim].shape[1]
    n_freqs = len(Ws)
    # precompute FFTs of Ws (one forward transform per wavelet, reused
    # for every signal)
    if use_fft:
        fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
        for i, W in enumerate(Ws):
            fft_Ws[i] = fft(W, fsize)
    # Make generator looping across signals
    tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
    for x in X:
        if use_fft:
            fft_x = fft(x, fsize)
        # Loop across wavelets
        for ii, W in enumerate(Ws):
            if use_fft:
                # frequency-domain product == 'full' convolution; crop to
                # the full-convolution length before centering
                ret = ifft(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
            else:
                ret = np.convolve(x, W, mode=mode)
            # Center and decimate decomposition
            if mode == 'valid':
                # 'valid' output length and its offset within the 'full' result
                sz = int(abs(W.size - n_times)) + 1
                offset = (n_times - sz) // 2
                this_slice = slice(offset // decim.step,
                                   (offset + sz) // decim.step)
                if use_fft:
                    ret = _centered(ret, sz)
                tfr[ii, this_slice] = ret[decim]
            elif mode == 'full' and not use_fft:
                # trim the time-domain 'full' result back to n_times samples
                start = (W.size - 1) // 2
                end = len(ret) - (W.size // 2)
                ret = ret[start:end]
                tfr[ii, :] = ret[decim]
            else:
                if use_fft:
                    ret = _centered(ret, n_times)
                tfr[ii, :] = ret[decim]
        yield tfr
def is_generic_list(annotation: Any):
    """Checks if ANNOTATION is List[...]."""
    # python<3.7 reports List in __origin__, while python>=3.7 reports list
    origin = getattr(annotation, '__origin__', None)
    return origin in (List, list)
def write_summary(umi_well, out_file):
    """write summary about edit distance among same read position

    For every read position, counts the UMIs observed there and writes one
    line per UMI pair: position, count of the more frequent UMI, count of
    the less frequent UMI, and their edit distance.

    NOTE(review): the gzip handle is opened in binary mode ('wb') but
    written with str data -- this looks like Python 2 era code; confirm
    before running under Python 3.
    """
    # file_transaction writes to a temp file and moves it in place on success
    with file_transaction(out_file) as tx_out_file:
        with gzip.open(tx_out_file, 'wb') as out_handle:
            for read in umi_well:
                # frequency of each UMI seen at this read position
                umi_list = Counter(umi_well[read].umi)
                # pairwise edit distances between the UMIs at this position
                ma = calculate_matrix_distance(umi_list.keys())
                for pair in ma:
                    # order the pair so max_umi is the more frequent UMI
                    max_umi, min_umi = pair[1], pair[0]
                    if umi_list[pair[0]] > umi_list[pair[1]]:
                        max_umi, min_umi = pair[0], pair[1]
                    out_handle.write("%s\n" % (" ".join(map(str, [read[0], read[1],
                                                   umi_list[max_umi], umi_list[min_umi],
                                                   ma[pair]]))))
def create_folder(base_path: Path, directory: str, rtn_path=False):
    """Create ``directory`` (and any missing parents) under ``base_path``.

    Like mkdir(), but makes all intermediate-level directories needed to
    contain the leaf directory; existing directories are left untouched.

    Parameters
    ----------
    base_path : pathlib.Path
        Root under which the new directory tree is created.
    directory : str
        Relative path of the directory to create.
    rtn_path : bool, optional
        If True, return the resolved Path of the created directory.

    Returns
    -------
    pathlib.Path or None
        Resolved path when ``rtn_path`` is True, otherwise None.
    """
    target = base_path / directory
    # parents + exist_ok give recursive, idempotent creation
    target.mkdir(parents=True, exist_ok=True)
    return target.resolve() if rtn_path else None
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       **kwargs):
    """Runs a command and calls back a filter function if needed.
    Accepts all subprocess.Popen() parameters plus:
      print_stdout: If True, the command's stdout is forwarded to stdout.
      filter_fn: A function taking a single string argument called with each line
                 of the subprocess's output. Each line has the trailing newline
                 character trimmed.
      stdout: Can be any bufferable output.
    stderr is always redirected to stdout.

    Returns 0 on success; raises Error when the child exits non-zero.
    NOTE(review): output is read byte-by-byte and compared against str
    literals -- Python 2 style I/O; confirm before running on Python 3.
    """
    # At least one consumer of the output must be requested.
    assert print_stdout or filter_fn
    stdout = stdout or sys.stdout
    filter_fn = filter_fn or (lambda x: None)
    assert not 'stderr' in kwargs
    kid = Popen(args, bufsize=0,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                **kwargs)
    # Do a flush of stdout before we begin reading from the subprocess's stdout
    last_flushed_at = time.time()
    stdout.flush()
    # Also, we need to forward stdout to prevent weird re-ordering of output.
    # This has to be done on a per byte basis to make sure it is not buffered:
    # normally buffering is done for each line, but if svn requests input, no
    # end-of-line character is output after the prompt and it would not show up.
    in_byte = kid.stdout.read(1)
    if in_byte:
        if call_filter_on_first_line:
            # Sentinel call so the filter knows output is about to start.
            filter_fn(None)
        in_line = ''
        while in_byte:
            if in_byte != '\r':
                if print_stdout:
                    stdout.write(in_byte)
                if in_byte != '\n':
                    in_line += in_byte
                else:
                    # Complete line assembled; hand it to the filter.
                    filter_fn(in_line)
                    in_line = ''
                    # Flush at least 10 seconds between line writes. We wait at least 10
                    # seconds to avoid overloading the reader that called us with output,
                    # which can slow busy readers down.
                    if (time.time() - last_flushed_at) > 10:
                        last_flushed_at = time.time()
                        stdout.flush()
            in_byte = kid.stdout.read(1)
        # Flush the rest of buffered output. This is only an issue with
        # stdout/stderr not ending with a \n.
        if len(in_line):
            filter_fn(in_line)
    rv = kid.wait()
    if rv:
        raise Error('failed to run command: %s' % ' '.join(args))
    return 0
def test_parser_bernese_clu():
    """Test that parsing bernese_clu gives expected output"""
    stations = get_parser("bernese_clu").as_dict()
    # 16 stations are expected, among them 'ales' carrying DOMES info
    assert len(stations) == 16
    assert "ales" in stations
    assert "domes" in stations["ales"]
def slave_node_driver(shared_job_queue, shared_result_queue, n_jobs_per_node):
    """ Starts slave processes on a single node.

    Arguments:
    shared_job_queue -- the job queue to obtain jobs from
    shared_result_queue -- the queue that results are sent to
    n_jobs_per_node -- the number of slave processes to start per node
    """
    workers = []
    for _ in range(n_jobs_per_node):
        worker = mp.Process(target=slave,
                            args=(shared_job_queue, shared_result_queue))
        workers.append(worker)
        worker.start()
    # wait for every worker to finish before returning
    for worker in workers:
        worker.join()
def extract_track_from_cube(nemo_cube, track_cube, time_pad, dataset_id,
                            nn_finder=None):
    """
    Extract surface track from NEMO 2d cube

    Samples the model field at the grid point and time step nearest to
    each track observation and returns the sampled values as a 1-D cube
    carrying the track's time/lat/lon/depth coordinates.

    :param nemo_cube: 2d NEMO field cube (time, lat, lon)
    :param track_cube: cube of track observations to sample at
    :param time_pad: padding added around the model time window when
        cropping the track
    :param dataset_id: value stored in the output cube's attributes
    :param nn_finder: optional precomputed NearestNeighborLatLon; built
        from the first model time slice when None
    """
    # crop track time to the model cube's time span (plus padding)
    st = ga.get_cube_datetime(nemo_cube, 0)
    et = ga.get_cube_datetime(nemo_cube, -1)
    # NOTE do not include start instant to have non-overlapping windows
    target = ga.constrain_cube_time(
        track_cube, st - time_pad, et + time_pad, include_start=False
    )
    # nearest-neighbor lookup along a 1-D coordinate via a KD-tree
    def find_nearest_index(src, dst, coord_name):
        src_arr = src.coord(coord_name).points
        dst_arr = dst.coord(coord_name).points
        time_tree = cKDTree(src_arr[:, numpy.newaxis])
        d, index = time_tree.query(dst_arr[:, numpy.newaxis], k=1)
        return index
    if nn_finder is None:
        nn_finder = NearestNeighborLatLon(nemo_cube[0, ...])
    target_lat = target.coord('latitude').points
    target_lon = target.coord('longitude').points
    # nearest model grid indices for every track point
    i_lat, i_lon = nn_finder.search(target_lon, target_lat)
    ntime = len(nemo_cube.coord('time').points)
    if ntime == 1:
        # single time slice: every sample comes from index 0
        i_time = numpy.zeros_like(i_lat)
    else:
        i_time = find_nearest_index(nemo_cube, target, 'time')
    # fancy-index the model data at the matched (time, lat, lon) triples
    values = nemo_cube.data[i_time, i_lat, i_lon]
    sname = ga.nemo_reader.map_nemo_sname_to_standard[nemo_cube.standard_name]
    cube = iris.cube.Cube(values, standard_name=sname, units=nemo_cube.units)
    # copy coordinates
    cube.add_dim_coord(target.coord('time'), 0)
    cube.add_aux_coord(target.coord('latitude'), 0)
    cube.add_aux_coord(target.coord('longitude'), 0)
    cube.add_aux_coord(target.coord('depth'))
    for coord_name in ['time', 'latitude', 'longitude', 'depth']:
        cube.coord(coord_name).attributes = {}  # discard coord attributes
    # add attributes
    cube.attributes['location_name'] = target.attributes['location_name']
    cube.attributes['dataset_id'] = dataset_id
    return cube
def get_mean_series_temp(log_frame: pd.DataFrame):
    """Get temperature time series as mean over CPU cores."""
    core_pattern = re.compile(r"Temp:Core\d+,0")
    temp_columns = [name for name in log_frame.columns
                    if core_pattern.fullmatch(name)]
    # row-wise mean across all matched per-core temperature columns
    return log_frame[temp_columns].mean(axis=1)
def new_channel():
    """Instantiates a dict containing a template for an empty single-point
    channel.
    """
    optimizer_defaults = {
        "Enabled": False,
        "Initial step size": 1.0,
        "Max value": 1.0,
        "Min value": 0.0,
        "Precision": 0.001,
        "Start value": 0.5,
    }
    relation_parameter = {
        "channel_name": "Step values",
        "lookup": None,
        "use_lookup": False,
        "variable": "x",
    }
    step_item = {
        "center": 0.0,
        "interp": "Linear",
        "n_pts": 1,
        "range_type": "Single",
        "single": 1.0,
        "span": 0.0,
        "start": 1.0,
        "step": 0.0,
        "step_type": "Fixed # of pts",
        "stop": 1.0,
        "sweep_rate": 0.0,
    }
    return {
        "channel_name": "myChannel",
        "after_last": "Goto first point",
        "alternate_direction": False,
        "equation": "x",
        "final_value": 0.0,
        "optimizer_config": optimizer_defaults,
        "relation_parameters": [relation_parameter],
        "show_advanced": False,
        "step_items": [step_item],
        "step_unit": "Instrument",
        "sweep_mode": "Off",
        "sweep_rate_outside": 0.0,
        "use_outside_sweep_rate": False,
        "use_relations": False,
        "wait_after": 0.0,
    }
def Log1p(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    jvm_vertex_class = context.jvm_view().Log1pVertex
    return Vertex(jvm_vertex_class, label, cast_to_vertex(input_vertex))
def test(request):
    """
    Controller for the app home page.
    """
    # no template variables are needed; render with an empty context
    return render(request, 'ueb_app/test.html', {})
def banner(name, ver_str, extra=""):
    """Create a simple header with version and host information"""
    rule = "#" * 75
    print("\n" + rule)
    print("Intel (R) {}. Version: {}".format(name, ver_str))
    print("Copyright (c) 2019, Intel Corporation. All rights reserved.\n")
    print("Running on {} with Python {}".format(platform.platform(),
                                                platform.python_version()))
    print(rule + "\n")
    print(extra)
def pre_process_flights(flights_folder):
    """
    Imports and merges flight files inside input folder.

    Each CSV in ``flights_folder`` is read, tagged with a ``flight_id``
    derived from its file name, augmented with the distance-from-touchdown
    column, and concatenated into a single DataFrame.
    """
    frames = []
    for flight_file in os.listdir(flights_folder):
        print('Processing flight: '+flight_file)
        df_flight = pd.read_csv(os.path.join(flights_folder, flight_file))
        df_flight['flight_id'] = flight_file.split('.')[0]
        df_flight = distance_from_touchdown(df_flight)
        print(df_flight.head())
        frames.append(df_flight)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collecting frames and concatenating once is also O(n) instead of
    # O(n^2) in the number of flights.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
def get_node_backups(request, queryset):
    """
    Return dict with backups attribute.
    """
    user_order_by, order_by = get_order_by(request, api_view=VmBackupList,
                                           db_default=('-id',), user_default=('-created',))
    pager = get_pager(request, queryset.order_by(*order_by), per_page=50)
    total_size = queryset.exclude(size__isnull=True).aggregate(Sum('size')).get('size__sum')
    return {
        'order_by': user_order_by,
        'pager': pager,
        'backups': pager,
        'backups_count': pager.paginator.count,
        'backups_size': total_size,
    }
def collect_gameplay_experiences(env, agent, buffer):
    """
    Collects gameplay experiences by playing env with the instructions
    produced by agent and stores the gameplay experiences in buffer.
    :param env: the game environment
    :param agent: the DQN agent
    :param buffer: the replay buffer
    :return: None
    """
    state = env.reset()
    done = False
    while not done:
        action = agent.collect_policy(state)
        next_state, reward, done, _ = env.step(action)
        # penalise the terminal transition with a fixed negative reward
        if done:
            reward = -1.0
        buffer.store_gameplay_experience(state, next_state,
                                         reward, action, done)
        state = next_state
def xpath(elt, xp, ns, default=None):
    """Run an xpath on an element and return the first result. If no results
    were returned then return the default value."""
    matches = elt.xpath(xp, namespaces=ns)
    return matches[0] if matches else default
def discard(hand):
    """
    Given six cards, return the four to keep.

    Scores every four-card subset of ``hand`` (with a neutral placeholder
    cut card) and returns the ids of the highest-scoring subset.
    """
    from app.controller import Hand
    from itertools import combinations
    # Neutral placeholder cut card so only the kept cards are scored.
    cut_card = {
        "value": 16,
        "suit": "none",
        "rank": 0,
        "name": "none",
        "id": 'uhgfhc'
    }
    max_points = -1
    card_ids = []
    # combinations, not permutations: hand scoring is order-independent,
    # so each 4-card set is evaluated once (15 sets instead of 360).
    for set_of_four in combinations(hand, 4):
        cards = [deck.get(c) for c in set_of_four]
        candidate = Hand(cards, cut_card)  # do not shadow the 'hand' param
        try:
            hand_points = candidate.calculate_points()
        except Exception as e:
            # TODO: why does this happen??
            print('Exception calculating bot points: {}'.format(e))
            continue
        if hand_points > max_points:
            max_points = hand_points
            card_ids = set_of_four
    return card_ids
def check_response_stimFreeze_delays(data, **_):
    """ Checks that the time difference between the visual stimulus freezing and the
    response is positive and less than 100ms.
    Metric: M = (stimFreeze_times - response_times)
    Criterion: 0 < M < 0.100 s
    Units: seconds [s]
    :param data: dict of trial data with keys ('stimFreeze_times', 'response_times', 'intervals',
    'choice')
    """
    # NaN deltas become +inf so they always fail the upper-bound check
    delays = data["stimFreeze_times"] - data["response_times"]
    metric = np.nan_to_num(delays, nan=np.inf)
    # pass iff the delay lies strictly inside (0, 0.1) seconds
    passed = np.logical_and(metric > 0, metric < 0.1).astype(float)
    # stimFreeze is triggered differently on no_go trials; mark them NaN so
    # they are excluded from the proportion-passed calculation
    passed[data["choice"] == 0] = np.nan
    assert data["intervals"].shape[0] == len(metric) == len(passed)
    return metric, passed
def get_invVR_aff2Ds(kpts, H=None):
    """
    Returns matplotlib keypoint transformations (circle -> ellipse).

    Builds one 2-D affine transform per keypoint from its 3x3 invVR
    matrix; when a homography ``H`` is given, each matrix is pre-composed
    with it and wrapped in a HomographyTransform instead.
    """
    import vtool.keypoint as ktool
    matrices = ktool.get_invVR_mats3x3(kpts)
    if H is None:
        return [mpl.transforms.Affine2D(mat) for mat in matrices]
    return [HomographyTransform(H.dot(mat)) for mat in matrices]
def lambda_fanout_clean(event, context):
    """Fanout SNS messages to cleanup snapshots when called by AWS Lambda.

    :param event: Lambda trigger payload (unused here)
    :param context: Lambda context, forwarded to logging setup and fanout
    """
    # baseline logging for lambda
    utils.configure_logging(context, LOG)
    # for every region, send to this function
    clean.perform_fanout_all_regions(context)
    LOG.info('Function lambda_fanout_clean completed')
def is_reload(module_name: str) -> bool:
    """True if the module given by `module_name` should reload the
    modules it imports. This is the case if `enable_reload()` was called
    for the module before.
    """
    # the flag attribute name is derived from the dotted module path
    flag_name = module_name.replace('.', '_') + "_DO_RELOAD_MODULE"
    return hasattr(sys.modules[module_name], flag_name)
def get_string(string_name):
    """
    Gets a string from the language file
    """
    # fall back: selected language -> english -> the raw key itself
    for lang_key in (lang, "english"):
        translations = lang_file[lang_key]
        if string_name in translations:
            return translations[string_name]
    return string_name
def build_streambed(x_max, set_diam):
    """ Build the bed particle list.

    Handles calls to add_bed_particle, checks for
    completness of bed and updates the x-extent
    of stream when the packing exceeds/under packs
    within 8mm range.

    Note: the updates to x-extent are only required
    when variable particle diameter is being used.

    Return values:
    bed_particles -- list of bed particles
    x_max -- (possibly adjusted) stream extent
    """
    max_particles = int(math.ceil(x_max / set_diam))
    # pre-allocate one 7-field row per potential particle
    bed_particles = np.zeros([max_particles, 7], dtype=float)
    running_id = 0
    running_pack_idx = 0
    # pack at least one particle, then keep adding until the bed is complete
    # (removed the redundant 'else: continue' of the original loop)
    while True:
        running_id, running_pack_idx = add_bed_particle(set_diam,
                                                        bed_particles,
                                                        running_id,
                                                        running_pack_idx)
        if bed_complete(running_pack_idx, x_max):
            break
    # Bed packing does not always match x_max. Adjust if off
    # (extent = centre coordinate + radius field of the last particle)
    bed_max = int(math.ceil(bed_particles[running_id-1][1]
                            + bed_particles[running_id-1][3]))
    if x_max != bed_max:
        msg = (
            f'Bed packing could not match x_max parameter... Updating '
            f'x_max to match packing extent: {bed_max}....'
        )
        logging.warning(msg)
        x_max = bed_max
    # strip unused (all-zero) rows from the pre-allocated array
    empty_rows = (bed_particles == 0).all(axis=1)
    bed_particles = bed_particles[~empty_rows]
    return bed_particles, x_max
def rotate_around_point_highperf_Numpy(xy, radians, origin):
    """
    Rotate a point around a given point.

    The sine/cosine terms are evaluated once and packed into a rotation
    matrix, so this is the cached "high performance" variant.
    """
    cos_r, sin_r = np.cos(radians), np.sin(radians)
    rotation = np.array(((cos_r, sin_r),
                         (-sin_r, cos_r)))
    offset = xy - origin
    return origin + rotation.dot(offset)
def eval(cfg, env, agent):
    """
    Do the evaluation of the current agent.

    :param cfg: configuration object (reads .env, .algo, .eval_eps)
    :param env: gym-style environment (reset/step)
    :param agent: agent exposing predict(state)
    :return: (rewards, ma_rewards) -- per-episode rewards and their
        exponential moving average
    """
    print("========= Start to Evaluation ===========")
    print("Environment:{}, Algorithm:{}".format(cfg.env, cfg.algo))
    # BUG FIX: 'rewards' and 'ma_rewards' were appended to and returned but
    # never defined in this function (NameError unless leaked globals
    # existed); initialise them locally so the function is self-contained.
    rewards = []
    ma_rewards = []
    for i_episode in range(cfg.eval_eps):
        ep_reward = 0
        state = env.reset()
        done = False
        while not done:
            action = agent.predict(state)
            state, reward, done, _ = env.step(action)
            ep_reward += reward
        rewards.append(ep_reward)
        # moving average: 0.9 * previous + 0.1 * current
        if ma_rewards:
            ma_rewards.append(ma_rewards[-1]*0.9+ep_reward*0.1)
        else:
            ma_rewards.append(ep_reward)
        print("Episode:{}/{} : reward:{:.1f}".format(i_episode, cfg.eval_eps, ep_reward))
    print("============ Evaluation Complete =================")
    return rewards, ma_rewards
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.