content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_table_from_alter_table(line, alter_expr):
    """Extract the table name from an ALTER TABLE statement line.

    Returns the bare table name parsed from ``line`` after ``alter_expr``,
    handling the four combinations of schema qualification and double
    quoting. Per the dump-file format, any schema or table name containing
    special characters is already double quoted.
    """
    dot_idx = line.find('.')
    quote_idx = line.rfind('"')
    qualified = dot_idx != -1
    quoted = quote_idx != -1

    if not qualified:
        if not quoted:
            # Plain unquoted table name: first whitespace-separated token.
            return line[len(alter_expr):].split()[0]
        # Quoted table, no schema: slice from just past the opening quote
        # through the closing quote (keeps the closing quote, matching the
        # original parsing behaviour).
        return line[len(alter_expr) + 1 : quote_idx + 1]

    if not quoted:
        # schema.table, neither part quoted: split the first token.
        _, table = split_fqn(line[len(alter_expr):].split()[0])
        return table

    if dot_idx < quote_idx:
        # The table part is double quoted; take through the closing quote.
        full_table_name = line[len(alter_expr) : quote_idx + 1]
    else:
        # Only the schema part is quoted; stop at the space after the dot.
        full_table_name = line[len(alter_expr) : line.find(' ', dot_idx)]
    _, table = split_fqn(full_table_name)
    return table
def read_and_setup_bs(request):
    """Read parameter files, build the lattice and return the band structure.

    Parameters
    ----------
    request : fixture request object
        ``request.param`` names the folder under the tests ``test_data``
        directory that holds the parameter files.

    Returns
    -------
    bs : object
        A `Bandstructure()` object.
    """
    # Classes cannot be pickled for caching here, so the data is loaded
    # from disk every time this is called.
    location = os.path.dirname(__file__) + "/test_data/" + str(request.param)
    # Read parameters, then generate the lattice and the bands from them.
    params = inputoutput.Param(inputoutput.readparam(location=location))
    lat = lattice.Lattice(params, location=location)
    return bandstructure.Bandstructure(lat, params, location=location)
def bind_port(socket, ip, port):
    """Bind the given ZMQ socket to ``tcp://ip[:port]``.

    A non-positive ``port`` requests a random port from ZMQ. Returns the
    port that was actually bound.
    """
    endpoint = 'tcp://%s' % ip
    if port > 0:
        socket.bind('%s:%i' % (endpoint, port))
        return port
    # Zero (or negative) port: let ZMQ pick one and report it back.
    return socket.bind_to_random_port(endpoint)
def eval_once(saver, summary_writer, ler, summary_op):
  """Run Eval once.

  Restores the newest checkpoint from ``FLAGS.checkpoint_dir`` (skipping
  checkpoints already evaluated, as tracked in the module-level
  ``tested_checkpoints`` list), averages the label error rate (LER) over
  the eval batches, writes it to the summary, and saves a copy of the
  checkpoint whenever a new best (lowest) LER is reached.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    ler: Top K op.
    summary_op: Summary op.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      # Restores from checkpoint
      # Assuming model_checkpoint_path looks something like:
      # /my-favorite-path/ocr_train/model.ckpt-0,
      # extract global_step from it.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      if global_step in tested_checkpoints:
        # This checkpoint was already evaluated in a previous call.
        sess.close()
        return
      saver.restore(sess, ckpt.model_checkpoint_path)
      tested_checkpoints.append(global_step)
    else:
      print('No checkpoint file found')
      return
    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))
      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.eval_batch_size))
      step = 0
      mean_ler = 0
      while step < num_iter and not coord.should_stop():
        ler_res = sess.run(ler)
        mean_ler += ler_res
        step += 1
      # Mean LER across evaluated batches (lower is better).
      # NOTE(review): divides by zero if num_iter is 0 -- confirm
      # FLAGS.num_examples is always >= eval_batch_size.
      precision = mean_ler / step
      status_string = "{} Step: {} Val LER: {} "
      print(status_string.format(datetime.now(), global_step, precision))
      summary = tf.Summary()
      summary.ParseFromString(sess.run(summary_op))
      summary.value.add(tag='Val LER', simple_value=precision)
      summary_writer.add_summary(summary, global_step)
      global best_accuracy
      if precision < best_accuracy:
        # Lower LER is better; keep a dedicated "best" checkpoint copy.
        best_accuracy = precision
        print("Saving new best checkpoint")
        saver.save(sess, os.path.join(FLAGS.best_dir, "checkpoint_ler_" + str(precision) + "_"))
    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
    sess.close()
    gc.collect()
def update_target_graph(actor_tvars, target_tvars, tau):
    """Build the ops that softly update the target graph from the actor
    graph, following the DDQN update equation.

    Each target variable is assigned ``tau * target + (1 - tau) * actor``;
    the list of assign ops is returned for the caller to run.
    """
    return [
        tgt.assign(tgt.value() * tau + (1 - tau) * act.value())
        for tgt, act in zip(target_tvars, actor_tvars)
    ]
def safe(function: Callable[..., T]) -> Callable[..., Result[T, Exception]]:
    """Wrap a function so raised exceptions become ``Err`` results.

    The returned callable yields ``Ok(value)`` on success and ``Err(exc)``
    when ``function`` raises any ``Exception``.

    e.g.:
        @safe
        def bad() -> int:
            raise Exception("oops")
    """
    import functools

    # functools.wraps preserves __name__/__doc__ of the wrapped function,
    # which the original version lost (every wrapped callable reported
    # itself as "wrapped").
    @functools.wraps(function)
    def wrapped(*args, **kwargs) -> Result[T, Exception]:
        try:
            return Ok(function(*args, **kwargs))
        except Exception as e:
            return Err(e)
    return wrapped
def test_load_labels_data_include_missing_labels_as_false():
    """ Test the load_labels_data function by checking whether the query
    produces the correct labels when the builder is configured to include
    missing labels in the train matrix as False.
    """
    # set up labeling config variables
    dates = [
        datetime.datetime(2016, 1, 1, 0, 0),
        datetime.datetime(2016, 2, 1, 0, 0),
        datetime.datetime(2016, 6, 1, 0, 0),
    ]
    # same as the other load_labels_data test, except we include an extra date, 2016-06-01
    # this date does have entity 0 included via the states table, but no labels
    # make a dataframe of labels to test against
    labels_df = pd.DataFrame(
        labels,
        columns=[
            "entity_id",
            "as_of_date",
            "label_timespan",
            "label_name",
            "label_type",
            "label",
        ],
    )
    labels_df["as_of_date"] = convert_string_column_to_date(labels_df["as_of_date"])
    # set_index returns a new frame; the original call discarded the
    # result, leaving labels_df unindexed.
    labels_df = labels_df.set_index(["entity_id", "as_of_date"])
    # create an engine and generate a table with fake feature data
    with testing.postgresql.Postgresql() as postgresql:
        engine = create_engine(postgresql.url())
        create_schemas(engine, features_tables, labels, states)
        with get_matrix_storage_engine() as matrix_storage_engine:
            builder = MatrixBuilder(
                db_config=db_config,
                matrix_storage_engine=matrix_storage_engine,
                experiment_hash=experiment_hash,
                engine=engine,
                include_missing_labels_in_train_as=False,
            )
            # make the entity-date table
            entity_date_table_name = builder.make_entity_date_table(
                as_of_times=dates,
                label_type="binary",
                label_name="booking",
                state="active",
                matrix_type="train",
                matrix_uuid="my_uuid",
                label_timespan="1 month",
            )
            result = builder.load_labels_data(
                label_name=label_name,
                label_type=label_type,
                label_timespan="1 month",
                matrix_uuid="my_uuid",
                entity_date_table_name=entity_date_table_name,
            )
            df = pd.DataFrame.from_dict(
                {
                    "entity_id": [0, 2, 3, 4, 4],
                    "as_of_date": [dates[2], dates[1], dates[1], dates[0], dates[1]],
                    "booking": [0, 0, 0, 1, 0],
                }
            ).set_index(["entity_id", "as_of_date"])
            # the first row would not be here if we had not configured the Builder
            # to include missing labels as false
            test = result == df
            assert test.all().all()
def strtime(millsec, form="%i:%02i:%06.3f"):
    """
    Time formatting function.

    Args:
        millsec (int): Number of milliseconds to format
        form (str): printf-style format; the number of ``%`` specifiers
            (1-4) selects seconds, min:sec, hr:min:sec or day:hr:min:sec.

    Returns:
        (string) Formatted string
    """
    fc = form.count("%")
    # Chain the divmods so each unit operates on the remainder of the
    # previous one. The original reused the full input for every unit,
    # producing e.g. 61 minutes instead of 1:01 for 3661000 ms.
    days, rem = divmod(millsec, 86400000)
    hours, rem = divmod(rem, 3600000)
    minutes, rem = divmod(rem, 60000)
    seconds = float(rem) / 1000
    var = {1: (seconds), 2: (minutes, seconds), 3: (hours, minutes, seconds),
           4: (days, hours, minutes, seconds)}
    return form % var[fc]
def parallel_categories():
    """Parallel Categories Plot for the ten priciest neighborhoods."""
    # Average each metric per neighborhood, then keep the ten with the
    # highest mean sale price per square foot.
    neighborhood_means = sfo_data.groupby(["neighborhood"]).mean()
    top_ten = (neighborhood_means
               .sort_values("sale_price_sqr_foot", ascending=False)
               .head(10)
               .reset_index())
    return px.parallel_categories(
        top_ten,
        color="sale_price_sqr_foot",
        color_continuous_scale=px.colors.sequential.Inferno,
        title='Average House Value/Neighborhood',
        labels={'neighborhood': "Neighborhood",
                'sale_price_sqr_foot': 'Sales Price/Square Foot',
                'housing_units': 'Housing Units',
                'gross_rent': 'Gross Rent'})
def hessian_filter(img: Image, scales: list, dimension: int = 0,
                   scaled_to_eval=False, normalized=False, parallel=False) -> Image:
    """Applies a (multi-scale) hessian-like filtering operation on a 3D stack.

    Transforms an image into one where each pixel carries an object-ness
    measure: blob-like (0D) structures are enhanced when ``dimension`` is 0,
    vessel-like (1D) structures when it is 1.

    Args:
        img (Image): The image to which the hessian-type filter is applied.
        scales (list): List of integer scales at which features will be filtered.
        dimension (int): Type of features to enhance; 0 for blobs, 1 for vessels.
        scaled_to_eval (bool): If True, scale the object-ness measure by the
            magnitude of the largest absolute eigenvalue.
        normalized (bool): If True, rescale all output intensities to 0-1.
            Forced on whenever more than one scale is given, so that the
            per-scale responses are comparable.
        parallel (bool): If True, process each scale on its own process.
            Start-up costs may offset the gain unless many scales are given.

    Returns:
        Image: Stack whose intensities reflect similarity to the requested
        structure type (blob, vessel) at each voxel.

    Todo:
        * Benchmark parallelization
    """
    if len(scales) > 1:
        normalized = True

    if not parallel:
        # Serial path: evaluate every scale on this thread.
        results = []
        for s in scales:
            eval_hessian_scale(img, results, s, dimension, scaled_to_eval, normalized)
        return max_between_stacks(results)

    # Parallel path: one worker process per scale, collecting into a
    # manager-backed shared list.
    with Manager() as manager:
        shared = manager.list()
        workers = [Process(target=eval_hessian_scale,
                           args=(img, shared, s, dimension,
                                 scaled_to_eval, normalized))
                   for s in scales]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
        # Copy out of the manager proxy before it is torn down.
        results = list(shared)
    return max_between_stacks(results)
def close_project(id_, **kwargs):
    """Close a project

    Marks the project state as CLOSED and persists the change through the
    projects service stub. Only the project owner may close it; the
    caller must also pass the generic project permission check.

    :param id_: The ID of the project object to be updated
    :type id_: str
    :param kwargs: request context; ``token_info`` and ``user`` are read.
    :raises connexion.ProblemException: 403 when the caller is not the owner.
    :rtype: ProjectSerializer
    """
    proj = get_project_object(id_)
    check_project_permission(proj, kwargs["token_info"])
    # Ownership is required in addition to the generic permission check.
    if proj.owner != kwargs["user"]:
        raise connexion.ProblemException(status=403, title="Permission Denied",
                                         detail="Doesn't have enough permissions to take this action")
    proj.state = project_pb2.STATE.CLOSED
    stub = get_projects_services_stub()
    response = stub.Update(proj)
    return ProjectSerializer.from_dict(util.deserialize_protobuf(response))
def inspect_download_url(input_path, args, facts):
    """Process a direct download URL

    Gather information required to create a recipe: records the URL,
    delegates to GitHub/SourceForge inspectors when applicable, warns
    about suspicious URL patterns, downloads the file into CACHE_DIR,
    then determines its format and hands it to the matching inspector.

    Args:
        input_path: The path or URL that Recipe Robot was asked to use
            to create recipes.
        args: The command line arguments.
        facts: A continually-updated dictionary containing all the
            information we know so far about the app associated with the
            input path.

    Returns:
        facts dictionary.
    """
    # We never skip download URL inspection, even if we've already
    # inspected a download URL during this run. This handles rare
    # situations in which the download URL is in a different format than
    # the Sparkle download.
    # Remove leading and trailing spaces from URL.
    input_path = input_path.strip()
    # Save the download URL to the dictionary of facts.
    robo_print("Download URL is: %s" % input_path, LogLevel.VERBOSE, 4)
    facts["download_url"] = input_path
    facts["is_from_app_store"] = False
    # If download URL is hosted on GitHub or SourceForge, we can gather
    # more information.
    if "github.com" in input_path or "githubusercontent.com" in input_path:
        if "github_repo" not in facts:
            facts = inspect_github_url(input_path, args, facts)
    if "sourceforge.net" in input_path:
        if "sourceforge_id" not in facts:
            facts = inspect_sourceforge_url(input_path, args, facts)
    # Warn if it looks like we're using a version-specific download
    # path, but only if the path was not obtained from a feed of some
    # sort.
    version_match = re.search(r"[\d]+\.[\w]+$", input_path)
    if version_match is not None and (
        "sparkle_feed_url" not in facts["inspections"]
        and "github_url" not in facts["inspections"]
        and "sourceforge_url" not in facts["inspections"]
        and "bitbucket_url" not in facts["inspections"]
    ):
        facts["warnings"].append(
            "Careful, this might be a version-specific URL. Better to give me "
            'a "latest" URL or a Sparkle feed.'
        )
    # Warn if it looks like we're using a temporary CDN URL.
    aws_expire_match = re.search(r"\:\/\/.*Expires\=", input_path)
    if aws_expire_match is not None and (
        "sparkle_feed_url" not in facts["inspections"]
        and "github_url" not in facts["inspections"]
        and "sourceforge_url" not in facts["inspections"]
        and "bitbucket_url" not in facts["inspections"]
    ):
        facts["warnings"].append(
            "This is a CDN-cached URL, and it may expire. Try feeding me a "
            "permanent URL instead."
        )
    # Warn if it looks like we're using an AWS URL with an access key.
    aws_key_match = re.search(r"\:\/\/.*AWSAccessKeyId\=", input_path)
    if aws_key_match is not None and (
        "sparkle_feed_url" not in facts["inspections"]
        and "github_url" not in facts["inspections"]
        and "sourceforge_url" not in facts["inspections"]
        and "bitbucket_url" not in facts["inspections"]
    ):
        facts["warnings"].append("This URL contains an AWSAccessKeyId parameter.")
    # Determine filename from input URL (will be overridden later if a
    # better candidate is found.)
    parsed_url = urlparse(input_path)
    filename = parsed_url.path.split("/")[-1]
    # If the download URL doesn't already end with the parsed filename,
    # it's very likely that URLDownloader needs the filename argument
    # specified.
    facts["specify_filename"] = not input_path.endswith(filename)
    # Check to make sure URL is valid, and switch to HTTPS if possible.
    checked_url = check_url(input_path)
    if checked_url.startswith("http:"):
        facts["warnings"].append(
            "This download URL is not using HTTPS. I recommend contacting the "
            "developer and politely suggesting that they secure their download URL. "
            "(Example: https://twitter.com/homebysix/status/714508127228403712)"
        )
    # Download the file for continued inspection.
    # TODO(Elliot): Maybe something like this is better for downloading
    # big files? https://gist.github.com/gourneau/1430932 (#24)
    robo_print("Downloading file for further inspection...", LogLevel.VERBOSE)
    # Actually download the file.
    try:
        raw_download = urlopen(checked_url)
    except HTTPError as err:
        if err.code == 403:
            # Try again, this time with a user-agent.
            try:
                raw_download = useragent_urlopen(checked_url, "Mozilla/5.0")
                facts["warnings"].append(
                    "I had to use a different user-agent in order to "
                    "download this file. If you run the recipes and get a "
                    '"Can\'t open URL" error, it means AutoPkg encountered '
                    "the same problem."
                )
                facts["user-agent"] = "Mozilla/5.0"
            except Exception as err:
                facts["warnings"].append(
                    "Error encountered during file download. (%s)" % err
                )
                return facts
        # NOTE(review): this is `if`, not `elif`, so a *successful* 403
        # retry above still falls through to the `else` below and returns
        # early with a spurious warning -- confirm whether `elif` was
        # intended here.
        if err.code == 404:
            facts["warnings"].append("Download URL not found. (%s)" % err)
            return facts
        else:
            facts["warnings"].append(
                "Error encountered during file download. (%s)" % err
            )
            return facts
    except URLError as err:
        if str(err.reason).startswith("[SSL: SSLV3_ALERT_HANDSHAKE_FAILURE]"):
            # TODO(Elliot): Try again using curl? (#19)
            facts["warnings"].append(
                "I got an SSLv3 handshake error, and I don't yet know what to "
                "do with that. (%s)" % err
            )
            return facts
        else:
            facts["warnings"].append(
                "Error encountered during file download. (%s)" % err.reason
            )
            return facts
    except CertificateError as err:
        facts["warnings"].append(
            "There seems to be a problem with the developer's SSL certificate. "
            "(%s)" % err
        )
        # TODO: If input path was HTTP, revert to that and try again.
        return facts
    # Get the actual filename from the server, if it exists.
    if "Content-Disposition" in raw_download.info():
        content_disp = raw_download.info()["Content-Disposition"]
        r_obj = re.search(r"filename=\"(.+)\"\;", content_disp)
        if r_obj is not None:
            filename = r_obj.group(1)
    # If filename was not detected from either the URL or the headers,
    # use a safe default name.
    if filename == "":
        filename = "download"
    facts["download_filename"] = filename
    # Write the downloaded file to the cache folder, showing progress.
    # NOTE(review): HTTPMessage.getheaders() is Python 2 only; on
    # Python 3 this raises AttributeError -- confirm the target
    # interpreter (get_all() is the Python 3 equivalent).
    if len(raw_download.info().getheaders("Content-Length")) > 0:
        file_size = int(raw_download.info().getheaders("Content-Length")[0])
    else:
        # File size is unknown, so we can't show progress.
        file_size = 0
    with open(os.path.join(CACHE_DIR, filename), "wb") as download_file:
        file_size_dl = 0
        block_sz = 8192
        while True:
            buffer = raw_download.read(block_sz)
            if not buffer:
                break
            # Write downloaded chunk.
            file_size_dl += len(buffer)
            download_file.write(buffer)
            # Show progress if file size is known.
            if file_size > 0:
                p = float(file_size_dl) / file_size
                status = r" {0:.2%}".format(p)
                # chr(8) is backspace: rewind the cursor so the next
                # status overwrites this one on the terminal.
                status = status + chr(8) * (len(status) + 1)
                if args.app_mode:
                    # Show progress in 10% increments.
                    # NOTE(review): for files smaller than ~10 blocks the
                    # divisor can be zero -- confirm small downloads are
                    # not expected in app_mode.
                    if (file_size_dl / block_sz) % (file_size / block_sz / 10) == 0:
                        robo_print(status, LogLevel.VERBOSE)
                else:
                    # Show progress in real time.
                    sys.stdout.flush()
                    sys.stdout.write(status)
    robo_print(
        "Downloaded to %s" % os.path.join(CACHE_DIR, filename), LogLevel.VERBOSE, 4
    )
    # Just in case the "download" was actually a Sparkle feed.
    hidden_sparkle = False
    # NOTE(review): the download may be binary; opening it in text mode
    # can raise UnicodeDecodeError on Python 3 -- consider "rb" here.
    with open(os.path.join(CACHE_DIR, filename), "r") as download_file:
        if download_file.read()[:6] == "<?xml ":
            robo_print("This download is actually a Sparkle feed", LogLevel.VERBOSE, 4)
            hidden_sparkle = True
    if hidden_sparkle is True:
        os.remove(os.path.join(CACHE_DIR, filename))
        facts = inspect_sparkle_feed_url(checked_url, args, facts)
        return facts
    # Try to determine the type of file downloaded. (Overwrites any
    # previous download_type, because the download URL is the most
    # reliable source.)
    download_format = ""
    robo_print("Determining download format...", LogLevel.VERBOSE)
    for this_format in ALL_SUPPORTED_FORMATS:
        if filename.lower().endswith(this_format) or this_format in parsed_url.query:
            download_format = this_format
            facts["download_format"] = this_format
            robo_print("File extension is %s" % this_format, LogLevel.VERBOSE, 4)
            break  # should stop after the first format match
    # If we've already seen the app and the download format, there's no
    # need to unpack the downloaded file.
    if "download_format" in facts and "app" in facts["inspections"]:
        return facts
    robo_print(
        "Download format is unknown, so we're going to try mounting it as a disk image "
        "first, then unarchiving it. This may produce errors, but will hopefully "
        "result in a success.",
        LogLevel.DEBUG,
    )
    robo_print("Opening downloaded file...", LogLevel.VERBOSE)
    # If the file is a webpage (e.g. 404 message), warn the user now.
    with open(os.path.join(CACHE_DIR, filename), "r") as download:
        if "html" in download.readline().lower():
            facts["warnings"].append(
                "There's a good chance that the file failed to download. "
                "Looks like a webpage was downloaded instead."
            )
    # Open the disk image (or test to see whether the download is one).
    if (
        facts.get("download_format", "") == "" or download_format == ""
    ) or download_format in SUPPORTED_IMAGE_FORMATS:
        facts = inspect_disk_image(os.path.join(CACHE_DIR, filename), args, facts)
    # Open the zip archive (or test to see whether the download is one).
    if (
        facts.get("download_format", "") == "" or download_format == ""
    ) or download_format in SUPPORTED_ARCHIVE_FORMATS:
        facts = inspect_archive(os.path.join(CACHE_DIR, filename), args, facts)
    # Inspect the installer (or test to see whether the download is
    # one).
    if download_format in SUPPORTED_INSTALL_FORMATS:
        robo_print("Download format is %s" % download_format, LogLevel.VERBOSE, 4)
        facts["download_format"] = download_format
        # Inspect the package.
        facts = inspect_pkg(os.path.join(CACHE_DIR, filename), args, facts)
    if facts.get("download_format", "") == "":
        facts["warnings"].append(
            "I've investigated pretty thoroughly, and I'm still not sure "
            "what the download format is. This could cause problems later."
        )
    return facts
def min_cycle_ratio(G: nx.Graph, dist):
    """Find the minimum cost-to-time cycle ratio of ``G``.

    Edge attributes ``'cost'`` and ``'time'`` (defaulting to 1) define the
    ratio. An initial cycle from ``nx.find_cycle`` seeds the parametric
    search carried out by ``max_parametric``.

    The attribute names are now referenced through ``mu``/``sigma``
    everywhere (the original declared them but still hard-coded the string
    literals in the inner functions, which its own docstring flagged as a
    todo).

    Arguments:
        G ([type]): the graph to search.
        dist: per-node distance map; its value type determines the
            arithmetic type used for the ratio (e.g. Fraction for exact
            results).

    Returns:
        [type]: result of ``max_parametric`` (the optimal ratio/cycle).
    """
    mu = 'cost'
    sigma = 'time'
    set_default(G, mu, 1)
    set_default(G, sigma, 1)
    # Use the value type of ``dist`` so exact arithmetic is preserved.
    T = type(dist[next(iter(G))])

    def calc_weight(r, e):
        """Parametric edge weight: cost - r * time for edge ``e``."""
        u, v = e
        return G[u][v][mu] - r * G[u][v][sigma]

    def calc_ratio(C):
        """Calculate the cost/time ratio of the cycle ``C`` (edge list)."""
        total_cost = sum(G[u][v][mu] for (u, v) in C)
        total_time = sum(G[u][v][sigma] for (u, v) in C)
        return T(total_cost) / total_time

    C0 = nx.find_cycle(G)
    r0 = calc_ratio(C0)
    return max_parametric(G, r0, C0, calc_weight, calc_ratio, dist)
def int2base(x, base):
    """
    Convert an integer to its string representation in the given base
    (digits 0-9 then A-Z, so bases up to 36).

    Source: http://stackoverflow.com/questions/2267362
    """
    import string
    digs = string.digits + string.ascii_uppercase
    if x == 0:
        return digs[0]
    # Track the sign explicitly and work on the magnitude. The original
    # never negated x for negative input and relied on float division
    # (int(x / base)), which silently corrupts results for |x| > 2**53.
    sign = -1 if x < 0 else 1
    x = abs(x)
    digits = []
    while x:
        x, r = divmod(x, base)  # exact integer arithmetic at any size
        digits.append(digs[r])
    if sign < 0:
        digits.append('-')
    digits.reverse()
    return ''.join(digits)
def geometric(X):
    """
    If x1,x2,...xn ~iid~ GEO(p) then the MLE is 1 / X-bar.

    Parameters
    ----------
    X : array_like
        Sample of discrete (integer-valued) observations.

    Returns
    ----------
    geo_mle : float
        MLE calculation for p-hat for GEO(p)

    Raises
    ----------
    ValueError
        If ``X`` is not a discrete data set (only integers).

    References
    ----------
    [1] Casella, G., Berger, R. L., "Statistical Inference"
        Belmont (California): Brooks/Cole Cengage Learning (2017)
    [2] Tone, MAT 562: Mathematical Statistics notes, U of L
    """
    _input = np.array(X)
    # Validate before computing (the original computed the MLE first and
    # compared the check result with `== True`).
    if not discrete_check(_input):
        raise ValueError("X must be a discrete data set (only integers)")
    return 1 / np.mean(_input)
def parse_args():
    """Parse input arguments.

    Defines all command line options for the Gated2Depth train/eval
    scripts and returns the parsed namespace.
    """
    parser = argparse.ArgumentParser(description='Run train or eval scripts for Gated2Depth')
    parser.add_argument("--base_dir", help="Path to dataset", required=True)
    parser.add_argument("--train_files_path", help="Path to file with train file names", required=False)
    parser.add_argument("--eval_files_path",
                        help="Path to file with validation/evaluation file names. Required if running both in train and eval mode",
                        required=True)
    parser.add_argument("--data_type", choices=['real', 'synthetic'], help="[real|synthetic].", default='real',
                        required=True)
    parser.add_argument("--results_dir", help="Path to results directory (train or eval)",
                        default='gated2depth_results', required=False)
    # NOTE(review): the default below contains a space ('gated2depth model'),
    # which looks like a typo for 'gated2depth_model' -- confirm before changing.
    parser.add_argument("--model_dir", help="Path to model directory",
                        default='gated2depth model', required=False)
    parser.add_argument("--exported_disc_path", help="Path to exported discriminator. Used to train "
                                                     "a generator with a pre-trained discriminator",
                        default=None, required=False)
    parser.add_argument("--mode", choices=['train', 'eval'], help="[train/eval]",
                        default='train', required=False)
    parser.add_argument('--use_multiscale', help='Use multiscale loss function',
                        action='store_true', required=False)
    parser.add_argument('--smooth_weight', type=float, help='Smoothing loss weight',
                        default=0.5, required=False)
    parser.add_argument('--adv_weight', type=float, help='Adversarial loss weight',
                        default=0., required=False)
    parser.add_argument('--lrate', type=float, help='Learning rate',
                        default=0.0001, required=False)
    parser.add_argument('--min_distance', type=float, help='minimum distance',
                        default=3., required=False)
    parser.add_argument('--max_distance', type=float, help='maximum distance',
                        default=150., required=False)
    parser.add_argument('--use_3dconv', help='Use 3D convolutions architecture',
                        action='store_true', required=False)
    parser.add_argument('--gpu', dest='gpu', help='GPU id', default='0', required=False)
    parser.add_argument('--num_epochs', type=int, dest='num_epochs',
                        help='Number of training epochs', default=2)
    parser.add_argument('--show_result', help='Show result image during evaluation',
                        action='store_true', required=False)
    args = parser.parse_args()
    return args
def test_bad_colours_set():
    """A colour set that is too small must raise BadColoursSetError."""
    engine = MastermindCore()
    # Three colours cannot fill a 4-slot code without duplicates.
    config = dict(code_length=4,
                  allow_duplicates=False,
                  colours_set=['Red', 'Green', 'Blue'])
    with pytest.raises(BadColoursSetError):
        engine.configure(**config)
def calc_dH(
    e_per_atom,
    stoich=None,
    num_H_atoms=0,
    ):
    """Formation enthalpy per atom for Ir-oxide stoichiometries.

    Based on an E_DFT/atom of -7.047516 for rutile-IrO2. See the
    derivation under:
    PROJ_IrOx_Active_Learning_OER/workflow/energy_treatment_deriv/calc_references
    (originally in proj_data_irox.py of the PROJ_IrOx_Active_Learning_OER
    repository).

    Args:
        e_per_atom: DFT energy per atom of the structure.
        stoich: one of "AB2", "AB3", or any spelling of IrO3H
            ("IrHO3", "IrO3H", "iro3h", "iroh3").
        num_H_atoms: unused; kept for interface compatibility.

    Returns:
        Formation enthalpy per atom (eV/atom).

    Raises:
        ValueError: for an unrecognized ``stoich`` (the original fell
            through to an UnboundLocalError instead).
    """
    # Reference energies (eV): O, Ir metal fit, and H.
    o_ref = -4.64915959
    ir_metal_fit = -9.32910211636731
    h_ref = -3.20624595
    if stoich == "AB2":
        dH = (2 + 1) * e_per_atom - 2 * o_ref - ir_metal_fit
        dH_per_atom = dH / 3.
    elif stoich == "AB3":
        dH = (3 + 1) * e_per_atom - 3 * o_ref - ir_metal_fit
        dH_per_atom = dH / 4.
    elif stoich in ("IrHO3", "IrO3H", "iro3h", "iroh3"):
        dH = (3 + 1 + 1) * e_per_atom - 3 * o_ref - ir_metal_fit - h_ref
        dH_per_atom = dH / 5.
    else:
        raise ValueError("Unsupported stoich: %s" % stoich)
    return(dH_per_atom)
def store_policy_instance(policy_type_id, policy_instance_id, instance):
    """
    Store a policy instance

    Validates the policy type, then writes ``instance`` into SDL under a
    key derived from the type and instance ids. An overwrite of an
    existing instance first clears its handler statuses, since the
    overwrite is treated as a brand-new instance.
    """
    type_is_valid(policy_type_id)
    key = _generate_instance_key(policy_type_id, policy_instance_id)
    if SDL.get(key) is not None:
        # Reset the statuses because this is a new policy instance, even if it was overwritten
        _clear_handlers(policy_type_id, policy_instance_id)  # delete all the handlers
    SDL.set(key, instance)
def _make_asset_build_reqs(asset):
    """
    Prepare requirements and inputs lists and display it

    Logs (at INFO level) the required files, assets and params declared
    for the named asset in the module-level ``asset_build_packages``
    mapping, including any declared defaults.

    :params str asset: name of the asset
    """
    def _format_reqs(req_list):
        """
        Render one requirement per line as "key (description)", appending
        "; default: value" when the requirement declares a default.

        :param list[dict] req_list:
        :return list[str]:
        """
        templ = "\t{} ({})"
        return [templ.format(req[KEY], req[DESC]) if DEFAULT not in req
                else (templ + "; default: {}").format(req[KEY], req[DESC], req[DEFAULT]) for req in req_list]
    reqs_list = []
    if asset_build_packages[asset][REQ_FILES]:
        reqs_list.append("- files:\n{}".format("\n".join(_format_reqs(asset_build_packages[asset][REQ_FILES]))))
    if asset_build_packages[asset][REQ_ASSETS]:
        reqs_list.append("- assets:\n{}".format("\n".join(_format_reqs(asset_build_packages[asset][REQ_ASSETS]))))
    if asset_build_packages[asset][REQ_PARAMS]:
        reqs_list.append("- params:\n{}".format("\n".join(_format_reqs(asset_build_packages[asset][REQ_PARAMS]))))
    _LOGGER.info("\n".join(reqs_list))
def gsl_eigen_herm_alloc(*args, **kwargs):
    """gsl_eigen_herm_alloc(size_t const n) -> gsl_eigen_herm_workspace

    SWIG-generated thin wrapper: forwards directly to the GSL C routine
    of the same name in the ``_gslwrap`` extension module.
    """
    return _gslwrap.gsl_eigen_herm_alloc(*args, **kwargs)
def cosine_similarity(array1, array2):
    """Return the negative cosine similarity between two arrays.

    Loss-style value: -sum(l2_norm(y_true) * l2_norm(y_pred)), i.e.
    -dot(a, b) / (||a|| * ||b||).
    """
    numerator = dot(array1, array2)
    denominator = norm(array1) * norm(array2)
    return -numerator / denominator
def DEW_T(Y, P, all_params):
    """Dew-point temperature for a vapor of composition Y at pressure P.

    Y = list of mollar fractions of vapor like [0.2, 0.8] or [0.1 0.2 0.7];
    the entries must sum to 1.0.
    P = Pressure in kPa
    all_params = list of parameters for Antonie equations, one row per
    component, e.g.:
        all_params = [[A1, B1, C1],
                      [A2, B2, C2],
                      [A3, B3, C3]]

    Returns the temperature T and the liquid mole fractions X.
    """
    def pressure_residual(T):
        """Zero when DEW_P reproduces the target pressure at T."""
        return (P - DEW_P(Y, T, all_params)[0])

    # Levenberg-Marquardt root find, starting from T = 20.
    solution = root(pressure_residual, 20, method='lm')
    T = solution['x'][0]
    # Liquid-phase composition at the solved temperature.
    X = DEW_P(Y, T, all_params)[1]
    return T, X
def plugin_last():
    """Sort hint: place this plug-in after all other plug-in functions."""
    return "last"
def init_seg_table(metadata, tablename, segid_colname=cn.seg_id, chunked=True):
    """Build a SQLAlchemy Table definition for per-segment statistics.

    Columns: synthetic primary key, segment id, voxel count, centroid
    coordinates, bounding-box begin/end corners, and (when ``chunked``)
    a chunk tag that is None for segments merged across chunks.
    """
    columns = [
        Column("id", BigInteger, primary_key=True),
        Column(cn.seg_id, Integer, index=True),
        Column(cn.size, Integer),
    ]
    # Centroid coordinates
    columns += [Column(name, Float)
                for name in (cn.centroid_x, cn.centroid_y, cn.centroid_z)]
    # Bounding box corners (begin then end)
    columns += [Column(name, Integer)
                for name in (cn.bbox_bx, cn.bbox_by, cn.bbox_bz,
                             cn.bbox_ex, cn.bbox_ey, cn.bbox_ez)]
    if chunked:
        # Chunk id - None if merged across chunks
        columns.append(Column(cn.chunk_tag, Text, index=True))
    return Table(tablename, metadata, *columns)
def extract_shebang_command(handle):
    """
    Extract the shebang_ command line from an executable script.

    :param handle: A file-like object (assumed to contain an executable).
    :returns: The command in the shebang_ line (a string).

    The seek position is expected to be at the start of the file and will be
    reset afterwards, before this function returns. It is not an error if the
    executable contains binary data.

    .. _shebang: https://en.wikipedia.org/wiki/Shebang_(Unix)
    """
    try:
        if handle.read(2) != b'#!':
            return ''
        return handle.readline().decode('UTF-8').strip()
    finally:
        # Always rewind so callers can re-read from the start.
        handle.seek(0)
def set_resource_limit(limit, soft=None, hard=None, warn_on_failure=False):
    """Uses the ``resource`` package to change a resource limit for the current
    process.

    If the ``resource`` package cannot be imported, this command does nothing
    (optionally logging a warning).

    Args:
        limit: the name of the resource to limit. Must be the name of a
            constant in the ``resource`` module starting with ``RLIMIT``. See
            the documentation of the ``resource`` module for supported values
        soft (None): a new soft limit to apply, which cannot exceed the hard
            limit. If omitted, the current soft limit is maintained
        hard (None): a new hard limit to apply. If omitted, the current hard
            limit is maintained
        warn_on_failure (False): whether to issue a warning rather than an
            error if the resource limit change is not successful
    """
    try:
        import resource
    except ImportError as e:
        # Unsupported platform (e.g. Windows). Warn if requested, but in
        # all cases there is nothing more we can do. (The previous version
        # fell through and used the unimported module when
        # warn_on_failure was True, raising NameError.)
        if warn_on_failure:
            logger.warning(e)
        return
    try:
        _limit = getattr(resource, limit)
        soft_orig, hard_orig = resource.getrlimit(_limit)
        # Compare against None explicitly: a requested limit of 0 (e.g.
        # RLIMIT_CORE = 0 to disable core dumps) is falsy but valid, and
        # the previous `soft or soft_orig` silently discarded it.
        soft = soft_orig if soft is None else soft
        hard = hard_orig if hard is None else hard
        resource.setrlimit(_limit, (soft, hard))
    except ValueError as e:
        if warn_on_failure:
            logger.warning(e)
        else:
            raise
def split_abstracts(ftm_df):
    """
    Split the mail abstract (item content) into different mails.

    This is required to find the 'novel' email, and the rest of the threat. We
    create a new row for each email in the threat, but it keeps the ID of the
    'novel email'. We add two boolean flags for is_novel, and
    is_threat_starter. Only rows whose title contains 'RE ' or 'FW ' are
    split; all other rows pass through with their original abstract.

    Parameters
    ----------
    ftm_df : pandas.DataFrame
        FTM dataset with prettified abstracts.

    Returns
    -------
    pandas.DataFrame
        FTM dataset with a new row for each email, and flags for is_novel and
        is_threat_starter.
    """
    # Create a list of strings from novel email and its forwards or reactions.
    # FILTER_SENDER is a module-level regex marking sender boundaries
    # -- presumably matching "From:"-style headers; verify against its
    # definition.
    ftm_df['new_abstract'] = ftm_df[ftm_df.title.str.contains('RE ') | ftm_df.title.str.contains(
        'FW ')].abstract.apply(lambda row: re.split(FILTER_SENDER, row))
    # Create a list of is_novel. First email is novel (1), the rest is
    # forwards or reactions (0). Similar for is_threat_starter
    ftm_df['is_novel'] = ftm_df[ftm_df.title.str.contains('RE ') | ftm_df.title.str.contains(
        'FW ')].new_abstract.apply(lambda row: [1] + [0] * (len(row) - 1))
    ftm_df['is_threat_starter'] = ftm_df[ftm_df.title.str.contains('RE ') |
                                         ftm_df.title.str.contains('FW ')
                                         ].new_abstract.apply(
        lambda row: [0] * (len(row) - 1) + [1])
    # explode the lists
    # NOTE(review): multi-column explode requires pandas >= 1.3 --
    # confirm the pinned pandas version supports it.
    ftm_df = ftm_df.explode(['is_novel', 'new_abstract', 'is_threat_starter'])
    ftm_df = ftm_df.reset_index()
    # Rows that were not split keep their original abstract.
    ftm_df.abstract = ftm_df.new_abstract.fillna(ftm_df.abstract)
    return ftm_df.drop(columns=['new_abstract'])
def merge(xref):
    """Reconcile COOP/DCP station metadata against an NWSLI -> name mapping.

    For each (nwsli, name) pair: rename a single matching station whose name
    differs, resolve DCP/COOP duplicate pairs via the ``should_*`` helpers,
    and report unknown or over-matched IDs.

    Note: uses ``dict.iteritems()``, so this script targets Python 2.
    NOTE(review): cursors are created on every loop iteration but only the
    last iteration's rwcursor/icursor are closed after the loop — confirm
    whether per-iteration cleanup was intended.
    """
    pgconn = get_dbconn('mesosite', user='mesonet')
    ipgconn = get_dbconn('iem', user='mesonet')
    for nwsli, name in xref.iteritems():
        cursor = pgconn.cursor()
        rwcursor = pgconn.cursor()
        icursor = ipgconn.cursor()
        name = name[:64]  # database name size limitation
        cursor.execute("""SELECT id, name, network, iemid from stations WHERE
        id = %s and (network ~* 'COOP' or network ~* 'DCP')
        """, (nwsli,))
        if cursor.rowcount == 0:
            print("Unknown station: %s" % (nwsli,))
        elif cursor.rowcount == 1:
            row = cursor.fetchone()
            # Single match: if it is COOP-only, consider switching it to DCP.
            if row[2].find("DCP") == -1:
                print("Site is listed as only COOP: %s" % (nwsli,))
                should_switch_2dcp(rwcursor, icursor, nwsli, row[3])
            if row[1] != name:
                print(" -> Update %s |%s| -> |%s|" % (nwsli, row[1], name))
                rwcursor.execute("""UPDATE stations SET name = %s
                WHERE iemid = %s""", (name, row[3]))
        elif cursor.rowcount == 2:
            # Exactly two matches: assumed to be a DCP/COOP duplicate pair.
            row = cursor.fetchone()
            row2 = cursor.fetchone()
            print("DCP/COOP Duplicate: %s |%s| |%s|" % (nwsli, row[1],
                                                        row2[1]))
            should_delete_coop(rwcursor, icursor, nwsli)
            # Fix DCP name
            for _r in [row, row2]:
                if _r[2].find("DCP") > -1 and _r[1] != name:
                    print(" -> Updating Name to |%s|" % (name,))
                    rwcursor.execute("""UPDATE stations SET name = %s
                    WHERE iemid = %s""", (name, _r[3]))
        else:
            print("Too many rows for: %s" % (nwsli,))
    pgconn.commit()
    rwcursor.close()
    ipgconn.commit()
    icursor.close()
    pgconn.close()
    ipgconn.close()
def get_weight_from_alias(blend_shape, alias):
    """
    Return the index into ``.weight`` of *blend_shape* whose aliased
    attribute name equals *alias*.

    aliasAttr can map an attribute to its alias, but not the reverse, so we
    scan every existing weight index and compare.
    """
    weight_attr = blend_shape.attr('weight')
    for index in weight_attr.get(mi=True) or []:
        current_alias = pm.aliasAttr(weight_attr.elementByLogicalIndex(index), q=True)
        if current_alias == alias:
            return index
    raise Exception('Couldn\'t find the weight index for blend shape target %s.%s' % (blend_shape, alias))
def __check_dependences_and_predecessors(pet: PETGraphX, out_dep_edges: List[Tuple[Any, Any, Any]],
                                         parent_task: CUNode, cur_cu: CUNode):
    """Checks if only dependences to self, parent omittable node or path to target task exists.
    Checks if node is a direct successor of an omittable node or a task node.

    NOTE(review): the reachability branch below tests parent_task -> cur_cu
    and does not depend on the current edge ``e`` — confirm this is intended
    rather than a check on ``e``'s target.

    :param pet: PET Graph
    :param out_dep_edges: list of outgoing edges
    :param parent_task: parent cu of cur_cu
    :param cur_cu: current cu node
    :return True, if a violation has been found. False, otherwise.
    """
    violation = False
    # check if only dependencies to self, parent omittable node or path to target task exists
    for e in out_dep_edges:
        if pet.node_at(e[1]) == cur_cu:
            # self-dependency: allowed
            continue
        elif pet.node_at(e[1]).tp_omittable is True:
            # dependency into an omittable node: allowed
            continue
        elif check_reachability(pet, parent_task, cur_cu, [EdgeType.DATA]):
            continue
        else:
            violation = True
    # check if node is a direct successor of an omittable node or a task node
    in_succ_edges = [(s, t, e) for (s, t, e) in pet.in_edges(cur_cu.id) if
                     e.etype == EdgeType.SUCCESSOR]
    is_successor = False
    for e in in_succ_edges:
        if pet.node_at(e[0]).tp_omittable is True:
            is_successor = True
        elif pet.node_at(e[0]).tp_contains_task is True:
            is_successor = True
    if not is_successor:
        violation = True
    return violation
def clone_master_track(obj, stdata, stindex, stduration):
    """
    Deep-copy *obj* via a JSON round-trip ('ghetto clone') and populate
    subtrack information taken from a CUE sheet entry.
    """
    subtrack = json.loads(json.dumps(obj))
    subtrack['subsong'] = {
        'index': stindex,
        'start_time': stdata['index'][1][0],
        'duration': stduration,
    }
    tags = subtrack['tags']
    tags['artist'] = stdata.get('PERFORMER', tags.get('artist'))
    tags['title'] = stdata.get('TITLE', tags.get('title'))
    tags['tracknum'] = stindex
    tags['trackstr'] = stindex
    return subtrack
def create_graph(num_islands, bridge_config):
    """
    Build an undirected, weighted graph as an adjacency list.

    Islands are numbered from 1, so index 0 is an unused placeholder.
    Each bridge (source, destination, cost) is recorded in both directions.
    """
    adjacency = [[] for _ in range(num_islands + 1)]
    for bridge in bridge_config:
        origin, target, cost = bridge[0], bridge[1], bridge[2]
        adjacency[origin].append((target, cost))
        adjacency[target].append((origin, cost))
    return adjacency
def distinct_extractors(count=True, active=True):
    """ Tool to count unique number of predictors for each Dataset/Task

    :param count: if True, store counts; otherwise store the list of
        predictor names per extractor.
    :param active: filter Datasets by their ``active`` flag.
    :return: dict keyed by "<dataset>_<task>"; each value is a dict mapping
        extractor name -> count (or list of names).
    """
    active_datasets = ms.Dataset.query.filter_by(active=active)
    # All extractor names that have at least one active Predictor in the
    # selected datasets.
    superset = set([v for (v, ) in ms.Predictor.query.filter_by(active=True).filter(
        ms.Predictor.dataset_id.in_(
            active_datasets.with_entities('id'))).join(
                ms.ExtractedFeature).distinct(
                    'extractor_name').values('extractor_name')])
    res = {}
    for en in superset:
        for ds in active_datasets:
            for t in ds.tasks:
                name = f"{ds.name}_{t.name}"
                if name not in res:
                    res[name] = {}
                # Distinct feature names produced by this extractor for the
                # dataset's active predictors.
                preds = ms.Predictor.query.filter_by(
                    dataset_id=ds.id, active=True).join(
                        ms.ExtractedFeature).filter_by(
                            extractor_name=en).distinct('feature_name')
                if count:
                    r = preds.count()
                else:
                    r = list(preds.values('name'))
                res[name][en] = r
    return res
def get_timepoint(data, tp=0):
    """Return the 3D volume at timepoint *tp* (0-based, lowest is 0) from 4D input.

    If *data* has fewer than 4 dimensions, or *tp* is out of range for the
    4th dimension, a message is printed and the input is returned unchanged
    (replicating seg_maths -tp).

    You can save memory by using [1]:
        nifti.dataobj[..., tp]
    instead: see get_nifti_timepoint().
    Works with loop_and_save(). Call directly, or with niftify().

    Ref:
        [1]: http://nipy.org/nibabel/images_and_memory.html
    """
    tp = int(tp)
    if len(data.shape) < 4:
        print("Data has fewer than 4 dimensions. Doing nothing...")
        return data
    # tp is a 0-based index, so tp == shape[3] is already out of range.
    # (The original used `<`, which let tp == shape[3] fall through and
    # raise an IndexError on the slice below.)
    if data.shape[3] <= tp:
        print("Data has fewer than {0} timepoints in its 4th dimension.".format(tp + 1))
        return data
    return data[:, :, :, tp]
def method_from_name(klass, method_name: str):
    """
    Given an imported class, return the given method pointer.

    :param klass: An imported class containing the method.
    :param method_name: The method name to find.
    :return: The method pointer
    :raises NotImplementedError: if *klass* has no attribute *method_name*.
    """
    try:
        return getattr(klass, method_name)
    except AttributeError as err:
        # Chain the original error and say what was missing, instead of
        # raising a bare NotImplementedError with no context.
        raise NotImplementedError(
            f"{klass!r} has no method named {method_name!r}"
        ) from err
def create_config_file(config_path=CONFIG_FILE_PATH):
    """
    Generate a generic config file at the given path.

    Does nothing (beyond a warning) if the file already exists.

    :param config_path: destination path for the config file
    :return: None
    """
    if os.path.exists(config_path):
        logging.warning("the file config.txt already exists, it will not be overwritten")
    else:
        # `with` guarantees the handle is closed even if the write fails.
        # The backslash is escaped: the original "\i" relied on Python
        # passing unknown escapes through (a DeprecationWarning).
        with open(config_path, 'w') as of:
            of.write("{}={}Debug\\images{}\n".format(DATA_PATH, MAIN_DIR, os.sep))
def get_querypage(site: Site, page: str, limit: int = 500):
    """
    Fetch a MediaWiki QueryPage special report and return the page titles.

    Example endpoints:
      http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=Nonportableinfoboxes
      http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=Mostlinkedtemplates
      http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=AllInfoboxes

    :type site Site
    :type page str
    :type limit int
    :rtype: list[str]
    """
    response = site.get(action='query', list='querypage', qppage=page, qplimit=limit)
    results = response['query']['querypage']['results']
    # Each result carries value/ns/title; only the title is of interest.
    return [entry['title'] for entry in results]
def parse_value(string: str) -> str:
    """Check whether a value is a plain string or an arrow function.

    Args:
        string (str): Value

    Returns:
        str: the value unchanged if it is a plain string, otherwise the
        body of the zero-argument arrow function.
    """
    body, replaced = re.subn(r'^\(\s*\)\s*=>\s*{(.*)}$', r'\1', string)
    return body if replaced else string
def test_arrowleft_shortchut(vim_bot):
    """Test that pressing the Left arrow key moves the cursor one column left."""
    # NOTE(review): the previous docstring described the "j" command moving
    # right, which does not match the Left-key behavior exercised below.
    main, editor_stack, editor, vim, qtbot = vim_bot
    editor.stdkey_backspace()
    # Move right first so there is room to move back left.
    qtbot.keyPress(editor, Qt.Key_Right)
    cmd_line = vim.get_focus_widget()
    _, col = editor.get_cursor_line_column()
    qtbot.keyPress(editor, Qt.Key_Left)
    _, new_col = editor.get_cursor_line_column()
    assert new_col == col - 1
def uniqify(seq, idfun=None):
    """Return only unique values in a sequence, preserving order.

    :param seq: iterable of items to de-duplicate.
    :param idfun: optional key function; two items are considered duplicates
        when ``idfun(item)`` compares equal. Keys must be hashable.
    :return: list of the first occurrence of each distinct key.
    """
    if idfun is None:
        def idfun(x):
            return x
    # A set expresses "seen markers" directly (the original used a dict
    # with dummy values).
    seen = set()
    result = []
    for item in seq:
        marker = idfun(item)
        if marker in seen:
            continue
        seen.add(marker)
        result.append(item)
    return result
def inside(Sv, r, r0, r1):
    """
    Build a mask selecting the rows of Sv whose range value satisfies
    r0 <= r < r1.

    Args:
        Sv (float): 2D array with data to be masked.
        r (float): 1D array with range data (one value per row of Sv).
        r0 (int): Upper range limit.
        r1 (int): Lower range limit.

    Returns:
        bool: 2D array mask (inside range = True).
    """
    selected_rows = np.where((r >= r0) & (r < r1))[0]
    mask = np.zeros(Sv.shape, dtype=bool)
    mask[selected_rows, :] = True
    return mask
def file_exists(fpath: str) -> bool:
    """Checks if file exists by the given path

    :param str fpath:
        A path to validate for being a file
    :returns:
        True, if fpath is a file
        False, if fpath doesn't exists or isn't a file
    """
    return os.path.isfile(fpath)
def get_solution_name(
    fs: float, tf_name: str, tf_var: str, postfix: Optional[str] = None
) -> str:
    """Build the file name for a solution file."""
    from resistics.common import fs_to_string

    # Base name: sampling frequency + lower-cased transfer function name.
    name = f"{fs_to_string(fs)}_{tf_name.lower()}"
    if tf_var != "":
        name += f"_{tf_var.replace(' ', '_')}"
    suffix = ".json" if postfix is None else f"_{postfix}.json"
    return name + suffix
def test_client_arm(server, client):
    """Should call the API and arm the system."""
    # NOTE(review): this body is a Python-literal string, not valid JSON
    # (`True` vs `true`) — presumably the client parses it leniently or only
    # checks the 200 status; confirm.
    html = """[
        {
            "Poller": {"Poller": 1, "Panel": 1},
            "CommandId": 5,
            "Successful": True,
        }
    ]"""
    # Stub the arm endpoint so no real request leaves the test.
    server.add(
        responses.POST,
        "https://example.com/api/panel/syncSendCommand",
        body=html,
        status=200,
    )
    client._session_id = "test"
    client._lock.acquire()
    assert client.arm() is True
    assert len(server.calls) == 1
def test_get_filename_from_request(patch_url_handler):
    """Test getting the filename of an url from a request object."""
    # url with no filename in name, but in request content headers
    # (the patch_url_handler fixture presumably serves the canned headers —
    # confirm against the fixture definition)
    request = urlopen("http://valid.com")
    filename = packagerbuddy._get_filename_from_request(request)
    assert filename == "valid.tar"
    # url with filename in name, not in request content headers
    request = urlopen("http://filename.tar")
    filename = packagerbuddy._get_filename_from_request(request)
    assert filename == "filename.tar"
def escape(message: str) -> str:
    """Escape tags which might be interpreted by the theme tokenizer.

    Should be used when passing text from external sources to `theme.echo`.
    """
    # Prefix every opening/closing theme tag with a backslash.
    tag_pattern = rf"<(/?{TAG_RE})>"
    return re.sub(tag_pattern, r"\<\1>", message)
def display_notebook(host, port, display):
    """Display Aim instance in an ipython context output frame.
    """
    import IPython.display
    markup = """
    <iframe id="aim" width="100%" height="800" frameborder="0" src={}:{}{}>
    </iframe>
    """.format(host, port, '/notebook/')
    # @TODO write passing proxy logic
    frame = IPython.display.HTML(markup)
    display.update(frame)
async def normalize_message(app: FastAPI, message: Message) -> Message:
    """
    Given a TRAPI message, updates the message to include a
    normalized qgraph, kgraph, and results.

    NOTE(review): on any exception the error is logged and the function
    implicitly returns None instead of a Message — confirm callers handle
    a None result.
    """
    try:
        merged_qgraph = await normalize_qgraph(app, message.query_graph)
        # kgraph normalization also yields node/edge ID remapping tables,
        # which are applied to the results below.
        merged_kgraph, node_id_map, edge_id_map = await normalize_kgraph(app, message.knowledge_graph)
        merged_results = await normalize_results(message.results, node_id_map, edge_id_map)
        return Message.parse_obj({
            'query_graph': merged_qgraph,
            'knowledge_graph': merged_kgraph,
            'results': merged_results
        })
    except Exception as e:
        logger.error(f'normalize_message Exception: {e}')
def ec_elgamal_encrypt(msg, pk, symmalg):
    """
    Hybrid EC-ElGamal encryption: draw a random ephemeral scalar b, derive a
    symmetric key from b*g and b*(a*g), then encipher msg with symmalg.

    Input:
        msg      Plaintext message string.
        pk       Public key: a tuple (EC, ECPt, ECPt), that is (ec, generator g, a*g)
        symmalg  A callable accepting (key, msg) and returning a
                 symmetric-enciphered ciphertext.
    Output:
        A tuple (ECPt, str), where the first element is actually b*g.
    """
    curve, generator, public_point = pk
    ephemeral = random_with_bytes(log2(curve._p) // 4)
    shared_point = ephemeral * public_point
    ephemeral_point = ephemeral * generator
    symm_key = ec_elgamal_derive_symm_key(ephemeral_point, shared_point)
    return (ephemeral_point, symmalg(symm_key, msg))
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          cmap=plt.cm.YlGnBu):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param cm: square confusion matrix (true labels on rows, predictions on
        columns).
    :param classes: tick labels for both axes.
    :param normalize: if True, row-normalize ``cm`` before plotting.
    :param cmap: matplotlib colormap for the image.
    :return: the matplotlib Axes holding the plot.
    """
    np.set_printoptions(precision=2)
    if normalize:
        # Row-normalize so each true class sums to 1. This flag was
        # previously accepted but silently ignored.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           ylabel='True',
           # fontsize=16,
           xlabel='Predicted')
    # Loop over data dimensions and create text annotations.
    fmt = '.2f'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center", fontsize=14,
                    color="white" if cm[i, j] > thresh else "black")
    plt.xlim(-0.5, len(np.unique(classes))-0.5)
    plt.ylim(len(np.unique(classes))-0.5, -0.5)
    plt.xlabel("Predicted", fontsize=16)
    plt.ylabel('True', fontsize=16)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.tight_layout()
    return ax
def meshgrid(params):
    """Returns meshgrid X that can be used for 1D plotting.

    params is what is returned by finess.params.util.read_params.
    X holds the cell-center coordinates of a uniform grid of mx cells on
    [xlow, xhigh].
    """
    assert(params['finess', 'ndims'] == 1)
    mx = params['grid', 'mx']
    xlow = params['grid', 'xlow']
    xhigh = params['grid', 'xhigh']
    dx = (xhigh-xlow) / float(mx)
    # Import linspace from numpy directly: pylab is a deprecated shim that
    # drags in all of matplotlib just to re-export numpy names.
    from numpy import linspace
    X = linspace(xlow + 0.5*dx, xhigh - 0.5*dx, mx)
    return X
def _pbe_p12(params, passphrase, hash_alg, cipher, key_size):
    """PKCS#12 cipher selection function for password-based encryption

    This function implements the PKCS#12 algorithm for password-based
    encryption. It returns a cipher object which can be used to encrypt
    or decrypt data based on the specified encryption parameters,
    passphrase, and salt.

    :param params: tuple of (salt bytes, iteration count) from the PBE
        algorithm parameters.
    :param passphrase: the user passphrase to derive the key from.
    :param hash_alg: hash algorithm used by the PKCS#12 KDF.
    :param cipher: cipher factory to instantiate with the derived key.
    :param key_size: size in bytes of the key to derive.
    :raises KeyEncryptionError: if *params* is malformed.
    """
    # Validate the ASN.1-decoded parameter pair before using it.
    if (not isinstance(params, tuple) or len(params) != 2 or
            not isinstance(params[0], bytes) or not params[0] or
            not isinstance(params[1], int) or params[1] == 0):
        raise KeyEncryptionError('Invalid PBES1 PKCS#12 encryption parameters')
    salt, count = params
    # KDF "purpose" id 1 derives the key; id 2 derives the IV (PKCS#12).
    key = _pbkdf_p12(hash_alg, passphrase, salt, count, key_size, 1)
    if cipher.block_size == 1:
        # Stream cipher: no IV or padding layer needed.
        cipher = cipher.new(key)
    else:
        iv = _pbkdf_p12(hash_alg, passphrase, salt, count,
                        cipher.block_size, 2)
        cipher = _RFC1423Pad(cipher, key, iv)
    return cipher
def add_settings_routes(app):
    """ Create routes related to settings

    :param app: the Flask application to register the routes on.
    """
    # GET /v1/rule_settings/ — list the rule settings for an agency/file.
    @app.route('/v1/rule_settings/', methods=['GET'])
    @requires_login
    @use_kwargs({
        'agency_code': webargs_fields.String(required=True),
        'file': webargs_fields.String(validate=webargs_validate.
                                      OneOf(FILE_TYPES, error='Must be {}, or {}'.format(', '.join(FILE_TYPES[:-1]),
                                                                                         FILE_TYPES[-1])),
                                      required=True)
    })
    def get_rule_settings(**kwargs):
        """ Returns the rule settings based on the filters provided """
        agency_code = kwargs.get('agency_code')
        file = kwargs.get('file')
        return list_rule_settings(agency_code, file)

    # POST /v1/save_rule_settings/ — persist error/warning rule settings.
    @app.route('/v1/save_rule_settings/', methods=['POST'])
    @requires_login
    @use_kwargs({
        'agency_code': webargs_fields.String(required=True),
        'file': webargs_fields.String(validate=webargs_validate.
                                      OneOf(FILE_TYPES, error='Must be {}, or {}'.format(', '.join(FILE_TYPES[:-1]),
                                                                                         FILE_TYPES[-1])),
                                      required=True),
        'errors': webargs_fields.List(webargs_fields.Dict),
        'warnings': webargs_fields.List(webargs_fields.Dict)
    })
    def post_save_rule_settings(**kwargs):
        """ Set the rule settings based on the rules provided """
        agency_code = kwargs.get('agency_code')
        file = kwargs.get('file')
        errors = kwargs.get('errors', [])
        warnings = kwargs.get('warnings', [])
        return save_rule_settings(agency_code, file, errors, warnings)
def delete_by_date_paste(date):
    """
    Deletes the paste entries older than a certain date. Note that it will delete any
    document/index type entered into it for elasticsearch; the paste restriction is
    due to postgresql.

    :param date: cut-off date string (YYYY-MM-DD); anything strictly older
        is deleted.
    :return: True once all candidate rows have been processed (failures are
        logged and skipped).
    """
    # Create a connection to the database (seemed to want it in this case)
    db = SQLAlchemy(app)
    # Add the start of the day to ensure anything older gets deleted
    date += " 00:00:00.000000"
    # Make the query to get the pastes to be deleted
    old_pastes = db.session.query(Paste).filter(Paste.datetime < date)
    # Attempt to delete old pastes
    for item in old_pastes:
        try:
            delete_from_es(item)
            db.session.delete(item)
            db.session.commit()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; best-effort behavior is preserved.
            logger.error("Did not delete item from one or more databases: %s", item)
    return True
def add_user_to_ldap_and_login(self, server, user=None, ch_user=None, login=None, exitcode=None, message=None, rbac=False):
    """Add user to LDAP and ClickHouse and then try to login.

    :param server: name of the cluster node whose LDAP settings are used
    :param user: LDAP user definition (defaults to cn/userpassword "myuser")
    :param ch_user: overrides for the ClickHouse user mapping (username/server)
    :param login: overrides for the credentials used in the login attempt
    :param exitcode: expected exit code of the login query, if any
    :param message: expected message of the login query, if any
    :param rbac: whether to configure the authenticated user via RBAC
    """
    self.context.ldap_node = self.context.cluster.node(server)
    if ch_user is None:
        ch_user = {}
    if login is None:
        login = {}
    if user is None:
        user = {"cn": "myuser", "userpassword": "myuser"}
    with ldap_user(**user) as user:
        # Default the ClickHouse mapping to the freshly created LDAP user.
        ch_user["username"] = ch_user.get("username", user["cn"])
        ch_user["server"] = ch_user.get("server", user["_server"])
        with ldap_authenticated_users(ch_user, config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac):
            username = login.get("username", user["cn"])
            password = login.get("password", user["userpassword"])
            login_and_execute_query(username=username, password=password, exitcode=exitcode, message=message)
def is_venv():
    """Check whether this workspace is a virtualenv.

    A workspace counts as a virtualenv when the interpreter's scripts
    directory contains all of the expected executables (activate/pip/python,
    with .exe suffixes on Windows).

    :return: True if every expected executable exists, False otherwise.
    """
    dir_path = os.path.dirname(SRC)
    if SYS_NAME == "Windows":
        executable_list = ["activate", "pip.exe", "python.exe"]
    else:
        # Darwin/Linux layout; also used as the fallback for any other
        # POSIX-like platform. (Previously only Darwin/Linux were handled,
        # so an unknown platform raised UnboundLocalError.)
        executable_list = ["activate", "pip", "python"]
    for executable in executable_list:
        path = os.path.join(dir_path, BIN_SCRIPTS, executable)
        if not os.path.exists(path):
            return False
    return True
def convert(origDict, initialSpecies):
    """
    Convert the original dictionary with species labels as keys
    into a new dictionary with species objects as keys,
    using the given dictionary of species.
    """
    return {initialSpecies[label]: value for label, value in origDict.items()}
def Vector(point, direction, simple=None):
    """
    Easy to use Vector type constructor.

    With two arguments, *point* and *direction* are stored as given. With
    three, the first two become the x/direction components of a Point and
    the third becomes the Vector's direction component.
    """
    if simple is not None:
        point, direction = Point(point, direction), simple
    return {'point': point, 'direction': direction}
def check_runner(event, context):
    """
    Pure lambda function to pull run and check information from SQS and run
    the checks. Self-propagates. *event* is a dict of information passed
    into the lambda at invocation time; an empty event is a no-op.
    """
    if event:
        app_utils_obj.run_check_runner(event)
def createGrid(nx, ny):
    """
    Build the list of [x, y] grid offsets around the origin (origin
    excluded), alternating the x scan direction on successive rows.
    A 1x1 grid yields no positions.
    """
    positions = []
    if (nx > 1) or (ny > 1):
        half_x = int(nx / 2)
        half_y = int(ny / 2)
        for row, y in enumerate(range(-half_y, half_y + 1)):
            for x in range(-half_x, half_x + 1):
                if (x == 0) and (y == 0):
                    continue
                # Even rows scan left-to-right, odd rows mirror x.
                if row % 2 == 0:
                    positions.append([x, y])
                else:
                    positions.append([-x, y])
    return positions
def print_gradient(texto,grad):
    """
    Print a gradient histogram (used for Support or Confidence gradients).

    :param texto: label printed in front of the first bucket row
    :param grad: list of 10 bucket counts; grad[n] holds the count for the
        ">= 0,n" bucket
    """
    # First row carries the label and the top bucket (>= 0,9).
    print('{:<19}'.format(texto) + ":" , ">= 0,9 -" , '{:>4}'.format(str(grad[9])) )
    # Remaining buckets from 0,8 down to 0,0; the label column stays blank.
    for n in range(8,-1,-1):
        print('{:<19}'.format('') + " " , ">= 0,"+str(n)+" -" , '{:>4}'.format(str(grad[n])) )
def _update_distances_for_one_sample(
sample_index, new_distances, all_distances, sample_name_to_index
):
"""Updates all distance data in dictionary all_distances.
new_distances=list of tuples, made by _load_one_sample_distances_file"""
for other_sample, distance in new_distances:
other_index = sample_name_to_index[other_sample]
if other_index == sample_index:
continue
key = tuple(sorted([sample_index, other_index]))
if key in all_distances and all_distances[key] != distance:
raise RuntimeError(
f"Pair of samples seen twice when loading distances, with different distances: {key}"
)
all_distances[key] = distance | 30,562 |
def pay(name):
    """Pay participants"""
    # Local development runs have no real participants to pay
    if reseval.is_local(name):
        return
    configuration = reseval.load.config_by_name(name)
    creds = reseval.load.credentials_by_name(name, 'crowdsource')
    # Dispatch payment to the configured crowdsourcing backend
    module(configuration).pay(configuration, creds)
def get_private_network(filters: Optional[Sequence[pulumi.InputType['GetPrivateNetworkFilterArgs']]] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateNetworkResult:
    """
    Get information about a Vultr private network.

    ## Example Usage

    Get the information for a private network by `description`:

    ```python
    import pulumi
    import pulumi_vultr as vultr

    my_network = vultr.get_private_network(filters=[vultr.GetPrivateNetworkFilterArgs(
        name="description",
        values=["my-network-description"],
    )])
    ```

    :param Sequence[pulumi.InputType['GetPrivateNetworkFilterArgs']] filters: Query parameters for finding private networks.
    """
    __args__ = dict()
    __args__['filters'] = filters
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider-side data source and map the raw result onto the
    # typed awaitable wrapper below.
    __ret__ = pulumi.runtime.invoke('vultr:index/getPrivateNetwork:getPrivateNetwork', __args__, opts=opts, typ=GetPrivateNetworkResult).value

    return AwaitableGetPrivateNetworkResult(
        date_created=__ret__.date_created,
        description=__ret__.description,
        filters=__ret__.filters,
        id=__ret__.id,
        region=__ret__.region,
        v4_subnet=__ret__.v4_subnet,
        v4_subnet_mask=__ret__.v4_subnet_mask)
def test_cell_order_2d():
    """Test for 6 cells in 2d"""
    # Expected per-cell ordering for a [3, 2] grid — presumably each inner
    # list holds the indices of cells that must come after that cell;
    # confirm against create_cell_order_2d's contract.
    cell_order_reference = [[1, 3, 4], [2, 3, 4, 5], [4, 5], [4], [5], []]
    cell_order = create_cell_order_2d(1, [3, 2])
    npt.assert_array_equal(cell_order_reference, cell_order)
def bootstrap(tank, context):
    """
    Prepare the environment for a tk-houdini bootstrap (classic launch).

    Interface for older versions of tk-multi-launchapp, called directly from
    that app. Deprecated: replaced by the ``startup.py`` file and the
    ``SoftwareLauncher`` interface.
    """
    # Merge the classic-startup variables into the current environment.
    os.environ.update(get_classic_startup_env())
def make1():
    """Build a simple document with basic markup.

    The result is saved as helloworld.docx.
    """
    doc = docx.Document()
    # add the first paragraph
    doc.add_paragraph('Здравствуй, мир!')
    # add two more paragraphs
    par1 = doc.add_paragraph('Это второй абзац.')
    par2 = doc.add_paragraph('Это третий абзац.')
    # append text to the second paragraph
    par1.add_run(' Этот текст был добавлен во второй абзац.')
    # append bold text to the third paragraph
    par2.add_run(' Добавляем текст в третий абзац.').bold = True
    # create headings of levels 0 through 4
    doc.add_heading('Заголовок 0', 0)
    doc.add_heading('Заголовок 1', 1)
    doc.add_heading('Заголовок 2', 2)
    doc.add_heading('Заголовок 3', 3)
    doc.add_heading('Заголовок 4', 4)
    # write the result
    doc.save('helloworld.docx')
def sdc_pandas_series_operator_le(self, other):
    """
    Pandas Series operator :attr:`pandas.Series.le` implementation

    .. only:: developer
       **Test**: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op7*
                 python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_operator_le*

    Parameters
    ----------
    series: :obj:`pandas.Series`
        Input series
    other: :obj:`pandas.Series` or :obj:`scalar`
        Series or scalar value to be used as a second argument of binary operation

    Returns
    -------
    :obj:`pandas.Series`
        The result of the operation
    """
    _func_name = 'Operator le().'
    ty_checker = TypeChecker('Operator le().')
    # Resolve this overload only when at least one operand is a Series;
    # otherwise defer to other overload candidates.
    self_is_series, other_is_series = isinstance(self, SeriesType), isinstance(other, SeriesType)
    if not (self_is_series or other_is_series):
        return None
    if not isinstance(self, (SeriesType, types.Number, types.UnicodeType)):
        ty_checker.raise_exc(self, 'pandas.series or scalar', 'self')
    if not isinstance(other, (SeriesType, types.Number, types.UnicodeType)):
        ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
    operands_are_series = self_is_series and other_is_series
    if operands_are_series:
        # Indexes are comparable if their types compare, or if both are
        # None/numeric (in which case they can be promoted to a common dtype).
        none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
                                   and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
        series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
        if not series_indexes_comparable:
            raise TypingError('{} Not implemented for series with not-comparable indexes. \
                Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
    series_data_comparable = check_types_comparable(self, other)
    if not series_data_comparable:
        raise TypingError('{} Not supported for not-comparable operands. \
            Given: self={}, other={}'.format(_func_name, self, other))
    if not operands_are_series:
        # Series <op> scalar (or scalar <op> Series): no index alignment
        # needed — reuse the Series operand's index and name.
        def _series_operator_le_scalar_impl(self, other):
            if self_is_series == True:  # noqa
                return pandas.Series(self._data <= other, index=self._index, name=self._name)
            else:
                return pandas.Series(self <= other._data, index=other._index, name=other._name)
        return _series_operator_le_scalar_impl
    else:
        # optimization for series with default indexes, that can be aligned differently
        if (isinstance(self.index, types.NoneType) and isinstance(other.index, types.NoneType)):
            def _series_operator_le_none_indexes_impl(self, other):
                left_size, right_size = len(self._data), len(other._data)
                if (left_size == right_size):
                    return pandas.Series(self._data <= other._data)
                else:
                    raise ValueError("Can only compare identically-labeled Series objects")
            return _series_operator_le_none_indexes_impl
        else:
            if none_or_numeric_indexes:
                # Compute the common index dtype at typing time so the impl
                # can cast the index without re-deriving it at runtime.
                ty_left_index_dtype = types.int64 if isinstance(self.index, types.NoneType) else self.index.dtype
                ty_right_index_dtype = types.int64 if isinstance(other.index, types.NoneType) else other.index.dtype
                numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
                    [ty_left_index_dtype, ty_right_index_dtype], [])

            def _series_operator_le_common_impl(self, other):
                left_index, right_index = self.index, other.index
                if sdc_check_indexes_equal(left_index, right_index):
                    if none_or_numeric_indexes == True:  # noqa
                        new_index = astype(left_index, numba_index_common_dtype)
                    else:
                        new_index = self._index
                    return pandas.Series(self._data <= other._data,
                                         new_index)
                else:
                    raise ValueError("Can only compare identically-labeled Series objects")
            return _series_operator_le_common_impl
    return None
def getArrFromFile(path = fromPath):
    """
    Read the raw CSV file and return a numpy array.

    :param
        path: path of the raw CSV file (string), default: fromPath
    :return:
        X: 2-D numpy array built from the file, with the header row and the
        last two columns dropped.
    """
    # [1:, :-2]: skip the header row and drop the trailing two columns.
    X = numpy.genfromtxt(path,dtype=float,delimiter=',')[1:,:-2]
    return X
def erode(binary_image, erosion=1):
    """
    Set the 1s at the boundaries of each mask in *binary_image* to 0.

    :param binary_image: batch tensor of 0/1 masks; each element along the
        first dimension is eroded independently.
    :param erosion: number of erosion iterations to apply.
    :return: eroded tensor with the original dtype, on the same device as
        the input.
    """
    # .detach() replaces the deprecated .data access; same values, no grad.
    batch_array = binary_image.detach().cpu().numpy()
    eroded = [
        binary_erosion(
            array,
            iterations=erosion,
            border_value=1,  # so that we don't get border of zeros
        ).astype(array.dtype)
        for array in batch_array
    ]
    return torch.tensor(np.stack(eroded)).to(binary_image.device)
def plot_chirpam_fit(cell_mean, param_d, QI=None, fit_f=sinexp_sigm,
                     start=420, stop=960, ax=None):
    """
    Helper function to visualize the fit of a cell response to a chirp_am stimulus.

    params:
        - cell_mean: Cell's mean response to the stimulus
        - param_d: Parameter dictionary of the fit for fit_f (None to skip
          plotting the fit)
        - QI: Quality index of the fit
        - fit_f: Function used for the fit
        - start: Where the fit started in index of cell_mean
        - stop: Where the fit stopped in index of cell_mean
        - ax: Axis where to plot the figure. If None, a new figure of size (50,2) is created

    return:
        - The axis of the figure
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(50,2))
    # Indices are converted to seconds by dividing by 60 — presumably a
    # 60 Hz sampling rate; confirm against the recording setup.
    ax.plot(np.linspace(0, len(cell_mean)/60, len(cell_mean), endpoint=False), cell_mean)
    if param_d is not None:
        # Overlay the fitted curve on the [start, stop) window only.
        ax.plot(np.linspace(start/60, stop/60, stop-start, endpoint=False),
                fit_f(np.linspace(0, (stop-start)/60, stop-start, endpoint=False), **param_d))
    if QI is not None:
        # Annotate the quality index near the start of the fit window.
        ax.text((start/60), max(cell_mean)*80/100, str(round(QI,3)), fontdict={'size':22})
    ax.set_xlim(0, len(cell_mean)/60)
    if param_d is not None:
        param_d = {k: round(v,2) for k, v in param_d.items()}
        ax.set_title(str(param_d))
    return ax
def convert2SQUAD_format(hoppy_data, write_file_name):
    """
    Convert QAngaroo data (hoppy_data) into SQuAD format and write the
    result to disk at write_file_name.

    Note: All given support documents per example are concatenated into one
    super-document. All text is lowercased. Examples whose answer does not
    occur in the super-document are dropped.
    """
    squad = dict()
    squad['version'] = 'hoppy_squad_format'
    converted = []
    for example in hoppy_data:
        # Concatenate all support documents into one superdocument.
        superdocument = " <new_doc> ".join(example['supports']).lower()
        answer_text = example['answer'].lower()
        # Locate the answer inside the superdocument; skip if absent.
        answer_start = superdocument.find(answer_text)
        if answer_start == -1:
            continue
        qa = {
            'id': example['id'],
            'question': example['query'],
            'answers': [{'answer_start': answer_start, 'text': answer_text}],
        }
        converted.append({
            'title': 'dummyTitle',
            'paragraphs': [{'context': superdocument, 'qas': [qa]}],
        })
    squad['data'] = converted
    with open(write_file_name, 'w') as f:
        json.dump(squad, f, indent=1)
    print('Done writing SQuAD-formatted data to: ',write_file_name)
def command_ltc(bot, user, channel, args):
    """Display current LTC/USD exchange rates from BTC-E"""
    # Public ticker endpoint; the response JSON has a top-level 'ticker'
    # object with avg/last/low/high/vol fields.
    r = bot.get_url("https://btc-e.com/api/2/ltc_usd/ticker")
    j = r.json()['ticker']
    return bot.say(channel, "BTC-E: avg:$%s last:$%s low:$%s high:$%s vol:%s" % (j['avg'], j['last'], j['low'], j['high'], j['vol']))
def new_match(request, cmd_args):
    """
    Return a slack message with a link to a new match

    :param request: the incoming HTTP request (used to build absolute URLs).
    :param cmd_args: raw slash-command arguments, used as the match config.
    :return: Slack response payload dict (match link on success, validation
        errors otherwise).
    """
    # Could potentially add arguments to allow game configuration here
    serializer = LiveMatchSerializer(data={"config": cmd_args})
    if serializer.is_valid():
        live_match = serializer.save()
        return {
            "response_type": "in_channel",
            "text": request.build_absolute_uri(
                "/matches/live/{}".format(live_match.id)
            ),
            "attachments": [{"text": random.choice(NEW_MATCH_MESSAGES)}],
        }
    else:
        # Flatten the per-field config validation errors into one message.
        error_str = "\n".join(
            f"  {field}: {', '.join(errors)}"
            for field, errors in serializer.errors["config"].items()
        )
        return {"response_type": "in_channel", "text": f"Error:\n{error_str}"}
def abort(container_dir, why=None, payload=None):
    """Abort a running application.

    Called when some initialization failed in a running container.

    :param container_dir: path to the container directory; the aborted flag
        is written here, and the supervised service lives one level up.
    :param why: optional reason recorded in the aborted flag.
    :param payload: optional extra data recorded in the aborted flag.
    """
    # Record the abort reason first so it survives the service teardown.
    flag_aborted(container_dir, why, payload)
    container_dir = os.path.realpath(os.path.join(container_dir, '../'))
    # Bring the supervised service down.
    supervisor.control_service(container_dir,
                               supervisor.ServiceControlAction.down)
def head(feats, anchors, num_classes):
    """Convert final layer features to bounding box parameters.
    Parameters
    ----------
    feats : tensor
        Final convolutional layer features.
    anchors : array-like
        Anchor box widths and heights.
    num_classes : int
        Number of target classes.
    Returns
    -------
    box_xy : tensor
        x, y box predictions adjusted by spatial location in conv layer.
    box_wh : tensor
        w, h box predictions adjusted by anchors and conv spatial resolution.
    box_conf : tensor
        Probability estimate for whether each box contains any object.
    box_class_pred : tensor
        Probability distribution estimate for each box over class labels.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = tf.reshape(
        tf.Variable(anchors, dtype=tf.float32, name='anchors'),
        [1, 1, 1, num_anchors, 2])
    # Dynamic implementation of conv dims for fully convolutional model.
    conv_dims = tf.shape(feats)[1:3]  # assuming channels last
    # Build a flat (height*width, 2) table of every grid-cell coordinate so
    # the relative box_xy offsets below can be shifted to absolute positions.
    # In YOLO the height index is the inner most iteration.
    conv_height_index = tf.range(0, conv_dims[0])
    conv_width_index = tf.range(0, conv_dims[1])
    conv_height_index = tf.tile(conv_height_index, [conv_dims[1]])
    conv_width_index = tf.tile(tf.expand_dims(conv_width_index, 0),
                               [conv_dims[0], 1])
    conv_width_index = tf.reshape(tf.transpose(conv_width_index), [-1])
    conv_index = tf.transpose(tf.stack([conv_height_index, conv_width_index]))
    # Reshape the index grid so it broadcasts against per-anchor predictions.
    conv_index = tf.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = tf.cast(conv_index, feats.dtype)
    # Split the flat channel axis into (num_classes + 5) values per anchor:
    # x, y, w, h, objectness, then the per-class scores.
    feats = tf.reshape(
        feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = tf.cast(tf.reshape(conv_dims, [1, 1, 1, 1, 2]), feats.dtype)
    # sigmoid bounds xy offsets and confidence to (0, 1); exp keeps the
    # wh scale factors strictly positive.
    box_xy = tf.nn.sigmoid(feats[..., :2])
    box_wh = tf.exp(feats[..., 2:4])
    box_confidence = tf.sigmoid(feats[..., 4:5])
    box_class_probs = tf.nn.softmax(feats[..., 5:])
    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    box_xy = (box_xy + conv_index) / conv_dims
    box_wh = box_wh * anchors_tensor / conv_dims
    return box_xy, box_wh, box_confidence, box_class_probs
def socfaker_azurevmtopology_get():
    """Return the fake Azure VM topology as a JSON response.

    Responds only when the incoming request passes validation; otherwise the
    view implicitly returns None, matching the original behaviour.
    """
    if not validate_request(request):
        return None
    return jsonify(str(socfaker.products.azure.vm.topology))
def read_json_file(filename):
    """Load and return the JSON object stored in *filename*."""
    with open(filename, 'r') as fh:
        return json.load(fh)
def createVectorisedTargValObjFunction(functTypeStr: str, averageMethod="mean", catchOverflow=True,
                                       errorRetVal=1e30, normToErrorRetVal=False, greaterThanIsOk=False,
                                       lessThanIsOk=False, useAbsVals=False, divideErrorsByNormFactor=None):
    """Create a comparison function mapping (targIter, actIter) to one similarity value.

    Args:
        functTypeStr (str): Key selecting the base two-number comparison function,
            e.g. "absdev" for absolute difference; see OBJ_FUNCT_DICT for all options.
        averageMethod (str): How per-element errors are reduced to one value ("mean").
        catchOverflow (bool): If True, overflow during comparison yields errorRetVal.
        errorRetVal (float): Value substituted on overflow; also the normalisation cap.
        normToErrorRetVal (bool): If True, outputs are divided by errorRetVal so all
            results fall between 0 and 1 (values beyond the cap map to 1).
        greaterThanIsOk (bool): If True, cmp(expVal, actVal) is 0 whenever expVal >= actVal.
        lessThanIsOk (bool): If True, cmp(expVal, actVal) is 0 whenever expVal <= actVal.
        useAbsVals (bool): If True, abs(expVal) and abs(actVal) are compared instead;
            applied BEFORE the greater/less-than shortcuts.
        divideErrorsByNormFactor (float): If not None, divide the final error by this
            value (e.g. the average expVal, to get a normalised error).

    Returns:
        outFunct(targIter, actIter) -> error: function comparing two input iterables.
    """
    baseFunct = createSimpleTargValObjFunction(functTypeStr, catchOverflow=False,
                                               greaterThanIsOk=greaterThanIsOk,
                                               lessThanIsOk=lessThanIsOk, useAbsVals=useAbsVals)

    def _elementWiseErrors(targVals, actVals):
        # Deep-copy inputs so the comparison can never mutate caller data.
        targCopy, actCopy = copy.deepcopy(targVals), copy.deepcopy(actVals)
        return [baseFunct(t, a) for t, a in it.zip_longest(targCopy, actCopy)]

    if averageMethod.lower() == "mean":
        outFunct = applyMeanDecorator(_elementWiseErrors)
    else:
        raise ValueError("{} is not a supported option for averageMethod".format(averageMethod))
    if divideErrorsByNormFactor is not None:
        outFunct = applyDivByConstantDecorator(outFunct, divideErrorsByNormFactor)
    if catchOverflow:
        outFunct = catchOverflowDecorator(outFunct, errorRetVal)
    # Normalisation must wrap the overflow catcher, which caps the raw value.
    if normToErrorRetVal:
        outFunct = applyNormDecorator(outFunct, errorRetVal)
    return outFunct
def get_nonoverlap_ra_dataset_conf(dataset_conf):
    """extract segments by shifting segment length"""
    # Disable both randomisation flags; segments must be deterministic here.
    for flag, message in (("if_rand", "disabled dataset_conf if_rand"),
                          ("seg_rand", "disabled dataset_conf seg_rand")):
        if dataset_conf[flag]:
            info(message)
            dataset_conf[flag] = False
    # Force the shift to equal the segment length so segments never overlap.
    if dataset_conf["seg_shift"] != dataset_conf["seg_len"]:
        info("change seg_shift from %s to %s" % (
            dataset_conf["seg_shift"], dataset_conf["seg_len"]))
        dataset_conf["seg_shift"] = dataset_conf["seg_len"]
    return dataset_conf
def _f_model_snaive_wday(a_x, a_date, params, is_mult=False, df_actuals=None):
    """Seasonal naive model - forecast each date with the last valid sample
    observed on the same weekday.

    Parameters
    ----------
    a_x : array-like
        Sample indices (x values) to produce output for.
    a_date : array-like of datetime
        Dates corresponding to a_x.
    params :
        Unused; kept for compatibility with the shared model signature.
    is_mult : bool, optional
        Unused; kept for compatibility with the shared model signature.
    df_actuals : pandas.DataFrame
        Actuals table with at least columns 'date', 'x', 'y'. Required.

    Returns
    -------
    numpy.ndarray
        Model values (y_out) aligned with the requested samples.
    """
    if df_actuals is None:
        raise ValueError('model_snaive_wday requires a df_actuals argument')
    # df_actuals_model - table with actuals samples,
    # adding y_out column with naive model values
    # (_fillna_wday is defined elsewhere; presumably it fills missing weekday
    # samples from earlier weeks and adds the 'wday'/'y_out' columns -- confirm)
    df_actuals_model = _fillna_wday(df_actuals.drop_duplicates('x'))
    # df_last_week - table with naive model values from last actuals week,
    # to use in extrapolation
    df_last_week = (
        df_actuals_model
        # Fill null actual values with data from previous weeks
        .assign(y=df_actuals_model.y.fillna(df_actuals_model.y_out))
        .drop_duplicates('wday', keep='last')
        [['wday', 'y']]
        .rename(columns=dict(y='y_out'))
    )
    # Generate table with extrapolated samples
    df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
    df_out_tmp['wday'] = df_out_tmp.date.dt.weekday
    df_out_extrapolated = (
        df_out_tmp
        .loc[~df_out_tmp.date.isin(df_actuals_model.date)]
        # merge with no 'on=' joins on the shared 'wday' column
        .merge(df_last_week, how='left')
        .sort_values('x')
    )
    # Filter actuals table - only samples in a_x, a_date
    df_out_actuals_filtered = (
        # df_actuals_model.loc[df_actuals_model.x.isin(a_x)]
        # Using merge rather than simple filtering to account for
        # dates with multiple samples
        df_actuals_model.merge(df_out_tmp, how='inner')
        .sort_values('x')
    )
    # Stitch in-sample rows and extrapolated rows back together; only the
    # y_out column is returned, ordered by x within each part.
    df_out = (
        pd.concat(
            [df_out_actuals_filtered, df_out_extrapolated],
            sort=False, ignore_index=True)
    )
    return df_out.y_out.values
def read_fts(self, s, orders=None, filename=None, pfits=True, verb=True):
    """
    SYNTAX: read_fts(filename)
    OUTPUT: namedtuple('spectrum', 'w f berv bjd blaze drift timeid sn55 ')
       w - wavelength
       f - flux
       berv - Barycentric Earth Radial Velocity
       bjd - Barycentric Julian Day
       blaze - Blaze filename
       drift - Used RV Drift
       sn55 - S_N order center55

    When ``orders`` is None, only header-style attributes are initialised on
    ``self``; otherwise the order data ``(w, f, e, bpmap)`` is read and returned.
    """
    HIERARCH = 'HIERARCH '
    if orders is None:
        # FTS spectra carry no usable FITS header, so fabricate a minimal one.
        hdr = {'OBJECT': 'Iod'}
        self.header = hdr
        self.inst = 'FTS'
        self.drsberv = hdr.get('bla', 0)
        self.fileid = os.path.basename(s)
        # For non-FITS files the BJD is encoded in the file name.
        self.drsbjd = 0.0 if '.fits' in s else float(self.fileid.split('.')[1].split('_')[0])
        # NOTE(review): hardcoded measurement-log path -- only resolvable on the
        # original analysis machine; consider making it configurable.
        with open('/home/data1/fts/Lemke/2015_I/2015-01-29/DPT/parameter_Q_Si_I2_001.txt') as finfo:
            for line in finfo:
                if self.fileid.replace('_ScSm.txt', '\t') in line:
                    line = line.split(); date = line[2].split('/'); time = line[3].split(':')
                    # Convert the logged date/time to JD via an external helper script.
                    from subprocess import Popen, PIPE
                    p = Popen(['bash', 'date2jd', date[2], date[1], date[0]] + time,
                              stdin=PIPE, stdout=PIPE, stderr=PIPE)
                    output, err = p.communicate()
                    rc = p.returncode
                    self.drsbjd = float(output)
        self.sn55 = 10
        self.blaze = ''  # hdr[HIERARCH+'ESO DRS BLAZE FILE']
        self.drift = hdr.get(HIERARCH + 'ESO DRS DRIFT RV USED', np.nan)
        self.calmode = hdr.get('SOURCE', 0)  # .split("_")[3]
        self.timeid = self.fileid
        self.exptime = 0
        # NOTE(review): self.flag is printed but never assigned in this method;
        # presumably set elsewhere on the instance -- confirm.
        if verb: print("read_fts:", self.timeid, self.header['OBJECT'], self.drsbjd, self.sn55, self.drsberv, self.drift, self.flag, self.calmode)
    if orders is not None:  # read order data
        nord = 70    # some arbitrary shaping to 70x10000
        nw = 700000
        if '.fits' in s:
            hdulist = pyfits.open(s)
            w, f = hdulist[0].data[:, 1600000:]
        else:
            # np.loadtxt with skiprows is too slow (45s vs 5.2s); parse raw text.
            data = np.fromfile(s, sep=' ', count=2 * (1600000 + nw))[2 * 1600000:]
            w = 1e7 / data[::2]
            f = data[1::2]
        # BUGFIX: use floor division; the original nw/nord yields a float on
        # Python 3 and numpy's reshape raises a TypeError for float shapes.
        w = 10 * w[:nw].reshape(nord, nw // nord)
        f = f[:nw].reshape(nord, nw // nord) * 4000
        bpmap = np.isnan(f).astype(int)  # flag 1 for nan
        xmax = w.size
        e = np.ones_like(w)
        return w, f, e, bpmap
def main():
    """Entry point: parse CLI arguments and launch the diarization experiment."""
    model_args, training_args, inference_args = uisrnn.parse_arguments()
    diarization_experiment(model_args, training_args, inference_args)
def save_model(sess, model_saver, model_out_dir):
    """
    Persist the trained FCN model to disk.
    :param sess: TF Session holding the trained variables
    :param model_saver: TF model saver
    :param model_out_dir: Directory to save the model in (recreated if present)
    """
    # Start from an empty output directory so stale checkpoints never linger.
    if os.path.exists(model_out_dir):
        shutil.rmtree(model_out_dir)
    os.makedirs(model_out_dir)
    model_saver.save(sess, os.path.join(model_out_dir, MODEL_NAME))
    # Colourised status message; BOLD_GREEN/REG are ANSI escape constants
    # defined at module level.
    print(BOLD_GREEN, end='')
    print('Training finished. Saving model to: {}'.format(model_out_dir))
    print(REG)
def union_exprs(La, Lb):
    """
    Union two lists of Exprs.

    Nodes from La whose unique_str() does not already appear in Lb are
    prepended to Lb; order within each list is preserved.
    """
    seen_in_b = {expr.unique_str() for expr in Lb}
    extras = [expr for expr in La if expr.unique_str() not in seen_in_b]
    return extras + Lb
def programs_reload():
    """Reload programs from config file
    Parameters (default):
    - do_create (True)
    - do_update (True)
    - do_pause (False)
    """
    try:
        reloaded = dar.reload_programs(**request.args)
    except TypeError as e:
        # Unknown/invalid query parameters surface as TypeError from the call.
        log.info("Caught TypeError: %s" % (str(e)))
        return "Error: " + str(e), 400
    return jsonify({key: list(val) for key, val in reloaded.items()})
def generate_urls():
    """Build the list of clinicaltrials.gov request URLs covering every study
    matching the 'Cancer' search term, in batches of 1000 ranks each.
    """
    # First query only retrieves the total number of matching studies.
    api_call = build_url(expr='Cancer', max_rnk=1, fmt='json')
    response = requests.get(api_call)
    n_studies = response.json()['StudyFieldsResponse']['NStudiesFound']
    print(f'{n_studies} studies found.\n')
    print('\nGenerating request urls...')
    # One URL per 1000-rank window (the API caps each page at 1000 records).
    return [
        build_url(expr='Cancer', field_names=['EligibilityCriteria'],
                  min_rnk=f'{start}', max_rnk=f'{start+999}', fmt='csv')
        for start in range(1, n_studies, 1000)
    ]
def xyz2luv(xyz, illuminant="D65", observer="2"):
    """XYZ to CIE-Luv color space conversion.
    Parameters
    ----------
    xyz : (M, N, [P,] 3) array_like
        The 3 or 4 dimensional image in XYZ format. Final dimension denotes
        channels.
    illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
        The name of the illuminant (the function is NOT case sensitive).
    observer : {"2", "10"}, optional
        The aperture angle of the observer.
    Returns
    -------
    out : (M, N, [P,] 3) ndarray
        The image in CIE-Luv format. Same dimensions as input.
    Raises
    ------
    ValueError
        If `xyz` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
    ValueError
        If either the illuminant or the observer angle are not supported or
        unknown.
    Notes
    -----
    By default XYZ conversion weights use observer=2A. Reference whitepoint
    for D65 Illuminant, with XYZ tristimulus values of ``(95.047, 100.,
    108.883)``. See function 'get_xyz_coords' for a list of supported
    illuminants.
    References
    ----------
    .. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
    .. [2] http://en.wikipedia.org/wiki/CIELUV
    Examples
    --------
    >>> from skimage import data
    >>> from skimage.color import rgb2xyz, xyz2luv
    >>> img = data.astronaut()
    >>> img_xyz = rgb2xyz(img)
    >>> img_luv = xyz2luv(img_xyz)
    """
    arr = _prepare_colorarray(xyz)
    # extract channels
    x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
    # BUGFIX: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; use the concrete np.float64 dtype instead.
    eps = np.finfo(np.float64).eps
    # compute y_r and L (CIE piecewise lightness function)
    xyz_ref_white = get_xyz_coords(illuminant, observer)
    L = y / xyz_ref_white[1]
    mask = L > 0.008856
    L[mask] = 116. * np.power(L[mask], 1. / 3.) - 16.
    L[~mask] = 903.3 * L[~mask]
    # u' and v' chromaticity of the reference white point
    u0 = 4 * xyz_ref_white[0] / np.dot([1, 15, 3], xyz_ref_white)
    v0 = 9 * xyz_ref_white[1] / np.dot([1, 15, 3], xyz_ref_white)

    # u' and v' helper functions; eps guards against division by zero
    def fu(X, Y, Z):
        return (4. * X) / (X + 15. * Y + 3. * Z + eps)

    def fv(X, Y, Z):
        return (9. * Y) / (X + 15. * Y + 3. * Z + eps)

    # compute u and v using helper functions
    u = 13. * L * (fu(x, y, z) - u0)
    v = 13. * L * (fv(x, y, z) - v0)
    return np.concatenate([q[..., np.newaxis] for q in [L, u, v]], axis=-1)
def safe_open_w(path):
    """
    Open "path" for writing in binary mode, creating any parent directories
    as needed. Returns the open file object.
    """
    dirname = os.path.dirname(path)
    # os.makedirs(exist_ok=True) replaces the project mkdir_p helper with the
    # stdlib equivalent; the guard also handles a bare filename, where
    # dirname is '' and makedirs would raise.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    return open(path, 'wb')
def mysql_drop_tables():
    """ Drop the application tables"""
    require('environment', provided_by=[production, staging])
    total_tables = mysql_count_tables()
    # Ask the operator before doing anything destructive.
    question = ("Do you want to drop the {} tables in '%(db_name)s'?"
                .format(total_tables) % env)
    if not confirm(question):
        abort(colors.yellow("Aborting at user request."))
    if not mysql_check_db_exists():
        abort(colors.red("Unable to drop tables in database '%(db_name)s'."
                         "The database does not exist" % env))
    # Downgrade scripts are applied newest-first to unwind each migration.
    downgrade_scripts = ['004/downgrade.sql',
                         '003/downgrade.sql',
                         '002/downgrade.sql',
                         '001/downgrade.sql']
    with lcd('../db/'):
        for script in downgrade_scripts:
            local("mysql --login-path=fabric_%(db_host)s %(db_name)s < {}"
                  .format(script) % env)
def direct_publish_workflow(previous_model, new_deposit):
    """Workflow publishing the deposits on submission."""
    from b2share.modules.deposit.api import PublicationStates
    new_state = new_deposit['publication_state']
    previous_state = previous_model.json['publication_state']
    if previous_state == new_state:
        return
    # Only draft -> submitted and draft -> published are legal here.
    allowed_transitions = {
        (PublicationStates.draft.name, PublicationStates.submitted.name),
        (PublicationStates.draft.name, PublicationStates.published.name),
    }
    if (previous_state, new_state) not in allowed_transitions:
        raise InvalidPublicationStateError(
            description='Transition from publication state {0} to {1} is '
            'not allowed by community\'s workflow {2}'.format(
                previous_state, new_state, 'direct_publish'
            )
        )
    # Publish automatically when submitted
    if new_state == PublicationStates.submitted.name:
        new_deposit['publication_state'] = PublicationStates.published.name
def today():
    """
    Today Page:
    Shows the user's notifications -- word of the day, news highlights, and
    friends who added you or shared a note with you.
    """
    current_user = session['username']
    return render_template('main/today.html', username=current_user)
def load_data_states(csv_filename):
    """Load State records from a .csv file and print each one."""
    with open(csv_filename, mode='r') as csvfile:
        for record in csv.DictReader(csvfile):
            state = State(**record)
            # state.save()  # persisting is intentionally disabled here
            print(state)
def compile_read_regex(read_tags, file_extension):
    """Generate regular expressions to discern direction in paired-end reads."""
    compiled = []
    # One anchored pattern per direction tag, e.g. r'R1\.fastq$'.
    for tag, ext in itertools.product(read_tags, [file_extension]):
        compiled.append(re.compile(r'{}\.{}$'.format(tag, ext)))
    return compiled
def hello_world():
    """Print 'Hello world!' to standard output."""
    message = "Hello world!"
    print(message)
def filter(start=None, stop=None, **kwargs):
    """
    Get commands with ``start`` <= date < ``stop``. Additional ``key=val`` pairs
    can be supplied to further filter the results. Both ``key`` and ``val``
    are case insensitive. In addition to any of the command parameters
    such as TLMSID, MSID, SCS, STEP, or POS, the ``key`` can be:
    date : Exact date of command e.g. '2013:003:22:11:45.530'
    type : Command type e.g. COMMAND_SW, COMMAND_HW, ACISPKT, SIMTRANS
    Examples::
    >>> from kadi import cmds
    >>> cs = cmds.filter('2012:001', '2012:030')
    >>> cs = cmds.filter('2012:001', '2012:030', type='simtrans')
    >>> cs = cmds.filter(type='acispkt', tlmsid='wsvidalldn')
    >>> cs = cmds.filter(msid='aflcrset')
    >>> print(cs.table)
    Parameters
    ----------
    start : DateTime format (optional)
        Start time, defaults to beginning of available commands (2002:001)
    stop : DateTime format (optional)
        Stop time, defaults to end of available commands
    **kwargs : any key=val keyword argument pairs
    Returns
    -------
    cmds : CmdList object (list of commands)
    """
    # NOTE: this intentionally shadows the builtin filter() as a module API.
    return CmdList(_find(start, stop, **kwargs))
def _get_energy_at_time(masses, pos, vel, time_idx):
"""
Internal function used to calculate kinetic energy and potential energy at
a give time index using a vectorized direct sum approach. This function is
necessary to facilitate the parallelization of the energy calculation
across multiple CPU cores.
:param masses: Array of masses.
:param pos: Array of positions over time.
:param vel: Array of velocities over time.
:param time_idx: Time index at which the energy is to be calculated.
:return: Tuple of kinetic energy and potential energy
at the give time index.
"""
# kinetic energy
kin_energy = 0.5 * np.sum(masses * np.sum(vel[:, :, time_idx] ** 2, axis=1))
# potential energy
# extract x & y coordinates to a (N, 1) array
x = pos[:, 0:1, time_idx]
y = pos[:, 1:2, time_idx]
# matrices that store pairwise body distances
dx = x.T - x
dy = y.T - y
# calculate pairwise inverse norm of distances
# mask operation to avoid divide by zero
norm = np.sqrt(dx ** 2 + dy ** 2)
inv = np.zeros_like(norm) # ensure that diagonal of inv will only contain zeros
np.divide(1, norm, where=norm != 0, out=inv)
# multiply matrix element ij with the masses of bodies i and j
energy_per_body = np.transpose(inv * masses) * masses
# sum energies
pot_energy = -0.5 * energy_per_body.sum()
return kin_energy, pot_energy | 30,597 |
def test_base_forest_quantile():
    """
    Test that the base estimators belong to the correct class.
    """
    rng = np.random.RandomState(0)
    X = rng.randn(10, 1)
    # BUGFIX: np.linspace requires an integer sample count; passing the float
    # 10.0 raises a TypeError on modern NumPy.
    y = np.linspace(0.0, 100.0, 10)
    rfqr = RandomForestQuantileRegressor(random_state=0, max_depth=1)
    rfqr.fit(X, y)
    for est in rfqr.estimators_:
        assert_true(isinstance(est, DecisionTreeQuantileRegressor))
    etqr = ExtraTreesQuantileRegressor(random_state=0, max_depth=1)
    etqr.fit(X, y)
    for est in etqr.estimators_:
        assert_true(isinstance(est, ExtraTreeQuantileRegressor))
def get_root_url_for_date(date):
    """
    Returns the root URL of the TMCDB web I/F for the given date.
    The argument date should be an ISO-8601 date string (YYYY-MM-DD).
    The returned URL already contains the date.
    """
    year, month = date[:4], date[5:7]
    host = get_host_name()
    return "%s/index.php?dir=%s/%s/%s/" % (host, year, month, date)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.