content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def process_domain_assoc(url, domain_map):
    """Map a URL to a more fitting user-defined tag for its domain.

    The mapping comes from the provided config file (a small example
    domain_assoc.yml is included), laid out as::

        tag:
          - url to map to tag
          - ...

    Returns the first tag whose URL list contains *url*, or *url*
    unchanged when no mapping applies (including an empty/None map).
    """
    if not domain_map:
        return url
    matches = (tag for tag, urls in domain_map.items() if url in urls)
    return next(matches, url)
def process_auc(gt_list, pred_list):
    """
    Process AUC (AUROC) over lists.
    :param gt_list: Ground truth list
    :type gt_list: np.array
    :param pred_list: Predictions list
    :type pred_list: np.array
    :return: Mean AUC over the lists
    :rtype: float
    """
    res = []
    for i, gt in enumerate(gt_list):
        # Skip all-negative ground truths: AUROC is undefined when only one
        # class is present.
        if np.amax(gt) != 0:
            pred = pred_list[i].flatten()
            gt = gt.flatten()
            # Fixed: the function is named and documented as AUC/AUROC but was
            # calling f1_score (the roc_auc_score call had been commented out).
            res.append(roc_auc_score(gt, pred))
    # NOTE: returns nan if every ground truth was all-negative (empty res).
    return np.mean(res)
def transform(
        Y,
        transform_type=None,
        dtype=np.float32):
    """Transform an STFT feature.

    Args:
        Y: STFT, (n_frames, n_bins)-shaped np.complex array
        transform_type: None or "log"
        dtype: output data type; np.float32 is expected
    Returns:
        Y (numpy.array): the magnitude (optionally log-compressed) feature
    """
    magnitude = np.abs(Y)
    if transform_type == 'log':
        # Floor at 1e-06 so the log never sees a zero magnitude.
        magnitude = np.log(np.maximum(magnitude, 1e-06))
    return magnitude.astype(dtype)
def tol_vif_table(df, n=5):
    """Build a collinearity summary for the most correlated variable pairs.

    :param df: dataframe
    :param n: number of top pairs to include
    :return: DataFrame with 'Correlation', 'Tolerance' and 'VIF' columns
    """
    correlations = get_top_abs_correlations(df, n)
    tolerance = 1 - correlations ** 2
    variance_inflation = 1 / tolerance
    table = pd.concat([correlations, tolerance, variance_inflation], axis=1)
    table.columns = ['Correlation', 'Tolerance', 'VIF']
    return table
def shuffled(iterable):
    """Return a randomly shuffled list copy of *iterable* (input untouched)."""
    copy = list(iterable)
    random.shuffle(copy)
    return copy
def get_local_ray_processes(archive: Archive,
                            processes: Optional[List[Tuple[str, bool]]] = None,
                            verbose: bool = False):
    """Get the status of all the relevant ray processes.
    Args:
        archive (Archive): Archive object to add process info files to.
        processes (list): List of processes to get information on. The first
            element of the tuple is a string to filter by, and the second
            element is a boolean indicating if we should filter by command
            name (True) or command line including parameters (False)
        verbose (bool): If True, show entire executable command line.
            If False, show just the first term.
    Returns:
        Open archive object.
    """
    if not processes:
        # local import to avoid circular dependencies
        from ray.autoscaler._private.constants import RAY_PROCESSES
        processes = RAY_PROCESSES
    # Snapshot every OS process first; filtering happens afterwards.
    process_infos = []
    for process in psutil.process_iter(["pid", "name", "cmdline", "status"]):
        try:
            # oneshot() caches process info so the repeated accessor calls
            # below hit the cache instead of re-querying the OS each time.
            with process.oneshot():
                cmdline = " ".join(process.cmdline())
                # Non-verbose: keep only the text before the first "--" flag
                # (minus the trailing space).
                process_infos.append(({
                    "executable": cmdline
                    if verbose else cmdline.split("--", 1)[0][:-1],
                    "name": process.name(),
                    "pid": process.pid,
                    "status": process.status(),
                }, process.cmdline()))
        except Exception as exc:
            # NOTE(review): any psutil failure (e.g. a process exiting mid-scan)
            # aborts the whole collection — confirm this is intended.
            raise LocalCommandFailed(exc) from exc
    # Keep only processes matching one of the keyword filters, de-duplicated
    # by pid (first matching keyword wins).
    relevant_processes = {}
    for process_dict, cmdline in process_infos:
        for keyword, filter_by_cmd in processes:
            if filter_by_cmd:
                corpus = process_dict["name"]
            else:
                corpus = subprocess.list2cmdline(cmdline)
            if keyword in corpus and process_dict["pid"] \
                    not in relevant_processes:
                relevant_processes[process_dict["pid"]] = process_dict
    # Dump one YAML document per matching process into meta/process_info.txt.
    with tempfile.NamedTemporaryFile("wt") as fp:
        for line in relevant_processes.values():
            fp.writelines([yaml.dump(line), "\n"])
        fp.flush()
        with archive.subdir("meta") as sd:
            sd.add(fp.name, "process_info.txt")
    return archive
def test_rule(rule_d, ipv6=False):
    """Return True if the rule is a well-formed dictionary, False otherwise.

    :param rule_d: candidate rule dictionary
    :param ipv6: validate against the IPv6 tables when True
    :return: True when ``_encode_iptc_rule`` accepts the rule, else False
    """
    try:
        _encode_iptc_rule(rule_d, ipv6=ipv6)
        return True
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt / SystemExit
        # are no longer silently swallowed.
        return False
def intersects(hp, sphere):
    """
    Return True when the closed, upper halfspace intersects the sphere
    (i.e. there exists a spatial relation between the two).
    """
    # The sphere touches or crosses the halfspace when the signed distance
    # of its center is no deeper than one radius below the boundary.
    gap = signed_distance(sphere.center, hp) + sphere.radius
    return gap >= 0.0
def invert_center_scale(X_cs, X_center, X_scale):
    r"""
    This function inverts whatever centering and scaling was done by
    ``center_scale`` function:
    .. math::
        \mathbf{X} = \mathbf{X_{cs}} \cdot \mathbf{D} + \mathbf{C}
    **Example:**
    .. code:: python
        from PCAfold import center_scale, invert_center_scale
        import numpy as np
        # Generate dummy data set:
        X = np.random.rand(100,20)
        # Center and scale:
        (X_cs, X_center, X_scale) = center_scale(X, 'range', nocenter=False)
        # Uncenter and unscale:
        X = invert_center_scale(X_cs, X_center, X_scale)
    :param X_cs:
        centered and scaled data set :math:`\mathbf{X_{cs}}`.
    :param X_center:
        vector of centers :math:`\mathbf{C}` applied on the original data set :math:`\mathbf{X}`.
    :param X_scale:
        vector of scales :math:`\mathbf{D}` applied on the original data set :math:`\mathbf{X}`.
    :return:
        - **X** - original data set :math:`\mathbf{X}`.
    """
    # Broadcasting handles both the single-variable (1D) and multi-variable
    # (2D) cases that were previously special-cased behind a bare ``except``
    # and an explicit per-column loop. ravel() accepts scalar, list, (n,)
    # and (n,1)-shaped centers/scales alike.
    return np.asarray(X_cs) * np.ravel(X_scale) + np.ravel(X_center)
def weighted_smoothing(image, diffusion_weight=1e-4, data_weight=1.0,
                       weight_function_parameters=None):
    """Weighted smoothing of images: smooth regions, preserve sharp edges.
    Parameters
    ----------
    image : NumPy array
    diffusion_weight : float or NumPy array, optional
        The weight of the diffusion for smoothing. It can be provided
        as an array the same shape as the image, or as a scalar that
        will multiply the default weight matrix obtained from the edge
        indicator function.
    data_weight : float or NumPy array, optional
        The weight of the image data to preserve fidelity to the image.
        It can be provided as an array the same shape as the image, or
        as a scalar that will multiply the default weight matrix obtained
        by subtracting the edge indicator function from 1.0.
    weight_function_parameters : dict, optional
        The parameters sigma and rho for the edge indicator function.
        Sigma is the standard deviation for the gaussian gradient
        applied to the image: dI = N * gaussian_gradient_magnitude(image),
        where N = min(image.shape) - 1, and rho is the scaling weight in
        the definition of the edge indicator function: 1/(1 + (dI/rho)**2)
        The default values are: {'sigma': 3.0, 'rho': None}.
        If rho is None, it is calculated by rho = 0.23 * dI.max().
    Returns
    -------
    smoothed_image : NumPy array
    Raises
    ------
    ValueError
        If diffusion_weight or data_weight have the wrong type.
    """
    # None default avoids the shared-mutable-default-argument pitfall.
    if weight_function_parameters is None:
        weight_function_parameters = {}
    if isinstance(diffusion_weight, np.ndarray):
        beta = diffusion_weight
    elif not np.isscalar(diffusion_weight):
        # Fixed: this branch validates diffusion_weight but previously
        # reported "data_weight" in its error message.
        raise ValueError("diffusion_weight can only be None or a scalar number "
                         "or a NumPy array the same shape as the image.")
    else:
        sigma = weight_function_parameters.get('sigma', 3.0)
        rho = weight_function_parameters.get('rho', None)
        if rho is None:
            N = min(image.shape) - 1.0
            dI = N * gaussian_gradient_magnitude(image, sigma, mode='nearest')
            rho = 0.23 * dI.max()
        g = EdgeIndicatorFunction(image, rho, sigma)
        G = g._g
        # Normalize the edge indicator to [0, 1] before applying the scalar
        # diffusion weight.
        beta = (G - G.min()) / (G.max() - G.min())
        beta *= diffusion_weight
    if isinstance(data_weight, np.ndarray):
        alpha = data_weight
    elif np.isscalar(data_weight):
        # Data fidelity is strongest where diffusion is weakest (at edges).
        alpha = data_weight * (beta.max() - beta) / (beta.max() - beta.min())
    else:
        raise ValueError("data_weight can only be None or a scalar number "
                         "or a NumPy array the same shape as the image.")
    rhs = alpha * image
    grid = Grid2d(image.shape)
    smooth_image = fem.solve_elliptic_pde(grid, alpha, beta, rhs)
    return smooth_image
def get_random_tablature(tablature : Tablature, constants : Constants):
    """Make a copy of the tablature under inspection and generate a new random tablature.

    Each note may be re-assigned a (string, fret) position: notes on string 6
    are always re-rolled from the playable combinations of their fundamental,
    while other notes mutate with probability ``constants.init_mutation_rate``.
    The input tablature is left unmodified.
    """
    new_tab = deepcopy(tablature)
    for tab_instance, new_tab_instance in zip(tablature.tablature, new_tab.tablature):
        if tab_instance.string == 6:
            # NOTE(review): string == 6 appears to mark a note needing a fresh
            # position — confirm against Tablature's conventions.
            string, fret = random.choice(determine_combinations(tab_instance.fundamental, constants))
            new_tab_instance.string, new_tab_instance.fret = string, fret
        elif constants.init_mutation_rate > random.random():
            # Random mutation: nudge the existing position.
            string, fret = get_random_position(tab_instance.string, tab_instance.fret, constants)
            new_tab_instance.string, new_tab_instance.fret = string, fret
    return new_tab
def _staticfy(value):
    """
    Allows to keep backward compatibility with instances of OpenWISP which
    were using the previous implementation of OPENWISP_ADMIN_THEME_LINKS
    and OPENWISP_ADMIN_THEME_JS which didn't automatically pre-process
    those lists of static files with django.templatetags.static.static()
    and hence were not configured to allow those files to be found
    by the staticfile loaders, if static() raises ValueError, we assume
    one of either cases:
    1. An old instance has upgraded and we keep returning the old value
       so the file will continue being found although unprocessed by
       django's static file machinery.
    2. The value passed is wrong, instead of failing loudly we fail silently.

    :param value: static file path to process
    :return: the static()-processed URL, or ``value`` unchanged on ValueError
    """
    try:
        return static(value)
    # maintain backward compatibility
    except ValueError:
        return value
def add_files(
    client: SymphonyClient,
    local_directory_path: str,
    entity_type: str,
    entity_id: str,
    category: Optional[str] = None,
) -> None:
    """This function adds all files located in folder to an entity of a given type.
    Args:
        client (SymphonyClient): client used to perform the upload
        local_directory_path (str): local system path to the directory
        entity_type (str): one of existing options ["LOCATION", "WORK_ORDER", "SITE_SURVEY", "EQUIPMENT"]
        entity_id (string): valid entity ID
        category (Optional[string]): file category name
    Example:
        ```
        location = client.get_location({("Country", "LS_IND_Prod_Copy")})
        client.add_files(
            local_directory_path="./documents_folder/",
            entity_type="LOCATION",
            entity_id=location.id,
            category="category_name",
        )
        ```
    """
    # Upload each file individually; a failure on one file aborts the rest.
    for file in list_dir(local_directory_path):
        add_file(client, file, entity_type, entity_id, category)
def compare_rep(topic, replication_factor):
    # type: (str, int) -> bool
    """Compare replication-factor in the playbook with the one actually set.
    Keyword arguments:
    topic -- topicname
    replication_factor -- number of replications
    Return:
    bool -- True if change is needed, else False

    Side effects: records the old/new values in the module-level ``diff``
    dict and calls ``fail_module`` (which aborts) on errors.
    """
    try:
        metadata = admin.list_topics() # type(metadata.topics) = dict
    except KafkaException as e:
        msg = (
            "Can not get metadata of topic %s: %s"
            % (topic, e)
        )
        fail_module(msg)
    # All partitions share the replication factor, so partition 0 suffices.
    old_rep = len(metadata.topics[topic].partitions[0].replicas) #type(partitions) = dict, access replicas with partition-id as key over .replicas-func
    if replication_factor != old_rep:
        # Changing the replication factor requires direct zookeeper access.
        if module.params['zookeeper'] is None:
            msg = (
                "For modifying the replication_factor of a topic,"
                " you also need to set the zookeeper-parameter."
                " At the moment, replication_factor is set to %s"
                " and you tried to set it to %s."
                % (old_rep, replication_factor)
            )
            fail_module(msg)
        diff['before']['replication_factor'] = old_rep
        diff['after']['replication_factor'] = replication_factor
        return True
    # if replication_factor == old_rep:
    return False
def verbosity_option_parser() -> ArgumentParser:
    """
    Create a parser suitable to parse the verbosity option in different
    subparsers.
    """
    # add_help=False so this parser can be composed as a parent parser.
    verbosity_parser = ArgumentParser(add_help=False)
    verbosity_parser.add_argument(
        '--verbosity',
        dest=VERBOSITY_ARGNAME,
        type=str.upper,
        choices=ALLOWED_VERBOSITY,
        help='verbosity level to use for this command and subsequent ones.')
    return verbosity_parser
def b2b_config(api):
    """Demonstrates creating a back to back configuration of tx and rx
    ports, devices and a single flow using those ports as endpoints for
    transmit and receive.
    """
    config = api.config()
    # NOTE(review): the config above is immediately shadowed by a fresh
    # snappi config below, leaving the ``api`` parameter effectively unused —
    # looks like leftover code; confirm intent.
    import snappi
    config = snappi.Api().config()
    config.options.port_options.location_preemption = True
    # Tx/Rx test ports, addressed by chassis;card;port locations.
    tx_port, rx_port = config.ports \
        .port(name='Tx Port', location='10.36.74.26;02;13') \
        .port(name='Rx Port', location='10.36.74.26;02;14')
    tx_device, rx_device = (config.devices \
        .device(name='Tx Devices')
        .device(name='Rx Devices')
    )
    tx_device.ethernets.ethernet(port_name=tx_port.name)
    rx_device.ethernets.ethernet(port_name=rx_port.name)
    # Emulated Tx interface: ethernet + ipv4 + two vlans.
    tx_device.ethernets[-1].name = 'Tx Eth'
    tx_device.ethernets[-1].mac = '00:00:01:00:00:01'
    tx_device.ethernets[-1].ipv4_addresses.ipv4()
    tx_device.ethernets[-1].ipv4_addresses[-1].name = 'Tx Ipv4'
    tx_device.ethernets[-1].ipv4_addresses[-1].address = '1.1.1.1'
    tx_device.ethernets[-1].ipv4_addresses[-1].gateway = '1.1.2.1'
    tx_device.ethernets[-1].ipv4_addresses[-1].prefix = 16
    vlan1, vlan2 = tx_device.ethernets[-1].vlans.vlan(name='v1').vlan(name='v2')
    vlan1.id = 1
    vlan2.id = 2
    # Emulated Rx interface: ethernet only.
    rx_device.ethernets[-1].name = 'Rx Eth'
    rx_device.ethernets[-1].mac = '00:00:01:00:00:02'
    # Fixed-size, fixed-count flow between the two ports.
    flow = config.flows.flow(name='Tx -> Rx Flow')[0]
    flow.tx_rx.port.tx_name = tx_port.name
    flow.tx_rx.port.rx_name = rx_port.name
    flow.size.fixed = 128
    flow.rate.pps = 1000
    flow.duration.fixed_packets.packets = 10000
    # Packet headers with incrementing/decrementing field patterns.
    eth, vlan, ip, tcp = flow.packet.ethernet().vlan().ipv4().tcp()
    eth.src.value = '00:00:01:00:00:01'
    # NOTE(review): both dst values are identical — possibly a typo for two
    # distinct addresses; confirm.
    eth.dst.values = ['00:00:02:00:00:01', '00:00:02:00:00:01']
    eth.dst.metric_group = 'eth dst mac'
    ip.src.increment.start = '1.1.1.1'
    ip.src.increment.step = '0.0.0.1'
    ip.src.increment.count = 10
    ip.dst.decrement.start = '1.1.2.200'
    ip.dst.decrement.step = '0.0.0.1'
    ip.dst.decrement.count = 10
    ip.priority.dscp.phb.values = [8, 16, 32]
    ip.priority.dscp.ecn.value = 1
    tcp.src_port.increment.start = 10
    tcp.dst_port.increment.start = 1
    return config
def test_local_training_relative_output_dir(mocker: MockFixture, cli_runner: CliRunner):
    """
    Tests issue #208 - Converting relative path to output-dir to absolute,
    because Docker requires mount paths to be absolute
    :param mocker: mocker fixture
    :param cli_runner: Click runner fixture
    """
    # Stub out the trainer and all SDK side effects so no containers run.
    trainer_mock = mocker.patch.object(training, 'K8sTrainer', autospec=True).return_value
    trainer_mock.model_training.spec.model.artifact_name_template = 'model_dir_template'
    api_client = Mock()
    mocker.patch.object(training_sdk, 'create_mt_config_file')
    mocker.patch.object(training_sdk, 'stream_container_logs')
    mocker.patch.object(training_sdk, 'raise_error_if_container_failed')
    docker_mock: Mock = mocker.patch.object(training_sdk.docker, 'from_env').return_value
    docker_mock.api.inspect_container = Mock(return_value={})
    # Temp dir under the CWD so a *relative* path to it can be computed.
    with tempfile.TemporaryDirectory(dir=os.curdir) as temp_dir:
        temp_dir_path = pathlib.Path(temp_dir)
        # Write minimal training + toolchain manifests for the CLI to load.
        training_yml = temp_dir_path / 'training.yml'
        training_yml.write_text(json.dumps(
            {**ModelTraining(id='training1', spec=ModelTrainingSpec(toolchain='toolchain1')).to_dict(),
             **{'kind': 'ModelTraining'}}))
        toolchain_yml = temp_dir_path / 'toolchain.yml'
        toolchain_yml.write_text(json.dumps({**ToolchainIntegration(id='toolchain1').to_dict(),
                                             **{'kind': 'ToolchainIntegration'}}))
        temp_dir_relative_path: str = os.path.relpath(temp_dir_path)
        # Invoke the CLI with the RELATIVE output dir — the code under test
        # must absolutize it before mounting.
        result: Result = cli_runner.invoke(
            training.training_group,
            ['run', '--output-dir', temp_dir_relative_path, '--manifest-file', str(training_yml),
             '--manifest-file', str(toolchain_yml), '--id', 'training1'],
            obj=api_client)
        assert result.exit_code == 0, f'command invocation ended with exit code {result.exit_code}'
        assert result.exception is None, f'command invocation ended with exception {result.exception}'
        # Every mount targeting the model output path must use the absolute
        # form of the temp dir as its source.
        abs_path = os.path.abspath(temp_dir)
        for call in docker_mock.containers.run.call_args_list:
            call_kwargs = call[1]
            mounts: List[Mount] = call_kwargs.get('mounts', [])
            for mount in filter(lambda m: m.get('Target') == MODEL_OUTPUT_CONTAINER_PATH, mounts):
                assert os.path.dirname(mount.get('Source')) == abs_path
def apply_patch(ffrom, fpatch, fto):
    """Apply given normal patch `fpatch` to `ffrom` to create
    `fto`. Returns the size of the created to-data.
    All arguments are file-like objects.
    >>> ffrom = open('foo.mem', 'rb')
    >>> fpatch = open('foo.patch', 'rb')
    >>> fto = open('foo.new', 'wb')
    >>> apply_patch(ffrom, fpatch, fto)
    2780
    """
    compression, to_size = read_header_normal(fpatch)
    if to_size == 0:
        return to_size
    patch_reader = PatchReader(fpatch, compression)
    # dfdiff is an optional data-format diff stream; None when absent.
    dfdiff, ffrom = create_data_format_readers(patch_reader, ffrom, to_size)
    to_pos = 0
    while to_pos < to_size:
        # Diff data: output = patch byte + from byte (+ optional dfdiff
        # byte), each sum truncated to 8 bits.
        for chunk_size, patch_data in iter_diff_chunks(patch_reader,
                                                       to_pos,
                                                       to_size):
            from_data = ffrom.read(chunk_size)
            if dfdiff is not None:
                dfdiff_data = dfdiff.read(chunk_size)
                data = bytearray(
                    (pb + fb + db) & 0xff for pb, fb, db in zip(patch_data,
                                                                from_data,
                                                                dfdiff_data)
                )
            else:
                data = bytearray(
                    (pb + fb) & 0xff for pb, fb in zip(patch_data, from_data)
                )
            fto.write(data)
            to_pos += chunk_size
        # Extra data: bytes present only in the patch (no from-data input).
        for chunk_size, patch_data in iter_extra_chunks(patch_reader,
                                                        to_pos,
                                                        to_size):
            if dfdiff is not None:
                dfdiff_data = dfdiff.read(chunk_size)
                data = bytearray(
                    (dd + db) & 0xff for dd, db in zip(patch_data, dfdiff_data)
                )
            else:
                data = patch_data
            fto.write(data)
            to_pos += chunk_size
        # Adjustment: skip forward/backward in the from-stream (size may be
        # negative) before the next diff/extra round.
        size = unpack_size(patch_reader)
        ffrom.seek(size, os.SEEK_CUR)
    if not patch_reader.eof:
        raise Error('End of patch not found.')
    return to_size
def fib(n):
    """Return the n'th Fibonacci number.

    Raises ValueError for negative n.
    """
    if n >= 0:
        return _fib(n)
    raise ValueError("Fibonacci number are only defined for n >= 0")
def gen(n):
    """
    Compute the n-th generator polynomial.
    That is, compute (x + 2 ** 1) * (x + 2 ** 2) * ... * (x + 2 ** n).
    """
    product = Poly([GF(1)])
    power_of_two = GF(1)
    # Multiply in one factor (x + 2**i) per iteration, tracking 2**i
    # incrementally in the field.
    for _ in range(n):
        power_of_two *= GF(2)
        product *= Poly([power_of_two, GF(1)])
    return product
def adjust_learning_rate(optimizer, epoch, opt):
    """ Sets the learning rate to the initial LR decayed by 10 """
    if opt.exp_lr:
        """ test
        A=np.arange(200);
        np.round(np.power(.1, np.power(2., A/80.)-1), 6)[[0,80,120,160]]
        test """
        # Exponential schedule: the decay exponent grows smoothly as
        # 2**(epoch / lr_decay_epoch) - 1.
        last_epoch = 2. ** (float(epoch) / int(opt.lr_decay_epoch)) - 1
    else:
        # Step schedule: decay by one power of 0.1 every lr_decay_epoch epochs.
        last_epoch = epoch // int(opt.lr_decay_epoch)
    lr = base_lr(optimizer, opt) * (0.1 ** last_epoch)
    print(lr)
    # Apply the new rate to every parameter group in place.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def get_prefix(bot, message):
    """A callable Prefix for our bot. This could be edited to allow per server prefixes."""
    # Prefixes may contain spaces; keep them simple though.
    prefixes = ('!',)
    # Allow mentioning the bot as an alternative to any listed prefix.
    return commands.when_mentioned_or(*prefixes)(bot, message)
def parse_discontinuous_phrase(phrase: str) -> str:
    """
    Transform a discontinuous phrase into a regular expression. Discontinuity
    is interpreted as taking place at any whitespace outside of terms grouped
    by parentheses: such whitespace means anything may appear between the
    left and right sides.
    Example 1: x1 (x2 (x3"x4")) becomes x1.+(x2 (x3|x4))
    """
    depth = 0
    pieces = []
    for char in phrase:
        if char == "(":
            depth += 1
        elif char == ")":
            depth -= 1
        elif char == " " and depth == 0:
            # Top-level space: match any gap.
            char = ".+"
        pieces.append(char)
    return "".join(pieces)
def _StartService(service_name):
    """Starts a Windows service with the given name.
    Args:
        service_name: string The name of the service to be started.

    Failures are logged, never raised: a missing service is logged at debug
    level, any other error at exception level.
    """
    logging.info("Trying to start service %s.", service_name)
    try:
        win32serviceutil.StartService(service_name)
        logging.info("Service '%s' started.", service_name)
    except pywintypes.error as e:
        # Missing service is an expected condition, not an error.
        if getattr(e, "winerror", None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
            logging.debug("Tried to start '%s', but the service is not installed.",
                          service_name)
        else:
            logging.exception("Encountered error trying to start '%s':", service_name)
def preprocess(frame):
    """
    Preprocess the images before they are sent into the model.

    Returns:
        orig_shape: original (H, W) of the input frame.
        orig_l: L channel of the Lab image at original resolution
            (C-contiguous).
        l_data: mean-shifted L channel resized to MODEL_WIDTH x MODEL_HEIGHT
            (C-contiguous).
    """
    #Read the image
    bgr_img = frame.astype(np.float32)
    #Opencv reads the picture as (N) HWC to get the HW value
    orig_shape = bgr_img.shape[:2]
    #Normalize the picture
    bgr_img = bgr_img / 255.0
    #Convert the picture to Lab space
    lab_img = cv.cvtColor(bgr_img, cv.COLOR_BGR2Lab)
    #Gets the L
    orig_l = lab_img[:, :, 0]
    # Downstream consumers require contiguous memory.
    if not orig_l.flags['C_CONTIGUOUS']:
        orig_l = np.ascontiguousarray(orig_l)
    #resize
    lab_img = cv.resize(lab_img, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.float32)
    l_data = lab_img[:, :, 0]
    if not l_data.flags['C_CONTIGUOUS']:
        l_data = np.ascontiguousarray(l_data)
    #The L-part minus the average
    # NOTE(review): 50 is the midpoint of Lab's L range [0, 100] — presumably
    # matching the colorization model's training normalization; confirm.
    l_data = l_data - 50
    return orig_shape, orig_l, l_data
def draw_roc_curve(y_true, y_score, annot=True, name=None, ax=None):
    """Draws a ROC (Receiver Operating Characteristic) curve using class rankings predicted by a classifier.
    Args:
        y_true (array-like): True class labels (0: negative; 1: positive)
        y_score (array-like): Predicted probability of positive-class membership
        annot (bool, optional): Whether to create and add a label to the curve with the computed AUC
        name (str, optional): Name of the curve to add to the AUC label
        ax (Matplotlib.Axes, optional): The axes on which to draw the ROC curve
    Returns:
        ax (Matplotlib.Axes): The axes containing the ROC curve
    """
    fpr, tpr, _ = roc_curve(y_true, y_score)
    # Default to the current axes so calls compose with existing figures.
    if ax is None:
        ax = plt.gca()
    # Add a label displaying the computed area under the curve
    if annot:
        roc_auc = auc(fpr, tpr)
        if name is not None:
            label = f'{name} AUC = {roc_auc:.3f}'
        else:
            label = f'AUC = {roc_auc:.3f}'
    else:
        label=None
    ax.plot(fpr, tpr, label=label)
    ax.set_xlabel('False positive rate')
    ax.set_ylabel('True positive rate')
    ax.legend(loc='best')
    return ax
def make_links_absolute(soup, base_url):
    """
    Replace relative links with absolute links.
    This one modifies the soup object in place and also returns it.
    """
    assert base_url is not None
    #
    # Rewrite every anchor that carries an href, resolving it against base_url.
    for anchor in soup.findAll('a', href=True):
        anchor['href'] = urljoin(base_url, anchor['href'])
    return soup
def newVersion():
    """Increment the ``__counter__`` version in swhlab/version.py.

    Reads the file, bumps the first ``__counter__=N`` line to N+1, and
    writes the file back.

    Raises:
        ValueError: if no ``__counter__`` line is found in the file.
    """
    version = None
    fname = '../swhlab/version.py'
    with open(fname) as f:
        raw = f.read().split("\n")
    for i, line in enumerate(raw):
        if line.startswith("__counter__"):
            if version is None:
                version = int(line.split("=")[1])
            raw[i] = "__counter__=%d" % (version + 1)
    if version is None:
        # Fail loudly before writing: previously this fell through and
        # crashed later with a confusing TypeError on ``None + 1``.
        raise ValueError("no __counter__ line found in %s" % fname)
    with open(fname, 'w') as f:
        f.write("\n".join(raw))
    print("upgraded from version %03d to %03d" % (version, version + 1))
def get_predictions(model, dataloader):
    """takes a trained model and validation or test dataloader
    and applies the model on the data producing predictions
    binary version

    Returns:
        all_y_hats (list of float): raw model outputs per bag
        all_preds (list of float): thresholded predictions per bag
        all_true (list of float): ground-truth bag labels
        (attention scores are collected but not returned)
    """
    model.eval()
    all_y_hats = []
    all_preds = []
    all_true = []
    all_attention = []
    for batch_id, (data, label) in enumerate(dataloader):
        label = label.squeeze()
        # Multiple-instance setting: every instance in a bag shares one label.
        bag_label = label[0]
        bag_label = bag_label.cpu()
        # NOTE(review): device "cuda:0" is hard-coded — confirm this matches
        # where the model lives.
        y_hat, preds, attention = model(data.to("cuda:0"))
        y_hat = y_hat.squeeze(dim=0)  # for binary setting
        y_hat = y_hat.cpu()
        preds = preds.squeeze(dim=0)  # for binary setting
        preds = preds.cpu()
        all_y_hats.append(y_hat.numpy().item())
        all_preds.append(preds.numpy().item())
        all_true.append(bag_label.numpy().item())
        attention_scores = np.round(attention.cpu().data.numpy()[0], decimals=3)
        all_attention.append(attention_scores)
        print("Bag Label:" + str(bag_label))
        print("Predicted Label:" + str(preds.numpy().item()))
        print("attention scores (unique ones):")
        print(np.unique(attention_scores))
        # print(attention_scores)
        # Free GPU/host memory eagerly between bags.
        del data, bag_label, label
    return all_y_hats, all_preds, all_true
def _getPymelType(arg, name) :
    """ Get the correct Pymel Type for an object that can be a MObject, PyNode or name of an existing Maya object,
    if no correct type is found returns DependNode by default.
    If the name of an existing object is passed, the name and MObject will be returned
    If a valid MObject is passed, the name will be returned as None
    If a PyNode instance is passed, its name and MObject will be returned

    Returns a (pymelType, results) tuple, where results maps API-handle kinds
    ('MObjectHandle', 'MDagPath', 'MPlug') to the corresponding API objects.
    NOTE: this is Python 2 code (old-style raise syntax).
    """
    obj = None
    results = {}
    isAttribute = False
    #--------------------------
    # API object testing
    #--------------------------
    # Normalize whichever API wrapper we got into a raw MObject/MPlug.
    if isinstance(arg, _api.MObject) :
        results['MObjectHandle'] = _api.MObjectHandle( arg )
        obj = arg
    elif isinstance(arg, _api.MObjectHandle) :
        results['MObjectHandle'] = arg
        obj = arg.object()
    elif isinstance(arg, _api.MDagPath) :
        results['MDagPath'] = arg
        obj = arg.node()
    elif isinstance(arg, _api.MPlug) :
        # Plugs map to Attribute rather than a node type.
        isAttribute = True
        obj = arg
        results['MPlug'] = obj
        if _api.isValidMPlug(arg):
            pymelType = Attribute
        else :
            raise MayaAttributeError, "Unable to determine Pymel type: the passed MPlug is not valid"
#    #---------------------------------
#    # No Api Object : Virtual PyNode
#    #---------------------------------
#    elif objName :
#        # non existing node
#        pymelType = DependNode
#        if '.' in objName :
#            # TODO : some better checking / parsing
#            pymelType = Attribute
    else :
        raise ValueError( "Unable to determine Pymel type for %r" % (arg,) )
    # Nodes (non-attributes) get their concrete type resolved from the MObject.
    if not isAttribute:
        pymelType = _getPymelTypeFromObject( obj, name )
    return pymelType, results
def _parse_objective(objective):
"""
Modified from deephyper/nas/run/util.py function compute_objective
"""
if isinstance(objective, str):
negate = (objective[0] == '-')
if negate:
objective = objective[1:]
split_objective = objective.split('__')
kind = split_objective[1] if len(split_objective) > 1 else 'last'
mname = split_objective[0]
# kind: min/max/last
if negate:
if kind == 'min':
kind = 'max'
elif kind == 'max':
kind = 'min'
return mname, kind
elif callable(objective):
logger.warn('objective is a callable, not a str, setting kind="last"')
return None, 'last'
else:
raise TypeError(f'unknown objective type {type(objective)}') | 5,333,430 |
def redirect_path_context_processor(request):
    """Context processor that generates the redirect_to target for
    localization in the language selector (the current path translated
    to the default language)."""
    redirect_to = translate_url(request.path, settings.LANGUAGE_CODE)
    return {'language_select_redirect_to': redirect_to}
def exc_info_hook(exc_type, value, tb):
    """An exception hook that starts IPdb automatically on error if in interactive mode.

    Intended for use as ``sys.excepthook``. In non-interactive terminal
    sessions it prints the traceback, waits for a keypress, then drops
    into the pudb post-mortem debugger.
    """
    if hasattr(sys, 'ps1') or not sys.stderr.isatty() or exc_type == KeyboardInterrupt:
        # we are in interactive mode, we don't have a tty-like
        # device,, or the user triggered a KeyboardInterrupt,
        # so we call the default hook
        sys.__excepthook__(exc_type, value, tb)
    else:
        import traceback
        # import ipdb
        import pudb
        # we are NOT in interactive mode, print the exception
        traceback.print_exception(exc_type, value, tb)
        print()
        # Pause so the traceback can be read before the debugger takes over.
        input("Press any key to start debugging...")
        # then start the debugger in post-mortem mode.
        # pdb.pm()  # deprecated
        # ipdb.post_mortem(tb)  # more modern
        pudb.post_mortem(tb)
def RightCenter(cell=None):
    """Take up horizontal and vertical space, and place the cell on the right center of it.

    :param cell: the cell to position (None for an empty filler).
    :return: the result of ``FillSpace`` with right/center alignment.
    """
    return FillSpace(cell, "right", "center")
def name_standard(name):
    """Return the standard version of the input word.

    The first character is upper-cased and the remainder lower-cased.

    :param name: the name that should be standardized
    :return: the standard form of the word ('' stays '')
    """
    # str.capitalize performs the same transformation as the previous
    # ``name[0].upper() + name[1:].lower()`` but, unlike it, does not raise
    # IndexError on an empty string.
    return name.capitalize()
def getChildElementsListWithTagAttribValueMatch(parent, tag, attrib, value):
    """
    Find all sub-elements (children) of *parent* having the specified tag
    and an attribute with the specified value, at any depth.
    Returns a list of matching child elements.
    Arguments:
    parent = parent element
    tag = tag value of the sub-element(child) to be searched for.
    attrib = attribute name for the sub-element with above given tag should have.
    value = attribute value that the sub-element with above given tag, attribute should have.
    """
    # Build the XPath predicate once, then delegate to ElementTree.
    xpath = ".//{0}[@{1}='{2}']".format(tag, attrib, value)
    return parent.findall(xpath)
def f_engine (air_volume, energy_MJ):
    """Ramjet (athodyd) engine.

    The incoming airflow enters the heater, where it expands,
    and is then ejected from the nozzle as a reaction jet.

    Returns a (max_thrust, working_mass, jet_speed, waste_energy_MJ) tuple.
    """
    # The working substance is atmospheric air:
    working_mass = air_volume * AIR_DENSITY
    # Split the energy into useful and waste parts:
    useful_energy_MJ = energy_MJ * ENGINE_USEFUL_ENERGY
    useless_energy_MJ = energy_MJ - useful_energy_MJ
    useful_energy_KJ = useful_energy_MJ * 1000
    # The useful energy goes into heating the air:
    working_mass_heat = AIR_HEAT_CAPACITY * useful_energy_KJ / working_mass
    # Adjust for the ambient air temperature and convert to the Kelvin scale:
    working_mass_heat = AIR_TEMPERATURE + working_mass_heat + KELVIN_SCALE
    # The pressure of the heated air increases:
    working_mass_pressure = f_heated_gas_pressure(working_mass_heat)
    # The air is fed into a de Laval nozzle, which gives the jet velocity:
    reactive_speed = f_de_laval_nozzle(working_mass_heat,
        working_mass_pressure, ATMOSPHERIC_PRESSURE)
    # Maximum thrust is the working mass multiplied by its velocity:
    max_engine_thrust = f_jet_force(working_mass, reactive_speed)
    # The waste heat also has to be dissipated somewhere:
    engine_output = (max_engine_thrust, working_mass, reactive_speed, useless_energy_MJ)
    return engine_output
def not_numbers():
    """Non-numbers for (i)count."""
    return [
        None,
        [1, 2],
        {-3, 4},
        (6, 9.7),
    ]
def get_agol_token():
    """requests and returns an ArcGIS Token for the pre-registered application.
    Client id and secrets are managed through the ArcGIS Developer's console.

    Returns the parsed JSON token response as a dict.
    """
    params = {
        'client_id': app.config['ESRI_APP_CLIENT_ID'],
        'client_secret': app.config['ESRI_APP_CLIENT_SECRET'],
        'grant_type': "client_credentials"
    }
    request = requests.get(
        'https://www.arcgis.com/sharing/oauth2/token',
        params=params
    )
    token = request.json()
    # NOTE(review): printing the full token may leak credentials into logs —
    # confirm this is acceptable.
    print("AGOL token acquired: {0}".format(token))
    return token
def native_python_local(ctx, host=False):
    """
    Run the native Python knative container locally

    With ``host=True`` the runner script executes directly on the host with
    the knative environment variables set; otherwise the pre-built docker
    image is run.
    """
    if host:
        working_dir = FUNC_DIR
        # Copy the environment so the caller's variables are preserved.
        env = copy(os.environ)
        env.update({
            "LOG_LEVEL": "debug",
            "FAASM_INVOKE_HOST": "0.0.0.0",
            "FAASM_INVOKE_PORT": "8080",
            "HOST_TYPE": "knative",
        })
        call("./run_knative_native.sh", cwd=working_dir, env=env, shell=True)
    else:
        img_name = "faasm/knative-native-python"
        _do_knative_native_local(img_name)
def default_validate(social_account):
    """
    Default function for ONESOCIAL_VALIDATE_FUNC. Does nothing.
    """
    return None
def _validate_prototype(key, prototype, protparents, visited):
"""
Run validation on a prototype, checking for inifinite regress.
"""
assert isinstance(prototype, dict)
if id(prototype) in visited:
raise RuntimeError("%s has infinite nesting of prototypes." % key or prototype)
visited.append(id(prototype))
protstrings = prototype.get("prototype")
if protstrings:
for protstring in make_iter(protstrings):
if key is not None and protstring == key:
raise RuntimeError("%s tries to prototype itself." % key or prototype)
protparent = protparents.get(protstring)
if not protparent:
raise RuntimeError("%s's prototype '%s' was not found." % (key or prototype, protstring))
_validate_prototype(protstring, protparent, protparents, visited) | 5,333,441 |
def test_script_with_debug_enabled(monkeypatch):
    """
    Test setting --debug flag

    Delegates the actual assertions to validate_script_debug_flag_enabled,
    passing a monkeypatch context so patches are undone on exit.
    """
    script = Script()
    with monkeypatch.context() as context:
        validate_script_debug_flag_enabled(
            script,
            context,
            expected_args=DEFAULT_ARGS.copy()
        )
def pull_early_late_by_stop(line_number,SWIFTLY_API_KEY, dateRange, timeRange):
    """
    Pulls from the Swiftly APIS to get OTP (on-time performance) by stop.
    Follow the docs: http://dashboard.goswift.ly/vta/api-guide/docs/otp

    Returns a DataFrame of per-stop OTP merged with direction names from the
    local line_table.csv, or None (after printing the raw response) when the
    API response lacks a 'data' key.
    """
    # Local lookup table mapping direction ids to human-readable names.
    line_table = pd.read_csv('line_table.csv')
    line_table.rename(columns={"DirNum":"direction_id","DirectionName":"DIRECTION_NAME"},inplace=True)
    line_table['direction_id'] = line_table['direction_id'].astype(str)
    headers = {'Authorization': SWIFTLY_API_KEY}
    payload = {'agency': 'vta', 'route': line_number, 'dateRange': dateRange,'timeRange': timeRange, 'onlyScheduleAdherenceStops':'True'}
    url = 'https://api.goswift.ly/otp/by-stop'
    r = requests.get(url, headers=headers, params=payload)
    try:
        swiftly_df = pd.DataFrame(r.json()['data'])
        swiftly_df.rename(columns={"stop_id":"STOP_ID"},inplace=True)
        swiftly_df = pd.merge(swiftly_df,line_table.query('lineabbr==%s'%line_number)[['direction_id','DIRECTION_NAME']])
        swiftly_df['STOP_ID'] = swiftly_df['STOP_ID'].astype(int)
        return swiftly_df
    except KeyError:
        # No 'data' in the response (e.g. an API error payload): dump it for
        # debugging; the function then implicitly returns None.
        print(r.json())
def create_build_job(user, project, config, code_reference):
    """Get or Create a build job based on the params.
    If a build job already exists, then we check if the build has already an image created.
    If the image does not exists, and the job is already done we force create a new job.

    Args:
        user: Acting user; used for ownership and audit records.
        project: Project the build job belongs to.
        config: Build configuration.
        code_reference: Code reference (commit) to build.

    Returns:
        tuple: (build_job, image_exists[bool], build_status[bool])
    """
    build_job, rebuild = BuildJob.create(
        user=user,
        project=project,
        config=config,
        code_reference=code_reference)
    if build_job.succeeded and not rebuild:
        # Check if image was built in less than an 6 hours
        return build_job, True, False
    if check_image(build_job=build_job):
        # Check if image exists already
        return build_job, True, False
    if build_job.is_done:
        # Job finished but no image exists: force a fresh build (nocache).
        build_job, _ = BuildJob.create(
            user=user,
            project=project,
            config=config,
            code_reference=code_reference,
            nocache=True)
    if not build_job.is_running:
        # We need to build the image first
        auditor.record(event_type=BUILD_JOB_STARTED_TRIGGERED,
                       instance=build_job,
                       actor_id=user.id,
                       actor_name=user.username)
        build_status = start_dockerizer(build_job=build_job)
    else:
        build_status = True
    return build_job, False, build_status | 5,333,444 |
def create(ranger_client: RangerClient, config: str):
    """
    Creates a new Apache Ranger service repository.

    Args:
        ranger_client: Client used to talk to the Ranger admin API.
        config: JSON-encoded service definition.

    Returns:
        The created service as returned by the Ranger API.
    """
    service_definition = json.loads(config)
    return ranger_client.create_service(service_definition)
def draw_border(img, border, col=255):
    """Draw a border on an image given the border coordinates.

    Args:
        img: Image to draw on (modified in place).
        border: (left, right, top, bottom) pixel coordinates.
        col: Line colour passed to OpenCV (default 255).
    """
    left, right, top, bottom = border
    # Four sides of the rectangle, each as a (start, end) point pair.
    sides = (
        ((left, top), (left, bottom)),
        ((left, top), (right, top)),
        ((left, bottom), (right, bottom)),
        ((right, top), (right, bottom)),
    )
    for start_pt, end_pt in sides:
        cv2.line(img, start_pt, end_pt, col, 2)
def populate_glue_catalogue_from_metadata(table_metadata, db_metadata, check_existence = True):
    """
    Take metadata and make requisite calls to AWS API using boto3.

    Args:
        table_metadata (dict): Table metadata; must contain 'table_name'.
        db_metadata (dict): Database metadata; must contain 'name' and
            'description'.
        check_existence (bool): When True, create the database if missing and
            drop any pre-existing table of the same name first.

    Returns:
        The glue create_table API response.
    """
    database_name = db_metadata["name"]
    table_name = table_metadata["table_name"]
    # Bug fix: removed `database_description = ["description"]`, a broken
    # (literal-list) and unused assignment; the real description is read
    # from db_metadata["description"] below.
    tbl_def = metadata_to_glue_table_definition(table_metadata, db_metadata)
    if check_existence:
        # Create the database only if it does not already exist.
        try:
            glue_client.get_database(Name=database_name)
        except glue_client.exceptions.EntityNotFoundException:
            overwrite_or_create_database(database_name, db_metadata["description"])
        # Drop any pre-existing table so create_table below cannot clash.
        try:
            glue_client.delete_table(DatabaseName=database_name, Name=table_name)
        except glue_client.exceptions.EntityNotFoundException:
            pass
    return glue_client.create_table(
        DatabaseName=database_name,
        TableInput=tbl_def)
def group_median_float64(*args, **kwargs): # real signature unknown
    """ Only aggregates on axis=0 """
    # Auto-generated stub (signature extracted from a compiled extension);
    # the real implementation lives in native code.
    pass | 5,333,448 |
def choose_komoot_tour_live():
    """
    Login with user credentials, download tour information,
    choose a tour, and download it. Can be passed to
    :func:`komoog.gpx.convert_tour_to_gpx_tracks`
    afterwards.

    Interactive: prints a numbered list of tour names and reads the chosen
    (1-based) index from stdin.

    Returns:
        The downloaded tour selected by the user.
    """
    tours, session = get_tours_and_session()
    for idx in range(len(tours)):
        print(f"({idx+1}) {tours[idx]['name']}")
    tour_id = int(input("Tour ID: "))
    # Convert from the displayed 1-based index to the 0-based list index.
    tour_id -= 1
    tour = get_tour(tours,tour_id,session)
    return tour | 5,333,449 |
def root():
    """
    The root stac page links to each collection (product) catalog.

    Builds the STAC landing page: endpoint metadata, links to the
    collections listing, the arrivals feed, the search endpoint, a
    self link, and one child link per known product.
    """
    return _stac_response(
        dict(
            **stac_endpoint_information(),
            links=[
                dict(
                    title="Collections",
                    description="All product collections",
                    rel="children",
                    type="application/json",
                    href=url_for(".collections"),
                ),
                dict(
                    title="Arrivals",
                    description="Most recently added items",
                    rel="child",
                    type="application/json",
                    href=url_for(".arrivals"),
                ),
                dict(
                    title="Item Search",
                    rel="search",
                    type="application/json",
                    href=url_for(".stac_search"),
                ),
                dict(rel="self", href=request.url),
                # Individual Product Collections
                *(
                    dict(
                        title=product.name,
                        description=product.definition.get("description"),
                        rel="child",
                        href=url_for(".collection", collection=product.name),
                    )
                    for product, product_summary in _model.get_products_with_summaries()
                ),
            ],
            conformsTo=[
                "https://api.stacspec.org/v1.0.0-beta.1/core",
                "https://api.stacspec.org/v1.0.0-beta.1/item-search",
            ],
        )
    ) | 5,333,450 |
def recursive_dict_of_lists(d, helper=None, prev_key=None):
    """
    Builds dictionary of lists by recursively traversing a JSON-like
    structure.

    Arguments:
        d (dict): JSON-like dictionary.
        prev_key (str): Prefix used to create dictionary keys like: prefix_key.
            Passed by recursive step, not intended to be used.
        helper (dict): In case d contains nested dictionaries, you can specify
            a helper dictionary with 'key' and 'value' keys to specify where to
            look for keys and values instead of recursive step. It helps with
            cases like: {'action': {'type': 'step', 'amount': 1}}, by passing
            {'key': 'type', 'value': 'amount'} as a helper you'd get
            {'action_type': [1]} as a result.
    """
    def _prefixed(name):
        # Keys produced deeper in the recursion are prefixed with the
        # parent key they were found under.
        return name if prev_key is None else f'{prev_key}_{name}'

    collected = {}
    # Helper short-circuit: when both helper fields are present at this
    # level, emit a single "<prefix>_<key>" -> [value] entry and stop.
    if helper is not None and helper['key'] in d.keys() and helper['value'] in d.keys():
        collected[_prefixed(helper['key'])] = [d[helper['value']]]
        return collected
    for field, value in d.items():
        if isinstance(value, dict):
            collected.update(recursive_dict_of_lists(value, helper=helper, prev_key=field))
        else:
            bucket = collected.setdefault(_prefixed(field), [])
            if isinstance(value, list):
                bucket.extend(value)
            else:
                bucket.append(value)
    return collected
def same_shape(shape1, shape2):
    """
    Checks if two shapes are the same

    Parameters
    ----------
    shape1 : tuple
        First shape
    shape2 : tuple
        Second shape

    Returns
    -------
    flag : bool
        True if both shapes are the same (same length and dimensions)
    """
    # Idiomatic replacement of the manual index loop; works for any mix of
    # tuples/lists (unlike `shape1 == shape2`, which is type-sensitive).
    return len(shape1) == len(shape2) and all(
        d1 == d2 for d1, d2 in zip(shape1, shape2)
    )
def get_reference():
    """Get DrugBank references.

    Returns the model wrapper for the drugbank.Reference table via the
    shared _get_model helper.
    """
    return _get_model(drugbank.Reference) | 5,333,453 |
def consumer(cond):
    """wait for the condition and use the resource

    Blocks inside the condition's lock until another thread calls
    notify()/notify_all() on `cond`.

    Args:
        cond: threading.Condition guarding the shared resource.
    """
    logging.debug("Starting consumer thread")
    with cond:
        # wait() releases the lock while blocked and re-acquires it on wakeup.
        cond.wait()
    logging.debug("Resource is available to consumer") | 5,333,454 |
def set_parameter_requires_grad(model, feature_extracting):
    """
    Set if parameters require grad.

    If feature_extracting is False the model is left untouched (fine-tuning:
    all parameters keep updating). If True, every existing parameter is
    frozen so only layers added afterwards will be trained.

    Args:
        model: Model whose parameters may be frozen.
        feature_extracting: True to freeze all current parameters.
    """
    if not feature_extracting:
        return
    for weight in model.parameters():
        weight.requires_grad = False
def __parse_search_results(collected_results, raise_exception_finally):
    """
    Parses locally the results collected from the __mapped_pattern_matching:
    - list of ( scores, matched_intervals, unprocessed_interval,
                rank_superwindow, <meta info>, <error_info>)

    This parsing
    - evaluates the errors: logs and raise IkatsException if raise_exception_finally is True
    - evaluates, encodes the unprocessed intervals

    :param collected_results: the results collected from the __mapped_pattern_matching
    :type collected_results: list
    :param raise_exception_finally: flag is True when it is demanded to raise
        IkatsException when errors exist in the collected fields <error_info>
    :type raise_exception_finally: bool
    :return: unprocessed_info: dict stocking unprocessed intervals [sd, ed] by superwindow rank
    :rtype: dict
    """
    # Bug fix: `collections.Iterable` was removed in Python 3.10; the ABC
    # lives in collections.abc.
    import collections.abc

    # On the driver/manager
    # - driver: local computing of the re-evaluated intervals:
    #   => set unprocessed_info
    #   => logs errors
    unprocessed_info = {}
    error_exists = False
    for status_cell in collected_results:
        # logs error (when needed); the error info is the last field.
        error = status_cell[-1]
        cell_has__errors = False
        if isinstance(error, collections.abc.Iterable) and len(error) > 0:
            cell_has__errors = True
            for error_item in error:
                LOGGER.error(error_item)
        error_exists = error_exists or cell_has__errors
        # unprocessed_info[ <rank_superwindow> ] = <unprocessed_interval>
        unprocessed_info[status_cell[3]] = status_cell[2]
        LOGGER.debug("Unprocessed_info[rank=%s]=%s", status_cell[3], status_cell[2])
    if raise_exception_finally and error_exists:
        raise IkatsException("At least one error has been collected: see driver/manager logs")
    return unprocessed_info
def decode_matrix_fbs(fbs):
    """
    Given an FBS-encoded Matrix, return a Pandas DataFrame the contains the data and indices.

    Raises:
        ValueError: on row indexing (unsupported), column-count mismatch or
            column-length mismatch.
        KeyError: when decoded column indices are not unique.
    """
    matrix = Matrix.Matrix.GetRootAsMatrix(fbs, 0)
    n_rows = matrix.NRows()
    n_cols = matrix.NCols()
    if n_rows == 0 or n_cols == 0:
        # Empty matrix: nothing to decode.
        return pd.DataFrame()
    if matrix.RowIndexType() is not TypedArray.TypedArray.NONE:
        raise ValueError("row indexing not supported for FBS Matrix")
    columns_length = matrix.ColumnsLength()
    columns_index = deserialize_typed_array((matrix.ColIndexType(), matrix.ColIndex()))
    if columns_index is None:
        # No explicit index encoded: fall back to positional column names.
        columns_index = range(0, n_cols)
    # sanity checks
    if len(columns_index) != n_cols or columns_length != n_cols:
        raise ValueError("FBS column count does not match number of columns in underlying matrix")
    columns_data = {}
    columns_type = {}
    for col_idx in range(0, columns_length):
        col = matrix.Columns(col_idx)
        tarr = (col.UType(), col.U())
        data = deserialize_typed_array(tarr)
        columns_data[columns_index[col_idx]] = data
        if len(data) != n_rows:
            raise ValueError("FBS column length does not match number of rows")
        if col.UType() is TypedArray.TypedArray.JSONEncodedArray:
            # JSON-encoded columns are treated as pandas categoricals.
            columns_type[columns_index[col_idx]] = "category"
    df = pd.DataFrame.from_dict(data=columns_data).astype(columns_type, copy=False)
    # more sanity checks
    if not df.columns.is_unique or len(df.columns) != n_cols:
        raise KeyError("FBS column indices are not unique")
    return df | 5,333,457 |
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Track states and offer events for sensors.

    Registers the sensor EntityComponent under hass.data[DOMAIN] and
    delegates platform setup to it. Always returns True (setup success).
    """
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL
    )
    await component.async_setup(config)
    return True | 5,333,458 |
def apply_dof_transformation_to_transpose_hexahedron(
    entity_transformations: _Dict[str, _npt.NDArray[_np.float64]], entity_dofs: _Dict[str, _npt.NDArray[_np.int32]],
    data: _np.array, cell_info: int
):
    """Apply dof transformations to some transposed data on a hexahedron.

    Thin wrapper around apply_dof_transformation_to_transpose with the
    hexahedron's fixed topology: tdim 3, 12 edges and 6 quadrilateral faces.

    Args:
        entity_transformations: The DOF transformations for each entity.
        entity_dofs: The number of DOFs on each entity.
        data: The data. This will be changed by this function.
        cell_info: An integer representing the orientations of the subentities of the cell.
    """
    apply_dof_transformation_to_transpose(3, 12, 6, entity_transformations, entity_dofs,
                                          data, cell_info, _List(["quadrilateral"] * 6)) | 5,333,459 |
def _all_lists_equal_lenght(values: t.List[t.List[str]]) -> bool:
    """
    Tests to see if all the lengths of all the elements are the same.

    An empty outer list counts as "all equal" (returns True).
    """
    # Collapsing to a set of lengths leaves at most one entry iff all match.
    return len({len(inner) for inner in values}) <= 1
def receive_excel(send_json, token):
    """
    Use the request_id to download the result, you shall see the file
    in the same directory.

    @param send_json: a json format dict; the request id is read from
        send_json['result'][0]['request_id']
    @param token: your access_token
    @raises RuntimeError: when the result request does not return HTTP 200
    """
    request_url = 'https://aip.baidubce.com/rest/2.0/solution/v1/form_ocr/get_request_result'
    request_url = request_url + "?access_token=" + token
    headers = {'content-type':'application/x-www-form-urlencoded'}
    params = {'request_id': send_json['result'][0]['request_id']} # the send_json has an odd structure
    response = requests.post(request_url, data=params, headers=headers)
    if response.status_code != 200:
        # Bug fix: the original `raise('Download failed.')` raised a
        # TypeError (strings are not exceptions); raise a real exception.
        raise RuntimeError('Download failed.')
    print(response.json())
    # use regex to get the URI of the generated spreadsheet
    temp = re.findall(r'[\S]+.xls', response.json()['result']['result_data'])[0]
    # NOTE(review): this shells out with a server-supplied URL; consider
    # downloading via `requests` to avoid depending on curl and the shell.
    os.system('curl -O ' + temp)  # use curl -O to download the file
def test_create_dir_exist_but_file(mock_exists, mock_isfile, mock_makedirs, mock_msg):
    """Test error if path exist but is a file.

    With the path reported as existing AND being a file, create_dir must
    emit exactly one red error message with exit code 1.
    """
    mock_exists.return_value = True
    mock_isfile.return_value = True
    create_dir(dir_name)
    mock_msg.assert_called_once_with(
        "red", "Error: path {} exists and is not a directory".format(dir_name), 1
    ) | 5,333,462 |
def wind_shear(
    shear: str, unit_alt: str = "ft", unit_wind: str = "kt", spoken: bool = False
) -> str:
    """Translate wind shear into a readable string

    Ex: Wind shear 2000ft from 140 at 30kt

    Returns "" for empty input or input without the WS marker / separator.
    """
    if not shear or "WS" not in shear or "/" not in shear:
        return ""
    # Strip the leading "WS" and the trailing wind unit, e.g.
    # "WS020/14030KT" -> ["020", "14030"]
    parts = shear[2:].rstrip(unit_wind.upper()).split("/")
    altitude, wind = parts[0], parts[1]
    direction = wind[:3]
    if spoken:
        direction = core.spoken_number(direction, True)
    speed = wind[3:]
    return f"Wind shear {int(altitude) * 100}{unit_alt} from {direction} at {speed}{unit_wind}"
def get_security_profiles_command(security_profile: str = None):
    """
    Get information about profiles.

    Queries the firewall/Panorama for security profiles (all of them, or a
    single named one), then renders each supported profile family
    (anti-spyware, antivirus, file blocking, vulnerability, data filtering,
    URL filtering, WildFire) as markdown and stores it in context.

    Raises:
        Exception: when uncommitted (dirty) items are found in the config.
    """
    # Query a single profile subtree or the whole profiles container.
    if security_profile:
        xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
    else:
        xpath = f'{XPATH_RULEBASE}profiles'
    result = get_security_profile(xpath)
    if security_profile:
        security_profiles = result.get('response', {}).get('result', {})
    else:
        security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
    # '@dirtyId' marks uncommitted candidate config - refuse to report it.
    if '@dirtyId' in security_profiles:
        demisto.debug(f'Found uncommitted item:\n{security_profiles}')
        raise Exception('Please commit the instance prior to getting the security profiles.')
    human_readable = ''
    context = {}
    # Each family below handles both shapes the API returns: a list of
    # profile entries, or a single entry dict.
    if 'spyware' in security_profiles and security_profiles['spyware'] is not None:
        spyware_content = []
        profiles = security_profiles.get('spyware', {}).get('entry', {})
        if isinstance(profiles, list):
            for profile in profiles:
                rules = profile.get('rules', {}).get('entry', [])
                spyware_rules = prettify_profiles_rules(rules)
                spyware_content.append({
                    'Name': profile['@name'],
                    'Rules': spyware_rules
                })
        else:
            rules = profiles.get('rules', {}).get('entry', [])
            spyware_rules = prettify_profiles_rules(rules)
            spyware_content = [{
                'Name': profiles['@name'],
                'Rules': spyware_rules
            }]
        human_readable = tableToMarkdown('Anti Spyware Profiles', spyware_content)
        context.update({"Panorama.Spyware(val.Name == obj.Name)": spyware_content})
    if 'virus' in security_profiles and security_profiles['virus'] is not None:
        virus_content = []
        profiles = security_profiles.get('virus', {}).get('entry', [])
        if isinstance(profiles, list):
            for profile in profiles:
                # Antivirus rules live under 'decoder', not 'rules'.
                rules = profile.get('decoder', {}).get('entry', [])
                antivirus_rules = prettify_profiles_rules(rules)
                virus_content.append({
                    'Name': profile['@name'],
                    'Decoder': antivirus_rules
                })
        else:
            rules = profiles.get('decoder', {}).get('entry', [])
            antivirus_rules = prettify_profiles_rules(rules)
            virus_content = [{
                'Name': profiles['@name'],
                'Rules': antivirus_rules
            }]
        human_readable += tableToMarkdown('Antivirus Profiles', virus_content, headers=['Name', 'Decoder', 'Rules'],
                                          removeNull=True)
        context.update({"Panorama.Antivirus(val.Name == obj.Name)": virus_content})
    if 'file-blocking' in security_profiles and security_profiles['file-blocking'] is not None:
        file_blocking_content = []
        profiles = security_profiles.get('file-blocking', {}).get('entry', {})
        if isinstance(profiles, list):
            for profile in profiles:
                rules = profile.get('rules', {}).get('entry', [])
                file_blocking_rules = prettify_profiles_rules(rules)
                file_blocking_content.append({
                    'Name': profile['@name'],
                    'Rules': file_blocking_rules
                })
        else:
            rules = profiles.get('rules', {}).get('entry', [])
            file_blocking_rules = prettify_profiles_rules(rules)
            file_blocking_content = [{
                'Name': profiles['@name'],
                'Rules': file_blocking_rules
            }]
        human_readable += tableToMarkdown('File Blocking Profiles', file_blocking_content)
        context.update({"Panorama.FileBlocking(val.Name == obj.Name)": file_blocking_content})
    if 'vulnerability' in security_profiles and security_profiles['vulnerability'] is not None:
        vulnerability_content = []
        profiles = security_profiles.get('vulnerability', {}).get('entry', {})
        if isinstance(profiles, list):
            for profile in profiles:
                rules = profile.get('rules', {}).get('entry', [])
                vulnerability_rules = prettify_profiles_rules(rules)
                vulnerability_content.append({
                    'Name': profile['@name'],
                    'Rules': vulnerability_rules
                })
        else:
            rules = profiles.get('rules', {}).get('entry', [])
            vulnerability_rules = prettify_profiles_rules(rules)
            vulnerability_content = [{
                'Name': profiles['@name'],
                'Rules': vulnerability_rules
            }]
        human_readable += tableToMarkdown('Vulnerability Protection Profiles', vulnerability_content)
        context.update({"Panorama.Vulnerability(val.Name == obj.Name)": vulnerability_content})
    if 'data-filtering' in security_profiles and security_profiles['data-filtering'] is not None:
        data_filtering_content = []
        profiles = security_profiles.get('data-filtering', {}).get('entry', {})
        if isinstance(profiles, list):
            for profile in profiles:
                rules = profile.get('rules', {}).get('entry', [])
                data_filtering_rules = prettify_data_filtering_rules(rules)
                data_filtering_content.append({
                    'Name': profile['@name'],
                    'Rules': data_filtering_rules
                })
        else:
            rules = profiles.get('rules', {}).get('entry', [])
            data_filtering_rules = prettify_data_filtering_rules(rules)
            data_filtering_content = [{
                'Name': profiles['@name'],
                'Rules': data_filtering_rules
            }]
        human_readable += tableToMarkdown('Data Filtering Profiles', data_filtering_content)
        context.update({"Panorama.DataFiltering(val.Name == obj.Name)": data_filtering_content})
    if 'url-filtering' in security_profiles and security_profiles['url-filtering'] is not None:
        url_filtering_content = []
        profiles = security_profiles.get('url-filtering', {}).get('entry', {})
        if isinstance(profiles, list):
            for profile in profiles:
                # URL filtering is prettified from the whole profile entry.
                url_filtering_rules = prettify_get_url_filter(profile)
                url_filtering_content.append({
                    'Name': profile['@name'],
                    'Rules': url_filtering_rules
                })
        else:
            url_filtering_rules = prettify_get_url_filter(profiles)
            url_filtering_content = [{
                'Name': profiles['@name'],
                'Rules': url_filtering_rules
            }]
        human_readable += tableToMarkdown('URL Filtering Profiles', url_filtering_content)
        context.update({'Panorama.URLFilter(val.Name == obj.Name)': url_filtering_content})
    if 'wildfire-analysis' in security_profiles and security_profiles['wildfire-analysis'] is not None:
        wildfire_analysis_content = []
        profiles = security_profiles.get('wildfire-analysis', {}).get('entry', [])
        if isinstance(profiles, list):
            for profile in profiles:
                rules = profile.get('rules', {}).get('entry', [])
                wildfire_rules = prettify_wildfire_rules(rules)
                wildfire_analysis_content.append({
                    'Name': profile['@name'],
                    'Rules': wildfire_rules
                })
        else:
            rules = profiles.get('rules', {}).get('entry', [])
            wildfire_rules = prettify_wildfire_rules(rules)
            wildfire_analysis_content = [{
                'Name': profiles['@name'],
                'Rules': wildfire_rules
            }]
        human_readable += tableToMarkdown('WildFire Profiles', wildfire_analysis_content)
        context.update({"Panorama.WildFire(val.Name == obj.Name)": wildfire_analysis_content})
    return_results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': context
    }) | 5,333,464 |
def root_hash(hashes):
    """
    Compute the root hash of a merkle tree with the given list of leaf hashes.

    The leaf count must be a power of two; each level pairs adjacent hashes
    and hashes their concatenation until a single root remains.
    """
    # the number of hashes must be a power of two
    assert len(hashes) & (len(hashes) - 1) == 0
    level = list(hashes)
    while len(level) > 1:
        pair_iter = iter(level)
        level = [sha256(left + right).digest() for left, right in zip(pair_iter, pair_iter)]
    return level[0]
def DumpStr(obj, pretty=False, newline=None, **json_dumps_kwargs):
    """Serialize a Python object to a JSON string.

    Args:
        obj: a Python object to be serialized.
        pretty: True to output in human-friendly pretty format.
        newline: True to append a newline in the end of result, default to the
            previous argument ``pretty``.
        json_dumps_kwargs: Any allowable arguments to json.dumps.

    Returns:
        The serialized JSON string.
    """
    if newline is None:
        newline = pretty
    # Pretty output: sorted keys, 2-space indent, no trailing spaces.
    dump_args = dict(indent=2, separators=(',', ': '), sort_keys=True) if pretty else {}
    # Explicit caller arguments win over the pretty defaults.
    dump_args.update(json_dumps_kwargs)
    text = json.dumps(obj, **dump_args)
    return text + '\n' if newline else text
def test_packer_binary_file(host):
    """
    Tests if packer binary is file type.

    Uses the testinfra `host` fixture to assert that PACKAGE_BINARY exists
    as a regular file on the target host.
    """
    assert host.file(PACKAGE_BINARY).is_file | 5,333,467 |
def snake_case(s: str):
    """
    Transform into a lower case string with underscores between words.

    Delegates to the shared _change_case helper with '_' as the word
    separator and str.lower as the case transform.

    Parameters
    ----------
    s : str
        Original string to transform.

    Returns
    -------
    Transformed string.
    """
    return _change_case(s, '_', str.lower) | 5,333,468 |
def multiple_workers_thread(worker_fn, queue_capacity_input=1000, queue_capacity_output=1000, n_worker=3):
    """
    Start n_worker daemonless threads running worker_fn, all sharing one
    bounded input queue and one bounded output queue.

    NOTE: this is Python 2 code (`Queue` module, `print` statement).

    :param worker_fn: lambda (tid, queue_input, queue_output): pass
    :param queue_capacity_input: max size of the shared input queue
    :param queue_capacity_output: max size of the shared output queue
    :param n_worker: number of worker threads to start
    :return: (queue_input, queue_output, threads)
    """
    threads = []
    queue_input = Queue.Queue(queue_capacity_input)
    queue_output = Queue.Queue(queue_capacity_output)
    for i in range(n_worker):
        # Each worker receives its thread index plus both shared queues.
        t = threading.Thread(target=worker_fn, args=(i, queue_input, queue_output))
        threads.append(t)
        t.start()
    print 'multiple_workers_thread: start %s sub-processes' % n_worker
    return queue_input, queue_output, threads | 5,333,469 |
def reorder(list_1: List[Any]) -> List[Any]:
    """Return a new list containing the elements of *list_1* in sorted order.

    The input list is left unmodified. Replaces the previous hand-rolled
    O(n^2) insertion sort (which also re-scanned the list with
    ``list.index`` on every insertion) with the built-in stable Timsort.
    """
    return sorted(list_1)
def is_extended_markdown(view):
    """True if the view contains 'Markdown Extended'
    syntax'ed text.

    Checks the Sublime Text view's assigned syntax file name.
    """
    return view.settings().get("syntax").endswith(
        "Markdown Extended.sublime-syntax") | 5,333,471 |
def _generate_graph(rule_dict: Dict[int, List[PartRule]], upper_bound: int) -> Any:
    """
    Create a new graph from the VRG at random.

    Repeatedly fires grammar rules on randomly chosen non-terminal nodes,
    replacing each with its rule's RHS subgraph and rewiring the broken
    boundary edges, until no non-terminals remain.

    Returns None if the nodes in generated graph exceeds upper_bound.

    :param rule_dict: mapping LHS label -> candidate RHS rules.
    :param upper_bound: early-stop limit on the generated graph's order.
    :return: (newly generated graph, list of fired rule ids), or (None, None)
        when the upper bound is exceeded.
    """
    node_counter = 1
    new_g = LightMultiGraph()
    new_g.add_node(0, label=0)
    non_terminals = {0}
    rule_ordering = []  # list of rule ids in the order they were fired
    while len(non_terminals) > 0:   # continue until no more non-terminal nodes
        if new_g.order() > upper_bound:  # early stopping
            return None, None
        node_sample = random.sample(non_terminals, 1)[0]  # choose a non terminal node at random
        lhs = new_g.nodes[node_sample]['label']
        rhs_candidates = rule_dict[lhs]
        if len(rhs_candidates) == 1:
            rhs = rhs_candidates[0]
        else:
            # Sample an RHS proportionally to how often the rule was seen.
            weights = np.array([rule.frequency for rule in rhs_candidates])
            weights = weights / np.sum(weights)   # normalize into probabilities
            idx = int(np.random.choice(range(len(rhs_candidates)), size=1, p=weights))  # pick based on probability
            rhs = rhs_candidates[idx]
        logging.debug(f'firing rule {rhs.id}, selecting node {node_sample} with label: {lhs}')
        rule_ordering.append(rhs.id)
        broken_edges = find_boundary_edges(new_g, {node_sample})
        assert len(broken_edges) == lhs
        new_g.remove_node(node_sample)
        non_terminals.remove(node_sample)
        nodes = {}
        for n, d in rhs.graph.nodes(data=True):  # all the nodes are internal
            new_node = node_counter
            nodes[n] = new_node
            label = None
            if 'label' in d:  # if it's a new non-terminal add it to the set of non-terminals
                non_terminals.add(new_node)
                label = d['label']
            node_color = None
            if 'node_colors' in d.keys():
                node_color = random.sample(d['node_colors'], 1)[0]
            # Only pass the attributes that actually exist on the RHS node.
            if label is None and node_color is None:
                new_g.add_node(new_node, b_deg=d['b_deg'])
            elif label is not None and node_color is None:
                new_g.add_node(new_node, b_deg=d['b_deg'], label=label)
            elif label is None and node_color is not None:
                new_g.add_node(new_node, b_deg=d['b_deg'], node_color=node_color)
            else:
                new_g.add_node(new_node, b_deg=d['b_deg'], label=label, node_color=node_color)
            node_counter += 1
        # randomly assign broken edges to boundary edges
        random.shuffle(broken_edges)
        # randomly joining the new boundary edges from the RHS to the rest of the graph - uniformly at random
        for n, d in rhs.graph.nodes(data=True):
            num_boundary_edges = d['b_deg']
            if num_boundary_edges == 0:  # there are no boundary edges incident to that node
                continue
            assert len(broken_edges) >= num_boundary_edges
            edge_candidates = broken_edges[:num_boundary_edges]  # picking the first batch of broken edges
            broken_edges = broken_edges[num_boundary_edges:]  # removing them from future consideration
            for e in edge_candidates:  # each edge is either (node_sample, v) or (u, node_sample)
                if len(e) == 2:
                    u, v = e
                else:
                    u, v, d = e
                # Replace whichever endpoint was the removed non-terminal.
                if u == node_sample:
                    u = nodes[n]
                else:
                    v = nodes[n]
                logging.debug(f'adding broken edge ({u}, {v})')
                if len(e) == 2:
                    new_g.add_edge(u, v)
                else:
                    new_g.add_edge(u, v, attr_dict=d)
        # adding the rhs to the new graph
        for u, v, d in rhs.graph.edges(data=True):
            #edge_multiplicity = rhs.graph[u][v]['weight']  #
            edge_multiplicity = d['weight']
            if 'edge_colors' in d.keys():
                edge_color = random.sample(d['edge_colors'], 1)[0]
                new_g.add_edge(nodes[u], nodes[v], weight=edge_multiplicity, edge_color=edge_color)
            else:
                new_g.add_edge(nodes[u], nodes[v], weight=edge_multiplicity)
            logging.debug(f'adding RHS internal edge ({nodes[u]}, {nodes[v]}) wt: {edge_multiplicity}')
    return new_g, rule_ordering | 5,333,472 |
def match_command_to_alias(command, aliases, match_multiple=False):
    """
    Match the text against an action and return the action reference.

    For each alias, every candidate format string is tried against the
    command text; formats whose parameter extraction succeeds are collected.

    :param command: raw command text entered by the user.
    :param aliases: alias objects to match against.
    :param match_multiple: forwarded to list_format_strings_from_aliases.
    :return: list of matching format dicts (may be empty).
    """
    results = []
    for alias in aliases:
        formats = list_format_strings_from_aliases([alias], match_multiple)
        for format_ in formats:
            try:
                extract_parameters(format_str=format_['representation'],
                                   param_stream=command)
            except ParseException:
                # This format does not match the command; try the next one.
                continue
            results.append(format_)
    return results | 5,333,473 |
def dan_acf(x, axis=0, fast=False):
    """
    DFM's acf function
    Estimate the autocorrelation function of a time series using the FFT.

    :param x:
        The time series. If multidimensional, set the time axis using the
        ``axis`` keyword argument and the function will be computed for every
        other axis.
    :param axis: (optional)
        The time axis of ``x``. Assumed to be the first axis if not specified.
    :param fast: (optional)
        If ``True``, only use the largest ``2^n`` entries for efficiency.
        (default: False)
    """
    x = np.atleast_1d(x)
    m = [slice(None), ] * len(x.shape)
    # For computational efficiency, crop the chain to the largest power of
    # two if requested.
    if fast:
        n = int(2**np.floor(np.log2(x.shape[axis])))
        m[axis] = slice(0, n)
        # Bug fix: the crop was dropped before (`x = x`), so fast=True had
        # no effect; actually slice the array down to 2^n entries.
        x = x[tuple(m)]
    else:
        n = x.shape[axis]
    # Compute the FFT and then (from that) the auto-correlation function.
    f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)
    m[axis] = slice(0, n)
    # Index with a tuple: indexing with a list of slices was deprecated and
    # then removed from NumPy.
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    # Normalize by the zero-lag value so acf[0] == 1 along the time axis.
    return acf / acf[tuple(m)]
def set_selenium_local_session(proxy_address,
                               proxy_port,
                               proxy_username,
                               proxy_password,
                               proxy_chrome_extension,
                               headless_browser,
                               use_firefox,
                               browser_profile_path,
                               disable_image_load,
                               page_delay,
                               logger):
    """Starts local session for a selenium server.
    Default case scenario.

    Configures either Firefox or Chrome (headless, proxy, profile, image
    loading, language) and returns the driver.

    Returns:
        tuple: (browser, err_msg) - browser is the webdriver (or None on
        Chrome startup failure); err_msg is '' on success.
    """
    browser = None
    err_msg = ''
    if use_firefox:
        firefox_options = Firefox_Options()
        if headless_browser:
            firefox_options.add_argument('-headless')
        if browser_profile_path is not None:
            firefox_profile = webdriver.FirefoxProfile(
                browser_profile_path)
        else:
            firefox_profile = webdriver.FirefoxProfile()
        # set English language
        firefox_profile.set_preference('intl.accept_languages', 'en')
        if disable_image_load:
            # permissions.default.image = 2: Disable images load,
            # this setting can improve pageload & save bandwidth
            firefox_profile.set_preference('permissions.default.image', 2)
        if proxy_address and proxy_port:
            # network.proxy.type = 1: manual proxy configuration
            firefox_profile.set_preference('network.proxy.type', 1)
            firefox_profile.set_preference('network.proxy.http',
                                           proxy_address)
            firefox_profile.set_preference('network.proxy.http_port',
                                           proxy_port)
            firefox_profile.set_preference('network.proxy.ssl',
                                           proxy_address)
            firefox_profile.set_preference('network.proxy.ssl_port',
                                           proxy_port)
        browser = webdriver.Firefox(firefox_profile=firefox_profile,
                                    options=firefox_options)
        # converts to custom browser
        # browser = convert_selenium_browser(browser)
        # authenticate with popup alert window
        if (proxy_username and proxy_password):
            proxy_authentication(browser,
                                 logger,
                                 proxy_username,
                                 proxy_password)
    else:
        chromedriver_location = get_chromedriver_location()
        chrome_options = Options()
        chrome_options.add_argument('--mute-audio')
        chrome_options.add_argument('--dns-prefetch-disable')
        chrome_options.add_argument('--lang=en-US')
        chrome_options.add_argument('--disable-setuid-sandbox')
        chrome_options.add_argument('--no-sandbox')
        # this option implements Chrome Headless, a new (late 2017)
        # GUI-less browser. chromedriver 2.9 and above required
        if headless_browser:
            chrome_options.add_argument('--headless')
            if disable_image_load:
                chrome_options.add_argument(
                    '--blink-settings=imagesEnabled=false')
            # replaces browser User Agent from "HeadlessChrome".
            user_agent = "Chrome"
            chrome_options.add_argument('user-agent={user_agent}'
                                        .format(user_agent=user_agent))
        capabilities = DesiredCapabilities.CHROME
        # Proxy for chrome
        if proxy_address and proxy_port:
            prox = Proxy()
            proxy = ":".join([proxy_address, str(proxy_port)])
            if headless_browser:
                chrome_options.add_argument(
                    '--proxy-server=http://{}'.format(proxy))
            else:
                prox.proxy_type = ProxyType.MANUAL
                prox.http_proxy = proxy
                prox.socks_proxy = proxy
                prox.ssl_proxy = proxy
                prox.add_to_capabilities(capabilities)
        # add proxy extension
        if proxy_chrome_extension and not headless_browser:
            chrome_options.add_extension(proxy_chrome_extension)
        # using saved profile for chrome
        if browser_profile_path is not None:
            chrome_options.add_argument(
                'user-data-dir={}'.format(browser_profile_path))
        chrome_prefs = {
            'intl.accept_languages': 'en-US',
        }
        if disable_image_load:
            chrome_prefs['profile.managed_default_content_settings.images'] = 2
        chrome_options.add_experimental_option('prefs', chrome_prefs)
        try:
            browser = webdriver.Chrome(chromedriver_location,
                                       desired_capabilities=capabilities,
                                       chrome_options=chrome_options)
            # gets custom instance
            # browser = convert_selenium_browser(browser)
        except WebDriverException as exc:
            logger.exception(exc)
            err_msg = 'ensure chromedriver is installed at {}'.format(
                Settings.chromedriver_location)
            return browser, err_msg
        # prevent: Message: unknown error: call function result missing 'value'
        matches = re.match(r'^(\d+\.\d+)',
                           browser.capabilities['chrome'][
                               'chromedriverVersion'])
        if float(matches.groups()[0]) < Settings.chromedriver_min_version:
            err_msg = 'chromedriver {} is not supported, expects {}+'.format(
                float(matches.groups()[0]), Settings.chromedriver_min_version)
            return browser, err_msg
    browser.implicitly_wait(page_delay)
    message = "Session started!"
    highlight_print('browser', message, "initialization", "info", logger)
    print('')
    return browser, err_msg | 5,333,475 |
def match():
    """Show a timer of the match length and an upload button.

    Reads the two player names from the POSTed form, starts video
    recording to a timestamped .h264 file via the global GAMESAVER, and
    renders the match page.
    """
    player_west = request.form['player_west']
    player_east = request.form['player_east']
    start_time = dt.datetime.now().strftime('%Y%m%d%H%M')
    # generate filename to save video to
    filename = '{}_vs_{}_{}.h264'.format(player_west, player_east, start_time)
    GAMESAVER.filename = filename
    GAMESAVER.start_recording(filename)
    return render_template('match.html',
                           player_west=player_west,
                           player_east=player_east,
                           filename=filename) | 5,333,476 |
def timeit_helper():
    """Part A: Obtain some profiling measurements using timeit.

    Placeholder: not yet implemented.
    """
    # YOUR CODE GOES HERE
    pass | 5,333,477 |
def sync_db(domain, username, restore_as=None):
    """Call Formplayer API to force a sync for a user.

    Access is checked for both the acting user and, if given, the
    restore-as user before posting the sync request.

    Raises:
        FormplayerResponseException: when Formplayer does not accept the
            sync request.
    """
    user = check_user_access(domain, username, allow_enterprise=True)
    if restore_as:
        check_user_access(domain, restore_as)
    use_livequery = FORMPLAYER_USE_LIVEQUERY.enabled(domain)
    data = {
        'action': 'sync-db',
        'username': username,
        'domain': domain,
        'restoreAs': restore_as,
        'useLiveQuery': use_livequery,
    }
    response_json = _post_data(data, user.user_id)
    if not response_json.get("status") == "accepted":
        raise FormplayerResponseException(response_json) | 5,333,478 |
def start_end_epoch(graph):
    """
    Start and end epoch of graph.

    Scans every parallel-edge attribute dict for the earliest
    'stime_epoch_secs' and the latest 'etime_epoch_secs'.

    :return: (start epoch, end epoch); (0, 0) for an edgeless graph.
    """
    earliest = 0
    latest = 0
    for src, dst in graph.edges_iter():
        for attrs in graph[src][dst].values():
            latest = max(latest, attrs['etime_epoch_secs'])
            # 0 means "not seen yet"; afterwards keep the minimum start.
            if earliest == 0:
                earliest = attrs['stime_epoch_secs']
            else:
                earliest = min(earliest, attrs['stime_epoch_secs'])
    return (earliest, latest)
def test_loop(predefined_ll_short):
    """Test before and after loop created on list.

    Verifies has_loop() is False on the fixture list, then manually wires
    the 5th node's _next back to the head and checks has_loop() is True.
    """
    a = predefined_ll_short
    assert a.has_loop() is False
    # Create a cycle: last visible node points back to the head.
    a.head._next._next._next._next = predefined_ll_short.head
    assert a.has_loop() is True | 5,333,480 |
def get_all_metadata_from_users():
    """
    Collects all unique tracks available in the database and retrieves their metadata.
    Intended to be used with the data from the first experiment.
    :return:
    """
    tracks = {}
    track_set = set()
    # Union the tracks of every session user; later users overwrite earlier
    # entries for the same track id, matching dict-merge semantics.
    for user in SessionUser.objects:
        user_tracks = user.get_experiment_1_tracks()
        track_set.update(user_tracks.keys())
        tracks.update(user_tracks)
    total_tracks = len(track_set)
    for track_counter, track_id in enumerate(track_set, start=1):
        TrackData.get_track(tracks[track_id])
        print(f"Track {track_counter} out of {total_tracks}")
def distance_metric(seg_A, seg_B, dx):
    """
    Measure the distance errors between the contours of two segmentations.

    The manual contours are drawn on 2D slices, so we calculate the
    contour-to-contour distance for each slice and average across slices.

    Parameters
    ----------
    seg_A, seg_B : 3D label arrays of identical shape (X, Y, Z); label 1
        marks the structure of interest on each slice.
    dx : pixel spacing, used to convert pixel distances to physical units.

    Returns
    -------
    (mean_md, mean_hd) : mean symmetric contour distance and Hausdorff
        distance averaged over all slices where both contours exist, or
        (None, None) when no slice has both contours.
    """
    def _contour_points(mask):
        # Stack the points of all external contours of the label-1 region
        # into a single (N, 2) array of (x, y) pixel coordinates.
        # NOTE(review): 3-value return is the OpenCV 3.x findContours API.
        _, contours, _ = cv2.findContours(cv2.inRange(mask, 1, 1),
                                          cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_NONE)
        return np.vstack(contours)[:, 0, :]

    table_md = []
    table_hd = []
    X, Y, Z = seg_A.shape
    for z in range(Z):
        # Binary mask at this slice
        slice_A = seg_A[:, :, z].astype(np.uint8)
        slice_B = seg_B[:, :, z].astype(np.uint8)
        # The distance is defined only when both contours exist on this slice
        if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:
            pts_A = _contour_points(slice_A)
            pts_B = _contour_points(slice_B)
            # Pairwise Euclidean distance matrix via broadcasting; replaces
            # the original O(N*M) pure-Python double loop.
            diff = (pts_A[:, None, :].astype(np.float64)
                    - pts_B[None, :, :].astype(np.float64))
            M = np.linalg.norm(diff, axis=2)
            # Mean distance and hausdorff distance
            md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx
            hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx
            table_md += [md]
            table_hd += [hd]
    # Return the mean distance and Hausdorff distance across 2D slices
    mean_md = np.mean(table_md) if table_md else None
    mean_hd = np.mean(table_hd) if table_hd else None
    return mean_md, mean_hd
def tomographic_redshift_bin(z_s, version=default_version):
    """DES analyses work in pre-defined tomographic redshift bins. This
    function returns the photometric redshift bin as a function of photometric
    redshift.
    Parameters
    ----------
    z_s : numpy array
        Photometric redshifts.
    version : string
        Which catalog version to use.
    Returns
    -------
    z_bin : numpy array
        The tomographic redshift bin corresponding to each photometric
        redshift. Returns -1 in case a redshift does not fall into any bin.
    """
    # Guard clause: only the Y1 bin edges are defined here.
    if version != 'Y1':
        raise RuntimeError(
            "Unkown version of DES. Supported versions are {}.".format(
                known_versions))
    z_bins = [0.2, 0.43, 0.63, 0.9, 1.3]
    z_bin = np.digitize(z_s, z_bins) - 1
    # Out-of-range or NaN redshifts get bin -1.
    outside = ((z_s < np.amin(z_bins)) | (z_s >= np.amax(z_bins))
               | np.isnan(z_s))
    return np.where(outside, -1, z_bin)
def is_point_in_rect(point, rect):
    """Checks whether is coordinate point inside the rectangle or not.
    Rectangle is defined by bounding box; points on the border count as
    inside.
    :type point: list
    :param point: testing coordinate point (x, y)
    :type rect: list
    :param rect: bounding box (x0, y0, x1, y1)
    :rtype: boolean
    :return: boolean check result
    """
    x0, y0, x1, y1 = rect
    x, y = point
    # Chained comparisons express the inclusive bounding-box test directly.
    return x0 <= x <= x1 and y0 <= y <= y1
def chinese_theorem_inv(modulo_list):
    """
    Returns (x, n1*...*nk) such as
    x mod mk = ak for all k, with
    modulo_list = [(a1, n1), ..., (ak, nk)]
    n1, ..., nk most be coprime 2 by 2.
    """
    acc_a, acc_n = modulo_list[0]
    # Fold in one congruence at a time using a Bezout identity
    # u*acc_n + v*n_k = 1 (moduli are pairwise coprime).
    for a_k, n_k in modulo_list[1:]:
        u, v = bezout(acc_n, n_k)
        acc_a = acc_a * v * n_k + a_k * u * acc_n
        acc_n = acc_n * n_k
    # Sanity check: the combined solution satisfies every congruence.
    for (a_k, n_k) in modulo_list:
        assert acc_a % n_k == a_k
    # Normalize the representative into [0, acc_n).
    return ((acc_n + acc_a % acc_n) % acc_n, acc_n)
def write_dict_to_csv(filename, data):
    """Write a dictionary to a CSV file with the key as the first column.

    Rows are written in sorted-key order; each row is the stringified key
    followed by the stringified items of its value.

    :param filename: path of the CSV file to create (overwritten if present).
    :param data: mapping of key -> iterable of values.
    """
    # newline='' is required by the csv module: without it every row is
    # followed by a spurious blank line on Windows.
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for key in sorted(data.keys()):
            row = [str(key)] + [str(v) for v in data[key]]
            writer.writerow(row)
def clean():
    """
    Clean test files

    Ensures the ``output`` and ``tmp`` directories exist under the current
    working directory and removes every regular file directly inside them
    (subdirectories are left untouched).
    """
    # Both directories get identical treatment, so loop instead of
    # duplicating the create-then-empty logic.
    for directory in ("output", "tmp"):
        # exist_ok avoids the racy listdir-then-mkdir check.
        os.makedirs(directory, exist_ok=True)
        for name in os.listdir("./" + directory):
            path = "./" + directory + "/" + name
            if os.path.isfile(path):
                os.remove(path)
def compute_segment_cores(split_lines_of_utt):
    """
    This function returns a list of pairs (start-index, end-index) representing
    the cores of segments (so if a pair is (s, e), then the core of a segment
    would span (s, s+1, ... e-1).
    The argument 'split_lines_of_utt' is list of lines from a ctm-edits file
    corresponding to a single utterance.
    By the 'core of a segment', we mean a sequence of ctm-edits lines including
    at least one 'cor' line and a contiguous sequence of other lines of the
    type 'cor', 'fix' and 'sil' that must be not tainted. The segment core
    excludes any tainted lines at the edge of a segment, which will be added
    later.
    We only initiate segments when it contains something correct and not
    realized as unk (i.e. ref==hyp); and we extend it with anything that is
    'sil' or 'fix' or 'cor' that is not tainted. Contiguous regions of 'true'
    in the resulting boolean array will then become the cores of prototype
    segments, and we'll add any adjacent tainted words (or parts of them).
    """
    num_lines = len(split_lines_of_utt)
    # line_is_in_segment_core[i] is True when line i belongs to a segment
    # core; it is seeded below and then grown by two extension passes.
    line_is_in_segment_core = [False] * num_lines
    # include only the correct lines
    # Field [7] is the edit type; fields [4] and [6] appear to be the
    # hypothesis and reference words — TODO confirm against the ctm-edits
    # format. 'cor' with equal words means "correct and not an unk match".
    for i in range(num_lines):
        if (split_lines_of_utt[i][7] == 'cor'
                and split_lines_of_utt[i][4] == split_lines_of_utt[i][6]):
            line_is_in_segment_core[i] = True
    # extend each proto-segment forwards as far as we can:
    # NOTE: the flag list is read and written inside the same loop, so a
    # newly-set flag at i lets the next iteration extend to i+1 (the
    # extension cascades within a single pass).
    for i in range(1, num_lines):
        if line_is_in_segment_core[i - 1] and not line_is_in_segment_core[i]:
            edit_type = split_lines_of_utt[i][7]
            # is_tainted is a module-level helper (defined elsewhere in
            # this file) that flags lines unsafe to absorb into a core.
            if (not is_tainted(split_lines_of_utt[i])
                    and (edit_type == 'cor' or edit_type == 'sil'
                         or edit_type == 'fix')):
                line_is_in_segment_core[i] = True
    # extend each proto-segment backwards as far as we can:
    # Same cascading behavior as above, but walking right-to-left.
    for i in reversed(range(0, num_lines - 1)):
        if line_is_in_segment_core[i + 1] and not line_is_in_segment_core[i]:
            edit_type = split_lines_of_utt[i][7]
            if (not is_tainted(split_lines_of_utt[i])
                    and (edit_type == 'cor' or edit_type == 'sil'
                         or edit_type == 'fix')):
                line_is_in_segment_core[i] = True
    # Get contiguous regions of line in the form of a list
    # of (start_index, end_index)
    # Ranges are half-open: (start, end) covers lines start..end-1.
    segment_ranges = []
    cur_segment_start = None
    for i in range(0, num_lines):
        if line_is_in_segment_core[i]:
            if cur_segment_start is None:
                cur_segment_start = i
        else:
            if cur_segment_start is not None:
                segment_ranges.append((cur_segment_start, i))
                cur_segment_start = None
    # Close a region that runs to the end of the utterance.
    if cur_segment_start is not None:
        segment_ranges.append((cur_segment_start, num_lines))
    return segment_ranges
def local_neighborhood_nodes_for_element(index, feature_radius_pixels):
    """
    local_neighborhood_nodes_for_element returns the indices of nodes which are in the local neighborhood of an
    element. Note that the nodes and elements in a mesh have distinct coordinates: elements exist in the centroids
    of cubes formed by the mesh of nodes.
    :param index: the element for which we want the local neighborhood
    :param feature_radius_pixels: minimum feature radius, in pixels
    :return: the indices of the local neighborhood set
    """
    # TODO: there might be an off-by-one error in here
    neighbors = set()
    cx, cy, cz = elemental_index_to_nodal_index(index)
    # Sweep x over the full radius, then shrink the y and z extents so the
    # visited lattice points stay inside the sphere of the given radius.
    x_lo = math.ceil(cx - feature_radius_pixels)
    x_hi = math.floor(cx + feature_radius_pixels) + 1
    for i in range(x_lo, x_hi):
        # how much variability is left for the second index given the first?
        y_extent = math.sqrt(feature_radius_pixels ** 2 - (cx - i) ** 2)
        for j in range(math.ceil(cy - y_extent), math.floor(cy + y_extent) + 1):
            z_extent = math.sqrt(
                feature_radius_pixels ** 2 - (cx - i) ** 2 - (cy - j) ** 2)
            for k in range(math.ceil(cz - z_extent),
                           math.floor(cz + z_extent) + 1):
                neighbors.add((i, j, k))
    return neighbors
def _classification(dataset='iris',k_range=[1,31],dist_metric='l1'):
    """
    knn on classificaiton dataset
    Inputs:
        dataset: (str) name of dataset
        k_range: (list) k_range[0]: lower bound of number of nearest
            neighbours; k_range[1]: upper bound of number of nearest
            neighbours
        dist_metric: (str) 'l1' or 'l2'
    Outputs:
        validation accuracy, one entry per k in [k_range[0], k_range[1])
    """
    print ('------Processing Dataset '+dataset+' ------')
    x_train, x_valid, x_test, y_train, y_valid, y_test = load_dataset(dataset)
    # Boolean one-hot labels are converted by the project helper _cast_TF.
    if y_train.dtype==np.dtype('bool'):
        y_train = _cast_TF(y_train)
        y_valid = _cast_TF(y_valid)
        y_test = _cast_TF(y_test)
    acc = []
    predicted = _eval_knn(k_range,x_train,y_train,x_valid,y_valid,dist_metric,compute_loss=False)
    # Ground-truth class indices are loop-invariant; compute them once.
    gt = np.argmax(np.where(y_valid==True,1,0),axis=1)
    for k in range(k_range[0],k_range[1]):
        curr_predict = predicted['k='+str(k)]
        result = np.argmax(curr_predict,axis=1)
        unique, counts = np.unique(result-gt, return_counts=True)
        # .get(0, 0) avoids a KeyError when no prediction matches the
        # ground truth (0 then never appears among the differences).
        correct = dict(zip(unique, counts)).get(0, 0)
        acc.append(correct/y_valid.shape[0])
    return acc
def parse_pgpass(hostname='scidb2.nersc.gov', username='desidev_admin'):
    """Read a ``~/.pgpass`` file.
    Parameters
    ----------
    hostname : :class:`str`, optional
        Database hostname.
    username : :class:`str`, optional
        Database username.
    Returns
    -------
    :class:`str`
        A string suitable for creating a SQLAlchemy database engine, or None
        if no matching data was found.
    """
    from os.path import expanduser
    fmt = "postgresql://{3}:{4}@{0}:{1}/{2}"
    try:
        with open(expanduser('~/.pgpass')) as p:
            lines = p.readlines()
    except FileNotFoundError:
        return None
    # Index the file by hostname, then by username within each host.
    # pgpass fields are colon-separated: host:port:db:user:password.
    data = dict()
    for line in lines:
        fields = line.strip().split(':')
        data.setdefault(fields[0], {})[fields[3]] = fmt.format(*fields)
    try:
        return data[hostname][username]
    except KeyError:
        # Either the host or the user is absent from the file.
        return None
def make_move(board, max_rows, max_cols, col, player):
    """Put player's piece in column COL of the board, if it is a valid move.
    Return a tuple of two values:
    1. If the move is valid, make_move returns the index of the row the
    piece is placed in. Otherwise, it returns -1.
    2. The updated board
    >>> rows, columns = 2, 2
    >>> board = create_board(rows, columns)
    >>> row, board = make_move(board, rows, columns, 0, 'X')
    >>> row
    1
    >>> get_piece(board, 1, 0)
    'X'
    >>> row, board = make_move(board, rows, columns, 0, 'O')
    >>> row
    0
    >>> row, board = make_move(board, rows, columns, 0, 'X')
    >>> row
    -1
    >>> row, board = make_move(board, rows, columns, -4, '0')
    >>> row
    -1
    """
    # Out-of-range columns are rejected without touching the board.
    if 0 <= col < max_cols:
        return put_piece(board, max_rows, col, player)
    return (-1, board)
def convert_timestamp(ts):
    """Converts the timestamp to a format suitable for Billing.
    Examples of a good timestamp for startTime, endTime, and eventTime:
        '2016-05-20T00:00:00Z'
    Note the trailing 'Z'. Python does not add the 'Z' so we tack it on
    ourselves.
    """
    return '{}Z'.format(ts.isoformat())
def object_id(obj, clazz=None):
    """Turn a given object into an ID that can be stored in with
    the notification."""
    # Fall back to the object's own type when no class is given, in which
    # case the isinstance branch always applies.
    clazz = clazz or type(obj)
    if isinstance(obj, clazz):
        return obj.id
    if is_mapping(obj):
        return obj.get('id')
    return obj
def get_email_from_request(request):
    """
    Get 'Authorization' from request header,
    and parse the email address using cpg-util
    """
    auth_header = request.headers.get('Authorization')
    if auth_header is None:
        raise web.HTTPUnauthorized(reason='Missing authorization header')
    # Strip the 'bearer' / 'Bearer' prefix (7 characters incl. the space).
    id_token = auth_header[7:]
    try:
        return email_from_id_token(id_token)
    except ValueError as e:
        raise web.HTTPForbidden(reason='Invalid authorization header') from e
def test_install_file(tmp_path):
    """
    test spm.pkgfiles.local
    """
    result = spm.install_file(
        "apache",
        formula_tar=MagicMock(),
        member=MockTar(),
        formula_def={"name": "apache"},
        conn={"formula_path": str(tmp_path / "test")},
    )
    # install_file should report the path of the installed member.
    assert result == MockTar().path
def retweet_fav_post(api):
    """
    Retweets tweets with #motivation or #inspiration
    @param api - api object created from config
    """
    try:
        for tweet in tweepy.Cursor(api.search, q=hashtag).items(1):
            # Skip tweets we have already favorited or retweeted.
            if tweet.favorited or tweet.retweeted:
                continue
            try:
                logger.info("Tweet fav")
                tweet.favorite()
                logger.info("Tweet retweeted")
                tweet.retweet()
            except Exception:
                logger.error("Error on fav and retweet", exc_info=True)
    except tweepy.TweepError as retweet_error:
        logger.error(retweet_error.reason)
def play(url, offset, text, card_data, response_builder):
    """Function to play audio.
    Using the function to begin playing audio when:
        - Play Audio Intent is invoked.
        - Resuming audio when stopped / paused.
        - Next / Previous commands issues.
    https://developer.amazon.com/docs/custom-skills/audioplayer-interface-reference.html#play
    REPLACE_ALL: Immediately begin playback of the specified stream,
    and replace current and enqueued streams.
    """
    # type: (str, int, str, Dict, ResponseFactory) -> Response
    logger.info("play : 52 v2")
    logger.info(url)
    logger.info(offset)
    logger.info(text)
    logger.info(card_data)
    if card_data:
        logger.info("play : 60")
        card_image = Image(
            small_image_url=card_data["small_image_url"],
            large_image_url=card_data["large_image_url"])
        response_builder.set_card(
            StandardCard(
                title=card_data["title"],
                text=card_data["text"],
                image=card_image,
            )
        )
    # Using URL as token as they are all unique
    logger.info("play : 71")
    stream = Stream(
        token=url,
        url=url,
        offset_in_milliseconds=offset,
        expected_previous_token=None)
    metadata = add_screen_background(card_data) if card_data else None
    directive = PlayDirective(
        play_behavior=PlayBehavior.REPLACE_ALL,
        audio_item=AudioItem(stream=stream, metadata=metadata),
    )
    response_builder.add_directive(directive).set_should_end_session(True)
    logger.info("play : 85")
    if text:
        logger.info("play : 87")
        response_builder.speak(text)
    logger.info("play : 90")
    return response_builder.response
def all_flags_match_bombs(cells: List[List[Dict]]) -> bool:
    """
    Checks whether all flags are placed correctly
    and there are no flags over regular cells (not bombs)
    :param cells: array of array of cells dicts
    :return: True if all flags are placed correctly
    """
    # Every flagged cell must cover a bomb ("*"); all() short-circuits on
    # the first misplaced flag, matching the original early return.
    return all(
        cell["mask"] != CellMask.FLAG or cell["value"] == "*"
        for row in cells
        for cell in row
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.