| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def add_landmarks(particle, d, angle):
"""
Adds a set of landmarks to the particle. Only used on first SLAM cycle
when no landmarks have been added.
:param particle: The particle to be updated
:param d: An array of distances to the landmarks
:param angle: An array of observation angles for the landmarks
:return: Returns the updated particle with landmarks added
"""
    # Evaluate sine and cosine of the global bearing angle for each observation
s = np.sin(pi_2_pi(particle.x[2, 0] + angle))
c = np.cos(pi_2_pi(particle.x[2, 0] + angle))
# Add new landmark locations to mu
particle.mu = np.vstack((particle.mu, np.array(
[particle.x[0, 0] + d * c,
particle.x[1, 0] + d * s]).T))
# Distance values
dpos = np.zeros((len(d), 2))
dpos[:, 0] = d * c # dx
dpos[:, 1] = d * s # dy
d_sq = dpos[:, 0]**2 + dpos[:, 1]**2
d = np.sqrt(d_sq)
H = calc_H(particle, dpos, d_sq, d)
# Add covariance matrices for landmarks
particle.sigma = np.vstack((particle.sigma,
np.linalg.inv(H) @ Q
@ np.linalg.inv(H.transpose((0, 2, 1)))))
particle.i = np.append(particle.i, np.full(len(d), 1))
return particle
| 12,900
|
def walk_forward_val_multiple(model, ts_list,
history_size=HISTORY_SIZE,
target_size=TARGET_SIZE) -> float:
"""
Conduct walk-forward validation for all states, average the results.
Parameters
----------
model -- The model to be validated
    ts_list {list | np.ndarray} -- Array of time series vectors
history_size {int} -- The window to use for model input
target_size {int} -- The target prediction window size
Returns
-------
'mse' {float} -- The weighted average MSE across all the states (weighted
by length of time series)
"""
total_error = 0.
total_steps = 0
for ts in ts_list:
mse_state, n_preds = walk_forward_val(model, ts,
history_size=history_size,
target_size=target_size,
return_count=True)
total_error += mse_state * n_preds
total_steps += n_preds
return total_error / total_steps
| 12,901
|
def main():
"""The 'real' entry point of this program"""
# parse args
parser = argparse.ArgumentParser(
prog=hdtop.const.PROG_NAME, description=hdtop.const.DESCRIPTION
)
parser.add_argument(
"action",
default="start",
nargs="?",
choices=_SUBPARSERS,
help="Action for the program",
)
args, remain = parser.parse_known_args()
# parse sub args
subparser: argparse.ArgumentParser = _SUBPARSERS[args.action]()
args = subparser.parse_args(remain, args)
# action
return args.func(args)
| 12,902
|
def inverse_project_lambert_equal_area(pt):
"""
    Inverse Lambert azimuthal equal-area projection.
    Parameters:
        pt: projected point (X, Y), as a numpy array
"""
X = pt[0]
Y = pt[1]
f = np.sqrt(1.0-(X**2.0+Y**2.0)/4)
return tensors.Vector([f*X,f*Y,-1.0+(X**2.0+Y**2.0)/2])
| 12,903
|
def _get_field_default(field: dataclasses.Field):
"""
Return a marshmallow default value given a dataclass default value
>>> _get_field_default(dataclasses.field())
<marshmallow.missing>
"""
# Remove `type: ignore` when https://github.com/python/mypy/issues/6910 is fixed
default_factory = field.default_factory # type: ignore
if default_factory is not dataclasses.MISSING:
return default_factory
elif field.default is dataclasses.MISSING:
return marshmallow.missing
return field.default
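# A minimal usage sketch, assuming the function above is in scope; the dataclass
# and field names below are made up for illustration:
import dataclasses

@dataclasses.dataclass
class _Point:
    x: int = 3
    y: list = dataclasses.field(default_factory=list)

_fx, _fy = dataclasses.fields(_Point)
assert _get_field_default(_fx) == 3      # a plain default is returned as-is
assert _get_field_default(_fy) is list   # the default_factory itself is returned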
| 12,904
|
def download_file_from_s3(
s3_path: str,
local_path: str,
create_dirs: bool = True,
silent: bool = False,
raise_on_error: bool = True,
boto3_kwargs: Optional[Dict[str, Union[str, float]]] = None,
) -> bool:
"""Download a file from s3 to local machine.
Args:
s3_path: Full path on s3 in format "s3://<bucket_name>/<obj_path>".
local_path: Path on local machine.
create_dirs: Whether the path directory should be created. (Defaults to True)
silent: Whether to print debug information.
raise_on_error: Whether to raise exception on any errors. (Defaults to True)
boto3_kwargs: The parameters for s3.meta.client.download_fileobj() function.
Returns:
Boolean of whether the file was successfully downloaded.
"""
if boto3_kwargs is None:
boto3_kwargs = {}
bucket, key = decompose_s3_path(s3_path)
s3_client = boto3.client("s3")
try:
if not silent:
print(f"Downloading file from '{s3_path}' to '{local_path}'")
dir_name = os.path.dirname(local_path)
if create_dirs:
ensure_dirs(dir_name, silent=silent)
with tempfile.NamedTemporaryFile("wb", dir=dir_name, delete=False) as tf:
s3_client.download_fileobj(bucket, key, tf, **boto3_kwargs)
temp_path = tf.name
os.rename(temp_path, local_path)
except Exception as e:
print(f"ERROR: failed to download from {s3_path} to {local_path}: {e}")
if raise_on_error:
raise e
return False
return True
| 12,905
|
def get_all_votes(poll_id: int) -> List[Tuple[str, int]]:
"""
Get all votes for the current poll_id that are stored in the database
Args:
poll_id (int): Telegram's `message_id` for the poll
Returns:
List[Tuple[str, int]]: A list with the current votes in tuples (user, votes)
"""
postgres: Database = get_database()
select_query = (
select([postgres.motos_counter.c.username, postgres.motos_counter.c.vote])
.where(postgres.motos_counter.c.poll_id == poll_id)
.order_by(postgres.motos_counter.c.vote, postgres.motos_counter.c.date)
)
results = postgres.engine.execute(select_query)
return [(row["username"], row["vote"]) for row in results]
| 12,906
|
def plotWavefunc(x, potential, num_states, wave_vectsX, wave_vectsY, energy, func_name):
"""
    This function visualizes the potential energy functions and the
    corresponding wave functions, which are the solutions to the wave equation.
    Inputs:
    x : Data points
    potential : Potential Energy Function
    num_states : Number of primary energy states
    wave_vectsX : Wave function along x - as a set of vectors
    wave_vectsY : Wave function along y - as a set of vectors
    energy : Energy values of each state
    func_name : Function name of potential energy curve
Outputs:
None
"""
fig = plt.figure('WaveFunc', facecolor='w', edgecolor='k')
for i in range(num_states-1, -1, -1):
ax = fig.add_subplot(num_states//2, num_states//2, i+1, projection='3d')
X, Y = np.meshgrid(x, x)
wave_vect = np.reshape(wave_vectsX[:,i], (len(wave_vectsX),1))*np.transpose(np.reshape(wave_vectsY[:,i], (len(wave_vectsX),1)))
ax.plot_surface(X, Y, wave_vect + energy[i])
plt.xlabel('Distance (x)', fontdict={'weight': 'bold', 'size': 8, 'color': 'black'})
plt.ylabel('Distance (y)', fontdict={'weight': 'bold', 'size': 8, 'color': 'black'})
ax.set_title('Energy {} = {:10.4f}eV'.format(i,energy[i]), fontdict={'fontsize': 10, 'fontweight': 'bold'})
plt.grid(b=True)
plt.autoscale(tight=True)
plt.suptitle(f'Wave Functions for {func_name}', fontsize = 22, fontweight = 'bold')
plt.savefig(f'wavePlot-{func_name}.png')
plt.close('all')
return
| 12,907
|
def getUnitConversion():
"""
Get the unit conversion from kT to kJ/mol
Returns
factor: The conversion factor (float)
"""
temp = 298.15
factor = Python_kb/1000.0 * temp * Python_Na
return factor
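# A worked check, assuming the module-level constants hold the CODATA values
# Python_kb = 1.380649e-23 J/K and Python_Na = 6.02214076e23 1/mol:
# kb * Na * T / 1000 = 8.314 J/(mol*K) * 298.15 K / 1000 ≈ 2.479 kJ/mol per kT
print(getUnitConversion())   # ≈ 2.479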
| 12,908
|
def find_where_and_nearest(array, value):
"""
Returns index and array[index] where value is closest to an array element.
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx, array[idx]
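# A minimal usage sketch, assuming numpy is imported as np in the module:
idx, val = find_where_and_nearest([0.0, 0.5, 1.0, 1.5], 0.8)
print(idx, val)   # 2 1.0 -- 1.0 is the element closest to 0.8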
| 12,909
|
def twoexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,1./0.5,logit(0.1)]):
"""
NAME:
twoexpdisk
PURPOSE:
density of a sum of two exponential disks
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,1/hz2,logit(amp2)]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
amp= ilogit(params[4])
return (1.-amp)/2.*numpy.fabs(params[1])\
*numpy.exp(-params[0]*(R-_R0)-params[1]*numpy.fabs(z))\
+amp/2.*params[3]*numpy.exp(-params[2]*(R-_R0)-params[3]*numpy.fabs(z))
| 12,910
|
def init_linear(input_linear):
"""
    Initialize the weights of a fully-connected (linear) layer.
"""
scope = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear.weight.size(1)))
nn.init.uniform_(input_linear.weight, -scope, scope)
if input_linear.bias is not None:
scope = np.sqrt(6.0 / (input_linear.bias.size(0) + 1))
input_linear.bias.data.uniform_(-scope, scope)
| 12,911
|
def sc(X):
"""Silhouette Coefficient"""
global best_k
    score_list = []  # stores the average silhouette score for each K
    silhouette_int = -1  # initial threshold for the best average silhouette score
    for n_clusters in range(3, 10):  # iterate over candidate cluster counts (3 to 9)
        model_kmeans = KMeans(n_clusters=n_clusters, random_state=0)  # build the clustering model
        cluster_labels_tmp = model_kmeans.fit_predict(X)  # fit the model and predict cluster labels
        silhouette_tmp = metrics.silhouette_score(X, cluster_labels_tmp)  # average silhouette score for this K
        score_list.append([n_clusters, silhouette_tmp])  # record K and its score
        if silhouette_tmp > silhouette_int:  # if the average silhouette score improved
            best_k = n_clusters  # keep the best K
            silhouette_int = silhouette_tmp  # keep the best average silhouette score
            # best_kmeans = model_kmeans  # keep the best model
            # cluster_labels_k = cluster_labels_tmp  # keep the best cluster labels
return best_k
| 12,912
|
def is_tcp_port_open(host: str, tcp_port: int) -> bool:
"""Checks if the TCP host port is open."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2) # 2 Second Timeout
try:
sock.connect((host, tcp_port))
sock.shutdown(socket.SHUT_RDWR)
except ConnectionRefusedError:
return False
except socket.timeout:
return False
finally:
sock.close()
# Other errors are propagated as odd exceptions.
# We shutdown and closed the connection, but the server may need a second
# to start listening again. If the following error is seen, this timeout
# should be increased. 300ms seems to be the minimum.
#
# Connecting to J-Link via IP...FAILED: Can not connect to J-Link via \
# TCP/IP (127.0.0.1, port 19020)
time.sleep(0.5)
return True
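# A minimal usage sketch; port 1 on localhost is assumed to be closed, so this
# should print False (note the function sleeps 0.5 s whenever the port is open):
print(is_tcp_port_open("127.0.0.1", 1))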
| 12,913
|
def recursive_apply_dict(node: dict, fn: Callable) -> Any:
"""
Applies `fn` to the node, if `fn` changes the node,
the changes should be returned. If the `fn` does not change the node,
it calls `recursive_apply` on the children of the node.
In case the recursion on the children results in one or more
`runtool.datatypes.Versions` objects, the cartesian product of these
versions is calculated and a new `runtool.datatypes.Versions` object will be
returned containing the different versions of this node.
"""
    # Merge any children of type Versions into a new Versions object below
expanded_children = []
new_node = {}
for key, value in node.items():
child = recursive_apply(value, fn)
# If the child is a Versions object, map the key to all its versions,
        # e.g. child = Versions([1, 2]), key = 'a'
        # ->
        # (('a', 1), ('a', 2))
if isinstance(child, Versions):
expanded_children.append(itertools.product([key], child))
else:
new_node[key] = child
if expanded_children:
# example:
# expanded_children = [(('a':1), ('a':2)), (('b':1), ('b':2))]
# new_node = {"c": 3}
# results in:
# [
# {'a':1, 'b':1, 'c':3},
# {'a':1, 'b':2, 'c':3},
# {'a':2, 'b':1, 'c':3},
        # {'a':2, 'b':2, 'c':3},
# ]
new_node = [
fn(
dict(version_of_node, **new_node)
) # apply fn to the new version of the node
for version_of_node in itertools.product(*expanded_children)
]
# if the current node generated Versions object, these
# need to be flattened as well. For example:
# new_node = [Versions([1,2]), Versions([3,4])]
# results in
# Versions([[1,3], [1,4], [2,3], [2,4]])
if all(isinstance(val, Versions) for val in new_node):
return Versions(list(*itertools.product(*new_node)))
return Versions(new_node)
return fn(new_node)
| 12,914
|
def encode_image_array_as_jpg_str(image):
"""Encodes a numpy array into a JPEG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
JPEG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='JPEG')
jpg_string = output.getvalue()
output.close()
return jpg_string
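# A minimal usage sketch, assuming Pillow, six and numpy are available as in the module above:
import numpy as np
jpg_bytes = encode_image_array_as_jpg_str(np.zeros((8, 8, 3), dtype=np.uint8))
print(jpg_bytes[:2] == b"\xff\xd8")   # True: the JPEG magic bytes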
| 12,915
|
def minify_response(response):
"""Minify response to save bandwith."""
if response.mimetype == u'text/html':
data = response.get_data(as_text=True)
response.set_data(minify(data, remove_comments=True,
remove_empty_space=True,
reduce_boolean_attributes=True))
return response
| 12,916
|
def test_RangeManager_9(qtbot, full_range, selection, value_type):
"""Entering invalid value in an entry field (especially the case when text contains `,`)"""
slider_steps = 1000
selection_to_range_min = 0.01
rman = RangeManager(slider_steps=slider_steps, selection_to_range_min=selection_to_range_min)
qtbot.addWidget(rman)
rman.show()
rman.set_range(full_range[0], full_range[1])
rman.set_selection(value_low=selection[0], value_high=selection[1])
assert rman.get_selection() == selection, "Incorrect selection"
enter_text_via_keyboard(qtbot, rman.le_min_value, "abc", finish=True)
assert rman.get_selection() == selection, "Incorrect selection"
enter_text_via_keyboard(qtbot, rman.le_min_value, "10,", finish=True)
assert rman.get_selection() == selection, "Incorrect selection"
enter_text_via_keyboard(qtbot, rman.le_max_value, "abc", finish=True)
assert rman.get_selection() == selection, "Incorrect selection"
enter_text_via_keyboard(qtbot, rman.le_max_value, "10.0,", finish=True)
assert rman.get_selection() == selection, "Incorrect selection"
| 12,917
|
def get_shorturlhash(myurl):
"""Returns a FNV1a hash of the UNquoted version of the passed URL."""
x = get_hash(unquote(myurl))
return x
| 12,918
|
def main():
"""Run DeePaC CLI."""
seed = 0
np.random.seed(seed)
tf.random.set_seed(seed)
rn.seed(seed)
modulepath = os.path.dirname(__file__)
builtin_configs = {"rapid": os.path.join(modulepath, "builtin", "config", "nn-img-rapid-cnn.ini"),
"sensitive": os.path.join(modulepath, "builtin", "config", "nn-img-sensitive-lstm.ini")}
builtin_weights = {"rapid": os.path.join(modulepath, "builtin", "weights", "nn-img-rapid-cnn.h5"),
"sensitive": os.path.join(modulepath, "builtin", "weights", "nn-img-sensitive-lstm.h5")}
remote_repo_url = "https://zenodo.org/api/records/4456008"
runner = MainRunner(builtin_configs, builtin_weights, remote_repo_url)
runner.parse()
| 12,919
|
def main():
"""
Monitor serial port and write it to stdout.
"""
# Parse arguments
arguments, parser = parse_arguments()
# Open serial port
port = serial.Serial(arguments.port, arguments.baud)
# Read and write until CTRL + C
start = time.time()
try:
while True:
line = port.readline()
if not line:
break
sys.stdout.write("%d,%s" % (time.time() - start, line))
sys.stdout.flush()
except KeyboardInterrupt:
pass
| 12,920
|
def filter_privacy_level(qs, clearance_level, exact=False):
"""
    Function to exclude objects from a queryset which have a higher clearance
    level than the given maximum clearance level.
    :qs: Django queryset.
    :clearance_level: Maximum clearance level to keep.
:exact: Boolean to check for the exact clearance level.
"""
if not qs:
return qs
c_type = ContentType.objects.get_for_model(qs.model)
kwargs = {
'content_type': c_type,
'object_id__in': qs.values_list('pk'),
'level__clearance_level{}'.format(
'' if exact else '__gt'): clearance_level,
}
private_objects = PrivacySetting.objects.filter(**kwargs).values_list(
'object_id')
if exact:
return qs.filter(pk__in=private_objects)
return qs.exclude(pk__in=private_objects)
| 12,921
|
def test_compare_device_types():
"""Tests that constants are configured and compared correctly."""
assert DeviceType.CEILING_FAN == "CF"
assert DeviceType.is_fan("CF")
assert DeviceType.MOTORIZED_SHADES == "MS"
assert DeviceType.is_shades("MS")
assert DeviceType.FIREPLACE == "FP"
assert DeviceType.is_fireplace("FP")
assert DeviceType.GENERIC_DEVICE == "GX"
assert DeviceType.is_generic("GX")
assert DeviceType.AIR_CONDITIONER == "AC"
assert DeviceType.is_air_conditioner("AC")
assert DeviceType.GARAGE_DOOR == "GD"
assert DeviceType.is_garage_door("GD")
assert DeviceType.BIDET == "BD"
assert DeviceType.is_bidet("BD")
assert DeviceType.LIGHT == "LT"
assert DeviceType.is_light("LT")
| 12,922
|
def test_update_clinvar_id(adapter, user_obj, institute_obj):
"""record an official clinvar submission name for a submission"""
submission_id = get_new_submission(adapter, user_obj, institute_obj)
# Update the submission with the official clinvar name
updated_submission = adapter.update_clinvar_id(clinvar_id='SUB0001', submission_id=submission_id)
# Assert that the submission was updated
assert adapter.get_clinvar_id(submission_id) == "SUB0001"
| 12,923
|
def validate_ceph_vol_params(config):
"""
Checks the presence of Ceph Volume parameters
"""
logger.info("checking ceph_vol_params")
ceph_vols = config_utils.get_ceph_vol(config)
for ceph_vol in ceph_vols:
validate_dict_data(ceph_vol, consts.HOST_KEY)
ceph_host = ceph_vol[consts.HOST_KEY]
validate_dict_data(ceph_host, consts.IP_KEY)
validate_dict_data(ceph_host, consts.NODE_TYPE_KEY)
validate_dict_data(ceph_host, consts.PASSWORD_KEY)
validate_dict_data(ceph_host, consts.USER_KEY)
| 12,924
|
def get_configuration_store(name=None, resource_group_name=None, opts=None):
"""
Use this data source to access information about an existing App Configuration.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.appconfiguration.get_configuration_store(name="existing",
resource_group_name="existing")
pulumi.export("id", example.id)
```
:param str name: The Name of this App Configuration.
:param str resource_group_name: The name of the Resource Group where the App Configuration exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:appconfiguration/getConfigurationStore:getConfigurationStore', __args__, opts=opts).value
return AwaitableGetConfigurationStoreResult(
endpoint=__ret__.get('endpoint'),
id=__ret__.get('id'),
location=__ret__.get('location'),
name=__ret__.get('name'),
primary_read_keys=__ret__.get('primaryReadKeys'),
primary_write_keys=__ret__.get('primaryWriteKeys'),
resource_group_name=__ret__.get('resourceGroupName'),
secondary_read_keys=__ret__.get('secondaryReadKeys'),
secondary_write_keys=__ret__.get('secondaryWriteKeys'),
sku=__ret__.get('sku'),
tags=__ret__.get('tags'))
| 12,925
|
def plot_particles(gauge_solutions, t, gaugenos='all', kwargs_plot=None,
extend='neither'):
"""
Plot particle locations as points for some set of gauges.
"""
from matplotlib import pyplot as plt
gaugenos = check_gaugenos_input(gauge_solutions, gaugenos)
if kwargs_plot is None:
kwargs_plot = {'marker':'o','markersize':2,'color':'k'}
pp = interp_particles(gauge_solutions, t, gaugenos, extend)
for k in pp.keys():
x,y = pp[k]
plt.plot([x],[y],**kwargs_plot)
| 12,926
|
def _linear(args, output_size, bias, scope=None, use_fp16=False):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
    ValueError: if some of the arguments have an unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError(
"Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError(
"Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
dtype = [a.dtype for a in args][0]
# Now the computation.
with vs.variable_scope(scope or "Linear"):
matrix = _variable_on_cpu('Matrix', [total_arg_size, output_size],
use_fp16=use_fp16)
if use_fp16:
dtype = tf.float16
else:
dtype = tf.float32
args = [tf.cast(x, dtype) for x in args]
if len(args) == 1:
res = math_ops.matmul(args[0], matrix)
else:
res = math_ops.matmul(array_ops.concat(args, 1), matrix)
if not bias:
return res
bias_term = _variable_on_cpu('Bias', [output_size],
tf.constant_initializer(0),
use_fp16=use_fp16)
return res + bias_term
| 12,927
|
def find_CI(method, samples, weights=None, coverage=0.683,
logpost=None, logpost_sort_idx=None,
return_point_estimate=False, return_coverage=False,
return_extras=False, options=None):
"""Compute credible intervals and point estimates from samples.
Arguments
---------
method : str
Method to compute CI. Options are "PJ-HPD", "tail CI", "std", and
"HPD".
PJ-HPD: Compute the CI from the joint posterior HPD region such that
the projected range of the HPDR has coverage ``coverage``.
See Joachimi et al. 2020.
The point estimate is the joint posterior MAP.
tail CI: This is the usual quantile CI. I.e., for CI (l,u) and
coverage c, P(x<l) = (1-c)/2 and P(x>u) = 1-(1-c)/2.
The point estimate is the median.
std: Compute the CI as (mean - n_sigma*std, mean + n_sigma*std).
        ``n_sigma`` is the number of standard deviations that cover
``coverage`` in a normal distribution.
The point estimate is the mean.
HPD: Compute the HPDI of the samples.
The point estimate is the MAP.
samples : array
Samples to use.
weights : array, optional
Sample weights.
coverage : float, optional
Target coverage. This gets converted into sigmas. Default: 0.683.
logpost : array, optional
Array of the log posterior values of the samples. Required for method
``PJ-HPD``.
logpost_sort_idx : array, optional
Array of indices that sort the samples in descending posterior value.
If method is ``PJ-HPD`` and it is not provided, this will be computed
internally from logpost.
return_point_estimate : bool, optional
Whether to return the point_estimate.
return_coverage : bool, optional
        Whether to return the actual coverage of the CI.
    return_extras : bool, optional
        Whether to return method-specific extra information (e.g. the number of
        samples used for the PJ-HPD method).
options : dict, optional
Additional options passed to the CI methods.
Returns
-------
(l, u) : tuple
Credible interval of the samples.
p : float
Point estimate. Only returned if return_point_estimate is true.
coverage : float
The achieved coverage of the returned CI.
"""
options = options or {}
extras = None
if method.lower() == "pj-hpd" or method.lower() == "projected joint hpd":
if logpost is None and logpost_sort_idx is None:
raise ValueError("For method PJ-HPD, either logpost or "
"logpost_sort_idx need to be specified.")
CI, MAP, alpha, n_sample = find_projected_joint_HPDI(
samples, weights,
coverage_1d_threshold=coverage,
sort_idx=logpost_sort_idx,
log_posterior=logpost,
return_map=True, return_coverage_1d=True,
return_n_sample=True,
**options)
point_estimate = MAP
extras = n_sample
elif method.lower() == "hpd" or method.lower() == "m-hpd":
CI, marg_MAP, alpha, no_constraints = find_marginal_HPDI(
samples, weights,
coverage=coverage,
return_map=True,
return_coverage=True,
check_prior_edges=True,
**options)
point_estimate = marg_MAP
extras = no_constraints
elif method.lower() == "tail ci" or method.lower() == "quantile ci":
CI, marg_median, alpha = find_quantile_CI(
samples, weights,
coverage=coverage,
return_median=True, return_coverage=True)
point_estimate = marg_median
elif method.lower() == "std":
CI, marg_mean, alpha = find_std_CI(
samples, weights, coverage=coverage,
return_mean=True, return_coverage=True)
point_estimate = marg_mean
else:
raise NotImplementedError(f"Method {method} not supported.")
result = [CI]
if return_point_estimate:
result += [point_estimate]
if return_coverage:
result += [alpha]
if return_extras:
result += [extras]
if len(result) == 1:
# Only CI
return result[0]
else:
return tuple(result)
| 12,928
|
def test_cli_add():
"""Should execute the command as expected."""
result = lobotomy.run_cli(["add", "lambda.create_function", "-"])
assert result.code == "ECHOED"
| 12,929
|
def calculating(gas_in, water_in, electricity_in):
""" The Function that is responsible for calculating utility bills """
global resources_for_count
resources_for_count = {'gas': 0.0, 'water': 0.0, 'electricity': 0.0}
dict_remembering('rate_dictionary.txt', stable_rate_dict)
calculate_chek('gas', gas_in, resources_for_count)
calculate_chek('water', water_in, resources_for_count)
calculate_chek('electricity', electricity_in, resources_for_count)
gas_calculated = (float(stable_rate_dict['gas']) * float(resources_for_count['gas']))
water_calculated = (float(stable_rate_dict['water']) * float(resources_for_count['water']))
electricity_calculated = (float(stable_rate_dict['electricity']) * float(resources_for_count['electricity']))
label_gas_calculated.configure(text='{:8.3f}'.format(gas_calculated))
label_water_calculated.configure(text='{:8.3f}'.format(water_calculated))
label_electricity_calculated.configure(text='{:8.3f}'.format(electricity_calculated))
| 12,930
|
def test_s3_service_readme(app):
"""Runs the example from the README."""
import os
from flask import Flask
from flask_warehouse import Warehouse
# 1. Configuring Warehouse
app = Flask(__name__)
app.config['WAREHOUSE_DEFAULT_SERVICE'] = 's3' # or 'file' for filesystem
app.config['WAREHOUSE_DEFAULT_LOCATION'] = 'us-west-1' # required for 's3'
app.config['WAREHOUSE_DEFAULT_BUCKET'] = None
app.config['AWS_ACCESS_KEY_ID'] = '...' # required for 's3'
app.config['AWS_SECRET_ACCESS_KEY'] = '...' # required for 's3'
warehouse = Warehouse(app)
# Object-oriented approach:
bucket = warehouse.bucket('mybucket')
oo_cubby = bucket.cubby('keys')
# Or compact approach:
compact_cubby = warehouse('s3:///mybucket/keys')
assert oo_cubby == compact_cubby
cubby = oo_cubby
# 2. Writing to/from bytes
contents = b'12345'
cubby.store(bytes=contents)
assert cubby.filesize() == 5
cubby_contents = cubby.retrieve()
assert cubby_contents == contents
# 3. Writing to/from files
filepath = "local.txt"
with open(filepath, 'wb') as f:
f.write(b"Here are the contents of a file.")
cubby.store(filepath=filepath)
assert os.path.getsize(filepath) == cubby.filesize()
assert cubby.retrieve() == open(filepath, 'rb').read()
| 12,931
|
def load_csv(filename, fields=None, y_column=None, sep=','):
""" Read the csv file."""
input = pd.read_csv(filename, skipinitialspace=True,
usecols=fields, sep=sep, low_memory=False)
input = input.dropna(subset=fields)
# dtype={"ss_list_price": float, "ss_wholesale_cost": float}
input_data = input.values
data = DataSource()
    if y_column is None:
data.features = input_data[:, :-1]
data.labels = input_data[:, -1]
data.headers = input.keys()
else:
data.features = np.delete(
input_data, [y_column], axis=1) # input_data[:, :-1]
data.labels = input_data[:, y_column]
headers = np.array(input.keys())
data.headers = list(np.delete(headers, [y_column]))
data.headers.append(input.keys()[y_column])
# print(data.headers)
try:
data.file = filename.split("/")[-1]
except Exception:
data.file = filename
return data
| 12,932
|
def Normalize(array):
"""Normalizes numpy arrays into scale 0.0 - 1.0"""
array_min, array_max = array.min(), array.max()
return ((array - array_min)/(array_max - array_min))
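# A minimal usage sketch:
import numpy as np
print(Normalize(np.array([2.0, 4.0, 6.0])))   # [0.  0.5 1. ]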
| 12,933
|
def obtener_cantidad_anualmente(PaisDestino, AnioInicio, AnioFin):
"""
    Get the yearly number of inbound flights for a destination country over a range of years.
    Returns the total number of inbound flights for each year.
    :param PaisDestino: Country the flights arrive in
    :type PaisDestino: str
    :param AnioInicio: Start year
    :type AnioInicio: int
    :param AnioFin: End year
    :type AnioFin: int
    :rtype: Dict[str, int]
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerDatosVuelosEntrantesAenaDadoPaisDestinoAnioMinMax(PaisDestino, AnioInicio, AnioFin)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Build the extended JSON output
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval
| 12,934
|
def calculate_shap_for_test(training_data, y, pipeline, n_points_to_explain):
"""Helper function to compute the SHAP values for n_points_to_explain for a given pipeline."""
points_to_explain = training_data[:n_points_to_explain]
pipeline.fit(training_data, y)
return _compute_shap_values(pipeline, pd.DataFrame(points_to_explain), training_data)
| 12,935
|
def spdk_log_execution(duts, tester, log_handler):
"""
Change default logger handler.
"""
log_handler.config_execution('spdk')
tester.logger.config_execution('tester')
for dutobj in duts:
dutobj.logger.config_execution(
'dut' + settings.LOG_NAME_SEP + '%s' % dutobj.crb['My IP'])
| 12,936
|
def searchapi():
"""status"""
if len(sys.argv) == 1:
q = ""
else:
q = sys.argv[1]
result = requests.post("http://0.0.0.0:5000/search_user", data={"q": q})
try:
data = result.json()
if data:
print("[{}] {}".format(data["id"], data["name"]))
else:
print("No result")
except:
print("Not a valid JSON")
| 12,937
|
def read_num_write(input_string):
""" read in the number of output files
"""
pattern = ('NumWrite' +
one_or_more(SPACE) + capturing(INTEGER))
block = _get_training_data_section(input_string)
keyword = first_capture(pattern, block)
assert keyword is not None
return keyword
| 12,938
|
def stock_zh_a_minute(symbol: str = 'sh600751', period: str = '5', adjust: str = "") -> pd.DataFrame:
"""
    Minute-level historical quotes for stocks and stock indices
    http://finance.sina.com.cn/realstock/company/sh600519/nc.shtml
    :param symbol: sh000300
    :type symbol: str
    :param period: bar period in minutes: 1, 5, 15, 30 or 60
    :type period: str
    :param adjust: "" (default) returns unadjusted data; "qfq" returns forward-adjusted data; "hfq" returns backward-adjusted data
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
url = "https://quotes.sina.cn/cn/api/jsonp_v2.php/=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"datalen": "1023",
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(json.loads(r.text.split('=(')[1].split(");")[0])).iloc[:, :6]
try:
stock_zh_a_daily(symbol=symbol, adjust="qfq")
except:
return temp_df
if adjust == "":
return temp_df
if adjust == "qfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
need_df = temp_df[temp_df["time"] == "15:00:00"]
need_df.index = need_df["date"]
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="qfq")
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
if adjust == "hfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
need_df = temp_df[temp_df["time"] == "15:00:00"]
need_df.index = need_df["date"]
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="hfq")
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
| 12,939
|
def randomize_case(s: str) -> str:
"""Randomize string casing.
Parameters
----------
s : str
Original string
Returns
-------
str
        String with its letters in randomized casing.
"""
result = "".join(
[c.upper() if random.randint(0, 1) == 1 else c.lower() for c in s]
)
# If result contains letters and the result is same as original try again.
if UNICODE_LETTERS_RE.search(s) is not None and result == s:
return randomize_case(s)
else:
return result
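# A minimal usage sketch; the pattern below is a stand-in for the module-level
# UNICODE_LETTERS_RE that the function expects:
import random
import re
UNICODE_LETTERS_RE = re.compile(r"[^\W\d_]", re.UNICODE)
random.seed(0)
print(randomize_case("hello world"))   # each letter's case is random, e.g. "heLlo WORld"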
| 12,940
|
def _get_results():
"""Run speedtest with speedtest.py"""
s = speedtest.Speedtest()
print("Testing download..")
s.download()
print("Testing upload..")
s.upload()
return s.results.ping, s.results.download, s.results.upload
| 12,941
|
def example_create_topics(a, topics):
""" Create topics """
new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in topics]
# Call create_topics to asynchronously create topics, a dict
# of <topic,future> is returned.
fs = a.create_topics(new_topics)
# Wait for operation to finish.
# Timeouts are preferably controlled by passing request_timeout=15.0
# to the create_topics() call.
# All futures will finish at the same time.
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Topic {} created".format(topic))
except Exception as e:
print("Failed to create topic {}: {}".format(topic, e))
| 12,942
|
def load_conf() -> None:
"""Loads configuration from .env file"""
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
global RESURRECT_PATH
RESURRECT_PATH = (
Path(os.getenv("RESURRECT_PATH", default="~/.tmux/resurrect"))
.expanduser()
.resolve()
)
verbose(RESURRECT_PATH)
if not RESURRECT_PATH.is_dir():
raise FileError(str(RESURRECT_PATH), hint="Not a valid directory")
| 12,943
|
def rx_data_handler(name, packet):
"""
Callback function for incoming XBee transmission.
Args:
name(str): name to identify the callback function in a dispatcher.
packet(dict): package received from the XBee API.
"""
# Split datapack header and payload -- Small misc packs.
if packet['rf_data'][0:3] == 'CTR': # control command pack
recv_pack = packet['rf_data'].split(',')
shared.status['command'] = recv_pack[1]
util.log_info("CMD: %s" % recv_pack[1])
elif packet['rf_data'][0:3] == 'IFO': # information string pack
recv_pack = packet['rf_data'].split(',')
util.log_info("IFO: %s" % recv_pack[1])
elif packet['rf_data'][0:3] == 'PRM': # parameter pack
recv_pack = packet['rf_data'].split(',')
if recv_pack[1][0:3] == 'MPC': # currently only DMPC is available
shared.param_mpc = mas.ParamMPC(unpack('=3s3f2i', recv_pack[1]))
util.log_info("New MPC param received: %s" % shared.param_mpc)
shared.status['new_param'] = True
elif packet['rf_data'] == 'CLR_RDV': # clear rendezvous command, no comma split.
shared.rendezvous = None
util.log_info("'CLR_RDV' received, clear rendezvous")
# guarantee packet integrity (payload length) -- Actual agent data pack
elif len(packet['rf_data']) == shared.PAYLOAD_LENGTH:
#from binascii import hexlify # logging binary bytes
#util.log_debug("DATAPACK: %s" % hexlify(packet['rf_data']))
# unpack data into lists and wrap into local datapacks
recv_pack = WrappedData(unpack('=3s2d5f', packet['rf_data']))
util.log_debug("%s" % recv_pack)
if recv_pack.ID == 'TAR': # target point pack
shared.rendezvous = recv_pack
util.log_debug("Rendezvous coordinate received.")
elif recv_pack.ID == 'ORG': # HOME_ORIGIN pack
shared.home_origin = recv_pack.location_global_relative
util.log_info("HOME_ORIGIN set: %s." % shared.home_origin)
elif recv_pack.ID[0] == 'A': # normal neighbor begins with 'Axx'
shared.neighbors[recv_pack.ID] = recv_pack
| 12,944
|
def test_deletion_of_resource_owner_consumer(models_fixture):
"""Test deleting of connected user."""
app = models_fixture
with app.app_context():
with db.session.begin_nested():
db.session.delete(User.query.get(app.resource_owner_id))
# assert that c1, t1, t2 deleted
assert db.session.query(
Client.query.filter(
Client.client_id == app.u1c1_id).exists()).scalar() is False
assert db.session.query(
Token.query.filter(
Token.id == app.u1c1u1t1_id).exists()).scalar() is False
assert db.session.query(
Token.query.filter(
Token.id == app.u1c1u2t2_id).exists()).scalar() is False
# still exist consumer
assert db.session.query(
User.query.filter(
User.id == app.consumer_id).exists()).scalar() is True
# delete consumer
db.session.delete(User.query.get(app.consumer_id))
| 12,945
|
def test_PipeJsonRpcSendAsync_4():
"""
Message timeout.
"""
def method_handler1():
ttime.sleep(1)
conn1, conn2 = multiprocessing.Pipe()
pc = PipeJsonRpcReceive(conn=conn2, name="comm-server")
pc.add_method(method_handler1, "method1")
pc.start()
async def send_messages():
p_send = PipeJsonRpcSendAsync(conn=conn1, name="comm-client")
p_send.start()
# Submit multiple messages at once. Messages should stay at the event loop
# and be processed one by one.
with pytest.raises(CommTimeoutError, match="Timeout while waiting for response to message"):
await p_send.send_msg("method1", timeout=0.5)
p_send.stop()
asyncio.run(send_messages())
pc.stop()
| 12,946
|
def findCursor(query, keyname, page_no, page_size):
"""Finds the cursor to use for fetching results from the given page.
We store a mapping of page_no->cursor in memcache. If this result is missing, we look for page_no-1, if that's
missing we look for page_no-2 and so on. Once we've found one (or we get back to page_no=0) then we need to fetch
results from that page forward, storing the results back in memcache as we go.
Args:
query: A query used to fetch data from the data store
    keyname: A string that'll make the keys unique (e.g. all blog posts could have keyname='blog')
page_no: The page number we're after
page_size: The size of pages we're after"""
cursor_page = page_no
cursor = memcache.get('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size))
while not cursor:
cursor_page -= 1
if cursor_page == 0:
break
cursor = memcache.get('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size))
while cursor_page < page_no:
# if we have to fast-forward through pages then we'll store the pages in memcache as we go
if cursor_page == 0:
it = query.run()
else:
it = query.with_cursor(cursor)
n = 0
for _ in it:
n += 1
if n >= page_size:
break
cursor = query.cursor()
cursor_page += 1
memcache.set('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size), cursor)
return cursor
| 12,947
|
def _get_seq(window,variants,ref,genotypeAware):
"""
Using the variation in @variants, construct two haplotypes, one which
contains only homozygous variants, the other which contains both hom and het variants
by placing those variants into the reference base string
    @param window: a (low, high, chromosome) tuple delimiting the region
    @param variants: A vcf_eval.ChromVariants object
    @param ref: a parsers.genome object
    @param genotypeAware: whether to phase hets onto their own sequence to check for genotype accuracy (if there are multiple and they don't overlap, phasing doesn't matter)
@return: a tuple of sequences of bases that comes from modifying the reference sequence with the variants
"""
low = window[0]
high = window[1]
hetChunks = []
homChunks = []
hetOffset = low
homOffset = low
# note: if genotypeAware is False, the het chunks/offset will not be used
def get_ref_bases(start,end):
"""VCF parser is 1-based, but genome is 0-based."""
return ref.ref(window[2],start-1,end-1)
def add_ref_bases_until(chunks,begin,end):
chunks.append(get_ref_bases(begin,end))
def add_alt(chunk,start,var):
add_ref_bases_until(chunk,start,var.pos)
chunk.append(var.alt[0])
for variant in variants:
loc = variant.pos
#print((variant.ref, get_ref_bases(variant.pos,variant.pos+len(variant.ref))))
verifyRefBases = get_ref_bases(variant.pos,variant.pos+len(variant.ref))
if ( variant.ref != verifyRefBases ):
raise RescueError("Variant ref does not match reference at " + window[2] + " " + str(loc) + ": " +variant.ref + " != " + verifyRefBases )
if not ( hetOffset <= loc and homOffset <= loc ):
raise RescueError("Attempted to rescue sequence containing overlapping variants around " + window[2] + " " + str(loc))
assert variant.genotype_type != GENOTYPE_TYPE.HOM_REF
assert variant.genotype_type != GENOTYPE_TYPE.NO_CALL
if ( (not genotypeAware) or variant.genotype_type == GENOTYPE_TYPE.HOM_VAR):
add_alt(homChunks,homOffset,variant)
homOffset = len(variant.ref) + loc
else: # ( variant.genotype_type == GENOTYPE_TYPE.HET )
add_alt(hetChunks,hetOffset,variant)
hetOffset = len(variant.ref) + loc
# NB: this check seems redundant with the assert after it
if ( hetOffset > high or homOffset > high ):
print("-----fail-----")
print(window)
print(map(str,variants))
print((homOffset,high))
assert hetOffset <= high and homOffset <= high
if ( genotypeAware ):
add_ref_bases_until(hetChunks,hetOffset,high)
add_ref_bases_until(homChunks,homOffset,high)
return (''.join(homChunks),''.join(hetChunks))
| 12,948
|
def launch_stats(db_obj, project, seed, query, state, cycles):
"""
(experimental) Obtain statistics of the runs using snapshots pulled down to
the local directory.
"""
import pandas as pd
from glob import glob
from disp.castep_analysis import SCFInfo
query = generate_fw_query(project, seed, state, query)
# DataFrame contains the basic data
dframe = pd.DataFrame(get_launch_info(db_obj.database.fireworks, query))
dframe['workdir'] = dframe['launch_dir'].apply(
lambda x: os.path.split(x)[1])
summaries = []
for _, row in dframe.iterrows():
workdir = row.workdir
castep_file = list(glob(workdir + '/*.castep'))
if not castep_file:
            click.echo(('WARNING: requested FW <{}> of <{}>-'
                        '<{}> is not available locally').format(
                            row.fw_id, row.project_name, row.seed_name))
continue
castep_file = castep_file[0]
_summary = SCFInfo(castep_file).get_summary()
_summary['workdir'] = workdir
_summary['castep_file'] = castep_file
summaries.append(_summary)
if not summaries:
click.echo(
'No data to show - did you forget to pull runs using launch-dirs?')
click.echo('Aborting...')
return
sdframe = pd.DataFrame(summaries)
dframe = dframe.merge(sdframe, how='inner')
dframe['suffix'] = dframe['castep_file'].apply(
lambda x: x.split('-')[-1].replace('.castep', ''))
dframe['total_time'] /= 3600
columns = [
'fw_id', 'suffix', 'seed_name', 'project_name', 'avg_ionic_time',
'avg_elec_time', 'avg_elec_steps', 'ionic_steps', 'total_time'
]
if cycles:
dframe['pj_time'] = dframe['avg_ionic_time'] * cycles / 3600
columns.append('pj_time')
to_show = dframe[columns].set_index('fw_id').sort_values('project_name')
click.echo(tabulate(to_show, headers='keys'))
| 12,949
|
def interp_logpsd(data, rate, window, noverlap, freqs, interpolation='linear'):
"""Computes linear-frequency power spectral density, then uses interpolation
(linear by default) to estimate the psd at the desired frequencies."""
stft, linfreqs, times = specgram(data, window, Fs=rate, noverlap=noverlap, window = np.hamming(window))
ntimes = len(times)
logpsd = np.log10(np.abs(stft.T)**2)
interps = [scipy.interpolate.interp1d(linfreqs, logpsd[t,:], kind=interpolation) for t in range(ntimes)]
interped_logpsd = np.array([interps[t](freqs) for t in range(ntimes)])
return interped_logpsd, freqs, times
| 12,950
|
def load_replica_camera_traj(traj_file_path):
"""
    Load a camera trajectory file.
    Each non-empty line is split on whitespace and appended to the trajectory list.
"""
camera_traj = []
traj_file_handle = open(traj_file_path, 'r')
for line in traj_file_handle:
split = line.split()
#if blank line, skip
if not len(split):
continue
camera_traj.append(split)
traj_file_handle.close()
return camera_traj
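# A minimal usage sketch with a made-up trajectory file; the exact column layout
# of real Replica trajectory files is not assumed here:
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("0 0.0 0.0 0.0\n\n1 0.1 0.0 0.0\n")
    path = f.name
print(load_replica_camera_traj(path))   # [['0', '0.0', '0.0', '0.0'], ['1', '0.1', '0.0', '0.0']]
os.remove(path)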
| 12,951
|
def test_model(sess, graph, x_, y_):
"""
    Evaluate the model on a test set.
    :param sess: TensorFlow session
    :param graph: TensorFlow graph containing the named input, loss and accuracy tensors
    :param x_: test inputs
    :param y_: test labels
    :return: (average loss, average accuracy) over the test set
"""
data_len = len(x_)
batch_eval = batch_iter(x_, y_, 64)
total_loss = 0.0
total_acc = 0.0
input_x = graph.get_operation_by_name('input_x').outputs[0]
input_y = graph.get_operation_by_name('input_y').outputs[0]
drop_prob = graph.get_operation_by_name('drop_prob').outputs[0]
loss = graph.get_operation_by_name('loss/loss').outputs[0]
acc = graph.get_operation_by_name('accuracy/acc').outputs[0]
for x_batch, y_batch in batch_eval:
batch_len = len(x_batch)
feed_dict = {input_x: x_batch, input_y: y_batch,
drop_prob: 0}
test_loss, test_acc = sess.run([loss, acc], feed_dict=feed_dict)
total_loss += test_loss * batch_len
total_acc += test_acc * batch_len
return total_loss / data_len, total_acc / data_len
| 12,952
|
def epanechnikov(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Epanechnikov kernel.
Parameters:
h : bandwidth.
Xi : 1-D ndarray, shape (nobs, 1). The value of the training set.
x : 1-D ndarray, shape (1, nbatch). The value at which the kernel density is being estimated.
Returns:
ndarray of shape ``(n_obs, nbatch)``: The kernel_value at each training point for each var.
"""
u = (Xi - x) / h
out = 3 / 4 * (1 - u**2) * (np.abs(u) <= 1)
assert out.shape == (Xi.shape[0], x.shape[1])
return out
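# A minimal shape check:
import numpy as np
Xi = np.array([[0.0], [0.5], [2.0]])   # (nobs, 1) training points
x = np.array([[0.0, 1.0]])             # (1, nbatch) evaluation points
print(epanechnikov(np.array([1.0]), Xi, x).shape)   # (3, 2); the kernel value is 0.75 at u = 0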
| 12,953
|
def punctuation(chars=r',.\"!@#\$%\^&*(){}\[\]?/;\'`~:<>+=-'):
    Return a regex character class matching the given characters. Useful for preprocessing text. Do not forget
to escape special characters.
"""
return rf'[{chars}]'
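# A minimal usage sketch, stripping punctuation with the default character set:
import re
print(re.sub(punctuation(), "", "Hello, world! (test)"))   # Hello world test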
| 12,954
|
def part_a(instances):
"""Do all the work for part a: use gradient descent to solve, plot
objective value vs. iterations (log scale), step length vs.
iterations, and f(x) - p* vs. iterations (log scale).
Also, experiment with different alpha and beta values to see their
effect on total iterations required for all three problem instances.
"""
for d in instances:
# Perform gradient descent with the first problem instance.
x, obj_array, t_list = gradient_descent(**d)
# Plot.
plot_results(obj_array=obj_array, t_list=t_list,
method='Gradient Descent', param_str=d['param_str_eta'],
method_file='grad_desc', m=d['m'], n=d['n'], p_s=d['p*'])
print(f"Initial problem solved (grad desc). m={d['m']}, n={d['n']}")
# Determine the effect of alpha and beta for different problems.
alpha_array = np.arange(0.05, 0.5, 0.05)
beta_array = np.arange(0.1, 1, 0.1)
# Loop over problem sizes.
for d in instances:
        # Initialize subplots. Do 3x3 since alpha & beta have len 9.
# We'll make the size fit with 0.5" margins, with an extra 0.5"
# for safety.
# noinspection PyTypeChecker
fig, ax_it = plt.subplots(nrows=3, ncols=3, sharex=True, sharey=True,
figsize=(9.5, 7))
fig.suptitle(
r'Number of Iterations vs. $\beta$ for Different Values '
rf"of $\alpha$. Problem Size: $m={d['m']}, n={d['n']}$. "
rf"$\eta={d['eta']}$",
fontsize='x-large'
)
ax_it = ax_it.flatten()
# Loop over backtracking parameters.
for idx, alpha in enumerate(alpha_array):
# Track iterations.
it_count = []
for beta in beta_array:
# Perform gradient descent.
result = gradient_descent(
x_0=d['x_0'], a=d['a'], eta=d['eta'], alpha=alpha,
beta=beta, it_max=d['it_max'])
print(f"Solved (grad desc) for m={d['m']}, n={d['n']}, "
f"alpha={alpha:.2f}, beta={beta:.2f}")
# Track number of iterations.
it_count.append(len(result[1]))
# Plot.
ax = ax_it[idx]
ax.text(0.08, 0.8, rf'$\mathbf{{\alpha={alpha:.2f}}}$',
transform=ax.transAxes, fontsize='large',
fontweight='bold')
# ax.set_title(rf'$\alpha={alpha:.2f}$')
ax.plot(beta_array, it_count, linewidth=2)
ax.set_xlabel(r'$\beta$')
ax.set_xticks(beta_array)
# Label our y-axes on the left.
if idx % 3 == 0:
ax.set_ylabel('Number of Iterations')
ax.grid(True)
# Tighten the final layout.
fig.tight_layout(h_pad=0, w_pad=0, pad=0, rect=[0, 0, 1, 0.9])
fig.savefig(f"figs/alpha_beta_it_{d['m']}_{d['n']}.eps",
orientation='landscape', format='eps')
plt.close(fig)
print(f"Done looping over alpha and beta for m={d['m']}, n={d['n']}")
| 12,955
|
def log_http_request(f):
"""Decorator to enable logging on an HTTP request."""
level = get_log_level()
def new_f(*args, **kwargs):
request = args[1] # Second argument should be request.
object_type = 'Request'
object_id = time.time()
log_name = object_type + '.' + str(object_id)
setattr(request, 'LOG_ID', object_id)
logger = logging.getLogger(log_name)
logger.setLevel(level)
handler = LogModelHandler(object_type, object_id)
logger.addHandler(handler)
return f(*args, **kwargs)
new_f.func_name = f.func_name
return new_f
| 12,956
|
def look_at(vertices, eye, at=[0, 0, 0], up=[0, 1, 0]):
"""
"Look at" transformation of vertices.
"""
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
place = vertices.place
# if list or tuple convert to numpy array
if isinstance(at, list) or isinstance(at, tuple):
at = paddle.to_tensor(at, dtype=paddle.float32, place=place)
# if numpy array convert to tensor
elif isinstance(at, np.ndarray):
at = paddle.to_tensor(at).to(place)
elif paddle.is_tensor(at):
at = at.to(place)
if isinstance(up, list) or isinstance(up, tuple):
up = paddle.to_tensor(up, dtype=paddle.float32, place=place)
elif isinstance(up, np.ndarray):
up = paddle.to_tensor(up).to(place)
elif paddle.is_tensor(up):
up = up.to(place)
if isinstance(eye, list) or isinstance(eye, tuple):
eye = paddle.to_tensor(eye, dtype=paddle.float32, place=place)
elif isinstance(eye, np.ndarray):
eye = paddle.to_tensor(eye).to(place)
elif paddle.is_tensor(eye):
eye = eye.to(place)
batch_size = vertices.shape[0]
if eye.ndimension() == 1:
eye = eye[None, :].tile([batch_size, 1])
if at.ndimension() == 1:
at = at[None, :].tile([batch_size, 1])
if up.ndimension() == 1:
up = up[None, :].tile([batch_size, 1])
# prevent paddle no grad error
at.stop_gradient = False
eye.stop_gradient = False
up.stop_gradient = False
# create new axes
# eps is chosen as 0.5 to match the chainer version
z_axis = F.normalize(at - eye, epsilon=1e-5)
x_axis = F.normalize(paddle.cross(up, z_axis), epsilon=1e-5)
y_axis = F.normalize(paddle.cross(z_axis, x_axis), epsilon=1e-5)
# create rotation matrix: [bs, 3, 3]
r = paddle.concat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), axis=1)
# apply
# [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = paddle.matmul(vertices, r.swapaxes(1,2))
return vertices
| 12,957
|
def _count(expr, pat, flags=0):
"""
Count occurrences of pattern in each string of the sequence or scalar
:param expr: sequence or scalar
:param pat: valid regular expression
:param flags: re module flags, e.g. re.IGNORECASE
:return:
"""
return _string_op(expr, Count, output_type=types.int64,
_pat=pat, _flags=flags)
| 12,958
|
def simplex3_vertices():
"""
Returns the vertices of the standard 3-simplex. Each column is a vertex.
"""
v = np.array([
[1, 0, 0],
[-1/3, +np.sqrt(8)/3, 0],
[-1/3, -np.sqrt(2)/3, +np.sqrt(2/3)],
[-1/3, -np.sqrt(2)/3, -np.sqrt(2/3)],
])
return v.transpose()
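# A quick numerical check: the four vertices are unit vectors and all pairwise
# distances are equal, i.e. they form a regular tetrahedron.
import numpy as np
v = simplex3_vertices()   # shape (3, 4), one vertex per column
print(np.allclose(np.linalg.norm(v, axis=0), 1.0))   # True
dists = [np.linalg.norm(v[:, i] - v[:, j]) for i in range(4) for j in range(i + 1, 4)]
print(np.allclose(dists, dists[0]))   # True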
| 12,959
|
@contextmanager  # requires `from contextlib import contextmanager`
def open_backed_up(fname, mode='r', suffix='~'):
"""A context manager for opening a file with a backup. If an exception is
raised during manipulating the file, the file is restored from the backup
before the exception is reraised.
Keyword arguments:
- fname: path towards the file to be opened
- mode: mode of opening the file (passed on to open()) (default: "r")
- suffix: the suffix to use for the backup file (default: "~")
"""
# If the file does not exist, create it.
if not os.path.exists(fname):
open(fname, 'w').close()
bak_fname = None
# If it does exist, create a backup.
else:
bak_fname = fname + suffix
copy2(fname, bak_fname)
try:
f = open(fname, mode)
yield f
except Exception as e:
if bak_fname is not None:
move(bak_fname, fname)
raise e
# Closing.
f.close()
| 12,960
|
def get_output_msg(status, num_logs):
""" Returnes the output message in accordance to the script status """
if status == EXECUTION_STATE_COMPLETED:
return "Retrieved successfully {} logs that triggered the alert".format(num_logs)
else:
return "Failed to retrieve logs. Please check the script's logs to see what went wrong..."
| 12,961
|
def kSEQK(age):
"""Age-dependent organ-specific absorbed dose rate per unit kerma rate,
normalized against the corresponding value for an adult
Parameters
----------
age: float or list
Age(s) when kSEQK is evaluated.
"""
k=[]
if (not isinstance(age,list)) and (not isinstance(age,np.ndarray)):
age=[age]
for a in age:
if a<20: #TODO is that /1017 actually correct?
k.append((0.0124*a**4-0.5364*a**3+7.4882*a**2-44.888*a+1209.8)/1000)
#k.append((0.0015*a**5 - 0.1214*a**4 + 3.473*a**3 - 40.28*a**2 + 136.3*a + 1233)/1017)
else:
k.append(1.0)
if len(k) == 1:
return k[0]
else:
return np.array(k)
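# A minimal usage sketch:
print(kSEQK(25))            # 1.0 for ages >= 20
print(kSEQK([1, 10, 30]))   # array: age-dependent values below 20, 1.0 from 20 on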
| 12,962
|
def print_arbitrary_fluxes(fluxes: Fluxes, fid: TextIO, max_columns=5) -> None:
"""Print fluxes in FISPACT arbitrary flux format.
Args:
fluxes: what to print
fid: output stream
max_columns: max number of columns in a row
"""
print_fluxes(fluxes, fid, True, max_columns)
| 12,963
|
def insert_channel_links(message: str) -> str:
"""
Takes a message and replaces all of the channel references with
links to those channels in Slack formatting.
:param message: The message to modify
:return: A modified copy of the message
"""
message_with_links = message
matches = re.findall(r'#[a-z0-9\-_(){}\[\]\'\"/]{1,22}', message)
for match in matches:
channel_name = match[1:]
channel = bot.channels.get(channel_name)
if channel is not None:
channel_link_string = f"<#{channel.id}|{channel.name}>"
message_with_links = message_with_links.replace(match, channel_link_string)
return message_with_links
| 12,964
|
def roles_remove(user, role):
"""Remove user from role."""
user_obj = find_user(user)
if user_obj is None:
raise click.UsageError("User not found.")
role = _datastore._prepare_role_modify_args(role)
if role is None:
raise click.UsageError("Cannot find role.")
if _datastore.remove_role_from_user(user_obj, role):
click.secho(
f'Role "{role.name}" removed from user "{user}" successfully.',
fg="green",
)
else:
raise click.UsageError("Cannot remove role from user.")
| 12,965
|
def check_int_uuid(uuid):
"""Check that the int uuid i pass is valid."""
try:
converted = UUID(int=uuid, version=4)
except ValueError:
return False
return converted.int == uuid
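# A minimal usage sketch, assuming `from uuid import UUID, uuid4` in the module:
from uuid import uuid4
print(check_int_uuid(uuid4().int))   # True: a genuine v4 UUID round-trips unchanged
print(check_int_uuid(123456789))     # False: the version/variant bits get rewritten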
| 12,966
|
def calc_E_E_C_hs_d_t_i(i, device, region, A_A, A_MR, A_OR, L_CS_d_t, L_CL_d_t):
"""暖冷房区画𝑖に設置された冷房設備機器の消費電力量(kWh/h)を計算する
Args:
i(int): 暖冷房区画の番号
device(dict): 暖冷房機器の仕様
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
L_CS_d_t(ndarray): 冷房区画の冷房顕熱負荷
L_CL_d_t(ndarray): 冷房区画の冷房潜熱負荷
Returns:
ndarray: 暖冷房区画𝑖に設置された冷房設備機器の消費電力量(kWh/h)
"""
if device['type'] == 'ルームエアコンディショナー':
        # Get the equipment specifications
A_HCZ_i = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
q_rtd_C = rac_spec.get_q_rtd_C(A_HCZ_i)
e_rtd_C = rac_spec.get_e_rtd_C(device['e_class'], q_rtd_C)
        # Calculate the electricity consumption
E_E_C_d_t_i = rac.calc_E_E_C_d_t(
region=region,
q_rtd_C=q_rtd_C,
e_rtd_C=e_rtd_C,
dualcompressor=device['dualcompressor'],
L_CS_d_t=L_CS_d_t[i - 1],
L_CL_d_t=L_CL_d_t[i - 1]
)
else:
raise ValueError(device['type'])
print('{} E_E_C_d_t_{} = {} [kWh] (L_H_d_t_{} = {} [MJ])'.format(device['type'], i, np.sum(E_E_C_d_t_i), i,
np.sum(L_CS_d_t + L_CL_d_t)))
return E_E_C_d_t_i
| 12,967
|
def add_irregular_adjectives(ctx):
"""Add regular irregular adjectives to the database."""
session = ctx.session
gender = ENUM['gender']
case = ENUM['case']
number = ENUM['number']
with open(ctx.config['IRREGULAR_ADJECTIVES']) as f:
for adj in yaml.load_all(f):
stem = AdjectiveStem(name=adj['name'])
session.add(stem)
session.flush()
# Mark the stem as irregular
complete = adj['complete']
irreg = StemIrregularity(stem=stem, fully_described=complete)
session.add(irreg)
session.flush()
util.tick(stem.name)
for form in adj['forms']:
name = form['name']
gender_id = gender[form['gender']]
case_id = case[form['case']]
number_id = number[form['number']]
result = Adjective(stem=stem, name=name, gender_id=gender_id,
case_id=case_id, number_id=number_id)
session.add(result)
session.commit()
session.close()
| 12,968
|
def shift_time(x, dt):
"""Shift time axis to the left by dt. Used to account for pump & lamp delay"""
x -= dt
return x
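# A minimal usage sketch; note that `x -= dt` modifies a numpy array in place:
import numpy as np
t = np.array([0.0, 1.0, 2.0])
shift_time(t, 0.5)
print(t)   # [-0.5  0.5  1.5]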
| 12,969
|
def eps_xfer(request,client_slug=None,show_slug=None):
"""
Returns all the episodes for a show as json.
    Used to sync public URLs with the main conference site.
"""
client=get_object_or_404(Client,slug=client_slug)
show=get_object_or_404(Show,client=client,slug=show_slug)
# eps = Episode.objects.filter(show=show)
eps=eps_filters(request.GET).filter(show=show).order_by('start')
if "id" in request.GET:
eps = eps.filter( id=request.GET['id'] )
fields=['id',
'state',
'location', 'location_slug',
'sequence',
'name', 'slug', 'authors', 'description',
'start', 'duration',
'released', 'license', 'tags',
'conf_key', 'conf_url',
'host_url', 'public_url', 'rax_mp4_url',
'archive_url', 'archive_mp4_url',
'twitter_url',
'comment',
]
if request.user.is_authenticated():
fields.extend(['emails', 'edit_key',])
if "fields" in request.GET:
fields_whitelist = request.GET['fields'].split(',')
print(fields_whitelist)
fields = [f for f in fields if f in fields_whitelist]
"""
serializers.serialize("json", eps,
fields=fields, use_natural_foreign_keys=True,
stream=response)
"""
gold_list = ['location', 'location_slug']
ds=[]
for ep in eps:
d = {}
for f in fields:
if f == 'location':
d[f] = ep.location.name
elif f == 'location_slug':
                d[f] = ep.location.slug
else:
d[f]=getattr(ep,f)
# archive_mp4_url is really the url of the page
# make a mp4 url too
# the mp4 link is now:
# https://archive.org/download/pyohio_2019-Changing_Lives_through_Open_Source_Passion_and_Mentoring/Changing_Lives_through_Open_Source_Passion_and_Mentoring.mp4
if 'archive_mp4_url' in d:
d['archive_url'] = d['archive_mp4_url']
d['archive_mp4_url'] = ""
if 'start' in d:
d['start_at'] = (d['start']
- datetime.timedelta(minutes=5)
).strftime('%H:%M %d.%m.%Y')
ds.append(d)
response = HttpResponse(content_type="application/json")
json.dump( ds, response, cls=serializers.json.DjangoJSONEncoder )
return response
| 12,970
|
def format_types (md, module_name, module_obj):
"""walk the list of types in the module"""
md.append("---")
md.append("## [module types](#{})".format(module_name, "types"))
for name, obj in inspect.getmembers(module_obj):
if obj.__class__.__module__ == "typing":
if not str(obj).startswith("~"):
obj_md = document_type([module_name], name, obj)
md.extend(obj_md)
| 12,971
|
def check_massfracs(df):
"""
"""
elements = chem_elements.elements
elements = list(set(self.df.columns).intersection(set(elements)))
check = df[elements].sum(axis = 1)
diff = abs(check - 1.0)
if diff.max() >= 1.e-3:
raise error_handling.ProgramError("Large errors in sum(mass fractions) for stellar models")
# Allow scaling of mass fractions. Sometimes there are rounding errors in data tables, etc.
scale = 1 / check
df.loc[:,elements] = df[elements].mul(scale, axis = 'rows')
check2 = abs(df[elements].sum(axis = 1) - 1.0)
if check2.max() >= 1.e-12:
raise error_handling.ProgramError("Unable to scale mass fractions.")
| 12,972
|
def register(class_, option=None, get_funcs={}):
"""A decorator to register a function as the way to display an object of class_
"""
if option:
key = (class_, option)
else:
key = class_
def decorator(func):
class_function_mapping[key] = (func, get_funcs)
return func
return decorator
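
# Hedged usage sketch (added for illustration, not from the original source): registering a
# display function for Python's built-in Fraction class. Assumes `class_function_mapping`
# is the module-level dict that `register` writes into.
from fractions import Fraction

@register(Fraction)
def display_fraction(obj):
    # The registered function simply renders the object as "numerator/denominator".
    return "{}/{}".format(obj.numerator, obj.denominator)

# Lookup then works via the mapping populated by the decorator:
# func, get_funcs = class_function_mapping[Fraction]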
| 12,973
|
def startup(target: machine.Machine,
workload: str,
count: int = 5,
port: int = 0,
**kwargs):
"""Time the startup of some workload.
Args:
target: A machine object.
workload: The workload to run.
count: Number of containers to start.
port: The port to check for liveness, if provided.
**kwargs: Additional container options.
Returns:
The mean start-up time in seconds.
"""
# Load before timing.
image = target.pull(workload)
netcat = target.pull("netcat")
count = int(count)
port = int(port)
with helpers.Timer() as timer:
for _ in range(count):
if not port:
# Run the container synchronously.
target.container(image, **kwargs).run()
else:
# Run a detached container until httpd available.
with target.container(image, port=port, **kwargs).detach() as server:
(server_host, server_port) = server.address()
target.container(netcat).run(host=server_host, port=server_port)
return timer.elapsed() / float(count)
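
# Hedged usage sketch (added for illustration, not from the original source): a thin wrapper
# that benchmarks a web-server workload on an existing machine.Machine instance. The workload
# name and parameters are assumptions, not taken from the original benchmark suite.
def example_startup_benchmark(target: machine.Machine) -> float:
    """Return the mean start-up time of three detached web-server containers."""
    return startup(target, workload="nginx", count=3, port=80)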
| 12,974
|
def schema_handler(request):
"""
Handle schema request from UI.
"""
logger.debug("schema_handler: enter")
req = request.GET.get('payload', '')
action = request.GET.get('action', '')
logger.debug('Received schema Request (%s)' % action)
if not request.user.is_authenticated():
logger.error('User must be logged in !!')
return HttpResponse(Response.error(action, 'Unauthorized'))
    if action == 'get-schema':
        return get_schema(request, req)
    elif action == 'get-all-schema':
        return get_schema(request, req, all=True)
    elif action == 'download-schema':
        return download_schema(request, req)
    elif action == 'add-schema':
        return add_schema(request, req)
    # Unrecognized action: report an error instead of silently returning None
    logger.error('Unknown schema action (%s)' % action)
    return HttpResponse(Response.error(action, 'Unknown action'))
| 12,975
|
def log2_fold_change(df, samp_grps):
"""
calculate fold change - fixed as samp_grps.mean_names[0] over samp_grps.mean_names[1],
where the mean names are sorted alphabetically. The log has already been taken,
so the L2FC is calculated as mean0 - mean1
:param df: expanded and/or filtered dataframe
:param samp_grps: SampleGroups() object
:return: dataframe with fold change column appended, with name as in samp_grps.fc_name
"""
mean1 = samp_grps.mean_names[0]
mean2 = samp_grps.mean_names[1]
df[samp_grps.fc_name] = df[mean1] - df[mean2]
return df
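
# Hedged worked example (added for illustration, not from the original source): the real
# SampleGroups() object is stood in for by a SimpleNamespace carrying only the attributes
# log2_fold_change actually reads (`mean_names` and `fc_name`).
import pandas as pd
from types import SimpleNamespace

_demo_df = pd.DataFrame({"mean_A": [10.0, 8.0], "mean_B": [9.0, 8.5]})
_demo_grps = SimpleNamespace(mean_names=["mean_A", "mean_B"], fc_name="log2_fc")
print(log2_fold_change(_demo_df, _demo_grps)["log2_fc"].tolist())  # [1.0, -0.5]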
| 12,976
|
def infer_wheel_units(pos):
"""
Given an array of wheel positions, infer the rotary encoder resolution, encoding type and units
The encoding type varies across hardware (Bpod uses X1 while FPGA usually extracted as X4), and
older data were extracted in linear cm rather than radians.
:param pos: a 1D array of extracted wheel positions
:return units: the position units, assumed to be either 'rad' or 'cm'
:return resolution: the number of decoded fronts per 360 degree rotation
:return encoding: one of {'X1', 'X2', 'X4'}
"""
if len(pos.shape) > 1: # Ensure 1D array of positions
pos = pos.flatten()
# Check the values and units of wheel position
res = np.array([wh.ENC_RES, wh.ENC_RES / 2, wh.ENC_RES / 4])
# min change in rad and cm for each decoding type
# [rad_X4, rad_X2, rad_X1, cm_X4, cm_X2, cm_X1]
min_change = np.concatenate([2 * np.pi / res, wh.WHEEL_DIAMETER * np.pi / res])
pos_diff = np.median(np.abs(np.ediff1d(pos)))
# find min change closest to min pos_diff
idx = np.argmin(np.abs(min_change - pos_diff))
if idx < len(res):
# Assume values are in radians
units = 'rad'
encoding = idx
else:
units = 'cm'
encoding = idx - len(res)
enc_names = {0: 'X4', 1: 'X2', 2: 'X1'}
return units, int(res[encoding]), enc_names[int(encoding)]
| 12,977
|
def extract_tform(landmarks, plane_name):
"""Compute the transformation that maps the reference xy-plane at origin to the GT standard plane.
Args:
landmarks: [landmark_count, 3] where landmark_count=16
plane_name: 'tv' or 'tc'
Returns:
trans_vec: translation vector [3]
quat: quaternions [4]
mat: 4x4 transformation matrix [4, 4]
"""
if plane_name == 'tv':
# Landmarks lying on the TV plane
landmarks_plane = np.vstack((landmarks[1:8], landmarks[12:14]))
# Compute transformation
z_vec, p_plane = fit_plane(landmarks_plane)
landmarks_plane_proj = project_on_plane(landmarks_plane, z_vec, p_plane)
landmarks_line = landmarks_plane_proj[[0, 1, 2, 7, 8], :]
x_vec, p_line = fit_line(landmarks_line)
y_vec = geometry.unit_vector(np.cross(z_vec, x_vec))
# 4x4 transformation matrix
mat = np.eye(4)
mat[:3, :3] = np.vstack((x_vec, y_vec, z_vec)).transpose()
mat[:3, 3] = landmarks_plane_proj[0]
# Quaternions and translation vector
quat = geometry.quaternion_from_matrix(mat[:3, :3])
trans_vec = mat[:3, 3]
elif plane_name == 'tc':
# Landmarks lying on the TC plane
cr = landmarks[10]
cl = landmarks[11]
csp = landmarks[12]
# Compute transformation
csp_cl = cl - csp
csp_cr = cr - csp
z_vec = np.cross(csp_cl, csp_cr)
z_vec = geometry.unit_vector(z_vec)
cr_cl_mid = (cr + cl) / 2.0
x_vec = geometry.unit_vector(cr_cl_mid - csp)
y_vec = geometry.unit_vector(np.cross(z_vec, x_vec))
# 4x4 transformation matrix
mat = np.eye(4)
mat[:3, :3] = np.vstack((x_vec, y_vec, z_vec)).transpose()
mat[:3, 3] = (cr_cl_mid + csp) / 2.0
# Quaternions and translation vector
quat = geometry.quaternion_from_matrix(mat[:3, :3])
trans_vec = mat[:3, 3]
else:
raise ValueError('Invalid plane name.')
return trans_vec, quat, mat
| 12,978
|
def load_events(fhandle: TextIO) -> annotations.Events:
"""Load an URBAN-SED sound events annotation file
Args:
fhandle (str or file-like): File-like object or path to the sound events annotation file
Raises:
IOError: if txt_path doesn't exist
Returns:
Events: sound events annotation data
"""
times = []
labels = []
confidence = []
reader = csv.reader(fhandle, delimiter="\t")
for line in reader:
times.append([float(line[0]), float(line[1])])
labels.append(line[2])
confidence.append(1.0)
events_data = annotations.Events(
np.array(times), "seconds", labels, "open", np.array(confidence)
)
return events_data
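
# Hedged usage sketch (added for illustration, not from the original source): an in-memory
# tab-separated annotation with two events, fed through io.StringIO instead of a real file.
# Printing `.labels` assumes the annotations.Events API exposes that attribute.
import io

_demo_txt = "0.000\t2.500\tdog_bark\n1.250\t4.000\tsiren\n"
_demo_events = load_events(io.StringIO(_demo_txt))
print(_demo_events.labels)  # ['dog_bark', 'siren']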
| 12,979
|
def loadandcleanRAPIDexport(rapidsubproductsexport):
"""
:param rapidsubproductsexport: an Excel file name with ".xlsx" extention
:return:
"""
exportfile = os.path.realpath("gannt_data/"+ rapidsubproductsexport)
df = pd.read_excel(exportfile,na_values=["-"])
df = convertFYQfieldstodates(df)
df = merge_product_subproduct(df)
df = splitnamefields(df)
return df
| 12,980
|
def set_nan(df, chrom_bed_file):
"""This function will take in a dataframe and chromosome length bed file
and will replace 0's with np.nan according to each chromosome length.
This will fix any issues when calculating Z-scores"""
# Build dictionary of key=chromosome and value=chromosome_length
chrom_length_dict = {}
    for v in chrom_bed_file.itertuples():
        chrom_length_dict[v[1]] = v[2]
# Iterate through each column
for chrom in df.columns.to_list():
current_chrom_length = chrom_length_dict[str(chrom)]
# Iterate through each value of a column in reverse
for index, value in zip(
reversed(df.index.to_list()),
reversed(df[chrom].to_list())
):
# Check if index is greater than length of chromosome
if index > current_chrom_length:
df.at[index, chrom] = np.nan
else:
break
return df
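
# Hedged worked example (added for illustration, not from the original source): a tiny
# coverage table binned at positions 1-5 for a single chromosome of length 3; bins past
# the chromosome end are masked to NaN.
import pandas as pd

_demo_bed = pd.DataFrame({"chrom": ["chr1"], "length": [3]})
_demo_cov = pd.DataFrame({"chr1": [4.0, 5.0, 6.0, 0.0, 0.0]}, index=[1, 2, 3, 4, 5])
print(set_nan(_demo_cov, _demo_bed)["chr1"].tolist())  # [4.0, 5.0, 6.0, nan, nan]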
| 12,981
|
def configure_smoothing(new_d,smoothing_scans):
"""
# <batchstep method="net.sf.mzmine.modules.peaklistmethods.peakpicking.smoothing.SmoothingModule">
# <parameter name="Peak lists" type="BATCH_LAST_PEAKLISTS"/>
# <parameter name="Filename suffix">smoothed</parameter>
# <parameter name="Filter width">9</parameter>
# <parameter name="Remove original peak list">false</parameter>
# </batchstep>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'SmoothingModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Filter width' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(smoothing_scans)
return new_d
| 12,982
|
def contact_us():
""" Contact Us Route
Route to lead to the contact page
Args:
None
Returns:
rendered template for contact_us.html
"""
return render_template('contact_us.html', title='CONP | Contact Us', user=current_user)
| 12,983
|
def costFunc1(x, module, output, col, row, bbox, img, prfObj):
"""Debugging function.
Does the same as costFunc, but col and row are constants,
and only the brightness of the prf can be changed.
"""
model = prfObj.getPrfForBbox(module, output, col, row, bbox)
model *= x[0]
cost = img-model
cost = np.sum(cost**2)
return cost
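
# Hedged usage sketch (added for illustration, not from the original source): costFunc1 is
# written so it can be handed directly to scipy's minimisers, with only the brightness x[0]
# free. `module`, `output`, `col`, `row`, `bbox`, `img` and `prfObj` are placeholders for
# objects prepared elsewhere in the pipeline.
def fit_prf_brightness(module, output, col, row, bbox, img, prfObj, x0=1.0):
    from scipy.optimize import minimize
    res = minimize(costFunc1, x0=[x0],
                   args=(module, output, col, row, bbox, img, prfObj),
                   method="Nelder-Mead")
    return res.x[0]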
| 12,984
|
def query_field(boresight, r1=None, r2=None, observatory='apo',
mag_range=None, mag_column=None, database_params=None):
"""Selects Gaia DR2 stars for a field, from the database.
Parameters
----------
boresight : tuple
A tuple with the right ascension and declination of the boresight,
in degrees.
r1,r2 : float
The internal and external radii along which the GFAs are located, in
degrees.
observatory : str
The observatory, used to load the default configuration for the GFAs.
mag_range : tuple
The range of magnitudes used to select stars.
mag_column : str
The name of the magnitude column to query.
database_params : dict
A dictionary of database parameters to create the connection. Can
include ``user``, ``host``, ``port``, and ``dbname``.
Returns
-------
`~pandas.Dataframe`
A dataframe with the selected stars.
"""
obs_data = config[observatory]
r1 = r1 or obs_data['r1']
r2 = r2 or obs_data['r2']
mag_range = mag_range or config['mag_range']
mag_column = mag_column or config['mag_column']
query = ('WITH x AS MATERIALIZED (SELECT source_id, ra, dec, '
'{mag_column}, pmra, pmdec '
'FROM gaia_dr2_source WHERE '
'q3c_radial_query(ra, dec, {ra}, {dec}, {r2}) AND '
'NOT q3c_radial_query(ra, dec, {ra}, {dec}, {r1})) '
'SELECT * FROM x WHERE {mag_column} > {g_min} AND '
'{mag_column} < {g_max};')
query = query.format(ra=boresight[0], dec=boresight[1], r1=r1, r2=r2,
g_min=mag_range[0], g_max=mag_range[1],
mag_column=mag_column)
if database_params is None:
database_params = config['database']
conn_str = ''
for key in database_params:
conn_str += f'{key}={database_params[key]} '
connection = psycopg2.connect(conn_str)
data = pandas.read_sql(query, connection)
connection.close()
return data
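
# Hedged usage sketch (added for illustration, not from the original source): query the GFA
# annulus around an arbitrary APO field centre. Running it requires access to the configured
# Gaia DR2 database; the coordinates, magnitude range and column name are assumptions.
def example_gfa_query():
    """Return guide-star candidates for a hypothetical field centre at (180, 30) deg."""
    return query_field((180.0, 30.0), observatory='apo',
                       mag_range=(10, 15), mag_column='phot_g_mean_mag')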
| 12,985
|
def test_deploy_flow_only_exclude_input_schema_if_none(
fc, mocked_responses, input_schema, expected
):
"""Verify the *input_schema* is not excluded even if it's false-y."""
mocked_responses.add("POST", "https://flows.api.globus.org/flows")
fc.deploy_flow(
# Included arguments
flow_definition=VALID_FLOW_DEFINITION,
title="--title--",
input_schema=input_schema,
# Excluded arguments
subtitle="",
description=None,
# Other arguments
validate_definition=False,
validate_schema=False,
dry_run=False,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert "subtitle" not in data
assert "description" not in data
assert ("input_schema" in data) is expected
| 12,986
|
def get_throttling_equilibria(simulation_config, input_params, priority_queue=True, dev_team_factor=1.0):
"""
Returns the equilibrium profiles for throttling configuration under analysis.
:param simulation_config:
:param input_params:
:return:
"""
desc_inf003 = "THROTTLING_INF003"
process_configuration_inf003 = dict(simulation_config)
process_configuration_inf003["THROTTLING_ENABLED"] = True
process_configuration_inf003["GATEKEEPER_CONFIG"] = None
process_configuration_inf003["INFLATION_FACTOR"] = 0.03
process_configuration_inf003["SUCCESS_RATE"] = 0.95
if priority_queue and dev_team_factor == 0.5:
filename_inf003 = "INF3.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
elif priority_queue and dev_team_factor == 1.0:
filename_inf003 = "INF3.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
elif not priority_queue and dev_team_factor == 0.5:
filename_inf003 = "INF3.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
    elif not priority_queue and dev_team_factor == 1.0:
        filename_inf003 = "INF3.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
        filename_inf010 = "INF10.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
        filename_inf020 = "INF20.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
    else:
        raise ValueError("Unsupported combination: priority_queue=%s, dev_team_factor=%s"
                         % (priority_queue, dev_team_factor))
equilibrium_profiles_inf003 = get_profiles_from_file("csv/" + filename_inf003, scenario_desc=desc_inf003,
input_params=input_params)
desc_inf010 = "THROTTLING_INF010"
process_configuration_inf010 = dict(process_configuration_inf003)
process_configuration_inf010["INFLATION_FACTOR"] = 0.10
equilibrium_profiles_inf010 = get_profiles_from_file("csv/" + filename_inf010, scenario_desc=desc_inf010,
input_params=input_params)
desc_inf020 = "THROTTLING_INF020"
process_configuration_inf020 = dict(process_configuration_inf003)
process_configuration_inf020["INFLATION_FACTOR"] = 0.20
equilibrium_profiles_inf020 = get_profiles_from_file("csv/" + filename_inf020, scenario_desc=desc_inf020,
input_params=input_params)
return [{"desc": desc_inf003,
"simulation_configuration": process_configuration_inf003,
"equilibrium_profiles": equilibrium_profiles_inf003},
{"desc": desc_inf010,
"simulation_configuration": process_configuration_inf010,
"equilibrium_profiles": equilibrium_profiles_inf010},
{"desc": desc_inf020,
"simulation_configuration": process_configuration_inf020,
"equilibrium_profiles": equilibrium_profiles_inf020}]
| 12,987
|
def checkRoot():
"""Check if script was run as root"""
if not os.geteuid() == 0:
sys.exit("You must be root to run this command, please use sudo and try again.")
| 12,988
|
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
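
# Runnable example (added for illustration, not from the original source): two partials with
# the same function, positional args and keywords compare equal under this signature.
import functools

_p1 = functools.partial(int, "ff", base=16)
_p2 = functools.partial(int, "ff", base=16)
print(signature(_p1) == signature(_p2))  # True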
| 12,989
|
def test_list_byte_length_nistxml_sv_iv_list_byte_length_1_2(mode, save_output, output_format):
"""
Type list/byte is restricted by facet length with value 5.
"""
assert_bindings(
schema="nistData/list/byte/Schema+Instance/NISTSchema-SV-IV-list-byte-length-1.xsd",
instance="nistData/list/byte/Schema+Instance/NISTXML-SV-IV-list-byte-length-1-2.xml",
class_name="NistschemaSvIvListByteLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 12,990
|
def clean_key(func):
"""Provides a clean, readable key from the funct name and module path.
"""
module = func.__module__.replace("formfactoryapp.", "")
return "%s.%s" % (module, func.__name__)
| 12,991
|
def _autolabel(rects, ax, **kwargs):
"""
Attach a text label above each bar in *rects*, displaying its height.
Args:
        rects: bar patches (matplotlib Rectangle objects) returned by ax.bar.
ax: Matplotlib Axes object where labels will be generated.
"""
fontsize = kwargs.get('fontsize', None)
number_decimals = kwargs.get('number_decimals', 2)
for rect in rects:
height = rect.get_height()
height_str = util.format_float_lower_than_1(
round(height, number_decimals)
)
ax.annotate(height_str, # '{}'.format(height)
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom',
fontsize=fontsize)
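
# Hedged usage sketch (added for illustration, not from the original source): label a simple
# bar chart. Assumes the module's `util.format_float_lower_than_1` helper (used inside
# _autolabel) is importable in this context.
import matplotlib.pyplot as plt

_fig, _ax = plt.subplots()
_bars = _ax.bar(["a", "b", "c"], [0.25, 0.5, 0.75])
_autolabel(_bars, _ax, fontsize=8, number_decimals=2)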
| 12,992
|
async def async_setup_entry(
hass: HomeAssistantType,
config_entry: ConfigEntry,
async_add_entities: Callable[[list[entity.Entity], bool], None],
):
"""Add hantest sensor platform from a config_entry."""
integration: AmsHanIntegration = hass.data[DOMAIN][config_entry.entry_id]
processor: MeterMeasureProcessor = MeterMeasureProcessor(
hass, config_entry, async_add_entities, integration.measure_queue
)
# start processing loop task
integration.add_task(hass.loop.create_task(processor.async_process_measures_loop()))
| 12,993
|
def rotY(M, alpha):
"""Rotates polygon M around Y axis by alpha degrees.
M needs to be a Numpy Array with shape (4,N) with N>=1"""
T = np.eye(4)
alpha_radians = np.radians(alpha)
sin = np.sin(alpha_radians)
cos = np.cos(alpha_radians)
T[0,0] = cos
T[2,2] = cos
T[0,2] = sin
T[2,0] = -sin
return np.dot(T,M)
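
# Runnable worked example (added for illustration, not from the original source): rotating the
# homogeneous point (1, 0, 0, 1) by 90 degrees about the Y axis maps it onto the -Z axis.
_p = np.array([[1.0], [0.0], [0.0], [1.0]])  # shape (4, 1)
print(np.round(rotY(_p, 90), 6).ravel())     # [ 0.  0. -1.  1.]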
| 12,994
|
def is_image_file(filename):
"""
:param filename:
:return:
"""
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
| 12,995
|
def main(src, *, dest=None, exporter=None, filtering=False, pagebreaks=False, save=False, debug=False):
"""
Runs Otter Export
Args:
src (``str``): path to source notebook
dest (``Optional[str]``): path at which to write PDF
exporter (``Optional[str]``): exporter name
filtering (``bool``): whether to filter cells using HTML comments
pagebreaks (``bool``): whether to pagebreak between filtered regions; ignored if ``filtering``
is ``False``
save (``bool``): whether to save any intermediate files (e.g. ``.tex``, ``.html``)
debug (``bool``): whether to run in debug mode (print full error messages)
"""
export_notebook(
src,
dest = dest,
exporter_type = exporter,
filtering = filtering,
pagebreaks = pagebreaks,
save_tex = save,
save_html = save,
debug = debug
)
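
# Hedged usage sketch (added for illustration, not from the original source): export a
# notebook to a PDF next to it, filtering cells and adding pagebreaks between filtered
# regions. The notebook file name is a placeholder, not a path from the original project.
def example_export(notebook_path="hw01.ipynb"):
    """Export `notebook_path` to PDF with cell filtering and pagebreaks enabled."""
    main(notebook_path, filtering=True, pagebreaks=True)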
| 12,996
|
def invert_comp_specifier(comp_specifier):
""" return the opposite (logical negation) of @p comp_specifier """
inverse_map = {
Comparison.Equal: Comparison.NotEqual,
Comparison.Less: Comparison.GreaterOrEqual,
Comparison.LessOrEqual: Comparison.Greater,
Comparison.NotEqual: Comparison.Equal,
Comparison.Greater: Comparison.LessOrEqual,
Comparison.GreaterOrEqual: Comparison.Less,
}
return inverse_map[comp_specifier]
| 12,997
|
def latlong2utm(point):
"""
This function converts a point from lat long to utm
Input : point : (lat,long)
Output : utm point : (x,y,z, n)
"""
import utm
return utm.from_latlon(point[0],point[1])
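
# Runnable example (added for illustration, not from the original source), assuming the
# third-party `utm` package is installed: convert an arbitrary lat/long pair.
print(latlong2utm((51.2167, 7.0833)))  # (easting, northing, 32, 'U') for this point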
| 12,998
|
def multiply(a,b):
"""
multiply values
Args:
a ([float/int]): any value
b ([float/int]): any value
"""
return a*b
| 12,999
|