| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def _safe_isnan(x):
"""Wrapper for isnan() so it won't fail on non-numeric values."""
try:
return isnan(x)
except TypeError:
return False
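# Minimal usage sketch, assuming `isnan` is `math.isnan` imported at module
# level as the wrapper above requires: numbers behave as usual, non-numeric
# values return False instead of raising TypeError.
from math import isnan

print(_safe_isnan(float("nan")))    # True
print(_safe_isnan(3.14))            # False
print(_safe_isnan("not a number"))  # False -- isnan() alone would raise TypeError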
| 5,341,200
|
def get_system( context, system_id = None ):
"""
Finds a system matching the given identifier and returns its resource
Args:
context: The Redfish client object with an open session
system_id: The system to locate; if None, perform on the only system
Returns:
The system resource
"""
system_uri_pattern = "/redfish/v1/Systems/{}"
avail_systems = None
# If given an identifier, get the system directly
if system_id is not None:
system = context.get( system_uri_pattern.format( system_id ) )
# No identifier given; see if there's exactly one member
else:
avail_systems = get_system_ids( context )
if len( avail_systems ) == 1:
system = context.get( system_uri_pattern.format( avail_systems[0] ) )
else:
raise RedfishSystemNotFoundError( "Service does not contain exactly one system; a target system needs to be specified: {}".format( ", ".join( avail_systems ) ) )
# Check the response and return the system if the response is good
try:
verify_response( system )
except:
if avail_systems is None:
avail_systems = get_system_ids( context )
raise RedfishSystemNotFoundError( "Service does not contain a system called {}; valid systems: {}".format( system_id, ", ".join( avail_systems ) ) ) from None
return system
| 5,341,201
|
def train_unloading_station_pop(station, train, escalator):
"""
calculate the population of the platform while people are disembarking
from the train. Each run reprisents the passing of one second in time.
"""
"""
People exit the train, but first we need to calculate the train boarding,
and exiting rate. if there are fewer spaces on the platform than the maximum
boarding rate of the trains, then the rate at which people can exit the train
in one second is equal to the number of empty spaces on the platform.
"""
if (station.capacity - station.pop) < (train.board_rate * train.cars) and \
(station.capacity - station.pop) > 0 and \
train.travelers_exiting >= (train.board_rate * train.cars):
exit_train_rate = (station.capacity - station.pop)
elif (station.capacity - station.pop) <= 0:
print("station is at capapcity!")
exit_train_rate = 0
elif train.travelers_exiting <= (train.board_rate * train.cars) and \
(station.capacity - station.pop) >= train.travelers_exiting:
exit_train_rate = train.travelers_exiting
else:
exit_train_rate = (train.board_rate * train.cars)
print("exit train rate:", exit_train_rate, "\n")
if train.travelers_exiting - exit_train_rate >= 0:
train.travelers_exiting -= exit_train_rate
train.pop -= exit_train_rate
else:
train.pop = train.pop - train.travelers_exiting
train.travelers_exiting = 0
station.travelers_arriving += exit_train_rate - \
int(station.escalators_exiting * escalator.rate)
station.travelers_departing += int(station.escalators_entering * escalator.rate)
| 5,341,202
|
def assert_no_cycle(
g: networkx.DiGraph
) -> None:
"""If the graph has cycles, throws AssertionError.
This can be used to make sure that a refinements graph is a DAG.
Parameters
----------
g :
A refinements graph.
"""
logger.debug('Looking for cycles in belief graph')
try:
cyc = networkx.algorithms.cycles.find_cycle(g)
except networkx.exception.NetworkXNoCycle:
return
msg = 'Cycle found in hierarchy graph: %s' % cyc
assert False, msg
| 5,341,203
|
def cartesian_to_polar(x, y, xorigin=0.0, yorigin=0.0):
"""
Helper function to convert Cartesian coordinates to polar coordinates
(centred at a defined origin). In the polar coordinates, theta is an
angle measured clockwise from the Y axis.
:Parameters:
x: float
X coordinate of point
y: float
Y coordinate of point
xorigin: float (optional)
X coordinate of origin (if not zero)
yorigin: float (optional)
Y coordinate of origin (if not zero)
:Returns:
(r, theta): tuple of 2 floats
Polar coordinates of point. NOTE: theta is in radians.
"""
xdiff = float(x) - float(xorigin)
ydiff = float(y) - float(yorigin)
distsq = (xdiff * xdiff) + (ydiff * ydiff)
r = math.sqrt(distsq)
theta = PIBY2 - math.atan2(ydiff, xdiff)
# Adjust theta to be in the range 0 - 2*PI
while theta < 0.0:
theta += PI2
while theta > PI2:
theta -= PI2
return (r, theta)
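# Minimal usage sketch, assuming the module-level constants are PIBY2 = pi/2
# and PI2 = 2*pi (consistent with the wrap-around logic above).
import math
PIBY2 = math.pi / 2.0
PI2 = 2.0 * math.pi

r, theta = cartesian_to_polar(0.0, 1.0)    # point on the +Y axis
print(r, theta)                            # 1.0 0.0 (theta is clockwise from Y)
r, theta = cartesian_to_polar(1.0, 0.0)    # point on the +X axis
print(r, round(theta, 6))                  # 1.0 1.570796 (~pi/2)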
| 5,341,204
|
def keypoint_dict_to_struct(keypoint_dict):
""" parse a keypoint dictionary form into a keypoint info structure """
keypoint_info_struct = KeypointInfo()
if 'keypoints' in keypoint_dict:
for k in keypoint_dict['keypoints']:
keypoint = keypoint_info_struct.keypoints.add()
keypoint.xloc = k[0]
keypoint.yloc = k[1]
if 'jacobians' in keypoint_dict:
for j in keypoint_dict['jacobians']:
jacobian = keypoint_info_struct.jacobians.add()
jacobian.d11 = j[0][0]
jacobian.d12 = j[0][1]
jacobian.d21 = j[1][0]
jacobian.d22 = j[1][1]
keypoint_info_struct.pts = keypoint_dict['pts']
keypoint_info_struct.index = keypoint_dict['index']
return keypoint_info_struct
| 5,341,205
|
def softmax_regression(img):
"""
定义softmax分类器:
只通过一层简单的以softmax为激活函数的全连接层,可以得到分类的结果
Args:
img -- 输入的原始图像数据
Return:
predict -- 分类的结果
"""
predict = paddle.layer.fc(
input=img, size=10, act=paddle.activation.Softmax())
return predict
| 5,341,206
|
def BlockdevWipe(disk, offset, size):
"""Wipes a block device.
@type disk: L{objects.Disk}
@param disk: the disk object we want to wipe
@type offset: int
@param offset: The offset in MiB in the file
@type size: int
@param size: The size in MiB to write
"""
try:
rdev = _RecursiveFindBD(disk)
except errors.BlockDeviceError:
rdev = None
if not rdev:
_Fail("Cannot wipe device %s: device not found", disk.iv_name)
if offset < 0:
_Fail("Negative offset")
if size < 0:
_Fail("Negative size")
if offset > rdev.size:
_Fail("Wipe offset is bigger than device size")
if (offset + size) > rdev.size:
_Fail("Wipe offset and size are bigger than device size")
_DumpDevice("/dev/zero", rdev.dev_path, offset, size, True)
| 5,341,207
|
def incremental_quality(
wavelength: ndarray,
flux: ndarray,
*,
mask: Optional[Union[Quantity, ndarray]] = None,
percent: Union[int, float] = 10,
**kwargs,
) -> Tuple[ndarray, ndarray]:
"""Determine spectral quality in incremental sections.
Parameters
----------
wavelength: array-like or Quantity
Wavelength of spectrum.
flux: array-like or Quantity
Flux of spectrum.
mask: array-like, Quantity or None
Pixel weight mask.
percent: Union[int, float] (default=10)
The percent size of chunk around each wavelength position.
kwargs:
Extra arguments passed onto quality() (including mask).
Returns
-------
x: ndarray
Central wavelength values of each section.
q: ndarray
Spectral quality for each section.
"""
positions = log_chunks(wavelength, percent)
qualities = []
for pos1, pos2 in zip(positions[:-1], positions[1:]):
pos_mask = (wavelength >= pos1) & (wavelength < pos2)
if np.sum(pos_mask) <= 1:
# 1 or less points in this section
continue
x = wavelength[pos_mask]
y = flux[pos_mask]
if mask is not None:
z = mask[pos_mask]
else:
z = mask # None
try:
q = quality(x, y, mask=z, **kwargs)
except:
q = np.nan
qualities.append([np.nanmean(x), q])
x, q = np.asarray(qualities).T
return x, q
| 5,341,208
|
def plot_ci_forest(n, ci_term, estimator, features, targets):
"""confidence interval plot for forest estimator"""
"""
Parameters
----------
n: int
The n timestamp of prediction going to be plotted
ci_term:
"""
predictions = []
for est in estimator.estimators_:
predictions.append(est.predict(features.iloc[n,:].to_numpy().reshape(1,-1)))
predictions = np.array(predictions)
prediction_list = predictions.reshape(predictions.shape[0], predictions.shape[2])
fig = plt.figure(figsize=(16,7))
plt.plot(np.quantile(prediction_list, 0.5, axis = 0), 'gx-', label='Prediction')
plt.plot(np.quantile(prediction_list, ci_term, axis = 0), 'g--', label='{} % lower bound'.format(ci_term*100))
plt.plot(np.quantile(prediction_list, 1-ci_term, axis = 0), 'g--', label='{} % upper bound'.format(100-ci_term*100))
plt.plot(targets.iloc[n,:].to_numpy(), 'ro', label='Ground truth')
plt.xlabel('hours', **font)
plt.ylabel('KWh', **font)
plt.legend(loc='upper left', fontsize = 15)
plt.show()
| 5,341,209
|
def get_platform():
"""Gets the users operating system.
Returns:
An `int` representing the users operating system.
0: Windows x86 (32 bit)
1: Windows x64 (64 bit)
2: Mac OS
3: Linux
If the operating system is unknown, -1 will be returned.
"""
return defaultdict(lambda: -1, {
"Windows": 1 if platform.machine().endswith("64") else 0,
"Darwin": 2,
"Linux:": 3,
})[platform.system()]
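# Minimal usage sketch, assuming `platform` and `collections.defaultdict` are
# imported at module level as get_platform requires.
import platform
from collections import defaultdict

code = get_platform()
labels = {0: "Windows x86", 1: "Windows x64", 2: "Mac OS", 3: "Linux", -1: "unknown"}
print("Running on:", labels[code])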
| 5,341,210
|
def count_number_of_digits_with_unique_segment_numbers(displays: str) -> int:
"""Counts the number of 1, 4, 7 or 8s in the displays."""
displays = [Display.from_string(d) for d in displays.splitlines()]
num_digits = 0
for display in displays:
num_digits += sum(len(c) in (2, 3, 4, 7) for c in display.output_value)
return num_digits
| 5,341,211
|
def svo_filter_url(telescope, photofilter, zeropoint='AB'):
"""
Returns the URL where the filter transmission curve is hiding.
Requires arguments:
telescope: SVO-like name of Telescope/Source of photometric system.
photofilter: SVO-like name of photometric filter.
Optional:
zeropoint: String. Either 'AB', 'Vega', or 'ST'.
Output:
url: URL of the relevant file.
"""
url = 'http://svo2.cab.inta-csic.es/theory/fps3/fps.php?' + \
'PhotCalID=' + telescope + '/' + photofilter + '/' + zeropoint
return url
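# Minimal usage sketch; the telescope/filter names below are illustrative
# SVO-style identifiers, not values taken from the original code.
url = svo_filter_url('2MASS', '2MASS.J', zeropoint='Vega')
print(url)
# http://svo2.cab.inta-csic.es/theory/fps3/fps.php?PhotCalID=2MASS/2MASS.J/Vega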
| 5,341,212
|
def calculate_sampling_rate(timestamps):
"""
Parameters
----------
timestamps : array_like of timestamps, float (unit: second) or datetime-like values
Returns
-------
float : sampling rate
"""
if isinstance(timestamps[0], float):
timestamps_second = timestamps
else:
try:
v_parse_datetime = np.vectorize(parse_datetime)
timestamps = v_parse_datetime(timestamps)
timestamps_second = []
timestamps_second.append(0)
for i in range(1, len(timestamps)):
timestamps_second.append((timestamps[i] - timestamps[
i - 1]).total_seconds())
except Exception:
# If parsing fails we cannot compute a rate; bail out early.
return None
steps = np.diff(timestamps_second)
sampling_rate = round(1 / np.min(steps[steps != 0]))
return sampling_rate
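# Minimal usage sketch, assuming numpy is imported as np at module level as the
# function body requires. Float timestamps spaced 10 ms apart give a 100 Hz rate.
import numpy as np

timestamps = [i * 0.01 for i in range(100)]  # seconds, 10 ms apart
print(calculate_sampling_rate(timestamps))   # 100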
| 5,341,213
|
async def main(inventory: List[str], macaddr: MacAddress):
"""
Given an inventory of devices and the MAC address to locate, try to find
the location of the end-host. As a result of checking the network, the
User will see an output of either "found" identifying the network device
and port, or "Not found".
Parameters
----------
inventory: List[str]
The list of network devices to check.
macaddr: MacAddress
The end-host MAC address to locate
"""
with Progress() as progressbar:
found = await _search_network(
inventory, macaddr=macaddr, progressbar=progressbar
)
if not found:
print("Not found.")
return
print(f"Found {macaddr} on device {found.device}, interface {found.interface}")
| 5,341,214
|
def compute_ss0(y, folds):
"""
Compute the sum of squares based on null models (i.e., always predict the average `y` of the training data).
Parameters
----------
y : ndarray
folds : list of ndarray
Each element is an ndarray of integers, which are the indices of members of the fold.
Returns
-------
ss0 : float
The sum of squares based on null models.
"""
yhat0 = np.zeros_like(y)
for test_idx in folds:
m = np.ones_like(y, dtype=bool)
m[test_idx] = False
yhat0[test_idx] = y[m].mean()
ss0 = np.sum((y - yhat0)**2)
return ss0
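# Worked usage sketch: 2-fold split over six values. Each fold is predicted by
# the mean of the other fold, and ss0 sums the squared residuals.
import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
folds = [np.array([0, 1, 2]), np.array([3, 4, 5])]
# Fold 1 is predicted by mean(4,5,6)=5 and fold 2 by mean(1,2,3)=2, so
# ss0 = (1-5)^2+(2-5)^2+(3-5)^2 + (4-2)^2+(5-2)^2+(6-2)^2 = 58.0
print(compute_ss0(y, folds))  # 58.0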
| 5,341,215
|
def read_logging_data(folder_path):
"""
Description:\n
This function reads all csv files in the folder_path folder into one dataframe. The files must be in csv format, and each file name should start with 'yrt' and contain '_food_data' in the middle. For example, yrt1999_food_data123.csv is a valid file name that would be read into the dataframe if it exists in the folder_path folder.
Input:\n
- folder_path(string) : path to the folder that contains the data.
Output:\n
- a dataframe containing all the csv files in the given folder.
"""
data_lst = glob.glob('{}/yrt*_food_data*.csv'.format(folder_path))
dfs = []
for x in data_lst:
dfs.append(pd.read_csv(x))
df = pd.concat(dfs)
return df.reset_index(drop=True)
| 5,341,216
|
def get_user_strlist_options(*args):
"""
get_user_strlist_options(out)
"""
return _ida_kernwin.get_user_strlist_options(*args)
| 5,341,217
|
def SendKey(key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Simulate typing a key.
key: int, a value in class `Keys`.
"""
keybd_event(key, 0, KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey, 0)
keybd_event(key, 0, KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey, 0)
time.sleep(waitTime)
| 5,341,218
|
def coord_sampler(img, coords):
""" Sample img batch at integer (x,y) coords
img: [B,C,H,W], coords: [B,2,N]
returns: [B,C,N] points
"""
B,C,H,W = img.shape
N = coords.shape[2]
batch_ref = torch.meshgrid(torch.arange(B), torch.arange(N))[0]
out = img[batch_ref, :, coords[:,1,:], coords[:,0,:]]
return out.permute(0,2,1)
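# Minimal usage sketch, assuming torch is imported at module level. Samples
# per-batch pixel values at integer (x, y) locations; result is [B, C, N].
import torch

B, C, H, W, N = 2, 3, 4, 5, 6
img = torch.arange(B * C * H * W, dtype=torch.float32).reshape(B, C, H, W)
xs = torch.randint(0, W, (B, 1, N))
ys = torch.randint(0, H, (B, 1, N))
coords = torch.cat([xs, ys], dim=1)  # [B, 2, N]: row 0 holds x, row 1 holds y
pts = coord_sampler(img, coords)
print(pts.shape)                     # torch.Size([2, 3, 6])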
| 5,341,219
|
def directory(name, *args):
"""
Returns the directory with the specified name, as an absolute path.
:param name: The name of the directory. One of the keys defined in `dirs` below, e.g. "textures" or "models".
:param args: Elements that will be appended to the named directory.
:return: The full path of the named directory.
"""
top = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
dirs = {
'bin': 'bin',
'config': 'bin',
'top': '',
'textures': 'core/assets/minecraft/textures',
'blockstates': 'core/assets/minecraft/blockstates',
'models': "core/assets/minecraft/models",
'minecraft': "core/assets/minecraft",
'site': 'site',
'core': 'core',
}
is_defaults = name == 'defaults'
if is_defaults:
name = args[0]
args = args[1:]
path = dirs[name]
if is_defaults:
path = path.replace('core/', 'default_resourcepack/')
path = os.path.join(top, path)
for arg in args:
for part in arg.split('/'):
path = os.path.join(path, part)
return path
| 5,341,220
|
def adapt_ListOfX(adapt_X):
"""This will create a multi-column adapter for a particular type.
Note that the type must itself need to be in array form. Therefore
this function serves to separate out individual lists into multiple
big lists.
E.g. if the X adapter produces array (a,b,c)
then this adapter will take a list of Xs and produce a master array:
((a1,a2,a3),(b1,b2,b3),(c1,c2,c3))
Takes as its argument the adapter for the type which must produce an
SQL array string.
Note that you should NOT put the AsIs in the adapt_X function.
The need for this function arises from the fact that we may want to
actually handle list-creating types differently if they themselves
are in a list, as in the example above, we cannot simply adopt a
recursive strategy.
Note that master_list is the list representing the array. Each element
in the list will represent a subarray (column). If there is only one
subarray following processing then the outer {} are stripped to give a
1 dimensional array.
"""
def adapter_function(param):
if not AsIs:
raise ImportError('There was a problem importing psycopg2.')
param = param.value
result_list = []
for element in param: # Where param will be a list of X's
result_list.append(adapt_X(element))
test_element = result_list[0]
num_items = len(test_element.split(","))
master_list = []
for x in range(num_items):
master_list.append("")
for element in result_list:
element = element.strip("{").strip("}")
element = element.split(",")
for x in range(num_items):
master_list[x] = master_list[x] + element[x] + ","
if num_items > 1:
master_sql_string = "{"
else:
master_sql_string = ""
for x in range(num_items):
# Remove trailing comma
master_list[x] = master_list[x].strip(",")
master_list[x] = "{" + master_list[x] + "}"
master_sql_string = master_sql_string + master_list[x] + ","
master_sql_string = master_sql_string.strip(",")
if num_items > 1:
master_sql_string = master_sql_string + "}"
return AsIs("'{}'".format(master_sql_string))
return adapter_function
| 5,341,221
|
def add_sighting(pokemon_name):
"""Add new sighting to a user's Pokédex."""
user_id = session.get('user_id')
user = User.query.get_or_404(user_id)
pokemon = Pokemon.query.filter_by(name=pokemon_name).first_or_404()
# A 16% chance that a sighting of a Pokémon with ditto_chance = True will
# instead be logged as a Ditto
# Through manual spamming I tested this, and it does work!
if pokemon.chance_of_ditto():
pokemon = Pokemon.query.filter_by(name='Ditto').first_or_404()
pokemon_id = pokemon.pokemon_id
user_sighting = Sighting.query.filter((Sighting.user_id == user_id) & (Sighting.pokemon_id == pokemon_id)).one_or_none()
# Ensuring unique Pokémon only in a user's sightings
if user_sighting is None:
new_sighting = Sighting(user_id=user_id,
pokemon_id=pokemon_id)
new_sighting.save()
flash('Professor Willow: Wonderful! Your work is impeccable. Keep up the good work!')
return redirect(f'/user/{user_id}')
else:
flash('Professor Willow: You\'ve already seen this Pokémon!')
return redirect(f'/user/{user_id}')
| 5,341,222
|
def edit_colors_names_group(colors, names):
"""
idx map to colors and its names. names index is 1 increment up
based on new indexes (only 0 - 7)
"""
# wall
colors[0] = np.array([120, 120, 120], dtype = np.uint8)
names[1] = 'wall'
# floor
colors[1] = np.array([80, 50, 50], dtype = np.uint8)
names[2] = 'floor'
# plant
colors[2] = np.array([4, 200, 3], dtype = np.uint8)
names[3] = 'plant'
# ceiling
colors[3] = np.array([120, 120, 80], dtype = np.uint8)
names[4] = 'ceiling'
# furniture
colors[4] = np.array([204, 5, 255], dtype = np.uint8)
names[5] = 'furniture'
# person
colors[5] = np.array([150, 5, 61], dtype = np.uint8)
names[6] = 'person'
# door
colors[6] = np.array([8, 255, 51], dtype = np.uint8)
names[7] = 'door'
# objects
colors[7] = np.array([6, 230, 230], dtype = np.uint8)
names[8] = 'objects'
return colors, names
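# Minimal usage sketch, assuming numpy is imported as np. The function only
# touches indices 0-7 of `colors` and keys 1-8 of `names`, so any containers of
# at least that size work; the 150-entry palette here is just an assumption.
import numpy as np

colors = np.zeros((150, 3), dtype=np.uint8)
names = {}
colors, names = edit_colors_names_group(colors, names)
print(names[1], colors[0])  # wall [120 120 120]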
| 5,341,223
|
def download_last_year():
"""下载当年国债利率数据至本地"""
content_type = 'application/x-msdownload'
url = 'http://yield.chinabond.com.cn/cbweb-mn/yield_main?locale=zh_CN#'
driver = make_headless_browser_with_auto_save_path(
DATA_DIR, content_type)
driver.get(url)
time.sleep(1)
# driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
css = '.t1 > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(4) > a:nth-child(1)'
driver.find_element_by_css_selector(css).click()
driver.switch_to.window(driver.window_handles[-1])
time.sleep(1)
try:
d_btn_css = 'body > form:nth-child(1) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(3) > td:nth-child(1) > a:nth-child(1) > h4:nth-child(1)'
driver.find_element_by_css_selector(d_btn_css).click()
# Wait for the download to finish before closing the browser
time.sleep(1)
except Exception as e:
print(f'{e!r}')
download_path = os.path.join(expanduser('~'), 'Downloads')
for root, _, files in os.walk(download_path):
for name in files:
if name.endswith("xlsx") and name.find('中债国债收益率曲线标准期限信息'):
src = os.path.join(root, name)
dst = os.path.join(DATA_DIR,f'{datetime.date.today().year}.xlsx')
move(src, dst)
break
driver.quit()
| 5,341,224
|
def check(a, b, obj_type):
"""
Check that the sent and recv'd data matches.
"""
if obj_type in ("bytes", "memoryview"):
assert a == b
elif obj_type == "numpy":
import numpy as np
np.testing.assert_array_equal(a, b)
elif obj_type == "cupy":
import cupy
cupy.testing.assert_array_equal(a, b)
elif obj_type == "numba":
import numba
import numba.cuda
import numpy as np
np_a = a.copy_to_host()
np_b = b.copy_to_host()
np.testing.assert_array_equal(np_a, np_b)
else:
raise ValueError(obj_type)
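# Minimal usage sketch for the "numpy" branch: check() returns silently when the
# arrays match and raises AssertionError when they differ.
import numpy as np

check(np.arange(5), np.arange(5), "numpy")  # no exception
try:
    check(np.arange(5), np.arange(5) + 1, "numpy")
except AssertionError:
    print("arrays differ, as expected")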
| 5,341,225
|
def semidoc_mass_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
num_predict,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=256,
sequence_shuffle_size=2048):
# pylint: disable=g-doc-args
"""Get semi-doc level mass dataset.
Notes:
- Each sequence comes from the same document (except for boundary cases).
This is different from the standard sent-level mass dataset.
- No consecutivity is ensured across batches, which is different from the
standard doc-level mass dataset.
- Effectively, semi-doc dataset maintains short range (seq_len) dependency,
which is more random than doc-level and less random than sent-level.
Returns:
a tf.data.Dataset
"""
# pylint: enable=g-doc-args
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Parse records
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
dataset = parse_record(dataset=dataset,
parser=get_record_parser(),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_paths),
record_shuffle_size=record_shuffle_size)
# process dataset
dataset = mass_process(dataset, seq_len, num_predict, use_bfloat16)
# Sequence level shuffle
if is_training and sequence_shuffle_size:
tf.logging.info("Seqeunce level shuffle with size %d",
sequence_shuffle_size)
dataset = dataset.shuffle(buffer_size=sequence_shuffle_size)
# batching
dataset = dataset.batch(bsz_per_core, drop_remainder=True)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
| 5,341,226
|
def ListChrootSnapshots(buildroot):
"""Wrapper around cros_sdk --snapshot-list."""
cmd = ['cros_sdk', '--snapshot-list']
cmd_snapshots = RunBuildScript(
buildroot, cmd, chromite_cmd=True, stdout=True)
return cmd_snapshots.output.splitlines()
| 5,341,227
|
async def post_github_webhook(
request: Request,
logger: BoundLogger = Depends(logger_dependency),
http_client: httpx.AsyncClient = Depends(http_client_dependency),
arq_queue: ArqQueue = Depends(arq_dependency),
) -> Response:
"""Process GitHub webhook events."""
if not config.enable_github_app:
return Response(
"GitHub App is not enabled",
status_code=status.HTTP_501_NOT_IMPLEMENTED,
)
body = await request.body()
if config.github_webhook_secret is None:
return Response(
"The webhook secret is not configured",
status_code=status.HTTP_501_NOT_IMPLEMENTED,
)
webhook_secret = config.github_webhook_secret.get_secret_value()
event = Event.from_http(request.headers, body, secret=webhook_secret)
# Bind the X-GitHub-Delivery header to the logger context; this identifies
# the webhook request in GitHub's API and UI for diagnostics
logger = logger.bind(github_delivery=event.delivery_id)
logger.debug("Received GitHub webhook", payload=event.data)
# Give GitHub some time to reach internal consistency.
await asyncio.sleep(1)
await webhook_router.dispatch(event, logger, arq_queue)
return Response(status_code=status.HTTP_202_ACCEPTED)
| 5,341,228
|
def _load_cache(b_cache_path):
""" Loads the cache file requested if possible. The file must not be world writable. """
cache_version = 1
if not os.path.isfile(b_cache_path):
display.vvvv("Creating Galaxy API response cache file at '%s'" % to_text(b_cache_path))
with open(b_cache_path, 'w'):
os.chmod(b_cache_path, 0o600)
cache_mode = os.stat(b_cache_path).st_mode
if cache_mode & stat.S_IWOTH:
display.warning("Galaxy cache has world writable access (%s), ignoring it as a cache source."
% to_text(b_cache_path))
return
with open(b_cache_path, mode='rb') as fd:
json_val = to_text(fd.read(), errors='surrogate_or_strict')
try:
cache = json.loads(json_val)
except ValueError:
cache = None
if not isinstance(cache, dict) or cache.get('version', None) != cache_version:
display.vvvv("Galaxy cache file at '%s' has an invalid version, clearing" % to_text(b_cache_path))
cache = {'version': cache_version}
# Set the cache after we've cleared the existing entries
with open(b_cache_path, mode='wb') as fd:
fd.write(to_bytes(json.dumps(cache), errors='surrogate_or_strict'))
return cache
| 5,341,229
|
def nmad_filter(
dh_array: np.ndarray, inlier_mask: np.ndarray, nmad_factor: float = 5, max_iter: int = 20, verbose: bool = False
) -> np.ndarray:
"""
Iteratively remove pixels where the elevation difference (dh_array) in stable terrain (inlier_mask) is larger \
than nmad_factor * NMAD.
Iterations will stop either when the NMAD change is less than 0.1, or after max_iter iterations.
:param dh_array: 2D array of elevation difference.
:param inlier_mask: 2D boolean array of areas to include in the analysis (inliers=True).
:param nmad_factor: The factor by which the stable dh NMAD has to be multiplied to calculate the outlier threshold
:param max_iter: Maximum number of iterations (normally not reached, just for safety)
:param verbose: set to True to print some statistics to screen.
:returns: 2D boolean array with updated inliers set to True
"""
# Mask unstable terrain
dh_stable = dh_array.copy()
dh_stable.mask[~inlier_mask] = True
nmad_before = xdem.spatialstats.nmad(dh_stable)
if verbose:
print(f"NMAD before: {nmad_before:.2f}")
print("Iteratively remove large outliers")
# Iteratively remove large outliers
for i in range(max_iter):
outlier_threshold = nmad_factor * nmad_before
dh_stable.mask[np.abs(dh_stable) > outlier_threshold] = True
nmad_after = xdem.spatialstats.nmad(dh_stable)
if verbose:
print(f"Remove pixels where abs(value) > {outlier_threshold:.2f} -> New NMAD: {nmad_after:.2f}")
# If NMAD change is lower than a set threshold, stop iterations, otherwise stop after max_iter
if nmad_before - nmad_after < 0.1:
break
nmad_before = nmad_after
return ~dh_stable.mask
| 5,341,230
|
def process_contexts(server, contexts, p_ctx, error=None):
"""Method to be called in the auxiliary context."""
for ctx in contexts:
ctx.descriptor.aux.initialize_context(ctx, p_ctx, error)
if error is None or ctx.descriptor.aux.process_exceptions:
ctx.descriptor.aux.process_context(server, ctx)
| 5,341,231
|
def setup_output_vcf(outname, t_vcf):
"""
Create an output VCF file using the input VCF file handle as a template.
Writes the full header and adds INFO fields:
sizeCat
MEF
Returns a file handler and a dict with {individual_id: column in vcf}
"""
out = open(outname, 'w')
line = t_vcf.readline()
samp_columns = {}
while not line.startswith("#CHROM"):
out.write(line)
line = t_vcf.readline()
# edit the header
out.write('##INFO=<ID=sizeCat,Number=A,Type=String,Description="Size category of variant">\n')
out.write('##INFO=<ID=MEF,Number=.,Type=String,Description="Names of families that contain mendelian error">\n')
out.write(line)
for pos, iid in enumerate(line.strip().split('\t')[9:]):
samp_columns[iid] = pos + 9
return out, samp_columns
| 5,341,232
|
def load_json_file():
"""loads the json file"""
dirname = os.path.dirname(os.path.abspath(__file__))
json_filepath = os.path.join(dirname, "rps101_data.json")
with open(json_filepath) as f:
json_load = json.load(f)
return json_load
| 5,341,233
|
def validate_processing_hooks():
"""Validate the enabled processing hooks.
:raises: MissingHookError on missing or failed to load hooks
:raises: RuntimeError on validation failure
:returns: the list of hooks passed validation
"""
hooks = [ext for ext in processing_hooks_manager()]
enabled = set()
errors = []
for hook in hooks:
deps = getattr(hook.obj, 'dependencies', ())
missing = [d for d in deps if d not in enabled]
if missing:
errors.append('Hook %(hook)s requires the following hooks to be '
'enabled before it: %(deps)s. The following hooks '
'are missing: %(missing)s.' %
{'hook': hook.name,
'deps': ', '.join(deps),
'missing': ', '.join(missing)})
enabled.add(hook.name)
if errors:
raise RuntimeError("Some hooks failed to load due to dependency "
"problems:\n%s" % "\n".join(errors))
return hooks
| 5,341,234
|
def rnn_stability_loss(rnn_output, beta):
"""
REGULARIZING RNNS BY STABILIZING ACTIVATIONS
https://arxiv.org/pdf/1511.08400.pdf
:param rnn_output: [time, batch, features]
:return: loss value
"""
if beta == 0.0:
return 0.0
# [time, batch, features] -> [time, batch]
l2 = tf.sqrt(tf.reduce_sum(tf.square(rnn_output), axis=-1))
# [time, batch] -> []
return beta * tf.reduce_mean(tf.square(l2[1:] - l2[:-1]))
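# Minimal usage sketch, assuming TensorFlow 2.x imported as tf with eager
# execution (under TF1 the same call would build a graph tensor instead). The
# loss penalizes changes in the per-timestep L2 norm of the activations.
import tensorflow as tf

rnn_output = tf.random.normal([20, 8, 64])  # [time, batch, features]
loss = rnn_stability_loss(rnn_output, beta=1e-4)
print(float(loss))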
| 5,341,235
|
def test_http_request_proxy_error_based_on_status(mock_base_http_request, client):
"""
When http request return status code 407 then appropriate error message should display.
"""
# Configure
mock_base_http_request.return_value = mock_http_response(status=407)
# Execute
with pytest.raises(ValueError) as e:
client.http_request('GET', '/test/url/suffix')
# Assert
assert 'Proxy Error - cannot connect to proxy. Either try clearing the \'Use system proxy\' check-box or check ' \
'the host, authentication details and connection details for the proxy.' == str(e.value)
| 5,341,236
|
def import_csv_to_calendar_api():
"""The main route of the Project.\
If requested with a GET: renders the page of the import operation.
If requested with POST: Starts importing events in the file to be uploaded present in the POST data.
"""
if request.method == "GET":
return make_response(render_template('import.html.j2'), 200)
elif request.method == "POST":
max_events = int(request.form["max_events"])
if max_events == 0:
max_events = None
events_freq = int(request.form["events_freq"])
if 'file' not in request.files:
flash(("No file part"), category='danger')
return redirect(request.url)
file = request.files['file']
if file.filename == "":
flash("No file selected for upload", category='danger')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file__path = os.path.join(app.config["UPLOAD_FOLDER"], filename)
file.save(file__path)
operation_now = import_oprtation(number_events=max_events,
filename=filename)
db.session.add(operation_now)
try:
op_id = db.session.query(import_oprtation).order_by(
import_oprtation.id.desc()).first().id
except Exception as e:
print(e)
db.session.commit()
all_events = csv_tt_to_json_events(file__path,
dates,
max_events=max_events,
events_freq=events_freq)
for event in all_events:
resource = None
teachers = []
std_sets_emails = []
if "resource" in event["attendees"][-1].keys():
resource = event["attendees"][-1]
event["attendees"].remove(resource)
for ev_att in event["attendees"]:
if Teacher.query.filter_by(
teacher_email=ev_att["email"]).first():
teachers.append(ev_att)
else:
std_sets_emails.append(ev_att)
event["attendees"].clear()
event["attendees"].extend(teachers)
if resource is not None:
event["attendees"].append(resource)
for std_mail in std_sets_emails:
cal_rec = Calendar.query.filter_by(
std_email=std_mail["email"]).first()
if cal_rec:
calendar_id = cal_rec.calendar_id_google
else:
print("Calendar does not exist")
pprint(event)
print("_________")
continue
resp = google.post(
"/calendar/v3/calendars/{}/events".format(calendar_id),
json=event,
params={"sendUpates": "none"})
if resp.status_code == 200:
gevent_id = resp.json()["id"]
try:
event_logged = events__log(
gevent_id=gevent_id,
gcalendar_id=calendar_id,
import_id=op_id)
except Exception as e:
print(e)
print(event)
try:
db.session.add(event_logged)
db.session.commit()
except Exception as e:
print(e)
else:
print("Could not insert event")
print(resp.text)
flash(("Added {} events to calendar".format(len(all_events))),
category='success')
return redirect(url_for('import_csv_to_calendar_api'))
else:
flash('Allowed file types are: csv', category="info")
return redirect(request.url)
| 5,341,237
|
def xproto_fields(m, table):
""" Generate the full list of models for the xproto message `m` including fields from the classes it inherits.
Inserts the special field "id" at the very beginning.
Each time we descend a new level of inheritance, increment the offset field numbers by 100. The base
class's fields will be numbered from 1-99, the first descendant will be numbered 100-199, the second
descendant from 200-299, and so on. This assumes any particular model has at most 100
fields.
"""
model_fields = [x.copy() for x in m["fields"]]
for field in model_fields:
field["accessor"] = m["fqn"]
fields = xproto_base_fields(m, table) + model_fields
# The "id" field is a special field. Every model has one. Put it up front and pretend it's part of the
if not fields:
raise Exception(
"Model %s has no fields. Check for missing base class." % m["name"]
)
id_field = {
"type": "int32",
"name": "id",
"options": {},
"id": "1",
"accessor": fields[0]["accessor"],
}
fields = [id_field] + fields
# Walk through the list of fields. They will be in depth-first search order from the base model forward. Each time
# the model changes, offset the protobuf field numbers by 100.
offset = 0
last_accessor = fields[0]["accessor"]
for field in fields:
if field["accessor"] != last_accessor:
last_accessor = field["accessor"]
offset += 100
field_id = int(field["id"])
if (field_id < 1) or (field_id >= 100):
raise Exception(
"Only field numbers from 1 to 99 are permitted, field %s in model %s"
% (field["name"], field["accessor"])
)
field["id"] = int(field["id"]) + offset
# Check for duplicates
fields_by_number = {}
for field in fields:
id = field["id"]
dup = fields_by_number.get(id)
if dup:
raise Exception(
"Field %s has duplicate number %d with field %s in model %s"
% (field["name"], id, dup["name"], field["accessor"])
)
fields_by_number[id] = field
return fields
| 5,341,238
|
def HyBO(objective=None, n_eval=200, path=None, parallel=False, store_data=True, problem_id=None, **kwargs):
"""
:param objective:
:param n_eval:
:param path:
:param parallel:
:param kwargs:
:return:
"""
acquisition_func = expected_improvement
n_vertices = adj_mat_list = None
eval_inputs = eval_outputs = log_beta = sorted_partition = lengthscales = None
time_list = elapse_list = pred_mean_list = pred_std_list = pred_var_list = None
if objective is not None:
exp_dir = experiment_directory()
objective_id_list = [objective.__class__.__name__]
if hasattr(objective, 'random_seed_info'):
objective_id_list.append(objective.random_seed_info)
if hasattr(objective, 'data_type'):
objective_id_list.append(objective.data_type)
objective_id_list.append('HyBO')
if problem_id is not None:
objective_id_list.append(problem_id)
objective_name = '_'.join(objective_id_list)
model_filename, data_cfg_filaname, logfile_dir = model_data_filenames(exp_dir=exp_dir,
objective_name=objective_name)
n_vertices = objective.n_vertices
adj_mat_list = objective.adjacency_mat
grouped_log_beta = torch.ones(len(objective.fourier_freq))
log_order_variances = torch.zeros((objective.num_discrete + objective.num_continuous))
fourier_freq_list = objective.fourier_freq
fourier_basis_list = objective.fourier_basis
suggested_init = objective.suggested_init # suggested_init should be 2d tensor
n_init = suggested_init.size(0)
num_discrete = objective.num_discrete
num_continuous = objective.num_continuous
lengthscales = torch.zeros((num_continuous))
print("******************* initializing kernel ****************")
kernel = MixedDiffusionKernel(log_order_variances=log_order_variances, grouped_log_beta=grouped_log_beta, fourier_freq_list=fourier_freq_list,
fourier_basis_list=fourier_basis_list, lengthscales=lengthscales,
num_discrete=num_discrete, num_continuous=num_continuous)
surrogate_model = GPRegression(kernel=kernel)
eval_inputs = suggested_init
eval_outputs = torch.zeros(eval_inputs.size(0), 1, device=eval_inputs.device)
for i in range(eval_inputs.size(0)):
eval_outputs[i] = objective.evaluate(eval_inputs[i])
assert not torch.isnan(eval_outputs).any()
log_beta = eval_outputs.new_zeros(num_discrete)
log_order_variance = torch.zeros((num_discrete + num_continuous))
sorted_partition = [[m] for m in range(num_discrete)]
lengthscale = torch.zeros((num_continuous))
time_list = [time.time()] * n_init
elapse_list = [0] * n_init
pred_mean_list = [0] * n_init
pred_std_list = [0] * n_init
pred_var_list = [0] * n_init
surrogate_model.init_param(eval_outputs)
print('(%s) Burn-in' % time.strftime('%H:%M:%S', time.localtime()))
sample_posterior = posterior_sampling(surrogate_model, eval_inputs, eval_outputs, n_vertices, adj_mat_list, log_order_variance,
log_beta, lengthscale, sorted_partition, n_sample=1, n_burn=1, n_thin=1)
log_order_variance = sample_posterior[1][0]
log_beta = sample_posterior[2][0]
lengthscale = sample_posterior[3][0]
sorted_partition = sample_posterior[4][0]
print('')
else:
surrogate_model, cfg_data, logfile_dir = load_model_data(path, exp_dir=experiment_directory())
for _ in range(n_eval):
start_time = time.time()
reference = torch.min(eval_outputs, dim=0)[0].item()
print('(%s) Sampling' % time.strftime('%H:%M:%S', time.localtime()))
sample_posterior = posterior_sampling(surrogate_model, eval_inputs, eval_outputs, n_vertices, adj_mat_list, log_order_variance,
log_beta, lengthscale, sorted_partition, n_sample=10, n_burn=0, n_thin=1)
hyper_samples, log_order_variance_samples, log_beta_samples, lengthscale_samples, partition_samples, freq_samples, basis_samples, edge_mat_samples = sample_posterior
log_order_variance = log_order_variance_samples[-1]
log_beta = log_beta_samples[-1]
lengthscale = lengthscale_samples[-1]
sorted_partition = partition_samples[-1]
print('\n')
# print(hyper_samples[0])
# print(log_order_variance)
# print(log_beta)
# print(lengthscale)
# print(sorted_partition)
# print('')
x_opt = eval_inputs[torch.argmin(eval_outputs)]
inference_samples = inference_sampling(eval_inputs, eval_outputs, n_vertices,
hyper_samples, log_order_variance_samples, log_beta_samples, lengthscale_samples, partition_samples,
freq_samples, basis_samples, num_discrete, num_continuous)
suggestion = next_evaluation(objective, x_opt, eval_inputs, inference_samples, partition_samples, edge_mat_samples,
n_vertices, acquisition_func, reference, parallel)
next_eval, pred_mean, pred_std, pred_var = suggestion
processing_time = time.time() - start_time
print("next_eval", next_eval)
eval_inputs = torch.cat([eval_inputs, next_eval.view(1, -1)], 0)
eval_outputs = torch.cat([eval_outputs, objective.evaluate(eval_inputs[-1]).view(1, 1)])
assert not torch.isnan(eval_outputs).any()
time_list.append(time.time())
elapse_list.append(processing_time)
pred_mean_list.append(pred_mean.item())
pred_std_list.append(pred_std.item())
pred_var_list.append(pred_var.item())
displaying_and_logging(logfile_dir, eval_inputs, eval_outputs, pred_mean_list, pred_std_list, pred_var_list,
time_list, elapse_list, hyper_samples, log_beta_samples, lengthscale_samples, log_order_variance_samples, store_data)
print('Optimizing %s with regularization %.2E up to %4d visualization random seed : %s'
% (objective.__class__.__name__, objective.lamda if hasattr(objective, 'lamda') else 0, n_eval,
objective.random_seed_info if hasattr(objective, 'random_seed_info') else 'none'))
| 5,341,239
|
def write_asset(asset, target_directory):
"""Write file represented by asset to target_directory
Args:
asset (dict): an asset dict
target_directory (str): path to a directory in which all files will be written
"""
target_path = os.path.join(
target_directory, asset["target_location"], asset["target_name"]
)
os.makedirs(os.path.dirname(target_path), exist_ok=True)
if "copy_method" in asset:
copy_file_asset(asset, target_path)
elif "bytes" in asset:
logger.debug(f"Writing asset bytes to {target_path}.")
with open(target_path, "wb") as f:
f.write(asset["bytes"])
else:
raise ConfigError(
"Cannot write asset. Asset must have either a `copy_method` or `bytes` key."
)
| 5,341,240
|
def test(all=False):
"""Alias of `invoke test_osf`.
"""
if all:
test_all()
else:
test_osf()
| 5,341,241
|
def create_decomp_expand_fn(custom_decomps, dev, decomp_depth=10):
"""Creates a custom expansion function for a device that applies
a set of specified custom decompositions.
Args:
custom_decomps (Dict[Union(str, qml.operation.Operation), Callable]): Custom
decompositions to be applied by the device at runtime.
dev (qml.Device): A quantum device.
decomp_depth: The maximum depth of the expansion.
Returns:
Callable: A custom expansion function that a device can call to expand
its tapes within a context manager that applies custom decompositions.
**Example**
Suppose we would like a custom expansion function that decomposes all CNOTs
into CZs. We first define a decomposition function:
.. code-block:: python
def custom_cnot(wires):
return [
qml.Hadamard(wires=wires[1]),
qml.CZ(wires=[wires[0], wires[1]]),
qml.Hadamard(wires=wires[1])
]
We then create the custom function (passing a device, in order to pick up any
additional stopping criteria the expansion should have), and then register the
result as a custom function of the device:
>>> custom_decomps = {qml.CNOT : custom_cnot}
>>> expand_fn = qml.transforms.create_decomp_expand_fn(custom_decomps, dev)
>>> dev.custom_expand(expand_fn)
"""
custom_op_names = [op if isinstance(op, str) else op.__name__ for op in custom_decomps.keys()]
# Create a new expansion function; stop at things that do not have
# custom decompositions, or that satisfy the regular device stopping criteria
custom_fn = qml.transforms.create_expand_fn(
decomp_depth,
stop_at=qml.BooleanFn(lambda obj: obj.name not in custom_op_names),
device=dev,
)
# Finally, we set the device's custom_expand_fn to a new one that
# runs in a context where the decompositions have been replaced.
def custom_decomp_expand(self, circuit, max_expansion=decomp_depth):
with _custom_decomp_context(custom_decomps):
return custom_fn(circuit, max_expansion=max_expansion)
return custom_decomp_expand
| 5,341,242
|
def get_lats_map(floor: float, ceil: float) -> Dict[int, List[float]]:
"""
Get a map of quarter-minute lats keyed by their full-minute values.
The series considers the lat range [-70, 69] and how objects are stored in s3.
"""
full = full_minutes([floor, ceil])
out = {d: [d + dd for dd in OBJ_COORD_PARTS] for d in full[1:-1]}
start = full[0]
out[start] = [start + d for d in OBJ_COORD_PARTS if start + d >= floor]
end = full[-1]
out[end] = [end + d for d in OBJ_COORD_PARTS if end + d <= ceil]
return {k: d for k, d in out.items() if -70 <= k < 70}
| 5,341,243
|
def format_dict(body: Dict[Any, Any]) -> str:
"""
Formats a dictionary into a multi-line bulleted string of key-value pairs.
"""
return "\n".join(
[f" - {k} = {getattr(v, 'value', v)}" for k, v in body.items()]
)
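# Minimal usage sketch: plain values are printed as-is, while objects exposing a
# `.value` attribute (such as Enum members) are unwrapped by the getattr call.
from enum import Enum

class Color(Enum):
    RED = "red"

print(format_dict({"name": "demo", "color": Color.RED}))
#  - name = demo
#  - color = red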
| 5,341,244
|
def block3(x, filters, kernel_size=3, stride=1, groups=32,
conv_shortcut=True, name=None):
"""A residual block.
# Arguments
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
groups: default 32, group size for grouped convolution.
conv_shortcut: default True, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
# Returns
Output tensor for the residual block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
if conv_shortcut is True:
shortcut = Conv2D((64 // groups) * filters, 1, strides=stride,
use_bias=False, name=name + '_0_conv')(x)
shortcut = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_0_bn')(shortcut)
else:
shortcut = x
x = Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_1_bn')(x)
x = Activation('relu', name=name + '_1_relu')(x)
c = filters // groups
x = ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,
use_bias=False, name=name + '_2_conv')(x)
x_shape = backend.int_shape(x)[1:-1]
x = Reshape(x_shape + (groups, c, c))(x)
output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None
x = Lambda(lambda x: sum([x[:, :, :, :, i] for i in range(c)]),
output_shape=output_shape, name=name + '_2_reduce')(x)
x = Reshape(x_shape + (filters,))(x)
x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_2_bn')(x)
x = Activation('relu', name=name + '_2_relu')(x)
x = Conv2D((64 // groups) * filters, 1,
use_bias=False, name=name + '_3_conv')(x)
x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_3_bn')(x)
x = Add(name=name + '_add')([shortcut, x])
x = Activation('relu', name=name + '_out')(x)
return x
| 5,341,245
|
def test_docstring_remove():
"""Test removal of docstrings with optimize=2."""
import ast
import marshal
code = """
'module_doc'
def f():
'func_doc'
class C:
'class_doc'
"""
tree = ast.parse(code)
for to_compile in [code, tree]:
compiled = compile(to_compile, "<test>", "exec", optimize=2)
ns = {}
exec(compiled, ns)
assert '__doc__' not in ns
assert ns['f'].__doc__ is None
assert ns['C'].__doc__ is None
# Check that the docstrings are gone from the bytecode and not just
# inaccessible.
marshalled = str(marshal.dumps(compiled))
assert 'module_doc' not in marshalled
assert 'func_doc' not in marshalled
assert 'class_doc' not in marshalled
| 5,341,246
|
def random_sleep(n):
"""io"""
time.sleep(2)
return n
| 5,341,247
|
def yolo2_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
"""Create YOLO_V2 MobileNet model CNN body in Keras."""
mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
# input: 416 x 416 x 3
# mobilenet.output : 13 x 13 x (1024*alpha)
# conv_pw_11_relu(layers[73]) : 26 x 26 x (512*alpha)
conv_head1 = compose(
DarknetConv2D_BN_Leaky(int(1024*alpha), (3, 3)),
DarknetConv2D_BN_Leaky(int(1024*alpha), (3, 3)))(mobilenet.output)
# conv_pw_11_relu output shape: 26 x 26 x (512*alpha)
conv_pw_11_relu = mobilenet.layers[73].output
conv_head2 = DarknetConv2D_BN_Leaky(int(64*alpha), (1, 1))(conv_pw_11_relu)
# TODO: Allow Keras Lambda to use func arguments for output_shape?
conv_head2_reshaped = Lambda(
space_to_depth_x2,
output_shape=space_to_depth_x2_output_shape,
name='space_to_depth')(conv_head2)
x = Concatenate()([conv_head2_reshaped, conv_head1])
x = DarknetConv2D_BN_Leaky(int(1024*alpha), (3, 3))(x)
x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1), name='predict_conv')(x)
return Model(inputs, x)
| 5,341,248
|
def LeastSquare_nonlinearFit_general(
X,
Y,
func,
func_derv,
guess,
weights=None,
maxRelativeError=1.0e-5,
maxIteratitions=100,
):
"""Computes a non-linear fit using the least square method following: http://ned.ipac.caltech.edu/level5/Stetson/Stetson2_2_1.html
It takes the following arguments:
X - array of x-parameters
Y - y-array
func - the function f(X,parameters)
func_derv - a function that returns a 2D array giving along the columns the derivatives of the function 'f' with respect to each fit parameter ( df /dp_i )
guess - a first guess for the fit parameters
weights - the weights associated to each point
maxRelativeError - stop the iteration once the error in each parameter is below this threshold
maxIteratitions - stop the iteration after this many steps
Returns: parameters, fit_error, chi_square, noPoints, success (True if successful)
"""
functionName = "'analysis.LeastSquare_nonlinearFit_general'"
if weights is None:
weights = Y.copy()
weights[:] = 1
noPoints = Y.size # number of data points used for the fit
noParams = len(guess) # number of fit parameters
# iterate starting with the initial guess until finding the best fit parameters
a = guess
iteration, notConverged = 0, True
while iteration < maxIteratitions and notConverged:
tempX = func_derv(
X, a
) # the derivatives for the current parameter values
tempDiff = Y - func(
X, a
) # the difference between function values and Y-values
std = (weights * tempDiff ** 2).sum() # the current sum of the squares
step = np.linalg.lstsq(tempX, tempDiff)[0]
while True:
a2 = a + step
tempStd = (
weights * (Y - func(X, a2)) ** 2
).sum() # the sum of the squares for the new parameter values
if tempStd > std:
step /= (
2.0
) # wrong estimate for the step since it increases the deviation from Y values; decrease step by a factor of 2
else:
a += step
break
if (np.abs(step / a) < maxRelativeError).all():
notConverged = False # the iteration has converged
iteration += 1
print(iteration, a, step, std, tempStd)
# compute the standard deviation for the best fit parameters
derivatives = func_derv(X, a)
M = np.zeros((noParams, noParams), np.float64)
for i in range(noParams):
for j in range(i, noParams):
M[i, j] = (weights * derivatives[:, i] * derivatives[:, j]).sum()
M[j, i] = M[i, j]
Minv = np.linalg.inv(M)
chiSquare = (weights * (Y - func(X, a)) ** 2).sum() / (
noPoints - noParams
) # fit residuals
a_error = np.zeros(noParams, np.float64)
for i in range(noParams):
a_error[i] = (chiSquare * Minv[i, i]) ** 0.5
return a, a_error, chiSquare, noPoints, iteration < maxIteratitions
| 5,341,249
|
def _get_extracted_csv_table(relevant_subnets, tablename, input_path, sep=";"):
""" Returns extracted csv data of the requested SimBench grid. """
csv_table = read_csv_data(input_path, sep=sep, tablename=tablename)
if tablename == "Switch":
node_table = read_csv_data(input_path, sep=sep, tablename="Node")
bus_bus_switches = set(get_bus_bus_switch_indices_from_csv(csv_table, node_table))
else:
bus_bus_switches = {}
extracted_csv_table = _extract_csv_table_by_subnet(csv_table, tablename, relevant_subnets,
bus_bus_switches=bus_bus_switches)
return extracted_csv_table
| 5,341,250
|
def main():
""" Runs the main parsing function on each list of powers and saves the resulting data. """
for power_type in [ARCANA, ARCANE_DISCOVERY, DOMAIN, EXPLOIT, HEX, SPELL]:
powers = parse_powers(
power_type['requests'],
power_type['list_regex'],
power_type['defaults'],
power_type['power_regexes'],
power_type['process_fn'],
power_type['exceptions_fn']
)
save_json(powers, power_type['filename'])
| 5,341,251
|
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid.
:return: (bool) If there is any words with prefix stored in sub_s.
"""
for word in vocabulary_list:
if word.strip().startswith(sub_s):
return True
# Check all vocabularies in dictionary.txt, if there is no vocabulary start with sub_s then return false.
return False
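# Minimal usage sketch, assuming `vocabulary_list` is the module-level list of
# words (read from dictionary.txt) that has_prefix iterates over; the three
# words below are purely illustrative.
vocabulary_list = ["apple\n", "apply\n", "banana\n"]

print(has_prefix("app"))  # True  -- "apple" and "apply" start with "app"
print(has_prefix("zzz"))  # False -- no word starts with "zzz"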
| 5,341,252
|
def test_prompt_for_both_mnemonics(mock_bitcoincore):
"""Test prompting for both mnemonic and recovery mnemonic"""
mock_bitcoincore.return_value = AuthServiceProxy('testnet_txs')
estimate = {'blocks': 3, 'feerate': 1, }
mock_bitcoincore.return_value.estimatesmartfee.return_value = estimate
with mock.patch('garecovery.recoverycli.user_input') as user_input_:
mnemonic = open(datafile('mnemonic_6.txt')).read()
mnemonic = ' '.join(mnemonic.split())
recovery_mnemonic = open(datafile('mnemonic_7.txt')).read()
recovery_mnemonic = ' '.join(recovery_mnemonic.split())
user_input_.side_effect = (mnemonic, recovery_mnemonic)
output = get_output([
'--rpcuser=abc',
'--rpcpassword=abc',
'2of3',
'--network=testnet',
'--key-search-depth={}'.format(key_depth),
'--search-subaccounts={}'.format(sub_depth),
'--destination-address={}'.format(destination_address),
])
user_input_.assert_has_calls([
mock.call('mnemonic/hex seed: '),
mock.call('recovery mnemonic/hex seed: '),
])
assert output.strip() == open(datafile("signed_2of3_5")).read().strip()
| 5,341,253
|
def test_jenkinslts_repofile_isfile(host):
"""
Tests if jenkins repo files for DEBIAN/EL systems is file type.
"""
assert host.file(DEBIAN_REPO_FILE).is_file or \
host.file(EL_REPO_FILE).is_file
| 5,341,254
|
def work_out_entity(context,entity):
"""
One of Arkestra's core functions
"""
# first, try to get the entity from the context
entity = context.get('entity', None)
if not entity:
# otherwise, see if we can get it from a cms page
request = context['request']
if request.current_page:
entity = entity_for_page(request.current_page)
else: # we must be in a plugin, either in the page or in admin
page = context['plugin'].get("page", None)
if page:
entity = entity_for_page(page)
else:
entity = None
return entity
| 5,341,255
|
def rectify(link:str, parent:str, path:str):
"""A function to check a link and verify that it should be captured or not.
For example, any external URL would be blocked. It also takes care that all the urls are properly formatted.
Args:
**link (str)**: the link to rectify.
**parent (str)**: the complete url of the page from which the link was found.
**path (str)**: the path (after the domain) of the page from which the link was found.
Returns:
**str**: the properly formatted link.
"""
if (link.startswith("#")) or (":" in link) or ("../" in link):
return path
if not link.startswith("/"):
if parent.endswith("/"):
if not path.endswith("/"):
path += "/"
return path + link
else:
path = "/".join(path.split("/")[:-1])+"/"
return path + link
return link
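# Minimal usage sketch showing how links are normalized relative to the current
# page (the URLs are illustrative only).
print(rectify("#top", "https://example.com/docs/", "/docs/"))        # /docs/ (fragment: keep current path)
print(rectify("mailto:a@b.c", "https://example.com/", "/"))          # / (external scheme: blocked)
print(rectify("/about", "https://example.com/docs/", "/docs/"))      # /about (already absolute)
print(rectify("guide.html", "https://example.com/docs/", "/docs/"))  # /docs/guide.html
print(rectify("guide.html", "https://example.com/docs/index.html", "/docs/index.html"))  # /docs/guide.html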
| 5,341,256
|
def send_start_run_message(run_number: int,
instrument_name: str,
broker_address: str=default_broker_address,
broker_version: str=default_broker_version):
"""Send a run start message to topic <instrument_name>_runInfo on the given broker"""
builder = flatbuffers.Builder(0)
name = builder.CreateString(instrument_name)
run_info.RunStart.RunStartStart(builder)
run_info.RunStart.RunStartAddStartTime(builder, current_time_ns())
run_info.RunStart.RunStartAddRunNumber(builder, run_number)
run_info.RunStart.RunStartAddInstrumentName(builder, name)
run_start = run_info.RunStart.RunStartEnd(builder)
run_info.RunInfo.RunInfoStart(builder)
run_info.RunInfo.RunInfoAddInfoTypeType(builder, run_info.InfoTypes.InfoTypes().RunStart)
run_info.RunInfo.RunInfoAddInfoType(builder, run_start)
info = run_info.RunInfo.RunInfoEnd(builder)
builder.Finish(info)
message = prepare_flatbuffer_message(builder, b'ba57')
topic_name = "{}_runInfo".format(instrument_name).encode()
send_message(message, topic_name, broker_address, broker_version)
| 5,341,257
|
def cleanup_3():
"""
1. Sort keys of each json file.
2. rename media filenames with zero padding.
"""
sorted_keys = [
"id",
"name",
"description",
"collection_name",
"collection_description",
"transaction_time",
"eth_price",
"eth_price_decimal",
"usd_price",
"usd_volume",
"usd_marketcap",
"media_filenames",
"has_audio_in_video",
]
for filename in os.listdir(new_json_dir):
if filename.endswith(".json"):
json_path = new_json_dir / filename
new_data = dict()
with json_path.open() as f:
data = json.load(f)
f.close()
int_id = int(data["id"])
padded_id: str = f"{int_id:05}"
for key in sorted_keys:
if key == "media_filenames":
media_filenames = data.get(key, [])
new_media_filenames = []
for media_filename in media_filenames:
extension = Path(media_filename).suffix
new_name = f"{padded_id}{extension}"
new_media_filenames.append(new_name)
new_path = media_dir / new_name
if not new_path.is_file():
print(f"Missing file: {new_name}")
new_data[key] = new_media_filenames
else:
new_data[key] = data.get(key)
with json_path.open("w") as f:
json.dump(new_data, f, indent=4)
| 5,341,258
|
def nested_pids_and_relations(app, db):
"""Fixture for a nested PIDs and the expected serialized relations."""
# Create some PIDs and connect them into different nested PID relations
pids = {}
for idx in range(1, 12):
pid_value = str(idx)
p = PersistentIdentifier.create('recid', pid_value, object_type='rec',
status=PIDStatus.REGISTERED)
pids[idx] = p
VERSION = resolve_relation_type_config('version').id
# 1 (Version)
# / | \
# 2 3 4
PIDRelation.create(pids[1], pids[2], VERSION, 0)
PIDRelation.create(pids[1], pids[3], VERSION, 1)
PIDRelation.create(pids[1], pids[4], VERSION, 2)
# Define the expected PID relation tree for of the PIDs
expected_relations = {}
expected_relations[4] = {
u'relations': {
'version': [
{u'children': [{u'pid_type': u'recid',
u'pid_value': u'2'},
{u'pid_type': u'recid',
u'pid_value': u'3'},
{u'pid_type': u'recid',
u'pid_value': u'4'}],
u'index': 2,
u'is_child': True,
u'previous': {'pid_type': 'recid', 'pid_value': '3'},
u'next': None,
u'is_last': True,
u'is_parent': False,
u'parent': {u'pid_type': u'recid',
u'pid_value': u'1'},
u'type': 'version'
}
],
}
}
return pids, expected_relations
| 5,341,259
|
def test_siren_not_existing_year(host, siren, year):
"""
Test the /company/siren/year endpoint. Should return a JSON and RC 404.
:param host: Fixture of the API host.
:param siren: Parametrise fixture of a SIREN.
:param year: Parametrise fixture of the year to return.
"""
response = get("http://" + host + "/company/siren/" + str(siren) + "/" + str(year))
assert response.status_code == 404, "WRONG HTTP RETURN CODE"
assert loads(response.text) is not None, "NOT RETURNING A JSON"
| 5,341,260
|
def get_classifier(classifier):
""" Run given classifier on GLoVe embedding model """
if classifier is None:
exit(1)
print('Running ', classifier)
get_predictive_model(classifier)
#return classifier
| 5,341,261
|
def main():
"""Main entry point"""
algorithms = [
# selectionsort,
insertionsort,
# Uncomment these lines after implementing algorithms
# bubblesort,
# quicksort,
# mergesort,
# heapsort,
]
benchmark(algorithms, max_array_size, initial_array_size=100, step=5)
plot_time()
# plot_access()
| 5,341,262
|
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = line.expandtabs()
return len(expline) - len(expline.lstrip())
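# Minimal usage sketch: tabs are expanded before counting, so a leading tab
# counts as 8 spaces by default.
print(indentsize("    def f():"))  # 4
print(indentsize("\tdef f():"))    # 8
print(indentsize("no indent"))     # 0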
| 5,341,263
|
def keyup_events(event, debug,):
""" Check events for when a key is released """
debug.check_strokes(event.key)
| 5,341,264
|
def reprocess_subtitle_file(path, max_chars=30, max_stretch_time=3, max_oldest_start=10):
"""Combine subtitles across a time period"""
file_name, ext = os.path.splitext(path)
compressed_subtitle_file = file_name + '.ass'
subs = pysubs2.load(path)
compressed_subs = compress(subs, max_chars, max_stretch_time, max_oldest_start)
compressed_subs.save(compressed_subtitle_file)
logger.info(f'Combined {len(subs)} subtitles from {path} to {len(compressed_subs)} in {compressed_subtitle_file}')
return compressed_subtitle_file, subs[len(subs) - 1].end
| 5,341,265
|
def read_github_repos(repo_names):
"""ENTRY POINT: Yields GitHub repos information as dictionaries from database."""
for repo_name in repo_names:
repo = GitHubRepo.query.filter_by(name=repo_name).first()
yield {
"name": repo.name,
"full_name": repo.full_name,
"description": repo.description,
"license": repo.license,
"private": repo.private,
"stars": repo.stars,
"forks": repo.forks,
"commits": repo.commits,
"open_issues": repo.open_issues,
"languages": [{"name": lang.name, "color": lang.color} for lang in repo.languages],
"url": repo.url,
}
| 5,341,266
|
def load(dataFormat,path,ignoreEntities=[],ignoreComplexRelations=True):
"""
Load a corpus from a variety of formats. If path is a directory, it will try to load all files of the corresponding data type. For standoff format, it will use any associated annotations files (with suffixes .ann, .a1 or .a2)
:param dataFormat: Format of the data files to load ('standoff','biocxml','pubannotation','simpletag')
:param path: Path to data. Can be directory or an individual file. Should be the txt file for standoff.
:param ignoreEntities: List of entity types to ignore while loading
:param ignoreComplexRelations: Whether to filter out relations where one argument is another relation (must be True as kindred doesn't currently support complex relations)
:type dataFormat: str
:type path: str
:type ignoreEntities: list
:type ignoreComplexRelations: bool
:return: Corpus of loaded documents
:rtype: kindred.Corpus
"""
assert dataFormat in ['standoff','biocxml','pubannotation','simpletag']
assert ignoreComplexRelations == True, "ignoreComplexRelations must be True as kindred doesn't currently support complex relations"
corpus = kindred.Corpus()
if os.path.isdir(path):
directory = path
filenames = sorted(list(os.listdir(directory)))
for filename in filenames:
if dataFormat == 'standoff' and filename.endswith('.txt'):
absPath = os.path.join(directory, filename)
doc = loadDataFromStandoff(absPath,ignoreEntities=ignoreEntities)
corpus.addDocument(doc)
elif dataFormat == 'biocxml' and filename.endswith('.bioc.xml'):
absPath = os.path.join(directory, filename)
tempCorpus = loadDataFromBioC(absPath,ignoreEntities=ignoreEntities)
corpus.documents += tempCorpus.documents
elif dataFormat == 'pubannotation' and filename.endswith('.json'):
absPath = os.path.join(directory, filename)
doc = loadDataFromPubAnnotationJSON(absPath,ignoreEntities=ignoreEntities)
corpus.addDocument(doc)
elif dataFormat == 'simpletag' and filename.endswith('.simple'):
absPath = os.path.join(directory, filename)
with open(absPath,'r') as f:
filecontents = f.read().strip()
doc = parseSimpleTag(filecontents,ignoreEntities=ignoreEntities)
doc.sourceFilename = filename
corpus.addDocument(doc)
if len(corpus.documents) == 0:
raise RuntimeError("No documents loaded from directory (%s). Are you sure this directory contains the corpus (format: %s)" % (path,dataFormat))
elif dataFormat == 'standoff':
doc = loadDataFromStandoff(path,ignoreEntities=ignoreEntities)
corpus.addDocument(doc)
elif dataFormat == 'biocxml':
corpus = loadDataFromBioC(path,ignoreEntities=ignoreEntities)
elif dataFormat == 'pubannotation':
doc = loadDataFromPubAnnotationJSON(path,ignoreEntities=ignoreEntities)
corpus.addDocument(doc)
elif dataFormat == 'simpletag':
with open(path,'r') as f:
filecontents = f.read().strip()
doc = parseSimpleTag(filecontents,ignoreEntities=ignoreEntities)
doc.sourceFilename = os.path.basename(path)
corpus.addDocument(doc)
return corpus
| 5,341,267
|
def test_edit_report_change_invalid_date(client, user, db_setup):
"""verifty that we can post data with a different date and change the
report in the database."""
report = Report.objects.get(reported_by__first_name="Homer")
url = reverse("tfat:edit_report", kwargs={"report_id": report.id})
data = {"report_date": "not a date"}
client.login(username=user.email, password="Abcd1234")
response = client.post(url, data)
content = str(response.content)
assert "Enter a valid date/time." in content
| 5,341,268
|
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the resulting file.
Assumes UTF-8 encoding.
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *parts), "rb", "utf-8") as f:
return f.read()
| 5,341,269
|
def read_idl_catalog(filename_sav, expand_extended=True):
"""
Read in an FHD-readable IDL .sav file catalog.
Deprecated. Use `SkyModel.read_fhd_catalog` instead.
Parameters
----------
filename_sav: str
Path to IDL .sav file.
expand_extended: bool
If True, return extended source components.
Default: True
Returns
-------
:class:`pyradiosky.SkyModel`
"""
warnings.warn(
"This function is deprecated, use `SkyModel.read_fhd_catalog` instead. "
"This function will be removed in version 0.2.0.",
category=DeprecationWarning,
)
skyobj = SkyModel()
skyobj.read_fhd_catalog(
filename_sav,
expand_extended=expand_extended,
)
return skyobj
| 5,341,270
|
def guid2bytes(s):
"""Converts a GUID to the serialized bytes representation"""
assert isinstance(s, str)
assert len(s) == 36
p = struct.pack
return b"".join([
p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)),
p(">H", int(s[19:23], 16)),
p(">Q", int(s[24:], 16))[2:],
])
| 5,341,271
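# Illustrative usage sketch, assuming guid2bytes() above and its `struct` import are
# in scope. The GUID below is the well-known ASF Header Object identifier: the first
# three groups are packed little-endian and the last two big-endian.
header_guid = "75B22630-668E-11CF-A6D9-00AA0062CE6C"
raw = guid2bytes(header_guid)
assert len(raw) == 16
print(raw.hex())  # 3026b2758e66cf11a6d900aa0062ce6c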
|
def config(config_class=None, name="config", group=None):
"""
Class decorator that registers a custom configuration class with
`Hydra's ConfigStore API <https://hydra.cc/docs/tutorials/structured_config/config_store>`_.
If defining your own custom configuration class, your class must do the following:
* Register with `Hydra's ConfigStore API <https://hydra.cc/docs/tutorials/structured_config/config_store>`_ (which this decorator does for you).
* Register as a `@dataclass <https://docs.python.org/3/library/dataclasses.html>`_.
Example::
@config(name="db")
@dataclass
class DBConfig(BaseConfig):
host: str = "localhost"
.. note:: Make sure @dataclass comes after @config.
This also supports `Hydra Config Groups <https://hydra.cc/docs/tutorials/structured_config/config_groups>`_, example::
@config
@dataclass
class Config(BaseConfig):
db: Any = MISSING
@config(name="mysql", group="db")
@dataclass
class MySqlConfig:
host: str = "mysql://localhost"
@config(name="postgres", group="db")
@dataclass
class PostgresConfig:
host: str = "postgres://localhost"
postgres_specific_data: str = "some special data"
Then when running the job you can do the following::
$ python3 -m project_package db=mysql
:param name: Name of the configuration, used to locate overrides.
:type name: str
:param group: Group name to support Hydra Config Groups.
:type group: str
"""
@functools.wraps(config_class)
def wrapper(config_class, name, group):
cs = ConfigStore.instance()
cs.store(name=name, node=config_class, group=group)
return config_class
if config_class:
return wrapper(config_class, name, group)
def recursive_wrapper(config_class):
return config(config_class, name, group)
return recursive_wrapper
| 5,341,272
|
def display_title_screen():
""" Displays the title screen """
global screen
_done = False
background_music.play()
screen = pygame.display.set_mode(size , FULLSCREEN)
while not _done:
_done = _button_press("return")
screen.blit(title_screen,(0,0))
pygame.display.flip()
clock.tick(60)
background_music.stop()
| 5,341,273
|
def truncate_words(text, max_chars, break_words=False, padding=0):
"""
Truncate a string to max_chars, optionally truncating words
"""
if break_words:
        # Hard cut at max_chars (minus padding), guarding against already-short inputs
        return text[:max(max_chars - padding, 0)]
words = []
for word in text.split():
length = sum(map(len, words)) + len(word) + len(words) - 1 + padding
if length >= max_chars:
break
words.append(word)
return ' '.join(words)
| 5,341,274
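# Illustrative usage sketch, assuming truncate_words() above is in scope.
sample = "the quick brown fox"
print(truncate_words(sample, 10))                    # 'the quick'  (stops at a word boundary)
print(truncate_words(sample, 10, break_words=True))  # 'the quick ' (hard cut at 10 characters)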
|
def test__save_to_cloud_storage(mocker):
"""
Test saving examples, outputs and meta to bucket
Args:
mocker: mocker fixture from pytest-mocker
"""
upload_blob_mock = mocker.patch(
"cd_helper.CDHelper._upload_blob", return_value=upload_blob)
write_to_os_mock = mocker.patch(
"cd_helper.CDHelper._write_to_local_fs", return_value={"": ""})
example = Example(
name="name",
pipeline_id="pipeline_id",
sdk=SDK_JAVA,
filepath="filepath",
code="code_of_example",
output="output_of_example",
status=STATUS_UNSPECIFIED,
tag=None,
link="link")
CDHelper()._save_to_cloud_storage([example])
write_to_os_mock.assert_called_with(example)
upload_blob_mock.assert_called_with(source_file="", destination_blob_name="")
| 5,341,275
|
def interpolate_mean_tpr(FPRs=None, TPRs=None, df_list=None):
"""
mean_fpr, mean_tpr = interpolate_mean_tpr(FPRs=None, TPRs=None, df_list=None)
FPRs: False positive rates (list of n arrays)
TPRs: True positive rates (list of n arrays)
df_list: DataFrames with TPR, FPR columns (list of n DataFrames)
"""
# seed empty linspace
mean_tpr, mean_fpr = 0, np.linspace(0, 1, 101)
if TPRs and FPRs:
for idx, PRs in enumerate(zip(FPRs, TPRs)):
mean_tpr += np.interp(mean_fpr, PRs[0], PRs[1])
elif df_list:
for idx, df_ in enumerate(df_list):
mean_tpr += np.interp(mean_fpr, df_.FPR, df_.TPR)
else:
print("Please give valid inputs.")
return None, None
# normalize by length of inputs (# indices looped over)
mean_tpr /= (idx+1)
# add origin point
mean_fpr = np.insert(mean_fpr, 0, 0)
mean_tpr = np.insert(mean_tpr, 0, 0)
return mean_fpr, mean_tpr
| 5,341,276
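# Illustrative usage sketch, assuming numpy (as np) and interpolate_mean_tpr() above
# are in scope: two small ROC curves are averaged onto a common 101-point FPR grid.
import numpy as np

fprs = [np.array([0.0, 0.5, 1.0]), np.array([0.0, 0.25, 1.0])]
tprs = [np.array([0.0, 0.7, 1.0]), np.array([0.0, 0.6, 1.0])]
mean_fpr, mean_tpr = interpolate_mean_tpr(FPRs=fprs, TPRs=tprs)
print(mean_fpr.shape, mean_tpr.shape)  # (102,) (102,) after the origin point is prepended
print(mean_tpr[-1])                    # 1.0 -- both curves reach TPR = 1 at FPR = 1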
|
def _generate_copy_from_codegen_rule(plugin, target_name, thrift_src, file):
"""Generates a rule that copies a generated file from the plugin codegen output
directory out into its own target. Returns the name of the generated rule.
"""
invoke_codegen_rule_name = _invoke_codegen_rule_name(
plugin,
target_name,
thrift_src,
)
plugin_path_prefix = "gen-cpp2-{}".format(plugin.name)
rule_name = _copy_from_codegen_rule_name(plugin, target_name, thrift_src, file)
cmd = " && ".join(
[
"mkdir `dirname $OUT`",
"cp $(location :{})/{} $OUT".format(invoke_codegen_rule_name, file),
],
)
fb_native.genrule(
name = rule_name,
out = "{}/{}".format(plugin_path_prefix, file),
cmd = cmd,
)
return rule_name
| 5,341,277
|
def mir_right(data):
"""
Append Mirror to right
"""
return np.append(data[...,::-1],data,axis=-1)
| 5,341,278
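# Illustrative usage sketch, assuming numpy (as np) and mir_right() above are in scope.
import numpy as np

row = np.array([[1, 2, 3]])
print(mir_right(row))  # [[3 2 1 1 2 3]] -- the mirrored copy is prepended along the last axis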
|
def simulate_master(
mpi_comm, n_simulation_problem_batches_per_process, origin_simulation_problem,
simulation_problem_variations, predecessor_node_lists, truth_tables, max_t,
n_simulation_problems, db_conn, output_directory):
"""
Top-level function of Simulate mode for master. For every combination of initial state, fixed nodes,
and perturbations simulates for requested number of steps.
:param mpi_comm: MPI communicator object
:param n_simulation_problem_batches_per_process: number of batches to split single process load into
:param origin_simulation_problem: simulation problem to start variations from
:param simulation_problem_variations: variations of simulation problems
:param predecessor_node_lists: list of predecessor node lists
:param truth_tables: list of dicts (key: tuple of predecessor node states,
value: resulting node state)
:param max_t: maximum simulation time
:param n_simulation_problems: number of simulations to perform
:param db_conn: database connection, for storing simulations
:param output_directory: output directory path
:return: None
"""
n_processes_text = '{} worker processes'.format(mpi_comm.n_workers) if \
mpi_comm.n_workers > 0 else 'Single process'
logging.getLogger().info('{} will be used to perform {} simulations...'.format(
n_processes_text, n_simulation_problems))
_simulate_until_max_t = partial(simulate_until_max_t, max_t)
generic_master(
mpi_comm, n_simulation_problems, n_simulation_problem_batches_per_process,
_simulate_until_max_t, origin_simulation_problem, simulation_problem_variations,
predecessor_node_lists, truth_tables, set(), None, store_simulation, [],
write_simulations_to_db, db_conn, True, max_t, output_directory, inf)
| 5,341,279
|
def _add_policies_to_role_authorization_scope(
scope_checker: RoleAuthorizationScopeChecker, system_id: str, policies: List[PolicyBean]
):
"""添加权限到分级管理员的范围里"""
    # Policies that still need to be added to the scope
need_added_policies = []
    # Note: protected methods of scope_checker are called below because, for now, we avoid
    # modifying scope_checker intrusively just to expose a public method
try:
scope_checker._check_system_in_scope(system_id)
        # The whole system does not need to be added, so check each policy individually
for p in policies:
try:
scope_checker._check_policy_in_scope(system_id, p)
except APIException:
                # Policies that fail the check must be added to the grade manager's scope
need_added_policies.append(p)
except APIException:
        # The whole system is outside the grade manager's authorization scope,
        # so all policies need to be added
need_added_policies = policies
if len(need_added_policies) == 0:
return
    # Fetch the grade manager's authorizable scope
role_id = scope_checker.role.id
auth_scopes: List[AuthScopeSystem] = RoleBiz().list_auth_scope(role_id)
    # Locate the entry for the system that needs to change
index = -1
for idx, auth_scope in enumerate(auth_scopes):
if auth_scope.system_id == system_id:
index = idx
break
    # Merge the new policies with the existing ones
policies_dict = {p.id: PolicyBean.parse_obj(p) for p in auth_scopes[index].actions} if index != -1 else {}
for policy in need_added_policies:
        # Not present yet, so this is a brand-new policy
if policy.action_id not in policies_dict:
policies_dict[policy.action_id] = policy
continue
        # Merge with the existing policy
policy.add_related_resource_types(policies_dict[policy.action_id].related_resource_types)
policies_dict[policy.action_id] = policy
auth_scope = AuthScopeSystem(
system_id=system_id, actions=parse_obj_as(List[AuthScopeAction], [p.dict() for p in policies_dict.values()])
)
    # If the system is not yet in the authorizable scope, append it; otherwise replace the existing entry
if index == -1:
auth_scopes.append(auth_scope)
else:
auth_scopes[index] = auth_scope
    # Persist the updated scope
RoleScope.objects.filter(role_id=role_id, type=RoleScopeType.AUTHORIZATION.value).update(
content=json_dumps([one.dict() for one in auth_scopes]),
)
| 5,341,280
|
def mask_nms(masks, bbox_scores, instances_confidence_threshold=0.5, overlap_threshold=0.7):
"""
NMS-like procedure used in Panoptic Segmentation
Remove the overlap areas of different instances in Instance Segmentation
"""
panoptic_seg = np.zeros(masks.shape[:2], dtype=np.uint8)
sorted_inds = list(range(len(bbox_scores)))
current_segment_id = 0
segments_score = []
for inst_id in sorted_inds:
score = bbox_scores[inst_id]
if score < instances_confidence_threshold:
break
mask = masks[:, :, inst_id]
mask_area = mask.sum()
if mask_area == 0:
continue
intersect = (mask > 0) & (panoptic_seg > 0)
intersect_area = intersect.sum()
if intersect_area * 1.0 / mask_area > overlap_threshold:
continue
if intersect_area > 0:
mask = mask & (panoptic_seg == 0)
current_segment_id += 1
# panoptic_seg[np.where(mask==1)] = current_segment_id
# panoptic_seg = panoptic_seg + current_segment_id*mask
panoptic_seg = np.where(mask == 0, panoptic_seg, current_segment_id)
segments_score.append(score)
# print(np.unique(panoptic_seg))
return panoptic_seg, segments_score
| 5,341,281
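# Illustrative usage sketch, assuming numpy (as np) and mask_nms() above are in scope.
# Two overlapping instance masks: the lower-scoring instance keeps only the pixels
# that the higher-scoring one has not already claimed.
import numpy as np

masks = np.zeros((4, 4, 2), dtype=np.uint8)
masks[:, 0:2, 0] = 1        # instance 1 covers columns 0-1
masks[:, 1:3, 1] = 1        # instance 2 covers columns 1-2 (column 1 overlaps)
scores = [0.9, 0.8]         # already sorted by confidence, as the function assumes
panoptic, kept_scores = mask_nms(masks, scores)
print(np.unique(panoptic))  # [0 1 2] -- background plus two segment ids
print(kept_scores)          # [0.9, 0.8]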
|
def table_to_lookup(table):
"""Converts the contents of a dynamodb table to a dictionary for reference.
Uses dump_table to download the contents of a specified table, then creates
a route lookup dictionary where each key is (route id, express code) and
contains elements for avg_speed, and historic_speeds.
Args:
table: A boto3 Table object from which all data will be read
into memory and returned.
Returns:
        A dictionary keyed by (route id, local express code) containing
        average speed (num) and historic speeds (list) data.
"""
# Put the data in a dictionary to reference when adding speeds to geojson
items = dump_table(table)
route_lookup = {}
for item in items:
if 'avg_speed_m_s' in item.keys():
route_id = int(item['route_id'])
local_express_code = item['local_express_code']
hist_speeds = [float(i) for i in item['historic_speeds']]
route_lookup[(route_id, local_express_code)] = {
'avg_speed_m_s': float(item['avg_speed_m_s']),
'historic_speeds': hist_speeds
}
return route_lookup
| 5,341,282
|
def yaml2bib(
bib_fname: str,
dois_yaml: str,
replacements_yaml: Optional[str],
static_bib: Optional[str],
doi2bib_database: str,
crossref_database: str,
email: str,
) -> None:
"""Convert a yaml file to bib file with the correct journal abbreviations.
Parameters
----------
bib_fname: str
Output file. (default: ``'dissertation.bib'``)
dois_yaml: str
The ``key: doi`` YAML file, may contain wildcards (``*``).
(default: ``'bib.yaml'``, example: ``'*/*.yaml'``)
replacements_yaml: str
Replacements to perform, might be ``None``.
(default: ``None``, example: ``'replacements.yaml'``)
static_bib: str
Static bib entries, might be ``None``, may contain wildcards (``*``).
(default: ``None``, example: ``'chapter_*/not_on_crossref.bib'``)
doi2bib_database: str
The doi2bib database folder 📁 to not query doi.org more than needed.
(default: ``'yaml2bib-doi2bib.db'``)
crossref_database: str
The Crossref database folder 📁 to not query crossref.org more than needed.
(default: ``'yaml2bib-doi2bib.db'``)
email: str
        E-mail 📧 for crossref.org, such that one can make faster API calls.
(default: ``'anonymous'``, example: ``'bas@nijho.lt'``)
Returns
-------
None
Examples
--------
Example invocation for my `thesis <https://gitlab.kwant-project.org/qt/basnijholt/thesis-bas-nijholt>`_.
.. code-block:: bash
yaml2bib \\
--bib_fname "dissertation.bib" \\
--dois_yaml "*/*.yaml" \\
--replacements_yaml "replacements.yaml" \\
--static_bib "chapter_*/not_on_crossref.bib" \\
--email "bas@nijho.lt"
"""
etiquette = crossref.restful.Etiquette("publist", contact_email=email)
works = crossref.restful.Works(etiquette=etiquette)
dois = parse_doi_yaml(dois_yaml)
replacements = parse_replacements_yaml(replacements_yaml)
entries = get_bib_entries(
dois, replacements, doi2bib_database, crossref_database, works
)
bib_files = static_bib_entries(static_bib)
write_output(entries, bib_files, bib_fname)
| 5,341,283
|
def _initialize_weights(variable_scope, n_head, n_input, n_hidden):
""" initialize the weight of Encoder with multiple heads, and Decoder
"""
with tf.variable_scope(variable_scope, reuse=tf.AUTO_REUSE):
all_weights = dict()
        ## forward: each head gets an equal share of the hidden dimension
        n_head_hidden = n_hidden // n_head  # integer division so the layer shape stays an int
# n_head_hidden = n_hidden
for i in range(n_head):
index = i + 1
weight_key = 'w1_%d' % index
bias_key = 'b1_%d' % index
all_weights[weight_key] = tf.get_variable(weight_key, shape=[n_input, n_head_hidden],
initializer=tf.contrib.layers.xavier_initializer())
all_weights[bias_key] = tf.Variable(tf.zeros([n_head_hidden], dtype = tf.float32), name = bias_key)
## reconstruct
all_weights['w2'] = tf.Variable(tf.zeros([n_hidden, n_input], dtype = tf.float32), name = 'w2')
all_weights['b2'] = tf.Variable(tf.zeros([n_input], dtype = tf.float32), name = 'b2')
## DEBUG:
for key in all_weights.keys():
tensor = all_weights[key]
print ("DEBUG: Shape of Weight %s" % key)
print (tensor.shape)
return all_weights
| 5,341,284
|
def add_run_qc_command(cmdparser):
"""
Create a parser for the 'run_qc' command
"""
p = cmdparser.add_command('run_qc',help="Run QC procedures",
description="Run QC procedures for sequencing "
"projects in ANALYSIS_DIR.")
# Defaults
default_nthreads = __settings.qc.nprocessors
fastq_screen_subset = __settings.qc.fastq_screen_subset
max_concurrent_jobs = __settings.general.max_concurrent_jobs
max_cores = __settings.general.max_cores
max_batches = __settings.general.max_batches
enable_conda = ("yes" if __settings.conda.enable_conda else "no")
conda_env_dir = __settings.conda.env_dir
# Build parser
p.add_argument('--projects',action='store',
dest='project_pattern',default=None,
help="simple wildcard-based pattern specifying a "
"subset of projects and samples to run the QC on. "
"PROJECT_PATTERN should be of the form 'pname[/sname]', "
"where 'pname' specifies a project (or set of "
"projects) and 'sname' optionally specifies a sample "
"(or set of samples).")
p.add_argument('--fastq_screen_subset',action='store',dest='subset',
type=int,default=fastq_screen_subset,
help="specify size of subset of total reads to use for "
"fastq_screen (i.e. --subset option); (default %d, set to "
"0 to use all reads)" % fastq_screen_subset)
p.add_argument('-t','--threads',action='store',dest="nthreads",
type=int,default=default_nthreads,
help="number of threads to use for QC script "
"(default: %s)" % ('taken from job runner'
if not default_nthreads
else default_nthreads,))
p.add_argument('--ungzip-fastqs',action='store_true',dest='ungzip_fastqs',
help="create decompressed copies of fastq.gz files")
p.add_argument('--max-jobs',action='store',
dest='max_jobs',default=max_concurrent_jobs,type=int,
help="explicitly specify maximum number of concurrent QC "
"jobs to run (default %s, change in settings file)"
% max_concurrent_jobs)
p.add_argument('--qc_dir',action='store',dest='qc_dir',default='qc',
help="explicitly specify QC output directory (nb if "
"supplied then the same QC_DIR will be used for each "
"project. Non-absolute paths are assumed to be relative to "
"the project directory). Default: 'qc'")
p.add_argument('--fastq_dir',action='store',dest='fastq_dir',default=None,
help="explicitly specify subdirectory of DIR with "
"Fastq files to run the QC on.")
p.add_argument("--10x_chemistry",
choices=sorted(CELLRANGER_ASSAY_CONFIGS.keys()),
dest="cellranger_chemistry",default="auto",
help="assay configuration for 10xGenomics scRNA-seq; if "
"set to 'auto' (the default) then cellranger will attempt "
"to determine this automatically")
p.add_argument('--10x_transcriptome',action='append',
metavar='ORGANISM=REFERENCE',
dest='cellranger_transcriptomes',
help="specify cellranger transcriptome reference datasets "
"to associate with organisms (overrides references defined "
"in config file)")
p.add_argument('--10x_premrna_reference',action='append',
metavar='ORGANISM=REFERENCE',
dest='cellranger_premrna_references',
help="specify cellranger pre-mRNA reference datasets "
"to associate with organisms (overrides references defined "
"in config file)")
p.add_argument('--report',action='store',dest='html_file',default=None,
help="file name for output HTML QC report (default: "
"<QC_DIR>_report.html)")
add_runner_option(p)
add_modulefiles_option(p)
# Conda options
conda = p.add_argument_group("Conda dependency resolution")
conda.add_argument('--enable-conda',choices=["yes","no"],
dest="enable_conda",default=enable_conda,
help="use conda to resolve task dependencies; can "
"be 'yes' or 'no' (default: %s)" % enable_conda)
conda.add_argument('--conda-env-dir',action='store',
dest="conda_env_dir",default=conda_env_dir,
help="specify directory for conda enviroments "
"(default: %s)" % ('temporary directory'
if not conda_env_dir else
conda_env_dir))
# Job control options
job_control = p.add_argument_group("Job control options")
job_control.add_argument('-j','--maxjobs',type=int,action='store',
dest="max_jobs",metavar='NJOBS',
default=max_concurrent_jobs,
help="maxiumum number of jobs to run "
"concurrently (default: %s)"
% (max_concurrent_jobs
if max_concurrent_jobs else 'no limit'))
job_control.add_argument('-c','--maxcores',type=int,action='store',
dest='max_cores',metavar='NCORES',
default=max_cores,
help="maximum number of cores available for "
"running jobs (default: %s)"
% (max_cores if max_cores else 'no limit'))
job_control.add_argument('-b','--maxbatches',type=int,action='store',
dest='max_batches',metavar='NBATCHES',
default=__settings.general.max_batches,
help="enable dynamic batching of pipeline "
"jobs with maximum number of batches set to "
"NBATCHES (default: %s)"
% (max_batches if max_batches
else 'no batching'))
# Advanced options
advanced = p.add_argument_group('Advanced/debugging options')
advanced.add_argument('--verbose',action="store_true",
dest="verbose",default=False,
help="run pipeline in 'verbose' mode")
advanced.add_argument('--work-dir',action="store",
dest="working_dir",default=None,
help="specify the working directory for the "
"pipeline operations")
add_debug_option(advanced)
# Deprecated options
deprecated = p.add_argument_group('Deprecated/defunct options')
deprecated.add_argument('--no-ungzip-fastqs',action='store_true',
dest='no_ungzip_fastqs',
help="don't create uncompressed copies of "
"fastq.gz files (does nothing; this is now the "
"default, use --ungzip-fastqs to turn on "
"decompression)")
p.add_argument('analysis_dir',metavar="ANALYSIS_DIR",nargs="?",
help="auto_process analysis directory (optional: defaults "
"to the current directory)")
| 5,341,285
|
def sh(cmd, stdin="", sleep=False):
""" run a command, send stdin and capture stdout and exit status"""
if sleep:
time.sleep(0.5)
# process = Popen(cmd.split(), stdin=PIPE, stdout=PIPE)
process = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE)
    # Send stdin via communicate() to avoid pipe deadlocks on large inputs
    stdout = process.communicate(bytes(stdin, "utf-8"))[0].decode('utf-8').strip()
returncode = process.returncode
return returncode, stdout
| 5,341,286
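# Illustrative usage sketch, assuming sh() above is in scope and a Unix-like shell
# with `cat` and `false` available.
rc, out = sh("cat", stdin="hello world")
print(rc, out)  # 0 hello world
rc, _ = sh("false")
print(rc)       # 1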
|
def array_to_slide(arr: np.ndarray) -> openslide.OpenSlide:
"""converts a numpy array to a openslide.OpenSlide object
Args:
arr (np.ndarray): input image array
Returns:
openslide.OpenSlide: a slide object from openslide
"""
assert isinstance(arr, np.ndarray)
slide = openslide.ImageSlide(Image.fromarray(arr))
return slide
| 5,341,287
|
def baseline_replacement_by_blur(arr: np.array, patch_slice: Sequence,
blur_kernel_size: int = 15, **kwargs) -> np.array:
"""
Replace a single patch in an array by a blurred version.
Blur is performed via a 2D convolution.
blur_kernel_size controls the kernel-size of that convolution (Default is 15).
Assumes unbatched channel first format.
"""
nr_channels = arr.shape[0]
# Create blurred array.
blur_kernel_size = (1, *([blur_kernel_size] * (arr.ndim - 1)))
kernel = np.ones(blur_kernel_size, dtype=arr.dtype)
kernel *= 1.0 / np.prod(blur_kernel_size)
kernel = np.tile(kernel, (nr_channels, 1, *([1] * (arr.ndim - 1))))
if arr.ndim == 3:
arr_avg = conv2D_numpy(
x=arr,
kernel=kernel,
stride=1,
padding=0,
groups=nr_channels,
pad_output=True,
)
elif arr.ndim == 2:
raise NotImplementedError("1d support not implemented yet")
else:
raise ValueError("Blur supports only 2d inputs")
# Perturb array.
arr_perturbed = copy.copy(arr)
arr_perturbed[patch_slice] = arr_avg[patch_slice]
return arr_perturbed
| 5,341,288
|
def admit_dir(file):
""" create the admit directory name from a filename
        This filename can be a FITS file (usually with a .fits extension)
or a directory, which would be assumed to be a CASA image or
a MIRIAD image. It can be an absolute or relative address
"""
loc = file.rfind('.')
ext = '.admit'
if loc < 0:
return file + ext
else:
if file[loc:] == ext:
print "Warning: assuming a re-run on existing ",file
return file
return file[:loc] + ext
| 5,341,289
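# Illustrative usage sketch, assuming admit_dir() above is in scope.
print(admit_dir("ngc1234.fits"))  # ngc1234.admit
print(admit_dir("mymap"))         # mymap.admit (no extension to replace)
print(admit_dir("run1.admit"))    # run1.admit (already an ADMIT name; a re-run warning is printed)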
|
def grad(values: list[int], /) -> list[int]:
"""Compute the gradient of a sequence of values."""
return [v2 - v1 for v1, v2 in zip(values, values[1:])]
| 5,341,290
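# Illustrative usage sketch, assuming grad() above is in scope (Python 3.9+ for list[int]).
print(grad([1, 4, 9, 16]))  # [3, 5, 7] -- successive differences
print(grad([5]))            # [] -- fewer than two values yields an empty gradient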
|
def subtract(
num1: Union[int, float], num2: Union[int, float], *args
) -> Union[int, float]:
"""Subtracts given numbers"""
sub: Union[int, float] = num1 - num2
for num in args:
sub -= num
return sub
| 5,341,291
|
def get_submodules(module):
"""
Attempts to find all submodules of a given module object
"""
_skip = [
"numpy.f2py",
"numpy.f2py.__main__",
"numpy.testing.print_coercion_tables",
]
try:
path = module.__path__
except Exception:
path = [getfile(module)]
modules = {_name(module): module}
for importer, modname, ispkg in pkgutil.walk_packages(
path=path,
prefix=_name(module) + ".",
onerror=lambda x: None,
):
# Some known packages cause issues
if modname in _skip:
continue
try:
modules[modname] = importlib.import_module(modname)
except Exception:
pass
return modules
| 5,341,292
|
def ucr_context_cache(vary_on=()):
"""
Decorator which caches calculations performed during a UCR EvaluationContext
The decorated function or method must have a parameter called 'context'
which will be used by this decorator to store the cache.
"""
def decorator(fn):
assert 'context' in fn.__code__.co_varnames
assert isinstance(vary_on, tuple)
@wraps(fn)
def _inner(*args, **kwargs):
# shamelessly stolen from quickcache
callargs = inspect.getcallargs(fn, *args, **kwargs)
context = callargs['context']
prefix = '{}.{}'.format(
fn.__name__[:40] + (fn.__name__[40:] and '..'),
hashlib.md5(inspect.getsource(fn).encode('utf-8')).hexdigest()[-8:]
)
cache_key = (prefix,) + tuple(callargs[arg_name] for arg_name in vary_on)
if context.exists_in_cache(cache_key):
return context.get_cache_value(cache_key)
res = fn(*args, **kwargs)
context.set_cache_value(cache_key, res)
return res
return _inner
return decorator
| 5,341,293
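# Illustrative usage sketch, assuming ucr_context_cache() above is importable and this
# code lives in a module file (inspect.getsource() needs source on disk). FakeContext
# is a hypothetical stand-in exposing the three cache methods the decorator relies on.
class FakeContext(object):
    def __init__(self):
        self._cache = {}
    def exists_in_cache(self, key):
        return key in self._cache
    def get_cache_value(self, key):
        return self._cache[key]
    def set_cache_value(self, key, value):
        self._cache[key] = value

@ucr_context_cache(vary_on=('doc_id',))
def expensive_lookup(doc_id, context):
    print("computing", doc_id)  # only printed on a cache miss
    return doc_id.upper()

ctx = FakeContext()
print(expensive_lookup("abc", context=ctx))  # computed once, then stored on the context
print(expensive_lookup("abc", context=ctx))  # served from the context cache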
|
def extract_value(item, key):
"""Get the value for the given key or return an empty string if no value is found."""
value = item.find(key).text
if value is None:
value = ""
else:
value = sanitize_value(value)
return value
| 5,341,294
|
def create_train_test_split(data: pd.DataFrame,
split_size: float = .8,
seed: int = 42) -> list:
"""
takes the final data set and splits it into random train and test subsets.
Returns a list containing train-test split of inputs
Args:
data: dataset to be split into train/test
split_size: the size of the train dataset (default .8)
seed: pass an int for reproducibility purposes
Returns:
A list containing train-test split of inputs
"""
# assert split size between 0 and 1
assert 0 <= split_size <= 1, "split_size out of bounds"
assert isinstance(data, pd.DataFrame), "no DataFrame provided"
assert isinstance(seed, int), "provided seed is no integer"
# split into features and target
# features = data.drop('target', axis=1)
# target = data['target']
# stratify by the target to ensure equal distribution
return train_test_split(data, train_size=split_size, random_state=seed, shuffle=True)
| 5,341,295
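# Illustrative usage sketch, assuming pandas, scikit-learn's train_test_split and
# create_train_test_split() above are in scope.
import pandas as pd

df = pd.DataFrame({"feature": range(10), "target": [0, 1] * 5})
train, test = create_train_test_split(df, split_size=0.8, seed=42)
print(len(train), len(test))  # 8 2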
|
def add_devices(session, devices):
""" Adds the devices in the list to the data store
Args:
session: Active database session
devices: list of Device classes
"""
_add(session, devices)
session.commit()
| 5,341,296
|
def calcRotationVector(vertexSelection, norm):
"""None"""
pass
| 5,341,297
|
def get_vec(text, model, stopwords):
"""
Transform text pandas series in array with the vector representation of the
sentence using fasttext model
"""
array_fasttext = np.array([sent2vec(x, model, stopwords) for x in text])
return array_fasttext
| 5,341,298
|
def convert_to_npz(kwargs):
"""
:param kwargs: npy-file:path or name:namestr and destination:path
"""
assert "identifier" in kwargs.keys(), "you need to define at least a npyfile-identifier"
identifier = kwargs["identifier"]
if "folder" in kwargs.keys():
folder = kwargs["folder"]
else:
folder = ""
dest = kwargs["destination"]
if "name" in kwargs.keys():
name = kwargs["name"]
else:
name = identifier
npy_file = os.path.join(folder, identifier+".npy")
data = np.load(npy_file)
np.savez_compressed(os.path.join(dest, identifier + ".npz"), **{name:data})
if "verbose" in kwargs.keys() and kwargs["verbose"]:
print("converted file {} to npz".format(npy_file))
| 5,341,299
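# Illustrative usage sketch, assuming numpy (as np), os and convert_to_npz() above
# are in scope; a temporary .npy file is converted to a compressed .npz archive.
import os
import tempfile
import numpy as np

workdir = tempfile.mkdtemp()
np.save(os.path.join(workdir, "sample.npy"), np.arange(6).reshape(2, 3))
convert_to_npz({"identifier": "sample", "folder": workdir,
                "destination": workdir, "name": "data", "verbose": True})
with np.load(os.path.join(workdir, "sample.npz")) as npz:
    print(npz["data"].shape)  # (2, 3)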
|