| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
async def test_age_gate_error():
"""
Should show the error message on incorrect input
"""
u = User(addr="27820001001", state=StateData(name="state_age_gate"), session_id=1)
app = Application(u)
msg = Message(
content="invalid",
to_addr="27820001002",
from_addr="27820001001",
transport_name="whatsapp",
transport_type=Message.TRANSPORT_TYPE.HTTP_API,
)
[reply] = await app.process_message(msg)
assert len(reply.content) < 140
assert u.state.name == "state_age_gate"
| 16,800
|
def load_config(filepath: str) -> DictStrAny:
"""Read config file.
The config file can be in yaml, json or toml.
toml is recommended for readability.
"""
ext = os.path.splitext(filepath)[1]
if ext == ".yaml":
with open_read_text(filepath) as fp:
config_dict = yaml.load(fp.read(), Loader=yaml.Loader)
elif ext == ".toml":
config_dict = toml.load(filepath)
elif ext == ".json":
with open_read_text(filepath) as fp:
config_dict = json.load(fp)
else:
raise NotImplementedError(f"Config extention {ext} not supported")
assert isinstance(config_dict, dict)
return config_dict
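# Illustrative usage sketch (assumes a hypothetical "settings.toml" file with a
# [database] table; not taken from the original project):
# cfg = load_config("settings.toml")
# cfg would then be a plain dict, e.g. {"database": {"host": "localhost", "port": 5432}}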
| 16,801
|
def _expected_datatypes(product_type):
"""
Auxiliary function. Returns the most current mapping of the keys we expect to find in the different forms of metadata, together with how each value should be parsed.
"""
if product_type == "SLC":
# Only the datetimes need to be parsed.
expected_dtypes = {
"acquisition_start_utc": "parse_datetime_single",
"acquisition_end_utc": "parse_datetime_single",
"dc_estimate_time_utc": "parse_datetime_single",
"first_pixel_time_utc": "parse_datetime_single",
"state_vector_time_utc": "parse_datetime_vect",
"zerodoppler_start_utc": "parse_datetime_single",
"zerodoppler_end_utc": "parse_datetime_single",
}
elif product_type == "GRD":
# All the fields need to be parsed, so all the datatypes are input.
expected_dtypes = {
"acquisition_end_utc": "parse_datetime_single", # single datetime
"acquisition_mode": str,
"acquisition_prf": float,
"acquisition_start_utc": str,
"ant_elev_corr_flag": bool,
"area_or_point": str,
"avg_scene_height": float,
"azimuth_spacing": float,
"azimuth_look_bandwidth": float,
"azimuth_look_overlap": float,
"azimuth_looks": int,
"azimuth_time_interval": float,
"calibration_factor": float,
"carrier_frequency": float,
"chirp_bandwidth": float,
"chirp_duration": float,
"coord_center": "parse_float_vect", # 1d vect of floats, needs to be parsed
"coord_first_far": "parse_float_vect",
"coord_first_near": "parse_float_vect",
"coord_last_far": "parse_float_vect",
"coord_last_near": "parse_float_vect",
"dc_estimate_coeffs": "parse_float_vect",
"dc_estimate_poly_order": int,
"dc_estimate_time_utc": "parse_datetime_vect", # datetime vector
"dc_reference_pixel_time": float,
"doppler_rate_coeffs": "parse_float_vect",
"doppler_rate_poly_order": int,
"doppler_rate_reference_pixel_time": float,
"gcp_terrain_model": str,
"geo_ref_system": str,
"grsr_coefficients": "parse_float_vect",
"grsr_ground_range_origin": float,
"grsr_poly_order": int,
"grsr_zero_doppler_time": "parse_datetime_single", # single datetime
"heading": float,
"incidence_angle_coefficients": "parse_float_vect",
"incidence_angle_ground_range_origin": float,
"incidence_angle_poly_order": int,
"incidence_angle_zero_doppler_time": "parse_datetime_single", # single datetime
"incidence_center": float,
"incidence_far": float,
"incidence_near": float,
"look_side": str,
"mean_earth_radius": float,
"mean_orbit_altitude": float,
"number_of_azimuth_samples": int,
"number_of_dc_estimations": int,
"number_of_range_samples": int,
"number_of_state_vectors": int,
"orbit_absolute_number": int,
"orbit_direction": str,
"orbit_processing_level": str,
"orbit_relative_number": int,
"orbit_repeat_cycle": int,
"polarization": str,
"posX": "parse_float_vect",
"posY": "parse_float_vect",
"posZ": "parse_float_vect",
"processing_prf": float,
"processing_time": "parse_datetime_single", # single datetime
"processor_version": str,
"product_file": str,
"product_level": str,
"product_name": str,
"product_type": str,
"range_looks": int,
"range_sampling_rate": float,
"range_spacing": float,
"range_spread_comp_flag": bool,
"sample_precision": str,
"satellite_look_angle": str,
"satellite_name": str,
"slant_range_to_first_pixel": float,
"state_vector_time_utc": "parse_datetime_vect", # 1d vect of datetimes, need to be parsed.
"total_processed_bandwidth_azimuth": float,
"velX": "parse_float_vect",
"velY": "parse_float_vect",
"velZ": "parse_float_vect",
"window_function_azimuth": str,
"window_function_range": str,
"zerodoppler_end_utc": "parse_datetime_single", # single datetime
"zerodoppler_start_utc": "parse_datetime_single", # single datetime
}
elif product_type == "xml":
raise NotImplementedError
elif not isinstance(product_type, str):
raise TypeError(
'Did not understand input "product_type", a str was expected but a %s datatype variable was input.'
% type(product_type)
)
else:
raise ValueError(
'Did not understand input "product_type", either "SLC", "GRD" or "xml" was expected but %s was input.'
% product_type
)
return expected_dtypes
| 16,802
|
def browser():
"""The browser driver
We launch the SHEBANQ site in preparation for running many tests.
We deliver not only a driver, but also a wait object.
"""
with Safari() as driver:
yield driver
print("Closing test browser")
| 16,803
|
def convert_vecs_to_var(
c_sys: CompositeSystem, vecs: List[np.ndarray], on_para_eq_constraint: bool = True
) -> np.ndarray:
"""converts hs of povm to vec of variables.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this state.
vecs : List[np.ndarray]
list of vec of povm elements.
on_para_eq_constraint : bool, optional
whether to use the equality constraint, by default True.
Returns
-------
np.ndarray
vec of variables.
"""
var = copy.copy(vecs)
if on_para_eq_constraint:
del var[-1]
var = np.hstack(var)
return var
| 16,804
|
def run_tests():
"""
This method dynamically creates tests based on the contents of the 'qgis'
subfolder.
The structure of files in the subfolder must be as follows:
- For each dataset that you want to use, create a subfolder under 'qgis'
- Add the layer file in that subfolder. It must be named 'testlayer.gpkg'
or 'testlayer.tiff' depending on whether it is a vector or a raster layer
- In the same subfolder, along with the layer file, you can add as many
qml files as you want to test. The names of these files will be used
to set the name of the corresponding test
- For each qml file, a .geostyler file with the same name must exist in
the subfolder. It must contain the expected geostyler representation
of the style in the qml file.
- The test will load the testlayer file, assign the qml to it, generate
a geostyler representation from it, and then compare to the expected
geostyler result.
"""
suite = unittest.TestSuite()
main_folder = os.path.join(os.path.dirname(__file__), "data", "qgis")
for subfolder in os.listdir(main_folder):
datafile = os.path.join(main_folder, subfolder, "testlayer.gpkg")
if not os.path.exists(datafile):
datafile = os.path.join(main_folder, subfolder, "testlayer.tiff")
subfolder_path = os.path.join(main_folder, subfolder)
for style in os.listdir(subfolder_path):
if style.lower().endswith("qml"):
stylefile = os.path.join(subfolder_path, style)
name = os.path.splitext(stylefile)[0]
expectedfile = name + ".geostyler"
with open(expectedfile) as f:
expected = json.load(f)
setattr(
QgisToStylerTest,
"test_" + name,
test_function(datafile, stylefile, expected),
)
suite = unittest.defaultTestLoader.loadTestsFromTestCase(QgisToStylerTest)
unittest.TextTestRunner().run(suite)
| 16,805
|
def test_validate_without_code(superuser, collection):
"""Attempt editing entry without a code."""
form = EditForm(superuser, collection.code, friendly_name='The old books', category='library')
assert form.validate() is False
assert _('This field is required.') in form.code.errors
| 16,806
|
def convert_CSV_into_df(file_loc):
"""
Generate a pandas DataFrame from CSV data (rotation and position).
"""
df = pd.DataFrame()
for directory in glob.glob(file_loc): # Selecting all the folders in dataset directory.
d = [] # Empty list.
f = directory.split(os.sep)
for file in glob.glob(directory+"*.csv"): # Reading all the CSV files in dataset directory one by one.
d.append(file)
d = sorted(d) # Ensures rotation and position are together
while len(d)!=0:
rot = d.pop(0) # Pop the rotation and position CSV file paths from the list.
pos = d.pop(0)
df1 = pd.read_csv(rot, nrows=200) # Read the first 200 rows from rotation and position CSV. value can be 200 or 150.
df2 = pd.read_csv(pos, nrows=200)
df_merge = merge_rot_pos(df1,df2,f[1]) # Call the merge function to merge the fetched rotation and position data with the class label.
df = df.append(df_merge,ignore_index=True) # Append the merged data to the pandas DataFrame one by one.
return df
| 16,807
|
def ziw2md(md_file, export_md_path, tmp_path, abs_img_path=False):
"""将.md.ziw文件转为标准md文件,导出图片和附件文件到本地目录"""
ziw_zip = zipfile.ZipFile(md_file)
ziw_zip.extractall(tmp_path)
ziw_zip.close()
print(f"正在转换《{md_file.stem}》……")
export_md_file = export_md_path.joinpath(md_file.parent.stem, md_file.stem.replace('.md', '')+'.md')
export_attachment_path = export_md_file.parent / export_md_file.stem # directory where images and attachments are saved
with open(tmp_path / 'index.html', encoding='utf-16') as f1:
content = f1.read()
content = content.replace('</div>', '\n')
content = content.replace('<br>', '\n')
content = content.replace('<br/>', '\n')
'''
pattern1 = re.compile(r'<!doctype.*?</head>', re.DOTALL | re.IGNORECASE | re.MULTILINE)
content = pattern1.sub('', content)
pattern2 = re.compile(r'.*WizHtmlContentBegin-->', re.DOTALL | re.IGNORECASE | re.MULTILINE)
content = pattern2.sub('', content)
content = re.sub(r'<body.*?>', '', content)
content = content.replace('&lt;', '<')
content = content.replace('&gt;', '>')
content = content.replace('&nbsp;', ' ')
content = content.replace('<div>', '')
content = content.replace('</div>', '\n')
content = content.replace('<br/>', '\n')
content = content.replace('<br>', '\n')
content = content.replace('</body></html>', '')
# content = html2text.html2text(content)
content = content.replace(r'\---', '---').strip()
content = re.sub(r'<ed_tag name="markdownimage" .*?</ed_tag>', '', content).strip() # strip the trailing content that contains the image link files
'''
tree = etree.HTML(content)
content = tree.xpath('//body')[0].xpath('string(.)')
# Point image file links to the corresponding directory
if abs_img_path:
content = content.replace('index_files', str(export_attachment_path))
else:
content = content.replace('index_files', export_attachment_path.stem)
# Write the Markdown file into its per-folder output directory
if not (export_md_path / md_file.parent.stem).exists():
(export_md_path / md_file.parent.stem).mkdir()
with open(export_md_file, 'w', encoding='utf-8') as f2:
f2.write(content)
print(f'Exported: {export_md_file}.')
# Copy the image files under index_files into a directory named after the Markdown file title
if (tmp_path / 'index_files').exists():
# shutil.copytree((tmp_path / 'index_files'), export_attachment_path, dirs_exist_ok=True)
(tmp_path / 'index_files').rename(export_attachment_path)
# Copy the files under the attachment directory into a directory named after the Markdown file title
attachment_path = md_file.parent.joinpath(md_file.stem, '.md_Attachments')
if attachment_path.exists():
if not export_attachment_path.exists():
export_attachment_path.mkdir()
for attachment in attachment_path.glob('*.*'):
shutil.copy2(attachment, export_attachment_path)
# shutil.rmtree(tmp_path)
| 16,808
|
def timetable_to_subrip(aligned_timetable):
"""
Converts the aligned timetable into the SubRip format.
Args:
aligned_timetable (list[dict]):
An aligned timetable that is output by the `Aligner` class.
Returns:
str:
Text representing a SubRip file.
"""
# Define a variable to contain the file's contents
file_contents = ""
# Process each block
for i, block in enumerate(aligned_timetable):
# Define a temporary variable to store this caption block
block_text = f"{i + 1}\n" # Every SubRip caption block starts with a number
# Get the start and end time of the block
start_time = timedelta_to_subrip_time(timedelta(seconds=block["start_time"]))
end_time = timedelta_to_subrip_time(timedelta(seconds=block["end_time"]))
# Add the timing line to the block of text
block_text += f"{start_time} --> {end_time}\n"
# Add the line of text from the `block` to the block of text
block_text += block["text"] + "\n\n"
# Add the `block_text` to the `file_contents`
file_contents += block_text
# Return the final file's contents
return file_contents
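# Illustrative example of the output format (assumes timedelta_to_subrip_time
# renders times as HH:MM:SS,mmm): a single block
# {"start_time": 0.0, "end_time": 2.5, "text": "Hello"} would yield
# "1\n00:00:00,000 --> 00:00:02,500\nHello\n\n".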
| 16,809
|
def add(a: T.Tensor, b: T.Tensor) -> T.Tensor:
"""
Add tensor a to tensor b using broadcasting.
Args:
a: A tensor
b: A tensor
Returns:
tensor: a + b
"""
return a + b
| 16,810
|
def integrate(que):
"""
check if block nears another block and integrate them
@param que: init blocks
@type que: deque
@return: integrated block
@rtype: list
"""
blocks = []
t1, y, x = que.popleft()
blocks.append([y, x])
if t1 == 2:
blocks.append([y, x + 1])
elif t1 == 3:
blocks.append([y + 1, x])
return blocks
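# Worked example: a type-2 block extends one cell to the right, so
# integrate(deque([(2, 5, 3)])) returns [[5, 3], [5, 4]]; a type-3 block extends
# downward, so integrate(deque([(3, 5, 3)])) returns [[5, 3], [6, 3]].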
| 16,811
|
def handler(sig, frame) -> None:
"""Handles Ctrl+c by letting the Collector() know to shut down"""
current_pid = os.getpid()
if current_pid == parent_pid:
reason = f"Received shutdown signal {sig}"
log.debug(f"Parent caught signal {sig} - dispatching shutdown event")
# Dispatch shutdown event in parent process which also causes SIGTERM to be sent
# to the process group and in turn causes the shutdown event in all child
# processes.
dispatch_event(
Event(EventType.SHUTDOWN, {"reason": reason, "emergency": False})
)
else:
reason = f"Received shutdown signal {sig} from parent process"
log.debug(
f"Child with PID {current_pid} shutting down"
" - you might see exceptions from interrupted worker threads"
)
# Child's threads have 3s to shut down before the following thread will
# shut them down hard.
kt = threading.Thread(target=delayed_exit, name="shutdown")
kt.start()
# Dispatch shutdown event in child process
dispatch_event(
Event(EventType.SHUTDOWN, {"reason": reason, "emergency": False}),
blocking=False,
)
sys.exit(0)
| 16,812
|
def test_check_size_human_correct(docker_client, image_name):
"""Checks size with human readable limit."""
overflow = check_image_size(docker_client, image_name, '10 GiB')
assert overflow < 0
| 16,813
|
def test_binary_query(cbcsdk_mock):
"""Testing Binary Querying"""
called = False
def post_validate(url, body, **kwargs):
nonlocal called
if not called:
called = True
assert body['expiration_seconds'] == 3600
else:
assert body['expiration_seconds'] == 10
return BINARY_GET_FILE_RESP
sha256 = "00a16c806ff694b64e566886bba5122655eff89b45226cddc8651df7860e4524"
cbcsdk_mock.mock_request("GET", f"/ubs/v1/orgs/test/sha256/{sha256}", BINARY_GET_METADATA_RESP)
api = cbcsdk_mock.api
binary = api.select(Binary, sha256)
assert isinstance(binary, Binary)
cbcsdk_mock.mock_request("GET", f"/ubs/v1/orgs/test/sha256/{sha256}/summary/device", BINARY_GET_DEVICE_SUMMARY_RESP)
summary = binary.summary
cbcsdk_mock.mock_request("POST", "/ubs/v1/orgs/test/file/_download", post_validate)
url = binary.download_url()
assert summary is not None
assert url is not None
url = binary.download_url(expiration_seconds=10)
assert url is not None
| 16,814
|
def parse_struct_encoding(struct_encoding: bytes) -> typing.Tuple[typing.Optional[bytes], typing.Sequence[bytes]]:
"""Parse an array type encoding into its name and field type encodings."""
if not struct_encoding.startswith(b"{"):
raise ValueError(f"Missing opening brace in struct type encoding: {struct_encoding!r}")
if not struct_encoding.endswith(b"}"):
raise ValueError(f"Missing closing brace in struct type encoding: {struct_encoding!r}")
try:
# Stop searching for the equals if an opening brace
# (i. e. the start of another structure type encoding)
# is reached.
# This is necessary to correctly handle struct types with no name that contain a struct type with a name,
# such as b"{{foo=ii}}" (an unnamed struct containing a struct named "foo" containing two integers).
try:
end = struct_encoding.index(b"{", 1)
except ValueError:
end = -1
equals_pos = struct_encoding.index(b"=", 1, end)
except ValueError:
name = None
field_type_encoding_string = struct_encoding[1:-1]
else:
name = struct_encoding[1:equals_pos]
field_type_encoding_string = struct_encoding[equals_pos+1:-1]
field_type_encodings = list(split_encodings(field_type_encoding_string))
return name, field_type_encodings
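# Illustrative example (assumes split_encodings splits single-character type
# codes): parse_struct_encoding(b"{CGPoint=dd}") would return
# (b"CGPoint", [b"d", b"d"]), while the unnamed b"{{foo=ii}}" would return
# (None, [b"{foo=ii}"]).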
| 16,815
|
def split_train_crops(project_name, center, crops_dir='crops', subset=0.15, by_fraction=True, train_name='train',
seed=1000):
"""
:param project_name: string
Name of dataset. for example, set this to 'dsb-2018', or 'bbbc010-2012'
:param center: string
Set this to 'medoid' or 'centroid'
:param crops_dir: string
Name of the crops directory. Default value = 'crops'
:param subset: int/float
if by_fraction is True, then subset should be set equal to the fraction of image crops reserved for validation.
if by_fraction is False, then subset should be set equal to the number of image crops reserved for validation.
:param by_fraction: boolean
if True, then reserve a fraction <1 of image crops for validation
:param train_name: string
name of directory containing train image and instance crops
:param seed: int
:return:
"""
image_dir = os.path.join(crops_dir, project_name, train_name, 'images')
instance_dir = os.path.join(crops_dir, project_name, train_name, 'masks')
center_dir = os.path.join(crops_dir, project_name, train_name, 'center-' + center)
image_names = sorted(glob(os.path.join(image_dir, '*.tif')))
instance_names = sorted(glob(os.path.join(instance_dir, '*.tif')))
center_names = sorted(glob(os.path.join(center_dir, '*.tif')))
indices = np.arange(len(image_names))
np.random.seed(seed)
np.random.shuffle(indices)
if (by_fraction):
subsetLen = int(subset * len(image_names))
else:
subsetLen = int(subset)
valIndices = indices[:subsetLen]
image_path_val = os.path.join(crops_dir, project_name, 'val', 'images/')
instance_path_val = os.path.join(crops_dir, project_name, 'val', 'masks/')
center_path_val = os.path.join(crops_dir, project_name, 'val', 'center-' + center + '/')
val_images_exist = False
val_masks_exist = False
val_center_images_exist = False
if not os.path.exists(image_path_val):
os.makedirs(os.path.dirname(image_path_val))
print("Created new directory : {}".format(image_path_val))
else:
val_images_exist = True
if not os.path.exists(instance_path_val):
os.makedirs(os.path.dirname(instance_path_val))
print("Created new directory : {}".format(instance_path_val))
else:
val_masks_exist = True
if not os.path.exists(center_path_val):
os.makedirs(os.path.dirname(center_path_val))
print("Created new directory : {}".format(center_path_val))
else:
val_center_images_exist = True
if not val_images_exist and not val_masks_exist and not val_center_images_exist:
for val_index in valIndices:
shutil.move(image_names[val_index], os.path.join(crops_dir, project_name, 'val', 'images'))
shutil.move(instance_names[val_index], os.path.join(crops_dir, project_name, 'val', 'masks'))
shutil.move(center_names[val_index], os.path.join(crops_dir, project_name, 'val', 'center-' + center))
print("Val Images/Masks/Center-{}-image crops saved at {}".format(center,
os.path.join(crops_dir, project_name, 'val')))
else:
print("Val Images/Masks/Center-{}-image crops already available at {}".format(center, os.path.join(crops_dir,
project_name,
'val')))
| 16,816
|
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : Slot19
A Slot19 object
Returns
-------
point_list: list
A list of Points
"""
Rbo = self.get_Rbo()
# alpha is the angle to rotate Z0 so ||Z1,Z8|| = W0
alpha0 = self.comp_angle_opening() / 2
alpha1 = self.comp_angle_bottom() / 2
# comp point coordinate (in complex)
Z_ = Rbo * exp(1j * 0)
Z0 = Z_ * exp(1j * alpha0)
if self.is_outwards():
Z1 = (Rbo + self.H0) * exp(1j * alpha1)
else: # inward slot
Z1 = (Rbo - self.H0) * exp(1j * alpha1)
# symmetry
Z2 = Z1.conjugate()
Z3 = Z0.conjugate()
return [Z3, Z2, Z1, Z0]
| 16,817
|
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.__func__
if type(func) is partial:
orig_func = func.func
argspec = getargspec(orig_func)
args = list(argspec[0])
defaults = list(argspec[3] or ())
kwoargs = list(argspec[4])
kwodefs = dict(argspec[5] or {})
if func.args:
args = args[len(func.args):]
for arg in func.keywords or ():
try:
i = args.index(arg) - len(args)
del args[i]
try:
del defaults[i]
except IndexError:
pass
except ValueError: # must be a kwonly arg
i = kwoargs.index(arg)
del kwoargs[i]
del kwodefs[arg]
return inspect.FullArgSpec(args, argspec[1], argspec[2],
tuple(defaults), kwoargs,
kwodefs, argspec[6])
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
return inspect.getfullargspec(func)
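# Illustrative example: for p = functools.partial(lambda a, b, c=3: None, 1),
# getargspec(p) would report args ['b', 'c'] with defaults (3,), because the
# positional argument already bound by the partial is dropped from the front.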
| 16,818
|
def empirical_kernel_fn(f: ApplyFn,
trace_axes: Axes = (-1,),
diagonal_axes: Axes = ()
) -> EmpiricalKernelFn:
"""Returns a function that computes single draws from NNGP and NT kernels.
Args:
f:
the function whose NTK we are computing. `f` should have the signature
`f(params, inputs[, rng])` and should return an `np.ndarray` outputs.
trace_axes:
output axes to trace the output kernel over, i.e. compute only the trace
of the covariance along the respective pair of axes (one pair for each
axis in `trace_axes`). This allows you to save space and compute if you are
only interested in the respective trace, and also improves approximation
accuracy if you know that covariance along these pairs of axes converges
to a `constant * identity matrix` in the limit of interest (e.g.
infinite width or infinite `n_samples`). A common use case is the channel
/ feature / logit axis, since activation slices along such axis are i.i.d.
and the respective covariance along the respective pair of axes indeed
converges to a constant-diagonal matrix in the infinite width or infinite
`n_samples` limit.
Also related to "contracting dimensions" in XLA terms.
(https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
diagonal_axes:
output axes to diagonalize the output kernel over, i.e. compute only the
diagonal of the covariance along the respective pair of axes (one pair for
each axis in `diagonal_axes`). This allows you to save space and compute, if
off-diagonal values along these axes are not needed, and also improves
approximation accuracy if their limiting value is known theoretically,
e.g. if they vanish in the limit of interest (e.g. infinite
width or infinite `n_samples`). If you further know that on-diagonal
values converge to the same constant in your limit of interest, you should
specify these axes in `trace_axes` instead, to save even more compute and
gain even more accuracy. A common use case is computing the variance
(instead of covariance) along certain axes.
Also related to "batch dimensions" in XLA terms.
(https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
Returns:
A function to draw a single sample the NNGP and NTK empirical kernels of a
given network `f`.
"""
kernel_fns = {
'nngp': empirical_nngp_fn(f, trace_axes, diagonal_axes),
'ntk': empirical_ntk_fn(f, trace_axes, diagonal_axes)
}
@utils.get_namedtuple('EmpiricalKernel')
def kernel_fn(x1: np.ndarray,
x2: Optional[np.ndarray],
get: Union[None, str, Tuple[str, ...]],
params: PyTree,
**apply_fn_kwargs) -> Dict[str, np.ndarray]:
"""Computes a single sample of the empirical kernel of type `get`.
Args:
x1:
first batch of inputs.
x2:
second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.
get:
type of the empirical kernel. `get=None` means `get=("nngp", "ntk")`.
Can be a string (`"nngp"`) or a tuple of strings (`("ntk", "nngp")`).
params:
A `PyTree` of parameters about which we would like to compute the
neural tangent kernel.
**apply_fn_kwargs:
keyword arguments passed to `apply_fn`. `apply_fn_kwargs` will be split
into `apply_fn_kwargs1` and `apply_fn_kwargs2` by the `_split_kwargs`
function which will be passed to `apply_fn`. In particular, the rng key
in `apply_fn_kwargs`, will be split into two different (if `x1!=x2`) or
same (if `x1==x2`) rng keys. See the `_read_key` function for more
details.
Returns:
A single sample of the empirical kernel. The shape is "almost"
`zip(f(x1).shape, f(x2).shape)` except for:
1) `trace_axes` are absent as they are contracted over.
2) `diagonal_axes` are present only once.
All other axes are present twice.
If `get` is a string, returns the requested `np.ndarray`. If `get` is a
tuple, returns an `EmpiricalKernel` namedtuple containing the
requested information.
"""
if get is None:
get = ('nngp', 'ntk')
return {g: kernel_fns[g](x1, x2, params, **apply_fn_kwargs)
for g in get} # pytype: disable=wrong-arg-count
return kernel_fn
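# Illustrative usage sketch, following only the signature documented above
# (apply_fn, params, x_train and x_test are assumed to come from the caller):
# kernel_fn = empirical_kernel_fn(apply_fn)
# ntk = kernel_fn(x_train, x_test, 'ntk', params) # single np.ndarray
# both = kernel_fn(x_train, None, None, params) # namedtuple with nngp and ntk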
| 16,819
|
def read_image_batch(image_paths, image_size=None, as_list=False):
"""
Reads an image array of np.uint8 with shape (num_images, *image_shape)
* image_paths: list of image paths
* image_size: if not None, image is resized
* as_list: if True, return list of images,
else return np.ndarray (default)
:return: np.ndarray or list
"""
images = None
for i, image_path in enumerate(image_paths):
im = load_img(image_path)
if image_size is not None:
im = im.resize(image_size, Image.LANCZOS)
x = img_to_array(im).astype(np.uint8)
if images is None:
if not as_list:
images = np.zeros((len(image_paths),) + x.shape,
dtype=np.uint8)
else: images = []
if not as_list: images[i, ...] = x
else: images.append(x)
return images
| 16,820
|
def dc_vm_backup(request, dc, hostname):
"""
Switch current datacenter and redirect to VM backup page.
"""
dc_switch(request, dc)
return redirect('vm_backup', hostname=hostname)
| 16,821
|
def compute_total_distance(path):
"""compute total sum of distance travelled from path list"""
path_array = np.diff(np.array(path), axis=0)
segment_distance = np.sqrt((path_array ** 2).sum(axis=1))
return np.sum(segment_distance)
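# Worked example: for path = [(0, 0), (3, 4), (3, 5)] the segment lengths are
# 5.0 and 1.0, so compute_total_distance(path) == 6.0.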
| 16,822
|
def get_relative_positions_matrix(length_x, length_y, max_relative_position):
"""Generates matrix of relative positions between inputs."""
range_vec_x = tf.range(length_x)
range_vec_y = tf.range(length_y)
# shape: [length_x, length_y]
distance_mat = tf.expand_dims(range_vec_x, -1) - tf.expand_dims(range_vec_y, 0)
distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position,
max_relative_position)
# Shift values to be >= 0. Each integer still uniquely identifies a relative
# position difference.
final_mat = distance_mat_clipped + max_relative_position
return final_mat
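# Worked example: with length_x = length_y = 3 and max_relative_position = 2,
# distance_mat[i][j] = i - j, so after clipping and shifting the result is
# [[2, 1, 0], [3, 2, 1], [4, 3, 2]].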
| 16,823
|
def func_split_item(k):
""" Computes the expected value and variance of the splitting item random variable S.
Computes the expression (26b) and (26c) in Theorem 8. Remember that r.v. S is the value of index s
such that $\sum_{i=1}^{s-1} w(i) \leq k$ and $\sum_{i=1}^s w(i) > k$.
Args:
k: Int. The capacity of the Knapsack Problem instance.
Returns:
s: float. The expected value of the splitting item random variable.
var_split: float. The variance of the splitting item random variable.
"""
b = 1 + 1 / k # Defining a cumbersome base
s = (1 + 1 / k) ** k # Computing the split item
var_split = (3 + 1 / k) * b ** (k - 1) - b ** (2 * k) # Computing the variance of the split item
return s, var_split
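# Worked example: for k = 10, b = 1.1, so s = 1.1**10 ≈ 2.594 and
# var_split = 3.1 * 1.1**9 - 1.1**20 ≈ 7.310 - 6.727 ≈ 0.582.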
| 16,824
|
def watch_less():
"""watch less"""
local('watchmedo shell-command --patterns="*.less" ' +
"--recursive --command='lessc %s/web/static/style.less "%CURRENT_PATH +
"> %s/web/static/style.css'"%CURRENT_PATH )
| 16,825
|
def eq(equation: str) -> int:
"""Evaluate the equation."""
code = compile(equation, "<string>", "eval")
return eval(code)
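# Worked example: eq("2 + 3 * 4") compiles the string as a Python expression
# and evaluates to 14.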
| 16,826
|
def is_client_in_data(hass: HomeAssistant, unique_id: str) -> bool:
"""Check if ZoneMinder client is in the Home Assistant data."""
prime_config_data(hass, unique_id)
return const.API_CLIENT in hass.data[const.DOMAIN][const.CONFIG_DATA][unique_id]
| 16,827
|
def parse_date(datestr):
""" Given a date in xport format, return Python date. """
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
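# Worked example: parse_date("01JAN90:12:30:45") returns
# datetime(1990, 1, 1, 12, 30, 45); strptime matches the %b month abbreviation
# case-insensitively.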
| 16,828
|
def unconfigure_l2vpn_evpn(device):
""" unconfig l2vpn evpn
Args:
device (`obj`): Device object
Returns:
None
Raises:
SubCommandFailure
"""
log.info(
"Unconfiguring 'l2vpn evpn' globally"
)
configs = []
configs.append("no l2vpn evpn")
try:
device.configure(configs)
except SubCommandFailure as e:
raise SubCommandFailure(
"Failed to unconfigure 'l2vpn evpn' globally"
'Error:{e}'.format(e=e)
)
| 16,829
|
def generate_database(m, n, uni_range_low=None, uni_range_high=None, exact_number=False):
"""
- Generate Universe by picking n random integers from low (inclusive) to high (exclusive).
If exact_number, then Universe.size == n
- Generate a Database of m records, over the Universe
"""
# generate Universe
if exact_number:
objects = range(n)
else:
objects = list(np.random.randint(uni_range_low, uni_range_high, size=n))
uni = Universe(objects)
# generate Database
db = uni.random_db(m)
return db
| 16,830
|
def interface_style():
"""Return current platform interface style (light or dark)."""
try: # currently only works on macOS
from Foundation import NSUserDefaults as NSUD
except ImportError:
return None
style = NSUD.standardUserDefaults().stringForKey_("AppleInterfaceStyle")
if style == "Dark":
return "dark"
else:
return "light"
| 16,831
|
def get_num_forces(cgmodel):
"""
Given a CGModel() class object, this function determines how many forces we are including when evaluating the energy.
:param cgmodel: CGModel() class object
:type cgmodel: class
:returns:
- total_forces (int) - Number of forces in the coarse grained model
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> total_number_forces = get_num_forces(cgmodel)
"""
total_forces = 0
if cgmodel.include_bond_forces:
total_forces = total_forces + 1
if cgmodel.include_nonbonded_forces:
total_forces = total_forces + 1
if cgmodel.include_bond_angle_forces:
total_forces = total_forces + 1
if cgmodel.include_torsion_forces:
total_forces = total_forces + 1
return total_forces
| 16,832
|
def cybrowser_dialog(id=None, text=None, title=None, url=None, base_url=DEFAULT_BASE_URL):
"""Launch Cytoscape's internal web browser in a separate window
Provide an id for the window if you want subsequent control of the window e.g., via cybrowser hide.
Args:
id (str): The identifier for the new browser window
text (str): HTML text to initially load into the browser
title (str): Text to be shown in the title bar of the browser window
url (str): The URL the browser should load
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: {'id': id} where ``id`` is the one provided as a parameter to this function
Raises:
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> cybrowser_dialog(id='Test Window', title='Hello Africa', text='<HTML><HEAD><TITLE>Hello</TITLE></HEAD><BODY>Hello, world!</BODY></HTML>')
{'id': 'Test Window'}
>>> cybrowser_dialog(id='CytoWindow', title='Cytoscape Home Page', url='http://www.cytoscape.org')
{'id': 'CytoWindow'}
See Also:
:meth:`cybrowser_show`, :meth:`cybrowser_hide`
"""
id_str = f' id="{id}"' if id else ''
text_str = f' text="{text}"' if text else ''
title_str = f' title="{title}"' if title else ''
url_str = f' url="{url}"' if url else ''
res = commands.commands_post(f'cybrowser dialog{id_str}{text_str}{title_str}{url_str}', base_url=base_url)
return res
| 16,833
|
def main():
"""
Main function where you can test how VideoCameraServer works
"""
# Placing imports here so it will be imported only if user want to test algorithm, not when importing
# Class DepthCameraServer
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sensors_classes as sensors
from images_processing_class import ImagesProcessing
import struct
import time
# Start the thread which receives data from VideoCamera; the port of the thread's socket must be the same as the port to
# which the VideoCamera data is redirected. To be sure, check where the VideoCamera data stream is sent in the script env.py
depth_camera_server = DepthCameraServer('localhost', 60012)
depth_camera_server.run()
pose_server = sensors.Pose_server('localhost', 60007)
pose_server.run()
# Waiting 1 sec to be sure that depth_camera_server has received at least 1 image, because the program will crash if
# depth_camera_server doesn't have time to receive an image
time.sleep(1)
points = depth_camera_server.get_points()
lista_punktow = []
x = []
y = []
z = []
data_pose_dict = pose_server.get_all()
pose_x = data_pose_dict['x']
pose_y = data_pose_dict['y']
pose_z = data_pose_dict['z']
yawp = data_pose_dict['yaw']
pitchp = data_pose_dict['pitch']
rollp = data_pose_dict['roll']
# Each 3D point is a set of float(x,y,z). Each point has a size of 12 bytes because
# 3*sizeof(float) = 12 bytes, that's why we are dividing data into parts with size of 12 and then
# converting this data to tuple with 3 float (xyz).
#
# Processing the cloud of points to separate x, y and z was copied from dcam_old.py
#
for i in range(0, len(points) - 12, 12):
xyz = struct.unpack('fff', points[i:i + 12])
# rotation is included
x1p, y1p, z1p = rotation(xyz[2], xyz[0], xyz[1], yawp, pitchp, rollp)
# data from pose is included
xp = round(x1p + pose_x, 1)
yp = round(y1p + pose_y, 1)
zp = round(z1p + pose_z, 1)
temp = [xp, yp, zp]
lista_punktow.append(temp)
# Choosing only those points which have a minimum of 0.45 meters on the z-axis, but why???
for i in lista_punktow:
x.append(i[0])
y.append(i[1])
z.append(i[2])
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(x, y, z, cmap='viridis', linewidth=0.5)
ax.scatter(x[0], y[0], z[0], c='red')
ax.scatter(x[1], y[1], z[1], c='yellow')
ax.scatter(x[2], y[2], z[2], c='black')
ax.scatter(pose_x, pose_y, pose_z, c='green')
plt.show()
| 16,834
|
def initialize_stat_dict():
"""Initializes a dictionary which will hold statistics about compositions.
Returns:
A dictionary containing the appropriate fields initialized to 0 or an
empty list.
"""
stat_dict = dict()
for lag in [1, 2, 3]:
stat_dict['autocorrelation' + str(lag)] = []
stat_dict['notes_not_in_key'] = 0
stat_dict['notes_in_motif'] = 0
stat_dict['notes_in_repeated_motif'] = 0
stat_dict['num_starting_tonic'] = 0
stat_dict['num_repeated_notes'] = 0
stat_dict['num_octave_jumps'] = 0
stat_dict['num_fifths'] = 0
stat_dict['num_thirds'] = 0
stat_dict['num_sixths'] = 0
stat_dict['num_seconds'] = 0
stat_dict['num_fourths'] = 0
stat_dict['num_sevenths'] = 0
stat_dict['num_rest_intervals'] = 0
stat_dict['num_special_rest_intervals'] = 0
stat_dict['num_in_key_preferred_intervals'] = 0
stat_dict['num_resolved_leaps'] = 0
stat_dict['num_leap_twice'] = 0
stat_dict['num_high_unique'] = 0
stat_dict['num_low_unique'] = 0
return stat_dict
| 16,835
|
def nrrd_to_nii(file):
"""
A function that converts the .nrrd atlas to .nii file format
Parameters
----------
file: str
Path to the .nrrd atlas file to convert
Returns
-------
F_im_nii: nibabel.nifti2.Nifti2Image
A nifti file format that is used by various medical imaging techniques.
Notes
-------
From: https://nipy.org/nibabel/coordinate_systems.html
"""
_nrrd = nrrd.read(file)
data = _nrrd[0]
header = _nrrd[1] # noqa: F841
F_im_nii = nib.Nifti2Image(data, np.eye(4))
return F_im_nii
| 16,836
|
def reqeustVerifyAuthhandler(request):
"""
Requests an identity-verification electronic signature.
- In the identity-verification service, the Token generated by the requesting organization is the original text that the user electronically signs. For security it must be generated for one-time use.
- The user signs the one-time token generated by the requesting organization, and the organization verifies that signature value, thereby authenticating the user.
"""
try:
# Kakaocert client organization code; check it on the Kakaocert partner site
clientCode = '020040000001'
# Identity-verification request object
requestObj = RequestVerifyAuth(
# Customer-center phone number, shown in the "Customer Center" field of the KakaoTalk verification message
CallCenterNum = '1600-8536',
# Expiration time of the verification request in seconds (max 1000); if not verified within this time, the request is marked as expired
Expires_in = 60,
# Recipient's date of birth, format: YYYYMMDD
ReceiverBirthDay = '19900108',
# Recipient's mobile phone number
ReceiverHP = '01043245117',
# Recipient's name
ReceiverName = '정요한',
# Alias code generated by the requesting organization (available on the partner site)
# Shown in the "Requesting Organization" field of the KakaoTalk verification message
# If omitted, the organization's registered name is shown in that field instead
SubClientID = '',
# Additional body text of the verification request, shown at the top of the KakaoTalk verification message
TMSMessage = 'TMSMessage0423',
# Title of the verification request, shown in the "Request Type" field of the KakaoTalk verification message
TMSTitle = 'TMSTitle 0423',
# Whether to skip bank-account real-name verification
# true : skip the bank-account real-name verification step
# false : perform the bank-account real-name verification step
# If the user receiving the KakaoTalk verification message is not a Kakao certificate member, they can sign only after registering and completing bank-account real-name verification
isAllowSimpleRegistYN = False,
# Whether to verify the recipient's real name
# true : compare the ReceiverName value against the real name KakaoPay obtained through identity verification
# false : do not compare the ReceiverName value against that real name
isVerifyNameYN = True,
# Original token text to be electronically signed
Token = 'Token Value 2345',
# PayLoad: a payload (memo) value generated by the requesting organization
PayLoad = 'Payload123',
)
result = kakaocertService.requestVerifyAuth(clientCode, requestObj)
return render(request, 'response.html', {'receiptId': result.receiptId})
except KakaocertException as KE:
return render(request, 'exception.html', {'code': KE.code, 'message': KE.message})
| 16,837
|
def _file_name_to_valid_time(bulletin_file_name):
"""Parses valid time from file name.
:param bulletin_file_name: Path to input file (text file in WPC format).
:return: valid_time_unix_sec: Valid time.
"""
_, pathless_file_name = os.path.split(bulletin_file_name)
valid_time_string = pathless_file_name.replace(
PATHLESS_FILE_NAME_PREFIX + '_', '')
return time_conversion.string_to_unix_sec(
valid_time_string, TIME_FORMAT_IN_FILE_NAME)
| 16,838
|
def cidr_validator(value, return_ip_interface=False):
"""Validate IPv4 + optional subnet in CIDR notation"""
try:
if '/' in value:
ipaddr, netmask = value.split('/')
netmask = int(netmask)
else:
ipaddr, netmask = value, 32
if not validators.ipv4_re.match(ipaddr) or not 1 <= netmask <= 32:
raise ValueError
ipi = ipaddress.ip_interface(six.text_type(value))
if ipi.is_reserved:
raise ValueError
except ValueError:
raise ValidationError(_('Enter a valid IPv4 address or IPv4 network.'))
if return_ip_interface:
return ipi
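# Illustrative usage sketch: cidr_validator('192.168.1.0/24', return_ip_interface=True)
# would return ipaddress.IPv4Interface('192.168.1.0/24'); a bare address such as
# '10.0.0.1' is treated as /32, and reserved or malformed values raise ValidationError.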
| 16,839
|
def rh2a(rh, T, e_sat_func=e_sat_gg_water):
"""
Calculate the absolute humidity from relative humidity, air temperature,
and pressure.
Parameters
----------
rh:
Relative humidity in Pa / Pa
T:
Temperature in K
e_sat_func: func, optional
Function to estimate the saturation pressure. E.g. e_sat_gg_water for
water and e_sat_gg_ice for ice.
Returns
-------
float :
absolute humidity [kg / m^3]
"""
with np.errstate(divide='ignore', invalid='ignore'):
if np.any(rh > 5):
raise TypeError("rh must not be in %")
e = rh*e_sat_func(T)
a = e / (meteo_si.constants.Rvapor*T)
return a
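# Worked example (assumes e_sat_gg_water(293.15) is roughly 2.34e3 Pa and
# Rvapor is about 461.5 J kg^-1 K^-1): rh2a(0.5, 293.15) gives
# e ≈ 1.17e3 Pa and a ≈ 1.17e3 / (461.5 * 293.15) ≈ 8.6e-3 kg m^-3.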
| 16,840
|
def container():
"""
Container management, type maple container --help for more info
"""
pass
| 16,841
|
def encoder_decoder_generator(start_img):
"""
"""
layer1 = Conv2D(64, kernel_size=4, strides=2, activation='elu', padding='same')(start_img)
layer2 = Conv2D(64, kernel_size=4, strides=2, activation='elu', padding='same')(layer1)
layer3 = Conv2D(64, kernel_size=4, strides=1, activation='elu', padding='same')(layer2)
layer4 = Conv2DTranspose(64, kernel_size=4, strides=2, activation='elu', padding="same")(layer3)
layer5 = Conv2DTranspose(64, kernel_size=4, strides=2, activation='elu', padding="same")(layer4)
layer6 = Conv2D(64, kernel_size=2, strides=1, activation='elu', padding='same')(layer5)
# Make sure that generator output is in the same range as `inputs`
# ie [-1, 1].
net = Conv2D(3, kernel_size=1, activation = 'tanh', padding='same')(layer6)
return net
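# Illustrative usage sketch (assumes the Keras functional API with Input and
# Model imported, e.g. from tensorflow.keras):
# inp = Input(shape=(64, 64, 3))
# out = encoder_decoder_generator(inp)
# generator = Model(inp, out) # the two stride-2 transposes undo the two stride-2 convolutions, so the output matches the input size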
| 16,842
|
def off():
"""
Turns the buzzer off (sets frequency to zero Hz)
Returns:
None
"""
return _rc.writeAttribute(OPTYPE.BUZZER_FREQ, [0])
| 16,843
|
def contentBrowser(*args, **kwargs):
"""
This command is used to edit and query a Content Browser.
Returns: `string` The name of the panel
"""
pass
| 16,844
|
def data_preprocess(dataset, data_dir, dest_base_dir, predictor_path):
"""
Parameters
----------
dataset: str
KDEF or Rafd, dataset names
data_dir: str
path to data directory to fetch images
dest_base_dir:
directory to store all outputs
predictor_path:
predictor for computing dlib landmarks
Download from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
Returns
-------
Saves all dlib landmarks, aligned images/landmarks in dedicated folders on dest_base_dir
"""
print('COMPUTING LANDMARKS')
# create file list, compute landmarks and return with list for alignment
lnd_dir = os.path.join(dest_base_dir, dataset + '_LANDMARKS')
paths_txt = os.path.join(dest_base_dir, dataset + '_frontal_list.txt')
if not os.path.exists(lnd_dir):
os.makedirs(lnd_dir)
img_list = list_and_landmarks(dataset, data_dir, lnd_dir, paths_txt, predictor_path)
print('PERFORMING ALIGNMENT')
# init aligner
fa = FaceAligner(desiredFaceWidth=128)
# destination directory structure
dest_parent_folder_path = os.path.join(dest_base_dir, dataset + '_Aligned')
dest_sub_folder_datapath = os.path.join(dest_parent_folder_path, dataset)
dest_sub_folder_lndpath = os.path.join(dest_parent_folder_path, dataset + '_LANDMARKS')
if not os.path.exists(dest_parent_folder_path):
os.makedirs(dest_sub_folder_datapath)
os.makedirs(dest_sub_folder_lndpath)
for index, row in enumerate(img_list):
# read each image and landmarks
# ----file paths
im_path = row
file_name = row.split('/')[-1].split('.')[0]
lnd_file_path = os.path.join(lnd_dir, file_name + '.txt')
# ----read_image
image = cv2.imread(im_path)
# get landmarks
points = np.loadtxt(lnd_file_path).reshape(68, 2)
# compute alignment
image_aligned, points_aligned = fa.align(image, points)
# store back in a folder structure similar to Rafd
row_impath = os.path.join(dest_sub_folder_datapath, file_name + '.JPG')
row_lndpath = os.path.join(dest_sub_folder_lndpath, file_name + '.txt')
cv2.imwrite(row_impath, image_aligned)
with open(row_lndpath, 'w') as file:
for idx_l in range(68):
file.write("{} {}\n".format(points_aligned[idx_l, 0], points_aligned[idx_l, 1]))
| 16,845
|
def generateLicence(expires, file=licenceFile, key=settings.SECRET_KEY, client=client, provider=provider, created=datetime.datetime.now()):
"""
Creates a licence file.
Parameters:
- expires (datetime): the licence expiration date.
- file (string): the path to the licence file.
- key (string): the cryptographic signing key (the same key is used for signing and unsigning).
- client (string): the licence client.
- provider (string): the licence provider.
- created (datetime): the licence creation date.
Returns:
- None
"""
licence = {
"CLIENT": str(client),
"PROVIDER": str(provider),
"CREATED": str(created)[:-7],
"EXPIRES": str(expires)[:-7],
}
with open(file, "w") as f:
f.write(Signer().sign(json.dumps(licence, indent=3) + "\n"))
| 16,846
|
def ircelsos_data_dir():
"""Get the data directory
Adapted from jupyter_core
"""
home = os.path.expanduser('~')
if sys.platform == 'darwin':
return os.path.join(home, 'Library', 'ircelsos')
elif os.name == 'nt':
appdata = os.environ.get('APPDATA', os.path.join(home, '.local', 'share'))
return os.path.join(appdata, 'ircelsos')
else:
# Linux, non-OS X Unix, AIX, etc.
xdg = os.environ.get("XDG_DATA_HOME", os.path.join(home, '.local', 'share'))
return os.path.join(xdg, 'ircelsos')
| 16,847
|
def get_shodan_dicts():
"""Build Shodan dictionaries that hold definitions and naming conventions."""
risky_ports = [
"ftp",
"telnet",
"http",
"smtp",
"pop3",
"imap",
"netbios",
"snmp",
"ldap",
"smb",
"sip",
"rdp",
"vnc",
"kerberos",
]
name_dict = {
"ftp": "File Transfer Protocol",
"telnet": "Telnet",
"http": "Hypertext Transfer Protocol",
"smtp": "Simple Mail Transfer Protocol",
"pop3": "Post Office Protocol 3",
"imap": "Internet Message Access Protocol",
"netbios": "Network Basic Input/Output System",
"snmp": "Simple Network Management Protocol",
"ldap": "Lightweight Directory Access Protocol",
"smb": "Server Message Block",
"sip": "Session Initiation Protocol",
"rdp": "Remote Desktop Protocol",
"kerberos": "Kerberos",
}
risk_dict = {
"ftp": "FTP",
"telnet": "Telnet",
"http": "HTTP",
"smtp": "SMTP",
"pop3": "POP3",
"imap": "IMAP",
"netbios": "NetBIOS",
"snmp": "SNMP",
"ldap": "LDAP",
"smb": "SMB",
"sip": "SIP",
"rdp": "RDP",
"vnc": "VNC",
"kerberos": "Kerberos",
}
# Create dictionaries for CVSSv2 vector definitions using https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator
av_dict = {
"NETWORK": "A vulnerability exploitable with network access means the vulnerable software is bound to the network stack and the attacker does not require local network access or local access. Such a vulnerability is often termed “remotely exploitable”. An example of a network attack is an RPC buffer overflow.",
"ADJACENT_NETWORK": "A vulnerability exploitable with adjacent network access requires the attacker to have access to either the broadcast or collision domain of the vulnerable software. Examples of local networks include local IP subnet, Bluetooth, IEEE 802.11, and local Ethernet segment.",
"LOCAL": "A vulnerability exploitable with only local access requires the attacker to have either physical access to the vulnerable system or a local (shell) account. Examples of locally exploitable vulnerabilities are peripheral attacks such as Firewire/USB DMA attacks, and local privilege escalations (e.g., sudo).",
}
ac_dict = {
"LOW": "Specialized access conditions or extenuating circumstances do not exist. The following are examples: The affected product typically requires access to a wide range of systems and users, possibly anonymous and untrusted (e.g., Internet-facing web or mail server). The affected configuration is default or ubiquitous. The attack can be performed manually and requires little skill or additional information gathering. The 'race condition' is a lazy one (i.e., it is technically a race but easily winnable).",
"MEDIUM": "The access conditions are somewhat specialized; the following are examples: The attacking party is limited to a group of systems or users at some level of authorization, possibly untrusted. Some information must be gathered before a successful attack can be launched. The affected configuration is non-default, and is not commonly configured (e.g., a vulnerability present when a server performs user account authentication via a specific scheme, but not present for another authentication scheme). The attack requires a small amount of social engineering that might occasionally fool cautious users (e.g., phishing attacks that modify a web browser’s status bar to show a false link, having to be on someone’s “buddy” list before sending an IM exploit).",
"HIGH": "Specialized access conditions exist. For example, in most configurations, the attacking party must already have elevated privileges or spoof additional systems in addition to the attacking system (e.g., DNS hijacking). The attack depends on social engineering methods that would be easily detected by knowledgeable people. For example, the victim must perform several suspicious or atypical actions. The vulnerable configuration is seen very rarely in practice. If a race condition exists, the window is very narrow.",
}
ci_dict = {
"NONE": "There is no impact to the confidentiality of the system",
"PARTIAL": "There is considerable informational disclosure. Access to some system files is possible, but the attacker does not have control over what is obtained, or the scope of the loss is constrained. An example is a vulnerability that divulges only certain tables in a database.",
"COMPLETE": "There is total information disclosure, resulting in all system files being revealed. The attacker is able to read all of the system's data (memory, files, etc.).",
}
return risky_ports, name_dict, risk_dict, av_dict, ac_dict, ci_dict
| 16,848
|
def safe_download(f):
"""
Makes a download safe, by trapping any app errors and redirecting
to a default landing page.
Assumes that the first 2 arguments to the function after request are
domain and app_id, or there are keyword arguments with those names
"""
@wraps(f)
def _safe_download(request, *args, **kwargs):
domain = args[0] if len(args) > 0 else kwargs["domain"]
app_id = args[1] if len(args) > 1 else kwargs["app_id"]
latest = True if request.GET.get('latest') == 'true' else False
target = request.GET.get('target') or None
try:
request.app = get_app(domain, app_id, latest=latest, target=target)
return f(request, *args, **kwargs)
except (AppEditingError, CaseError), e:
logging.exception(e)
messages.error(request, "Problem downloading file: %s" % e)
return HttpResponseRedirect(reverse("corehq.apps.app_manager.views.view_app", args=[domain,app_id]))
return _safe_download
| 16,849
|
def Moebius(quaternion_or_infinity, a,b=None,c=None,d=None):
"""
The Moebius transformation of a quaternion (z)
with parameters a,b,c and d
>>> import qmath
>>> a = qmath.quaternion([1,1,1,0])
>>> b = qmath.quaternion([-2,1,0,1])
>>> c = qmath.quaternion([1,0,0,0])
>>> d = qmath.quaternion([0,-1,-3,-4])
>>> z = qmath.quaternion([1,1,3,4])
>>> qmath.Moebius(z,a,b,c,d)
(-5.0+7.0i+7.0k)
>>> d = - z
>>> z = qmath.Moebius(z,a,b,c,d)
>>> z
'Infinity'
>>> qmath.Moebius(z,a,b,c,d)
(1.0+1.0i+1.0j)
"""
if type(a) == tuple:
return Moebius(quaternion_or_infinity,a[0],a[1],a[2],a[3])
else:
A = quaternion(a)
B = quaternion(b)
C = quaternion(c)
D = quaternion(d)
if A * D - B * C == 0:
raise RuntimeError(' this is not a Moebius transformation')
elif quaternion_or_infinity == 'Infinity':
return A / C
else:
Z = quaternion(quaternion_or_infinity)
try:
return (A * Z + B) * quaternion.inverse(C * Z + D)
except:
return 'Infinity'
| 16,850
|
def init_full_x(setup_pickleddb_database, monkeypatch):
"""Init original experiment"""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
name = "full_x"
orion.core.cli.main(
(
"hunt --init-only -n {name} --config orion_config.yaml ./black_box.py "
"-x~uniform(-10,10)"
)
.format(name=name)
.split(" ")
)
orion.core.cli.main("insert -n {name} script -x=0".format(name=name).split(" "))
| 16,851
|
def _passthrough_zotero_data(zotero_data):
"""
Address known issues with Zotero metadata.
Assumes zotero data should contain a single bibliographic record.
"""
if not isinstance(zotero_data, list):
raise ValueError('_passthrough_zotero_data: zotero_data should be a list')
if len(zotero_data) > 1:
# Sometimes translation-server creates multiple data items for a single record.
# If so, keep only the parent item, and remove child items (such as notes).
# https://github.com/zotero/translation-server/issues/67
zotero_data = zotero_data[:1]
return zotero_data
| 16,852
|
def telebot():
"""endpoint responsible to parse and respond bot webhook"""
payload = json.loads(request.data)
message = payload.get('message', payload.get('edited_message',''))
msg_from = message.get('from')
user_id = msg_from.get('id')
user_first_name = msg_from.get('first_name','')
user_last_name = msg_from.get('last_name','')
user_is_bot = msg_from.get('is_bot')
chat = message.get('chat')
chat_id = chat.get('id')
command = message.get('text')
if user_is_bot or message == '':
return jsonify({'method': 'sendMessage','chat_id' : chat_id,'text': 'Sorry I can\'t answer you!'})
bot_response = {
'method': 'sendMessage',
'chat_id' : chat_id,
'text': f'[{user_first_name} {user_last_name}](tg://user?id={user_id}) {command}',
'parse_mode':'Markdown',
}
return jsonify(bot_response)
| 16,853
|
def pkg_config(cfg):
"""Returns PkgConfig pkg config object."""
pkg_config_py = os.path.join(get_vta_hw_path(), "config/pkg_config.py")
libpkg = {"__file__": pkg_config_py}
exec(compile(open(pkg_config_py, "rb").read(), pkg_config_py, "exec"), libpkg, libpkg)
PkgConfig = libpkg["PkgConfig"]
return PkgConfig(cfg)
| 16,854
|
def run(*options, cfg=None, debug=False):
"""Run training and validation of model
Notes:
Options can be passed in via the options argument and loaded from the cfg file
Options from default.py will be overridden by options loaded from cfg file
Options passed in via the options argument will override options loaded from the cfg file
Args:
*options (str, int, optional): Options used to override what is loaded from the
config. To see what options are available consult
default.py
cfg (str, optional): Location of config file to load. Defaults to None.
debug (bool): Places scripts in debug/test mode and only executes a few iterations
"""
# Configuration:
update_config(config, options=options, config_file=cfg)
# The model will be saved under: outputs/<config_file_name>/<model_dir>
config_file_name = "default_config" if not cfg else cfg.split("/")[-1].split(".")[0]
try:
output_dir = generate_path(
config.OUTPUT_DIR, git_branch(), git_hash(), config_file_name, config.TRAIN.MODEL_DIR, current_datetime(),
)
except TypeError:
output_dir = generate_path(config.OUTPUT_DIR, config_file_name, config.TRAIN.MODEL_DIR, current_datetime(),)
# Logging:
load_log_configuration(config.LOG_CONFIG)
logger = logging.getLogger(__name__)
logger.debug(config.WORKERS)
# Set CUDNN benchmark mode:
torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK
# we will write the model under outputs / config_file_name / model_dir
config_file_name = "default_config" if not cfg else cfg.split("/")[-1].split(".")[0]
# Fix random seeds:
torch.manual_seed(config.SEED)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(config.SEED)
np.random.seed(seed=config.SEED)
# Augmentation:
basic_aug = Compose(
[
Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1),
PadIfNeeded(
min_height=config.TRAIN.PATCH_SIZE,
min_width=config.TRAIN.PATCH_SIZE,
border_mode=config.OPENCV_BORDER_CONSTANT,
always_apply=True,
mask_value=255,
value=0,
),
Resize(
config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True,
),
PadIfNeeded(
min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT,
min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH,
border_mode=config.OPENCV_BORDER_CONSTANT,
always_apply=True,
mask_value=255,
),
]
)
if config.TRAIN.AUGMENTATION:
train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)])
val_aug = basic_aug
else:
train_aug = val_aug = basic_aug
# Training and Validation Loaders:
TrainPatchLoader = get_patch_loader(config)
logging.info(f"Using {TrainPatchLoader}")
train_set = TrainPatchLoader(
config.DATASET.ROOT,
config.DATASET.NUM_CLASSES,
split="train",
is_transform=True,
stride=config.TRAIN.STRIDE,
patch_size=config.TRAIN.PATCH_SIZE,
augmentations=train_aug,
#augmentations=Resize(config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True),
debug=True
)
logger.info(train_set)
n_classes = train_set.n_classes
val_set = TrainPatchLoader(
config.DATASET.ROOT,
config.DATASET.NUM_CLASSES,
split="val",
is_transform=True,
stride=config.TRAIN.STRIDE,
patch_size=config.TRAIN.PATCH_SIZE,
augmentations=val_aug,
#augmentations=Resize(config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True),
debug=True
)
logger.info(val_set)
if debug:
logger.info("Running in debug mode..")
train_set = data.Subset(train_set, range(config.TRAIN.BATCH_SIZE_PER_GPU*config.NUM_DEBUG_BATCHES))
val_set = data.Subset(val_set, range(config.VALIDATION.BATCH_SIZE_PER_GPU))
train_loader = data.DataLoader(
train_set, batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, shuffle=True
)
val_loader = data.DataLoader(
val_set, batch_size=config.VALIDATION.BATCH_SIZE_PER_GPU, num_workers=1
) # config.WORKERS)
# Model:
model = getattr(models, config.MODEL.NAME).get_seg_model(config)
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
# Optimizer and LR Scheduler:
optimizer = torch.optim.SGD(
model.parameters(),
lr=config.TRAIN.MAX_LR,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WEIGHT_DECAY,
)
epochs_per_cycle = config.TRAIN.END_EPOCH // config.TRAIN.SNAPSHOTS
snapshot_duration = epochs_per_cycle * len(train_loader) if not debug else 2 * len(train_loader)
scheduler = CosineAnnealingScheduler(
optimizer, "lr", config.TRAIN.MAX_LR, config.TRAIN.MIN_LR, cycle_size=snapshot_duration
)
# Tensorboard writer:
summary_writer = create_summary_writer(log_dir=path.join(output_dir, "logs"))
# class weights are inversely proportional to the frequency of the classes in the training set
class_weights = torch.tensor(config.DATASET.CLASS_WEIGHTS, device=device, requires_grad=False)
# Loss:
criterion = torch.nn.CrossEntropyLoss(weight=class_weights, ignore_index=255, reduction="mean")
# Ignite trainer and evaluator:
trainer = create_supervised_trainer(model, optimizer, criterion, prepare_batch, device=device)
transform_fn = lambda output_dict: (output_dict["y_pred"].squeeze(), output_dict["mask"].squeeze())
evaluator = create_supervised_evaluator(
model,
prepare_batch,
metrics={
"nll": Loss(criterion, output_transform=transform_fn),
"pixacc": pixelwise_accuracy(n_classes, output_transform=transform_fn, device=device),
"cacc": class_accuracy(n_classes, output_transform=transform_fn),
"mca": mean_class_accuracy(n_classes, output_transform=transform_fn),
"ciou": class_iou(n_classes, output_transform=transform_fn),
"mIoU": mean_iou(n_classes, output_transform=transform_fn),
},
device=device,
)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Logging:
trainer.add_event_handler(
Events.ITERATION_COMPLETED, logging_handlers.log_training_output(log_interval=config.PRINT_FREQ),
)
trainer.add_event_handler(Events.EPOCH_COMPLETED, logging_handlers.log_lr(optimizer))
# Tensorboard and Logging:
trainer.add_event_handler(Events.ITERATION_COMPLETED, tensorboard_handlers.log_training_output(summary_writer))
trainer.add_event_handler(Events.ITERATION_COMPLETED, tensorboard_handlers.log_validation_output(summary_writer))
# add specific logger which also triggers printed metrics on training set
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
tensorboard_handlers.log_results(engine, evaluator, summary_writer, n_classes, stage="Training")
logging_handlers.log_metrics(engine, evaluator, stage="Training")
# add specific logger which also triggers printed metrics on validation set
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
tensorboard_handlers.log_results(engine, evaluator, summary_writer, n_classes, stage="Validation")
logging_handlers.log_metrics(engine, evaluator, stage="Validation")
# dump validation set metrics at the very end for debugging purposes
if engine.state.epoch == config.TRAIN.END_EPOCH and debug:
fname = f"metrics_{config_file_name}_{config.TRAIN.MODEL_DIR}.json"
metrics = evaluator.state.metrics
out_dict = {x: metrics[x] for x in ["nll", "pixacc", "mca", "mIoU"]}
with open(fname, "w") as fid:
json.dump(out_dict, fid)
log_msg = " ".join(f"{k}: {out_dict[k]}" for k in out_dict.keys())
logging.info(log_msg)
# Checkpointing: snapshotting trained models to disk
checkpoint_handler = SnapshotHandler(
output_dir,
config.MODEL.NAME,
extract_metric_from("mIoU"),
lambda: (trainer.state.iteration % snapshot_duration) == 0,
)
evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {"model": model})
logger.info("Starting training")
trainer.run(train_loader, max_epochs=config.TRAIN.END_EPOCH, epoch_length=len(train_loader), seed=config.SEED)
summary_writer.close()
| 16,855
|
def main_xss(start_url, proxy=None, agent=None, **kwargs):
"""
main attack method to be called
"""
tamper = kwargs.get("tamper", None)
verbose = kwargs.get("verbose", False)
batch = kwargs.get("batch", False)
force = kwargs.get("force_ssl", False)
question_msg = (
"it appears that heuristic tests have shown this URL may not be a good "
"candidate to perform XSS tests on, would you like to continue anyways"
)
if not batch:
question = lib.core.common.prompt(
question_msg, opts="yN"
) if not lib.core.settings.URL_QUERY_REGEX.match(start_url) else "y"
else:
question = lib.core.common.prompt(
question_msg, opts="yN", default="y"
)
if not question.lower().startswith("y"):
return
try:
if tamper:
lib.core.settings.logger.info(lib.core.settings.set_color(
"tampering payloads with '{}'".format(tamper)
))
find_xss_script(start_url)
lib.core.settings.logger.info(lib.core.settings.set_color(
"loading payloads"
))
payloads = __load_payloads()
if verbose:
lib.core.settings.logger.debug(lib.core.settings.set_color(
"a total of {} payloads loaded".format(len(payloads)), level=10
))
lib.core.settings.logger.info(lib.core.settings.set_color(
"payloads will be written to a temporary file and read from there"
))
filename = create_urls(start_url, payloads, tamper=tamper, verbose=verbose, force=force)
lib.core.settings.logger.info(lib.core.settings.set_color(
"loaded URL's have been saved to '{}'".format(filename), level=25
))
lib.core.settings.logger.info(lib.core.settings.set_color(
"testing for XSS vulnerabilities on host '{}'".format(start_url)
))
if proxy is not None:
lib.core.settings.logger.info(lib.core.settings.set_color(
"using proxy '{}'".format(proxy)
))
success = set()
with open(filename) as urls:
for i, url in enumerate(urls.readlines(), start=1):
url = url.strip()
payload = find_xss_script(url)
try:
result = scan_xss(url, proxy=proxy, agent=agent)
if verbose:
lib.core.settings.logger.info(lib.core.settings.set_color(
"trying payload '{}'".format(payload)
))
if result[0] != "sqli" and result[0] is True:
success.add(url)
if verbose:
lib.core.settings.logger.debug(lib.core.settings.set_color(
"payload '{}' appears to be usable".format(payload), level=15
))
elif result[0] is "sqli":
if i <= 1:
lib.core.settings.logger.error(lib.core.settings.set_color(
"loaded URL '{}' threw a DBMS error and appears to be injectable, test for "
"SQL injection, backend DBMS appears to be '{}'".format(
url, result[1]
), level=40
))
else:
if verbose:
lib.core.settings.logger.error(lib.core.settings.set_color(
"SQL error discovered", level=40
))
else:
if verbose:
lib.core.settings.logger.debug(lib.core.settings.set_color(
"host '{}' does not appear to be vulnerable to XSS attacks with payload '{}'".format(
start_url, payload
), level=10
))
except (
requests.exceptions.ConnectionError,
requests.exceptions.TooManyRedirects,
requests.exceptions.ReadTimeout,
requests.exceptions.InvalidURL
):
if not payload == "":
lib.core.settings.logger.error(lib.core.settings.set_color(
"payload '{}' caused a connection error, assuming no good and continuing".format(payload), level=40
))
if len(success) != 0:
lib.core.settings.logger.info(lib.core.settings.set_color(
"possible XSS scripts to be used:", level=25
))
lib.core.settings.create_tree(start_url, list(success))
else:
lib.core.settings.logger.error(lib.core.settings.set_color(
"host '{}' does not appear to be vulnerable to XSS attacks".format(start_url), level=40
))
question_msg = "would you like to keep the created URLs saved for further testing"
if not batch:
save = lib.core.common.prompt(
question_msg, opts="yN"
)
else:
save = lib.core.common.prompt(
question_msg, opts="yN", default="n"
)
if save.lower().startswith("n"):
os.remove(filename)
else:
os.remove(filename)
except KeyboardInterrupt:
if not lib.core.common.pause():
lib.core.common.shutdown()
| 16,856
|
def _np_copy(a, out=None):
"""
Return an array copy of the given object.
Parameters
----------
a : ndarray
Input data.
out : ndarray or None, optional
Alternative output array in which to place the result. It must have
the same shape and dtype as the expected output.
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
    -----
This function differs from the original `numpy.copy
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copy.html>`_ in
the following aspects:
    - Input type does not support Python native iterables (list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- Does not support "order" parameter.
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when ``x`` is modified, ``y`` is also modified, but not ``z``:
>>> x[0] = 10
>>> x[0] == y[0]
array([1.])
>>> x[0] == z[0]
array([0.])
"""
pass
| 16,857
|
def root(tmpdir):
"""Return a pytest temporary directory"""
return tmpdir
| 16,858
|
def _check_storage(log_fn: tp.Callable) -> bool:
"""See if the storage system is alive."""
from app.crud.core import ready
try:
log_fn('Attempting to contact storage system', depth=1)
result = ready()
return result
except Exception as ex:
log_fn(ex, level=logging.WARN, depth=1)
return False
| 16,859
|
def ignore_ip_addresses_rule_generator(ignore_ip_addresses):
"""
    Generate tshark display-filter rules to ignore the given IP addresses.
    Args:
        ignore_ip_addresses: list of IP addresses
    Returns:
        list of rule strings
"""
rules = []
for ip_address in ignore_ip_addresses:
rules.append("-Y ip.dst != {0}".format(ip_address))
return rules
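
# Hedged usage sketch (not from the original source): shows the shape of the
# generated rules; note each entry is a single "-Y <filter>" string.
if __name__ == "__main__":
    print(ignore_ip_addresses_rule_generator(["10.0.0.1", "192.168.1.5"]))
    # expected: ['-Y ip.dst != 10.0.0.1', '-Y ip.dst != 192.168.1.5']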
| 16,860
|
def readmission(aFileName):
"""
Load a mission from a file into a list. The mission definition is in the Waypoint file
format (http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format).
This function is used by upload_mission().
"""
print "\nReading mission from file: %s" % aFileName
cmds = vehicle.commands
missionlist=[]
with open(aFileName) as f:
for i, line in enumerate(f):
if i==0:
if not line.startswith('QGC WPL 110'):
raise Exception('File is not supported WP version')
else:
linearray=line.split('\t')
ln_index=int(linearray[0])
ln_currentwp=int(linearray[1])
ln_frame=int(linearray[2])
ln_command=int(linearray[3])
ln_param1=float(linearray[4])
ln_param2=float(linearray[5])
ln_param3=float(linearray[6])
ln_param4=float(linearray[7])
ln_param5=float(linearray[8])
ln_param6=float(linearray[9])
ln_param7=float(linearray[10])
ln_autocontinue=int(linearray[11].strip())
cmd = Command( 0, 0, 0, ln_frame, ln_command, ln_currentwp, ln_autocontinue, ln_param1, ln_param2, ln_param3, ln_param4, ln_param5, ln_param6, ln_param7)
missionlist.append(cmd)
return missionlist
| 16,861
|
def remove_stop_words(words):
"""Remove all stop words.
Args:
words (list): The list of words
Returns:
list: An updated word list with stopwords removed.
"""
# http://stackoverflow.com/questions/5486337/
# how-to-remove-stop-words-using-nltk-or-python
return [w for w in words if w.lower() not in stopwords.words('english')]
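
# Hedged usage sketch (not part of the original module): assumes the module-level
# `stopwords` comes from NLTK (from nltk.corpus import stopwords) and that the
# corpus has been downloaded once via nltk.download('stopwords').
if __name__ == "__main__":
    print(remove_stop_words(["This", "is", "a", "simple", "example"]))
    # expected: ['simple', 'example']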
| 16,862
|
def plotSpikes(spkt, spkid=None, newFigure=False, color='b'):
    """Plot spike time instants as a raster."""
start_plot = time.time()
doPlot = False
#if len(spkt[:,:]) > 0:
print "\n Plotting Raster..."
if newFigure:
plt.figure()
    if len(spkt.shape) == 2 and spkid is None:
try:
for i in range(len(spkt[:,0])):
plt.plot(spkt[i,:], np.ones(len(spkt[i,:]))*i, color+'|', markeredgewidth=1.5)
except IndexError:
print("\n*****\n\nWarning: 'spkt' has no length, aparently no spikes were fired.\n\n*****\n")
else:
doPlot = True
    elif len(spkt.shape) == 1 and spkid is not None:
try:
plt.plot(spkt,spkid, color+'|', markeredgewidth=1.5)
except IndexError:
print("\n*****\n\nWarning: 'spkt' has no length, aparently no spikes were fired.\n\n*****\n")
else:
doPlot = True
else:
print("\n*****\n\nWarning: check 'spkt'.\n\n*****\n")
if doPlot:
plt.title("Spike Trains")
plt.xlabel("Time [ms]")
plt.ylabel("# Motor neuron")
plt.show()
elapsed_time = time.time() - start_plot
print(" Done; plotRaster time: %.2f s" %elapsed_time)
else:
elapsed_time = time.time() - start_plot
print(" Done with warnings; plotRaster time: %.2f s" %elapsed_time)
| 16,863
|
def table_triples(tables, include_type=True):
"""Make triples based on table predictions"""
for table in tables:
table["triples"] = list(triples.yield_triples(table, include_type=True))
yield table
| 16,864
|
def task_gather_quiver(self):
"""We wrote the "gathered" files during task construction.
"""
job_done_fn = fn(self.job_done)
touch(job_done_fn)
| 16,865
|
def synthetic_costs_1():
""" Uncertainty in 5 points at [0,0] on X1 can cause it to flip
to [1,0] if needed to misclassify
Uncertainty in 1 point at [1,1] on X2 can cause it to flip
to [1,0] if needed to misclassify
All other points certain
"""
costs = np.array([[1,4],[1,4],[1,4],[1,4],[1,4],[4,4],[4,4],
[4,4],[4,4],[4,4],
[4,1],
[4,4],[4,4]])
return costs
| 16,866
|
def task(ctx, config):
"""
Run all cram tests from the specified paths on the specified
clients. Each client runs tests in parallel.
Limitations:
Tests must have a .t suffix. Tests with duplicate names will
overwrite each other, so only the last one will run.
For example::
tasks:
- ceph:
- cram:
clients:
client.0:
- qa/test.t
                - qa/test2.t
client.1: [qa/test.t]
branch: foo
You can also run a list of cram tests on all clients::
tasks:
- ceph:
- cram:
clients:
all: [qa/test.t]
:param ctx: Context
:param config: Configuration
"""
assert isinstance(config, dict)
assert 'clients' in config and isinstance(config['clients'], dict), \
'configuration must contain a dictionary of clients'
clients = teuthology.replace_all_with_clients(ctx.cluster,
config['clients'])
testdir = teuthology.get_testdir(ctx)
overrides = ctx.config.get('overrides', {})
refspec = get_refspec_after_overrides(config, overrides)
git_url = teuth_config.get_ceph_qa_suite_git_url()
log.info('Pulling tests from %s ref %s', git_url, refspec)
try:
for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
'mkdir', '--', client_dir,
run.Raw('&&'),
'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
run.Raw('&&'),
'{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
'install', 'cram==0.6',
],
)
clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
remote.run(args=refspec.clone(git_url, clone_dir))
for test in tests:
assert test.endswith('.t'), 'tests must end in .t'
remote.run(
args=[
'cp', '--', os.path.join(clone_dir, test), client_dir,
],
)
with parallel() as p:
for role in clients.keys():
p.spawn(_run_tests, ctx, role)
finally:
for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
# remove test files unless they failed
for test_file in test_files:
abs_file = os.path.join(client_dir, test_file)
remote.run(
args=[
'test', '-f', abs_file + '.err',
run.Raw('||'),
'rm', '-f', '--', abs_file,
],
)
# ignore failure since more than one client may
# be run on a host, and the client dir should be
# non-empty if the test failed
remote.run(
args=[
'rm', '-rf', '--',
'{tdir}/virtualenv'.format(tdir=testdir),
clone_dir,
run.Raw(';'),
'rmdir', '--ignore-fail-on-non-empty', client_dir,
],
)
| 16,867
|
def example_one(request, context=None):
""" Return web page for example one. """
if context is None:
context = {}
session = request.session.get("ApiSession", None)
if session is None:
return no_session_set(request)
session = Session.deserialize(session)
origin_codes = get_codes_with_filter(session, REPORTING_AIRPORT_CODE, 0)
context.update(
{
"title": "Example 1",
"active": "example_one",
"origin_codes": origin_codes,
"dest_codes": EXAMPLE_ONE_DESTS,
}
)
return render(request, "example_one.html", context)
| 16,868
|
def decode(cls: Any, value: bytes) -> Any:
"""Decode value in katcp message to a type.
If a union type is provided, the value must decode successfully (i.e.,
without raising :exc:`ValueError`) for exactly one of the types in the
union, otherwise a :exc:`ValueError` is raised.
Parameters
----------
cls
The target type, or a :class:`typing.Union` of types.
value
Raw (but unescaped) value in katcp message
Raises
------
ValueError
if `value` does not have a valid value for `cls`
TypeError
if `cls` is not a registered type or union of registered
types.
See also
--------
:func:`register_type`
"""
union_args = _union_args(cls)
if union_args is not None:
values = [] # type: List[Any]
for type_ in union_args:
try:
values.append(decode(type_, value))
except ValueError:
pass
if len(values) == 1:
return values[0]
elif not values:
raise ValueError('None of the types in {} could decode {!r}'.format(
cls, value))
else:
raise ValueError('{!r} is ambiguous for {}'.format(value, cls))
else:
return get_type(cls).decode(cls, value)
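
# Hedged usage sketch: relies on the surrounding katcp module having int and float
# registered via register_type (as in aiokatcp); with a Union, exactly one member
# must decode the raw bytes.
if __name__ == "__main__":
    from typing import Union
    print(decode(int, b"42"))                 # 42
    print(decode(Union[int, float], b"2.5"))  # 2.5 -- only float decodes successfully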
| 16,869
|
def ensure_parent(path):
"""Ensure that the parent directory of the given path exists.
"""
parent = os.path.dirname(path)
if parent and not os.path.exists(parent):
os.makedirs(parent)
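
# Hedged usage sketch: create the missing intermediate directories for a file path.
if __name__ == "__main__":
    import os
    import tempfile
    target = os.path.join(tempfile.mkdtemp(), "nested", "dir", "notes.txt")
    ensure_parent(target)
    print(os.path.isdir(os.path.dirname(target)))  # True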
| 16,870
|
def test_HarSanitizer_load_wordlist():
"""Test successful HarSantizer.load_wordlist()"""
hs = HarSanitizer()
word_list = hs.load_wordlist(wordlist=['word1', u'word2', 'word3'])
assert isinstance(word_list, list)
assert word_list[2] == "word3"
| 16,871
|
def load_f0(fhandle: TextIO) -> annotations.F0Data:
"""Load an ikala f0 annotation
Args:
fhandle (str or file-like): File-like object or path to f0 annotation file
Raises:
IOError: If f0_path does not exist
Returns:
F0Data: the f0 annotation data
"""
lines = fhandle.readlines()
f0_midi = np.array([float(line) for line in lines])
f0_hz = librosa.midi_to_hz(f0_midi) * (f0_midi > 0)
confidence = (f0_hz > 0).astype(float)
times = (np.arange(len(f0_midi)) * TIME_STEP) + (TIME_STEP / 2.0)
f0_data = annotations.F0Data(times, f0_hz, confidence)
return f0_data
| 16,872
|
def excepthook(exc_type, exc_value, exc_traceback):
"""Handle unhandled exceptions, default exception hook."""
if isinstance(exc_value, OSError):
# Handle OSError differently by giving more details.
message = (
            f'Unexpected operating system error.\n'
'['
f'{exc_type.__name__}'
f'{f"/{errno.errorcode[exc_value.errno]}" if exc_value.errno is not None else ""}'
f'{f"/Win{exc_value.winerror}" if exc_value.winerror is not None else ""}'
']\n'
f'{exc_value.strerror}.\n'
f'{f"«{exc_value.filename}»" if exc_value.filename is not None else ""}'
f'{f" -> «{exc_value.filename2}»" if exc_value.filename2 is not None else ""}'
)
else:
        message = f'Unhandled {exc_type.__name__} exception.\n{exc_value}'
message += '\n'
    message += '\n'.join([f'Line {frame.lineno}: {frame.line}' for frame in tb.extract_tb(exc_traceback)]).rstrip()
error(message)
| 16,873
|
def media_to_csv(media: Union[List[Media], Movie, LimitedSeries, Podcast, TVShow]):
"""Exports the specified media into a CSV file
:param media: The Media object to convert into CSV
"""
__media_to(media, as_csv=True)
| 16,874
|
def remove_fallen(lst):
"""removes fallen orcs from a list"""
return [x for x in lst if x.standing]
| 16,875
|
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(ExceptionsChecker(linter))
| 16,876
|
def gc_resnet152(num_classes):
"""Constructs a ResNet-152 model.
Args:
        num_classes (int): number of output classes
"""
model = ResNet(GCBottleneck, [3, 8, 36, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
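
# Hedged usage sketch: build the model and run a dummy forward pass. Assumes the
# surrounding module already provides torch, nn, ResNet and GCBottleneck.
if __name__ == "__main__":
    import torch
    model = gc_resnet152(num_classes=10)
    logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 10])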
| 16,877
|
def consume_entropy(generated_password: str, quotient: int, max_length: int) -> str:
"""
Takes the entropy (quotient) and the length of password (max_length) required
and uses the remainder of their division as the index to pick a character from
the characters list.
This process occurs recursively until the password is of the required length.
"""
if len(generated_password) >= max_length:
return generated_password
quotient, remainder = divmod(quotient, len(characters))
generated_password += characters[remainder]
return consume_entropy(generated_password, quotient, max_length)
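
# Hedged usage sketch: `characters` is assumed to be a module-level alphabet string;
# a plausible stand-in is defined here for illustration only.
if __name__ == "__main__":
    import string
    characters = string.ascii_letters + string.digits  # assumed alphabet
    # The same entropy value always yields the same fixed-length password.
    print(consume_entropy("", quotient=2 ** 64, max_length=10))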
| 16,878
|
def randomNormal(n, height, baseshape=[]):
"""
Generate random positions, normally distributed along z. Base shape can be:
[] (1D sim)
[Ly] (2D sim)
[Lx, Ly] (3D sim)
Where Lx, Ly are lengths along x, y.
"""
nDim = len(baseshape) + 1
pos = np.zeros([n, nDim])
z = np.random.randn(n)
z *= height
pos[:,-1] = z
for i in range(nDim - 1):
pos[:, i] = np.random.rand(n) * baseshape[i]
return pos
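
# Hedged usage sketch: 1000 particles in a 10x10 box in x/y, normally distributed
# along z with scale height 2 (assumes numpy imported as np, as the function requires).
if __name__ == "__main__":
    pos = randomNormal(1000, height=2.0, baseshape=[10.0, 10.0])
    print(pos.shape)  # (1000, 3): columns are x, y, z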
| 16,879
|
def index(model_name=None):
"""
Index page.
"""
    registered_models = mdb.registered_models
    model = None
    if model_name:
        model = next((m for m in registered_models if m.__name__.lower() == model_name.lower()), None)
    elif registered_models:
        model = registered_models[0]
if not model:
abort(404)
model_name = model.__name__.lower()
    # Fetch the indexes defined on the specified model
index_dict = OrderedDict({'_id': ObjectId})
for i in model.indexes:
value = i['fields']
if isinstance(value, str):
index_dict[value] = model._valid_paths[value]
elif isinstance(value, list):
for val in value:
if isinstance(val, tuple):
field, direction = val
index_dict[field] = model._valid_paths[field]
else:
index_dict[val] = model._valid_paths[val]
    # Query conditions
    ''' 20161123/Samuel/populate_model is not used to build the query conditions for now
    # Call populate_model to convert the query args into a data object; data types are converted automatically
    search_record = populate_model(request.args, model, False)
    # Extract the non-empty values from the data object to build a MongoDB query condition
    condition = {f: v for f, v in search_record.iteritems() if v}
    '''
condition = {}
for k, t in index_dict.items():
v = request.args.get(k, None)
if v:
cv = convert_from_string(v, t)
condition[k.replace('.$', '')] = cv
    # Pagination support
page = int(request.args.get('_p', 1))
count = model.count(condition)
start = (page - 1) * PAGE_COUNT
    # Only show indexed fields in the returned results
projection = {k.replace('.$', ''): True for k in index_dict}
current_app.logger.debug(
'There are %s %ss for condition %s, with projection %s' % (count, model_name, condition, projection))
    # TODO: sorting
records = model.find(condition, projection, start, PAGE_COUNT)
pagination = Pagination(page, PAGE_COUNT, count)
# current_app.logger.debug('Indexed fields for %s are %s' % (model_name, index_dict))
return render_template('/crud/index.html',
models=registered_models,
model=model,
index_dict=index_dict,
records=records,
pagination=pagination)
| 16,880
|
def download(names, tempdir=None, extra_args=None):
"""Gather pip packages in `tempdir`
Arguments:
names (list): Names of packages to install, in pip-format,
e.g. ["six==1"]
tempdir (str, optional): Absolute path to where pip packages go until
they've been installed as Rez packages, defaults to the cwd
extra_args (list, optional): Additional arguments, typically only
relevant to pip rather than pipz
Returns:
distributions (list): Downloaded distlib.database.InstalledDistribution
Raises:
OSError: On anything gone wrong with subprocess and pip
"""
extra_args = extra_args or []
assert isinstance(names, (list, tuple)), (
"%s was not a tuple or list" % names
)
assert all(isinstance(name, _basestring) for name in names), (
"%s contained non-string" % names
)
tempdir = tempdir or os.getcwd()
# Build pip commandline
cmd = [
"python", "-m", "pip", "install",
"--target", tempdir,
# Only ever consider wheels, anything else is ancient
"--use-pep517",
# Handle case where the Python distribution used alongside
# pip already has a package installed in its `site-packages/` dir.
"--ignore-installed",
# rez pip users don't have to see this
"--disable-pip-version-check",
]
for extra_arg in extra_args:
if extra_arg in cmd:
print_warning("'%s' argument ignored, used internally" % extra_arg)
continue
cmd += [extra_arg]
cmd += names
call(cmd)
return sorted(
find_distributions(tempdir),
# Upper-case characters typically come first
key=lambda d: d.key
)
| 16,881
|
def _parseCellContentsSection(fileAsList, lineIdx):
""" returns fractCoords from Cell Contents section of castep
Args:
fileAsList(str list): Each entry is 1 line of the castep input file
lineIdx(int): The index containing the line "cell contents"
Returns
fractCoords: nx4 iter with each containing [x,y,z,symbol]. Used to init UnitCell objects
"""
finished = False
while not finished:
currLine = fileAsList[lineIdx].strip()
if "Fractional coord" in fileAsList[lineIdx]:
lineIdx = lineIdx + 3
fractCoords = list()
while "xx" not in currLine:
currLine = fileAsList[lineIdx].strip()
splitLine = currLine.split()
if len(splitLine) == 1:
break
currCoords = [float(x) for x in splitLine[3:6]] + [splitLine[1]]
fractCoords.append(currCoords)
lineIdx = lineIdx + 1
break
else:
lineIdx = lineIdx+1
return fractCoords, lineIdx
| 16,882
|
def dev_update(obj, directory):
"""Update plugin parts which have changed since previous update.
Optionally pass in the DIRECTORY of the plugin (defaults to cwd).
"""
directory = Path(directory)
plugin_toml_path = directory / "plugin.toml"
if not plugin_toml_path.exists():
lib.log_error("Not in a plugin directory.")
sys.exit(1)
with get_modified_plugin_directories(
directory, reset=obj["force"]
) as modified_plugin_directories:
if modified_plugin_directories:
with lib.temp_directory() as temp_directory:
shutil.copy(plugin_toml_path, temp_directory)
for modified_directory in modified_plugin_directories:
lib.log(f"Including: {modified_directory}")
shutil.copytree(
directory / modified_directory,
temp_directory / modified_directory,
)
api = lib.get_api(**obj)
lib.run_plugins_task(
api,
"dev_update_plugin",
dict(),
"Uploading to server",
data=lib.create_targz_as_bytes(temp_directory),
)
else:
lib.log("Nothing to do.")
| 16,883
|
def write_tester(format, layer, out_file):
"""Write plate with these keys to this file."""
plato = FORMAT_CLASSES[format](
out_file,
width_in_units=15, height_in_units=2, unit_mm=19,
case_thickness=3.5, padding=1, corner_radius=2, kerf=0.18)
plato.draw_outside()
for i in range(15):
k = (i - 4) * 0.05 # Range from -0.20 to +0.50
key = Key(str(k), (i, 0), (1, 2))
if layer in ['plate', 'combined']:
plato.draw_cherry_mx_switches([key], kerf=k)
if layer in ['under', 'combined']:
plato.draw_cherry_mx_under_switches([key], kerf=k)
if layer in ['clips']:
plato.draw_cherry_mx_switch_clips([key])
if layer in ['caps', 'combined']:
plato.draw_key_caps([key])
if layer in ['caps', 'combined', 'clips']:
plato.draw_screw_heads(8, indent=2)
plato.draw_screws(8, indent=2)
plato.save()
| 16,884
|
def get_avg_no_of_feat_values(contents):
"""
Helper to calculate numbers of different values
of categorical features, averaged for all features
"""
total = 0
for i in range(0, len(contents[0])):
total += len(set([x[i] for x in contents]))
return float(total) / float(len(contents[0]))
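
# Hedged usage sketch: two rows with two categorical features; feature 0 has two
# distinct values and feature 1 has one, so the average is 1.5.
if __name__ == "__main__":
    print(get_avg_no_of_feat_values([["red", "S"], ["blue", "S"]]))  # 1.5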
| 16,885
|
def plot_trend_line(axes_, xd, yd, c='r', alpha=1, cus_loc = None, text_color='black', return_params=False,
extra_text='', t_line_1_1=True, fit_function=None, fontsize_=12, add_text=True):
"""Make a line of best fit"""
#create clean series
x_, y_ = coincidence(xd,yd)
if fit_function is not None:
params = curve_fit(fit_function, x_, y_)
print('fitted parameters')
print(params[0])
fit_line_x = np.arange(int(np.nanmin(x_)),int(np.nanmax(x_))+1,.1)
plotting_par_list = [fit_line_x]
for fit_par in params[0]:
plotting_par_list.append(fit_par)
funt_par = tuple(plotting_par_list)
fit_line_y = fit_function(*funt_par)
axes_.plot(fit_line_x, fit_line_y, c, alpha=alpha)
# calculate R2
plotting_par_list = [x_]
params_str_ = ''
for i_, fit_par in enumerate(params[0]):
if extra_text == '':
params_str_ = params_str_ + 'fit parameters ' + str(i_+1) + ': ' + '$%0.2f$' % (fit_par) + '\n'
else:
params_str_ = params_str_ + extra_text + '$%0.2f$' % (fit_par) + '\n'
plotting_par_list.append(fit_par)
funt_par = tuple(plotting_par_list)
fit_line_y = fit_function(*funt_par)
residuals = y_ - fit_line_y
ss_res = np.sum(residuals**2)
ss_tot = np.sum((y_ - np.mean(y_))**2)
Rsqr = float(1 - (ss_res / ss_tot))
# Plot R^2 value
x_1 = np.nanmin(x_)
y_2 = np.nanmax(y_)
error_text = '$R^2 = %0.2f$' % Rsqr
if cus_loc is None:
axes_.text(x_1, y_2 , params_str_ + error_text, fontsize=fontsize_,
horizontalalignment='left',verticalalignment='top',color=text_color,
bbox={'facecolor': 'white', 'edgecolor': 'none'})
else:
axes_.text(cus_loc[0], cus_loc[1] , params_str_ + error_text, fontsize=fontsize_,
horizontalalignment='left',verticalalignment='top',color=text_color,
bbox={'facecolor': 'white', 'edgecolor': 'none'})
else:
# Calculate trend line
params = np.polyfit(x_, y_, 1)
intercept = params[-1]
slope = params[-2]
minxd = np.nanmin(x_)
maxxd = np.nanmax(x_)
xl = np.array([minxd, maxxd])
yl = slope * xl + intercept
print('fitted parameters')
print(slope, intercept)
# Plot trend line
axes_.plot(xl, yl, c, alpha=alpha)
# Calculate R Squared
poly_1d = np.poly1d(params)
ybar = np.sum(y_) / len(y_)
ssreg = np.sum((poly_1d(x_) - ybar) ** 2)
sstot = np.sum((y_ - ybar) ** 2)
Rsqr = float(ssreg / sstot)
# Plot R^2 value
x_1 = np.nanmin(x_)
y_2 = np.nanmax(y_)
if intercept >= 0:
if extra_text=='':
equat_text = '$Y = %0.2f*x + %0.2f$' % (slope,intercept)
else:
equat_text = extra_text + '\n' + '$Y = %0.2f*x + %0.2f$' % (slope,intercept)
else:
if extra_text=='':
equat_text = '$Y = %0.2f*x %0.2f$' % (slope,intercept)
else:
equat_text = extra_text + '\n' + '$Y = %0.2f*x %0.2f$' % (slope,intercept)
error_text = '$R^2 = %0.2f$' % Rsqr
if add_text:
if cus_loc is None:
axes_.text(x_1, y_2 , equat_text + '\n' + error_text, fontsize=fontsize_,
horizontalalignment='left',verticalalignment='top',color=text_color)
else:
axes_.text(cus_loc[0], cus_loc[1] , equat_text + '\n' + error_text, fontsize=fontsize_,
horizontalalignment='left',verticalalignment='top',color=text_color)
# plot 1:1 line if true
if t_line_1_1:
xy_min = np.min([np.nanmin(x_),np.nanmin(y_)])
xy_max = np.max([np.nanmax(x_),np.nanmax(y_)])
axes_.plot([xy_min, xy_max], [xy_min, xy_max], 'k--')
if return_params:
return Rsqr, params
else:
return Rsqr
| 16,886
|
def get_oil_type_atb(
oil_attrs, origin, destination, transport_data_dir, random_generator
):
"""Randomly choose type of cargo oil spilled from an ATB (articulated tug and barge) based on
AIS track origin & destination, and oil cargo attribution analysis.
Unlike traditional tank barges, the vessels with 'atb' designation are known oil-cargo vessels.
We used three different data sources to verify: AIS, Dept of Ecology's fuel transfer records
and Charlie Costanzo's ATB list. Details of traffic can be seen in this google spreadsheet:
https://docs.google.com/spreadsheets/d/1dlT0JydkFG43LorqgtHle5IN6caRYjf_3qLrUYqANDY/edit
Because of this pre-identification and selection method, we can assume that all ATBs are
oil-cargo atbs and that the absence of origin-destination information is due to issues in
linking ship tracks and not ambiguity about whether traffic is oil-cargo traffic.
:param dict oil_attrs: Oil attribution information from the output of make_oil_attrs.py.
:param str or None origin: Origin of AIS track from which spill occurs.
:param str or None destination: Destination of AIS track from which spill occurs.
:param transport_data_dir: Directory path to marine_transport_data files repository
cloned from https://github.com/MIDOSS/marine_transport_data.
:type transport_data_dir: :py:class:`pathlib.Path`
:param random_generator: PCG-64 random number generator
:type random_generator: :py:class:`numpy.random.Generator`
:return: Type of oil spilled.
:rtype: str
"""
vessel_type = "atb"
# Assign US and CAD origin/destinations from oil_attrs file
CAD_origin_destination = oil_attrs["categories"]["CAD_origin_destination"]
US_origin_destination = oil_attrs["categories"]["US_origin_destination"]
# Get cargo oil type attribution information from oil-type yaml files
yaml_file = transport_data_dir / Path(oil_attrs["files"]["CAD_origin"]).name
with yaml_file.open("rt") as f:
CAD_yaml = yaml.safe_load(f)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_destination"]).name
with yaml_file.open("rt") as f:
WA_in_yaml = yaml.safe_load(f)
WA_in_noinfo = _calc_no_info_facilities(WA_in_yaml)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_origin"]).name
with yaml_file.open("rt") as f:
WA_out_yaml = yaml.safe_load(f)
WA_out_noinfo = _calc_no_info_facilities(WA_out_yaml)
# US_origin is for US as origin
yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_origin"]).name
with yaml_file.open("rt") as f:
US_yaml = yaml.safe_load(f)
# US_combined represents the combined import and export of oil
yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_combined"]).name
with yaml_file.open("rt") as f:
USall_yaml = yaml.safe_load(f)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["Pacific_origin"]).name
with yaml_file.open("rt") as f:
Pacific_yaml = yaml.safe_load(f)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE: these pairs need to be used together for "get_oil_type_cargo"
# (but don't yet have error-checks in place):
# - "WA_in_yaml" and "destination"
# - "WA_out_yaml" and "origin"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if origin in CAD_origin_destination:
if origin == "Westridge Marine Terminal":
if destination == "U.S. Oil & Refining":
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif destination in US_origin_destination:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif destination in CAD_origin_destination:
# assume export within CAD is from Jet fuel storage tanks
# as there is a pipeline to Parkland for crude oil
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
else:
if destination in US_origin_destination:
# we have better information on WA fuel transfers,
# so I prioritize this information source
oil_type = get_oil_type_cargo(
WA_in_yaml, destination, vessel_type, random_generator
)
elif destination == "ESSO Nanaimo Departure Bay":
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
elif destination == "Suncor Nanaimo":
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
else:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif origin in US_origin_destination and origin not in WA_out_noinfo[vessel_type]:
if destination == "Westridge Marine Terminal":
# Westridge stores jet fuel from US for re-distribution
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
WA_out_yaml, origin, vessel_type, random_generator
)
elif (
destination in US_origin_destination
and destination not in WA_in_noinfo[vessel_type]
):
oil_type = get_oil_type_cargo(
WA_in_yaml, destination, vessel_type, random_generator
)
elif destination in CAD_origin_destination:
if destination == "Westridge Marine Terminal":
# Westridge doesn't receive crude for storage
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
elif origin == "Pacific":
oil_type = get_oil_type_cargo(
Pacific_yaml, origin, vessel_type, random_generator
)
elif origin == "US":
oil_type = get_oil_type_cargo(US_yaml, origin, vessel_type, random_generator)
else:
# For all other traffic, use a generic fuel attribution from the combined
# US import and export
oil_type = get_oil_type_cargo(USall_yaml, None, vessel_type, random_generator)
return oil_type
| 16,887
|
def get_jwt():
"""
    Get the Authorization token and validate its signature
    against the public key fetched from the JWKS host.
"""
expected_errors = {
KeyError: WRONG_PAYLOAD_STRUCTURE,
AssertionError: JWK_HOST_MISSING,
InvalidSignatureError: WRONG_KEY,
DecodeError: WRONG_JWT_STRUCTURE,
InvalidAudienceError: WRONG_AUDIENCE,
TypeError: KID_NOT_FOUND
}
token = get_auth_token()
try:
jwks_payload = jwt.decode(token, options={'verify_signature': False})
assert 'jwks_host' in jwks_payload
jwks_host = jwks_payload.get('jwks_host')
key = get_public_key(jwks_host, token)
aud = request.url_root
payload = jwt.decode(
token, key=key, algorithms=['RS256'], audience=[aud.rstrip('/')]
)
set_ctr_entities_limit(payload)
return payload
except tuple(expected_errors) as error:
message = expected_errors[error.__class__]
raise AuthorizationError(message)
| 16,888
|
def start_volume(name, force=False):
"""
Start a gluster volume
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster
"""
cmd = "volume start {0}".format(name)
if force:
cmd = "{0} force".format(cmd)
volinfo = info(name)
if name not in volinfo:
log.error("Cannot start non-existing volume %s", name)
return False
if not force and volinfo[name]["status"] == "1":
log.info("Volume %s already started", name)
return True
return _gluster(cmd)
| 16,889
|
def hist_equal(img, z_max=255):
"""
    Histogram equalization: brightens dark regions and darkens overly bright ones.
    :param img: input image (grayscale or multi-channel, uint8)
    :param z_max: the brightest value of the original image minus the darkest (output dynamic range)
    :return: the equalized image as uint8
"""
if len(img.shape) == 2:
height, width = img.shape
n_chan = 1
elif len(img.shape) == 3:
height, width, n_chan = img.shape
print(img[:, :, 0].shape)
# H, W = img.shape
# S is the total of pixels
n_pixle = height * width
out = img.copy()
sum_h = 0.
if n_chan == 1:
for i in range(1, 255):
ind = np.where(img == i)
sum_h += len(img[ind])
z_prime = z_max / n_pixle * sum_h
out[ind] = z_prime
else:
        for c in range(n_chan):
            tmp_img = img[:, :, c]
            tmp_out = tmp_img.copy()
            sum_h = 0.  # reset the cumulative histogram for each channel
for i in range(1, 255):
ind = np.where(tmp_img == i)
sum_h += len(tmp_img[ind])
z_prime = z_max / n_pixle * sum_h
tmp_out[ind] = z_prime
out[:, :, c] = tmp_out
out = out.astype(np.uint8)
return out
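
# Hedged usage sketch: equalize a synthetic low-contrast grayscale image
# (assumes numpy imported as np at module level, as the function itself requires).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    img = np.clip(rng.normal(128, 10, (64, 64)), 0, 255).astype(np.uint8)
    out = hist_equal(img)
    print(img.min(), img.max(), "->", out.min(), out.max())  # intensity range widens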
| 16,890
|
def combine(*indices_lists):
"""
Return all the combinations from lists of indices
:param indices_lists: each argument is a list of indices (it must be a list)
:return: The combined list of indices
"""
if len([*indices_lists]) > 1:
return [i for i in product(*indices_lists)]
else:
return set(*indices_lists)
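
# Hedged usage sketch (assumes itertools.product is imported at module level):
# two lists give their Cartesian product, a single list gives a de-duplicated set.
if __name__ == "__main__":
    print(combine([0, 1], [10, 20]))  # [(0, 10), (0, 20), (1, 10), (1, 20)]
    print(combine([0, 1, 1]))         # {0, 1}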
| 16,891
|
def get_endpoint(query):
"""
Regex to parse domain and API endpoint from a SoQL query via FROM
statement
:param query: str, SoQL-formatted query
:return
url, endpoint, query: str objects, domain, endpoint, and
original query sans FROM statement
"""
url = re.search(r'\w+\.\w+\.(\w{2,3})', query, flags=re.I)
endpoint = re.search(r'(\w{4}-\w{4})\.json', query, flags=re.I)
query = re.sub(r'from( +|\t+|\n+).+', '', query, flags=re.I)
return url.group(), endpoint.group(1), query
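
# Hedged usage sketch; the domain and dataset id below are illustrative only.
if __name__ == "__main__":
    q = "SELECT created_date FROM data.cityofnewyork.us/resource/erm2-nwe9.json"
    print(get_endpoint(q))
    # expected: ('data.cityofnewyork.us', 'erm2-nwe9', 'SELECT created_date ')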
| 16,892
|
def load_data(filename: str) -> pd.DataFrame:
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename)
df.fillna(0, inplace=True)
df[["id", "price", "bedrooms", "bathrooms", "sqft_living", "sqft_lot", "floors", "waterfront", "view",
"condition", "grade", "sqft_above", "sqft_basement", "yr_built", "yr_renovated",
"zipcode", "lat", "long", "sqft_living15", "sqft_lot15"]] = df[
["id", "price", "bedrooms", "bathrooms", "sqft_living", "sqft_lot", "floors", "waterfront", "view",
"condition", "grade", "sqft_above", "sqft_basement", "yr_built", "yr_renovated",
"zipcode", "lat", "long", "sqft_living15", "sqft_lot15"]].apply(pd.to_numeric)
df['date'] = df['date'].astype("str").apply(lambda s: s[:8])
df['date'] = df['date'].astype('float64')
df = df[
(df["id"] >= 1) &
(df["date"] >= 20000000) & (df["date"] <= 20220000) &
(df["price"] >= 50000) & (df["price"] <= 10000000) &
(df["bedrooms"] >= 0) & (df["bedrooms"] <= 15) &
(df["bathrooms"] >= 0) & (df["bathrooms"] <= 12) &
(df["sqft_living"] >= 200) & (df["sqft_living"] <= 100000) &
(df["sqft_lot"] >= 450) & (df["sqft_lot"] <= 1800000) &
(df["floors"] >= 1) & (df["floors"] <= 4) &
(df["waterfront"] == 0) | (df["waterfront"] == 1) &
(df["view"] >= 0) & (df["view"] <= 4) &
(df["condition"] >= 1) & (df["condition"] <= 5) &
(df["grade"] >= 1) & (df["grade"] <= 13) &
(df["sqft_above"] >= 250) & (df["sqft_above"] <= 10000) &
(df["sqft_basement"] >= 0) & (df["sqft_basement"] <= 5000) &
(df["yr_built"] >= 1800) & (df["yr_built"] <= 2022) &
(df["yr_renovated"] >= 0) & (df["yr_renovated"] <= 2022) &
(df["zipcode"] >= 98000) & (df["zipcode"] <= 99000) &
(df["lat"] >= 47) & (df["lat"] <= 48) &
(df["long"] >= -123) & (df["long"] <= -121) &
(df["sqft_living15"] >= 300) & (df["sqft_living15"] <= 10000) &
(df["sqft_lot15"] >= 300) & (df["sqft_lot15"] <= 1000000)
]
# inserting the "yr_renovated" col the last year in which the building had had any renovation.
df["yr_renovated"] = df[["yr_built", "yr_renovated"]].max(axis=1)
prices_by_zipcode = pd.DataFrame({'zipcode': df['zipcode'], 'price': df['price']})
prices_by_zipcode = prices_by_zipcode.groupby('zipcode').mean()
prices_by_zipcode.rename(columns={'price': 'mean_price'}, inplace=True)
df = pd.merge(df, prices_by_zipcode, on='zipcode')
    df = df.drop(['id', 'zipcode', 'lat', 'long'], axis=1)
return df
| 16,893
|
def expect_returns(
signature: Signature,
*types: Optional[Type],
allow_unannotated: bool = True,
):
"""
Check if the function signature returns one of the given types
"""
if signature.return_annotation == Signature.empty:
if not allow_unannotated:
raise InvalidReturnType(f"return type must be annotated")
elif signature.return_annotation not in types:
formatted = ["None" if t is None else t.__name__ for t in types]
raise InvalidArgument(
f"expected return type to be one of {formatted}, got {signature.return_annotation.__name__!r}"
)
| 16,894
|
def version_match(required, candidate):
"""Test that an available version is a suitable match for a required
version.
To be suitable a version must be of the same major version as required
and be at least a match in minor/patch level.
eg. 3.3 is a match for a required 3.1 but 4.1 is not.
:param tuple required: the version that must be met.
:param tuple candidate: the version to test against required.
:returns: True if candidate is suitable False otherwise.
:rtype: bool
"""
return _discover.version_match(required, candidate)
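
# Hedged usage sketch mirroring the docstring example (delegates to the
# discover module, assumed importable as _discover in this package).
if __name__ == "__main__":
    print(version_match((3, 1), (3, 3)))  # True: same major, minor is high enough
    print(version_match((3, 1), (4, 1)))  # False: different major version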
| 16,895
|
def get_last_upgraded_at(module: base.Module) -> Optional[datetime.datetime]:
"""
Get the timestamp of the last time this module was upgraded.
"""
return settings.get_last_upgraded_at(module.name)
| 16,896
|
def parse_header_file(header_file):
"""Parse a single header file to get all defined constants out of it."""
resolved_values = collections.OrderedDict()
raw_matches = {}
with open(header_file, "r") as fd:
all_file_lines = collections.OrderedDict(
[
(lineno, line.strip())
for lineno, line in enumerate(fd, start=1)
if not line.isspace()
]
)
line_iterator = iter(all_file_lines.items())
for lineno, line in line_iterator:
line, _comment = clean_line(line)
# First check to see if this is a #define statement
match = re.match(r"^#define\s+UC_(?P<id>\w+)\s+(?P<value>.*)$", line)
if match:
name = "UC_" + match.group("id")
raw_value = match.group("value")
try:
resolved_values[name] = ast.literal_eval(raw_value)
except (NameError, SyntaxError, ValueError):
raw_matches[name] = raw_value
continue
# Not a #define; see if it's an enum.
if "enum uc_" not in line.lower():
continue
# This is the beginning of an enum. Subsequent lines until the closing `}` are
# part of it. We need to keep track because enums without an explicitly defined
# value are incremented by one from the previous enum value.
next_enum_value = 0
enum_start_line = lineno
while True:
lineno, line = next(line_iterator, (None, None))
if line is None:
# Hit EOF before we hit the end of the enum. That's odd.
logging.warning(
"Hit EOF before end of enum beginning on line %d.", enum_start_line
)
break
elif "}" in line:
# Hit the end of the enum.
break
line, _comment = clean_line(line)
# Sometimes we have multiple enum definitions on one line. We need to handle
# these one at a time. Splitting the line by commas should be enough to
# separate out multiple expressions.
for expression in line.strip(",").split(","):
expression = expression.strip()
if not expression:
continue
# See if this enum value is being assigned rather than implicit.
match = re.match(r"^UC_(?P<id>\w+)\s*=\s*(?P<expr>.+)$", expression)
if match:
# Enum value is assigned. Whatever's on the right-hand side, any
# names it references must already be defined.
name = "UC_" + match.group("id")
raw_value = match.group("expr")
try:
processed_value = eval(raw_value, resolved_values)
except NameError as nerr:
logging.error(
"Failed to resolve %r on line %d: %s", name, lineno, nerr
)
continue
resolved_values[name] = processed_value
next_enum_value = processed_value + 1
else:
# Not an explicit assignment. Expect this expression to be just a
# single identifier.
match = re.match(r"^UC_(\w+)$", expression)
if match:
name = match.group(1)
resolved_values["UC_" + name] = next_enum_value
next_enum_value += 1
else:
raise SyntaxError(
"Couldn't match any expression type to: %r" % expression
)
for name, raw_value in raw_matches.items():
# Convert any remaining values that are still unresolved. This usually only
# applies to #define macros that reference other constants.
if name not in resolved_values:
resolved_values[name] = eval(raw_value, resolved_values)
return resolved_values
| 16,897
|
def get_linux_distribution(get_full_name, supported_dists):
"""Abstract platform.linux_distribution() call which is deprecated as of
Python 3.5 and removed in Python 3.7"""
try:
supported = platform._supported_dists + (supported_dists,)
osinfo = list(
platform.linux_distribution(
full_distribution_name=get_full_name,
supported_dists=supported
)
)
if not osinfo or osinfo == ['', '', '']:
return get_linux_distribution_from_distro(get_full_name)
full_name = platform.linux_distribution()[0].strip()
osinfo.append(full_name)
except AttributeError:
return get_linux_distribution_from_distro(get_full_name)
return osinfo
| 16,898
|
def update(
name: str,
git: bool = False,
local: bool = False,
url: str = "",
head: str = "",
tag: str = "",
commit_hash: str = "",
location: str = "",
):
"""Update a package
"""
target_location = ".refl"
p = Project.load("project.refl")
dependencies = [x for x in p.project.dependencies if x.name == name]
dependency: Optional[Package] = None
if len(dependencies) > 0:
dependency = dependencies[0]
if dependency is None:
log.error("Could not find the dependency, maybe try with --soft")
return
if not git and not local:
git = Origin.parse(dependency.origin) is Origin.GIT
local = Origin.parse(dependency.origin) is Origin.LOCAL
if git:
url = dependency.options["git_url"] if url is None else url
name = git_parse(url).repo if name is None else name
origin = Origin.GIT
options = GitOptions(git_url=url, head=head, commit_hash=commit_hash, tag=tag)
elif local:
location = dependency.options["location"] if location is None else location
assert name is not None, "Please pass the name of the package: --name <name>"
origin = Origin.LOCAL
options = LocalOptions(local_url=location)
i = InstallPackage(name=name, origin=origin, options=options)
i(target_location)
# Uninstall the current package
u = UninstallPackage(name=name)
removed = u(target_location, non_exact=False)
p.remove_dependency(name=name)
p.add_dependency(
name=name,
git=git,
local=local,
url=url,
head=head,
tag=tag,
commit_hash=commit_hash,
location=location,
)
| 16,899
|