def addsParallelTrack(Area, initPoint, alt):
"""
Adds mission to perform Parallel Track across specified area.
"""
    dFSA = 30 # Functional Search Area (m^2); used below as the area of a circular search footprint
cmds = vehicle.commands
print ( " Clear any existing commands")
cmds.clear()
print ( " Define/add new commands.")
# Add new commands. The meaning/order of the parameters is documented in the Command class.
#Calculate track properties
trackLength = 2 * math.sqrt(dFSA/math.pi)
legConst = 5 #Arbitrary ratio of leg length to track length
legLength = legConst * trackLength #Leg length function of dFSA radius
numLegs = Area / (trackLength * legLength)
#Add MAV_CMD_NAV_TAKEOFF command. This is ignored if the vehicle is already in the air.
cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, 0, 0, 0, 0, 0, 0, 0, 0, alt))
#Go to initial point as specified by user
cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, initPoint.lat, initPoint.lon, alt))
#Define waypoint pattern - point(lat, long, alt)
    i = 1
waypoint = initPoint
while i <= numLegs :
# Strafe
waypoint = get_location_metres(waypoint, 0, (legLength*(-1)**i))
cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, waypoint.lat, waypoint.lon, alt))
# Advance
waypoint = get_location_metres(waypoint, trackLength, 0)
cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, waypoint.lat, waypoint.lon, alt))
i += 1
# Return to Launch
cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, initPoint.lat, initPoint.lon, alt))
print ( " Upload new commands to vehicle")
cmds.upload()
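Note: get_location_metres is referenced above but not defined in this snippet. A minimal sketch, based on the standard DroneKit example helper and assuming a LocationGlobal-style object with lat/lon/alt attributes:

import math
from dronekit import LocationGlobal

def get_location_metres(original_location, dNorth, dEast):
    """Return a LocationGlobal offset dNorth/dEast metres from original_location.
    Uses a spherical-earth approximation, accurate enough for short survey legs."""
    earth_radius = 6378137.0  # metres
    dLat = dNorth / earth_radius
    dLon = dEast / (earth_radius * math.cos(math.pi * original_location.lat / 180))
    newlat = original_location.lat + (dLat * 180 / math.pi)
    newlon = original_location.lon + (dLon * 180 / math.pi)
    return LocationGlobal(newlat, newlon, original_location.alt)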
| 5,340,300
|
def longest_substring_using_lists(s: str) -> int:
"""
find the longest substring without repeating characters
644 ms 14.3 MB
>>> longest_substring_using_lists("abac")
3
>>> longest_substring_using_lists("abcabcbb")
3
>>> longest_substring_using_lists("bbbbb")
1
>>> longest_substring_using_lists("pwwkew")
3
"""
words = list()
longest = 0
for char in s:
# for each character
removals = []
for word_idx in range(len(words)):
# check all found words for the char
word = words[word_idx]
if char in word:
# if it exists then set its length to longest if it is the longest
longest = max(longest, len(word))
removals.append(word)
else:
# else add char to word
words[word_idx] += char
for remove in removals:
words.remove(remove)
# add char into words
words.append(char)
    return max([longest] + [len(word) for word in words])  # list form avoids a TypeError when s is empty
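For comparison, a minimal sketch of the standard O(n) sliding-window approach to the same problem (not part of the original snippet):

def longest_substring_sliding_window(s: str) -> int:
    # Track the last index where each character was seen and move the window
    # start past any repeat; O(n) time, O(min(n, alphabet)) space.
    last_seen = {}
    longest = start = 0
    for i, char in enumerate(s):
        if char in last_seen and last_seen[char] >= start:
            start = last_seen[char] + 1
        last_seen[char] = i
        longest = max(longest, i - start + 1)
    return longest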
| 5,340,301
|
def tensorflow_custom_preprocessing_example():
"""Example usage to get face embeddings from cropped image of human face"""
import numpy as np
from tensorflow.keras.preprocessing import image
image_preprocessor = create_preprocessing_model()
embeddings_model = VGGFace(model="senet50", pooling="avg", include_top=False, input_shape=(224, 224, 3))
img = image.load_img('../image/ajb.jpg', target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
preprocessed = image_preprocessor.predict(x)
embeddings = embeddings_model.predict(preprocessed)
print("TensorFlow embeddings: ", embeddings)
| 5,340,302
|
def getuserobj(user_id=None):
"""
    Login helper that checks whether a user exists.
    :param user_id: user id (username)
    :return: the user record if it exists, otherwise None
"""
dbobj = connectMysql.connectMysql()
    if user_id is None or user_id == '':
dbobj.close_db()
return None
else:
        userdata = dbobj.select_db(sql="select * from secret where ID = %s " % user_id)  # note: string-formatted SQL; prefer a parameterised query if the helper supports one
        if not userdata:
# print("ID = %s and password = %s 未查询到数据" % (user_id, password))
dbobj.close_db()
return None
else:
dbobj.close_db()
return userdata[0]
| 5,340,303
|
def _parse_track_df(df: pd.DataFrame, track_id: int, track_name: str, track_comment: str,
data_year: int) -> dict:
"""
parses track data
:param df: data representing a track
:param track_id: track id
:param track_name: track name
:param track_comment: track comment
:param data_year: year to which the data is relevant
:return: parsed data
"""
must = from_list = choice = corner_stones = complementary = minor = additional_hug = 0
point_columns = [i for i, c in enumerate(df.columns) if 'כ נקודות' in c]
for i, r in df.iterrows():
category = r[0]
if 'סה\"כ' in category:
continue
raw_points = [r[i] for i in point_columns]
for raw_point in raw_points:
if not raw_point or pd.isnull(raw_point): # no need to take Nan or 0 value
continue
try:
points = float(raw_point)
except ValueError:
match = RE_RANGE.match(raw_point) or RE_MIN.match(raw_point)
if match:
points = float(match[1] or match[2])
else:
continue
if category in (MUST, MUST_IN_HUG, MUST_PROGRAMMING, MUST_SAFETY_LIBRARY) \
or MUST in category:
must += points
elif category in CHOICE_FROM_LIST or 'במסגרת האשכול' in category:
from_list += points
elif category == CHOICE_IN_HUG:
choice += points
elif CORNER_STONES in category:
corner_stones += points
elif category == COMPLEMENTARY:
complementary += points
elif category == MINOR:
minor += points
elif category == ADDITIONAL_HUG:
additional_hug += points
else:
# print(f'Could not identify {category}={raw_point}, defaulting to MUST')
must += points
return {'track_number': track_id,
'data_year': data_year,
'name': track_name,
'points_must': must,
'points_from_list': from_list,
'points_choice': choice,
'points_complementary': complementary,
'points_corner_stones': corner_stones,
'points_minor': minor,
'points_additional_hug': additional_hug,
'comment': track_comment or ''}
| 5,340,304
|
def parse_tileset(
tileset: TileSet
) -> Tuple[Mapping[Axes, int], TileCollectionData]:
"""
Parse a :py:class:`slicedimage.TileSet` for formatting into an
:py:class:`starfish.imagestack.ImageStack`.
Parameters:
-----------
tileset : TileSet
The tileset to parse.
Returns:
--------
    Tuple[Mapping[Axes, int], TileCollectionData] :
A tuple consisting of the following:
1. The (y, x) size of each tile.
2. A :py:class:`starfish.imagestack.tileset.TileSetData` that can be queried to obtain
the image data and extras metadata of each tile, as well as the extras metadata of
the entire :py:class:`slicedimage.TileSet`.
"""
tile_data = TileSetData(tileset)
tile_shape = tileset.default_tile_shape
# if we don't have the tile shape, then we peek at the first tile and get its shape.
if tile_shape is None:
tile_key = next(iter(tile_data.keys()))
tile = tile_data.get_tile_by_key(tile_key)
tile_shape = tile.tile_shape
return (
tile_shape,
tile_data,
)
| 5,340,305
|
def travis():
"""
Add a .travis.yml file to run dialog flow tests
"""
cfg.travis()
| 5,340,306
|
def test_walk(setup_groups):
"""Test the ``GroupPathX.walk()`` function."""
group_path = GroupPathX()
assert [c.path for c in sorted(group_path.walk())] == [
"a",
"a/b",
"a/c",
"a/c/d",
"a/c/e",
"a/c/e/g",
"a/f",
]
| 5,340,307
|
def pad_sents(sents, pad_token):
""" Pad list of sentences(SMILES) according to the longest sentence in the batch.
@param sents (list[list[str]]): list of SMILES, where each sentence
is represented as a list of tokens
@param pad_token (str): padding token
@returns sents_padded (list[list[str]]): list of SMILES where SMILES shorter
than the max length SMILES are padded out with the pad_token, such that
each SMILES in the batch now has equal length.
"""
    max_length = max(len(sentence) for sentence in sents)
    sents_padded = [sentence + (max_length - len(sentence)) * [pad_token] for sentence in sents]
return sents_padded
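A short usage example (hypothetical token lists) showing the padding behaviour:

sents = [["C", "C", "O"], ["C"]]
print(pad_sents(sents, "<pad>"))
# [['C', 'C', 'O'], ['C', '<pad>', '<pad>']]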
| 5,340,308
|
def bicubic_interpolation_filter(sr):
"""Creates a bicubic interpolation filter."""
return _interpolation_filter(sr, cv2.INTER_CUBIC)
| 5,340,309
|
def prepareTrainingData(imgs, pathOut, psfStats, imgInBitDepth, samplesPerImg, cameraResolution):
"""
Prepare raw images to train the CNN for MTF estimation.
:param imgs: Array of raw 1-channel images.
:param pathOut: Path to the destination TFRecord file.
:param psfStats: Real PSF images with corresponding metadata (to be published by the original work authors).
:param imgInBitDepth: Bit depth of the input images (default: 16).
:param samplesPerImg: Number of blur kernels to use per image (default: 6).
:param cameraResolution: If psfStats is not None, you may specify the original camera resolution (default: [8688, 5792]).
:return: Images (with PSFs applied) and corresponding label (MTF values) wrapped in a TFRecord file.
"""
imgInMaxVal = 2 ** imgInBitDepth - 1
imgOutMaxVal = 255.0
writer = tf.io.TFRecordWriter(pathOut)
# Iterate training images
for imgIdx, img in enumerate(imgs):
print("Image number:", imgIdx)
if img.shape != (256, 256):
continue
# Normalize image to range [0,1]
        img = img / imgInMaxVal  # imgInMaxVal is already 2**imgInBitDepth - 1
        # If there are PSFs given, pick some randomly. Otherwise, PSFs are simulated later.
if psfStats is not None:
psfStatsRandIdx = np.random.choice(range(len(psfStats)), samplesPerImg, replace=False)
psfStatsRand = [psfStats[i, ...] for i in psfStatsRandIdx]
else:
psfStatsRand = range(samplesPerImg)
for psfStat in psfStatsRand:
            # If there are PSFs, rotate them so that radial and tangential directions align with x and y directions.
            # Note: Not needed for estimations in horizontal and vertical image directions.
if psfStats is not None:
psfSensorPos = [psfStat[0], psfStat[1]]
psf = psfStat[2]
psf = rotateImg(psf, psfSensorPos, cameraResolution)
psf = psf / np.sum(psf)
# Combine PSF with random gaussian blur for more variety.
if uniform(0, 1) <= 0.3:
randomSigma = uniform(0, 5)
randGaussKernel = generateGaussKernel(randomSigma)
psf = cv2.filter2D(psf, -1, randGaussKernel)
# If there are no PSFs given, simulate random motion blur or defocus blur kernels.
else:
# According to PSF sizes of the original paper, to assure the correct MTF indices.
kernelSize = 111
if uniform(0, 1) <= 0.5:
randomIntensity = uniform(0, 1)
psf = generateMotionBlurKernel(None, kernelSize, randomIntensity, asNumpyArray=True)
else:
randomKernelDiameter = int(uniform(0, 31))
psf = generateDefocusKernel(randomKernelDiameter, kernelSize)
# Apply PSF to image and also get a 90° rotated version.
blurredImg = cv2.filter2D(img, -1, psf)
blurredImgRot = cv2.rotate(blurredImg, cv2.ROTATE_90_CLOCKWISE)
# Extract MTF values from PSF in horizontal and vertical image directions.
n = psf[0].size
mtf = abs(fftshift(fft2(psf)))
mtfRad = mtf[n//2, n//2:n-1]
            mtfTang = mtf[n//2:n-1, n//2]
labelRad = [mtfRad[i] for i in FREQ_INDICES]
labelTang = [mtfTang[i] for i in FREQ_INDICES]
# Encode image as .png and append it with the horizontal MTF values to the TFRecord dataset.
blurredImg = (blurredImg * imgOutMaxVal).astype("uint8")
encoded_image_string = cv2.imencode(".png", blurredImg)[1].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'X': tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded_image_string])),
'y': tf.train.Feature(float_list=tf.train.FloatList(value=labelRad))
}))
writer.write(example.SerializeToString())
# Encode the rotated image as .png and append it with the vertical MTF values to the TFRecord dataset.
blurredImgRot = (blurredImgRot * imgOutMaxVal).astype("uint8")
encoded_image_string = cv2.imencode(".png", blurredImgRot)[1].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'X': tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded_image_string])),
'y': tf.train.Feature(float_list=tf.train.FloatList(value=labelTang))
}))
writer.write(example.SerializeToString())
writer.close()
print("Finished writing .tfrecord file.")
| 5,340,310
|
def is_receive_waiting():
"""Check to see if a payload is waiting in the receive buffer"""
#extern RADIO_RESULT radio_is_receive_waiting(void);
res = radio_is_receive_waiting_fn()
    # res is RADIO_RESULT_OK_TRUE or RADIO_RESULT_OK_FALSE,
    # so it is safe to evaluate it as a boolean.
return (res != 0)
| 5,340,311
|
def wait(game):
"""Do nothing."""
outputter.display_game_text('You doze off for a while. Nothing happens.')
| 5,340,312
|
def test_cli():
"""Test the main CLI command."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert "Show the version and exit." in result.output
assert "Show this message and exit." in result.output
| 5,340,313
|
def _validate_matching_filter_data_type(part_types, filters: List[type(Filter)]) -> None:
""" Validate that the filters passed are matching to the partitions'
listed datatypes, otherwise throw a ValueError.
This includes validating comparisons too.
Args:
part_types (dict): A dictionary of all partitions to their datatypes
filters (List[type(Filter)]): List of filters to validate
Returns:
None
"""
num_comparisons = [
">",
"<",
"<=",
">="
]
for f in filters:
try:
fil_part = part_types[f["partition"]]
        except KeyError:
raise ValueError("Filter does not have a matching partition.")
if (f["comparison"] in num_comparisons):
if fil_part in NON_NUM_TYPES:
raise ValueError(
f"Comparison {f['comparison']} cannot be used on partition types of {fil_part}")
| 5,340,314
|
def residual_block(
x,
filters: int,
weight_decay: float,
*,
strides: typing.Union[int, typing.Tuple[int, int]],
dilation: typing.Union[int, typing.Tuple[int, int]],
groups: int,
base_width: int,
downsample,
use_basic_block: bool,
use_cbam: bool,
cbam_channel_reduction: int,
activation: str,
pre_activation: bool,
small_input: bool,
name: str,
):
""" Residual block.
    Design follows [2], where strides=2 is used in the 3x3 convolution instead of the
    first 1x1 convolution of the bottleneck block. This increases Top-1 accuracy by
    ~0.5%, with a slight performance drawback of ~5% images/sec. The last BN in each
    residual branch is zero-initialized following [3], so that the residual branch
    starts with zeros and each residual block behaves like an identity. This improves the model by 0.2~0.3%.
- Attention Layers
- CBAM: Convolutional Block Attention Module
[1] Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385
[2] resnet_50_v1_5_for_pytorch
https://ngc.nvidia.com/catalog/model-scripts/nvidia
[3] Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour
https://arxiv.org/abs/1706.02677
[4] Identity Mappings in Deep Residual Networks
https://arxiv.org/abs/1603.05027
"""
    block_fn = basic if use_basic_block else bottleneck  # avoid eval(); resolve the block function directly
    x = block_fn(
x,
filters,
weight_decay,
strides=strides,
dilation=dilation,
groups=groups,
base_width=base_width,
downsample=downsample,
use_cbam=use_cbam,
cbam_channel_reduction=cbam_channel_reduction,
activation=activation,
pre_activation=pre_activation,
small_input=small_input,
name=name,
)
return x
| 5,340,315
|
def trace_downstream_main(in_features, in_fdr_raster, out_feature_class, surface_raster=None):
""" Trace downstream from input features, using the flow direction raster
returning polyline of the trace.
"""
try:
arcpy.env.overwriteOutput = True
# Get information about flow raster.
# Assuming the surface raster matches in all respects
fdr_raster = arcpy.Raster(in_fdr_raster)
cell_width = fdr_raster.meanCellWidth
cell_height = fdr_raster.meanCellHeight
max_row = fdr_raster.height # number of rows
max_col = fdr_raster.width # number of columns
upper_left = fdr_raster.extent.upperLeft
# Create output feature class
spatial_ref = get_template_information(in_fdr_raster)
has_z = get_has_z(surface_raster)
path, name = os.path.split(out_feature_class)
arcpy.management.CreateFeatureclass(
path, name, geometry_type="POLYLINE",
has_z=has_z,
spatial_reference=spatial_ref)
# Add a field to transfer FID from input
arcpy.management.AddField(out_feature_class, "ORIG_FID", "LONG")
# convert rasters to arrays
fdr = arcpy.RasterToNumPyArray(in_fdr_raster, nodata_to_value=0)
# the surface could be DEM or filled DEM
        if surface_raster is not None:
            fill = arcpy.RasterToNumPyArray(surface_raster, nodata_to_value=0)
with arcpy.da.InsertCursor(out_feature_class, ["SHAPE@", "ORIG_FID"]) as insert_cursor:
with arcpy.da.SearchCursor(in_features, ["SHAPE@XY", "OID@"]) as read_cursor:
for read_row in read_cursor:
pnt = read_row[0]
oid = read_row[1]
# convert point coordinates into raster indices
col = abs(int((upper_left.X - pnt[0]) / cell_width))
row = abs(int((upper_left.Y - pnt[1]) / cell_height))
# Create an array object needed to create features
array = arcpy.Array()
                if surface_raster is not None:
# get the Z value of the surface at (row ,col)
                    point_z = fill[row, col].item()  # numpy.asscalar is deprecated
else:
point_z = 0
# Add the initial point to the array
array.add(arcpy.Point(pnt[0], pnt[1], point_z))
# Loop thru the trace
done = False
while not done:
# move to downstream cell
last_r = row # store current r value
last_c = col # store current c value
row, col = move_to_next_pixel(fdr, row, col)
                    if surface_raster is not None:
# get the Z value of the surface at (row ,col)
                        point_z = fill[row, col].item()
else:
point_z = 0
# Calculate the coordinates of x and y (in map units)
point_x = get_coord_x(col, cell_width, upper_left)
point_y = get_coord_y(row, cell_height, upper_left)
# save this coordinate to our list
array.add(arcpy.Point(point_x, point_y, point_z))
# Check to see if done
# If not moved from last location (sink)
if last_r == row and last_c == col:
done = True
# Check to see if out of bounds
                    if row < 0 or row >= max_row:
done = True
                    if col < 0 or col >= max_col:
done = True
# Done Tracing
# add the feature using the insert cursor
polyline = arcpy.Polyline(array, spatial_ref, True, False)
insert_cursor.insertRow([polyline, oid])
except arcpy.ExecuteError:
line, filename, err = trace()
err_message = 'Geoprocessing error on {} of {}'.format(line, filename)
        print(err_message)
arcpy.AddError(err_message)
arcpy.AddError(arcpy.GetMessages(2))
except:
line, filename, err = trace()
err_message = 'Python error on {} of {} : with error - {}'.format(line, filename, err)
        print(err_message)
arcpy.AddError(err_message)
finally:
# Final cleanup goes here
pass
| 5,340,316
|
def vacancy_based_on_freq(service,duration,frequency,earliest,latest,local_timezone):
"""
    Check for vacant timeslots of the user-entered duration, for the requested frequency (days per week).
service: get authentication from Google
duration: the length of the new event (int)
frequency: number of days in a week (int)
earliest: earliest time for timeframe (int)
latest: latest time for timeframe (int)
local_timezone: assigned timezone
"""
result = {}
week = 7
    for i in range(week):
        slot = check_vacancy(service, duration, i + 1, earliest, latest, local_timezone)  # call once and reuse
        if slot is None:
            print(f'No slots left on this date. Still {frequency} spots left in the week to fill.')
        else:
            result[i + 1] = slot
            frequency -= 1
            print(f'Yes! There is a timeslot! Now {frequency} spots left in the week.')
if frequency == 0:
break
return result
| 5,340,317
|
def run_grid_search(fixed_height, threshold, net_weight, net_thresh, stroke_width_thresh, text_height_thresh,
text_line_percentage):
"""
Run a grid search on different hyperparameters to find the best setting for detecting headings on a page.
:param fixed_height: All images should be scaled to have this fixed height.
:type fixed_height: int
:param threshold: A value in [0, 1] that should be reached by the heading detection confidence value for a specific
text line to define it as a heading text line.
:type threshold: float
:param net_weight: A value in [0, 10] that weights the confidence value of the neural network output. Gets
downscaled to [0, 1] later on.
:type net_weight: int
:param net_thresh: If the net confidence is greater than or equal to this value the text line is considered a
heading. Gets downscaled to [0, 1] later on.
:type net_thresh: int
:param stroke_width_thresh: If the confidence of the stroke width feature coming from the distance transformation
is greater than or equal to this value the text line is considered a heading. Gets downscaled to [0, 1] later on.
:type stroke_width_thresh: int
:param text_height_thresh: If the confidence of the text height feature coming from the distance transformation
is greater than or equal to this value the text line is considered a heading. Gets downscaled to [0, 1] later on.
:type text_height_thresh: int
:param text_line_percentage: For a TextRegion to be defined as a heading region at least this percentage of text
lines should be recognized as headings by the algorithm. Defined as a value in [0, 10]. Gets downscaled to [0, 1]
later on.
:type text_line_percentage: int
:return:
"""
net_weight_f = net_weight / 10
net_thresh_f = net_thresh / 10
stroke_width_thresh_f = stroke_width_thresh / 10
text_height_thresh_f = text_height_thresh / 10
text_line_percentage_f = text_line_percentage / 10
sw_th_thresh_upper_bound = min(stroke_width_thresh, text_height_thresh)
for sw_th_thresh in range(sw_th_thresh_upper_bound - 1, sw_th_thresh_upper_bound + 1, 1):
sw_th_thresh_f = sw_th_thresh / 10
for stroke_width_weight in range(0, 10 - net_weight + 1, 1):
stroke_width_weight_f = stroke_width_weight / 10
text_height_weight_f = (10 - net_weight - stroke_width_weight) / 10
os.system("python -u "
"./citlab-article-separation/article_separation/image_segmentation/net_post_processing/heading_evaluation.py "
"--path_to_gt_list {} "
"--path_to_pb {} "
"--fixed_height {} "
"--threshold {} "
"--net_weight {} "
"--stroke_width_weight {} "
"--text_height_weight {} "
"--gpu_devices '' "
"--log_file_folder {} "
"--net_thresh {} "
"--stroke_width_thresh {} "
"--text_height_thresh {} "
"--sw_th_thresh {} "
"--text_line_percentage {}"
.format(PATH_TO_GT_LIST, PATH_TO_PB, fixed_height, threshold, net_weight_f, stroke_width_weight_f,
text_height_weight_f, LOG_FILE_FOLDER, net_thresh_f, stroke_width_thresh_f,
text_height_thresh_f, sw_th_thresh_f, text_line_percentage_f))
| 5,340,318
|
def show(vol_path):
""" Execute qemu-img show inside a container, direct mapping the volume """
name = "qemu-img"
image = "breqwatr/qemu-img:latest"
path = Path(vol_path)
    vol_abspath = str(path.absolute())
run = f"qemu-img info {vol_abspath}"
mount = f"-v {vol_abspath}:{vol_abspath}"
cmd = f"docker run --rm -it --name {name} {mount} {image} {run}"
shell(cmd)
| 5,340,319
|
def load_extensions():
"""
NOTE: This code is a copy of the code in econ_platform_core.extensions.__init__.py.
I will need to figure out how to make this function not use the current directory.
TODO: Merge this function with the one in econ_platform_core.
    Imports all *.py files in this directory (in alphabetical order).
    Since the order of import will eventually matter, something will need to be added to force an order of import operations.
    For now, not an issue (the alphabetical-order rule can be used to fix problems).
    All errors are caught and largely ignored (other than listing the module that failed, with a text dump on the
    console).
    Returns (loaded_extensions, failed_extensions, decorated_fails)
The operations on import of an extension:
(1) The import itself. If you wish, you can just put a script that is executed.
(2) If the module has a variable (hopefully a string) with the name 'extension_name', that is used as the extension
name for display, otherwise it is the name of the text file.
(3) If the module has a main() function, it is called.
Since logging is not yet initialised, things are dumped to console rather than logged. (If you really need logging
for debugging purposes, you could turn on logging in the extension.)
:return: list
"""
# There might be some iteration tools in importlib, but no time to read documentation...
this_dir = os.path.dirname(__file__)
flist = os.listdir(this_dir)
# Do alphabetical order
flist.sort()
exclusion_list = ['__init__']
loaded_extensions = []
failed_extensions = []
decorated_fails = []
use_monkey_example = econ_platform_core.PlatformConfiguration['Options'].getboolean('UseMonkeyPatchExample')
if not use_monkey_example:
exclusion_list.append('monkey_patch_example')
for fname in flist:
fname = fname.lower()
if not fname.endswith('.py'):
continue
fname = fname[:-3]
if fname in exclusion_list:
continue
# Import it!
try:
mod = importlib.import_module('econ_platform.extensions.' + fname)
if hasattr(mod, 'extension_name'):
fname = str(mod.extension_name)
# Try running main()
if hasattr(mod, 'main'):
mod.main()
print('Extension {0} loaded.'.format(fname))
loaded_extensions.append(fname)
except Exception as ex:
print('Failure loading extension:', fname)
print(type(ex), str(ex))
failed_extensions.append(fname)
decorated_fails.append((fname, str(ex)))
return (loaded_extensions, failed_extensions, decorated_fails)
| 5,340,320
|
def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00):
"""! @brief Convert a list of bytes to a list of n-bit integers (little endian)
If the length of the data list is not a multiple of `bitwidth` // 8, then the pad value is used
for the additional required bytes.
@param data List of bytes.
@param bitwidth Width in bits of the resulting values.
@param pad Optional value used to pad input data if not aligned to the bitwidth.
@result List of integer values that are `bitwidth` bits wide.
"""
bytewidth = bitwidth // 8
datalen = len(data) // bytewidth * bytewidth
res = [sum((data[offset + i] << (i * 8)) for i in range(bytewidth))
for offset in range(0, datalen, bytewidth)
]
remainder = len(data) % bytewidth
if remainder != 0:
pad_count = bytewidth - remainder
padded_data = list(data[-remainder:]) + [pad] * pad_count
res.append(sum((padded_data[i] << (i * 8)) for i in range(bytewidth)))
return res
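For illustration, the little-endian packing and padding behaviour (values chosen for this example):

# Four bytes pack into one 32-bit little-endian value.
assert byte_list_to_nbit_le_list([0x78, 0x56, 0x34, 0x12], 32) == [0x12345678]
# A trailing partial group is padded (default pad byte 0x00) before packing.
assert byte_list_to_nbit_le_list([0x78, 0x56, 0x34], 32) == [0x00345678]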
| 5,340,321
|
def admin_not_need_apply_check(func):
"""
    The admin user does not need permission-application checks (admin holds all permissions by default)
"""
@wraps(func)
def wrapper(view, request, *args, **kwargs):
if request.user.username == ADMIN_USER:
raise error_codes.INVALID_ARGS.format(_("用户admin默认拥有任意权限, 无需申请"))
return func(view, request, *args, **kwargs)
return wrapper
| 5,340,322
|
def modify_client_properties(ResourceId=None, ClientProperties=None):
"""
Modifies the properties of the specified Amazon WorkSpaces clients.
See also: AWS API Documentation
Exceptions
:example: response = client.modify_client_properties(
ResourceId='string',
ClientProperties={
'ReconnectEnabled': 'ENABLED'|'DISABLED'
}
)
:type ResourceId: string
    :param ResourceId: [REQUIRED] The resource identifiers, in the form of directory IDs.
:type ClientProperties: dict
    :param ClientProperties: [REQUIRED] Information about the Amazon WorkSpaces client. ReconnectEnabled (string) -- Specifies whether users can cache their credentials on the Amazon WorkSpaces client. When enabled, users can choose to reconnect to their WorkSpaces without re-entering their credentials.
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
WorkSpaces.Client.exceptions.InvalidParameterValuesException
WorkSpaces.Client.exceptions.ResourceNotFoundException
WorkSpaces.Client.exceptions.AccessDeniedException
:return: {}
:returns:
(dict) --
"""
pass
| 5,340,323
|
def add_alias(alias_name, font_name):
"""Add an alias for a font family name.
e.g. add_alias('fixed', 'Monotype')
"""
global aliases
aliases[alias_name] = font_name
| 5,340,324
|
def check_args_preproc(args):
"""Ensure preprocessor parser arguments are valid."""
check_args_common(args)
if args.ecmwf_dir:
args.ggam_dir = args.ecmwf_dir
args.ggas_dir = args.ecmwf_dir
args.spam_dir = args.ecmwf_dir
# Limit should either be all zero or all non-zero.
limit_check = args.limit[0] == 0
for limit_element in args.limit[1:]:
if (limit_element == 0) ^ limit_check:
            warnings.warn('Elements of --limit should be either all zero or all non-zero.',
                          OracWarning, stacklevel=2)
if not os.path.isdir(args.atlas_dir):
raise FileMissing('RTTOV Atlas directory', args.atlas_dir)
if not os.path.isfile(args.calib_file):
raise FileMissing('AATSR calibration file', args.calib_file)
if not os.path.isdir(args.coef_dir):
raise FileMissing('RTTOV coefficients directory', args.coef_dir)
if not os.path.isdir(args.emis_dir):
        raise FileMissing('RTTOV emissivity directory', args.emis_dir)
if not os.path.isdir(args.emos_dir):
raise FileMissing('EMOS temporary directory', args.emos_dir)
if not os.path.isdir(args.ggam_dir):
raise FileMissing('ECMWF GGAM directory', args.ggam_dir)
if not os.path.isdir(args.ggas_dir):
raise FileMissing('ECMWF GGAS directory', args.ggas_dir)
if not os.path.isdir(args.hr_dir):
raise FileMissing('ECMWF high resolution directory', args.hr_dir)
    if not os.path.isdir(args.mcd43c1_dir):
        raise FileMissing('MODIS MCD43C1 directory', args.mcd43c1_dir)
    if not os.path.isdir(args.mcd43c3_dir):
        raise FileMissing('MODIS MCD43C3 directory', args.mcd43c3_dir)
if not os.path.isdir(args.occci_dir):
raise FileMissing('OC CCI directory', args.occci_dir)
if not os.path.isdir(args.nise_dir):
raise FileMissing('NISE directory', args.nise_dir)
if not os.path.isdir(args.spam_dir):
raise FileMissing('ECMWF SPAM directory', args.spam_dir)
if not os.path.isfile(args.usgs_file):
raise FileMissing('USGS file', args.usgs_file)
| 5,340,325
|
def parse_headers(header_list):
"""
Convert headers from our serialized dict with lists for keys to a
HTTPMessage
"""
header_string = b""
for key, values in header_list.items():
for v in values:
header_string += \
key.encode('utf-8') + b":" + v.encode('utf-8') + b"\r\n"
return compat.get_httpmessage(header_string)
| 5,340,326
|
def _rrv_add_ ( s , o ) :
"""Addition of RooRealVar and ``number''
>>> var = ...
>>> num = ...
>>> res = var + num
"""
if not isinstance ( o , val_types ) : return NotImplemented
if isinstance ( o , _RRV_ ) and not o.isConstant() : o = o.ve ()
elif hasattr ( o , 'getVal' ) : o = o.getVal ()
#
v = s.getVal() if s.isConstant() else s.ve()
#
return v + o
| 5,340,327
|
def stations_by_distance(stations, p):
    """Sorts stations by distance from coordinate p and returns a
    list of (station name, town, distance) tuples."""
    from haversine import haversine  # import haversine function from library
    list_station_dist = []  # initialise list to store stations and distances
    # iterate through stations and calculate distances
    for station in stations:
        distance = haversine(station.coord, p)  # distance between station and p
        list_station_dist.append((station.name, station.town, distance))  # add data to list
    sorted_list = sorted_by_key(list_station_dist, 2)  # use sorting module to sort by distance
    return sorted_list
| 5,340,328
|
def create_SHA_256_hash_of_file(file):
"""
Function that returns the SHA 256 hash of 'file'.\n
Logic taken from https://www.quickprogrammingtips.com/python/how-to-calculate-sha256-hash-of-a-file-in-python.html
"""
sha256_hash = hashlib.sha256()
with open(file, "rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
# Converting to upper case because that's what is required by the policy
# service. See their code:
# https://dev.azure.com/msasg/Bing_and_IPG/_git/Aether?path=/src/aether/platform/backendV2/BlueBox/PolicyService/Microsoft.MachineLearning.PolicyService/Workers/CatalogValidation.cs
return sha256_hash.hexdigest().upper()
| 5,340,329
|
def partition_average(partition):
"""Given a partition, calculates the expected number of words sharing the same hint"""
score = 0
total = 0
for hint in partition:
score += len(partition[hint])**2
total += len(partition[hint])
return score / total
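A small worked example (hypothetical hint partition): with groups of sizes 3 and 1, a randomly chosen word shares its hint with an expected (3**2 + 1**2) / 4 = 2.5 words:

partition = {"hint_a": ["crane", "crate", "craze"], "hint_b": ["pound"]}
assert partition_average(partition) == 2.5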
| 5,340,330
|
def set_params(config):
"""Configure parameters based on loaded configuration"""
params = {
'path': None,
'minio': None,
'minio_access_key': None,
'minio_secret_key': None,
'minio_secure': True,
'minio_ca_certs': None,
'minio_bucket': 'catalogue',
'minio_path': '',
'url': None,
'client': None,
'instance': None,
'timeout': DEFAULT_TIMEOUT,
'verify': False,
'cert': None,
'thread_cnt': DEFAULT_THREAD_COUNT,
'wsdl_replaces': DEFAULT_WSDL_REPLACES,
'excluded_member_codes': [],
'excluded_subsystem_codes': [],
'filtered_hours': 24,
'filtered_days': 30,
'filtered_months': 12,
'cleanup_interval': 7,
'days_to_keep': 30,
'work_queue': queue.Queue(),
'results': {},
'results_lock': Lock(),
'shutdown': Event()
}
if 'output_path' in config:
params['path'] = config['output_path']
LOGGER.info('Configuring "path": %s', params['path'])
if 'minio_url' in config:
params['minio'] = config['minio_url']
LOGGER.info('Configuring "minio_url": %s', params['minio'])
if 'minio_access_key' in config:
params['minio_access_key'] = config['minio_access_key']
LOGGER.info('Configuring "minio_access_key": %s', params['minio_access_key'])
if 'minio_secret_key' in config:
params['minio_secret_key'] = config['minio_secret_key']
LOGGER.info('Configuring "minio_secret_key": <password hidden>')
if 'minio_secure' in config:
params['minio_secure'] = config['minio_secure']
LOGGER.info('Configuring "minio_secure": %s', params['minio_secure'])
if 'minio_ca_certs' in config:
params['minio_ca_certs'] = config['minio_ca_certs']
LOGGER.info('Configuring "minio_ca_certs": %s', params['minio_ca_certs'])
if 'minio_bucket' in config:
params['minio_bucket'] = config['minio_bucket']
LOGGER.info('Configuring "minio_bucket": %s', params['minio_bucket'])
if 'minio_path' in config:
params['minio_path'] = config['minio_path']
        params['minio_path'] = params['minio_path'].strip('/')  # strip() returns a new string; the result must be assigned
if params['minio_path']:
params['minio_path'] += '/'
LOGGER.info('Configuring "minio_path": %s', params['minio_path'])
if params['path'] is None and params['minio'] is None:
LOGGER.error('Configuration error: No output path or MinIO URL are provided')
return None
if 'server_url' in config:
params['url'] = config['server_url']
LOGGER.info('Configuring "url": %s', params['url'])
else:
LOGGER.error('Configuration error: Local Security Server URL is not provided')
return None
if 'client' in config and len(config['client']) in (3, 4):
params['client'] = config['client']
LOGGER.info('Configuring "client": %s', params['client'])
else:
LOGGER.error(
'Configuration error: Client identifier is incorrect. Expecting list of identifiers. '
'Example: ["INST", "CLASS", "MEMBER_CODE", "MEMBER_CLASS"])')
return None
if 'instance' in config and config['instance']:
params['instance'] = config['instance']
LOGGER.info('Configuring "instance": %s', params['instance'])
if 'timeout' in config and config['timeout'] > 0.0:
params['timeout'] = config['timeout']
LOGGER.info('Configuring "timeout": %s', params['timeout'])
if 'server_cert' in config and config['server_cert']:
params['verify'] = config['server_cert']
LOGGER.info('Configuring "verify": %s', params['verify'])
if 'client_cert' in config and 'client_key' in config \
and config['client_cert'] and config['client_key']:
params['cert'] = (config['client_cert'], config['client_key'])
LOGGER.info('Configuring "cert": %s', params['cert'])
if 'thread_count' in config and config['thread_count'] > 0:
params['thread_cnt'] = config['thread_count']
LOGGER.info('Configuring "thread_cnt": %s', params['thread_cnt'])
if 'wsdl_replaces' in config:
params['wsdl_replaces'] = config['wsdl_replaces']
LOGGER.info('Configuring "wsdl_replaces": %s', params['wsdl_replaces'])
if 'excluded_member_codes' in config:
params['excluded_member_codes'] = config['excluded_member_codes']
LOGGER.info('Configuring "excluded_member_codes": %s', params['excluded_member_codes'])
if 'excluded_subsystem_codes' in config:
params['excluded_subsystem_codes'] = config['excluded_subsystem_codes']
LOGGER.info(
'Configuring "excluded_subsystem_codes": %s', params['excluded_subsystem_codes'])
if 'filtered_hours' in config and config['filtered_hours'] > 0:
params['filtered_hours'] = config['filtered_hours']
LOGGER.info('Configuring "filtered_hours": %s', params['filtered_hours'])
if 'filtered_days' in config and config['filtered_days'] > 0:
params['filtered_days'] = config['filtered_days']
LOGGER.info('Configuring "filtered_days": %s', params['filtered_days'])
if 'filtered_months' in config and config['filtered_months'] > 0:
params['filtered_months'] = config['filtered_months']
LOGGER.info('Configuring "filtered_months": %s', params['filtered_months'])
if 'cleanup_interval' in config and config['cleanup_interval'] > 0:
params['cleanup_interval'] = config['cleanup_interval']
LOGGER.info('Configuring "cleanup_interval": %s', params['cleanup_interval'])
if 'days_to_keep' in config and config['days_to_keep'] > 0:
params['days_to_keep'] = config['days_to_keep']
LOGGER.info('Configuring "days_to_keep": %s', params['days_to_keep'])
if params['path'] is not None and params['minio'] is not None:
LOGGER.warning('Saving to both local and MinIO storage is not supported')
if params['minio']:
LOGGER.info('Using MinIO storage')
else:
LOGGER.info('Using local storage')
LOGGER.info('Configuration done')
return params
| 5,340,331
|
def encrypt(source, destination, key_pair_name):
"""Encrypts a file or directory."""
try:
encrypt_wrapper(source, destination, key_pair_name)
cleanup(destination)
except Exception as e:
print(f"Error: {e}")
cleanup(destination)
| 5,340,332
|
def trigger(work):
"""
Call to trigger a LP
"""
global _event
old = _event
_event = AsyncResult()
old.set(work)
| 5,340,333
|
def post_stop_watch():
"""
    This method stops the watcher and returns -> "watching": false
"""
url = common.combine_url(
config.INGESTION_AGENT_URL,
config.INGESTION_WATCHER_STATUS,
config.INGESTION_STOP_WATCHER,
)
resp = base_requests.send_post_request(url)
return resp
| 5,340,334
|
def flush():
"""Drop all collections in the database."""
if os.environ.get('FLASK_ENV', 'development') == 'production':
print('You should not flush the database in production.')
return
client = MongoClient(
host=os.environ.get('MONGO_HOST'),
port=int(os.environ.get('MONGO_PORT'))
)
redis_client = redis.Redis(
host=os.environ.get('REDIS_HOST'), port=os.environ.get('REDIS_PORT')
)
# List of collections to drop
collections = [
'users', 'feedback', 'saved_simulations', 'shared_simulations',
'alloys'
]
# Start dropping those collections for each of our dev environments.
print(
'Dropping collections in <{}> database:'.format('arc_dev'),
file=sys.stderr
)
for c in collections:
client['arc_dev'].drop_collection(c)
print(
'Dropping collections in <{}> database:'.format('arc_test'),
file=sys.stderr
)
for c in collections:
        client['arc_test'].drop_collection(c)
dbs_created = redis_client.config_get('databases')
print('Flushing Redis: {}'.format(dbs_created), file=sys.stderr)
redis_client.flushall()
| 5,340,335
|
def combine_grad_fields(field1, field2):
"""
    Combines two gradient fields by summing the gradients at every point.
    The absolute values of each pixel are not of interest.
Inputs:
- field1: np.array(N, M) of Pixels.
- field2: np.array(N, M) of Pixels.
Output:
- out_field: np.array(N, M) of Pixels.
"""
assert field1.shape[0] == field2.shape[0], "field1.shape[0] != field2.shape[0]"
assert field1.shape[1] == field2.shape[1], "field1.shape[1] != field2.shape[1]"
    out_field = np.ndarray(field1.shape, dtype=object)  # np.object is deprecated
N, M = field1.shape
for i in range(N):
for j in range(M):
grad = field1[i, j].grad + field2[i, j].grad
out_field[i, j] = Pixel(i, j, 0, grad)
out_field[i, j].normalize_grad()
return out_field
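The Pixel class is assumed rather than defined in this snippet; a minimal sketch of the interface combine_grad_fields relies on (an (i, j, value, grad) constructor plus normalize_grad):

import numpy as np

class Pixel:
    # Minimal stand-in: grid indices, a scalar value, and a 2D gradient vector.
    def __init__(self, i, j, value, grad):
        self.i, self.j, self.value = i, j, value
        self.grad = np.asarray(grad, dtype=float)

    def normalize_grad(self):
        # Scale the gradient to unit length; leave zero gradients untouched.
        norm = np.linalg.norm(self.grad)
        if norm > 0:
            self.grad = self.grad / norm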
| 5,340,336
|
def plot_time_speed(ax, path):
"""
Makes speed plot
:param ax: axes
:param path: path, contains trajectory items
:return:
"""
if len(path['items']) > 0:
velocities = [item['length'] / item['duration'] * 3600 for item in path["items"]]
times = [item['duration'] for item in path["items"]]
times = [0] + times + times[-1:]
velocities = velocities[0:1] + velocities + velocities[-1:]
ax.step(np.cumsum(times) + path['start_time'], velocities, where='post')
ax.set_ylim(bottom=0)
ax.set_ylim(top=max(velocities) * 1.1)
ax.set_xlabel('Timestamp, s')
ax.set_ylabel('Speed, knt')
ax.grid()
| 5,340,337
|
def start_queue_manager(hostname, portnr, auth_code, logger):
"""
Starts the queue manager process.
"""
p = Process(target=_queue_manager_target, args=(hostname, portnr, auth_code, logger))
p.start()
for i in range(10):
time.sleep(2)
if get_event_queue(hostname, portnr, auth_code) is not None:
break
else:
logger.debug(f"Queue ready {i}")
return p
| 5,340,338
|
def rm(service, key):
"""
Remove a node.
The node is identified by a given KEY, which can be either the node's ID or
its name.
"""
try:
service.delete(key)
click.echo('The node was removed correctly.')
except Exception as e:
click.echo('Unable to remove the node:\n' + str(e))
| 5,340,339
|
def edit_module_form(request, module_id):
"""
Only the instructor who is the creator of the course to which this module belongs can access this.
"""
course = Module.objects.get(moduleID=module_id).getCourse()
if request.user.role != 1 or (course.instructorID.userID != request.user.userID):
context={
'message': "You do not have access to this page."
}
return render(request, 'ICE/message.html', context)
instructor_id = request.user.userID
if request.method == 'POST':
        module = Module.objects.get(moduleID=module_id)
ordNum = 0
for key, value in request.POST.items():
            if key == 'orderNumber':
ordNum = value
course = module.getCourse()
modules = Module.objects.filter(courseID=course.courseID)
maxOrd = 0
sameOrd = 0
for m in modules:
if m.orderNumber > maxOrd:
maxOrd = m.orderNumber
if int(maxOrd) < int(ordNum):
for m in modules:
if m.orderNumber > module.orderNumber:
mod = Module.objects.get(moduleID = m.moduleID)
mod.orderNumber -= 1
mod.save()
            module.orderNumber = course.numOfModules
module.save()
elif int(ordNum) == 0:
for m in modules:
if m.orderNumber < module.orderNumber:
mod = Module.objects.get(moduleID = m.moduleID)
mod.orderNumber += 1
mod.save()
module.orderNumber = 1
module.save()
else:
for m in modules:
if int(m.orderNumber) == int(ordNum):
sameOrd = m.orderNumber
if int(sameOrd) != 0 and int(sameOrd) > int(module.orderNumber):
for m in modules:
if int(m.orderNumber) <= int(sameOrd) and int(m.orderNumber) > int(module.orderNumber):
mod = Module.objects.get(moduleID=m.moduleID)
mod.orderNumber = mod.orderNumber - 1
mod.save()
module.orderNumber = ordNum
module.save()
elif int(sameOrd) != 0 and int(sameOrd) < int(module.orderNumber):
for m in modules:
if int(m.orderNumber) >= int(sameOrd) and int(m.orderNumber) < int(module.orderNumber):
mod = Module.objects.get(moduleID=m.moduleID)
mod.orderNumber = mod.orderNumber + 1
mod.save()
module.orderNumber = ordNum
module.save()
return redirect('../../instructorCourse/courseID='+str(course.courseID)+'&moduleID=1/')
form = EditModuleForm()
return render(request, 'edit_module.html', {'moduleform': form})
| 5,340,340
|
async def test_app_created_then_show_wait_form(
hass, app, app_oauth_client, smartthings_mock):
"""Test SmartApp is created when one does not exist and shows wait form."""
flow = SmartThingsFlowHandler()
flow.hass = hass
smartthings_mock.apps.return_value = []
smartthings_mock.create_app.return_value = (app, app_oauth_client)
result = await flow.async_step_user({'access_token': str(uuid4())})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'wait_install'
| 5,340,341
|
def test_dewpoint_specific_humidity_old_signature():
"""Test dewpoint from specific humidity using old signature issues specific error."""
p = 1013.25 * units.mbar
temperature = 20. * units.degC
q = 0.012 * units.dimensionless
with pytest.raises(ValueError, match='changed in 1.0'):
dewpoint_from_specific_humidity(q, temperature, p)
| 5,340,342
|
def r2(y_true, y_pred):
"""
:math:`R^2` (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: R2
"""
return r2_score(y_true, y_pred)
| 5,340,343
|
def add_message_event(proto_message, span, message_event_type, message_id=1):
"""Adds a MessageEvent to the span based off of the given protobuf
message
"""
span.add_time_event(
time_event=time_event.TimeEvent(
datetime.utcnow(),
message_event=time_event.MessageEvent(
message_id,
type=message_event_type,
uncompressed_size_bytes=proto_message.ByteSize()
)
)
)
| 5,340,344
|
def passstore(config, name):
"""Get password file"""
return config.passroot / name
| 5,340,345
|
def test_plugin_class():
"""If a plugin class is passed to find_plugin(), it shoud be returned back."""
plugin = pybtex.plugin.find_plugin("pybtex.database.input", 'bibtex')
plugin2 = pybtex.plugin.find_plugin("pybtex.database.input", plugin)
nose.tools.assert_equal(plugin, plugin2)
| 5,340,346
|
def scan_code(job_id):
"""
Do all the heavy lifting... Query the Org for all the Apex Classes and build the Symbol Table
"""
# Load the job from the database
job = models.Job.objects.get(pk=job_id)
job.status = 'Processing'
job.save()
try:
# Init the scan job and run
scan_job = ScanJob(job)
scan_job.scan_org()
# If the user wants the result emailed
if job.email_result:
utils.send_finished_email(job)
except Exception as ex:
job.status = 'Error'
job.error = str(ex)
job.stack_trace = traceback.format_exc()
job.save()
| 5,340,347
|
def _get(url, **fields):
"""Get a GroupMe API url using urllib3.
Can have arbitrary string parameters
which will be part of the GET query string."""
fields["token"] = login.get_login()
response = HTTP.request("GET", GROUPME_API + url, fields=fields)
# 2XX Success
if 200 <= response.status < 300:
if response.status != 200:
warn(
"Unexpected status code %d when querying %s. "
"Please open an issue at %s/issues/new"
% (response.status, response.geturl(), HOMEPAGE)
)
data = response.data.decode("utf-8")
return json.loads(data)["response"]
# 304 Not Modified: we reached the end of the data
if response.status == 304:
return None
# 401 Not Authorized
if response.status == 401:
sys.exit(
"Permission denied. Maybe you typed your password wrong? "
"Try changing it with -D."
)
# Unknown status code
raise RuntimeError(
response,
"Got bad status code %d when querying %s: %s"
% (response.status, response.geturl(), response.data.decode("utf-8")),
)
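A usage sketch (the 'groups' endpoint and 'per_page' parameter follow the public GroupMe API, but treat them as illustrative here):

# Keyword arguments become query-string fields alongside the token.
groups = _get("groups", per_page="100")
for group in groups:
    print(group["name"])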
| 5,340,348
|
def init_cluster():
"""
Initialise Cluster
:return:
"""
    # using the default username/password to log in first, create a new admin user based on the provided values, then delete "admin"
api = ApiResource(server_host=cmx.cm_server, username="admin", password="admin")
api.create_user(cmx.username, cmx.password, ['ROLE_ADMIN'])
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
api.delete_user("admin")
# Update Cloudera Manager configuration
cm = api.get_cloudera_manager()
cm.update_config({"REMOTE_PARCEL_REPO_URLS": "http://archive.cloudera.com/cdh5/parcels/{latest_supported}",
"PHONE_HOME": False, "PARCEL_DISTRIBUTE_RATE_LIMIT_KBS_PER_SECOND": "1024000"})
print "> Initialise Cluster"
if cmx.cluster_name in [x.name for x in api.get_all_clusters()]:
print "Cluster name: '%s' already exists" % cmx.cluster_name
else:
print "Creating cluster name '%s'" % cmx.cluster_name
api.create_cluster(name=cmx.cluster_name, version=cmx.cluster_version)
| 5,340,349
|
def change_instance_type_instancecmd(params, hostname, new_instance_name):
""" change the instance type of a specified instance to a new one """
instance = convert_host_name_to_instance(hostname)
instance.modify_attribute(DryRun=params.aws_dryrun,
Attribute='instanceType',
Value=new_instance_name)
| 5,340,350
|
def coord_shell_array(nvt_run, func, li_atoms, species_dict, select_dict,
run_start, run_end):
"""
Args:
nvt_run: MDAnalysis Universe
func: One of the neighbor statistical method (num_of_neighbor_one_li,
num_of_neighbor_one_li_simple)
li_atoms: Atom group of the Li atoms.
species_dict (dict): A dict of coordination cutoff distance
of the interested species.
select_dict: A dictionary of species selection.
run_start (int): Start time step.
run_end (int): End time step.
"""
num_array = func(
nvt_run, li_atoms[0], species_dict, select_dict, run_start, run_end
)
for li in tqdm_notebook(li_atoms[1::]):
this_li = func(
nvt_run, li, species_dict, select_dict, run_start, run_end
)
for kw in num_array.keys():
num_array[kw] = np.concatenate((num_array.get(kw),
this_li.get(kw)), axis=0)
return num_array
| 5,340,351
|
def qa():
"""
QA server settings
"""
env.hosts = ['ombu@qa.ombuweb.com']
env.host_type = 'staging'
env.user = 'ombu'
env.host_webserver_user = 'www-data'
env.host_site_path = '/vol/main/foo/bar'
env.base_url = 'http://qa.ombuweb.com'
# DB settings
env.db_db = 'foo'
env.db_user = 'foo_user'
env.db_pw = 'bar'
env.db_host = 'localhost'
| 5,340,352
|
def get_events(number):
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
store = file.Storage(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'token.json'))
creds = store.get()
if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), 'credentials.json'), SCOPES)  # SCOPES is an argument to flow_from_clientsecrets, not os.path.join
creds = tools.run_flow(flow, store)
service = build('calendar', 'v3', http=creds.authorize(Http()))
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=number, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
return events
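A usage sketch following the Calendar quickstart's event structure:

events = get_events(10)
if not events:
    print('No upcoming events found.')
for event in events:
    # All-day events carry 'date'; timed events carry 'dateTime'.
    start = event['start'].get('dateTime', event['start'].get('date'))
    print(start, event['summary'])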
| 5,340,353
|
def plot_transactions_ts(transactional_df, frequency="M", aggregation="n_purchases", reg=False, black_friday_dates=None, plot_black_friday=False, plot_normal_only=False, **kwargs):
"""
    Plots the evolution of purchases over time.
black_friday_dates:: list of datetime.date
"""
    # preventing unwanted modifications to the original df
transactional_df = transactional_df.copy().rename(columns={"data": "date", "receita": "revenue", "id_cliente": "customer_id"})
transactional_df = transactional_df[["date", "revenue", "customer_id"] if not 'black_friday' in transactional_df.columns else ["date", "revenue", "customer_id", "black_friday"]]
transactional_df.index = transactional_df['date']
# if black friday dates are explicity given, a new column is added to the dataframe flagging the relevant purchases
if black_friday_dates:
transactional_df["black_friday"] = transactional_df["date"].dt.date.isin(black_friday_dates).astype(np.int8)
# level of aggregation
    assert frequency != 'Y', "invalid frequency - use plot_transactions_y"  # ('Y') is a plain string, not a tuple
grouper = transactional_df.resample(frequency)
# aggregating data
if aggregation == "n_purchases":
df = grouper.size().rename(aggregation).to_frame()
elif aggregation == "revenue":
df = grouper["revenue"].sum().rename(aggregation).to_frame()
elif aggregation == "mean_ticket":
df = grouper["revenue"].mean().rename(aggregation).to_frame()
elif aggregation == "n_customers":
df = grouper["customer_id"].nunique().rename(aggregation).to_frame()
else:
raise ValueError(f"unknown aggregation {aggregation} - available agregations: n_purchases, revenue, mean_ticket, n_customers")
    # for frequency grouping troubleshooting
# if kwargs.get("troubleshoot_frequency", False):
df = df.join(grouper["date"].max().rename("date_max"))
df = df.join(grouper["date"].min().rename("date_min"))
df["n_days"] = (df["date_max"] - df["date_min"]).dt.days + 1
if kwargs.get("full_intervals_only", False):
if frequency == "M":
df = df[df["n_days"] >= kwargs.get("full_interval_m", 28)].copy()
elif frequency == "W":
df = df[df["n_days"] >= kwargs.get("full_interval_m", 7)].copy()
if "black_friday" in transactional_df.columns:
if frequency != 'Y':
df = df.join(grouper["black_friday"].max())
if plot_black_friday or plot_normal_only:
assert "black_friday" in df.columns, "No Black Friday Information Available"
# n_purchases on normal days
df[f"{aggregation}_normal"] = df[aggregation]
df.loc[df["black_friday"] == 1, f"{aggregation}_normal"] = np.nan
df[f"{aggregation}_normal"] = df[f"{aggregation}_normal"].interpolate(method="linear")
        # for plotting reasons, considering "neighbor" rows as black_friday == 1
try:
bf_idx = [(i-1, i, i+1) for i in df.reset_index()[df.reset_index()["black_friday"] == 1].index]
bf_idx = list(set(list(sum(bf_idx, ()))))
df.iloc[bf_idx, (df.columns == "black_friday").argmax()] = 1
except IndexError:
pass
# n_purchases on black friday days
df[f"{aggregation}_bf"] = df[aggregation]
df.loc[df["black_friday"] != 1, f"{aggregation}_bf"] = np.nan
# plot!
ax = kwargs.get("ax")
if not ax:
fig, ax = plt.subplots(figsize=kwargs.get("figsize", (18,4)))
if plot_black_friday:
(df[f'{aggregation}_normal']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_normal", "Normal"))
(df[f'{aggregation}_bf']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_bf", "Black Friday"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[f'{aggregation}_normal']).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
elif plot_normal_only:
(df[f'{aggregation}_normal']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_normal", "Normal"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[f'{aggregation}_normal']).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
else:
(df[aggregation]).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[aggregation]).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
if kwargs.get("legend", False):
ax.legend()
ax.set_title(kwargs.get("title", f"{aggregation.upper()} - {frequency}"), size=kwargs.get("title_size", 14))
ax.set_xlabel(kwargs.get("xlabel",""))
return ax
| 5,340,354
|
def generate_token(public_id):
"""
Simple token generator returning encoded JWT
:param public_id: unique string user identification
:return JWT: authorization token for given public_id
"""
# if User.query.filter_by(public_id=public_id).one_or_none() is None:
# return jsonify(404, "ID unverified")
# else:
timestamp = int(time.time())
payload = {
"iss": JWT_ISSUER,
"iat": int(timestamp),
"exp": int(timestamp + JWT_LIFETIME_SECONDS),
"sub": str(public_id),
}
return jwt.encode(payload, JWT_SECRET, algorithm=JWT_ALGORITHM)
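A minimal sketch of the matching verification step, assuming PyJWT and the same JWT_SECRET / JWT_ALGORITHM constants (not part of the original snippet):

def verify_token(token):
    """Return the public_id ("sub" claim) if the token is valid, else None."""
    try:
        # PyJWT validates the "exp" claim automatically during decode.
        payload = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
        return payload["sub"]
    except jwt.InvalidTokenError:
        return None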
| 5,340,355
|
def test_sanitizing_1d():
"""Test if dataset cleanup gives expected results."""
a = np.arange(10).astype(object)
a[4:6] = None
b = np.arange(10).astype(complex)
b[4] = np.nan
a_clean = np.hstack((a[:4], a[5:]))
b_clean = np.hstack((b[:4], b[5:]))
dd = DataDict(
a=dict(values=a),
b=dict(values=b, axes=['a']),
)
assert dd.validate()
dd2 = dd.remove_invalid_entries()
assert dd2.validate()
assert num.arrays_equal(dd2.data_vals('a'), a_clean)
assert num.arrays_equal(dd2.data_vals('b'), b_clean)
| 5,340,356
|
def get_user_permission_all_url_list(user_id):
"""
    Get the list of all permission URLs for a user
    :param user_id: user id
    :return:
"""
logging.info('get_user_permission_all_url_list')
try:
user_permission_list = db_get_user_permission_all_list(user_id)
permission_url_list = list()
for user_permission in user_permission_list:
permission_url_list.append(user_permission.permission_url)
return permission_url_list
except Exception as e:
logging.debug(e)
raise e
| 5,340,357
|
def plot_predicted_data(training_actual_df, predicted_df, date_col, actual_col,
pred_col=PredictionKeys.PREDICTION.value, prediction_percentiles=None,
title="", test_actual_df=None, is_visible=True,
figsize=None, path=None, fontsize=None,
line_plot=False, markersize=70, lw=2, linestyle='-'):
"""
plot training actual response together with predicted data; if actual response of predicted
data is there, plot it too.
Parameters
----------
training_actual_df : pd.DataFrame
training actual response data frame. two columns required: actual_col and date_col
predicted_df : pd.DataFrame
predicted data response data frame. two columns required: actual_col and pred_col. If
user provide prediction_percentiles, it needs to include them as well in such
`prediction_{x}` where x is the correspondent percentiles
prediction_percentiles : list
list of two elements indicates the lower and upper percentiles
date_col : str
the date column name
actual_col : str
pred_col : str
title : str
title of the plot
test_actual_df : pd.DataFrame
test actual response dataframe. two columns required: actual_col and date_col
is_visible : boolean
whether we want to show the plot. If called from unittest, is_visible might = False.
figsize : tuple
figsize pass through to `matplotlib.pyplot.figure()`
path : str
path to save the figure
fontsize : int; optional
fontsize of the title
line_plot : bool; default False
if True, make line plot for observations; otherwise, make scatter plot for observations
markersize : int; optional
point marker size
lw : int; optional
out-of-sample prediction line width
linestyle : str
linestyle of prediction plot
Returns
-------
matplotlib axes object
"""
if is_empty_dataframe(training_actual_df) or is_empty_dataframe(predicted_df):
raise ValueError("No prediction data or training response to plot.")
if not is_ordered_datetime(predicted_df[date_col]):
raise ValueError("Prediction df dates is not ordered.")
plot_confid = False
if prediction_percentiles is None:
_pred_percentiles = [5, 95]
else:
_pred_percentiles = prediction_percentiles
if len(_pred_percentiles) != 2:
raise ValueError("prediction_percentiles has to be None or a list with length=2.")
confid_cols = ['prediction_{}'.format(_pred_percentiles[0]),
'prediction_{}'.format(_pred_percentiles[1])]
if set(confid_cols).issubset(predicted_df.columns):
plot_confid = True
if not figsize:
figsize = (16, 8)
if not fontsize:
fontsize = 16
_training_actual_df = training_actual_df.copy()
_predicted_df = predicted_df.copy()
_training_actual_df[date_col] = pd.to_datetime(_training_actual_df[date_col])
_predicted_df[date_col] = pd.to_datetime(_predicted_df[date_col])
fig, ax = plt.subplots(facecolor='w', figsize=figsize)
if line_plot:
ax.plot(_training_actual_df[date_col].values,
_training_actual_df[actual_col].values,
marker=None, color='black', lw=lw, label='train response', linestyle=linestyle)
else:
ax.scatter(_training_actual_df[date_col].values,
_training_actual_df[actual_col].values,
marker='.', color='black', alpha=0.8, s=markersize,
label='train response')
ax.plot(_predicted_df[date_col].values,
_predicted_df[pred_col].values,
marker=None, color='#12939A', lw=lw, label=PredictionKeys.PREDICTION.value, linestyle=linestyle)
# vertical line separate training and prediction
if _training_actual_df[date_col].values[-1] < _predicted_df[date_col].values[-1]:
ax.axvline(x=_training_actual_df[date_col].values[-1], color='#1f77b4', linestyle='--')
if test_actual_df is not None:
test_actual_df = test_actual_df.copy()
test_actual_df[date_col] = pd.to_datetime(test_actual_df[date_col])
if line_plot:
ax.plot(test_actual_df[date_col].values,
test_actual_df[actual_col].values,
                    marker=None, color='#FF8C00', lw=lw, label='test response', linestyle=linestyle)
else:
ax.scatter(test_actual_df[date_col].values,
test_actual_df[actual_col].values,
marker='.', color='#FF8C00', alpha=0.8, s=markersize,
label='test response')
# prediction intervals
if plot_confid:
ax.fill_between(_predicted_df[date_col].values,
_predicted_df[confid_cols[0]],
_predicted_df[confid_cols[1]],
facecolor='#42999E', alpha=0.5)
ax.set_title(title, fontsize=fontsize)
ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.5)
ax.legend()
if path:
fig.savefig(path)
if is_visible:
plt.show()
else:
plt.close()
return ax
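# Example usage (a sketch; assumes hypothetical train_df/pred_df/test_df frames
# with 'ds' and 'y' columns, and that pred_df carries the default prediction
# column plus optional 'prediction_5'/'prediction_95' columns for the band):
# ax = plot_predicted_data(train_df, pred_df, date_col='ds', actual_col='y',
#                          test_actual_df=test_df, title='Backtest')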
| 5,340,358
|
def make_plot(x_vals, y_vals, title, x_label, y_label, style, new_fig=False, log_x=False, log_y=False):
    """Plots x_vals vs. y_vals with supplied titles and labels."""
    if new_fig:
        pylab.figure()
    pylab.title(title)
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.plot(x_vals, y_vals, style)
    if log_x:
        pylab.semilogx()
    if log_y:
        pylab.semilogy()
| 5,340,359
|
def CheckTreeIsOpen(input_api, output_api, url, closed, url_text):
"""Similar to the one in presubmit_canned_checks except it shows an helpful
status text instead.
"""
assert(input_api.is_committing)
try:
connection = input_api.urllib2.urlopen(url)
status = connection.read()
connection.close()
if input_api.re.match(closed, status):
long_text = status + '\n' + url
try:
connection = input_api.urllib2.urlopen(url_text)
text = connection.read()
connection.close()
      match = input_api.re.search(r'<div class="Notice">(.*)</div>', text)
if match:
long_text = match.group(1).strip()
except IOError:
pass
return [output_api.PresubmitPromptWarning("The tree is closed.",
long_text=long_text)]
except IOError:
pass
return []
| 5,340,360
|
def tnr_ecma_st(signal, fs, prominence=True):
"""Computation of tone-to-noise ration according to ECMA-74, annex D.9
for a stationary signal.
The T-TNR value is calculated according to ECMA-TR/108
Parameters
----------
signal :numpy.array
A stationary signal in [Pa].
fs : integer
Sampling frequency.
prominence : boolean
If True, the algorithm only returns the prominent tones, if False it returns all tones detected.
Default is True.
    Returns
    -------
t_tnr : array of float
        global T-TNR value
tnr : array of float
TNR values for each detected tone
promi : array of bool
prominence criterion for each detected tone
tones_freqs : array of float
frequency of the detected tones
"""
# Compute db spectrum
spectrum_db, freq_axis = spectrum(signal, fs, db=True)
# Compute tnr values
tones_freqs, tnr, prom, t_tnr = _tnr_main_calc(spectrum_db, freq_axis)
prom = prom.astype(bool)
    if not prominence:
        return t_tnr, tnr, prom, tones_freqs
    else:
        return t_tnr, tnr[prom], prom[prom], tones_freqs[prom]
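# Example (a sketch; a 1 kHz tone over a weak noise floor):
# fs = 48000
# t = np.arange(fs) / fs
# sig = 0.5 * np.sin(2 * np.pi * 1000 * t) + 0.01 * np.random.randn(fs)
# t_tnr, tnr, prom, freqs = tnr_ecma_st(sig, fs)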
| 5,340,361
|
def set_seed(seed, config = None):
"""
    set the initial seed for reproducibility
"""
if config is None:
raise ValueError("config should not be None")
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = str_to_bool(config["cudnn_deterministic_toggle"])
torch.backends.cudnn.benchmark = str_to_bool(config["cudnn_benchmark_toggle"])
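# Example (a sketch; assumes str_to_bool parses "True"/"False" strings,
# matching how the toggles are read above):
# set_seed(42, config={"cudnn_deterministic_toggle": "True",
#                      "cudnn_benchmark_toggle": "False"})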
| 5,340,362
|
def pop_legacy_palette(kwds, *color_defaults):
"""
Older animations in BPA and other areas use all sorts of different names for
what we are now representing with palettes.
This function mutates a kwds dictionary to remove these legacy fields and
extract a palette from it, which it returns.
"""
palette = kwds.pop('palette', None)
if palette:
legacy = [k for k, _ in color_defaults if k in kwds]
if legacy:
raise ValueError('Cannot set palette and ' + ', '.join(legacy))
return palette
values = [kwds.pop(k, v) for k, v in color_defaults]
if values and color_defaults[0][0] in ('colors', 'palette'):
values = values[0]
return make.colors(values or None)
| 5,340,363
|
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T).
Parameters
----------
X : array-like
First matrix
Y : array-like
Second matrix
"""
return np.dot(X.ravel(), Y.ravel())
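# Example: computes trace(X @ Y.T) without forming the product, since
# trace(X @ Y.T) == sum_ij X_ij * Y_ij.
# X = np.arange(6.0).reshape(2, 3); Y = np.ones((2, 3))
# assert trace_dot(X, Y) == np.trace(X @ Y.T)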
| 5,340,364
|
def main():
"""Continuum normalize a fits file interactively."""
args = _parser()
data = fits.getdata(args.fitsname)
hdr = fits.getheader(args.fitsname)
print(data)
print(type(data))
print(data.shape)
if isinstance(data, fits.fitsrec.FITS_rec):
wave = data.field(0)
flux = data.field(1)
else:
wave = np.arange(len(data))
flux = data
cf = pg.ContinuumInteractive(wave, flux)
cf.plot([wave[0], wave[-1]], [1, 1], 'k--')
# Opens the GUI and starts the interactive session.
c = cf.findContinuum()
norm_flux = c["normalizedData"]
if args.flux_errors:
flux_error(wave, flux, c)
if args.plot:
plt.title("Normalized data")
plt.plot(wave, norm_flux, 'b.--')
plt.xlabel("Wavelength")
plt.ylabel("Normalized Flux")
plt.show()
fitssave = args.fitsname.replace(".fits", ".{!s}.fits".format(args.suffix))
    fits.writeto(fitssave, norm_flux, header=hdr)
| 5,340,365
|
def branches(verbose=False):
"""Show the list of revisions that have more than one next revision."""
a = get_alembic()
print_stdout = a.config.print_stdout
get_revision = a.script_directory.get_revision
for r in a.branches():
print_stdout(r.cmd_format(verbose, include_branches=True, include_doc=True, include_parents=True, tree_indicators=True))
for nr in r.nextrev:
nr = get_revision(nr)
print_stdout(' -> {0}'.format(nr.cmd_format(False, include_branches=True, include_doc=True, include_parents=True, tree_indicators=True)))
| 5,340,366
|
async def handle_waste_view(ack, body, client, view):
"""Process input from waste form"""
logger.info("Processing waste input...")
logger.info(body)
raw_leaders = view['state']['values']['input_a']['leader_names']['selected_options']
leader_list = [" - " + n['value'] for n in raw_leaders]
regulars = float(view['state']['values']['input_b']['regulars']['value'])
spicy = float(view['state']['values']['input_c']['spicy']['value'])
nuggets = float(view['state']['values']['input_d']['nuggets']['value'])
strips = float(view['state']['values']['input_e']['strips']['value'])
g_filets = float(view['state']['values']['input_f']['grilled1']['value'])
g_nuggets = float(view['state']['values']['input_g']['grilled2']['value'])
# Check that input is numeric when it needs to be
chicken_list = [regulars, spicy, nuggets, strips, g_filets, g_nuggets]
# for item in chicken_list:
# if not isinstance(item, float):
# payload = {
# "response_action": "errors",
# "errors": {
# "block_id": "error_message"
# }
# }
# Store data
total_weight = sum(chicken_list)
sh = gc.open_by_key(creds.waste_id)
goal_sheet = sh.worksheet("Goals")
goal_values = goal_sheet.get_all_values()
goals = {}
for row in goal_values:
if row[0] == "Type":
continue
goals[row[0]] = float(row[1])
user = await client.users_info(user=body['user']['id'])
user_name = user['user']['real_name']
new_line = "\n"
block1 = {
"type": "section",
"text": {"type": "mrkdwn", "text": f"*Submitted by:* {user_name}"}
}
block2 = {
"type": "section",
"text": {"type": "mrkdwn",
"text": (f"*Leaders on:*\n"
f"{new_line.join(leader_list)}\n")
}
}
block3_text = "*Weights:*\n"
if total_weight > 0:
if regulars:
if regulars >= goals['Filets']:
block3_text += f"_Regulars: {regulars} lbs._\n"
else:
block3_text += f"Regulars: {regulars} lbs.\n"
if spicy:
if spicy >= goals['Spicy']:
block3_text += f"_Spicy: {spicy} lbs._\n"
else:
block3_text += f"Spicy: {spicy} lbs.\n"
if nuggets:
if nuggets >= goals['Nuggets']:
block3_text += f"_Nuggets: {nuggets} lbs._\n"
else:
block3_text += f"Nuggets: {nuggets} lbs.\n"
if strips:
if strips >= goals['Strips']:
block3_text += f"_Strips: {strips} lbs._\n"
else:
block3_text += f"Strips: {strips} lbs.\n"
if g_filets:
if g_filets >= goals['Grilled Filets']:
block3_text += f"_Grilled Filets: {g_filets} lbs._\n"
else:
block3_text += f"Grilled Filets: {g_filets} lbs.\n"
if g_nuggets:
if g_nuggets >= goals['Grilled Nuggets']:
block3_text += f"_Grilled Nuggets: {g_nuggets} lbs._\n"
else:
block3_text += f"Grilled Nuggets: {g_nuggets} lbs.\n"
to_post = [str(datetime.now()), regulars, spicy, nuggets, strips, g_filets, g_nuggets]
# Handle breakfast items
if datetime.now().hour < 13:
breakfast = float(view['state']['values']['input_h']['breakfast']['value'])
to_post.append(breakfast)
g_breakfast = float(view['state']['values']['input_i']['grilled3']['value'])
to_post.append(g_breakfast)
if sum([breakfast, g_breakfast]) > 0:
total_weight += sum([breakfast, g_breakfast])
if breakfast:
if breakfast >= goals['Breakfast Filets']:
block3_text += f"_Breakfast Filets: {breakfast} lbs._\n"
else:
block3_text += f"Breakfast Filets: {breakfast} lbs.\n"
if g_breakfast:
if g_breakfast >= goals['Grilled Breakfast']:
block3_text += f"_Grilled Breakfast: {g_breakfast} lbs._\n"
else:
block3_text += f"Grilled Breakfast: {g_breakfast} lbs.\n"
block3 = {
"type": "section",
"text": {"type": "mrkdwn", "text": block3_text}
}
blocks = [block1, block2, block3]
other = view['state']['values']['input_j']['other']['value']
if other:
block4 = {
"type": "section",
"text": {"type": "mrkdwn", "text": f"*Notes:*\n{other}"}
}
blocks.append(block4)
block5 = {
"type": "section",
"text": {"type": "mrkdwn", "text": "Please remember to replace stickers on all waste containers."}
}
blocks.append(block5)
await ack()
# Send data to Google Sheet
try:
sheet = sh.worksheet("Data")
sheet.append_row(to_post, value_input_option='USER_ENTERED')
    except gspread.exceptions.GSpreadException as e:
        return await client.chat_postMessage(channel=body['user']['id'],
                                             text=str(e))
except Exception as e:
await client.chat_postMessage(channel=body['user']['id'],
text=f"There was an error while storing the message to the Google Sheet.\n{e}")
await client.chat_postMessage(channel=creds.pj_user_id,
text=f"There was an error while storing the message to the Google Sheet.\n{e}")
return
await client.chat_postMessage(channel=creds.boh_channel,
blocks=blocks,
text="New waste report posted.")
| 5,340,367
|
def random_superposition(dim: int) -> np.ndarray:
"""
Args:
        dim: Length of the returned state vector.
    Returns:
        Normalized random complex array of length dim.
"""
state_vector = np.random.standard_normal(dim).astype(complex)
    state_vector += 1j * np.random.standard_normal(dim)
state_vector /= np.linalg.norm(state_vector)
return state_vector
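# Example: a normalized random two-qubit state has 2**2 == 4 amplitudes.
# psi = random_superposition(4)
# assert np.isclose(np.linalg.norm(psi), 1.0)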
| 5,340,368
|
def predict(dag_model: Dag, test_data: Tensor) -> MultitaskMultivariateNormal:
"""
Can use this little helper function to predict from a Dag without
wrapping it in a DagGPyTorchModel.
"""
dag_model.eval()
with no_grad(), fast_pred_var():
return dag_model(test_data)
| 5,340,369
|
def edit_skill():
"""Edit a skill entry in the skills table for a certain user. """
    skill_id = request.form['id']
    skill_level = request.form['skill_level']
    skills.update({'skill_level': skill_level}, id=skill_id)
return good_json_response('success')
| 5,340,370
|
async def hello(ctx):
"""Say hello to the bot""" # the command description can be supplied as the docstring
await ctx.send(f"Hello {ctx.author}!")
| 5,340,371
|
def oais_process_transfer(uuid, accession_id='', archivematica_id=None):
"""Mark the transfer in progress.
This function should be called if the transfer is processing. See
:py:func:`invenio_archivematica.tasks.oais_start_transfer`.
The signal
:py:data:`invenio_archivematica.signals.oais_transfer_processing`
is called with the sip as function parameter.
    :param str uuid: the UUID of the sip
    :param str accession_id: the accession ID of the record (unused here)
    :param str archivematica_id: the ID of the AIP in Archivematica
"""
ark = Archive.get_from_sip(uuid)
ark.status = ArchiveStatus.PROCESSING_TRANSFER
ark.archivematica_id = archivematica_id
db.session.commit()
oais_transfer_processing.send(SIP(ark.sip))
| 5,340,372
|
def is_catalogue_link(link):
"""check whether the specified link points to a catalogue"""
return link['type'] == 'application/atom+xml' and 'rel' not in link
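# Example: an Atom link without a 'rel' attribute counts as a catalogue link.
# is_catalogue_link({'type': 'application/atom+xml'})                  # True
# is_catalogue_link({'type': 'application/atom+xml', 'rel': 'self'})   # False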
| 5,340,373
|
def morlet_window(width: int, sigma: float) -> np.ndarray:
"""
Unadjusted Morlet window function.
Parameters
----------
    width : integer (positive power of 2)
        Window width to use - a power of two, as a width of two corresponds to the Nyquist rate.
    sigma : float
        Corresponds to the central frequency of the wavelet.
Returns
-------
output : real ndarray
Normalised Morlet wavelet vector.
Notes
-----
https://en.wikipedia.org/wiki/Morlet_wavelet
"""
# fixed width wavelet translates to a fixed width Fourier transformed wavelet in frequency spectrum
# Definition - https://en.wikipedia.org/wiki/Morlet_wavelet
c_pi = (1 + np.exp(- sigma ** 2) - 2 * np.exp(- 0.75 * sigma ** 2)) ** (-1 / 2)
t = (np.arange(width + 1) - (width / 2)) * (10 / width)
wavelet = c_pi * (np.pi ** (-1 / 4)) * (np.exp(1j * sigma * t) - np.exp(- (1 / 2) * sigma ** 2))
output = np.exp(- (1 / 2) * t ** 2) * wavelet.real
return output
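# Example: a 256-sample window; note the function returns width + 1 points.
# w = morlet_window(256, 5.0)
# w.shape  # -> (257,)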
| 5,340,374
|
def tfidfvec():
"""
    Chinese text feature extraction with TF-IDF
:return: None
"""
c1, c2, c3 = cutword()
print(c1, c2, c3)
tf = TfidfVectorizer()
data = tf.fit_transform([c1, c2, c3])
print(tf.get_feature_names())
print(data.toarray())
return None
| 5,340,375
|
def resolve_guid(guid, suffix=None):
"""Resolve GUID to corresponding URL and return result of appropriate
view function. This effectively yields a redirect without changing the
displayed URL of the page.
:param guid: GUID value (not the object)
:param suffix: String to append to GUID route
:return: Werkzeug response
"""
# Get prefix; handles API routes
prefix = request.path.split(guid)[0].rstrip('/')
# Look up GUID
guid_object = Guid.load(guid)
if guid_object:
# verify that the object is a GuidStoredObject descendant. If a model
# was once a descendant but that relationship has changed, it's
# possible to have referents that are instances of classes that don't
# have a redirect_mode attribute or otherwise don't behave as
# expected.
if not isinstance(guid_object.referent, GuidStoredObject):
sentry.log_message(
'Guid `{}` resolved to non-guid object'.format(guid)
)
raise HTTPError(http.NOT_FOUND)
referent = guid_object.referent
if referent is None:
logger.error('Referent of GUID {0} not found'.format(guid))
raise HTTPError(http.NOT_FOUND)
mode = referent.redirect_mode
if mode is None:
raise HTTPError(http.NOT_FOUND)
url = referent.deep_url if mode == 'proxy' else referent.url
url = _build_guid_url(url, prefix, suffix)
# Always redirect API URLs; URL should identify endpoint being called
if prefix or mode == 'redirect':
if request.query_string:
url += '?' + request.query_string
return redirect(url)
return proxy_url(url)
# GUID not found; try lower-cased and redirect if exists
guid_object_lower = Guid.load(guid.lower())
if guid_object_lower:
return redirect(
_build_guid_url(
guid.lower(), prefix, suffix
)
)
# GUID not found
raise HTTPError(http.NOT_FOUND)
| 5,340,376
|
def _is_array(obj: Any) -> bool:
"""Whether the object is a numpy array."""
return isinstance(obj, np.ndarray)
| 5,340,377
|
def has_ao_1e_int_overlap(trexio_file) -> bool:
"""Check that ao_1e_int_overlap variable exists in the TREXIO file.
Parameter is a ~TREXIO File~ object that has been created by a call to ~open~ function.
Returns:
True if the variable exists, False otherwise
Raises:
- Exception from trexio.Error class if TREXIO return code ~rc~ is TREXIO_FAILURE and prints the error message using string_of_error.
- Exception from some other error (e.g. RuntimeError).
"""
    rc = pytr.trexio_has_ao_1e_int_overlap(trexio_file.pytrexio_s)
    if rc == TREXIO_FAILURE:
        raise Error(rc)
    return rc == TREXIO_SUCCESS
| 5,340,378
|
async def get_non_existent_ids(collection, id_list: Sequence[str]) -> Set[str]:
"""
Return the IDs that are in `id_list`, but don't exist in the specified `collection`.
:param collection: the database collection to check
:param id_list: a list of document IDs to check for existence
    :return: a set of non-existent IDs
"""
existing_group_ids = await collection.distinct("_id", {"_id": {"$in": id_list}})
return set(id_list) - set(existing_group_ids)
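# Example (a sketch; assumes an async context and a Motor-style collection):
# missing = await get_non_existent_ids(db.groups, ["a", "b", "c"])
# -> the subset of {"a", "b", "c"} with no matching "_id" in db.groups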
| 5,340,379
|
def create_source_fc(header):
"""
Creates :class:`parser.file_configuration_t` instance, configured to
contain path to C++ source file
:param header: path to C++ source file
:type header: str
:rtype: :class:`parser.file_configuration_t`
"""
return file_configuration_t(
data=header,
content_type=file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE)
| 5,340,380
|
def check_freq(var: xr.DataArray, freq: str, strict: bool = True):
"""Raise an error if not series has not the expected temporal frequency or is not monotonically increasing.
Parameters
----------
var : xr.DataArray
Input array.
freq : str
The temporal frequency defined using the Pandas frequency strings, e.g. 'A', 'M', 'D', 'H', 'T',
'S'. Note that a 3-hourly time series is declared as '3H'.
strict : bool
Whether or not multiples of the frequency are considered invalid. With `strict` set to False, a '3H' series
will not raise an error if freq is set to 'H'.
"""
v_freq = xr.infer_freq(var.time)
if v_freq != freq:
        if v_freq is not None and (freq in v_freq) and not strict:
            return
raise ValidationError(
"Time series has temporal frequency `{v_freq}`, expected `{freq}`."
)
| 5,340,381
|
def prune(valid_cols, new_item_set, search_space):
""" prune invalid combination of columns
Args:
--------
valid_cols: existing valid column list
new_item_set: item set to be merged
search_space: the search space that stores possible candidates
Returns:
--------
"""
if len(valid_cols) == 0:
return
for se in valid_cols:
intersection = set(se) & new_item_set
if len(intersection) == 0:
continue
union = set(se) | new_item_set
for item in list(intersection):
diff = sorted(list(union - set([item])))
if diff in search_space:
search_space.remove(diff)
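# Example: with valid_cols = [['a', 'b']] and new_item_set = {'b', 'c'},
# the union is {'a', 'b', 'c'}, the shared item is 'b', and the candidate
# ['a', 'c'] is pruned:
# space = [['a', 'c']]
# prune([['a', 'b']], {'b', 'c'}, space)
# space  # -> []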
| 5,340,382
|
def StorageFlatten(cache_line_size, create_bound_attribute=False):
"""Flatten the multi-dimensional read/write to 1D.
Parameters
----------
cache_line_size: int
The size of CPU cache line.
create_bound_attribute:
Whether to create bound attributes.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.StorageFlatten(cache_line_size, create_bound_attribute)
| 5,340,383
|
def wrap(node):
"""Stringify the parse tree node and wrap it in parentheses if it might be
ambiguous.
"""
if isinstance(node, (IntNode, CallNode, SymbolNode)):
return str(node)
else:
return "(" + str(node) + ")"
| 5,340,384
|
def _ParseProjectNameMatch(project_name):
"""Process the passed project name and determine the best representation.
Args:
project_name: a string with the project name matched in a regex
Returns:
A minimal representation of the project name, None if no valid content.
"""
if not project_name:
return None
return project_name.lstrip().rstrip('#: \t\n')
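# Example: leading whitespace and trailing '#', ':' and spaces are stripped.
# _ParseProjectNameMatch("  chromium#: ")  # -> "chromium"
# _ParseProjectNameMatch("")               # -> None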
| 5,340,385
|
def assign_variables(assignment_expressions, df, locals_dict, df_alias=None, trace_rows=None):
"""
Evaluate a set of variable expressions from a spec in the context
of a given data table.
Expressions are evaluated using Python's eval function.
    Python expressions have access to variables in locals_dict (with df
    accessible as the variable df). They also have access to previously
    assigned targets, under the assigned target name.
lowercase variables starting with underscore are temp variables (e.g. _local_var)
and not returned except in trace_results
uppercase variables starting with underscore are temp scalar variables (e.g. _LOCAL_SCALAR)
and not returned except in trace_assigned_locals
    This is useful for defining general-purpose local constants in the expression file.
    Note that expressions (other than temp scalar variables) should result in
    a Pandas Series (scalars will be automatically promoted to series).
Parameters
----------
assignment_expressions : pandas.DataFrame of target assignment expressions
target: target column names
expression: pandas or python expression to evaluate
df : pandas.DataFrame
    locals_dict : Dict
        This is a dictionary of local variables that will be the environment
        for an evaluation of a "python" expression.
trace_rows: series or array of bools to use as mask to select target rows to trace
Returns
-------
variables : pandas.DataFrame
Will have the index of `df` and columns named by target and containing
the result of evaluating expression
trace_df : pandas.DataFrame or None
a dataframe containing the eval result values for each assignment expression
"""
np_logger = NumpyLogger(logger)
def is_throwaway(target):
return target == '_'
def is_temp_scalar(target):
return target.startswith('_') and target.isupper()
def is_temp(target):
return target.startswith('_')
def to_series(x):
if x is None or np.isscalar(x):
return pd.Series([x] * len(df.index), index=df.index)
return x
assert assignment_expressions.shape[0] > 0
trace_assigned_locals = trace_results = None
if trace_rows is not None:
# convert to numpy array so we can slice ndarrays as well as series
trace_rows = np.asanyarray(trace_rows)
if trace_rows.any():
trace_results = OrderedDict()
trace_assigned_locals = OrderedDict()
# avoid touching caller's passed-in locals_d parameter (they may be looping)
_locals_dict = local_utilities()
if locals_dict is not None:
_locals_dict.update(locals_dict)
if df_alias:
_locals_dict[df_alias] = df
else:
_locals_dict['df'] = df
local_keys = list(_locals_dict.keys())
# build a dataframe of eval results for non-temp targets
# since we allow targets to be recycled, we want to only keep the last usage
variables = OrderedDict()
# need to be able to identify which variables causes an error, which keeps
# this from being expressed more parsimoniously
for e in zip(assignment_expressions.target, assignment_expressions.expression):
target, expression = e
assert isinstance(target, str), \
"expected target '%s' for expression '%s' to be string not %s" % \
(target, expression, type(target))
if target in local_keys:
logger.warning("assign_variables target obscures local_d name '%s'", str(target))
if is_temp_scalar(target) or is_throwaway(target):
try:
x = eval(expression, globals(), _locals_dict)
except Exception as err:
logger.error("assign_variables error: %s: %s", type(err).__name__, str(err))
logger.error("assign_variables expression: %s = %s", str(target), str(expression))
raise err
if not is_throwaway(target):
_locals_dict[target] = x
if trace_assigned_locals is not None:
trace_assigned_locals[uniquify_key(trace_assigned_locals, target)] = x
continue
try:
# FIXME - log any numpy warnings/errors but don't raise
np_logger.target = str(target)
np_logger.expression = str(expression)
saved_handler = np.seterrcall(np_logger)
save_err = np.seterr(all='log')
# FIXME should whitelist globals for security?
globals_dict = {}
expr_values = to_series(eval(expression, globals_dict, _locals_dict))
np.seterr(**save_err)
np.seterrcall(saved_handler)
except Exception as err:
logger.exception(f"assign_variables - {type(err).__name__} ({str(err)}) evaluating: {str(expression)}")
raise err
if not is_temp(target):
variables[target] = expr_values
if trace_results is not None:
trace_results[uniquify_key(trace_results, target)] = expr_values[trace_rows]
# update locals to allows us to ref previously assigned targets
_locals_dict[target] = expr_values
if trace_results is not None:
trace_results = pd.DataFrame.from_dict(trace_results)
trace_results.index = df[trace_rows].index
# add df columns to trace_results
trace_results = pd.concat([df[trace_rows], trace_results], axis=1)
assert variables, "No non-temp variables were assigned."
# we stored result in dict - convert to df
variables = util.df_from_dict(variables, index=df.index)
return variables, trace_results, trace_assigned_locals
| 5,340,386
|
def ResetAnalysisForANewAnalysis(master_name,
builder_name,
build_number,
build_completed=False,
pipeline_status_path=None,
current_version=None):
"""Resets the WfAnalysis object to start a new analysis."""
analysis = WfAnalysis.Get(master_name, builder_name, build_number)
analysis.Reset(
pipeline_status_path=pipeline_status_path,
status=analysis_status.RUNNING,
analysis_result_status=None,
start_time=time_util.GetUTCNow(),
end_time=None,
version=current_version,
build_completed=build_completed)
| 5,340,387
|
def getCharts(dmldata: bytearray) -> list:
"""Get DrawingML object from clipboard"""
stream = io.BytesIO(dmldata)
with zipfile.ZipFile(stream, "r") as z:
with z.open("[Content_Types].xml") as f:
tree = ET.fromstring(f.read())
part_names = []
for link in tree.findall(Override):
content_type = link.attrib["ContentType"]
if content_type == ChartType:
part_name = link.attrib["PartName"]
part_names.append(part_name)
charts = []
for part_name in part_names:
with io.TextIOWrapper(z.open(part_name.strip("/"), "r"), encoding='utf-8') as f:
xmltext = f.read()
chartfile = ChartFile(xmltext)
charts.append(chartfile.chart)
return charts
| 5,340,388
|
def docker():
"""Docker utilities."""
pass
| 5,340,389
|
def explore_validation_time_gap_threshold_segments(participant_list, time_gap_list=[100, 200, 300, 400, 500, 1000, 2000],
                                                   prune_length=None, auto_partition_low_quality_segments=False):
    """Explores different threshold values for the invalid time gaps in the Segments for all Participants in the list
    """
    seglen = 0
    segs = 0
    participants = []
    for p in participant_list:
        print("pid:", p.pid)
        if p.require_valid_segments:
            raise Exception("explore_validation_threshold_segments should be called with a list of Participants with require_valid_segments = False")
        tvalidity = []
        for seg in p.segments:
            seglen += seg.completion_time
        # count each Participant's segments once (not once per segment)
        segs += len(p.segments)
        for thresh in time_gap_list:  # time-gap
            invc = 0
            invsegs = []
            for seg in p.segments:
                if not seg.calc_validity2(thresh):
                    invc += 1
                    invsegs.append(seg)  # record the invalid segment for the diagnostic print
            if len(invsegs) > 0:
                print("seg:", invsegs)
            tvalidity.append((thresh, invc))
        participants.append((p.pid, tvalidity, len(p.segments)))
        print((tvalidity, len(p.segments)))
    print("average seg len", seglen / float(segs))
    return participants
| 5,340,390
|
def convolve_design(X, hrf, opt=None):
"""convolve each column of a 2d design matrix with hrf
Args:
X ([2D design matrix]): time by cond, or list of onsets
hrf ([1D hrf function]): hrf
opt: if onset case, provides n_times and tr for
interpolation
Returns:
[convdes]: 2D: Samples by cond
"""
# if onset-time case
if type(X) is list:
errmsg = 'n_times needs to be in opt'
np.testing.assert_equal(
'n_times' in opt,
True,
err_msg=errmsg)
n_times = opt['n_times']
tr = opt['tr']
# calc
n_conditions = len(X)
convdes = np.zeros((n_times, n_conditions))
all_times = np.linspace(0, tr*(n_times-1), n_times)
hrf_times = np.linspace(0, tr*(len(hrf)-1), len(hrf))
for q in range(n_conditions):
# onset times for qth condition in run p
otimes = X[q]
            # initialize
            yvals = np.zeros((n_times))
# loop over onset times
for r in otimes:
# interpolate to find values at the
# data sampling time points
f = pchip(
r + hrf_times,
hrf,
extrapolate=False)(all_times)
yvals = yvals + np.nan_to_num(f)
# record
convdes[:, q] = yvals
# normal vector or matrix cases
else:
ndims = X.ndim
if ndims == 1:
ntime = X.shape[0]
            convdes = np.convolve(X, hrf)
            convdes = convdes[:ntime]
else:
ntime, ncond = X.shape
            convdes = np.asarray(
                [np.convolve(X[:, x], hrf) for x in range(ncond)]).T
            convdes = convdes[:ntime, :]
return convdes
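# Example (a sketch): convolve a two-condition impulse design with a toy HRF.
# X = np.zeros((100, 2)); X[10, 0] = 1; X[50, 1] = 1
# hrf = np.exp(-np.arange(20) / 5.0)
# conv = convolve_design(X, hrf)  # shape (100, 2)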
| 5,340,391
|
def relax_incr_dimensions(iet, **kwargs):
"""
Recast Iterations over IncrDimensions as ElementalFunctions; insert
ElementalCalls to iterate over the "main" and "remainder" regions induced
by the IncrDimensions.
"""
sregistry = kwargs['sregistry']
efuncs = []
mapper = {}
for tree in retrieve_iteration_tree(iet):
iterations = [i for i in tree if i.dim.is_Incr]
if not iterations:
continue
root = iterations[0]
if root in mapper:
continue
outer, inner = split(iterations, lambda i: not i.dim.parent.is_Incr)
# Compute the iteration ranges
ranges = []
for i in outer:
maxb = i.symbolic_max - (i.symbolic_size % i.dim.step)
ranges.append(((i.symbolic_min, maxb, i.dim.step),
(maxb + 1, i.symbolic_max, i.symbolic_max - maxb)))
# Remove any offsets
# E.g., `x = x_m + 2 to x_M - 2` --> `x = x_m to x_M`
outer = [i._rebuild(limits=(i.dim.root.symbolic_min, i.dim.root.symbolic_max,
i.step))
for i in outer]
# Create the ElementalFunction
name = sregistry.make_name(prefix="bf")
body = compose_nodes(outer)
dynamic_parameters = flatten((i.symbolic_bounds, i.step) for i in outer)
dynamic_parameters.extend([i.step for i in inner if not is_integer(i.step)])
efunc = make_efunc(name, body, dynamic_parameters)
efuncs.append(efunc)
# Create the ElementalCalls
calls = []
for p in product(*ranges):
dynamic_args_mapper = {}
for i, (m, M, b) in zip(outer, p):
dynamic_args_mapper[i.symbolic_min] = m
dynamic_args_mapper[i.symbolic_max] = M
dynamic_args_mapper[i.step] = b
for j in inner:
if j.dim.root is i.dim.root and not is_integer(j.step):
value = j.step if b is i.step else b
dynamic_args_mapper[j.step] = (value,)
calls.append(efunc.make_call(dynamic_args_mapper))
mapper[root] = List(body=calls)
iet = Transformer(mapper).visit(iet)
return iet, {'efuncs': efuncs}
| 5,340,392
|
def _get_lspci_name(line):
"""Reads and returns a 'name' from a line of `lspci` output."""
hush = line.split('[')
return '['.join(hush[0:-1]).strip()
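# Example: everything before the final "[...]" device ID is kept.
# line = "00:02.0 VGA compatible controller [0300]: ExampleVendor Device [1234:5678]"
# _get_lspci_name(line)  # -> "00:02.0 VGA compatible controller [0300]: ExampleVendor Device"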
| 5,340,393
|
def dumps_bytes(obj):
"""
Serialize ``obj`` to JSON formatted ``bytes``.
"""
b = dumps(obj)
if isinstance(b, unicode):
b = b.encode("ascii")
return b
| 5,340,394
|
def get_child_right_position(position: int) -> int:
"""
heap helper function get the position of the right child of the current node
>>> get_child_right_position(0)
2
"""
return (2 * position) + 2
| 5,340,395
|
def remove_files() -> None:
"""Function that deletes multiple files when called during exit operation.
Warnings:
Deletes:
- all ``.lock`` files created for alarms and reminders.
- ``location.yaml`` file, to recreate a new one next time around.
- ``meetings`` file, to recreate a new one next time around.
"""
    if os.path.isdir('alarm'):
        os.removedirs('alarm')
    if os.path.isdir('reminder'):
        os.removedirs('reminder')
    if os.path.isfile('location.yaml'):
        os.remove('location.yaml')
    if os.path.isfile('meetings'):
        os.remove('meetings')
| 5,340,396
|
def convert_paragraphs_to_s2orc(paragraphs: List, old_to_new: Dict) -> List[Dict]:
"""
Convert paragraphs into S2ORC format
"""
# TODO: temp code to process body text into S2ORC format. this includes getting rid of sub/superscript spans.
# also combining fig & table spans into ref spans.
# also remapping the reference / bib labels to the new ones defined earlier in this function.
# temporarily, we cant support PMC xml parse bibs, so remove all links to the bibliography (cuz they'll be wrong)
for paragraph_blob in paragraphs:
del paragraph_blob['sup_spans']
del paragraph_blob['sub_spans']
paragraph_blob['ref_spans'] = []
for fig_tab_span in paragraph_blob['fig_spans'] + paragraph_blob['table_spans']:
# replace old ref_id with new ref_id. default to None if null
# optional, just wanted to check if this ever happens
assert fig_tab_span['ref_id']
fig_tab_span['ref_id'] = old_to_new.get(fig_tab_span['ref_id'])
paragraph_blob['ref_spans'].append(fig_tab_span)
del paragraph_blob['fig_spans']
del paragraph_blob['table_spans']
for cite_span in paragraph_blob['cite_spans']:
# replace old cite ids with new cite ids. again default to None if null
# optional, just wanted to check if this ever happens
assert cite_span['ref_id']
cite_span['ref_id'] = old_to_new.get(cite_span['ref_id'])
return paragraphs
| 5,340,397
|
def get_feature_set_details(shape_file_path):
""" This function gets the shape type of the shapefile and make a list
of fields to be added to output summary table based on that shape type """
try:
# Checking for geometry type
feat_desc = arcpy.Describe(shape_file_path)
arcpy.AddMessage(("Shapefile is of '{0}' type.")
.format(str(feat_desc.shapeType)))
        # According to shape type, make a list of fields to be added to the
        # summary table
list_of_fields = ["summaryfield", "summaryvalue"]
if feat_desc.shapeType.upper() == "POLYGON":
list_of_fields += ["area_acres", "area_sqkm"]
elif feat_desc.shapeType.upper() == "POLYLINE":
list_of_fields += ["length_Miles", "length_Km"]
elif feat_desc.shapeType.upper() == "POINT":
list_of_fields += ["Count"]
return [feat_desc.shapeType, list_of_fields]
except Exception as error:
arcpy.AddError("Error occurred during execution:" + str(error))
| 5,340,398
|
def test_create_order(mocker, expected_response, expected_data, client) -> None:
"""Test the create_order of the sync client"""
mocker.patch("requests.Session.request", return_value=expected_response)
actual_data = client.create_order(pair_id=1, amount=100, kind="buy", price=100)
assert actual_data == expected_data
| 5,340,399
|