content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def gcp_iam_organization_role_permission_remove_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Remove permissions from a custom organization role.

    Args:
        client (Client): GCP API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    # Delegate to the shared helper, binding the organization-scoped
    # get/update endpoints of the client.
    return remove_custom_role_permissions(
        client_request_get_method=client.gcp_iam_organization_role_get_request,
        client_request_update_method=client.gcp_iam_organization_role_update_request,
        args=args,
    )
def MainLoop(N: int, save_every: int = False, plot_histogram: bool = False):
    """Run the sandpile simulation main loop.

    Parameters
    ==========
    N: int
        Number of iterations
    save_every: int or False
        if truthy, save a snapshot of the simulation every `save_every`
        iterations
    plot_histogram: bool
        toggles plotting a histogram of avalanche sizes at the end of the run
    """
    avalanche_counts = []
    # Creation of the overloaded sand base (101x101 grid, 4 grains per cell).
    matrix = GetMatrixBase([101, 101], 4)
    # Initialization of the dune: let it relax once before measuring.
    OneTimeStepSimulation(matrix)
    prev_total = np.sum(matrix)
    for i in tqdm.trange(N):
        OneTimeStepSimulation(matrix)
        cur_total = np.sum(matrix)
        # Avalanche size for this step is the change in total grain count.
        avalanche_counts.append(cur_total - prev_total)
        prev_total = cur_total
        SandFalling(matrix, 1)
        if save_every and (i % save_every == 0):
            SaveImage(matrix, f'soc{i:05d}.png')
    # BUGFIX: removed a dead `np.histogram(...)` call whose return value was
    # discarded; the histogram is now only computed when actually plotted.
    if plot_histogram:
        import matplotlib.pyplot as plt
        plt.hist(avalanche_counts)
        plt.show()
async def test_server_failure_with_error(cli: TestClient) -> None:
    """Test that an invalid RTSPtoWebRTC server response raises ResponseError."""
    assert isinstance(cli.server, TestServer)
    # Queue a 502 reply carrying an application-level error payload.
    error_response = aiohttp.web.json_response(
        {"status": 1, "payload": "a message"}, status=502
    )
    cli.server.app["response"].append(error_response)
    client = WebClient(cast(ClientSession, cli))
    with pytest.raises(ResponseError, match=r"server failure:.*a message.*"):
        await client.webrtc("demo1", "0", OFFER_SDP)
def command_coverage_html(args):
    """Generate an HTML coverage report for each combined coverage file.

    :type args: CoverageConfig
    """
    output_files = command_coverage_combine(args)
    for output_file in output_files:
        # One report directory per coverage data file.
        dir_name = 'test/results/reports/%s' % os.path.basename(output_file)
        env = common_environment()
        # Point the `coverage` tool at this specific data file.
        # (Removed a redundant dict() wrapper around the update kwargs.)
        env.update(COVERAGE_FILE=output_file)
        run_command(args, env=env, cmd=['coverage', 'html', '-i', '-d', dir_name])
def do_publish(dry_run: bool):
    """Invoke the Gradle `publish` task to push artifacts to our maven repo."""
    publish_cmd = ['./gradlew', 'publish']
    run_command(publish_cmd, dry_run)
def get_hostname(ipv) -> str:
    """
    Resolve the hostname for an IPv4 or IPv6 address.

    :param ipv: ip address
    :return: hostname
    """
    # gethostbyaddr returns (hostname, aliaslist, ipaddrlist).
    hostname, _aliases, _addresses = socket.gethostbyaddr(ipv)
    return hostname
def load_model():
    """
    Load the CLIP model into memory (CPU only).

    Will download the model from the internet if it's not found in
    `WAGTAIL_CLIP_DOWNLOAD_PATH`.
    """
    cpu_device = torch.device("cpu")
    model, preprocess = clip.load("ViT-B/32", cpu_device, download_root=DOWNLOAD_PATH)
    return model, cpu_device, preprocess
def exception_response(ex: Exception):
    """Generate a JSON error payload from an ApiException or Exception object.

    Always returns a response built by __make_response(); this function is
    designed never to raise (it is the error path of the API).
    """
    if not ex:
        app.logger.error("Function received argument: None!")
        return __make_response(
            500,
            {
                "error":   "Unknown",
                "details": "api.exception_response() received: None!"
            }
        )
    try:
        if isinstance(ex, Exception):
            # Member variable '.ApiException' reveals the type
            if getattr(ex, 'ApiException', None):
                app.logger.error(
                    "ApiException: '{}'"
                    .format(str(ex))
                )
                response_code = ex.code
                response_payload = ex.to_dict()
            else:
                # Unexpected error, log trace by using logger.exception()
                app.logger.exception(str(ex))
                from traceback import format_exception
                e = format_exception(type(ex), ex, ex.__traceback__)
                response_payload = {
                    "error": e[-1],
                    "trace": "".join(e[1:-1])
                }
                response_code = 500
            return __make_response(response_code, response_payload)
        else:
            return __make_response(
                500,
                {
                    # BUGFIX: was misspelled "Uknown".
                    "error":   "Unknown",
                    "details": "api.exception_response() received unsupported argument",
                    # BUGFIX: a raw type object is not JSON serializable.
                    "type":    str(type(ex))
                }
            )
    except Exception:
        app.logger.exception("api.exception_response(): Internal Error!")
        return __make_response(
            500,
            {
                "error":   "Internal Error",
                "details": "api.exception_response() internal failure!"
            }
        )
def test_raster_module(test_dir):
    """
    tests the following functions from the raster module:
        spatially_match
        clip_and_snap
        project_resample
        enf_rastlist
        is_rast
        raster_overlap
        null_define
        to_numpy
        from_numpy
        null_set_range
        clip_to_shape
        grab_info

    Test data (MODIS MOD10A1 tiles, a VA shapefile and an SRTM DEM) is
    downloaded into `test_dir` on first use; subsequent runs reuse it.
    """
    # if no data folder exists at the location, build the test environment
    if not os.path.exists(os.path.join(test_dir,"raw","MODIS")):
        fetch_test_MODIS(test_dir)
    if not os.path.exists(os.path.join(test_dir,"raw","VA_shapefile")):
        fetch_VA_shapefile(test_dir)
    if not os.path.exists(os.path.join(test_dir, "raw","SRTM")):
        fetch_test_SRTM(test_dir)

    # first, lets extract some MODIS data over hampton roads
    print("preparing sample MODIS MOD10A1 data for raster opperations")
    product_dir = os.path.join(test_dir, "raw", "MODIS", "MOD10A1")
    extract_dir = os.path.join(test_dir, "pre_processed", "MODIS", "MOD10A1","0_extract")
    mosaic_dir = os.path.join(extract_dir, "1_mosaic")
    # Pull layer index 3 ("FracSnow") out of the HDFs, then mosaic the tiles.
    modis.extract_from_hdf(product_dir, [3], "FracSnow", extract_dir)
    modis.mosaic(extract_dir, outdir = mosaic_dir, pixel_type = "8_BIT_UNSIGNED")

    # test spatially match function
    print("testing function 'spatially_match' and its dependencies")
    dem_path = os.path.join(test_dir, "pre_processed","SRTM","VAcoast_DEM.tif")
    sm_dir = os.path.join(test_dir,"pre_processed","MODIS","MOD10A1","2_spatially_match")
    raster.spatially_match(dem_path, mosaic_dir, sm_dir, resamp_type = "NEAREST")

    # set null values in MODIS data as well as in the DEM
    print("testing 'null_set_range' function")
    raster.null_set_range(dem_path,low_thresh = 0, NoData_Value = 0)
    raster.null_set_range(sm_dir, high_thresh = 101, NoData_Value = 101)

    # test overlap finding functions with just the first spatially matched image in modis series
    print("testing 'raster_overlap' function")
    sample_path = os.path.join(sm_dir, "MOD10A1.A2015031.mosaic.005.2015033065804_FracSnow_sm.tif")
    overshp_path = os.path.join(test_dir, "pre_processed/MODIS/MOD10A1/clip_extent/clip_extent.shp")
    raster.raster_overlap(dem_path, sample_path, overshp_path)

    # test the clip_to_shape function
    print("testing 'clip_to_shape' function")
    clipdir = os.path.join(test_dir,"pre_processed","MODIS","MOD10A1","3_clipped")
    # NOTE(review): `vashape` is assigned but never used below -- confirm
    # whether clip_to_shape was meant to use it instead of overshp_path.
    vashape = os.path.join(test_dir,"raw","VA_shapefile","tl_2013_51_cousub.shp")
    raster.clip_to_shape(sm_dir, overshp_path, clipdir)
    return
def check(pack, inst):
    """
    A function to check if an instruction is present in the packet

    Input:
    - pack: The packet to be checked
    - inst: The instruction
    Output:
    Returns True if the instruction is present in the packet else False
    """
    # NOTE(review): getPacketKey presumably maps the opcode (inst[0]) to the
    # candidate slot keys within the packet -- confirm against its definition.
    inst_key = getPacketKey(inst[0])
    for key in inst_key:
        # A falsy key means "no candidate slot"; only real slots are compared.
        if key:
            if pack[key] == inst:
                return True
    return False
def test_calc_eirp():
    """
    Unit test for calculating the Equivalent Isotropically Radiated Power.
    """
    transmit_power = 30  # watts
    antenna_gain = 38  # dB
    # losses = 4 #dB
    eirp = calc_eirp(transmit_power, antenna_gain)
    assert round(eirp) == 68
def find_invalid_filenames(filenames, repository_root):
    """Find files that do not exist, are not in the repo, or are directories.

    Args:
        filenames: list of filenames to check
        repository_root: the absolute path of the repository's root.

    Returns: A list of (filename, error message) tuples (empty when all valid).
    """
    errors = []
    # BUGFIX: anchor the prefix check at a path-separator boundary so a
    # sibling directory such as "/repo2" is not treated as inside "/repo".
    root_prefix = repository_root.rstrip(os.sep) + os.sep
    for filename in filenames:
        abs_path = os.path.abspath(filename)
        if not (abs_path == repository_root or abs_path.startswith(root_prefix)):
            errors.append((filename, 'Error: File %s does not belong to '
                           'repository %s' % (filename, repository_root)))
        if not os.path.exists(filename):
            errors.append((filename,
                           'Error: File %s does not exist' % (filename,)))
        if os.path.isdir(filename):
            errors.append((filename, 'Error: %s is a directory. Directories are'
                           ' not yet supported' % (filename,)))
    return errors
def test_constructor():
    """Smoke-test constructing an AccuracyCalculator on CamVid-11 fixtures."""
    args = CfgNode()
    args.img_name_unique = True
    # pretend testing a model trained in its own taxonomy (oracle),
    # not in unified universal taxonomy
    args.taxonomy = 'oracle'
    args.vis_freq = 1
    args.model_path = '/path/to/dummy/model'

    data_list = get_dummy_datalist()
    dataset_name = 'camvid-11'
    class_names = load_class_names(dataset_name)
    expected_camvid_names = [
        'Building',
        'Tree',
        'Sky',
        'Car',
        'SignSymbol',
        'Road',
        'Pedestrian',
        'Fence',
        'Column_Pole',
        'Sidewalk',
        'Bicyclist',
    ]
    assert class_names == expected_camvid_names

    ac = AccuracyCalculator(
        args=args,
        data_list=data_list,
        dataset_name=dataset_name,
        class_names=class_names,
        save_folder=f'{_ROOT}/accuracy_calculator_data',
        eval_taxonomy='test_dataset',
        num_eval_classes=11,
        excluded_ids=[],
    )
def get_score(true, predicted):
    """Return the F1 score for a single instance.

    Args:
        true: numpy array of gold labels.
        predicted: numpy array of predicted labels.

    Returns:
        float: harmonic mean of precision and recall; 0. when undefined.
    """
    # BUGFIX: guard empty inputs, which previously raised ZeroDivisionError.
    if len(true) == 0 or len(predicted) == 0:
        return 0.
    numerator = len(set(predicted.tolist()).intersection(set(true.tolist())))
    p = numerator / float(len(predicted))
    r = numerator / float(len(true))
    # If recall is 0 the intersection is empty and F1 is 0 (p + r may be 0).
    if r == 0.:
        return 0.
    return 2 * p * r / float(p + r)
def check_ratio_argv(_argv):
    """Return bool, check optional argument if images are searched by same ratio"""
    # This argument is always last, so only the final two slots are inspected.
    ratio_flags = ARGV["search by ratio"]
    return _argv[-2] in ratio_flags and _argv[-1] in ratio_flags
def binary_erosion(input, structure = None, iterations = 1, mask = None,
                   output = None, border_value = 0, origin = 0, brute_force = False):
    """Multi-dimensional binary erosion with the given structure.

    An output array can optionally be provided. The origin parameter
    controls the placement of the filter. If no structuring element is
    provided an element is generated with a squared connectivity equal
    to one. The border_value parameter gives the value of the array
    outside the border. The erosion operation is repeated iterations
    times. If iterations is less than 1, the erosion is repeated until
    the result does not change anymore. If a mask is given, only those
    elements with a true value at the corresponding mask element are
    modified at each iteration.
    """
    # Delegate to the shared worker; the hard-coded 0 is presumably the
    # flag that selects erosion rather than dilation -- confirm against
    # _binary_erosion's signature.
    return _binary_erosion(input, structure, iterations, mask,
                           output, border_value, origin, 0, brute_force)
def chord_to_freq_ratios(chord):
    """Return the frequency ratios of the pitches in <chord>.

    Args:
        chord (tuple of ints): see <get_consonance_score>.

    Returns:
        tuple: (numerators, denominator) -- `numerators` is a list of ints,
        one per pitch, expressed over the common `denominator` (an int, the
        least common multiple of the per-pitch denominators).
        (Docstring fixed: the function returns a 2-tuple, not a bare list.)
    """
    numerators = [JI_NUMS[i] for i in chord]
    denoms = [JI_DENOMS[i] for i in chord]
    # Re-express every ratio over the least common denominator.
    denominator = get_lcm(denoms)
    numerators = [(num * denominator) // den
                  for num, den in zip(numerators, denoms)]
    return numerators, denominator
def proto_factor_cosine(local_proto, global_proto):
    """
    Cosine similarity between local and global prototypes, per class.

    [C, D]: D is 64 or 4
    """
    dot_product = torch.sum(local_proto * global_proto, dim=-1, keepdim=False)
    local_norm = torch.norm(local_proto, dim=-1, keepdim=False)
    global_norm = torch.norm(global_proto, dim=-1, keepdim=False)  # [C]
    # Small epsilon keeps the division stable for near-zero prototypes.
    return dot_product / (local_norm * global_norm + 1e-6)
def urlparse(d, keys=None):
    """Returns a copy of the given dictionary with url values parsed."""
    parsed = d.copy()
    # When no explicit key list is given, parse every value in the dict.
    target_keys = parsed.keys() if keys is None else keys
    for key in target_keys:
        parsed[key] = _urlparse(parsed[key])
    return parsed
def doFDR(pvalues,
          vlambda=numpy.arange(0,0.95,0.05),
          pi0_method="smoother",
          fdr_level=None,
          robust=False,
          smooth_df = 3,
          smooth_log_pi0 = False):
    """modeled after code taken from http://genomics.princeton.edu/storeylab/qvalue/linux.html.

    I did not like the error handling so I translated most to python.
    Compute FDR after method by Storey et al. (2002).

    Parameters:
        pvalues: sequence of p-values, all within [0, 1].
        vlambda: lambda grid for estimating pi0 (a single value, or >= 4 values).
        pi0_method: "smoother" (spline over lambda) or "bootstrap".
        fdr_level: optional cutoff in (0, 1]; fills result.mPassed when given.
        robust: use the robust q-value estimator for small p-values.
        smooth_df: spline degrees of freedom for the "smoother" method.
        smooth_log_pi0: smooth log(pi0) instead of pi0 itself.

    Returns an FDRResult carrying q-values, pi0, lambda and pass flags.
    Requires an embedded R session bound to the module-level `R`.
    """
    if min(pvalues) < 0 or max(pvalues) > 1:
        raise ValueError( "p-values out of range" )
    if len(vlambda) > 1 and len(vlambda) < 4:
        raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )
    if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
        raise ValueError( "vlambda must be within [0, 1).")
    m = len(pvalues)
    # these next few functions are the various ways to estimate pi0
    if len(vlambda)==1:
        vlambda = vlambda[0]
        if vlambda < 0 or vlambda >=1 :
            raise ValueError( "vlambda must be within [0, 1).")
        # Single-lambda estimate: fraction of p-values above lambda, rescaled.
        pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
        pi0 = min(pi0, 1.0)
        R.assign( "pi0", pi0)
    else:
        # NOTE(review): numpy.float was removed in NumPy 1.24 -- this code
        # requires an older NumPy (or the alias replaced with plain float).
        pi0 = numpy.zeros( len(vlambda), numpy.float )
        for i in range( len(vlambda) ):
            pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )
        R.assign( "pi0", pi0)
        R.assign( "vlambda", vlambda)
        if pi0_method=="smoother":
            if smooth_log_pi0:
                pi0 = math.log(pi0)
            R.assign( "smooth_df", smooth_df)
            # Smooth pi0(lambda) with a spline and evaluate it at max(lambda).
            spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
            pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")
            if smooth_log_pi0:
                pi0 = math.exp(pi0)
        elif pi0_method=="bootstrap":
            minpi0 = min(pi0)
            mse = numpy.zeros( len(vlambda), numpy.float )
            pi0_boot = numpy.zeros( len(vlambda), numpy.float )
            R.assign( "pvalues", pvalues)
            # 100 bootstrap resamples; keep the lambda whose pi0 estimate
            # minimises the MSE against the minimum observed pi0.
            pi0 = R("""
            m <- length(pvalues)
            minpi0 <- min(pi0)
            mse <- rep(0,length(vlambda))
            pi0_boot <- rep(0,length(vlambda))
            for(i in 1:100)
            {
                pvalues_boot <- sample(pvalues,size=m,replace=TRUE)
                for(i in 1:length(vlambda))
                {
                    pi0_boot[i] <- mean(pvalues_boot>vlambda[i])/(1-vlambda[i])
                }
                mse <- mse + (pi0_boot-minpi0)^2
            }
            pi0 <- min(pi0[mse==min(mse)])""")
        else:
            raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")
        pi0 = min(pi0,1.0)
        R.assign( "pi0", pi0 )
    if pi0 <= 0:
        raise ValueError( "The estimated pi0 <= 0. Check that you have valid p-values or use another vlambda method." )
    if fdr_level != None and (fdr_level <= 0 or fdr_level > 1):
        raise ValueError( "'fdr_level' must be within (0, 1].")
    # The estimated q-values calculated here
    #u = numpy.argsort( p )
    # change by Alan
    # ranking function which returns number of observations less than or equal
    R.assign( "pvalues", pvalues )
    R.assign( "robust", robust )
    qvalues = R("""u <- order(pvalues)
    qvalues.rank <- function(x)
    {
        idx <- sort.list(x)
        fc <- factor(x)
        nl <- length(levels(fc))
        bin <- as.integer(fc)
        tbl <- tabulate(bin)
        cs <- cumsum(tbl)
        tbl <- rep(cs, tbl)
        tbl[idx] <- tbl
        return(tbl)
    }
    v <- qvalues.rank(pvalues)
    m <- length(pvalues)
    qvalues <- pi0 * m * pvalues / v
    if(robust)
    {
        qvalues <- pi0*m*pvalues/(v*(1-(1-pvalues)^m))
    }
    qvalues[u[m]] <- min(qvalues[u[m]],1)
    for(i in (m-1):1)
    {
        qvalues[u[i]] <- min(qvalues[u[i]],qvalues[u[i+1]],1)
    }
    qvalues
    """)
    result = FDRResult()
    result.mQValues = qvalues
    if fdr_level != None:
        result.mPassed = [ x <= fdr_level for x in result.mQValues ]
    else:
        result.mPassed = [ False for x in result.mQValues ]
    result.mPValues = pvalues
    result.mPi0 = pi0
    result.mLambda = vlambda
    return result
def sequence(ini, end, step=1):
    """ Create a sequence from ini to end by step. Similar to
    ee.List.sequence, but if end != last item then adds the end to the end
    of the resulting list
    """
    end = ee.Number(end)
    if step == 0:
        step = 1  # a zero step would be degenerate; fall back to 1
    span = end.subtract(ini)
    remainder = ee.Number(span).mod(step)
    base_seq = ee.List.sequence(ini, end, step)
    # When the step does not divide the span evenly, append `end` explicitly.
    needs_end = remainder.neq(0)
    with_end = ee.Algorithms.If(needs_end, base_seq.add(end), base_seq)
    return ee.List(with_end)
def kBET_single(
    matrix,
    batch,
    k0=10,
    knn=None,
    verbose=False
):
    """
    Compute the kBET observed rejection rate via the embedded R session.

    params:
        matrix: expression matrix (at the moment: a PCA matrix, so do.pca is set to FALSE)
        batch: series or list of batch assignments
        k0: neighbourhood size handed to kBET
        knn: precomputed kNN graph handed to kBET (its own search is skipped)
        verbose: print progress messages
    returns:
        kBET observed rejection rate (np.nan when the R call fails)
    """
    anndata2ri.activate()
    ro.r("library(kBET)")
    if verbose:
        print("importing expression matrix")
    # Push the Python objects into the R global environment.
    ro.globalenv['data_mtrx'] = matrix
    ro.globalenv['batch'] = batch
    if verbose:
        print("kBET estimation")
    ro.globalenv['knn_graph'] = knn
    ro.globalenv['k0'] = k0
    ro.r(
        "batch.estimate <- kBET("
        "  data_mtrx,"
        "  batch,"
        "  knn=knn_graph,"
        "  k0=k0,"
        "  plot=FALSE,"
        "  do.pca=FALSE,"
        "  heuristic=FALSE,"
        "  adapt=FALSE,"
        f"  verbose={str(verbose).upper()}"
        ")"
    )
    try:
        score = ro.r("batch.estimate$summary$kBET.observed")[0]
    except rpy2.rinterface_lib.embedded.RRuntimeError:
        # kBET can fail (e.g. too few cells per batch); report NaN instead
        # of propagating the R error.
        score = np.nan
    anndata2ri.deactivate()
    return score
def get_eye_center_position(face: Face) -> Tuple[numpy.int64, numpy.int64]:
    """Get the center position between the eyes of the given face.

    Args:
        face (:class:`~.types.Face`):
            The face to extract the center position from.

    Returns:
        Tuple[:data:`numpy.int64`, :data:`numpy.int64`]:
            The position directly between the eyes of the face
    """
    left_eye, right_eye = get_eye_positions(face)
    left_start, left_end = left_eye
    right_start, right_end = right_eye
    # Midpoint of the two eye positions, component-wise (integer division).
    mid_first = (left_start + right_start) // 2
    mid_second = (left_end + right_end) // 2
    return mid_first, mid_second
def test_request_scope_interface():
    """
    A Request can be instantiated with a scope, and presents a `Mapping`
    interface.
    """
    scope = {"type": "http", "method": "GET", "path": "/abc/"}
    request = Request(scope)
    assert request["method"] == "GET"
    assert dict(request) == {"type": "http", "method": "GET", "path": "/abc/"}
    assert len(request) == 3
def fail(msg):
    """Print the error message in red to stderr and exit with status 1."""
    sys.stderr.write('\033[91m' + msg + '\033[0m\n')
    # BUGFIX: use sys.exit rather than the bare builtin exit(), which is
    # injected by the `site` module and not guaranteed to exist (e.g. with
    # `python -S` or in frozen executables).
    sys.exit(1)
def esc_quotes(strng):
    """ Return the input string with single and double quotes escaped out.
    """
    # Escape double quotes first, then single quotes; order is irrelevant
    # since each replacement only touches its own quote character.
    escaped = strng.replace('"', '\\"')
    escaped = escaped.replace("'", "\\'")
    return escaped
def gen_fake_game_data():
    """Build and return an example Game object for use in tests."""
    # 2014-04-04 Braves @ Nationals, final score ATL 2 - WSH 1.
    fields = {
        'gameday_id': '2014/04/04/atlmlb-wasmlb-1',
        'venue': 'Nationals Park',
        'start_time': parser.parse('2014-04-04T13:05:00-0400'),
        'game_data_directory': '/components/game/mlb/year_2014/month_04/day_04/gid_2014_04_04_atlmlb_wasmlb_1',
        'home_name_abbrev': 'WSH',
        'home_team_city': 'Washington',
        'home_team_name': 'Nationals',
        'away_name_abbrev': 'ATL',
        'away_team_city': 'Atlanta',
        'away_team_name': 'Braves',
        'home_team_runs': 1,
        'away_team_runs': 2,
    }
    return Game(**fields)
def kill_entity(entity: EntityID):
    """
    Add entity to the deletion stack and removes them from the turn queue.

    The player is never deleted: reaching this function with the player
    posts a LoseConditionMetEvent instead.
    """
    # if not player
    if entity != get_player():
        # delete from world
        delete_entity(entity)
        turn_queue = hourglass.get_turn_queue()
        # if turn holder create new queue without them
        if entity == hourglass.get_turn_holder():
            # ensure the game state reflects the new queue
            hourglass.next_turn(entity)
        elif entity in turn_queue:
            # remove from turn queue
            # NOTE(review): pop(entity) implies turn_queue is a dict keyed by
            # entity (or a list indexed by it) -- confirm the container type.
            turn_queue.pop(entity)
    else:
        event_hub.post(LoseConditionMetEvent())
def extract_data(url, file_path):
    """
    extract data from kaggle
    """
    # setup session
    with session() as conn:
        # authenticate first; the module-level `payload` holds the credentials
        conn.post('https://www.kaggle.com/account/login', data=payload)
        # stream the download straight into the target file in 1 KiB chunks
        with open(file_path, 'wb') as handle:
            response = conn.get(url, stream=True)
            print(response)
            for chunk in response.iter_content(1024):
                handle.write(chunk)
def unsafe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    Resolve all tags, even those known to be
    unsafe on untrusted input.

    WARNING: never call this on untrusted input -- UnsafeLoader can
    construct arbitrary Python objects, which is equivalent to arbitrary
    code execution.
    """
    return load_all(stream, UnsafeLoader)
def random_name(url, type):
    """
    Randomly rename every file or folder inside `url` (random names avoid
    collisions that would otherwise make a rename fail). Whether files or
    folders are renamed is decided by `type`, which is assigned
    automatically by the wrapper method the caller uses.
    (Docstring translated from Chinese.)

    :param url: path of the folder supplied by the user
    :param type: 'D' to rename sub-directories, 'F' to rename files
    :return: list of the original names of everything in the folder
             before renaming
    """
    # Fall back to a path under the configured root when `url` does not
    # exist as given.
    if not os.path.exists(url):
        url=resource_manager.Properties.getRootPath() + resource_manager.getSeparator() +url
    doc = os.listdir(url)
    if type == 'D':
        con=config_parser.ConfigParser('D')
    else:
        con=config_parser.ConfigParser('F')
    for files in doc:
        filetype = os.path.splitext(files)[1]
        # NOTE(review): `url` was made absolute above, so this should always
        # take the first branch -- confirm whether the else is reachable.
        if os.path.exists(url):
            old=url+resource_manager.getSeparator()+files
        else:
            old=resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+url+resource_manager.getSeparator()+files
        if os.path.isdir(old) and type=='D':
            random = random_string()
            New = url + resource_manager.getSeparator() + random+ filetype
            os.rename(old, New);
            # Persist old-name -> new-name mapping so the rename can be undone.
            _store_(con,url, files, random + filetype)
        elif os.path.isfile(old) and type=='F':
            random = random_string()
            # NOTE(review): the extension is kept in one branch and dropped in
            # the other while _store_ always records it -- looks inconsistent;
            # confirm intended behaviour.
            if os.path.exists(url):
                New = url + resource_manager.getSeparator() + random+ filetype
            else:
                New = url + resource_manager.getSeparator() + random
            os.rename(old, New);
            _store_(con,url, files, random + filetype)
    con.save()
    list = doc
    return list;
def peakdet(v, delta, x = None):
    """
    Detect local maxima and minima in the series *v*.

    Converted from MATLAB script at http://billauer.co.il/peakdet.html

    A point is confirmed as a maximum once the series has dropped more than
    *delta* below it (and symmetrically for minima). Returns two arrays
    (maxtab, mintab) of (position, value) rows.
    """
    peaks = []
    valleys = []
    if x is None:
        x = arange(len(v))
    v = asarray(v)
    if len(v) != len(x):
        sys.exit('Input vectors v and x must have same length')
    if not isscalar(delta):
        sys.exit('Input argument delta must be a scalar')
    if delta <= 0:
        sys.exit('Input argument delta must be positive')
    running_min, running_max = Inf, -Inf
    min_pos, max_pos = NaN, NaN
    seeking_max = True
    for i in arange(len(v)):
        current = v[i]
        if current > running_max:
            running_max = current
            max_pos = x[i]
        if current < running_min:
            running_min = current
            min_pos = x[i]
        if seeking_max:
            # Confirm a maximum once we fall delta below the running max.
            if current < running_max - delta:
                peaks.append((max_pos, running_max))
                running_min = current
                min_pos = x[i]
                seeking_max = False
        else:
            # Confirm a minimum once we rise delta above the running min.
            if current > running_min + delta:
                valleys.append((min_pos, running_min))
                running_max = current
                max_pos = x[i]
                seeking_max = True
    return array(peaks), array(valleys)
def jaccard(set1, set2):
    """
    computes the jaccard coefficient between two sets

    @param set1: first set
    @param set2: second set
    @return: the jaccard coefficient (0 when either set is empty)
    """
    if not set1 or not set2:
        return 0
    intersection_size = len(set1.intersection(set2))
    # |A union B| = |A| + |B| - |A intersect B|
    union_size = len(set1) + len(set2) - intersection_size
    return intersection_size / union_size
def create_cluster_custom_object(group: str, version: str, plural: str,
                                 resource: Dict[str, Any] = None,
                                 resource_as_yaml_file: str = None,
                                 secrets: Secrets = None) -> Dict[str, Any]:
    """
    Create a cluster-scoped custom object. If the object already exists
    (HTTP 409) the existing object is returned instead of failing.
    (Docstring corrected: the original said "Delete a custom object in the
    given namespace", but this function creates a cluster-wide object.)

    Read more about custom resources here:
    https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
    """  # noqa: E501
    api = client.CustomObjectsApi(create_k8s_api_client(secrets))
    # The body may come either from a dict or from a YAML file on disk.
    body = load_body(resource, resource_as_yaml_file)
    try:
        r = api.create_cluster_custom_object(
            group, version, plural, body, _preload_content=False
        )
        return json.loads(r.data)
    except ApiException as x:
        if x.status == 409:
            # Already exists: treat as success and return the server payload.
            logger.debug(
                "Custom resource object {}/{} already exists".format(
                    group, version))
            return json.loads(x.body)
        else:
            raise ActivityFailed(
                "Failed to create custom resource object: '{}' {}".format(
                    x.reason, x.body))
def test_greater_than_equal_to_validator_image_container():
    """ Test the greater than equal to validator with an image container """
    validator = greater_than_equal_to_validator(1.5)
    assert str(validator) == "Value(s) must be greater than or equal to 1.5"
    # Some values fall below the threshold -> validation fails.
    below = NumpyImageContainer(image=np.array([[-0.5, 0.2], [0.1, -0.9]]))
    assert not validator(below)
    # All values at or above the threshold -> validation passes.
    at_threshold = NumpyImageContainer(image=np.array([[1.5, 2.2], [1.7, 90]]))
    assert validator(at_threshold)
    strictly_above = NumpyImageContainer(image=np.array([[1.51, 2.2], [1.7, 90]]))
    assert validator(strictly_above)
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Note: mutates `flags` in place (root_dir/config/output_dir are made
    absolute) as a side effect.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        # config is interpreted relative to an explicit root_dir.
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}
    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)
    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    pipeline_args['toolkit_path'] = user_config.local.toolkit
    pipeline_args['generator_args'] = flags.generator_args
    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        # NOTE(review): 96 appears to be the project's config-error exit
        # code -- confirm against the CLI's documented exit codes.
        sys.exit(96)
    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.aspect)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)
    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))
    language = Artifact.Language.Name(
        artifact_config.language).lower()
    # Set the pipeline: each artifact type maps to one pipeline class name.
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    pipeline_args['aspect'] = Artifact.Aspect.Name(artifact_config.aspect)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
        if os.path.abspath(flags.output_dir) != os.path.abspath(DEFAULT_OUTPUT_DIR):
            logger.warning("`output_dir` is ignored in DiscoGapicConfigGen. "
                           + "Yamls are saved at the path specified by `gapic_yaml`.")
        # Config generation writes to a throwaway directory instead.
        pipeline_args['output_dir'] = tempfile.mkdtemp()
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')
    # Parse out the full configuration.
    config_args = config_util.load_config_spec(legacy_config_dict, language)
    config_args.update(pipeline_args)
    pipeline_args = config_args
    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2, )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            # Redact anything that looks like a credential before logging.
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info(' {0}'.format(line))
    # Return the final arguments.
    return pipeline_name, pipeline_args
def setup_maxbolthist(ax):  # pragma: no cover
    """setup_maxbolthist

    Builds the simulation velocity histogram visualisation pane.

    Parameters
    ----------
    ax: Axes object
        The axes position that the pane should be placed in.
    """
    # Seed the pane with an empty step plot in the house colour.
    ax.step([0], [0], color="#34a5daff")
    ax.set_xlabel("Velocity/ms$^{-1}$", fontsize=16)
    ax.set_ylabel("PDF", fontsize=16)
def printResultTable(resObj):
    """
    Print a result table (similar to the web version) to *stdout*, like::

       # ref                  prop     np components(s)
    ---- -------------------- ------ ---- ----------------------------------------
       0 Krolikowska2012      dens     65 1-ethyl-3-methylimidazolium thiocyanate
       1 Klomfar2015a         dens     37 1-ethyl-3-methylimidazolium thiocyanate
    """
    # Header row followed by a dashed separator of matching column widths.
    print('\n # {0:20s} {1:6s} {2:>4s} {3:s}'.format('ref', 'prop', 'np', 'components(s)'))
    print('{0:s} {1:s} {2:s} {3:s} {4:s}'.format('-' * 4, '-' * 20, '-' * 6, '-' * 4, '-' * 40))
    for index, entry in enumerate(resObj):
        components = ' | '.join(entry.listOfComp)
        print('{0:4d} {1:20s} {2:6s} {3:4d} {4:s}'.
              format(index, entry.sref, prop2abr[entry.prop], entry.np, components))
def get_fast_annotations():
    """
    Title: Get Fast Annotations
    Description : Get annotations for a list of sequences in a compressed form
    URL: /sequences/get_fast_annotations
    Method: GET
    URL Params:
    Data Params: JSON
        {
            "sequences": list of str ('ACGT')
                the list of sequence strings to query the database (can be any length). Alternatively, can be list of SILVA IDs (in case dbname is set to 'silva')
            "region": int (optional)
                the region id (default=1 which is V4 515F 806R)
            "get_term_info": bool (optional)
                True (default) to return also information about each term, False not to return
            "get_taxonomy": bool (optional)
                True (default) to get the dbbact assigned taxonomy for each query sequence
            "get_parents": bool (optional)
                True (default) to get the parent terms for each annotation ontology term, False to just get the annotation terms
            "get_all_exp_annotations": bool (optional)
                True (default) to get all the annotations from each experiment containing one annotation with the sequence, False to just get the annotations with the sequence
            "use_sequence_translator": bool (optional)
                True (default) to get also annotations for dbbact sequences from other regions linked to the query sequences using the wholeseqdb (i.e. SILVA)
                False to get just annotations for dbbact sequences that match exactly the query sequences
            "dbname": str, optional
                If supplied (i.e. 'silva'), assume sequence is the identifier in dbname (i.e. 'FJ978486' for 'silva' instead of acgt sequence)
    Success Response:
        Code : 200
        Content :
        {
            annotations: dict of (annotationid: details):
                annotationid : the annotationid used in seqannotations
                details:
                {
                    "annotationid" : int
                        the id of the annotation
                    "user" : str
                        name of the user who added this annotation
                        (userName from UsersTable)
                    "addedDate" : str (DD-MM-YYYY HH:MM:SS)
                        date when the annotation was added
                        (addedDate from CurationsTable)
                    "expid" : int
                        the ID of the experiment from which this annotation originated
                        (uniqueId from ExperimentsTable)
                        (see Query Experiment)
                    "currType" : str
                        curation type (differential expression/contaminant/etc.)
                        (description from CurationTypesTable)
                    "method" : str
                        The method used to detect this behavior (i.e. observation/ranksum/clustering/etc")
                        (description from MethodTypesTable)
                    "agentType" : str
                        Name of the program which submitted this annotation (i.e. heatsequer)
                        (description from AgentTypesTable)
                    "description" : str
                        Free text describing this annotation (i.e. "lower in green tomatoes compared to red ones")
                    "private" : bool
                        True if the curation is private, False if not
                    "CurationList" : list of
                    {
                        "detail" : str
                            the type of detail (i.e. ALL/HIGH/LOW)
                            (description from CurationDetailsTypeTable)
                        "term" : str
                            the ontology term for this detail (i.e. feces/ibd/homo sapiens)
                            (description from OntologyTable)
                    }
                    "parents" : list of tuples (type, list of terms)
                    {
                        type : type of the annotation type ('high'/'low','all')
                        list of terms - list of ontology terms which are annotated or parents of annotated ontology term
                    }
                }
            seqannotations : list of (seqid, annotationids):
                {
                    seqpos : position of the sequence in the list
                    annotationids : list of int
                        the annotationsid associated with this sequence
                }
            term_info : dict of {term, dict}:
                Information about each term which appears in the annotation parents. Key is the ontology term. the value dict is:
                {
                    'total_annotations' : int
                        total number of annotations where this term appears (as a parent)
                    'total_sequences' : int
                        total number of sequences in annotations where this term appears (as a parent)
                }
            taxonomy : list of str
                The dbbact assigned taxonomy for each sequence (ordered in the same order as query sequences)
        }
    Details :
        Return a dict of details for all the annotations associated with at least one of the sequences used as input, and a list of seqpos and the associated annotationids describing it
        (i.e. a sparse representation of the annotations vector for the input sequence list)
    Validation:
        If an annotation is private, return it only if user is authenticated and created the curation. If user not authenticated, do not return it in the list
        If annotation is not private, return it (no need for authentication)
    """
    debug(3, 'get_fast_annotations', request)
    cfunc = get_fast_annotations
    alldat = request.get_json()
    if alldat is None:
        # No JSON body: respond with this endpoint's documentation.
        return(getdoc(cfunc))
    sequences = alldat.get('sequences')
    if sequences is None:
        return('sequences parameter missing', 400)
    region = alldat.get('region')
    get_term_info = alldat.get('get_term_info', True)
    get_taxonomy = alldat.get('get_taxonomy', True)
    get_parents = alldat.get('get_parents', True)
    use_sequence_translator = alldat.get('use_sequence_translator', True)
    dbname = alldat.get('dbname', None)
    if dbname is not None:
        # External-db identifiers always need the sequence translator.
        use_sequence_translator = True
    get_all_exp_annotations = alldat.get('get_all_exp_annotations', True)
    if use_sequence_translator:
        seq_translate_api = g.seq_translate_api
    else:
        seq_translate_api = None
    err, annotations, seqannotations, term_info, taxonomy = dbannotations.GetFastAnnotations(g.con, g.cur, sequences, region=region, userid=current_user.user_id, get_term_info=get_term_info, get_taxonomy=get_taxonomy, get_parents=get_parents, get_all_exp_annotations=get_all_exp_annotations, seq_translate_api=seq_translate_api, dbname=dbname)
    if err:
        errmsg = 'error encountered while getting the fast annotations: %s' % err
        debug(6, errmsg)
        return(errmsg, 400)
    res = {'annotations': annotations, 'seqannotations': seqannotations, 'term_info': term_info, 'taxonomy': taxonomy}
    debug(3, 'returning fast annotations for %d original sequences. returning %s annotations' % (len(sequences), len(res['annotations'])))
    return json.dumps(res)
def to_numeric(arg):
"""
Converts a string either to int or to float.
This is important, because e.g. {"!==": [{"+": "0"}, 0.0]}
"""
if isinstance(arg, str):
if '.' in arg:
return float(arg)
else:
return int(arg)
return arg | 5,331,939 |
def test_slurm_free_format(tmp_path: Path):
"""Test the slurm script generation with the user passes its own script."""
free_format = """#!/bin/bash
#SBATCH -N 1
#SBATCH -t 00:05:00
#SBATCH -p godzilla
module load awesome-package/3.14.15
"""
scheduler = {"name": "slurm", "free_format": free_format}
check_script(tmp_path, scheduler) | 5,331,940 |
def get_kde_polyfit_estimator(samples, N=100000, bandwidth=200, maxlength=150000, points=500, degree=50):
    """Return a polynomial approximation of the KDE estimator.

    Fits a degree-`degree` polynomial to the KDE evaluated at `points`
    positions over [1, maxlength], and returns (polynomial_estimator,
    raw_kde_estimator).  (Translated from the original Japanese note:
    "the thing that returns the polynomial-fitted version; returns both
    just in case".)
    """
    f = get_kde_estimator(samples, N, bandwidth)
    x = np.linspace(1, maxlength, points)
    # Fit the polynomial on a uniform grid over the supported range.
    z = np.polyfit(x, f(x), degree)
    # Beyond maxlength the polynomial diverges, so clamp to its value at maxlength.
    return (lambda x: np.where(x<=maxlength, np.poly1d(z)(x), np.poly1d(z)(maxlength))), f | 5,331,941 |
def read_tracker(file_name):
    """Read and return the integer stored on the first line of *file_name*.

    Raises ValueError if the first line is not a valid integer, and the
    usual OSError subclasses if the file cannot be opened.
    """
    with open(file_name, "r") as f:
        return int(f.readline()) | 5,331,942 |
def MixR2VaporPress(qv, p):
"""Return Vapor Pressure given Mixing Ratio and Pressure
INPUTS
qv (kg kg^-1) Water vapor mixing ratio`
p (Pa) Ambient pressure
RETURNS
e (Pa) Water vapor pressure
"""
return qv * p / (Epsilon + qv) | 5,331,943 |
def sigmoid_derivative(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z+1e-8))
'''
s = np.zeros_like(Z)
for i in range(len(Z)):
if Z[i] >= 0:
t = np.exp(-Z[i])
s[i] = 1 / (1 + t)
else:
# if x is less than zero then z will be small, denom can't be
# zero because it's 1+z.
t = np.exp(Z[i])
s[i] = t/(1+t)
'''
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ | 5,331,944 |
def parse_group(tx: neo4j.Transaction, group: dict):
    """Parse a group object.

    Arguments:
        tx {neo4j.Transaction} -- Neo4j Transaction
        group {dict} -- Single group object from the bloodhound json.
    """
    properties = group['Properties']
    identifier = group['ObjectIdentifier']
    members = group['Members']
    # Upsert the group node by objectid and merge its properties onto it.
    property_query = 'UNWIND $props AS prop MERGE (n:Base {objectid: prop.source}) ON MATCH SET n:Group ON CREATE SET n:Group SET n += prop.map'
    props = {'map': properties, 'source': identifier}
    tx.run(property_query, props=props)
    # ACEs are optional on a group object.
    if 'Aces' in group and group['Aces'] is not None:
        process_ace_list(group['Aces'], identifier, "Group", tx)
    # Link every member to this group with a MemberOf edge.
    for member in members:
        query = build_add_edge_query(member['MemberType'], 'Group', 'MemberOf', '{isacl: false}')
        tx.run(query, props=dict(source=member['MemberId'], target=identifier)) | 5,331,945 |
def get_params(config, alphabet):
    """Populate *config* with training parameters derived from *alphabet*.

    Mutates *config* in place (no return value).

    :param config: config object; gains learning_algorithm, save path,
        vocab/label sizes, padding id and the alphabet itself
    :param alphabet: alphabet dict
    """
    # get algorithm
    config.learning_algorithm = get_learning_algorithm(config)
    # save best model path
    config.save_best_model_path = config.save_best_model_dir
    if config.test is False:
        # Training run: start from a clean best-model directory.
        if os.path.exists(config.save_best_model_path):
            shutil.rmtree(config.save_best_model_path)
    # get params
    config.embed_num = alphabet.word_alphabet.vocab_size # word number
    config.label_num = alphabet.label_alphabet.vocab_size # label number
    config.paddingId = alphabet.word_paddingId
    config.alphabet = alphabet
    print("embed_num : {},class_num : {}".format(config.embed_num,config.label_num))
    print("PaddingID {}".format(config.paddingId)) | 5,331,946 |
def part2(lines, rounds=100):
    """Count black tiles after *rounds* days of the cellular automaton.

    >>> data = load_example(__file__, '24')
    >>> part2(data, 0)
    10
    >>> part2(data, 1)
    15
    >>> part2(data, 2)
    12
    >>> part2(data, 3)
    25
    >>> part2(data, 4)
    14
    >>> part2(data, 5)
    23
    >>> part2(data, 6)
    28
    >>> part2(data, 7)
    41
    >>> part2(data, 8)
    37
    >>> part2(data, 9)
    49
    >>> part2(data, 10)
    37
    >>> part2(data, 20)
    132
    >>> part2(data, 30)
    259
    >>> part2(data, 40)
    406
    >>> part2(data, 50)
    566
    >>> part2(data, 60)
    788
    >>> part2(data, 70)
    1106
    >>> part2(data, 80)
    1373
    >>> part2(data, 90)
    1844
    >>> part2(data, 100)
    2208
    """
    endpoints = prepare_endpoints(lines)
    # A tile flipped an odd number of times starts out black; keep only those.
    real_endpoints = {ep: True for ep, count in Counter(endpoints).items() if count % 2 == 1}
    return simulate(real_endpoints, rounds) | 5,331,947 |
def binary_search(
    items,
    target_key,
    target_key_hi=None,
    key=None,
    lo=None,
    hi=None,
    target=Target.any,
):
    """
    Search for a target key using binary search and return (found?,
    index / range).

    The returned index / range is as follows according to the desired
    target:
    * Target.lo: lo
    * Target.hi: hi
    * Target.any: Any `x` such that `lo <= x < hi`
    * Target.range: (lo, hi)

    Where:
    * `lo` is the smallest index s.t. `target_key <= key(items[lo])`
    * `hi` is the smallest index s.t. `target_key_hi < key(items[hi])`

    Thus, the slice of items matching the target key(s) is `[lo:hi]`.

    Arguments:
    * items: Indexable such that its keys are sorted.
    * target_key: What to search for. Keys must be orderable.
    * key: Key function taking arguments (index, item) that returns the
      sort key for the item at the given index. (This allows one to
      have a separate array of keys.) If `None`, items are their own
      keys.
    * lo: Initial lower bound index (inclusive)
    * hi: Initial upper bound index (exclusive)
    * target: What in the items to target: existence, low index, high
      index, or the whole range. See `Target`.
    * target_key_hi: If searching for a range, search for target keys k
      in `target_key <= k < target_key_hi`. (Ignored otherwise.)
    """
    if target == Target.range:
        # Searching for both keys defaults the upper key to the lower one.
        if target_key_hi is None:
            target_key_hi = target_key
        # The first search finds `lo` and also brackets where `hi` can lie,
        # so the second search is restricted to [hi_le, hi_gt).
        _, lo_idx, hi_le, hi_gt = _binary_search(
            items, target_key, target_key_hi, key, lo, hi, Target.lo)
        _, hi_idx, _, _ = _binary_search(
            items, target_key_hi, None, key, hi_le, hi_gt, Target.hi)
        # Found iff the matching slice [lo_idx:hi_idx] is non-empty.
        return (lo_idx < hi_idx, (lo_idx, hi_idx))
    else:
        found, idx, _, _ = _binary_search(
            items, target_key, None, key, lo, hi, target)
        return (found, idx) | 5,331,948 |
def test_wep_open_auth(dev, apdev):
"""WEP Open System authentication"""
hostapd.add_ap(apdev[0]['ifname'],
{ "ssid": "wep-open",
"wep_key0": '"hello"' })
dev[0].connect("wep-open", key_mgmt="NONE", wep_key0='"hello"',
scan_freq="2412")
hwsim_utils.test_connectivity(dev[0].ifname, apdev[0]['ifname'])
if "[WEP]" not in dev[0].request("SCAN_RESULTS"):
raise Exception("WEP flag not indicated in scan results") | 5,331,949 |
def func_real_dirty_gauss(dirty_beam):
    """Returns a parameteric model for the map of a point source,
    consisting of the interpolated dirty beam along the y-axis
    and a sinusoid with gaussian envelope along the x-axis.

    This function is a wrapper that defines the interpolated
    dirty beam (captured by closure in the returned function).

    Parameters
    ----------
    dirty_beam : scipy.interpolate.interp1d
        Interpolation function that takes as an argument el = sin(za)
        and outputs an np.ndarray[nel, nra] that represents the dirty
        beam evaluated at the same right ascension as the map.

    Returns
    -------
    real_dirty_gauss : np.ndarray[nra*ndec]
        Model prediction for the map of the point source.
    """
    def real_dirty_gauss(
        coord, peak_amplitude, centroid_x, centroid_y, fwhm_x, offset, fringe_rate
    ):
        """Returns a parameteric model for the map of a point source,
        consisting of the interpolated dirty beam along the y-axis
        and a sinusoid with gaussian envelope along the x-axis.

        Parameter
        ---------
        coord : [ra, dec]
            Tuple containing the right ascension and declination, each
            of which is coordinate vectors of length nra and ndec, respectively.
        peak_amplitude : float
            Model parameter. Normalization of the gaussian
            in the right ascension direction.
        centroid_x : float
            Model parameter. Centroid of the gaussian in degrees in the
            right ascension direction.
        centroid_y : float
            Model parameter. Centroid of the dirty beam in degrees in the
            declination direction.
        fwhm_x : float
            Model parameter. Full width at half maximum of the gaussian
            in degrees in the right ascension direction.
        offset : float
            Model parameter. Constant background value of the map.
        fringe_rate : float
            Model parameter. Frequency of the sinusoid.

        Returns
        -------
        model : np.ndarray[nra*ndec]
            Model prediction for the map of the point source.
        """
        x, y = coord
        # Gaussian envelope in RA times the dirty beam in declination,
        # plus a constant background.
        model = (
            peak_amplitude
            * np.exp(
                -4.0 * np.log(2.0) * ((x[:, np.newaxis] - centroid_x) / fwhm_x) ** 2
            )
            * dirty_beam(y - _dec_to_el(centroid_y))
        ) + offset
        # Complex fringe term; only its real part contributes to the map.
        phase = np.exp(
            2.0j
            * np.pi
            * np.cos(np.radians(centroid_y))
            * np.sin(-np.radians(x - centroid_x))
            * fringe_rate
        )
        return (model * phase[:, np.newaxis]).real.ravel()
    return real_dirty_gauss | 5,331,950 |
def shortest_path(graph, a_node, b_node):
""" code by Eryk Kopczynski """
front = deque()
front.append(a_node)
came_from = {a_node: [a_node]}
while front:
cp = front.popleft()
for np in graph.neighbors(cp):
if np not in came_from:
front.append(np)
came_from[np] = [came_from[cp], np]
"""flatten added by Bruce Wernick. This is purely cosmetic and not ideal.
It looks like the came_from dict is storing unnecessary information!
"""
return flatten(came_from.get(b_node)) | 5,331,951 |
def rank_compute(prediction, att_plt, key, byte):
"""
- prediction : predictions of the NN
- att_plt : plaintext of the attack traces
- key : Key used during encryption
- byte : byte to attack
"""
(nb_trs, nb_hyp) = prediction.shape
idx_min = nb_trs
min_rk = 255
key_log_prob = np.zeros(nb_hyp)
rank_evol = np.full(nb_trs,255)
prediction = np.log(prediction+1e-40)
for i in range(nb_trs):
for k in range(nb_hyp):
key_log_prob[k] += prediction[i,AES_Sbox[k^att_plt[i,byte]]] #Computes the hypothesis values
rank_evol[i] = rk_key(key_log_prob,key)
return rank_evol | 5,331,952 |
def purelin(n):
"""
Linear
"""
return n | 5,331,953 |
def arrayDimension(inputArray):
"""Returns the dimension of a list-formatted array.
The dimension of the array is defined as the number of nested lists.
"""
return len(arraySize(inputArray)) | 5,331,954 |
def process_data(records, root) -> bool:
    """Creates the xml file that will be imported in pure.

    Returns True when all applicable records were added to *root*,
    False when processing stopped early on a record's creation date.
    """
    for record in records:
        item_metadata = record["metadata"]
        # If the rdm record has a uuid means that it was imported from pure - REVIEW
        if "uuid" in item_metadata:
            continue
        # Checks if the record was created today
        # NOTE(review): `created <= today` holds for any record created today
        # or earlier, so this aborts on the first non-future record -- confirm
        # the comparison (or its direction) is intended.
        if record["created"] <= datetime.today().strftime("%Y-%m-%d"):
            return False
        # Adds fields to the created xml element
        populate_xml(item_metadata, record, root)
    return True | 5,331,955 |
def bitbang_backdoor(d, handler_address, hook_address = 0x18ccc, verbose = False):
    """Invoke the bitbang_backdoor() main loop using another debug interface.

    To avoid putting the original debug interface into a stuck state, this
    installs the bitbang_backdoor as a mainloop hook on a counter, to spin
    the mainloop for a particular number of iterations before we start.

    (Why the hook instead of a SCSI timeout? Running the backdoor directly
    from a SCSI handler seems to break things so badly that even with a timeout
    set, the driver hangs.)

    The hook overlay doesn't have to keep working once we launch the backdoor.

    :param d: debug interface handle passed through to overlay_hook()
    :param handler_address: address of the hook handler (forwarded as-is)
    :param hook_address: address to overlay the hook at
    :param verbose: forwarded to overlay_hook()
    """
    # The string below is C source compiled/injected by overlay_hook();
    # it counts mainloop iterations and launches the backdoor exactly once.
    overlay_hook(d, hook_address, '''
static int counter = 0;
const int ticks_per_loop = 100;
const int loops = 1000;
MT1939::SysTime::wait_ticks(ticks_per_loop);
if (counter == loops) {
bitbang_backdoor();
}
if (counter <= loops) {
counter++;
}
''', handler_address=handler_address, verbose=verbose) | 5,331,956 |
def upload_object(object_location: ObjectLocation, stream: io.BytesIO) -> None:
"""
Upload the given data stream as an object to s3.
:param object_location: Location of the object to create/update.
:param stream: Byte steam of the object data.
"""
s3 = boto3.client("s3")
result = s3.upload_fileobj(stream, object_location.bucket.name, object_location.key)
log.debug(f"Result of upload to {object_location}: {result}") | 5,331,957 |
def hopcroft(G, S):
    """Hopcroft's algorthm for computing state equivalence.

    Parameters
    ----------
    G : fully deterministic graph
    S : iterable
        one half of the initial (bi)partition

    Returns
    -------
    Partition
    """
    sigma = alphabet(G)
    partition = Partition(list(G))
    # Seed the partition with the split induced by S; the classic
    # "process the smaller half" trick keeps the algorithm O(n log n).
    p1, p2 = partition.split(S)[0]
    smaller = partition.select_smaller(p1, p2)
    wait_set = set()
    for a in sigma:
        wait_set.add((smaller, a))
    while wait_set:
        p, a = wait_set.pop()
        # Pre-image of part p under symbol a: states with an a-edge into p.
        inv_a_p = G.in_edges(partition.parts[p], data="label")
        inv_a_p = (p for (p, q, label) in inv_a_p if label == a)
        for (p1, p2) in partition.split(inv_a_p):
            for b in sigma:
                # If (p1, b) is still pending, p2 inherits its own work item;
                # otherwise only the smaller of the two halves is enqueued.
                if (p1, b) in wait_set:
                    wait_set.add((p2, b))
                else:
                    smaller = partition.select_smaller(p1, p2)
                    wait_set.add((smaller, b))
    return partition | 5,331,958 |
def TVRegDiff(data, itern, alph, u0=None, scale='small', ep=1e-6, dx=None,
              plotflag=_has_matplotlib, diagflag=True, precondflag=True,
              diffkernel='abs', cgtol=1e-4, cgmaxit=100):
    """
    Estimate derivatives from noisy data based using the Total
    Variation Regularized Numerical Differentiation (TVDiff)
    algorithm.

    Parameters
    ----------
    data : ndarray
        One-dimensional array containing series data to be
        differentiated.
    itern : int
        Number of iterations to run the main loop. A stopping
        condition based on the norm of the gradient vector g
        below would be an easy modification. No default value.
    alph : float
        Regularization parameter. This is the main parameter
        to fiddle with. Start by varying by orders of
        magnitude until reasonable results are obtained. A
        value to the nearest power of 10 is usally adequate.
        No default value. Higher values increase
        regularization strenght and improve conditioning.
    u0 : ndarray, optional
        Initialization of the iteration. Default value is the
        naive derivative (without scaling), of appropriate
        length (this being different for the two methods).
        Although the solution is theoretically independent of
        the initialization, a poor choice can exacerbate
        conditioning issues when the linear system is solved.
    scale : {large' or 'small' (case insensitive)}, str, optional
        Default is 'small'. 'small' has somewhat better boundary
        behavior, but becomes unwieldly for data larger than
        1000 entries or so. 'large' has simpler numerics but
        is more efficient for large-scale problems. 'large' is
        more readily modified for higher-order derivatives,
        since the implicit differentiation matrix is square.
    ep : float, optional
        Parameter for avoiding division by zero. Default value
        is 1e-6. Results should not be very sensitive to the
        value. Larger values improve conditioning and
        therefore speed, while smaller values give more
        accurate results with sharper jumps.
    dx : float, optional
        Grid spacing, used in the definition of the derivative
        operators. Default is the reciprocal of the data size.
    plotflag : bool, optional
        Flag whether to display plot at each iteration.
        Default is True. Useful, but adds significant
        running time.
    diagflag : bool, optional
        Flag whether to display diagnostics at each
        iteration. Default is True. Useful for diagnosing
        preconditioning problems. When tolerance is not met,
        an early iterate being best is more worrying than a
        large relative residual.
    precondflag: bool, optional
        Flag whether to use a preconditioner for conjugate gradient solution.
        Default is True. While in principle it should speed things up,
        sometimes the preconditioner can cause convergence problems instead,
        and should be turned off. Note that this mostly makes sense for 'small'
        scale problems; for 'large' ones, the improved preconditioner is one
        of the main features of the algorithms and turning it off defeats the
        point.
    diffkernel: str, optional
        Kernel to use in the integral to smooth the derivative. By default it's
        the absolute value, |u'| (value: "abs"). However, it can be changed to
        being the square, (u')^2 (value: "sq"). The latter produces smoother
        derivatives, whereas the absolute values tends to make them more blocky.
        Default is abs.
    cgtol: float, optional
        Tolerance to use in conjugate gradient optimisation. Default is 1e-4.
    cgmaxit: int, optional
        Maximum number of iterations to use in conjugate gradient optimisation.
        Default is 100

    Returns
    -------
    u : ndarray
        Estimate of the regularized derivative of data. Due to
        different grid assumptions, length(u) = length(data) + 1
        if scale = 'small', otherwise length(u) = length(data).
    """
    # Make sure we have a column vector
    data = np.array(data)
    assert len(data.shape) == 1, "data is not one-dimensional"
    # Get the data size.
    n = len(data)
    # Default checking. (u0 is done separately within each method.)
    if dx is None:
        dx = 1.0 / n
    # Different methods for small- and large-scale problems.
    # NOTE(review): if `scale` is neither 'small' nor 'large', neither branch
    # runs and the final `return u` raises NameError -- consider validating.
    if (scale.lower() == 'small'):
        # Differentiation operator
        d0 = -np.ones(n)/dx
        du = np.ones(n-1)/dx
        dl = np.zeros(n-1)
        dl[-1] = d0[-1]
        d0[-1] *= -1
        D = sparse.diags([dl, d0, du], [-1, 0, 1])
        DT = D.transpose()
        # Antidifferentiation and its adjoint
        def A(x): return (np.cumsum(x) - 0.5 * (x + x[0])) * dx
        def AT(x): return np.concatenate([[sum(x[1:])/2.0],
                                          (sum(x)-np.cumsum(x)+0.5*x)[1:]])*dx
        # Default initialization is naive derivative
        if u0 is None:
            u0 = D*data
        u = u0.copy()
        # Since Au( 0 ) = 0, we need to adjust.
        ofst = data[0]
        # Precompute.
        ATb = AT(ofst - data)  # input: size n
        # Main loop.
        for ii in range(1, itern+1):
            if diffkernel == 'abs':
                # Diagonal matrix of weights, for linearizing E-L equation.
                Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
                # Linearized diffusion matrix, also approximation of Hessian.
                L = dx * DT * Q * D
            elif diffkernel == 'sq':
                L = dx * DT * D
            else:
                raise ValueError('Invalid diffkernel value')
            # Gradient of functional.
            g = AT(A(u)) + ATb + alph * L * u
            # Prepare to solve linear equation.
            if precondflag:
                # Simple preconditioner.
                P = alph * sparse.spdiags(L.diagonal() + 1, 0, n, n)
            else:
                P = None
            def linop(v): return (alph * L * v + AT(A(v)))
            linop = splin.LinearOperator((n, n), linop)
            s, info_i = sparse.linalg.cg(
                linop, g, x0=None, tol=cgtol, maxiter=cgmaxit,
                callback=None, M=P, atol='legacy')
            if diagflag:
                log_iteration(ii, s[0], u, g)
                if (info_i > 0):
                    logging.warning(
                        "WARNING - convergence to tolerance not achieved!")
                elif (info_i < 0):
                    logging.warning("WARNING - illegal input or breakdown")
            # Update solution.
            u = u - s
            # Display plot.
            if plotflag:
                plt.plot(u)
                plt.show()
    elif (scale.lower() == 'large'):
        # Construct anti-differentiation operator and its adjoint.
        def A(v): return np.cumsum(v)
        def AT(w): return (sum(w) * np.ones(len(w)) -
                           np.transpose(np.concatenate(([0.0],
                                                        np.cumsum(w[:-1])))))
        # Construct differentiation matrix.
        c = np.ones(n)
        D = sparse.spdiags([-c, c], [0, 1], n, n) / dx
        mask = np.ones((n, n))
        mask[-1, -1] = 0.0
        D = sparse.dia_matrix(D.multiply(mask))
        DT = D.transpose()
        # Since Au( 0 ) = 0, we need to adjust.
        data = data - data[0]
        # Default initialization is naive derivative.
        if u0 is None:
            u0 = np.concatenate(([0], np.diff(data)))
        u = u0
        # Precompute.
        ATd = AT(data)
        # Main loop.
        for ii in range(1, itern + 1):
            if diffkernel == 'abs':
                # Diagonal matrix of weights, for linearizing E-L equation.
                Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
                # Linearized diffusion matrix, also approximation of Hessian.
                L = DT * Q * D
            elif diffkernel == 'sq':
                L = DT * D
            else:
                raise ValueError('Invalid diffkernel value')
            # Gradient of functional.
            g = AT(A(u)) - ATd
            g = g + alph * L * u
            # Build preconditioner.
            if precondflag:
                c = np.cumsum(range(n, 0, -1))
                B = alph * L + sparse.spdiags(c[::-1], 0, n, n)
                # droptol = 1.0e-2
                R = sparse.dia_matrix(np.linalg.cholesky(B.todense()))
                P = np.dot(R.transpose(), R)
            else:
                P = None
            # Prepare to solve linear equation.
            def linop(v): return (alph * L * v + AT(A(v)))
            linop = splin.LinearOperator((n, n), linop)
            s, info_i = sparse.linalg.cg(
                linop, -g, x0=None, tol=cgtol, maxiter=cgmaxit, callback=None,
                M=P, atol='legacy')
            if diagflag:
                log_iteration(ii, s[0], u, g)
                if (info_i > 0):
                    logging.warning(
                        "WARNING - convergence to tolerance not achieved!")
                elif (info_i < 0):
                    logging.warning("WARNING - illegal input or breakdown")
            # Update current solution
            u = u + s
            # Display plot
            if plotflag:
                plt.plot(u / dx)
                plt.show()
        u = u / dx
    return u | 5,331,959 |
def replaceidlcode(lines,mjd,day=None):
    """
    Replace IDL code in lines (array of strings) with the results of code
    execution. This is a small helper function for translate_idl_mjd5_script().

    NOTE(review): this uses eval() on expressions taken from the script file;
    only run it on trusted input.
    """
    # day
    #  psfid=day+138
    #  domeid=day+134
    if day is not None:
        # Substitute the literal day number, but not in "day=" definitions.
        ind,nind = dln.where( (lines.lower().find('day')>-1) &
                              (lines.lower().startswith('day=')==False) )
        if nind>0:
            lines[ind] = lines[ind].replace('day',str(day))
    # indgen
    #  ims=day+149+indgen(2)
    # Translate IDL's indgen() to the numpy equivalent.
    ind,nind = dln.where(lines.lower().find('indgen(')>-1)
    if nind>0:
        lines[ind] = lines[ind].replace('indgen(','np.arange(')
    # Deal with assignment lines with code to execute
    ind,nind = dln.where( ((lines.lower().find('+')>-1) |
                           (lines.lower().find('-')>-1) |
                           (lines.lower().find('*')>-1) |
                           (lines.lower().find('np.arange')>-1)) &
                          (lines.lower().find('=')>-1) &
                          (lines.lower().find('mkplan')==-1) )
    for i in range(nind):
        line1 = lines[ind[i]]
        lo = line1.find('=')
        key = line1[0:lo]
        # Evaluate the right-hand side and write back its literal value.
        val = eval(line1[lo+1:])
        if (type(val) is int) | (type(val) is str):
            lines[ind[i]] = key+'='+str(val)
        else:
            lines[ind[i]] = key+'='+str(list(val))
    # Deal with mkplan lines with code to execute
    ind,nind = dln.where( ((lines.lower().find('+')>-1) |
                           (lines.lower().find('-')>-1) |
                           (lines.lower().find('*')>-1) |
                           (lines.lower().find('np.arange')>-1)) &
                          (lines.lower().find('=')>-1) &
                          (lines.lower().find('mkplan')>-1) )
    for i in range(nind):
        line1 = lines[ind[i]]
        raise ValueError('This has not been implemented yet')
    return lines | 5,331,960 |
def test(
    model: nn.Module,
    classes: dict,
    data_loader: torch.utils.data.DataLoader,
    criterion: nn.Module,
    # scheduler: nn.Module,
    epoch: int,
    num_iteration: int,
    use_cuda: bool,
    tensorboard_writer: torch.utils.tensorboard.SummaryWriter,
    name_step: str,
):
    """ Test a given model

    NOTE(review): the progress-bar description reads the module-global `args`
    (args.epochs) -- confirm it is defined wherever this is imported.

    Args:
        model (nn.Module): model to test.
        classes (dict): dictionnary containing the classes and their indice.
        data_loader (torch.utils.data.DataLoader): data loader with the data to test the model on.
        criterion (nn.Module): loss function.
        epoch (int): epoch of training corresponding to the model.
        num_iteration (int): number of iterations since the beginning of the training corresponding to the model.
        use_cuda (bool): boolean to decide if cuda should be used.
        tensorboard_writer (torch.utils.tensorboard.SummaryWriter): writer to write the metrics in tensorboard.
        name_step (str): name of the step to write it in the description of the progress_bar

    Returns:
        loss (float): final loss
        accuracy_top1 (float): final accuracy top1
        accuracy_top5 (float): final accuracy top5
        confidence_mean (float): mean confidence
    """
    # Switch the model to eval mode
    model.eval()
    # Initialize the trackers for the loss and the accuracy
    loss_tracker = utils.MetricTracker()
    accuracy_top1_tracker = utils.MetricTracker()
    accuracy_top5_tracker = utils.MetricTracker()
    confidence_tracker = utils.MetricTracker()
    # Initialize confusing matrix
    confusion_matrix_tracker = utils.ConfusionMatrix(classes)
    # create BackgroundGenerator and wrap it in tqdm progress bar
    progress_bar = tqdm(BackgroundGenerator(data_loader, max_prefetch=32), total=len(data_loader))
    for data in progress_bar:
        inputs, targets = data
        if use_cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()
        # forward pass
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # Top-5 scores and predicted class indices per sample.
        confidence, prediction = outputs.topk(dim=1, k=5)
        # scheduler.step(loss)
        # Track loss, accuracy and confidence
        loss_tracker.update(loss.item())
        accuracy_top1_tracker.update(
            (prediction[:, 0] == targets).sum().item(), targets.numel())
        accuracy_top5_tracker.update(
            (prediction[:, :5] == targets[:, None]).sum().item(), targets.numel())
        confidence_tracker.update(confidence[:, 0].sum().item(), targets.numel())
        # Update the confusion matrix
        confusion_matrix_tracker.update_confusion_matrix(targets.cpu(), prediction[:, 0].cpu())
        # Update the progress_bar information
        progress_bar.set_description(f"Epoch {epoch + 1}/{args.epochs} {name_step}")
        progress_bar.set_postfix(
            loss=f"{loss_tracker.average:05.5f}",
            accuracy_top1=f"{100 * accuracy_top1_tracker.average:05.2f}",
            accuracy_top5=f"{100 * accuracy_top5_tracker.average:05.2f}",)
    # Add the new values to the tensorboard summary writer
    tensorboard_writer.add_scalar("loss", loss_tracker.average, num_iteration)
    tensorboard_writer.add_scalar("accuracy_top1", accuracy_top1_tracker.average, num_iteration)
    tensorboard_writer.add_scalar("accuracy_top5", accuracy_top5_tracker.average, num_iteration)
    tensorboard_writer.add_scalar(
        "confidence_mean", confidence_tracker.average, num_iteration
    )
    tensorboard_writer.add_figure("confusion_matrix", confusion_matrix_tracker.plot_confusion_matrix(normalize=True), num_iteration)
    tensorboard_writer.flush()
    return (
        loss_tracker.average,
        accuracy_top1_tracker.average,
        accuracy_top5_tracker.average,
        confidence_tracker.average,
    ) | 5,331,961 |
async def test_media_player_update(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorbell: Camera,
    unadopted_camera: Camera,
):
    """Test media_player entity update."""
    await init_entry(hass, ufp, [doorbell, unadopted_camera])
    assert_entity_counts(hass, Platform.MEDIA_PLAYER, 1, 1)
    # Simulate a websocket update reporting an active talkback stream.
    new_camera = doorbell.copy()
    new_camera.talkback_stream = Mock()
    new_camera.talkback_stream.is_running = True
    mock_msg = Mock()
    mock_msg.changed_data = {}
    mock_msg.new_obj = new_camera
    ufp.api.bootstrap.cameras = {new_camera.id: new_camera}
    ufp.ws_msg(mock_msg)
    await hass.async_block_till_done()
    # The running talkback stream should surface as a "playing" state.
    state = hass.states.get("media_player.test_camera_speaker")
    assert state
    assert state.state == STATE_PLAYING | 5,331,962 |
def get_grad_spherical_harmonics(xyz, l, m):
"""Compute the gradient of the Real Spherical Harmonics of the AO.
Args:
xyz : array (Nbatch,Nelec,Nrbf,Ndim) x,y,z, distance component of each
point from each RBF center
l : array(Nrbf) l quantum number
m : array(Nrbf) m quantum number
Returns:
Y array (Nbatch,Nelec,Nrbf,3) : value of each grad SH at each point
"""
Y = torch.zeros_like(xyz)
# l=0
ind = (l == 0).nonzero().view(-1)
Y[:, :, ind, :] = _grad_spherical_harmonics_l0(xyz[:, :, ind, :])
# l=1
indl = (l == 1)
if torch.any(indl):
for mval in [-1, 0, 1]:
indm = (m == mval)
ind = (indl * indm).nonzero().view(-1)
if len(ind > 0):
# _tmp = _grad_spherical_harmonics_l1(xyz[:, :, ind, :], mval)
Y[:, :, ind, :] = _grad_spherical_harmonics_l1(
xyz[:, :, ind, :], mval)
# l=2
indl = (l == 2)
if torch.any(indl):
for mval in [-2, -1, 0, 1, 2]:
indm = (m == mval)
ind = (indl * indm).nonzero().view(-1)
if len(ind > 0):
Y[:, :, ind, :] = _grad_spherical_harmonics_l2(
xyz[:, :, ind, :], mval)
return Y | 5,331,963 |
def send_photo(self, user_ids, filepath, thread_id=None):
    """
    Send a jpeg photo as a direct message.

    :param self: bot
    :param filepath: file path to send (must be a jpeg image)
    :param user_ids: list of user_ids for creating group or
                     one user_id for send to one person
    :param thread_id: thread_id

    Returns True on success, False on any validation or send failure.
    """
    user_ids = _get_user_ids(self, user_ids)
    # Guard clauses: bad recipients, daily quota, missing or non-jpeg file.
    if not isinstance(user_ids, (list, str)):
        self.logger.error("user_ids must be a list or string")
        return False
    if self.reached_limit("messages"):
        self.logger.info("Out of messages for today.")
        return False
    if not os.path.exists(filepath):
        self.logger.error("File %s is not found", filepath)
        return False
    mime_type = guess_type(filepath)
    if mime_type[0] != "image/jpeg":
        self.logger.error("Only jpeg files are supported")
        return False
    # Rate-limit before hitting the API.
    self.delay("message")
    if not self.api.send_direct_item(
        "photo", user_ids, filepath=filepath, thread=thread_id
    ):
        self.logger.info("Message to %s wasn't sent", user_ids)
        return False
    self.total["messages"] += 1
    return True | 5,331,964 |
def references_from_string(string: str) -> List[
Union[InputReference, TaskReference, ItemReference]
]:
"""Generate a reference object from a reference string
Arguments:
string {str} -- A reference string (eg: `{{inputs.example}}`)
Raises:
ValueError: Input string cannot be parsed as a reference object
Returns:
List[Union[InputReference, TaskReference, ItemReference]] -- A list of reference objects
"""
pattern = r"{{\s*([_a-zA-Z0-9.\-\$#\?]*)\s*}}"
match = re.findall(pattern, string, flags=re.MULTILINE)
refs = []
for ref in match:
split_ref = ref.split('.')
ref_type = split_ref[0]
if ref_type == 'input':
assert len(split_ref) == 2, \
f'Input Reference must be in formatted as "input.variable" not {ref}.'
ref = InputReference(variable=split_ref[1])
elif ref_type == 'tasks':
assert len(split_ref) == 3, \
ValueError(
f'Task Reference should be in format "tasks.task-name.variable" but'
f' found: {ref}'
)
ref = TaskReference(
name=split_ref[1], variable=split_ref[2])
elif ref_type == 'item':
variable = '.'.join(split_ref[1:])
ref = ItemReference(variable=variable)
else:
raise ValueError(f'Reference of type {ref_type} not recognized: {ref}')
refs.append(ref)
return refs | 5,331,965 |
def fillTriangle(canvas, ax, ay, bx, by, cx, cy):
    """
    helper for gridViewable

    Rasterizes triangle a-b-c into the boolean `canvas` array by scanning
    columns left-to-right and filling between the two edge lines.

    based on
    http://www-users.mat.uni.torun.pl/~wrona/3d_tutor/tri_fillers.html
    assumes a->b->c is ccw, ax<bx, ax<cx

    NOTE(review): divides by (bx-ax) and (cx-ax) -- degenerate triangles
    with a vertical left edge would raise ZeroDivisionError; confirm the
    caller guarantees ax < bx and ax < cx strictly.
    """
    # Per-column slopes of edges a->b and a->c.
    dxb = (by-ay)/(bx-ax)
    dxc = (cy-ay)/(cx-ax)
    if cx > bx + 1:
        # b is the middle vertex: second segment runs b->c along the bottom.
        secondx = int(bx)
        thirdx = int(cx)
        dxb2 = (cy-by)/(cx-bx)
        dxc2 = dxc
    elif bx > cx + 1:
        # c is the middle vertex: second segment runs c->b along the top.
        secondx = int(cx)
        thirdx = int(bx)
        dxb2 = dxb
        dxc2 = (by-cy)/(bx-cx)
    else:
        # b and c are (almost) vertically aligned: single segment only.
        secondx = int(bx)
        thirdx = secondx
        dxb2 = 0.
        dxc2 = 0.
    syb = ay
    syc = ay
    # First span: from a to the middle vertex.
    for sx in range(int(ax), secondx):
        syb += dxb
        syc += dxc
        canvas[sx, int(syb):int(syc)] = True
    # Second span: from the middle vertex to the far vertex.
    for sx in range(secondx, thirdx):
        syb += dxb2
        syc += dxc2
        canvas[sx, int(syb):int(syc)] = True | 5,331,966 |
def arrange_images(normalized_posters, blur_factor, blur_radius):
    """
    Arranges images to create a collage.

    Arguments:
        normalized_posters: iterable of PIL.Image
            Normalized poster images (area-scaled).
        blur_factor:
            Number of times to apply a blurring operation to diffuse wasted space.
        blur_radius:
            Radius of neighbourhood for use as Gaussian blurring parameter.

    Returns:
        collage: np.array
            A collage of images heuristically packed together.
    """
    # as a greedy heuristic sort by size first to minimize wasted area
    normalized_posters = sorted(
        normalized_posters, key=lambda x: x.size[0] * x.size[1], reverse=True
    )
    sizes = [x.size for x in normalized_posters]
    # rpack returns a top-left position for each rectangle.
    positions = rpack.pack(sizes)
    max_width = max(a[0] + b[0] for a, b in zip(positions, sizes))
    max_height = max(a[1] + b[1] for a, b in zip(positions, sizes))
    # White canvas; `deadspace` marks pixels not covered by any poster.
    collage = np.full([max_height + 1, max_width + 1, 3], 255, dtype=np.uint8)
    deadspace = np.full_like(collage, True)
    # place images
    for (x, y), img in zip(positions, normalized_posters):
        dx, dy = img.size
        collage[y : y + dy, x : x + dx] = img
        deadspace[y : y + dy, x : x + dx] = False
    # identify all deadspace which looks harsh on the eyes
    deadspace = np.where(deadspace)
    # diffuse deadspace to get a softer background
    gaussian_blur = ImageFilter.GaussianBlur(radius=blur_radius)
    for _ in range(blur_factor):
        # Blur the whole collage but only copy blurred pixels into deadspace,
        # so the posters themselves stay sharp.
        blurred = Image.fromarray(collage).filter(gaussian_blur)
        collage[deadspace] = np.array(blurred)[deadspace]
    return collage | 5,331,967 |
def remove_files(vectors):
"""Remove all files derived from the vectors file."""
for fname in glob.glob("%s.*" % vectors):
os.remove(fname) | 5,331,968 |
def get_zomato_data_for_google_restaurants(rest_dict):
    """ Fill in Zomato data for restaurants that came only from Google.

    Each restaurant without a 'zomato_rating' is looked up in the Zomato
    search API by name and coordinates. Entries that cannot be found, or
    whose request fails, are removed from ``rest_dict`` in place.

    Args:
        rest_dict: mapping of restaurant name -> info dict; each info dict
            must contain 'latitude' and 'longitude'. Mutated in place.
    """
    # Iterate over a snapshot of the keys: the loop removes entries, and
    # mutating a dict while iterating its live key view raises RuntimeError
    # in Python 3.
    for restaurant in list(rest_dict):
        # Search for matching restaurant using coordinates, name, and radius
        if 'zomato_rating' in rest_dict[restaurant]:
            continue  # already has Zomato data
        r = requests.get('https://developers.zomato.com/api/v2.1/search'
                         + '?q=' + restaurant
                         + '&lat=' + str(rest_dict[restaurant]['latitude'])
                         + '&lon=' + str(rest_dict[restaurant]['longitude'])
                         + '&radius=500'
                         + '&sort=rating',
                         headers=config.zomato_header)
        if not r.ok:
            # Request failure: drop the entry rather than keep partial data.
            # (The old bytes-plus-str log concatenation raised TypeError on
            # Python 3; log the plain string via lazy %-args instead.)
            logging.error("%s zomato request was not okay, status %s",
                          restaurant, r.status_code)
            logging.info("%s being removed due to request failure", restaurant)
            rest_dict.pop(restaurant, None)
            continue
        results = r.json()['restaurants']
        if not results:
            # No results found so remove that restaurant from the list
            logging.info("%s not found in zomato, removing from list",
                         restaurant)
            rest_dict.pop(restaurant, None)
            continue
        logging.debug("%d results(s) found for %s in Zomato",
                      len(results), restaurant)
        # NOTE(review): matching the Zomato results by name/address (and
        # copying rating/price/cuisine fields into rest_dict) was shelved
        # upstream; until it is restored, entries with Zomato results are
        # kept as-is without Zomato fields added.
def make_text(text, position=(0, 0, 0), height=1):
    """
    Build a text sprite placed at *position*, scaled to the given *height*.
    """
    texture = TextTexture(string=text, color='white', size=100, squareTexture=False)
    material = SpriteMaterial(map=texture)
    return Sprite(material=material, position=position, scaleToTexture=True,
                  scale=[1, height, 1])
def fix_valid(log_date: str) -> None:
    """Repair the ``is_valid`` flag for every summary record on *log_date*.

    A record with any negative cumulative counter is flagged 1; records
    whose counters are all non-negative are flagged 0.

    Args:
        log_date: date string identifying the records to fix.
    """
    collection = water_heater.get_summary_save()
    counters = ('cumulative_heat_time', 'cumulative_heat_water',
                'cumulative_duration_machine', 'cumulative_use_electricity',
                'cumulative_electricity_saving')
    for record in collection.find({'log_date': log_date}):
        record_id = record['_id']
        print(record_id)
        flag = 1 if any(record[key] < 0 for key in counters) else 0
        collection.update_one({'_id': record_id}, {'$set': {'is_valid': flag}})
    print('date {0} is finish'.format(log_date))
    return
async def bot_restarter():
    """ Poll the bot subprocess every 5s and restart it whenever it has died. """
    logger.info('Started bot restarter')
    while True:
        await asyncio.sleep(5)
        process = runner.bot_process
        # poll() returns a non-None exit code once the process has ended.
        if process is None or process.poll() is not None:
            logger.info('Restarting bot because it seems to have ended.')
            await runner.start_bot()
def get_features(user_features, documents, ARGS, BOW = False, Conversational = False, User = False, SNAPSHOT_LEN = False, Questions = False, COMMENT_LEN = True):
    """
    Generates feature dictionaries for each conversation in `documents`.

    Feature families (enabled via the boolean flags):
       - BOW: bag of words features
       - Conversational: features extracted from the conversation
       - User: features based on participant information
       - SNAPSHOT_LEN: number of comments in the final snapshot
       - Questions: question features
       - COMMENT_LEN: number of comments added to the conversation

    Args:
        user_features: mapping conv_id -> per-user feature data
            (consumed by `_user_features`).
        documents: iterable of (conversation, clss, conv_id) tuples.
        ARGS: tuple (STATUS, ASPECTS, attacker_profile_ASPECTS, LEXICONS,
            QUESTIONS, UNIGRAMS_LIST, BIGRAMS_LIST) of shared resources.

    Returns:
        (user_info, starter_attack_profiles, non_starter_attack_profiles,
         all_profiles, feature_sets), where feature_sets is a list of
        (feature_dict, clss) pairs, one per document.
    """
    STATUS, ASPECTS, attacker_profile_ASPECTS, LEXICONS, QUESTIONS, UNIGRAMS_LIST, BIGRAMS_LIST = ARGS
    feature_sets = []
    # BOW features
    bow_features = []
    for pair in documents:
        conversation, clss, conv_id = pair
        feature_set = {}
        # exclude last action (the one whose outcome is being predicted)
        actions = conversation['action_feature']
        end_time = max([a['timestamp_in_sec'] for a in actions])
        actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
        # sort by (time, id parts), then reverse so newest actions come first
        actions = sorted(actions, \
            key=lambda k: (k['timestamp_in_sec'], k['id'].split('.')[1], k['id'].split('.')[2]))[::-1]
        comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
        # update feature set
        feature_set.update(_get_term_features(comments_actions, UNIGRAMS_LIST, BIGRAMS_LIST))
        bow_features.append((copy.deepcopy(feature_set), clss))
    # Conversational features
    conv_features = []
    for pair in documents:
        conversation, clss, conv_id = pair
        feature_set = {}
        # exclude last action
        actions = conversation['action_feature']
        end_time = max([a['timestamp_in_sec'] for a in actions])
        actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
        actions = sorted(actions, \
            key=lambda k: (k['timestamp_in_sec'], k['id'].split('.')[1], k['id'].split('.')[2]))[::-1]
        # only keep comment adding and section creation
        comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
        # conversational features from all actions that adds a comment
        feature_set.update(_get_global_action_features(comments_actions))
        # conversational features from the last N actions that adds a comment
        feature_set.update(_get_last_n_action_features(comments_actions, 1, LEXICONS))
        # conversational features from the last action that adds a comment of each participant
        feature_set.update(_get_action_features(comments_actions, LEXICONS))
        # conversational features based on a single participant's behavior in the conversation
        feature_set.update(_get_repeatition_features(comments_actions))
        # question features
        if Questions:
            feature_set.update(_get_question_features(conv_id, QUESTIONS))
        # restore chronological order for the reply-relation features
        actions = actions[::-1]
        # conversational features based on reply relations
        feature_set.update(_get_balance_features(actions))
        # number of comments in last snapshot
        if SNAPSHOT_LEN:
            feature_set['snapshot_len'] = conversation['snapshot_len']
        conv_features.append((copy.deepcopy(feature_set), clss))
    # participant features
    # extract the last participant's profile
    participant_features = []
    starter_attack_profiles = {0: [], 1:[]}
    non_starter_attack_profiles = {0: [], 1: []}
    all_profiles = {0: [], 1: []}
    blocks = []
    user_info = []
    for ind, pair in enumerate(documents):
        conversation, clss, conv_id = pair
        # is the starter of the conversation also the last participant in the conversation
        actions = conversation['action_feature']
        start_time = min([a['timestamp_in_sec'] for a in actions])
        end_time = max([a['timestamp_in_sec'] for a in actions])
        for a in actions:
            if a['timestamp_in_sec'] == start_time:
                if 'user_text' in a:
                    starter = a['user_text']
                else:
                    starter = 'anon'
            if a['timestamp_in_sec'] == end_time:
                if 'user_text' in a:
                    ender = a['user_text']
                else:
                    ender = 'anon'
        feature_set, user_infos = _user_features(actions, user_features[conv_id], ASPECTS, STATUS, QUESTIONS[conv_id])
        # last participant's profile
        p, b = attacker_profile(conversation, user_infos, attacker_profile_ASPECTS)
        user_info.append(user_infos)
        # bucket the profile by whether the starter was also the last actor
        if starter == ender:
            starter_attack_profiles[clss].append(p)
        else:
            non_starter_attack_profiles[clss].append(p)
        all_profiles[clss].append(p)
        # participants' block histories
        blocks.append(int(b))
        # update participant features
        participant_features.append((copy.deepcopy(feature_set), clss))
    feature_sets = []
    # update the returned feature set given the parameters
    for ind, pair in enumerate(documents):
        conversation, clss, conv_id = pair
        actions = conversation['action_feature']
        end_time = max([a['timestamp_in_sec'] for a in actions])
        actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
        comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
        feature_set = {}
        if COMMENT_LEN:
            feature_set = {'no_comments': len(comments_actions)}
        if BOW:
            feature_set.update(bow_features[ind][0])
        if Conversational:
            feature_set.update(conv_features[ind][0])
        if User:
            feature_set.update(participant_features[ind][0])
        feature_sets.append((feature_set, clss))
    return user_info, starter_attack_profiles, non_starter_attack_profiles, all_profiles, feature_sets
def incomeStat(headers):
    """
    Fetch the earnings summary ("收益统计") for the current user.

    :param headers: request headers; ``headers['Referer']`` must carry the
        query string that authenticates the user (it is re-used as the
        balance endpoint's query string).
    :return: the parsed JSON response when ``status == 0``, otherwise ``None``.
    """
    time.sleep(0.3)  # throttle to avoid hammering the endpoint
    url = f'https://kd.youth.cn/wap/user/balance?{headers["Referer"].split("?")[1]}'
    try:
        response = requests_session().get(url=url, headers=headers, timeout=50).json()
        print('收益统计')
        print(response)
        if response['status'] == 0:
            return response
        return None
    except Exception:
        # Best effort: log the failure and return None so callers can treat
        # any error as "no data". (The previous bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, which should propagate.)
        print(traceback.format_exc())
        return None
def t_matrix(phi, theta, psi, sequence):
    """Return the matrix mapping Euler-angle rates to angular velocity.

    Only the 'ZYX' sequence has a dedicated mapping; any other sequence
    falls back to the 3x3 identity matrix.
    """
    if sequence != 'ZYX':
        return np.eye(3)
    sin_phi = np.sin(phi)
    cos_phi = np.cos(phi)
    tan_theta = np.tan(theta)
    cos_theta = np.cos(theta)
    return np.array([[1, sin_phi * tan_theta, cos_phi * tan_theta],
                     [0, cos_phi, -sin_phi],
                     [0, sin_phi / cos_theta, cos_phi / cos_theta]])
def point_in_fence(x, y, points):
    """
    Test whether point (x, y) lies inside a polygonal fence.

    Walks the closed polygon counting signed crossings of the horizontal
    line through (x, y); a total winding count of +/-2 means "inside".

    :param x: longitude of the point
    :param y: latitude of the point
    :param points: polygon vertices, format [[lon1, lat1], [lon2, lat2], ...].
        No longer mutated: the previous implementation appended the first
        vertex back onto the caller's list as a side effect.
    :return: True if the point is inside the fence, False otherwise
    """
    count = 0
    x1, y1 = points[0]
    # Which half-plane the current vertex falls in relative to (x, y).
    x1_part = (y1 > y) or ((x1 - x > 0) and (y1 == y))
    # Close the polygon with a local copy instead of mutating `points`.
    closed = list(points[1:]) + [points[0]]
    for point in closed:
        x2, y2 = point
        x2_part = (y2 > y) or ((x2 > x) and (y2 == y))
        if x2_part == x1_part:
            x1, y1 = x2, y2
            continue
        # Cross product sign gives the crossing direction (> 0: counter-clockwise).
        mul = (x1 - x) * (y2 - y) - (x2 - x) * (y1 - y)
        if mul > 0:
            count += 1
        elif mul < 0:
            count -= 1
        x1, y1 = x2, y2
        x1_part = x2_part
    return count == 2 or count == -2
def test_location_detail(location: Location):
    """Verify URL reversing and resolving for the location-detail endpoint.

    Args:
        location (Location): location fixture under test.
    """
    expected_url = f"/api/locations/{location.id}"
    assert reverse("api:location-detail", kwargs={"pk": location.id}) == expected_url
    assert resolve(expected_url).view_name == "api:location-detail"
def parse_prior(composition, alphabet, weight=None):
    """Parse a description of the expected monomer distribution of a sequence.
    Valid compositions:
    * None or 'none'
        No composition specified
    * 'auto' or 'automatic'
        Use the typical average distribution
        for proteins and an equiprobable distribution for
        everything else.
    * 'equiprobable'
        All monomers have the same probability.
    * a percentage, e.g. '45%' or a fraction '0.45'
        The fraction of CG bases for nucleotide alphabets
    * a species name, e.g. 'E. coli', 'H. sapiens',
        Use the average CG percentage for the species's genome.
    * An explicit distribution
        e.g. {'A':10, 'C':40, 'G':40, 'T':10}

    Returns the prior scaled by `weight` (defaults to sqrt(len(alphabet))),
    or None when no composition is specified.
    """
    if composition is None:
        return None
    comp = composition.strip()
    if comp.lower() == "none":
        return None
    # Default pseudocount weight scales with the alphabet size.
    if weight is None and alphabet is not None:
        weight = sqrt(float(len(alphabet)))
    # NOTE(review): if both weight and alphabet are None this comparison
    # raises TypeError -- confirm callers always supply an alphabet.
    if weight < 0:
        raise ValueError("Weight cannot be negative.")
    if comp.lower() == "equiprobable":
        prior = weight * equiprobable_distribution(len(alphabet))
    elif comp.lower() == "auto" or comp.lower() == "automatic":
        # Typical amino-acid frequencies for proteins, uniform otherwise.
        if alphabet == unambiguous_protein_alphabet:
            prior = weight * asarray(aa_composition, float64)
        else:
            prior = weight * equiprobable_distribution(len(alphabet))
    elif comp in std_percentCG:
        # Known species name -> its genome-average CG percentage.
        prior = weight * base_distribution(std_percentCG[comp])
    elif comp[-1] == "%":
        prior = weight * base_distribution(float(comp[:-1]))
    elif isfloat(comp):
        # A bare fraction, e.g. '0.45' -> 45% CG.
        prior = weight * base_distribution(float(comp) * 100.0)
    elif composition[0] == "{" and composition[-1] == "}":
        # Explicit {letter: value, ...} distribution; tolerate quotes,
        # commas and colons by treating them all as whitespace.
        explicit = composition[1:-1]
        explicit = (
            explicit.replace(",", " ")
            .replace("'", " ")
            .replace('"', " ")
            .replace(":", " ")
            .split()
        )
        if len(explicit) != len(alphabet) * 2:
            raise ValueError("Explicit prior does not match length of alphabet")
        # -1 marks "not yet assigned" so missing letters can be detected below.
        prior = -ones(len(alphabet), float64)
        try:
            for r in range(len(explicit) // 2):
                letter = explicit[r * 2]
                index = alphabet.ord(letter)
                value = float(explicit[r * 2 + 1])
                prior[index] = value
        except ValueError:
            raise ValueError("Cannot parse explicit composition")
        if any(prior == -1.0):
            raise ValueError(
                "Explicit prior does not match alphabet"
            )  # pragma: no cover
        # Normalize to a probability distribution, then apply the weight.
        prior /= sum(prior)
        prior *= weight
    else:
        raise ValueError("Unknown or malformed composition: %s" % composition)
    if len(prior) != len(alphabet):
        raise ValueError(
            "The sequence alphabet and composition are incompatible."
        )  # pragma: no cover
    return prior
def get_queues(prefix=None):
    """
    Gets a list of SQS queues. When a prefix is specified, only queues with names
    that start with the prefix are returned.
    :param prefix: The prefix used to restrict the list of returned queues.
    :return: A list of Queue objects.
    """
    queue_source = (
        sqs.queues.filter(QueueNamePrefix=prefix) if prefix else sqs.queues.all()
    )
    queues = list(queue_source)
    if not queues:
        logger.warning("No queues found.")
    else:
        logger.info("Got queues: %s", ', '.join(q.url for q in queues))
    return queues
def _over_descriptions(
    symbol_table: SymbolTable,
) -> Iterator[Tuple[Optional[Symbol], Description]]:
    """
    Iterate over all the descriptions in the meta-model.
    The first tuple element is the symbol that encompasses the description
    (*e.g.* a class when the description belongs to a member or property),
    or ``None`` for descriptions outside any symbol (verification functions,
    the meta-model itself). This context is needed downstream, for example
    when resolving references.
    """
    for symbol in symbol_table.symbols:
        yield from (
            (symbol, description)
            for description in _over_descriptions_in_symbol(symbol)
        )
    yield from (
        (None, verification.description)
        for verification in symbol_table.verification_functions
        if verification.description is not None
    )
    meta_description = symbol_table.meta_model.description
    if meta_description is not None:
        yield None, meta_description
def _genNodesNormal(numNodes=None, center=None, standardDeviation=None):
    """
    Generate nodes scattered around a center point, with normally
    distributed distances and uniformly random headings
    Parameters
    ----------
    numNodes: int
        Required, number of nodes to be generated
    center: list
        Required, coordinates of the center point
    standardDeviation: float
        Required, standard deviation of the normal distribution of distances
    Returns
    -------
    list of lists
        A list of generated node coordinates
    """
    locs = []
    for _ in range(numNodes):
        heading = np.random.uniform(0, 360)
        distance = np.random.normal(0, standardDeviation)
        locs.append(geoPointInDistance2D(center, heading, distance))
    return locs
def util_test_normalize(mean, std, op_type):
    """
    Utility function for testing Normalize. Input arguments are given by other tests
    """
    if op_type == "cpp":
        # C++ pipeline: decode, then normalize, applied as two map steps.
        data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        for op in (c_vision.Decode(), c_vision.Normalize(mean, std)):
            data = data.map(input_columns=["image"], operations=op)
        return data
    if op_type == "python":
        # Python pipeline: decode/to-tensor/normalize composed into one transform.
        transform = py_vision.ComposeOp([
            py_vision.Decode(),
            py_vision.ToTensor(),
            py_vision.Normalize(mean, std),
        ])
        data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        return data.map(input_columns=["image"], operations=transform())
    raise ValueError("Wrong parameter value")
def calc_sub_from_constant(func, in_data, **kwargs):
    """[SubFromConstant](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.add.html)
    See the documentation for [AddConstant](#addconstant)
    """
    # Delegates to the shared `_calc` helper -- presumably SubFromConstant has
    # the same cost profile as AddConstant (see linked docs); confirm in `_calc`.
    return _calc(func, in_data, **kwargs)
def mask_distance_matrix(dmat, weight_bins=weight_bins):
    """Build quantization masks plus a long-range-pair mask for a distance matrix.

    Weighting rationale (from upstream discussion): larger weights are assigned
    to residue pairs forming a contact -- 20.5, 5.4 and 1 for distances 0-8,
    8-15 and >15 respectively, for pairs (i, j) with |i-j| >= 24; the numbers
    come from simple statistics of an old training set and need not be exact.

    :param dmat: (b, m, n) tensor of pairwise residue distances.
    :param weight_bins: distance thresholds used to quantize ``dmat``.
    :return: tuple ``(masks, t_imj)`` where ``masks`` is the quantized distance
        matrix and ``t_imj`` is a float (b, m, n) tensor that is 1 where the
        sequence separation ``|i - j| >= 24``, else 0.
    """
    b, m, n = dmat.size()
    # Vectorized |i - j| >= 24 mask. The previous version built a Python
    # nested list of b*m*n booleans one element at a time before converting
    # it to a tensor.
    rows = torch.arange(m, device=device).unsqueeze(1)
    cols = torch.arange(n, device=device).unsqueeze(0)
    long_range = ((rows - cols).abs() >= 24).to(torch.float)
    # expand + contiguous keeps the original (b, m, n) contiguous layout.
    t_imj = long_range.unsqueeze(0).expand(b, m, n).contiguous()
    masks = quantize_distance_matrix(dmat, weight_bins, False)
    return masks, t_imj
def get_output():
    """Return the set output setting."""
    # `_output` is a module-level global; its setter is not visible in this chunk.
    return _output
def cash_grouped_nb(target_shape, cash_flow_grouped, group_lens, init_cash_grouped):
    """Get cash series per group.

    Starts each group from its initial cash and accumulates the group's
    cash flow row by row.
    """
    check_group_lens(group_lens, target_shape[1])
    out = np.empty_like(cash_flow_grouped)
    col_start = 0
    for group in range(len(group_lens)):
        col_end = col_start + group_lens[group]
        balance = init_cash_grouped[group]
        for row in range(cash_flow_grouped.shape[0]):
            balance = add_nb(balance, cash_flow_grouped[row, group])
            out[row, group] = balance
        col_start = col_end
    return out
def agreement():
    """Render the license agreement step and record the user's acceptance."""
    form = LicenseForm()
    if form.validate_on_submit():
        accepted = "Y" if form.accept_gluu_license.data else "N"
        gluu_settings.db.set("ACCEPT_GLUU_LICENSE", accepted)
        return redirect(url_for(wizard_steps.next_step()))
    with open("./LICENSE", "r") as f:
        agreement_file = f.read()
    if request.method == "GET":
        # populate form data from settings
        form.accept_gluu_license.data = gluu_settings.db.get("ACCEPT_GLUU_LICENSE")
    wizard_steps.current_step = 'license'
    context = {
        "license": agreement_file,
        "form": form,
        "current_step": wizard_steps.step_number(),
        "template": "license",
    }
    return render_template("wizard/index.html", **context)
def _patch_output():
    """
    Patches 'google.protobuf.text_format.PrintFieldValue' to support redacted
    fields.
    """
    global _PATCHED
    if _PATCHED:
        return  # idempotent: only wrap the original printer once
    _PATCHED = True
    original_print = google.protobuf.text_format.PrintFieldValue

    def _print_redacted(field, value, out, *args, **kwargs):
        # Replace the value of any field marked with the encrypted-field option.
        options = field.GetOptions()
        if (options.HasExtension(ENCRYPTED_FIELD_OPT)
                and options.Extensions[ENCRYPTED_FIELD_OPT]):
            value = "<REDACTED>"
        original_print(field, value, out, *args, **kwargs)

    google.protobuf.text_format.PrintFieldValue = _print_redacted
def p_stmtdelim(p):
    # PLY grammar rule: the docstring below IS the grammar specification
    # (a statement delimiter is an optional delimiter followed by NEWLINE
    # or SEMI). Do not edit it as ordinary documentation.
    """stmtdelim : mb_stmtdelim NEWLINE
    stmtdelim : mb_stmtdelim SEMI"""
def _BuildTargets(targets, jobs_count):
    """Builds target with Clang coverage instrumentation.
    This function requires current working directory to be the root of checkout.
    Args:
        targets: A list of targets to build with coverage instrumentation.
        jobs_count: Number of jobs to run in parallel for compilation. If None, a
                    default value is derived based on CPUs availability.
    """
    logging.info('Building %s.', str(targets))
    # Windows needs the .bat wrapper for autoninja.
    ninja = 'autoninja.bat' if coverage_utils.GetHostPlatform() == 'win' else 'autoninja'
    cmd = [ninja, '-C', BUILD_DIR]
    if jobs_count is not None:
        cmd.append('-j' + str(jobs_count))
    cmd.extend(targets)
    subprocess.check_call(cmd)
    logging.debug('Finished building %s.', str(targets))
def enlarge_thumbnails(apps, schema_editor):
    """Regenerate existing thumbnails at the new 400x400 bounding size."""
    File = apps.get_model("core", "File")
    for file_ in File.objects.all():
        image = Image.open(file_.file)
        # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same
        # filter under its canonical name (alias since Pillow 2.7).
        image.thumbnail((400, 400), Image.LANCZOS)
        thumb = io.BytesIO()
        image.save(thumb, format="jpeg", quality=80, optimize=True, progressive=True)
        file_.thumbnail = InMemoryUploadedFile(
            thumb, None, file_.file.name, 'image/jpeg', thumb.tell(), None
        )
        file_.save()
def rotmat2quat(mat: torch.Tensor) -> torch.Tensor:
    """Converts rotation matrix to quaternion (w, x, y, z ordering).

    This uses the algorithm found on
    https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion
    , and follows the code from ceres-solver
    https://github.com/ceres-solver/ceres-solver/blob/master/include/ceres/rotation.h

    Accepts any leading batch shape: input (..., 3, 3) yields output (..., 4).
    """
    mat_shape = mat.shape
    assert mat_shape[-2:] == (3, 3)
    # Flatten all batch dimensions so the math below works on (-1, 3, 3).
    mat = torch.reshape(mat, [-1, 3, 3])
    # Case A: Easy case -- trace-based formula, numerically fine away from 180deg.
    # clamp_min guards against a slightly negative trace from rounding error.
    r = torch.sqrt(torch.clamp_min(1. + mat[:, 0, 0] + mat[:, 1, 1] + mat[:, 2, 2], 0.0))
    s = 0.5 / r
    quat = torch.stack([
        0.5 * r,
        (mat[:, 2, 1] - mat[:, 1, 2]) * s,
        (mat[:, 0, 2] - mat[:, 2, 0]) * s,
        (mat[:, 1, 0] - mat[:, 0, 1]) * s
    ], dim=-1)
    # `isclose` here is a module-level helper (not torch.isclose); r ~ 0 means
    # the rotation angle is near 180deg, where the trace formula divides by ~0.
    near_pi = isclose(r, 0.0)
    if torch.sum(near_pi) > 0:
        # Case B0, B1, B2: ~180deg rotation. Compute all three diagonal-pivot
        # variants, then pick the one pivoting on the largest diagonal entry.
        quats1 = mat.new_zeros([mat.shape[0], 3, 4])
        case_idx = torch.argmax(torch.diagonal(mat, dim1=-1, dim2=-2), dim=-1)
        for case, (i, j, k) in enumerate([[0, 1, 2], [1, 2, 0], [2, 0, 1]]):
            r = torch.sqrt(mat[..., i, i] - mat[..., j, j] - mat[..., k, k] + 1.0)
            s = 0.5 / r
            quats1[:, case, 0] = (mat[:, k, j] - mat[:, j, k]) * s
            quats1[:, case, i + 1] = 0.5 * r
            quats1[:, case, j + 1] = (mat[:, i, j] + mat[:, j, i]) * s
            quats1[:, case, k + 1] = (mat[:, k, i] + mat[:, i, k]) * s
        quat1 = quats1[torch.arange(mat.shape[0]), case_idx, :]
        # Only overwrite the batch entries that were actually near 180deg.
        quat[near_pi] = quat1[near_pi]
    quat = torch.reshape(quat, [*mat_shape[:-2], 4])
    return quat
def _release_lock():
    """
    Release the module-level lock acquired by calling _acquireLock().
    """
    # No-op when the module lock was never created.
    if _lock:
        _lock.release()
def equalize(image):
    """
    Equalize the image histogram. This function applies a non-linear
    mapping to the input image, in order to create a uniform
    distribution of grayscale values in the output image.
    Args:
        image (PIL image): Image to be equalized
    Returns:
        image (PIL image), Equalized image.
    """
    # Thin wrapper over Pillow's ImageOps.equalize.
    return ImageOps.equalize(image)
def validate_output(value):
    """Validate "output" parameter.

    Accepts ``None`` (returned unchanged), a comma-separated string, or an
    iterable of names; returns a list with empty/falsy names dropped.
    """
    if value is None:
        return None
    names = value.split(",") if isinstance(value, str) else value
    # filter out empty names
    return [name for name in names if name]
def start_monitoring_hpc(
        config_infra,
        ssh_config,
        monitoring_options,
        simulate,
        **kwargs):  # pylint: disable=W0613
    """ Starts monitoring using the HPC Exporter.

    Registers a collector for this HPC in the HPC Exporter recorded in the
    instance's runtime properties. No-op (with a warning) when no exporter
    address was stored or the scheduler interface is unsupported; only logs
    when `simulate` is set.

    :param config_infra: dict with 'infrastructure_interface' (scheduler type)
    :param ssh_config: SSH connection details (host, user, password or
        private_key); may be overridden by runtime properties
    :param monitoring_options: optional monitor_period, deployment_label,
        hpc_label, only_jobs settings
    :param simulate: when True, skip the real registration
    """
    if not simulate and "hpc_exporter_address" in ctx.instance.runtime_properties and \
            ctx.instance.runtime_properties["hpc_exporter_address"]:
        monitoring_id = ctx.instance.runtime_properties["monitoring_id"]
        hpc_exporter_address = ctx.instance.runtime_properties["hpc_exporter_address"]
        ctx.logger.info('Creating Collector in HPC Exporter...')
        # Runtime properties take precedence over the passed-in ssh_config.
        if 'ssh_config' in ctx.instance.runtime_properties:
            ssh_config = ctx.instance.runtime_properties['ssh_config']
        infrastructure_interface = config_infra['infrastructure_interface'].lower()
        # The exporter calls the Torque scheduler "pbs".
        infrastructure_interface = "pbs" if infrastructure_interface == "torque" else infrastructure_interface
        monitor_period = monitoring_options["monitor_period"] if "monitor_period" in monitoring_options else 30
        deployment_label = monitoring_options["deployment_label"] if "deployment_label" in monitoring_options \
            else ctx.deployment.id
        hpc_label = monitoring_options["hpc_label"] if "hpc_label" in monitoring_options else ctx.node.name
        only_jobs = monitoring_options["only_jobs"] if "only_jobs" in monitoring_options else False
        # Only Slurm and PBS are supported by the exporter; bail out otherwise.
        if (infrastructure_interface != "slurm") and (infrastructure_interface != "pbs"):
            ctx.logger.warning("HPC Exporter doesn't support '{0}' interface. Collector will not be created."
                               .format(infrastructure_interface))
            ctx.instance.runtime_properties["hpc_exporter_address"] = None
            return
        payload = {
            "host": ssh_config["host"],
            "scheduler": infrastructure_interface,
            "scrape_interval": monitor_period,
            "deployment_label": deployment_label,
            "monitoring_id": monitoring_id,
            "hpc_label": hpc_label,
            "only_jobs": only_jobs,
            "ssh_user": ssh_config["user"]
        }
        # Prefer password auth when available, otherwise fall back to the key.
        if "password" in ssh_config and ssh_config["password"]:
            payload["ssh_password"] = ssh_config["password"]
        else:
            payload["ssh_pkey"] = ssh_config["private_key"]
        url = hpc_exporter_address + '/collector'
        ctx.logger.info("Creating collector in " + str(url))
        response = requests.request("POST", url, json=payload)
        if not response.ok:
            ctx.logger.error("Failed to start node monitor: {0}: {1}".format(response.status_code, response.content))
            return
        ctx.logger.info("Monitor started for HPC: {0} ({1})".format(ssh_config["host"], hpc_label))
    elif simulate:
        ctx.logger.warning('monitor simulated')
    else:
        ctx.logger.warning("No HPC Exporter selected for node {0}. Won't create a collector in any HPC Exporter for it."
                           .format(ctx.node.name))
def dummy_img(w, h, intensity=200):
    """Create a flat grayscale test image of shape (h, w) filled with *intensity*."""
    return np.full((int(h), int(w)), intensity, dtype=np.uint8)
def getHandler(database):
    """
    a function instantiating and returning this plugin
    """
    # Handler for the 'events' table; the 'insert' endpoint is exposed publicly.
    return Events(database, 'events', public_endpoint_extensions=['insert'])
def RegQueryValueEx(key, valueName=None):
    """ Retrieves the type and data for the specified registry value.
    Parameters
    key            A handle to an open registry key.
                   The key must have been opened with the KEY_QUERY_VALUE access right
    valueName      The name of the registry value. it is optional.
    Return Value
    If the function succeeds, the return a tuple of the value's name and RegistryValue object data.
    If the function fails, a RegistryBaseException exception is raised, unless:
        If the key is not open, an InvalidHandleException is raised
        If access is denied, an AccessDeniedException is raised
        If the value does not exist, the function raises KeyError
    """
    try:
        # First call with no buffer: the API reports the required data length.
        (dataType, data, dataLength) = c_api.RegQueryValueExW(key=key, name=valueName)
        # Allocate a ctypes byte buffer of exactly that size, then query again
        # to actually read the value data.
        data = (dtypes.BYTE * dataLength.value)()
        (dataType, data, dataLength) = c_api.RegQueryValueExW(key=key, name=valueName,
                                                              data=data, dataLength=dataLength)
        # Wrap the raw bytes in the value class matching the registry type.
        return RegistryValueFactory().by_type(dataType)(data)
    except errors.WindowsError as exception:
        # Translate well-known Win32 errors (invalid handle, access denied,
        # missing value) into their specific exceptions; re-raise the rest.
        errors.catch_and_raise_general_errors(exception)
        logging.exception(exception)
        raise errors.RegistryBaseException(exception.winerror, exception.strerror)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.