content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def solution(A):  # O(N^2)
    """
    For a given value A, compute the number with the fewest number of
    squared values and return them within an array.
    eg. 26 can be computed with squared values [25, 1] or [16, 9, 1], but the
        answer is only [25, 1] as we are looking for the fewest number of
        squared values
    >>> solution(26)
    [25, 1]
    >>> solution(128)
    [64, 64]
    >>> solution(33)
    [25, 4, 4]
    >>> solution(256)
    [256]
    """
    work_queue = deque()              # O(1)
    ready_queue(A, work_queue, [])    # O(N) -- seed the search frontier
    return process_queue(work_queue)  # O(N^2)
def gmrt_guppi_bb(rawfile, npol=2, header=None, chunk=None, samples_per_frame=4096, nchan=1):
    """
    To read gmrt raw voltages file of GWB to convert to guppi raw
    :USAGE:
    --------
    $ gmrt_raw_toguppi [-h] [-f FILENAME] [-c CHUNK] [-hdr HEADER] [-hf HEADER_FILE] [-hfo HEADER_FILE_OUTPUT]
    To read gmrt raw voltages file of GWB to convert to guppi raw
    optional arguments:
      -h, --help            show this help message and exit
      -f FILENAME, --filename FILENAME
                            Input filename for conversion to guppiraw.
      -c CHUNK, --chunk CHUNK
                            Input chunk size to read the desired chunk of byte.
      -hdr HEADER, --header HEADER
                            Input header to inject to the raw file.
      -hf HEADER_FILE, --header-file HEADER_FILE
                            Input header from path to inject to the raw file.
      -hfo HEADER_FILE_OUTPUT, --header-file-output HEADER_FILE_OUTPUT
                            output header from path to inject to the raw file.
    NOTE
    -----
    imaginary data is not being read as documentation(https://baseband.readthedocs.io/en/stable/api/baseband.guppi.open.html#baseband.guppi.open):
    For GUPPI, complex data is only allowed when nchan > 1.
    """
    b=time.time()  # wall-clock start; used for the timing prints below
    if path.isfile(rawfile):
        rawname=Path(rawfile).stem
        if header is None:
            # Default GUPPI header. NOTE(review): TBIN/STT_IMJD/STT_SMJD and
            # OBSERVER look like values for one specific observation -- confirm
            # before reusing for other data.
            header = {#'CHAN_BW':-100,
                'TBIN':1, #provide sample rate in astropy.units * Hz
                'TELESCOP':'GMRT',
                'NPOL':npol,
                'NCHAN':nchan,
                'OBSERVER':'Vishal Gajjar',
                'STT_IMJD':58132,
                'STT_SMJD':51093,
                'NBITS':8}
        print(f'selected parameters: rawfile={rawfile}, npol={npol}, header={header}, chunk={chunk}, samples_per_frame={samples_per_frame}, nchan={nchan}')
        print(f'copying file:{rawfile}')
        # Memory-map the raw bytes as little-endian signed 8-bit samples;
        # `chunk` limits how many bytes are mapped when provided.
        if chunk is None:
            npcm_data=np.memmap(rawfile, dtype='<i1', mode='r' )#,shape=(4096,))
        else:
            npcm_data=np.memmap(rawfile, dtype='<i1', mode='r', shape=(chunk,))
        print(f'copied file :{time.time()-b}')
        #npcm_data.flush()
        #number_of_frames = totalsamples/samples_per_frame
        #shape = (samples_per_frame,number_of_frames)
        #npcm_data.flush()
        # Treat every mapped byte as a real sample and pair it with a zero
        # imaginary part (see the NOTE in the docstring about complex data).
        real1_d =npcm_data # 0,2,4 indexed
        im_d=np.zeros(np.shape(real1_d))
        resd=np.array([real1_d,im_d], dtype='<i1').transpose()
        guppifile=rawname+''
        print(f'writing file stem: {guppifile}')
        # Open a GUPPI stream writer; header keywords are forwarded as kwargs.
        #fgh = guppi.open(guppifile+'_guppi.{file_nr:04d}.raw', 'ws', frames_per_file=1180013,
        fgh = guppi.open(guppifile+'_guppi.0000.raw', 'ws',
                samples_per_frame=samples_per_frame, nchan=nchan,
                #npol=npol, #sample_rate=2.0E+08*u.Hz,
                **header)
        print(f'data shape: {np.shape(resd)}')
        fgh.write(resd)
        # -------------- when you have [p1r1,p1i1,p2r1,p2i1...]
        # im_d = npcm_data[1::2] # even indexed
        # ## pol1, pol2 = npcm_data[::2], npcm_data[1::2] # if no imaginary is in the bytes
        # #pol1, pol2 = real_d[::2], real_d[1::2]
        # ## pol1, pol2 = npcm_data[::2][::2], npcm_data[::2][1::2]
        # pol1_real = real_d[::2]
        # pol2_real = real_d[1::2]
        # pol1_im=im_d[1::2]
        # pol2_im=im_d[::2] # if you need imaginary and real
        # pol1=pol1_real+pol1_im*1j
        # pol2=pol2_real+pol2_im*1j
        # #resd=np.array([pol1,pol2]).transpose()
        # guppifile=rawname+''
        # print(f'writing file stem: {guppifile}')
        # #fgh = guppi.open(guppifile+'_guppi.{file_nr:04d}.raw', 'ws', frames_per_file=1180013,
        # fgh = guppi.open(guppifile+'_guppi.0000.raw', 'ws',
        #         samples_per_frame=samples_per_frame, nchan=nchan,
        #         #npol=npol, #sample_rate=2.0E+08*u.Hz,
        #         **header)
        # #fgh.write(resd)
        # resd=np.array([[pol1,pol2],[pol1,pol2]] , dtype='complex64').transpose()
        # print(f'data shape: {np.shape(resd)}')
        # #fgh.write(np.array([npcm_data[::2][::2], npcm_data[::2][1::2]]).transpose())
        # fgh.write(resd)
        #fgh.write(np.array(npcm_data))
        print(f'file writing completed: {time.time()-b}')
        fgh.close()
        return f'file created: {guppifile}'
    else:
        return f'file does not exist : {rawfile}'
def get_device_path():
    """Return the path of the device matching ANDROID_SERIAL, or None.

    On GCE there is no physical device, so None is returned immediately.
    """
    if is_gce():
        return None

    target_serial = environment.get_value('ANDROID_SERIAL')
    return next(
        (device.path for device in get_devices() if device.serial == target_serial),
        None)
def pattern_maker(size, dynamic):
    """
    Generate a pattern with pixel values drawn from the [0, 1] uniform
    distribution
    """
    def sample():
        return np.random.rand(size)

    if dynamic:
        # Every call produces a fresh random pattern.
        return sample

    # Freeze a single pattern and hand back a closure that always returns it.
    frozen = sample()
    return lambda: frozen
def RMSE(a, b):
    """Return the root mean squared error between a and b."""
    diff = np.subtract(a, b)
    return np.sqrt((diff ** 2).mean())
def alpha_a_b(coord, N, silent=True):
    """Calculate alpha, a, b for a rectangle with coordinates coord and
    truncation at N.

    coord is [x0, x1, y0, y1]; alpha is the minimum of |F_N| over the four
    rectangle edges, found by basin-hopping.  `zeros` and `F_N` are
    module-level names not visible in this block -- presumably a list of
    zeta-function zeros and a truncated series; confirm against the module.
    """
    [x0, x1, y0, y1] = coord
    # a: contribution of the first N entries of `zeros`, evaluated at y0.
    a = 0
    for zero in zeros[:N]:
        a += exp(-zero*y0)/abs(complex(0.5, zero))
    # b: tail contribution from the remaining entries.
    b = 0
    for zero in zeros[N:]:
        b += exp(-zero*y0)/abs(complex(0.5, zero))
    # |F_N| restricted to each of the four edges of the rectangle.
    def F_north(x):
        return abs(F_N(complex(x, y1), N))
    def F_south(x):
        return abs(F_N(complex(x, y0), N))
    def F_east(y):
        return abs(F_N(complex(x1, y), N))
    def F_west(y):
        return abs(F_N(complex(x0, y), N))
    # def x_bounds(f_new, x_new, f_old, x_old):
    #     return x0 <= x_new[0] <= x1
    # def y_bounds(f_new, x_new, f_old, x_old):
    #     return y0 <= x_new[0] <= y1
    # Bounds keep the local minimizer on the edge segment being searched.
    ns_kwargs = {"bounds":[(x0, x1)]}
    ew_kwargs = {"bounds":[(y0, y1)]}
    # Start from the edge midpoint with a step of half the edge length.
    min_north = basinhopping(F_north, 0.5*(x0 + x1), stepsize=0.5*(x1-x0), minimizer_kwargs=ns_kwargs)
    min_south = basinhopping(F_south, 0.5*(x0 + x1), stepsize=0.5*(x1-x0), minimizer_kwargs=ns_kwargs)
    min_east = basinhopping(F_east, 0.5*(y0 + y1), stepsize=0.5*(y1-y0), minimizer_kwargs=ew_kwargs)
    min_west = basinhopping(F_west, 0.5*(y0 + y1), stepsize=0.5*(y1-y0), minimizer_kwargs=ew_kwargs)
    # if not silent:
    #     print('min_north')
    #     print(min_north)
    #     print('min_south')
    #     print(min_south)
    #     print('min_east')
    #     print(min_east)
    #     print('min_west')
    #     print(min_west)
    # Keep only the objective values from the optimizer results.
    min_north = min_north.fun
    min_south = min_south.fun
    min_east = min_east.fun
    min_west = min_west.fun
    if not silent:
        print((min_north, min_south, min_east, min_west))
    alpha = min(min_north, min_south, min_east, min_west)
    return alpha, a, b
async def test_hub_support_wireless(opp):
    """Test updating hub devices when hub support wireless interfaces."""
    # test that the device list is from wireless data list
    hub = await setup_mikrotik_entry(opp)

    assert hub.api.support_wireless is True
    # Device 1 is present in both fixtures, so it carries DHCP params and
    # wireless params.
    assert hub.api.devices["00:00:00:00:00:01"]._params == DHCP_DATA[0]
    assert hub.api.devices["00:00:00:00:00:01"]._wireless_params == WIRELESS_DATA[0]
    # devices not in wireless list will not be added
    assert "00:00:00:00:00:02" not in hub.api.devices
def asdataset(
    dataclass: Any,
    reference: Optional[DataType] = None,
    dataoptions: Any = None,
) -> Any:
    """Create a Dataset object from a dataclass object.

    Args:
        dataclass: Dataclass object that defines typed Dataset.
        reference: DataArray or Dataset object as a reference of shape.
        dataoptions: Options for Dataset creation.

    Returns:
        Dataset object created from the dataclass object.
    """
    if dataoptions is None:
        try:
            dataoptions = dataclass.__dataoptions__
        except AttributeError:
            dataoptions = DataOptions(xr.Dataset)

    model = DataModel.from_dataclass(dataclass)
    dataset = dataoptions.factory()

    # Fill data variables first, then dimension coordinates, then the
    # remaining (non-dimension) coordinates, and finally the attributes.
    for var in model.data_vars:
        dataset[var.name] = var(reference)

    for coord in model.coords:
        if coord.name in dataset.dims:
            dataset.coords[coord.name] = coord(dataset)

    for coord in model.coords:
        if coord.name not in dataset.dims:
            dataset.coords[coord.name] = coord(dataset)

    for attr in model.attrs:
        dataset.attrs[attr.name] = attr()

    return dataset
def get_edge_size(reader: ChkDirReader, chunks: list[ChunkRange], tilesize: int) -> int:
    """Gets the size of an edge tile from an unknown chunk.

    Tries each chunk in turn until one inflates and decompresses cleanly;
    returns -1 when none does.
    """
    for chunk in chunks:
        payload: bytes = deflate_range(reader, chunk.start, chunk.end, True)
        if payload is None:
            continue
        try:
            pixels = lzo.decompress(payload, False, MAX_BUFFER_LEN)
            # 4 bytes per pixel (RGBA); dividing the pixel count by the tile
            # size yields the rectangle's edge length.
            return int((len(pixels) / 4) / tilesize)
        except:  # pylint: disable=bare-except
            continue
    return -1
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test model with multiple gpus.
    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
    it encodes results to gpu tensors and use gpu communication for results
    collection. On cpu mode it saves the results on different gpus to 'tmpdir'
    and collects them by the rank 0 worker.
    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect results.
    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        # Only rank 0 renders the progress bar.
        prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # This line can prevent deadlock problem in some cases.
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
            # encode mask results
            if isinstance(result[0], tuple):
                result = [(bbox_results, encode_mask_results(mask_results))
                          for bbox_results, mask_results in result]
        results.extend(result)

        if rank == 0:
            batch_size = len(result)
            # Advance the bar once per sample across all ranks.
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # Collect results from all ranks.
    # Bug fix: a stray `gpu_collect = True` here previously overrode the
    # caller's argument, making the cpu/tmpdir collection path unreachable.
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
def toRegexp(exp, terminate=False, lower=False):
    """ Case sensitive version of the previous one, for backwards compatibility """
    wildcards = ('*',)
    return toCl(exp, terminate, wildcards=wildcards, lower=lower)
def writePLYPointsAndPolygons(filename, xyzs, polygons):
    """Write a set of 2D/3D points and the polygons indexing them to an
    ASCII PLY file.

    :param filename: output path.
    :param xyzs: (n, 2) or (n, 3) array of vertex coordinates.
    :param polygons: iterable of vertex-index lists, one per face.
    """
    dim = xyzs.shape[1]
    with open(filename, 'w') as f:
        f.write('ply\nformat ascii 1.0\nelement vertex ')
        f.write(str(xyzs.shape[0]))
        f.write('\nproperty float x\nproperty float y\n')
        if dim == 3:
            f.write('property float z\n')
        f.write('element face ')
        f.write(str(len(polygons)))
        f.write('\nproperty list uchar int vertex_index\nend_header\n')
        # Bug fix: write each vertex exactly once.  The old code wrapped the
        # vertex loop in `for xyz, color, idx in zip(xyzs, polygons, xrange(...))`
        # (Python-2 xrange, and no colours actually exist), which re-emitted the
        # whole vertex list once per polygon and broke the header's vertex count.
        if dim == 2:
            for xyz in xyzs:
                f.write('%f %f\n' % (xyz[0], xyz[1]))
        else:
            for xyz in xyzs:
                f.write('%f %f %f\n' % (xyz[0], xyz[1], xyz[2]))
        # Each face line: vertex count followed by the vertex indices.
        for pol in polygons:
            f.write(str(len(pol)))
            for p in pol:
                f.write(' ' + str(int(p)))
            f.write('\n')
def setup_logging(conf_dict):
    """Configure the logging subsystem from a dictConfig-style dict."""
    logging.config.dictConfig(conf_dict)
    log.debug('Setup logging configurations.')
def marker_used_in_game(marker_id: int) -> bool:
    """
    Determine whether the marker ID is used in the game.

    :param marker_id: An official marker number, mapped to the competitor range.
    :returns: True if the marker is used in the game.
    """
    # Generator instead of a materialised list lets `any` short-circuit.
    return any(marker_id in marker_range for marker_range in MARKER_SIZES)
async def data_to_json(data: list):
    """
    Serialise the given list of tasks and write it to the module-level
    `filepath` (tasks.json).

    Args:
        data (list): [list of tasks]
    """
    serialised = json.dumps(data)
    with open(filepath, 'w') as out:
        out.write(serialised)
def iround(x):
    """
    Round an array to the nearest integer.

    Parameters
    ----------
    x : array_like
        Input data.

    Returns
    -------
    y : {numpy.ndarray, scalar}
        The rounded elements in `x`, with `int` dtype.
    """
    rounded = np.round(x)
    return rounded.astype(int)
def multiLineManager(pentadList = [], debugMode = False):
    """ Takes the complete list of pentads and returns this list once every multilines
        being put on a single line. That's ALL.

    Joins statements that were split across several pentads (lines ending in a
    backslash continuation) into single-line pentads, then normalises spacing.
    Note: pentadList is only read, never mutated, so the mutable default is
    harmless here.
    """
    output = []
    currentStatement = ""   # accumulator for the statement being joined
    presentChar = 'a'
    startingLine = 0        # index of the pentad where the current statement began
    i = 0
    state = "Nothing"       # flips to "Store and Restart" at end-of-line
    for i in range (len(pentadList)):
        ########### Variables #############
        for j in range (len(pentadList[i].text)):
            presentChar = str(pentadList[i].text[j])
            if(debugMode) : print("MultiLM printing --> ", "j = ", j, " len = ", len(pentadList[i].text), "presentChar = ", presentChar)
            ##########################################
            if(currentStatement == ""):
                if(debugMode) : print("MultiLM printing --> ", "No char yet in the buffer")
                startingLine = i
            # Last character of this pentad's text: schedule a flush.
            if(j == len(pentadList[i].text)-1):
                if(debugMode) : print("MultiLM printing --> ", "\'\\n\' detected.")
                state = "Store and Restart"
            if(presentChar != '\n' and presentChar != '\\' ):
                currentStatement += presentChar
                if(debugMode) : print("MultiLM printing --> ", "char \'", presentChar, "\' added to", currentStatement)
            if(presentChar == '\\'):
                if(currentStatement != ""):
                    currentStatement += " "
                if(debugMode) : print("MultiLM printing --> ", "char ", presentChar, "\' detected and replaced by space.") #to avoid the case "unsigned\int" becoming "unsignedint" for eg.
            if(state == "Store and Restart"):
                state = "Nothing"
                # Merge the line span [first pentad's start, current pentad's end]
                # and carry over the roles of the pentad that closed the statement.
                newPentad = pentadStruct([pentadList[startingLine].lines[0], pentadList[i].lines[1]], currentStatement)
                for roleOfPreviousLine in pentadList[i].roles:
                    newPentad.addRole(roleOfPreviousLine.type, roleOfPreviousLine.mainVar, roleOfPreviousLine.otherVars)
                output.append(newPentad)
                currentStatement = ""
    if(currentStatement != ""):
        # NOTE(review): this tail flush passes raw list indices [startingLine, i]
        # where the loop above passes pentad line numbers -- looks inconsistent,
        # confirm against pentadStruct's expectations.
        output.append(pentadStruct([startingLine, i], currentStatement))
    return spaceNormalizer(output, debugMode)
def get_ec2_conn():
    """
    Required: env.aws_region, env.aws_access_key, env.aws_secret_access_key
    Return a connection to AWS EC2, or None (with a red warning printed)
    when the region connection fails.
    """
    connection = boto.ec2.connect_to_region(
        env.aws_region,
        aws_access_key_id=env.aws_access_key,
        aws_secret_access_key=env.aws_secret_access_key,
    )
    if connection is None:
        print(red("Can't connect to ec2 region"))
    return connection
def eval_nominal_domain(pool: SamplerPool, env: SimEnv, policy: Policy, init_states: list) -> list:
    """
    Evaluate a policy using the nominal (set in the given environment) domain parameters.

    :param pool: parallel sampler
    :param env: environment to evaluate in
    :param policy: policy to evaluate
    :param init_states: initial states of the environment which will be fixed if not set to None
    :return: list of rollouts
    """
    # Nominal evaluation must not be randomized, so strip every domain
    # randomization wrapper before distributing the environment.
    env = remove_all_dr_wrappers(env, verbose=True)
    pool.invoke_all(_setup_env_policy, env, policy)

    # Sample rollouts while showing a transient progress bar.
    progress = tqdm(leave=False, file=sys.stdout, unit='rollouts', desc='Sampling')
    with progress as pb:
        return pool.run_map(_run_rollout_nom, init_states, pb)
def create_final_comment_objects():
    """Parse every CSV row of the module-level final_file into a
    Final_Comment object and return them as a list."""
    comments = []
    for line in final_file:
        # Strip trailing newlines from every field before unpacking.
        (comment_number, comment_author, account_karma, comment_score,
         comment_num_replies, comment_permalink, comment_id,
         comment_length) = [field.strip('\n') for field in line.split(",")]
        comments.append(Final_Comment(comment_number, comment_author,
                                      account_karma, comment_score,
                                      comment_num_replies, comment_permalink,
                                      comment_id, comment_length))
    return comments
def cmd_yandex_maps(term):
    """Yandex Maps Search."""
    url = 'http://maps.yandex.ru/?text=%s' % term
    redirect(url)
def normalise_target_name(name, used=(), max_length=None):
    """
    Sanitise *name* (non [-A-Za-z0-9_] characters become underscores) and
    make sure the result, truncated to *max_length*, is not already in
    *used*; an integer suffix is appended until it is unique.

    :param name: candidate name.
    :param used: collection of names already taken (membership test only).
        Bug fix: this was a mutable default list, a Python anti-pattern.
    :param max_length: maximum length of the returned name, or None.
    :return: the unique (possibly truncated/suffixed) name.
    """
    def generate_name(name, i, ml):
        # Suffix is empty on the first attempt, '_<i>' afterwards.
        i_name = '' if i == 0 else '_' + str(i)
        if ml is None:
            # No limit: width equals the full name plus suffix.
            ml = len(name) + len(i_name)
            t_name = name
        else:
            # Drop just enough trailing characters to fit name + suffix.
            length = len(name) + len(i_name) - ml
            t_name = name if length <= 0 else name[:-length]
            # If the suffix alone exceeds the limit, give up truncating.
            if len(i_name) >= ml:
                log.warn('Too many repetitions of name %s.', name)
                t_name = name
        o_name = ''.join(filter(None, [t_name, i_name]))
        return '{:{ml}.{ml}}'.format(o_name, ml=ml)

    name = re.sub(r'[^-A-Za-z0-9_]', '_', name)
    i = 0
    test_name = generate_name(name, i, max_length)
    while test_name in used:
        i += 1
        test_name = generate_name(name, i, max_length)
    return test_name
def construct_pairwise_df(sr: pd.Series, np_fun):
    """Constructs an upper diagonal df from all pairwise comparisons of a sr.

    Entry (i, j) with i < j holds np_fun(sr[j] - sr[i]); the lower triangle
    (including the diagonal) is NaN.
    """
    sr = sr.sort_index()
    values = sr.to_numpy()
    mat = np.triu(np_fun(values - values[:, None]), k=1)
    mat[np.tril_indices(mat.shape[0])] = None
    qids = sr.index.get_level_values('qid')
    frame = pd.DataFrame(mat, index=qids, columns=qids)
    return frame.rename_axis(index='qid_1', columns='qid_2')
def read_all_csv_subscenarios_from_dir_and_insert_into_db(
    conn,
    quiet,
    subscenario,
    table,
    inputs_dir,
    use_project_method,
    project_is_tx,
    cols_to_exclude_str,
    custom_method,
):
    """
    :param conn: database connection object
    :param quiet: boolean
    :param subscenario: string
    :param table: string
    :param inputs_dir: string
    :param use_project_method: boolean
    :param project_is_tx: boolean
    :param cols_to_exclude_str: string
    :param custom_method: string
    Read data from all subscenario CSVs in a directory and insert them into
    the database.
    """
    # List all files in directory and look for CSVs
    csv_files = [f for f in os.listdir(inputs_dir) if f.endswith(".csv")]

    # Check that the subscenario IDs based on the file names are unique
    check_ids_are_unique(
        inputs_dir=inputs_dir,
        csv_files=csv_files,
        use_project_method=use_project_method,
    )

    # If the subscenario is included, make a list of tuples for the subscenario
    # and inputs, and insert into the database via the relevant method
    for csv_file in csv_files:
        if not quiet:
            print("...importing CSV {}".format(csv_file))
        # One DB insertion per CSV; dir_subsc=False because each file (not a
        # directory) carries one subscenario here.
        get_subscenario_data_and_insert_into_db(
            conn=conn,
            quiet=quiet,
            subscenario=subscenario,
            table=table,
            dir_subsc=False,
            inputs_dir=inputs_dir,
            csv_file=csv_file,
            use_project_method=use_project_method,
            project_is_tx=project_is_tx,
            skip_subscenario_info=False,
            skip_subscenario_data=False,
            cols_to_exclude_str=cols_to_exclude_str,
            custom_method=custom_method,
        )
def privacy(request):
    """Render the privacy policy page."""
    return render(request, "registration/privacy.html")
def seq_search(items, key):
    """Sequential search: return the index of key in items, or -1 if absent."""
    return next((i for i, item in enumerate(items) if item == key), -1)
def _check_moog_files(fp, mode='r', clobber=True, max_filename_length=None):
    """Validate a file path before handing it to MOOG.

    Returns the path when usable, None when it is missing or too long for
    MOOG, and raises on a bad mode or on read/write conflicts.
    """
    # No path given: nothing to check.
    if fp is None:
        return

    if mode not in ('r', 'w'):
        raise ValueError("mode must be 'r' or 'w'")

    # MOOG has a hard limit on path lengths; fall back to the configured one.
    if max_filename_length is None:
        max_filename_length = opts['moog.moog_max_pathlength']
    if len(fp) > max_filename_length:
        warn("Filepath '{}' is too long for MOOG (max {}) omitting".format(fp, max_filename_length))
        return

    exists = os.path.isfile(fp)
    if mode == 'r' and not exists:
        raise IOError("File does not exist '{}'".format(fp))
    if mode == 'w' and exists and not clobber:
        raise IOError("File exist, not clobbering '{}'".format(fp))
    return fp
def checkStatus(testNode, testNodeArgs):
    """Test --terminate-at-block stops at the correct block."""
    message = " ".join([
        "The test node has begun receiving from the producing node and",
        "is expected to stop at or little bigger than the block number",
        "specified here: ",
        testNodeArgs
    ])
    Print(message)

    # Read block information from the test node as it runs.
    head, lib = getBlockNumInfo(testNode)
    Print("Test node head = {}, lib = {}.".format(head, lib))

    # Irreversible mode pins lib; otherwise the head/speculative check applies.
    if "irreversible" in testNodeArgs:
        checkIrreversible(head, lib)
    else:
        checkHeadOrSpeculative(head, lib)
def parse_prediction_key(key):
    """The "name" or "key" of a predictor is assumed to be like:
      `ProHotspotCtsProvider(Weight=Classic(sb=400, tb=8), DistanceUnit=150)`
    Parse this into a :class:`PredictionKey` instance, where
      - `name` == "ProHotspotCtsProvider"
      - `details` will be the dict: {"Weight" : "Classic(sb=400, tb=8)",
          "DistanceUnit" : 150}
    (Attempts to parse to ints or floats if possible).
    """
    if "(" not in key:
        return PredictionKey(key, {})
    i = key.index("(")
    name = key[:i].strip()
    dets = key[i+1:-1]
    dets = [x.strip() for x in _split_by_comma_not_in_brackets(dets)]
    details = {}
    for x in dets:
        if "=" not in x:
            # Bare flag with no value: store None.
            # Bug fix: previously this fell through to int(None), which
            # raises an uncaught TypeError (only ValueError was handled).
            details[x] = None
            continue
        eq = x.index("=")
        det_key = x[:eq].strip()
        value = x[eq+1:].strip()
        try:
            value = int(value)
        except ValueError:
            pass
        if isinstance(value, str):
            try:
                value = float(value)
            except ValueError:
                pass
        details[det_key] = value
    return PredictionKey(name, details)
def tick2dayfrac(tick, nbTicks):
    """Convert a tick count into the corresponding fraction of a day."""
    day_fraction = tick / nbTicks
    return day_fraction
def load_esol_semi_supervised(unlabeled_size=0.1, seed=2666):
    """
    Load the labeled ESOL dataset plus a sample of unlabeled synthetic
    SMILES, shuffled together into one semi-supervised dataset.

    Parameters
    ----------
    unlabeled_size :
        Size passed to utils.load_unlabeled_data (presumably the fraction
        of the unlabeled pool to draw -- confirm in utils).
        (Default value = 0.1)
    seed :
        RNG seed used for both the unlabeled sampling and the shuffle.
        (Default value = 2666)

    Returns
    -------
    list
        Labeled and unlabeled examples mixed in random order.
    """
    esol_labeled = pinot.data.esol()  # Get labeled and unlabeled data
    # load_unlabeled_data returns a callable; the trailing () materialises it.
    esol_unlabeled = utils.load_unlabeled_data(
        os.path.dirname(utils.__file__) + "/esol_synthetic_smiles.txt",
        unlabeled_size,
        seed=seed,
    )()
    np.random.seed(seed)
    esol_labeled.extend(esol_unlabeled)
    np.random.shuffle(
        esol_labeled
    )  # Combine and mix labeled and unlabeled data
    return esol_labeled
def get_business_day_of_month(year, month, count):
    """
    For a given month get the Nth business day by count.
    Count can also be negative, e.g. pass in -1 for "last".

    :raises ValueError: when no matching business day falls inside the
        requested month/year.
    """
    r = rrule(MONTHLY, byweekday=(MO, TU, WE, TH, FR),
              dtstart=datetime.datetime(year, month, 1),
              bysetpos=count)
    # Bug fix: indexing an empty rrule raises IndexError before the old
    # `res == None` guard could ever run; catch it and raise the intended
    # ValueError instead.  Also use `is None` rather than `== None`.
    try:
        res = r[0]
    except IndexError:
        res = None
    if res is None or res.month != month or res.year != year:
        raise ValueError("No dates found in range. is there a flaw in your logic?")
    return res.date()
def inpolygon(wkt, longitude, latitude):
    """ To determine whether the longitude and latitude coordinate is within the orbit

    :param wkt(str): the orbit wkt info
    :param longitude: to determine whether the longitude within the orbit
    :param latitude: to determine whether the latitude within the orbit
    :return: logical value whether the coordinate within the orbit and multipolygon
    """
    footprint = shapely.wkt.loads(wkt)
    location = shapely.geometry.Point(longitude, latitude)
    return footprint.contains(location), footprint
def matrixMultVec(matrix, vector):
    """
    Multiplies a matrix with a vector and returns the result as a new vector.
    :param matrix: Matrix
    :param vector: vector
    :return: vector
    """
    # One dot product per matrix row; indexing `vector` (rather than zip)
    # keeps the original IndexError behaviour for ragged inputs.
    return [
        sum(coeff * vector[idx] for idx, coeff in enumerate(row))
        for row in matrix
    ]
def get_dev_value(weight, error):
    """
    :param weight: shape [N, 1], the importance weight for N source samples in the validation set
    :param error: shape [N, 1], the error value for each source sample in the validation set
        (typically 0 for correct classification and 1 for wrong classification)
    """
    rows_w, cols_w = weight.shape
    rows_e, cols_e = error.shape
    assert rows_w == rows_e and cols_w == cols_e, 'dimension mismatch!'

    weighted_error = weight * error
    # Control-variate coefficient: eta = -cov(w*e, w) / var(w).
    stacked = np.hstack((weighted_error, weight))
    covariance = np.cov(stacked, rowvar=False)[0][1]
    eta = -covariance / np.var(weight, ddof=1)
    return weighted_error.mean() + eta * weight.mean() - eta
def biLSTM(f_lstm, b_lstm, inputs, dropout_x=0.):
    """Feature extraction through BiLSTM
    Parameters
    ----------
    f_lstm : VariationalDropoutCell
        Forward cell
    b_lstm : VariationalDropoutCell
        Backward cell
    inputs : NDArray
        seq_len x batch_size
    dropout_x : float
        Variational dropout on inputs
    Returns
    -------
    outputs : NDArray
        Outputs of BiLSTM layers, seq_len x 2 hidden_dims x batch_size
    """
    # Each (f, b) pair is one BiLSTM layer; its concatenated output feeds
    # the next layer.
    for f, b in zip(f_lstm, b_lstm):
        inputs = nd.Dropout(inputs, dropout_x, axes=[0])  # important for variational dropout
        fo, _ = f.unroll(length=inputs.shape[0], inputs=inputs, layout='TNC', merge_outputs=True)
        # Backward direction: flip along time, unroll, then flip back below
        # so both directions are time-aligned before concatenation.
        bo, _ = b.unroll(length=inputs.shape[0], inputs=inputs.flip(axis=0), layout='TNC',
                         merge_outputs=True)
        f.reset()
        b.reset()
        inputs = nd.concat(fo, bo.flip(axis=0), dim=2)
    return inputs
def refer_expression(captions, n_ground=1, prefix="refer expressions:", sort=True):
    """
    n_ground > 1
        ground_indices
            [1, 0, 2]
        source_text
            refer expressions: <extra_id_0> red crayon <extra_id_1> Yellow banana <extra_id_2> black cow
        target_text
            <vis_extra_id_1> <vis_extra_id_0> <vis_extra_id_2>
    n_ground == 1
        source_text
            refer expressions: red crayon
        target_text
            <vis_extra_id_1>
    """
    n_boxes = len(captions)
    chosen = torch.randperm(n_boxes)[:n_ground]
    if sort:
        chosen = chosen.sort().values
    ground_indices = chosen.tolist()

    source_parts = [prefix]
    target_parts = []

    if n_ground == 1:
        only = ground_indices[0]
        source_parts.append(f'{captions[only]}')
        target_parts.append(f'<vis_extra_id_{only}>')
    else:
        for slot, idx in enumerate(ground_indices):
            source_parts.append(f'<extra_id_{slot}>')
            source_parts.append(f'{captions[idx]}')
            target_parts.append(f'<vis_extra_id_{idx}>')
    # target_text.append('</s>')

    source_text = " ".join(source_parts)
    target_text = " ".join(target_parts)
    # return ground_indices, source_text, target_text
    return source_text, target_text
def url_to_filename(base, url):
    """Return the filename to which the page is frozen.

    base -- path to the file
    url -- web app endpoint of the page
    """
    # Directory-style URLs map to their index.html.
    target = url + 'index.html' if url.endswith('/') else url
    return base / target.lstrip('/')
def _msd_anom_3d(time, D_alpha, alpha):
"""3d anomalous diffusion function."""
return 6.0*D_alpha*time**alpha | 27,638 |
def graticule(dpar=None, dmer=None, coord=None, local=None, **kwds):
    """Draw a graticule on the current Axes.
    Parameters
    ----------
    dpar, dmer : float, scalars
      Interval in degrees between meridians and between parallels
    coord : {'E', 'G', 'C'}
      The coordinate system of the graticule (make rotation if needed,
      using coordinate system of the map if it is defined).
    local : bool
      If True, draw a local graticule (no rotation is performed, useful for
      a gnomonic view, for example)
    Notes
    -----
    Other keyword parameters will be transmitted to the projplot function.
    See Also
    --------
    delgraticules
    """
    import pylab

    f = pylab.gcf()
    # Temporarily disable interactive mode so the axes/text updates are
    # batched into a single draw at the end.
    wasinteractive = pylab.isinteractive()
    pylab.ioff()
    try:
        # No axes yet: create a default Mollweide view with a coordinate
        # system label in the lower-right corner.
        if len(f.get_axes()) == 0:
            ax = PA.HpxMollweideAxes(f, (0.02, 0.05, 0.96, 0.9), coord=coord)
            f.add_axes(ax)
            ax.text(
                0.86,
                0.05,
                ax.proj.coordsysstr,
                fontsize=14,
                fontweight="bold",
                transform=ax.transAxes,
            )
        # Draw the graticule on every spherical-projection axes of the figure.
        for ax in f.get_axes():
            if isinstance(ax, PA.SphericalProjAxes):
                ax.graticule(dpar=dpar, dmer=dmer, coord=coord, local=local, **kwds)
    finally:
        # Always flush the draw and restore the interactive state.
        pylab.draw()
        if wasinteractive:
            pylab.ion()
def upload_files(container):
    """
    Upload html and json files to container
    :param container:
    :return:
    """
    print('Enter upload_files()')

    # Create container
    # NOTE(review): os.popen spawns the command but the pipe is never read or
    # waited on, so failures are silently ignored and the process may still be
    # running when this function returns -- consider subprocess.run.
    cmd = 'az storage container create -n {} --account-name clitestresultstac --account-key {}'
    os.popen(cmd.format(container, ACCOUNT_KEY))

    # Upload files
    for root, dirs, files in os.walk(ARTIFACT_DIR):
        for name in files:
            if name.endswith('html') or name.endswith('json'):
                fullpath = os.path.join(root, name)
                cmd = 'az storage blob upload -f {} -c {} -n {} --account-name clitestresultstac'
                cmd = cmd.format(fullpath, container, name)
                print('Running: ' + cmd)
                # NOTE(review): values are interpolated into a shell string;
                # paths containing spaces or shell metacharacters will break
                # or be mis-parsed.
                os.popen(cmd)

    print('Exit upload_files()')
def make_json_response(status_code, json_object, extra_headers=None):
    """
    Helper function to serialize a JSON object and add the JSON content type header.
    """
    # Later keys win, so extra_headers may override the default Content-Type,
    # matching the update() semantics of the previous implementation.
    headers = {"Content-Type": 'application/json', **(extra_headers or {})}
    return status_code, json.dumps(json_object), headers
def tmp_envfile(tmp_path, monkeypatch):
    """Create a temporary environment file and point GITHUB_ENV at it."""
    env_file = tmp_path / "setenv.txt"
    monkeypatch.setenv("GITHUB_ENV", os.fspath(env_file))
    return env_file
def debug():
    """
    Runs a tmux session with all services in different panes, enabling you to quickly debug,
    examine and restart parts of your app quickly.
    """
    _purge_previous_dirs()
    # First pane: the app itself via the deployment runner, in debug mode.
    commands = ["PYTHONPATH={0} {1} {0}/deployment/libexec/run.py -d".format(LOCAL_PROJECT_ROOT, sys.executable)]
    # One pane per enabled backing service; config flags decide which run.
    if config.mongodb.enabled:
        commands.append("mongod --auth --dbpath {}".format(_TESTING_MONGO_DB_PATH))
    if config.redis.enabled:
        commands.append("redis-server")
    if config.rabbitmq.enabled:
        commands.append("rabbitmq-server")
    if config.celery.enabled:
        celeryd_path = _find_executable("celeryd")
        commands.append("cd {0}/www && PYTHONPATH={0} {1} -l DEBUG -B --config=config.celeryconfig".format(LOCAL_PROJECT_ROOT, celeryd_path))
    # Local SMTP sink for outgoing mail (verbose, unprivileged port 2525).
    commands.append("{} {}/mailboxer_smtpd.py -vv -p 2525".format(sys.executable, LOCAL_PROJECT_ROOT))
    run_tmux_session("{}-test".format(config.app.name), commands)
def calc_dst_temerin_li(time, btot, bx, by, bz, speed, speedx, density, version='2002n', linear_t_correction=False):
    """Calculates Dst from solar wind input according to Temerin and Li 2002 method.
    Credits to Xinlin Li LASP Colorado and Mike Temerin.
    Calls _jit_calc_dst_temerin_li. All constants are defined in there.
    Note: vx has to be used with a positive sign throughout the calculation.
    Parameters
    ==========
    time : np.array
        Array containing time variables.
    btot : np.array
        Array containing Btot.
    bx : np.array
        Array containing Bx in coordinate system ?.
    by : np.array
        Array containing By in coordinate system ?.
    bz : np.array
        Array containing Bz in coordinate system ?.
    speed : np.array
        Array containing solar wind speed.
    speedx : np.array
        Array containing solar wind speed in x-direction.
    density : np.array
        Array containing solar wind density.
    version : str (default='2002')
        String determining which model version should be used
        ('2002', '2002n', or '2006').
    linear_t_correction : bool (default=False)
        Apply a linear yearly drift correction (2006 version only).
    Returns
    =======
    dst_burton : np.array
        Array with calculated Dst values over timesteps time.
    """
    # Arrays: the three Dst components plus the combined output.
    dst1=np.zeros(len(bz))
    dst2=np.zeros(len(bz))
    dst3=np.zeros(len(bz))
    dst_tl=np.zeros(len(bz))

    # Define initial values (needed for convergence, see Temerin and Li 2002 note)
    dst1[0:10]=-15
    dst2[0:10]=-13
    dst3[0:10]=-2

    # '2002n' switches the jit kernel to the newer parameter set.
    if version == '2002':
        newparams = False
    else:
        newparams = True

    if version in ['2002', '2002n']:
        # julian_days = [sunpy.time.julian_day(num2date(x)) for x in time]
        julian_days = [astropy.time.Time(num2date(x), format='datetime', scale='utc').jd for x in time]
        return _jit_calc_dst_temerin_li_2002(time, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3, dst_tl, julian_days, newparams=newparams)
    elif version == '2006':
        # The 2006 model uses different initial values and time offsets
        # measured in days since 1995 / 2000.
        dst1[0:10], dst2[0:10], dst3[0:10] = -10, -5, -10
        ds1995 = time - date2num(datetime(1995,1,1))
        ds2000 = time - date2num(datetime(2000,1,1))

        # YEARLY DRIFT CORRECTION TERM (NOT IN PAPER)
        if linear_t_correction:
            drift_corr = -0.014435865642103548 * ds2000 + 9.57670996872173
        else:
            drift_corr = 0.
        return _jit_calc_dst_temerin_li_2006(ds1995, ds2000, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3) + drift_corr
def parsestrfile(str_inpath):
    """Returns dictionary containing :class:`~gemmi.Structure` objects and another one with the file names.

    :param str_inpath: Either a directory or file path.
    :type str_inpath: str
    :raises KeyError: More than one structure file containing same identifier.
    :return strdict: A dictionary containing imported :class:`~gemmi.Structure` objects.
    :rtype strdict: dict [str, :class:`~gemmi.Structure`]
    :return filedict: A dictionary containing file names.
    :rtype filedict: dict [str, str]
    """
    strdict = {}
    filedict = {}
    if os.path.isfile(str_inpath):
        structure = gemmi.read_structure(str_inpath)
        pdbid = structure.name.lower()
        strdict[pdbid] = structure
        filedict[pdbid] = os.path.basename(str_inpath)
    elif os.path.isdir(str_inpath):
        # Bug fixes vs. the original: `os.isfile` does not exist
        # (AttributeError); os.listdir returns bare names, so the directory
        # must be joined back on; the duplicate-id KeyError was swallowed by
        # the bare except wrapping the read; and filedict stored the
        # directory's basename instead of the file's.
        for filename in os.listdir(str_inpath):
            fullpath = os.path.join(str_inpath, filename)
            if not os.path.isfile(fullpath):
                continue
            try:
                structure = gemmi.read_structure(fullpath)
                pdbid = structure.name.lower()
            except Exception:
                # Unreadable / non-structure file: skip quietly.
                continue
            if pdbid in strdict:
                raise KeyError('Structure '+pdbid+' loaded more than once. Check files in directory and remove duplicates.')
            strdict[pdbid] = structure
            filedict[pdbid] = os.path.basename(fullpath)
    return strdict, filedict
def evaluate_regression_model(
    model: object,
    test_sets: list,
    show_plot: bool = True,
    use_rasterization: bool = False,
) -> None:
    """
    Evaluates a trained regression model on test data sets.

    Draws one pred-vs-true scatter panel per test set and reports the Pearson
    correlation between predictions and true labels in each panel title.

    Parameters
    ----------
    model : object
        Trained model (must have `predict()` method).
    test_sets : list
        List of test data sets, where each entry is a tuple:
            X_eval : np.ndarray
                Prediction data.
            y_true : np.ndarray
                True labels for `X_eval`.
            test_set_name : str
                Name of test set.
            xlabel : str
                Label for x-axis of pred/true plot.
            ylabel : str
                Label for y-axis of pred/true plot.
    show_plot : bool, optional
        Whether or not to call `plt.show()` to show the plot (defaults to True)
    use_rasterization : bool, optional
        Whether or not to enable rasterization for scatter plots of many data points
        (defaults to False).
    """
    num_test_sets = len(test_sets)
    _, axes = plt.subplots(nrows=1, ncols=num_test_sets, figsize=(4 * num_test_sets, 5))
    # Fix: plt.subplots() returns a bare Axes (not an array) when ncols == 1,
    # which would make the zip() below raise TypeError. Normalize to a list.
    if num_test_sets == 1:
        axes = [axes]
    # Panels are labelled (a), (b), ...; zip() stops at the shortest input.
    test_set_chars = ascii_lowercase[:num_test_sets]
    for test_set, test_set_char, ax in zip(test_sets, test_set_chars, axes):
        X_eval, y_true, test_set_name, xlabel, ylabel = test_set
        pred_labels = model.predict(X_eval)
        pred_true_corr, _ = pearsonr(pred_labels, y_true)
        ax_scatter_handle = ax.scatter(pred_labels, y_true, s=15)
        # Rasterize only genuinely large scatters to keep vector output small.
        if use_rasterization and len(pred_labels) > 1000:
            ax_scatter_handle.set_rasterized(True)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.set_title(
            f"({test_set_char}) Correlation: {pred_true_corr:.3f} ({test_set_name})"
        )
    if show_plot:
        plt.show()
def handle_import(labfile, labjs):
    """Resolve external lab files referenced by a lab file.

    Each name in ``labjs['import']`` is loaded from ``<name>.lab.json`` in the
    directory of *labfile* and appended (as an :class:`AttrDict`) to
    ``labjs['_imported_']``. Imports declared in an imported file are resolved
    recursively.

    Args:
        labfile (str): Lab file path; its directory anchors relative imports.
        labjs (dict): Lab data; mutated in place.

    Returns:
        dict: *labjs* with ``_imported_`` filled in.

    Raises:
        FileNotFoundError: If a referenced lab file does not exist.
    """
    if 'import' not in labjs:
        return labjs
    if '_imported_' not in labjs:
        labjs['_imported_'] = []
    adir = os.path.dirname(labfile)
    for imp in labjs['import']:
        path = os.path.join(adir, f'{imp}.lab.json')
        if not os.path.isfile(path):
            raise FileNotFoundError(path)
        # Context manager guarantees the handle is closed.
        with open(path, 'rt', encoding='utf8') as f:
            data = json.loads(f.read())
        if 'import' in data:
            # NOTE: nested imports are resolved relative to the ORIGINAL
            # labfile's directory (labfile, not path, is passed down) —
            # preserved from the original implementation.
            handle_import(labfile, data)
        labjs['_imported_'].append(AttrDict(data))
    # Fix: the original returned None on this path, while the early-exit
    # branch above returned labjs.
    return labjs
def reconstruct(vars_to_reconstruct, scheme, order_used):
    """
    Reconstructs every variable at the cell faces using the requested scheme.

    :param vars_to_reconstruct: The variables at the cell centers.
    :type vars_to_reconstruct: list of list of double
    :param Reconstruction.Scheme scheme: The reconstruction scheme to use.
    :param order_used: Filled by the function; reports the order of the
        reconstruction that was actually used.
    :type order_used: list of int
    :return: (`list of list of double`) The face reconstructed variables.
        Each variable is of length `2 * number_of_cells`
    """
    recons_fn = _recons_dispatch[scheme]
    reconstructed_vars = [
        recons_fn(cell_values, np.asarray(np.asarray([len(cell_values)])), 1,
                  scheme, order_used)
        for cell_values in vars_to_reconstruct
    ]
    return np.asarray(reconstructed_vars)
def dataframe2naf(
    df_meta: pd.DataFrame,
    overwrite_existing_naf: bool = False,
    rerun_files_with_naf_errors: bool = False,
    engine: str = None,
    naf_version: str = None,
    dtd_validation: bool = False,
    params: dict = None,
    nlp=None,
) -> pd.DataFrame:
    """Batch processor for NAF

    Args:
        df_meta: the dataframe containing the meta data for the NAF files.
        overwrite_existing_naf: if True then existing NAF files are overwritten (default = False)
        rerun_files_with_naf_errors: if True then documents that produced NAF errors are run again (default = False)
        engine: name of the NLP processor to be used (default = None)
        naf_version: NAF version to be used
        dtd_validation: perform validation of each NAF file (default = False)
        params: additional parameters for NAF conversion; merged over the
            row's metadata columns (default = None — replaces the original
            mutable ``{}`` default, which is shared across calls)
        nlp: optional preloaded NLP pipeline handed through to the converter
    Returns:
        pd.DataFrame: the dataframe with (updated) metadata
    """
    if params is None:
        params = {}
    if "naf:status" not in df_meta.columns:
        df_meta["naf:status"] = ""
    for row in df_meta.index:
        # Language of the document; flag the row if the column is missing.
        if "dc:language" in df_meta.columns:
            dc_language = df_meta.loc[row, "dc:language"].lower()
        else:
            dc_language = None
            df_meta.loc[
                row, "naf:status"] = "ERROR, no dc:language in DataFrame"
        # Source document path; flag the row if the column is missing.
        if "dc:source" in df_meta.columns:
            dc_source = df_meta.loc[row, "dc:source"]
        else:
            dc_source = None
            df_meta.loc[row, "naf:status"] = "ERROR, no dc:source in DataFrame"
        # Output NAF path: an explicit naf:source wins, otherwise derive it
        # from the source file name. Initialized to None so it is never
        # unbound when dc_source is missing.
        output = None
        if "naf:source" in df_meta.columns and not pd.isna(df_meta.loc[row, "naf:source"]):
            output = df_meta.loc[row, "naf:source"]
        elif dc_source is not None:
            output = os.path.splitext(dc_source)[0] + ".naf.xml"
        if dc_source and dc_language and output:
            # logging per processed file
            log_file: str = os.path.splitext(dc_source)[0] + ".log"
            logging.basicConfig(filename=log_file,
                                level=logging.WARNING, filemode="w")
            if os.path.exists(output) and not overwrite_existing_naf:
                # the NAF file exists and we should not overwrite existing naf
                # files -> skip
                df_meta.loc[row, "naf:status"] = "OK"
                df_meta.loc[row, "naf:source"] = output
                continue
            elif (
                "error" in str(df_meta.loc[row, "naf:status"]).lower()
                and not rerun_files_with_naf_errors
            ):
                # the status is ERROR and we should not rerun files with errors
                # -> skip (str() guards against a NaN status from the caller)
                continue
            else:
                # Build per-row params from the metadata columns, then let the
                # caller-supplied params override (the original clobbered the
                # params argument entirely, ignoring what the caller passed).
                row_params = {
                    col: df_meta.loc[row, col]
                    for col in df_meta.columns
                    if col not in ["naf:source", "naf:status"]
                }
                row_params.update(params)
                try:
                    doc = parse2naf.generate_naf(
                        input=dc_source,
                        engine=engine,
                        language=dc_language,
                        naf_version=naf_version,
                        dtd_validation=dtd_validation,
                        params=row_params,
                        nlp=nlp,
                    )
                    # Write the NAF file unless it exists and must be kept.
                    if not os.path.exists(output) or overwrite_existing_naf:
                        doc.write(output)
                    df_meta.loc[row, "naf:status"] = "OK"
                    df_meta.loc[row, "naf:source"] = output
                except Exception:
                    # Keep the batch running; record the failure on the row
                    # (the original bare except also caught SystemExit etc.).
                    df_meta.loc[row, "naf:status"] = "ERROR, generate_naf"
    return df_meta
def fs_inplace_rename_regex(src: str, pattern: str, replace: str, only_basename: bool = True, dry_run: bool = False):
    """DEPRECATED NO MORE DEVELOPMENT"""
    # Compute the destination path; optionally restrict the regex to the
    # basename so directory components are never rewritten.
    if only_basename:
        head, tail = os.path.split(src)
        dst = os.path.join(head, re.sub(pattern, replace, tail))
    else:
        dst = re.sub(pattern, replace, src)
    if src == dst:
        return
    print('* {} ->\n {}'.format(src, dst))
    if not dry_run:
        shutil.move(src, dst)
def dev_view(request, slug=""):
    """View for homepage or individual developer.

    With an empty ``slug`` the homepage is rendered: developer tiles from the
    ``Dev`` table, a shuffled news carousel, and a deduplicated "Latest News"
    list, both built from cached Celery task results (django-celery-results
    ``TaskResult`` rows produced by the google_fetch_* tasks). A POST toggles
    the developer in the user's favorites. With a non-empty ``slug`` a single
    developer's page is rendered from the cached rawg_fetch_dev task result.

    :param request: Django HttpRequest.
    :param slug: developer slug; "" selects the homepage.
    """
    if slug == "":
        # ---------------- Homepage branch ----------------
        # Developer tile data, one parallel list per column.
        dev_name = list(Dev.objects.all().values_list('dev_name', flat=True))
        dev_img_address = list(Dev.objects.values_list('dev_image_address', flat=True))
        dev_slug = list(Dev.objects.values_list('dev_slug', flat=True))
        dev_order = list(Dev.objects.values_list('dev_order_pop', flat=True))
        # Fetch the timestamp of the google_fetch_query task result. When more
        # than one result row exists, the second row's task_id is used.
        # NOTE(review): index 11 of values_list() is assumed to be the task's
        # completion datetime — confirm against the TaskResult schema.
        if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_query').values())) == 1:
            g_query_datetime_init = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_query'
            ).values_list()[0])
        else:
            task_id_query = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_query'
            ).values())[1]['task_id']
            g_query_datetime_init = list(TaskResult.objects.filter(
                task_id=task_id_query
            ).values_list()[0])
        g_query_datetime = g_query_datetime_init[11]
        # Same timestamp lookup for the google_fetch_dev task.
        if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
            g_dev_datetime_init = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_dev'
            ).values_list()[0])
        else:
            task_id_dev = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_dev'
            ).values())[1]['task_id']
            g_dev_datetime_init = list(TaskResult.objects.filter(
                task_id=task_id_dev
            ).values_list()[0])
        g_dev_datetime = g_dev_datetime_init[11]
        # Most recent of the two fetch timestamps, shown in the template.
        # NOTE(review): if the two timestamps are exactly equal, neither
        # branch assigns g_datetime and the context lookup below would raise
        # NameError — consider a plain else.
        if g_dev_datetime > g_query_datetime:
            g_datetime = g_dev_datetime
        elif g_dev_datetime < g_query_datetime:
            g_datetime = g_query_datetime
        # Load the JSON payloads of both google fetch tasks (same
        # one-vs-many row selection pattern as the timestamp lookups above).
        if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_query').values())) == 1:
            g_query = json.loads(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_query'
            ).values()[0]['result'])
        else:
            task_id_query = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_query'
            ).values())[1]['task_id']
            g_query = json.loads(TaskResult.objects.filter(
                task_id=task_id_query
            ).values()[0]['result'])
        if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
            g_dev = json.loads(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_dev'
            ).values()[0]['result'])
        else:
            task_id_dev = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_dev'
            ).values())[1]['task_id']
            g_dev = json.loads(TaskResult.objects.filter(
                task_id=task_id_dev
            ).values()[0]['result'])
        # 2-day date filter for homepage 'Latest News'
        def date_criteria(g_inp):
            # Entry index 8 is assumed to hold an ISO-like date string;
            # keep only entries at most two days old.
            # NOTE(review): day=today.day - 2 raises ValueError on the first
            # two days of a month — a timedelta would be safer.
            dates = [re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', g_inp[i][8]).group(0) for i in range(len(g_inp))]
            dates_datetime = [datetime.strptime(i, '%Y-%m-%d') for i in dates]
            today = datetime.today()
            time_criteria = datetime(year=today.year, month=today.month, day=today.day - 2)
            return [g_inp[i] for i in range(len(g_inp)) if dates_datetime[i] >= time_criteria]
        # Carousel: recent entries from both feeds that have media (index 9).
        entries_for_carousel_init = [date_criteria(g_dev) + date_criteria(g_query)][0]
        entries_for_carousel = [i for i in entries_for_carousel_init if i[9] != 'none']
        entries_for_latest_news_init = entries_for_carousel
        entries_for_latest_news_init = sorted(entries_for_latest_news_init, key=lambda sort: sort[8], reverse=True)
        # Deduplicate by link (index 1): keep links that appear once, and for
        # repeated links keep only their first occurrence.
        link_latest_news = [i[1] for i in entries_for_latest_news_init]
        link_count = [link_latest_news.count(link_latest_news[i]) for i in
                      range(len(link_latest_news))]
        link_zip = list(zip(link_latest_news, link_count))
        link_unique = [link_zip[i][0] if link_zip[i][1] == 1 else 'none' for i in
                       range(len(link_zip))]
        nonunique_indices_link = [i for i, x in enumerate(link_unique) if x == "none"]
        nonunique_check_link = []
        nonunique_entries_nonrepeat_link = []
        for i in nonunique_indices_link:
            nonunique_check_link.append(link_latest_news[i])
            count_inst = nonunique_check_link.count(link_latest_news[i])
            if count_inst == 1:
                nonunique_entries_nonrepeat_link.append(entries_for_latest_news_init[i])
        google_search_results_unique = []
        for i in range(len(link_unique)):
            try:
                if link_unique[i] != 'none':
                    google_search_results_unique.append(entries_for_latest_news_init[i])
            except IndexError:
                pass
        google_search_results_combined = google_search_results_unique + nonunique_entries_nonrepeat_link
        # Paginate the news list (page size 2000 effectively = one page).
        page = request.GET.get('page', 1)
        paginator2 = Paginator(google_search_results_combined, 2000)
        try:
            entries_for_latest_news = paginator2.page(page)
        except PageNotAnInteger:
            entries_for_latest_news = paginator2.page(1)
        except EmptyPage:
            entries_for_latest_news = paginator2.page(paginator2.num_pages)
        random.shuffle(entries_for_carousel)
        if request.user.is_authenticated:
            # POST toggles a developer in the user's favorites: add when
            # absent, delete when already present.
            if request.method == "POST":
                p_form = FavoriteGamesUpdateForm(data=request.POST)
                user_fav = list(FavoriteGames.objects.all().values_list())
                user_slug_list = [user_fav[i][2] for i in range(len(user_fav))
                                  if user_fav[i][1] == request.user.profile.id]
                if request.POST["dev_user_str"] not in user_slug_list:
                    if p_form.is_valid():
                        form_instance = p_form.save(commit=False)
                        form_instance.profile = Profile.objects.get(user=request.user)
                        form_instance.dev_user_str = p_form.cleaned_data["dev_user_str"]
                        form_instance.save()
                else:
                    FavoriteGames.objects.filter(
                        profile_id=request.user.profile.id
                    ).filter(
                        dev_user_str=request.POST.get('dev_user_str')
                    ).delete()
            # Per-tile 'yes'/'no' flag: is the developer in the user's favorites?
            fav_game_check = list(FavoriteGames.objects.filter(profile_id=request.user.profile.id).values())
            devs_in_favs = [fav_game_check[i]['dev_user_str'] for i in range(len(fav_game_check))]
            dev_game_check_list = []
            for j, i in enumerate(dev_slug):
                if i in devs_in_favs:
                    dev_game_check_list.append('yes')
                else:
                    dev_game_check_list.append('no')
        else:
            dev_game_check_list = ""
        # Tile lists sorted alphabetically and by popularity order.
        dev_list_name = sorted(list(zip_longest(dev_name, dev_img_address, dev_slug, dev_game_check_list, dev_order)),
                               key=lambda lowercase: lowercase[0].lower())
        dev_list_pop = sorted(list(zip_longest(dev_name, dev_img_address, dev_slug, dev_game_check_list, dev_order)),
                              key=lambda dev_order_list: dev_order_list[4])
        # NOTE(review): the cached value only feeds paginator_for_class_1,
        # which is never used, and dev_list_pop is always recomputed above —
        # this cache block looks vestigial; confirm before removing.
        cache_key = "test_cache_key"
        if cache.get(cache_key) is not None:
            paginator_for_class_1 = Paginator(cache.get(cache_key), 48)
        else:
            cache.set(
                cache_key,
                dev_list_pop,
                60 * 60 * 4,
            )
        context = {
            'numbers': dev_list_pop,
            'entries': entries_for_carousel,
            'latest_news': entries_for_latest_news,
            'g_query_datetime': g_query_datetime,
            'g_dev_datetime': g_dev_datetime,
            'g_datetime': g_datetime,
        }
        # Redirect after POST so a refresh does not resubmit the form.
        if request.method == "POST":
            return redirect("/")
        else:
            return render(request, "homepage/dev_base.html", context)
    else:
        # ---------------- Single-developer branch ----------------
        # Cached RAWG task payload: parallel lists indexed by developer.
        dev_query_results_init = TaskResult.objects.filter(task_name='homepage.tasks.rawg_fetch_dev')
        dev_query_results = json.loads(dev_query_results_init.values()[0]['result'])
        # Same TaskResult timestamp lookup pattern as the homepage branch.
        if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
            g_dev_datetime_init = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_dev'
            ).values_list()[0])
        else:
            task_id_dev = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_dev'
            ).values())[1]['task_id']
            g_dev_datetime_init = list(TaskResult.objects.filter(
                task_id=task_id_dev
            ).values_list()[0])
        g_dev_datetime = g_dev_datetime_init[11]
        # Locate this developer in the payload; sub-lists are per-game data.
        slug_index1 = [dev_query_results][0][0].index(slug)
        dev_list = [dev_query_results[0][slug_index1]]
        slugs_per_dev_list = dev_query_results[1][slug_index1]
        names_per_dev_list = dev_query_results[2][slug_index1]
        ratings_per_dev_list = dev_query_results[3][slug_index1]
        background_img_per_dev_list = dev_query_results[4][slug_index1]
        released_per_dev_list = dev_query_results[5][slug_index1]
        full_clip_per_dev_list = dev_query_results[6][slug_index1]
        ratings_count_per_dev_list = dev_query_results[7][slug_index1]
        # Games sorted by ratings count (index 7), most-rated first.
        dev_game_data = sorted(list(zip_longest(dev_list, slugs_per_dev_list, names_per_dev_list,
                                                ratings_per_dev_list, background_img_per_dev_list,
                                                released_per_dev_list,
                                                full_clip_per_dev_list, ratings_count_per_dev_list)),
                               key=lambda sort: sort[7], reverse=True)
        # Keep only games that have a background image.
        dev_game_data2 = []
        for i in range(len(dev_game_data)):
            try:
                if dev_game_data[i][4] is not None:
                    dev_game_data2.append(dev_game_data[i])
            except IndexError:
                pass
        page = request.GET.get('page', 1)
        paginator2 = Paginator(dev_game_data2, 2000)
        try:
            numbers = paginator2.page(page)
        except PageNotAnInteger:
            numbers = paginator2.page(1)
        except EmptyPage:
            numbers = paginator2.page(paginator2.num_pages)
        # News payload for this developer (entry index 6 holds the dev slug).
        if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
            google_query_results = json.loads(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_dev'
            ).values()[0]['result'])
        else:
            task_id_dev = list(TaskResult.objects.filter(
                task_name='homepage.tasks.google_fetch_dev'
            ).values())[1]['task_id']
            google_query_results = json.loads(TaskResult.objects.filter(
                task_id=task_id_dev
            ).values()[0]['result'])
        dev_name_list = list(Dev.objects.all().values_list('dev_name', flat=True))
        dev_slug_list = list(Dev.objects.all().values_list('dev_slug', flat=True))
        dev_img_list = list(Dev.objects.values_list('dev_image_address', flat=True))
        dev_slug_index = dev_slug_list.index(slug)
        dev_name_for_site = dev_name_list[dev_slug_index]
        dev_img_for_site = dev_img_list[dev_slug_index]
        # Filter the news entries down to this developer.
        google_search_results = [google_query_results[i] if google_query_results[i][6] == slug else 'none'
                                 for i in range(len(google_query_results))]
        google_search_results2 = []
        for i in range(len(google_search_results)):
            try:
                if google_search_results[i] != 'none':
                    google_search_results2.append(google_search_results[i])
            except IndexError:
                pass
        context = {
            'numbers': numbers,
            'google_search_results': google_search_results2,
            'dev_name_for_site': dev_name_for_site,
            'dev_img_for_site': dev_img_for_site,
            'g_dev_datetime': g_dev_datetime,
        }
        return render(request, "homepage/dev_iter.html", context)
def dpuEnableTaskProfile(task):
    """
    Enable the profiling facility of a DPU Task so that performance metrics
    are collected while it runs.

    task: DPU Task. This parameter should be gotten from the result of dpuCreatTask()
    Returns: 0 on success, or report error in case of any failure
    """
    status = pyc_libn2cube.pyc_dpuEnableTaskProfile(task)
    return status
def get_isotopic_distribution(z):
    """
    For an element with number ``z``, returns two ``np.ndarray`` objects containing that element's weights and relative abundances.

    Args:
        z (int): atomic number

    Returns:
        masses (np.ndarray): list of isotope masses
        weights (np.ndarray): list of weights (relative to 1.00 for largest)
    """
    # Keys are isotope masses, values their relative abundances.
    isotopes = ISOTOPE_DICTIONARY[str(z)]
    return np.array(list(isotopes.keys())), np.array(list(isotopes.values()))
def url_root():
    """Root path of the web server (original docstring: 根路径)."""
    return """
    <p>Hello ! Welcome to Rabbit's WebServer Platform !</p>
    <a href="http://www.miibeian.gov.cn/" target="_blank" style="">京ICP备 18018365 号</a> @2018Rabbit
    """
def range4():
    """Placeholder that is never called if plot_directive works as expected."""
    raise NotImplementedError()
def solve_network():
    """CLI interface method for solver.

    Parses the design JSON and config file from the command line, builds the
    microfluidic Device and its electrical-network analogue, runs the solver,
    and prints the resulting pressure at every connection point.
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument("design", help="JSON file to Analyze")
    argparser.add_argument("-i", "--input", help="Config file")
    args = argparser.parse_args()
    # Context managers close the file handles (the originals leaked them).
    with open(args.design, "r") as file_d:
        JSON = json.loads(file_d.read())
    device = Device(JSON)
    with open(args.input, "r") as file_c:
        parse_config(file_c, device)
    print("Printing the Micrfluidic Network's edges:")
    for edge in device.G.edges():
        print("Printing Edge :", edge)
    electrical_network = ENetwork(device)
    print("Printing the Electical Network's edges:")
    for edge in electrical_network.G.edges():
        print("Printing Edge :", edge, electrical_network.get_edge_data(edge))
    solver = Solver()
    solver.initialize(electrical_network)
    solver.solve()
    print("Final Results")
    for cpoint in electrical_network.get_all_cpoints():
        # cpoint ids are "<component>[_<port>]"; resolve the display name.
        parts = cpoint.id.split("_")
        nodename = device.get_component(parts[0]).name
        if len(parts) > 1:
            nodename = device.get_component(parts[0]).name + "_" + parts[1]
        print("Node: ", nodename, " Pressure: ", cpoint.pressure)
def CreateMatrix(args, context, history_id, gcs_results_root, release_track):
    """Creates a new iOS matrix test in Firebase Test Lab from the user's params.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        gcloud command invocation (i.e. group and command arguments combined).
      context: {str:obj} dict containing the gcloud command context, which
        includes the Testing API client+messages libs generated by Apitools.
      history_id: {str} A history ID to publish Tool Results to.
      gcs_results_root: the root dir for a matrix within the GCS results bucket.
      release_track: the release track that the command is invoked from.

    Returns:
      A TestMatrix object created from the supplied matrix configuration values.
    """
    matrix_id = uuid.uuid4().hex
    return MatrixCreator(args, context, history_id, gcs_results_root,
                         release_track).CreateTestMatrix(matrix_id)
def mute_control(net):
    """
    Take every controller in the net out of service, e. g. when you want to
    reuse the net with a fresh set of controllers.

    :param net: pandapowerNet
    :return:
    """
    # Validate the controller frame first, then flip the flag for all rows.
    check_controller_frame(net)
    net.controller['in_service'] = False
def test_get_worst_rating_shortterm_with_inferring_rating_provider(
    rtg_inputs_shortterm,
):
    """Test computation of best ratings on a security (line-by-line) basis."""
    expected = pd.Series(
        data=["A-2", "B", "A-1", "D", "B", np.nan, "A-2", "A-3"], name="worst_rtg"
    )
    result = rtg.get_worst_ratings(rtg_inputs_shortterm, tenor="short-term")
    pd.testing.assert_series_equal(result, expected)
def run(ctx, dry_run, task_profile):
    """Create from a task profile, and submit."""
    from jsub.command.run import Run
    profile_path = click.format_filename(task_profile)
    command = Run(jsubrc=ctx.obj['jsubrc'], task_profile_file=profile_path,
                  dry_run=dry_run)
    command.execute()
def _download_and_extract_zip_from_github(site_info):
    """Download a site archive and extract it to a staging directory.

    from https://pelican-blog/archive/master.zip
    to: /path/pelican-blog

    Args:
        site_info: mapping with at least the "ZIP_URL" and "NAME" keys.

    Returns:
        (site_stage_path, site_file_path) on success, (None, None) on failure.
    """
    # Unique suffix so concurrent publishes never collide on disk.
    unique_id = uuid4().hex
    zip_file_url = site_info["ZIP_URL"]
    zip_file_name = os.path.join(
        settings.PELICAN_PUBLISHER["WORKING_ROOT"],
        "{}-{}.zip".format(site_info["NAME"], unique_id),
    )
    site_stage_path = os.path.join(
        settings.PELICAN_PUBLISHER["WORKING_ROOT"],
        "{}-{}".format(site_info["NAME"], unique_id),
    )
    logger.debug("zip file name: {}".format(zip_file_name))
    logger.debug("site stage path: {}".format(site_stage_path))
    # download zip file
    r = requests.get(zip_file_url)
    if not r.ok:
        logger.error("download failed")
        return None, None
    # Context manager closes the handle even on a write error (the original
    # open(...).write(...) leaked the file object).
    with open(zip_file_name, "wb") as zip_file:
        zip_file.write(r.content)
    logger.info("download finished")
    # extract zip file; the context manager closes the archive handle
    with ZipFile(zip_file_name) as archive:
        archive.extractall(site_stage_path)
    # pelican site file check: the archive should contain one top-level dir
    site_file_path = None
    for p in Path(site_stage_path).glob("*"):
        if p.is_dir():
            site_file_path = p.as_posix()
            break
    if site_file_path is None:
        logger.error("pelican site file extract failed, more subdir")
        return None, None
    logger.info("extracted pelican site file to: {}".format(site_file_path))
    return site_stage_path, site_file_path
def map_keys(func, d):
    """ Returns a new dict with func applied to keys from d, while values
    remain unchanged.

    >>> D = {'a': 1, 'b': 2}
    >>> map_keys(lambda k: k.upper(), D)
    {'A': 1, 'B': 2}
    >>> assert map_keys(identity, D) == D
    >>> map_keys(identity, {})
    {}
    """
    # dict.iteritems() is Python 2 only and raises AttributeError on Python 3
    # (the rest of this file uses f-strings, i.e. Python 3). Use .items() and
    # a dict comprehension instead of the dict((...)) constructor.
    return {func(k): v for k, v in d.items()}
def generate_qiniu_token(object_name, use_type, expire_time=600):
    """
    Generate the upload token required by Qiniu Cloud.

    :param object_name: file name under which the upload is stored on Qiniu
    :param use_type: operation type (not referenced in this body; kept for the caller's API)
    :param expire_time: token lifetime in seconds; default 600, i.e. ten minutes
    :return: tuple (object_name, token, base_url, expire_time)
    """
    bucket_name = PRIVATE_QINIU_BUCKET_NAME
    from qiniu import Auth
    # Fill in your Access Key and Secret Key here
    access_key = PRIVATE_QINIU_ACCESS_KEY
    secret_key = PRIVATE_QINIU_SECRET_KEY
    # Build the authentication object
    q = Auth(access_key, secret_key)
    # Upload policy example
    # https://developer.qiniu.com/kodo/manual/1206/put-policy
    policy = {
        # 'callbackUrl':'https://requestb.in/1c7q2d31',
        # 'callbackBody':'filename=$(fname)&filesize=$(fsize)'
        # 'persistentOps':'imageView2/1/w/200/h/200'
    }
    token = q.upload_token(bucket_name, object_name, expire_time, policy)
    base_url = PRIVATE_MEDIA_URL_PREFIX
    return (object_name, token, base_url, expire_time)
def get_s3_object(bucket, key_name, local_file):
    """Download a S3 object to a local file in the execution environment

    Parameters
    ----------
    bucket: string, required
        S3 bucket that holds the object
    key_name: string, required
        S3 key of the object to download
    local_file: string, required
        Local path the object is written to

    Returns
    -------
    result: str
        'ok' on success, otherwise 'Error: <exception message>'
        (note: this returns a status string, not the downloaded content)
    """
    # Record the object location on the active X-Ray trace segment.
    tracer.put_metadata('object', f's3://{bucket}/{key_name}')
    try:
        s3_resource.Bucket(bucket).download_file(key_name, local_file)
        result = 'ok'
        tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')
    except Exception as e:
        # Boundary handler: annotate the failure and report it as a string.
        tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')
        result = f'Error: {str(e)}'
    return(result)
def jaxpr_eqns_input_sizes(jaxpr) -> np.ndarray:
    """Return a list of input sizes for each equation in the jaxpr.

    Args:
        jaxpr: Jaxpr to get input sizes for.

    Returns:
        A #eqns * #eqns numpy array of input sizes. cost[l, r] represents the
        input size of the l-th to (r - 1)-th equation in the jaxpr.
    """
    length = len(jaxpr.eqns)
    # input_sizes[k, r] = total bytes of values produced by eqns[0:k] and
    # consumed by eqns[k:r], i.e. the data crossing the split point k.
    input_sizes = np.full((length + 1, length + 1), 0, dtype=np.float32)
    outvars = OrderedSet()
    for k in range(0, length + 1):
        if k > 0:
            # Accumulate outputs of every equation before split point k.
            outvars = outvars.union(jaxpr.eqns[k - 1].outvars)
        invars = OrderedSet()
        total_size = 0
        for r in range(k + 1, length + 1):
            for invar in jaxpr.eqns[r - 1].invars:
                # Count each crossing variable only once; bytes = size * itemsize.
                if (isinstance(invar, Var) and invar in outvars and
                    invar not in invars):
                    invars.add(invar)
                    total_size += invar.aval.size * invar.aval.dtype.itemsize
            input_sizes[k, r] = total_size
    return input_sizes
def extract_message(raw_html):
    """Returns the content of the message element.

    This element appears typically on pages with errors.

    :param raw_html: Dump from any page.
    """
    matches = re_message.findall(raw_html)
    # First match when present, otherwise None.
    return matches[0] if matches else None
def do_pca_anomaly_scores(obs, n_components):
    """Compute PCA reconstruction-error anomaly scores for each observation.

    The original function body was an empty stub. Observations are centered,
    projected onto the top ``n_components`` principal components and
    reconstructed; each row's squared reconstruction error is its anomaly
    score (larger = more anomalous).

    Parameters
    ----------
    obs : array-like of shape (n_samples, n_features)
        Observations to score.
    n_components : int
        Number of principal components kept for the reconstruction.

    Returns
    -------
    np.ndarray of shape (n_samples,)
        Squared reconstruction error per observation (non-negative).
    """
    X = np.asarray(obs, dtype=float)
    centered = X - X.mean(axis=0)
    # SVD yields the principal axes in Vt without forming the covariance matrix.
    _, _, vt = np.linalg.svd(centered, full_matrices=False)
    components = vt[:n_components]
    reconstructed = centered @ components.T @ components
    return np.sum((centered - reconstructed) ** 2, axis=1)
def _weighted_essentially_non_oscillatory_vectorized(
        eno_order: int, values: Array, spacing: float, boundary_condition: Callable[[Array, int],
        Array]) -> Tuple[Array, Array]:
    """Implements a more "vectorized" but ultimately slower version of `weighted_essentially_non_oscillatory`.

    Returns the pair of (left-biased, right-biased) WENO derivative
    approximations at each grid point, as in the non-vectorized variant.
    """
    if eno_order < 1:
        raise ValueError(f"`eno_order` must be at least 1; got {eno_order}.")
    # Pad with eno_order ghost cells per side via the boundary condition.
    values = boundary_condition(values, eno_order)
    # First divided differences of the padded values.
    diffs = (values[1:] - values[:-1]) / spacing
    if eno_order == 1:
        # First order: the one-sided differences are the approximations.
        return (diffs[:-1], diffs[1:])
    # Candidate (substencil) derivative approximations for all points at once.
    substencil_approximations = _align_substencil_values(
        jax.vmap(jnp.correlate, (None, 0), 0)(diffs, _diff_coefficients(eno_order)), jnp)
    # Second differences feed the smoothness indicators below.
    diffs2 = diffs[1:] - diffs[:-1]
    # Smoothness indicator quadratic form, evaluated through its Cholesky
    # factor as a sum of squared correlations.
    chol_T = jnp.asarray(np.linalg.cholesky(_smoothness_indicator_quad_form(eno_order)).swapaxes(-1, -2))
    smoothness_indicators = _align_substencil_values(
        jnp.sum(jnp.square(jax.vmap(jax.vmap(jnp.correlate, (None, 0), 1), (None, 0), 0)(diffs2, chol_T)), -1), jnp)
    # Classic WENO weighting: smoother substencils get larger weights.
    unscaled_weights = 1 / jnp.square(smoothness_indicators + WENO_EPS)
    unnormalized_weights = (jnp.asarray(_substencil_coefficients(eno_order)[..., np.newaxis]) *
                            jnp.stack([unscaled_weights[:, :-1], unscaled_weights[:, 1:]]))
    weights = unnormalized_weights / jnp.sum(unnormalized_weights, 1, keepdims=True)
    # Weighted combination of the substencil approximations, for both biases.
    return tuple(jnp.sum(jnp.stack([substencil_approximations[:-1], substencil_approximations[1:]]) * weights, 1))
def add_to_bashrc(user, text):
    """
    Append text to a user's .bashrc file.

    :param user: login name whose home directory holds the .bashrc
    :param text: text to append verbatim (include a trailing newline if wanted)
    """
    home_dir = os.path.expanduser("~" + user)
    bashrc_file = os.path.join(home_dir, ".bashrc")
    # The with-statement closes the handle; the original also called
    # close() explicitly inside the with block, which is redundant.
    with open(bashrc_file, "a") as bashrc:
        bashrc.write(text)
def test_atomic_normalized_string_max_length_4_nistxml_sv_iv_atomic_normalized_string_max_length_5_1(mode, save_output, output_format):
    """
    Type atomic/normalizedString is restricted by facet maxLength with
    value 1000.
    """
    binding_kwargs = dict(
        schema="nistData/atomic/normalizedString/Schema+Instance/NISTSchema-SV-IV-atomic-normalizedString-maxLength-5.xsd",
        instance="nistData/atomic/normalizedString/Schema+Instance/NISTXML-SV-IV-atomic-normalizedString-maxLength-5-1.xml",
        class_name="NistschemaSvIvAtomicNormalizedStringMaxLength5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def defaults(dictionary, overwriteNone=False, **kwargs):
    """
    Set default values of a given dictionary, option to overwrite None values.

    Returns given dictionary with values updated by kwargs unless they already existed.

    :param dict dictionary:
    :param overwriteNone: Whether to overwrite None values.
    :param kwargs:
    """
    for key, value in dictionary.items():
        if overwriteNone and value is None and key in kwargs:
            # Keep the kwargs default: the dictionary's None is overwritten.
            continue
        # Otherwise the dictionary's value wins over the kwargs default.
        kwargs[key] = value
    return kwargs
def sunrise_sunset():
    """ Show the sunrise and sunset time."""
    st.write("_____________________________________")
    st.title("Sunrise and Sunset")
    # Fetch the current observation for the configured location.
    weather = mgr.weather_at_place(location).weather
    rise_dt = datetime.utcfromtimestamp(int(weather.sunrise_time()))
    set_dt = datetime.utcfromtimestamp(int(weather.sunset_time()))
    st.write(f"#### Sunrise Date: {rise_dt.date()}")
    st.write(f"### --Sunrise Time: {rise_dt.time()}")
    st.write(f"#### Sunset Date: {set_dt.date()}")
    st.write(f"### --Sunset Time: {set_dt.time()}")
def expected_average_shortest_distance_to_miner(
    crawl_graph: Union[
        ProbabilisticWeightedCrawlGraph[CrawledNode], CrawlGraph[CrawledNode]
    ],
    distances: Optional[np.ndarray] = None,
    miner_probability: Optional[Dict[CrawledNode, float]] = None,
) -> Dict[CrawledNode, float]:
    """Estimates the average shortest distance to a miner for each node in the graph.

    The estimate for a node is the probability-weighted sum, over all nodes i,
    of distance(node, i) * P(i is a miner).

    Args:
        crawl_graph: crawl graph; a plain CrawlGraph is converted to a
            ProbabilisticWeightedCrawlGraph first.
        distances: optional precomputed len(graph) x len(graph) matrix of
            probabilistic shortest distances; computed when omitted.
        miner_probability: optional per-node miner probability; estimated
            when omitted.

    Returns:
        Mapping from each node to its expected distance to a miner.
    """
    if not isinstance(crawl_graph, ProbabilisticWeightedCrawlGraph):
        crawl_graph = ProbabilisticWeightedCrawlGraph(crawl_graph)
    if miner_probability is None:
        miner_probability = estimate_miner_probability(crawl_graph)
    if distances is None:
        distances = crawl_graph.probabilistic_shortest_distances()
    elif (
        distances.ndim != 2
        or distances.shape[0] != len(crawl_graph)
        or distances.shape[1] != len(crawl_graph)
    ):
        raise ValueError(
            f"distances is expected to be an {len(crawl_graph)}x{len(crawl_graph)} matrix"
        )
    # Expected distance = sum_i distance[node, i] * P(node i is a miner).
    return {
        node: sum(
            distances[index][i] * miner_probability[crawl_graph.nodes[i]]
            for i in range(len(crawl_graph))
        )
        for node, index in tqdm(
            ((n, crawl_graph.node_indexes[n]) for n in crawl_graph),
            desc="calculating expected distance to miners",
            leave=False,
            unit=" nodes",
            total=len(crawl_graph),
        )
    }
def plot_route(data, pi, costs, title, idx_in_batch = 0, is_tensor = True):
    """Plots journey of agent

    Renders a VRP solution with plotly: depot, customers (annotated with
    their demands) and one trace per vehicle loop, with per-loop lengths in
    the legend.

    Args:
        data: dataset of graphs; data[0]=depot xy, data[1]=customer xy,
            data[2]=demands (each indexable by batch).
        pi: (batch, decode_step) # tour
        costs: total tour cost per batch element.
        title: text placed in the figure title.
        idx_in_batch: index of graph in data to be plotted
        is_tensor: whether pi/costs are torch tensors (moved to CPU/numpy here).
    """
    if is_tensor:
        # Remove extra zeros
        pi_ = get_clean_path(pi[idx_in_batch].cpu().numpy())
        cost = costs[idx_in_batch].cpu().numpy()
    else:
        pi_ = pi
        cost = costs
    depot_xy = data[0][idx_in_batch].cpu().numpy()
    customer_xy = data[1][idx_in_batch].cpu().numpy()
    demands = data[2][idx_in_batch].cpu().numpy()
    # customer_labels = ['(' + str(i) + ', ' + str(demand) + ')' for i, demand in enumerate(demands.round(2), 1)]
    customer_labels = ['(' + str(demand) + ')' for demand in demands.round(2)]
    # Node 0 is the depot; customers follow.
    xy = np.concatenate([depot_xy.reshape(1, 2), customer_xy], axis = 0)
    # Get list with agent loops in path: split the flat tour at each return
    # to the depot (node 0) and make every loop start at the depot.
    list_of_paths, cur_path = [], []
    for idx, node in enumerate(pi_):
        cur_path.append(node)
        if idx != 0 and node == 0:
            if cur_path[0] != 0:
                cur_path.insert(0, 0)
            list_of_paths.append(cur_path)
            cur_path = []
    path_traces = []
    for i, path in enumerate(list_of_paths, 1):
        coords = xy[[int(x) for x in path]]
        # Calculate length of each agent loop
        lengths = np.sqrt(np.sum(np.diff(coords, axis = 0) ** 2, axis = 1))
        total_length = np.sum(lengths)
        path_traces.append(go.Scatter(x = coords[:, 0],
                                      y = coords[:, 1],
                                      mode = 'markers+lines',
                                      name = f'tour{i} Length = {total_length:.3f}',
                                      opacity = 1.0))
    # Customer markers annotated with their demands.
    trace_points = go.Scatter(x = customer_xy[:, 0],
                              y = customer_xy[:, 1],
                              mode = 'markers+text',
                              name = 'Customer (demand)',
                              text = customer_labels,
                              textposition = 'top center',
                              marker = dict(size = 7),
                              opacity = 1.0
                              )
    # Depot marker (triangle), normalized capacity of 1.0.
    trace_depo = go.Scatter(x = [depot_xy[0]],
                            y = [depot_xy[1]],
                            mode = 'markers+text',
                            name = 'Depot (capacity = 1.0)',
                            text = ['1.0'],
                            textposition = 'bottom center',
                            marker = dict(size = 23),
                            marker_symbol = 'triangle-up'
                            )
    layout = go.Layout(title = dict(text = f'<b>VRP{customer_xy.shape[0]} {title}, Total Length = {cost:.3f}</b>', x = 0.5, y = 1, yanchor = 'bottom', yref = 'paper', pad = dict(b = 10)),#https://community.plotly.com/t/specify-title-position/13439/3
                       # xaxis = dict(title = 'X', ticks='outside'),
                       # yaxis = dict(title = 'Y', ticks='outside'),#https://kamino.hatenablog.com/entry/plotly_for_report
                       xaxis = dict(title = 'X', range = [0, 1], showgrid=False, ticks='outside', linewidth=1, mirror=True),
                       yaxis = dict(title = 'Y', range = [0, 1], showgrid=False, ticks='outside', linewidth=1, mirror=True),
                       showlegend = True,
                       width = 750,
                       height = 700,
                       autosize = True,
                       template = "plotly_white",
                       legend = dict(x = 1, xanchor = 'right', y =0, yanchor = 'bottom', bordercolor = '#444', borderwidth = 0)
                       # legend = dict(x = 0, xanchor = 'left', y =0, yanchor = 'bottom', bordercolor = '#444', borderwidth = 0)
                       )
    data = [trace_points, trace_depo] + path_traces
    fig = go.Figure(data = data, layout = layout)
    fig.show()
def strip_logEntry(etree_obj):
    """In-place remove elements and attributes that are only valid in v2 types from v1
    LogEntry.

    Args: etree_obj: ElementTree ElementTree holding a v1 LogEntry.
    """
    # Events defined by the v1 schema; anything else is coerced to "create".
    valid_v1_events = frozenset((
        "create",
        "read",
        "update",
        "delete",
        "replicate",
        "synchronization_failed",
        "replication_failed",
    ))
    for event_el in etree_obj.findall("event"):
        if event_el.text not in valid_v1_events:
            event_el.text = "create"
def warn(msg, *args):
    """
    log warning messages

    :param str or object msg: the message + format
    :param list args: message arguments
    """
    # logging.WARNING is the modern alias for the same level value as WARN.
    _log(logging.WARNING, msg, *args)
def create(name, frontend, backend, database, cache, pipeline):
    """ creates a synth wireframe with your desired frontend,
    backend, database, caching service, and ci/cd pipeline

    Scaffolds a project directory called ``name`` containing an NGINX
    router, a base docker-compose file, and one section per requested
    service.  Exits on invalid input; delegates teardown of a partially
    built project to ``cleanup``.

    :param name: project directory to create (must not already exist)
    :param frontend: frontend part name, or None
    :param backend: backend part name, or None
    :param database: database part name, or None
    :param cache: caching-service part name, or None
    :param pipeline: CI/CD pipeline name, or None
    """
    root_dir = os.path.dirname(os.path.abspath(__file__))
    copy_dir = root_dir + "/projects_master/nginx_router/"
    # at least one service must be requested
    if not frontend and not backend and not database and not cache:
        click.echo("all synth services can't be None")
        exit(1)
    # make the directory for the project if it doesn't exist
    try:
        os.mkdir(name)
        shutil.copyfile(root_dir + "/projects_master/README.md",
                        "{}/README.md".format(name))
    except FileExistsError:
        click.echo('Directory {} already exists.'
                   .format(name) +
                   " Please choose a different name.")
        exit(1)
    #<--- NGINX ROUTER SECTION --->#
    # - handles all container routing - #
    # - thus needed by default - #
    os.makedirs("{}/nginx_router/nginx_conf".format(name))
    # NGINX config files
    shutil.copyfile(copy_dir + "nginx_conf/default.conf",
                    "{}/nginx_router/nginx_conf/default.conf"
                    .format(name))
    shutil.copyfile(copy_dir + "nginx_conf/nginx.conf",
                    "{}/nginx_router/nginx_conf/nginx.conf"
                    .format(name))
    # NGINX docker files (dev and production)
    shutil.copyfile(copy_dir + "Dockerfile.dev",
                    "{}/nginx_router/Dockerfile.dev"
                    .format(name))
    shutil.copyfile(copy_dir + "Dockerfile",
                    "{}/nginx_router/Dockerfile"
                    .format(name))
    #<--- COMPOSE SECTION --->#
    # - base compose file for router - #
    # - gets appended to as needed - #
    shutil.copyfile(root_dir +
                    "/projects_master/docker-compose.yml",
                    "{}/docker-compose.yml".format(name))
    # build PartBuilder instance; flags derived directly from the options
    # (replaces the original's four-line if/assignment dance)
    pb = PartBuilder(parts_root=root_dir + "/parts",
                     project_name=name,
                     front_enabled=frontend is not None,
                     back_enabled=backend is not None)
    #<--- DATABASE SECTION --->#
    if database is not None:
        if database == "mysql":
            click.echo("MySQL 5.7.6 has permissions issues, " +
                       "using 5.7 instead")
        try:
            # create directory for volume mounting
            os.makedirs("{}/database/data".format(name))
            # add database section to docker-compose file
            pb.add_part(database)
        except PartBuilderException as pbe:
            # error out if the database isn't allowed
            click.echo(pbe)
            cleanup(name)
    #<--- CACHING SECTION --->#
    if cache is not None:
        try:
            # add cache section to docker-compose file
            pb.add_part(cache)
        except PartBuilderException as pbe:
            # error out if cache isn't allowed
            click.echo(pbe)
            cleanup(name)
    #<--- FRONTEND SECTION --->#
    if frontend is not None:
        try:
            # copy directory tree into project directory
            shutil.copytree(copy_dir + "frontend/{}"
                            .format(frontend),
                            "{}/nginx_router/frontend/"
                            .format(name))
            # add frontend section to docker-compose file
            pb.add_part(frontend, database, cache)
        except (PartBuilderException, FileNotFoundError) as desc_e:
            # bug fix: the original tested `type(e) is X`, which silently
            # misses subclasses; report the actual exception class instead
            click.echo("{}: {}".format(type(desc_e).__name__, desc_e))
            cleanup(name)
        except Exception as e:
            # unexpected failure: show the traceback, then tear down
            traceback.print_tb(e.__traceback__)
            cleanup(name)
    #<--- BACKEND SECTION --->#
    if backend is not None:
        try:
            # copy directory tree into project
            shutil.copytree(copy_dir + "backend/{}"
                            .format(backend),
                            "{}/nginx_router/backend/"
                            .format(name))
            # add backend section to docker-compose file
            pb.add_part(backend, database, cache)
        except (PartBuilderException, FileNotFoundError) as desc_e:
            # same subclass-safe reporting as the frontend section
            click.echo("{}: {}".format(type(desc_e).__name__, desc_e))
            cleanup(name)
        except Exception as e:
            traceback.print_tb(e.__traceback__)
            cleanup(name)
    #<--- PIPELINE SECTION --->#
    if pipeline is not None:
        try:
            # add and build pipeline yaml file
            pb.build_pipeline(name, pipeline, {
                "frontend": frontend,
                "backend": backend,
                "database": database,
                "cache": cache
            })
        except PartBuilderException as desc_e:
            # error out if pipeline isn't allowed
            click.echo("PartBuilderException: {}".format(desc_e))
            cleanup(name)
    click.echo("\nsynthesized project directory {}".format(name))
    click.echo("run:\n\n\tcd {}; docker-compose up\n"
               .format(name))
    click.echo("to start your development containers!\n")
def merge_mapping(ensembl_dict, mygene_website_dict, add_source=False):
    """Yield (ensembl gene ID, NCBI gene ID) pairs for unambiguous matches.

    Prefers the gene2ensembl mapping when it names exactly one NCBI gene ID.
    Otherwise falls back to symbol matching: the Ensembl symbol must match
    the mygene symbol of exactly one candidate NCBI ID.

    When ``add_source`` is truthy, each tuple carries a third element:
    '1' for a gene2ensembl match, '2' for a symbol match.

    OUTPUT generator:
    ---------------------
    Tuple with ensembl gene ID and NCBI gene ID
    """
    print("step 5 start: Generator-decide whether to use gene2ensembl or symbol for mapping")
    for ensembl_id, record in ensembl_dict.items():
        data = record['data']
        candidate_ncbi_ids = data['ncbi_list']
        target_symbol = data['symbol'].upper()
        gene2ensembl_matches = data['gene2ensembl']
        if len(gene2ensembl_matches) == 1:
            matched_id = int(gene2ensembl_matches[0])
            if add_source is False:
                yield (ensembl_id, matched_id)
            else:
                yield (ensembl_id, matched_id, '1')
        else:
            symbols = []
            for candidate in candidate_ncbi_ids:
                try:
                    symbols.append(mygene_website_dict[candidate].upper())
                except KeyError:
                    # placeholder keeps `symbols` aligned index-for-index
                    # with candidate_ncbi_ids (and never equals a symbol)
                    symbols.append('symbol_not_found')
            if symbols.count(target_symbol) == 1:
                matched_id = int(candidate_ncbi_ids[symbols.index(target_symbol)])
                if add_source is False:
                    yield (ensembl_id, matched_id)
                else:
                    yield (ensembl_id, matched_id, '2')
    print("step 5 end")
def dummy_state_sb(dummy_state: State, dummy_train_dataloader: DataLoader, conv_model: MosaicClassifier,
                   loss_fun_tuple: Callable, epoch: int, batch: int) -> State:
    """Populate a dummy State with the fields Selective Backprop requires.

    Sets the dataloader, model (with its loss), epoch and the absolute
    step index derived from epoch/batch, then returns the same State.
    """
    dummy_state.model = conv_model
    dummy_state.model.module.loss = loss_fun_tuple
    dummy_state.train_dataloader = dummy_train_dataloader
    dummy_state.epoch = epoch
    # absolute step index: full epochs completed plus batches into this one
    dummy_state.step = epoch * dummy_state.steps_per_epoch + batch
    return dummy_state
def create_model(species={}, parameters={}, reactions={}, events={}):
    """Returns an SBML Level 3 model.

    Args:
        species: mapping of species id -> initial amount (in `item` units).
        parameters: mapping of parameter id -> (value, unit id).
        reactions: mapping of reaction id -> dict with keys
            're' ((stoichiometry, species) reactant pairs),
            'pr' ((stoichiometry, species) product pairs) and
            'kin' (kinetic-law formula string).
        events: mapping of event id -> dict with keys 'trigger', 'delay'
            and 'assignments' ((variable, formula) pairs).

    Returns:
        libsbml SBMLDocument containing the assembled model.

    Example:
    species = { 'E': 1, \
    'EM': 0, \
    'EM2': 0, \
    'F': 100, \
    }
    parameters = {'k': (1e-06,'per_min'), \
    }
    reactions = { 'Production_E': \
    { 're': [(1,'E'),(1,'F')], \
    'pr': [(2,'E')], \
    'kin' : 'k * E * F' \
    }, \
    }
    events = {'e': \
    { 'trigger': 'true', \
    'delay': '10', \
    'assignments': [('M','1'),], \
    }, \
    }
    """
    # NOTE(review): mutable default arguments ({}) are shared across calls;
    # harmless here only because the dicts are never mutated.
    # Create an empty SBMLDocument object. It's a good idea to check for
    # possible errors. Even when the parameter values are hardwired like
    # this, it is still possible for a failure to occur (e.g., if the
    # operating system runs out of memory).
    try:
        document = sbml.SBMLDocument(3, 1)
    except ValueError:
        raise RuntimeError("Could not create SBMLDocumention object")
    # Create the basic Model object inside the SBMLDocument object.
    model = document.createModel()
    check(model, "create model")
    check(model.setTimeUnits("second"), "set model-wide time units")
    check(model.setExtentUnits("item"), "set model units of extent")
    check(
        model.setSubstanceUnits("item"), "set model substance units"
    )  # mole, item, gram, kilogram, dimensionless
    # Create a unit definition we will need later.
    # NOTE(review): the variable is called `per_second` and the unit kind is
    # SECOND, yet its id is 'per_min' — confirm which is intended.
    per_second = model.createUnitDefinition()
    check(per_second, "create unit definition")
    check(per_second.setId("per_min"), "set unit definition id")
    unit = per_second.createUnit()
    check(unit, "create unit")
    check(unit.setKind(sbml.UNIT_KIND_SECOND), "set unit kind")
    check(unit.setExponent(-1), "set unit exponent")
    check(unit.setScale(0), "set unit scale")
    check(
        unit.setMultiplier(1), "set unit multiplier"
    )
    # Create a compartment inside this model
    c1 = model.createCompartment()
    check(c1, "create compartment")
    check(c1.setId("c1"), "set compartment id")
    check(c1.setConstant(True), 'set compartment "constant"')
    check(c1.setSize(1), 'set compartment "size"')
    check(c1.setSpatialDimensions(3), "set compartment dimensions")
    check(
        c1.setUnits("dimensionless"), "set compartment size units"
    )
    # Create species inside this model, set the required attributes
    # for each species in SBML Level 3 (which are the 'id', 'compartment',
    # 'constant', 'hasOnlySubstanceUnits', and 'boundaryCondition'
    # attributes), and initialize the amount of the species along with the
    # units of the amount.
    for s_str, s_val in species.items():
        s = model.createSpecies()
        check(s, "create species")
        check(s.setId(s_str), "set species id")
        check(s.setCompartment("c1"), "set species compartment")
        check(s.setConstant(False), 'set "constant" attribute')
        check(s.setInitialAmount(float(s_val)), "set initial amount")
        check(s.setSubstanceUnits("item"), "set substance units")
        check(s.setBoundaryCondition(False), 'set "boundaryCondition"')
        check(s.setHasOnlySubstanceUnits(False), 'set "hasOnlySubstanceUnits"')
    # Create a parameter object inside this model, set the required
    # attributes 'id' and 'constant' for a parameter in SBML Level 3, and
    # initialize the parameter with a value along with its units.
    for k_str in parameters:
        k = model.createParameter()
        check(k, "create parameter k")
        check(k.setId(k_str), "set parameter id")
        check(k.setConstant(True), 'set parameter "constant"')
        check(k.setValue(parameters[k_str][0]), "set parameter value")
        check(k.setUnits(parameters[k_str][1]), "set parameter units")
    # Create a reaction inside this model, set the reactants and products,
    # and set the reaction rate expression (the SBML "kinetic law"). We
    # set the minimum required attributes for all of these objects. The
    # units of the reaction rate are determined from the 'timeUnits' and
    # 'extentUnits' attributes on the Model object.
    for r_str in reactions:
        r = model.createReaction()
        check(r, "create reaction")
        check(r.setId(r_str), "set reaction id")
        check(r.setReversible(False), "set reaction reversibility flag")
        check(r.setFast(False), 'set reaction "fast" attribute')
        reactants = reactions[r_str]["re"]
        for re_val, re_str in reactants:
            species_ref = r.createReactant()
            check(species_ref, "create reactant")
            check(species_ref.setSpecies(re_str), "assign reactant species")
            check(species_ref.setStoichiometry(re_val), "set set stoichiometry")
            check(species_ref.setConstant(True), 'set "constant" on species')
        products = reactions[r_str]["pr"]
        for pr_val, pr_str in products:
            species_ref = r.createProduct()
            check(species_ref, "create product")
            check(species_ref.setSpecies(pr_str), "assign product species")
            check(species_ref.setStoichiometry(pr_val), "set set stoichiometry")
            check(species_ref.setConstant(True), 'set "constant" on species')
        math_ast = sbml.parseL3Formula(reactions[r_str]["kin"])
        kinetic_law = r.createKineticLaw()
        check(math_ast, f"create AST for rate expression")
        check(kinetic_law, "create kinetic law")
        check(kinetic_law.setMath(math_ast), "set math on kinetic law")
    # create events
    # NOTE(review): trigger/delay/assignment are created via the *model*
    # (model.createTrigger etc.), which in libsbml presumably attaches them
    # to the most recently created Event — confirm against the libsbml docs.
    for e_str in events:
        e = model.createEvent()
        check(e, "create event")
        check(e.setId(e_str), "set id")
        check(e.setUseValuesFromTriggerTime(False), "?")
        t = model.createTrigger()
        check(t, "create trigger")
        check(
            t.setMath(sbml.parseL3Formula(events[e_str]["trigger"])),
            "set trigger condition",
        )
        check(t.setPersistent(False), "default not persistent")
        check(t.setInitialValue(False), "default not initially true")
        check(e.getTrigger().getMath(), 'Problem when creating the trigger condition. The trigger will not work.')
        # print( '> ' + sbml.formulaToString(e.getTrigger().getMath()) )
        d = model.createDelay()
        check(d, "create delay")
        # NOTE(review): delay uses parseFormula (L1 syntax) while everything
        # else uses parseL3Formula — confirm the inconsistency is intended.
        check(d.setMath(sbml.parseFormula(events[e_str]["delay"])), "set math")
        check(e.setDelay(d), "set delay")
        for ass in events[e_str]["assignments"]:
            ea = model.createEventAssignment()
            check(ea, "check event assignment")
            check(ea.setVariable(ass[0]), "set variable")
            check(ea.setMath(sbml.parseL3Formula(ass[1])), "set math")
    return document
def get_network_connection_query(endpoint_ids: str, args: dict) -> str:
    """Build the XQL network-connection query string.

    Args:
        endpoint_ids (str): The endpoint IDs to use.
        args (dict): The arguments to pass to the query
            ('remote_ip' required; 'local_ip' and 'port' optional).

    Returns:
        str: The created query.

    Raises:
        DemistoException: When no 'remote_ip' argument is supplied.
    """
    remote_ip_list = args.get('remote_ip', '')
    if not remote_ip_list:
        raise DemistoException('Please provide a remote_ip argument.')
    remote_ip_list = wrap_list_items_in_double_quotes(remote_ip_list)
    # optional filters default to empty strings so the f-string below
    # degrades gracefully when they are absent
    local_ips = args.get('local_ip')
    if local_ips:
        quoted_local_ips = wrap_list_items_in_double_quotes(local_ips)
        local_ip_filter = f'and action_local_ip in({quoted_local_ips})'
    else:
        local_ip_filter = ''
    ports = args.get('port')
    port_list_filter = f'and action_remote_port in({ports})' if ports else ''
    return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = STORY
    {local_ip_filter} and action_remote_ip in({remote_ip_list}) {port_list_filter}|
    fields agent_hostname, agent_ip_addresses, agent_id, actor_effective_username, action_local_ip, action_remote_ip,
    action_remote_port, dst_action_external_hostname, action_country, actor_process_image_name, actor_process_image_path,
    actor_process_command_line, actor_process_image_sha256, actor_process_instance_id, actor_process_causality_id'''
def dump_into_json(filename, metrics):
    """Dump the metrics dictionary into a JSON file
    It will automatically dump the dictionary:
    metrics = {'duration': duration,
               'voltage_extremes': voltage_extremes,
               'num_beats': num_beats,
               'mean_hr_bpm': mean_hr_bpm,
               'beats': beats}.
    in to a JSON file with the file name as the data file name.
    :param filename: name of the file being read (output is ``<filename>.json``)
    :param metrics: a dictionary containing duration,
        voltage extremes, number of beats, beats per minute,
        and the time where beats occur
    :returns:
        - successful_JSON - test if it has successfully create JSON
    """
    successful_JSON = False
    try:
        # `with` guarantees the handle is closed even if json.dump raises,
        # fixing the file-handle leak of the original TypeError path.
        with open(filename + '.json', 'w') as output_file:
            json.dump(metrics, output_file)
        successful_JSON = True
    except TypeError:
        print("Unsuccessfully output JSON file")
    return successful_JSON
def _costfun(params, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd):
    """
    Compute the residual vector for bundle adjustment.

    `params` packs the free camera poses and (when fixed_pt3d is empty)
    the free 3-D point coordinates; `pose0` is prepended as the fixed
    first pose.  Residuals are reprojection errors normalized by the
    per-observation pixel-error standard deviation.
    """
    if isinstance(params, (tuple, list)):
        params = np.array(params)
    full_params = np.hstack((pose0, params))
    # points are only packed into `params` when none are held fixed
    n_free_pts = 0 if len(fixed_pt3d) else n_pts
    poses, pts3d = _unpack(full_params, n_cams, n_free_pts)
    points_3d = pts3d if len(pts3d) else fixed_pt3d
    projected = _project(points_3d[pt3d_idxs], poses[cam_idxs], K)
    return ((pts2d - projected) / px_err_sd[:, None]).ravel()
def dbg_get_memory_info(*args):
    """
    dbg_get_memory_info() -> PyObject *
    This function returns the memory configuration of a debugged process.
    @return:
        None if no debugger is active
        tuple(start_ea, end_ea, name, sclass, sbase, bitness, perm)
    """
    # Auto-generated SWIG wrapper: delegates directly to the native
    # IDA debugger module; do not add logic here.
    return _ida_idd.dbg_get_memory_info(*args)
def get_headers(base_url: str, client_id: str, client_secret: str, grant_type: str, verify: bool):
    """
    Build request headers carrying an OAuth 2.0 bearer token.

    :type base_url: ``str``
    :param base_url: Base URL of the IdentityIQ tenant.

    :type client_id: ``str``
    :param client_id: Client Id for OAuth 2.0.

    :type client_secret: ``str``
    :param client_secret: Client Secret for OAuth 2.0.

    :type grant_type: ``str``
    :param grant_type: Grant Type for OAuth 2.0. Defaulted to 'client_credentials' if not provided.

    :return: Header with OAuth 2.0 information if client_id & client_secret are provided, else None.
        This will return None if the client_id & client_secret were not valid (authorized).
    """
    if base_url is None or client_id is None or client_secret is None:
        return None
    grant_type = 'client_credentials' if grant_type is None else grant_type
    credentials = '%s:%s' % (client_id, client_secret)
    token_request_headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Basic ' + base64.b64encode(credentials.encode()).decode()
    }
    oauth_response = requests.request("POST", url=f'{base_url}{IIQ_OAUTH_EXT}',
                                      data=f'grant_type={grant_type}',
                                      headers=token_request_headers, verify=verify)
    if oauth_response is not None and 200 <= oauth_response.status_code < 300:
        access_token = oauth_response.json().get('access_token', None)
        return {
            'Authorization': f'Bearer {access_token}',
            'Content-Type': 'application/json'
        }
    err_msg = 'Failed to get response'
    if oauth_response is not None:
        err_msg += f' {oauth_response.status_code}'
    raise DemistoException(err_msg)
def draw_pie(fracs, labels):
    """
    Plot a pie chart of the top issue labels and save it under '/tmp/'.

    :param fracs: fraction values, one per label
    :param labels: label names shown on the chart
    :return: path of the saved PNG file
    """
    logging.info("Drawing the pie chart..")
    fig = plt.figure()
    plt.pie(fracs, labels=labels, autopct=make_autopct(fracs), shadow=True)
    plt.title("Top 10 labels for newly opened issues")
    # timestamped filename keeps successive charts from overwriting each other
    now = datetime.datetime.today()
    pic_path = "/tmp/{}".format(
        "piechart_{}_{}.png".format(str(now.date()), str(now.time())))
    fig.savefig(pic_path)
    return pic_path
def eval_function_old(param, param_type):
    """ Eval Function (Deprecated)
    isOwner 0xe982E462b094850F12AF94d21D470e21bE9D0E9C

    Splits the command string and returns its second whitespace-separated
    token (expected to be a 0x-prefixed 40-character address), warning on
    malformed input.

    :param param: command string; non-string input yields None
    :param param_type: unused, kept for interface compatibility
    :return: the second token, or None when the input cannot be split or
        has fewer than two tokens
    """
    try:
        tokens = param.split(' ')
    except (TypeError, AttributeError):
        # bug fix: the original fell through and returned an undefined name
        # (NameError); also, .split on a non-str raises AttributeError,
        # which the original never caught.
        return None
    print(tokens)
    if len(tokens) < 2:
        # bug fix: the original caught IndexError here but then re-raised it
        # at `return splitted_input[1]` anyway
        print('there is not enough data to verify current input')
        return None
    address = tokens[1]
    if len(address[2:]) != 40:
        print('launch error, address must be 40 alfanumeric hash')
    elif re.search('0x[0-9,aA-zZ]{40}', address) is None:
        # bug fix: the original called .group(0) on a possible None
        # (AttributeError); warn instead of crashing
        print('launch error, address must be 40 alfanumeric hash')
    # backward compatible: the original returned the token even when the
    # validation above printed a warning
    return address
def _multi_convert(value):
"""
Function try and convert numerical values to numerical types.
"""
try:
value = int(value, 10)
except ValueError:
try:
value = float(value)
except ValueError:
pass
return value | 27,688 |
def cprint(text, color='normal', bold=True):
    """Print the given text using blessings terminal.

    Arguments:
        text (str): Text to print.
        color (str): Color to print the message in; unrecognized names
            fall back to plain text.
        bold (bool): Whether or not the text should be printed in bold.
    """
    # dispatch table replaces the original if/elif chain
    colorizers = {
        'cyan': COLOR_TERM.cyan,
        'green': COLOR_TERM.green,
        'magenta': COLOR_TERM.magenta,
        'red': COLOR_TERM.red,
        'white': COLOR_TERM.white,
    }
    colorize = colorizers.get(color)
    to_print = colorize(text) if colorize else text
    # NB: matches the original — any color other than 'normal' (even an
    # unrecognized one) gets bolded when `bold` is set
    if bold and color != 'normal':
        print(COLOR_TERM.bold(to_print))
    else:
        print(to_print)
def strip_all_collection_fields(ctx):
    """
    Strip all the `collection` string fields, so whitespace at the beginning and the end are removed.
    """
    with db_session_manager() as session:
        click.confirm(
            # bug fix: the two adjacent literals previously concatenated
            # into "stringfields" — a space was missing; the useless f-prefix
            # (no placeholders) is dropped as well
            "Are you sure you want to run this script? It will strip whitespaces to all the string "
            "fields in the `collection` table",
            abort=True,
        )
        # NOTE(review): the prompt and docstring say `collection`, but the
        # SQL below updates the `project` table — confirm which is current.
        query = """
                update project
                set
                owner=TRIM(owner),
                name=TRIM(name),
                description=TRIM(description),
                contact_name=TRIM(contact_name),
                contact_email=TRIM(contact_email),
                data_submission_policy_version=TRIM(data_submission_policy_version)
                where
                owner != TRIM(owner) OR
                name != TRIM(name) OR
                description != TRIM(description) OR
                contact_name != TRIM(contact_name) OR
                contact_email != TRIM(contact_email) OR
                data_submission_policy_version != TRIM(data_submission_policy_version)
                """
        session.execute(query)
        session.commit()
def destroy_node_and_cleanup(driver, node):
    """
    Destroy the provided node and cleanup any left over EBS volumes.

    :param driver: compute driver used to list volumes and destroy them
        (presumably a libcloud-style driver — confirm against the caller).
    :param node: node to destroy; its name must contain
        INSTANCE_NAME_STRING as a guard against deleting unrelated nodes.
    """
    volumes = driver.list_volumes(node=node)
    # safety: never delete a node this script did not create
    assert (
        INSTANCE_NAME_STRING in node.name
    ), "Refusing to delete node without %s in the name" % (INSTANCE_NAME_STRING)
    print("")
    print(('Destroying node "%s"...' % (node.name)))
    node.destroy()
    assert len(volumes) <= 1
    print("Cleaning up any left-over EBS volumes for this node...")
    # Give it some time for the volume to become detached from the node
    time.sleep(10)
    for volume in volumes:
        # Additional safety checks
        if volume.extra.get("instance_id", None) != node.id:
            continue
        if volume.size not in [8, 30]:
            # All the volumes we use are 8 GB EBS volumes
            # Special case is Windows 2019 with 30 GB volume
            continue
        destroy_volume_with_retry(driver=driver, volume=volume)
def main():
    """Create a new Assignment, prompt the user for its values, and show them."""
    assignment = Assignment()
    assignment.prompt()
    assignment.display()
def dbinom(n, p):
    """Binomial Distribution
    n = number of repetitions
    p = success probability
    Used when a certain experiment is repeated n times
    with a 0 ≤ P ≤ 1 probability to succeed once.

    Returns the probability mass function ``b(k)`` for exactly ``k``
    successes (the original docstring wrongly claimed nothing is
    returned).  The returned function exposes the used ``n`` and ``p``
    plus the distribution's ``expected`` value and ``variance`` as
    attributes.
    """
    def b(k):
        """Returns the probability of k successes"""
        if 0 <= k <= n:
            q = 1 - p
            return rperm(n, k) * p**k * q**(n-k)
        # impossible outcome: outside [0, n]
        return 0
    # plain attribute assignment is the idiomatic form of the original's
    # direct b.__dict__[...] writes
    b.n = n
    b.p = p
    b.expected = n * p
    b.variance = (n * p) * (1 - p)
    return b
def array_to_image(x, data_format='channels_last'):
    """Converts a 3D Numpy array to a PIL Image instance.
    Args:
        x: Input Numpy array.
        data_format: Image data format, either "channels_first" or "channels_last".
    Returns:
        A PIL Image instance.
    Raises:
        ValueError: if invalid `x` or `data_format` is passed.
    """
    if x.ndim != 3:
        raise ValueError('Expected image array to have rank 3 (single image). '
                         'Got array with shape: %s' % (x.shape,))
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Invalid data_format: %s' % data_format)
    if data_format == 'channels_first':
        # move channels to the trailing axis for PIL
        x = x.transpose(1, 2, 0)
    n_channels = x.shape[2]
    if n_channels == 4:
        return Image.fromarray(x.astype('uint8'), 'RGBA')
    if n_channels == 3:
        return Image.fromarray(x.astype('uint8'), 'RGB')
    if n_channels == 1:
        if np.max(x) > 255:
            # 32-bit signed integer mode for data beyond 8-bit range
            return Image.fromarray(x[:, :, 0].astype('int32'), 'I')
        return Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
    raise ValueError('Unsupported channel number: %s' % (x.shape[2],))
def test_celery__TransactionAwareTask__delay__5(celery_session_worker, zcml):
    """It allows to run two tasks in a single session."""
    # Queue the first task while logged in as 'example.user'.
    auth = zope.component.getUtility(
        zope.authentication.interfaces.IAuthentication)
    principal = auth.getPrincipal('example.user')
    z3c.celery.celery.login_principal(principal)
    result1 = get_principal_title_task.delay()
    # Switch identity: end the first interaction, log in as 'zope.user',
    # and queue a second task before anything is committed.
    zope.security.management.endInteraction()
    principal = auth.getPrincipal('zope.user')
    z3c.celery.celery.login_principal(principal)
    result2 = get_principal_title_task.delay()
    # A single commit dispatches both tasks; each must have captured the
    # principal that was active when it was queued.
    transaction.commit()
    assert 'Ben Utzer' == result1.get()
    assert 'User' == result2.get()
def worker_process_download_tvtorrent(
    tvTorUnit, client = None, maxtime_in_secs = 14400,
    num_iters = 1, kill_if_fail = False ):
    """
    Used by, e.g., :ref:`get_tv_batch`, to download missing episodes on the Plex_ TV library. Attempts to use the Deluge_ server, specified in :numref:`Seedhost Services Setup`, to download an episode. If successful then uploads the finished episode from the remote SSH server to the Plex_ server and local directory, specified in :numref:`Local and Remote (Seedhost) SSH Setup`.

    :param dict tvTorUnit: a :py:class:`dict` representing a summarized magnet link searching operation on an episode. The format and meaning of this data structure is described in :py:meth:`create_tvTorUnits <howdy.tv.tv.create_tvTorUnits>`.
    :param DelugeRPC client: optional argument, the `DelugeRPCClient <Deluge RPC client_>`_ object that at a low level uses the Deluge_ server to download the Magnet link at the remote SSH server. If ``None``, then this client is created using :py:meth:`get_deluge_client <howdy.core.core_deluge.get_deluge_client>`.
    :param int maxtime_in_secs: optional argument, the maximum time to wait for a Magnet link found by the Jackett_ server to fully download through the Deluge_ server. Must be :math:`\ge 60` seconds. Default is 14400 seconds.
    :param int num_iters: optional argument, the maximum number of Magnet links to try and fully download before giving up. The list of Magnet links to try for each missing episode is ordered from *most* seeders + leechers to *least*. Must be :math:`\ge 1`. Default is 1.
    :param bool kill_if_fail: optional argument. If ``True``, then on failing operation kill the torrent download on the Deluge_ server and delete any files associated with it. If ``False``, then keep the torrent download on failure.

    :returns: If successful, creates a two element :py:class:`tuple`: the first element is the base name of the episode that is uploaded to the Plex_ server, and the second element is a status :py:class:`dictionary <dict>` with three keys.

      * the ``status`` is ``"SUCCESS"``.
      * the ``message`` describes the final status of the operation.
      * the ``time`` tells how long, in seconds, the successful operation took.

      If unsuccessful, returns a failing tuple: the first element is ``None``, and the the second element is a status :py:class:`dictionary <dict>` with three keys.

      * the ``status`` is ``"FAILURE"``.
      * the ``message`` describes the illuminating reason as to how this operation failed.
      * the ``time`` tells how long, in seconds, the failing operation took.

    :rtype: tuple

    .. seealso::

       * :ref:`get_tv_batch`.
       * :py:meth:`get_remaining_episodes <howdy.tv.tv.get_remaining_episodes>`.
       * :py:meth:`create_tvTorUnits <howdy.tv.tv.create_tvTorUnits>`.
       * :py:meth:`download_batched_tvtorrent_shows <howdy.tv.tv.download_batched_tvtorrent_shows>`.

    .. _`Deluge RPC client`: https://github.com/JohnDoee/deluge-client
    .. _Deluge: https://en.wikipedia.org/wiki/Deluge_(software)
    """
    time0 = time.time( )
    assert( maxtime_in_secs > 0 )
    #
    if client is None:
        client, status = core_deluge.get_deluge_client( )
        if client is None:
            return None, _create_status_dict(
                'FAILURE', 'cannot create or run a valid deluge RPC client.', time0 )
    #
    ## now get list of torrents, choose "top" one
    # Each provider search below runs in its own process; every worker
    # appends a ( provider_name, payload, status ) tuple to a
    # Manager-backed shared list, where payload is either a candidate
    # list (on 'SUCCESS') or a status dict (on 'FAILURE').
    def _process_jackett_items( tvTorUnit, shared_list ):
        t0 = time.time( )
        torFileName = tvTorUnit[ 'torFname' ]
        totFname = tvTorUnit[ 'totFname' ]
        minSize = tvTorUnit[ 'minSize' ]
        maxSize = tvTorUnit[ 'maxSize' ]
        minSize_x265 = tvTorUnit[ 'minSize_x265' ]
        maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
        series_name = tvTorUnit[ 'tvshow' ]
        mustHaveString = torFileName.split( )[ -1 ]
        do_raw = tvTorUnit[ 'do_raw' ]
        logging.info( 'jackett start: %s, %s, %s' % (
            torFileName, mustHaveString, series_name ) )
        #
        ## try this twice if it can
        # second attempt drops a trailing "(YYYY)" year tag from the name
        torFileNameAlt = re.sub('\(([0-9]+)\)', '', torFileName ).strip( )
        torFileNames = [ torFileName, ]
        if torFileNameAlt != torFileName: torFileNames.append( torFileNameAlt )
        for tfn in torFileNames:
            logging.info( 'processing jackett from "%s", using "%s" now, at %0.3f seconds after start.' % (
                torFileName, tfn, time.time( ) - time0 ) )
            data, status = get_tv_torrent_jackett(
                tfn, maxnum = 100, keywords = [ 'x264', 'x265', '720p' ],
                minsizes = [ minSize, minSize_x265 ],
                maxsizes = [ maxSize, maxSize_x265 ],
                keywords_exc = [ 'xvid' ], raw = do_raw,
                must_have = [ mustHaveString ] )
            if status == 'SUCCESS': break
        if status != 'SUCCESS':
            shared_list.append( ( 'jackett', _create_status_dict( 'FAILURE', status, t0 ), 'FAILURE' ) )
            return
        logging.info( 'successfully processed jackett on %s in %0.3f seconds.' % (
            torFileName, time.time( ) - t0 ) )
        shared_list.append( ( 'jackett', data, 'SUCCESS' ) )
    #
    def _process_eztv_io_items( tvTorUnit, shared_list ):
        t0 = time.time( )
        torFileName = tvTorUnit[ 'torFname' ]
        totFname = tvTorUnit[ 'totFname' ]
        minSize = tvTorUnit[ 'minSize' ]
        maxSize = tvTorUnit[ 'maxSize' ]
        minSize_x265 = tvTorUnit[ 'minSize_x265' ]
        maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
        series_name = tvTorUnit[ 'tvshow' ]
        mustHaveString = torFileName.split( )[ -1 ]
        logging.info( 'eztv.io start: %s' % torFileName )
        #
        data, status = get_tv_torrent_eztv_io(
            torFileName, maxnum = 100, series_name = series_name,
            minsizes = [ minSize, minSize_x265],
            maxsizes = [ maxSize, maxSize_x265] )
        if status != 'SUCCESS':
            # NOTE(review): this failure dict uses time0 (function start)
            # whereas jackett/zooqle use t0 (worker start) — confirm intent.
            shared_list.append(
                ( 'eztv.io', _create_status_dict( 'FAILURE', status, time0 ), 'FAILURE' ) )
            return
        # keep only x264/x265/720p candidates, excluding xvid
        data_filt = list(filter(
            lambda elem: any(map(lambda tok: tok in elem['title'].lower( ),
                                 ( 'x264', 'x265', '720p' ) ) ) and
            'xvid' not in elem['title'].lower( ), data ) )
        if len( data_filt ) == 0:
            shared_list.append(
                ( 'eztv.io', _create_status_dict(
                    'FAILURE', 'ERROR, COULD NOT FIND %s IN EZTV.IO.' % torFileName, t0 ), 'FAILURE' ) )
            return
        logging.info( 'successfully processed eztv.io on %s in %0.3f seconds.' % (
            torFileName, time.time( ) - t0 ) )
        shared_list.append( ( 'eztv.io', data_filt, 'SUCCESS' ) )
    #
    def _process_zooqle_items( tvTorUnit, shared_list ):
        t0 = time.time( )
        torFileName = tvTorUnit[ 'torFname' ]
        totFname = tvTorUnit[ 'totFname' ]
        minSize = tvTorUnit[ 'minSize' ]
        maxSize = tvTorUnit[ 'maxSize' ]
        minSize_x265 = tvTorUnit[ 'minSize_x265' ]
        maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
        series_name = tvTorUnit[ 'tvshow' ]
        mustHaveString = torFileName.split( )[ -1 ]
        logging.info( 'zooqle start: %s' % torFileName )
        #
        data, status = get_tv_torrent_zooqle( torFileName, maxnum = 100 )
        if status != 'SUCCESS':
            shared_list.append( ( 'zooqle', _create_status_dict( 'FAILURE', status, t0 ), 'FAILURE' ) )
            return
        # same codec filter as eztv.io, plus a size window
        data_filt = list(filter(
            lambda elem: any(map(lambda tok: tok in elem['title'].lower( ),
                                 ( 'x264', 'x265', '720p' ) ) ) and
            'xvid' not in elem['title'].lower( ) and
            elem['torrent_size'] >= minSize*1e6 and
            elem['torrent_size'] <= maxSize*1e6, data ) )
        if len( data_filt ) == 0:
            shared_list.append(
                ( 'zooqle', _create_status_dict(
                    'FAILURE', 'ERROR, COULD NOT FIND %s IN ZOOQLE.' % torFileName, t0 ), 'FAILURE' ) )
        # NOTE(review): unlike the other two workers there is no `return`
        # after the failure append above, so an empty data_filt is ALSO
        # appended as 'SUCCESS' — looks like a missing early return.
        logging.info( 'successfully processed zooqle on %s in %0.3f seconds.' % (
            torFileName, time.time( ) - t0 ) )
        shared_list.append( ( 'zooqle', data_filt, 'SUCCESS' ) )
    # fan out the three provider searches in parallel processes
    m = Manager( )
    shared_list = m.list( )
    jobs = [ ]
    for targ in ( _process_jackett_items, _process_eztv_io_items, _process_zooqle_items ):
        job = Process( target = targ, args = ( tvTorUnit, shared_list ) )
        job.daemon = False
        jobs.append( job )
        job.start( )
    for job in jobs: job.join( )
    for job in jobs: job.close( )
    #shared_list = list(map(
    #    lambda proc: proc( tvTorUnit ),
    #    ( _process_jackett_items, _process_eztv_io_items, _process_zooqle_items ) ) )
    # separate provider failures from the merged pool of candidates
    error_tup = list(map(
        lambda dat: ( dat[0], dat[1] ), filter(lambda dat: dat[-1] == 'FAILURE', shared_list ) ) )
    data = list( chain.from_iterable( map(lambda dat: dat[1],
                                          filter(lambda dat: dat[-1] == 'SUCCESS', shared_list ) ) ) )
    #
    ## status of downloaded elements
    torFileName = tvTorUnit[ 'torFname' ]
    totFname = tvTorUnit[ 'totFname' ]
    if len( data ) == 0:
        return None, dict( error_tup )
    print( 'got %d candidates for %s in %0.3f seconds.' % (
        len(data), torFileName, time.time( ) - time0 ) )
    #
    ## wrapped away in another method
    return _worker_process_tvtorrents(
        client, data, torFileName, totFname,
        maxtime_in_secs, num_iters, kill_if_fail )
def find_fixture(
    gameweek,
    team,
    was_home=None,
    other_team=None,
    kickoff_time=None,
    season=CURRENT_SEASON,
    dbsession=session,
):
    """Return the Fixture for a given gameweek and team.

    The match can optionally be disambiguated by whether the team was at
    home (``was_home``), by the opposing team (``other_team``), and by the
    kickoff time (needed when a team plays more than one game in the same
    gameweek).  Raises ValueError if no (unique) fixture can be found.
    """
    # Resolve a team id to its name; a string is assumed to be a name already.
    if isinstance(team, str):
        team_name = team
    else:
        team_name = get_team_name(team, season=season, dbsession=dbsession)
    if not team_name:
        raise ValueError("No team with id {} in {} season".format(team, season))

    # Same resolution for the opponent, which may be omitted entirely.
    other_team_name = other_team
    if other_team and not isinstance(other_team, str):
        other_team_name = get_team_name(other_team, season=season, dbsession=dbsession)

    query = (
        dbsession.query(Fixture).filter_by(gameweek=gameweek).filter_by(season=season)
    )
    if was_home is True:
        query = query.filter_by(home_team=team_name)
    elif was_home is False:
        query = query.filter_by(away_team=team_name)
    elif was_home is None:
        # Unknown venue: the team may appear on either side of the fixture.
        query = query.filter(
            or_(Fixture.away_team == team_name, Fixture.home_team == team_name)
        )
    else:
        raise ValueError("was_home must be True, False or None")

    if other_team_name:
        # The opponent is on the opposite side to wherever `team` played.
        if was_home is True:
            query = query.filter_by(away_team=other_team_name)
        elif was_home is False:
            query = query.filter_by(home_team=other_team_name)
        elif was_home is None:
            query = query.filter(
                or_(
                    Fixture.away_team == other_team_name,
                    Fixture.home_team == other_team_name,
                )
            )

    fixtures = query.all()
    if not fixtures:
        raise ValueError(
            "No fixture with season={}, gw={}, team_name={}, was_home={}, other_team_name={}".format(
                season, gameweek, team_name, was_home, other_team_name
            )
        )
    if len(fixtures) == 1:
        return fixtures[0]

    # Team played multiple games in the gameweek: pick the fixture whose
    # (UTC) kickoff date matches the supplied kickoff time.
    fixture = None
    if kickoff_time:
        wanted_date = dateparser.parse(kickoff_time).replace(tzinfo=timezone.utc).date()
        for candidate in fixtures:
            candidate_date = (
                dateparser.parse(candidate.date).replace(tzinfo=timezone.utc).date()
            )
            if candidate_date == wanted_date:
                fixture = candidate
                break
    if not fixture:
        raise ValueError(
            "No unique fixture with season={}, gw={}, team_name={}, was_home={}, kickoff_time={}".format(
                season, gameweek, team_name, was_home, kickoff_time
            )
        )
    return fixture
def restart_dhcp():
    """Uses systemctl to restart isc-dhcpd server.

    Restarts the ``isc-dhcp-server`` unit with ``sudo systemctl restart``,
    then parses the key/value output of ``systemctl show`` to verify the
    unit came back in the ``active`` state.

    Raises:
        ValueError: if the service is not ``active`` after the restart.
        subprocess.CalledProcessError: if either systemctl invocation fails
            (logged before re-raising).

    Note: it is not (yet) in scope to dynamically update the '/etc/dhcp/dhcpd.conf' file."""
    command_restart = [
        "sudo",
        "--non-interactive",
        "-E",
        "systemctl",
        "restart",
        "isc-dhcp-server",
    ]
    command_status = [
        "systemctl",
        "show",
        "isc-dhcp-server",
        "--no-page",
    ]
    try:
        subprocess.check_call(command_restart)
        output_str_lines = (
            subprocess.check_output(command_status).decode().splitlines(keepends=False)
        )
        output = {}
        for line in output_str_lines:
            # Split on the FIRST '=' only: systemctl property values (e.g.
            # ExecStart) can themselves contain '=' characters, which the
            # previous line.split("=") silently truncated.  Lines without
            # an '=' are skipped instead of raising IndexError.
            key, sep, value = line.partition("=")
            if sep:
                output[key] = value
        # .get avoids an accidental KeyError if ActiveState is missing;
        # any non-"active" answer is treated as a failed restart.
        if output.get("ActiveState") != "active":
            raise ValueError("Failed to get DHCP server running")
    except subprocess.CalledProcessError as exc:
        log.error("Could not restart isc-dhcp-server")
        raise exc
async def jail(ctx, member: discord.Member, duration: str, *, reason=""):
    """Jails the member given for the duration given (in the familiar M0DBOT format). Optionally, add a reason which goes in #police_reports.
    One interesting thing is that consecutive jails override each other, allowing you to extend sentences."""
    # Silently refuse to act on members whose top role is at or above the
    # invoker's -- prevents moderators punishing their peers/superiors.
    if member.top_role >= ctx.author.top_role:
        return
    # NOTE(review): `jail` and `memer` here resolve to module-level globals
    # that this command's own name shadows -- confirm the jail *role* object
    # is (re)bound to the global `jail` name after this definition, otherwise
    # add_roles would receive the command object instead of a role.
    await member.add_roles(jail)
    await member.remove_roles(memer)
    durat = parse_duration(duration)
    if durat is not None:
        # Reuse the already-parsed duration; the original parsed the same
        # string a second time here.
        add_punishment("unjail", member, durat)
    await ctx.send(jail_response)
    await strike(member, f"{member.name}(`{member.id}`) was jailed for {duration} by: {ctx.author.name}(`{ctx.author.id}`) because: {reason}")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.