| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def convert_operand_kind(operand_tuple):
"""Returns the corresponding operand type used in spirv-tools for the given
operand kind and quantifier used in the JSON grammar.
Arguments:
- operand_tuple: a tuple of two elements:
- operand kind: used in the JSON grammar
- quantifier: '', '?', or '*'
Returns:
a string of the enumerant name in spv_operand_type_t
"""
kind, quantifier = operand_tuple
# The following cases are where we differ between the JSON grammar and
# spirv-tools.
if kind == 'IdResultType':
kind = 'TypeId'
elif kind == 'IdResult':
kind = 'ResultId'
elif kind == 'IdMemorySemantics' or kind == 'MemorySemantics':
kind = 'MemorySemanticsId'
elif kind == 'IdScope' or kind == 'Scope':
kind = 'ScopeId'
elif kind == 'IdRef':
kind = 'Id'
elif kind == 'ImageOperands':
kind = 'Image'
elif kind == 'Dim':
kind = 'Dimensionality'
elif kind == 'ImageFormat':
kind = 'SamplerImageFormat'
elif kind == 'KernelEnqueueFlags':
kind = 'KernelEnqFlags'
elif kind == 'LiteralExtInstInteger':
kind = 'ExtensionInstructionNumber'
elif kind == 'LiteralSpecConstantOpInteger':
kind = 'SpecConstantOpNumber'
elif kind == 'LiteralContextDependentNumber':
kind = 'TypedLiteralNumber'
elif kind == 'PairLiteralIntegerIdRef':
kind = 'LiteralIntegerId'
elif kind == 'PairIdRefLiteralInteger':
kind = 'IdLiteralInteger'
elif kind == 'PairIdRefIdRef': # Used by OpPhi in the grammar
kind = 'Id'
if kind == 'FPRoundingMode':
kind = 'FpRoundingMode'
elif kind == 'FPFastMathMode':
kind = 'FpFastMathMode'
if quantifier == '?':
kind = 'Optional{}'.format(kind)
elif quantifier == '*':
kind = 'Variable{}'.format(kind)
return 'SPV_OPERAND_TYPE_{}'.format(
re.sub(r'([a-z])([A-Z])', r'\1_\2', kind).upper())
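# Usage sketch (not from the original snippet): a few sample conversions, assuming the
# function above is in scope and `re` is imported at module level as it requires.
import re
print(convert_operand_kind(('IdRef', '*')))          # SPV_OPERAND_TYPE_VARIABLE_ID
print(convert_operand_kind(('ImageOperands', '?')))  # SPV_OPERAND_TYPE_OPTIONAL_IMAGE
print(convert_operand_kind(('LiteralString', '')))   # SPV_OPERAND_TYPE_LITERAL_STRING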
| 10,100
|
def setNumIterations(numIter):
"""
Sets the number of times an iterative constraint solver is repeated.
Increasing the number of iterations improves the constraint solver at the cost of performance and game engine speed.
@param numIter: The number of iterations the constraint solver performs.
@type numIter: integer
"""
| 10,101
|
def screen_missing_data(database,subject,begin=None,end=None):
""" Returns a DataFrame contanining the percentage (range [0,1]) of loss data
calculated based on the transitions of screen status. In general, if
screen_status(t) == screen_status(t+1), we declared we have at least one
missing point.
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
count: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"subject not given in string format"
screen = database.raw(table='AwareScreen', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = screen.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = screen.iloc[len(screen)-1]['datetime']
screen=screen.drop_duplicates(subset=['datetime'],keep='first')
screen = screen.drop(['device','user','time'],axis=1)
screen=screen.loc[begin:end]
screen['screen_status']=pd.to_numeric(screen['screen_status'])
#Include the missing points that are due to shutting down the phone
shutdown = shutdown_info(database,subject,begin,end)
shutdown=shutdown.rename(columns={'battery_status':'screen_status'})
shutdown['screen_status']=0
screen = screen.merge(shutdown, how='outer', left_index=True, right_index=True)
screen['screen_status'] = screen.fillna(0)['screen_status_x'] + screen.fillna(0)['screen_status_y']
screen = screen.drop(['screen_status_x','screen_status_y'],axis=1)
dates=screen.datetime_x.combine_first(screen.datetime_y)
screen['datetime']=dates
screen = screen.drop(['datetime_x','datetime_y'],axis=1)
#Detect missing data points
screen['missing']=0
screen['next']=screen['screen_status'].shift(-1)
screen['dummy']=screen['screen_status']-screen['next']
screen['missing'] = np.where(screen['dummy']==0, 1, 0)
screen['missing'] = screen['missing'].shift(1)
screen = screen.drop(['dummy','next'], axis=1)
screen = screen.fillna(0)
screen['datetime'] = screen['datetime'].apply( lambda screen : datetime.datetime(year=screen.year, month=screen.month, day=screen.day))
screen = screen.drop(['screen_status'], axis=1)
count=pd.pivot_table(screen,values='missing',index='datetime', aggfunc='count')
count = screen.groupby(['datetime','missing'])['missing'].count().unstack(fill_value=0)
count['missing'] = count[1.0]/(count[0.0]+count[1.0])
count = count.drop([0.0,1.0], axis=1)
if (pd.Timestamp.tzname(count.index[0]) != 'EET'):
if pd.Timestamp.tzname(count.index[0]) != 'EEST':
count.index = pd.to_datetime(count.index).tz_localize('Europe/Helsinki')
return count
| 10,102
|
def check(text):
"""Check the text."""
error_code = "example.first"
msg = "First line always has an error."
reverse(text)
return [(1, 1, error_code, msg)]
| 10,103
|
def figure(**kwargs):
"""
Create a new figure with the given settings.
Settings like the current colormap, title or axis limits are stored in the
current figure. This function creates a new figure, restores the default
settings and applies any settings passed to the function as keyword
arguments.
**Usage examples:**
>>> # Restore all default settings
>>> mlab.figure()
>>> # Restore all default settings and set the title
>>> mlab.figure(title="Example Figure")
"""
global _plt
_plt = _Figure()
_plt.kwargs.update(kwargs)
return _plt
| 10,104
|
async def test_get_event_format_no_authorization(
client: _TestClient, mocker: MockFixture, event_format_interval_start: dict
) -> None:
"""Should return 401 Unauthorized."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=event_format_interval_start,
)
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=401)
resp = await client.get(f"/events/{EVENT_ID}/format")
assert resp.status == 401
| 10,105
|
def parse(content: str, target: str = "all") -> List[Inline]:
"""Parses an HTML document and extracts."""
soup = BeautifulSoup(content, "html.parser")
if target == "all":
search_queries = chain(*_VALID_TARGETS.values())
elif target in _VALID_TARGETS.keys():
search_queries = chain(_VALID_TARGETS[target])
else:
raise ValueError("Invalid Target")
elements = []
for q in search_queries:
for tag in soup.find_all(q.search_function):
if q.attr_name:
inline = Inline(tag[q.attr_name], tag.sourceline, tag.sourcepos)
else:
if not tag.contents:
continue
inline = Inline(tag.contents[0], tag.sourceline, tag.sourcepos)
elements.append(inline)
return elements
| 10,106
|
def array_ravel(arr):
"""Flatten a C/F array into a 1D array without enforcing the ordering of
each element.
Args
----
arr: array
Returns
-------
A flattened 1D array
"""
raise NotImplementedError
| 10,107
|
def create_output(verified_specific_headers_list:list) -> str:
""" Design Output """
if args.verbose is True:
print("[!] INFO: Outputting Specific Header Information")
return_output = ""
for specific_header in verified_specific_headers_list:
split_header = specific_header.split(":")
if split_header[1] != "":
return_output += f"{split_header[0]:<25} is declared -> DATA:{split_header[1]:30}\n"
else:
return_output += f"{split_header[0]:<25} is NOT declared -> NO DATA\n"
return return_output
| 10,108
|
def registered_response_data():
"""Body (bytes) of the registered response."""
return b"response data"
| 10,109
|
def generate_pages(prefix='', **kwargs):
"""
This function creates a paginator and yields one page at a time.
:param prefix: the prefix (starting under the bucket) of the key name
:return: one page of contents
"""
bucket = kwargs.get('bucket', BUCKET_DEFAULT)
check_arg_bucket(bucket)
s3_client = get_client()
paginator = s3_client.get_paginator('list_objects')
parameters = {'Bucket': bucket, 'Prefix': prefix, 'Delimiter': ''}
p_iterator = paginator.paginate(**parameters)
for obj in p_iterator.search('Contents'):
if obj:
key_name = obj.get('Key', '')
if key_name.endswith("/"):
LOGGER.info("- skipping key: %s", key_name)
continue
yield obj
| 10,110
|
def parse_log(log_file):
"""
Parses a log file into a list of lists containing the messages logged
:param log_file: path-like: Path to the log file
:return: list of lists containing messages in the log file
"""
parsed_logs = [[] for i in range(5)]
with open(log_file, 'r') as f:
for line in f.readlines():
parts = line.split(':')
for i in range(0, len(parts)):
parts[i] = parts[i].strip()
if parts[0] == LogLevel.ERROR:
parsed_logs[0].append(":".join(parts[1:]))
elif parts[0] == LogLevel.WARNING:
parsed_logs[1].append(":".join(parts[1:]))
elif parts[0] == LogLevel.INFO:
parsed_logs[2].append(":".join(parts[1:]))
elif parts[0] == LogLevel.STARTUP:
parsed_logs[3].append(":".join(parts[1:]))
else:
# lines without a recognized level go into the fifth bucket
parsed_logs[4].append(line)
return parsed_logs
| 10,111
|
def print_build_cmds(cliargs, params):
"""Generate bash-code with `docker build` commands."""
print("""set -e
set echo off
echo "minikan dockerization - Start"
""")
if "builder" in cliargs:
print(_build_builder(params))
if "all" in cliargs or "base" in cliargs:
print(_build_base(params))
if "all" in cliargs or "zk" in cliargs:
print(_build_zk(params))
if "all" in cliargs or "kafka" in cliargs:
print(_build_kafka(params))
print("""echo "minikan dockerization - Done"
echo
echo Review produced images:
docker images kafkanetes/minikan-builder| head
docker images kafkanetes/minikan-base | head
docker images kafkanetes/minikan-zk | head
docker images kafkanetes/minikan-kafka | head
echo
echo Optionally, inspect image content by running:
echo docker run -it kafkanetes/minikan-zk:{kan_zk_version} bash
echo docker run -it kafkanetes/minikan-kafka:{kan_kafka_version} bash
""".format(**params))
# Optionally, upload images
if "--push" in cliargs:
print("""set echo off
echo "Upload minikan docker images"
docker push kafkanetes/minikan-builder:{minikan_version}
docker push kafkanetes/minikan-base:{minikan_version}
docker push kafkanetes/minikan-zk:{kan_zk_version}
docker push kafkanetes/minikan-kafka:{kan_kafka_version}
""".format(**params))
| 10,112
|
def print_mask_info(data):
"""
Print mask info, including locations of non-empty masks.
Args:
data (Xarray dataset): File containing masks.
Returns:
Printed statements of total number of times (3-hourly for CESM CAM files)
containing MCSs.
"""
print("number of masks with an MCS:", np.argwhere(data.binary_tag.sum(axis=1).sum(axis=1).values!=0).shape[0])
print("number of masks without an MCS:", np.argwhere(data.binary_tag.sum(axis=1).sum(axis=1).values==0).shape[0])
print(np.argwhere(data.binary_tag.sum(axis=1).sum(axis=1).values!=0).reshape(-1))
return
| 10,113
|
def generate_experiment_fn(train_files,
eval_files,
num_epochs=None,
train_batch_size=40,
eval_batch_size=40,
embedding_size=8,
first_layer_size=100,
num_layers=4,
scale_factor=0.7,
**experiment_args):
"""Create an experiment function given hyperparameters.
See command line help text for description of args.
Returns:
A function (output_dir) -> Experiment where output_dir is a string
representing the location of summaries, checkpoints, and exports.
this function is used by learn_runner to create an Experiment which
executes model code provided in the form of an Estimator and
input functions.
All listed arguments in the outer function are used to create an
Estimator, and input functions (training, evaluation, serving).
Unlisted args are passed through to Experiment.
"""
# Check verbose logging flag
verbose_logging = experiment_args.pop('verbose_logging')
model.set_verbose_logging(verbose_logging)
def _experiment_fn(output_dir):
# num_epochs can control duration if train_steps isn't
# passed to Experiment
train_input = model.generate_input_fn(
train_files,
num_epochs=num_epochs,
batch_size=train_batch_size,
)
# Don't shuffle evaluation data
eval_input = model.generate_input_fn(
eval_files,
batch_size=eval_batch_size,
shuffle=False
)
return tf.contrib.learn.Experiment(
model.build_estimator(
output_dir,
embedding_size=embedding_size,
# Construct layer sizes with exponential decay
hidden_units=[
max(2, int(first_layer_size * scale_factor**i))
for i in range(num_layers)
]
),
train_input_fn=train_input,
eval_input_fn=eval_input,
# export strategies control the prediction graph structure
# of exported binaries.
export_strategies=[saved_model_export_utils.make_export_strategy(
model.serving_input_fn,
default_output_alternative_key=None,
exports_to_keep=1
)],
**experiment_args
)
return _experiment_fn
| 10,114
|
def get_inclination_and_azimuth_from_locations(self, locations):
"""
self must to point to Main_InputWindow
"""
"""
Return "Inc" and "Azi" array objects in reference units.
"""
Inc = []
Azi = []
for MD in locations:
tangentVector = get_ASCT_from_MD(self, MD)
verticalVector = np.array([0.0,0.0,1.0,0.0])
if np.allclose( tangentVector, verticalVector, atol=1e-2, rtol=0.0 ):
tangentVector = verticalVector
inc = np.arccos( tangentVector[2] )
if inc==0.0:
azi = 0.0
else:
sinazi = tangentVector[0]/np.sin(inc)
cosazi = tangentVector[1]/np.sin(inc)
if sinazi>=0:
azi = np.arccos( cosazi )
elif sinazi<0:
azi = 2*np.pi-np.arccos( cosazi )
Inc.append(inc)
Azi.append(azi)
return np.array(Inc), np.array(Azi)
| 10,115
|
def _ensure_package(base, *parts):
"""Ensure that all the components of a module directory path exist, and
contain a file __init__.py."""
bits = []
for bit in parts[:-1]:
bits.append(bit)
base.ensure(*(bits + ['__init__.py']))
return base.ensure(*parts)
| 10,116
|
def make_cat_matrix(n_rows: int, n_cats: int) -> tm.CategoricalMatrix:
"""Make categorical matrix for benchmarks."""
mat = tm.CategoricalMatrix(np.random.choice(np.arange(n_cats, dtype=int), n_rows))
return mat
| 10,117
|
def add_stabilizer_nodes(boundaries_raw, electrodes, nr_nodes_between):
"""
Segmentation of nodes:
we start from the existing nodes.
N.F is the ratio of required nodes to existing nodes:
first, add N nodes to each segment,
then, add one more node to the first F segments.
* assumes ordered boundaries
"""
boundaries = boundaries_raw
# find first electrode in boundary
for nr in range(electrodes.shape[0] - 1):
index0 = np.where(
(boundaries[:, 0] == electrodes[nr, 0]) &
(boundaries[:, 1] == electrodes[nr, 1])
)[0]
index1 = np.where(
(boundaries[:, 0] == electrodes[nr + 1, 0]) &
(boundaries[:, 1] == electrodes[nr + 1, 1])
)[0]
index0 = index0[0]
index1 = index1[0]
if index1 - index0 < 0:
index0, index1 = index1, index0
running_index = index0
nr_nodes = index1 - index0 - 1
while nr_nodes < nr_nodes_between:
# determine line equation
xy0 = boundaries[running_index, 0:2]
xy1 = boundaries[running_index + 1, 0:2]
direction = xy1 - xy0
heading = direction / np.sqrt(np.sum(direction ** 2))
# new node
xy_new = xy0 + heading * direction / 2.0
a = boundaries[running_index, 2][np.newaxis]
xyb = np.hstack((xy_new, a))
boundaries = np.insert(boundaries, running_index + 1, xyb, axis=0)
# 2, because we have to count the new one
running_index += 2
index1 += 1
nr_nodes += 1
if running_index == index1:
running_index = index0
return boundaries
| 10,118
|
def test_find_by_username(session, client, jwt):
"""Assert that user find by username is working as expected."""
user = User.find_by_username(TEST_TOKEN['username'])
if not user:
User.create_from_jwt_token(TEST_TOKEN, 'PS12345')
user = User.find_by_username(TEST_TOKEN['username'])
assert user
assert user.id
assert user.username == 'username_TEST1'
assert user.iss == 'issuer_TEST1'
assert user.sub == 'subject_TEST1'
assert user.firstname == 'given_name_TEST1'
assert user.lastname == 'family_name_TEST1'
| 10,119
|
def adjust_learning_rate(optimizer, lr_init, epoch):
"""decrease the learning rate at 160 and 180 epoch ( from LDAM-DRW, NeurIPS19 )"""
lr = lr_init
if epoch < 5:
lr = (epoch + 1) * lr_init / 5
else:
if epoch >= 160:
lr /= 100
if epoch >= 180:
lr /= 100
for param_group in optimizer.param_groups:
param_group['lr'] = lr
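# Usage sketch (not from the original snippet): trace the resulting schedule (5-epoch
# warm-up, then two /100 drops) with a dummy torch optimizer; values are approximate.
import torch
opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
for epoch in (0, 4, 100, 160, 180): adjust_learning_rate(opt, 0.1, epoch); print(epoch, opt.param_groups[0]['lr'])
# expected lr values: 0.02, 0.1, 0.1, 0.001, 1e-05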
| 10,120
|
def read_data(creds):
"""Read court tracking data in and drop duplicate case numbers"""
# try:
df = gsheet.read_data(gsheet.open_sheet(gsheet.init_sheets(creds),"01_Community_lawyer_test_out_final","Frontend"))
# df.drop_duplicates("Case Number",inplace=True) #Do we want to drop duplicates???
return df
| 10,121
|
def _build_indie_lyrics(
root: str, num_workers: int = 8, max_size: int = 200000
) -> DocumentArray:
"""
Builds the indie lyrics dataset. Download the CSV files from:
https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:param max_size: used to randomly subsample from dataset if greater than 0
:return: DocumentArray
"""
return _build_lyrics(
genre='Indie',
root=root.replace('indie-lyrics', 'lyrics'),
num_workers=num_workers,
max_size=max_size,
)
| 10,122
|
def getHostname(request):
"""
Utility method for getting hostname of client.
"""
if request.getClientIP() in LOOPBACK_ADDRESSES and has_headers(request, X_FORWARDED_FOR):
# nginx typically returns ip addresses
addr = get_headers(request, X_FORWARDED_FOR)
if isIPAddress(addr):
# we really shouldn't do such blocking calls in twisted,
# but the twisted dns interface is rather terrible and
# odd things happen when using it
# Set timeout to 1 second to limit the possible damage
try:
socket.setdefaulttimeout(1)
info = socket.gethostbyaddr(addr)
return info[0]
except socket.error as msg:
log.msg("Error performing reverse lookup: %s" % msg)
return addr
else:
return addr
else:
hostname = request.getClient()
if hostname is None:
hostname = request.getClientIP()
return hostname
| 10,123
|
def CollapseSolutionPosition(x,x0):
"""
Calculate a free-fall collapse solution
x - position to calculate time at in cm
x0 - initial position in cm
Sam Geen, March 2018
"""
X = x/x0
t = (np.arccos(np.sqrt(X)) + np.sqrt(X * (1.0-X))) * x0**1.5 / np.sqrt(2.0*units.G*gravity.centralmass)
return t
| 10,124
|
def main():
"""RUN DECODING."""
parser = argparse.ArgumentParser()
# decode setting
parser.add_argument("--feats", required=True,
type=str, help="list or directory of aux feat files")
parser.add_argument("--checkpoint", required=True,
type=str, help="model file")
parser.add_argument("--outdir", required=True,
type=str, help="directory to save generated samples")
parser.add_argument("--stats", default=None,
type=str, help="hdf5 file including statistics")
parser.add_argument("--config", default=None,
type=str, help="configure file")
parser.add_argument("--fs", default=16000,
type=int, help="sampling rate")
parser.add_argument("--batch_size", default=32,
type=int, help="number of batch size in decoding")
parser.add_argument("--n_gpus", default=1,
type=int, help="number of gpus")
# other setting
parser.add_argument("--intervals", default=1000,
type=int, help="log interval")
parser.add_argument("--seed", default=1,
type=int, help="seed number")
parser.add_argument("--verbose", default=1,
type=int, help="log level")
args = parser.parse_args()
# set log level
if args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
elif args.verbose > 0:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.warning("logging is disabled.")
# show arguments
for key, value in vars(args).items():
logging.info("%s = %s" % (key, str(value)))
# check arguments
if args.stats is None:
args.stats = os.path.dirname(args.checkpoint) + "/stats.h5"
if args.config is None:
args.config = os.path.dirname(args.checkpoint) + "/model.conf"
if not os.path.exists(args.stats):
raise FileNotFoundError("statistics file is missing (%s)." % (args.stats))
if not os.path.exists(args.config):
raise FileNotFoundError("config file is missing (%s)." % (args.config))
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# fix seed
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# fix slow computation of dilated conv
# https://github.com/pytorch/pytorch/issues/15054#issuecomment-450191923
torch.backends.cudnn.benchmark = True
# load config
config = torch.load(args.config)
# get file list
if os.path.isdir(args.feats):
feat_list = sorted(find_files(args.feats, "*.h5"))
elif os.path.isfile(args.feats):
feat_list = read_txt(args.feats)
else:
logging.error("--feats should be directory or list.")
sys.exit(1)
# prepare the file list for parallel decoding
feat_lists = np.array_split(feat_list, args.n_gpus)
feat_lists = [f_list.tolist() for f_list in feat_lists]
# define transform
scaler = StandardScaler()
scaler.mean_ = read_hdf5(args.stats, "/" + config.feature_type + "/mean")
scaler.scale_ = read_hdf5(args.stats, "/" + config.feature_type + "/scale")
wav_transform = transforms.Compose([
lambda x: encode_mu_law(x, config.n_quantize)])
feat_transform = transforms.Compose([
lambda x: scaler.transform(x)])
# define gpu decode function
def gpu_decode(feat_list, gpu):
# set default gpu and do not track gradient
torch.cuda.set_device(gpu)
torch.set_grad_enabled(False)
# define model and load parameters
if config.use_upsampling_layer:
upsampling_factor = config.upsampling_factor
else:
upsampling_factor = 0
model = WaveNet(
n_quantize=config.n_quantize,
n_aux=config.n_aux,
n_resch=config.n_resch,
n_skipch=config.n_skipch,
dilation_depth=config.dilation_depth,
dilation_repeat=config.dilation_repeat,
kernel_size=config.kernel_size,
upsampling_factor=upsampling_factor)
model.load_state_dict(torch.load(
args.checkpoint,
map_location=lambda storage,
loc: storage)["model"])
model.eval()
model.cuda()
# define generator
generator = decode_generator(
feat_list,
batch_size=args.batch_size,
feature_type=config.feature_type,
wav_transform=wav_transform,
feat_transform=feat_transform,
upsampling_factor=config.upsampling_factor,
use_upsampling_layer=config.use_upsampling_layer,
use_speaker_code=config.use_speaker_code)
# decode
if args.batch_size > 1:
for feat_ids, (batch_x, batch_h, n_samples_list) in generator:
logging.info("decoding start")
samples_list = model.batch_fast_generate(
batch_x, batch_h, n_samples_list, args.intervals)
for feat_id, samples in zip(feat_ids, samples_list):
wav = decode_mu_law(samples, config.n_quantize)
sf.write(args.outdir + "/" + feat_id + ".wav", wav, args.fs, "PCM_16")
logging.info("wrote %s.wav in %s." % (feat_id, args.outdir))
else:
for feat_id, (x, h, n_samples) in generator:
logging.info("decoding %s (length = %d)" % (feat_id, n_samples))
samples = model.fast_generate(x, h, n_samples, args.intervals)
wav = decode_mu_law(samples, config.n_quantize)
sf.write(args.outdir + "/" + feat_id + ".wav", wav, args.fs, "PCM_16")
logging.info("wrote %s.wav in %s." % (feat_id, args.outdir))
# parallel decode
processes = []
for gpu, feat_list in enumerate(feat_lists):
p = mp.Process(target=gpu_decode, args=(feat_list, gpu,))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
| 10,125
|
def scaleSpectralSky_cor(subframe, badpixelmask=None, maxshift=20, fitwidth=2, pord=1, nmed=3, dispaxis=0, spatial_index=None, refpix=None, tord=2):
"""
Use cross-correlation to subtract tilted sky backgrounds.
subframe : NumPy array
data subframe containing sky data to be subtracted (and,
perhaps, an object's spectral trace).
badpixelmask : None or NumPy array
A boolean array, equal to zero for good pixels and unity for bad
pixels. If this is set, the first step will be a call to
:func:`nsdata.bfixpix` to interpolate over these values.
pord : int
degree of spectral tilt. Keep this number low!
maxshift : int
Maximum acceptable shift. NOT YET IMPLEMENTED!
fitwidth : int
Maximum radius (in pixels) for fitting to the peak of the
cross-correlation.
nmed : int
Size of window for the 2D median pre-smoothing filter (to reject bad pixels, etc.)
dispaxis : int
set dispersion axis: 0 = horizontal and 1 = vertical
spatial_index : None, or 1D NumPy array of type *bool*
Which spatial rows (if dispaxis=0) to use when fitting the tilt
of sky lines across the spectrum. If you want to use all, set
to None. If you want to ignore some (e.g., because there's a
bright object's spectrum there) then set those rows' elements
of spatial_index to 'False'.
refpix : scalar
Pixel along spatial axis to which spectral fits should be
aligned; if a spectral trace is present, one should set
"refpix" to the location of the trace.
tord : int
Order of polynomial fits along spatial direction in aligned
2D-spectral frame, to account for misalignments or
irregularities of tilt direction.
:RETURNS:
a model of the sky background, of the same shape as 'subframe.'
"""
# 2012-09-22 17:04 IJMC: Created
# 2012-12-27 09:53 IJMC: Edited to better account for sharp edges
# in backgrounds.
from scipy import signal
from nsdata import bfixpix
# Parse inputs
if not isinstance(subframe, np.ndarray):
subframe = pyfits.getdata(subframe)
if badpixelmask is None:
pass
else:
badpixelmask = np.array(badpixelmask).astype(bool)
subframe = bfixpix(subframe, badpixelmask, retdat=True)
if dispaxis==1:
subframe = subframe.transpose()
# Define necessary variables and vectors:
npix, nlam = subframe.shape
if spatial_index is None:
spatial_index = np.ones(npix, dtype=bool)
else:
spatial_index = np.array(spatial_index, copy=False)
if refpix is None:
refpix = npix/2.
lampix = np.arange(nlam)
tpix = np.arange(npix)
alllags = np.arange(nlam-maxshift*2) - np.floor(nlam/2 - maxshift)
# Median-filter the input data:
if nmed > 1:
ssub = signal.medfilt2d(subframe, nmed)
else:
ssub = subframe.copy()
ref = np.median(ssub, axis=0)
#allcor = np.zeros((npix, nlam-maxshift*2))
shift = np.zeros(npix, dtype=float)
for ii in tpix:
# Cross-correlate to measure alignment at each row:
cor = np.correlate(ref[maxshift:-maxshift], signal.medfilt(ssub[ii], nmed)[maxshift:-maxshift], mode='same')
# Measure offset of each row:
maxind = alllags[(cor==cor.max())].mean()
fitind = np.abs(alllags - maxind) <= fitwidth
quadfit = np.polyfit(alllags[fitind], cor[fitind], 2)
shift[ii] = -0.5 * quadfit[1] / quadfit[0]
shift_polyfit = an.polyfitr(tpix[spatial_index], shift[spatial_index], pord, 3) #, w=weights)
refpos = np.polyval(shift_polyfit, refpix)
#pdb.set_trace()
fitshift = np.polyval(shift_polyfit, tpix) - refpos
# Interpolate each row to a common frame to create an improved reference:
newssub = np.zeros((npix, nlam))
for ii in tpix:
newssub[ii] = np.interp(lampix, lampix+fitshift[ii], ssub[ii])
#pdb.set_trace()
newref = np.median(newssub[spatial_index,:], axis=0)
tfits = np.zeros((nlam, tord+1), dtype=float)
newssub2 = np.zeros((npix, nlam))
for jj in range(nlam):
tfits[jj] = an.polyfitr(tpix, newssub[:,jj], tord, 3)
newssub2[:, jj] = np.polyval(tfits[jj], tpix)
# Create the final model of the sky background:
skymodel = np.zeros((npix, nlam), dtype=float)
shiftmodel = np.zeros((npix, nlam), dtype=float)
for ii in tpix:
#skymodel[ii] = np.interp(lampix, lampix-fitshift[ii], newref)
skymodel[ii] = np.interp(lampix, lampix-fitshift[ii], newssub2[ii])
shiftmodel[ii] = np.interp(lampix, lampix+fitshift[ii], ssub[ii])
#pdb.set_trace()
if dispaxis==1:
skymodel = skymodel.transpose()
return skymodel, shiftmodel, newssub, newssub2
| 10,126
|
def login():
"""
Implements the login feature for the app.
Errors are shown if incorrect details are used. If the user tried
to access a page requiring login without being authenticated,
they are redirected there after sign in.
"""
if current_user.is_authenticated:
return redirect(url_for("auth.index"))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(
username=form.username.data
).first() # None if invalid
if user is None or not user.check_password(form.password.data):
flash("Invalid username or password")
return redirect(url_for("auth.login"))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get("next")
"""To prevent malicious users from adding a malicious site into the parameters,
this checks to see if the url is relative.
"""
if not next_page or url_parse(next_page).netloc != "" or next_page == "/logout":
next_page = url_for("auth.index")
return redirect(next_page)
return render_template("login.html", form=form)
| 10,127
|
def _get_plot_axes(grid):
"""Find which axes are being plotted.
Parameters
----------
grid : Grid
Returns
-------
tuple
"""
plot_axes = [0, 1, 2]
if np.unique(grid.nodes[:, 0]).size == 1:
plot_axes.remove(0)
if np.unique(grid.nodes[:, 1]).size == 1:
plot_axes.remove(1)
if np.unique(grid.nodes[:, 2]).size == 1:
plot_axes.remove(2)
return tuple(plot_axes)
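# Usage sketch (not from the original snippet): a minimal stand-in grid, assuming the
# helper only reads the `nodes` attribute (rows of three coordinates).
import numpy as np
from types import SimpleNamespace
grid = SimpleNamespace(nodes=np.array([[0.1, 0.0, 0.0], [0.2, 0.0, 1.0]]))  # axis 1 is constant
print(_get_plot_axes(grid))  # (0, 2)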
| 10,128
|
def make_parser(inheritable=False):
"""Make parser.
Parameters
----------
inheritable: bool
whether the parser can be inherited from (default False).
if True, sets ``add_help=False`` and ``conflict_hander='resolve'``
Returns
-------
parser: ArgumentParser
"""
parser = argparse.ArgumentParser(
description="get_globular_clusters",
add_help=not inheritable,
conflict_handler="resolve" if inheritable else "error",
)
parser.add_argument(
"output_dir",
type=str,
# default="../../data",
help="The data directory",
)
# parser.add_argument(
# "--data_dir",
# type=str,
# default="data",
# help="The input data directory",
# )
return parser
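# Usage sketch (not from the original snippet): parse arguments directly, or inherit the
# options into a child parser (inheritable=True disables the parent's own -h/--help).
import argparse
args = make_parser().parse_args(["../../data"])
print(args.output_dir)  # ../../data
child = argparse.ArgumentParser(parents=[make_parser(inheritable=True)], description="child script")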
| 10,129
|
def _read_uint(addr):
""" Read a uint """
value = gdb.parse_and_eval("*(unsigned int*)0x%x" % addr)
try:
if value is not None:
return _cast_uint(value)
except gdb.MemoryError:
pass
print("Can't read 0x%x to lookup KASLR uint value" % addr)
return None
| 10,130
|
def line_at_infinity(n):
"""the line at infinity just contains the points at infinity"""
return points_at_infinity(n)
| 10,131
|
def check_load(work, varname, warning=lambda x: print(x)):
"""Check conditions on load"""
if not hasattr(work, "scholar_id"):
warning("[Warning] Work {} does not have scholar_id".format(varname))
if getattr(work, "place", None) is None:
warning("[Error] Work {} does not have place".format(varname))
| 10,132
|
def check_section(config:Namespace, name:str) -> Namespace:
"""Check that a section with the specified name is present."""
section = config._get(name)
if section is None:
raise ConfigurationError(f"Section {name} not found in configuration")
if not isinstance(section, Namespace):
raise ConfigurationError(f"Configuration error: {name} not a section")
return section
| 10,133
|
def ceil(a):
"""The ceil function.
Args:
a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix.
Returns:
The least integer greater than or equal to `a`.
"""
return _unary_operation(_ti_core.expr_ceil, math.ceil, a)
| 10,134
|
def Body(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: str = None,
title: str = None,
description: str = None,
const: bool = None,
gt: float = None,
ge: float = None,
lt: float = None,
le: float = None,
multiple_of: float = None,
min_items: int = None,
max_items: int = None,
min_length: int = None,
max_length: int = None,
regex: str = None,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field, either for the model schema or complex validation. Some arguments
apply only to number fields (``int``, ``float``, ``Decimal``) and some apply only to ``str``.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param const: this field is required and *must* take its default value
:param gt: only applies to numbers, requires the field to be "greater than". The schema
will have an ``exclusiveMinimum`` validation keyword
:param ge: only applies to numbers, requires the field to be "greater than or equal to". The
schema will have a ``minimum`` validation keyword
:param lt: only applies to numbers, requires the field to be "less than". The schema
will have an ``exclusiveMaximum`` validation keyword
:param le: only applies to numbers, requires the field to be "less than or equal to". The
schema will have a ``maximum`` validation keyword
:param multiple_of: only applies to numbers, requires the field to be "a multiple of". The
schema will have a ``multipleOf`` validation keyword
:param min_items: only applies to list or tuple and set, requires the field to have a minimum length.
:param max_items: only applies to list or tuple and set, requires the field to have a maximum length.
:param min_length: only applies to strings, requires the field to have a minimum length. The
schema will have a ``minLength`` validation keyword
:param max_length: only applies to strings, requires the field to have a maximum length. The
schema will have a ``maxLength`` validation keyword
:param regex: only applies to strings, requires the field to match against a regular expression
pattern string. The schema will have a ``pattern`` validation keyword
:param extra: any additional keyword arguments will be added as is to the schema
"""
if default is not Undefined and default_factory is not None:
raise ValueError("cannot specify both default and default_factory")
return BodyInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
const=const,
gt=gt,
ge=ge,
lt=lt,
le=le,
multiple_of=multiple_of,
min_items=min_items,
max_items=max_items,
min_length=min_length,
max_length=max_length,
regex=regex,
**extra,
)
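# Usage sketch (not from the original snippet): declare a body field with constraints; the
# returned BodyInfo is what the surrounding framework inspects when validating requests.
page_size = Body(10, ge=1, le=100, description="items per page")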
| 10,135
|
def process_name(row):
"""
Add the row's title to name_set if it is non-empty and its first character is alphabetic.
"""
substring = row['title']
if substring is None:
return
if len(substring) > 0 and substring[0].isalpha():
name_set.add(substring)
| 10,136
|
def square(array, x, y, size, mag):
"""For each diamond in the array, set the midpoint of that diamond to be
the average of the four corner points plus a random value. """
x1 = x - size
y1 = y - size
x2 = x + size
y2 = y + size
div = 4.0
l = len(array)
if x1 >= 0:
a = array[x1, y]
else:
a = 0.0
div -= 1.0
if y1 >= 0:
b = array[x, y1]
else:
b = 0.0
div -= 1.0
if x2 < l:
c = array[x2, y]
else:
c = 0.0
div -= 1.0
if y2 < l:
d = array[x, y2]
else:
d = 0.0
div -= 1.0
if div:
array[x, y] = (a + b + c + d) / div + scaled_random(mag)
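# Usage sketch (not from the original snippet): run one square step on a 5x5 grid; a
# stand-in scaled_random is defined here (assumption: the real helper returns a random
# value scaled by mag) so the call runs.
import numpy as np
scaled_random = lambda mag: np.random.uniform(-1.0, 1.0) * mag
array = np.zeros((5, 5))
array[0, 0] = array[0, 4] = array[4, 0] = array[4, 4] = 1.0
square(array, 2, 0, 2, 0.1)
print(array[2, 0])  # average of the 3 in-bounds neighbours (~0.667) plus noise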
| 10,137
|
def warp(img, pers_margin=425, margin_bottom=50, margin_top=450, margin_sides=150, reverse=False):
"""
This function warps an image. For the transformation a src polygon and a destination
polygon are used. The source polygon is calculated by the image shape and the margins
given. The destination polygon is calculated solely on the image shape.
:param img: Input image
:param pers_margin: This value determines how sharp the polygon is
:param margin_bottom: This value sets the distance between the polygon and the bottom of
the image
:param margin_top: This value sets the distance between the polygon and the top of the
image
:param margin_sides: This value sets the distance between the polygon and the sides of the
image
:param reverse: If True, src and dst will be swapped, thus the image will be unwarped
:return: Warped image
"""
img_size = (img.shape[1], img.shape[0])
# Four source coordinates
src = np.float32(
[[img_size[0] - margin_sides - pers_margin, margin_top],
[img_size[0] - margin_sides, img_size[1] - margin_bottom],
[margin_sides, img_size[1] - margin_bottom],
[margin_sides + pers_margin, margin_top]])
# Four destination coordinates
dst = np.float32(
[[img_size[0]*3//4, 0],
[img_size[0]*3//4, img_size[1]],
[img_size[0]//4, img_size[1]],
[img_size[0]//4, 0]])
# Compute perspective transform matrix
if not reverse:
m = cv2.getPerspectiveTransform(src, dst)
else:
m = cv2.getPerspectiveTransform(dst, src)
# Warp image
warped = cv2.warpPerspective(img, m, img_size, flags=cv2.INTER_LINEAR)
return warped
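# Usage sketch (not from the original snippet): warp a blank 1280x720 frame to a
# bird's-eye view with the default margins, then unwarp it back.
import numpy as np
import cv2
frame = np.zeros((720, 1280, 3), dtype=np.uint8)
birdseye = warp(frame)
restored = warp(birdseye, reverse=True)
print(birdseye.shape, restored.shape)  # (720, 1280, 3) (720, 1280, 3)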
| 10,138
|
def align(sx, sy):
""" Align two groups of sentences
:param sx:
:param sy:
"""
cx = map(char_length, sx)
cy = map(char_length, sy)
# noinspection PyTypeChecker
for (i1, i2), (j1, j2) in reversed(list(_align(cx, cy))):
yield ' '.join(sx[i1:i2]), ' '.join(sy[j1:j2])
| 10,139
|
def get_all_doorstations(hass):
"""Get all doorstations."""
return [
entry[DOOR_STATION]
for entry in hass.data[DOMAIN].values()
if DOOR_STATION in entry
]
| 10,140
|
def test_runtime_config_attribute_cvejob_cpe2pkg_path():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_CPE2PKG_PATH')
config = RuntimeConfig()
assert config._config.cpe2pkg_path == 'cpe2pkg.jar'
os.environ['CVEJOB_CPE2PKG_PATH'] = 'cpe2pkg10.jar'
config = RuntimeConfig()
assert config._config.cpe2pkg_path == 'cpe2pkg10.jar'
if old_value is not None:
os.environ['CVEJOB_CPE2PKG_PATH'] = old_value
| 10,141
|
def get_weather() -> dict:
"""Makes an api request for the weather api
country code queries the specific country
city name queries the specific city within that country
units determines the type of numerical data returned (centigrade or Fahrenheit)
:return: the response from the api
"""
query = f"{city_name},{country_code}"
url_current_weather = f"https://api.openweathermap.org/data/2.5/weather?q={query}" \
f"&appid={api_key}&units={units}"
response = requests.get(url_current_weather).json()
if response["cod"] != 200:
log.error(json.dumps(response, indent=4))
response = None
return response
| 10,142
|
def _create_off_value():
"""create off value"""
return Tensor(0.0, mstype.float32)
| 10,143
|
def is_datetime(value):
"""
Check if an object is a datetime
:param value:
:return:
"""
result = False
if isinstance(value, datetime.datetime):
result = True
# else:
# result = is_datetime_str(str(value))
return result
| 10,144
|
def sigmoid(z):
"""Sigmoid function"""
# guard against overflow in math.exp for very negative inputs
if z < -100:
return 0.0
return 1.0 / (1.0 + math.exp(-z))
| 10,145
|
async def get_company_sumary(symbol: str, db: Session = Depends(get_db)):
"""
This method receives a symbol; if it does not exist in our database,
it extracts the data, saves it in our database and returns the
stored data.
"""
company_solver = CompanySolver(company_symbol=symbol)
_ = company_solver.get_company_data(db)
return _
| 10,146
|
def make_dataloaders(params: MinkLocParams, debug=False):
"""
Create training and validation dataloaders that return groups of k=2 similar elements
:param params: MinkLocParams with the training and model configuration
:param debug: passed through to make_datasets
:return:
"""
datasets = make_datasets(params, debug=debug)
dataloders = {}
train_sampler = BatchSampler(datasets['train'], batch_size=params.batch_size,
batch_size_limit=params.batch_size_limit,
batch_expansion_rate=params.batch_expansion_rate)
# Collate function collates items into a batch and applies a 'set transform' on the entire batch
train_collate_fn = make_collate_fn(datasets['train'], params.model_params.version, params.dataset_name,
params.model_params.mink_quantization_size)
dataloders['train'] = DataLoader(datasets['train'], batch_sampler=train_sampler, collate_fn=train_collate_fn,
num_workers=params.num_workers, pin_memory=True)
if 'val' in datasets:
val_sampler = BatchSampler(datasets['val'], batch_size=params.batch_size)
# Collate function collates items into a batch and applies a 'set transform' on the entire batch
# Currently validation dataset has empty set_transform function, but it may change in the future
val_collate_fn = make_collate_fn(datasets['val'], params.model_params.version, params.dataset_name,
params.model_params.mink_quantization_size)
dataloders['val'] = DataLoader(datasets['val'], batch_sampler=val_sampler, collate_fn=val_collate_fn,
num_workers=params.num_workers, pin_memory=True)
return dataloders
| 10,147
|
def hilbert(n, x0, y0, xi, xj, yi, yj):
"""Generate a Hilbert curve.
This function returns a generator that yields the (x,y) coordinates
of the Hilbert curve points from 0 to 4^n-1.
Arguments:
n -- the base-4 logarithm of the number of points (i.e. the function generates 4^n points).
x0, y0 -- offset to add to all generated point coordinates.
xi, xj -- projection-plane coordinates of the curve's I vector (i.e. horizontal, "X" axis).
yi, yj -- projection-plane coordinates of the curve's J vector (i.e. vertical, "Y" axis).
"""
if n <= 0:
yield (x0 + (xi + yi) / 2, y0 + (xj + yj) / 2)
else:
yield from hilbert(n - 1, x0, y0, yi/2, yj/2, xi/2, xj/2)
yield from hilbert(n - 1, x0 + xi/2, y0 + xj/2, xi/2, xj/2, yi/2, yj/2)
yield from hilbert(n - 1, x0 + xi/2 + yi/2, y0 + xj/2 + yj/2, xi/2, xj/2, yi/2, yj/2)
yield from hilbert(n - 1, x0 + xi/2 + yi, y0 + xj/2 + yj, -yi/2,-yj/2,-xi/2,-xj/2)
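# Usage sketch (not from the original snippet): trace a 2nd-order curve over the unit
# square with I = (1, 0) and J = (0, 1); 4^2 = 16 points are generated.
pts = list(hilbert(2, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0))
print(len(pts))         # 16
print(pts[0], pts[-1])  # first and last points visited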
| 10,148
|
def show_images(image_one, image_two, image_three):
"""
Plots three images with label and shape attributes
"""
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
ax1.set_title("%s with shape %s" % (image_one[1], image_one[0].shape))
ax1.imshow(image_one[0])
ax2.set_title("%s with shape %s" % (image_two[1], image_two[0].shape))
ax2.imshow(image_two[0])
ax3.set_title("%s with shape %s" % (image_three[1], image_three[0].shape))
ax3.imshow(image_three[0])
| 10,149
|
def get_attn_pad_mask(seq_q, seq_k):
"""
Sentences have different lengths, so PAD tokens are used to pad them all to a fixed length;
these PAD tokens carry no meaning and should not receive any attention.
This attention-mask function marks the PAD positions so attention is placed on the real tokens only.
:param seq_q: [batch_size, seq_len]
:param seq_k: [batch_size, seq_len]
"""
batch_size, len_q = seq_q.size()
_, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(0).unsqueeze(1) # [batch_size, 1, len_k]; 0 stands for PAD, eq(0) returns a matrix of the same shape as seq_k
# a position is True if the corresponding element of seq_k is 0, otherwise False
# [1, 2, 3, 0] -> [F, F, F, T]
return pad_attn_mask.expand(batch_size, len_q, len_k)
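# Usage sketch (not from the original snippet): a batch with one sentence whose last
# token id is 0 (PAD); every query position masks that key position.
import torch
seq = torch.tensor([[5, 7, 2, 0]])
mask = get_attn_pad_mask(seq, seq)
print(mask.shape)  # torch.Size([1, 4, 4])
print(mask[0, 0])  # tensor([False, False, False,  True])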
| 10,150
|
def sync_gcp_projects(neo4j_session, projects, gcp_update_tag, common_job_parameters):
"""
Load a given list of GCP project data to Neo4j and clean up stale nodes.
:param neo4j_session: The Neo4j session
:param projects: List of GCP projects; output from crm.get_gcp_projects()
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:param common_job_parameters: Parameters to carry to the Neo4j jobs
:return: Nothing
"""
logger.debug("Syncing GCP projects")
load_gcp_projects(neo4j_session, projects, gcp_update_tag)
cleanup_gcp_projects(neo4j_session, common_job_parameters)
| 10,151
|
def convert_str_to_float(string):
"""Convert str to float
Handles edge cases by returning NaN when the conversion fails.
Args:
string (str): string
Returns:
f (float): float value
"""
try:
f = float(string)
except Exception:
f = np.nan
return f
| 10,152
|
def node_tree(node: str):
"""Format printing for locate"""
str2list = list(node.replace(' ', ''))
count = 0
for i, e in enumerate(str2list):
if e == '(':
count += 1
str2list[i] = '(\n{}'.format('| ' * count)
elif e == ')':
count -= 1
str2list[i] = '\n{})'.format('| ' * count)
elif e == ',':
str2list[i] = ',\n{}'.format('| ' * count)
elif e == '[':
count += 1
str2list[i] = '[\n{}'.format('| ' * count)
elif e == ']':
count -= 1
str2list[i] = '\n{}]'.format('| ' * count)
return ''.join(str2list)
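# Usage sketch (not from the original snippet): pretty-print a nested expression.
print(node_tree("f(a, g(b, c))"))
# f(
# | a,
# | g(
# | | b,
# | | c
# | )
# )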
| 10,153
|
def gen_sensor_summary_report(config_file, sensor, output_file=None):
"""
A function which generates a summary report for a given sensor. Report includes info such
as the file size, download times, number of scenes etc.
:param config_file: The EODataDown configuration file path.
:param sensor: The sensor for the report to be generated
:param output_file: Output JSON file with the report. If None report is printed to the console.
"""
sys_main_obj = eodatadown.eodatadownsystemmain.EODataDownSystemMain()
sys_main_obj.parse_config(config_file)
sensor_obj = sys_main_obj.get_sensor_obj(sensor)
report_dict = sensor_obj.get_sensor_summary_info()
if output_file is not None:
import json
with open(output_file, 'w') as fp:
json.dump(report_dict, fp, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)
else:
import pprint
pprint.pprint(report_dict)
| 10,154
|
def _check_info_id_is_in_list(
expected_info_id: int, info_list: List[dict]) -> None:
"""
Check that the target info id is included in the list.
Parameters
----------
expected_info_id : int
The expected info id included in the list.
info_list : list of dicts
A list of check results for one function.
Raises
------
AssertionError
If the target id is not included in the list.
"""
id_list: List[int] = [
info_dict[py_module.INFO_KEY_INFO_ID]
for info_dict in info_list]
is_in: bool = expected_info_id in id_list
assert is_in
| 10,155
|
def random_init_checkpoint(param_name, is_weight, tar_size, checkpoint, args):
"""Either remove the final layer weights for fine-tuning on novel dataset
or append randomly initialized weights for the novel classes.
Note: The base detector for LVIS contains weights for all classes, but only
the weights corresponding to base classes are updated during base training
(this design choice has no particular reason). Thus, the random
initialization step is not really necessary.
"""
weight_name = param_name + ('.weight' if is_weight else '.bias')
pretrained_weight = checkpoint['state_dict'][weight_name]
prev_cls = pretrained_weight.size(0)
if 'fc_cls' in param_name:
prev_cls -= 1
if is_weight:
feat_size = pretrained_weight.size(1)
new_weight = torch.rand((tar_size, feat_size))
torch.nn.init.normal_(new_weight, 0, 0.01)
else:
new_weight = torch.zeros(tar_size)
if args.coco or args.lvis:
BASE_CLASSES = COCO_BASE_CLASSES if args.coco else LVIS_BASE_CLASSES
IDMAP = COCO_IDMAP if args.coco else LVIS_IDMAP
for i, c in enumerate(BASE_CLASSES):
idx = i if args.coco else c
if 'fc_cls' in param_name:
new_weight[IDMAP[c]] = pretrained_weight[idx]
else:
new_weight[IDMAP[c] * 4:(IDMAP[c] + 1) * 4] = \
pretrained_weight[idx * 4:(idx + 1) * 4]
else:
new_weight[:prev_cls] = pretrained_weight[:prev_cls]
if 'fc_cls' in param_name:
new_weight[-1] = pretrained_weight[-1] # bg class
checkpoint['state_dict'][weight_name] = new_weight
| 10,156
|
def stream_changelog_sections(
target_filename, config_filename, receive_sections, version=None
):
"""Send individual changelog sections to a callable, one per version.
The callable accepts two arguments, the string version number of the
changelog section, and the markdown-formatted content of the changelog
section.
Used for APIs that receive changelog sections per version.
"""
Environment.register(DefaultEnvironment)
setup_docutils()
with open(target_filename, encoding="utf-8") as handle:
publish_string(
handle.read(),
source_path=target_filename,
writer=Writer(
limit_version=version, receive_sections=receive_sections
),
settings_overrides={
"changelog_env": DefaultEnvironment(config_filename),
"report_level": 3,
},
)
| 10,157
|
def process_data(datas):
"""
:param datas:
:return:
"""
# copy of the origin question_toks
for d in datas:
if 'origin_question_toks' not in d:
d['origin_question_toks'] = d['question_toks']
for entry in datas:
entry['question_toks'] = symbol_filter(entry['question_toks'])
origin_question_toks = symbol_filter([x for x in entry['origin_question_toks'] if x.lower() != 'the'])
question_toks = [wordnet_lemmatizer.lemmatize(x.lower()) for x in entry['question_toks'] if x.lower() != 'the']
entry['question_toks'] = question_toks
table_names = []
table_names_pattern = []
for y in entry['table_names']:
x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
table_names.append(" ".join(x))
x = [re_lemma(x.lower()) for x in y.split(' ')]
table_names_pattern.append(" ".join(x))
header_toks = []
header_toks_list = []
header_toks_pattern = []
header_toks_list_pattern = []
for y in entry['col_set']:
x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
header_toks.append(" ".join(x))
header_toks_list.append(x)
x = [re_lemma(x.lower()) for x in y.split(' ')]
header_toks_pattern.append(" ".join(x))
header_toks_list_pattern.append(x)
num_toks = len(question_toks)
idx = 0
tok_concol = []
type_concol = []
nltk_result = nltk.pos_tag(question_toks)
while idx < num_toks:
# fully header
end_idx, header = fully_part_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
# check for table
end_idx, tname = group_header(question_toks, idx, num_toks, table_names)
if tname:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["table"])
idx = end_idx
continue
# check for column
end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
# check for partial column
end_idx, tname = partial_header(question_toks, idx, header_toks_list)
if tname:
tok_concol.append(tname)
type_concol.append(["col"])
idx = end_idx
continue
# check for aggregation
end_idx, agg = group_header(question_toks, idx, num_toks, AGG)
if agg:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["agg"])
idx = end_idx
continue
if nltk_result[idx][1] == 'RBR' or nltk_result[idx][1] == 'JJR':
tok_concol.append([question_toks[idx]])
type_concol.append(['MORE'])
idx += 1
continue
if nltk_result[idx][1] == 'RBS' or nltk_result[idx][1] == 'JJS':
tok_concol.append([question_toks[idx]])
type_concol.append(['MOST'])
idx += 1
continue
# string match for Time Format
if num2year(question_toks[idx]):
question_toks[idx] = 'year'
end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
def get_concept_result(toks, graph):
for begin_id in range(0, len(toks)):
for r_ind in reversed(range(1, len(toks) + 1 - begin_id)):
tmp_query = "_".join(toks[begin_id:r_ind])
if tmp_query in graph:
mi = graph[tmp_query]
for col in entry['col_set']:
if col in mi:
return col
end_idx, symbol = group_symbol(question_toks, idx, num_toks)
if symbol:
tmp_toks = [x for x in question_toks[idx: end_idx]]
assert len(tmp_toks) > 0, print(symbol, question_toks)
pro_result = get_concept_result(tmp_toks, english_IsA)
if pro_result is None:
pro_result = get_concept_result(tmp_toks, english_RelatedTo)
if pro_result is None:
pro_result = "NONE"
for tmp in tmp_toks:
tok_concol.append([tmp])
type_concol.append([pro_result])
pro_result = "NONE"
idx = end_idx
continue
end_idx, values = group_values(origin_question_toks, idx, num_toks)
if values and (len(values) > 1 or question_toks[idx - 1] not in ['?', '.']):
tmp_toks = [wordnet_lemmatizer.lemmatize(x) for x in question_toks[idx: end_idx] if x.isalnum() is True]
assert len(tmp_toks) > 0, print(question_toks[idx: end_idx], values, question_toks, idx, end_idx)
pro_result = get_concept_result(tmp_toks, english_IsA)
if pro_result is None:
pro_result = get_concept_result(tmp_toks, english_RelatedTo)
if pro_result is None:
pro_result = "NONE"
for tmp in tmp_toks:
tok_concol.append([tmp])
type_concol.append([pro_result])
pro_result = "NONE"
idx = end_idx
continue
result = group_digital(question_toks, idx)
if result is True:
tok_concol.append(question_toks[idx: idx + 1])
type_concol.append(["value"])
idx += 1
continue
if question_toks[idx] == 'ha':
question_toks[idx] = 'have'
tok_concol.append([question_toks[idx]])
type_concol.append(['NONE'])
idx += 1
continue
entry['question_arg'] = tok_concol
entry['question_arg_type'] = type_concol
entry['nltk_pos'] = nltk_result
return datas
| 10,158
|
def concat(
adatas: Union[Collection[AnnData], "typing.Mapping[str, AnnData]"],
*,
axis: Literal[0, 1] = 0,
join: Literal["inner", "outer"] = "inner",
merge: Union[StrategiesLiteral, Callable, None] = None,
uns_merge: Union[StrategiesLiteral, Callable, None] = None,
label: Optional[str] = None,
keys: Optional[Collection] = None,
index_unique: Optional[str] = None,
fill_value: Optional[Any] = None,
pairwise: bool = False,
) -> AnnData:
"""Concatenates AnnData objects along an axis.
See the :doc:`concatenation` section in the docs for a more in-depth description.
.. warning::
This function is marked as experimental for the `0.7` release series, and will
supersede the :meth:`AnnData.concatenate() <anndata.AnnData.concatenate>` method
in future releases.
Params
------
adatas
The objects to be concatenated. If a Mapping is passed, keys are used for the `keys`
argument and values are concatenated.
axis
Which axis to concatenate along.
join
How to align values when concatenating. If "outer", the union of the other axis
is taken. If "inner", the intersection. See :doc:`concatenation` for more.
merge
How elements not aligned to the axis being concatenated along are selected.
Currently implemented strategies include:
* `None`: No elements are kept.
* `"same"`: Elements that are the same in each of the objects.
* `"unique"`: Elements for which there is only one possible value.
* `"first"`: The first element seen at each from each position.
* `"only"`: Elements that show up in only one of the objects.
uns_merge
How the elements of `.uns` are selected. Uses the same set of strategies as
the `merge` argument, except applied recursively.
label
Column in axis annotation (i.e. `.obs` or `.var`) to place batch information in.
If it's None, no column is added.
keys
Names for each object being added. These values are used for column values for
`label` or appended to the index if `index_unique` is not `None`. Defaults to
incrementing integer labels.
index_unique
Whether to make the index unique by using the keys. If provided, this
is the delimiter between "{orig_idx}{index_unique}{key}". When `None`,
the original indices are kept.
fill_value
When `join="outer"`, this is the value that will be used to fill the introduced
indices. By default, sparse arrays are padded with zeros, while dense arrays and
DataFrames are padded with missing values.
pairwise
Whether pairwise elements along the concatenated dimension should be included.
This is False by default, since the resulting arrays are often not meaningful.
Notes
-----
.. warning::
If you use `join='outer'` this fills 0s for sparse data when
variables are absent in a batch. Use this with care. Dense data is
filled with `NaN`.
Examples
--------
Preparing example objects
>>> import anndata as ad, pandas as pd, numpy as np
>>> from scipy import sparse
>>> a = ad.AnnData(
... X=sparse.csr_matrix(np.array([[0, 1], [2, 3]])),
... obs=pd.DataFrame({"group": ["a", "b"]}, index=["s1", "s2"]),
... var=pd.DataFrame(index=["var1", "var2"]),
... varm={"ones": np.ones((2, 5)), "rand": np.random.randn(2, 3), "zeros": np.zeros((2, 5))},
... uns={"a": 1, "b": 2, "c": {"c.a": 3, "c.b": 4}},
... )
>>> b = ad.AnnData(
... X=sparse.csr_matrix(np.array([[4, 5, 6], [7, 8, 9]])),
... obs=pd.DataFrame({"group": ["b", "c"], "measure": [1.2, 4.3]}, index=["s3", "s4"]),
... var=pd.DataFrame(index=["var1", "var2", "var3"]),
... varm={"ones": np.ones((3, 5)), "rand": np.random.randn(3, 5)},
... uns={"a": 1, "b": 3, "c": {"c.b": 4}},
... )
>>> c = ad.AnnData(
... X=sparse.csr_matrix(np.array([[10, 11], [12, 13]])),
... obs=pd.DataFrame({"group": ["a", "b"]}, index=["s1", "s2"]),
... var=pd.DataFrame(index=["var3", "var4"]),
... uns={"a": 1, "b": 4, "c": {"c.a": 3, "c.b": 4, "c.c": 5}},
... )
Concatenating along different axes
>>> ad.concat([a, b]).to_df()
var1 var2
s1 0.0 1.0
s2 2.0 3.0
s3 4.0 5.0
s4 7.0 8.0
>>> ad.concat([a, c], axis=1).to_df()
var1 var2 var3 var4
s1 0.0 1.0 10.0 11.0
s2 2.0 3.0 12.0 13.0
Inner and outer joins
>>> inner = ad.concat([a, b]) # Joining on intersection of variables
>>> inner
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
>>> (inner.obs_names, inner.var_names) # doctest: +NORMALIZE_WHITESPACE
(Index(['s1', 's2', 's3', 's4'], dtype='object'),
Index(['var1', 'var2'], dtype='object'))
>>> outer = ad.concat([a, b], join="outer") # Joining on union of variables
>>> outer
AnnData object with n_obs × n_vars = 4 × 3
obs: 'group', 'measure'
>>> outer.var_names
Index(['var1', 'var2', 'var3'], dtype='object')
>>> outer.to_df() # Sparse arrays are padded with zeroes by default
var1 var2 var3
s1 0.0 1.0 0.0
s2 2.0 3.0 0.0
s3 4.0 5.0 6.0
s4 7.0 8.0 9.0
Keeping track of source objects
>>> ad.concat({"a": a, "b": b}, label="batch").obs
group batch
s1 a a
s2 b a
s3 b b
s4 c b
>>> ad.concat([a, b], label="batch", keys=["a", "b"]).obs # Equivalent to previous
group batch
s1 a a
s2 b a
s3 b b
s4 c b
>>> ad.concat({"a": a, "b": b}, index_unique="-").obs
group
s1-a a
s2-a b
s3-b b
s4-b c
Combining values not aligned to axis of concatenation
>>> ad.concat([a, b], merge="same")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones'
>>> ad.concat([a, b], merge="unique")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones', 'zeros'
>>> ad.concat([a, b], merge="first")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones', 'rand', 'zeros'
>>> ad.concat([a, b], merge="only")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'zeros'
The same merge strategies can be used for elements in `.uns`
>>> dict(ad.concat([a, b, c], uns_merge="same").uns)
{'a': 1, 'c': {'c.b': 4}}
>>> dict(ad.concat([a, b, c], uns_merge="unique").uns)
{'a': 1, 'c': {'c.a': 3, 'c.b': 4, 'c.c': 5}}
>>> dict(ad.concat([a, b, c], uns_merge="only").uns)
{'c': {'c.c': 5}}
>>> dict(ad.concat([a, b, c], uns_merge="first").uns)
{'a': 1, 'b': 2, 'c': {'c.a': 3, 'c.b': 4, 'c.c': 5}}
"""
# Argument normalization
merge = resolve_merge_strategy(merge)
uns_merge = resolve_merge_strategy(uns_merge)
if isinstance(adatas, Mapping):
if keys is not None:
raise TypeError(
"Cannot specify categories in both mapping keys and using `keys`. "
"Only specify this once."
)
keys, adatas = list(adatas.keys()), list(adatas.values())
else:
adatas = list(adatas)
if keys is None:
keys = np.arange(len(adatas)).astype(str)
if axis == 0:
dim = "obs"
elif axis == 1:
dim = "var"
alt_axis, alt_dim = _resolve_dim(axis=1 - axis)
# Label column
label_col = pd.Categorical.from_codes(
np.repeat(np.arange(len(adatas)), [a.shape[axis] for a in adatas]),
categories=keys,
)
# Combining indexes
concat_indices = pd.concat(
[pd.Series(dim_indices(a, axis=axis)) for a in adatas], ignore_index=True
)
if index_unique is not None:
concat_indices = concat_indices.str.cat(label_col.map(str), sep=index_unique)
concat_indices = pd.Index(concat_indices)
alt_indices = resolve_index(
[dim_indices(a, axis=1 - axis) for a in adatas], join=join
)
reindexers = [
gen_reindexer(alt_indices, dim_indices(a, axis=1 - axis)) for a in adatas
]
# Annotation for concatenation axis
concat_annot = pd.concat(
[getattr(a, dim) for a in adatas], join=join, ignore_index=True
)
concat_annot.index = concat_indices
if label is not None:
concat_annot[label] = label_col
# Annotation for other axis
alt_annot = merge_dataframes(
[getattr(a, alt_dim) for a in adatas], alt_indices, merge
)
X = concat_arrays(
[a.X for a in adatas], reindexers, axis=axis, fill_value=fill_value
)
if join == "inner":
layers = inner_concat_aligned_mapping(
[a.layers for a in adatas], axis=axis, reindexers=reindexers
)
concat_mapping = inner_concat_aligned_mapping(
[getattr(a, f"{dim}m") for a in adatas], index=concat_indices
)
if pairwise:
concat_pairwise = concat_pairwise_mapping(
mappings=[getattr(a, f"{dim}p") for a in adatas],
shapes=[a.shape[axis] for a in adatas],
join_keys=intersect_keys,
)
else:
concat_pairwise = {}
elif join == "outer":
layers = outer_concat_aligned_mapping(
[a.layers for a in adatas], reindexers, axis=axis, fill_value=fill_value
)
concat_mapping = outer_concat_aligned_mapping(
[getattr(a, f"{dim}m") for a in adatas],
index=concat_indices,
fill_value=fill_value,
)
if pairwise:
concat_pairwise = concat_pairwise_mapping(
mappings=[getattr(a, f"{dim}p") for a in adatas],
shapes=[a.shape[axis] for a in adatas],
join_keys=union_keys,
)
else:
concat_pairwise = {}
# TODO: Reindex lazily, so we don't have to make those copies until we're sure we need the element
alt_mapping = merge(
[
{k: r(v, axis=0) for k, v in getattr(a, f"{alt_dim}m").items()}
for r, a in zip(reindexers, adatas)
],
)
alt_pairwise = merge(
[
{k: r(r(v, axis=0), axis=1) for k, v in getattr(a, f"{alt_dim}p").items()}
for r, a in zip(reindexers, adatas)
]
)
uns = uns_merge([a.uns for a in adatas])
raw = None
has_raw = [a.raw is not None for a in adatas]
if all(has_raw):
raw = concat(
[
AnnData(
X=a.raw.X,
obs=pd.DataFrame(index=a.obs_names),
var=a.raw.var,
varm=a.raw.varm,
)
for a in adatas
],
join=join,
label=label,
keys=keys,
index_unique=index_unique,
fill_value=fill_value,
axis=axis,
)
elif any(has_raw):
warn(
"Only some AnnData objects have `.raw` attribute, "
"not concatenating `.raw` attributes.",
UserWarning,
)
return AnnData(
**{
"X": X,
"layers": layers,
dim: concat_annot,
alt_dim: alt_annot,
f"{dim}m": concat_mapping,
f"{alt_dim}m": alt_mapping,
f"{dim}p": concat_pairwise,
f"{alt_dim}p": alt_pairwise,
"uns": uns,
"raw": raw,
}
)
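# Hedged usage sketch (not part of the original source): a self-contained example of
# `fill_value` combined with an outer join, exercised through the public
# anndata.concat API that this function implements. The objects and the fill
# value -1 are illustrative assumptions only.
def _example_concat_fill_value():
    import anndata as ad
    import numpy as np
    import pandas as pd

    a = ad.AnnData(
        X=np.array([[0.0, 1.0], [2.0, 3.0]]),
        obs=pd.DataFrame(index=["s1", "s2"]),
        var=pd.DataFrame(index=["var1", "var2"]),
    )
    b = ad.AnnData(
        X=np.array([[4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]),
        obs=pd.DataFrame(index=["s3", "s4"]),
        var=pd.DataFrame(index=["var1", "var2", "var3"]),
    )
    # var3 is absent from `a`, so its s1/s2 entries are padded with -1
    # instead of the default NaN for dense arrays.
    return ad.concat([a, b], join="outer", fill_value=-1).to_df()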
| 10,159
|
def repeated_parity_data_binning(shots, nr_of_meas:int):
"""
Used for data binning of the repeated parity check experiment.
Assumes the data qubit is alternately prepared in 0 and 1.
Args:
shots (1D array) : array containing all measured values of 1 qubit
nr_of_meas (int) : number of measurements per prepared state.
used to determine the period for data binning. Includes
the initialization measurement.
Returns
prep_0 (1D array) outcomes of the initialization measurement
meas_0 (1D array) outcomes of the first measurement
trace_0 (2D array) traces
prep_1 (1D array)
meas_1 (1D array)
trace_1 (2D array)
"""
prep_0 = copy(shots[::nr_of_meas*2])
meas_0 = copy(shots[1::nr_of_meas*2])
prep_1 = copy(shots[nr_of_meas::nr_of_meas*2])
meas_1 = copy(shots[nr_of_meas+1::nr_of_meas*2])
trace_0 = np.zeros((len(prep_0), nr_of_meas-1))
trace_1 = np.zeros((len(prep_1), nr_of_meas-1))
for i in range(len(prep_0)):
trace_0[i, :] = shots[1+(2*i)*nr_of_meas: (2*i+1)*nr_of_meas]
trace_1[i, :] = shots[1+(2*i+1)*nr_of_meas: (2*i+2)*nr_of_meas]
return (prep_0, meas_0, trace_0, prep_1, meas_1, trace_1)
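# Hedged usage sketch (not from the original source): synthetic shots that make the
# binning layout visible. With nr_of_meas=3 one period is 6 shots: 3 following the
# |0> preparation, then 3 following the |1> preparation.
def _example_repeated_parity_data_binning():
    shots = np.arange(12)  # two full periods of 2 * nr_of_meas = 6 shots each
    prep_0, meas_0, trace_0, prep_1, meas_1, trace_1 = \
        repeated_parity_data_binning(shots, nr_of_meas=3)
    # prep_0 -> [0, 6], meas_0 -> [1, 7], prep_1 -> [3, 9], meas_1 -> [4, 10]
    # trace_0 -> [[1, 2], [7, 8]], trace_1 -> [[4, 5], [10, 11]]
    return trace_0.shape, trace_1.shape  # both (2, nr_of_meas - 1) = (2, 2)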
| 10,160
|
def get_user_activities(user_id, timestamp_start, timestamp_end):
""" Returns the activities for a user, between two times"""
activities = Activity.query \
.filter(Activity.user_id == user_id) \
.filter(Activity.timestamp_end >= timestamp_start) \
.filter(Activity.timestamp_start <= timestamp_end).all()
# If required, add the current_activity (The above loop will not get it)
current_activity_id = get_current_user_activity_id(target_user_id=user_id)
if current_activity_id is not None:
current_act = Activity.query.get(current_activity_id)
# Don't add the current activity if it started after the requested end
if current_act.timestamp_start <= timestamp_end:
activities.append(current_act)
return activities
| 10,161
|
def _disable(recipes):
"""Disable the given recipe in the link"""
for recipe in recipes:
with json_file(
config.recipe.profile.link_location(recipe)
) as values:
values["enabled"] = False
LOGGER.status("Disabling the link file of {} ({})".format(recipe, config.recipe.writeprofile))
| 10,162
|
def create_feature_from_floor(train_df, test_df):
"""
The next important variables from the EDA are floor and max_floor, so let us create two features:
1. Ratio of the house's floor number to the total number of floors
2. Number of floors from the top
"""
# floor of the house to the total number of floors in the house #
train_df["ratio_floor_max_floor"] = train_df["floor"] / train_df["max_floor"].astype("float")
test_df["ratio_floor_max_floor"] = test_df["floor"] / test_df["max_floor"].astype("float")
# num of floor from top #
train_df["floor_from_top"] = train_df["max_floor"] - train_df["floor"]
test_df["floor_from_top"] = test_df["max_floor"] - test_df["floor"]
| 10,163
|
def _find_matches(ref, pred):
""" find potential matches between objects in the reference and
predicted images. These need to have at least 1 pixel of overlap.
"""
matches = {}
for label in ref.labels:
mask = ref.labeled == label
matches[label] = [m for m in np.unique(pred.labeled[mask]) if m>0]
return matches
| 10,164
|
def _process(config: ConfigType, should_make_dir: bool) -> ConfigType:
"""Process the config
Args:
config (ConfigType): Config object
should_make_dir (bool): Should make dir for saving logs, models etc
Returns:
[ConfigType]: Processed config
"""
config = _process_general_config(config=config)
config = _process_logbook_config(config=config, should_make_dir=should_make_dir)
config = _process_experiment_config(config=config, should_make_dir=should_make_dir)
return config
| 10,165
|
def calendar_heatmap_echarts(data_frame: pd.DataFrame, date_field: str = None, value_field: str = None,
title: str = "",
width: str = "100%", height: str = "300px") -> Echarts:
"""
Calendar heatmap showing the intensity of each date.
:param data_frame: input data frame
:param date_field: date column
:param value_field: value column
:param title: optional title
:param width: width of the output div; supports pixels and percentages, e.g. 800px or 100%
:param height: height of the output div; supports pixels and percentages, e.g. 800px or 100%
:return:
"""
df = data_frame[[date_field, value_field]].copy()
value_max = df[value_field].max()
value_min = df[value_field].min()
date_start = pd.to_datetime(df[date_field].min()).strftime("%Y-%m-%d")
date_end = pd.to_datetime(df[date_field].max()).strftime("%Y-%m-%d")
df[date_field] = pd.to_datetime(df[date_field]).dt.strftime("%Y-%m-%d")
options = {
'title': {
'text': title
},
'tooltip': {'formatter': "{c}"},
'visualMap': {
'text': ['高', '低'],
'min': value_min,
'max': value_max,
'type': 'continuous',
'orient': 'horizontal',
'inRange': {
'color': ["#313695", "#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#ffffbf", "#fee090", "#fdae61",
"#f46d43", "#d73027", "#a50026"]
},
'left': 'center',
'top': 0,
'hoverLink': True
},
'calendar': {
'top': 60,
'left': 30,
'right': 30,
'cellSize': ['auto', 'auto'],
'range': [date_start, date_end],
'itemStyle': {
'borderWidth': 0.5
},
'dayLabel': {
'firstDay': 1
},
'monthLabel': {
'nameMap': 'cn'
},
'yearLabel': {'show': True}
},
'series': {
'type': 'heatmap',
'coordinateSystem': 'calendar',
'emphasis': {
'itemStyle': {
'borderColor': "#333",
'borderWidth': 1,
'shadowColor': 'rgba(0, 0, 0, 0.5)',
'shadowBlur': 15
}
},
'data': df[[date_field, value_field]].values.tolist()
}
}
return Echarts(options=options, width=width, height=height)
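# Hedged usage sketch (not from the original source): a small daily series rendered
# as a calendar heatmap. It relies on this module's Echarts wrapper; the column
# names and values are arbitrary.
def _example_calendar_heatmap_echarts():
    dates = pd.date_range("2021-01-01", "2021-03-31", freq="D")
    df = pd.DataFrame({"date": dates, "value": [i % 30 for i in range(len(dates))]})
    return calendar_heatmap_echarts(df, date_field="date", value_field="value",
                                    title="daily activity")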
| 10,166
|
def adj_to_edge_indices(adj: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]:
"""
Args:
adj: a (N, N) adjacency matrix, where N is the number of nodes
Returns:
A (2, E) array, edge_idxs, where E is the number of edges,
and edge_idxs[0], edge_idxs[1] are the source & destination nodes, respectively.
"""
edge_tuples = torch.nonzero(adj, as_tuple=True) if torch.is_tensor(adj) else np.nonzero(adj)
edge_src = edge_tuples[0].unsqueeze(0) if torch.is_tensor(adj) else np.expand_dims(edge_tuples[0], axis=0)
edge_dest = edge_tuples[1].unsqueeze(0) if torch.is_tensor(adj) else np.expand_dims(edge_tuples[1], axis=0)
if torch.is_tensor(adj):
edge_idxs = torch.cat((edge_src, edge_dest), dim=0)
else:
edge_idxs = np.concatenate((edge_src, edge_dest), axis=0)
return edge_idxs
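# Hedged usage sketch (not from the original source): a 3-node path graph as a
# NumPy adjacency matrix and the resulting (2, E) edge index array.
def _example_adj_to_edge_indices():
    adj = np.array([[0, 1, 0],
                    [1, 0, 1],
                    [0, 1, 0]])
    edge_idxs = adj_to_edge_indices(adj)
    # edge_idxs -> [[0, 1, 1, 2],
    #               [1, 0, 2, 1]]  (row 0: sources, row 1: destinations)
    return edge_idxs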
| 10,167
|
def test_git_url_top_level_url_versions(mock_packages, config):
"""Test URL fetch strategy inference when url is specified with git."""
pkg = spack.repo.get('git-url-top-level')
# leading 62 zeros of sha256 hash
leading_zeros = '0' * 62
fetcher = spack.fetch_strategy.for_package_version(pkg, '2.0')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.url == 'https://example.com/some/tarball-2.0.tar.gz'
assert fetcher.digest == leading_zeros + '20'
fetcher = spack.fetch_strategy.for_package_version(pkg, '2.1')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.url == 'https://example.com/some/tarball-2.1.tar.gz'
assert fetcher.digest == leading_zeros + '21'
fetcher = spack.fetch_strategy.for_package_version(pkg, '2.2')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.url == 'https://www.example.com/foo2.2.tar.gz'
assert fetcher.digest == leading_zeros + '22'
fetcher = spack.fetch_strategy.for_package_version(pkg, '2.3')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.url == 'https://www.example.com/foo2.3.tar.gz'
assert fetcher.digest == leading_zeros + '23'
| 10,168
|
def create_export_settings_window():
"""
This function contains all the logic of the export settings window and runs the window on its own.
:return: None
"""
window = sg.Window("Export Settings", generate_export_settings_layout(), modal=True, finalize=True,
keep_on_top=True)
while True:
n_event, _ = window.read()
if n_event in ["Exit", sg.WIN_CLOSED]:
window.close()
return None
if n_event == "-PROGRAM_CODE-":
export(window)
if n_event == "-OVERWATCH_CODE-":
export_as_overwatch_code(window)
| 10,169
|
def render_string(s: str, *, args: Dict = None, as_json: bool = False) -> Iterator[str]:
"""Render each document from a string and return each rendered string one by one."""
if not args:
args = {}
for node in _parse_string(s):
yield _render(node, args=args, as_json=as_json)
| 10,170
|
def get_vertex_list(session, node_id, part_info):
"""Wrapper for HAPI_GetVertexList
Args:
session (int): The session of Houdini you are interacting with.
node_id (int): The node to get.
part_info (PartInfo): Part info of the part being queried
Returns:
np.ndarray: Array of vertices
"""
data_buffer = (c_int * part_info.vertexCount)()
result = HAPI_LIB.HAPI_GetVertexList(
byref(session), node_id, part_info.id, byref(data_buffer),
0, part_info.vertexCount)
assert result == HDATA.Result.SUCCESS,\
"GetVertexList Failed with {0}".format(HDATA.Result(result).name)
data_np = np.frombuffer(data_buffer, np.int32)
return data_np
| 10,171
|
async def list(zeroconf=None, tdm_addr=None, tdm_port=None, password=None,
robot_id=None, robot_name=None,
timeout=5):
"""Display a list of all the robots.
Arguments:
tdm_addr - TDM address as a string (default: as in start())
tdm_port - TDM TCP port number (default: as in start())
password - TDM password (default: None, not necessary for local TDM)
robot_id - robot id to restrict the output (default: any)
robot_name - robot name to restrict the output (default: any)
timeout - time to obtain at least one node (default: 5s)
zeroconf - True to find TDM with zeroconf (default: automatic)
"""
with (ClientAsync(zeroconf=zeroconf,
tdm_addr=tdm_addr, tdm_port=tdm_port, password=password)
if zeroconf is not None or
tdm_addr is not None or
tdm_port is not None or
_interactive_console is None
else _interactive_console.client) as client:
for _ in range(1 if timeout < 0.1 else int(timeout / 0.1)):
client.process_waiting_messages()
if len(client.nodes) > 0:
break
await client.sleep(0.1)
for node in client.filter_nodes(client.nodes,
node_id=robot_id, node_name=robot_name):
print(f"id: {node.id_str}")
if "group_id_str" in node.props and node.props["group_id_str"] is not None:
print(f"group id: {node.props['group_id_str']}")
if "name" in node.props:
print(f"name: {node.props['name']}")
if "status" in node.props:
status_str = {
ClientAsync.NODE_STATUS_UNKNOWN: "unknown",
ClientAsync.NODE_STATUS_CONNECTED: "connected",
ClientAsync.NODE_STATUS_AVAILABLE: "available",
ClientAsync.NODE_STATUS_BUSY: "busy",
ClientAsync.NODE_STATUS_READY: "ready",
ClientAsync.NODE_STATUS_DISCONNECTED: "disconnected",
}[node.status]
print(f"status: {node.status} ({status_str})")
if "capabilities" in node.props:
print(f"cap: {node.props['capabilities']}")
if "fw_version" in node.props:
print(f"firmware: {node.props['fw_version']}")
print()
| 10,172
|
def lamb1(u,alpha=.5):
"""Approximate the Lambert W function.
Approximate the Lambert W function from its upper and lower bounds.
The parameter alpha (between 0 and 1) determines how close the
approximation is to the lower bound instead of the upper bound.
:arg float u: Modified argument of the function.
:arg float alpha: Bound parameter (default 0.5).
:returns: (-z)-value of the Lambert function.
:raises ValueError: If u is negative.
:raises ValueError: If alpha is not between 0 and 1.
"""
if u < 0:
errmsg = 'Argument u must be non-negative'
raise ValueError(errmsg)
if alpha < 0 or alpha > 1:
errmsg = 'Parameter alpha must be between 0 and 1'
raise ValueError(errmsg)
beta = (2 + alpha)/3
negz = 1 + (2*u)**.5 + beta*u
return negz
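# Hedged check (not from the original source): compares the approximation against
# SciPy's Lambert W on the k=-1 branch, assuming the intended identity is
# negz ~= -W_{-1}(-exp(-1 - u)); the bound parameter alpha interpolates between
# the known lower and upper bounds of that quantity.
def _example_lamb1_vs_scipy(u=1.0):
    import math
    from scipy.special import lambertw
    approx = lamb1(u, alpha=0.5)                        # ~3.25 for u = 1
    exact = -lambertw(-math.exp(-1.0 - u), k=-1).real   # ~3.15 for u = 1
    return approx, exact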
| 10,173
|
def undeploy(c):
"""Uninstall package on remote hosts(s)"""
package = 'slacm'
sudo(c,'pip3 uninstall -y %s' % package)
| 10,174
|
def loglikelihood(time_steps: list) -> float:
"""Calculate the log-likelihood of the time steps from the estimation
Parameters
----------
time_steps : list
estimation time steps
Returns
-------
float
log-likelihood
"""
loglikelihood = 0
for time_step in time_steps:
loglikelihood += _loglikelihood(time_step)
return loglikelihood
| 10,175
|
def write_conv_msg(n, nMax, Error, fName, State, ConvCrit, totTime):
"""Write convergence status message at the end of conv. file."""
if State.upper() == "STEADY":
if n == nMax and Error > ConvCrit:
msg = write_steadyst_notconv_msg(nMax)
save_msg(msg, fName)
elif Error < ConvCrit:
msg = write_steadyst_conv_msg(n, ConvCrit)
save_msg(msg, fName)
elif State.upper() == "TRANSIENT" and n == nMax:
msg = write_transient_conv_msg(nMax, totTime)
save_msg(msg, fName)
| 10,176
|
def get_r0_rm_rp(s, i_delta):
""" compute 3 points r0, r_minus and r_plus to determine apsis
compute these at s.i-i_delta and s.i-2*i_delta
"""
xp = s.Xlast[:, s.i % s.save_last]
x0 = s.Xlast[:, (s.i - i_delta) % s.save_last]
xm = s.Xlast[:, (s.i - 2 * i_delta) % s.save_last]
rp = norm(xp[0:3] - xp[3:6])
r0 = norm(x0[0:3] - x0[3:6])
rm = norm(xm[0:3] - xm[3:6])
return r0, rm, rp
| 10,177
|
def _add_missing_scheduler_data(
sys_map, parsed_map, timestamps, node, node_ip_mapping, ignore_exception
):
"""
Add missing IO scheduler details into parsed_map.
"""
if not sys_map or "scheduler" not in sys_map:
return
scheduler_map = {}
scheduler_map[node] = {}
scheduler_map[node]["scheduler"] = sys_map["scheduler"]
_merge_nodelevel_map_to_mainmap(
parsed_map, scheduler_map, timestamps, node_ip_mapping, ["sys_stat"]
)
| 10,178
|
def make_atomic(last, **rows):
"""
Unify related table instances/row, including: ids, dir, and dfile
Parameters
----------
last : obspy.AttributeDict
{'keyvalue': lastid instance, ...}
rows : dict
{'canonical tablename': [list of row instances], ...}
These row instances are related.
"""
for wfdisc in rows.get('wfdisc', []):
wfdisc.wfid = next(last.wfid)
for sitechan in rows.get('sitechan', []):
# XXX: this is wrong, as each new sitechan doesn't automatically get a
# new chanid, but we accept this for now, since duplicate sitechans
# will be rejected upon database write.
sitechan.chanid = next(last.chanid)
| 10,179
|
def setup_preview(parent):
"""Creates preview window in the UI and connects a callback on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
tag = parent.tag
dlg = parent.dlg
btn_log_clear = getattr(dlg, f"btn_{tag}_log_clear", None)
text_log = getattr(dlg, f"text_{tag}_log", None)
preview = getattr(dlg, f"w_{tag}_preview", None)
btn_log_clear.clicked.connect(lambda: text_log.clear())
preview.setCurrentIndex(0)
if "on_changed_preview" in dir(parent):
preview.currentChanged.connect(parent.on_changed_preview)
| 10,180
|
def create_tf_example(image,
image_dir,
seg,
seg_dir):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
image_dir: directory containing the image files.
seg: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
seg_dir: directory containing the image files.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
filename = image['file_name']
img_format = os.path.splitext(filename)[-1]
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_img = fid.read()
feature_dict = tfrecord_lib.image_info_to_feature_dict(
image['height'], image['width'], filename, image['id'], encoded_img, img_format)
seg_full_path = os.path.join(seg_dir, seg['file_name'])
with tf.io.gfile.GFile(seg_full_path, 'rb') as fid:
seg_encoded_img = fid.read()
feature_dict['image/segmentation/class/encoded'] = tfrecord_lib.convert_to_feature(seg_encoded_img)
num_annotations_skipped = 0 # data checks
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example, num_annotations_skipped
| 10,181
|
def test_get_assigned_name_simple():
"""
Test `magic.get_assigned_name()` in various use cases.
"""
obj = type('', (), {})
foo = get_assigned_name(_getframe())
assert_equals("foo", foo)
spam = [get_assigned_name(_getframe())] + ["bar"]
assert_equals("spam", spam[0])
obj.eggs = (lambda: get_assigned_name(_getframe(1)))()
assert_equals("eggs", obj.eggs)
with assert_raises(ValueError):
get_assigned_name(_getframe())
with assert_raises(ValueError):
# get_assigned_name() branch must be first part of the expression.
spam = [42] + [get_assigned_name(_getframe())] + ["bar"]
assert "spam" == spam[0]
| 10,182
|
def build_rfb_lite(base, feature_layer, mbox, num_classes):
"""Receptive Field Block Net for Accurate and Fast Object Detection for embeded system
See: https://arxiv.org/pdf/1711.07767.pdf for more details.
"""
base_, extras_, norm_, head_ = add_extras(base(), feature_layer, mbox, num_classes, version='rfb_lite')
return RFB(base_, extras_, norm_, head_, feature_layer, num_classes)
| 10,183
|
def bfunsmat(u, p, U):
"""Computes a matrix of the form :math:`B_{ij}`, where
:math:`i=0\\ldots p` and for each :math:`j` th column the
row :math:`i` of the matrix corresponds to the value of
:math:`(\\mathrm{span}(u_j)-p+i)` th bspline basis function at
:math:`u_j`.
Parameters:
u (np.array(float)) : evaluation point(s)
p (int) : basis function degree
U (np.array(float)) : knot vector
Returns:
np.array(float) : matrix :math:`B_{ij}`
"""
nkts = U.size
nbfuns = nkts - p - 1
npts = u.size
Bij = np.zeros((nbfuns, npts))
for j in range(0, npts):
span = fspan(u[j], p, U)
B_i = bfuns(span, u[j], p, U)
for i in range(0, p+1):
Bij[i,j] = B_i[i]
return Bij
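# Hedged usage sketch (not from the original source): a quadratic B-spline basis on
# an open knot vector, checking the partition-of-unity property (for every column,
# the p+1 stored values sum to 1). It assumes the companion helpers `fspan` and
# `bfuns` used above are available in this module.
def _example_bfunsmat():
    p = 2
    U = np.array([0.0, 0.0, 0.0, 0.5, 1.0, 1.0, 1.0])
    u = np.array([0.1, 0.25, 0.6, 0.9])
    Bij = bfunsmat(u, p, U)
    return Bij[:p + 1, :].sum(axis=0)  # expected: approximately [1, 1, 1, 1]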
| 10,184
|
def make_directory(path: str):
"""..."""
save_directory = os.path.dirname(path)
if not os.path.exists(save_directory):
os.makedirs(save_directory)
| 10,185
|
def plot_clustermap(foldchanges, pvalues, threshold=0.05, row_cluster=True, dendogram=True, file_path=None):
"""Simple function to plot clustermap of foldchanges and pwms (without annotation); function will filter foldchanges whenever pvalues are significant.
Args:
foldchanges (np.array): matrix of foldchanges for enrichment and depletion of pwms
pvalues (np.array): matrix of pvalues for enrichment of the same pwms
threshold (float): threshold for pvalues. default 0.05
row_cluster (bool): Cluster rows. Default True
dendogram (bool): Plot dendrograms. Default True
file_path (str): The path to the file into which the plot should be written.
Returns:
None. Displays the clustermap and, if file_path is given, saves the figure to that path.
"""
np.seterr(divide = 'ignore')
# Create log2FC tables based on foldchanges and significant pvalues
log2fc = np.log2(foldchanges)
log2fc = log2fc.fillna(0)
log2fc[pvalues>threshold] = 0
# Transpose (species as rows and columns as pwms)
plot_df = log2fc.transpose()
# Parameters for plot
cbar_kws = {'extend':'both'}
cmap = plt.get_cmap(sns.diverging_palette(240, 0, l=30, s=100,as_cmap=True))
cmap.set_under('navy')
cmap.set_over('darkred')
height = len(plot_df)*0.25
# Create clustermap plot and save pdf
sns_plot = sns.clustermap(plot_df, cmap=cmap, cbar_kws=cbar_kws, row_cluster=row_cluster, xticklabels=1, yticklabels=1, figsize=(35,height),center=0)
# Show dendogram
sns_plot.ax_col_dendrogram.set_visible(dendogram)
sns_plot.ax_row_dendrogram.set_visible(dendogram)
plt.show()
if file_path is not None:
sns_plot.savefig(file_path, dpi=300)
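# Hedged usage sketch (not from the original source): random fold-change and p-value
# tables for 4 PWMs across 6 species, plotted without writing a file. The shapes,
# labels and random ranges are arbitrary illustration choices.
def _example_plot_clustermap():
    import pandas as pd
    rng = np.random.default_rng(0)
    pwms = ["pwm%d" % i for i in range(4)]
    species = ["sp%d" % i for i in range(6)]
    foldchanges = pd.DataFrame(rng.uniform(0.2, 5.0, (4, 6)), index=pwms, columns=species)
    pvalues = pd.DataFrame(rng.uniform(0.0, 0.2, (4, 6)), index=pwms, columns=species)
    plot_clustermap(foldchanges, pvalues, threshold=0.05)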
| 10,186
|
def getFourgram(words, join_string):
"""
Input: a list of words, e.g., ['I', 'am', 'Denny', 'boy']
Output: a list of fourgrams, e.g., ['I_am_Denny_boy']
I use _ as join_string for this example.
"""
assert type(words) == list
L = len(words)
if L > 3:
lst = []
for i in range(L-3):
lst.append( join_string.join([words[i], words[i+1], words[i+2], words[i+3]]) )
else:
# set it as bigram
lst = getTrigram(words, join_string)
return lst
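# Hedged usage sketch (not from the original source), mirroring the docstring:
def _example_getFourgram():
    assert getFourgram(['I', 'am', 'Denny', 'boy'], '_') == ['I_am_Denny_boy']
    # With fewer than four words the function falls back to getTrigram().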
| 10,187
|
def get_age_carbon_14_dating(carbon_14_ratio):
"""Returns the estimated age of the sample in year.
carbon_14_ratio: the fraction (0 < fraction < 1) of carbon-14
in the sample compared to the amount in living
tissue (unitless). """
if isinstance(carbon_14_ratio, str):
raise TypeError("Please provide a number, not a string")
elif carbon_14_ratio <= 0:
raise ValueError("Not acceptable, must be greater than 0 but less than 1")
elif carbon_14_ratio > 1:
raise ValueError("Too large, must be between 0 and 1")
calculation = math.log(carbon_14_ratio) / DECAY_CONSTANT * T_HALF
age = "{:.2f}".format(calculation) # rounds to 2 decimal places
return age
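# Hedged sanity check (not from the original source): with half of the carbon-14
# remaining, the estimate should equal one half-life. It assumes module-level
# constants along the lines of DECAY_CONSTANT = math.log(0.5) and T_HALF = 5730.
def _example_carbon_14_half_life():
    return get_age_carbon_14_dating(0.5)  # expected: "5730.00" under the assumed constants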
| 10,188
|
def test_units_callnumbers_star_imports():
"""Star imports for the ``units.callnumbers`` package should work
without raising errors."""
from context import pycallnumber
all_imp = __import__('pycallnumber.units.callnumbers', globals(),
locals(), ['*'])
assert all_imp.LC
assert len(all_imp.__all__) == len(pycallnumber.units.callnumbers.__all__)
| 10,189
|
def process_factor(seqlet_H, seqlet_W, seqlet_dna, feature_mask, out_prefix, background_fasta, align_seqlets_shift, meme_db=None):
"""Perform all analyses on one factor."""
print(out_prefix)
# write coef vector
write_factor(seqlet_H, feature_mask, '%s_coef.txt' % out_prefix)
# plot logo
plot_logo(seqlet_W, seqlet_dna, out_prefix, align_seqlets_shift)
# homer
run_homer(seqlet_W, seqlet_dna, out_prefix, background_fasta)
# meme
run_dreme(seqlet_W, seqlet_dna, out_prefix, background_fasta, meme_db)
| 10,190
|
def test_get_default_config_location_default(monkeypatch):
"""Assert that, if no file is found, the most-specific location is returned."""
monkeypatch.setattr(chromaterm.__main__, 'CONFIG_LOCATIONS', ['1', '2'])
assert chromaterm.__main__.get_default_config_location() == '1.yml'
| 10,191
|
def get_input_definition() -> InputDefinition:
"""
Query ReconAll's input file definition (*t1_files*) to check for existing
runs.
Returns
-------
InputDefinition
ReconAll's *t1_files* input definition
"""
node = get_node()
return node.analysis_version.input_definitions.get(key=T1_FILES_KEY)
| 10,192
|
def launch(workflow: str, number: int, file: str, concurrency: int):
"""Launch multiple workflows."""
results_folder_path = _build_results_folder_path(workflow)
try:
os.mkdir(results_folder_path)
except FileExistsError:
logging.info(
"Benchmark folder already exists. Will overwrite previous results."
)
try:
submitted_results = _start_benchmark(workflow, number, file, concurrency)
except Exception as e:
logging.error(f"Something went wrong during benchmark launch: {e}")
return
logging.info("Saving intermediate submit results...")
submitted_results_path = _build_submitted_results_path(workflow)
submitted_results.to_csv(submitted_results_path, index=False)
logging.info("Finished. Don't forget to collect the results.")
| 10,193
|
def create_ou_process(action_spec, ou_stddev, ou_damping):
"""Create nested zero-mean Ornstein-Uhlenbeck processes.
The temporal update equation is:
.. code-block:: python
x_next = (1 - damping) * x + N(0, std_dev)
Note: if ``action_spec`` is nested, the returned nested OUProcess will not be
checkpointed.
Args:
action_spec (nested BoundedTensorSpec): action spec
ou_damping (float): Damping rate in the above equation. We must have
:math:`0 <= damping <= 1`.
ou_stddev (float): Standard deviation of the Gaussian component.
Returns:
nested ``OUProcess`` with the same structure as ``action_spec``.
"""
def _create_ou_process(action_spec):
return dist_utils.OUProcess(action_spec.zeros(), ou_damping, ou_stddev)
ou_process = alf.nest.map_structure(_create_ou_process, action_spec)
return ou_process
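# Hedged standalone sketch (not from the original source) of the temporal update
# equation from the docstring, written with plain NumPy rather than alf's OUProcess:
def _example_ou_update(steps=1000, damping=0.15, stddev=0.2):
    import numpy as np
    rng = np.random.default_rng(0)
    x = 0.0
    trajectory = []
    for _ in range(steps):
        x = (1 - damping) * x + rng.normal(0.0, stddev)  # x_next = (1 - damping) * x + N(0, std_dev)
        trajectory.append(x)
    return np.asarray(trajectory)  # zero-mean, temporally correlated exploration noise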
| 10,194
|
def clean_data(df):
"""
Remove duplicate rows from a dataframe.
Parameters:
df (DataFrame): input data frame
Returns:
DataFrame: data frame without duplicate rows
"""
df = df.drop_duplicates()
return df
| 10,195
|
def IssueFactory(data, journal_id, issue_order):
"""
Registers the issue using the opac schema.
This function may raise the `models.Journal.DoesNotExist` exception.
"""
mongo_connect()
metadata = data["metadata"]
issue = models.Issue()
issue._id = issue.iid = data.get("id")
issue.type = metadata.get("type", "regular")
issue.spe_text = metadata.get("spe_text", "")
issue.start_month = metadata.get("publication_month", 0)
issue.end_month = metadata.get("publication_season", [0])[-1]
issue.year = metadata.get("publication_year")
issue.volume = metadata.get("volume", "")
issue.number = metadata.get("number", "")
issue.label = metadata.get(
"label", "%s%s" % ("v" + issue.volume, "n" + issue.number)
)
issue.order = metadata.get("order", 0)
issue.pid = metadata.get("pid", "")
issue.journal = models.Journal.objects.get(_id=journal_id)
issue.order = issue_order
return issue
| 10,196
|
def delete_images(ec2_conn, ids=None, tags=None, owners=None, name=None):
"""Delete (unregister) AMI images."""
images = list_images(
ec2_conn, ids=ids, tags=tags, owners=owners, name=name
)
if not images:
if ids:
raise exc.NotFoundError('No image id {} found.'.format(ids))
if tags:
raise exc.NotFoundError('No image tagged {} found.'.format(tags))
if name:
raise exc.NotFoundError('No image named {} found.'.format(name))
for image in images:
_LOGGER.info('deleting image: %s', image['ImageId'])
ec2_conn.deregister_image(ImageId=image['ImageId'])
| 10,197
|
def _is_permission_in_db(permission_name: str):
"""To check whether the given permission is in the DB
Parameters
----------
permission_name: str
A permission name we use internally.
E.g., hazard, hazard:hazard, project...
"""
return bool(
models.Auth0Permission.query.filter_by(permission_name=permission_name).first()
)
| 10,198
|
def check(source):
"""Return messages from pyflakes."""
if sys.version_info[0] == 2 and isinstance(source, unicode):
# Convert back to original byte string encoding, otherwise pyflakes
# call to compile() will complain. See PEP 263. This only affects
# Python 2.
try:
source = source.encode('utf-8')
except UnicodeError: # pragma: no cover
return []
reporter = ListReporter()
try:
pyflakes.api.check(source, filename='<string>', reporter=reporter)
except (AttributeError, RecursionError, UnicodeDecodeError):
pass
return reporter.messages
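# Hedged usage sketch (not from the original source): it assumes the ListReporter
# helper defined elsewhere in this module simply collects pyflakes messages.
def _example_check():
    # pyflakes should flag the unused import and the undefined name.
    return check("import os\nx = undefined_name\n")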
| 10,199
|