| content (stringlengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def xml_ind(content):
"""Translate a individual expression or variable to MathCAD XML.
:param content: str, math expression or variable name.
:return: str, formatted MathCAD XML.
"""
ns = ''' xmlns:ml="http://schemas.mathsoft.com/math30">'''  # namespace declaration spliced into the first opening tag
sub_statement = xml_stat(xml_ex(content))
return sub_statement.replace('>', ns, 1)
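# Hedged illustration of the namespace-injection trick above: replace('>', ns, 1)
# rewrites only the first '>' so the ml namespace declaration lands inside the
# outermost opening tag. Tag names below are hypothetical; xml_stat/xml_ex are
# not reproduced here.
ns_demo = ''' xmlns:ml="http://schemas.mathsoft.com/math30">'''
statement_demo = '<ml:apply><ml:id>x</ml:id></ml:apply>'
print(statement_demo.replace('>', ns_demo, 1))
# <ml:apply xmlns:ml="http://schemas.mathsoft.com/math30"><ml:id>x</ml:id></ml:apply>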
| 9,500
|
def run_game():
"""Inicializa o jogo e cria um objeto para a tela - 1200/700"""
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
# Set the window caption and icon
pygame.display.set_caption('Alien-Invasion')
pygame.display.set_icon(pygame.image.load('Assets/aliens/alien3.bmp'))
background_start = pygame.image.load('Assets/menu/apresentacao.bmp')
background_menu = pygame.image.load('Assets/menu/menu.bmp')
background_game_over = pygame.image.load('Assets/menu/game-over.bmp')
background = pygame.image.load('Assets/background/fundo.bmp')
# Create an instance to store game statistics and create the scoreboard
stats = GameStats(ai_settings)
sb = Scoreboard(ai_settings, screen, stats)
# Create the ship, a group of bullets, and a group of aliens
ship = Ship(ai_settings, screen)
bullets = Group()
aliens = Group()
# Create the alien fleet
gf.create_fleet(ai_settings, screen, ship, aliens)
# Create the play button
play_button = Button(ai_settings, screen, "P to play or Esc to quit")
# Set the start screen image
gf.update_start(screen, background_start)
pygame.mouse.set_visible(False)
# Start the menu sound
menu_sound = pygame.mixer.Sound('Sounds/game_sound2.mp3')
pygame.mixer.Sound.set_volume(menu_sound, 0.5)
menu_sound.play(-1)
while True:
"""Inicializa o laço principal do jogo"""
gf.check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets)
if stats.game_start:
gf.update_menu(screen, background_menu, play_button)
elif stats.game_active:
menu_sound.stop()
gf.update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button, background)
ship.update()
bullets.update()
gf.update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets)
gf.update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets)
elif stats.game_over:
gf.update_game_over(screen, background_game_over, play_button)
pygame.mixer.Sound.play(menu_sound)
| 9,501
|
def p_q_STRING(p):
"""q_STRING : "'" STRING "'" """
p[0] = p[2]
| 9,502
|
def now_playing(update, context):
"""Information about the current song."""
with mpdclient() as c:
song = c.currentsong()
# TODO(shoeffner): Handle properly
update.message.reply_text(song)
| 9,503
|
def CheckPortFree(port):
"""Check the availablity of the tcp port.
Args:
Integer, a port number.
Raises:
PortOccupied: This port is not available.
"""
tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
tcp_socket.bind(("", port))
except socket.error:
raise errors.PortOccupied("Port (%d) is taken, please choose another "
"port." % port)
finally:
tcp_socket.close()
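# Minimal standalone sketch of the same bind-based availability check, without
# the errors.PortOccupied wrapper used above; returns a bool instead of raising.
import socket


def _port_is_free(port):
    """Return True if the TCP port can be bound on all interfaces."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("", port))
        return True
    except OSError:
        return False
    finally:
        probe.close()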
| 9,504
|
def test_communicator(url):
"""Test the instantiation of a ``kiwipy.rmq.RmqThreadCommunicator``.
This class is used by all runners to communicate with the RabbitMQ server.
"""
RmqThreadCommunicator.connect(connection_params={'url': url})
| 9,505
|
def read_slug(slug: str, db: Session = Depends(get_db)) -> Any:
"""
Get a post by slug
"""
db_slug = get_post(db, slug)
if db_slug is None:
raise HTTPException(status_code=404, detail="Post not found")
return db_slug
| 9,506
|
def left_join_predictions(anno_gold: pd.DataFrame, anno_predicted: pd.DataFrame, columns_keep_gold: List[str],
columns_keep_system: List[str]) -> pd.DataFrame:
"""
Given gold mention annotations and predicted mention annotations, this method returns the gold annotations with
additional columns from the system prediction merged in, based on the optimal 1:1 span matching per sentence. Gold
annotation spans will not be modified, only enriched (hence: left join). Index and column of dataframes must
conform to a certain format (see assert in code). Spans in the dataframes must be non-overlapping.
:param anno_gold:
:param anno_predicted:
:param columns_keep_gold:
:param columns_keep_system:
:return:
"""
assert anno_gold.index.names == [DOCUMENT_ID, MENTION_ID]
assert anno_predicted.index.names == [DOCUMENT_ID, MENTION_ID]
mappings = []
MENTION_ID_GOLD = "mention-id-gold"
MENTION_ID_PREDICTED = "mention-id-predicted"
# perform intersection sentence-wise
if not anno_predicted.empty:
for (doc_id, sent_idx), df_gold in anno_gold.reset_index().groupby([DOCUMENT_ID, SENTENCE_IDX]):
spans_gold = df_gold[[TOKEN_IDX_FROM, TOKEN_IDX_TO]].values.tolist()
# look up mentions at the same spot in system output
anno_predicted_wout_index = anno_predicted.reset_index()
df_predicted = anno_predicted_wout_index.loc[(anno_predicted_wout_index[DOCUMENT_ID] == doc_id) & (anno_predicted_wout_index[SENTENCE_IDX] == sent_idx)]
spans_predicted = df_predicted[[TOKEN_IDX_FROM, TOKEN_IDX_TO]].values.tolist()
# perform span matching (only based on spans! no type information taken into consideration!)
matched_spans = span_matching(spans_gold, spans_predicted, keep_A=True)
# keep MENTION_IDs of matched mentions
for i_gold, i_predicted in matched_spans.items():
row = {DOCUMENT_ID: doc_id,
MENTION_ID_GOLD: df_gold.iloc[i_gold][MENTION_ID]}
# this index can be None because we set keep_A=True for span_matching, to always keep all gold annotations
if i_predicted is not None:
row[MENTION_ID_PREDICTED] = df_predicted.iloc[i_predicted][MENTION_ID]
mappings.append(row)
mappings = pd.DataFrame(mappings, columns=[DOCUMENT_ID, MENTION_ID_GOLD, MENTION_ID_PREDICTED])
if not mappings.empty:
# merge in the columns we want to keep from the gold annotations
mappings = mappings.merge(anno_gold[columns_keep_gold],
left_on=[DOCUMENT_ID, MENTION_ID_GOLD],
right_index=True)
# merge in the columns we want to keep from the predicted annotations - note the use of how="left" to keep gold annotations which have MENTION_ID_PREDICTED == None
left_joined = mappings.merge(anno_predicted[columns_keep_system],
left_on=[DOCUMENT_ID, MENTION_ID_PREDICTED],
right_index=True,
how="left")
# drop unwanted columns, return to original column names, return to original index
left_joined = left_joined.drop(columns=[MENTION_ID_PREDICTED])
left_joined = left_joined.rename(columns={MENTION_ID_GOLD: MENTION_ID})
left_joined = left_joined.set_index([DOCUMENT_ID, MENTION_ID])
else:
# append lots of NaNs if there is nothing to merge
left_joined = pd.concat([anno_gold[columns_keep_gold], pd.DataFrame([], columns=columns_keep_system)], axis=1)
left_joined.sort_index(inplace=True)
return left_joined
| 9,507
|
def get_users_data(filter):
"""
Returns users in db based on submitted filter
:param filter:
:return:
"""
# presets - filter must be in one of the lists
filter_presets = {"RegistrationStatus": ["Pending", "Verified"], "userTypeName": ["Administrator", "Event Manager", "Alumni"]}
if filter.title() in filter_presets["RegistrationStatus"]:
users_data = db.get_users(RegistrationStatus=filter)
elif filter.title() in filter_presets["userTypeName"]:
users_data = db.get_users(userTypeName=filter)
else:
# filter doesn't exist, return all users
users_data = db.get_users()
users_data = list(enumerate(users_data))
return json.jsonify(users_data)
| 9,508
|
def test_graph_diffusion() -> None:
"""
1) Provide both labelled and unlabelled data points.
2) Run label propagation, collect diffused labels for the unlabelled data
3) Verify the match between true and diffused labels
"""
# Create labelled and unlabelled data points
noisy_circles, labels = _create_circles_dataset()
n_samples = noisy_circles.shape[0]
n_labelled = int(n_samples * 0.75)
perm = np.random.permutation(n_samples)
unlabelled_indices = perm[n_labelled:]
# Build a graph and run label propagation
graph_param = GraphParameters(
n_neighbors=20,
diffusion_alpha=0.99,
cg_solver_max_iter=10,
diffusion_batch_size=-1,
distance_kernel='euclidean')
graph = build_connectivity_graph(normalised=True,
embeddings=noisy_circles,
n_neighbors=graph_param.n_neighbors,
distance_kernel=graph_param.distance_kernel)
diffusion_input_labels = np.eye(2)[labels]
laplacian = scipy.sparse.eye(n_samples) - graph_param.diffusion_alpha * graph
laplacian_inv = scipy.sparse.linalg.inv(laplacian.tocsc()).todense()
diffused_labels = label_diffusion(laplacian_inv, diffusion_input_labels, unlabelled_indices)
# Compare the diffused labels against the true labels
predicted_classes = np.asarray(np.argmax(diffused_labels, axis=1)).reshape(-1)
target_classes = labels[unlabelled_indices]
accuracy = np.sum(predicted_classes == target_classes) / target_classes.shape[0]
assert np.isclose(accuracy, 1.0, rtol=1e-05, atol=1e-08, equal_nan=False)
| 9,509
|
def calAdjCCTTFromTrace(nt,dt,tStartIn,tEndIn,dataIn, synthIn):
""" calculate the cross correlation traveltime adjoint sources for one seismogram
IN:
nt : number of timesteps in each seismogram
dt : timestep of seismograms
tStartIn : float starting time for trace
tEndIn : float end time for trace
dataIn : array, observed data seismogram
synthIn : array, synthetic seismogram
OUT:
fBar : array containing the adjoint seismogram for the trace
t : ndarray containing the time steps
"""
isCalculateWeights = False
if isCalculateWeights:
dSeism = np.zeros(nt)
weight = 0
# -- time vector
t = np.ogrid[0:(nt-1)*dt:nt*1j]
# -- the norm
norm = 0
# -- numpy arrays initialisation
velSynth = np.zeros(nt)
accSynth = np.zeros(nt)
timeWind = np.zeros(nt)
fBar = np.zeros(nt)
# -- calculate time time-window
tStart = tStartIn
tEnd = tEndIn
# -- the starting and ending sample numbers
iStart = int(np.floor(tStart/dt))
iEnd = int(np.ceil(tEnd/dt))
# -- sample length of the window
iWind = iEnd - iStart
# print(iStart, iEnd, iWind)
timeWind[iStart:iEnd]=sgnl.hann(iWind)
# -- calculate the adjoint
synth = synthIn
interpTrc = interp.InterpolatedUnivariateSpline(t,synth)
velSynth = interpTrc(t,1)
accSynth = interpTrc(t,2)
integrArgument = timeWind*synth*accSynth
# -- calculating the norm
norm = integr.simps(integrArgument,dx=dt,axis=-1,even='last')
# -- divide every trace (row in matrices) by their norm (row in vector norm)
fBar = timeWind*velSynth / norm
if isCalculateWeights:
# -- read in the data seismograms
data = dataIn
# -- calculate the difference between data and synthetics (amplitude) per trace
dSeism = data - synth
# -- calculate the weight per trace
integrArgument = timeWind*velSynth*dSeism
weight = integr.simps(integrArgument,dx=dt,axis=-1,even='last')
print "weight", weight/norm
# -- multiply weight with every adj trace
fBar = fBar*weight
print(weight)
return [fBar,t]
| 9,510
|
def get_par_idx_update_pars_dict(pars_dict, cmd, params=None, rev_pars_dict=None):
"""Get par_idx representing index into pars tuples dict.
This is used internally in updating the commands H5 and commands PARS_DICT
pickle files. The ``pars_dict`` input is updated in place.
This code was factored out verbatim from kadi.update_cmds.py.
:param pars_dict: dict of pars tuples
:param cmd: dict or CommandRow
Command for updated par_idx
:param params: dict, optional
If provided, this is used instead of cmd['params']
:param rev_pars_dict: dict, optional
If provided, also update the reverse dict.
:returns: int
Params index (value of corresponding pars tuple dict key)
"""
# Define a consistently ordered tuple that has all command parameter information
if params is None:
params = cmd['params']
keys = set(params.keys()) - set(('SCS', 'STEP', 'TLMSID'))
if cmd['tlmsid'] == 'AOSTRCAT':
pars_tup = encode_starcat_params(params) if params else ()
else:
if cmd['tlmsid'] == 'OBS':
# Re-order parameters to a priority order.
new_keys = ['obsid', 'simpos', 'obs_stop', 'manvr_start', 'targ_att']
for key in sorted(cmd['params']):
if key not in new_keys:
new_keys.append(key)
keys = new_keys
else:
# Maintain original order of keys for OBS command but sort the rest.
# This is done so the OBS command displays more nicely.
keys = sorted(keys)
pars_tup = tuple((key.lower(), params[key]) for key in keys)
try:
par_idx = pars_dict[pars_tup]
except KeyError:
# Along with transition to 32-bit idx in #190, ensure that idx=65535
# never gets used. Prior to #190 this value was being used by
# get_cmds_from_backstop() assuming that it will never occur as a
# key in the pars_dict. Adding 65536 allows older versions to work
# with the new cmds.pkl pars_dict.
par_idx = len(pars_dict) + 65536
pars_dict[pars_tup] = par_idx
if rev_pars_dict is not None:
rev_pars_dict[par_idx] = pars_tup
return par_idx
| 9,511
|
def profile(function, *args, **kwargs):
"""
Log the runtime of a function call.
Args:
function: The callable to profile.
args: Additional positional arguments to ``function``.
kwargs: Additional keyword arguments to ``function``.
Returns:
The result of applying ``function`` to ``args`` and ``kwargs``.
"""
start_time = time.time()
result = function(*args, **kwargs)
end_time = time.time()
time_elapsed = end_time - start_time
LOGGER.log(logging.DEBUG, 'Call to "%s" took %.3f seconds',
function.__name__, time_elapsed)
return result
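# Hedged usage sketch: profile() forwards *args/**kwargs unchanged, so any
# callable can be timed; the module-level LOGGER must be configured at DEBUG
# level for the timing message to appear.
# total = profile(sum, range(1_000_000))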
| 9,512
|
def get_parser():
"""Define the command line interface"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from .. import __version__ as _vstr
parser = ArgumentParser(description='SDC Workflows',
formatter_class=RawTextHelpFormatter)
parser.add_argument(
'bids_dir', action='store', type=Path,
help='the root folder of a BIDS dataset')
parser.add_argument('output_dir', action='store', type=Path,
help='the output path for the outcomes of preprocessing and visual '
'reports')
parser.add_argument('analysis_level', choices=['participant', 'group'], nargs='+',
help='processing stage to be run, "participant" means individual analysis '
'and "group" is second level analysis.')
# optional arguments
parser.add_argument('--version', action='version', version='v{}'.format(_vstr))
# Options that affect how pyBIDS is configured
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
g_bids.add_argument('--participant-label', action='store', type=str,
nargs='*', dest='subject', help='process only particular subjects')
g_bids.add_argument('--task', action='store', type=str, nargs='*',
help='select a specific task to be processed')
g_bids.add_argument('--dir', action='store', type=str, nargs='*',
help='select a specific direction entity to be processed')
g_bids.add_argument('--acq', action='store', type=str, nargs='*', dest='acquisition',
help='select a specific acquisition entity to be processed')
g_bids.add_argument('--run', action='store', type=int, nargs='*',
help='select a specific run identifier to be processed')
g_bids.add_argument('--suffix', action='store', type=str, nargs='*', default='bold',
help='select a specific BIDS suffix to be processed')
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=0,
help="increases log verbosity for each occurence, debug level is -vvv")
g_perfm.add_argument('--ncpus', '--nprocs', action='store', type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--nthreads', '--omp-nthreads', action='store', type=int,
help='maximum number of threads per-process')
g_other = parser.add_argument_group('Other options')
g_other.add_argument('-w', '--work-dir', action='store', type=Path,
help='path where intermediate results should be stored')
return parser
| 9,513
|
def COSTR(LR, R, W, S):
"""
COSTR: one value of the cosine transform of a two-sided function
p. 90
"""
import math
COSNW = 1.
SINNW = 0.
COSW = math.cos(W)
SINW = math.sin(W)
S = R[0]
for I in range(1, LR):
# angle-addition recursion for cos(I*W) and sin(I*W)
T = COSW * COSNW - SINW * SINNW
SINNW = SINW * COSNW + COSW * SINNW
COSNW = T
S += 2 * R[I] * COSNW
return S
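# Hedged worked check for COSTR: the routine evaluates
#   S(W) = R[0] + 2 * sum_{I>=1} R[I] * cos(I*W)
# via the angle-addition recursion, so for R = [1.0, 0.5] at W = 0 it
# returns 1 + 2*0.5 = 2.
assert abs(COSTR(2, [1.0, 0.5], 0.0, 0.0) - 2.0) < 1e-12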
| 9,514
|
def CV_SIGN(*args):
"""CV_SIGN(int a)"""
return _cv.CV_SIGN(*args)
| 9,515
|
def import_config_data(config_path):
"""
Parameters
----------
config_path : str
path to the experimental configuration file
Returns
-------
config data : dict
dict containing experimental metadata for a given session config file
"""
data = get_config(config_path)
return data
| 9,516
|
def evaluate_agent(agent, environment, num_episodes, max_steps, render=True):
"""Evaluate an agent in an environment.
Parameters
----------
agent: AbstractAgent
environment: AbstractEnvironment
num_episodes: int
max_steps: int
render: bool
"""
with Evaluate(agent):
rollout_agent(
environment,
agent,
max_steps=max_steps,
num_episodes=num_episodes,
render=render,
)
returns = np.mean(agent.logger.get("eval_return")[-num_episodes:])
print(f"Test Cumulative Rewards: {returns}")
| 9,517
|
def sigmaLabel(ax, xlabel, ylabel, sigma=None):
"""Label the axes on a figure with some uncertainty."""
confStr = r'$\pm{} \sigma$'.format(sigma) if sigma is not None else ''
ax.set_xlabel(xlabel + confStr)
ax.set_ylabel(ylabel + confStr)
return ax
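# Hedged usage sketch with matplotlib (figure/axes names are illustrative):
# fig, ax = plt.subplots()
# sigmaLabel(ax, 'Range [m]', 'Altitude [m]', sigma=2)
# # both axis labels now end with the suffix r'$\pm2 \sigma$'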
| 9,518
|
def startswith(x, prefix):
"""Determines if entries of x start with prefix
Args:
x: A vector of strings or a string
prefix: The prefix to test against
Returns:
A bool vector indicating, for each element in x, whether it starts with the prefix
"""
x = regcall(as_character, x)
return x.str.startswith(prefix)
| 9,519
|
def create_estimator(est_cls, const_kwargs, node, child_list):
"""
Creates an estimator.
:param est_cls: Function that creates the estimator.
:param const_kwargs: Keyword arguments which do not change during the evolution.
:param child_list: List of converted child nodes - should be empty.
:param node: Node whose ``obj_kwargs`` hold the keyword arguments set during the evolution process.
:return: A new estimator.
"""
if len(child_list) > 0:
raise ValueError("Estimator cannot have sub-estimators.")
evolved_kwargs = node.obj_kwargs
if 'feat_frac' in evolved_kwargs.keys():
feat_frac = evolved_kwargs['feat_frac']
evolved_kwargs = {key: val for key, val in evolved_kwargs.items()
if key != 'feat_frac'}
est = est_cls(**const_kwargs, **evolved_kwargs)
return RelativeTransformer(est, feat_frac)
return est_cls(**const_kwargs, **evolved_kwargs)
| 9,520
|
def exists(hub_id):
"""Check for existance of hub in local state.
Args:
hub_id(str): Id of hub to query. The id is a string of hexadecimal sections used internally to represent a hub.
"""
return 'Hubs.{0}'.format(hub_id) in config.state
| 9,521
|
def train_one():
"""
train an agent
"""
print("==============Start Fetching Data===========")
df = YahooDownloader(start_date = config.START_DATE,
end_date = config.END_DATE,
ticker_list = config.DOW_30_TICKER).fetch_data()
print("==============Start Feature Engineering===========")
df = FeatureEngineer(df,
use_technical_indicator=True,
use_turbulence=True).preprocess_data()
# Training & Trade data split
train = data_split(df, config.START_DATE,config.START_TRADE_DATE)
trade = data_split(df, config.START_TRADE_DATE,config.END_DATE)
# data normalization
#feaures_list = list(train.columns)
#feaures_list.remove('date')
#feaures_list.remove('tic')
#feaures_list.remove('close')
#print(feaures_list)
#data_normaliser = preprocessing.StandardScaler()
#train[feaures_list] = data_normaliser.fit_transform(train[feaures_list])
#trade[feaures_list] = data_normaliser.fit_transform(trade[feaures_list])
# calculate state action space
stock_dimension = len(train.tic.unique())
state_space = 1 + 2*stock_dimension + len(config.TECHNICAL_INDICATORS_LIST)*stock_dimension
env_setup = EnvSetup(stock_dim = stock_dimension,
state_space = state_space,
hmax = 100,
initial_amount = 1000000,
transaction_cost_pct = 0.001)
env_train = env_setup.create_env_training(data = train,
env_class = StockEnvTrain)
agent = DRLAgent(env = env_train)
print("==============Model Training===========")
now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M')
sac_params_tuning={
'batch_size': 64,
'buffer_size': 100000,
'ent_coef':'auto_0.1',
'learning_rate': 0.0001,
'learning_starts':200,
'timesteps': 50000,
'verbose': 0}
model = agent.train_SAC(model_name = "SAC_{}".format(now), model_params = sac_params_tuning)
print("==============Start Trading===========")
env_trade, obs_trade = env_setup.create_env_trading(data = trade,
env_class = StockEnvTrade,
turbulence_threshold=250)
df_account_value,df_actions = DRLAgent.DRL_prediction(model=model,
test_data = trade,
test_env = env_trade,
test_obs = obs_trade)
df_account_value.to_csv("./"+config.RESULTS_DIR+"/df_account_value_"+now+'.csv')
df_actions.to_csv("./"+config.RESULTS_DIR+"/df_actions_"+now+'.csv')
print("==============Get Backtest Results===========")
perf_stats_all = BackTestStats(df_account_value)
perf_stats_all = pd.DataFrame(perf_stats_all)
perf_stats_all.to_csv("./"+config.RESULTS_DIR+"/perf_stats_all_"+now+'.csv')
| 9,522
|
def query_by_date_after(**kwargs):
"""
Query records on or after the given release date: e.g. for 2020-06-03, return 2020-06-03, 2020-06-04, ...
:param kwargs: {'date': date}
:return:
"""
session = None
try:
date = kwargs['date'].strip() + config.BEGIN_DAY_TIME
session = get_session()
ret = session.query(Play).filter(Play.DATE_TIME >= date).order_by(
Play.DATE_TIME.desc()).limit(config.LIMIT_MAX).all()
# Commit the transaction to persist to the database
session.commit()
results = parse_object(*ret)
logging.info('OK : play.py--->query_by_date_after(), succeeded')
return results
except Exception as e:
logging.critical('Error : play.py--->query_by_date_after() failed: {}'.format(e))
return []
finally:
# Close the session if it was created
if session is not None:
session.close()
| 9,523
|
def get_artifact_path(name):
"""Получение пути для сохранения артефакта. Side-эффект: Создание директории
@param name: Название артефакта
@return Путь для сохранения
"""
if not os.path.exists('../artifacts/'):
os.makedirs('../artifacts/')
path = f'../artifacts/{name}.png'
print(f'New artifact: {path}')
return path
| 9,524
|
def _step_4_find_peaks(
aligned_composite_bg_removed_im,
aligned_roi_rect,
raw_mask_rects,
border_size,
field_df,
sigproc_params,
):
"""
Find peaks on the composite image
TASK: Remove the mask rect checks and replace with the same masking
logic that is now implemented in the alignment phase. That is, just remove
the peaks from the source instead of in post-processing.
"""
from skimage.feature import peak_local_max # Defer slow import
from scipy.stats import iqr
n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
assert (
aligned_composite_bg_removed_im.shape[0]
== aligned_composite_bg_removed_im.shape[1]
)
aligned_dim, _ = aligned_composite_bg_removed_im.shape
check.array_t(aligned_composite_bg_removed_im, is_square=True)
hat_rad = sigproc_params.hat_rad
brim_rad = sigproc_params.hat_rad + 1
hat_mask, brim_mask = _hat_masks(hat_rad, brim_rad)
kernel = imops.generate_gauss_kernel(1.0)
kernel = kernel - kernel.mean()
_fiducial_im = imops.convolve(aligned_composite_bg_removed_im, kernel)
# Black out the convolution artifact around the perimeter of the _fiducial_im
search_roi_rect = Rect(
aligned_roi_rect.b + brim_rad,
aligned_roi_rect.t - brim_rad,
aligned_roi_rect.l + brim_rad,
aligned_roi_rect.r - brim_rad,
)
search_roi = search_roi_rect.roi()
composite_fiducial_im = np.zeros_like(aligned_composite_bg_removed_im)
# Use Inter-Quartile Range for some easy filtering
_iqr = 0
if sigproc_params.iqr_rng is not None:
_iqr = iqr(
_fiducial_im[search_roi],
rng=(100 - sigproc_params.iqr_rng, sigproc_params.iqr_rng),
)
composite_fiducial_im[search_roi] = (_fiducial_im[search_roi] - _iqr).clip(min=0)
locs = peak_local_max(
composite_fiducial_im,
min_distance=hat_rad,
threshold_abs=sigproc_params.threshold_abs,
)
# Emergency exit to prevent memory overflows
# check.affirm(len(locs) < 7000, f"Too many peaks {len(locs)}")
shift = field_df.set_index("cycle_i").sort_index()[["shift_y", "shift_x"]].values
shift_y = shift[:, 0]
shift_x = shift[:, 1]
# Discard any peak in any mask_rect
# ALIGN the mask rects to the composite coordinate system
aligned_mask_rects = []
for channel in range(sigproc_params.n_output_channels):
channel_rects = safe_list_get(raw_mask_rects, channel, [])
for cycle in range(n_cycles):
for rect in safe_list_get(channel_rects, cycle, []):
yx = XY(rect[0], rect[1])
hw = WH(rect[2], rect[3])
yx += XY(border_size, border_size) - XY(shift_x[cycle], shift_y[cycle])
aligned_mask_rects += [(yx[0], yx[1], yx[0] + hw[0], yx[1] + hw[1])]
aligned_mask_rects = np.array(aligned_mask_rects)
if aligned_mask_rects.shape[0] > 0:
# To compare every loc with every mask rect we use the tricky np.fn.outer()
y_hits = np.greater_equal.outer(locs[:, 0], aligned_mask_rects[:, 0])
y_hits &= np.less.outer(locs[:, 0], aligned_mask_rects[:, 2])
x_hits = np.greater_equal.outer(locs[:, 1], aligned_mask_rects[:, 1])
x_hits &= np.less.outer(locs[:, 1], aligned_mask_rects[:, 3])
inside_rect = x_hits & y_hits # inside a rect if x and y are inside the rect
locs_to_keep = ~np.any(
inside_rect, axis=1
) # Reject if inside of any masked rect
locs = locs[locs_to_keep]
circle_im = np.zeros((aligned_dim, aligned_dim))
center = aligned_dim / 2
peak_rows = []
for field_peak_i, loc in enumerate(locs):
if sigproc_params.radial_filter is not None:
radius = math.sqrt((loc[0] - center) ** 2 + (loc[1] - center) ** 2)
radius /= center
if radius >= sigproc_params.radial_filter:
continue
imops.set_with_mask_in_place(circle_im, brim_mask, 1, loc=loc, center=True)
peak_rows += [
Munch(
peak_i=0,
field_peak_i=field_peak_i,
aln_y=int(loc[0]),
aln_x=int(loc[1]),
)
]
peak_df = pd.DataFrame(peak_rows)
return peak_df, circle_im, aligned_mask_rects
| 9,525
|
def get_reverse_host():
"""Return the reverse hostname of the IP address to the calling function."""
try:
return socket.gethostbyaddr(get_ipaddress())[0]
except Exception:
return "Unable to resolve IP address to reverse hostname"
| 9,526
|
def trans_stop(value) -> TransformerResult:
"""
A transformer that simply returns TransformerResult.RETURN.
"""
return TransformerResult.RETURN
| 9,527
|
def generator(n, mode):
""" Returns a data generator object.
Args:
n: Generator index (not used in the function body).
mode: One of 'training' or 'validation'
"""
flip_cams = False
if FLAGS.regularization == 'GRU':
flip_cams = True
gen = ClusterGenerator(FLAGS.train_data_root, FLAGS.view_num, FLAGS.max_w, FLAGS.max_h,
FLAGS.max_d, FLAGS.interval_scale, FLAGS.base_image_size, mode=mode, flip_cams=flip_cams)
logger.info('Initializing generator with mode {}'.format(mode))
if mode == 'training':
global training_sample_size
training_sample_size = len(gen.train_clusters)
if FLAGS.regularization == 'GRU':
training_sample_size = training_sample_size * 2
return iter(gen)
| 9,528
|
def AddAppProfileResourceArg(parser, verb):
"""Add app profile positional resource argument to the parser."""
concept_parsers.ConceptParser.ForResource(
'app_profile',
GetAppProfileResourceSpec(),
'The app profile {}.'.format(verb),
required=True).AddToParser(parser)
| 9,529
|
def remove_multispaces(text):
""" Replace multiple spaces with only 1 space """
return [re.sub(r' +', " ",word) for word in text]
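# Hedged usage sketch: the argument is an iterable of strings, and runs of
# spaces inside each element collapse to a single space.
assert remove_multispaces(["too   many  spaces", "ok"]) == ["too many spaces", "ok"]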
| 9,530
|
def empirical_ci(arr: np.ndarray, alpha: float = 95.0) -> np.ndarray:
"""Computes percentile range in an array of values.
Args:
arr: An array.
alpha: Percentile confidence interval.
Returns:
A triple of the lower bound, median and upper bound of the confidence interval
with a width of alpha.
"""
percentiles = 50 - alpha / 2, 50, 50 + alpha / 2
return np.percentile(arr, percentiles)
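# Usage sketch: for alpha=95 the percentiles requested are (2.5, 50, 97.5).
import numpy as np

rng = np.random.default_rng(0)
lower, median, upper = empirical_ci(rng.normal(size=10_000), alpha=95.0)
assert lower < median < upper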
| 9,531
|
def euclidean(a,b):
"""Calculate GCD(a,b) with the Euclidean algorithm.
Args:
a (Integer): an integer > 0.
b (Integer): an integer > 0.
Returns:
Integer: GCD(a,b) = m ∈ ℕ : (m|a ⋀ m|b) ⋀ (∄ n ∈ ℕ : (n|a ⋀ n|b) ⋀ n>m).
"""
if a < b:
a, b = b, a
a, b = abs(a), abs(b)
while a != 0:
a, b = b % a, a
return b
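# Usage sketch:
assert euclidean(48, 18) == 6
assert euclidean(17, 5) == 1  # coprime inputs give GCD 1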
| 9,532
|
def check_reserved_pulse_id(pulse: OpInfo) -> Union[str, None]:
"""
Checks whether the pulse should be evaluated generically or has special
treatment.
Parameters
----------
pulse
The pulse to check.
Returns
-------
:
A str with a special identifier representing which pulse behavior to use, or None if the pulse should be evaluated generically
"""
reserved_pulse_mapping = {
"stitched_square_pulse": _check_square_pulse_stitching,
"staircase": _check_staircase,
}
for key, checking_func in reserved_pulse_mapping.items():
if checking_func(pulse):
return key
return None
| 9,533
|
def test_arr_provided():
""" Test for arrays provided as image and/or mask input. """
an_arr_tup = (2, 3)
with pytest.raises(AttributeError):
make_apply_mask(an_arr_tup, mask_arr=im_mask, vals=[0, 4])
with pytest.raises(AttributeError):
make_apply_mask(im, mask_arr=an_arr_tup, vals=[0, 4])
| 9,534
|
def figure(*args, **kwargs):
"""
Returns a new SpectroFigure, a figure extended with features useful for
analysis of spectrograms.
Compare pyplot.figure.
"""
kw = {
'FigureClass': SpectroFigure,
}
kw.update(kwargs)
return plt.figure(*args, **kw)
| 9,535
|
def test_objects_mutation(token):
"""
Ensures that mutations objects are compiled correctly.
"""
tree = Tree('mutation', [token])
expected = {'$OBJECT': 'mutation', 'mutation': token.value,
'arguments': []}
assert Objects.mutation(tree) == expected
| 9,536
|
def allow_ports(ports, proto="tcp", direction="in"):
"""
Fully replace the incoming or outgoing ports
line in the csf.conf file - e.g. TCP_IN, TCP_OUT,
UDP_IN, UDP_OUT, etc.
CLI Example:
.. code-block:: bash
salt '*' csf.allow_ports ports="[22,80,443,4505,4506]" proto='tcp' direction='in'
"""
results = []
ports = set(ports)
ports = list(ports)
proto = proto.upper()
direction = direction.upper()
_validate_direction_and_proto(direction, proto)
ports_csv = ",".join(six.moves.map(six.text_type, ports))
directions = build_directions(direction)
for direction in directions:
result = __salt__["file.replace"](
"/etc/csf/csf.conf",
# pylint: disable=anomalous-backslash-in-string
pattern='^{0}_{1}(\ +)?\=(\ +)?".*"$'.format(proto, direction),
# pylint: enable=anomalous-backslash-in-string
repl='{0}_{1} = "{2}"'.format(proto, direction, ports_csv),
)
results.append(result)
return results
| 9,537
|
def add(n1, n2, base=10):
"""Add two numbers represented as lower-endian digit lists."""
k = max(len(n1), len(n2)) + 1
d1 = n1 + [0 for _ in range(k - len(n1))]
d2 = n2 + [0 for _ in range(k - len(n2))]
res = []
carry = 0
for i in range(k):
if d1[i] + d2[i] + carry < base:
res.append(d1[i] + d2[i] + carry)
carry = 0
else:
res.append(d1[i] + d2[i] + carry - base)
carry = 1
while res and res[-1] == 0:
res = res[:-1]
if res: return res
return [0]
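# Usage sketch: operands are little-endian digit lists, so 19 + 5 is
# [9, 1] + [5], and 99 + 1 carries into a new most-significant digit.
assert add([9, 1], [5]) == [4, 2]      # 19 + 5 = 24
assert add([9, 9], [1]) == [0, 0, 1]   # 99 + 1 = 100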
| 9,538
|
def _Grafik_mit_matplotlib(*args, **kwargs):
"""Funktion zum Erzeugen von 2D-Grafiken mit matplotlib"""
Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor
plt.close('all')
mlab.close(all=True)
achsen = True if kwargs.get('achsen') is None else kwargs.get('achsen')
gitter = False if kwargs.get('gitter') is None else kwargs.get('gitter')
skalen = True if kwargs.get('skalen') is None else kwargs.get('skalen')
x_skala = True if kwargs.get('x_skala') is None else kwargs.get('x_skala')
y_skala = True if kwargs.get('y_skala') is None else kwargs.get('y_skala')
groesse = kwargs.get('groesse')
text = kwargs.get('text')
bez = kwargs.get('bez')
# inspect string entries in the input
i = 1
for arg in args:
if isinstance(arg, str):
if arg.find('=') > 0:
exec(arg)
continue
else:
print("agla: %s. Eintrag: '=' ist nicht angegeben" % i)
return
if not skalen:
x_skala = False
y_skala = False
if isinstance(achsen, bool):
x_bez, y_bez = 'x', 'y'
else:
a = achsen
if not isinstance(a, (tuple, Tuple, list)) or len(achsen) != 2:
print('agla: Liste/Tupel mit zwei Bezeichnern für die Achsen angeben')
return
x_bez = str(a[0]) if isinstance(a[0], Symbol) else a[0]
y_bez = str(a[1]) if isinstance(a[1], Symbol) else a[1]
if not (isinstance(x_bez, str) and isinstance(y_bez, str)):
print('agla: die Bezeichner als Symbole oder Zeichenketten angeben')
return
if x_skala:
if not achsen and not gitter:
x_skala = False
if y_skala:
if not achsen and not gitter:
y_skala = False
if groesse:
if not (iterable(groesse) and len(groesse) == 2):
print('agla: Größenangaben sind als Tupel/Liste mit 2 Elementen zu schreiben')
return
typ = int, Integer, float, Float, Rational
if not all([isinstance(el, typ) and el > 0
for el in groesse]):
print('agla: Größenangaben müssen die Form (breite, höhe) mit positiven Werten haben')
return
if bez:
text = bez
if text:
meld = 'agla: Textangaben sind als Tupel/Liste mit Elementen der \nLänge 3 oder 4 zu schreiben'
if not iterable(text):
print(meld)
return
if not all([iterable(el) and len(el) in (2, 3) for el in text]):
print(meld)
return
if not all([isinstance(el[0], Vektor) and isinstance(el[1], str)
for el in text]):
print('agla: Textelemente müssen die Form (Vektor/Punkt, \'text\' /[, text_größe]) haben')
return
tg = [el for el in text if len(el) == 3]
if not all([el[2] in (1, 2) for el in tg]):
print('agla: Textgrößen sind 1:kleiner, 2:größer')
return
# Check and collect the entries
eintraege = [] # [Objekt, Spezifikation]
i = 1
for arg in args:
if isinstance(arg, str):
if arg.find('=') > 0:
exec(arg)
continue
else:
print("agla: %s. Eintrag: '=' ist nicht angegeben" % i)
return
spez = tuple()
if isinstance(arg, (list, tuple, Tuple)):
obj = arg[0]
if len(arg) > 1:
spez = tuple(arg[1:])
else:
obj = arg
s = str(type(obj))
if not s[s.rfind('.')+1 : -2] in _klassen_mit_grafik:
print("agla: %s. Eintrag: ein Objekt zum Zeichnen angeben" % i)
return
try:
if mit_param(obj) and not obj.is_schar:
raise AglaError('Parameteranzahl > 1, die Darstellung ist nicht implementiert')
spez = _spezifikation(spez)
if isinstance(spez, AglaError):
raise AglaError(spez[0])
if not obj.is_schar and spez[3]:
raise AglaError('keine Schar, die Bereichsangabe ist ungültig')
if obj.is_schar and not spez[3]:
raise AglaError('Schar, eine Bereichsangabe machen')
except AglaError as e:
print("agla: " + str(i) + ". Eintrag:", e.args[0])
return
except Exception:
print("agla: %s. Eintrag: die Eingaben sind fehlerhaft" % i)
return
eintraege += [[obj, spez]]
i += 1
# check for animation capability
for i, ein in enumerate(eintraege):
obj, spez = ein
if spez[3]:
print('agla: ' + str(i+1) + '. Eintrag:', \
'die animierte Darstellung von Objekten in der Ebene \n' +\
' ist nicht implementiert')
return
_mass = UMG._mass()
fig = plt.figure(figsize=(8, 6))
if groesse:
fig = plt.figure(figsize=(groesse[0], groesse[1]))
plt.axes().set_aspect('equal')
ax = fig.add_subplot(1, 1, 1)
ax.axis('off')
xl, xr, yl, yr = UMG._sicht_box[:4]
d = 0
if gitter:
d = _mass / 20
plt.axis([xl-d, xr+d, yl-d, yr+d])
f = (0.65, 0.65, 0.65)
if achsen:
ax.arrow(xl, 0.0, (xr-xl)-0.6*_mass, 0.0, head_length=0.6*_mass, \
head_width=0.18*_mass, linewidth=0.5, fc=f, ec=f)
ax.arrow(0.0, yl, 0.0, (yr-yl)-0.6*_mass, head_length=0.6*_mass, \
head_width=0.18*_mass, linewidth=0.5, fc=f, ec=f)
plt.text(xr-0.5*_mass, -1.2*_mass, x_bez, size=9, alpha=0.5)
plt.text(-1.2*_mass, yr-0.5*_mass, y_bez, size=9, alpha=0.5)
if gitter:
_gitter2(ax, null=(not achsen and gitter))
if x_skala:
_x_skala2(ax)
if y_skala:
_y_skala2(ax)
if text:
for t in text:
if len(t) < 3:
plt.text(t[0].x, t[0].y, t[1], size=10, alpha=0.8)
else:
font = {1:8, 2:12}
plt.text(t[0].x, t[0].y, t[1], size=font[t[2]], alpha=0.8)
# Animation not implemented
animations_liste = []
for i, ein in enumerate(eintraege):
obj, spez = ein
res = obj.graf(spez, figure=fig)
if isinstance(res, AglaError):
print('agla:', res.args[0])
return
#if res is None:
# print('agla: ' + str(i+1) + '. Eintrag:', \
# 'die animierte Darstellung des Objektes ist nicht implementiert')
# pass
if isinstance(obj, Vektor) and isinstance(spez[0], Vektor):
plt.gca().add_line(res[0])
plt.gca().add_patch(res[1])
if spez[3]:
animations_liste += [[type(obj), res[0], res[1]]]
if animations_liste:
anim2(animations_liste)  # function is defined elsewhere
plt.show()
return
| 9,539
|
def client_thread(client_url, context, i):
""" Basic request-reply client using REQ socket """
socket = context.socket(zmq.REQ)
identity = "Client-%d" % (i)
socket.setsockopt_string(zmq.IDENTITY, identity)  # Set client identity. Makes tracing easier
socket.connect(client_url)
# Send request, get reply
socket.send("HELLO")
reply = socket.recv()
print("%s: %s\n" % (identity, reply))
return
| 9,540
|
def test_enumerating_tautomers_apply():
"""
Test enumerating tautomers and make sure the input molecule is also returned.
"""
enumerate_tauts = workflow_components.EnumerateTautomers()
enumerate_tauts.max_tautomers = 2
mols = get_tautomers()
result = enumerate_tauts.apply(mols, processors=1, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY)
# check the input molecule is present
for mol in mols:
assert mol in result.molecules
assert result.n_molecules > len(mols)
| 9,541
|
def test_set_humidity_level(gateway, add_sensor):
"""Test set humidity level."""
sensor = add_sensor(1)
sensor.add_child_sensor(1, gateway.const.Presentation.S_HUM)
gateway.logic('1;1;1;0;1;75\n')
assert sensor.children[1].values[gateway.const.SetReq.V_HUM] == '75'
| 9,542
|
def folder_datetime(foldername, time_infolder_fmt=TIME_INFOLDER_FMT):
"""Parse UTC datetime from foldername.
Foldername e.g.: hive1_rpi1_day-190801/
"""
# t_str = folder.name.split("Photos_of_Pi")[-1][2:] # heating!!
t_str = foldername.split("day-")[-1]
day_naive = datetime.strptime(t_str, time_infolder_fmt)
# # Localize as UTC
# day_local = local_tz.localize(day_naive)
# dt_utc = day_local.astimezone(pytz.utc)
day_utc = pytz.utc.localize(day_naive)
return day_utc
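# Hedged usage sketch, assuming TIME_INFOLDER_FMT is "%y%m%d" as the
# "day-190801" example in the docstring suggests:
# folder_datetime("hive1_rpi1_day-190801")
# -> datetime.datetime(2019, 8, 1, 0, 0, tzinfo=<UTC>)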
| 9,543
|
def get_policy_profile_by_name(name, db_session=None):
"""
Retrieve policy profile by name.
:param name: string representing the name of the policy profile
:param db_session: database session
:returns: policy profile object
"""
db_session = db_session or db.get_session()
vsm_hosts = config.get_vsm_hosts()
pp = n1kv_models.PolicyProfile
pprofiles = db_session.query(pp).filter(
sql.and_(pp.name == name, pp.vsm_ip.in_(vsm_hosts))).all()
if pprofiles and check_policy_profile_exists_on_all_vsm(pprofiles,
vsm_hosts):
return pprofiles[0]
else:
raise n1kv_exc.PolicyProfileNotFound(profile=name)
| 9,544
|
def ecg_hrv_assessment(hrv, age=None, sex=None, position=None):
"""
Correct HRV features based on normative data from Voss et al. (2015).
Parameters
----------
hrv : dict
HRV features obtained by :function:`neurokit.ecg_hrv`.
age : float
Subject's age.
sex : str
Subject's gender ("m" or "f").
position : str
Recording position. To compare with data from Voss et al. (2015), use "supine".
Returns
----------
hrv_adjusted : dict
Adjusted HRV features.
Example
----------
>>> import neurokit as nk
>>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks)
>>> ecg_hrv_assessment = nk.bio_ecg.ecg_hrv_assessment(hrv)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Details*
- **Adjusted HRV**: The raw HRV features are normalized :math:`(raw - Mcluster) / sd` according to the participant's age and gender. In data from Voss et al. (2015), HRV analysis was performed on 5-min ECG recordings (lead II and lead V2 simultaneously, 500 Hz sampling rate) obtained in supine position after a 5–10 minutes resting phase. The cohort of healthy subjects consisted of 782 women and 1124 men between the ages of 25 and 74 years, clustered into 4 groups: YF (Female, Age = [25-49], n=571), YM (Male, Age = [25-49], n=744), EF (Female, Age = [50-74], n=211) and EM (Male, Age = [50-74], n=571).
References
-----------
- Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
"""
hrv_adjusted = {}
if position == "supine":
if sex == "m":
if age <= 49:
hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"]-930)/133
hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"]-45.8)/18.8
hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"]-34.0)/18.3
hrv_adjusted["LF_Adjusted"] = (hrv["LF"]-203)/262
hrv_adjusted["HF_Adjusted"] = (hrv["HF"]-101)/143
hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"]-3.33)/3.47
else:
hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"]-911)/128
hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"]-33.0)/14.8
hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"]-20.5)/11.0
hrv_adjusted["LF_Adjusted"] = (hrv["LF"]-84)/115
hrv_adjusted["HF_Adjusted"] = (hrv["HF"]-29.5)/36.6
hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"]-4.29)/4.06
if sex == "f":
if age <= 49:
hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"]-901)/117
hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"]-44.9)/19.2
hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"]-36.5)/20.1
hrv_adjusted["LF_Adjusted"] = (hrv["LF"]-159)/181
hrv_adjusted["HF_Adjusted"] = (hrv["HF"]-125)/147
hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"]-2.75)/2.93
else:
hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"]-880)/115
hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"]-31.6)/13.6
hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"]-22.0)/13.2
hrv_adjusted["LF_Adjusted"] = (hrv["LF"]-66)/83
hrv_adjusted["HF_Adjusted"] = (hrv["HF"]-41.4)/72.1
hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"]-2.09)/2.05
return hrv_adjusted
| 9,545
|
def declared_attr_roles(rw=None, call=None, read=None, write=None):
"""
Equivalent of :func:`with_roles` for use with ``@declared_attr``::
@declared_attr
@declared_attr_roles(read={'all'})
def my_column(cls):
return Column(Integer)
While :func:`with_roles` is always the outermost decorator on properties
and functions, :func:`declared_attr_roles` must appear below
``@declared_attr`` to work correctly.
.. deprecated:: 0.6.1
Use :func:`with_roles` instead. It works for
:class:`~sqlalchemy.ext.declarative.declared_attr` since 0.6.1
"""
def inner(f):
@wraps(f)
def attr(cls):
# Pass f(cls) as a parameter to with_roles.inner to avoid the test for
# iterables within with_roles. We have no idea about the use cases for
# declared_attr in downstream code. There could be a declared_attr
# that returns a list that should be accessible via the proxy.
return with_roles(rw=rw, call=call, read=read, write=write)(f(cls))
return attr
warnings.warn("declared_attr_roles is deprecated; use with_roles", stacklevel=2)
return inner
| 9,546
|
def _in_iterating_context(node):
"""Check if the node is being used as an iterator.
Definition is taken from lib2to3.fixer_util.in_special_context().
"""
parent = node.parent
# Since a call can't be the loop variant we only need to know if the node's
# parent is a 'for' loop to know it's being used as the iterator for the
# loop.
if isinstance(parent, astroid.For):
return True
# Need to make sure the use of the node is in the iterator part of the
# comprehension.
elif isinstance(parent, astroid.Comprehension):
if parent.iter == node:
return True
# Various built-ins can take in an iterable or list and lead to the same
# value.
elif isinstance(parent, astroid.Call):
if isinstance(parent.func, astroid.Name):
parent_scope = parent.func.lookup(parent.func.name)[0]
if _is_builtin(parent_scope) and parent.func.name in _ACCEPTS_ITERATOR:
return True
elif isinstance(parent.func, astroid.Attribute):
if parent.func.attrname == 'join':
return True
# If the call is in an unpacking, there's no need to warn,
# since it can be considered iterating.
elif (isinstance(parent, astroid.Assign) and
isinstance(parent.targets[0], (astroid.List, astroid.Tuple))):
if len(parent.targets[0].elts) > 1:
return True
return False
| 9,547
|
def to_point(obj):
"""Convert `obj` to instance of Point."""
if obj is None or isinstance(obj, Point):
return obj
if isinstance(obj, str):
obj = obj.split(",")
return Point(*(int(i) for i in obj))
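# Hedged usage sketch (Point is the surrounding library's point type and is
# assumed to accept integer coordinates):
# to_point("12, 34")      -> Point(12, 34)
# to_point(None)          -> None
# to_point(point_object)  -> point_object, returned unchanged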
| 9,548
|
def writeTable(filename='logs/table.txt', methods={'planenet_normal': 'PlaneNet', 'warping_normal_pair': 'Ours', 'basic_normal_backup': 'Ours (w/o warping loss)', 'warping_normal_none_pair': 'Ours (w/o normal anchors', 'warping_joint_pair': 'Ours (w/o depth map)'}, cols=[20, 19, 21, 32, 38, 44], dataset=''):
"""Write the comparison table (Table 2)"""
method_statistics = {}
with open('logs/global.txt', 'r') as f:
for line in f:
tokens = line.split(' ')
method = tokens[1].strip()
if len(tokens) > max(cols) and method in methods and tokens[0].strip()[:-1] == dataset:
method_statistics[method] = [float(tokens[c].strip()) for c in cols]
pass
continue
pass
with open(filename, 'w') as f:
for k, values in method_statistics.items():
f.write(methods[k])
for v in values:
f.write(' & %0.3f'%v)
continue
f.write(' \\\\\n')
continue
pass
return
| 9,549
|
def issues(request, project_id):
"""问题栏"""
if request.method == "GET":
# Filter conditions -- parameters are filtered via the GET query string
allow_filter_name = ['issues_type', 'status', 'priority', 'assign', 'attention']
condition = {}  # filter conditions
for name in allow_filter_name:
value_list = request.GET.getlist(name)
if not value_list:
continue
condition['{}__in'.format(name)] = value_list
# Fetch data with pagination
form = IssuesModelForm(request)
issues_obj = Issues.objects.filter(project=request.tracer.project).filter(**condition)
page_object = Pagination(
current_page=request.GET.get('page'),
all_count=issues_obj.count(),
base_url=request.path_info,
query_params=request.GET,
per_page=3,
)
issues_object_list = issues_obj[page_object.start:page_object.end]
project_total_user = [(request.tracer.project.create_user_id, request.tracer.project.create_user.username,)]
join_user = ProjectUser.objects.filter(project_id=project_id).values_list('user_id', 'user__username')
project_total_user.extend(join_user)
invite_form = InviteModelForm(data=request.POST)
context = {
'form': form,
'invite_form': invite_form,
'issues_object_list': issues_object_list,
'page_html': page_object.page_html(),
'filter_list': [
{'title': '问题类型', 'filter': CheckFilter('issues_type',
IssuesType.objects.filter(project_id=project_id).values_list(
'id',
'title'),
request)},
{'title': '状态', 'filter': CheckFilter('status', Issues.STATUS_CHOICES, request)},
{'title': '优先级', 'filter': CheckFilter('priority', Issues.PRIORITY_CHOICES, request)},
{'title': '指派者', 'filter': SelectFilter('assign', project_total_user, request)},
{'title': '关注者', 'filter': SelectFilter('attention', project_total_user, request)},
]
}
return render(request, 'web/issues.html', context)
if request.method == "POST":
form = IssuesModelForm(request, data=request.POST)
if form.is_valid():
# Save the new issue
form.instance.project = request.tracer.project
form.instance.create_user = request.tracer.user
form.save()
return JsonResponse({'code': 200})
return JsonResponse({'msg': form.errors, 'code': 416})
| 9,550
|
def get_30mhz_rht_data(sensor_id):
"""
Produces a JSON with the 30MHz RH & T sensor data for a specified sensor.
Args:
sensor_id - 30MHz sensor ID
Returns:
result - JSON string
"""
dt_from, dt_to = parse_date_range_argument(request.args.get("range"))
query = (
db.session.query(
ReadingsZensieTRHClass.sensor_id,
ReadingsZensieTRHClass.timestamp,
ReadingsZensieTRHClass.temperature,
ReadingsZensieTRHClass.humidity,
ReadingsZensieTRHClass.time_created,
ReadingsZensieTRHClass.time_updated,
)
.filter(
and_(
ReadingsZensieTRHClass.sensor_id == sensor_id,
ReadingsZensieTRHClass.timestamp >= dt_from,
ReadingsZensieTRHClass.timestamp <= dt_to,
)
)
.order_by(desc(ReadingsZensieTRHClass.timestamp))
)
execute_result = db.session.execute(query).fetchall()
result = jasonify_query_result(execute_result)
return result
| 9,551
|
def stock_em_gpzy_industry_data() -> pd.DataFrame:
"""
Eastmoney - Data Center - Featured Data - Equity Pledge - Listed Company Pledge Ratio - Industry Data
http://data.eastmoney.com/gpzy/industryData.aspx
:return: pandas.DataFrame
"""
url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
page_num = _get_page_num_gpzy_industry_data()
temp_df = pd.DataFrame()
for page in range(1, page_num + 1):
print(f"一共{page_num}页, 正在下载第{page}页")
params = {
"type": "ZD_HY_SUM",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "amtshareratio_pj",
"sr": "-1",
"p": str(page),
"ps": "5000",
"js": "var SIqThurI={pages:(tp),data:(x),font:(font)}",
"rt": "52584617",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
map_dict = dict(
zip(
pd.DataFrame(data_json["font"]["FontMapping"])["code"],
pd.DataFrame(data_json["font"]["FontMapping"])["value"],
)
)
for key, value in map_dict.items():
data_text = data_text.replace(key, str(value))
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
temp_df.columns = [
"统计时间",
"-",
"行业",
"平均质押比例(%)",
"公司家数",
"质押总笔数",
"质押总股本",
"最新质押市值",
]
temp_df = temp_df[["统计时间", "行业", "平均质押比例(%)", "公司家数", "质押总笔数", "质押总股本", "最新质押市值"]]
temp_df["统计时间"] = pd.to_datetime(temp_df["统计时间"])
return temp_df
| 9,552
|
def to_float32(x: tf.Tensor) -> tf.Tensor:
"""Cast the given tensor to float32.
Args:
x: The tensor of any type.
Returns:
The tensor casts to float32.
"""
return tf.cast(x, tf.float32)
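# Hedged usage sketch (tf is the TensorFlow module imported by this module):
# to_float32(tf.constant([1, 2, 3])).dtype  # -> tf.float32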
| 9,553
|
def _read_and_load_options():
"""
Read file and add all settings to dict
:return: None
"""
parser = SimpleConfigParser()
parser.read(_config_file)
global _settings
_settings = {}
for item in parser.items(_DEFAULT_SECTION):
_settings[item[0]] = item[1]
| 9,554
|
def toDerivative(
data,
derivativeType=2,
normalize=-1
):
"""
@deprecated function moved to @ref dspUtil
"""
raise Exception("toDerivative(...) has been moved to the module dspUtil")
| 9,555
|
def cli_usage(name=None):
"""
custom usage message to override `cli.py`
"""
return """
{logo}
usage: signalyze [-h] [-o OUTPUT] [--show-name] [-b | -w | -all] [--show-graph | --show-extra-info]
""".format(logo=get_logo())
| 9,556
|
def unban_chat_member(chat_id, user_id, **kwargs):
"""
Use this method to unban a previously kicked user in a supergroup. The user will not return to the group automatically,
but will be able to join via link, etc. The bot must be an administrator in the group for this to work
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param user_id: Unique identifier of the target user
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type user_id: int
:returns: Returns True on success.
:rtype: bool
"""
# required args
params = dict(
chat_id=chat_id,
user_id=user_id,
)
return TelegramBotRPCRequest('unbanChatMember', params=params, on_result=lambda result: result, **kwargs)
| 9,557
|
def parse_progress_line(prefix: str, line: str) -> Optional[float]:
"""Extract time in seconds from a prefixed string."""
regexp = prefix + r"(?P<hours>\d+):(?P<minutes>\d{2}):(?P<seconds>\d{2}.\d{2})"
match = re.search(regexp, line)
if not match:
return None
return (
int(match.group("hours")) * 3600
+ int(match.group("minutes")) * 60
+ float(match.group("seconds"))
)
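# Hedged usage sketch against an ffmpeg-style progress line; the "time="
# prefix is an illustrative choice, not fixed by the function:
line = "frame=  120 fps= 30 time=00:01:05.50 bitrate=1024.0kbits/s"
assert parse_progress_line("time=", line) == 65.5
assert parse_progress_line("time=", "no timestamp here") is None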
| 9,558
|
def convert_array_to_df(emission_map):
"""
This function converts the emission map dict to a DataFrame where
- 'emission_map' is a dictionary containing at least 'z_var_ave', 'count_var', 'std_var', 'q25_var' and 'q75_var'
"""
def reform_df(df, nr):
"""This subfunction will reform the format of the dataframe is such a way that it can be saved in the .map.txt
file later on. The expected input is:
df: pd.DataFrame
nr: int
The output is a dataframe that contains all the data of one map and is ready to be written to a .map.txt file"""
df_temp_fin = pd.DataFrame()
for key, value in df.items():
df_temp = pd.DataFrame()
df_temp['Y'] = value.index # CO2_mF [g/s]
df_temp['X'] = key # velocity_filtered [km/h]
df_temp['Z{}'.format(nr)] = df[key].values # avgNOx [mg/s]
df_temp = df_temp[['X', 'Y', 'Z{}'.format(nr)]]
df_temp_fin = df_temp_fin.append(df_temp)
return df_temp_fin
numbering = {'z_var_ave': 1, 'std_var': 2, 'q25_var': 3,
'q75_var': 4, 'count_var': 5}
map_df = []
for var in numbering.keys():
if type(emission_map[var]) == np.ndarray:
map = emission_map[var]
x_axis = np.arange(emission_map['binsizes'][0],
emission_map['binsizes'][0] * map.shape[1] + 1,
emission_map['binsizes'][0])
y_axis = np.arange(emission_map['binsizes'][1],
(emission_map['binsizes'][1] * map.shape[0]) + emission_map['binsizes'][1],
emission_map['binsizes'][1])
# check if shape of axis and indices are the same
if map.shape[1] != len(x_axis):
x_axis = x_axis[:map.shape[1]]
elif map.shape[0] != len(y_axis):
y_axis = y_axis[:map.shape[0]]
## Make Table for .map.txt outputfile
df = pd.DataFrame(data=map, index=y_axis, columns=x_axis)
reformed_df = reform_df(df, numbering[var])
map_df.append(reformed_df)
final_df = reduce(lambda left, right: pd.merge(left, right, on=['X', 'Y']), map_df)
return final_df
| 9,559
|
def add_missing_flows(data):
"""There are some flows not given in ReCiPe that seem like they should be there, given the relatively coarse precision of these CFs."""
new_cfs = {
"managed forest": {
"amount": 0.3,
"flows": [
"occupation, forest, unspecified",
"occupation, field margin/hedgerow",
],
},
"annual crops": {
"amount": 1.0,
"flows": [
"occupation, annual crop, flooded crop",
"occupation, annual crop, irrigated, extensive",
],
},
"pasture": {
"amount": 0.55,
"flows": [
"occupation, arable land, unspecified use",
"occupation, grassland, natural, for livestock grazing",
"occupation, heterogeneous, agricultural",
],
},
"artificial area": {"amount": 0.73, "flows": [],},
"permanent crops": {
"amount": 0.7,
"flows": [
"occupation, permanent crop, irrigated",
"occupation, permanent crop, irrigated, extensive",
"occupation, permanent crop, non-irrigated",
"occupation, permanent crop, non-irrigated, extensive",
],
},
}
""" The following were included in an earlier version of ReCiPe, but are skipped here, as we don't have enough info to use them consistently:
* 'occupation, bare area (non-use)',
* 'occupation, cropland fallow (non-use)',
* 'occupation, forest, primary (non-use)',
* 'occupation, forest, secondary (non-use)',
* 'occupation, inland waterbody, unspecified',
* 'occupation, lake, natural (non-use)',
* 'occupation, river, natural (non-use)',
* 'occupation, seabed, natural (non-use)',
* 'occupation, seabed, unspecified',
* 'occupation, snow and ice (non-use)',
* 'occupation, unspecified',
* 'occupation, unspecified, natural (non-use)',
* 'occupation, wetland, coastal (non-use)',
* 'occupation, wetland, inland (non-use)'
"""
for ds in data:
ds["exchanges"].extend(
[
{"name": flow, "amount": obj["amount"]}
for obj in new_cfs.values()
for flow in obj["flows"]
]
)
return data
| 9,560
|
def graph_x(EC1, EM1, EY1, x01, parlist):
""" This function serves as an interactive visualization tool showing how material inputs change
in sector 1 and 2, in two separate graphs, if we change the elasticities of substitution (EC, EM, EY)
Args:
x01: initial guess vector; if the guess is poor the model won't be solved
EC1: elasticity of substitution parameter, included in order to be changed
EM1: elasticity of substitution parameter, included in order to be changed
EY1: elasticity of substitution parameter, included in order to be changed
parlist: a list with parameters
"""
x0=x01
c=[]
d=[]
for i in range(1,300):
Grow_theta1=(1+0.03)**(i-1)
Grow_theta2= (1+0.01)**(i-1)
Grow_theta1m=1
Grow_theta2m=1
result_loop3 = solve_model(x0, EC1, EM1, EY1, Grow_theta1, Grow_theta2, parlist, Grow_theta1m, Grow_theta2m)
x0=result_loop3
c.append(result_loop3[10:14:2]) #setting the variables of interest (x11, x21) for sector 1
d.append(result_loop3[11:15:2]) #setting the variables of interest (x11, x21) for sector 2
fig, axs = plt.subplots(2)
# plotting the data
axs[0].plot(c)
axs[0].set_title('sector 1 material inputs')
axs[0].legend(['sector 1 good', 'sector 2 good'])
axs[1].plot(d)
axs[1].set_title('sector 2 material inputs')
axs[1].legend(['sector 1 good', 'sector 2 good'])
# naming the axes
plt.xlabel('Period')
fig.tight_layout()
| 9,561
|
def count_features(sm):
"""
Counts reads mapped to features such as KOs, PFAMs etc.
:param sm:
:return:
"""
feature_sum = sum_to_features(sm.input.abund, sm.input.annot[0])
feature_sum.to_csv(sm.output[0], sep="\t")
| 9,562
|
def test_get_config_verbose_parser(fxtr_setup_logger_environment):
"""Test: get_config_verbose_parser()."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_VERBOSE_PARSER, "aLL"),
],
)
cfg.glob.setup = cfg.setup.Setup()
assert cfg.glob.setup.verbose_parser == "all", "DCR_CFG_VERBOSE_PARSER: all"
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_VERBOSE_PARSER, cfg.glob.INFORMATION_NOT_YET_AVAILABLE),
],
)
cfg.glob.setup = cfg.setup.Setup()
assert cfg.glob.setup.verbose_parser == "none", "DCR_CFG_VERBOSE_PARSER: none (not all or text)"
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_VERBOSE_PARSER, "tEXT"),
],
)
cfg.glob.setup = cfg.setup.Setup()
assert cfg.glob.setup.verbose_parser == "text", "DCR_CFG_VERBOSE_PARSER: all"
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
| 9,563
|
def repl():
"""
REPL: Read-Eval-Print Loop
Response based on input from do_action
Args:
None
Return:
None
"""
with open("title_screen.txt", encoding="utf8") as file_descriptor:
contents = file_descriptor.read()
print(contents)
current_room = actions.GAMESTATE.current_position()
output = ("Type a command or type 'help' for a list of commands.\n\n"
+ current_room + "\n"
+ actions.GAMESTATE.data["Description"][current_room] +
"\n\n>> ")
while True:
user_input = input(output)
if 'quit' in user_input:
print('You have quit.\n Goodbye!')
break
else:
output = "\n" + evaluate(user_input) + "\n\n>> "
| 9,564
|
def test_list_parameters(database):
"""List parameters mapped as expected."""
query = 'query'
parameters = [
'message',
'count',
'nested',
'nested.message',
'unknown',
'nested.unknown',
'message.unknown',
'count.unknown',
]
batch = [
{
'message': '<message>',
'count': 42,
'nested': {
'message': '<nested_message>',
},
},
]
database.connection = Mock()
database.batch_ready_cb('<sender>', query, parameters, batch)
database.connection.execute.assert_called_once_with(
query,
[
[
'<message>',
42,
'{"message": "<nested_message>"}',
'<nested_message>',
None,
None,
None,
None,
],
],
)
| 9,565
|
def cglb_conjugate_gradient(
K: TensorType,
b: TensorType,
initial: TensorType,
preconditioner: NystromPreconditioner,
cg_tolerance: float,
max_steps: int,
restart_cg_step: int,
) -> tf.Tensor:
"""
Conjugate gradient algorithm used in CGLB model. The method of
conjugate gradient (Hestenes and Stiefel, 1952) produces a
sequence of vectors :math:`v_0, v_1, v_2, ..., v_N` such that
:math:`v_0` = initial, and (in exact arithmetic)
:math:`Kv_n = b`. In practice, the v_i often converge quickly to
approximate :math:`K^{-1}b`, and the algorithm can be stopped
without running N iterations.
We assume the preconditioner, :math:`Q`, satisfies :math:`Q ≺ K`,
and stop the algorithm when :math:`r_i = b - Kv_i` satisfies
:math:`||rᵢ||²_{Q⁻¹} = rᵢᵀQ⁻¹rᵢ <= ϵ`.
:param K: Matrix we want to backsolve from. Must be PSD. Shape [N, N].
:param b: Vector we want to backsolve. Shape [B, N].
:param initial: Initial vector solution. Shape [N].
:param preconditioner: Preconditioner function.
:param cg_tolerance: Expected maximum error. This value is used
as a decision boundary against stopping criteria.
:param max_steps: Maximum number of CG iterations.
:param restart_cg_step: Restart step at which the CG resets the
internal state to the initial position using the current
solution vector :math:`v`. Can help avoid build up of
numerical errors.
:return: `v` where `v` approximately satisfies :math:`Kv = b`.
"""
CGState = namedtuple("CGState", ["i", "v", "r", "p", "rz"])
def stopping_criterion(state: CGState) -> bool:
return (0.5 * state.rz > cg_tolerance) and (state.i < max_steps)
def cg_step(state: CGState) -> List[CGState]:
Ap = state.p @ K
denom = tf.reduce_sum(state.p * Ap, axis=-1)
gamma = state.rz / denom
v = state.v + gamma * state.p
i = state.i + 1
r = tf.cond(
state.i % restart_cg_step == restart_cg_step - 1,
lambda: b - v @ K,
lambda: state.r - gamma * Ap,
)
z, new_rz = preconditioner(r)
p = tf.cond(
state.i % restart_cg_step == restart_cg_step - 1,
lambda: z,
lambda: z + state.p * new_rz / state.rz,
)
return [CGState(i, v, r, p, new_rz)]
Kv = initial @ K
r = b - Kv
z, rz = preconditioner(r)
p = z
i = tf.constant(0, dtype=default_int())
initial_state = CGState(i, initial, r, p, rz)
final_state = tf.while_loop(stopping_criterion, cg_step, [initial_state])
final_state = tf.nest.map_structure(tf.stop_gradient, final_state)
return final_state[0].v
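# A minimal plain-NumPy sketch (not part of the original source) of the same
# conjugate-gradient idea, using an identity preconditioner (Q = I) so the
# stopping quantity r.T Q^{-1} r reduces to r.T r. The demo matrix, vector and
# tolerance below are illustrative assumptions only.
import numpy as np

def cg_demo(K, b, tol=1e-8, max_steps=100):
    """Solve K v = b for a symmetric positive-definite K with plain CG."""
    v = np.zeros_like(b)
    r = b - K @ v            # residual
    p = r.copy()             # search direction
    rz = r @ r               # r.T Q^{-1} r with Q = I
    i = 0
    while 0.5 * rz > tol and i < max_steps:
        Kp = K @ p
        gamma = rz / (p @ Kp)
        v = v + gamma * p
        r = r - gamma * Kp
        new_rz = r @ r
        p = r + (new_rz / rz) * p
        rz = new_rz
        i += 1
    return v

K_demo = np.array([[4.0, 1.0], [1.0, 3.0]])
b_demo = np.array([1.0, 2.0])
v_demo = cg_demo(K_demo, b_demo)  # approximately np.linalg.solve(K_demo, b_demo)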
| 9,566
|
def crb_ax(n, Ndim=6, vary=['halo', 'bary', 'progenitor'], align=True, fast=False):
"""Calculate CRB inverse matrix for 3D acceleration at position x in a halo potential"""
pid, dp, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# subset halo parameters
Nhalo = 4
cq = cx[:Nhalo,:Nhalo]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
xi = np.array([-8.3, 0.1, 0.1])*u.kpc
x0, v0 = gd1_coordinates()
#xi = np.array(x0)*u.kpc
d = 50
Nb = 20
x = np.linspace(x0[0]-d, x0[0]+d, Nb)
y = np.linspace(x0[1]-d, x0[1]+d, Nb)
x = np.linspace(-d, d, Nb)
y = np.linspace(-d, d, Nb)
xv, yv = np.meshgrid(x, y)
xf = np.ravel(xv)
yf = np.ravel(yv)
af = np.empty((Nb**2, 3))
plt.close()
fig, ax = plt.subplots(3,3,figsize=(11,10))
dimension = ['x', 'y', 'z']
xlabel = ['y', 'x', 'x']
ylabel = ['z', 'z', 'y']
for j in range(3):
if j==0:
xin = np.array([np.repeat(x0[j], Nb**2), xf, yf]).T
elif j==1:
xin = np.array([xf, np.repeat(x0[j], Nb**2), yf]).T
elif j==2:
xin = np.array([xf, yf, np.repeat(x0[j], Nb**2)]).T
for i in range(Nb**2):
#xi = np.array([xf[i], yf[i], x0[2]])*u.kpc
xi = xin[i]*u.kpc
a = acc_nfw(xi)
dqda = halo_accelerations(xi)
cai = np.matmul(dqda, np.matmul(cqi, dqda.T))
if fast:
ca = np.linalg.inv(cai)
else:
ca = stable_inverse(cai)
a_crb = (np.sqrt(np.diag(ca)) * u.km**2 * u.kpc**-1 * u.s**-2).to(u.pc*u.Myr**-2)
af[i] = np.abs(a_crb/a)
af[i] = a_crb
for i in range(3):
plt.sca(ax[j][i])
im = plt.imshow(af[:,i].reshape(Nb,Nb), extent=[-d, d, -d, d], cmap=mpl.cm.gray) #, norm=mpl.colors.LogNorm(), vmin=1e-2, vmax=0.1)
plt.xlabel(xlabel[j]+' (kpc)')
plt.ylabel(ylabel[j]+' (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("top", size="4%", pad=0.05)
plt.colorbar(im, cax=cax, orientation='horizontal')
plt.gca().xaxis.set_ticks_position('top')
cax.tick_params(axis='x', labelsize='xx-small')
if j==0:
plt.title('a$_{}$'.format(dimension[i]), y=4)
plt.tight_layout(rect=[0,0,1,0.95])
plt.savefig('../plots/acc_{}_{}_{}.png'.format(n, vlabel, Ndim))
| 9,567
|
def build_docker_build(latest=True):
"""Create command used to (re)build the container.
We store the Dockerfile (as that name)
in dir .next or .latest so that we can
have various templates and assets and so on
in the 'context' directory.
"""
tmpl = "{} build -t {{tagname}}:{{tagtag}} {{pathtodockerfile}}".format(OCI_CMD)
_latest = LATEST if latest else NEXT
pathtodockerfile = os.path.join(CONFD["devstation_config_root"], "." + _latest)
return tmpl.format(
tagname=CONFD["tagname"], tagtag=_latest, pathtodockerfile=pathtodockerfile
)
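# Illustrative sketch (not part of the original source). With hypothetical
# module-level values OCI_CMD = "docker", LATEST = "latest" and
# CONFD = {"devstation_config_root": "/home/me/devstation", "tagname": "devstation"},
# the call below would return roughly:
#   "docker build -t devstation:latest /home/me/devstation/.latest"
# build_cmd = build_docker_build(latest=True)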
| 9,568
|
def motif_compare(args):
"""Compare PSSMs of filter motifs."""
# create output directory
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
# load training data to determine background nucleotide content
train_samples = np.load(args.train_data, mmap_mode='r')
probs = np.mean(np.mean(train_samples, axis=1), axis=0)
bg = {'A': probs[0], 'C': probs[1], 'G': probs[2], 'T': probs[3]}
# load all filter motifs from first file
with open(args.in_file1) as handle:
records1 = transfac.read(handle)
# load all filter motifs from second file
with open(args.in_file2) as handle:
records2 = transfac.read(handle)
# convert motifs to pssm's
pssms1 = {}
pssms2 = {}
rc_pssms2 = {}
for idx, m1 in enumerate(records1):
pwm1 = m1.counts.normalize(pseudocounts=bg)
pssm1 = pwm1.log_odds(background=bg)
pssms1[m1.get("ID")] = pssm1
for idx, m2 in enumerate(records2):
pwm2 = m2.counts.normalize(pseudocounts=bg)
pssm2 = pwm2.log_odds(background=bg)
pssms2[m2.get("ID")] = pssm2
# build reverse complement
if args.rc:
rc_pssm2 = pssm2.reverse_complement()
rc_pssms2[m2.get("ID")] = rc_pssm2  # key by motif ID so the lookup below (rc_pssms2[idx2]) matches
result_table = []
# compare motifs
for idx1, pssm1 in pssms1.items():
for idx2, pssm2 in pssms2.items():
if args.extensively or idx1 == idx2:
row = [idx1, idx2]
for measure in [pearsonr, spearmanr]:
cor, p_value, offset = get_motif_similarity(measure, pssm1, pssm2,
args.min_overlap if args.shift else pssm1.length)
orientation = "+"
if args.rc:
rc_pssm2 = rc_pssms2[idx2]
cor_rc, p_value_rc, offset_rc = get_motif_similarity(measure, pssm1, rc_pssm2,
args.min_overlap if args.shift else pssm1.length)
# if cor < cor_rc:
if p_value > p_value_rc:
cor, p_value, offset, orientation = cor_rc, p_value_rc, offset_rc, "-"
row.extend([cor, p_value, offset, orientation])
result_table.append(row)
# write results to output file
out_file_name = args.out_dir + "/correlation_motifs" + ("_extensively" if args.extensively else "") + (
"_rc" if args.rc else "") + ("_shift_min_overlap=" + str(args.min_overlap) if args.shift else "") + ".txt"
with open(out_file_name, 'w') as csv_file:
file_writer = csv.writer(csv_file, delimiter="\t")
file_writer.writerow(["ID1", "ID2", "cor_pearson", "p_value_pearson", "offset_pearson", "orientation_pearson",
"cor_spearman", "p_value_spearman", "offset_spearman", "orientation_spearman"])
for row in result_table:
file_writer.writerow(row)
| 9,569
|
def iter_local_job_status(s3_scratch_prefix: str, job_id2job: Dict[str, "Job"]) -> Iterator[dict]:
"""
Yields status information for local Docker jobs that have finished running.
"""
running_containers = subprocess.check_output(["docker", "ps", "--no-trunc"]).decode("utf8")
for job_id, redun_job in job_id2job.items():
if job_id not in running_containers:
# Job is done running.
status_file = File(
aws_utils.get_job_scratch_file(
s3_scratch_prefix, redun_job, aws_utils.S3_SCRATCH_STATUS
)
)
output_file = File(
aws_utils.get_job_scratch_file(
s3_scratch_prefix, redun_job, aws_utils.S3_SCRATCH_OUTPUT
)
)
# Get docker logs and remove container.
logs = subprocess.check_output(["docker", "logs", job_id]).decode("utf8")
logs += "Removing container...\n"
logs += subprocess.check_output(["docker", "rm", job_id]).decode("utf8")
# TODO: Simplify whether status file is always used or not.
if status_file.exists():
succeeded = status_file.read().strip() == "ok"
else:
succeeded = output_file.exists()
status = SUCCEEDED if succeeded else FAILED
yield {"jobId": job_id, "status": status, "logs": logs}
| 9,570
|
def show_menu_items(category, items):
"""Takes a category name and item list and prints them out.
input: category (str)
input: items (list)
returns: none
"""
# Print category name
print(category.title())
print('-' * len(category))
for item in items:
# Print each item in title case
print(item.title())
print()
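# Minimal usage sketch (not part of the original source); the menu data is made up:
#
#   >>> show_menu_items("appetizers", ["garlic bread", "bruschetta"])
#   Appetizers
#   ----------
#   Garlic Bread
#   Bruschetta
#
# (followed by a blank line from the trailing print())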
| 9,571
|
def scrape_new_thread(thread_name, url):
"""Scrape data for a thread that isn't already in our database."""
logger.debug(f"Start of scrape_new_thread for {thread_name}, {url}")
# URL Validation
# TODO: write this function, then hand it off to scrape_existing_thread()
logger.debug("Now that the thread exists in our db, hand it off to scrape_existing_thread()")
scrape_existing_thread(thread_name)
logger.debug("End of scrape_new_thread")
return render_template('scrape_new_thread.html', title='Browse TalkBeer BIFs')
| 9,572
|
def num_fixed_points(permutation):
"""
Compute the number of fixed points (elements mapping to themselves) of a permutation.
:param permutation: Permutation in one-line notation (length n tuple of the numbers 0, 1, ..., n-1).
:return: Number of fixed points in the permutation.
.. rubric:: Examples
>>> num_fixed_points((0, 2, 1))
1
"""
n = 0
for i in range(len(permutation)):
if permutation[i] == i:
n += 1
return n
| 9,573
|
def load_messages(path, unique, verbose):
""" Loads messages from the corpus and returns them as Message objects """
messages = []
signatures = set()
for root, _, files in os.walk(path):
if verbose:
print("Processing {}".format(root))
for message_file in files:
message = read_message(os.path.join(root, message_file))
if unique:
sig = (message.sender, message.recipients, message.timestamp, message.subject, message.body)
if sig in signatures:
continue
signatures.add(sig)
messages.append(message)
return messages
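# Hedged usage sketch (not part of the original source); the corpus path is a
# hypothetical placeholder, and `read_message` is assumed to return objects with
# sender/recipients/timestamp/subject/body attributes as used above.
# messages = load_messages("maildir/", unique=True, verbose=True)
# senders = {m.sender for m in messages}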
| 9,574
|
def backup(project=None, force=False):
"""Perform pending backups"""
if project:
name = project
project = ProjectsManager.get_project_by_name(name)
if not project:
click.echo('Project with name "%s" is not installed!' % name)
return
projects = [project]
else:
projects = ProjectsManager.get_projects()
if not projects:
click.echo('No projects installed!')
return
__process_backups(projects, force)
| 9,575
|
def mutate(grid):
"""
Alters the cycle by breaking it into two separate circuits, and then fusing
them back together to recreate a (slightly different) cycle.
This operation is called "sliding" in 'An Algorithm for Finding Hamiltonian
Cycles in Grid Graphs Without Holes', and it's specifically mentioned
because it is insufficient if you want to be able to reach all possible cycles
for a given starting graph. That condition isn't really relevant to this
project, so I use sliding since it's much easier to implement.
"""
working_grid = grid.copy().astype(numpy.uint8)
above = shift_down(grid)
below = shift_up(grid)
left = shift_right(grid)
right = shift_left(grid)
# this mask highlights every grid location that could be turned off
candidates = numpy.logical_and(
numpy.logical_and(grid, above != left),
numpy.logical_and(above == below, left == right)
)
# the connected region is split into two
coord = pick_candidate(candidates)
flood_y, flood_x = coord
working_grid[coord] = 0
# find the right spot to label one of the regions '2'
if left[coord] == 1:
flood_x -= 1
elif right[coord] == 1:
flood_x += 1
elif above[coord] == 1:
flood_y -= 1
elif below[coord] == 1:
flood_y += 1
cv2.floodFill(
working_grid,
numpy.zeros([v + 2 for v in grid.shape], dtype=numpy.uint8),
(flood_x, flood_y),
2
)
above = shift_down(working_grid)
below = shift_up(working_grid)
left = shift_right(working_grid)
right = shift_left(working_grid)
x_neighbors = left + right
y_neighbors = above + below
# this mask highlights every grid location that can fuse the two regions
# back together while preserving a cycle
fuse_candidates = numpy.logical_and(
working_grid == 0,
numpy.logical_or(
numpy.logical_and(x_neighbors == 3, y_neighbors == 0),
numpy.logical_and(x_neighbors == 0, y_neighbors == 3)
)
)
fuse_location = pick_candidate(fuse_candidates)
grid[coord] = 0
grid[fuse_location] = 1
return grid
| 9,576
|
def reverse_permute(output_shape: np.array, order: np.array):
"""
Calculates Transpose op input shape based on output shape and permute order.
:param output_shape: Transpose output shape
:param order: permute order
:return: Transpose input shape corresponding to the specified output shape
"""
return int64_array(output_shape[PermuteAttrs.get_inverse_permutation(order)])
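# A plain-NumPy sketch (not part of the original source) of the inverse-permute
# idea this helper presumably relies on: if Transpose applies `order` to the
# input shape, the input shape can be recovered by indexing the output shape
# with the inverse permutation (np.argsort(order)).
import numpy as np

demo_input_shape = np.array([2, 3, 4, 5])
demo_order = np.array([0, 2, 3, 1])
demo_output_shape = demo_input_shape[demo_order]            # [2, 4, 5, 3]
recovered = demo_output_shape[np.argsort(demo_order)]       # back to [2, 3, 4, 5]
assert (recovered == demo_input_shape).all()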
| 9,577
|
def recursive_bisection(block, block_queue, epsilon_cut, depth_max, theta, lamb, delta, verbose=False):
"""Random cut and random converge
Args:
block_queue (multiprocessing.Queue): Shared queue to store blocks to be executed
Returns:
[{"range": {int: (int,int)}, "mondrian_budget": float, "depth": int}]
"""
# Random cut
if verbose:
print('Before cut', block.domain_dict)
if block.depth > depth_max:
axis, index = cut_random(block)
else:
axis, index = cut_exp_mech(block, epsilon_cut)
if verbose:
print(axis, index)
left_block, right_block = block.split(axis, index)
# Random converge
converged_block_results = []
if left_block.size() == 1:
converged_block_results.append(BlockResult(left_block.domain_dict, left_block.depth))
elif random_converge(left_block, left_block.depth, theta, lamb, delta):
converged_block_results.append(BlockResult(left_block.domain_dict, left_block.depth))
else:
block_queue.put(left_block)
if right_block.size() == 1:
converged_block_results.append(BlockResult(right_block.domain_dict, right_block.depth))
elif random_converge(right_block, right_block.depth, theta, lamb, delta):
converged_block_results.append(BlockResult(right_block.domain_dict, right_block.depth))
else:
block_queue.put(right_block)
return converged_block_results
| 9,578
|
def _get_global_step_read(graph=None):
"""Gets global step read tensor in graph.
Args:
graph: The graph in which to create the global step read tensor. If missing,
use default graph.
Returns:
Global step read tensor.
Raises:
RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.
"""
graph = graph or ops.get_default_graph()
global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)
if len(global_step_read_tensors) > 1:
raise RuntimeError('There are multiple items in collection {}. '
'There should be only one.'.format(GLOBAL_STEP_READ_KEY))
if len(global_step_read_tensors) == 1:
return global_step_read_tensors[0]
return None
| 9,579
|
def light_control() -> LightControl:
"""Returns the light_control mock object."""
mock_request = Mock()
mock_request.return_value = ""
return LightControl(mock_request)
| 9,580
|
def test_event():
"""Test pour la fonction event_handling."""
_, players = main.initialization(False)
# Test for KEYDOWN
cf.STATE = cf.State.ingame
for i, P in enumerate(players):
event = ut.make_event(ut.KEYDOWN, {'key': plyr.JUMP_KEYS[i]})
players = gml.event_handling(players, event, (0, 0))
assert P.vel.y == -cf.V_JMP
cf.CAPT = True
cf.STATE = cf.State.keyset
players = gml.event_handling(players,
ut.make_event(ut.KEYDOWN,
{'key': ut.K_ESCAPE}),
(0, 0))
assert not cf.CAPT
cf.CAPT_PLYR = 1
cf.CAPT = True
players = gml.event_handling(
players, ut.make_event(ut.KEYDOWN, {'key': ut.K_s}), (0, 0))
assert not cf.CAPT
assert plyr.JUMP_KEYS[1] == ut.K_s
cf.STATE = cf.State.gameover
cf.CAPT = True
mn.player_name_area.select()
players = gml.event_handling(
players, ut.make_event(ut.KEYDOWN, {'key': ut.K_s}), (0, 0))
assert mn.player_name_area.input == 'S'
players = gml.event_handling(
players, ut.make_event(ut.KEYDOWN, {'key': ut.K_RETURN}), (0, 0))
assert scre.PLAYER == 'S'
assert not cf.CAPT
assert not mn.player_name_area.selected
cf.NB_PLAYERS = 3
players = gml.reset_world()
# Tests for MOUSEBUTTONDOWN
cf.STATE = cf.State.menu
players[0].jump()
event = ut.make_event(ut.MOUSEBUTTONDOWN)
players = gml.event_handling(players, event, mn.Oneplayer_pos)
assert cf.NB_PLAYERS == 1
assert players[0].vel.y == 0
assert cf.STATE == cf.State.ingame
for bloc in spt.ground:
assert not bloc.FLAG_creation
cf.STATE = cf.State.menu
players = gml.event_handling(players, event, mn.Multiplayer_pos)
players = gml.event_handling(players, event,
mn.Multi_pos[cf.NB_PLAYERS_MAX - 2])
assert cf.NB_PLAYERS == 4
players = gml.event_handling(players, event, mn.Start_pos)
assert players[0].vel.y == 0
assert cf.STATE == cf.State.ingame
for bloc in spt.ground:
assert not bloc.FLAG_creation
cf.STATE = cf.State.menu
players = gml.event_handling(players, event, mn.Records_pos)
assert cf.STATE == cf.State.highscore
cf.STATE = cf.State.menu
players = gml.event_handling(players, event, mn.Settings_pos)
assert cf.STATE == cf.State.setup
cf.STATE = cf.State.languages
lg.set_lang(lg.AVAILABLE[0])
players = gml.event_handling(players, event, mn.Flag_pos[1])
assert cf.STATE == cf.State.menu
assert cf.LANG == lg.AVAILABLE[1]
cf.STATE = cf.State.languages
lg.set_lang(lg.AVAILABLE[1])
players = gml.event_handling(players, event, mn.Flag_pos[0])
assert cf.STATE == cf.State.menu
assert cf.LANG == lg.AVAILABLE[0]
cf.STATE = cf.State.multiplayer_set
players = gml.event_handling(players, event, mn.Multi_pos[1])
assert cf.NB_PLAYERS == 3
players = gml.event_handling(players, event, mn.Return_pos)
assert cf.STATE == cf.State.menu
cf.STATE = cf.State.multiplayer_set
players = gml.event_handling(players, event, mn.Start_pos)
assert cf.STATE == cf.State.ingame
cf.STATE = cf.State.langchange
players = gml.event_handling(players, event, mn.Flag_pos[0])
assert cf.STATE == cf.State.setup
assert cf.LANG == lg.AVAILABLE[0]
cf.STATE = cf.State.langchange
players = gml.event_handling(players, event, mn.Flag_pos[1])
assert cf.STATE == cf.State.setup
assert cf.LANG == lg.AVAILABLE[1]
cf.STATE = cf.State.langchange
lg.set_lang(lg.AVAILABLE[0])
players = gml.event_handling(players, event, mn.Return_pos)
assert cf.STATE == cf.State.setup
assert cf.LANG == lg.AVAILABLE[0]
cf.STATE = cf.State.gameover
players[0].jump()
players = gml.event_handling(players, event, mn.Return_pos)
assert players[0].vel.y == 0
assert cf.STATE == cf.State.menu
cf.STATE = cf.State.gameover
players[0].jump()
players = gml.event_handling(players, event, mn.Restart_pos)
assert players[0].vel.y == 0
for bloc in spt.ground:
assert not bloc.FLAG_creation
assert cf.STATE == cf.State.ingame
cf.STATE = cf.State.highscore
players = gml.event_handling(players, event, mn.Return_pos)
assert cf.STATE == cf.State.menu
cf.STATE = cf.State.setup
players = gml.event_handling(players, event, mn.Language_pos)
assert cf.STATE == cf.State.langchange
cf.CAPT = True
cf.STATE = cf.State.setup
players = gml.event_handling(players, event, mn.Commands_pos)
assert cf.STATE == cf.State.keyset
assert not cf.CAPT
cf.STATE = cf.State.credits
players = gml.event_handling(players, event, mn.Return_pos)
assert cf.STATE == cf.State.menu
cf.STATE = cf.State.menu
players = gml.event_handling(players, event, mn.Credits_pos)
assert cf.STATE == cf.State.credits
cf.STATE = cf.State.setup
players = gml.event_handling(players, event, mn.Return_pos)
assert cf.STATE == cf.State.menu
cf.STATE = cf.State.keyset
players = gml.event_handling(players, event, mn.Return_pos)
assert cf.STATE == cf.State.setup
cf.CAPT = False
cf.STATE = cf.State.keyset
players = gml.event_handling(players, event, ky.Modify_pos[2])
assert cf.CAPT
assert cf.CAPT_PLYR == 2
| 9,581
|
def map_threshold(stat_img=None, mask_img=None, alpha=.001, threshold=3.,
height_control='fpr', cluster_threshold=0):
""" Compute the required threshold level and return the thresholded map
Parameters
----------
stat_img : Niimg-like object or None, optional
statistical image (presumably in z scale)
whenever height_control is 'fpr' or None,
stat_img=None is acceptable.
If it is 'fdr' or 'bonferroni', an error is raised if stat_img is None.
mask_img : Niimg-like object, optional,
mask image
alpha: float, optional
number controlling the thresholding (either a p-value or q-value).
Its actual meaning depends on the height_control parameter.
This function translates alpha to a z-scale threshold.
threshold: float, optional
desired threshold in z-scale.
This is used only if height_control is None
height_control: string, or None, optional
false positive control meaning of the cluster-forming
threshold: 'fpr'|'fdr'|'bonferroni'|None
cluster_threshold : float, optional
cluster size threshold. In the returned thresholded map,
sets of connected voxels (`clusters`) with size smaller
than this number will be removed.
Returns
-------
thresholded_map : Nifti1Image,
the stat_map thresholded at the prescribed voxel- and cluster-level
threshold: float,
the voxel-level threshold used actually
Note
----
If the input image is not z-scaled (i.e. some z-transformed statistic)
the computed threshold is not rigorous and likely meaningless
"""
# Check that height_control is correctly specified
if height_control not in ['fpr', 'fdr', 'bonferroni', None]:
raise ValueError(
"height control should be one of ['fpr', 'fdr', 'bonferroni', None]")
# if height_control is 'fpr' or None, we don't need to look at the data
# to compute the threshold
if height_control == 'fpr':
threshold = norm.isf(alpha)
# In this case, if stat_img is None, we return
if stat_img is None:
if height_control in ['fpr', None]:
return None, threshold
else:
raise ValueError(
'Map_threshold requires stat_img not to be None '
'when the height_control procedure is bonferroni or fdr')
# Masking
if mask_img is None:
masker = NiftiMasker(mask_strategy='background').fit(stat_img)
else:
masker = NiftiMasker(mask_img=mask_img).fit()
stats = np.ravel(masker.transform(stat_img))
n_voxels = np.size(stats)
# Thresholding
if height_control == 'fdr':
threshold = fdr_threshold(stats, alpha)
elif height_control == 'bonferroni':
threshold = norm.isf(alpha / n_voxels)
stats *= (stats > threshold)
# embed it back to 3D grid
stat_map = get_data(masker.inverse_transform(stats))
# Extract connected components above threshold
label_map, n_labels = label(stat_map > threshold)
labels = label_map[get_data(masker.mask_img_) > 0]
for label_ in range(1, n_labels + 1):
if np.sum(labels == label_) < cluster_threshold:
stats[labels == label_] = 0
return masker.inverse_transform(stats), threshold
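# Hedged usage sketch (not part of the original source); 'zmap.nii.gz' is a
# hypothetical z-scaled statistical map on disk.
# thresholded_map, z_threshold = map_threshold(
#     stat_img='zmap.nii.gz',
#     alpha=0.05,
#     height_control='fdr',
#     cluster_threshold=10,
# )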
| 9,582
|
def convertError(
sourceType: Type[BaseException], targetType: Callable[[], BaseException]
) -> Generator[None, None, None]:
"""
Convert an error into a different error type.
@param sourceType: The type of exception that should be caught and
converted.
@type sourceType: L{BaseException}
@param targetType: The type of exception to which the original should be
converted.
@type targetType: L{BaseException}
"""
try:
yield
except sourceType as e:
raise targetType().with_traceback(e.__traceback__)
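# Minimal standalone sketch (not part of the original source) of the same
# pattern, assuming the generator above is wrapped with
# contextlib.contextmanager so it can be used in a `with` block.
from contextlib import contextmanager

@contextmanager
def convert_error_demo(source_type, target_type):
    try:
        yield
    except source_type as e:
        raise target_type().with_traceback(e.__traceback__)

# with convert_error_demo(KeyError, ValueError):
#     {}["missing"]  # the KeyError is re-raised as a ValueError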
| 9,583
|
def grab_features(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
Attempts to assign song features, using the get_features function, to all songs in the given dataframe.
This function creates a column that encompasses all features returned from Spotify in JSON format for each track ID.
It then explodes this column into a separate dataframe and concatenates it with the original.
Parameters:
dataframe (pandas dataframe): Dataframe of songs to look up. Must have a "trackId" column
Returns:
dataframe (pandas dataframe): original pandas dataframe with song features included
"""
start = time.time()
print("Getting song features..")
dataframe["features_json"] = dataframe["trackId"].progress_apply(
get_features
) # progress apply allows for tqdm progress bar
dataframe.dropna(
axis=0, subset=["trackId"], inplace=True
) # cannot search for tracks that have no ID
temp_list = [pd.json_normalize(x) for x in dataframe["features_json"]]
features_df = pd.concat(x for x in temp_list).reset_index().drop(["index"], axis=1)
dataframe = dataframe.reset_index().drop(["index"], axis=1)
dataframe = pd.concat([dataframe, features_df], axis=1)
dataframe.drop(["features_json"], axis=1, inplace=True)
index_check = random.randint(
0, len(dataframe) - 1  # randint's upper bound is inclusive, so cap at the last valid index
) # performing a check that the temporary song-feature df matches the original df
assert (
dataframe["trackId"].iloc[index_check] == dataframe["id"].iloc[index_check]
), "track IDs do not match"
del temp_list, features_df
end = time.time()
print(
f".apply took {round((end - start),3)} seconds for {len(dataframe)} songs, around {round((end-start) / (len(dataframe)), 3)} seconds per song"
)
return dataframe
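# Hedged usage sketch (not part of the original source); the track IDs below are
# made-up placeholders, and `get_features` must be backed by valid Spotify
# credentials for the lookups to succeed.
# import pandas as pd
# songs = pd.DataFrame({"trackId": ["3n3Ppam7vgaVa1iaRUc9Lp", "7ouMYWpwJ422jRcDASZB7P"]})
# songs = grab_features(songs)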
| 9,584
|
def _build_geo_shape_query(field, geom, relation):
"""Crea una condición de búsqueda por relación con una geometría en formato
GeoJSON.
Args:
field (str): Campo de la condición.
geom (dict): Geometría GeoJSON.
relation (str): Tipo de búsqueda por geometrías a realizar. Ver la
documentación de Elasticsearch GeoShape Query para más detalles.
Returns:
Query: Condición para Elasticsearch.
"""
options = {
'shape': geom,
'relation': relation
}
return GeoShape(**{field: options})
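# Minimal usage sketch (not part of the original source); the field name and
# GeoJSON point are illustrative, and `GeoShape` is assumed to be the
# Elasticsearch DSL query class imported by this module.
# point = {'type': 'Point', 'coordinates': [-58.3816, -34.6037]}
# query = _build_geo_shape_query('geometria', point, 'intersects')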
| 9,585
|
def load_image(mm_type, images_urls):
"""加载图片 """
for url in images_urls:
path = os.path.join(OUTPUT, mm_type, os.path.split(url)[-1].split('.')[0])
if os.path.exists(path):
# already downloaded; skip this album rather than aborting the remaining URLs
continue
os.makedirs(path)
content = etree.HTML(get(url,url).text)
img_srcs = content.xpath('//div[@class="content"]/img/@data-original')
if not img_srcs:
continue
write_page(mm_type, img_srcs, path)
| 9,586
|
def ProcessConfigurationFile(options):
"""Process configuration file, merge configuration with OptionParser.
Args:
options: optparse.OptionParser() object
Returns:
options: optparse.OptionParser() object
global_ns: A list of global nameserver tuples.
regional_ns: A list of regional nameservers tuples.
Raises:
ValueError: If we are unable to find a usable configuration file.
"""
config = ConfigParser.ConfigParser()
full_path = util.FindDataFile(options.config)
config.read(full_path)
if not config or not config.has_section('general'):
raise ValueError('Could not find usable configuration in %s (%s)' % (full_path, options.config))
general = dict(config.items('general'))
if options.only or options.system_only:
global_ns = []
regional_ns = []
else:
global_ns = config.items('global')
regional_ns = config.items('regional') + config.items('private')
# -U implies -u
if options.site_url:
options.upload_results = True
for option in general:
if not getattr(options, option, None):
if 'timeout' in option:
value = float(general[option])
elif 'count' in option or 'num' in option or 'hide' in option:
value = int(general[option])
else:
value = general[option]
setattr(options, option, value)
for key in ('input_file', 'output_file', 'csv_file', 'input_source'):
value = getattr(options, key, None)
if value:
setattr(options, key, os.path.expanduser(value))
options.version = version.VERSION
return (options, global_ns, regional_ns)
| 9,587
|
def lastquote(user, channel, text):
"""Show the last quote"""
callback = partial(_single_quote_callback, channel)
run_query("SELECT rowid, quote FROM quote ORDER BY rowid DESC\
LIMIT 1;", [], callback)
| 9,588
|
def map_symptom(symptom_name: str) -> Optional[str]:
"""
Maps a *symptom_name* to current symptom values in ID3C warehouse.
There is no official standard for symptoms, we are using the values
created by Audere from year 1 (2018-2019).
"""
symptom_map = {
'feeling feverish': 'feelingFeverish',
'fever': 'feelingFeverish',
'headache': 'headaches',
'headaches': 'headaches',
'cough': 'cough',
'chills': 'chillsOrShivering',
'chills or shivering': 'chillsOrShivering',
'sweats': 'sweats',
'throat': 'soreThroat',
'sore throat or itchy/scratchy throat': 'soreThroat',
'nausea': 'nauseaOrVomiting',
'nausea or vomiting': 'nauseaOrVomiting',
'nose': 'runnyOrStuffyNose',
'runny or stuffy nose': 'runnyOrStuffyNose',
'runny / stuffy nose': 'runnyOrStuffyNose',
'tired': 'fatigue',
'feeling more tired than usual': 'fatigue',
'ache': 'muscleOrBodyAches',
'muscle or body aches': 'muscleOrBodyAches',
'diarrhea': 'diarrhea',
'ear': 'earPainOrDischarge',
'ear pain or ear discharge': 'earPainOrDischarge',
'rash': 'rash',
'breathe': 'increasedTroubleBreathing',
'increased trouble with breathing': 'increasedTroubleBreathing',
'eye': 'eyePain',
'smell_taste': 'lossOfSmellOrTaste',
'other': 'other',
'none': 'none',
'none of the above': 'none',
}
if symptom_name.lower() not in symptom_map:
raise UnknownSymptomNameError(f"Unknown symptom name «{symptom_name}»")
return symptom_map[symptom_name.lower()]
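# Minimal usage sketch (not part of the original source):
#
#   >>> map_symptom('Fever')
#   'feelingFeverish'
#   >>> map_symptom('runny / stuffy nose')
#   'runnyOrStuffyNose'
#
# Unrecognized names raise UnknownSymptomNameError.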
| 9,589
|
def check_sbatch(cmd, call=True, num_cpus=1, mem="2G", time=None,
partitions=None, dependencies=None, no_output=False, no_error=False,
use_slurm=False, mail_type=['FAIL', 'TIME_LIMIT'], mail_user=None,
stdout_file=None, stderr_file=None,
args=None):
""" This function wraps calls to sbatch. It adds the relevant command line
options based on the parameters (either specified or extracted from
args, if args is not None).
The 'ntasks' option is always 1 with this function.
Args:
cmd (str): The command to execute
call (bool): If this flag is false, then the commands will not be
executed (but will be logged).
num_cpus (int): The number of CPUs to use. This will be translated into
an sbatch request like: "--ntasks 1 --cpus-per-task <num-cpus>".
default: 1
mem (str): This will be translated into an sbatch request like:
"--mem=<mem>". default: 2G
time (str): The amount of time to request. This will be translated
into an sbatch request like: "--time <time>". default: None
partitions (str): The partitions to request. This will be translated
into an sbatch request like: "-p <partitions>". default: None
(N.B. This value should be a comma-separated list with no spaces,
for example: partitions="general,long")
dependencies (list of int-likes): A list of all of the job ids to
use as dependencies for this call. This will be translated into
an sbatch request like: "--dependency=afterok:<dependencies>".
default: None (i.e., no dependencies)
N.B. This IS NOT overwritten by args.
no_output (bool): If this flag is True, stdout will be redirected
to /dev/null. This will be translated into an sbatch request
like: "--output=/dev/null". default: If the flag is not present,
then stdout will be directed to a log file with the job number.
This corresponds to "--output=slurm-%J.out" in the sbatch call.
stdout_file (str): If this value is given and no_output is False,
then this filename will be used for stdout rather than
slurm-%J.out. This corresponds to "--output=<stdout_file>" in
the sbatch call.
no_error (bool): If this flag is True, stderr will be redirected
to /dev/null. This will be translated into an sbatch request
like: "--error=/dev/null". default: If the flag is not present,
then stderr will be directed to a log file with the job number.
This corresponds to "--error=slurm-%J.err" in the sbatch call.
stderr_file (str): If this value is given and no_output is False,
then this filename will be used for stderr rather than
slurm-%J.err. This corresponds to "--output=<stdout_file>" in
the sbatch call.
use_slurm (bool): If this flag is True, then the commands will be
submitted to SLURM via sbatch. default: By default, each command
is executed sequentially within the current terminal.
mail_type (list of strings): A list of the types of mail to send.
This will be translated into an sbatch request like:
"--mail-type type_1,type_2,...". default: ['FAIL', 'TIME_LIMIT']
mail_user (string): The email address (or user name if that is
configured) of the recipient of the mails. This is translated
into an sbatch request like: "--mail-user <user>"
args (namespace): A namespace which contains values for all of the
options (i.e., created from an argparse parser after calling
add_sbatch_options on the parser)
Returns:
If use_slurm is False, None
If use_slurm is True, the slurm job id
"""
# use args if they are present
if args is not None:
call = not args.do_not_call
num_cpus = args.num_cpus
mem = args.mem
time = args.time
partitions = args.partitions
no_output = args.no_output
no_error = args.no_error
use_slurm = args.use_slurm
mail_type = args.mail_type
mail_user = args.mail_user
stdout_file = args.stdout_file
stderr_file = args.stderr_file
output_str = "--output=slurm-%J.out"
if stdout_file is not None:
output_str = "--output={}".format(stdout_file)
if no_output:
output_str = "--output=/dev/null"
error_str = "--error=slurm-%J.err"
if stderr_file is not None:
error_str = "--error={}".format(stderr_file)
if no_error:
error_str = "--error=/dev/null"
dependencies_str = ""
if dependencies is not None:
dependencies_str = ':'.join(str(d) for d in dependencies)
dependencies_str = "--dependency=afterok:{}".format(dependencies_str)
# check if we actually want to use SLURM
msg = "slurm.check_sbatch.use_slurm: {}, call: {}".format(use_slurm, call)
logger.debug(msg)
# anyway, make sure to remove the --use-slurm option
cmd = cmd.replace("--use-slurm", "")
if use_slurm:
time_str = ""
if time is not None:
time_str = "--time {}".format(time)
mem_str = ""
if mem is not None:
mem_str = "--mem={}".format(mem)
partitions_str = ""
if partitions is not None:
partitions_str = "-p {}".format(partitions)
num_cpus_str = ""
if num_cpus is not None:
num_cpus_str = "--cpus-per-task {}".format(num_cpus)
mail_type_str = ""
if mail_type is not None:
mail_type_str = "--mail-type {}".format(','.join(mail_type))
mail_user_str = ""
if mail_user is not None:
mail_user_str = "--mail-user {}".format(mail_user)
else:
# if we did not give a mail user, then do not specify the mail types
mail_type_str = ""
cmd = ("sbatch {} {} --ntasks 1 {} {} "
"{} {} {} {} {} {}".format(time_str, mem_str, partitions_str, num_cpus_str, dependencies_str,
output_str, error_str, mail_type_str, mail_user_str, cmd))
output = shell_utils.check_output(cmd, call=call)
# and parse out the job id
if call:
job_id = output.strip().split()[-1]
else:
job_id = None
return job_id
else:
shell_utils.check_call(cmd, call=call)
return None
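# Hedged usage sketch (not part of the original source); the command and
# resource values are illustrative and require a working SLURM installation.
# job_id = check_sbatch(
#     "python train.py --epochs 10",
#     use_slurm=True,
#     num_cpus=4,
#     mem="8G",
#     time="0-02:00",
#     partitions="general",
# )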
| 9,590
|
def get_project_details(p):
"""Extract from the pickle object detailed information about
a given project and parse it into a comprehensive dict structure.
res = {}
project = p['projects'][0]
fields = {'Owner(s)': 'project_owners',
'Member(s)': 'project_members',
'Collaborator(s)': 'project_collabs',
'User(s)': 'project_users',
'last_accessed': 'project_last_access'}
for k, v in fields.items():
res[k] = project[v].strip().split(' <br/> ')
if res[k][0] == '':
res[k] = ['None']
for e in ['insert_user', 'insert_date', 'project_access', 'name',
'project_last_workflow']:
res[e] = project[e]
return res
| 9,591
|
def get_category(name: str) -> Category:
"""Returns a category with a given name"""
return Category.objects.get(name=name)
| 9,592
|
def extract_sentiment_analysis(model):
"""Runs extraction common for all sentiment analysis models"""
model.extract_torchscript()
model.extract_onnx()
| 9,593
|
def log_report():
""" The log report shows the log file. The user can filter and search the log. """
log_main = open(main_log, 'r').readlines()
data_main = []
for line in log_main:
split_line = line.split(' ')
data_main.append([' '.join(split_line[:2]), ' '.join(split_line[2:])])
return render_template(
'log.html',
title="Logs",
data_main=data_main)
| 9,594
|
def register(ctx, repo_path):
"""Register a report with LSST the Docs.
This command only needs to be run once, when you're creating a new report
repository. The command creates a new "product" on LSST the Docs where
instances of the report are published.
**Required arguments**
``REPO_PATH``
Path to the report repository. The report repository must be a local
directory (not a remote Git repository) because the repository's
nbreport.yaml metadata file will be modified. The new metadata created
by this command must be committed into the report repository.
"""
report_repo = ReportRepo(repo_path)
handle = report_repo.config['handle']
title = report_repo.config['title']
git_repo = report_repo.config['git_repo']
try:
github_username = ctx.obj['config']['github']['username']
github_token = ctx.obj['config']['github']['token']
except KeyError:
raise click.UsageError(
'Could not find GitHub authentication data in {0!s}. Try '
'running "nbreport login" first.'.format(ctx.obj['config_path'])
)
# Allow for user confirmation
click.echo('Registering report with this metadata from nbreport.yaml:')
click.echo(' Handle: {}'.format(handle))
click.echo(' Title: {}'.format(title))
click.echo(' Git repository: {}'.format(git_repo))
click.confirm('Register this report?', abort=True)
response = requests.post(
urljoin(ctx.obj['server'], '/nbreport/reports/'),
auth=(github_username, github_token),
json={
'handle': handle,
'title': title,
'git_repo': git_repo
}
)
response.raise_for_status()
response_data = response.json()
report_repo.config['ltd_product'] = response_data['product']
report_repo.config['published_url'] = response_data['published_url']
report_repo.config['ltd_url'] = response_data['product_url']
click.echo('Registered report at {}'.format(
report_repo.config['published_url']))
| 9,595
|
def int_or_none(x) -> int:
"""Either convert x to an int or return None."""
try:
return int(x)
except TypeError:
return None
except ValueError:
return None
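# Minimal usage sketch (not part of the original source):
#
#   >>> int_or_none("42")
#   42
#   >>> int_or_none("not a number") is None
#   True
#   >>> int_or_none(None) is None
#   True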
| 9,596
|
def create_parser_for_docs() -> argparse.ArgumentParser:
"""Create a parser showing all options for the default CLI
documentation.
Returns:
The primary parser, specifically for generating documentation.
"""
daiquiri.setup(level=logging.FATAL)
# load default plugins
plugin.initialize_default_plugins()
ext_commands = plug.manager.hook.create_extension_command()
return create_parser(
show_all_opts=True,
ext_commands=ext_commands,
config_file=_repobee.constants.DEFAULT_CONFIG_FILE,
)
| 9,597
|
def split_into_batches(all_users, batch_size=BATCH_SIZE):
"""h/t: https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks"""
for i in range(0, len(all_users), batch_size):
yield all_users[i : i + batch_size]
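# Minimal usage sketch (not part of the original source), with a small batch
# size for illustration:
#
#   >>> list(split_into_batches(list(range(7)), batch_size=3))
#   [[0, 1, 2], [3, 4, 5], [6]]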
| 9,598
|
def extra_lo_ips():
"""Setup IPs that are used for simulating e.g. agent, mesos leader, etc.. """
ips = ['127.0.0.2', '127.0.0.3']
for ip in ips:
add_lo_ipaddr(ip, 32)
yield
for ip in ips:
del_lo_ipaddr(ip, 32)
| 9,599
|