name = input("Enter your name: ")
print("Name inserted =", name)
age = int(input("Enter your age: "))
print("Age:", age)
print(type(name))
print(type(age))
|
def password_valid(password):
is_valid = True
counter = 0
if not 6 <= len(password) <= 10:
is_valid = False
print("Password must be between 6 and 10 characters")
for i in password:
if i.isdigit():
counter += 1
if not i.isalpha() and not i.isdigit():
is_valid = False
print("Password must consist only of letters and digits")
break
if counter < 2:
is_valid = False
print("Password must have at least 2 digits")
return is_valid
password = input()
is_valid = password_valid(password)
if is_valid:
print("Password is valid")
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
class InfraCheckoutApi(recipe_api.RecipeApi):
"""Stateless API for using public infra gclient checkout."""
# Named cache shared across builders using public infra gclient checkout.
PUBLIC_NAMED_CACHE = 'infra_gclient_with_go'
# Ditto but for builders which use internal gclient checkout.
INTERNAL_NAMED_CACHE = 'infra_internal_gclient_with_go'
def checkout(self, gclient_config_name,
patch_root=None,
path=None,
internal=False,
named_cache=None,
**kwargs):
"""Fetches infra gclient checkout into a given path OR named_cache.
Arguments:
* gclient_config_name (string) - name of gclient config.
* patch_root (path or string) - path **inside** infra checkout to git repo
in which to apply the patch. For example, 'infra/luci' for luci-py repo.
If None (default), no patches will be applied.
* path (path or string) - path to where to create/update infra checkout.
If None (default) - path is cache with customizable name (see below).
* internal (bool) - False by default, meaning the public infra gclient
checkout layout is assumed; if True, the infra_internal layout is used.
This affects the default named_cache and which repo's go corner the
./go/env.py command is run in.
* named_cache - if path is None, this allows customizing the name of the
cache. Defaults to PUBLIC_NAMED_CACHE or INTERNAL_NAMED_CACHE, depending
on the `internal` argument value.
Note: your cr-buildbucket.cfg should specify named_cache for swarming to
prioritize bots which actually have this cache populated by prior
runs. Otherwise, using a named cache isn't particularly useful, unless
your pool of builders is very small.
* kwargs - passed as is to bot_update.ensure_checkout.
Returns:
a Checkout object with commands for common actions on infra checkout.
"""
assert gclient_config_name, gclient_config_name
if named_cache is None:
named_cache = (self.INTERNAL_NAMED_CACHE if internal else
self.PUBLIC_NAMED_CACHE)
path = path or self.m.path['cache'].join(named_cache)
self.m.file.ensure_directory('ensure builder dir', path)
with self.m.context(cwd=path):
self.m.gclient.set_config(gclient_config_name)
bot_update_step = self.m.bot_update.ensure_checkout(
patch_root=patch_root, **kwargs)
class Checkout(object):
@property
def path(self):
return path
@property
def bot_update_step(self):
return bot_update_step
@property
def patch_root_path(self):
assert patch_root
return path.join(patch_root)
@staticmethod
def commit_change():
assert patch_root
with self.m.context(cwd=path.join(patch_root)):
self.m.git(
'-c', 'user.email=commit-bot@chromium.org',
'-c', 'user.name=The Commit Bot',
'commit', '-a', '-m', 'Committed patch',
name='commit git patch')
@staticmethod
def get_changed_files():
assert patch_root
# Grab a list of changed files.
with self.m.context(cwd=path.join(patch_root)):
result = self.m.git(
'diff', '--name-only', 'HEAD', 'HEAD~',
name='get change list',
stdout=self.m.raw_io.output())
files = result.stdout.splitlines()
if len(files) < 50:
result.presentation.logs['change list'] = sorted(files)
else:
result.presentation.logs['change list is too long'] = (
'%d files' % len(files))
return set(files)
@staticmethod
def gclient_runhooks():
with self.m.context(cwd=path):
self.m.gclient.runhooks()
@staticmethod
def ensure_go_env(infra_step=True):
with self.m.context(cwd=path):
Checkout.go_env_step('go', 'version', name='init infra go env',
infra_step=infra_step)
@staticmethod
def go_env_step(*args, **kwargs):
# name lazily defaults to first two args, like "go test".
name = kwargs.pop('name', None) or ' '.join(map(str, args[:2]))
with self.m.context(cwd=path):
where = 'infra_internal' if internal else 'infra'
return self.m.python(name, path.join(where, 'go', 'env.py'),
args, venv=True, **kwargs)
@staticmethod
def run_presubmit_in_go_env():
assert patch_root
assert self.m.runtime.is_luci
revs = self.m.bot_update.get_project_revision_properties(patch_root)
upstream = bot_update_step.json.output['properties'].get(revs[0])
gerrit_change = self.m.buildbucket.build.input.gerrit_changes[0]
# The presubmit must be run with proper Go environment.
presubmit_cmd = [
'python', # env.py will replace this with its sys.executable.
self.m.presubmit.presubmit_support_path,
'--root', path.join(patch_root),
'--commit',
'--verbose', '--verbose',
'--issue', gerrit_change.change,
'--patchset', gerrit_change.patchset,
'--gerrit_url', 'https://' + gerrit_change.host,
'--gerrit_fetch',
'--upstream', upstream,
'--skip_canned', 'CheckTreeIsOpen',
'--skip_canned', 'CheckBuildbotPendingBuilds',
]
with self.m.context(env={'PRESUBMIT_BUILDER': '1'}):
Checkout.go_env_step(*presubmit_cmd, name='presubmit')
return Checkout()
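# A hedged usage sketch from a recipe's RunSteps (the module property name and
# the config/repo names below are assumptions, not defined in this file):
#
#   co = api.infra_checkout.checkout(
#       gclient_config_name='infra', patch_root='infra')
#   co.gclient_runhooks()
#   co.ensure_go_env()
#   co.go_env_step('go', 'version')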
|
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from rlzoo.common import math_utils
from rlzoo.common.value_networks import *
from rlzoo.common.policy_networks import *
from rlzoo.common.utils import set_seed
'''
full list of algorithm parameters (alg_params)
-----------------------------------------------
net_list: a list of networks (value and policy) used in the algorithm, from common functions or customization
optimizers_list: a list of optimizers for all networks and differentiable variables
entropy_beta: factor for entropy boosted exploration
-----------------------------------------------
full list of learning parameters (learn_params)
-----------------------------------------------
train_episodes: total number of episodes for training
test_episodes: total number of episodes for testing
max_steps: maximum number of steps for one episode
n_workers: manually set number of workers
update_itr: update global policy after several episodes
gamma: reward discount factor
save_interval: timesteps for saving the weights and plotting the results
mode: train or test
------------------------------------------------
'''
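# A hedged usage sketch of these default-parameter helpers (build_env and A3C
# are assumptions based on typical RLzoo entry points, not defined here):
#
#   envs = [build_env('CartPole-v0', 'classic_control') for _ in range(2)]
#   alg_params, learn_params = classic_control(envs)
#   agent = A3C(**alg_params)
#   agent.learn(env=envs, **learn_params)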
def atari(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
if isinstance(env, list): # judge if multiple envs are passed in for parallel computing
num_env = len(env) # number of envs passed in
env = env[0] # take one of the env as they are all the same
else:
num_env = 1
alg_params = dict(
entropy_beta=0.005
)
if alg_params.get('net_list') is None:
num_hidden_layer = 4 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
net_list2 = [] # networks list of networks list, each item for single thread/process
for _ in range(num_env + 1): # additional one for global
with tf.name_scope('AC'):
with tf.name_scope('Critic'):
critic = ValueNetwork(env.observation_space, hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Actor'):
actor = StochasticPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [actor, critic]
net_list2.append(net_list)
alg_params['net_list'] = net_list2
if alg_params.get('optimizers_list') is None:
a_lr, c_lr = 1e-3, 1e-3 # a_lr: learning rate of the actor; c_lr: learning rate of the critic
a_optimizer = tf.optimizers.RMSprop(a_lr, name='RMS_optimizer_actor')
c_optimizer = tf.optimizers.RMSprop(c_lr, name='RMS_optimizer_critic')
optimizers_list = [a_optimizer, c_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=100,
gamma=0.9,
train_episodes=1000,
test_episodes=10,
save_interval=100,
update_itr=10,
n_workers=num_env
)
return alg_params, learn_params
def classic_control(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
if isinstance(env, list): # judge if multiple envs are passed in for parallel computing
num_env = len(env) # number of envs passed in
env = env[0] # take one of the env as they are all the same
else:
num_env = 1
alg_params = dict(
entropy_beta=0.005
)
if alg_params.get('net_list') is None:
num_hidden_layer = 4 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
net_list2 = [] # networks list of networks list, each item for single thread/process
for _ in range(num_env + 1): # additional one for global
with tf.name_scope('AC'):
with tf.name_scope('Critic'):
critic = ValueNetwork(env.observation_space, hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Actor'):
actor = StochasticPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [actor, critic]
net_list2.append(net_list)
alg_params['net_list'] = net_list2
if alg_params.get('optimizers_list') is None:
a_lr, c_lr = 1e-3, 1e-3 # a_lr: learning rate of the actor; c_lr: learning rate of the critic
a_optimizer = tf.optimizers.RMSprop(a_lr, name='RMS_optimizer_actor')
c_optimizer = tf.optimizers.RMSprop(c_lr, name='RMS_optimizer_critic')
optimizers_list = [a_optimizer, c_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=100,
gamma=0.9,
train_episodes=1000,
test_episodes=10,
save_interval=100,
update_itr=10,
n_workers=num_env
)
return alg_params, learn_params
def box2d(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
if isinstance(env, list): # judge if multiple envs are passed in for parallel computing
num_env = len(env) # number of envs passed in
env = env[0] # take one of the env as they are all the same
else:
num_env = 1
alg_params = dict(
entropy_beta=0.005
)
if alg_params.get('net_list') is None:
num_hidden_layer = 4 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
net_list2 = [] # networks list of networks list, each item for single thread/process
for _ in range(num_env + 1): # additional one for global
with tf.name_scope('AC'):
with tf.name_scope('Critic'):
critic = ValueNetwork(env.observation_space, hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Actor'):
actor = StochasticPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [actor, critic]
net_list2.append(net_list)
alg_params['net_list'] = net_list2
if alg_params.get('optimizers_list') is None:
a_lr, c_lr = 1e-3, 1e-3 # a_lr: learning rate of the actor; c_lr: learning rate of the critic
a_optimizer = tf.optimizers.RMSprop(a_lr, name='RMS_optimizer_actor')
c_optimizer = tf.optimizers.RMSprop(c_lr, name='RMS_optimizer_critic')
optimizers_list = [a_optimizer, c_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=20000,
gamma=0.9,
train_episodes=20000,
test_episodes=10,
save_interval=500,
update_itr=10,
n_workers=num_env
)
return alg_params, learn_params
def mujoco(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
if isinstance(env, list): # judge if multiple envs are passed in for parallel computing
num_env = len(env) # number of envs passed in
env = env[0] # take one of the env as they are all the same
else:
num_env = 1
alg_params = dict(
entropy_beta=0.005
)
if alg_params.get('net_list') is None:
num_hidden_layer = 4 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
net_list2 = [] # networks list of networks list, each item for single thread/process
for _ in range(num_env + 1): # additional one for global
with tf.name_scope('AC'):
with tf.name_scope('Critic'):
critic = ValueNetwork(env.observation_space, hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Actor'):
actor = StochasticPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [actor, critic]
net_list2.append(net_list)
alg_params['net_list'] = net_list2
if alg_params.get('optimizers_list') is None:
a_lr, c_lr = 1e-3, 1e-3 # a_lr: learning rate of the actor; c_lr: learning rate of the critic
a_optimizer = tf.optimizers.RMSprop(a_lr, name='RMS_optimizer_actor')
c_optimizer = tf.optimizers.RMSprop(c_lr, name='RMS_optimizer_critic')
optimizers_list = [a_optimizer, c_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=100,
gamma=0.9,
train_episodes=1000,
test_episodes=10,
save_interval=100,
update_itr=10,
n_workers=num_env
)
return alg_params, learn_params
def robotics(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
if isinstance(env, list): # judge if multiple envs are passed in for parallel computing
num_env = len(env) # number of envs passed in
env = env[0] # take one of the env as they are all the same
else:
num_env = 1
alg_params = dict(
entropy_beta=0.005
)
if alg_params.get('net_list') is None:
num_hidden_layer = 4 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
net_list2 = [] # networks list of networks list, each item for single thread/process
for _ in range(num_env + 1): # additional one for global
with tf.name_scope('AC'):
with tf.name_scope('Critic'):
critic = ValueNetwork(env.observation_space, hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Actor'):
actor = StochasticPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [actor, critic]
net_list2.append(net_list)
alg_params['net_list'] = net_list2
if alg_params.get('optimizers_list') is None:
a_lr, c_lr = 1e-3, 1e-3 # a_lr: learning rate of the actor; c_lr: learning rate of the critic
a_optimizer = tf.optimizers.RMSprop(a_lr, name='RMS_optimizer_actor')
c_optimizer = tf.optimizers.RMSprop(c_lr, name='RMS_optimizer_critic')
optimizers_list = [a_optimizer, c_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=100,
gamma=0.9,
train_episodes=1000,
test_episodes=10,
save_interval=100,
update_itr=10,
n_workers=num_env
)
return alg_params, learn_params
def dm_control(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
if isinstance(env, list): # judge if multiple envs are passed in for parallel computing
num_env = len(env) # number of envs passed in
env = env[0] # take one of the env as they are all the same
else:
num_env = 1
alg_params = dict(
entropy_beta=0.005
)
if alg_params.get('net_list') is None:
num_hidden_layer = 4 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
net_list2 = [] # networks list of networks list, each item for single thread/process
for _ in range(num_env + 1): # additional one for global
with tf.name_scope('AC'):
with tf.name_scope('Critic'):
critic = ValueNetwork(env.observation_space, hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Actor'):
actor = StochasticPolicyNetwork(env.observation_space, env.action_space,
hidden_dim_list=num_hidden_layer * [hidden_dim])
net_list = [actor, critic]
net_list2.append(net_list)
alg_params['net_list'] = net_list2
if alg_params.get('optimizers_list') is None:
a_lr, c_lr = 1e-3, 1e-3 # a_lr: learning rate of the actor; c_lr: learning rate of the critic
a_optimizer = tf.optimizers.RMSprop(a_lr, name='RMS_optimizer_actor')
c_optimizer = tf.optimizers.RMSprop(c_lr, name='RMS_optimizer_critic')
optimizers_list = [a_optimizer, c_optimizer]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(
max_steps=100,
gamma=0.9,
train_episodes=1000,
test_episodes=10,
save_interval=100,
update_itr=10,
n_workers=num_env
)
return alg_params, learn_params
|
import discord
import os
import asyncio
import time
import logging
from discord.ext import commands
from __main__ import settings
from cogs.utils.dataIO import dataIO
reminders_path = "data/remindme/reminders.json"
mod_log_path = 'data/mod/mod.log'
class RemindMe:
"""Pour ne plus rien oublier."""
# _ignore_list_path = "data/mod/ignorelist.json"
# _filter_path = "data/mod/filter.json"
_ownersettings_path = "data/red/ownersettings.json"
# _perms_cache_path = "data/mod/perms_cache.json"
# _past_names_path = "data/mod/past_names.json"
# _past_nicknames_path = "data/mod/past_nicknames.json"
_reminders_path = reminders_path
def __init__(self, bot):
self.bot = bot
self.reminders = dataIO.load_json(self._reminders_path)
self.settings = dataIO.load_json(self._ownersettings_path)
self.units = {"second": 1,
"seconde": 1,
"sec": 1,
"minute": 60,
"min": 60,
"heure": 3600,
"hour": 3600,
"h": 3600,
"jour": 86400,
"day": 86400,
"j": 86400,
"semaine": 604800,
"sem": 604800,
"week": 604800,
"moi": 2592000,
"month": 2592000}
@commands.command(pass_context=True)
async def rappelermoi(self, ctx, quantity: int, time_unit: str, *text: str):
"""Vous envoie <text> quand c'est l'heure
Accepte: minutes, min, heures, hours, h, jours, days, j, semaines, sem, weeks, mois, months
"""
text = " ".join(text)
time_unit = time_unit.lower()
author = ctx.message.author
server = ctx.message.server
s = ""
if time_unit.endswith("s"):
time_unit = time_unit[:-1]
s = "s"
if time_unit not in self.units:
await self.bot.say("Unité de temps invalide. Choisir dans:\n"
"minutes, min, heures, hours, h,\n"
"jours, days, j, semaines, sem, weeks,\n"
"mois, months", delete_after=self.settings[server.id]["delete_delay"])
return
if quantity < 1:
await self.bot.say("Quantity ne peut pas être 0 ou négatif.", delete_after=self.settings[server.id]["delete_delay"])
return
if len(text) > 1960:
await self.bot.say("Le texte est trop long (1960 caractères max).", delete_after=self.settings[server.id]["delete_delay"])
return
seconds = self.units[time_unit] * quantity
future = int(time.time()+seconds)
self.reminders.append(
{"SERVER":server.id, "ID": author.id, "NAME": author.name, "MENTION": author.mention, "DISCRIMINATOR": author.discriminator, "FUTURE": future, "TEXT": text})
logger.info("{}({}) set a reminder.".format(author.name, author.id))
await self.bot.say("Je vous rappelle ça dans {} {}.".format(str(quantity), time_unit + s), delete_after=self.settings[server.id]["delete_delay"])
dataIO.save_json(self._reminders_path, self.reminders)
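# Worked example (assuming "!" is the bot's command prefix, which is configured
# elsewhere): "!rappelermoi 2 heures acheter du lait" strips the trailing "s",
# looks up "heure" (3600 s) in self.units and schedules the reminder for
# 2 * 3600 = 7200 seconds in the future.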
@commands.command(pass_context=True)
async def oubliermoi(self, ctx):
"""Supprime toutes vos notifications à venir"""
author = ctx.message.author
server = ctx.message.server
to_remove = []
for reminder in self.reminders:
if reminder["ID"] == author.id:
to_remove.append(reminder)
if to_remove:
for reminder in to_remove:
self.reminders.remove(reminder)
dataIO.save_json(self._reminders_path, self.reminders)
await self.bot.say("Toutes vos notifications ont été enlevées.", delete_after=self.settings[server.id]["delete_delay"])
else:
await self.bot.say("Vous n'avez pas de notifications à venir.", delete_after=self.settings[server.id]["delete_delay"])
async def check_reminders(self, bot):
while "RemindMe" in self.bot.cogs:
self.reminders = dataIO.load_json(self._reminders_path)
to_remove = []
#print("\ncheck : " + str(len(self.reminders)))
for reminder in self.reminders:
if reminder["FUTURE"] <= int(time.time()):
try:
logger.info("{}({}) has a reminder done.".format(
reminder["NAME"], reminder["ID"]))
for un in bot.servers:
assert isinstance(un, discord.Server)
if un.id == reminder["SERVER"]:
server = un
break
else:
server = None
if server:
for member in server.members:
if member.id == reminder["ID"]:
author = member
break
else:
author = "@"+reminder["NAME"]+"#"+reminder["DISCRIMINATOR"]
#print("Mon destinataire=", author)
try:
#print("Le destinataire:", author)
await self.bot.send_message(author, "RAPPEL:\n\n{}".format(reminder["TEXT"]))
to_remove.append(reminder)
except:
pass
except (discord.errors.Forbidden, discord.errors.NotFound):
pass
# to_remove.append(reminder)
except discord.errors.HTTPException:
pass
# else:
# to_remove.append(reminder)
for reminder in to_remove:
self.reminders.remove(reminder)
if to_remove:
dataIO.save_json(self._reminders_path, self.reminders)
await asyncio.sleep(5)
def check_folders():
if not os.path.exists("data/remindme"):
print("Créaton du dossier data/remindme ...")
os.makedirs("data/remindme")
def check_files():
f = reminders_path
if not os.path.isfile(f):
print("Création du fichier vide reminders.json...")
dataIO.save_json(f, [])
def setup(bot):
global logger
check_folders()
check_files()
logger = logging.getLogger("remindme")
if logger.level == 0: # Prevents the logger from being loaded again in case of module reload
logger.setLevel(logging.INFO)
handler = logging.FileHandler(
filename=mod_log_path, encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter(
'%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
n = RemindMe(bot)
loop = asyncio.get_event_loop()
loop.create_task(n.check_reminders(bot))
bot.add_cog(n)
|
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
bmtfStage2Raw = cms.EDProducer(
"L1TDigiToRaw",
Setup = cms.string("stage2::BMTFSetup"),
InputLabel = cms.InputTag("simBmtfDigis","BMTF"),
InputLabel2 = cms.InputTag("simTwinMuxDigis"),
FedId = cms.int32(1376),
FWId = cms.uint32(1),
)
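# A hedged usage sketch in a CMSSW configuration (the process object and path
# name are illustrative only, not part of this fragment):
#
#   process.bmtfStage2Raw = bmtfStage2Raw
#   process.bmtfRawPath = cms.Path(process.bmtfStage2Raw)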
|
# -*- coding: utf-8 -*-
# @Time : 2018/6/16 4:43 PM
# @Author : yidxue
from gensim.corpora import Dictionary
from gensim.corpora import MmCorpus
from gensim.models.ldamodel import LdaModel
document = "This is some document..."
# load id->word mapping (the dictionary)
id2word = Dictionary.load_from_text('wiki_en_wordids.txt')
# load corpus iterator
mm = MmCorpus('wiki_en_tfidf.mm')
# extract 100 LDA topics, updating once every 10,000 documents
lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, update_every=1, chunksize=10000, passes=1)
# use LDA model: transform new doc to bag-of-words, then apply lda
doc_bow = id2word.doc2bow(document.split())
doc_lda = lda[doc_bow]
# doc_lda is vector of length num_topics representing weighted presence of each topic in the doc
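# A small follow-up sketch: inspect the inferred topic mixture and the top
# words of a few topics (print_topics is part of gensim's LdaModel API).
print(doc_lda)  # list of (topic_id, weight) pairs for this document
print(lda.print_topics(num_topics=5, num_words=8))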
|
import sys
n = int(input().strip())
sockColors = [int(color) for color in input().strip().split(' ')]
d = dict()
for color in sockColors:
if color not in d:
d[color] = 0
d[color] += 1
count = 0
for key, colorCount in d.items():
count += colorCount // 2
print(count)
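# An equivalent sketch using collections.Counter (shown for comparison only,
# not part of the original solution):
#   from collections import Counter
#   print(sum(v // 2 for v in Counter(sockColors).values()))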
|
# $Id: programs.py,v 1.1 2011-09-28 19:38:22 wirawan Exp $
#
# pyqmc.nwchem.programs module
#
# Wirawan Purwanto
# Created: 20101028
#
"""
This module contains a general encapsulation of nwchem program.
"""
import sys
import os
import os.path
import time
import wpylib.shell_tools as sh
from wpylib.params import flat as params
from pyqmc.nwchem.output import nwchem_output
class nwchem_program(object):
"""Encapsulation of nwchem program."""
NWCHEM_EXECUTABLE = "nwchem.sh"
def __init__(self):
self.parm = params(
verbose=2,
rerun=0, # forces running even if the output file exists
parse_results=1,
logfile=sys.stdout,
)
def run(self, infile, **_opts_):
"""Runs nwchem (if the output file hasn't existed) and
returns the results.
* verbose (-1): verbosity level
- > 2 = even prints out all nwchem output lines to stdout
- 2 = quiet, but prints the total SCF/DFT energy
- 1 = prints nothing except saying `nwchem inputfile.nw' to logfile
- <= 0 = really really quiet
"""
p = self.parm._create_()
opt_quiet = []
nw_exec = self.NWCHEM_EXECUTABLE
nw_in = infile
stdout = p.logfile
verbose = p.verbose
if nw_in.endswith(".nw"):
nw_out = nw_in[:-3] + ".out"
else:
nw_out = nw_in + ".out"
if verbose <= 2:
opt_quiet = ['-q']
if verbose == 1:
stdout.write("%s %s -q\n" % (nw_exec,infile,))
stdout.flush()
elif verbose == 2 and p.parse_results:
stdout.write(infile + ":")
stdout.flush()
if not os.path.isfile(nw_out):
sh.run(nw_exec, [nw_in] + opt_quiet)
if p.parse_results:
rec = nwchem_output(nw_out)
if verbose >= 2:
for e in ('E_SCF', 'E_DFT', 'E_MP2', 'E_CCSD', 'E_CCSD_T',):
if e in rec:
stdout.write("%s = %s\n" % (e, rec[e]))
stdout.flush()
return rec
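# A hedged usage sketch (the input file name is a placeholder; nwchem.sh must
# be on PATH, as implied by NWCHEM_EXECUTABLE above):
#
#   prog = nwchem_program()
#   rec = prog.run("h2o_scf.nw")
#   print(rec['E_SCF'])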
|
from tracker.index_aliases import tracker_index_alias
from tracker.indexers import AttachmentFileIndexer
class TrackerTestCaseMixin(object):
def setUp(self):
tracker_index_alias.write_index.delete(ignore=404)
tracker_index_alias.read_index.create(ignore=400)
def refresh_index(self):
with tracker_index_alias.indexing():
AttachmentFileIndexer().reindex()
tracker_index_alias.write_index.refresh()
|
from __future__ import print_function
import cv2 as cv
import argparse
## [Load image]
parser = argparse.ArgumentParser(description='Code for Histogram Equalization tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
args = parser.parse_args()
src = cv.imread(args.input)
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
## [Load image]
## [Convert to grayscale]
src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
## [Convert to grayscale]
## [Apply Histogram Equalization]
dst = cv.equalizeHist(src)
## [Apply Histogram Equalization]
## [Display results]
cv.imshow('Source image', src)
cv.imshow('Equalized Image', dst)
## [Display results]
## [Wait until user exits the program]
cv.waitKey()
## [Wait until user exits the program]
|
"""Entry point for the analysis runner."""
import os
import hailtop.batch as hb
from analysis_runner import dataproc
service_backend = hb.ServiceBackend(
billing_project=os.getenv('HAIL_BILLING_PROJECT'), bucket=os.getenv('HAIL_BUCKET')
)
batch = hb.Batch(name='new-variants-plot-pca', backend=service_backend)
dataproc.hail_dataproc_job(
batch,
'plot_pca_and_loadings.py',
max_age='2h',
packages=['selenium'],
init=['gs://cpg-reference/hail_dataproc/install_common.sh'],
job_name='new-variants-plot-pca',
)
batch.run()
|
from setuptools import setup
setup(
name='elementflow',
version='0.5',
author='Ivan Sagalaev',
author_email='maniac@softwaremaniacs.org',
url='https://github.com/isagalaev/elementflow',
license='BSD',
description='Python library for generating XML as a stream without building a tree in memory.',
long_description=open('README.md').read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
extras_require={
'test': [
'pytest',
'tox',
]
},
py_modules=['elementflow'],
)
|
# This file is part of beets.
# Copyright 2014, Stig Inge Lea Bjornsen.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the `importadded` plugin."""
import os
from _common import unittest
from test.test_importer import ImportHelper, AutotagStub
from beets import importer
from beets import util
from beetsplug.importadded import ImportAddedPlugin
_listeners = ImportAddedPlugin.listeners
def preserve_plugin_listeners():
"""Preserve the initial plugin listeners as they would otherwise be
deleted after the first setup / tear down cycle.
"""
if not ImportAddedPlugin.listeners:
ImportAddedPlugin.listeners = _listeners
def modify_mtimes(paths, offset=-60000):
for i, path in enumerate(paths, start=1):
mstat = os.stat(path)
os.utime(path, (mstat.st_atime, mstat.st_mtime + offset * i))
class ImportAddedTest(unittest.TestCase, ImportHelper):
# The minimum mtime of the files to be imported
min_mtime = None
def setUp(self):
preserve_plugin_listeners()
self.setup_beets()
self.load_plugins('importadded')
self._create_import_dir(2)
# Different mtimes on the files to be imported in order to test the
# plugin
modify_mtimes((mfile.path for mfile in self.media_files))
self.min_mtime = min(os.path.getmtime(mfile.path)
for mfile in self.media_files)
self.matcher = AutotagStub().install()
self.matcher.matching = AutotagStub.GOOD
self._setup_import_session()
self.importer.add_choice(importer.action.APPLY)
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
self.matcher.restore()
def findMediaFile(self, item):
"""Find the pre-import MediaFile for an Item"""
for m in self.media_files:
if m.title.replace('Tag', 'Applied') == item.title:
return m
raise AssertionError("No MediaFile found for Item " +
util.displayable_path(item.path))
def assertEqualTimes(self, first, second, msg=None):
"""For comparing file modification times at a sufficient precision"""
self.assertAlmostEqual(first, second, places=4, msg=msg)
def test_import_album_with_added_dates(self):
self.importer.run()
album = self.lib.albums().get()
self.assertEqual(album.added, self.min_mtime)
for item in album.items():
self.assertEqual(item.added, self.min_mtime)
def test_import_album_with_preserved_mtimes(self):
self.config['importadded']['preserve_mtimes'] = True
self.importer.run()
album = self.lib.albums().get()
self.assertEqual(album.added, self.min_mtime)
for item in album.items():
self.assertEqualTimes(item.added, self.min_mtime)
mediafile_mtime = os.path.getmtime(self.findMediaFile(item).path)
self.assertEqualTimes(item.mtime, mediafile_mtime)
self.assertEqualTimes(os.path.getmtime(item.path),
mediafile_mtime)
def test_reimported_album_skipped(self):
# Import and record the original added dates
self.importer.run()
album = self.lib.albums().get()
album_added_before = album.added
items_added_before = dict((item.path, item.added)
for item in album.items())
# Newer Item path mtimes as if Beets had modified them
modify_mtimes(items_added_before.keys(), offset=10000)
# Reimport
self._setup_import_session(import_dir=album.path)
self.importer.run()
# Verify the reimported items
album = self.lib.albums().get()
self.assertEqualTimes(album.added, album_added_before)
items_added_after = dict((item.path, item.added)
for item in album.items())
for item_path, added_after in items_added_after.iteritems():
self.assertEqualTimes(items_added_before[item_path], added_after,
"reimport modified Item.added for " +
item_path)
def test_import_singletons_with_added_dates(self):
self.config['import']['singletons'] = True
self.importer.run()
for item in self.lib.items():
mfile = self.findMediaFile(item)
self.assertEqualTimes(item.added, os.path.getmtime(mfile.path))
def test_import_singletons_with_preserved_mtimes(self):
self.config['import']['singletons'] = True
self.config['importadded']['preserve_mtimes'] = True
self.importer.run()
for item in self.lib.items():
mediafile_mtime = os.path.getmtime(self.findMediaFile(item).path)
self.assertEqualTimes(item.added, mediafile_mtime)
self.assertEqualTimes(item.mtime, mediafile_mtime)
self.assertEqualTimes(os.path.getmtime(item.path),
mediafile_mtime)
def test_reimported_singletons_skipped(self):
self.config['import']['singletons'] = True
# Import and record the original added dates
self.importer.run()
items_added_before = dict((item.path, item.added)
for item in self.lib.items())
# Newer Item path mtimes as if Beets had modified them
modify_mtimes(items_added_before.keys(), offset=10000)
# Reimport
import_dir = os.path.dirname(items_added_before.keys()[0])
self._setup_import_session(import_dir=import_dir, singletons=True)
self.importer.run()
# Verify the reimported items
items_added_after = dict((item.path, item.added)
for item in self.lib.items())
for item_path, added_after in items_added_after.iteritems():
self.assertEqualTimes(items_added_before[item_path], added_after,
"reimport modified Item.added for " +
item_path)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
# coding: utf-8
import ee
import time
import ee.mapclient as mc
import matplotlib.pyplot as plt
import sys
from osgeo import ogr
import os
import glob
class WaterProductivityCalc(object):
def __init__(self):
pass
class L1WaterProductivity(WaterProductivityCalc):
def __init__(self):
ee.Initialize()
self._L1_AGBP_SEASONAL = ee.ImageCollection("projects/fao-wapor/L1_AGBP")
self._L1_AGBP_DEKADAL = ee.ImageCollection("projects/fao-wapor/L1_AGBP250")
self._L1_ETa_DEKADAL = ee.ImageCollection("projects/fao-wapor/L1_AET")
self._L1_AET250 = ee.ImageCollection("users/lpeiserfao/AET250")
@property
def multi_agbp(self):
print("chiama il getter")
return self._L1_AGBP_DEKADAL
@multi_agbp.setter
def multi_agbp(self, value):
print("chiama il setter")
return self._L1_AGBP_DEKADAL.map(
lambda immagine: immagine.multiply(value))
moltiplicatore = 1.25
elaborazione = L1WaterProductivity()
elaborazione.multi_agbp
elaborazione.multi_agbp = moltiplicatore
elaborazione.multi_agbp
|
"""
Command handler
This module contains the infrastructure for accepting commands on the
command line. The process is as follows:
1) The calling object (caller) inputs a string and triggers the command parsing system.
2) The system checks the state of the caller - logged in or not
3) If no command string was supplied, we search the merged cmdset for the system command CMD_NOINPUT
and branch to execute that. --> Finished
4) Cmdsets are gathered from different sources (in order of dropping priority):
channels - all available channel names are auto-created into a cmdset, to allow
for giving the channel name and have the following immediately
sent to the channel. The sending is performed by the CMD_CHANNEL
system command.
object cmdsets - all objects at caller's location are scanned for non-empty
cmdsets. This includes cmdsets on exits.
caller - the caller is searched for its own currently active cmdset.
player - lastly the cmdsets defined on caller.player are added.
5) All the gathered cmdsets (if more than one) are merged into one using the cmdset priority rules.
6) If merged cmdset is empty, raise NoCmdSet exception (this should not happen, at least the
player should have a default cmdset available at all times). --> Finished
7) The raw input string is parsed using the parser defined by settings.COMMAND_PARSER. It
uses the available commands from the merged cmdset to know which commands to look for and
returns one or many matches.
8) If match list is empty, branch to system command CMD_NOMATCH --> Finished
9) If match list has more than one element, branch to system command CMD_MULTIMATCH --> Finished
10) A single match was found. If this is a channel-command (i.e. the command name is that of a channel),
branch to CMD_CHANNEL --> Finished
11) At this point we have found a normal command. We assign useful variables to it that
will be available to the command coder at run-time.
12) We have a unique cmdobject, primed for use. Call all hooks:
at_pre_cmd(), cmdobj.parse(), cmdobj.func() and finally at_post_cmd().
"""
from weakref import WeakValueDictionary
from copy import copy
from traceback import format_exc
from twisted.internet.defer import inlineCallbacks, returnValue
from django.conf import settings
from src.comms.channelhandler import CHANNELHANDLER
from src.utils import logger, utils
from src.commands.cmdparser import at_multimatch_cmd
from src.utils.utils import string_suggestions, to_unicode
from django.utils.translation import ugettext as _
__all__ = ("cmdhandler",)
_GA = object.__getattribute__
_CMDSET_MERGE_CACHE = WeakValueDictionary()
# This decides which command parser is to be used.
# You have to restart the server for changes to take effect.
_COMMAND_PARSER = utils.variable_from_module(*settings.COMMAND_PARSER.rsplit('.', 1))
# System command names - import these variables rather than trying to
# remember the actual string constants. If not defined, Evennia
# hard-coded defaults are used instead.
# command to call if user just presses <return> with no input
CMD_NOINPUT = "__noinput_command"
# command to call if no command match was found
CMD_NOMATCH = "__nomatch_command"
# command to call if multiple command matches were found
CMD_MULTIMATCH = "__multimatch_command"
# command to call if found command is the name of a channel
CMD_CHANNEL = "__send_to_channel_command"
# command to call as the very first one when the user connects.
# (is expected to display the login screen)
CMD_LOGINSTART = "__unloggedin_look_command"
# custom Exceptions
class NoCmdSets(Exception):
"No cmdsets found. Critical error."
pass
class ExecSystemCommand(Exception):
"Run a system command"
def __init__(self, syscmd, sysarg):
self.args = (syscmd, sysarg) # needed by exception error handling
self.syscmd = syscmd
self.sysarg = sysarg
# Helper function
@inlineCallbacks
def get_and_merge_cmdsets(caller, session, player, obj,
callertype, sessid=None):
"""
Gather all relevant cmdsets and merge them.
callertype is one of "session", "player" or "object", depending
on which level the cmdhandler is invoked. Session includes the
cmdsets available to Session, Player and its eventual puppeted Object.
Player-level includes cmdsets on Player and Object, while calling
the handler on an Object only includes cmdsets on itself.
The cmdsets are merged in order of generality, so that the Object's
cmdset is merged last (and will thus take precedence over
same-named and same-prio commands on Player and Session).
Note that this function returns a deferred!
"""
local_obj_cmdsets = [None]
@inlineCallbacks
def _get_channel_cmdsets(player, player_cmdset):
"Channel-cmdsets"
# Create cmdset for all player's available channels
channel_cmdset = None
if not player_cmdset.no_channels:
channel_cmdset = yield CHANNELHANDLER.get_cmdset(player)
returnValue(channel_cmdset)
@inlineCallbacks
def _get_local_obj_cmdsets(obj, obj_cmdset):
"Object-level cmdsets"
# Gather cmdsets from location, objects in location or carried
local_obj_cmdsets = [None]
try:
location = obj.location
except Exception:
location = None
if location and not obj_cmdset.no_objs:
# Gather all cmdsets stored on objects in the room and
# also in the caller's inventory and the location itself
local_objlist = yield (location.contents_get(exclude=obj.dbobj) +
obj.contents +
[location])
for lobj in local_objlist:
try:
# call hook in case we need to do dynamic changing to cmdset
_GA(lobj, "at_cmdset_get")()
except Exception:
logger.log_trace()
# the call-type lock is checked here, it makes sure a player
# is not seeing e.g. the commands on a fellow player (which is why
# the no_superuser_bypass must be True)
local_obj_cmdsets = \
yield [lobj.cmdset.current for lobj in local_objlist
if (lobj.cmdset.current and
lobj.locks.check(caller, 'call', no_superuser_bypass=True))]
for cset in local_obj_cmdsets:
#This is necessary for object sets, or we won't be able to
# separate the command sets from each other in a busy room.
cset.old_duplicates = cset.duplicates
cset.duplicates = True
returnValue(local_obj_cmdsets)
@inlineCallbacks
def _get_cmdset(obj):
"Get cmdset, triggering all hooks"
try:
yield obj.at_cmdset_get()
except Exception:
logger.log_trace()
try:
returnValue(obj.cmdset.current)
except AttributeError:
returnValue(None)
if callertype == "session":
# we are calling the command from the session level
report_to = session
session_cmdset = yield _get_cmdset(session)
cmdsets = [session_cmdset]
if player: # this automatically implies logged-in
player_cmdset = yield _get_cmdset(player)
channel_cmdset = yield _get_channel_cmdsets(player, player_cmdset)
cmdsets.extend([player_cmdset, channel_cmdset])
if obj:
obj_cmdset = yield _get_cmdset(obj)
local_obj_cmdsets = yield _get_local_obj_cmdsets(obj, obj_cmdset)
cmdsets.extend([obj_cmdset] + local_obj_cmdsets)
elif callertype == "player":
# we are calling the command from the player level
report_to = player
player_cmdset = yield _get_cmdset(player)
channel_cmdset = yield _get_channel_cmdsets(player, player_cmdset)
cmdsets = [player_cmdset, channel_cmdset]
if obj:
obj_cmdset = yield _get_cmdset(obj)
local_obj_cmdsets = yield _get_local_obj_cmdsets(obj, obj_cmdset)
cmdsets.extend([obj_cmdset] + local_obj_cmdsets)
elif callertype == "object":
# we are calling the command from the object level
report_to = obj
obj_cmdset = yield _get_cmdset(obj)
local_obj_cmdsets = yield _get_local_obj_cmdsets(obj, obj_cmdset)
cmdsets = [obj_cmdset] + local_obj_cmdsets
else:
raise Exception("get_and_merge_cmdsets: callertype %s is not valid." % callertype)
#cmdsets = yield [caller_cmdset] + [player_cmdset] +
# [channel_cmdset] + local_obj_cmdsets
# weed out all non-found sets
cmdsets = yield [cmdset for cmdset in cmdsets
if cmdset and cmdset.key != "_EMPTY_CMDSET"]
# report cmdset errors to user (these should already have been logged)
yield [report_to.msg(cmdset.errmessage) for cmdset in cmdsets
if cmdset.key == "_CMDSET_ERROR"]
if cmdsets:
# faster to do tuple on list than to build tuple directly
mergehash = tuple([id(cmdset) for cmdset in cmdsets])
if mergehash in _CMDSET_MERGE_CACHE:
# cached merge exist; use that
cmdset = _CMDSET_MERGE_CACHE[mergehash]
else:
# we group and merge all same-prio cmdsets separately (this avoids
# order-dependent clashes in certain cases, such as
# when duplicates=True)
tempmergers = {}
for cmdset in cmdsets:
prio = cmdset.priority
#print cmdset.key, prio
if prio in tempmergers:
# merge same-prio cmdset together separately
tempmergers[prio] = yield cmdset + tempmergers[prio]
else:
tempmergers[prio] = cmdset
# sort cmdsets after reverse priority (highest prio are merged in last)
cmdsets = yield sorted(tempmergers.values(), key=lambda x: x.priority)
# Merge all command sets into one, beginning with the lowest-prio one
cmdset = cmdsets[0]
for merging_cmdset in cmdsets[1:]:
#print "<%s(%s,%s)> onto <%s(%s,%s)>" % (merging_cmdset.key, merging_cmdset.priority, merging_cmdset.mergetype,
# cmdset.key, cmdset.priority, cmdset.mergetype)
cmdset = yield merging_cmdset + cmdset
# store the full sets for diagnosis
cmdset.merged_from = cmdsets
# cache
_CMDSET_MERGE_CACHE[mergehash] = cmdset
else:
cmdset = None
for cset in (cset for cset in local_obj_cmdsets if cset):
cset.duplicates = cset.old_duplicates
#print "merged set:", cmdset.key
returnValue(cmdset)
# Main command-handler function
@inlineCallbacks
def cmdhandler(called_by, raw_string, testing=False, callertype="session", sessid=None):
"""
This is the main function to handle any string sent to the engine.
called_by - object on which this was called from. This is either a Session, a Player or an Object.
raw_string - the command string given on the command line
testing - if we should actually execute the command or not.
if True, the command instance will be returned instead.
callertype - this is one of "session", "player" or "object", in descending
order. So when the Session is the caller, it will merge its
own cmdset into cmdsets from both Player and eventual puppeted
Object (and cmdsets in its room etc). A Player will only
include its own cmdset and the Objects and so on. Merge order
is the same order, so that Object cmdsets are merged in last,
giving them precedence for same-name and same-prio commands.
sessid - Relevant if callertype is "player" - the session id will help
retrieve the correct cmdsets from puppeted objects.
Note that this function returns a deferred!
"""
raw_string = to_unicode(raw_string, force_string=True)
session, player, obj = None, None, None
if callertype == "session":
session = called_by
player = session.player
if player:
obj = yield _GA(player.dbobj, "get_puppet")(session.sessid)
elif callertype == "player":
player = called_by
if sessid:
obj = yield _GA(player.dbobj, "get_puppet")(sessid)
elif callertype == "object":
obj = called_by
else:
raise RuntimeError("cmdhandler: callertype %s is not valid." % callertype)
# the caller will be the one to receive messages and exert its permissions.
# we assign the caller with preference 'bottom up'
caller = obj or player or session
try: # catch bugs in cmdhandler itself
try: # catch special-type commands
cmdset = yield get_and_merge_cmdsets(caller, session, player, obj,
callertype, sessid)
if not cmdset:
# this is bad and shouldn't happen.
raise NoCmdSets
unformatted_raw_string = raw_string
raw_string = raw_string.strip()
if not raw_string:
# Empty input. Test for system command instead.
syscmd = yield cmdset.get(CMD_NOINPUT)
sysarg = ""
raise ExecSystemCommand(syscmd, sysarg)
# Parse the input string and match to available cmdset.
# This also checks for permissions, so all commands in match
# are commands the caller is allowed to call.
matches = yield _COMMAND_PARSER(raw_string, cmdset, caller)
# Deal with matches
if len(matches) > 1:
# We have a multiple-match
syscmd = yield cmdset.get(CMD_MULTIMATCH)
sysarg = _("There were multiple matches.")
if syscmd:
# use custom CMD_MULTIMATCH
syscmd.matches = matches
else:
# fall back to default error handling
sysarg = yield at_multimatch_cmd(caller, matches)
raise ExecSystemCommand(syscmd, sysarg)
if len(matches) == 1:
# We have a unique command match. But it may still be invalid.
match = matches[0]
cmdname, args, cmd = match[0], match[1], match[2]
# check if we allow this type of command
if cmdset.no_channels and hasattr(cmd, "is_channel") and cmd.is_channel:
matches = []
if cmdset.no_exits and hasattr(cmd, "is_exit") and cmd.is_exit:
matches = []
if not matches:
# No commands match our entered command
syscmd = yield cmdset.get(CMD_NOMATCH)
if syscmd:
# use custom CMD_NOMATCH command
sysarg = raw_string
else:
# fallback to default error text
sysarg = _("Command '%s' is not available.") % raw_string
suggestions = string_suggestions(raw_string,
cmdset.get_all_cmd_keys_and_aliases(caller),
cutoff=0.7, maxnum=3)
if suggestions:
pass # sysarg += _(" Maybe you meant %s?") % utils.list_to_string(suggestions, _('or'), addquote=True)
else:
sysarg += _(" Type \"help\" for help.")
raise ExecSystemCommand(syscmd, sysarg)
# Check if this is a Channel-cmd match.
if hasattr(cmd, 'is_channel') and cmd.is_channel:
# even if a user-defined syscmd is not defined, the
# found cmd is already a system command in its own right.
syscmd = yield cmdset.get(CMD_CHANNEL)
if syscmd:
# replace system command with custom version
cmd = syscmd
cmd.sessid = session.sessid if session else None
sysarg = "%s:%s" % (cmdname, args)
raise ExecSystemCommand(cmd, sysarg)
# A normal command.
# Assign useful variables to the instance
cmd.caller = caller
cmd.cmdstring = cmdname
cmd.args = args
cmd.cmdset = cmdset
cmd.sessid = session.sessid if session else sessid
cmd.session = session
cmd.player = player
cmd.raw_string = unformatted_raw_string
#cmd.obj # set via on-object cmdset handler for each command,
# since this may be different for every command when
# merging multiple cmdsets
if hasattr(cmd, 'obj') and hasattr(cmd.obj, 'scripts'):
# cmd.obj is automatically made available by the cmdhandler.
# we make sure to validate its scripts.
yield cmd.obj.scripts.validate()
if testing:
# only return the command instance
returnValue(cmd)
# pre-command hook
yield cmd.at_pre_cmd()
# Parse and execute
yield cmd.parse()
# (return value is normally None)
ret = yield cmd.func()
# post-command hook
yield cmd.at_post_cmd()
if cmd.save_for_next:
# store a reference to this command, possibly
# accessible by the next command.
caller.ndb.last_cmd = yield copy(cmd)
else:
caller.ndb.last_cmd = None
# cleanup
del cmd.caller
del cmd.player
del cmd.session
del cmd.cmdset
# Done! This returns a deferred. By default, Evennia does
# not use this at all.
returnValue(ret)
except ExecSystemCommand, exc:
# Not a normal command: run a system command, if available,
# or fall back to a return string.
syscmd = exc.syscmd
sysarg = exc.sysarg
if syscmd:
syscmd.caller = caller
syscmd.cmdstring = syscmd.key
syscmd.args = sysarg
syscmd.cmdset = cmdset
syscmd.sessid = session.sessid if session else None
syscmd.raw_string = unformatted_raw_string
if hasattr(syscmd, 'obj') and hasattr(syscmd.obj, 'scripts'):
# cmd.obj is automatically made available.
# we make sure to validate its scripts.
yield syscmd.obj.scripts.validate()
if testing:
# only return the command instance
returnValue(syscmd)
# parse and run the command
yield syscmd.parse()
yield syscmd.func()
elif sysarg:
# return system arg
caller.msg(exc.sysarg)
except NoCmdSets:
# Critical error.
string = "No command sets found! This is a sign of a critical bug.\n"
string += "The error was logged.\n"
string += "If logging out/in doesn't solve the problem, try to "
string += "contact the server admin through some other means "
string += "for assistance."
caller.msg(_(string))
logger.log_errmsg("No cmdsets found: %s" % caller)
except Exception:
# We should not end up here. If we do, it's a programming bug.
string = "%s\nAbove traceback is from an untrapped error."
string += " Please file a bug report."
logger.log_trace(_(string))
caller.msg(string % format_exc())
except Exception:
# This catches exceptions in cmdhandler exceptions themselves
string = "%s\nAbove traceback is from a Command handler bug."
string += " Please contact an admin and/or file a bug report."
logger.log_trace(_(string))
caller.msg(string % format_exc())
|
from django import forms
from allauth.account.forms import SignupForm
from django.utils.translation import gettext_lazy as _
class CustomSignupForm(SignupForm):
email = forms.EmailField(label=_("E-mail"),
required=True,
widget=forms.TextInput(
attrs={'type': 'email',
'placeholder': _('E-mail address')}))
nickname = forms.CharField(label=_('Nickname (optional)'),
required=False,
widget=forms.TextInput(
attrs={'type': 'text',
'placeholder': _('Nickname (optional)')}
))
def signup(self, request, user):
user.email = self.cleaned_data['email']
user.nickname = self.cleaned_data['nickname']
user.save()
return user
|
# Author : Ali Snedden
# Date : 5/15/19
# License: MIT
# Purpose:
#
# Notes :
#
# Questions:
#
# References :
#
import time
import sys
import os
import glob
import numpy as np
import subprocess
import shutil
import bisect
from error import exit_with_error
from functions import parse_run_time
from functions import get_core_ids
def print_help(Arg):
"""
ARGS:
arg : exit value
RETURN:
N/A
DESCRIPTION:
Print Help. Exit with value arg
DEBUG:
1. Tested, it worked
FUTURE:
"""
sys.stdout.write(
"USAGE : ./src/driver.py [options] /abs/path/to/workspace /abs/path/to/ref\n"
" [options] = 'all' : Builds data, runs all tests\n"
" = 'build_mat_mult_data' : Builds matrix_multiply data \n"
" = 'mat_mult_cache_opt' : Run matrix_multiply_cache_opt tests\n"
" = 'mat_mult_non_cache_opt' : Run matrix_multiply_cache_opt tests\n"
" = 'build_rnaseq_data' : Creates single end RNA-Seq data\n"
" = 'align_rnaseq_tophat' : Align samples in data/rnaseq w/ tophat\n"
" = 'align_rnaseq_hisat' : Align samples in data/rnaseq w/ hisat\n"
" = 'cufflinks_assemble' : Must have run tophat. Assembles transcriptome\n"
" = 'cuffmerge' : Must have run tophat and cufflinks\n"
" = 'cuffcompare' : Must have run tophat,cufflinks\n"
" = 'cuffquant' : Must have run tophat,cufflinks,cuffmerge\n"
" = 'cuffnorm' : Must have run tophat,cufflinks,"
" = 'cuffdiff' : Must have run tophat,cufflinks,"
"cuffmerge and cuffquant\n"
" = 'kelvin' : Runs kelvin (a statistical genetics software) \n"
" = 'local_memory_access' : grep's a large file in temp\n"
" /abs/path/to/workspace : Path where all the output/data gets saved.\n"
" /abs/path/to/ref : Path where bowtie/hisat indices and ref fasta/gtf"
" are stored\n\n"
" NOTES : \n"
" 1. Only one option can be passed at a time.\n"
" 2. It is assumed that all exectuables (i.e. tophat2, bowtie2, etc.)\n"
" are located in you shell PATH\n"
" 3. Location / Names of references _must_ be :\n"
" a) Bowtie2 indices: /refPath/Bowtie2Index/Homo_sapiens.GRC38 \n"
" b) Hisat2 indices : /refPath/HisatIndex/genome \n"
" c) Genome Fasta : /refPath/Homo_sapiens.GRCh38.dna.primary_assembly.fa:\n"
" d) GTF file : /refPath/Homo_sapiens.GRCh38.83.gtf\n"
" e) Short Chr1 Gtf : /refPath/chr1_short.gtf\n"
" f) Short Chr1 fasta : /refPath/chr1_short.fa\n"
)
sys.exit(Arg)
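# Example invocations (paths are placeholders, not taken from this script):
#   ./src/driver.py build_mat_mult_data /scratch/benchmark_work /data/refs
#   ./src/driver.py mat_mult_cache_opt /scratch/benchmark_work /data/refs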
def main():
"""
ARGS:
RETURN:
DESCRIPTION:
NOTES:
DEBUG:
FUTURE:
"""
### Check Python version and CL args ###
if(sys.version_info[0] != 3):
exit_with_error("ERROR!!! Runs with python3, NOT python-{}\n\n".format(
sys.version_info[0]))
nArg = len(sys.argv)
if(nArg == 2 and (sys.argv[1][0:3] == "--h" or sys.argv[1][0:2] == "-h")):
print_help(0)
elif(nArg != 4):
print_help(1)
startTime = time.time()
print("Start Time : {}".format(time.strftime("%a, %d %b %Y %H:%M:%S ",
time.localtime())))
print("Logging run output to driver.log\n\n")
### Variables ###
options = sys.argv[1]
workPath = sys.argv[2] # Path where all the output/work will be saved.
refPath = sys.argv[3] # Path where all the ref data and indices are located
ompNumThreadsL = [1,2,5,20] # Cores used in OMP tasks
matrixSizeL = [5000] # outer dim of mats to run matrix_multiply on
#matrixSizeL = [2000,3000,5000] # outer dim of mats to run matrix_multiply on
#rnaSeqSizeL = [10**4,10**5]
rnaSeqSizeL = [10**5]
nTrials = 3 # number of trials to test,get stdev and mean
shortNTrials= 1 # shortened num of trials to test,get stdev and mean
# Create work path dir if doesn't exist
if(not os.path.isdir(workPath)):
os.mkdir(workPath)
## In Linux singularity container add cores per socket and total cores
## to ompNumThreadsL
if(shutil.which('lscpu') != None):
# Record raw lscpu, lscpu -e and numactl --hardware
lscpuLog=open("{}/lscpu.log".format(workPath), "a")
cmd="lscpu"
lscpuLog.write("\n{}:\n{}\n".format(cmd,subprocess.getoutput(cmd)))
cmd="lscpu -e"
lscpuLog.write("\n{}:\n{}\n".format(cmd,subprocess.getoutput(cmd)))
cmd="numactl --hardware"
lscpuLog.write("\n{}:\n{}\n".format(cmd,subprocess.getoutput(cmd)))
lscpuLog.close()
# other details
cmd="lscpu | grep 'Core(s) per socket:' | awk '{print $4}'"
coresPerSocket = int(subprocess.getoutput(cmd))
cmd="lscpu | grep '^CPU(s):' | awk '{print $2}'"
totalCores = int(subprocess.getoutput(cmd))
cmd="lscpu | grep 'NUMA node0 CPU' | awk '{print $4}'"
## Numa - node
coresPerNuma = subprocess.getoutput(cmd)
if('-' in coresPerNuma):
coresPerNuma = coresPerNuma.split('-')
coresPerNuma[0] = int(coresPerNuma[0])
coresPerNuma[1] = int(coresPerNuma[1])
coresPerNuma = coresPerNuma[1] - coresPerNuma[0] + 1
elif(',' in coresPerNuma): # Interleave off
coresPerNuma = len(coresPerNuma.split(','))
else:
exit_with_error("ERROR!!! Format for coresPerNuma is not handled"
": {}".format(coresPerNuma))
## Insert
bisect.insort_left(ompNumThreadsL, coresPerNuma)
bisect.insort_left(ompNumThreadsL, coresPerSocket)
bisect.insort_left(ompNumThreadsL, totalCores)
ompNumThreadsL=list(sorted(set(ompNumThreadsL)))
print("Cores per NUMA : {}".format(coresPerNuma))
print("Cores per socket : {}".format(coresPerSocket))
print("Total Cores : {}".format(totalCores))
print("Cores tested : {}".format(ompNumThreadsL))
# Get operating system and list of cores (linux only) to take advantage of NUMA
curOS = sys.platform
if(curOS == 'darwin'):
curOS = 'osx' # Rename for my own selfish readability
elif(curOS == 'linux'):
cmd = "grep -P 'processor[\t ]' /proc/cpuinfo | cut -d: -f2 | tr -d ' '"
coreIDL = subprocess.getoutput(cmd)
coreIDL = [int(idx) for idx in coreIDL.split()]
ompCoresIdD = dict() # List of list cores to use associated with ompNumThreadsL
for nThread in ompNumThreadsL:
ompCoresIdD[nThread] = get_core_ids(NumThreads = nThread)
else:
exit_with_error("ERROR!! {} is an unsupported operating system".format(curOS))
validOptionsL = ['all', 'build_mat_mult_data', 'mat_mult_non_cache_opt',
'local_memory_access', 'mat_mult_cache_opt', 'build_rnaseq_data',
'align_rnaseq_tophat', 'align_rnaseq_hisat', 'cufflinks_assemble',
'cuffmerge', 'cuffcompare', 'cuffquant', 'cuffnorm', 'cuffdiff', 'kelvin']
if(options not in validOptionsL):
exit_with_error("ERROR!!! {} is an invalid option\n".format(options))
######## Run Tests ########
if(options == 'all' or options == 'build_mat_mult_data'):
nThread = 1
print("Building data for matrix_multiply (time to run is for numpy's matrix mult.: ")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
### Create directory structure in data
outDirPrefix = "{}/data/matrix".format(workPath)
if(not os.path.isdir(outDirPrefix)):
os.mkdir(outDirPrefix)
for size in matrixSizeL:
outDir = "{}/{}".format(outDirPrefix,size)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
runTimeV = np.zeros([shortNTrials])
for tIdx in range(shortNTrials):
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = ("{} python3 src/matrix/matrix_generator.py {} 10000 "
"10000 {} {}".format(taskset, size, size, outDir))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'mat_mult_cache_opt'):
print("matrix_multiply (cache optimized using OpenMP) : ")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
### Create directory structure in output
outDirPrefix = "{}/output/matrix_cache_opt".format(workPath)
if(not os.path.isdir(outDirPrefix)):
os.mkdir(outDirPrefix)
for size in matrixSizeL:
outDir = "{}/{}".format(outDirPrefix,size)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
for nThread in ompNumThreadsL:
runTimeV = np.zeros([nTrials])
#nThread = 10
#size=2000
for tIdx in range(nTrials):
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = ("export OMP_NUM_THREADS={}; {} "
"./src/matrix/matrix_multiply_cache_opt "
"{}/data/matrix/{}/A.txt {}/data/matrix/{}/B.txt "
"{}".format(nThread,taskset,workPath,size,workPath,size,
outDir))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'mat_mult_non_cache_opt'):
print("matrix_multiply (non-cache optimized using OpenMP) : ")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
### Create directory structure in output
outDirPrefix = "{}/output/matrix_non_cache_opt".format(workPath)
if(not os.path.isdir(outDirPrefix)):
os.mkdir(outDirPrefix)
for size in matrixSizeL:
outDir = "{}/{}".format(outDirPrefix,size)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
for nThread in ompNumThreadsL:
runTimeV = np.zeros([nTrials])
#nThread = 10
#size=2000
for tIdx in range(nTrials):
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = ("export OMP_NUM_THREADS={}; {} "
"./src/matrix/matrix_multiply_non_cache_opt "
"{}/data/matrix/{}/A.txt {}/data/matrix/{}/B.txt "
"{}".format(nThread,taskset,workPath,size,workPath,
size,outDir))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'build_rnaseq_data'):
print("Building RNA-Seq Data sets : ")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
nThread = 1
nSamp = 3
treatSampL = []
wtSampL = []
gtf="{}/chr1_short.gtf".format(refPath)
genome ="{}/chr1_short.fa".format(refPath)
configL=["config/config_wt_chr1.txt", "config/config_treat_chr1.txt"]
# Create output directory structure
outDir = "{}/data/rnaseq".format(workPath)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
outDir = "{}/fastq/".format(outDir)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
## Loop
for size in rnaSeqSizeL:
runTimeV = np.zeros([nSamp*len(configL)])
tIdx = 0
for config in configL:
for samp in range(nSamp):
## Set output files
if("treat" in config):
if(not os.path.isdir("{}/{}".format(outDir,size))):
os.mkdir("{}/{}".format(outDir,size))
outFile = "{}/{}/treat_{}".format(outDir,size,samp)
treatSampL.append(outFile)
elif("wt" in config):
if(not os.path.isdir("{}/{}".format(outDir,size))):
os.mkdir("{}/{}".format(outDir,size))
outFile = "{}/{}/wt_{}".format(outDir,size,samp)
wtSampL.append(outFile)
else:
exit_with_error("ERROR!!! No correct config file found!\n")
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = ("export OMP_NUM_THREADS={}; "
"{} python3 src/simulate_fastq_data/simulate_fastq.py "
"{} {} {} {} {} single"
"".format(nThread, taskset, gtf, genome, config, size, outFile))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'align_rnaseq_tophat'):
print("Aligning RNA-Seq Data sets with tophat : ")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
outDirPref = "{}/output/rnaseq".format(workPath)
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
outDirPref = os.path.abspath("{}/output/rnaseq/tophat".format(workPath))
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
inDirPref = os.path.abspath("{}/data/rnaseq/fastq".format(workPath))
if(not os.path.isdir(inDirPref)):
exit_with_error("ERROR!!! fastq data does not exits. Run build_rnaseq_data option")
bowtieIdxPath = "{}/Bowtie2Index/Homo_sapiens.GRC38".format(refPath)
## Loop
for size in rnaSeqSizeL:
sampFileL = glob.glob("{}/{}/*.fq".format(inDirPref,size))
if(not os.path.isdir("{}/{}".format(outDirPref,size))):
os.mkdir("{}/{}".format(outDirPref,size))
for nThread in [1]: # Tophat is poorly parallelizable
runTimeV = np.zeros([len(sampFileL)])
tIdx = 0
for samp in sampFileL:
sampDir = samp.split("/")[-1].split(".")[0]
## Set output directory
outDir = "{}/{}/{}".format(outDirPref,size,sampDir)
if(curOS == "osx"):
# My OSX configuration b/c I use virtualenv
python2="source ~/.local/virtualenvs/python2.7/bin/activate;"
cmd = (
"{}; time {} tophat2 -p {} -o {} {} {}"
"".format(python2,taskset, nThread, outDir,
bowtieIdxPath, samp))
elif(curOS == 'linux'):
# # On CentOS, default python is 2.6.6
# python2="/usr/bin/python"
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
cmd = (
"time {} tophat2 -p {} -o {} {} {}"
"".format(taskset, nThread, outDir,
bowtieIdxPath, samp))
else:
exit_with_error("ERROR!!! OS not supported")
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'align_rnaseq_hisat'):
print("Aligning RNA-Seq Data sets with hisat : ")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
# Get directory structure
outDirPref = "{}/output/rnaseq".format(workPath)
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
outDirPref = os.path.abspath("{}/output/rnaseq/hisat".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
inDirPref = os.path.abspath("{}/data/rnaseq/fastq".format(workPath)) ## prefix
if(not os.path.isdir(inDirPref)):
exit_with_error("ERROR!!! fastq data does not exits. Run build_rnaseq_data option")
hisatIdxPath = "{}/HisatIndex/genome".format(refPath)
## Loop
for size in rnaSeqSizeL:
sampFileL = glob.glob("{}/{}/*.fq".format(inDirPref,size))
if(not os.path.isdir("{}/{}".format(outDirPref,size))):
os.mkdir("{}/{}".format(outDirPref,size))
for nThread in ompNumThreadsL:
runTimeV = np.zeros([len(sampFileL)])
tIdx = 0
for samp in sampFileL:
sampDir = samp.split("/")[-1].split(".")[0]
## Set output directory
outDir = "{}/{}/{}".format(outDirPref,size,sampDir)
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
cmd = (
"time {} hisat2 -p {} --phred33 -x {} -U {} -S {}/output.sam"
"".format(taskset, nThread, hisatIdxPath, samp, outDir))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'cufflinks_assemble'):
print("Assembling transcriptome using cufflinks: ")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
outDirPref = os.path.abspath("{}/output/rnaseq/".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
outDirPref = os.path.abspath("{}/output/rnaseq/cufflinks".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
inDirPref = os.path.abspath("{}/output/rnaseq/tophat".format(workPath)) ## prefix
gtf="{}/Homo_sapiens.GRCh38.83.gtf".format(refPath)
## Loop
for size in rnaSeqSizeL:
sampFileL = glob.glob("{}/{}/*/accepted_hits.bam".format(inDirPref,size))
if(not os.path.isdir("{}/{}".format(outDirPref,size))):
os.mkdir("{}/{}".format(outDirPref,size))
for nThread in ompNumThreadsL:
runTimeV = np.zeros([len(sampFileL)])
tIdx = 0
for samp in sampFileL:
sampDir = samp.split("/")[-2].split(".")[0]
## Set output directory
outDir = "{}/{}/{}".format(outDirPref,size,sampDir)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = (
"time {} cufflinks --num-threads {} -g {} --output-dir {} {}"
"".format(taskset, nThread, gtf, outDir, samp))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'cuffmerge'):
print("Merging assembled transcriptomes using cuffmerge")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
outDirPref = os.path.abspath("{}/output/rnaseq/".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
outDirPref = os.path.abspath("{}/output/rnaseq/cuffmerge".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
inDirPref = os.path.abspath("{}/output/rnaseq/cufflinks".format(workPath)) ## prefix
gtf="{}/Homo_sapiens.GRCh38.83.gtf".format(refPath)
genome="{}/Homo_sapiens.GRCh38.dna.primary_assembly.fa".format(refPath)
curDir = os.path.dirname(os.path.realpath(__file__))
## Loop
for size in rnaSeqSizeL:
sampFileL = glob.glob("{}/{}/*/transcripts.gtf".format(inDirPref,size))
outDir = "{}/{}".format(outDirPref,size)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
assemblyPath = "{}/assemblies.txt".format(outDir)
if(not os.path.isfile(assemblyPath)):
assemblyFile = open(assemblyPath, "w+")
for samp in sampFileL:
assemblyFile.write("{}\n".format(samp))
assemblyFile.close()
for nThread in ompNumThreadsL:
## Consider adding nTrials here.
runTimeV = np.zeros([1])
tIdx = 0
if(curOS == "osx"):
# My OSX configuration b/c I use virtualenv
python2="source ~/.local/virtualenvs/python2.7/bin/activate;"
cmd = (
"{} "
"time cuffmerge --num-threads {} -o {} "
"--ref-gtf {} --ref-sequence {} {}"
"".format(python2, nThread, outDir, gtf, genome,
assemblyPath))
elif(curOS == "linux"):
# On CentOS, default python is 2.6.6
# python2="/usr/bin/python"
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
cmd = (
"pwd; cd /tmp/; alias python='/usr/bin/python';"
"time {} cuffmerge --num-threads {} -o {} "
"--ref-gtf {} --ref-sequence {} {}; cd {}/../"
"".format(taskset, nThread, outDir, gtf, genome,
assemblyPath, curDir))
else:
exit_with_error("ERROR!!! Unsupported OS.")
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'cuffcompare'):
print("Comparing cufflinks gtf using cuffcompare")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
# Check and make directory structure
outDirPref = os.path.abspath("{}/output/rnaseq/".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
exit_with_error("ERROR!!! Expecting {}/output/rnaseq. Must have run tophat "
"and cufflinks prior\n".format(workPath))
outDirPref = os.path.abspath("{}/output/rnaseq/cuffcompare".format(workPath))
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
inDirPref = os.path.abspath("{}/output/rnaseq/cufflinks".format(workPath)) ## prefix
gtf="{}/Homo_sapiens.GRCh38.83.gtf".format(refPath)
genome="{}/Homo_sapiens.GRCh38.dna.primary_assembly.fa".format(refPath)
nThread = 1
## Loop
for size in rnaSeqSizeL:
sampFileL = glob.glob("{}/{}/*/transcripts.gtf".format(inDirPref,size))
outPref = "{}/{}".format(outDirPref,size)
## Consider adding nTrials here.
runTimeV = np.zeros([1])
tIdx = 0
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = (
"time {} cuffcompare -o {} -r {} -R -C -V {}"
"".format(taskset,outPref, gtf, " ".join(sampFileL)))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'cuffquant'):
print("Quantifying gene expression using cuffquant")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
outDirPref = os.path.abspath("{}/output/rnaseq/".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
outDirPref = os.path.abspath("{}/output/rnaseq/cuffquant".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
inGtfDirPref = os.path.abspath("{}/output/rnaseq/cuffmerge".format(workPath)) ## prefix
inBamDirPref = os.path.abspath("{}/output/rnaseq/tophat".format(workPath)) ## prefix
## Loop
for size in rnaSeqSizeL:
bamFileL = glob.glob("{}/{}/*/accepted_hits.bam".format(inBamDirPref,size))
outDir = "{}/{}".format(outDirPref,size)
gtf="{}/{}/merged.gtf".format(inGtfDirPref,size)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
for nThread in ompNumThreadsL:
## Consider adding nTrials here.
runTimeV = np.zeros([len(bamFileL)])
tIdx = 0
for bamFile in bamFileL:
outDirSamp = "{}/{}".format(outDir,bamFile.split("/")[-2].split(".")[0])
if(not os.path.isdir(outDirSamp)):
os.mkdir(outDirSamp)
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = (
"time {} cuffquant --num-threads {} --output-dir {} "
"{} {}"
"".format(taskset, nThread, outDirSamp, gtf, bamFile))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'cuffnorm'):
print("Quantifying gene expression using cuffnorm")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
outDirPref = os.path.abspath("{}/output/rnaseq/".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
outDirPref = os.path.abspath("{}/output/rnaseq/cuffnorm".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
inGtfDirPref = os.path.abspath("{}/output/rnaseq/cuffmerge".format(workPath)) ## prefix
inCxbDirPref = os.path.abspath("{}/output/rnaseq/cuffquant".format(workPath)) ## prefix
## Loop
for size in rnaSeqSizeL:
cxbFileL = glob.glob("{}/{}/*/abundances.cxb".format(inCxbDirPref,size))
cxbFileL = sorted(cxbFileL) ## Break up into replicates
# Get treat and wt groups
sampNameL = [name.split('/')[-2] for name in cxbFileL]
treatIdxL = ['treat_' in name for name in sampNameL]
wtIdxL = ['wt_' in name for name in sampNameL]
treatCxbL = []
wtCxbL = []
for idx in range(len(treatIdxL)):
if(treatIdxL[idx] == True):
treatCxbL.append(cxbFileL[idx])
elif(wtIdxL[idx] == True):
wtCxbL.append(cxbFileL[idx])
else:
exit_with_error("ERROR!!! neither treatIdxL[idx] {} nor wtIdxL[idx] "
"{} are" "True".format(treatIdxL[idx], wtIdxL[idx]))
outDir = "{}/{}".format(outDirPref,size)
gtf="{}/{}/merged.gtf".format(inGtfDirPref,size)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
for nThread in ompNumThreadsL:
## Consider adding nTrials here.
runTimeV = np.zeros([1])
tIdx = 0
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = (
"time {} cuffnorm --num-threads {} --output-dir {} -L {} "
" {} {} {}"
"".format(taskset, nThread, outDir, "treat,wt", gtf,
",".join(treatCxbL), ",".join(wtCxbL)))
#print(cmd)
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
if(options == 'all' or options == 'cuffdiff'):
print("Quantifying gene expression using cuffdiff")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Size", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
outDirPref = os.path.abspath("{}/output/rnaseq/".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
outDirPref = os.path.abspath("{}/output/rnaseq/cuffdiff".format(workPath)) ## prefix
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
inGtfDirPref = os.path.abspath("{}/output/rnaseq/cuffmerge".format(workPath)) ## prefix
inCxbDirPref = os.path.abspath("{}/output/rnaseq/cuffquant".format(workPath)) ## prefix
## Loop
for size in rnaSeqSizeL:
cxbFileL = glob.glob("{}/{}/*/abundances.cxb".format(inCxbDirPref,size))
cxbFileL = sorted(cxbFileL) ## Break up into replicates
# Get treat and wt groups
sampNameL = [name.split('/')[-2] for name in cxbFileL]
treatIdxL = ['treat_' in name for name in sampNameL]
wtIdxL = ['wt_' in name for name in sampNameL]
treatCxbL = []
wtCxbL = []
for idx in range(len(treatIdxL)):
if(treatIdxL[idx] == True):
treatCxbL.append(cxbFileL[idx])
elif(wtIdxL[idx] == True):
wtCxbL.append(cxbFileL[idx])
else:
exit_with_error("ERROR!!! neither treatIdxL[idx] {} nor wtIdxL[idx] "
"{} are" "True".format(treatIdxL[idx], wtIdxL[idx]))
outDir = "{}/{}".format(outDirPref,size)
gtf="{}/{}/merged.gtf".format(inGtfDirPref,size)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
# Cuffdiff is too time intensive to go over all threads
for nThread in [ompNumThreadsL[0]]: # Cheap hack iter over only nthread=1.
## Consider adding nTrials here.
runTimeV = np.zeros([1])
tIdx = 0
if(curOS == 'linux'):
taskset = "taskset -c {} ".format(ompCoresIdD[nThread])
else:
taskset = ""
cmd = (
"time {} cuffdiff --num-threads {} --output-dir {} -L {} "
" {} {} {}"
"".format(taskset, nThread, outDir, "treat,wt", gtf,
",".join(treatCxbL), ",".join(wtCxbL)))
#print(cmd)
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[tIdx]= runTime
tIdx = tIdx + 1
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format(size, nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
# Note :
# 1. This will only run on Linux, not OSX
# 2. Per John, it is near pointless to run multiple threads here.
# Just run it via his run_kelvin.sh, and leave my machinery out of it
# 3. His script computes only the mean, but I'll shoe horn it into my
# reporting scheme
if(options == 'all' or options == 'kelvin'):
print("Runnning Kelving...")
print("--------------------------------------------------------")
print(" {:<10} | {:<12} | {:<15} | {:<15}".format("Short", "OMP_Threads", "mean",
"stdev"))
print("--------------------------------------------------------")
# Create output directory structure
outDirPref = "{}/output".format(workPath)
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
outDirPref = "{}/kelvin".format(outDirPref)
curDir = os.path.dirname(os.path.realpath(__file__))
if(not os.path.isdir(outDirPref)):
os.mkdir(outDirPref)
nThread = 1
runTimeV = np.zeros([1])
## Loop
outDir = "{}".format(outDirPref)
if(not os.path.isdir(outDir)):
os.mkdir(outDir)
cmd = ("export LD_LIBRARY_PATH={}/kelvin/:$LD_LIBRARY_PATH;"
"export PATH={}/kelvin/:$PATH;"
"bash {}/kelvin/run_kelvin.sh {} {}/kelvin" # arg1 =outputdir, arg2=/path/to/kelvin.conf
"".format(curDir, curDir, curDir, outDir, curDir))
output = "{}\n".format(cmd)
output = output + subprocess.getoutput(cmd)
runTime = parse_run_time(output,workPath) # Run time
runTimeV[0]= runTime
print(" {:<10} | {:<12} | {:<15.4f} | {:<15.4f}".format("Short", nThread,
np.mean(runTimeV), np.std(runTimeV)))
print("--------------------------------------------------------")
print("Run Time for {} option : {:.4f} h\n\n".format(options,(time.time() - startTime)/3600.0))
sys.exit(0)
if __name__ == "__main__":
main()
|
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
class relative_depth_crit(nn.Module):
def __loss_func_arr(self, z_A, z_B, ground_truth):
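# Ordinal ranking loss over point pairs: mask = |ground_truth| is 1 for ordered pairs
# (ground_truth = +1/-1) and 0 for equal-depth pairs (ground_truth = 0). Ordered pairs
# contribute log(1 + exp(-gt * (z_A - z_B))); equal-depth pairs contribute (z_A - z_B)^2.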
mask = torch.abs(ground_truth)
z_A = z_A[0]
z_B = z_B[0]
return mask * torch.log(1 + torch.exp(-ground_truth * (z_A - z_B))) + (1 - mask) * (z_A - z_B) * (z_A - z_B)
def __init__(self):
super(relative_depth_crit, self).__init__()
def forward(self, input, target):
self.input = input
self.target = target
self.output = Variable(torch.Tensor([0])).cuda()
n_point_total = 0
cpu_input = input
for batch_idx in range(0, cpu_input.size()[0]):
n_point_total += target[batch_idx]['n_point']
x_A_arr = target[batch_idx]['x_A']
y_A_arr = target[batch_idx]['y_A']
x_B_arr = target[batch_idx]['x_B']
y_B_arr = target[batch_idx]['y_B']
batch_input = cpu_input[batch_idx, 0]
z_A_arr = batch_input.index_select(1, x_A_arr.long()).gather(0, y_A_arr.view(1, -1).long())
z_B_arr = batch_input.index_select(1, x_B_arr.long()).gather(0, y_B_arr.view(1, -1).long())
ground_truth_arr = target[batch_idx]['ordianl_relation']
self.output += torch.sum(self.__loss_func_arr(z_A_arr, z_B_arr, ground_truth_arr))
return self.output / n_point_total
def _grad_loss_func(self, z_A, z_B, ground_truth):
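# Hand-derived gradients of the loss above w.r.t. z_A and z_B. forward() relies on autograd,
# so this appears to be kept only for reference/verification.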
mask = torch.abs(ground_truth)
z_A_z_B = z_A - z_B
d = z_A_z_B * z_A_z_B
grad_A1 = z_A_z_B * 2
grad_B1 = - grad_A1
denom = torch.exp(z_A_z_B * ground_truth) + 1
grad_A2 = -ground_truth / denom
grad_B2 = ground_truth / denom
grad_A = mask * grad_A2 + (1 - mask) * grad_A1
grad_B = mask * grad_B2 + (1 - mask) * grad_B1
return grad_A, grad_B
if __name__ == '__main__':
# testing
crit = relative_depth_crit()
print(crit)
x = Variable(torch.zeros(1, 1, 6, 6).cuda(), requires_grad=True)
target = {}
target[0] = {}
target[0]['x_A'] = Variable(torch.Tensor([0, 1, 2, 3, 4, 5])).cuda()
target[0]['y_A'] = Variable(torch.Tensor([0, 1, 2, 3, 4, 5])).cuda()
target[0]['x_B'] = Variable(torch.Tensor([0, 0, 0, 0, 0, 0])).cuda()
target[0]['y_B'] = Variable(torch.Tensor([5, 4, 3, 2, 1, 0])).cuda()
target[0]['ordianl_relation'] = Variable(torch.Tensor([-1, 0, 1, 1, -1, -1])).cuda()
target[0]['n_point'] = 6
loss = crit.forward(x, target)
print(loss)
loss.backward()
# a = crit.backward(1.0)
# print(a)
print(x.grad)
# print(x.creator)
|
import carla
import cv2
import gym
import random
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from agents.navigation.behavior_agent import BehaviorAgent, BasicAgent
from carla import ColorConverter as cc
from collections import deque
from copy import deepcopy
from enum import Enum, auto
from gym import spaces
from PIL import Image
from queue import Queue
from tqdm import trange
from .misc import get_pos, get_lane_dis_numba, get_vehicle_angle
from .route_planner import RoutePlanner
from .manual_route_planner import ManualRoutePlanner, TOWN7_PLAN, TOWN7_REVERSE_PLAN
from ..utils import center_crop, normalize_image
from agents.navigation.behavior_agent import BehaviorAgent
from agents.navigation.global_route_planner import RoadOption
class RouteMode(Enum):
BASIC_RANDOM = auto()
MANUAL_LAP = auto()
_walker_spawn_points_cache = []
_load_world = False
class CarlaEnv(gym.Env):
start_wp_idx = 0
def __init__(self, **kwargs):
global _load_world
self.host = 'localhost'
self.port = 2000
self.n_images = kwargs.get('n_frames', 1)
observation_size = kwargs.get('image_size', [256, 512])
self.obs_width = observation_size[1]
self.obs_height = observation_size[0]
self.obs_dtype = np.float16
self.map = 'Town07'
self.fps_mode = kwargs.get('fps_mode')
if self.fps_mode == 'high':
self.dt = 0.1
else:
self.dt = 0.2
self.frame_per_second = round(1 / self.dt)
self.reload_world = True
self.use_semantic_camera = True
camera_size = kwargs.get('camera_size')
self.camera_width = camera_size[1]
self.camera_height = camera_size[0]
if kwargs.get('camera_fov'):
self.camera_fov = kwargs.get('camera_fov')
self.number_of_walkers = 0
self.number_of_vehicles = 0
self.number_of_wheels = [4]
self.max_ego_spawn_times = 100
self.max_time_episode = kwargs.get('max_episode_steps', 5000)
self.max_waypt = 12
# in m/s. 5.5 is 20KMH
self.desired_speed = 5.5
self.out_lane_thres = 2.
# random or roundabout
self.task_mode = 'random'
self.dests = None
# action and observation spaces
self.observation_space = spaces.Box(low=0, high=255, shape=(self.obs_height, self.obs_width, 3), dtype=np.uint8)
# steering, accel/brake
self.action_space = spaces.Box(low=np.array([-1., -1.]), high=np.array([1., 1.]), dtype=np.float32)
self.dry_run = kwargs.get('dry_run_init_env', False)
if self.dry_run:
print('dry run, exit init')
return
print('connecting to Carla server...')
self.client = carla.Client(self.host, self.port)
self.client.set_timeout(30.0)
if _load_world:
self.world = self.client.get_world()
else:
self.world = self.client.load_world(self.map)
_load_world = True
print('Carla server connected!')
# Set fixed simulation step for synchronous mode
self.settings = self.world.get_settings()
self.settings.fixed_delta_seconds = self.dt
if self.fps_mode == 'low':
self.settings.max_substep_delta_time = 0.01666
self.settings.max_substeps = 13
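# CARLA's synchronous physics requires fixed_delta_seconds <= max_substep_delta_time * max_substeps;
# here 0.2 <= 0.01666 * 13 (~0.217), so the low-FPS step size still satisfies that constraint.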
self.settings.synchronous_mode = True
self.world.apply_settings(self.settings)
self.bp_library = self.world.get_blueprint_library()
# spawn points
self.vehicle_spawn_points = list(self.world.get_map().get_spawn_points())
# spawn point used as the start/finish of the manual lap (map is Town07)
self.lap_spwan_point_wp = self.world.get_map().get_waypoint(self.vehicle_spawn_points[1].location)
self.walker_spawn_points = []
# if we can cache more than 70% of spawn points then use cache
if len(_walker_spawn_points_cache) > self.number_of_walkers * 0.7:
def loc_to_transform(loc):
x, y, z = loc
loc = carla.Location(x=x, y=y, z=z)
return carla.Transform(location=loc)
self.walker_spawn_points = list(map(loc_to_transform, _walker_spawn_points_cache))
print('loaded walker spawn points from cache')
else:
_walker_spawn_points_cache.clear()
for i in range(self.number_of_walkers):
spawn_point = carla.Transform()
loc = self.world.get_random_location_from_navigation()
if (loc != None):
spawn_point.location = loc
self.walker_spawn_points.append(spawn_point)
# save to cache
_walker_spawn_points_cache.append((loc.x, loc.y, loc.z))
# route planner mode
self.route_mode = RouteMode.MANUAL_LAP
if self.route_mode == RouteMode.MANUAL_LAP:
self.routeplanner = ManualRoutePlanner(self.lap_spwan_point_wp, self.lap_spwan_point_wp, self.world,
resolution=2, plan=TOWN7_PLAN, use_section=True)
# ego vehicle bp
self.ego_bp = self._create_vehicle_bluepprint('vehicle.mini.cooper_s_2021')
self.ego = None
# Collision sensor
self.collision_hist = []
# collision history length
self.collision_hist_l = 1
self.collision_bp = self.bp_library.find('sensor.other.collision')
self.collision_sensor = None
# camera
self.camera_img = None
self.camera_trans = carla.Transform(carla.Location(x=1.18, z=1.7))
self.camera_sensor_type = 'sensor.camera.rgb'
if self.use_semantic_camera:
self.camera_sensor_type = 'sensor.camera.semantic_segmentation'
self.camera_bp = self.bp_library.find(self.camera_sensor_type)
# Modify the attributes of the blueprint to set image resolution and field of view.
self.camera_bp.set_attribute('image_size_x', str(self.camera_width))
self.camera_bp.set_attribute('image_size_y', str(self.camera_height))
if hasattr(self, 'camera_fov'):
self.camera_bp.set_attribute('fov', str(self.camera_fov))
self.camera_sensor = None
self.record_video = kwargs.get('record_video', False)
if self.record_video:
self.obs_camera_bp = self.bp_library.find('sensor.camera.rgb')
self.obs_camera_bp.set_attribute('image_size_x', '800')
self.obs_camera_bp.set_attribute('image_size_y', '600')
self.obs_frame_data_queue = Queue()
# Record the time of total steps and resetting steps
self.reset_step = 0
self.total_step = 0
# frame buffer
self.img_buff = deque(maxlen=self.n_images)
self.frame_data_queue = Queue()
# action buffer
self.num_past_actions = kwargs.get('n_past_actions', 10)
self.actions_queue = deque(maxlen=self.num_past_actions)
# control history
self.store_history = self.record_video
if self.store_history:
self.throttle_hist = []
self.brakes_hist = []
self.steers_hist = []
self.speed_hist = []
self.lspeed_lon_hist = []
self.original_dis = []
self.spawn_batch = True
# cache vehicle blueprints
if self.spawn_batch:
self.vehicle_bp_caches = {}
for nw in self.number_of_wheels:
self.vehicle_bp_caches[nw] = self._cache_vehicle_blueprints(number_of_wheels=nw)
encoder_type = kwargs.get('encoder_type')
if encoder_type is None:
raise ValueError(f'encoder_type is required but got {encoder_type}')
grayscale = kwargs.get('grayscale', False)
if encoder_type == 'CNN':
if observation_size == camera_size:
if grayscale:
self._transform_observation = self._transform_CNN_grayscale_observation_no_resize
else:
self._transform_observation = self._transform_CNN_observation_no_resize
else:
if grayscale:
self._transform_observation = self._transform_CNN_grayscale_observation
else:
self._transform_observation = self._transform_CNN_observation
if self.n_images > 1:
if grayscale:
# grayscale image, stack in new axis in place of channel
self._combine_observations = lambda obs_array: np.array(obs_array, dtype=self.obs_dtype)
else:
# RGB image, stack in channel dimension
self._combine_observations = lambda obs_array: np.concatenate(obs_array, axis=-1, dtype=np.uint8)
else:
self._combine_observations = lambda obs_array: obs_array
elif encoder_type == 'VAE':
# VAE case
self._transform_observation = self._transform_VAE_observation
if self.n_images > 1:
self._combine_observations = lambda obs_array: np.array(obs_array, dtype=np.float16)
else:
self._combine_observations = lambda obs_array: obs_array
self.mean = np.array([0.4652, 0.4417, 0.3799])
self.std = np.array([0.0946, 0.1767, 0.1865])
self.z_steps = {}
def reset(self):
# Clear history if exist
if self.store_history:
self.throttle_hist.clear()
self.brakes_hist.clear()
self.steers_hist.clear()
self.speed_hist.clear()
self.lspeed_lon_hist.clear()
self.original_dis.clear()
self.current_action = None
self.img_buff.clear()
self.actions_queue.clear()
self.current_lane_dis = 0
# delete sensor, vehicles and walkers
# self._clear_all_actors(['sensor.other.collision', self.camera_sensor_type, 'vehicle.*', 'controller.ai.walker', 'walker.*'])
# self._clear_all_actors(['sensor.other.collision', self.camera_sensor_type, 'vehicle.*'])
# Clear sensor objects
if self.camera_sensor is not None:
# not the first time
self.camera_sensor.stop()
self.collision_sensor.stop()
destroy_commands = [
carla.command.DestroyActor(self.ego.id),
carla.command.DestroyActor(self.camera_sensor.id),
carla.command.DestroyActor(self.collision_sensor.id)
]
self.client.apply_batch_sync(destroy_commands, False)
self.camera_sensor = None
self.collision_sensor = None
self.ego = None
# clear image
self.camera_img = None
# Disable sync mode
# self._set_synchronous_mode(False)
# Get actors polygon list
self.vehicle_polygons = []
# Spawn the ego vehicle
ego_spawn_times = 0
spawn_transform_index, spawn_transform = self.routeplanner.get_random_spawn_point()
if spawn_transform_index not in self.z_steps:
self.z_steps[spawn_transform_index] = 0.1
z_step = self.z_steps[spawn_transform_index]
while True:
if ego_spawn_times > self.max_ego_spawn_times:
raise Exception(f'cannot spawn at {transform}. waypoint index is {self.routeplanner._checkpoint_waypoint_index}')
transform = self._make_safe_spawn_transform(spawn_transform, z_step)
if self._try_spawn_ego_vehicle_at(transform):
break
else:
ego_spawn_times += 1
z_step += 0.1
self.z_steps[spawn_transform_index] = z_step
time.sleep(0.1)
# Add collision sensor
self.collision_sensor = self.world.spawn_actor(self.collision_bp, carla.Transform(), attach_to=self.ego)
self.collision_sensor.listen(lambda event: get_collision_hist(event))
def get_collision_hist(event):
impulse = event.normal_impulse
intensity = np.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
self.collision_hist.append(intensity)
self.collision_hist.clear()
# Add camera sensor
self.camera_sensor = self.world.spawn_actor(self.camera_bp, self.camera_trans, attach_to=self.ego)
self.camera_sensor.listen(self.frame_data_queue.put)
if self.record_video:
bound_x = 0.5 + self.ego.bounding_box.extent.x
bound_y = 0.5 + self.ego.bounding_box.extent.y
bound_z = 0.5 + self.ego.bounding_box.extent.z
obs_camera_trans = carla.Transform(carla.Location(x=-2.0*bound_x, y=+0.0*bound_y, z=2.0*bound_z), carla.Rotation(pitch=8.0))
self.obs_camera_sensor = self.world.spawn_actor(blueprint=self.obs_camera_bp, transform=obs_camera_trans,
attach_to=self.ego, attachment_type=carla.AttachmentType.SpringArm)
self.obs_camera_sensor.listen(self.obs_frame_data_queue.put)
# Update timesteps
self.time_step = 1
self.reset_step += 1
self.frame = self.world.tick()
# get route plan
self.routeplanner.set_vehicle(self.ego)
self.waypoints = self.routeplanner.run_step()
for _ in range(self.num_past_actions):
self.actions_queue.append(np.array([0, 0]))
self.first_additional_state = np.ravel(np.array(self.actions_queue, dtype=np.float16))
return self._get_obs()
def step(self, action):
acc = action[0]
steer = action[1]
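# Map the 2-D action in [-1, 1]^2 to CARLA controls: the first component is accel/brake
# (positive -> throttle, negative -> brake), the second is passed through as steering.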
if acc > 0:
throttle = np.clip(acc, 0, 1)
brake = 0
else:
throttle = 0
brake = np.clip(-acc, 0, 1)
self.ego.apply_control(carla.VehicleControl(throttle=float(throttle), steer=float(steer), brake=float(brake)))
if self.store_history:
self.throttle_hist.append(float(throttle))
self.brakes_hist.append(float(brake))
self.steers_hist.append(float(steer))
self.current_action = (throttle, steer, brake)
self.actions_queue.append(action)
self.frame = self.world.tick()
self.waypoints = self.routeplanner.run_step()
self.current_waypoint = self.routeplanner.current_waypoint
# Update timesteps
self.time_step += 1
self.total_step += 1
info = {}
info['additional_state'] = np.ravel(np.array(self.actions_queue, dtype=np.float16))
return self._get_obs(), self._get_reward(), self._get_terminal(), info
def render(self, mode='human'):
if mode == 'human':
if self.camera_img is None:
raise Exception('self.camera_img is None')
cv2.imshow('Carla environment', self.camera_img)
cv2.waitKey(1)
elif mode == 'rgb_array':
if self.record_video:
return self._retrieve_image_data(self.obs_frame_data_queue, use_semantic_mask=False)
else:
return self._get_observation_image()
elif mode == 'observation':
return self._transform_observation(self.camera_img)
def _get_reward(self):
""" Calculate the step reward. """
# reward for speed tracking
v = self.ego.get_velocity()
speed = np.sqrt(v.x**2 + v.y**2)
r_speed = -abs(speed - self.desired_speed)
# reward for collision
r_collision = 0
if len(self.collision_hist) > 0:
r_collision = -1
# reward for steering:
r_steer = -self.ego.get_control().steer**2
# reward for out of lane
ego_x, ego_y = get_pos(self.ego)
self.current_lane_dis, w = get_lane_dis_numba(self.waypoints, ego_x, ego_y)
r_out = 0
if abs(self.current_lane_dis) > self.out_lane_thres:
r_out = -100
else:
r_out = -abs(np.nan_to_num(self.current_lane_dis, posinf=self.out_lane_thres + 1, neginf=-(self.out_lane_thres + 1)))
# longitudinal speed
lspeed = np.array([v.x, v.y])
lspeed_lon = np.dot(lspeed, w)
# cost for too fast
r_fast = 0
if lspeed_lon > self.desired_speed:
r_fast = -1
# if it is faster than desired speed, minus the excess speed
# and don't give reward from speed
# r_fast *= lspeed_lon
# cost for lateral acceleration
r_lat = - abs(self.ego.get_control().steer) * lspeed_lon**2
# cost for braking
brake_cost = self.current_action[2]
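# Weighted sum: large collision penalty, reward for longitudinal speed, penalties for
# exceeding the desired speed, lane deviation, steering effort and lateral acceleration,
# plus a constant per-step cost (-1) and a braking cost.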
r = 200*r_collision + 1*lspeed_lon + 10*r_fast + 1*r_out + r_steer*5 + 0.2*r_lat - 1 - brake_cost*2
if self.store_history:
self.speed_hist.append(speed)
self.lspeed_lon_hist.append(lspeed_lon)
self.original_dis.append(self.current_lane_dis)
return r
def _get_reward_ppo(self):
# lane distance reward
ego_x, ego_y = get_pos(self.ego)
self.current_lane_dis, w = get_lane_dis_numba(self.waypoints, ego_x, ego_y)
non_nan_lane_dis = np.nan_to_num(self.current_lane_dis, posinf=self.out_lane_thres + 1, neginf=-(self.out_lane_thres + 1))
d_norm = abs(non_nan_lane_dis) / self.out_lane_thres
lane_centering_reward = 1 - d_norm
# termination reward
if abs(non_nan_lane_dis) > self.out_lane_thres or len(self.collision_hist) > 0:
return -10
# speed reward
v = self.ego.get_velocity()
lspeed = np.array([v.x, v.y])
lspeed_lon = np.dot(lspeed, w)
min_desired_speed = 0.8 * self.desired_speed
max_desired_speed = 1.2 * self.desired_speed
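# Piecewise speed reward: ramps linearly from 0 to 1 up to 80% of the desired speed,
# stays at 1 up to the desired speed, then falls off linearly and reaches 0 at 120% of it.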
if lspeed_lon < min_desired_speed:
speed_reward = lspeed_lon / min_desired_speed
elif lspeed_lon > self.desired_speed:
speed_reward = 1 - (lspeed_lon - self.desired_speed) / (max_desired_speed - self.desired_speed)
else:
speed_reward = 1
# angle
angle_degree = get_vehicle_angle(self.ego.get_transform(), self.current_waypoint.transform)
# allow only 4 degree until no reward
angle_reward = max(1.0 - abs(angle_degree / 4), 0.0)
return speed_reward + lane_centering_reward + angle_reward
def _get_terminal(self):
""" Calculate whether to terminate the current episode. """
# Get ego state
ego_x, ego_y = get_pos(self.ego)
# If collides
if len(self.collision_hist) > 0:
return True
# If reach maximum timestep
if self.time_step > self.max_time_episode:
return True
# If at destination
if self.dests is not None: # If at destination
for dest in self.dests:
if np.sqrt((ego_x-dest[0])**2 + (ego_y-dest[1])**2) < 4:
return True
# If out of lane
if abs(self.current_lane_dis) > self.out_lane_thres:
return True
# end of section
if self.routeplanner.is_end_of_section:
return True
return False
def _get_obs(self):
self.camera_img = self._retrieve_image_data(self.frame_data_queue,
use_semantic_mask=self.use_semantic_camera)
if self.n_images == 1:
transformed_observation = self._transform_observation(self.camera_img)
return self._combine_observations(transformed_observation)
self.img_buff.append(self.camera_img)
while len(self.img_buff) < self.n_images:
self.img_buff.append(self.camera_img)
img_array = [self._transform_observation(img) for img in self.img_buff]
return self._combine_observations(img_array)
def _create_vehicle_bluepprint(self, actor_filter, color=None, number_of_wheels=[4]):
"""Create the blueprint for a specific actor type.
Args:
actor_filter: a string indicating the actor type, e.g, 'vehicle.lincoln*'.
Returns:
bp: the blueprint object of carla.
"""
blueprints = self.bp_library.filter(actor_filter)
blueprint_library = []
for nw in number_of_wheels:
blueprint_library = blueprint_library + [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == nw]
bp = random.choice(blueprint_library)
if bp.has_attribute('color'):
if not color:
color = random.choice(bp.get_attribute('color').recommended_values)
bp.set_attribute('color', color)
return bp
def _cache_vehicle_blueprints(self, number_of_wheels=4):
if not isinstance(number_of_wheels, int):
raise TypeError(f'number_of_wheels must be int not {type(number_of_wheels)}')
blueprint_library = []
blueprints = self.bp_library.filter('vehicle.*')
for bp in blueprints:
if bp.get_attribute('number_of_wheels').as_int() == number_of_wheels:
if bp.has_attribute('color'):
color = random.choice(bp.get_attribute('color').recommended_values)
bp.set_attribute('color', color)
blueprint_library.append(bp)
return blueprint_library
def _clear_all_actors(self, actor_filters):
""" Clear specific actors. """
destroy_commands = []
for actor_filter in actor_filters:
for actor in self.world.get_actors().filter(actor_filter):
if actor.is_alive:
if actor.type_id == 'controller.ai.walker':
actor.stop()
destroy_commands.append(carla.command.DestroyActor(actor))
self.client.apply_batch(destroy_commands)
def _set_synchronous_mode(self, synchronous = True):
"""Set whether to use the synchronous mode.
"""
self.settings.synchronous_mode = synchronous
self.world.apply_settings(self.settings)
def _try_spawn_random_vehicle_at(self, transform, number_of_wheels=[4], set_autopilot=True):
"""Try to spawn a surrounding vehicle at specific transform with random bluprint.
Args:
transform: the carla transform object.
Returns:
Bool indicating whether the spawn is successful.
"""
blueprint = self._create_vehicle_bluepprint('vehicle.*', number_of_wheels=number_of_wheels)
blueprint.set_attribute('role_name', 'autopilot')
vehicle = self.world.try_spawn_actor(blueprint, transform)
if vehicle is not None:
if set_autopilot:
vehicle.set_autopilot()
return True, vehicle
return False, vehicle
def _spawn_random_vehicles_batch(self, transforms, number_of_vehicles, number_of_wheels=[4]):
bps = []
for nw in number_of_wheels:
bps.extend(self.vehicle_bp_caches[nw])
count = 0
spawn_commands = []
for transform in transforms:
bp = random.choice(bps)
bp.set_attribute('role_name', 'autopilot')
spawn_cmd = carla.command.SpawnActor(bp, transform)
spawn_cmd.then(carla.command.SetAutopilot(carla.command.FutureActor, True))
spawn_commands.append(spawn_cmd)
count += 1
if count == number_of_vehicles:
break
self.client.apply_batch(spawn_commands)
if count < number_of_vehicles:
spawn_commands.clear()
while count < number_of_vehicles:
transform = random.choice(transforms)
bp = random.choice(bps)
bp.set_attribute('role_name', 'autopilot')
spawn_cmd = carla.command.SpawnActor(bp, transform)
spawn_cmd.then(carla.command.SetAutopilot(carla.command.FutureActor, True))
spawn_commands.append(spawn_cmd)
count += 1
self.client.apply_batch(spawn_commands)
def _try_spawn_random_walker_at(self, transform):
"""Try to spawn a walker at specific transform with random bluprint.
Args:
transform: the carla transform object.
Returns:
Bool indicating whether the spawn is successful.
"""
walker_bp = random.choice(self.bp_library.filter('walker.*'))
# set as not invincible
if walker_bp.has_attribute('is_invincible'):
walker_bp.set_attribute('is_invincible', 'false')
walker_actor = self.world.try_spawn_actor(walker_bp, transform)
if walker_actor is not None:
walker_controller_bp = self.bp_library.find('controller.ai.walker')
walker_controller_actor = self.world.spawn_actor(walker_controller_bp, carla.Transform(), walker_actor)
# start walker
walker_controller_actor.start()
# set walk to random point
walker_controller_actor.go_to_location(self.world.get_random_location_from_navigation())
# random max speed
walker_controller_actor.set_max_speed(1 + random.random()) # max speed between 1 and 2 (default is 1.4 m/s)
return True
return False
def _spwan_random_walkers_batch(self, transforms, number_of_walkers):
walker_bps = self.bp_library.filter('walker.*')
# spawn walker
count = 0
spawn_commands = []
for transform in transforms:
walker_bp = random.choice(walker_bps)
if walker_bp.has_attribute('is_invincible'):
walker_bp.set_attribute('is_invincible', 'false')
spawn_commands.append(carla.command.SpawnActor(walker_bp, transform))
count += 1
if count == number_of_walkers:
break
results = self.client.apply_batch_sync(spawn_commands, True)
walkers_list = []
for result in results:
if not result.error:
walkers_list.append({'id': result.actor_id})
# spawn controller
spawn_commands.clear()
walker_controller_bp = self.bp_library.find('controller.ai.walker')
for i in range(len(walkers_list)):
spawn_commands.append(carla.command.SpawnActor(walker_controller_bp, carla.Transform(), walkers_list[i]['id']))
results = self.client.apply_batch_sync(spawn_commands, True)
controller_ids = []
for i in range(len(results)):
if not results[i].error:
walkers_list[i]['con_id'] = results[i].actor_id
controller_ids.append(results[i].actor_id)
controller_actors = self.world.get_actors(controller_ids)
self.world.wait_for_tick()
# start controller
con_idx = 0
for walker in walkers_list:
if 'con_id' not in walker:
continue
controller = controller_actors[con_idx]
assert walker['con_id'] == controller.id
controller.start()
controller.go_to_location(self.world.get_random_location_from_navigation())
controller.set_max_speed(1 + random.random())
con_idx += 1
def _get_actor_polygons(self, filt):
"""Get the bounding box polygon of actors.
Args:
filt: the filter indicating what type of actors we'll look at.
Returns:
actor_poly_dict: a dictionary containing the bounding boxes of specific actors.
"""
actor_poly_dict = {}
for actor in self.world.get_actors().filter(filt):
# Get x, y and yaw of the actor
trans = actor.get_transform()
x = trans.location.x
y = trans.location.y
yaw = trans.rotation.yaw / 180 * np.pi
# Get length and width
bb = actor.bounding_box
l = bb.extent.x
w = bb.extent.y
# Get bounding box polygon in the actor's local coordinate
poly_local = np.array([[l, w], [l, -w], [-l, -w], [-l,w]]).transpose()
# Get rotation matrix to transform to global coordinate
R = np.array([[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]])
# Get global bounding box polygon
poly = np.matmul(R,poly_local).transpose() + np.repeat([[x, y]], 4, axis=0)
actor_poly_dict[actor.id] = poly
return actor_poly_dict
def _try_spawn_ego_vehicle_at(self, transform):
"""Try to spawn the ego vehicle at specific transform.
Args:
transform: the carla transform object.
Returns:
Bool indicating whether the spawn is successful.
"""
vehicle = None
# Check if ego position overlaps with surrounding vehicles
overlap = False
if self.vehicle_polygons:
for idx, poly in self.vehicle_polygons[-1].items():
poly_center = np.mean(poly, axis=0)
ego_center = np.array([transform.location.x, transform.location.y])
dis = np.linalg.norm(poly_center - ego_center)
if dis > 8:
continue
else:
overlap = True
break
if not overlap:
vehicle = self.world.try_spawn_actor(self.ego_bp, transform)
if vehicle is not None:
self.ego = vehicle
return True
return False
def _make_safe_spawn_transform(self, spawn_point_transform, z_step):
''' Return a copy of the spawn transform with its Z coordinate raised by z_step to avoid ground collisions when spawning '''
old_z = spawn_point_transform.location.z
new_location = carla.Location(x=spawn_point_transform.location.x,
y=spawn_point_transform.location.y,
z=old_z + z_step)
new_transform = carla.Transform(location=new_location, rotation=spawn_point_transform.rotation)
return new_transform
def _transform_CNN_observation(self, obs):
cropped_obs = self._crop_image(obs)
resized_obs = cv2.resize(cropped_obs, (self.obs_width, self.obs_height), interpolation=cv2.INTER_NEAREST)
# normalized_obs = normalize_image(resized_obs, self.mean, self.std).astype(np.float16)
return resized_obs
def _transform_CNN_grayscale_observation(self, obs):
cropped_obs = self._crop_image(obs)
resized_obs = cv2.resize(cropped_obs, (self.obs_width, self.obs_height), interpolation=cv2.INTER_NEAREST)
gray_obs = (cv2.cvtColor(resized_obs, cv2.COLOR_RGB2GRAY) / 255.).astype(np.float16)
return gray_obs
def _transform_CNN_observation_no_resize(self, obs):
scaled_obs = obs / 255.
return scaled_obs.transpose((2, 0, 1))
def _transform_CNN_grayscale_observation_no_resize(self, obs):
scaled_obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY) / 255.
return scaled_obs
def _transform_VAE_observation(self, obs):
cropped_obs = self._crop_image(obs)
resized_obs = cv2.resize(cropped_obs, (self.obs_width, self.obs_height), interpolation=cv2.INTER_NEAREST)
normalized_obs = normalize_image(resized_obs, self.mean, self.std).astype(np.float16)
return normalized_obs
def _transform_old_VAE_observation(self, obs):
''' For old version that doesn't need normalization '''
resized_obs = cv2.resize(obs, (self.obs_width, self.obs_height), interpolation=cv2.INTER_NEAREST)
return (resized_obs / 255.0).astype(np.float16)
# def _transform_observation(self, obs):
# return (obs / 255.0).astype(np.float16)
def _get_observation_image(self):
''' Return an RGB image in `H` x `W` x `C` format whose size matches the observation size.
Unlike `_transform_observation`, this method does not normalize the image; only the
spatial size is changed.
'''
cropped_img = self._crop_image(self.camera_img)
return cv2.resize(cropped_img, (self.obs_width, self.obs_height), interpolation=cv2.INTER_NEAREST)
# def _get_observation_image(self):
# return self.camera_img
def _crop_image(self, img):
# this size is suitable for 800x600, fov 110 camera
cropped_size = (384, 768)
return center_crop(img, cropped_size, shift_H=1.2)
def _retrieve_image_data(self, queue_to_wait, use_semantic_mask=False):
while True:
data = queue_to_wait.get()
queue_to_wait.task_done()
if data.frame == self.frame:
break
if use_semantic_mask:
data.convert(cc.CityScapesPalette)
else:
data.convert(cc.Raw)
array = np.frombuffer(data.raw_data, dtype=np.uint8)
array = np.reshape(array, (data.height, data.width, 4))
array = array[:, :, :3]
# BGR(OpenCV) > RGB
return np.ascontiguousarray(array[:, :, ::-1])
def _draw_debug_waypoints(self, waypoints, size=1, color=(255,0,0)):
''' Draw debug point on waypoints '''
if len(waypoints) < 1:
raise ValueError('waypoints must contain at least one waypoint.')
debug = self.world.debug
color = carla.Color(r=color[0], g=color[1], b=color[2])
for wp in waypoints:
location = carla.Location(x=wp[0], y=wp[1], z=1.0)
debug.draw_point(location, size=size, color=color)
def close(self):
try:
cv2.destroyAllWindows()
except cv2.error:
pass
if not self.dry_run:
self._set_synchronous_mode(False)
# delete all sensor for the next world
self._clear_all_actors(['sensor.other.collision', self.camera_sensor_type, 'vehicle.*', 'controller.ai.walker', 'walker.*'])
return super().close()
def plot_control_graph(self, name):
if not self.store_history:
print('Cannot plot graph because environment does not store history')
return
data_np = np.array([self.throttle_hist, self.brakes_hist, self.steers_hist]).transpose()
data = pd.DataFrame(data_np, columns=['throttle', 'brake', 'steer']).reset_index()
data = pd.melt(data, id_vars='index', var_name='command', value_name='value')
sns.lineplot(data=data, hue='command', x='index', y='value')
plt.title('Throttle, Brake and Steer')
plt.savefig(name)
def plot_speed_graph(self, name):
if not self.store_history:
print('Cannot plot graph because environment does not store history')
return
data_np = np.array([self.speed_hist, self.lspeed_lon_hist]).transpose()
data = pd.DataFrame(data_np, columns=['speed', 'speed_lon']).reset_index()
data = pd.melt(data, id_vars='index', var_name='command', value_name='value')
sns.lineplot(data=data, hue='command', x='index', y='value')
plt.title('Speed and Longitudinal speed')
plt.savefig(name)
def plot_distance_graph(self, name):
if not self.store_history:
print('Cannot plot graph because environment does not store history')
return
data_np = np.array([self.original_dis]).transpose()
data = pd.DataFrame(data_np, columns=['original distance']).reset_index()
data = pd.melt(data, id_vars='index', var_name='distance', value_name='value')
sns.lineplot(data=data, hue='distance', x='index', y='value')
plt.title('Distance from center of the lane')
plt.savefig(name)
def get_latest_milestone(self):
''' Return the index of the latest checkpoint waypoint that the agent can reach '''
if self.routeplanner._intermediate_checkpoint_waypoint_index > self.routeplanner._checkpoint_waypoint_index:
return self.routeplanner._checkpoint_waypoint_index
else:
return self.routeplanner._intermediate_checkpoint_waypoint_index
@property
def metadata(self):
return {"render_modes": ["human", "rgb_array"], "render_fps": self.frame_per_second}
def collect_env_images(self, num_steps, start_step=0, agent_class=BehaviorAgent, observation_callback=None):
agent = agent_class(self.ego)
for _ in range(start_step, start_step + num_steps):
self.ego.apply_control(agent.run_step())
self.world.tick()
if observation_callback is not None:
observation_callback(self._get_observation_image())
def test_carla_agent(self, num_steps, start_step=0, recorder=None):
agent = BasicAgent(self.ego)
# agent = ManualAgent(self.ego)
agent.set_global_plan(self.route_waypoints)
agent.ignore_traffic_lights(active=True)
agent.ignore_stop_signs(active=True)
# agent.set_target_speed(40)
for step in trange(start_step, start_step + num_steps):
self.ego.apply_control(agent.run_step())
self.world.tick()
if recorder is not None:
recorder.capture_frame()
# img_np = self._get_observation_image()
# img = Image.fromarray(img_np)
# img.save(f'carla_town7_images/outskirts/town7_outskirts_{step:05d}.jpeg')
# img.close()
if agent.done():
print('agent is done')
break
CarlaEnv.start_wp_idx += 5
completed_lap = CarlaEnv.start_wp_idx > len(self.route_waypoints)
return completed_lap
class SteerDirection(Enum):
RIGHT = auto()
LEFT = auto()
class ManualAgent:
steer_right_cmds = [0.0] * 30 + [0.2] * 10 + [-0.1] * 15
steer_left_cmds = [0.0] * 30 + [-0.05] * 5 + [-0.1] * 5
def __init__(self, vehicle):
steer_direction = SteerDirection.LEFT
if steer_direction == SteerDirection.RIGHT:
self.steer_cmds = deepcopy(ManualAgent.steer_right_cmds)
elif steer_direction == SteerDirection.LEFT:
self.steer_cmds = deepcopy(ManualAgent.steer_left_cmds)
else:
raise TypeError('unknown steer_direction type')
def run_step(self):
throttle = max(0.4, random.random())
steer = 0
if len(self.steer_cmds) > 0:
steer = self.steer_cmds[0]
self.steer_cmds.pop(0)
return carla.VehicleControl(throttle=float(throttle), steer=float(steer), brake=0.0)
def done(self):
return False
class CarlaPerfectActionSampler:
def __init__(self, env: CarlaEnv) -> None:
self.agent = BasicAgent(env.ego,
target_speed=env.desired_speed * 3.6)
self.agent.set_global_plan(env.routeplanner.get_route_waypoints())
self.agent.ignore_traffic_lights(active=True)
self.agent.ignore_stop_signs(active=True)
self.time_step = 0
self.max_time_step = 15 * env.frame_per_second
def sample(self):
self.time_step += 1
action_command = self.agent.run_step()
action = self._carla_command_to_action(action_command)
return action, self.time_step == self.max_time_step
def _carla_command_to_action(self, command):
''' Convert CARLA control command to environment action '''
throttle = command.throttle
brake = command.brake
steer = command.steer
if throttle > 0:
acc = throttle
elif brake > 0:
acc = -brake
else:
acc = 0
return np.array([acc, steer])
|
import pytest
from pkg_resources import resource_filename
import numpy as np
pyhessio = pytest.importorskip("pyhessio")
testfile = 'tests/resources/gamma_test_large_truncated.simtel.gz'
def test_adc_samples():
from eventio import EventIOFile
from eventio.simtel import (
ArrayEvent, TelescopeEvent, ADCSamples, MCEvent
)
from eventio.search_utils import yield_toplevel_of_type
events_compared = 0
current_event = -1
with pyhessio.open_hessio(testfile) as h, EventIOFile(testfile) as e:
hessio_events = h.move_to_next_event()
try:
for o in e:
if isinstance(o, MCEvent):
current_event = o.header.id
if isinstance(o, ArrayEvent):
hessio_event = next(hessio_events)
for televent in yield_toplevel_of_type(o, TelescopeEvent):
for adcsamp in yield_toplevel_of_type(televent, ADCSamples):
assert hessio_event == current_event
tel_id = adcsamp.telescope_id
assert tel_id in h.get_teldata_list()
adcsamp_eventio = adcsamp.parse()
adcsamp_hessio = h.get_adc_sample(tel_id)
assert np.all(adcsamp_eventio == adcsamp_hessio)
events_compared += 1
if events_compared >= 10:
raise StopIteration
except StopIteration:
pass
assert events_compared == 10
|
import unittest
import rubik
import random
class TestRubik(unittest.TestCase):
def setUp(self):
self.turned = {
"left": rubik.cube.Cube("HCDNFVIJELMTPASKUQXZ",
"20010200100102010200"),
"right": rubik.cube.Cube("ACSEMHIDKLUNFQZTPVXJ",
"00102001002020102001"),
"top": rubik.cube.Cube("PKAEFHIJQCMNSLDTUVXZ",
"01000000110001000000"),
"down": rubik.cube.Cube("ACDEFJNZKLIXPQSTUHMV",
"00000010001100000010"),
"front": rubik.cube.Cube("ACDEFHIJKLMNVTPXQZUS",
"00000000000000000000"),
"back": rubik.cube.Cube("DFJCIAEHKLMNPQSTUVXZ",
"00000000000000000000"),
}
def testTurn(self):
for direction in ("left", "right", "top", "down", "front", "back"):
cube = rubik.cube.Cube()
getattr(cube, direction)()
self.assertEqual(cube, self.turned[direction])
cube_turn = rubik.cube.Cube()
[getattr(cube_turn, direction)() for _ in range(4)]
self.assertEqual(cube_turn, rubik.cube.Cube())
cube = self.turned[direction].copy()
getattr(cube, "anti_" + direction)()
self.assertEqual(cube, rubik.cube.Cube())
def testPath(self):
choices = ["left", "right", "top", "down", "front", "back"]
        m = [random.choice(choices) for _ in range(1000)]
n = ["anti_" + i for i in reversed(m)]
cube = rubik.Cube()
for i in m + n:
getattr(cube, i)()
self.assertEqual(cube, rubik.Cube())
|
# Notices:
# Copyright 2017, United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
# Disclaimers
# No Warranty: THE SUBJECT SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY
# OF ANY KIND, EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT
# LIMITED TO, ANY WARRANTY THAT THE SUBJECT SOFTWARE WILL CONFORM TO
# SPECIFICATIONS, ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE, OR FREEDOM FROM INFRINGEMENT, ANY WARRANTY THAT THE
# SUBJECT SOFTWARE WILL BE ERROR FREE, OR ANY WARRANTY THAT DOCUMENTATION,
# IF PROVIDED, WILL CONFORM TO THE SUBJECT SOFTWARE. THIS AGREEMENT DOES NOT,
# IN ANY MANNER, CONSTITUTE AN ENDORSEMENT BY GOVERNMENT AGENCY OR ANY PRIOR
# RECIPIENT OF ANY RESULTS, RESULTING DESIGNS, HARDWARE, SOFTWARE PRODUCTS
# OR ANY OTHER APPLICATIONS RESULTING FROM USE OF THE SUBJECT SOFTWARE.
# FURTHER, GOVERNMENT AGENCY DISCLAIMS ALL WARRANTIES AND LIABILITIES
# REGARDING THIRD-PARTY SOFTWARE, IF PRESENT IN THE ORIGINAL SOFTWARE, AND
# DISTRIBUTES IT "AS IS."
# Waiver and Indemnity: RECIPIENT AGREES TO WAIVE ANY AND ALL CLAIMS
# AGAINST THE UNITED STATES GOVERNMENT, ITS CONTRACTORS AND SUBCONTRACTORS,
# AS WELL AS ANY PRIOR RECIPIENT. IF RECIPIENT'S USE OF THE SUBJECT
# SOFTWARE RESULTS IN ANY LIABILITIES, DEMANDS, DAMAGES, EXPENSES OR
# LOSSES ARISING FROM SUCH USE, INCLUDING ANY DAMAGES FROM PRODUCTS BASED
# ON, OR RESULTING FROM, RECIPIENT'S USE OF THE SUBJECT SOFTWARE, RECIPIENT
# SHALL INDEMNIFY AND HOLD HARMLESS THE UNITED STATES GOVERNMENT, ITS
# CONTRACTORS AND SUBCONTRACTORS, AS WELL AS ANY PRIOR RECIPIENT, TO THE
# EXTENT PERMITTED BY LAW. RECIPIENT'S SOLE REMEDY FOR ANY SUCH MATTER
# SHALL BE THE IMMEDIATE, UNILATERAL TERMINATION OF THIS AGREEMENT.
# Description:
# This tool takes in Landsat 8 data.
# It removes land pixels, cloud pixels, pixels corresponding to a depth
# of 2 meters or shallower, and high sediment concentrations
# It then creates three final images: A true color image, a 543 color
# composition chlorophyll concentration image, and a normalized difference
# vegetation index (NDVI) chlorophyll concentration image.
# ---------------------------------------------------------------------------
# Import modules
import arcpy
import os
import glob
from arcpy import *
from arcpy.sa import *
# Check out any necessary licenses
arcpy.CheckOutExtension("spatial")
#set working directory
#The path should be set using no quotes and single forward slashes, e.g. C:/Users/DEVELOP4/Desktop/test
env.workspace = raw_input("What's the file location? ")
os.chdir(env.workspace)
n = float(raw_input("If you'd like to change the standard deviation for the sediment mask, type the multiplier here. To keep the default, type '1' --> "))  # user-supplied float multiplier
arcpy.env.overwriteOutput = True
if os.path.isfile("b1clb.tif") == False:
# Isolating landsat band files
B1 = glob.glob("*_band1*")
B2 = glob.glob("*_band2*")
B3 = glob.glob("*_band3*")
B4 = glob.glob("*_band4*")
B5 = glob.glob("*_band5*")
B6 = glob.glob("*_band6*")
B7 = glob.glob("*_band7*")
C = glob.glob("*_cfmask*")
bath = glob.glob("*bath*")
#defining landsat band files
b1 = B1[0]
b2 = B2[0]
b3 = B3[0]
b4 = B4[0]
b5 = B5[0]
b6 = B6[0]
b7 = B7[0]
cloud = C[0]
bathmask = bath[0]
print("Defined bands")
#Make floating point valued rasters
b1float = Raster(b1) * 0.0001
b2float = Raster(b2) * 0.0001
b3float = Raster(b3) * 0.0001
b4float = Raster(b4) * 0.0001
b5float = Raster(b5) * 0.0001
b6float = Raster(b6) * 0.0001
b7float = Raster(b7) * 0.0001
print("Created floating point rasters")
#Calculate the NDVI from b4 and b5
ndvi = (b5float-b4float)/(b5float+b4float)
# Create the watermask
watermask = Con(ndvi,1,"","VALUE < 0")
# Creating water-only band rasters
b1water = ExtractByMask(b1float,watermask)
b2water = ExtractByMask(b2float,watermask)
b3water = ExtractByMask(b3float,watermask)
b4water = ExtractByMask(b4float,watermask)
b5water = ExtractByMask(b5float,watermask)
b6water = ExtractByMask(b6float,watermask)
b7water = ExtractByMask(b7float,watermask)
print("Removed land pixels")
# Reclassify the cloud mask values
cloudmask = Reclassify(cloud,"Value",RemapRange([[0,1,1],[2,4,"NODATA"]]))
# Removing clouds from the bands
b1wc = ExtractByMask(b1water,cloudmask)
b2wc = ExtractByMask(b2water,cloudmask)
b3wc = ExtractByMask(b3water,cloudmask)
b4wc = ExtractByMask(b4water,cloudmask)
b5wc = ExtractByMask(b5water,cloudmask)
b6wc = ExtractByMask(b6water,cloudmask)
b7wc = ExtractByMask(b7water,cloudmask)
print("Removed cloud pixels")
#remove shallow pixels
b1clb = ExtractByMask(b1wc,bathmask)
b2clb = ExtractByMask(b2wc,bathmask)
b3clb = ExtractByMask(b3wc,bathmask)
b4clb = ExtractByMask(b4wc,bathmask)
b5clb = ExtractByMask(b5wc,bathmask)
b6clb = ExtractByMask(b6wc,bathmask)
b7clb = ExtractByMask(b7wc,bathmask)
b1clb.save("b1clb.tif")
b2clb.save("b2clb.tif")
b3clb.save("b3clb.tif")
b4clb.save("b4clb.tif")
b5clb.save("b5clb.tif")
b6clb.save("b6clb.tif")
b7clb.save("b7clb.tif")
print("Removed shallow pixels")
#Creating the NDTI mask
ndti = (b4clb-b3clb)/(b4clb+b3clb)
ndti.save("ndti.tif")
arcpy.CalculateStatistics_management("ndti.tif")
ustd = arcpy.GetRasterProperties_management("ndti.tif","STD")
ostd = ustd.getOutput(0)
std = n*float(ostd)
umean = arcpy.GetRasterProperties_management("ndti.tif","MEAN")
omean = umean.getOutput(0)
mean = float(omean)
mod = mean + std
h = str(mod)
value = "VALUE < " + h
ndtimask = Con(ndti,1,"",value)
newmask = "ndvimask_sd_" + str(n) + ".tif"
ndtimask.save(newmask)
print("NDTI mask created. Now creating True Color Image, NDVI Chlorophyll image, and 543 Composite Chlorophyll image")
#making a true color image
truecolorlist = [b4,b3,b2]
CompositeBands_management(truecolorlist,"truecolor.tif")
print("Created True Color Image")
#creating the NDVI for chlorophyll concentration
chl_ndvi = (b5clb-b4clb)/(b5clb+b4clb)
chlndvi = ExtractByMask(chl_ndvi,ndtimask)
chlndviname = "ndvi_chl_sd_" + str(n) + ".tif"
chlndvi.save(chlndviname) #this is the final NDVI chlorophyll concentration image to be stretched
print("Created NDVI chlorophyll concentration image")
#making a 543 band composite
chlbandlist = [b5clb,b4clb,b3clb]
band543 = CompositeBands_management(chlbandlist,"comp_band_543.tif")
band543_sed = ExtractByMask("comp_band_543.tif",ndtimask)
b543name = "comp_band_543_chl_sd_" + str(n) + ".tif"
band543_sed.save(b543name) #this is the final 543 image to be stretched
print("Created 543 color composite for chlorophyll concentration")
else:
#retrieving previously saved land, cloud, and shallow pixel removed images
B1 = glob.glob("b1clb.tif")
B2 = glob.glob("b2clb.tif")
B3 = glob.glob("b3clb.tif")
B4 = glob.glob("b4clb.tif")
B5 = glob.glob("b5clb.tif")
B6 = glob.glob("b6clb.tif")
B7 = glob.glob("b7clb.tif")
print("Defined Bands")
#defining each cloud, land, and shallow image
b1clb = Raster(B1[0])
b2clb = Raster(B2[0])
b3clb = Raster(B3[0])
b4clb = Raster(B4[0])
b5clb = Raster(B5[0])
b6clb = Raster(B6[0])
b7clb = Raster(B7[0])
    arcpy.env.overwriteOutput = True
#Creating the NDTI mask
ndti = (b4clb-b3clb)/(b4clb+b3clb)
ndti.save("ndti.tif")
arcpy.CalculateStatistics_management("ndti.tif")
ustd = arcpy.GetRasterProperties_management("ndti.tif","STD")
ostd = ustd.getOutput(0)
std = n*float(ostd)
umean = arcpy.GetRasterProperties_management("ndti.tif","MEAN")
omean = umean.getOutput(0)
mean = float(omean)
mod = mean + std
h = str(mod)
value = "VALUE < " + h
ndtimask = Con(ndti,1,"",value)
newmask = "ndvimask_sd_" + str(n) + ".tif"
ndtimask.save(newmask)
print("NDTI mask created. Now creating NDVI Chlorophyll image and 543 Composite Chlorophyll image")
#creating the NDVI for chlorophyll concentration
chl_ndvi = (b5clb-b4clb)/(b5clb+b4clb)
chlndvi = ExtractByMask(chl_ndvi,ndtimask)
chlndviname = "ndvi_chl_sd_" + str(n) + ".tif"
chlndvi.save(chlndviname) #this is the final NDVI chlorophyll concentration image to be stretched
print("Created NDVI chlorophyll concentration image")
#making a 543 band composite
chlbandlist = [b5clb,b4clb,b3clb]
band543 = CompositeBands_management(chlbandlist,"comp_band_543.tif")
band543_sed = ExtractByMask("comp_band_543.tif",ndtimask)
b543name = "comp_band_543_chl_sd_" + str(n) + ".tif"
band543_sed.save(b543name) #this is the final 543 image to be stretched
print("Created 543 color composite for chlorophyll concentration")
|
from django.db import models
from django.db.models.signals import post_save
from authentication.models import User
# The parent class is django.db.models.base.Model
class Profile(models.Model):
"""用户个人简介映射类
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
url = models.CharField(max_length=50, null=True, blank=True)
location = models.CharField(max_length=50, null=True, blank=True)
job = models.CharField(max_length=50, null=True, blank=True)
avatar = models.ImageField(upload_to='pic_folder', default='img/user.png')
class Meta:
db_table = 'user_profile'
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
# Use a signal so that a Profile is created whenever a User is created.
# Django ships with a signal system that passes messages around the framework:
# when a given condition occurs, a signal sender uses the signal system to
# notify the signal receivers.
# The signal system exposes many objects for this purpose; they are all
# instances of the ModelSignal class in the db.models.signals module.
# One of them is post_save: after an instance of a model class saves data via
# its save method, that model automatically sends this signal to its receivers.
# ModelSignal has a connect method whose first positional argument is the
# receiver, and whose sender argument is the model class sending the signal.
# The line below is more or less boilerplate: whenever a User instance is saved
# to the database, the function passed as the first argument is executed.
# The sender argument may be the imported model class or a string naming it.
post_save.connect(create_user_profile, sender=User)
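# An equivalent registration style, shown here only as a sketch (it is not
# wired in, to avoid registering the handler twice), uses Django's receiver
# decorator instead of calling connect() directly:
#
#     from django.dispatch import receiver
#
#     @receiver(post_save, sender=User)
#     def create_user_profile(sender, instance, created, **kwargs):
#         if created:
#             Profile.objects.create(user=instance)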
|
"""Implements the :py:class:`RolesClient` class."""
from datetime import datetime, timezone
from typing import Callable, Optional, List, Dict, Any
from flask import current_app, has_app_context
import pymongo
from .exceptions import InvalidEntity
__all__ = ['RolesClient']
class RolesClient:
"""This class implements the Shrunk roles system."""
def __init__(self, *, db: pymongo.database.Database):
self.db = db
self.qualified_for: Dict[str, Callable[[str], bool]] = {}
self.process_entity: Dict[str, Callable[[str], str]] = {}
self.valid_entity_for: Dict[str, Callable[[str], bool]] = {}
self.oncreate_for: Dict[str, Callable[[str], None]] = {}
self.onrevoke_for: Dict[str, Callable[[str], None]] = {}
self.form_text: Dict[str, Any] = {}
@staticmethod
def _default_text(role: str) -> Any:
"""Get the default text that apears in a role menu.
:param role: Role name
"""
return {
'title': role,
'invalid': f'invalid entity for role {role}',
'grant_title': f'Grant {role}',
'grantee_text': 'Grantee',
'grant_button': 'GRANT',
'revoke_title': f'Revoke {role}',
'revoke_button': 'REVOKE',
'empty': f'there is currently nothing with the role {role}',
'granted_by': 'granted by',
'allow_comment': False,
'comment_prompt': 'Comment',
}
def create(self,
role: str,
qualifier_func: Callable[[str], bool],
validator_func: Callable[[str], bool] = lambda e: e != '',
custom_text: Any = None,
oncreate: Callable[[str], None] = lambda _: None,
onrevoke: Callable[[str], None] = lambda _: None,
process_entity: Callable[[str], str] = lambda e: e) -> None:
"""
        :param role: Role name
:param qualifier_func:
            takes in a netid and returns whether or not that user is qualified for a specific role.
:param validator_func:
takes in an entity (like netid or link) and returns whether it's valid for a role. for
example, it could take a link like ``htp://fuz1`` and say it's not a valid link
:param custom_text: custom text to show on the form. see :py:func:`_default_text` source for options
:param oncreate: callback for extra logic when granting a role, e.g. remove a user's links on ban
:param onrevoke:
callback for extra logic when revoking a role, e.g. reenabling a user's link when they are unbanned
:param process_entity:
callback to transform entity before it's inserted into the db
"""
custom_text = custom_text or {}
text = self._default_text(role)
text.update(custom_text)
self.form_text[role] = text
self.qualified_for[role] = qualifier_func
self.process_entity[role] = process_entity
self.valid_entity_for[role] = validator_func
self.oncreate_for[role] = oncreate
self.onrevoke_for[role] = onrevoke
def exists(self, role: str) -> bool:
"""Check whether a role exists.
:param role: Role name
"""
return role in self.oncreate_for
def grant(self, role: str, grantor: str, grantee: str, comment: Optional[str] = None) -> None:
"""
Gives a role to grantee and remembers who did it
:param role: Role to grant
:param grantor: Identifier of entity granting role
:param grantee: Entity to which role should be granted
:param comment: Comment, if required
:raises InvalidEntity: If the entity fails validation
"""
if self.exists(role) and self.is_valid_entity_for(role, grantee):
if role in self.process_entity:
grantee = self.process_entity[role](grantee)
# guard against double insertions
if not self.has(role, grantee):
self.db.grants.insert_one({
'role': role,
'entity': grantee,
'granted_by': grantor,
'comment': comment if comment is not None else '',
'time_granted': datetime.now(timezone.utc),
})
if role in self.oncreate_for:
self.oncreate_for[role](grantee)
else:
raise InvalidEntity
def revoke(self, role: str, entity: str) -> None:
"""Revoke a role from an entity
:param role: Role name
:param entity: The entity
"""
if has_app_context():
current_app.logger.info(f'revoking role {role} for {entity}')
if role in self.onrevoke_for:
self.onrevoke_for[role](entity)
self.db.grants.delete_one({'role': role, 'entity': entity})
def has(self, role: str, entity: str) -> bool:
"""Check whether an entity has a role
:param role: Role name
:param entity: The entity
"""
return self.db.grants.find_one({'role': role, 'entity': entity}) is not None
def has_some(self, roles: List[str], entity: str) -> bool:
"""Check whether an entity has at least one of the roles in the list
:param roles: The roles to check
:param entity: The entity
"""
return any(self.has(role, entity) for role in roles)
def get_role_names(self) -> List[Any]:
"""Get name and display name of all roles
:returns: A list of objects of the form
.. code-block:: json
{ "name": "string", "display_name": "string" }
"""
return [{'name': name, 'display_name': info['title']}
for (name, info) in self.form_text.items()]
def get_role_entities(self, role: str) -> List[Any]:
"""Get all entities having the given role
:param role: Role name
"""
return list(self.db.grants.find({'role': role}))
def get_role_text(self, role: str) -> Any:
"""Get the form text for a given role
:param role: Role name
"""
return self.form_text[role]
def is_valid_entity_for(self, role: str, entity: str) -> bool:
"""Check whether an entity is valid for a role
:param role: Role name
:param entity: The entity
"""
if role in self.valid_entity_for:
return self.valid_entity_for[role](entity)
return True
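# Minimal usage sketch (illustrative only): the database, role, and netid names
# below are made up, and a reachable MongoDB instance is assumed.
#
#     import pymongo
#
#     client = RolesClient(db=pymongo.MongoClient()['shrunk_example'])
#     client.create('power_user',
#                   qualifier_func=lambda netid: netid.endswith('admin'),
#                   validator_func=lambda netid: netid != '')
#     client.grant('power_user', grantor='dev_admin', grantee='jsmith')
#     assert client.has('power_user', 'jsmith')
#     client.revoke('power_user', 'jsmith')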
|
#!/usr/bin/env python3
"""
lastcheckpoint is a simple kludge to identify and print out the name
of the last checkpoint TimeSeries produced by a previous run of
ksfdsolver2.py. Typical usage is
export LASTCHECK=`python lastcheckpoint.py checks/checks115/options115e`
The argument (checks/checks115/options115e in the example) should be
the value of the --check option passed to ksfdsolver.
lastcheckpoint looks up all files whose names match the regular
expression <prefix>_[0-9]*_s[0-9]r[0-9].h5. It chooses
the maximum checkpoint number (the checkpoint number is the first
[0-9]* in the above regular expression). Calling this <mcn>, the
corresponding TimeSeries prefix is <prefix>_<mcn>_. This is written to
stdout. Alternatively, the -g/--gather option tells lastcheckpoint
to write <prefix>_<mcn>_s<rank>@, where <rank> is the number that
follows s in the regular expression. If rank has multiple values in
the last checkpoint filenames, the largest number is used.
"""
import sys, glob, os, re
from argparse import Namespace
from KSFD import Parser
import numpy as np
def parse_commandline(args=None):
# clargs = Namespace()
parser = Parser(description='Find last KSFD solution checkpoint')
parser.add_argument('-g', '--gather', action='store_true',
help='produce gather-type name')
parser.add_argument('-v', '--verbose', action='count')
parser.add_argument('prefix', nargs=1, help='checkpoint prefix')
clargs = parser.parse_args(args=args)
return clargs
def main():
clargs = parse_commandline()
prefix = clargs.prefix[0]
fnames = glob.glob(prefix + '_*_s*r*.h5')
plen = len(prefix)
fends = [
fname[plen:] for fname in fnames if fname.startswith(prefix)
]
cpre = re.compile(r'_([0-9]*)_s([0-9]*)r([0-9]*)\.h5')
csr = np.array([
list(map(int, cpre.fullmatch(fend).groups()))
for fend in fends if cpre.fullmatch(fend)
], dtype=int)
if len(csr) == 0:
raise FileNotFoundError
mcn = np.max(csr[:,0])
checkpoint = prefix + '_' + str(mcn) + '_'
mcn_fends = csr[csr[:,0] == mcn]
size = np.max(mcn_fends[:,1])
if clargs.gather:
print(checkpoint + 's' + str(size) + '@')
else:
print(checkpoint)
if __name__ == '__main__':
main()
|
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
        if 0 < n <= 2:
            return True
        s = 1
        while s < n:
            s = s * 2
            if s == n:
                return True
        return False
|
import math
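# pages_numbering_with_ink: given the first page to be printed (`current`) and a
# budget of digits the printer can still produce (`num_of_digits`), return the
# number of the last page that can be fully numbered. Each page i costs
# ceil(log10(i + 1)) digits, i.e. the number of decimal digits in its page number.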
def pages_numbering_with_ink(current, num_of_digits):
while num_of_digits >= 0:
num_of_digits -= int(math.ceil(math.log(current+1, 10)))
current += 1
return current - 2
if __name__ == '__main__':
current = 1
num_of_digits = 5
print(pages_numbering_with_ink(current, num_of_digits))
|
n = int(input())
d = {}
for i in range(0, n * 2, 2):
name = input()
party = input()
d[name] = [party, 0]
m = int(input())
for i in range(m):
b = input()
for key, val in d.items():
if b == key:
val[1] += 1
lst = sorted([val[1] for key, val in d.items()], reverse=True)
p = []
for key, val in d.items():
if val[1] == max(lst):
p.append(val[0])
if len(p) > 1:
print('tie')
else:
print(*p)
|
from proteus.default_p import *
from proteus.mprans import RANS2P
import numpy as np
from math import cos
from proteus import Context
ct = Context.get()
domain = ct.domain
nd = domain.nd
mesh = domain.MeshOptions
LevelModelType = RANS2P.LevelModel
if ct.useOnlyVF:
LS_model = None
else:
LS_model = 2
if ct.useRANS >= 1:
Closure_0_model = 5
Closure_1_model = 6
if ct.useOnlyVF:
Closure_0_model=2
Closure_1_model = 3
else:
Closure_0_model = None
Closure_1_model = None
coefficients = RANS2P.Coefficients(epsFact=ct.epsFact_viscosity,
sigma=0.0,
rho_0=ct.rho_0,
nu_0=ct.nu_0,
rho_1=ct.rho_1,
nu_1=ct.nu_1,
g=ct.g,
nd=nd,
ME_model=int(ct.movingDomain)+0,
VF_model=int(ct.movingDomain)+1,
LS_model=int(ct.movingDomain)+LS_model,
Closure_0_model=Closure_0_model,
Closure_1_model=Closure_1_model,
epsFact_density=ct.epsFact_density,
stokes=False,
useVF=ct.useVF,
useRBLES=ct.useRBLES,
useMetrics=ct.useMetrics,
eb_adjoint_sigma=1.0,
eb_penalty_constant=ct.weak_bc_penalty_constant,
forceStrongDirichlet=ct.ns_forceStrongDirichlet,
turbulenceClosureModel=ct.ns_closure,
movingDomain=ct.movingDomain)
dirichletConditions = {0: lambda x, flag: domain.bc[flag].p_dirichlet.init_cython(),
1: lambda x, flag: domain.bc[flag].u_dirichlet.init_cython(),
2: lambda x, flag: domain.bc[flag].v_dirichlet.init_cython()}
advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.bc[flag].p_advective.init_cython(),
1: lambda x, flag: domain.bc[flag].u_advective.init_cython(),
2: lambda x, flag: domain.bc[flag].v_advective.init_cython()}
diffusiveFluxBoundaryConditions = {0: {},
1: {1: lambda x, flag: domain.bc[flag].u_diffusive.init_cython()},
2: {2: lambda x, flag: domain.bc[flag].v_diffusive.init_cython()}}
class PerturbedSurface_p:
def __init__(self,waterdepth,amplitude):
self.waterdepth=waterdepth
self.amplitude=amplitude
def uOfXT(self,x,t):
d = ct.signedDistance(x, 0.)
if d <= 0:
return ct.pressure(x[0], x[1]-self.waterdepth, t, ct.h, ct.eps, ct.rho_0, ct.g, ct.k, 0.)+(ct.tank_dim[1]-(self.waterdepth+ct.eta(x[2], 0.)))*ct.rho_1*(-ct.g[1])
# return (ct.tank_dim[1]-(self.waterdepth+ct.eta(x)))*ct.rho_1*(-ct.g[1])+((self.waterdepth+ct.eta(x))-x[1])*ct.rho_0*(-ct.g[1])
else:
return (ct.tank_dim[1] - x[1])*ct.rho_1*(-ct.g[1])
class AtRest:
def __init__(self):
pass
def uOfXT(self,x,t):
return 0.0
initialConditions = {0:PerturbedSurface_p(ct.water_depth,
ct.water_amplitude), #[temp] from main we need depth and amplitude
1:AtRest(),
2:AtRest()}
|
#!/usr/bin/env python
import rospy
from tug_python_utils import YamlHelper as Config
class NominalValue():
"""
Base class for nominal value.
"""
def __init__(self):
pass
def check_hypothesis(self, value):
"""
Should contain the verification of the value based on the defined limits.
:param value: Input that should be checked
:return: True if value correspond to limits, otherwise False
"""
        return False
class NominalValueFactory():
"""
Factory for getting the right verification instance.
"""
@staticmethod
def create_nominal_value(config):
"""
Decode verification type and return new corresponding object.
:param config: Configuration from yaml file
:return: New instance of a corresponding verification object
"""
type = Config.get_param(config, 'type')
if type == "gauss":
return GaussNominalValue(config)
elif type == "exact":
return ExactValue(config)
elif type == "not":
return NotValue(config)
elif type == "greather_than":
return GreaterThanValue(config)
elif type == "less_than":
return LessThanValue(config)
elif type == "in_between":
return InBetweenValue(config)
elif type == "not_in_between":
return NotInBetweenValue(config)
else:
rospy.logwarn("nominal value type '" + str(type) + "' not found")
# return None
return NominalValue()
class GaussNominalValue(NominalValue):
"""
Gaussian verification of value.
"""
def __init__(self, config):
"""
Constructor for gaussian verification. Uses mean and standard deviation.
:param config: Configuration from yaml file
"""
NominalValue.__init__(self)
self._mean = Config.get_param(config, 'mean')
self._std_deviation = Config.get_param(config, 'std_deviation')
def _distance_to_mean(self, value):
"""
Calculates the difference of the given value and the mean of the Gauss.
:param value: Value, from which the difference to mean should be calculated
:return: difference of the given value and the mean of the Gauss.
"""
        return abs(value - self._mean)
def check_hypothesis(self, value):
"""
Check if the given value confirms with the defined Gauss.
:param value: Value that should be checked
:return: True if value confirms with the defined Gauss, otherwise False
"""
distance = self._distance_to_mean(value)
return True if distance < self._std_deviation else False
class ExactValue(NominalValue):
"""
Check if value is the same as the defined one.
"""
def __init__(self, config):
"""
Constructor for exact verification.
:param config: Configuration from yaml file
"""
NominalValue.__init__(self)
self._exact = Config.get_param(config, 'exact')
def check_hypothesis(self, value):
"""
Check if given value is exactly the same as defined.
:param value: Value that should be checked
:return: True if value confirms with the given requirements, otherwise False
"""
        return value == self._exact
class NotValue(NominalValue):
"""
Check if value is not the same as the defined one.
"""
def __init__(self, config):
"""
Constructor for exactly not verification.
:param config: Configuration from yaml file
"""
NominalValue.__init__(self)
self._exact_not = Config.get_param(config, 'exact_not')
def check_hypothesis(self, value):
"""
Check if given value is exactly not the same as defined.
:param value: Value that should be checked
:return: True if value confirms not with the given requirements, otherwise False
"""
        return value != self._exact_not
class GreaterThanValue(NominalValue):
"""
Check if value is greater than the defined one.
"""
def __init__(self, config):
"""
Constructor for greater than verification.
:param config: Configuration from yaml file
"""
NominalValue.__init__(self)
self._greater_than = Config.get_param(config, 'greater_than')
def check_hypothesis(self, value):
"""
Check if given value is greater than defined.
:param value: Value that should be checked
:return: True if value confirms with the given requirements, otherwise False
"""
return True if value > self._greater_than else False
class LessThanValue(NominalValue):
"""
Check if value is less than the defined one.
"""
def __init__(self, config):
"""
Constructor for less than verification.
:param config: Configuration from yaml file
"""
NominalValue.__init__(self)
self._less_than = Config.get_param(config, 'less_than')
def check_hypothesis(self, value):
"""
Check if given value is less than defined.
:param value: Value that should be checked
:return: True if value confirms with the given requirements, otherwise False
"""
return True if value < self._less_than else False
class InBetweenValue(NominalValue):
"""
    Check if value is greater than a defined lower bound and smaller than an upper bound.
"""
def __init__(self, config):
"""
Constructor for in between verification.
:param config: Configuration from yaml file
"""
NominalValue.__init__(self)
self._lower_bound = Config.get_param(config, 'lower_bound')
self._upper_bound = Config.get_param(config, 'upper_bound')
        if self._lower_bound > self._upper_bound:
rospy.logwarn("lower bound is bigger than upper bound. 'InBetweenValue' will not work correctly!")
def check_hypothesis(self, value):
"""
        Check if given value is greater than a lower bound and smaller than an upper bound.
:param value: Value that should be checked
:return: True if value confirms with the given requirements, otherwise False
"""
return True if self._lower_bound < value < self._upper_bound else False
class NotInBetweenValue(NominalValue):
"""
    Check if value is smaller than a defined lower bound and greater than an upper bound.
"""
def __init__(self, config):
"""
Constructor for not in between verification.
:param config: Configuration from yaml file
"""
NominalValue.__init__(self)
self._lower_bound = Config.get_param(config, 'lower_bound')
self._upper_bound = Config.get_param(config, 'upper_bound')
        if self._lower_bound > self._upper_bound:
rospy.logwarn("lower bound is bigger than upper bound. 'NotInBetweenValue' will not work correctly!")
def check_hypothesis(self, value):
"""
        Check if given value is smaller than a lower bound and greater than an upper bound.
:param value: Value that should be checked
:return: True if value confirms with the given requirements, otherwise False
"""
return True if value < self._lower_bound or self._upper_bound < value else False
|
from app import db
from app.models import Rank
SQL_CMD = 'CREATE DATABASE kifutalk CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci'
# initialize database
db.create_all()
# populate ranks table
for i in range(18):
r = Rank(rank_en='%dk'%(18-i), rank_cn='%d级'%(18-i))
db.session.add(r)
for i in range(9):
r = Rank(rank_en='%dd'%(i+1), rank_cn='%d段'%(i+1))
db.session.add(r)
for i in range(9):
r = Rank(rank_en='%dp'%(i+1), rank_cn='职业%d段'%(i+1))
db.session.add(r)
db.session.commit()
|
#!/usr/bin/env python
from itertools import groupby
import math
import nltk
import sys
def do_check(filename):
body_grammar = nltk.data.load("file:%s" % filename, 'cfg')
uses = {}
print "Nonterminals with no productions:"
for label,prods in groupby(body_grammar.productions(),
lambda p: p.lhs().symbol()):
l = label
if l not in uses:
uses[l] = 0
np = 0
for p in prods:
np += 1
for term in p.rhs():
s = repr(term)
if s not in uses:
uses[s] = 0
uses[s] += 1
if (not isinstance(term, basestring) and
len(body_grammar.productions(term)) == 0):
print "* %s (label %s)" % (term, label)
# check # of productions
#if np >= 3:
# bits = math.log(np-1, 2)
# if int(bits) != bits:
# print "*** label %s has %s productions" % (label, np)
print "Nonterminals with duplicate productions:"
for label,prods in groupby(body_grammar.productions(),
lambda p: p.lhs().symbol()):
l = label
pset = set()
#done = set()
for p in prods:
if p in pset:# and p not in done:
print "* term %s: %s" % (label, p)
#done.add(p)
pset.add(p)
for term in p.rhs():
s = repr(term)
if s not in uses:
uses[s] = 0
uses[s] += 1
if (not isinstance(term, basestring) and
len(body_grammar.productions(term)) == 0):
print "* %s (label %s)" % (term, label)
print "\nNonterminals with no uses:"
print "grep -v ",
for t,u in uses.iteritems():
if u == 0 and t != "START":
print "-e \"^%s -\" " % t,
print filename
do_check(sys.argv[1])
|
#!/usr/bin/env python
#
# setup.py
#
"""Aberdeen Setup Script"""
from setuptools import setup, find_packages
from importlib.machinery import SourceFileLoader
desc = "Conversion from markdown files to database entries to use as the backend of a blog"
NAME = "aberdeen"
CONSOLE_SCRIPTS = [
'aberdeen-init = aberdeen.cli.init:main',
'aberdeen-update-hook = aberdeen.cli.update_hook:main'
]
REQUIRES = [
'emoji',
'termcolor2',
'python-dateutil',
]
OPTIONAL_REQUIRES = {
'markdown': ['Markdown'],
'mongodb': ['pymongo','asyncio_mongo']
}
KEYWORDS = [
'markdown',
'blog',
'publishing',
'nosql',
'mongodb',
]
CLASSIFIERS = [
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Topic :: Utilities",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"License :: OSI Approved :: Apache Software License"
]
TESTS_REQUIRE = [
'pytest',
'pytest-asyncio',
]
SETUP_REQUIRES = [
'pytest-runner',
]
meta = SourceFileLoader("meta", "aberdeen/__meta__.py").load_module()
tar_url = 'https://github.com/akubera/aberdeen/archive/v%s.tar.gz' % (meta.version)
setup(
name=NAME,
packages=find_packages(exclude=['test']),
version=meta.version,
description=desc,
url=meta.url,
download_url=tar_url,
author=meta.author,
author_email=meta.author_email,
keywords=KEYWORDS,
license=meta.license,
classifiers=CLASSIFIERS,
platforms='any',
install_requires=REQUIRES,
extras_require=OPTIONAL_REQUIRES,
tests_require=TESTS_REQUIRE,
setup_requires=SETUP_REQUIRES,
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
package_data={'aberdeen': ['git_hooks/*']}
)
|
from itertools import groupby
S = input()
for key, gr in groupby(S):
print(tuple([len(list(gr)), int(key)]), end=' ')
|
from simple_hilbert import *
from advection_block_analytical import *
import space_filling_decomp_new as sfc
import numpy as np # Numpy
import scipy.sparse.linalg as spl
import scipy.linalg as sl
import scipy.sparse as sp
import torch
import progressbar
from util import *
def loadsimulation(data_dir, simulaion_steps, simulaion_num, reshape = False):
for i in range(simulaion_steps + 1):
iter_data = np.loadtxt(F'{data_dir}_%d/step_%d.txt'% (simulaion_num, i))
if reshape:
size = np.sqrt(iter_data.shape[0]).astype('int')
iter_data = iter_data.reshape((size, size))
if i != 0: tensor = torch.cat((tensor, torch.unsqueeze(torch.from_numpy(iter_data), 0)), 0)
else:
tensor = torch.unsqueeze(torch.from_numpy(iter_data), 0)
return tensor
def load_tensor(data_dir, simulation_indexes):
total = len(simulation_indexes)
cnt_progress = 0
bar=progressbar.ProgressBar(maxval=total)
    tensor = loadsimulation(data_dir, simulaion_steps, simulation_indexes[0])
cnt_progress+=1
bar.update(cnt_progress)
for i in range(1, total):
tensor = torch.cat((tensor, loadsimulation(data_dir, simulaion_steps, simulation_indexes[i])))
cnt_progress+=1
bar.update(cnt_progress)
bar.finish()
return tensor
def index_split(train_ratio, valid_ratio, test_ratio, total_num = 500):
if train_ratio + valid_ratio + test_ratio != 1:
raise ValueError("The sum of three input ratios should be 1!")
total_index = np.arange(1, total_num + 1)
rng = np.random.default_rng()
total_index = rng.permutation(total_index)
knot_1 = int(total_num * train_ratio)
knot_2 = int(total_num * valid_ratio) + knot_1
train_index, valid_index, test_index = np.split(total_index, [knot_1, knot_2])
return train_index, valid_index, test_index
def sparse_square_grid(N):
n = N ** 2
offsets = [-N, -1, 0, 1, N]
diags = []
# coefficient in front of u_{i-N}:
diags.append(np.ones(n-N))
# coefficient in front of u_{i-1}:
diags.append(np.ones(n-1))
# main diagonal, zero for centre difference in space
diags.append(np.ones(n))
# coefficient in front of u_{i+1}:
diags.append(diags[1])
# coefficient in front of u_{i+N}:
diags.append(diags[0])
K = sp.diags(diags, offsets, format='csr')
# loop over left-most column in grid (except first row)
for i in range(N, n, N):
K[i, i-1] = 0
K[i-1, i] = 0
K.eliminate_zeros()
    return K.indptr + 1, K.indices + 1, K.getnnz()
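# Note: sparse_square_grid returns the CSR row-pointer and column-index arrays
# shifted to 1-based indexing (plus the number of nonzeros); these are the
# findm/colm/ncolm arguments that get_MFT_RNN_curves_structured below passes to
# the space_filling_decomp_new (sfc) routine.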
def get_hilbert_curves(size, num):
Hilbert_index = hilbert_space_filling_curve(size)
invert_Hilbert_index = np.argsort(Hilbert_index)
if num == 1: return [Hilbert_index], [invert_Hilbert_index]
elif num == 2:
Hilbert_index_2 = Hilbert_index.reshape(size, size).T.flatten()
invert_Hilbert_index_2 = np.argsort(Hilbert_index_2)
return [Hilbert_index, Hilbert_index_2], [invert_Hilbert_index, invert_Hilbert_index_2]
def get_MFT_RNN_curves_structured(size, num):
findm, colm, ncolm = sparse_square_grid(size)
curve_lists = []
inv_lists = []
ncurve = num
graph_trim = -10 # has always been set at -10
    starting_node = 0 # = 0: do not specify a starting node; otherwise, specify the starting node
whichd, space_filling_curve_numbering = sfc.ncurve_python_subdomain_space_filling_curve(colm, findm, starting_node, graph_trim, ncurve, size**2, ncolm)
for i in range(space_filling_curve_numbering.shape[-1]):
curve_lists.append(np.argsort(space_filling_curve_numbering[:,i]))
inv_lists.append(np.argsort(np.argsort(space_filling_curve_numbering[:,i])))
return curve_lists, inv_lists
|
#code of the program
#fibonacci series
n=int(input("No. of terms :-" ))
#1st two terms are predefined
a1=0
a2=1
i=0
#applying conditions for correct results
if n<=0:
print("Enter the terms > 0 ")
elif n==1:
print(a1)
else:
#using loops
print("FIBONACCI SERIES :-") #generating the series
    while i < n:
print(a1)
a=a1+a2
a1=a2
a2=a
i+=1
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
class Value:
"""
Mix-in class to encapsulate nodes that can hold a value.
"""
# value management
def getValue(self, **kwds):
"""
Return my value
"""
# easy enough
return self._value
def setValue(self, value, **kwds):
"""
Set my value
"""
# store the value
self._value = value
# all done
return self
# meta methods
def __init__(self, value=None, **kwds):
# chain up
super().__init__(**kwds)
# save the value
self._value = value
# all done
return
# private data
_value = None
# end of file
|
import pygame, math, time
from Pendulum import Pendulum, dist, onPath, collide
if __name__ == "__main__":
# Define pygame variables
(width, height) = (640, 480)
background_colour = (255,255,255)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Pendulum")
running = True
selected = None
# Define the stack of pendulums
pendulum_stack = [Pendulum(0, 250, 40),
Pendulum(0, 300, 40),
Pendulum(0, 350, 40),
Pendulum(0, 400, 40)]
# Assign an id to each pendulum
for i, p in enumerate(pendulum_stack):
p.ID = i
# Number of pendulums for reference within the game loop
numPen = len(pendulum_stack)
while running:
# draw background and line pendulums hang from
screen.fill(background_colour)
pygame.draw.line(screen, (0,0,255), (120, 40),
(520, 40), 3)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
for p in pendulum_stack:
# Check for mouse left click
if pygame.mouse.get_pressed() == (1,0,0):
(mouseX, mouseY) = pygame.mouse.get_pos()
a = mouseX - p.pivot_x
b = mouseY - p.pivot_y
# If mouse position is within bounds of a pendulum
# then select that pendulum
if dist(a, b, p.x, p.y) < 40:
if selected == None:
selected = p.ID
(p.x, p.y) = onPath(a, b, p.length, p.pivot_y)
p.v_x = 0
elif selected == p.ID:
(p.x, p.y) = onPath(a, b, p.length, p.pivot_y)
p.v_x = 0
if p.ID < numPen-1:
for i in range(p.ID, numPen-1):
collide(pendulum_stack[p.ID], pendulum_stack[i+1])
if event.type == pygame.MOUSEBUTTONUP:
# Deselect pendulum upon mouse release
if selected == p.ID:
p.v_x = 0
selected = None
# Check for collisions with adjacent pendulum
if p.ID < numPen-1:
for i in range(p.ID, numPen-1):
collide(pendulum_stack[p.ID], pendulum_stack[i+1])
# Pendulum should swing unless it's selected by the user
if selected != p.ID:
p.swing()
p.draw(screen)
time.sleep(0.4*0.1)
pygame.display.flip()
pygame.quit()
|
from abc import ABCMeta, abstractmethod
from six import with_metaclass, integer_types
from functools import partial
from PySide2 import QtWidgets
from . import afnbase
from ..xml import xmlutils
from ..userinterface import qmainmenu, qloggingmenu
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class AFnQt(with_metaclass(ABCMeta, afnbase.AFnBase)):
"""
Overload of AFnBase that outlines function set behaviour for DCC qt objects.
"""
__slots__ = ()
@abstractmethod
def getMainWindow(self):
"""
Returns the main window.
:rtype: PySide2.QtWidgets.QMainWindow
"""
pass
def getMainMenuBar(self):
"""
Returns the menu bar from the main window.
This is the safest approach to retrieve the current menu bar.
:rtype: PySide2.QtWidgets.QMenuBar
"""
# Iterate through children
#
for child in self.getMainWindow().children():
# Check if this is a menu bar
#
if isinstance(child, QtWidgets.QMenuBar) and child.isVisible():
return child
else:
continue
@staticmethod
def objectSafeName(string):
"""
Returns a string that safe to assign as an object name.
This string will be compliant with Maya's pathing syntax.
:rtype: str
"""
return string.replace(' ', '_')
@abstractmethod
def partial(self, command):
"""
Returns a partial object for executing commands in a DCC embedded language.
:type command: str
:rtype: partial
"""
pass
@staticmethod
def execute(string):
"""
Executes the supplied string.
:type string: str
:rtype: None
"""
try:
            exec(string)
except Exception as exception:
log.error(exception)
def iterMainMenus(self):
"""
Returns a generator that yields all top-level main menu objects.
:rtype: iter
"""
# Iterate through actions
#
menuBar = self.getMainMenuBar()
for child in menuBar.children():
# Check if menu is visible
#
if isinstance(child, QtWidgets.QMenu):
yield child
else:
continue
def findMainMenuByTitle(self, title):
"""
Returns the top level menu associated with the given title.
:type title: str
:rtype: QtWidgets.QMenu
"""
menus = [x for x in self.iterMainMenus() if x.title() == title]
numMenus = len(menus)
if numMenus == 0:
return
elif numMenus == 1:
return menus[0]
else:
raise TypeError('findTopLevelMenuByTitle() expects a unique title!')
def findMainMenuByName(self, name):
"""
Returns the top level menu associated with the given object name.
:type name: str
:rtype: QtWidgets.QMenu
"""
menus = [x for x in self.iterMainMenus() if x.objectName() == name]
numMenus = len(menus)
if numMenus == 0:
return
elif numMenus == 1:
return menus[0]
else:
raise TypeError('findTopLevelMenuByName() expects a unique name!')
def createMenuFromXmlElement(self, xmlElement, parent=None):
"""
Returns a menu item using the supplied xml element.
:type xmlElement: xml.etree.ElementTree.Element
:type parent: Union[QtWidgets.QMenu, QtWidgets.QMenuBar]
:rtype: Union[QtWidgets.QMenu, QtWidgets.QAction]
"""
# Query menu tag to determine menu type
#
if xmlElement.tag == 'Menu':
# Create new menu
#
title = xmlElement.get('title', default='')
log.info('Creating menu: %s' % title)
menu = QtWidgets.QMenu(title, parent)
menu.setObjectName(self.objectSafeName(title))
menu.setSeparatorsCollapsible(False)
menu.setTearOffEnabled(bool(xmlElement.get('tearOff', default=False)))
menu.setWindowTitle(title)
# Create child menus
#
for child in iter(xmlElement):
self.createMenuFromXmlElement(child, parent=menu)
# Assign submenu to parent menu
#
if parent is not None:
parent.addMenu(menu)
return menu
elif xmlElement.tag == 'Action':
# Create new action
#
text = xmlElement.get('text', default='')
log.info('Creating action: %s' % text)
action = QtWidgets.QAction(text, parent)
action.setObjectName(self.objectSafeName(text))
action.setToolTip(xmlElement.get('tooltip', ''))
# Configure trigger event
#
language = xmlElement.get('language', default='')
command = xmlElement.get('command', default='')
if language == 'python':
action.triggered.connect(partial(self.execute, command))
else:
action.triggered.connect(self.partial(command))
# Assign action to parent menu
#
parent.addAction(action)
return action
elif xmlElement.tag == 'Section':
return parent.addSection(xmlElement.get('text', default=''))
elif xmlElement.tag == 'Separator':
return parent.addSeparator()
else:
raise TypeError('createMenuItem() expects a valid xml tag (%s found)!' % xmlElement.tag)
def createMenuFromFile(self, filePath):
"""
Creates a menu system from the supplied xml file configuration.
:type filePath: str
:rtype: QtWidgets.QMenu
"""
# Parse xml file and inspect root element
#
xmlTree = xmlutils.parse(filePath)
xmlElement = xmlTree.getroot()
if xmlElement.tag != 'Menu':
raise TypeError('createMenuFromFile() expects Menu tag for root element (%s given)!' % xmlElement.tag)
# Create menu from xml element
#
title = xmlElement.get('title', '')
tearOff = bool(xmlElement.get('tearOff', True))
menu = self.findMainMenuByTitle(title)
if menu is None:
menuBar = self.getMainMenuBar()
menu = qmainmenu.QMainMenu(title, tearOff=tearOff, parent=menuBar)
menuBar.insertMenu(menuBar.actions()[-1], menu)
# Append new menu to menu bar
#
menu.clear()
for child in iter(xmlElement):
self.createMenuFromXmlElement(child, parent=menu)
return menu
def removeMenuFromAssociatedFile(self, filePath):
"""
Removes the menu that is associated with the supplied file.
:type filePath: str
:rtype: None
"""
# Parse xml file and inspect root element
#
xmlTree = xmlutils.parse(filePath)
xmlElement = xmlTree.getroot()
if xmlElement.tag != 'Menu':
raise TypeError('removeMenuFromAssociatedFile() expects Menu tag for root element (%s given)!' % xmlElement.tag)
# Unregister top-level menu
#
title = xmlElement.get('title', '')
menu = self.findMainMenuByTitle(title)
if menu is not None:
menu.deleteLater()
def createLoggingMenu(self):
"""
Creates a logging menu for modifying logger levels.
If the menu already exists the current instance will be refreshed.
:rtype: None
"""
# Check if menu already exists
#
menuBar = self.getMainMenuBar()
menu = self.findMainMenuByTitle('Logging Control')
if menu is None:
menu = qloggingmenu.QLoggingMenu('Logging Control', parent=menuBar)
menuBar.insertMenu(menuBar.actions()[-1], menu)
else:
menu.refresh()
return menu
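# Illustrative XML menu configuration (hypothetical file contents) in the format
# consumed by createMenuFromFile/createMenuFromXmlElement above; only the tags
# and attributes read by this module (Menu, Action, Section, Separator; title,
# tearOff, text, tooltip, language, command) are shown:
#
#     <Menu title="My Tools" tearOff="true">
#         <Section text="Rigging"/>
#         <Action text="Build Rig" tooltip="Builds the rig"
#                 language="python" command="print('build rig')"/>
#         <Separator/>
#         <Action text="Open Docs" command="OpenDocsCmd"/>
#     </Menu>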
|
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import spconv
from pointnet2.pointnet2_modules import PointnetSAModuleMSG
from pointnet2.pointnet2_utils import furthest_point_sample
from pvrcnn.config import PvrcnnConfig
from pvrcnn.data_classes import Boxes3D
from pvrcnn.roi_grid_pool import RoiGridPool
from pvrcnn.backbone import SparseCNN, VoxelFeatureExtractor
class BEV_FeatureGatherer(nn.Module):
def __init__(self, cfg, voxel_offset, base_voxel_size):
super(BEV_FeatureGatherer, self).__init__()
self.cfg = cfg
self.voxel_offset = voxel_offset
self.base_voxel_size = base_voxel_size
def normalize_grid_sample_indices(self, indices, H, W):
"""F.grid_sample expects normalized indices on (-1, +1)."""
image_dims = torch.cuda.FloatTensor([H - 1, W - 1])
indices = torch.min(torch.clamp(indices, 0), image_dims)
indices = 2 * (indices / (image_dims - 1)) - 1
return indices
def compute_bev_indices(self, keypoint_xyz, H, W):
"""Convert xyz coordinates to fractional BEV indices."""
indices = (keypoint_xyz[:, None, :, :2] - self.voxel_offset[:2])
indices = indices / (self.base_voxel_size[:2] * self.cfg.strides[-1])
indices = self.normalize_grid_sample_indices(indices, H, W)
return indices
def forward(self, volume, keypoint_xyz):
"""
Project 3D voxel grid to XY-plane and gather
BEV features using bilinear interpolation.
"""
volume = volume.dense()
N, C, D, H, W = volume.shape
volume = volume.view(N, C * D, H, W)
indices = self.compute_bev_indices(keypoint_xyz, H, W)
features = F.grid_sample(volume, indices).squeeze(2)
return features
class PV_RCNN(nn.Module):
"""
For each feature volume stride, convert keypoint locations to
continuous voxel index coordinates. Then fetch voxels within ball query.
Raw input points are treated as an additional stride-1 voxel stage.
"""
def __init__(self, cfg):
super(PV_RCNN, self).__init__()
self.pnets = self.build_pointnets(cfg)
self.roi_grid_pool = RoiGridPool(cfg)
self.voxel_generator, grid_shape = self.build_voxel_generator(cfg)
self.vfe = VoxelFeatureExtractor()
self.cnn = SparseCNN(grid_shape, cfg)
self.bev_gatherer = self.build_bev_gatherer(cfg)
self.cfg = cfg
def build_voxel_generator(self, cfg):
"""Voxel-grid is reversed XYZ -> ZYX and padded in Z-axis."""
voxel_generator = spconv.utils.VoxelGenerator(
voxel_size=cfg.voxel_size,
point_cloud_range=cfg.grid_bounds,
max_voxels=cfg.max_voxels,
max_num_points=cfg.max_num_points,
)
grid_shape = np.r_[voxel_generator.grid_size[::-1]] + [1, 0, 0]
return voxel_generator, grid_shape
def build_pointnets(self, cfg):
"""Copy channel list because PointNet modifies it in-place."""
pnets = []
for i in range(len(cfg.mlps)):
pnets += [PointnetSAModuleMSG(
npoint=-1, radii=cfg.radii[i], nsamples=cfg.nsamples[i],
mlps=cfg.mlps[i].copy(), use_xyz=True,
)]
return nn.Sequential(*pnets)
def build_bev_gatherer(self, cfg):
bev = BEV_FeatureGatherer(
cfg, self.cnn.voxel_offset, self.cnn.base_voxel_size)
return bev
def voxelize(self, points):
"""
Compute sparse voxel grid.
:points_in np.ndarray of shape (Np, 4)
:points_out FloatTensor of shape (Np, 4)
:features FloatTensor of shape (Nv, 1)
:coordinates IntTensor of shape (Nv, 4)
"""
features, coordinates, occupancy = self.voxel_generator.generate(points)
coordinates = np.pad(coordinates, ((0, 0), (1, 0)), mode="constant", constant_values=0)
from_numpy = lambda x: torch.from_numpy(x).cuda()
points, features, coordinates, occupancy = \
map(from_numpy, (points, features, coordinates, occupancy))
features = self.vfe(features, occupancy)
return points, features, coordinates
def sample_keypoints(self, points):
"""
Sample keypoints from raw pointcloud. Assumes unit batch size.
:points FloatTensor of shape (N, 4).
:return FloatTensor of shape (n_keypoints, 3),
"""
points = points.unsqueeze(0).contiguous()
indices = furthest_point_sample(points, self.cfg.n_keypoints)
keypoints = points[:, indices.squeeze(0).long(), :3].contiguous()
return keypoints
def pnet_forward(self, cnn_out, keypoint_xyz):
"""
Call PointNet modules to gather keypoint features
from the intermediate 3D CNN feature maps.
:param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, N, C) tensor of the descriptors of the features
        :param new_xyz: (B, npoint, 3) tensor of the new features' xyz
        :return (B, npoint, sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
pnet_out = []
for (voxel_xyz, voxel_features), pnet in zip(cnn_out, self.pnets):
voxel_xyz = voxel_xyz.unsqueeze(0).contiguous()
voxel_features = voxel_features.t().unsqueeze(0).contiguous()
out = pnet(voxel_xyz, voxel_features, keypoint_xyz)[1]
pnet_out += [out]
return pnet_out
def forward(self, points):
"""
TODO: Document intermediate tensor shapes.
"""
points, features, coordinates = self.voxelize(points)
cnn_out, final_volume = self.cnn(features, coordinates, batch_size=1)
cnn_out = [torch.split(points, [3, 1], dim=-1)] + cnn_out
keypoints_xyz = self.sample_keypoints(points)
pnet_out = self.pnet_forward(cnn_out, keypoints_xyz)
bev_out = self.bev_gatherer(final_volume, keypoints_xyz)
features = torch.cat(pnet_out + [bev_out], dim=1)
proposals = Boxes3D(20 * torch.rand((25, 7)).cuda())
pooled_features = self.roi_grid_pool(proposals, keypoints_xyz, features)
return pooled_features
def main():
cfg = PvrcnnConfig()
net = PV_RCNN(cfg).cuda()
points = np.random.uniform(0, 50, size=(120000, cfg.raw_C_in)).astype(np.float32)
out = net(points)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests around dash governance objects."""
import json
import time
from test_framework.messages import uint256_to_string
from test_framework.test_framework import DashTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error
def validate_object(prepared, rpc_prepared):
assert_equal(prepared["parentHash"], rpc_prepared["parentHash"])
assert_equal(prepared["collateralHash"], rpc_prepared["collateralHash"])
assert_equal(prepared["createdAt"], rpc_prepared["createdAt"])
assert_equal(prepared["revision"], rpc_prepared["revision"])
assert_equal(prepared["hex"], rpc_prepared["data"]["hex"])
del rpc_prepared["data"]["hex"]
assert_equal(prepared["data"], rpc_prepared["data"])
class DashGovernanceTest (DashTestFramework):
def set_test_params(self):
self.set_dash_test_params(2, 1)
def prepare_object(self, object_type, parent_hash, creation_time, revision, name, amount):
proposal_rev = revision
proposal_time = int(creation_time)
proposal_template = {
"type": object_type,
"name": name,
"start_epoch": proposal_time,
"end_epoch": proposal_time + 24 * 60 * 60,
"payment_amount": amount,
"payment_address": self.nodes[0].getnewaddress(),
"url": "https://dash.org"
}
proposal_hex = ''.join(format(x, '02x') for x in json.dumps(proposal_template).encode())
collateral_hash = self.nodes[0].gobject("prepare", parent_hash, proposal_rev, proposal_time, proposal_hex)
return {
"parentHash": parent_hash,
"collateralHash": collateral_hash,
"createdAt": proposal_time,
"revision": proposal_rev,
"hex": proposal_hex,
"data": proposal_template,
}
def run_test(self):
time_start = time.time()
object_type = 1 # GOVERNANCE PROPOSAL
# At start there should be no prepared objects available
assert_equal(len(self.nodes[0].gobject("list-prepared")), 0)
        # Create 5 proposals with different creation times and validate that they are ordered as expected
p1 = self.prepare_object(object_type, uint256_to_string(0), time_start, 0, "SortByTime1", 1)
p2 = self.prepare_object(object_type, uint256_to_string(0), time_start + 10, 1000, "SortByTime2", 10)
p3 = self.prepare_object(object_type, uint256_to_string(0), time_start - 10, 1000000000, "SortByTime3", 20)
p4 = self.prepare_object(object_type, uint256_to_string(0), time_start + 1, -20, "SortByTime4", 400)
p5 = self.prepare_object(object_type, uint256_to_string(0), time_start + 30, 1, "SortByTime5", 100000000)
rpc_list_prepared = self.nodes[0].gobject("list-prepared")
assert_equal(len(rpc_list_prepared), 5)
expected_order = [p3, p1, p4, p2, p5]
for i in range(len(expected_order)):
validate_object(expected_order[i], rpc_list_prepared[i])
# Create two more with the same time
self.prepare_object(object_type, uint256_to_string(0), time_start + 60, 1, "SameTime1", 2)
self.prepare_object(object_type, uint256_to_string(0), time_start + 60, 2, "SameTime2", 2)
# Query them with count=2
rpc_list_prepared = self.nodes[0].gobject("list-prepared", 2)
# And make sure it does only return 2 of the 7 available
assert_equal(len(rpc_list_prepared), 2)
# Since they have the same time they should be sorted by hex data, in this case, the second should be greater
assert_greater_than(rpc_list_prepared[1]["data"]["hex"], rpc_list_prepared[0]["data"]["hex"])
# Restart node0 and make sure it still contains all valid proposals after restart
rpc_full_list_pre_restart = self.nodes[0].gobject("list-prepared")
self.restart_node(0)
rpc_full_list_post_restart = self.nodes[0].gobject("list-prepared")
assert_equal(rpc_full_list_pre_restart, rpc_full_list_post_restart)
# Create more objects so that we have a total of 11
self.prepare_object(object_type, uint256_to_string(0), time_start, 0, "More1", 1)
self.prepare_object(object_type, uint256_to_string(0), time_start, 0, "More2", 1)
self.prepare_object(object_type, uint256_to_string(0), time_start, 0, "More3", 1)
self.prepare_object(object_type, uint256_to_string(0), time_start, 0, "More4", 1)
# Make sure default count is 10 while there are 11 in total
assert_equal(len(self.nodes[0].gobject("list-prepared")), 10)
assert_equal(len(self.nodes[0].gobject("list-prepared", 12)), 11)
# Make sure it returns 0 objects with count=0
assert_equal(len(self.nodes[0].gobject("list-prepared", 0)), 0)
# And test some invalid count values
assert_raises_rpc_error(-8, "Negative count", self.nodes[0].gobject, "list-prepared", -1)
assert_raises_rpc_error(-8, "Negative count", self.nodes[0].gobject, "list-prepared", -1000)
if __name__ == '__main__':
DashGovernanceTest().main()
|
"""
Inputs
salary --> int --> s
category --> int --> c
Outputs
raise --> int --> a
new salary --> int --> sn
"""
s=int(input("Digite el salario:") )
c=int(input("Digite una categoria del 1 al 5: "))
if (c==1):
    a=s*.10
elif (c==2):
    a=s*.15
elif (c==3):
    a=s*.20
elif (c==4):
    a=s*.40
elif (c==5):
    a=s*.60
else:
    a=0
sn=s+a
print("El aumento sera de: "+str(a))
print("Valor del sueldo nuevo: " +str(sn))
|
from magma import *
from mantle.lattice.mantle40.logic import OrN
__all__ = ['DefineEncoder', 'Encoder']
#
# Given an n-bit input array with only a single bit set,
# return the position of the set bit.
#
# NB. The current implementation only works for n<=8
#
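# For example, with n=4 the expected behaviour is: an input with only bit 0 set
# encodes to 0, only bit 1 set -> 1, only bit 2 set -> 2, and only bit 3 set -> 3
# (the 2-bit output holds that position in binary).
#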
def DefineEncoder(n):
assert n <= 8
logn = log2(n)
class _Encoder(Circuit):
name = 'Encoder'+str(n)
IO = ['I', In(Array(n, Bit)), 'O', Out(Array(logn, Bit))]
@classmethod
def definition(Enc):
def f(y):
                or_ = OrN(n//2)
os = []
for i in range(n):
if i & (1 << y): os.append(Enc.I[i])
wire(array(*os), or_)
return AnonymousCircuit("O", or_.O)
enc = join( col(f, logn) )
wire(enc.O, Enc.O)
return _Encoder
def Encoder(n, **kwargs):
return DefineEncoder(n)(**kwargs)
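# A minimal pure-Python reference model of the same encoding, added here purely
# as an illustrative sketch (it is not part of the magma/mantle circuit above):
# output bit y is the OR of every input bit whose index has bit y set.
def _encode_one_hot_reference(bits):
    n = len(bits)
    logn = n.bit_length() - 1
    return [int(any(bits[i] for i in range(n) if i & (1 << y)))
            for y in range(logn)]
# e.g. _encode_one_hot_reference([0, 0, 0, 0, 1, 0, 0, 0]) == [0, 0, 1]  (LSB first, i.e. 4)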
|
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from distutils.version import LooseVersion
from f5.sdk_exception import UnsupportedOperation
class TestPolicyBuilder(object):
def test_update_raises(self, policy):
with pytest.raises(UnsupportedOperation):
policy.policy_builder.update()
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) > LooseVersion('11.5.4'),
reason='This test is for versions below 11.6.0.'
)
def test_load_modify_v11_5(self, policy):
r1 = policy.policy_builder.load()
assert r1.kind == 'tm:asm:policies:policy-builder:pbconfigstate'
assert r1.enablePolicyBuilder is False
r1.modify(enablePolicyBuilder=True)
assert r1.enablePolicyBuilder is True
r2 = policy.policy_builder.load()
assert r1.kind == r2.kind
assert hasattr(r2, 'responseStatusCodes')
assert hasattr(r2, 'learnFromResponses')
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) > LooseVersion('11.6.1'),
reason='This test is for versions greater than 11.5.4.'
)
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) <= LooseVersion('11.5.4'),
reason='This test is for versions not below 11.6.0.'
)
def test_load_modify(self, policy):
r1 = policy.policy_builder.load()
assert r1.kind == 'tm:asm:policies:policy-builder:pbconfigstate'
assert r1.enablePolicyBuilder is False
assert not hasattr(r1, 'responseStatusCodes')
assert not hasattr(r1, 'learnFromResponses')
r1.modify(enablePolicyBuilder=True)
assert r1.enablePolicyBuilder is True
r2 = policy.policy_builder.load()
assert r1.kind == r2.kind
assert hasattr(r2, 'responseStatusCodes')
assert hasattr(r2, 'learnFromResponses')
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) > LooseVersion('12.0.0'),
        reason='This test is for versions 12.0.0 and below.'
)
def test_refresh_modify(self, policy):
r1 = policy.policy_builder.load()
assert r1.kind == 'tm:asm:policies:policy-builder:pbconfigstate'
assert r1.enablePolicyBuilder is False
assert not hasattr(r1, 'responseStatusCodes')
assert not hasattr(r1, 'learnFromResponses')
r2 = policy.policy_builder.load()
assert r1.kind == r2.kind
assert r2.enablePolicyBuilder is False
assert not hasattr(r2, 'responseStatusCodes')
assert not hasattr(r2, 'learnFromResponses')
r2.modify(enablePolicyBuilder=True)
assert r2.enablePolicyBuilder is True
assert hasattr(r2, 'responseStatusCodes')
assert hasattr(r2, 'learnFromResponses')
r1.refresh()
assert hasattr(r1, 'responseStatusCodes')
assert hasattr(r1, 'learnFromResponses')
assert r1.enablePolicyBuilder == r2.enablePolicyBuilder
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'),
        reason='This test is for version 12.0.0 or greater.'
)
def test_refresh_modify_v12(self, policy):
r1 = policy.policy_builder.load()
assert r1.kind == 'tm:asm:policies:policy-builder:policy-builderstate'
assert r1.trustAllIps is False
r2 = policy.policy_builder.load()
assert r1.kind == r2.kind
assert r2.trustAllIps is False
r2.modify(trustAllIps=True)
assert r2.trustAllIps is True
r1.refresh()
assert r1.trustAllIps == r2.trustAllIps
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'),
reason='This test is for version 12.0.0 or greater.'
)
def test_load_modify_v12(self, policy):
r1 = policy.policy_builder.load()
assert r1.kind == 'tm:asm:policies:policy-builder:policy-builderstate'
assert r1.trustAllIps is False
r1.modify(trustAllIps=True)
assert r1.trustAllIps is True
r2 = policy.policy_builder.load()
assert r1.kind == r2.kind
|
#!/usr/bin/env python3
import time, argparse
from learn import main
def get(help, choices=None, default=None):
while True:
i = input(help)
if i:
if choices and i not in choices:
pass
else:
if default == []:
i = i.split()
return i
else:
return default
def get_args():
parser = argparse.ArgumentParser()
args = parser.parse_args()
    print('Press Enter to choose the default option ...')
    args.all = get('Sync all courses from every semester [y/N]:', choices=['Y', 'N', 'y', 'n'], default=None)
    if args.all in ['n', 'N']:
        args.all = None
    args.clear = get('Remove identical files [y/N]:', choices=['Y', 'N', 'y', 'n'], default=None)
    if args.clear in ['n', 'N']:
        args.clear = None
    args.semester = get('Semesters:', default=[])
    args.course = get('Courses to include:', default=[])
    args.ignore = get('Courses to ignore:', default=[])
args._pass = '.pass'
args.cookie = ''
return args
if __name__ == '__main__':
t = time.time()
main(get_args())
t = time.time() - t
    print('Elapsed time: %02d:%02d:%02.0f' % (t // 3600, (t % 3600) // 60, t % 60))
    input('Press any key to exit')
|
# coding: utf-8
import json
import uuid
from datetime import datetime
import io
import unicodecsv as ucsv
from django.contrib.auth.models import User
from onadata.apps.logger.models import Instance
from onadata.libs.utils.logger_tools import dict2xml, safe_create_instance
def get_submission_meta_dict(xform, instance_id):
"""Generates metadata for our submission
Checks if `instance_id` belongs to an existing submission.
If it does, it's considered an edit and its uuid gets deprecated.
In either case, a new one is generated and assigned.
:param onadata.apps.logger.models.XForm xform: The submission's XForm.
:param string instance_id: The submission/instance `uuid`.
    :return: A list of [metadata dict, update flag] (flag is 1 when editing an existing submission)
    :rtype: list
"""
uuid_arg = 'uuid:{}'.format(uuid.uuid4())
meta = {'instanceID': uuid_arg}
update = 0
if xform.instances.filter(uuid=instance_id).count() > 0:
uuid_arg = 'uuid:{}'.format(uuid.uuid4())
meta.update({'instanceID': uuid_arg,
'deprecatedID': 'uuid:{}'.format(instance_id)})
update += 1
return [meta, update]
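# Illustrative return values (the uuid values below are placeholders, not real data):
#   brand-new submission:    [{'instanceID': 'uuid:<new>'}, 0]
#   edit of an existing one: [{'instanceID': 'uuid:<new>', 'deprecatedID': 'uuid:<old>'}, 1]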
def dict2xmlsubmission(submission_dict, xform, instance_id, submission_date):
"""Creates and xml submission from an appropriate dict (& other data)
:param dict submission_dict: A dict containing form submission data.
    :param onadata.apps.logger.models.XForm xform: The submission's XForm.
:param string instance_id: The submission/instance `uuid`.
:param string submission_date: An isoformatted datetime string.
:return: An xml submission string
:rtype: string
"""
return ('<?xml version="1.0" ?>'
'<{0} id="{1}" instanceID="uuid:{2}" submissionDate="{3}" '
'xmlns="http://opendatakit.org/submissions">{4}'
'</{0}>'.format(
json.loads(xform.json).get('name', xform.id_string),
xform.id_string, instance_id, submission_date,
dict2xml(submission_dict).replace('\n', '')))
def submit_csv(username, xform, csv_file):
""" Imports CSV data to an existing form
Takes a csv formatted file or string containing rows of submission/instance
and converts those to xml submissions and finally submits them by calling
:py:func:`onadata.libs.utils.logger_tools.safe_create_instance`
:param str username: the submission user
    :param onadata.apps.logger.models.XForm xform: The submission's XForm.
    :param csv_file: A CSV formatted file or string with submission rows.
:return: If successful, a dict with import summary else dict with error str.
:rtype: Dict
"""
if isinstance(csv_file, str):
csv_file = io.StringIO(csv_file)
elif csv_file is None or not hasattr(csv_file, 'read'):
return {'error': ('Invalid param type for `csv_file`. '
'Expected file or String '
'got {} instead.'.format(type(csv_file).__name__))}
csv_reader = ucsv.DictReader(csv_file)
rollback_uuids = []
submission_time = datetime.utcnow().isoformat()
ona_uuid = {'formhub': {'uuid': xform.uuid}}
error = None
additions = inserts = 0
for row in csv_reader:
# fetch submission uuid before purging row metadata
row_uuid = row.get('_uuid')
submitted_by = row.get('_submitted_by')
submission_date = row.get('_submission_time', submission_time)
row_iter = dict(row)
for key in row_iter: # seems faster than a comprehension
# remove metadata (keys starting with '_')
if key.startswith('_'):
del row[key]
# process nested data e.g x[formhub/uuid] => x[formhub][uuid]
if r'/' in key:
p, c = key.split('/')
row[p] = {c: row[key]}
del row[key]
# inject our form's uuid into the submission
row.update(ona_uuid)
old_meta = row.get('meta', {})
new_meta, update = get_submission_meta_dict(xform, row_uuid)
inserts += update
old_meta.update(new_meta)
row.update({'meta': old_meta})
row_uuid = row.get('meta').get('instanceID')
rollback_uuids.append(row_uuid.replace('uuid:', ''))
xml_file = io.StringIO(
dict2xmlsubmission(row, xform, row_uuid, submission_date))
try:
error, instance = safe_create_instance(username, xml_file, [],
xform.uuid, None)
except ValueError as e:
error = e
if error:
Instance.objects.filter(uuid__in=rollback_uuids,
xform=xform).delete()
return {'error': str(error)}
else:
additions += 1
users = User.objects.filter(
username=submitted_by) if submitted_by else []
if users:
instance.user = users[0]
instance.save()
return {'additions': additions - inserts, 'updates': inserts}
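# A minimal, self-contained sketch of the per-row reshaping performed inside
# submit_csv above; the row contents here are purely illustrative.
def _reshape_row_example():
    row = {'name': 'Jane', '_uuid': 'abc', 'group/field': '42'}
    for key in dict(row):  # iterate over a copy so the dict can be mutated
        if key.startswith('_'):
            del row[key]  # drop metadata columns
        elif '/' in key:
            parent, child = key.split('/')
            row[parent] = {child: row[key]}  # nest 'a/b' keys
            del row[key]
    return row  # -> {'name': 'Jane', 'group': {'field': '42'}}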
|
import cv2
import caffe
import tools
import sys
import os
import numpy as np
from functools import partial
import config
def gen_scales(w, h, min_imgsize, net_imgsize):
scales = []
    scale = float(net_imgsize) / min_imgsize
    minhw = min(w, h) * scale
while minhw > net_imgsize:
scales.append(scale)
scale *= 0.709
minhw *= 0.709
return scales
def regularize_rect(img_shape, rect):
w, h, _ = img_shape
x1, y1, x2, y2, prob = rect
x1 = max(0, min(x1, w))
x2 = max(0, min(x2, w))
y1 = max(0, min(y1, h))
y2 = max(0, min(y2, h))
return [x1, x2, y1, y2, prob]
def load_model(model_dir, net, iter_num):
proto_path = os.path.join(model_dir, '%s.prototxt' % net)
model_path = os.path.join(model_dir, '%s_iter_%d.caffemodel' % (net, iter_num))
return caffe.Net(proto_path, model_path, caffe.TEST)
def test_pnet(img, min_img_size, net_size, net):
norm_img = (img.copy() - 127.5) / 127.5
h, w, c = norm_img.shape
scales = gen_scales(w, h, min_img_size, net_size)
rects = []
for scale in scales:
sh = int(h * scale)
sw = int(w * scale)
scale_img = cv2.resize(norm_img, (sw, sh))
scale_img = cv2.transpose(scale_img)
scale_img = np.swapaxes(scale_img, 0, 2)
net.blobs['data'].reshape(1, 3, sh, sw)
net.blobs['data'].data[...] = scale_img
        out = net.forward()
        label_prob = out[config.NET_OUTPUTS['pnet']['label']][0]
        bbox = out[config.NET_OUTPUTS['pnet']['bbox']][0]
        out_h, out_w = label_prob[1].shape
        out_side = max(out_h, out_w)
        rect = tools.detect_face_12net(label_prob[1], bbox, out_side,
                                       1 / scale, w, h, 0.65)
        rects += rect
rects = tools.NMS(rects, 0.7, 'iou')
return rects
def test_rnet(img, rects, min_img_size, net_size, net):
norm_img = (img.copy() - 127.5) / 128
h, w, c = norm_img.shape
for i, rect in enumerate(rects):
cropped_img = img[int(rect[1]):int(rect[3]), int(rect[0]):int(rect[2]),]
resized_img = cv2.resize(cropped_img, (net_size, net_size))
resized_img = np.swapaxes(resized_img, 0, 2)
net.blobs['data'].data[i] = resized_img
out = net.forward()
label_prob = out[config.NET_OUTPUTS['rnet']['label']][0][1]
bbox = out[config.NET_OUTPUTS['rnet']['bbox']][0][1]
rects = tools.filter_face_24net(label_prob, bbox, rects, w, h, 0.7)
    return [rect for rect in rects if rect[2] - rect[0] > 0 and rect[3] - rect[1] > 0]
def test_net(net, model_dir, iter_num):
model_path = os.path.join(model_dir, '%s_iter_%d.caffemodel' % (net, iter_num))
proto_path = os.path.join(model_dir, '%s.prototxt' % net)
caffe_net = caffe.Net(proto_path, model_path, caffe.TEST)
if net == 'pnet':
return partial(test_pnet, net=caffe_net)
elif net == 'rnet':
return partial(test_rnet, net=caffe_net)
if __name__ == '__main__':
net = sys.argv[1]
iter_num = int(sys.argv[2])
img_path = sys.argv[3]
test_func = test_net(net, config.MODEL_DIR, iter_num)
img = cv2.imread(img_path)
rects = test_func(img, config.MIN_IMG_SIZE, config.NET_IMG_SIZES['pnet'])
for i, rect in enumerate(rects):
sub_img = img[rect[1]:rect[3], rect[0]:rect[2]]
print(sub_img.shape, rect)
cv2.imwrite("pnet/test/%d_%f.jpg" % (i, rect[4]), sub_img)
for i, rect in enumerate(rects):
cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]), (255,0,0), 3)
cv2.imwrite("pnet/result.jpg", img)
print('%d rects generated' % len(rects))
|
# ===========================================================================
# Different topic strategy
# ===========================================================================
from __future__ import print_function, division, absolute_import
topic2T = {
'dsname': ['est', 'fin', 'sam'],
'feats': ['mfcc'],
'normalize': ['mfcc'],
'mode': 'bin',
'context': 30,
'hop': 10,
'seq': True,
'nb_topics': 2,
'unite_topics': True,
}
topic4T = {
'dsname': ['est', 'fin', 'sam'],
'feats': ['mfcc'],
'normalize': ['mfcc'],
'mode': 'bin',
'context': 30,
'hop': 10,
'seq': True,
'nb_topics': 4,
'unite_topics': True,
}
topic6T = {
'dsname': ['est', 'fin', 'sam'],
'feats': ['mfcc'],
'normalize': ['mfcc'],
'mode': 'bin',
'context': 30,
'hop': 10,
'seq': True,
'nb_topics': 6,
'unite_topics': True,
}
topic10T = {
'dsname': ['est', 'fin', 'sam'],
'feats': ['mfcc'],
'normalize': ['mfcc'],
'mode': 'bin',
'context': 30,
'hop': 10,
'seq': True,
'nb_topics': 10,
'unite_topics': True,
}
topic2F = {
'dsname': ['est', 'fin', 'sam'],
'feats': ['mfcc'],
'normalize': ['mfcc'],
'mode': 'bin',
'context': 30,
'hop': 10,
'seq': True,
'nb_topics': 2,
'unite_topics': False,
}
topic4F = {
'dsname': ['est', 'fin', 'sam'],
'feats': ['mfcc'],
'normalize': ['mfcc'],
'mode': 'bin',
'context': 30,
'hop': 10,
'seq': True,
'nb_topics': 4,
'unite_topics': False,
}
topic10F = {
'dsname': ['est', 'fin', 'sam'],
'feats': ['mfcc'],
'normalize': ['mfcc'],
'mode': 'bin',
'context': 30,
'hop': 10,
'seq': True,
'nb_topics': 10,
'unite_topics': False,
}
|
from dataset.jester import Jester
from config import Config
def get_training_set(spatial_transform, temporal_transform,
target_transform):
return Jester(
Config.dataset_path,
Config.annotation_path,
'training',
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
sample_duration=Config.sample_duration)
def get_validation_set(spatial_transform, temporal_transform,
target_transform):
return Jester(
Config.dataset_path,
Config.annotation_path,
'validation',
Config.n_val_samples,
spatial_transform,
temporal_transform,
target_transform,
sample_duration=Config.sample_duration)
def get_test_set(spatial_transform, temporal_transform, target_transform):
if Config.test_subset == 'val':
subset = 'validation'
elif Config.test_subset == 'test':
subset = 'testing'
return Jester(
Config.dataset_path,
Config.annotation_path,
subset,
spatial_transform = spatial_transform,
temporal_transform= temporal_transform,
target_transform=target_transform,
sample_duration=Config.sample_duration)
|
"""Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu (jieyu@umich.edu)
"""
import os
from maple.core import proto
def race_pb2():
return proto.module('race.race_pb2')
class StaticRaceEvent(object):
def __init__(self, proto, db):
self.proto = proto
self.db = db
self.sinfo = db.sinfo
def id(self):
return self.proto.id
def inst(self):
return self.sinfo.find_inst(self.proto.inst_id)
def type(self):
return self.proto.type
def type_name(self):
if self.type() == race_pb2().RACE_EVENT_READ:
return 'READ'
elif self.type() == race_pb2().RACE_EVENT_WRITE:
return 'WRITE'
else:
return 'INVALID'
def __str__(self):
content = []
content.append('%-4d' % self.id())
content.append('%-7s' % self.type_name())
content.append('[%-40s]' % str(self.inst()))
return ' '.join(content)
class StaticRace(object):
def __init__(self, proto, db):
self.proto = proto
self.db = db
self.sinfo = db.sinfo
def id(self):
return self.proto.id
def event(self, idx):
event_id = self.proto.event_id[idx]
return self.db.find_static_event(event_id)
def num_events(self):
return len(self.proto.event_id)
def __str__(self):
content = []
content.append('Static Race %-4d' % self.id())
for idx in range(self.num_events()):
content.append(' %s' % str(self.event(idx)))
return '\n'.join(content)
class RaceEvent(object):
def __init__(self, proto, db):
self.proto = proto
self.db = db
def thd_id(self):
return self.proto.thd_id
def static_event(self):
return self.db.find_static_event(self.proto.static_id)
def __str__(self):
content = []
content.append('[T%lx]' % self.thd_id())
content.append('%s' % str(self.static_event()))
return ' '.join(content)
class Race(object):
def __init__(self, proto, db):
self.proto = proto
self.db = db
self.event_vec = []
for e_proto in self.proto.event:
e = RaceEvent(e_proto, db)
self.event_vec.append(e)
def exec_id(self):
return self.proto.exec_id
def addr(self):
return self.proto.addr
def event(self, idx):
return self.event_vec[idx]
def num_events(self):
return len(self.proto.event)
def static_race(self):
return self.db.find_static_race(self.proto.static_id)
def __str__(self):
content = []
content.append('Dynamic Race: %-4d 0x%-8x' % (self.exec_id(), self.addr()))
for idx in range(self.num_events()):
content.append(' %s' % self.event(idx))
return '\n'.join(content)
class RaceDB(object):
def __init__(self, sinfo):
self.sinfo = sinfo
self.proto = race_pb2().RaceDBProto()
self.static_event_map = {}
self.static_race_map = {}
self.race_vec = []
self.racy_inst_set = set()
def load(self, db_name):
if not os.path.exists(db_name):
return
f = open(db_name, 'rb')
self.proto.ParseFromString(f.read())
f.close()
for e_proto in self.proto.static_event:
e = StaticRaceEvent(e_proto, self)
self.static_event_map[e.id()] = e
for r_proto in self.proto.static_race:
r = StaticRace(r_proto, self)
self.static_race_map[r.id()] = r
for r_proto in self.proto.race:
r = Race(r_proto, self)
self.race_vec.append(r)
for inst_id in self.proto.racy_inst_id:
inst = self.sinfo.find_inst(inst_id)
self.racy_inst_set.add(inst)
def num_static_races(self):
return len(self.proto.static_race)
def num_racy_insts(self):
return len(self.proto.racy_inst_id)
def find_static_event(self, id):
return self.static_event_map[id]
def find_static_race(self, id):
return self.static_race_map[id]
def display_static_race(self, f):
for r in self.static_race_map.itervalues():
f.write('%s\n' % str(r))
def display_race(self, f):
for r in self.race_vec:
f.write('%s\n' % str(r))
def display_racy_inst(self, f):
for inst in self.racy_inst_set:
f.write('%s\n' % str(inst))
|
#!/usr/bin/env python3
from aws_cdk import core as cdk
from stacks.base_stack import BaseStack
from stacks.databases_stack import DatabasesStack
from stacks.lakeformation_stack import LakeFormationStack
from stacks.opensearch_stack import OpenSearchStack
app = cdk.App()
base = BaseStack(app, "aws-data-wrangler-base")
DatabasesStack(
app,
"aws-data-wrangler-databases",
base.get_vpc,
base.get_bucket,
base.get_key,
)
LakeFormationStack(app, "aws-data-wrangler-lakeformation")
OpenSearchStack(
app,
"aws-data-wrangler-opensearch",
base.get_vpc,
base.get_bucket,
base.get_key,
)
app.synth()
|
__version__ = '0.9.4d1'
|
from Queue import Queue
import comms
import cPickle
import nav
import threading
import time
GRAPH_FILE = 'graph.txt'
G = cPickle.load(open(GRAPH_FILE))
BASE_VALUE = 0
READY_VALUE = 1
ERROR_VALUE = 2
task_queue = Queue()
car_queue = Queue()
car_task_locations = {}
class Car(threading.Thread):
""" Object that maintains car information and queues. """
def __init__(self, ser, device_id, navStatus="", state=(0, 4, 'E'), response_code=READY_VALUE):
super(Car, self).__init__()
self.ser = ser
self.device_id = device_id
self.navStatus = navStatus
self.state = state
self.final_destination = None
self.response_code = response_code
self.instQ = Queue()
car_queue.put(self)
self.queued = True
self.ready_to_exit = False
def update_data(self, navStatus, response_code):
""" Updates car info and adds car to Queue if necessary. """
self.navStatus = navStatus
self.response_code = response_code
if self.can_be_queued():
car_queue.put(self)
self.queued = True
def can_be_queued(self):
return not self.queued and self.instQ.empty() and self.response_code == READY_VALUE
def has_path_to_destination(self, goal_node):
"""Returns a valid path if one exists. Returns None otherwise."""
dirs = nav.get_directions(G, self.state, goal_node)
return dirs
def add_task(self, path, goal_node):
if self.final_destination:
car_task_locations[self.final_destination] = False
self.final_destination = goal_node
car_task_locations[goal_node] = True
for di in path:
self.instQ.put(di)
self.queued = False
self.wave_hello()
def respond(self):
""" Pops a command to the car if one is available and updates intended location data. """
if self.response_code == READY_VALUE and not self.instQ.empty():
inst = self.instQ.get()
instruction_letter = inst[0]
next_destination = inst[1]
self.send_command(instruction_letter)
if instruction_letter == 't':
nav.reactivate_node(G, self.state)
if next_destination == self.final_destination:
nav.reactivate_node(G, self.final_destination)
self.state = next_destination
self.response_code = BASE_VALUE
def send_command(self, action):
comms.write_to_device(self.ser, action)
def read_data(self):
return comms.read_from_device(self.ser)
def wave_hello(self):
self.send_command('h')
def run(self):
while not self.ready_to_exit:
try:
args = self.read_data()
if len(args) >= 4:
navStatus = args[0]
usDistance = float(args[1])
temp = float(args[2])
response_code = int(args[3])
self.update_data(navStatus, response_code)
except Exception, e:
pass
def car_update_loop(car_manager):
""" Updates data and determines next action. """
while not car_manager.ready_to_exit:
if not car_manager.paused:
bad_tasks = []
while not task_queue.empty() and not car_queue.empty():
task = task_queue.get()
goal_node = nav.translate_to_loc(task)
if not car_task_locations.setdefault(goal_node, False):
car = car_queue.get()
path = car.has_path_to_destination(goal_node)
if path is None:
car_queue.put(car)
bad_tasks.append(task)
else:
print "queueing task %d onto car %s" % (task, car.device_id)
car.add_task(path, goal_node)
for bt in bad_tasks:
task_queue.put(bt)
bad_tasks[:] = []
for car in car_manager.cars:
car.respond()
def stop_and_readjust(car_manager):
""" Suspends car movements and displays intended locations. """
car_manager.paused = True
for car in car_manager.cars:
print("Car %s should be at position: %s" %(str(car.device_id), str(car.state)))
raw_input("Press enter to continue.")
car_manager.paused = False
for car in car_manager.cars:
car.wave_hello()
def handle_input(car_manager):
""" Allows users to input program commands. """
while not car_manager.ready_to_exit:
user_input = raw_input("Enter a command or type 'h' for help: ")
user_input = user_input.lower()
if user_input.startswith('s'): # Stop and Readjust mode
stop_and_readjust(car_manager)
elif user_input.startswith('t'): # Input locations
tasks = eval(raw_input("Input a list of integer locations (ex. [6, 2, 4]): "))
if isinstance(tasks, list):
for task in tasks:
task_queue.put(task)
else:
print("Please format tasks correctly.")
elif user_input.startswith('h'): # Help
print "t - Input tasks"
print "s - Enter stop-and-readjust mode"
print "q - Quit the program"
elif user_input.startswith('q'):
car_manager.quit()
time.sleep(0.4)
class CarManager():
""" Discovers and maintains information on all cars. """
def __init__(self):
self.cars = []
self.device_ports = []
self.paused = False
self.ready_to_exit = False
self.discover_cars()
def discover_cars(self):
dev_id_count = 0
devices = comms.bind_all_devices()
for port, ser in devices:
self.device_ports.append(port)
if dev_id_count >= 2:
car = Car(ser, dev_id_count, state=(0, 0, 'E'))
else:
car = Car(ser, dev_id_count)
car.start()
print("Connected to car %d" % dev_id_count)
self.cars.append(car)
dev_id_count += 1
def quit(self):
for car in self.cars:
car.ready_to_exit = True
car.ser.close()
for dp in self.device_ports:
comms.release_device(dp)
self.ready_to_exit = True
def main():
car_manager = CarManager()
input_thread = threading.Thread(target = handle_input, args = (car_manager,))
input_thread.start()
car_update_loop(car_manager)
if __name__ == "__main__":
main()
|
import json
from os.path import join, isfile
from .targets import DirectoryTarget, NamedVolumeTarget, TargetOptions
root_path = "/bb8/etc/"
source_config_path = join(root_path, "source-config.json")
config_path = join(root_path, "config.json")
ssh_key_path = join(root_path, "secrets/ssh_key")
host_key_path = join(root_path, "secrets/host_key")
known_hosts_path = join(root_path, "known_hosts")
log_dir = '/bb8/logs/'
class Settings:
def __init__(self, path=config_path, machine_id_path=join(root_path, "machine-id")):
with open(path, 'r') as f:
config = json.load(f)
self.starport = config["starport"]
self.targets = list(Settings.parse_target(t) for t in config["targets"])
if isfile(machine_id_path):
with open(machine_id_path, 'r') as f:
self.instance_guid = f.read().replace('\n', '')
else:
self.instance_guid = None
@classmethod
def parse_target(cls, data):
t = data["type"]
options = TargetOptions.from_data(data)
if t == "directory":
return DirectoryTarget(data["name"], data["path"], options)
elif t == "named_volume":
return NamedVolumeTarget(data["name"], data["volume"], options)
else:
raise Exception("Unsupported target type: " + t)
def load_settings():
return Settings()
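# Illustrative shape of config.json as consumed above (all values are made up;
# any target options beyond "type"/"name"/"path"/"volume" depend on TargetOptions):
# {
#     "starport": {...},
#     "targets": [
#         {"type": "directory", "name": "etc", "path": "/some/path"},
#         {"type": "named_volume", "name": "db", "volume": "db-data"}
#     ]
# }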
|
from textwrap import dedent
import pytest
from pylox.lox import Lox
# Base cases from https://github.com/munificent/craftinginterpreters/blob/master/test/constructor/arguments.lox
TEST_SRC = dedent(
"""\
class Foo {
init(a, b) {
print "init"; // expect: init
this.a = a;
this.b = b;
}
}
var foo = Foo(1, 2);
print foo.a; // expect: 1
print foo.b; // expect: 2
"""
)
EXPECTED_STDOUTS = ["init", "1", "2"]
def test_arguments(capsys: pytest.CaptureFixture) -> None:
interpreter = Lox()
interpreter.run(TEST_SRC)
assert not interpreter.had_error
assert not interpreter.had_runtime_error
all_out = capsys.readouterr().out.splitlines()
assert all_out == EXPECTED_STDOUTS
|
import sys
import unittest
import numpy as np
import os
from shutil import rmtree
try:
from concurrent import futures
except ImportError:
futures = False
try:
import z5py
except ImportError:
sys.path.append('..')
import z5py
class TestUtil(unittest.TestCase):
tmp_dir = './tmp_dir'
shape = (100, 100, 100)
chunks = (10, 10, 10)
def setUp(self):
if not os.path.exists(self.tmp_dir):
os.mkdir(self.tmp_dir)
def tearDown(self):
if os.path.exists(self.tmp_dir):
rmtree(self.tmp_dir)
@unittest.skipUnless(futures, "Needs 3rd party concurrent.futures in python 2")
def test_rechunk_default(self):
from z5py.util import rechunk
in_path = os.path.join(self.tmp_dir, 'in.n5')
out_path = os.path.join(self.tmp_dir, 'out.n5')
# create input file
in_file = z5py.File(in_path, use_zarr_format=False)
ds_in = in_file.create_dataset('data', dtype='float32',
shape=self.shape, chunks=self.chunks,
compression='gzip')
# write test data
data = np.arange(ds_in.size).reshape(ds_in.shape).astype(ds_in.dtype)
ds_in[:] = data
# rechunk for different out blocks
out_file = z5py.File(out_path, use_zarr_format=False)
new_chunks = (20, 20, 20)
# NOTE we can only choose out blocks that align with the chunks
# because otherwise we run into issues due to not thread safe blocking
for out_blocks in (None, (40, 40, 40), (60, 60, 60)):
ds_str = 'none' if out_blocks is None else '_'.join(map(str, out_blocks))
ds_name = 'data_%s' % ds_str
rechunk(in_path, out_path, 'data', ds_name, new_chunks,
out_blocks=out_blocks,
n_threads=8)
# make sure that new data agrees
ds_out = out_file[ds_name]
data_out = ds_out[:]
self.assertEqual(data_out.shape, data.shape)
self.assertEqual(ds_out.chunks, new_chunks)
self.assertTrue(np.allclose(data, data_out))
@unittest.skipUnless(futures, "Needs 3rd party concurrent.futures in python 2")
def test_rechunk_custom(self):
from z5py.util import rechunk
in_path = os.path.join(self.tmp_dir, 'in.n5')
out_path = os.path.join(self.tmp_dir, 'out.n5')
# create input file
in_file = z5py.File(in_path, use_zarr_format=False)
ds_in = in_file.create_dataset('data', dtype='float32',
shape=self.shape, chunks=self.chunks,
compression='gzip')
# write test data
data = np.arange(ds_in.size).reshape(ds_in.shape).astype(ds_in.dtype)
ds_in[:] = data
# rechunk
new_chunks = (20, 20, 20)
for compression in ('raw', 'gzip'):
for dtype in ('float64', 'int32', 'uint32'):
ds_name = 'ds_%s_%s' % (compression, dtype)
rechunk(in_path, out_path,
'data', ds_name, new_chunks,
n_threads=8,
compression=compression,
dtype=dtype)
# make sure that new data agrees
out_file = z5py.File(out_path, use_zarr_format=False)
ds_out = out_file[ds_name]
data_out = ds_out[:]
self.assertEqual(data_out.shape, data.shape)
self.assertEqual(ds_out.chunks, new_chunks)
self.assertTrue(np.allclose(data, data_out))
# TODO finish blocking tests
    def simple_blocking(self, shape, block_shape):
        # reference blocking, still limited to the first two dimensions (see TODO above)
        blocking = []
        ndim = len(shape)
        for x in range(0, shape[0], block_shape[0]):
            blocking.append((x, min(x + block_shape[0], shape[0])))
        if ndim > 1:
            blocks_x = blocking
            blocking = []
            for block in blocks_x:
                for y in range(0, shape[1], block_shape[1]):
                    blocking.append(block + (y, min(y + block_shape[1], shape[1])))
        return blocking
def _test_blocking(self):
from z5py.util import blocking
n_reps = 10
for dim in range(1, 6):
for _ in range(n_reps):
shape = tuple(np.random.randint(0, 1000) for ii in range(dim))
block_shape = tuple(min(np.random.randint(0, 100), sh)
for ii, sh in zip(range(dim), shape))
blocking1 = [(block.start, block.stop)
for block in blocking(shape, block_shape)]
blocking2 = self.simple_blocking(shape, block_shape)
                blocking1.sort()
                blocking2.sort()
self.assertEqual(blocking1, blocking2)
if __name__ == '__main__':
unittest.main()
|
import cv2
import numpy as np
resim = cv2.imread("ati.JPG")
cv2.imshow("resim",resim)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# Two-pointer search for a pair summing to the target; assumes the numbers are entered already sorted.
num_list = [int(x) for x in input().split()]
sum = int(input("Enter the sum: "))
low = 0
hi = len(num_list)-1
found = False
while low<hi:
s = num_list[low]+num_list[hi]
if s==sum:
found = True
break
elif s<sum:
if num_list[low]<num_list[hi]:
low +=1
else:
hi -=1
else:
if num_list[low]>num_list[hi]:
low +=1
else:
hi -=1
print(found)
|
# -*- coding:utf8 -*-
# Performance profiling mode (may only work on Linux)
if __name__ == "__main__":
from main import app
from werkzeug.contrib.profiler import ProfilerMiddleware
from config import GLOBAL
Host = GLOBAL.get('Host')
Port = GLOBAL.get('Port')
app.config['PROFILE'] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions = [60])
app.run(debug=True, host=Host, port=Port)
|
from django.conf.urls import patterns, url
urlpatterns = patterns('betty.image_browser.views',
    url(r'^search\.html$', 'search'),  # noqa
url(r'^upload\.html$', 'upload'),
url(r'^crop\.html$', 'crop'),
)
|
import datetime
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from aimsbackend.models import Students, Courses, Subjects, CustomUser, Attendance, AttendanceReport, \
StudentLeaveReport, StudentFeedBack, StudentNotifications, StudentResult, SessionYearModel
def student_home(request):
student_obj = Students.objects.get(admin=request.user.id)
attendance_total = AttendanceReport.objects.filter(
student_id=student_obj).count()
attendance_present = AttendanceReport.objects.filter(
student_id=student_obj, status=True).count()
attendance_absent = AttendanceReport.objects.filter(
student_id=student_obj, status=False).count()
course = Courses.objects.get(id=student_obj.course_id.id)
subjects = Subjects.objects.filter(course_id=course).count()
subjects_data = Subjects.objects.filter(course_id=course)
session_obj = SessionYearModel.objects.get(
id=student_obj.session_year_id.id)
subject_name = []
data_present = []
data_absent = []
subject_data = Subjects.objects.filter(course_id=student_obj.course_id)
for subject in subject_data:
attendance = Attendance.objects.filter(subject_id=subject.id)
attendance_present_count = AttendanceReport.objects.filter(
attendance_id__in=attendance, status=True, student_id=student_obj.id).count()
attendance_absent_count = AttendanceReport.objects.filter(
attendance_id__in=attendance, status=False, student_id=student_obj.id).count()
subject_name.append(subject.subject_name)
data_present.append(attendance_present_count)
data_absent.append(attendance_absent_count)
return render(request, "student_template/student_home_template.html", {"total_attendance": attendance_total, "attendance_absent": attendance_absent, "attendance_present": attendance_present, "subjects": subjects, "data_name": subject_name, "data1": data_present, "data2": data_absent, })
def student_view_attendance(request):
student = Students.objects.get(admin=request.user.id)
course = student.course_id
subjects = Subjects.objects.filter(course_id=course)
return render(request, "student_template/student_view_attendance.html", {"subjects": subjects})
def student_view_attendance_post(request):
subject_id = request.POST.get("subject")
start_date = request.POST.get("start_date")
end_date = request.POST.get("end_date")
start_data_parse = datetime.datetime.strptime(
start_date, "%Y-%m-%d").date()
end_data_parse = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()
subject_obj = Subjects.objects.get(id=subject_id)
user_object = CustomUser.objects.get(id=request.user.id)
stud_obj = Students.objects.get(admin=user_object)
attendance = Attendance.objects.filter(attendance_date__range=(
start_data_parse, end_data_parse), subject_id=subject_obj)
attendance_reports = AttendanceReport.objects.filter(
attendance_id__in=attendance, student_id=stud_obj)
return render(request, "student_template/student_attendance_data.html", {"attendance_reports": attendance_reports})
def student_apply_leave(request):
staff_obj = Students.objects.get(admin=request.user.id)
leave_data = StudentLeaveReport.objects.filter(student_id=staff_obj)
return render(request, "student_template/student_apply_leave.html", {"leave_data": leave_data})
def student_apply_leave_save(request):
if request.method != "POST":
return HttpResponseRedirect(reverse("student_apply_leave"))
else:
leave_date = request.POST.get("leave_date")
leave_msg = request.POST.get("leave_msg")
student_obj = Students.objects.get(admin=request.user.id)
try:
leave_report = StudentLeaveReport(
student_id=student_obj, leave_date=leave_date, leave_message=leave_msg, leave_status=0)
leave_report.save()
messages.success(request, "Successfully Applied for Leave")
return HttpResponseRedirect(reverse("student_apply_leave"))
except:
messages.error(request, "Failed To Apply for Leave")
return HttpResponseRedirect(reverse("student_apply_leave"))
def student_feedback(request):
staff_id = Students.objects.get(admin=request.user.id)
feedback_data = StudentFeedBack.objects.filter(student_id=staff_id)
return render(request, "student_template/student_feedback.html", {"feedback_data": feedback_data})
def student_feedback_save(request):
if request.method != "POST":
return HttpResponseRedirect(reverse("student_feedback"))
else:
feedback_msg = request.POST.get("feedback_msg")
student_obj = Students.objects.get(admin=request.user.id)
try:
feedback = StudentFeedBack(
student_id=student_obj, feedback=feedback_msg, feedback_reply="")
feedback.save()
messages.success(request, "Successfully Sent Feedback")
return HttpResponseRedirect(reverse("student_feedback"))
except:
messages.error(request, "Failed To Send Feedback")
return HttpResponseRedirect(reverse("student_feedback"))
def student_profile(request):
user = CustomUser.objects.get(id=request.user.id)
student = Students.objects.get(admin=user)
return render(request, "student_template/student_profile.html", {"user": user, "student": student})
def student_profile_save(request):
if request.method != "POST":
return HttpResponseRedirect(reverse("student_profile"))
else:
first_name = request.POST.get("first_name")
last_name = request.POST.get("last_name")
password = request.POST.get("password")
address = request.POST.get("address")
try:
customuser = CustomUser.objects.get(id=request.user.id)
customuser.first_name = first_name
customuser.last_name = last_name
if password != None and password != "":
customuser.set_password(password)
customuser.save()
student = Students.objects.get(admin=customuser)
student.address = address
student.save()
messages.success(request, "Successfully Updated Profile")
return HttpResponseRedirect(reverse("student_profile"))
except:
messages.error(request, "Failed to Update Profile")
return HttpResponseRedirect(reverse("student_profile"))
@csrf_exempt
def student_fcmtoken_save(request):
token = request.POST.get("token")
try:
student = Students.objects.get(admin=request.user.id)
student.fcm_token = token
student.save()
return HttpResponse("True")
except:
return HttpResponse("False")
def student_all_notification(request):
student = Students.objects.get(admin=request.user.id)
notifications = StudentNotifications.objects.filter(student_id=student.id)
return render(request, "student_template/all_notification.html", {"notifications": notifications})
def student_view_result(request):
student = Students.objects.get(admin=request.user.id)
studentresult = StudentResult.objects.filter(student_id=student.id)
return render(request, "student_template/student_result.html", {"studentresult": studentresult})
|
import ccobra
from Modal_Logic.ccobra_adapter import ccobra_to_assertion
from Modal_Logic.solver import does_a_follow_from_b
class MentalModel(ccobra.CCobraModel):
def __init__(self, name='Modal Logic System S4'):
super(MentalModel, self).__init__(
name, ['modal'], ['verify'])
def predict(self, item, **kwargs):
task = ccobra_to_assertion(item.task[0])
choices = ccobra_to_assertion(item.choices[0][0])
return does_a_follow_from_b(task, choices, ['reflexive', 'transitive'])
def pre_train(self, dataset):
pass
# print("pretrain")
# print(len(dataset))
# for subj_train_data in dataset:
# for seq_train_data in subj_train_data:
# print(seq_train_data['item'].identifier, seq_train_data['response'])
def adapt(self, item, response, **kwargs):
# print(item.task)
# print(response)
# print()
pass
|
# Read from file, announce on the web, irc, xml-rpc
from twisted.application import internet, service, strports
from twisted.internet import protocol, reactor, defer, endpoints
from twisted.words.protocols import irc
from twisted.protocols import basic
from twisted.web import resource, server, static, xmlrpc
import cgi
class FingerProtocol(basic.LineReceiver):
def lineReceived(self, user):
d = self.factory.getUser(user)
def onError(err):
return b'Internal error in server'
d.addErrback(onError)
def writeResponse(message):
self.transport.write(message + b'\r\n')
self.transport.loseConnection()
d.addCallback(writeResponse)
class IRCReplyBot(irc.IRCClient):
def connectionMade(self):
self.nickname = self.factory.nickname
irc.IRCClient.connectionMade(self)
def privmsg(self, user, channel, msg):
user = user.split('!')[0]
if self.nickname.lower() == channel.lower():
d = self.factory.getUser(msg.encode("ascii"))
def onError(err):
return 'Internal error in server'
d.addErrback(onError)
def writeResponse(message):
message = message.decode("ascii")
irc.IRCClient.msg(self, user, msg+ ': ' + message)
d.addCallback(writeResponse)
class FingerService(service.Service):
def __init__(self, filename):
self.filename = filename
self.users = {}
def _read(self):
self.users.clear()
with open(self.filename, "rb") as f:
for line in f:
user, status = line.split(b':', 1)
user = user.strip()
status = status.strip()
self.users[user] = status
self.call = reactor.callLater(30, self._read)
def getUser(self, user):
return defer.succeed(self.users.get(user, b"No such user"))
def getFingerFactory(self):
f = protocol.ServerFactory()
f.protocol = FingerProtocol
f.getUser = self.getUser
return f
def getResource(self):
def getData(path, request):
user = self.users.get(path, b"No such user <p/> usage: site/user")
path = path.decode("ascii")
user = user.decode("ascii")
text = '<h1>{}</h1><p>{}</p>'.format(path, user)
text = text.encode("ascii")
return static.Data(text, 'text/html')
r = resource.Resource()
r.getChild = getData
x = xmlrpc.XMLRPC()
x.xmlrpc_getUser = self.getUser
r.putChild('RPC2', x)
return r
def getIRCBot(self, nickname):
f = protocol.ClientFactory()
f.protocol = IRCReplyBot
f.nickname = nickname
f.getUser = self.getUser
return f
def startService(self):
self._read()
service.Service.startService(self)
def stopService(self):
service.Service.stopService(self)
self.call.cancel()
application = service.Application('finger', uid=1, gid=1)
f = FingerService('/etc/users')
serviceCollection = service.IServiceCollection(application)
f.setServiceParent(serviceCollection)
strports.service("tcp:79", f.getFingerFactory()
).setServiceParent(serviceCollection)
strports.service("tcp:8000", server.Site(f.getResource())
).setServiceParent(serviceCollection)
internet.ClientService(
endpoints.clientFromString(reactor, "tcp:irc.freenode.org:6667"),
f.getIRCBot('fingerbot')).setServiceParent(serviceCollection)
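# The service expects each line of /etc/users to look roughly like (illustrative):
#   moshez: happy and well
# i.e. a user name, a colon, and a free-form status string, as parsed in FingerService._read().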
|
from django.shortcuts import redirect
from django.http import JsonResponse
from django.core import serializers
from django.contrib import messages
class ReviewAjaxFormMixin(object):
def form_invalid(self, form):
response = super(ReviewAjaxFormMixin, self).form_invalid(form)
if self.request.is_ajax():
return JsonResponse(form.errors, status=400)
else:
return response
def form_valid(self, form):
response = super(ReviewAjaxFormMixin, self).form_valid(form)
if self.request.is_ajax():
print(form.cleaned_data)
data = {
'message': "Successfully submitted form data."
}
review = form.save()
ser_review = serializers.serialize('json', [ review, ])
# send to client side.
return JsonResponse({'review': ser_review}, status=200)
# return JsonResponse(data)
else:
review = form.save()
messages.success(self.request, 'Review submitted')
next_ = self.request.POST.get('next')
if next_:
return redirect(next_)
return response
|
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from image_utils import preprocess
model = load_model('./model.h5')
print(model.summary())
img = cv2.imread('/home/sajith/Documents/Acedamic/self-driving-car/data/data/IMG/center_2021_01_12_13_20_30_027.jpg')
cv2.imshow('tem', img)
img = preprocess(img)
img = np.asarray(img, dtype=np.float32)
print(img.shape)
print(float(model.predict(img[None,:,:,:])[0]))
|
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
Tests for loading water source definitions
"""
import pytest
import yaml
import os
from pyomo.environ import ConcreteModel, value
from pyomo.util.check_units import assert_units_consistent
from idaes.core import FlowsheetBlock
from watertap.unit_models.zero_order import FeedZO
from watertap.core import Database, WaterParameterBlock
dbpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
with open(os.path.join(dbpath, "water_sources.yaml"), "r") as f:
    lines = f.read()
source_data = yaml.load(lines, yaml.Loader)
DEFAULT_SOURCE = "seawater"
@pytest.mark.integration
def test_default_source():
m = ConcreteModel()
m.db = Database()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.params = WaterParameterBlock(default={"database": m.db})
m.fs.unit = FeedZO(default={"property_package": m.fs.params})
for j in m.fs.params.solute_set:
assert j in source_data[DEFAULT_SOURCE]["solutes"]
m.fs.unit.load_feed_data_from_database()
assert pytest.approx(
source_data[DEFAULT_SOURCE]["default_flow"]["value"], rel=1e-12
) == value(m.fs.unit.flow_vol[0])
assert m.fs.unit.flow_vol[0].fixed
for j, v in source_data[DEFAULT_SOURCE]["solutes"].items():
assert pytest.approx(v["value"], rel=1e-12) == value(
m.fs.unit.conc_mass_comp[0, j]
)
assert m.fs.unit.conc_mass_comp[0, j].fixed
assert_units_consistent(m)
@pytest.mark.integration
@pytest.mark.parametrize(
"source", list(j for j in source_data.keys() if j != "default")
)
def test_all_sources(source):
m = ConcreteModel()
m.db = Database()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.params = WaterParameterBlock(
default={"database": m.db, "water_source": source}
)
m.fs.unit = FeedZO(default={"property_package": m.fs.params})
for j in m.fs.params.solute_set:
assert j in source_data[source]["solutes"]
m.fs.unit.load_feed_data_from_database()
assert pytest.approx(
source_data[source]["default_flow"]["value"], rel=1e-12
) == value(m.fs.unit.flow_vol[0])
assert m.fs.unit.flow_vol[0].fixed
for j, v in source_data[source]["solutes"].items():
assert pytest.approx(v["value"], rel=1e-12) == value(
m.fs.unit.conc_mass_comp[0, j]
)
assert m.fs.unit.conc_mass_comp[0, j].fixed
assert j in m.db.component_list.keys()
|
#######################################
# String operations
#######################################
def f1(a, b):
    # Concatenate strings a and b and assign the result to x
    x = a + b
    return x
def f2(s):
    # Take the 3rd character of s, counting from the left, and assign it to x
    # e.g. "abcdefg" => "c"
    x = s[2]
    return x
def f3(s):
    # Take the 3rd through 6th characters of s and assign them to x
    # e.g. "abcdefg" => "cdef"
    x = s[2:6]
    return x
def f4(s):
    # Assign to x the string s repeated 3 times
    # e.g. "abc" => "abcabcabc"
    x = s * 3
    return x
assert f1("xy", "zw") == "xyzw"
assert f2("123456") == "3"
assert f3("123456789") == "3456"
assert f4("xy") == "xyxyxy"
print("OK")
|
"""
Jinsung Yoon (9/13/2018)
PATE-GAN: Synthetic Data Generation
"""
import numpy as np
from PATE_GAN import PATE_GAN
import argparse
import os
import time
import pandas as pd
import initpath_alg
initpath_alg.init_sys_path()
import utilmlab
# gpu_frac = 0.5
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = gpu_frac
# set_session(tf.Session(config=config))
def df_split_random(df, train_rate=0.8):
idx = np.random.permutation(len(df))
train_idx = idx[:int(train_rate * len(df))]
test_idx = idx[int(train_rate * len(df)):]
df_train = df.iloc[train_idx]
df_test = df.iloc[test_idx, :]
return df_train, df_test
def init_arg():
parser = argparse.ArgumentParser()
parser.add_argument("--otrain")
parser.add_argument("--otest")
parser.add_argument("--itrain")
parser.add_argument("--itest")
parser.add_argument("--iter", type=int, default=10000)
parser.add_argument("--epsilon", type=float, default=1)
parser.add_argument("--delta", type=int, default=5)
parser.add_argument("--teachers", type=int, default=5)
parser.add_argument(
"--target",
help='name of response var when using csv as input')
parser.add_argument(
"--separator",
default=',',
help="separator csv file")
return parser.parse_args()
if __name__ == '__main__':
args = init_arg()
target = args.target.split(',')
sep = args.separator
fn_i_train = args.itrain
fn_i_test = args.itest
fn_o_train = args.otrain
fn_o_test = args.otest
num_teachers = args.teachers
niter = args.iter
epsilon = args.epsilon
delta = args.delta
assert fn_i_train is not None
assert fn_i_test is not None
assert fn_o_train is not None
assert fn_o_test is not None
assert target is not None
logger = utilmlab.init_logger(os.path.dirname(fn_o_train))
logger.info('loading {} target:{} sep:{}'.format(
(fn_i_train, fn_i_test),
target,
sep))
df_train = pd.read_csv(fn_i_train, sep=sep)
df_test = pd.read_csv(fn_i_test, sep=sep)
features = list(df_train.columns)
for lbl in target:
assert lbl in features
features.remove(lbl)
logger.info('features:{} {}'.format(len(features), features))
logger.info('target:{}'.format(target))
logger.info('epsilon:{} delta:{}'.format(epsilon, delta))
time_exe_start = time.time()
x_train_new, y_train_new, x_test_new, y_test_new = PATE_GAN(
df_train[features].values,
df_train[target].values,
df_test[features].values,
df_test[target].values,
epsilon,
delta,
niter,
num_teachers)
cols = features
cols.append(target[0])
df_train_new = pd.DataFrame(
np.hstack(
[x_train_new,
y_train_new.reshape(len(y_train_new), -1)]),
columns=cols)
df_test_new = pd.DataFrame(
np.hstack(
[x_test_new,
y_test_new.reshape(len(y_test_new), -1)]),
columns=cols)
df_train_new.to_csv(
fn_o_train,
index=False,
compression=utilmlab.get_df_compression(fn_o_train))
df_test_new.to_csv(
fn_o_test,
index=False,
compression=utilmlab.get_df_compression(fn_o_test))
|
import datetime
import unittest
from unittest import mock
from aprsd import messaging
class TestMessageTrack(unittest.TestCase):
def setUp(self) -> None:
config = {}
messaging.MsgTrack(config=config)
def _clean_track(self):
track = messaging.MsgTrack()
track.data = {}
track.total_messages_tracked = 0
return track
def test_create(self):
track1 = messaging.MsgTrack()
track2 = messaging.MsgTrack()
self.assertEqual(track1, track2)
def test_add(self):
track = self._clean_track()
fromcall = "KFART"
tocall = "KHELP"
message = "somthing"
msg = messaging.TextMessage(fromcall, tocall, message)
track.add(msg)
self.assertEqual(msg, track.get(msg.id))
def test_remove(self):
track = self._clean_track()
fromcall = "KFART"
tocall = "KHELP"
message = "somthing"
msg = messaging.TextMessage(fromcall, tocall, message)
track.add(msg)
track.remove(msg.id)
self.assertEqual(None, track.get(msg.id))
def test_len(self):
"""Test getting length of tracked messages."""
track = self._clean_track()
fromcall = "KFART"
tocall = "KHELP"
message = "somthing"
msg = messaging.TextMessage(fromcall, tocall, message)
track.add(msg)
self.assertEqual(1, len(track))
msg2 = messaging.TextMessage(tocall, fromcall, message)
track.add(msg2)
self.assertEqual(2, len(track))
track.remove(msg.id)
self.assertEqual(1, len(track))
@mock.patch("aprsd.messaging.TextMessage.send")
def test__resend(self, mock_send):
"""Test the _resend method."""
track = self._clean_track()
fromcall = "KFART"
tocall = "KHELP"
message = "somthing"
msg = messaging.TextMessage(fromcall, tocall, message)
msg.last_send_attempt = 3
track.add(msg)
track._resend(msg)
msg.send.assert_called_with()
self.assertEqual(0, msg.last_send_attempt)
@mock.patch("aprsd.messaging.TextMessage.send")
def test_restart_delayed(self, mock_send):
"""Test the _resend method."""
track = self._clean_track()
fromcall = "KFART"
tocall = "KHELP"
message1 = "something"
message2 = "something another"
message3 = "something another again"
mock1_send = mock.MagicMock()
mock2_send = mock.MagicMock()
mock3_send = mock.MagicMock()
msg1 = messaging.TextMessage(fromcall, tocall, message1)
msg1.last_send_attempt = 3
msg1.last_send_time = datetime.datetime.now()
msg1.send = mock1_send
track.add(msg1)
msg2 = messaging.TextMessage(tocall, fromcall, message2)
msg2.last_send_attempt = 3
msg2.last_send_time = datetime.datetime.now()
msg2.send = mock2_send
track.add(msg2)
track.restart_delayed(count=None)
msg1.send.assert_called_once()
self.assertEqual(0, msg1.last_send_attempt)
msg2.send.assert_called_once()
self.assertEqual(0, msg2.last_send_attempt)
msg1.last_send_attempt = 3
msg1.send.reset_mock()
msg2.last_send_attempt = 3
msg2.send.reset_mock()
track.restart_delayed(count=1)
msg1.send.assert_not_called()
msg2.send.assert_called_once()
self.assertEqual(3, msg1.last_send_attempt)
self.assertEqual(0, msg2.last_send_attempt)
msg3 = messaging.TextMessage(tocall, fromcall, message3)
msg3.last_send_attempt = 3
msg3.last_send_time = datetime.datetime.now()
msg3.send = mock3_send
track.add(msg3)
msg1.last_send_attempt = 3
msg1.send.reset_mock()
msg2.last_send_attempt = 3
msg2.send.reset_mock()
msg3.last_send_attempt = 3
msg3.send.reset_mock()
track.restart_delayed(count=2)
msg1.send.assert_not_called()
msg2.send.assert_called_once()
msg3.send.assert_called_once()
self.assertEqual(3, msg1.last_send_attempt)
self.assertEqual(0, msg2.last_send_attempt)
self.assertEqual(0, msg3.last_send_attempt)
msg1.last_send_attempt = 3
msg1.send.reset_mock()
msg2.last_send_attempt = 3
msg2.send.reset_mock()
msg3.last_send_attempt = 3
msg3.send.reset_mock()
track.restart_delayed(count=2, most_recent=False)
msg1.send.assert_called_once()
msg2.send.assert_called_once()
msg3.send.assert_not_called()
self.assertEqual(0, msg1.last_send_attempt)
self.assertEqual(0, msg2.last_send_attempt)
self.assertEqual(3, msg3.last_send_attempt)
|
#!/usr/bin/env ipython
import sys, unittest
import numpy as np
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
class DotTestCase(unittest.TestCase):
"""A test fixture for numpy's dot products"""
def setUp(self):
"""Establish our fixture"""
n = 4
self.x = np.arange(n)
self.y = 1 - np.linspace(0.0, 1.0, n)
def tearDown(self):
"""Cleanup"""
del self.x
del self.y
def testDotI(self):
"""Test np.dot for x.x"""
self.assertEqual(np.dot(self.x, self.x),
sum([x*x for x in self.x]))
class DetectionTestCase(unittest.TestCase):
"""A test fixture for numpy's dot products"""
def setUp(self):
"""Establish our fixture"""
pass
def tearDown(self):
"""Cleanup"""
pass
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def suite():
"""Returns a suite containing all the test cases in this module."""
suites = []
suites += unittest.makeSuite(DotTestCase)
suites += unittest.makeSuite(DetectionTestCase)
return unittest.TestSuite(suites)
def run(shouldExit=False):
"""Run the tests"""
if shouldExit:
unittest.main()
else:
return unittest.TextTestRunner().run(suite()).wasSuccessful()
if __name__ == "__main__":
run(True)
|
"""
Time:2019-2-26
Author:pengfei
Motivation:for training on a csv dataset
"""
"""csv Dataset Classes
Original author: Francisco Massa
https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
Updated by: Ellis Brown, Max deGroot
"""
from .config import HOME
import os.path as osp
import sys
import csv
import torch
import torch.utils.data as data
import cv2
import numpy as np
import pandas as pd
class CSVAnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, keep_difficult=False):
self.keep_difficult = keep_difficult
def __call__(self, targets, width, height, classes):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = []
wh = np.array([width, height, width, height, 1])
for i in targets:
bboxe = list(map(float, i[:-1])) # str to float
bboxe.append(classes[i[-1]])
bboxe /= wh # bounding box normalization
res += [list(bboxe)]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class CSVDataset(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): image path with all trainval and test images.
csv_file (string): csv file which contain object's information
class_file(string): csv file contains classes information
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'csv')
"""
def __init__(self, csv_file,
classes_file,
transform=None, target_transform=CSVAnnotationTransform(),
dataset_name='csv'):
self.csv_file = csv_file
self.transform = transform
self.target_transform = target_transform
self.name = dataset_name
self.img_lists = []
self.targets = []
# construct class information dict
# key is class's name, item is class's index
with open(classes_file) as f:
temp = list(csv.reader(f))
self.classes = dict(zip([x[0] for x in temp], [float(x[1]) for x in temp]))
self.num_classes = len(temp)+1
with open(csv_file) as f:
temp = list(csv.reader(f))
for i in temp:
self.targets.append(i[1:])
self.ids = self._get_im_lists(csv_file)
def __getitem__(self, index):
im, gt, h, w = self.pull_item(index)
return im, gt
def __len__(self):
return len(self.ids)
def pull_item(self, index):
im_info = self.ids[index]
im_path = im_info[0]
img = cv2.imread(im_path)
# img = self.ims[index]
height, width, channels = img.shape
# Reshape the targets from [num_targets * 5] to [num_targets, 5]
num_targets = int( (len(im_info)-1) / 5)
targets = []
for i in range(num_targets):
targets.append(im_info[1+5*i:1+5*(i+1)])
if self.target_transform is not None:
target = self.target_transform(targets, width, height, self.classes)
if self.transform is not None:
target = np.array(target)
img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
# to rgb
img = img[:, :, (2, 1, 0)]
# img = img.transpose(2, 0, 1)
target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
return torch.from_numpy(img).permute(2, 0, 1), target, height, width
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
        img_path = self.ids[index][0]
return cv2.imread(img_path, cv2.IMREAD_COLOR)
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
        Return:
            tuple: (raw target rows, transformed annotation list)
                eg: ([['96', '13', '438', '332', 'dog']], [[96.0, 13.0, 438.0, 332.0, 0.0]])
        '''
im_info = self.ids[index]
# Reshape the targets from [num_targets * 5] to [num_targets, 5]
num_targets = int((len(im_info) - 1) / 5)
targets = []
for i in range(num_targets):
targets.append(im_info[1 + 5 * i:1 + 5 * (i + 1)])
gt = self.target_transform(targets, 1, 1, self.classes)
return targets, gt
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def _get_im_lists(self, dataset_file):
"""
        :param dataset_file: (string) csv file containing object annotations
        :return: (list) image list of shape [num_images]
            [[im_name_1, x1,y1,x2,y2, class_name, ....], ....]
"""
im_list_old = pd.read_csv(dataset_file).values.tolist()
im_list_new = []
current_im_name = ''
new_im = []
for index, i in enumerate(im_list_old):
if i[0] != current_im_name:
if index > 0:
im_list_new.append(new_im)
new_im = i
current_im_name = i[0]
            else:
                new_im += i[1:]
        # append the final image's entry after the loop so the last image is never dropped
        if new_im:
            im_list_new.append(new_im)
        return im_list_new
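# A minimal usage sketch; the file names and CSV layout below are assumptions for
# illustration only. The annotation csv is expected to hold one object per row,
#   img_001.jpg,100,60,300,240,dog
#   img_001.jpg,10,20,50,80,cat
# and _get_im_lists groups consecutive rows of the same image into a single entry
#   ['img_001.jpg', 100, 60, 300, 240, 'dog', 10, 20, 50, 80, 'cat']
# which pull_item then reshapes into a [num_targets, 5] target array.
#
# dataset = CSVDataset('annotations.csv', 'classes.csv')
# im, gt = dataset[0]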
|
# for more information on how to install requests
# http://docs.python-requests.org/en/master/user/install/#install
import requests
import json
# TODO: replace with your own app_id and app_key
app_id = '-'
app_key = '-'
def get_meaning(word):
language = 'en'
word_id = str(word)
url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word_id.lower()
r = requests.get(url, headers = {'app_id': app_id, 'app_key': app_key})
# print(r)
# print("code {}\n".format(r.status_code))
try:
meaning = r.json()
except:
return "No entry available for " + '"' + str(word) +'"'
# print(type(meaning))
out = ''
for i in meaning["results"][0]["lexicalEntries"][0]["entries"][0]["senses"]:
out += str(i["definitions"][0]).capitalize() + "\n" + "Usage Examples:\n"
try:
for j in i["examples"]:
out += "- " + j["text"] + "\n"
except:
out += "None\n"
pass
return out
if __name__ == '__main__':
print(get_meaning("cupcalsroskl;dke"))
|
#!/usr/bin/python
# senderclient.py - this file is part of dailyimage
# Log in and send emails to clients
import sys # argv, exit
import smtplib # for email ...
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email import encoders
from getimage import get_image
class Client:
'''
class Client
attributes:
email string - The user's email
query string - Image search query
time dictionary 'hour', 'minute' - Time to send
'''
def __init__(self, email, timestring, query):
self.email = email
# Build the dictionary, i.e timestring='15:35' gives {'hour': 15, 'minute': 35}
self.time = dict(zip(['hour', 'minute'], [int(i) for i in timestring.split(':', 2)]))
self.query = query
# Start the generator to find images of the client's query
self.image_generator = get_image(query)
class Sender:
'''
class Sender
attributes:
smtpObj smtplib.SMTP
'''
def __init__(self, email, password):
'''
arguments:
email: string
password: string
Using the email and password, this function logs into the email on the
provider's SMTP server and returns the generated client
'''
self.email = email
# Contains the SMTP server and the appropriate port depending on the email
server = []
with open('.smtp_servers') as f:
# Find the appropriate server and port from the user's email
for line in f:
elems = line.split(None, 3)
if elems[0] in email:
server = dict(zip(['provider', 'server', 'port'], line.split(None, 3)))
break
# Only some email providers work, see .smtp_servers for details
if not server:
raise ValueError('Could not find an SMTP server for that email provider: ' + email)
# Create a client and connect to the SMTP server
self.smtpObj = smtplib.SMTP(server['server'], server['port'])
self.smtpObj.ehlo()
self.smtpObj.starttls()
self.smtpObj.login(email, password)
def send(self, client):
'''Send an image to the email'''
body = 'Here is your daily ' + client.query[:-1] + '!\n\nRegards, DailyImage'
# Create the email message
msg = MIMEMultipart()
msg['Subject'] = 'Your Daily ' + client.query
msg['From'] = self.email
msg['To'] = client.email
        msg.attach(MIMEText(body, 'plain', 'utf-8'))
# Get the next image and attach it
image = next(client.image_generator)
attachment = MIMEImage(image, 'daily-' + client.query)
msg.attach(attachment)
# Off it goes!
self.smtpObj.sendmail(self.email, client.email, msg.as_string())
def main(argv):
if len(argv) != 3:
        print('usage: senderclient.py email password')
sys.exit()
try:
        client = Sender(argv[1], argv[2])
except smtplib.SMTPAuthenticationError as e:
print('Error: Could not log in. ')
print((e.__str__().split('\'')[1]))
sys.exit()
print('Login successful')
client.smtpObj.quit()
if __name__ == '__main__':
import sys
main(sys.argv)
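# The layout of the '.smtp_servers' file is inferred from the parsing in
# Sender.__init__ above (whitespace-separated provider, server and port per line);
# an illustrative entry might look like
#   gmail.com smtp.gmail.com 587
# which would match any address containing 'gmail.com'. The actual file shipped
# with dailyimage may differ.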
|
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
from typing import Tuple
from .config import _dir
_data_dir = '%s/input' % _dir
cell_types = ['HEPG2', 'HUVEC', 'RPE', 'U2OS']
positive_control = 1108
negative_control = 1138
nsirna = 1108 # excluding 30 positive_control + 1 negative_control
plate_shape = (14, 22)
def set_data_dir(d):
global _data_dir
_data_dir = d
def load_header(set_type):
"""
Args:
set_type (str): train or test
id_code experiment plate well sirna well_type cell_type
      HEPG2-01_1_B03 HEPG2-01    1 B03    513 positive_control HEPG2
"""
df = pd.read_csv('%s/%s.csv' % (_data_dir, set_type))
df_controls = pd.read_csv('%s/%s_controls.csv' % (_data_dir, set_type))
if set_type == 'train':
df.insert(5, 'well_type', 'train')
else:
df.insert(4, 'sirna', -1)
df.insert(5, 'well_type', 'test')
df_all = pd.concat([df, df_controls], sort=False)
    # Add cell_type; HEPG2-01 -> HEPG2
df_all['cell_type'] = df_all['experiment'].str.replace('-.*', '',
regex=True)
    # Add experiment_no: HEPG2-01 -> 1
ex_no = df_all['experiment'].str.replace('^.*-', '', regex=True)
df_all['experiment_no'] = ex_no.astype(int)
return df_all
def load(set_type, id_code, site):
"""
Load image specified by the id_code
Args:
set_type (str): train or test
id_code (str): U2OS-03_4_O19 (or, can be a pd.Series with id_code)
site (int): 1 or 2
Returns:
img (np.array): 6 x 512 x 512
"""
if not isinstance(id_code, str):
if isinstance(id_code, pd.Series):
id_code = id_code.id_code
else:
raise TypeError('id_code is not str', id_code)
if not (site == 1 or site == 2):
raise ValueError('site must be 1 or 2')
# Example:
# id_code: U2OS-03_4_O19
# filename: U2OS-03/Plate4/O19_s<site>_w<channel>.png
v = id_code.split('_')
batch = v[0] # U2OS-03
plate = v[1] # 4
well = v[2] # O19
nc = 512
X = np.empty((6, nc, nc))
for ichannel in range(6):
filename = ('%s/%s/%s/Plate%s/%s_s%d_w%d.png' % (_data_dir,
set_type, batch, plate, well, site, ichannel + 1))
img = mpimg.imread(filename)
X[ichannel, :, :] = img[:, :]
return X
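# For example (following the path layout noted in load() above), load('train',
# 'U2OS-03_4_O19', site=1) reads the six channel images
#   <_data_dir>/train/U2OS-03/Plate4/O19_s1_w1.png ... O19_s1_w6.png
# and stacks them into a (6, 512, 512) array.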
def well_coordinate(well: str) -> Tuple[int, int]:
"""
Row: B -> 0
Col: 02 -> 0
Args:
well (str): e.g. B02
Returns
(row, col)
"""
assert(len(well) == 3)
    # zero-based indices as documented above: rows start at 'B', columns at '02'
    row = ord(well[0].lower()) - ord('b')
    col = int(well[1:]) - 2
return (row, col)
def well_index(well: str) -> int:
row, col = well_coordinate(well)
return row * 22 + col
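# With the zero-based convention documented in well_coordinate (rows start at 'B',
# columns at '02'): well_coordinate('B02') == (0, 0) and well_coordinate('O23') ==
# (13, 21), so well_index('O23') == 13 * 22 + 21 == 307, the last cell of
# plate_shape == (14, 22).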
|
"""
Unified interfaces to minimization algorithms.
Functions
---------
- minimize : minimization of a function of several variables.
"""
__all__ = ['minimize', 'show_minimize_options']
from warnings import warn
# unconstrained minimization
from optimize import _minimize_neldermead, _minimize_powell, \
_minimize_cg, _minimize_bfgs, _minimize_newtoncg, \
MemoizeJac
from anneal import _minimize_anneal
# constrained minimization
from lbfgsb import _minimize_lbfgsb
from tnc import _minimize_tnc
from cobyla import _minimize_cobyla
from slsqp import _minimize_slsqp
def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
hessp=None, bounds=None, constraints=(),
options=dict(), full_output=False, callback=None,
retall=False):
"""
Minimization of scalar function of one or more variables.
.. versionadded:: 0.11.0
Parameters
----------
fun : callable
Objective function.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (Jacobian, Hessian).
method : str, optional
Type of solver. Should be one of:
{'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'Anneal',
'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP'}.
jac : bool or callable, optional
Jacobian of objective function. Only for CG, BFGS, Newton-CG.
If `jac` is a Boolean and is True, `fun` is assumed to return the
value of Jacobian along with the objective function. If False, the
Jacobian will be estimated numerically.
`jac` can also be a callable returning the Jacobian of the
objective. In this case, it must accept the same arguments as
`fun`.
hess, hessp : callable, optional
Hessian of objective function or Hessian of objective function
times an arbitrary vector p. Only for Newton-CG.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. If neither `hess` nor
`hessp` is provided, then the hessian product will be approximated
using finite differences on `jac`. `hessp` must compute the Hessian
times an arbitrary vector.
bounds : sequence, optional
Bounds for variables (only for L-BFGS-B, TNC, COBYLA and SLSQP).
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction.
constraints : dict or sequence of dict, optional
Constraints definition (only for COBYLA and SLSQP).
Each constraint is defined in a dictionary with fields:
type: str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun: callable
The function defining the constraint.
jac: callable, optional
The Jacobian of `fun` (only for SLSQP).
args: sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see `show_minimize_options`.
full_output : bool, optional
If True, return optional outputs. Default is False.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
retall : bool, optional
If True, return a list of the solution at each iteration. This is only
done if `full_output` is True.
Returns
-------
xopt : ndarray
The solution.
info : dict
A dictionary of optional outputs (depending on the chosen method)
with the keys:
solution : ndarray
The solution (same as `xopt`).
success : bool
Boolean flag indicating if a solution was found.
status : int
An integer flag indicating the type of termination. Its
value depends on the underlying solver. Refer to `message`
for more information.
message : str
A string message giving information about the cause of the
termination.
fun, jac, hess : ndarray
Values of objective function, Jacobian and Hessian (if
available).
nfev, njev, nhev: int
Number of evaluations of the objective functions and of its
jacobian and hessian.
nit: int
Number of iterations.
direc: ndarray
Current set of direction vectors for the Powell method.
T : float
Final temperature for simulated annealing.
accept : int
Number of tests accepted.
allvecs : list
Solution at each iteration (if ``retall == True``).
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *BFGS*.
Unconstrained minimization
~~~~~~~~~~~~~~~~~~~~~~~~~~
Method *Nelder-Mead* uses the Simplex algorithm [1]_, [2]_. This
algorithm has been successful in many applications but other algorithms
using the first and/or second derivatives information might be preferred
for their better performances and robustness in general.
Method *Powell* is a modification of Powell's method [3]_, [4]_ which
is a conjugate direction method. It performs sequential one-dimensional
minimizations along each vector of the directions set (`direc` field in
`options` and `info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken.
Method *CG* uses a nonlinear conjugate gradient algorithm by Polak and
Ribiere, a variant of the Fletcher-Reeves method described in [5]_ pp.
120-122. Only the first derivatives are used.
Method *BFGS* uses the quasi-Newton method of Broyden, Fletcher,
Goldfarb, and Shanno (BFGS) [5]_ pp. 136. It uses the first derivatives
    only. BFGS has proven good performance even for non-smooth
    optimizations.
Method *Newton-CG* uses a Newton-CG algorithm [5]_ pp. 168 (also known
    as the truncated Newton method). It uses a CG method to compute the
search direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm.
Method *Anneal* uses simulated annealing, which is a probabilistic
metaheuristic algorithm for global optimization. It uses no derivative
information from the function being optimized.
Constrained minimization
~~~~~~~~~~~~~~~~~~~~~~~~
Method *L-BFGS-B* uses the L-BFGS-B algorithm [6]_, [7]_ for bound
constrained minimization.
Method *TNC* uses a truncated Newton algorithm [5]_, [8]_ to minimize a
    function with variables subject to bounds. This algorithm uses
gradient information; it is also called Newton Conjugate-Gradient. It
differs from the *Newton-CG* method described above as it wraps a C
implementation and allows each variable to be given upper and lower
bounds.
Method *COBYLA* uses the Constrained Optimization BY Linear
Approximation (COBYLA) method [9]_, [10]_, [11]_. The algorithm is
based on linear approximations to the objective function and each
constraint. The method wraps a FORTRAN implementation of the algorithm.
Method *SLSQP* uses Sequential Least SQuares Programming to minimize a
function of several variables with any combination of bounds, equality
and inequality constraints. The method wraps the SLSQP Optimization
subroutine originally implemented by Dieter Kraft [12]_.
References
----------
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
Minimization. The Computer Journal 7: 308-13.
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
respectable, in Numerical Analysis 1995: Proceedings of the 1995
Dundee Biennial Conference in Numerical Analysis (Eds. D F
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
191-208.
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
a function of several variables without calculating derivatives. The
Computer Journal 7: 155-162.
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
Numerical Recipes (any edition), Cambridge University Press.
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
Springer New York.
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
Algorithm for Bound Constrained Optimization. SIAM Journal on
Scientific and Statistical Computing 16 (5): 1190-1208.
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
optimization. ACM Transactions on Mathematical Software 23 (4):
550-560.
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
1984. SIAM Journal of Numerical Analysis 21: 770-778.
.. [9] Powell, M J D. A direct search optimization method that models
the objective and constraint functions by linear interpolation.
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
.. [10] Powell M J D. Direct search algorithms for optimization
calculations. 1998. Acta Numerica 7: 287-336.
.. [11] Powell M J D. A view of algorithms for optimization without
derivatives. 2007.Cambridge University Technical Report DAMTP
2007/NA03
.. [12] Kraft, D. A software package for sequential quadratic
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
Center -- Institute for Flight Mechanics, Koln, Germany.
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function (and its respective derivatives) is implemented in `rosen`
(resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
>>> from scipy.optimize import minimize, rosen, rosen_der
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
>>> xopt = minimize(rosen, x0, method='Nelder-Mead')
Optimization terminated successfully.
Current function value: 0.000066
Iterations: 141
Function evaluations: 243
>>> print xopt
[ 1. 1. 1. 1. 1.]
Now using the *BFGS* algorithm, using the first derivative and a few
options:
>>> xopt, info = minimize(rosen, x0, method='BFGS', jac=rosen_der,
... options={'gtol': 1e-6, 'disp': False},
... full_output=True)
>>> print info['message']
Optimization terminated successfully.
>>> print info['solution']
[ 1. 1. 1. 1. 1.]
>>> print info['hess']
[[ 0.00749589 0.01255155 0.02396251 0.04750988 0.09495377]
[ 0.01255155 0.02510441 0.04794055 0.09502834 0.18996269]
[ 0.02396251 0.04794055 0.09631614 0.19092151 0.38165151]
[ 0.04750988 0.09502834 0.19092151 0.38341252 0.7664427 ]
[ 0.09495377 0.18996269 0.38165151 0.7664427 1.53713523]]
Next, consider a minimization problem with several constraints (namely
Example 16.4 from [5]_). The objective function is:
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
There are three constraints defined as:
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
And variables must be positive, hence the following bounds:
>>> bnds = ((0, None), (0, None))
The optimization problem is solved using the SLSQP method as:
>>> xopt, info = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
... constraints=cons, full_output=True)
    It should converge to the theoretical solution (1.4, 1.7).
"""
meth = method.lower()
# check if optional parameters are supported by the selected method
# - jac
if meth in ['nelder-mead', 'powell', 'anneal', 'cobyla'] and bool(jac):
warn('Method %s does not use gradient information (jac).' % method,
RuntimeWarning)
# - hess
if meth != 'newton-cg' and hess is not None:
warn('Method %s does not use Hessian information (hess).' % method,
RuntimeWarning)
# - constraints or bounds
if meth in ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg'] and \
(bounds is not None or any(constraints)):
warn('Method %s cannot handle constraints nor bounds.' % method,
RuntimeWarning)
if meth in ['l-bfgs-b', 'tnc'] and any(constraints):
warn('Method %s cannot handle constraints.' % method,
RuntimeWarning)
    if meth == 'cobyla' and bounds is not None:
warn('Method %s cannot handle bounds.' % method,
RuntimeWarning)
# - callback
if meth in ['anneal', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp'] and \
callback is not None:
warn('Method %s does not support callback.' % method,
RuntimeWarning)
# - retall
if meth in ['anneal', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp'] and \
retall:
warn('Method %s does not support retall.' % method,
RuntimeWarning)
# fun also returns the jacobian
if not callable(jac):
if bool(jac):
fun = MemoizeJac(fun)
jac = fun.derivative
else:
jac = None
if meth == 'nelder-mead':
return _minimize_neldermead(fun, x0, args, options, full_output,
retall, callback)
elif meth == 'powell':
return _minimize_powell(fun, x0, args, options, full_output,
retall, callback)
elif meth == 'cg':
return _minimize_cg(fun, x0, args, jac, options, full_output,
retall, callback)
elif meth == 'bfgs':
return _minimize_bfgs(fun, x0, args, jac, options, full_output,
retall, callback)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, options,
full_output, retall, callback)
elif meth == 'anneal':
return _minimize_anneal(fun, x0, args, options, full_output)
elif meth == 'l-bfgs-b':
return _minimize_lbfgsb(fun, x0, args, jac, bounds, options,
full_output)
elif meth == 'tnc':
return _minimize_tnc(fun, x0, args, jac, bounds, options,
full_output)
elif meth == 'cobyla':
return _minimize_cobyla(fun, x0, args, constraints, options,
full_output)
elif meth == 'slsqp':
return _minimize_slsqp(fun, x0, args, jac, bounds,
constraints, options, full_output)
else:
raise ValueError('Unknown solver %s' % method)
def show_minimize_options(method=None):
"""Show documentation for additional options of minimize's methods.
These are method-specific options that can be supplied to `minimize` in the
``options`` dict.
Parameters
----------
method : str, optional
If not given, shows all methods. Otherwise, show only the options for
the specified method. Valid values are: 'BFGS', 'Newton-CG',
'Nelder-Mead', 'Powell', 'CG', 'Anneal', 'L-BFGS-B', 'TNC',
'COBYLA', 'SLSQP'.
Notes
-----
* BFGS options:
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
* Nelder-Mead options:
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxfev : int
Maximum number of function evaluations to make.
* Newton-CG options:
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
* CG options:
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
* Powell options:
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxfev : int
Maximum number of function evaluations to make.
direc : ndarray
Initial set of direction vectors for the Powell method.
* Anneal options:
schedule : str
Annealing schedule to use. One of: 'fast', 'cauchy' or
'boltzmann'.
T0 : float
Initial Temperature (estimated as 1.2 times the largest
cost-function deviation over random points in the range).
Tf : float
Final goal temperature.
maxfev : int
Maximum number of function evaluations to make.
maxaccept : int
Maximum changes to accept.
boltzmann : float
Boltzmann constant in acceptance test (increase for less
stringent test at each temperature).
learn_rate : float
Scale constant for adjusting guesses.
ftol : float
Relative error in ``fun(x)`` acceptable for convergence.
quench, m, n : float
Parameters to alter fast_sa schedule.
lower, upper : float or ndarray
Lower and upper bounds on `x`.
dwell : int
The number of times to search the space at each temperature.
* L-BFGS-B options:
maxcor : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms
in an approximation to it.)
factr : float
The iteration stops when ``(f^k -
f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps``
is the machine precision, which is automatically generated by
the code. Typical values for `factr` are: 1e12 for low
accuracy; 1e7 for moderate accuracy; 10.0 for extremely high
accuracy.
pgtol : float
The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
<= pgtol`` where ``pg_i`` is the i-th component of the
projected gradient.
maxfev : int
Maximum number of function evaluations.
* TNC options:
scale : list of floats
Scaling factors to apply to each variable. If None, the
factors are up-low for interval bounded variables and
            1+|x| for the others. Defaults to None.
offset : float
                Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
maxCGit : int
Maximum number of hessian*vector evaluations per main
                iteration. If maxCGit == 0, the direction chosen is
                -gradient. If maxCGit < 0, maxCGit is set to
                max(1,min(50,n/2)). Defaults to -1.
maxfev : int
                Maximum number of function evaluations. If None, `maxfev` is
set to max(100, 10*len(x0)). Defaults to None.
eta : float
                Severity of the line search. If < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
minfev : float
Minimum function value estimate. Defaults to 0.
ftol : float
                Precision goal for the value of f in the stopping criterion.
                If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
xtol : float
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
pgtol : float
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
rescale : float
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
* COBYLA options:
rhobeg : float
Reasonable initial changes to the variables.
rhoend : float
Final accuracy in the optimization (not precisely guaranteed).
This is a lower bound on the size of the trust region.
maxfev : int
Maximum number of function evaluations.
* SLSQP options:
eps : float
Step size used for numerical approximation of the jacobian.
maxiter : int
Maximum number of iterations.
"""
if method is None:
notes_header = "Notes\n -----"
sections = show_minimize_options.__doc__.split(notes_header)[1:]
else:
sections = show_minimize_options.__doc__.split('*')[1:]
sections = [s.strip() for s in sections]
sections = [s for s in sections if s.lower().startswith(method.lower())]
print '\n'.join(sections)
return
|
#!/usr/bin/env python
import numpy as np
import unittest
import pyphil
class tick_tack_toe_board:
"""
    Represents the current board position. X moves first, O moves second in all cases.
    X and O are represented by the type dictionary
"""
def __init__(self,rows=3,cols=3):
# for printing the board
self.VERT=u'︱'
self.HOR='_'
# contains the location of X and 0
self.grid=np.zeros(shape=(rows,cols))
# the enumerated type for X or O
self.type={'blank':0,'x':1,'o':2}
# The length of "in a row" to win
self.win_length=3
self._rows=rows
self._cols=cols
# the thing that contains the Xs and Os
self.boardStatus = np.reshape(np.zeros(self._rows * self._cols,dtype=int), (self._rows, self._cols))
@property
def rows(self):
return self._rows
@property
def cols(self):
return self._cols
def get_board(self):
return self.boardStatus
def set_board(self,board):
self.boardStatus=board
def _valid_point(self,row,col):
return 0 <= row < self.rows and 0 <= col < self._cols
def __contains__(self, point):
return self._valid_point(point[0],point[1])
def game_won(self):
""" checks to see if the game is won. Returns the winner or self.type["blank"] if no winner"""
# This gets squares that go through a given row and column in a line
def get_test_squares(row,col,movement):
points=[(row,col)]
for direction in [-1,1]:
displacement=1
while True:
test_point=movement(row,col,displacement*direction)
if test_point in self:
displacement += 1
points.append( test_point)
else:
break
return points
for piece in [self.type["x"],self.type["o"]]:
for row,col in [(row,col) for row in range(self.rows) for col in range(self._cols)]:
left_right = lambda row,col,direction: (row,direction+col)
up_down = lambda row,col,direction: (row+direction,col)
diag_up_right = lambda row,col,direction: (row-direction,col+direction)
diag_down_right = lambda row,col,direction: (row+direction,col+direction)
# sees if all the points are of the given type
def test_points(points,type):
for point in points:
if self.boardStatus[point] != type:
return False
return True
for movement in [left_right, up_down, diag_up_right, diag_down_right]:
points=get_test_squares(row,col,movement)
if len(points) >= self.win_length and test_points(points,piece):
return piece
return self.type["blank"]
def game_draw(self):
""" Return true if the game is a draw and false otherwise """
for num in np.ravel(self.boardStatus):
if num == self.type["blank"]:
return False
if self.game_won() != self.type["blank"]:
return False
return True
def __str__(self):
string=""
for row in range(self.rows):
for col in range(self.cols):
dock={self.type['x']:'x',self.type['o']:'o',self.type['blank']:' ' }
string += str(dock[self.boardStatus[row,col]])+' '
string += '\n'
return string
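# Sketch of how game_won scans for a winner: for every cell and each of the four
# directions (horizontal, vertical, both diagonals) it gathers the full in-bounds
# line of squares through that cell and reports a winner once a line of length
# >= win_length holds the same piece throughout. For instance, placing x at
# (0,0), (1,1) and (2,2) on an otherwise empty default board makes game_won()
# return type['x'] (i.e. 1), as exercised by test_1 below.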
class test_tick_tack_toe_board(unittest.TestCase):
def test_1(self):
board=tick_tack_toe_board()
self.assertFalse( board.game_won() )
for row,col in ((0,0),(1,1),(2,2)):
board.boardStatus[row][col] = board.type["x"]
self.assertEqual( board.game_won(), board.type["x"])
self.assertNotEqual( board.game_won(), board.type["o"])
self.assertEqual( board.game_draw(), False)
def test_2(self):
board=tick_tack_toe_board()
self.assertFalse( board.game_won() )
for row,col in ((0,0),(0,1),(2,0),(1,2),(2,1)):
board.boardStatus[row][col] = board.type["x"]
for row,col in ((1,1),(0,2),(1,0),(2,2)):
board.boardStatus[row][col] = board.type["o"]
self.assertEqual( board.game_won(), False)
self.assertEqual( board.game_draw() , True)
def get_tack_tack_toe_losing_move(board, toMove=None):
"""
    Returns a (row,col) tuple for the move that is guaranteed to make the current mover lose, or None if no such "perfect" move exists
"""
# provides a way to alternate between Xs and Os
    next_type = lambda curType: {board.type["x"]:board.type["o"],board.type["o"]:board.type["x"]}[curType]
# condition for when a game is won
def game_won(board, toMove):
return board.game_won() == next_type(toMove)
def game_lost(board, toMove):
return board.game_won() == toMove or board.game_draw()
# condition for already lost and won
if game_lost(board,toMove):
return None
    # snapshot the current position so the board can be restored between candidate moves
    cur_pos = board.get_board().copy()
    # provides a way of iterating through available positions on the board - this returns the next available position to place a piece on the board
def get_next(pos, exclusive=False):
row,col=pos
        # increment the column before checking it if it's exclusive
if exclusive:
col += 1
        # keep looking for available spots until a blank spot is found or we're out of locations
while True:
# correct things if they've gone out of bounds
if col >= board.cols:
row+=1
col=0
if row >= board.rows:
return None
            # see if the location is available
if board.boardStatus[row,col] == board.type["blank"]:
return row,col
            # the location gets checked before incrementing
col += 1
# gets a list of possible moves
def get_possible_moves():
moves=[]
attempt_move=get_next( (0,0), exclusive=False )
while attempt_move is not None:
moves.append(attempt_move)
attempt_move=get_next(attempt_move,exclusive=True)
return moves
    def reset_board():
        # restore the board to the snapshot taken before trying moves
        board.set_board(cur_pos.copy())
# make sure none of the following opponent moves have guaranteed victory
for attempt_move in get_possible_moves():
board.boardStatus[attempt_move] = toMove
# If the move isn't good then try another
if game_lost(board,toMove):
reset_board()
continue
# also see if the move makes the player win
if game_won(board,toMove):
reset_board()
return attempt_move
# the move will win the game
        success = True
        # keep track of the status before making opponent moves
        last_status = board.boardStatus.copy()
        # no matter what the opposition does the player must still be able to "win" after their move
for opposition_move in get_possible_moves():
board.boardStatus[opposition_move]=next_type(toMove)
            # one winning reply isn't enough; we must win against every possible opponent move
if game_won(board,toMove):
                board.boardStatus = last_status.copy()
continue
# alternatively, if there's no sure way of winning then the game is lost
else:
reset_board()
                success = False
break
# set up for next round
reset_board()
        if success:
return attempt_move
# if we didn't return a valid move in the loop there is none to be found
return None
class test_get_tack_tack_toe_losing_move(unittest.TestCase):
def test_1(self):
def get_result( x_moves, o_moves,toMove=None):
board=tick_tack_toe_board()
for xmove in x_moves:
board.boardStatus[xmove]=board.type["x"]
for omove in o_moves:
board.boardStatus[omove]=board.type["o"]
print(board)
return get_tack_tack_toe_losing_move(board,toMove)
bord=tick_tack_toe_board()
self.assertEqual( None, get_result([(0,0),(0,1),(1,2),(2,0)],[(0,2),(1,0),(1,1)],toMove=bord.type["x"] ))
self.assertEqual( (1,2) , get_result([(0,0),(0,1),(1,0),(2,1)],[(0,2),(1,1),(2,2)],bord.type["x"]) )
self.assertEqual( None , get_result([(0,0),(0,1),(1,0),(2,1)],[(0,2),(1,1),(2,2)],bord.type["o"]) )
self.assertNotEqual(None, get_result([(1,1),(0,0)],[(2,1) ],bord.type["o"]) )
# print(get_result([(0,0),(1,1)],[ ],bord.type["o"]))
if __name__== '__main__':
unittest.main()
|
import logging
import torch
from torch import nn
from .base_mapping import MappingBase
logger = logging.getLogger(__name__)
class ProjectionMapping(MappingBase):
"""
A class for simple contextualization of word-level embeddings.
Runs an untrained BiLSTM on top of the loaded-from-disk embeddings.
"""
def __init__(self, model_hidden_dim: int, *args, **kwargs):
"""
Uses a bi-LSTM to project the sequence of embeddings to a new hidden space.
        The parameters of the projection to the hidden space are not trained.
        Args:
            model_hidden_dim: dimensionality of the hidden space that is mapped to
"""
logger.info("Using projection mapping")
super(ProjectionMapping, self).__init__(*args, **kwargs)
input_dim = model_hidden_dim
self.lstm = nn.LSTM(
input_size=input_dim,
hidden_size=int(input_dim / 2),
num_layers=1,
batch_first=True,
bidirectional=True,
)
for param in self.lstm.parameters():
param.requires_grad = False
def forward(self, batch: torch.Tensor) -> torch.Tensor:
"""
Random BiLSTM contextualization of embeddings
Args:
batch: a batch of pre-computed embeddings loaded from disk.
Returns:
A random-init BiLSTM contextualization of the embeddings
"""
with torch.no_grad():
projected, _ = self.lstm(batch)
return projected
|
import time
import xnmt.loss
from xnmt.vocab import Vocab
from xnmt.events import register_xnmt_handler, handle_xnmt_event
from xnmt import logger, yaml_logger
class LossTracker(object):
"""
A template class to track training process and generate report.
"""
REPORT_TEMPLATE = 'Epoch {epoch:.4f}: {data}_loss/word={loss:.6f} (words={words}, words/sec={words_per_sec:.2f}, time={time})'
REPORT_TEMPLATE_DEV = 'Epoch {epoch:.4f} dev {score} (words={words}, words/sec={words_per_sec:.2f}, time={time})'
REPORT_TEMPLATE_DEV_AUX = 'Epoch {epoch:.4f} dev auxiliary {score}'
@register_xnmt_handler
def __init__(self, training_regimen, eval_every, name=None):
self.training_regimen = training_regimen
self.eval_train_every = 1000
self.eval_dev_every = eval_every
self.epoch_num = 0
self.epoch_loss = xnmt.loss.LossScalarBuilder()
self.epoch_words = 0
self.sent_num = 0
self.sent_num_not_report_train = 0
self.sent_num_not_report_dev = 0
self.fractional_epoch = 0
self.dev_score = None
self.best_dev_score = None
self.dev_words = 0
self.last_report_words = 0
self.start_time = time.time()
self.last_report_train_time = self.start_time
self.dev_start_time = self.start_time
self.name = name
@handle_xnmt_event
def on_new_epoch(self, training_task, num_sents):
"""
Clear epoch-wise counters for starting a new training epoch.
"""
if training_task is self.training_regimen:
self.total_train_sent = num_sents
self.epoch_loss.zero()
self.epoch_words = 0
self.epoch_num += 1
self.sent_num = 0
self.sent_num_not_report_train = 0
self.sent_num_not_report_dev = 0
self.last_report_words = 0
self.last_report_train_time = time.time()
def update_epoch_loss(self, src, trg, loss):
"""
Update epoch-wise counters for each iteration.
"""
batch_sent_num = self.count_sent_num(src)
self.sent_num += batch_sent_num
self.sent_num_not_report_train += batch_sent_num
self.sent_num_not_report_dev += batch_sent_num
self.epoch_words += self.count_trg_words(trg)
self.epoch_loss += loss
def format_time(self, seconds):
return "{}-{}".format(int(seconds) // 86400,
time.strftime("%H:%M:%S", time.gmtime(seconds)))
def log_readable_and_structured(self, template, args):
if self.name: args["task_name"] = self.name
logger.info(template.format(**args), extra=args)
yaml_logger.info(args)
def report_train_process(self):
"""
Print training report if eval_train_every sents have been evaluated.
Return:
True if the training process is reported
"""
print_report = self.sent_num_not_report_train >= self.eval_train_every \
or self.sent_num == self.total_train_sent
if print_report:
self.sent_num_not_report_train = self.sent_num_not_report_train % self.eval_train_every
self.fractional_epoch = (self.epoch_num - 1) + self.sent_num / self.total_train_sent
this_report_time = time.time()
self.log_readable_and_structured(LossTracker.REPORT_TEMPLATE,
{"key": "train_loss", "data" : "train",
"epoch" : self.fractional_epoch,
"loss" : self.epoch_loss.sum() / self.epoch_words,
"words" : self.epoch_words,
"words_per_sec" : (self.epoch_words - self.last_report_words) / (this_report_time - self.last_report_train_time),
"time" : self.format_time(time.time() - self.start_time)})
if len(self.epoch_loss) > 1:
for loss_name, loss_values in self.epoch_loss.items():
self.log_readable_and_structured("- {loss_name} {loss:5.6f}",
{"key":"additional_train_loss",
"loss_name" : loss_name,
"loss" : loss_values / self.epoch_words})
self.last_report_words = self.epoch_words
self.last_report_train_time = this_report_time
return print_report
def new_dev(self):
"""
Clear dev counters for starting a new dev testing.
"""
self.dev_start_time = time.time()
def set_dev_score(self, dev_words, dev_score):
"""
Update dev counters for each iteration.
"""
self.dev_score = dev_score
self.dev_words = dev_words
def should_report_dev(self):
if self.eval_dev_every > 0:
return self.sent_num_not_report_dev >= self.eval_dev_every or (self.sent_num == self.total_train_sent)
else:
return self.sent_num_not_report_dev >= self.total_train_sent
def report_dev_and_check_model(self):
"""
Print dev testing report and check whether the dev loss is the best seen so far.
Return:
True if the dev loss is the best and required save operations
"""
this_report_time = time.time()
sent_num = self.eval_dev_every if self.eval_dev_every != 0 else self.total_train_sent
self.sent_num_not_report_dev = self.sent_num_not_report_dev % sent_num
self.fractional_epoch = (self.epoch_num - 1) + self.sent_num / self.total_train_sent
self.log_readable_and_structured(LossTracker.REPORT_TEMPLATE_DEV,
{"key" : "dev_loss",
"epoch" : self.fractional_epoch,
"score" : self.dev_score,
"words" : self.dev_words,
"words_per_sec" : self.dev_words / (this_report_time - self.dev_start_time),
"time" : self.format_time(this_report_time - self.start_time)
})
save_model = True
if self.best_dev_score is not None:
save_model = self.dev_score.better_than(self.best_dev_score)
if save_model:
self.best_dev_score = self.dev_score
logger.info(f"Epoch {self.fractional_epoch:.4f}: best dev score, writing out model")
return save_model
def report_auxiliary_score(self, score):
self.log_readable_and_structured(LossTracker.REPORT_TEMPLATE_DEV_AUX,
{"key": "auxiliary_score",
"epoch" : self.fractional_epoch,
"score" : score})
def count_trg_words(self, trg_words):
"""
Method for counting number of trg words.
"""
raise NotImplementedError('count_trg_words must be implemented in LossTracker subclasses')
def count_sent_num(self, obj):
"""
Method for counting number of sents.
"""
        raise NotImplementedError('count_sent_num must be implemented in LossTracker subclasses')
def clear_counters(self):
self.sent_num = 0
self.sent_num_not_report_dev = 0
self.sent_num_not_report_train = 0
def report_loss(self):
pass
class BatchLossTracker(LossTracker):
"""
A class to track training process and generate report for minibatch mode.
"""
def count_trg_words(self, trg_words):
trg_cnt = 0
for x in trg_words:
if type(x) == int:
trg_cnt += 1 if x != Vocab.ES else 0
else:
trg_cnt += sum([1 if y != Vocab.ES else 0 for y in x])
return trg_cnt
def count_sent_num(self, obj):
return len(obj)
|
import os
import glob
import subprocess
import random
import unittest
import time
import numpy as np
import readingdb as rdb
# make sure we're testing the version of readingdb in this dir.
assert os.path.dirname(os.path.abspath(rdb.__file__)) == \
os.path.dirname(os.path.abspath(__file__))
datadir = '_testdata'
readingdb = '../c6/reading-server'
log = '/dev/null'
port = int(random.random() * 5000) + 20000
class TestIface(unittest.TestCase):
def setUp(self):
try:
os.makedirs(datadir)
except OSError, e:
if e.errno != os.errno.EEXIST:
raise
cmd = [readingdb, '-p', str(port), '-d', datadir, '-c', '1']
self.log = open(log, 'a')
self.db = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=self.log)
# wait for startup or a fatal message
for x in xrange(0, 20):
l = self.db.stderr.readline()
if 'FATAL' in l:
raise Exception(l)
elif 'listening' in l:
break
self.conn = rdb.db_open('localhost', port)
def tearDown(self):
rdb.db_close(self.conn)
self.db.terminate()
self.db.wait()
self.log.close()
for f in glob.glob(os.path.join(datadir, '*')):
os.remove(f)
os.removedirs(datadir)
def infill_stream(self, stream):
for i in range(0, 1000):
data = [(x, x, x) for x in xrange(i * 100, i * 100 + 100)]
self.assertEqual(rdb.db_add(self.conn, stream, data), 1)
def test_simple(self):
self.infill_stream(1)
d = rdb.db_query(self.conn, 1, 0, 10000)
self.assertEqual(len(d), 10000)
for i in xrange(0, 10000):
self.assertEqual(d[i][0], i)
self.assertEqual(d[i][1], i)
def test_multi(self):
streams = range(1, int(1e4), int(1e3))
for i in streams:
self.infill_stream(i)
rdb.db_setup('localhost', port)
fetch = random.sample(streams, 3)
data = rdb.db_multiple(fetch, 0, 10000)
# check grabbing three random streams
self.assertEqual(len(data), 3)
for dv in data:
self.assertEqual(dv.shape, (10000, 2))
self.assertEqual(np.sum(dv[:, 0] - np.arange(0, 10000)), 0)
self.assertEqual(np.sum(dv[:, 1] - np.arange(0, 10000)), 0)
# grab some streams without data
data = rdb.db_multiple([2,3,4,6], 0, 10000)
self.assertEqual(len(data), 4)
for dv in data:
self.assertEqual(dv.shape, (0, 2))
if __name__ == '__main__':
unittest.main()
|
"""This module is useful to clean the firestore tables during the tests.
Thousands of entries could be generated and it won't be possible to remove them
from the Firestore UI
"""
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
from typing import List
from absl import app
from google.cloud import firestore
def _delete_collection(coll_ref, batch_size):
"""Deletes the collection documents in batches of batch_size.
Args:
    coll_ref: the Firestore collection reference whose documents will be deleted
    batch_size: maximum number of documents to delete per batch
  Returns:
    A recursive call to itself if more documents may remain,
    None otherwise
"""
docs = coll_ref.limit(batch_size).stream()
deleted = 0
for doc in docs:
doc.reference.delete()
deleted = deleted + 1
print('Items deleted ', deleted)
if deleted >= batch_size:
return _delete_collection(coll_ref, batch_size)
def main(argv: List[str]) -> None:
"""Deletes firestore collections between 2 dates.
Args:
argv: an array representing the input parameters:
argv[1]: a string array representing the GCP project to use
argv[2]: a string representing the name of the collection
argv[3]: an integer representing the start date in YYYYMMDD format
argv[4]: an integer representing the end date in YYYYMMDD format
"""
if len(argv) < 3:
raise app.UsageError('Too few command-line arguments.')
else:
print('Arguments ', argv)
db = firestore.Client(project=argv[1])
collection_name = argv[2]
if len(argv) > 3:
start = int(argv[3])
end = int(argv[4])
while start <= end:
collection_name = '{}_{}'.format(argv[2], start)
collection = db.collection(collection_name)
_delete_collection(collection, 1000)
start = start + 1
else:
collection = db.collection(collection_name)
_delete_collection(collection, 20)
if __name__ == '__main__':
app.run(main)
|
import boto3
import os
import json
region = 'us-east-2'
instances = list(json.loads((os.environ['EC2_INSTANCES'])).values())
ec2 = boto3.client('ec2', region_name=region)
def lambda_handler(event, context):
ec2.stop_instances(InstanceIds=instances)
print('stopped your instances: ' + str(instances))
|
from sensors.interfaces import GPIOs
from sensors.tachometer import Tachometer
GPIO_ADDRESS_A = 15
gpios = GPIOs([GPIO_ADDRESS_A, ])
interfaces = {
'gpios': gpios,
}
encoder = Tachometer(interfaces)
print(encoder.get_value())
|
class Solution(object):
def XXX(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
return self.search(root) != -1
def search(self, root):
if not root:
return 0
l_left = self.search(root.left)
if l_left == -1:
return -1
l_right = self.search(root.right)
if l_right == -1:
return -1
if abs(l_right - l_left) > 1:
return -1
else:
return max(l_left, l_right) + 1
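# Sketch of the idea behind search() above: it returns the height of a balanced
# subtree, or -1 as a sentinel as soon as any subtree is unbalanced, so the
# sentinel propagates to the root and XXX() only compares the final value with -1.
# For a left-only chain of three nodes, the root's children have heights 2 and 0,
# |2 - 0| > 1, hence -1 and a False result.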
|
#!/usr/bin/env python
import os
import sys
from flask_script import Manager
from {{cookiecutter.module_name}} import create_app
from {{cookiecutter.module_name}}.database import MyDatabase
sys.path.insert(0, os.getcwd())
manager = Manager(create_app)
@manager.command
def initdb():
try:
# FIXME replace this with yaml file config
db_url = 'mysql+pymysql://root:password@localhost:3306/demo'
MyDatabase(db_url=db_url).create_all()
print('database %s (re)initialized' % db_url)
except Exception as e:
print(e)
if __name__ == '__main__':
manager.run()
|
from .stickers import *
__red_end_user_data_statement__ = "No personal data is stored."
def setup(bot):
bot.add_cog(Stickers(bot))
|
from django.test import TestCase
from smartfields.dependencies import FileDependency
class MiscTestCase(TestCase):
def test_file_dependency(self):
self.assertEqual(
FileDependency(storage='foo', upload_to='somewhere', keep_orphans=True),
FileDependency(storage='foo', upload_to='somewhere', keep_orphans=True)
)
|
import IceRayPy
def Translate(
P_dll
,P_child : IceRayPy.core.object.Wrapper
,P_move
):
    pretender = IceRayPy.core.geometry.Pretender( P_dll, P_child.cast2Geometry(), P_child )
    translator = IceRayPy.core.geometry.transform.Translate( P_dll, pretender )
translator.move( P_move )
result = IceRayPy.core.object.Wrapper( P_dll )
result.geometrySet( translator )
return result
|
class Messages:
def __init__(self, database):
self.database = database
@property
def required_tables(self) -> dict:
return {
'message': {
'columns': [
'id BIGINT NOT NULL',
'channel_id BIGINT NOT NULL',
'author_id BIGINT',
'type VARCHAR(20)',
],
'constraints': [
'PRIMARY KEY (id)',
],
'indexes': [
'channel_id'
]
},
'message_info': {
'columns': [
'id BIGINT NOT NULL AUTO_INCREMENT',
'message_id BIGINT NOT NULL',
'name VARCHAR(50) NOT NULL',
'info TINYTEXT',
'user_id BIGINT',
],
'constraints': [
'PRIMARY KEY(id)',
'UNIQUE (message_id, name, user_id)',
('FOREIGN KEY (message_id) REFERENCES message(id) ' +
'ON DELETE CASCADE')
],
'indexes': [
'message_id',
'name'
]
}
}
async def get_message(
self, id: int, *, info: bool = False
) -> dict or None:
cnx = self.database.connection_object
cursor = cnx.cursor(dictionary=True)
cursor.execute(f'SELECT * FROM message WHERE id = {id}')
msg = cursor.fetchone()
cursor.close()
cnx.close()
if msg is not None and info:
info = await self.get_message_info(id=id)
if info is not None:
msg['info'] = info
return msg
async def get_message_info(
self, id: int, *, name: str = None, user_id: int = None
) -> list:
cnx = self.database.connection_object
cursor = cnx.cursor(dictionary=True)
query = f'SELECT * FROM message_info WHERE message_id = {id}'
if name is not None:
query += f' AND name = "{name}"'
if user_id is not None:
query += f' AND user_id = {user_id}'
cursor.execute(query)
info = cursor.fetchall()
cursor.close()
cnx.close()
return info
async def get_messages_by_info(self, *, name: str) -> list:
cnx = self.database.connection_object
cursor = cnx.cursor(dictionary=True)
cursor.execute(
'SELECT m.id, m.channel_id, m.author_id, m.type, ' +
'i.name as info_name, i.info, i.user_id as info_user_id ' +
'FROM message m JOIN' +
f' message_info i WHERE m.id = i.message_id AND i.name = "{name}"')
info = cursor.fetchall()
cursor.close()
cnx.close()
return info
async def add_message(
self, id: int, *, channel_id: int, type: str = None,
author_id: int = None, info: list = None
) -> None:
keys = 'id, channel_id, type, author_id'
values = '{}, {}, {}, {}'.format(
id, channel_id,
'NULL' if not type else f'"{type}"',
'NULL' if not author_id else author_id)
cnx = self.database.connection_object
cursor = cnx.cursor(buffered=True)
cursor.execute(f'INSERT INTO message({keys}) VALUES ({values})')
if info is not None and len(info) > 0:
cursor.execute('INSERT INTO message_info ({}) VALUES {}'.format(
'message_id, name, info, user_id',
', '.join('({}, "{}", "{}", {})'.format(
id, i.get('name'), i.get('info'),
'NULL' if not i.get('user_id') else i.get('user_id')
) for i in info)))
cnx.commit()
cursor.close()
cnx.close()
async def update_message_author(self, id, *, author_id):
cnx = self.database.connection_object
cursor = cnx.cursor(buffered=True)
cursor.execute(
'UPDATE message SET author_id = {} WHERE id = {}'.format(
'NULL' if not author_id else author_id, id))
cnx.commit()
cursor.close()
cnx.close()
async def delete_message(self, id: int) -> None:
cnx = self.database.connection_object
cursor = cnx.cursor(buffered=True)
cursor.execute(f'DELETE FROM message WHERE id = {id}')
cnx.commit()
cursor.close()
cnx.close()
async def add_message_info(
self, id: int, *, name: str, info: str = None, user_id: int = None
) -> None:
keys = 'message_id, name, info, user_id'
values = '{}, "{}", {}, {}'.format(
id, name,
'NULL' if not info else f'"{info}"',
"NULL" if not user_id else user_id)
cnx = self.database.connection_object
cursor = cnx.cursor(buffered=True)
cursor.execute(f'INSERT INTO message_info({keys}) VALUES ({values})')
cnx.commit()
cursor.close()
cnx.close()
async def delete_message_info(
self, id: int, *, name: str, user_id: int = None
) -> None:
cnx = self.database.connection_object
cursor = cnx.cursor(buffered=True)
cursor.execute('{} {} {}'.format(
'DELETE FROM message_info WHERE ',
f'message_id = {id} AND name = "{name}" ',
f'AND user_id = {"NULL" if not user_id else user_id}'))
cnx.commit()
cursor.close()
cnx.close()
|
import pytest
def test_qtdatavisualization():
"""Test the qtpy.QtDataVisualization namespace"""
# Using import skip here since with Python 3 you need to install another package
# besides the base `PyQt5` or `PySide2`.
# For example in the case of `PyQt5` you need `PyQtDataVisualization`
# QtDataVisualization
QtDataVisualization = pytest.importorskip("qtpy.QtDataVisualization")
assert QtDataVisualization.QScatter3DSeries is not None
assert QtDataVisualization.QSurfaceDataItem is not None
assert QtDataVisualization.QSurface3DSeries is not None
assert QtDataVisualization.QAbstract3DInputHandler is not None
assert QtDataVisualization.QHeightMapSurfaceDataProxy is not None
assert QtDataVisualization.QAbstractDataProxy is not None
assert QtDataVisualization.Q3DCamera is not None
assert QtDataVisualization.QAbstract3DGraph is not None
assert QtDataVisualization.QCustom3DVolume is not None
assert QtDataVisualization.Q3DInputHandler is not None
assert QtDataVisualization.QBarDataProxy is not None
assert QtDataVisualization.QSurfaceDataProxy is not None
assert QtDataVisualization.QScatterDataItem is not None
assert QtDataVisualization.Q3DLight is not None
assert QtDataVisualization.QScatterDataProxy is not None
assert QtDataVisualization.QValue3DAxis is not None
assert QtDataVisualization.Q3DBars is not None
assert QtDataVisualization.QBarDataItem is not None
assert QtDataVisualization.QItemModelBarDataProxy is not None
assert QtDataVisualization.Q3DTheme is not None
assert QtDataVisualization.QCustom3DItem is not None
assert QtDataVisualization.QItemModelScatterDataProxy is not None
assert QtDataVisualization.QValue3DAxisFormatter is not None
assert QtDataVisualization.QItemModelSurfaceDataProxy is not None
assert QtDataVisualization.Q3DScatter is not None
assert QtDataVisualization.QTouch3DInputHandler is not None
assert QtDataVisualization.QBar3DSeries is not None
assert QtDataVisualization.QAbstract3DAxis is not None
assert QtDataVisualization.Q3DScene is not None
assert QtDataVisualization.QCategory3DAxis is not None
assert QtDataVisualization.QAbstract3DSeries is not None
assert QtDataVisualization.Q3DObject is not None
assert QtDataVisualization.QCustom3DLabel is not None
assert QtDataVisualization.Q3DSurface is not None
assert QtDataVisualization.QLogValue3DAxisFormatter is not None
# QtDatavisualization
QtDatavisualization = pytest.importorskip("qtpy.QtDatavisualization")
assert QtDatavisualization.QScatter3DSeries is not None
assert QtDatavisualization.QSurfaceDataItem is not None
assert QtDatavisualization.QSurface3DSeries is not None
assert QtDatavisualization.QAbstract3DInputHandler is not None
assert QtDatavisualization.QHeightMapSurfaceDataProxy is not None
assert QtDatavisualization.QAbstractDataProxy is not None
assert QtDatavisualization.Q3DCamera is not None
assert QtDatavisualization.QAbstract3DGraph is not None
assert QtDatavisualization.QCustom3DVolume is not None
assert QtDatavisualization.Q3DInputHandler is not None
assert QtDatavisualization.QBarDataProxy is not None
assert QtDatavisualization.QSurfaceDataProxy is not None
assert QtDatavisualization.QScatterDataItem is not None
assert QtDatavisualization.Q3DLight is not None
assert QtDatavisualization.QScatterDataProxy is not None
assert QtDatavisualization.QValue3DAxis is not None
assert QtDatavisualization.Q3DBars is not None
assert QtDatavisualization.QBarDataItem is not None
assert QtDatavisualization.QItemModelBarDataProxy is not None
assert QtDatavisualization.Q3DTheme is not None
assert QtDatavisualization.QCustom3DItem is not None
assert QtDatavisualization.QItemModelScatterDataProxy is not None
assert QtDatavisualization.QValue3DAxisFormatter is not None
assert QtDatavisualization.QItemModelSurfaceDataProxy is not None
assert QtDatavisualization.Q3DScatter is not None
assert QtDatavisualization.QTouch3DInputHandler is not None
assert QtDatavisualization.QBar3DSeries is not None
assert QtDatavisualization.QAbstract3DAxis is not None
assert QtDatavisualization.Q3DScene is not None
assert QtDatavisualization.QCategory3DAxis is not None
assert QtDatavisualization.QAbstract3DSeries is not None
assert QtDatavisualization.Q3DObject is not None
assert QtDatavisualization.QCustom3DLabel is not None
assert QtDatavisualization.Q3DSurface is not None
assert QtDatavisualization.QLogValue3DAxisFormatter is not None
|
from laserpony import app
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flaskext.markdown import Markdown
from flask_mongoengine import MongoEngine
from itsdangerous import URLSafeTimedSerializer
#Bcrypt
bcrypt = Bcrypt(app)
#Login Manager
login_manager = LoginManager(app)
login_manager.login_view = 'login'
#Markdown Parser
markdown = Markdown(app)
#Database
db = MongoEngine(app)
#Login Serializer
login_serializer = URLSafeTimedSerializer(app.secret_key)
|
import scrapy
from datetime import datetime
from spoon.items import SinaTopSummaryItem
class SinatopsummarySpider(scrapy.Spider):
name = 'sinaTopSummary'
allowed_domains = ['weibo.com']
start_urls = ['https://s.weibo.com/top/summary/']
def parse(self, response):
print(response.url)
last_update = datetime.now()
tr_list = response.xpath('//tr')
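        # Each <tr> is one trending topic: the td-02 cell carries a numeric <span>
        # (stored as 'ranking'), the topic text in its <a>, and the relative link.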
for tr in tr_list:
item = SinaTopSummaryItem()
item['ranking'] = tr.xpath('./td[@class="td-02"]/span/text()').extract_first()
item['summary'] = tr.xpath('./td[@class="td-02"]/a/text()').extract_first()
item['link'] = response.urljoin(tr.xpath('./td[@class="td-02"]/a/@href').extract_first())
item['last_update'] = last_update
yield item
|
import logging
import os
import re
import sys
import xml.etree.ElementTree as ET
import czech_stemmer
from RDRPOSTagger_python_3.pSCRDRtagger.RDRPOSTagger import RDRPOSTagger
from RDRPOSTagger_python_3.Utility.Utils import readDictionary
os.chdir('../..') # because above modules do chdir ... :/
from rouge_2_0.rouge_20 import print_rouge_scores
import separator
import textrank
logger = logging.getLogger('summarizer')
logging.basicConfig(level=logging.DEBUG)
STOPWORDS = set()
with open('stopwords.txt', 'r') as f:
for w in f:
STOPWORDS.add(w.strip())
def pos_tag(sentences):
r = RDRPOSTagger()
# Load the POS tagging model
r.constructSCRDRtreeFromRDRfile('./RDRPOSTagger_python_3/Models/UniPOS/UD_Czech-CAC/train.UniPOS.RDR')
# Load the lexicon
rdr_pos_dict = readDictionary('./RDRPOSTagger_python_3/Models/UniPOS/UD_Czech-CAC/train.UniPOS.DICT')
tagged_sentences = []
for sentence in sentences:
tagged_sentence_orig = r.tagRawSentence(rdr_pos_dict, sentence)
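        # tagRawSentence returns a single string of "word/TAG" tokens, so split it
        # back into (word, tag) pairs.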
tagged_words = tagged_sentence_orig.split()
tagged_sentence = []
for t_w in tagged_words:
word, tag = t_w.split('/')
tagged_sentence.append((word, tag))
tagged_sentences.append(tagged_sentence)
return tagged_sentences
def remove_stop_words(sentences, keep_case=False, is_tokenized=True, return_tokenized=True):
if is_tokenized:
tokenized_sentences = sentences
else:
tokenized_sentences = tokenize(sentences)
sentences_without_stopwords = []
for sentence_orig in tokenized_sentences:
sentence_without_stopwords = []
for word in sentence_orig:
if word.lower() not in STOPWORDS:
sentence_without_stopwords.append(word if keep_case else word.lower())
sentences_without_stopwords.append(
sentence_without_stopwords if return_tokenized else ' '.join(sentence_without_stopwords)
)
return sentences_without_stopwords
def tokenize(sentences, additional_split_chars=('/', '|')):
tokenized = []
for s in sentences:
for split_char in additional_split_chars:
s = s.replace(split_char, ' ')
tokenized.append([w.strip(' ,.!?"():;-') for w in s.split()])
# tokenized.append(s.split())
return tokenized
def summarize(text):
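    """Return a list of 3-15 sentences selected by TextRank as the summary of `text`."""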
# SPLIT TO PARAGRAPHS
pre_paragraphs = text.split('\n')
paragraphs = []
for i, p in enumerate(pre_paragraphs):
if not re.match(r'^\s*$', p) and (i == len(pre_paragraphs) - 1 or re.match(r'^\s*$', pre_paragraphs[i+1])):
paragraphs.append(p)
# print(f'Num of paragraphs: {len(paragraphs)}')
# for i, p in enumerate(paragraphs):
# print(f'par#{i+1}: {p}')
# SPLIT TO SENTENCES
sentences = separator.separate(text)
print(f'Num of sentences: {len(sentences)}')
for i, s in enumerate(sentences):
print(f'#{i+1}: {s}')
# TOKENIZE
stem = False
if stem:
tokenized_sentences = [[czech_stemmer.cz_stem(word, aggressive=True) for word in sentence]
for sentence in tokenize(sentences)]
else:
tokenized_sentences = tokenize(sentences)
# REMOVE STOPWORDS
tokenized_sentences_without_stopwords = remove_stop_words(tokenized_sentences, keep_case=False)
sentences_without_stopwords_case = remove_stop_words(sentences, keep_case=True, is_tokenized=False,
return_tokenized=False)
print('===Sentences without stopwords===')
for i, s in enumerate(tokenized_sentences_without_stopwords):
print(f'''#{i+1}: {' '.join(s)}''')
print('===Sentences without stopwords CASE===')
for i, s in enumerate(sentences_without_stopwords_case):
print(f'''#{i+1}: {s}''')
# POS-TAG
tagged_sentences = pos_tag(sentences_without_stopwords_case)
print('=====Tagged_sentences=====')
for i, s in enumerate(tagged_sentences):
print(f'''#{i+1}: {s}''')
counter = 0
summary_length = max(min(round(len(sentences) / 4), 15), 3) # length between 3-15 sentences
ranked_sentence_indexes = textrank.textrank(tokenized_sentences, True, '3-1-0.0001')
print(f'ranked_sentence_indexes: {ranked_sentence_indexes}')
# summary = ''
# # add 1st sentence always
# summary += f'{sentences[0]}\n'
# counter += 1
# ranked_sentence_indexes.remove(0)
# # # add also 2nd sentence if it is in top 50%
# if 1 in ranked_sentence_indexes[:len(ranked_sentence_indexes) // 2]:
# summary += f'{sentences[1]}\n'
# counter += 1
# ranked_sentence_indexes.remove(1)
# for sentence_index in sorted(ranked_sentence_indexes[:summary_length - counter]):
# if counter == summary_length:
# break
# summary += f'{sentences[sentence_index]}\n'
# counter += 1
# summary += f'::::: Sentences in original: {len(sentences)}. Sentences in summary: {summary_length}. :::::'
# add 1st sentence always
summary = []
summary.append(sentences[0])
counter += 1
ranked_sentence_indexes.remove(0)
# # add also 2nd sentence if it is in top 50%
if 1 in ranked_sentence_indexes[:len(ranked_sentence_indexes) // 2]:
summary.append(sentences[1])
counter += 1
ranked_sentence_indexes.remove(1)
for sentence_index in sorted(ranked_sentence_indexes[:summary_length - counter]):
if counter == summary_length:
break
summary.append(sentences[sentence_index])
counter += 1
return summary
def main():
if len(sys.argv) > 1:
filename = sys.argv[1]
with open(filename, 'r') as f:
content = f.read()
summary = summarize(content)
print(f'===Original text===\n{content}\n')
        print('===Summary===\n' + '\n'.join(summary))
else:
my_dir = os.path.dirname(os.path.realpath(__file__))
article_files = os.listdir(f'{my_dir}/articles')
total_articles = 0
for filename in article_files:
file_name, file_extension = os.path.splitext(filename)
print(f'=========================Soubor: {filename}=============================')
print('========================================================================')
tree = ET.parse(f'{my_dir}/articles/{filename}')
root = tree.getroot()
articles = list(root)
article_number = 0
for article in articles:
title = article.find('nadpis').text.strip()
content = article.find('text').text.strip()
print(f'Článek {article_number}: {title}')
summary = '\n'.join(summarize(content))
output_file_name = f'{file_name}-{article_number}_system.txt'
with open(f'{my_dir}/rouge_2_0/summarizer/system/{output_file_name}', 'w') as output_file:
output_file.write(summary)
article_number += 1
total_articles += 1
print(f'Tested {total_articles} articles.')
print(f'Resulting summaries stored to {my_dir}/rouge_2_0/summarizer/system/')
print_rouge_scores(rougen=1)
print_rouge_scores(rougen=2)
if __name__ == "__main__":
main()
|
"""A program for simulating personal finances."""
from argparse import ArgumentParser
from .simulation import Simulation
def main():
"""Simulates personal finances."""
parser = ArgumentParser(description="Simulate personal finances.")
parser.add_argument(
"--start",
metavar="YEAR",
type=int,
required=True,
help="The year in which to start the simulation",
)
parser.add_argument(
"--end",
metavar="YEAR",
type=int,
required=True,
help="The year in which to end the simulation",
)
args = parser.parse_args()
simulation = Simulation(args.start, args.end)
simulation.run()
if __name__ == "__main__":
main()
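# Example invocation (package/module name assumed): python -m finance_sim --start 2020 --end 2060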
|
# This module enables a test to provide a handler for "hooked://..." urls
# passed into serial.serial_for_url. To do so, set the value of
# serial_class_for_url from your test to a function with the same API as
# ExampleSerialClassForUrl. Or assign your class to Serial.
from . import NoOpSerial
def ExampleSerialClassForUrl(url):
"""Implementation of serial_class_for_url called by serial.serial_for_url.
    Returns the url, possibly modified, and a factory function to be called to
    create an instance of a SerialBase sub-class (or something that behaves like one).
You can return a class as that factory function, as calling a class creates
an instance of that class.
serial.serial_for_url will call that factory function with None as the
port parameter (the first), and after creating the instance will assign
the url to the port property of the instance.
Returns:
A tuple (url, factory).
"""
return url, Serial
# Assign to this global variable from a test to override this default behavior.
serial_class_for_url = ExampleSerialClassForUrl
# Or assign your own class to this global variable.
Serial = NoOpSerial
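# Example override from a test (illustrative; FakeSerial stands for any
# test-defined class that behaves like serial.SerialBase, and the import path
# of this module depends on the project layout):
#
#   the_module.Serial = FakeSerial
#   port = serial.serial_for_url('hooked://arbitrary-name')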
|
import os
import threading
import socket
import select
import time
import queue
import cwipc
class _Sink_Passthrough(threading.Thread):
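    """Thread that pulls pointclouds from an internal queue and forwards their
    serialized packets to the wrapped sink."""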
FOURCC="cwiU"
SELECT_TIMEOUT=0.1
QUEUE_FULL_TIMEOUT=0.001
def __init__(self, sink, verbose=False, nodrop=False):
threading.Thread.__init__(self)
self.name = 'cwipc_util._SinkPassthrough'
self.sink = sink
if hasattr(self.sink, 'set_fourcc'):
self.sink.set_fourcc(self.FOURCC)
self.producer = None
        self.nodrop = nodrop
        self.queue = queue.Queue(maxsize=2)
        self.verbose = verbose
self.stopped = False
self.started = False
self.pointcounts = []
def set_encoder_params(self, **kwargs):
raise RuntimeError("cwipc_sink_passthrough: no encoder parameters supported")
def start(self):
threading.Thread.start(self)
self.sink.start()
self.started = True
def stop(self):
if self.verbose: print(f"passthrough: stopping thread")
self.stopped = True
self.sink.stop()
if self.started:
self.join()
def set_producer(self, producer):
self.producer = producer
self.sink.set_producer(producer)
def is_alive(self):
return not self.stopped
def run(self):
if self.verbose: print(f"passthrough: thread started")
try:
while not self.stopped and self.producer and self.producer.is_alive():
pc = self.queue.get()
if not pc:
print(f"passthrough: get() returned None")
continue
self.pointcounts.append(pc.count())
cpc = pc.get_packet()
self.sink.feed(cpc)
pc.free()
finally:
self.stopped = True
if self.verbose: print(f"passthrough: thread stopping")
def feed(self, pc):
try:
if self.nodrop:
self.queue.put(pc)
else:
self.queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)
except queue.Full:
if self.verbose: print(f"passthrough: queue full, drop pointcloud")
pc.free()
def statistics(self):
self.print1stat('pointcount', self.pointcounts)
if hasattr(self.sink, 'statistics'):
self.sink.statistics()
def print1stat(self, name, values, isInt=False):
count = len(values)
if count == 0:
print('passthrough: {}: count=0'.format(name))
return
minValue = min(values)
maxValue = max(values)
avgValue = sum(values) / count
if isInt:
fmtstring = 'passthrough: {}: count={}, average={:.3f}, min={:d}, max={:d}'
else:
fmtstring = 'passthrough: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'
print(fmtstring.format(name, count, avgValue, minValue, maxValue))
def cwipc_sink_passthrough(sink, verbose=False, nodrop=False):
"""Create a cwipc_sink object sends serialized uncompressed pointclouds to another sink"""
return _Sink_Passthrough(sink, verbose=verbose, nodrop=nodrop)
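# Minimal usage sketch (illustrative; `raw_sink` stands for any object with the
# start/stop/feed/set_producer/statistics interface used above, and `grabber`
# for a producer object exposing an is_alive() method):
#
#   sink = cwipc_sink_passthrough(raw_sink, verbose=True)
#   sink.set_producer(grabber)
#   sink.start()
#   ... call sink.feed(pc) for each captured pointcloud ...
#   sink.stop()
#   sink.statistics()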
|
import logging
from .articleparser import ArticleParser
from crawler.webpage import WebPage
from pony.orm import db_session
from bs4 import BeautifulSoup
from data import Article, Author
from text_processing import TextProcessor
import re
import dateutil.parser
import urllib.parse
class ArxivParser(ArticleParser):
def __init__(self, text_processor: TextProcessor):
super().__init__(text_processor)
self._logger = logging.getLogger(self.__class__.__name__)
@db_session
def parse(self, web_page: WebPage) -> bool:
page_soup = BeautifulSoup(web_page.text, "html.parser")
abstract_section = page_soup.find("div", {"id": "abs"})
if not abstract_section:
return False
link_to_pdf_relative = abstract_section \
.find("div", {"class": "extra-services"}) \
.find("a", text=re.compile("PDF.*"))
if link_to_pdf_relative:
link = link_to_pdf_relative.get("href")
link_to_pdf = urllib.parse.urljoin(web_page.url, link)
else:
link_to_pdf = ''
title = ArxivParser._get_clean_text(
abstract_section.find("h1", {"class": "title mathjax"})
)
abstract = ArxivParser._get_clean_text(
abstract_section.find("blockquote", {"class": "abstract mathjax"})
)
authors = list(
map(lambda a: a.text, abstract_section.find("div", {"class": "authors"}).find_all("a"))
)
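        # The submission-history block lists all versions; the text node right
        # after the "[v1]" label holds the original submission date.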
date_text = abstract_section\
.find("div", {"class": "submission-history"})\
.find("b", text="[v1]")\
.next_sibling
date = dateutil.parser.parse(date_text, fuzzy=True)
self._store_parsed_article(raw_abstract=abstract,
title=title,
link_to_pdf=link_to_pdf,
authors=authors,
date=date,
article_hash=web_page.page_hash)
return True
@staticmethod
def _get_clean_text(soup: BeautifulSoup) -> str:
for s in soup.find_all("span"):
s.extract()
return soup.text
|
#!/usr/bin/env python2.7
import json
import os
import sys
import auth
import common
def jsonrpc_change_password():
"""Accept a JSONRPC-style change password, with parameters like so:
{"jsonrpc":"2.0","method":"use.setpassword","params":["username","password", "oldpassword"],"id":1}
On successful login, set two cookies: The auth cookie, used for primary
authentication, is HttpOnly so JS cannot access it in case of an XSS. The
CSRF token, used to validate that POSTs come from the same origin, is
accessible to JS so it can be included in <form>'s.
"""
data = json.loads(sys.stdin.read())
try:
params = data["params"]
username = params[0]
new_password = params[1]
old_password = params[2]
except KeyError, e:
common.render_error(e.__str__())
except IndexError, e:
common.render_error(e.__str__())
a = auth.Auth()
if a.is_password(old_password):
a.save_password(new_password)
print "Content-Type: application/json"
print a.login_headers()
print
print "{}"
else:
common.render_error("Old password is incorrect.")
jsonrpc_change_password()
|
WARNING = 'Warning'
print('DunderImported:', __name__)
|
#!/usr/bin/env python3
import collections
import glob
import os
import subprocess
import sys
import numpy as np
import pandas as pd
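# Usage: script.py BATCH_SIZE
# For every data set directory found under the current path, this repeatedly
# subsamples data.csv and each *_latents.csv embedding, computes persistence
# diagrams with the external `vietoris_rips` tool, and reports the mean and
# standard deviation of the distances produced by `topological_distance`.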
batch_size = int(sys.argv[1])
n_iterations = 3
if __name__ == '__main__':
for root, dirs, files in os.walk('./'):
data_sets = dirs
for data_set in data_sets:
original_data = pd.read_csv(
os.path.join(root, data_set, 'data.csv'),
header=None
)
original_data = original_data.values
files = sorted(glob.glob(
os.path.join(root, data_set, '*_latents.csv')
)
)
bottlenecks = collections.defaultdict(list)
for i in range(n_iterations):
# Ensures that we never take more than the number of
# samples, regardless of the batch size parameter.
if original_data.shape[0] < batch_size:
batch_size = original_data.shape[0]
random_indices = np.random.choice(
original_data.shape[0],
batch_size,
replace=False
)
X_sample = original_data[random_indices]
np.savetxt('/tmp/Xw.csv', X_sample, delimiter=' ')
diagram = subprocess.run(
['vietoris_rips',
'-n',
'/tmp/Xw.csv',
'1e8',
'1'],
capture_output=True,
)
diagram = diagram.stdout
diagram = diagram.decode('utf-8')
with open('/tmp/D1w.txt', 'w') as f:
f.write(diagram)
D1 = np.genfromtxt('/tmp/D1w.txt')
for filename in files:
name = os.path.basename(filename)
name = name[:name.find('_')]
latent_space = pd.read_csv(
filename,
header=0
)
latent_space = latent_space[['0', '1']]
latent_space = latent_space.values
Y_sample = latent_space[random_indices]
np.savetxt('/tmp/Yw.csv', Y_sample, delimiter=' ')
diagram = subprocess.run(
['vietoris_rips',
'-n',
'/tmp/Yw.csv',
'1e8',
'1'],
capture_output=True,
)
diagram = diagram.stdout
diagram = diagram.decode('utf-8')
with open('/tmp/D2w.txt', 'w') as f:
f.write(diagram)
D2 = np.genfromtxt('/tmp/D2w.txt')
bottleneck = subprocess.run(
['topological_distance',
'-w',
'-p',
'1',
'/tmp/D1w.txt',
'/tmp/D2w.txt'
],
capture_output=True,
)
bottleneck = bottleneck.stdout
bottleneck = bottleneck.decode('utf-8')
bottleneck = bottleneck.split('\n')[0]
bottleneck = bottleneck.split(' ')
bottleneck = float(bottleneck[1])
bottlenecks[name].append(bottleneck)
#l2 = np.linalg.norm(D1 - D2)
#print(data_set, name, l2)
for name in sorted(bottlenecks.keys()):
print(batch_size,
data_set,
name,
np.mean(bottlenecks[name]),
np.std(bottlenecks[name])
)
sys.stdout.flush()
print('')
|