Dataset schema (one row per source file; column name: type and observed range):

- blob_id: string, 40 characters
- directory_id: string, 40 characters
- path: string, 2-616 characters
- content_id: string, 40 characters
- detected_licenses: list, 0-69 entries
- license_type: string, 2 classes
- repo_name: string, 5-118 characters
- snapshot_id: string, 40 characters
- revision_id: string, 40 characters
- branch_name: string, 4-63 characters
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k-686M, may be null (⌀)
- star_events_count: int64, 0-209k
- fork_events_count: int64, 0-110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2-10.3M
- extension: string, 246 classes
- content: string, 2-10.3M characters
- authors: list, 1 entry
- author_id: string, 0-212 characters

Each data row below lists these fields in this order, one value per line, separated by lines containing "|".
eddc44cc0f93e2816fbaba52cbd57eaa11038425
|
7d1fe1979bccfbe78308c047d40cb50fe87e6fdf
|
/Data Structure and Algorithms/iterunpack.py
|
5d4594b86a7d8e371d1b1ce89fb8597f184a0452
|
[] |
no_license
|
hbenr/pycookbook
|
ddd13ae13be94438efd20cf4376fe9fbde1da095
|
52e313eaa7b0d28f1accd7794aa323ab62647b9c
|
refs/heads/master
| 2021-08-06T06:41:10.553534
| 2017-11-03T21:59:42
| 2017-11-03T21:59:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
#! python3
from statistics import mean
# Use star to unpack multiple variables
def drop_first_last(grades):
    _, *middle, _ = grades
    print(middle)
    return mean(middle)
# Especially useful with tagged tuples
def do_foo(x, y):
    print('foo', x, y)
def do_bar(s):
    print('bar', s)
def unpack_tagged(records):
    for tag, *args in records:
        if tag == 'foo':
            do_foo(*args)
        elif tag == 'bar':
            do_bar(*args)
# print(drop_first_last([1, 2, 3, 4, 5, 6, 7, 8]))
unpack_tagged([('foo', 1, 2), ('bar', 'ok'), ('foo', 3, 4)])
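# Illustrative usage sketch (added for clarity, not part of the original file);
# the grade list is made up:
#     drop_first_last([1, 2, 3, 4, 5, 6, 7, 8])  # prints [2, 3, 4, 5, 6, 7] and returns 4.5
# unpack_tagged(...) above prints, in order:
#     foo 1 2
#     bar ok
#     foo 3 4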
|
[
"homero.hbr@gmail.com"
] |
homero.hbr@gmail.com
|
a86128cb30226105923c6e41dc98e7d5f5e37ed2
|
674649dc02390c4a60b9c62b586b81d405969047
|
/train_SGD.py
|
c4c73831851732e09a5eaf08ae3159dec8a7c794
|
[] |
no_license
|
weijiawu/Pytorch_Classification
|
709513be3e019a896ef11a1739829a97bb99c9db
|
7609a1d809590c1423f4ed0ee1f0d918954355a9
|
refs/heads/master
| 2022-12-06T00:51:26.716590
| 2020-09-01T07:38:22
| 2020-09-01T07:38:22
| 285,811,133
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,664
|
py
|
import torch
from torch.utils import data
from torch import nn
from torch.optim import lr_scheduler
import os
import time
from tqdm import tqdm
import numpy as np
from PIL import Image, ImageDraw
import argparse
import os
import random
import torch.backends.cudnn as cudnn
from datasets.dataloader import Dateloader
from network.models import create_model
import torch.optim as optim
import logging
import logging.config
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
parser = argparse.ArgumentParser(description='classification')
# Model path
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--resume', default="/data/glusterfs_cv_04/11121171/AAAI_EAST/Baseline/EAST_v1/model_save/model_epoch_826.pth", type=str,
help='Checkpoint state_dict file to resume training from')
parser.add_argument('--data_path', default="/data/glusterfs_cv_04/11121171/data/CIFAR/cifar10", type=str,
help='the test image of target domain ')
parser.add_argument('--save_path', default="/data/glusterfs_cv_04/11121171/AAAI_NL/Baseline_classification/classification/model_save", type=str,
help='save model')
parser.add_argument('--Backbone', type=str, default="VGG19", help='FeatureExtraction stage. '
'ResNet18|ResNet34|ResNet50'
'MobileNet_v1|MobileNet_v2'
'VGG11|VGG16|VGG19'
'efficientnet-b0|efficientnet-b1|efficientnet-b2'
'shufflenet_v2_x0_5'
)
parser.add_argument('--Datasets', type=str, default="CIFAR10", help=' ImageNet|Clothing|CIFAR10|CIFAR100')
parser.add_argument('--num_classes', type=str, default=10, help=' classification')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
# Training strategy
parser.add_argument('--epoch_iter', default=8000, type = int,
help='the max epoch iter')
parser.add_argument('--batch_size', default=200, type = int,
help='batch size of training')
parser.add_argument('--lr', '--learning-rate', default=0.05, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
help='Momentum value for optim')
parser.add_argument('--weight_decay', default=2e-5, type=float,
help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
help='Gamma update for SGD')
parser.add_argument('--num_workers', default=10, type=int,
help='Number of workers used in dataloading')
opt = parser.parse_args()
def train(opt):
""" dataset preparation """
print("dataset preparation ...")
dataset = Dateloader(opt.data_path, mode="train",dataset = opt.Datasets)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
drop_last=True,
pin_memory=True)
dataset_val = Dateloader(opt.data_path,mode="test", dataset=opt.Datasets)
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.num_workers)
print('| Building net...')
model = create_model(opt.Backbone,opt.num_classes)
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9, weight_decay=opt.weight_decay)
# optimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones =[80, 130, 160, 190, 220], gamma=0.2)
CEloss = nn.CrossEntropyLoss()
best_acc = 0
for epoch in range(opt.epoch_iter):
model.train()
lr_scheduler.step()
epoch_loss = 0
epoch_time = time.time()
for i, (image,gt) in enumerate(data_loader):
start_time = time.time()
inputs, labels = image.cuda(), gt.cuda()
optimizer.zero_grad()
outputs = model(inputs)
loss = CEloss(outputs, labels)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
print('Epoch is [{}/{}], mini-batch is [{}/{}], time consumption is {:.8f}, batch_loss is {:.8f}'.format( \
epoch + 1, opt.epoch_iter, i + 1, int(len(data_loader)), time.time() - start_time, loss.item()))
if epoch>10 :
best_acc = test(epoch,model,data_loader_val,best_acc)
model.train()
print("----------------------------------------------------------")
print(" best_acc:",best_acc)
print(" lr:", optimizer.param_groups[0]['lr'])
print("----------------------------------------------------------")
print('epoch_loss is {:.8f}, epoch_time is {:.8f},current_time is {:.8f}'.format(epoch_loss / int(len(data_loader)),time.time() - epoch_time,time.time()))
print(time.asctime(time.localtime(time.time())))
def test(epoch, model,val_loader,best_acc):
model.eval()
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in tqdm(enumerate(val_loader)):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
_, predicted = torch.max(outputs, 1)
total += targets.size(0)
correct += predicted.eq(targets).cpu().sum().item()
acc = 100. * correct / total
print("\n| Validation\t Net Acc: %.2f%%" % acc)
if acc > best_acc:
best_acc = acc
print("best acc",best_acc)
print('| Saving Best Net ...')
# torch.save(model.state_dict(), save_point)
torch.save(model.state_dict(), os.path.join(opt.save_path, f'{opt.Backbone}-{opt.Datasets}'+'.pth'))
return best_acc
if __name__ == '__main__':
if not opt.exp_name:
opt.exp_name = f'{opt.Backbone}-{opt.Datasets}'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./workspace/{opt.exp_name}', exist_ok=True)
# Simple configuration of the logging output destination and log level
logging.basicConfig(
filename=os.path.join(f'./workspace/{opt.exp_name}',"logger.log"),
level=logging.INFO,filemode='w')
logging.debug('debug message')
logging.info('info message')
logging.error('error message')
logging.critical('critical message')
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.num_workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
train(opt)
|
[
"wwj123@zju.edu.cn"
] |
wwj123@zju.edu.cn
|
2c654c66171126629e07bb09e90b3eaa77e578e5
|
f7fa5c6da8cbde648a15ae13376d2e49f778b191
|
/hintgen/display.py
|
2cc5b881b7d94107f7183f286903b456edf0f2b6
|
[
"MIT"
] |
permissive
|
akonoroshi/ITAP-django
|
05bee4a26ed65f3a6ba2e7cb0cb692c9af52081d
|
17d690e0f4c3bbc6bb73406bcaedb673225d34e8
|
refs/heads/master
| 2020-12-14T01:54:31.811692
| 2020-01-29T15:08:56
| 2020-01-29T15:08:56
| 234,598,240
| 1
| 0
|
MIT
| 2020-01-17T17:15:39
| 2020-01-17T17:15:38
| null |
UTF-8
|
Python
| false
| false
| 20,993
|
py
|
import ast
from .tools import log
#===============================================================================
# These functions are used for displaying ASTs. printAst displays the tree,
# while printFunction displays the syntax
#===============================================================================
# TODO: add AsyncFunctionDef, AsyncFor, AsyncWith, AnnAssign, Nonlocal, Await, YieldFrom, FormattedValue, JoinedStr, Starred
def printFunction(a, indent=0):
s = ""
if a == None:
return ""
if not isinstance(a, ast.AST):
log("display\tprintFunction\tNot AST: " + str(type(a)) + "," + str(a), "bug")
return str(a)
t = type(a)
if t in [ast.Module, ast.Interactive, ast.Suite]:
for line in a.body:
s += printFunction(line, indent)
elif t == ast.Expression:
s += printFunction(a.body, indent)
elif t == ast.FunctionDef:
for dec in a.decorator_list:
s += (indent * 4 * " ") + "@" + printFunction(dec, indent) + "\n"
s += (indent * 4 * " ") + "def " + a.name + "(" + \
printFunction(a.args, indent) + "):\n"
for stmt in a.body:
s += printFunction(stmt, indent+1)
# TODO: returns
elif t == ast.ClassDef:
for dec in a.decorator_list:
s += (indent * 4 * " ") + "@" + printFunction(dec, indent) + "\n"
s += (indent * 4 * " ") + "class " + a.name
if len(a.bases) > 0 or len(a.keywords) > 0:
s += "("
for base in a.bases:
s += printFunction(base, indent) + ", "
for keyword in a.keywords:
s += printFunction(keyword, indent) + ", "
s += s[:-2] + ")"
s += ":\n"
for stmt in a.body:
s += printFunction(stmt, indent+1)
elif t == ast.Return:
s += (indent * 4 * " ") + "return " + \
printFunction(a.value, indent) + "\n"
elif t == ast.Delete:
s += (indent * 4 * " ") + "del "
for target in a.targets:
s += printFunction(target, indent) + ", "
if len(a.targets) >= 1:
s = s[:-2]
s += "\n"
elif t == ast.Assign:
s += (indent * 4 * " ")
for target in a.targets:
s += printFunction(target, indent) + " = "
s += printFunction(a.value, indent) + "\n"
elif t == ast.AugAssign:
s += (indent * 4 * " ")
s += printFunction(a.target, indent) + " " + \
printFunction(a.op, indent) + "= " + \
printFunction(a.value, indent) + "\n"
elif t == ast.For:
s += (indent * 4 * " ")
s += "for " + \
printFunction(a.target, indent) + " in " + \
printFunction(a.iter, indent) + ":\n"
for line in a.body:
s += printFunction(line, indent + 1)
if len(a.orelse) > 0:
s += (indent * 4 * " ")
s += "else:\n"
for line in a.orelse:
s += printFunction(line, indent + 1)
elif t == ast.While:
s += (indent * 4 * " ")
s += "while " + printFunction(a.test, indent) + ":\n"
for line in a.body:
s += printFunction(line, indent + 1)
if len(a.orelse) > 0:
s += (indent * 4 * " ")
s += "else:\n"
for line in a.orelse:
s += printFunction(line, indent + 1)
elif t == ast.If:
s += (indent * 4 * " ")
s += "if " + printFunction(a.test, indent) + ":\n"
for line in a.body:
s += printFunction(line, indent + 1)
branch = a.orelse
# elifs
while len(branch) == 1 and type(branch[0]) == ast.If:
s += (indent * 4 * " ")
s += "elif " + printFunction(branch[0].test, indent) + ":\n"
for line in branch[0].body:
s += printFunction(line, indent + 1)
branch = branch[0].orelse
if len(branch) > 0:
s += (indent * 4 * " ")
s += "else:\n"
for line in branch:
s += printFunction(line, indent + 1)
elif t == ast.With:
s += (indent * 4 * " ")
s += "with "
for item in a.items:
s += printFunction(item, indent) + ", "
if len(a.items) > 0:
s = s[:-2]
s += ":\n"
for line in a.body:
s += printFunction(line, indent + 1)
elif t == ast.Raise:
s += (indent * 4 * " ")
s += "raise"
if a.exc != None:
s += " " + printFunction(a.exc, indent)
# TODO: what is cause?!?
s += "\n"
elif type(a) == ast.Try:
s += (indent * 4 * " ") + "try:\n"
for line in a.body:
s += printFunction(line, indent + 1)
for handler in a.handlers:
s += printFunction(handler, indent)
if len(a.orelse) > 0:
s += (indent * 4 * " ") + "else:\n"
for line in a.orelse:
s += printFunction(line, indent + 1)
if len(a.finalbody) > 0:
s += (indent * 4 * " ") + "finally:\n"
for line in a.finalbody:
s += printFunction(line, indent + 1)
elif t == ast.Assert:
s += (indent * 4 * " ")
s += "assert " + printFunction(a.test, indent)
if a.msg != None:
s += ", " + printFunction(a.msg, indent)
s += "\n"
elif t == ast.Import:
s += (indent * 4 * " ") + "import "
for n in a.names:
s += printFunction(n, indent) + ", "
if len(a.names) > 0:
s = s[:-2]
s += "\n"
elif t == ast.ImportFrom:
s += (indent * 4 * " ") + "from "
s += ("." * a.level if a.level != None else "") + a.module + " import "
for name in a.names:
s += printFunction(name, indent) + ", "
if len(a.names) > 0:
s = s[:-2]
s += "\n"
elif t == ast.Global:
s += (indent * 4 * " ") + "global "
for name in a.names:
s += name + ", "
s = s[:-2] + "\n"
elif t == ast.Expr:
s += (indent * 4 * " ") + printFunction(a.value, indent) + "\n"
elif t == ast.Pass:
s += (indent * 4 * " ") + "pass\n"
elif t == ast.Break:
s += (indent * 4 * " ") + "break\n"
elif t == ast.Continue:
s += (indent * 4 * " ") + "continue\n"
elif t == ast.BoolOp:
s += "(" + printFunction(a.values[0], indent)
for i in range(1, len(a.values)):
s += " " + printFunction(a.op, indent) + " " + \
printFunction(a.values[i], indent)
s += ")"
elif t == ast.BinOp:
s += "(" + printFunction(a.left, indent)
s += " " + printFunction(a.op, indent) + " "
s += printFunction(a.right, indent) + ")"
elif t == ast.UnaryOp:
s += "(" + printFunction(a.op, indent) + " "
s += printFunction(a.operand, indent) + ")"
elif t == ast.Lambda:
s += "lambda "
s += printFunction(a.arguments, indent) + ": "
s += printFunction(a.body, indent)
elif t == ast.IfExp:
s += "(" + printFunction(a.body, indent)
s += " if " + printFunction(a.test, indent)
s += " else " + printFunction(a.orelse, indent) + ")"
elif t == ast.Dict:
s += "{ "
for i in range(len(a.keys)):
s += printFunction(a.keys[i], indent)
s += " : "
s += printFunction(a.values[i], indent)
s += ", "
if len(a.keys) >= 1:
s = s[:-2]
s += " }"
elif t == ast.Set:
# Empty sets must be initialized in a special way
if len(a.elts) == 0:
s += "set()"
else:
s += "{"
for elt in a.elts:
s += printFunction(elt, indent) + ", "
s = s[:-2]
s += "}"
elif t == ast.ListComp:
s += "["
s += printFunction(a.elt, indent) + " "
for gen in a.generators:
s += printFunction(gen, indent) + " "
s = s[:-1]
s += "]"
elif t == ast.SetComp:
s += "{"
s += printFunction(a.elt, indent) + " "
for gen in a.generators:
s += printFunction(gen, indent) + " "
s = s[:-1]
s += "}"
elif t == ast.DictComp:
s += "{"
s += printFunction(a.key, indent) + " : " + \
printFunction(a.value, indent) + " "
for gen in a.generators:
s += printFunction(gen, indent) + " "
s = s[:-1]
s += "}"
elif t == ast.GeneratorExp:
s += "("
s += printFunction(a.elt, indent) + " "
for gen in a.generators:
s += printFunction(gen, indent) + " "
s = s[:-1]
s += ")"
elif t == ast.Yield:
s += "yield " + printFunction(a.value, indent)
elif t == ast.Compare:
s += "(" + printFunction(a.left, indent)
for i in range(len(a.ops)):
s += " " + printFunction(a.ops[i], indent)
if i < len(a.comparators):
s += " " + printFunction(a.comparators[i], indent)
if len(a.comparators) > len(a.ops):
for i in range(len(a.ops), len(a.comparators)):
s += " " + printFunction(a.comparators[i], indent)
s += ")"
elif t == ast.Call:
s += printFunction(a.func, indent) + "("
for arg in a.args:
s += printFunction(arg, indent) + ", "
for key in a.keywords:
s += printFunction(key, indent) + ", "
if len(a.args) + len(a.keywords) >= 1:
s = s[:-2]
s += ")"
elif t == ast.Num:
if a.n != None:
if (type(a.n) == complex) or (type(a.n) != complex and a.n < 0):
s += '(' + str(a.n) + ')'
else:
s += str(a.n)
elif t == ast.Str:
if a.s != None:
val = repr(a.s)
if val[0] == '"': # There must be a single quote in there...
val = "'''" + val[1:len(val)-1] + "'''"
s += val
#s += "'" + a.s.replace("'", "\\'").replace('"', "\\'").replace("\n","\\n") + "'"
elif t == ast.Bytes:
s += str(a.s)
elif t == ast.NameConstant:
s += str(a.value)
elif t == ast.Attribute:
s += printFunction(a.value, indent) + "." + str(a.attr)
elif t == ast.Subscript:
s += printFunction(a.value, indent) + "[" + printFunction(a.slice, indent) + "]"
elif t == ast.Name:
s += a.id
elif t == ast.List:
s += "["
for elt in a.elts:
s += printFunction(elt, indent) + ", "
if len(a.elts) >= 1:
s = s[:-2]
s += "]"
elif t == ast.Tuple:
s += "("
for elt in a.elts:
s += printFunction(elt, indent) + ", "
if len(a.elts) > 1:
s = s[:-2]
elif len(a.elts) == 1:
s = s[:-1] # don't get rid of the comma! It clarifies that this is a tuple
s += ")"
elif t == ast.Starred:
s += "*" + printFunction(a.value, indent)
elif t == ast.Ellipsis:
s += "..."
elif t == ast.Slice:
if a.lower != None:
s += printFunction(a.lower, indent)
s += ":"
if a.upper != None:
s += printFunction(a.upper, indent)
if a.step != None:
s += ":" + printFunction(a.step, indent)
elif t == ast.ExtSlice:
for dim in a.dims:
s += printFunction(dim, indent) + ", "
if len(a.dims) > 0:
s = s[:-2]
elif t == ast.Index:
s += printFunction(a.value, indent)
elif t == ast.comprehension:
s += "for "
s += printFunction(a.target, indent) + " "
s += "in "
s += printFunction(a.iter, indent) + " "
for cond in a.ifs:
s += "if "
s += printFunction(cond, indent) + " "
s = s[:-1]
elif t == ast.ExceptHandler:
s += (indent * 4 * " ") + "except"
if a.type != None:
s += " " + printFunction(a.type, indent)
if a.name != None:
s += " as " + a.name
s += ":\n"
for line in a.body:
s += printFunction(line, indent + 1)
elif t == ast.arguments:
# Defaults are only applied AFTER non-defaults
defaultStart = len(a.args) - len(a.defaults)
for i in range(len(a.args)):
s += printFunction(a.args[i], indent)
if i >= defaultStart:
s += "=" + printFunction(a.defaults[i - defaultStart], indent)
s += ", "
if a.vararg != None:
s += "*" + printFunction(a.vararg, indent) + ", "
if a.kwarg != None:
s += "**" + printFunction(a.kwarg, indent) + ", "
if a.vararg == None and a.kwarg == None and len(a.kwonlyargs) > 0:
s += "*, "
if len(a.kwonlyargs) > 0:
for i in range(len(a.kwonlyargs)):
s += printFunction(a.kwonlyargs[i], indent)
s += "=" + printFunction(a.kw_defaults, indent) + ", "
if (len(a.args) > 0 or a.vararg != None or a.kwarg != None or len(a.kwonlyargs) > 0):
s = s[:-2]
elif t == ast.arg:
s += a.arg
if a.annotation != None:
s += ": " + printFunction(a.annotation, indent)
elif t == ast.keyword:
s += a.arg + "=" + printFunction(a.value, indent)
elif t == ast.alias:
s += a.name
if a.asname != None:
s += " as " + a.asname
elif t == ast.withitem:
s += printFunction(a.context_expr, indent)
if a.optional_vars != None:
s += " as " + printFunction(a.optional_vars, indent)
else:
ops = { ast.And : "and", ast.Or : "or",
ast.Add : "+", ast.Sub : "-", ast.Mult : "*", ast.Div : "/", ast.Mod : "%",
ast.Pow : "**", ast.LShift : "<<", ast.RShift : ">>", ast.BitOr : "|",
ast.BitXor : "^", ast.BitAnd : "&", ast.FloorDiv : "//",
ast.Invert : "~", ast.Not : "not", ast.UAdd : "+", ast.USub : "-",
ast.Eq : "==", ast.NotEq : "!=", ast.Lt : "<", ast.LtE : "<=",
ast.Gt : ">", ast.GtE : ">=", ast.Is : "is", ast.IsNot : "is not",
ast.In : "in", ast.NotIn : "not in"}
if type(a) in ops:
return ops[type(a)]
if type(a) in [ast.Load, ast.Store, ast.Del, ast.AugLoad, ast.AugStore, ast.Param]:
return ""
log("display\tMissing type: " + str(t), "bug")
return s
def formatContext(trace, verb):
traceD = {
"value" : { "Return" : ("return statement"),
"Assign" : ("right side of the assignment"),
"AugAssign" : ("right side of the assignment"),
"Expression" : ("expression"),
"Dict Comprehension" : ("left value of the dict comprehension"),
"Yield" : ("yield expression"),
"Repr" : ("repr expression"),
"Attribute" : ("attribute value"),
"Subscript" : ("outer part of the subscript"),
"Index" : ("inner part of the subscript"),
"Keyword" : ("right side of the keyword"),
"Starred" : ("value of the starred expression"),
"Name Constant" : ("constant value") },
"values" : { "Print" : ("print statement"),
"Boolean Operation" : ("boolean operation"),
"Dict" : ("values of the dictionary") },
"name" : { "Function Definition" : ("function name"),
"Class Definition" : ("class name"),
"Except Handler" : ("name of the except statement"),
"Alias" : ("alias") },
"names" : { "Import" : ("import"),
"ImportFrom" : ("import"),
"Global" : ("global variables") },
"elt" : { "List Comprehension" : ("left element of the list comprehension"),
"Set Comprehension" : ("left element of the set comprehension"),
"Generator" : ("left element of the generator") },
"elts" : { "Set" : ("set"),
"List" : ("list"),
"Tuple" : ("tuple") },
"target" : { "AugAssign" : ("left side of the assignment"),
"For" : ("target of the for loop"),
"Comprehension" : ("target of the comprehension") },
"targets" : { "Delete" : ("delete statement"),
"Assign" : ("left side of the assignment") },
"op" : { "AugAssign" : ("assignment"),
"Boolean Operation" : ("boolean operation"),
"Binary Operation" : ("binary operation"),
"Unary Operation" : ("unary operation") },
"ops" : { "Compare" : ("comparison operation") },
"arg" : { "Keyword" : ("left side of the keyword"),
"Argument" : ("argument") },
"args" : { "Function Definition" : ("function arguments"), # single item
"Lambda" : ("lambda arguments"), # single item
"Call" : ("arguments of the function call"),
"Arguments" : ("function arguments") },
"key" : { "Dict Comprehension" : ("left key of the dict comprehension") },
"keys" : { "Dict" : ("keys of the dictionary") },
"kwarg" : { "Arguments" : ("keyword arg") },
"kwargs" : { "Call" : ("keyword args of the function call") }, # single item
"body" : { "Module" : ("main codebase"), # list
"Interactive" : ("main codebase"), # list
"Expression" : ("main codebase"),
"Suite" : ("main codebase"), # list
"Function Definition" : ("function body"), # list
"Class Definition" : ("class body"), # list
"For" : ("lines of the for loop"), # list
"While" : ("lines of the while loop"), # list
"If" : ("main lines of the if statement"), # list
"With" : ("lines of the with block"), # list
"Try" : ("lines of the try block"), # list
"Execute" : ("exec expression"),
"Lambda" : ("lambda body"),
"Ternary" : ("ternary body"),
"Except Handler" : ("lines of the except block") }, # list
"orelse" : { "For" : ("else part of the for loop"), # list
"While" : ("else part of the while loop"), # list
"If" : ("lines of the else statement"), # list
"Try" : ("lines of the else statement"), # list
"Ternary" : ("ternary else value") },
"test" : { "While" : ("test case of the while statement"),
"If" : ("test case of the if statement"),
"Assert" : ("assert expression"),
"Ternary" : ("test case of the ternary expression") },
"generators" : { "List Comprehension" : ("list comprehension"),
"Set Comprehension" : ("set comprehension"),
"Dict Comprehension" : ("dict comprehension"),
"Generator" : ("generator") },
"decorator_list" : { "Function Definition" : ("function decorators"), # list
"Class Definition" : ("class decorators") }, # list
"iter" : { "For" : ("iterator of the for loop"),
"Comprehension" : ("iterator of the comprehension") },
"type" : { "Raise" : ("raised type"),
"Except Handler" : ("type of the except statement") },
"left" : { "Binary Operation" : ("left side of the binary operation"),
"Compare" : ("left side of the comparison") },
"bases" : { "Class Definition" : ("class bases") },
"dest" : { "Print" : ("print destination") },
"nl" : { "Print" : ("comma at the end of the print statement") },
"context_expr" : { "With item" : ("context of the with statement") },
"optional_vars" : { "With item" : ("context of the with statement") }, # single item
"inst" : { "Raise" : ("raise expression") },
"tback" : { "Raise" : ("raise expression") },
"handlers" : { "Try" : ("except block") },
"finalbody" : { "Try" : ("finally block") }, # list
"msg" : { "Assert" : ("assert message") },
"module" : { "Import From" : ("import module") },
"level" : { "Import From" : ("import module") },
"globals" : { "Execute" : ("exec global value") }, # single item
"locals" : { "Execute" : ("exec local value") }, # single item
"right" : { "Binary Operation" : ("right side of the binary operation") },
"operand" : { "Unary Operation" : ("value of the unary operation") },
"comparators" : { "Compare" : ("right side of the comparison") },
"func" : { "Call" : ("function call") },
"keywords" : { "Call" : ("keywords of the function call") },
"starargs" : { "Call" : ("star args of the function call") }, # single item
"attr" : { "Attribute" : ("attribute of the value") },
"slice" : { "Subscript" : ("inner part of the subscript") },
"lower" : { "Slice" : ("left side of the subscript slice") },
"upper" : { "Slice" : ("right side of the subscript slice") },
"step" : { "Step" : ("rightmost side of the subscript slice") },
"dims" : { "ExtSlice" : ("slice") },
"ifs" : { "Comprehension" : ("if part of the comprehension") },
"vararg" : { "Arguments" : ("vararg") },
"defaults" : { "Arguments" : ("default values of the arguments") },
"asname" : { "Alias" : ("new name") },
"items" : { "With" : ("context of the with statement") }
}
# Find what type this is by trying to find the closest container in the path
i = 0
while i < len(trace):
if type(trace[i]) == tuple:
if trace[i][0] == "value" and trace[i][1] == "Attribute":
pass
elif trace[i][0] in traceD:
break
elif trace[i][0] in ["id", "n", "s"]:
pass
else:
log("display\tformatContext\tSkipped field: " + str(trace[i]), "bug")
i += 1
else:
return "" # this is probably covered by the line number
field,typ = trace[i]
if field in traceD and typ in traceD[field]:
context = traceD[field][typ]
return verb + "the " + context
else:
log("display\tformatContext\tMissing field: " + str(field) + "," + str(typ), "bug")
return ""
def formatList(node, field):
if type(node) != list:
return None
s = ""
nameMap = { "body" : "line", "targets" : "value", "values" : "value", "orelse" : "line",
"names" : "name", "keys" : "key", "elts" : "value", "ops" : "operator",
"comparators" : "value", "args" : "argument", "keywords" : "keyword" }
# Find what type this is
itemType = nameMap[field] if field in nameMap else "line"
if len(node) > 1:
s = "the " + itemType + "s: "
for line in node:
s += formatNode(line) + ", "
elif len(node) == 1:
s = "the " + itemType + " "
f = formatNode(node[0])
if itemType == "line":
f = "[" + f + "]"
s += f
return s
def formatNode(node):
"""Create a string version of the given node"""
if node == None:
return ""
t = type(node)
if t == str:
return "'" + node + "'"
elif t == int or t == float:
return str(node)
elif t == list:
return formatList(node, None)
else:
return printFunction(node, 0)
|
[
"krivers@andrew.cmu.edu"
] |
krivers@andrew.cmu.edu
|
06d3ca1ff65101808264d7e631adce978fdf8578
|
4ca3a3958f19650e5861f748727cb55cdedb8841
|
/hnAnalysis/analyzeUnigrams.py
|
d22fd39b1a74399280d4b1d84cd36f150dfa67c7
|
[] |
no_license
|
mhwalker/Projects
|
6dad8c53f82713d6e1d71e3c704363a60efa7950
|
fa4c66ea701b4f966e2ed561c76a286678fc1de4
|
refs/heads/master
| 2021-01-10T06:33:12.420141
| 2016-03-22T21:07:45
| 2016-03-22T21:07:45
| 54,507,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
import simplejson as json
import operator
unifile = open("skimunigrams.json","r")
unigrams = json.load(unifile)
unifile.close()
sortedunigrams = sorted(unigrams.iteritems(), key = operator.itemgetter(1),reverse=True)
i = 0
while True:
    print i,sortedunigrams[i][0],sortedunigrams[i][1]
    i += 1
    if sortedunigrams[i][1] < 100000:break
|
[
"matthewwalker@cdopscam003.fnal.gov"
] |
matthewwalker@cdopscam003.fnal.gov
|
45aa22217986347a95b741afab8a15d7af4413fe
|
b552dd6049a73608bac2a30248a75cf96fc270ce
|
/Zajecia_1/Zadanie_Domowe/sec2/zad6.py
|
30eca64d43d34d48c6d61fe2e5bb2e0602a618d1
|
[] |
no_license
|
marakon/zajecia_python
|
54ac1f516dd4acf0bc12f9e3ad72af1ff7287e76
|
7cf01383f0faad9f2c08f77627a9817fe9db25d2
|
refs/heads/master
| 2020-05-19T21:40:44.381507
| 2019-06-10T16:59:59
| 2019-06-10T16:59:59
| 185,230,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
# Exercise 6
# Both the fridge and the elevator have a height, width and depth.
# Find out how much space is left in the elevator when the fridge is inside.
# Assume the fridge's dimensions will always be smaller than the elevator's (is that likely?)
# The input and output should be understandable to the user.
print("-----------Does my fridge fit?-----------")
print("Please enter the height, length and depth of your fridge and the elevator.")
print("The script will check whether your fridge fits and how much space will be left.")
fridge_a = float(input("Height of fridge: "))
fridge_b = float(input("Length of fridge: "))
fridge_c = float(input("Depth of the fridge: "))
elevator_a = float(input("Height of the elevator: "))
elevator_b = float(input("Length of the elevator: "))
elevator_c = float(input("Depth of the elevator: "))
if fridge_a > elevator_a:
    print("Your fridge is too high! :O")
    exit()
if fridge_b > elevator_b:
    print("Your fridge is too long! :O")
    exit()
if fridge_c > elevator_c:
    print("Your fridge is too deep! :O")
    exit()
print("Nice, your fridge fits into the elevator!")
print("A few numbers:")
print("Elevator volume: ", elevator_a * elevator_b * elevator_c)  # volume is the product, not the sum, of the dimensions
print("Fridge volume: ", fridge_a * fridge_b * fridge_c)
|
[
"mateuszosinski96@gmail.com"
] |
mateuszosinski96@gmail.com
|
fd5b3ff54c12e5557de206770406abd6edc0f7b5
|
81d82c13d53744f04ed1afe1fc1a7536950d34c5
|
/03.Loops/while_demo.py
|
6201fb230f621a972bb7eb67a5eb65ac63ecdd60
|
[] |
no_license
|
giridhersadineni/pythonsrec
|
991950880dc262eecd39d533ddbd978d37780301
|
87b843a4d8a5beac4292a58c3f373f8b590a598c
|
refs/heads/master
| 2021-12-22T04:33:22.153763
| 2021-12-08T03:06:26
| 2021-12-08T03:06:26
| 199,015,630
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
# i=1 # initialization
# while(i<=100):
# print(i)
# i = i + 1
# else:
# print(i)
# print("Loop has ended")
name = "PYTHON"
i=0
while (i<6):
    print(name[i])
    i = i + 1
for character in "Python":
    print(character)
for n in [1,1,435,6,5464,576,76,76,8,"Hello"]:
    print(n,end="\n")
for i in range(100):
    print(i)
else:
    print(i)
# // initialization
# int i=10;
# while(i<10){
# printf("%d",i);
# i++;
# }
# i=10 // initialisation expression
# do
# {
# printf("%d",i);
# i++; // update statements
# }while(i<10) // conditional expression
# for(i=0; i<10; i++){
# }
|
[
"giridher@hotmail.com"
] |
giridher@hotmail.com
|
192b0eb6591ffc8f5d377fb8b523bb5cae674a98
|
344055834858dbea4a745d5855a20b669f50b808
|
/setup.py
|
1823587d41de7eb5b3cf2d35d4c9ff69d081289d
|
[] |
no_license
|
wd5/psh
|
d1fd4b4a520080626d91c9311b2d5c8a43f66535
|
33df4636dcbdb66121454214f3d684b3021e3a1e
|
refs/heads/master
| 2021-01-16T20:06:10.882649
| 2012-12-21T08:09:55
| 2012-12-21T08:39:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
"""psh installation script."""
from __future__ import unicode_literals
from setuptools import find_packages, setup
from setuptools.command.test import test as Test
class PyTest(Test):
    def finalize_options(self):
        Test.finalize_options(self)
        self.test_args = [ "tests" ]
        self.test_suite = True
    def run_tests(self):
        import pytest
        pytest.main(self.test_args)
description = """\
psh allows you to spawn processes in Unix shell-style way.
Unix shell is very convenient for spawning processes, connecting them into
pipes, etc., but it has a very limited language which is often not suitable
for writing complex programs. Python is a very flexible and rich language
which is used in a wide variety of application domains, but its standard
subprocess module is very limited. psh combines the power of Python language
and an elegant shell-style way to execute processes.
Complete documentation is available at http://konishchevdmitry.github.com/psh/\
"""
if __name__ == "__main__":
setup(
name = "psh",
version = "0.2.3",
description = "Process management library",
long_description = description,
url = "http://konishchevdmitry.github.com/psh/",
license = "GPL3",
author = "Dmitry Konishchev",
author_email = "konishchev@gmail.com",
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
platforms = [ "unix", "linux", "osx" ],
install_requires = [ "psys", "psh" ],
packages = find_packages(),
cmdclass = { "test": PyTest },
tests_require = [ "psys", "pytest" ],
)
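# Usage note (added for clarity, not part of the original file): the PyTest command class
# registered through cmdclass wires the standard "test" command to pytest, so the suite in
# the "tests" directory can be run with:
#     python setup.py test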
|
[
"konishchev@gmail.com"
] |
konishchev@gmail.com
|
06bb3fc516d29ae2feb46b1894c9dc30af3fdd6b
|
d5215cc7c775734b1edb4dbcece77ce766866892
|
/venv/bin/pip2
|
1c7d225bcdb9d1183377bb4c7031eba8c2f3905c
|
[] |
no_license
|
victorevector/FischerRandom-Web
|
c01e4e71eca4afabb1f0a2a5b07590b046246cec
|
6d72c635b529417ccbc98bbc4d338b081c0d8df0
|
refs/heads/master
| 2020-12-24T13:36:58.511008
| 2015-03-10T19:07:50
| 2015-03-10T19:07:50
| 31,973,724
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
#!/Users/victorestebanfimbres/django_workspace/fischer_random/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"victorevector@gmail.com"
] |
victorevector@gmail.com
|
|
72926195d8325cb4e33079fffa1f2b92619e47ba
|
bfc1b107b2ce8c664b17be7d96b93bf69aaa8665
|
/lab_03_zadania/1.py
|
c54d527df02fc52a4eae08211442ca223127a8c4
|
[] |
no_license
|
bulwan/wizualizacja_danych
|
db16c97da765646a71a8a794030f8014022cbc19
|
e305914105f42d22d42deb4e10a09b181534254f
|
refs/heads/main
| 2023-05-01T07:16:23.954859
| 2021-05-26T11:59:18
| 2021-05-26T11:59:18
| 346,389,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
A=[1/x for x in range(1, 11)]
B=[2 ** i for i in range(10)]
C=[x for x in B if x % 4 == 0]
print(A)
print(B)
print(C)
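# Expected output (added for clarity, not part of the original file):
# A: [1.0, 0.5, 0.333..., 0.25, 0.2, 0.1666..., 0.1428..., 0.125, 0.111..., 0.1]
# B: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
# C: [4, 8, 16, 32, 64, 128, 256, 512]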
|
[
"noreply@github.com"
] |
bulwan.noreply@github.com
|
443156317a05eae0013f4af0a70968cd51185b81
|
ef187d259d33e97c7b9ed07dfbf065cec3e41f59
|
/work/atcoder/arc/arc062/D/answers/30340_oyodr.py
|
109239b4bbde5980a01c2c90b87d791e6776af19
|
[] |
no_license
|
kjnh10/pcw
|
847f7295ea3174490485ffe14ce4cdea0931c032
|
8f677701bce15517fb9362cc5b596644da62dca8
|
refs/heads/master
| 2020-03-18T09:54:23.442772
| 2018-07-19T00:26:09
| 2018-07-19T00:26:09
| 134,586,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
S = input()
n_p = S.count("p")
N = len(S)
ans = N // 2 - n_p
print(ans)
|
[
"kojinho10@gmail.com"
] |
kojinho10@gmail.com
|
868fb2901002bfd62248db1dfa59b55c9b15e74e
|
8dd834ec83f1d2b1e2517514892529b8c07a9765
|
/Django_platfrom/Django_platfrom/urls.py
|
d685ce50dcfa1232c1734448c0a57865c7696a5c
|
[] |
no_license
|
LucarYang/python
|
f48d33a628cd02a21b1994a85b8c747d622e157d
|
2afee28ceff1e1304ef7e3a5fdd4a5ea412ce912
|
refs/heads/master
| 2020-04-28T08:14:09.235680
| 2019-11-06T08:40:55
| 2019-11-06T08:40:55
| 175,119,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
"""Django_platfrom URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from CommonApps import views
from gameConfig import views as game_views
from game_issue import views as issue_views
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^signup/',views.signup,name='signup'),
    url(r'^login/$', views.login, name='login'),
    url(r'^logout/$', views.logout, name='logout'),
    url(r'^userInfo/$', views.userInfo, name='userInfo'),
    path('admin/', admin.site.urls),
    url(r'^gameList/$', game_views.gameList, name='gameList'),
    url(r'^issue_upload/$', issue_views.uploadIssue, name='uploadIssue'),
]
|
[
"v.yanglu@shandagames.com"
] |
v.yanglu@shandagames.com
|
b9b504067996746019a5981337cfef30eb7882d5
|
5e37047149caaf5bf4543d2f6eb8ceca65ee12b2
|
/Hijacked/Dico_msg_Carla.py
|
d4e4b1a11f239123256eaef8754d3adbf1ec0abd
|
[] |
no_license
|
ilyes-annou/Hijacked
|
8946f693d64964731c605955a207f9008e9d5fc3
|
7aabe4ff3b804376a9f841ff03878688b95ac847
|
refs/heads/master
| 2021-05-21T02:49:44.328130
| 2020-04-14T15:22:09
| 2020-04-14T15:22:09
| 252,508,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
msg={"1": "C'est qui?", "2": "euuuh on s'est rencontres ou?", "3": "hmmm ca me dit rien mais tu as peut etre raison",
"4": "C'est rare de passer un bon moment avec moi haha", "5": "Beaugosse dis donc :)", "6": "HIHI", "7": "Non j'suis timide",
"8": "Je prefere qu'on apprenne a se connaitre.", "9": "bah je vais en cours et apres je rentre chez moi, et toi?",
"10": "mmmh pourquoi pas on va ou?", "11": "interressant haha", "12": "C'etait un super moment !", "13": "Nn desole ma mere m'attends ...",
"14": "Paco?", "15": "T'es la?", "16": "?????????????", "17": "Je sais la verite maintenant ... tu es un charo !",
"18": "J'aimerais qu'on coupe les ponts ... donc supprime tout ce que t'as de moi stp ...", "19": "Paco t'es serieux??? je t'aimais vraiment moi!",
"20": "T'es le pire homme ...", "21": "OK j'accepte... mais promets moi de ne rien partager","22":""}
msg1={"1": "C'est du harcelement, je vais appeler la police, tu vas etre arrete",
"2": "Paco ... aaah a la soiree ! Comment t'as eu num ?",
"3": "aah peut etre oui, je fais souvent n'importe quoi quand je suis bourree hihi",
"4": "Aaahhh si je me souviens on avait grave passe un bon moment", "5": "euuuh pourquoi tu m'envoies ton visage?",
"6": "2secondes je me fais belle ;)", "7": "Ok attends 2 secondes ;)", "8": ";)", "9": "bah je vais en cours et apres je rentre chez moi, et toi?",
"10": "mmmh pourquoi pas on va ou?", "11": "interressant haha", "12": "J'ai adore! Je veux vite te revoir!", "13": "OK attends moi j'arrive ;)",
"14": "BB?", "15": "Pourquoi tu reponds pas?", "16": "?????????????", "17": "Tu peux supprimer les photos que je t'ai envoyees stp Paco ?",
"18": "Appeler la police! Tu seras vite arrete ...", "19": "Tu veux quoi enflure?", "20": "... on peut pas negocier?",
"21": "Va te faire f*****! Espece de sale en****!!","22":""}
msg2={"6": "Je devrai?", "7": "Et alors??"}
y=0
L=list(msg)
L1=list(msg1)
L2=list(msg2)
|
[
"noreply@github.com"
] |
ilyes-annou.noreply@github.com
|
eae3bce8fa4c9de33cdba89c8eae2bff45361d49
|
8c367be1487120323533089f40a8a8884aa3e158
|
/architectures/eval.py
|
9addc03f794792b9e35cd4a9ee3f878714c53031
|
[
"Apache-2.0"
] |
permissive
|
readerbench/IRVIN
|
eeade83f57eae6d512f8e98b2c1e04812695af35
|
de894da6b694699f0aa8d67967aab184dbd3e4df
|
refs/heads/main
| 2023-07-10T11:24:14.742369
| 2021-08-20T08:21:18
| 2021-08-20T08:21:18
| 396,875,923
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
import numpy as np
from sklearn.metrics import confusion_matrix
# File used to store functions that measure performance and other features from results.
class Performance:
    def precision(self, ypred, ytrue):  # real pos/estim pos : 1 --> every time we estimate pos, it's pos
        return np.sum(ytrue*ypred)/np.sum(ypred)
    def recall(self, ypred, ytrue):  # part of pos really detected : 1 --> every time it's pos, we detect it
        return np.sum(ytrue*ypred)/np.sum(ytrue)
    def F_score(self, ypred, ytrue, beta=1):
        p = self.precision(ytrue, ypred)
        r = self.recall(ytrue, ypred)
        return (1+beta**2)*p*r/(beta**2*p+r)
    def accuracy(self, ypred, ytrue):
        return np.sum(ypred==ytrue)/len(ypred)
    def confusion_matrix(self, ypred, ytrue):
        return confusion_matrix(ytrue, ypred)
def show_top(classifier, list_col):
    feature_names = np.array(list_col)  # list of str
    top = np.argsort(classifier.coef_[0])
    return feature_names[top]
# print("%s: %s" % ('RELEVANT', " ".join(feature_names[top10])))
def show_top10(classifier, list_col):
    feature_names = np.array(list_col)
    top10 = np.argsort(classifier.coef_[0])[-10:]
    # print(classifier.coef_[top10])
    # print(top10)
    return "%s: %s" % ('RELEVANT', " ".join(feature_names[top10]))
|
[
"pierre.frode_de_la_foret@mines-paristech.fr"
] |
pierre.frode_de_la_foret@mines-paristech.fr
|
df65af6fc345dcd9f0bacda3172e5a0908b0a872
|
d6d9bc2a898bcd0dc4af44b6f432196e4358d8b7
|
/python/hw02/hw02.py
|
5464b221a1be2dd8d354075b6ac3825afca35f08
|
[] |
no_license
|
z80/hw
|
2e287c2f835cd055f5ef531bdb094e50dd5f4bee
|
1d368078372ebce445077d66792852d0864ca3c9
|
refs/heads/master
| 2021-01-10T19:37:14.697887
| 2015-08-02T08:59:52
| 2015-08-02T08:59:52
| 13,366,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
#!/usr/bin/env python
import sys
import math
import random
import os
import flip_coins
if len(sys.argv) < 2:
    print "3\nNumber of Tests\t100000\nNumber of Coins\t1000\nFlip\t10"
    sys.exit(0)
#This function is a stub for your code for the coin flipping simulation, here it just returns three random values for v_1, v_rand, and v_min
def flip_coins (coins, flip):
    vone = random.gauss (0.75, 0.05)
    vrnd = random.gauss (0.55, 0.05)
    vmin = random.gauss (0.35, 0.05)
    return (vone, vrnd, vmin)
parameters = [float(x) for x in sys.argv[1:-2]]
row_id = int(sys.argv[-2])
out_file = sys.argv[-1]
tmp_file = out_file + ".tmp"
tests = int (parameters[0])
coins = int (parameters[1])
flip = int (parameters[2])
fout = open (tmp_file, 'w')
fout.write ("Test::number,V_one::number,V_rnd::number,V_min::number\n")
for t in range (tests):
    vone, vrnd, vmin = flip_coins (coins, flip)
    fout.write (str(t) + ',' + str(vone) + ',' + str(vrnd) + ', '+ str(vmin) + '\n')
fout.close ()
os.rename (tmp_file, out_file)
|
[
"bashkirov.sergey@gmail.com"
] |
bashkirov.sergey@gmail.com
|
4ec77dda1f863652b8ede97e01fa33e69f3b479b
|
5fc09fdf075e8384fd85b0001f232d2ea384340f
|
/source/Planet.py
|
c543a2dfd0ce47c8f42f16b5f6ac86c7baf769ef
|
[] |
no_license
|
karlmitterhauser/spacesim
|
c9e11edcacf7061f060fa73ae2795a646e2bbc19
|
11e31ce4d7f750db4b34b4b1ffee2dec6c7dc414
|
refs/heads/master
| 2020-03-14T02:16:17.310841
| 2018-06-11T09:15:40
| 2018-06-11T09:15:40
| 131,395,910
| 0
| 1
| null | 2018-05-19T10:17:32
| 2018-04-28T09:38:09
|
Python
|
UTF-8
|
Python
| false
| false
| 771
|
py
|
'''
This is an abstract planet class.
Created on 28 Apr 2018
@author: Klaus
'''
from abc import ABC, abstractmethod
import string
import random
class Planet(ABC):
    '''
    There are 3 types of planets: agriculture, industry and high-tech.
    Depending on the planet type, the wares have different prices.
    '''
    def __init__(self):
        '''
        Constructor
        '''
        super().__init__()
        prices = {'wheat': 0, 'iron': 0, 'phone': 0}
        self.prices = prices
        self.name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
    def getPrices(self):
        return self.prices
    @abstractmethod
    def setPrices(self):
        pass
    def getName(self):
        return self.name
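# Illustrative sketch (added for clarity, not part of the original file): Planet is abstract,
# so it is meant to be used through a concrete subclass that implements setPrices().
# The AgriculturePlanet class and its prices below are hypothetical examples.
#
# class AgriculturePlanet(Planet):
#     def setPrices(self):
#         # cheap food, expensive manufactured goods on a farming world
#         self.prices = {'wheat': 5, 'iron': 40, 'phone': 120}
#
# planet = AgriculturePlanet()
# planet.setPrices()
# print(planet.getName(), planet.getPrices())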
|
[
"tusch.klaus@gmail.com"
] |
tusch.klaus@gmail.com
|
09c72bef322c2d0a17dd270c10a22db10b174c02
|
46ed67158fcef1b37e76339de40219e7b466f98b
|
/t5.py
|
5be6e1a1fc02eab700a00c0fcbbbad9cfabe1046
|
[] |
no_license
|
RuningBird/TensorFlow
|
dd0c37796efa2a0690a2ac4c63d0c26f9d725a89
|
c67b192b69982ac272f4bc6f541c7918e9ba6773
|
refs/heads/master
| 2021-01-22T03:45:08.807663
| 2017-06-08T11:23:20
| 2017-06-08T11:23:20
| 92,401,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
import tensorflow as tf
#############-------- Activation functions ---------#############
|
[
"ipowerink@outlook.com"
] |
ipowerink@outlook.com
|
a1865ec63daf117e572dec767df1368a078c8ae2
|
640c9480d8d9ec42ef76c5df6c29734a066edd2c
|
/network96.py
|
9c49a4fcc9d329c14c442ea2988f06dca5fb1338
|
[
"MIT"
] |
permissive
|
nuonuoyaya/DCGAN
|
28a8a53c903e8ff826ce37a1085f4b4ac8e9e6b4
|
0219db97ddf966d7a638e1be6b71bcf96fada10d
|
refs/heads/master
| 2023-03-27T11:54:00.085204
| 2021-03-25T05:01:29
| 2021-03-25T05:01:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,366
|
py
|
import tensorflow as tf
# Hyperparameters (number of training epochs, learning rate, etc.; adjust as needed)
EPOCHS = 100
BATCH_SIZE = 128
LEARNING_RATE = 0.0002
BETA_1 = 0.5
# The network structure can be adapted to the image size of the dataset, but the images should not be too large, otherwise training takes too long.
# Define the discriminator model
def discriminator_model():
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Conv2D(
        filters=64,  # 64 filters, so the output depth is 64
        kernel_size=(5, 5),  # each filter is 5 x 5
        strides=(2, 2),  # stride of 2
        padding='same',  # 'same' pads the border with zeros
        input_shape=(96, 96, 3),  # input shape [96, 96, 3]; 3 is the RGB channels
        kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0, stddev=0.02)
    ))
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    model.add(tf.keras.layers.Conv2D(
        128,
        (5, 5),
        strides=(2, 2),
        padding="same",
        kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0, stddev=0.02)
    ))
    model.add(tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9))  # BN layer to prevent mode collapse and speed up convergence
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    model.add(tf.keras.layers.Conv2D(
        256,
        (5, 5),
        strides=(2, 2),
        padding="same",
        kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0, stddev=0.02)
    ))
    model.add(tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9))
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    model.add(tf.keras.layers.Conv2D(
        512,
        (5, 5),
        strides=(2, 2),
        padding="same",
        kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0, stddev=0.02)
    ))
    model.add(tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9))
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    model.add(tf.keras.layers.Flatten())  # flatten
    model.add(tf.keras.layers.Dense(1))
    model.add(tf.keras.layers.Activation("sigmoid"))  # sigmoid activation layer
    return model
# Define the generator model
def generator_model():
    model = tf.keras.models.Sequential()
    # the input dimension is 100
    model.add(tf.keras.layers.Dense(512 * 6 * 6, input_shape=(100, )))
    model.add(tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9))
    model.add(tf.keras.layers.Activation("relu"))
    model.add(tf.keras.layers.Reshape((6, 6, 512)))  # 6 x 6 pixels
    model.add(tf.keras.layers.Conv2DTranspose(
        256,
        (5, 5),
        strides=(2, 2),
        padding="same",
        kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0, stddev=0.02)
    ))
    model.add(tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9))
    model.add(tf.keras.layers.Activation("relu"))
    model.add(tf.keras.layers.Conv2DTranspose(
        128,
        (5, 5),
        strides=(2, 2),
        padding="same",
        kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0, stddev=0.02)
    ))
    model.add(tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9))
    model.add(tf.keras.layers.Activation("relu"))
    model.add(tf.keras.layers.Conv2DTranspose(
        64,
        (5, 5),
        strides=(2, 2),
        padding="same",
        kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0, stddev=0.02)
    ))
    model.add(tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9))
    model.add(tf.keras.layers.Activation("relu"))
    model.add(tf.keras.layers.Conv2DTranspose(
        3,
        (5, 5),
        strides=(2, 2),
        padding="same",
        kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0, stddev=0.02)
    ))
    model.add(tf.keras.layers.Activation("tanh"))  # tanh activation layer
    return model
# Build a DCGAN: one generator followed by one discriminator
# input -> generator -> discriminator -> output
def generator_containing_discriminator(generator, discriminator):
    model = tf.keras.Sequential()
    model.add(generator)
    discriminator.trainable = False  # the discriminator is frozen at this point
    model.add(discriminator)
    return model
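# Illustrative usage sketch (added for clarity, not part of the original file),
# assuming the three builders above:
#     g = generator_model()        # maps a 100-dim noise vector to a 96x96x3 image in [-1, 1]
#     d = discriminator_model()    # maps a 96x96x3 image to a real/fake probability
#     gan = generator_containing_discriminator(g, d)
#     noise = tf.random.normal((BATCH_SIZE, 100))
#     fake_images = g(noise)       # shape (BATCH_SIZE, 96, 96, 3)
#     scores = gan(noise)          # shape (BATCH_SIZE, 1)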
|
[
"noreply@github.com"
] |
nuonuoyaya.noreply@github.com
|
0170bb41ca084d2527f7b9c2b97652ee4c794c72
|
0e1b1f5e2893070ebdcb5eb15b07b89b0f31f471
|
/submodules/seqan/util/py_lib/seqan/dox/sig_parser.py
|
8ba87455ea0259b6f9b67f3da6939a903f68f617
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
sheffield-bioinformatics-core/STRique
|
1a4a3e59e0ac66174ed5c9a4498d6d8bed40b54d
|
fd2df916847727b3484b2bbad839814043d7dbea
|
refs/heads/master
| 2022-12-27T22:28:31.893074
| 2020-09-29T14:31:45
| 2020-09-29T14:31:45
| 296,618,760
| 0
| 0
|
MIT
| 2020-09-18T12:45:30
| 2020-09-18T12:45:29
| null |
UTF-8
|
Python
| false
| false
| 12,109
|
py
|
#!/usr/bin/env python2
"""Parser for the signature supported by the SeqAn Doxygen-style documentation.
"""
# TODO(holtgrew): The parser has become quite complex. Maybe using some external library for parsing is in order.
import sys
import lexer
TOKENS = (
('KWD_TEMPLATE', r'template'),
('KWD_TYPENAME', r'typename'),
('KWD_CLASS', r'class'),
('KWD_CONCEPT', r'concept'),
('KWD_STRUCT', r'struct'),
('KWD_ENUM', r'enum'),
('IDENTIFIER', r'[a-zA-Z_~][a-zA-Z_0-9~]*'),
('COMMA', r','),
('NAME_SEP', r'::'),
('HASH', r'#'),
('SEMICOLON', r';'),
('SPACE', r'[\t ]'),
('PROUND_OPEN', r'\('),
('PROUND_OPEN', r'\('),
('PROUND_CLOSE', r'\)'),
('PANGULAR_OPEN' , r'<'),
('PANGULAR_CLOSE', r'>'),
)
class SigParseException(Exception):
"""Raised in the case of signature parsing error."""
def __init__(self, msg, line=0, column=0):
Exception.__init__(self, msg)
self.line = line
self.column = column
class Arg(object):
"""
@ivar type: The type of the template parameter, e.g. 'typename',
'class', 'int', 'unsigned' etc. str
@ivar name: The name of the parameter. str
"""
def __init__(self, type=None, name=None):
self.type = type
self.name = name
class SigEntry(object):
"""A signature entry.
The following kinds are possible: concept, class, function, variable,
enum, struct.
@ivar name: Name of the element.
@ivar kind: The kind of the element.
@ivar params: Parameters of the function, in case of function.
@ivar is_tpl: Whether or not the entry is a template.
@ivar tparams: Template parameters, in case of templates.
@ivar return_type: The name of the return type, in case of function.
@ivar return_name: Name after the :: for Metafunctions.
@ivar var_type: The type of the variable.
"""
def __init__(self, name=None, kind=None, params=[], tparams=[],
is_tpl=False, return_type=None, return_name=None,
var_type=None):
self.name = name
self.kind = kind
self.params = list([])
self.tparams = list([])
self.is_tpl = is_tpl
self.return_type = return_type
self.return_name = return_name
self.var_type = var_type
def toString(self):
"""Convert the SigEntry object back into a string."""
types = ['concept', 'class', 'struct', 'enum']
if not self.is_tpl and self.kind in types:
return '%s %s;' % (self.kind, self.name)
elif not self.is_tpl and self.kind == 'function':
params = ', '.join(['%s %s' % (p.type, p.name) for p in self.params])
if self.return_type:
return '%s %s(%s);' % (self.return_type, self.name, params)
else:
return '%s(%s);' % (self.name, params)
elif self.is_tpl and self.kind == 'function':
tparams = ', '.join(['%s %s' % (p.type, p.name) for p in self.tparams])
params = ', '.join(['%s %s' % (p.type, p.name) for p in self.params])
return 'template <%s>\n%s %s(%s);' % (tparams, self.return_type, self.name, params)
elif self.is_tpl and self.kind in ['struct', 'class']:
tparams = ', '.join(['%s %s' % (p.type, p.name) for p in self.tparams])
params = ', '.join(['%s %s' % (p.type, p.name) for p in self.params])
return 'template <%s>\n%s %s;' % (tparams, self.kind, self.name)
elif self.kind == 'metafunction':
tparams = ', '.join([p.name for p in self.tparams])
if self.return_type:
return '%s %s<%s>::%s;' % (self.return_type, self.name, tparams,
self.return_name)
else:
return '%s<%s>::%s;' % (self.name, tparams, self.return_name)
elif self.kind == 'variable':
return '%s %s;' % (self.var_type, self.name)
class SigParser(object):
def __init__(self, buffer):
self.buffer = buffer
self.lexer = lexer.Lexer(TOKENS)
self.lexer.input(buffer)
self.tokens = self.lexer.tokens()
def parseTemplate(self, token):
tparams = []
# Read <
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'PANGULAR_OPEN':
raise SigParseException('Expected opening angular parenthesis')
# Parse template parameters.
self.parseParams('PANGULAR_CLOSE', tparams,
['IDENTIFIER', 'KWD_TYPENAME', 'KWD_CLASS'])
# Parse remaining.
sig_entry = self.parse()
sig_entry.is_tpl = True
sig_entry.tparams = tparams
return sig_entry
def parseParams(self, end_token, params_dest, type_tokens):
t = self.tokens.next()
self.expectNotEof(t)
while t.type != end_token:
if t.type not in type_tokens:
raise SigParseException('Expected identifier got "%s"' % t.val)
arg = Arg(type=t.val)
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'IDENTIFIER':
raise SigParseException('Expected identifier got "%s"' % t.val)
arg.name = t.val
t = self.tokens.next()
self.expectNotEof(t)
if t.type not in ['COMMA', end_token]:
raise SigParseException('Expected COMMA or closing parenthesis')
if t.type != end_token:
t = self.tokens.next()
params_dest.append(arg)
def parseMetafunctionType(self, name):
sig_entry = SigEntry(kind='metafunction')
sig_entry.name = name
# Expect "#$name" or PANGULAR_CLOSE
t = self.tokens.next()
self.expectNotEof(t)
if t.type == 'HASH':
sig_entry.name += '#'
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'IDENTIFIER':
raise SigParseException('Expecting identifier')
sig_entry.name += t.val
t = self.tokens.next()
self.expectNotEof(t)
while t.type != 'PANGULAR_CLOSE':
if t.type != 'IDENTIFIER':
raise SigParseException('Expecting identifier')
arg = Arg(name=t.val)
sig_entry.tparams.append(arg)
# Read "," or ">"
t = self.tokens.next()
self.expectNotEof(t)
if t.type not in ['PANGULAR_CLOSE', 'COMMA']:
raise SigParseException('Expecting ">" or ","')
if t.type == 'COMMA':
t = self.tokens.next()
self.expectNotEof(t)
# Expect "::"
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'NAME_SEP':
raise SigParseException('Expecting "::"')
# Read return_name
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'IDENTIFIER':
raise SigParseException('Expecting identifier got %s' % repr(t.val))
sig_entry.return_name = t.val
return sig_entry
def parseMetafunctionValue(self, return_type, name):
sig_entry = self.parseMetafunctionType(name)
sig_entry.return_type = return_type
return sig_entry
def parseFunction(self, token):
"""Parse a function, variable, or metafunction.
We started out with an identifier. The things that this function will
be triggered for:
TReturn name(T1 x1, T2 x2, ...)
TReturn Klass::name(T1 x1, T2 x2, ...)
TReturn Klass#name(T1 x1, T2 x2, ...)
TReturn Name<TParam>::VALUE
TReturn Klass#Name<TParam>::VALUE
Name<TParam>::Type
Klass#Name<TParam>::Type
T var
@param token: lexer.Token object with the previous token.
"""
is_constructor = False
other_name = token.val
# get next token, i sname or "<"
t = self.tokens.next()
self.expectNotEof(t)
if t.type in ['HASH', 'NAME_SEP']:
other_name += t.val
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'IDENTIFIER':
raise SigParseException('Expecting identifier.')
other_name += t.val
t = self.tokens.next()
self.expectNotEof(t)
if t.type == 'PANGULAR_OPEN':
return self.parseMetafunctionType(other_name)
if t.type == 'PROUND_OPEN':
is_constructor = True
elif t.type != 'IDENTIFIER':
raise SigParseException('Expecting identifier as function name.')
name = t.val
if not is_constructor:
t = self.tokens.next()
self.expectNotEof(t)
if t.type in ['HASH', 'NAME_SEP']:
name += t.val
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'IDENTIFIER':
raise SigParseException('Expecting identifier.')
name += t.val
t = self.tokens.next()
self.expectNotEof(t)
# expect "(" or "<"
if t.type == 'PANGULAR_OPEN':
return self.parseMetafunctionValue(other_name, name)
# Expecting <eof>, ";", or (. The last triggers generation of a
# function, the other of a variable.
if t.type in ['EOF', 'SEMICOLON']:
sig_entry = SigEntry(kind='variable')
sig_entry.var_type = other_name
sig_entry.name = name
return sig_entry
sig_entry = SigEntry(kind='function')
if is_constructor:
sig_entry.return_type = None
sig_entry.name = other_name
else:
sig_entry.return_type = other_name
sig_entry.name = name
if t.type in ['HASH', 'NAME_SEP']:
sig_entry.name += t.val
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'IDENTIFIER':
                raise SigParseException('Expecting identifier.')
sig_entry.name += t.val
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'PROUND_OPEN':
raise SigParseException('Expecting opening parenthesis after '
'function name')
# parse parameters
self.parseParams('PROUND_CLOSE', sig_entry.params, ['IDENTIFIER'])
return sig_entry
def parseCSE(self, kind):
"""Parse class, struct, enum."""
sig_entry = SigEntry(kind=kind)
t = self.tokens.next()
self.expectNotEof(t)
if t.type != 'IDENTIFIER':
raise SigParseException('Expecting identifier after "%s"!' % kind)
sig_entry.name = t.val
return sig_entry
def parseClass(self, token):
return self.parseCSE('class')
def parseConcept(self, token):
return self.parseCSE('concept')
def parseStruct(self, token):
return self.parseCSE('struct')
def parseEnum(self, token):
return self.parseCSE('enum')
def expectNotEof(self, token):
if token.type == 'EOF':
            raise SigParseException('Unexpected EOF!')
def parse(self):
try:
t = self.tokens.next()
self.expectNotEof(t)
m = {'KWD_TEMPLATE': self.parseTemplate,
'KWD_CLASS': self.parseClass,
'KWD_CONCEPT': self.parseConcept,
'KWD_STRUCT': self.parseStruct,
'KWD_ENUM': self.parseEnum,
'IDENTIFIER': self.parseFunction}
if not t.type in m:
raise SigParseException('Unexpected token of type %s' % t.type)
return m[t.type](t)
        except lexer.LexerError as e:
raise SigParseException('Lexer error: %s at pos %s when parsing %s' % (e, e.pos, self.buffer))
|
[
"matthew.parker@sheffield.ac.uk"
] |
matthew.parker@sheffield.ac.uk
|
6918da170d57086d58de53e73ee734793ca9c647
|
5b6481da9d47221546382a0f39b73095a18f552b
|
/prev_ob_models/Migliore2014bulb3d/mig2birg_gloms.py
|
82d643debacca3139f0f9d1e14c4dc107fc68e04
|
[
"MIT"
] |
permissive
|
JustasB/OlfactoryBulb
|
26905f247bb189c7a5359c85a2707fcdeab10de1
|
ab0d31f84e18d102956a6fec3196ab262fc08c39
|
refs/heads/master
| 2023-04-11T19:23:45.731947
| 2021-05-04T21:55:20
| 2021-05-04T21:55:20
| 150,029,110
| 7
| 2
|
MIT
| 2019-06-08T01:57:46
| 2018-09-23T21:59:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,818
|
py
|
'''
Mapping between Migliore 2014 glomeruli indices and Birgiolas 2020 indices
Produced by finding the closest glomeruli pair in blender-files/ob-mig-mcs-aligned-to-gloms.blend
'''
mig2birg = {}
mig2birg[0] = 1493
mig2birg[100] = 325
mig2birg[101] = 1613
mig2birg[102] = 358
mig2birg[103] = 1403
mig2birg[104] = 840
mig2birg[105] = 918
mig2birg[106] = 544
mig2birg[107] = 938
mig2birg[108] = 1683
mig2birg[109] = 1481
mig2birg[10] = 785
mig2birg[110] = 1652
mig2birg[111] = 1342
mig2birg[112] = 1075
mig2birg[113] = 399
mig2birg[114] = 803
mig2birg[115] = 1614
mig2birg[116] = 224
mig2birg[117] = 870
mig2birg[118] = 1906
mig2birg[119] = 1090
mig2birg[11] = 80
mig2birg[120] = 1736
mig2birg[121] = 1852
mig2birg[122] = 398
mig2birg[123] = 91
mig2birg[124] = 140
mig2birg[125] = 1619
mig2birg[126] = 1912
mig2birg[12] = 521
mig2birg[13] = 1379
mig2birg[14] = 1684
mig2birg[15] = 187
mig2birg[16] = 1573
mig2birg[17] = 202
mig2birg[18] = 1529
mig2birg[19] = 1373
mig2birg[1] = 1057
mig2birg[20] = 748
mig2birg[21] = 690
mig2birg[22] = 1472
mig2birg[23] = 1531
mig2birg[24] = 50
mig2birg[25] = 1339
mig2birg[26] = 1219
mig2birg[27] = 1473
mig2birg[28] = 326
mig2birg[29] = 884
mig2birg[2] = 556
mig2birg[30] = 239
mig2birg[31] = 1551
mig2birg[32] = 834
mig2birg[33] = 60
mig2birg[34] = 275
mig2birg[35] = 1880
mig2birg[36] = 1043
mig2birg[37] = 38
mig2birg[38] = 1280
mig2birg[39] = 141
mig2birg[3] = 184
mig2birg[40] = 1172
mig2birg[41] = 1311
mig2birg[42] = 338
mig2birg[43] = 1520
mig2birg[44] = 590
mig2birg[45] = 1895
mig2birg[46] = 759
mig2birg[47] = 1883
mig2birg[48] = 545
mig2birg[49] = 1851
mig2birg[4] = 1282
mig2birg[50] = 856
mig2birg[51] = 1435
mig2birg[52] = 1170
mig2birg[53] = 598
mig2birg[54] = 1840
mig2birg[55] = 1210
mig2birg[56] = 1567
mig2birg[57] = 1165
mig2birg[58] = 461
mig2birg[59] = 33
mig2birg[5] = 895
mig2birg[60] = 1151
mig2birg[61] = 1340
mig2birg[62] = 1877
mig2birg[63] = 450
mig2birg[64] = 1867
mig2birg[65] = 1382
mig2birg[66] = 104
mig2birg[67] = 1245
mig2birg[68] = 361
mig2birg[69] = 1773
mig2birg[6] = 1669
mig2birg[70] = 1089
mig2birg[71] = 675
mig2birg[72] = 1115
mig2birg[73] = 1474
mig2birg[74] = 1125
mig2birg[75] = 696
mig2birg[76] = 1780
mig2birg[77] = 892
mig2birg[78] = 1589
mig2birg[79] = 167
mig2birg[7] = 1033
mig2birg[80] = 985
mig2birg[81] = 337
mig2birg[82] = 1633
mig2birg[83] = 924
mig2birg[84] = 1140
mig2birg[85] = 114
mig2birg[86] = 829
mig2birg[87] = 1673
mig2birg[88] = 836
mig2birg[89] = 794
mig2birg[8] = 530
mig2birg[90] = 272
mig2birg[91] = 609
mig2birg[92] = 1604
mig2birg[93] = 1483
mig2birg[94] = 429
mig2birg[95] = 1077
mig2birg[96] = 501
mig2birg[97] = 1287
mig2birg[98] = 1878
mig2birg[99] = 1133
mig2birg[9] = 449
|
[
"jbirgio@gmail.com"
] |
jbirgio@gmail.com
|
f71d99827be90d045ef1488af88ac529bc8fcdc2
|
aa622a03677b3a52890b8b8ffc13c2f57d6f14c9
|
/rctk/tests/test_synced_control.py
|
4e75f893ac1e6086652386acaf6bc34d7525d334
|
[
"BSD-2-Clause"
] |
permissive
|
rctk/rctk
|
8c6e0796127dc4ff528c03a840e02173dc660cdf
|
dc8640e75179c8560ca0cd64e709fefef58d5d2a
|
refs/heads/master
| 2020-05-19T16:52:20.687043
| 2011-09-05T13:02:36
| 2011-09-05T13:02:36
| 2,898,310
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,318
|
py
|
from rctk.tests.base import BaseTest
from rctk.widgets.control import Control, Attribute
class TestSyncedControl(BaseTest):
def test_single_attribute(self):
class TestControl(Control):
a = Attribute("default")
t = TestControl(self.tk)
assert len(self.tk._queue) == 1
assert self.tk._queue[0]._task['action'] == 'create'
assert self.tk._queue[0]._task['a'] == 'default'
def test_single_attribute_nondefault(self):
class TestControl(Control):
a = Attribute("default")
t = TestControl(self.tk, a="nondefault")
assert len(self.tk._queue) == 1
assert self.tk._queue[0]._task['action'] == 'create'
assert self.tk._queue[0]._task['a'] == 'nondefault'
def test_update(self):
class TestControl(Control):
a = Attribute("default")
t = TestControl(self.tk, a="nondefault")
self.tk._queue.pop() # pop the create task
t.a = 'updated'
assert len(self.tk._queue) == 1
assert self.tk._queue[0]._task['action'] == 'update'
assert self.tk._queue[0]._task['update']['a'] == 'updated'
def test_create_filter_default(self):
def filter(a):
return a[::-1]
class TestControl(Control):
a = Attribute("default", filter=filter)
t = TestControl(self.tk)
assert self.tk._queue[0]._task['a'] == 'tluafed'
def test_create_filter_nondefault(self):
def filter(a):
return a[::-1]
class TestControl(Control):
a = Attribute("default", filter=filter)
t = TestControl(self.tk, a="nondefault")
assert self.tk._queue[0]._task['a'] == 'tluafednon'
def test_update_filter(self):
def filter(a):
return a[::-1]
class TestControl(Control):
a = Attribute("default", filter=filter)
t = TestControl(self.tk)
self.tk._queue.pop() # pop the create task
t.a = 'updated'
assert len(self.tk._queue) == 1
assert self.tk._queue[0]._task['action'] == 'update'
assert self.tk._queue[0]._task['update']['a'] == 'detadpu'
def test_remote_sync(self):
class TestControl(Control):
a = Attribute("default")
t = TestControl(self.tk)
t.sync(a="synced")
assert t.a == "synced"
def test_scenario(self):
""" a more complete, slightly more complex scenario """
def filter(a):
return a[::-1]
class TestControl(Control):
a = Attribute("default", filter=filter)
b = Attribute(1, Attribute.NUMBER)
t = TestControl(self.tk, b=2)
assert len(self.tk._queue) == 1
assert self.tk._queue[0]._task['action'] == 'create'
assert self.tk._queue[0]._task['a'] == 'tluafed'
assert self.tk._queue[0]._task['b'] == 2
self.tk._queue.pop()
t.a = "updated"
t.b = 3
assert len(self.tk._queue) == 2
assert self.tk._queue[0]._task['update']['a'] == 'detadpu'
assert self.tk._queue[1]._task['update']['b'] == 3
t.sync(a="synced", b=100)
assert t.a == "synced"
assert t.b == 100
## TODO: Integrate attributes into xmlbuilder, test it.
# test (remote) sync of attribute
|
[
"github@in.m3r.nl"
] |
github@in.m3r.nl
|
2e7a77380201c7d92df9a72c9bdbf5361d4d0329
|
f246b414cce8687d6e5d1bb77cd94132b89580a2
|
/capstone/capstone/settings.py
|
a047facfe5fdebea0cdc34e2244ca61154a75b55
|
[] |
no_license
|
thewolfcommander/cs50-web
|
edbccd29b0b649852c7af73d5ecba4f51fa47ad3
|
beead0967d36ef398b699601c8ebae646827556d
|
refs/heads/master
| 2022-12-23T08:03:52.729833
| 2020-10-01T14:14:31
| 2020-10-01T14:14:31
| 300,309,278
| 2
| 2
| null | 2020-10-01T14:29:44
| 2020-10-01T14:29:42
| null |
UTF-8
|
Python
| false
| false
| 3,681
|
py
|
"""
Django settings for capstone project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't@^uc32#s6eo4&qf^5v4d05&p-cjlb8@3yyntd@!z3xxg*)4jo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'tasker',
'rest_framework',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
# CORS_ORIGIN_WHITELIST = (
# 'http://localhost:3000',
# )
ROOT_URLCONF = 'capstone.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
WSGI_APPLICATION = 'capstone.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
AUTH_USER_MODEL = 'tasker.User'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Australia/Sydney'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"nstu778@aucklanduni.ac.nz"
] |
nstu778@aucklanduni.ac.nz
|
cd3954fece8a567c0bd6837d9914e92508b196e0
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_298/ch54_2020_09_29_22_51_12_719433.py
|
030dbbeba2a0e3cf5a449b226a9408e9dc81ea74
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
fibonacci = []
fibonacci.append(1)
fibonacci.append(1)
def calcula_fibonacci(n):
t = 0
while t < n:
fibonacci_seguinte = fibonacci[t] + fibonacci[t+1]
fibonacci.append(fibonacci_seguinte)
t += 1
return fibonacci[:n]
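# Example (follows from the recurrence above): calcula_fibonacci(5) returns [1, 1, 2, 3, 5].
# Note that each call extends the module-level `fibonacci` list in place.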
n = int(input())  # number of Fibonacci terms to generate
print(calcula_fibonacci(n))
|
[
"you@example.com"
] |
you@example.com
|
bd41d5569239641b7cf11f80b1f16949af7fd367
|
5756d7c546c30924a95d8de484fd1ec66d89f31f
|
/web/migrations/0007_auto_20201104_0111.py
|
271be5007be0aca15c92bb8598b6413b9cc74743
|
[] |
no_license
|
manuguevara/MI-CAR-Django
|
0e665c7f7f850263ec01be5b8752ac15201ccaf2
|
90d96f2dafc4af125e3b4c0e03ca5305ccca1646
|
refs/heads/master
| 2023-01-30T18:48:03.749094
| 2020-12-10T00:22:23
| 2020-12-10T00:22:23
| 300,990,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Generated by Django 3.1.1 on 2020-11-04 04:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0006_insumo'),
]
operations = [
migrations.AlterField(
model_name='insumo',
name='imagen',
field=models.ImageField(default=1, upload_to='Insumo'),
preserve_default=False,
),
]
|
[
"manu.guevara@alumnos.duoc.cl"
] |
manu.guevara@alumnos.duoc.cl
|
19ff025373dc4fcab401bdcea7400cec9db59b81
|
73bb9421b459955af189b75fc01298fb820210c6
|
/hello_you.py
|
421d6598333c86ad5b0887c29346fb96cf80aeab
|
[] |
no_license
|
becerra2906/python_practice_ex
|
44247f5762b29b2db2e87c24834967f6d3b44941
|
2522be3a04bc1dc79e391edee0dd2bc239e56604
|
refs/heads/master
| 2022-12-22T12:11:07.205038
| 2020-10-02T17:14:59
| 2020-10-02T17:14:59
| 272,989,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
# By Alejandro Becerra
# done as part of The Python Bible Udemy Course
#ask user a name
name = input("What's your name?: ")
#ask user for age
age = input("How old are you?: ")
#ask user for city
city = input("What city do you live in?: ")
#ask user what they enjoy
love = input("What do you love doing?: ")
#Create output
string = "Your name is {} and you are {} years old. You live in {} and you love {}."
output = string.format(name, age, city, love)
#Print output
print(output)
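#An equivalent output using an f-string (Python 3.6+) would be:
#output = f"Your name is {name} and you are {age} years old. You live in {city} and you love {love}."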
|
[
"noreply@github.com"
] |
becerra2906.noreply@github.com
|
d48b020a384af34542a253fd697fdca212d37b83
|
a05ce7d365917c2ae6430e522381792a011d5979
|
/atomtoolbox/classification.py
|
0314661575a57af915ef3e05b66ea5d0493b2105
|
[
"MIT"
] |
permissive
|
eschmidt42/AtomToolBox
|
9d41770e9ebf545ac6cefce5c81226c14ee024c2
|
25b80675dcaa332fc0c0d702b60ea61dfe2501a0
|
refs/heads/master
| 2021-09-14T21:45:57.408383
| 2018-05-20T09:15:26
| 2018-05-20T09:15:26
| 113,569,858
| 1
| 0
| null | 2018-02-10T14:37:36
| 2017-12-08T12:00:18
| null |
UTF-8
|
Python
| false
| false
| 64,241
|
py
|
import copy, os, pickle, collections, warnings
from scipy import stats, optimize
import numpy as np
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from sklearn.cluster import KMeans
from scipy.cluster import hierarchy
from scipy import spatial
import matplotlib.pylab as plt
import matplotlib as mpl
import tensorflow as tf
import sklearn
from time import time
from functools import partial
from datetime import datetime
def scipy_gmm_wrapper(gmm=None, means_=None, covariances_=None, weights_=None):
"""Wraps the gmm so it's pdf can be accessed.
"""
gmm_given = (not gmm is None)
parameters_given = not (means_ is None and covariances_ is None and weights_ is None)
assert gmm_given or parameters_given,\
"Either 'gmm' needs to be given or 'means_', 'covariances_' and 'weights_'!"
if gmm_given:
means_ = gmm.means_
covariances_ = gmm.covariances_
weights_ = gmm.weights_
elif parameters_given:
assert len(means_)==len(covariances_)==len(weights_), "'means_', 'covariances_' and 'weights_' all need to have the same length!"
else:
raise ValueError("WTF!")
gaussians = [stats.multivariate_normal(mean=means_[i],cov=covariances_[i])
for i in range(weights_.shape[0])]
def scipy_gmm(x):
if len(x.shape)==1:
Phi = np.array([g.pdf(x) for g in gaussians])
elif len(x.shape)==2:
Phi = np.array([[g.pdf(_x) for g in gaussians] for _x in x])
else:
raise ValueError("Cannot handle 'x' of shape {}!".format(x.shape))
return np.dot(Phi,weights_)
return scipy_gmm
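# Minimal usage sketch for scipy_gmm_wrapper (assuming a fitted sklearn GaussianMixture
# `gmm` and a sample array `X` of shape (N, D)):
#   pdf = scipy_gmm_wrapper(gmm=gmm)
#   densities = pdf(X)  # mixture density evaluated at every sample, shape (N,)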
def get_decomposed_models(in_models, verbose=False):
"""Decomposes 'models'.
Parameters
----------
in_models : dict of lists
Example: {"a":[0,2], "b":[0,1]}
verbose : boolean, optional, default False
Returns
-------
out_models : list of np.ndarrays of int
Each np.ndarray represents a maximally decomposed model
breaking down in_models.
Example:
>>> in_model = {"a":[0,2], "b":[0,1]}
>>> print(get_decomposed_models(in_model))
[array([2]), array([1]), array([0])]
"""
if verbose:
print("in_models ",in_models)
out_models = []
num_models = len(in_models)
assert num_models > 0, "number of models = {}!".format(num_models)
out_models = collections.deque([set(v) for v in in_models.values()])
if num_models > 1:
unique_components = np.sort(np.unique(np.hstack(in_models.values())))
if verbose:
print("unique_components ",unique_components)
for i, uc in enumerate(unique_components):
if verbose:
print("\nunique component: ",uc)
print("out_models ",out_models)
            # split out_models into relevant and irrelevant models;
            # relevant models will be decomposed and their remainders
            # added to the irrelevant models to produce the new out_models
relevant_models = []
irrelevant_models = []
while len(out_models)>0:
m = out_models.popleft()
if verbose:
print(" m ",m)
if uc in m:
relevant_models.append(m)
else:
irrelevant_models.append(m)
if verbose:
print(" > relevant ",relevant_models)
print(" > irrelevant ",irrelevant_models)
# sort relevant models by length
relevant_models = sorted(relevant_models, key = lambda x: len(x))
if len(relevant_models)==0 and len(irrelevant_models)==0: # error
raise ValueError("Relevant and irrelevant models are surprisingly empty!")
elif len(relevant_models)==0: # no model was relevant
out_models = collections.deque(irrelevant_models)
elif len(relevant_models)==1: # a single model was relevant
out_models = collections.deque(relevant_models+irrelevant_models)
else: # there were some relevant models
i0, i1 = np.triu_indices(len(relevant_models),k=1)
# all intersections have the unique component in common
intersections = [relevant_models[v0].intersection(relevant_models[v1]) for v0,v1 in zip(i0,i1)]
assert len(intersections)>0, "The intersections are surprisingly empty!"
# sorting by number of model components
intersections = sorted(intersections, key=lambda x: len(x))
if verbose:
print(" > intersections ",intersections)
# the smallest possible set of components
intersection = intersections[0]
assert len(intersection)>0, "The intersection is surprisingly empty!"
if verbose:
print(" > intersection ",intersection)
#retrieving remainders
remainders = [m.difference(intersection) for m in relevant_models]
remainders = [v for v in remainders if len(v)>0]
if verbose:
print(" > intersection ",intersection)
# composing out_models again
if verbose:
print(" > irrelevant_models ",irrelevant_models)
print(" > remainders ",remainders)
out_models = collections.deque(irrelevant_models + [intersection] + remainders)
if verbose:
print(" > new out_models ",out_models)
out_models = [np.array(list(v),dtype=int) for v in out_models]
assert set([v0 for v1 in in_models.values() for v0 in v1]) == set(np.hstack(out_models)), "Mismatching sets, lost some components!"
return out_models
def fit_gmm_weights(w0,_gaussians,X,method="Nelder-Mead"):
def wbound(w):
w = np.absolute(w)
return w/w.sum()
def _wrap(_gaussians,X):
_g = np.array([[g.pdf(x) for g in _gaussians] for x in X])
def fun(w):
return - (np.log(np.dot(_g,wbound(w)))).sum()
return fun
res = optimize.minimize(_wrap(_gaussians,X),w0,method=method)
return wbound(res["x"])
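# Minimal usage sketch for fit_gmm_weights (assuming `gaussians` is a list of
# scipy.stats.multivariate_normal instances and `X` holds the samples):
#   w0 = np.ones(len(gaussians)) / len(gaussians)  # uniform starting weights
#   w = fit_gmm_weights(w0, gaussians, X)          # non-negative weights summing to 1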
class GaussianMixtureClassifier:
"""Approximates the sample distribution for classification.
GaussianMixture and BayesianGaussianMixture as implemented in sklearn
classify by individual Gaussian components found during the density
regression of the given samples. In this class the training set is
split by class and approximates the entire density distributions for
each class. The resulting pdfs are then used for classification.
This class can also do decomposition of Gaussian Mixture Models (GMMs).
The decomposition is triggered when GaussianMixtureClassifier.fit
is passed an X that is a list of np.ndarrays. It is then assumed that each
array contains a superposition of underlying distributions (which may each be
larger than a single Gaussian), e.g. comparison of LAMMPS trajectories
of different crystals.
Parameters
----------
gmm : instance of GaussianMixture or BayesianGaussianMixture, optional, default None
Required to regress data.
load_path : str, optional default None
To load results of a previous regression.
check_labels : boolean, optional, default True
Checks upon loading the classifier from disk whether the number of
GMM models matches the number of known labels or not.
Methods
-------
fit : fit(X, y)
Approximates the density distributions using the provided gmm.
X : float np.ndarray of shape (N, M)
y : int np.ndarray of shape (N,)
predict_proba : predict_proba(X)
Requires previous execution of 'fit'. Returns the probability
for the given samples to belong to either of the classes.
predict : predict(X, show=False, axes=[0,1])
        Requires previous execution of 'fit'. Returns the most probable class
for all samples. Optionally can also plot the classification along
two axes.
pdf : pdf(X, label)
Requires previous execution of 'fit'. Allows access to the
pdf for a specified class.
label : int
cluster : dict
        Settings for the clustering/decomposition of Gaussians. See self.decompose_gmms
for more detail.
weights_fit_method : str, optional, default "Nelder-Mead"
Specifies the scipy.optimize.minimize method to use for the optimization of
GMM weights.
"""
fitted_gmms = dict()
labels = None
idx_class = None
weights_ = dict() # keys are integers. correspond to order of 'appearances' (a list)
covariances_ = dict() # keys are integers. correspond to order of 'appearances' (a list)
means_ = dict() # keys are integers. correspond to order of 'appearances' (a list)
label_map = None
check_labels = True
    appearances = None # in which of the originally given crystals the model components were observed
default_cluster_kwargs = {"method":"average","metric":"euclidean","cluster_parameters":"mu",
"threshold":1e-6,"criterion":"distance","combine":"mean"}
def __init__(self, gmm=None, load_path=None, tol0=1e-6, cluster = default_cluster_kwargs,
weights_fit_method="Nelder-Mead", verbose=False, check_labels=False):
if not gmm is None:
assert isinstance(gmm,(GaussianMixture, BayesianGaussianMixture)), "'gmm' needs to be an instance of sklearn.mixture.{GaussianMixture, BayesianGaussianMixture}!"
elif isinstance(load_path,str):
assert os.path.exists(load_path), "Given 'load_path' ({}) invalid!".format(load_path)
else:
raise ValueError("Either 'gmm' or 'load_path' needs to be given!")
self.gmm = gmm
self.check_labels = check_labels
if gmm is None:
self._load_parameters(load_path)
# decomposition related parameters
self.tol0 = tol0
for _k, _v in self.default_cluster_kwargs.items():
if not _k in cluster:
cluster[_k] = _v
self.cluster = cluster
self.weights_fit_method = weights_fit_method
# misc
self.verbose = verbose
def _load_parameters(self,load_path):
with open(load_path,"rb") as f:
params = pickle.load(f)
for label in sorted(params["weights_"]):
self.fitted_gmms[label] = scipy_gmm_wrapper(weights_=params["weights_"][label],\
covariances_=params["covariances_"][label],\
means_=params["means_"][label])
self.weights_ = params["weights_"]
self.covariances_ = params["covariances_"]
self.means_ = params["means_"]
self.label_map = params["label_map"]
if "appearances" in params:
self.appearances = params["appearances"]
if self.check_labels:
assert not self.label_map is None, "No labels are given in the stored file!"
assert len(self.fitted_gmms) == len(self.label_map), "The number of GMMs (%i) does not match the number of available labels (%i)!" % (len(self.fitted_gmms),len(self.label_map))
def save(self,save_path):
params = {"weights_":self.weights_,
"covariances_":self.covariances_,
"means_":self.means_,
"label_map":self.label_map,
"appearances":self.appearances}
with open(save_path,"wb") as f:
pickle.dump(params,f)
def fit(self, X, y=None, label_map=None):
"""Fits.
Parameters
----------
X : np.ndarray of floats or list of np.ndarrays
Notes
-----
If X is an array then y needs to be given and the classifier is developed
directly over all the samples. If, otherwise, X is a list of arrays then
y is not needed and labels are generated by decomposing the arrays in X by
comparison of their computed pdfs.
"""
X_is_array = isinstance(X,np.ndarray)
X_is_list = isinstance(X,list)
if X_is_array:
assert isinstance(y,np.ndarray), "'X' is an array and thus 'y' needs to be an array!"
assert y.shape[0]==X.shape[0], "The array 'X' of shape {} is not matched by 'y' of shape {}!".format(X.shape,y.shape)
elif X_is_list:
assert all([isinstance(x,np.ndarray) for x in X]), "'X' is a list and all its entries need to be np.ndarrays! Got: {}".format([type(x) for x in X])
assert len(set([x.shape[1] for x in X]))==1, "All arrays in 'X' need to have the same number of features! Got: {}".format([x.shape[1] for x in X])
n_features = X[0].shape[1]
else:
raise ValueError("'X' input not understood. Needs to be an array of list of arrays!")
if X_is_array:
self.label_map=label_map
self.labels = np.unique(y)
self.idx_class = {k: np.where(y==k)[0] for k in self.labels}
for label in sorted(self.idx_class):
_gmm = copy.deepcopy(self.gmm)
_gmm.fit(X[self.idx_class[label],:])
self.fitted_gmms[label] = scipy_gmm_wrapper(gmm=_gmm)
self.weights_[label] = _gmm.weights_
self.covariances_[label] = _gmm.covariances_
self.means_[label] = _gmm.means_
elif X_is_list:
Nstructures = len(X)
structure_ids = ["structure%s"%i for i in range(Nstructures)]
# fit structure pdfs
all_gmms = [copy.deepcopy(self.gmm).fit(X[i]) for i in range(Nstructures)]
# compressed and decomposed (cd) model
mus_cd, covs_cd, appearances = self.decompose_gmms(all_gmms, structure_ids, n_features,
method=self.cluster["method"], metric=self.cluster["metric"],
threshold=self.cluster["threshold"], criterion=self.cluster["criterion"],
cluster_parameters=self.cluster["cluster_parameters"], combine=self.cluster["combine"],
verbose=self.verbose)
N_cd = len(mus_cd)
self.appearances = appearances.copy()
# re-fit models
gmms_cd = []
_X = np.vstack(X)
if self.verbose:
print("number of resulting models: ",N_cd)
print("number of components in total: ",sum(m.shape[0] for m in mus_cd))
for i in range(N_cd):
gaussians = [stats.multivariate_normal(mean=mus_cd[i][j],cov=covs_cd[i][j])
for j in range(mus_cd[i].shape[0])]
#check closeness to original models
is_new_model = True
for _gmm in all_gmms:
if _gmm.means_.shape == mus_cd[i].shape and _gmm.covariances_.shape == covs_cd[i].shape:
mu_close = np.allclose(_gmm.means_,mus_cd[i])
cov_close = np.allclose(_gmm.covariances_,covs_cd[i])
if mu_close and cov_close:
is_new_model = False
weights_cd = _gmm.weights_
break
Ng = len(gaussians)
if is_new_model: # in case it's a new model get new weights
if Ng>1:
w0 = np.ones(Ng)/float(Ng)
weights_cd = fit_gmm_weights(w0, gaussians, _X, method=self.weights_fit_method)
assert weights_cd.sum() < 1+1e-6, "Weights invalid!"
if np.linalg.norm(w0-weights_cd)<self.tol0:
warnings.warn("Weights were not optimized! Changes are smaller than {}.".format(self.tol0))
else:
weights_cd = np.array([1.])
# finalize and store models
self.fitted_gmms[i] = scipy_gmm_wrapper(means_=mus_cd[i], covariances_=covs_cd[i], weights_=weights_cd)
self.weights_[i] = weights_cd
self.covariances_[i] = covs_cd[i]
self.means_[i] = mus_cd[i]
else:
raise ValueError("Boink!")
@staticmethod
def decompose_gmms(gmms, structure_ids, n_features, method="average",
metric="euclidean", threshold=1e-6, criterion="distance",
cluster_parameters="mu", combine="mean", verbose=False):
"""Decomposes GMMs.
Parameters
----------
gmms : list of GaussianMixtureModel instances
structure_ids : list of str
Names for the individual structures.
n_features : int
Number of features.
method : str, optional, default "average"
Method to use in scipy.cluster.hierarchy.linkage
metric: str, optional, default, "euclidean"
Metric to use in scipy.cluster.hierarchy.linkage.
threshold : float, optional, default 1e-6
Threshold to use in scipy.cluster.hierarchy.fcluster. The
smaller the value the more clusters will be found.
criterion : str, optional, default "distance"
Criterion to use in scipy.cluster.hierarchy.fcluster.
cluster_parameters : str, optional, default "mu"
Defines what is to be clustered:
"mu" : Gaussians are clustered by their mean/mu values.
"cov": Gaussians are clustered by their covariance values.
"mu+cov": Gaussians are clustered by both their mean/mu and covariance values.
combine : str, optional, default "mean"
Specifies how Gaussians found to belong to the same cluster are to be
combined. Currently "mean" is the only recognized option.
"""
model_reference = {}
N = 0
all_covs = []
all_mus = []
for i,sid in enumerate(structure_ids):
model_reference[sid] = np.arange(N,N+gmms[i].weights_.shape[0])
N += gmms[i].weights_.shape[0]
covs = np.array([c.ravel() for c in gmms[i].covariances_])
mus = np.array([m for m in gmms[i].means_])
all_covs.append(covs)
all_mus.append(mus)
all_covs = np.vstack(all_covs)
all_mus = np.vstack(all_mus)
# find approximately unique components
if cluster_parameters == "mu":
p = all_mus
elif cluster_parameters == "cov":
p = all_covs
elif cluster_parameters == "mu+cov":
p = np.hstack((all_mus, all_covs))
else:
raise ValueError("Unexpected 'cluster_parameters' value.")
Z = hierarchy.linkage(p, method=method, metric=metric)
T = hierarchy.fcluster(Z, threshold, criterion=criterion) -1
# relabel clusters to keep original parameter ordering
T_map, _c = {}, 0
for _t in T:
if not _t in T_map:
T_map[_t] = _c
_c += 1
T = np.array([T_map[_t] for _t in T])
T_set = np.sort(np.unique(T))
# combine parameters and thus compress models
all_mus_clustered = []
all_covs_clustered = []
for t in T_set:
if combine == "mean":
_mu = all_mus[T==t,:].mean(axis=0)
_cov = all_covs[T==t,:].mean(axis=0).reshape((n_features,n_features))
else:
raise ValueError("'combine' ({}) not understood!".format(combine))
all_mus_clustered.append(_mu)
all_covs_clustered.append(_cov)
all_mus_clustered = np.array(all_mus_clustered)
all_covs_clustered = np.array(all_covs_clustered)
compressed_model_reference = {sid:np.sort(np.unique(T[vals])) for sid, vals in model_reference.items()}
if verbose:
print("model_reference:")
for sid in sorted(model_reference):
print(" initial ",model_reference[sid])
print(" compressed ",compressed_model_reference[sid])
# decompose by comparison
compressed_decomposed_models = get_decomposed_models(compressed_model_reference, verbose=verbose)
if verbose:
print("compressed_decomposed_models ",compressed_decomposed_models)
print("all_mus_clustered ",all_mus_clustered.shape)
print("all_covs_clustered ",all_covs_clustered.shape)
# compressed and decomposed (cd) parameters
mus_cd = [all_mus_clustered[m,:] for m in compressed_decomposed_models]
covs_cd = [all_covs_clustered[m,:] for m in compressed_decomposed_models]
# model origins
if verbose:
print("\nMaximally decomposed models:")
print("Components -> component appearances")
appearances = []
for _decomposed_model in compressed_decomposed_models:
_appearances = [_label for _label,_vals in compressed_model_reference.items()\
if any([_val in _decomposed_model for _val in _vals])]
if verbose:
print("%s -> %s"%(str(_decomposed_model), ", ".join(sorted(_appearances))))
appearances.append(_appearances)
return mus_cd, covs_cd, appearances
def predict_proba(self, X):
p = np.zeros((X.shape[0],len(self.fitted_gmms)))
for i,label in enumerate(sorted(self.fitted_gmms)):
p[:,i] = self.fitted_gmms[label](X)
Z = p.sum(axis=1)
return (p.T/Z).T
def show(self, X, y, axes=[0,1], title=None, xlim=(0,1), ylim=(0,1),
xlabel=None, ylabel=None, labelfs=16, tickfs=14, legendfs=12,
titlefs=18, data_labels=None, cmap=plt.cm.jet, figsize=(10,5)):
"""Plots."""
isarray = isinstance(X,np.ndarray)
islist = isinstance(X,list) and all([isinstance(x,np.ndarray) for x in X])
if islist:
_X = np.vstack(X)
else:
_X = np.copy(X)
uy = np.unique(y) if isarray else np.unique(np.hstack(y))
_y = np.copy(y) if isarray else np.hstack(y)
norm = mpl.colors.Normalize(vmin=uy.min(), vmax=uy.max())
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(121) if islist else fig.add_subplot(111)
ax.set_aspect("equal")
hs = [None for _uy in uy]
for i,_uy in enumerate(uy):
idx = np.where(_y==_uy)[0]
ax.scatter(_X[idx,axes[0]], _X[idx,axes[1]], label="class %i"%_uy,
alpha=.5, color=cmap(norm(_uy*np.ones(len(idx)))))
ax.scatter(self.means_[i][:,axes[0]], self.means_[i][:,axes[1]],
marker="+", color=cmap(norm(_uy*np.ones(len(self.means_[i])))))
if xlabel is None:
ax.set_xlabel("Feature {}".format(axes[0]), fontsize=labelfs)
else:
ax.set_xlabel(xlabel, fontsize=labelfs)
if ylabel is None:
ax.set_ylabel("Feature {}".format(axes[1]), fontsize=labelfs)
else:
ax.set_ylabel(ylabel, fontsize=labelfs)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_title("Inferred classes",fontsize=labelfs)
ax.tick_params(labelsize=tickfs)
ax.legend(loc=0, fontsize=legendfs)
if islist:
ax2 = fig.add_subplot(122)
ax2.set_aspect("equal")
norm = mpl.colors.Normalize(vmin=0, vmax=len(X))
hs = [None for _X in X]
for i,_X in enumerate(X):
if data_labels is None:
label = "trajectory #%i"%i
else:
label = data_labels[i]
ax2.scatter(_X[:,axes[0]], _X[:,axes[1]], label=label, c=cmap(norm(np.ones(_X.shape[0])*i)),
alpha=.5)
if xlabel is None:
ax2.set_xlabel("Feature {}".format(axes[0]), fontsize=labelfs)
else:
ax2.set_xlabel(xlabel, fontsize=labelfs)
if ylabel is None:
ax2.set_ylabel("Feature {}".format(axes[1]), fontsize=labelfs)
else:
ax2.set_ylabel(ylabel, fontsize=labelfs)
ax2.tick_params(labelsize=tickfs)
ax2.set_xlim(xlim)
ax2.set_ylim(ylim)
ax2.set_title("Trajectories", fontsize=labelfs)
ax2.legend(loc=0, fontsize=legendfs)
if not title is None:
plt.suptitle(title, fontsize=titlefs)
plt.tight_layout()
plt.show()
def predict(self, X, show=False, show_kwargs={}):
"""Predicts.
"""
isarray = isinstance(X,np.ndarray)
islist = isinstance(X,list) and all([isinstance(x,np.ndarray) for x in X])
if isarray:
p = self.predict_proba(X)
y = np.argmax(p,axis=1)
elif islist:
p = [self.predict_proba(x) for x in X]
y = [np.argmax(_p,axis=1) for _p in p]
else:
raise ValueError("X needs to be an array of a list of arrays!")
if show:
self.show(X,y, **show_kwargs)
return y
def pdf(self, X, label):
return self.fitted_gmms[label](X)
def explain_inference(self, abbreviate=True):
        assert not any([self.appearances is None, len(self.means_)==0]), "Class instance does not appear to have self.appearances and/or self.means_ set."
print("\nMaximally decomposed models:")
print("Components -> component appearances")
for i, _appearances in enumerate(self.appearances):
_decomposed_model = self.means_[i].shape if abbreviate else self.means_[i]
print("%s -> %s"%(", ".join(sorted(_appearances)),_decomposed_model))
def set_labels(self,labels):
"""Sets labels for the label_map.
        Associates model identifier (integer key of self.means_)
with a string, e.g. name for a type of crystal.
Parameters
----------
labels : dict
Example: {"fcc":0, "fccVac":[1,2]}
Note: 'labels' can also just set strings for a subset of model identifiers
since not always all names which should be assigned are immediately obvious.
The remaining labels are set to string versions of integer values.
"""
assert isinstance(labels,dict)
assert all([isinstance(_k,str) for _k in labels.keys()])
assert all([isinstance(_v,(int,list,tuple)) for _v in labels.values()])
if self.label_map is None:
self.label_map = dict()
values = set()
for _k, _v in labels.items():
self.label_map[_k] = _v
if isinstance(_v,int):
values.add(_v)
else:
for _v2 in _v:
values.add(_v2)
for _k in set(self.means_.keys()).difference(values):
self.label_map[str(_k)] = _k
def kmeans_wrapper(kmeans=None, centroids_=None):
"""Wraps the kmeans so it can be accessed.
"""
kmeans_given = (not kmeans is None)
parameters_given = not (centroids_ is None)
assert kmeans_given or parameters_given,\
"Either 'kmeans' needs to be given or 'centroids_'!"
if kmeans_given:
centroids_ = kmeans.cluster_centers_.copy()
elif parameters_given:
pass
else:
raise ValueError("WTF!")
def _kmeans(x):
Phi = spatial.distance.cdist(x, centroids_)
assert Phi.shape == (x.shape[0], centroids_.shape[0])
return Phi.min(axis=1)
return _kmeans
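# Minimal usage sketch for kmeans_wrapper (assuming a fitted sklearn KMeans `km`
# and a sample array `X` of shape (N, D)):
#   dist = kmeans_wrapper(kmeans=km)
#   d = dist(X)  # distance from each sample to its closest centroid, shape (N,)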
class KMeansClassifier:
"""Approximates the sample distribution for classification.
KMeans as implemented in sklearn classifies by individual centroids found during the
regression of the given samples. In this class the training set is
split by class and approximates the sample distributions for
each class. The resulting groups of centroids are then used for classification.
This class can also do decomposition of KMeans models.
    The decomposition is triggered when KMeansClassifier.fit
is passed an X that is a list of np.ndarrays. It is then assumed that each
array contains a superposition of underlying distributions, e.g. comparison of
LAMMPS trajectories of different crystals.
Parameters
----------
kmeans : instance of Kmeans, optional, default None
Required to regress data.
load_path : str, optional default None
To load results of a previous regression.
check_labels : boolean, optional, default True
Checks upon loading the classifier from disk whether the number of
Kmeans models matches the number of known labels or not.
Methods
-------
fit : fit(X, y)
Approximates the density distributions using the provided centroids.
X : float np.ndarray of shape (N, M)
y : int np.ndarray of shape (N,)
predict : predict(X, show=False, axes=[0,1])
        Requires previous execution of 'fit'. Returns the most probable class
for all samples. Optionally can also plot the classification along
two axes.
pdf : pdf(X, label)
Requires previous execution of 'fit'. Allows access to the
pdf for a specified class.
label : int
cluster : dict
        Settings for the clustering/decomposition of centroids. See self.decompose_kmeans
for more detail.
Example
-------
>>> kmc = atb.KMeansClassifier(KMeans(n_clusters=4), cluster={"threshold":.5})
>>> _D = 2
>>> _X = [np.vstack((stats.norm.rvs(size=(220,_D), loc=1, scale=1), stats.norm.rvs(size=(200,_D), loc=0, scale=1.))),
stats.norm.rvs(size=(400,_D), loc=3, scale=1.),stats.norm.rvs(size=(200,_D), loc=3, scale=1.)]
>>> kmc.fit(_X)
>>> kmc.explain_inference()
Maximally decomposed models:
Components -> component appearances
structure2 -> [[4.1626868 3.06136932]
[2.16839552 2.92352057]
[2.98831573 4.29232913]
[2.92131459 1.68174912]]
structure1 -> [[3.61949901 4.03556476]
[2.38409816 2.06630056]
[4.01029769 2.31575259]
[2.15251654 3.65100976]]
structure0 -> [[-0.32768532 0.96976257]
[ 1.39818763 0.33646176]
[-0.3366716 -0.75370139]
[ 1.2962009 2.09468514]]
"""
fitted_kmeans = dict()
labels = None
idx_class = None
centroids_ = dict() # keys are integers. correspond to order of 'appearances' (a list)
label_map = None # needs to be assigned manually after understanding the meaning of the centroids relative to the given structures
check_labels = True
    appearances = None # in which of the originally given crystals the centroids were observed
default_cluster_kwargs = {"method":"average","metric":"euclidean",
"threshold":1e-6,"criterion":"distance","combine":"mean"}
def __init__(self, kmeans=None, load_path=None, tol0=1e-6,
cluster = default_cluster_kwargs,
verbose=False, check_labels=False):
if not kmeans is None:
assert isinstance(kmeans,KMeans), "'kmeans' needs to be an instance of sklearn.cluster.KMeans!"
elif isinstance(load_path,str):
assert os.path.exists(load_path), "Given 'load_path' ({}) invalid!".format(load_path)
else:
raise ValueError("Either 'kmeans' or 'load_path' needs to be given!")
self.kmeans = kmeans
self.check_labels = check_labels
if kmeans is None:
self._load_parameters(load_path)
# decomposition related parameters
self.tol0 = tol0
for _k, _v in self.default_cluster_kwargs.items():
if not _k in cluster:
cluster[_k] = _v
self.cluster = cluster
# misc
self.verbose = verbose
def _load_parameters(self,load_path):
with open(load_path,"rb") as f:
params = pickle.load(f)
for label in sorted(params["centroids_"]):
self.fitted_kmeans[label] = kmeans_wrapper(centroids_=params["centroids_"][label])
self.centroids_ = params["centroids_"]
self.label_map = params["label_map"]
if "appearances" in params:
self.appearances = params["appearances"]
if self.check_labels:
assert not self.label_map is None, "No labels are given in the stored file!"
            assert len(self.fitted_kmeans) == len(self.label_map), "The number of KMeans models (%i) does not match the number of available labels (%i)!" % (len(self.fitted_kmeans),len(self.label_map))
def save(self,save_path):
params = {"centroids_":self.centroids_,
"label_map":self.label_map,
"appearances":self.appearances}
with open(save_path,"wb") as f:
pickle.dump(params,f)
def fit(self, X, y=None, label_map=None):
"""Fits.
Parameters
----------
X : np.ndarray of floats or list of np.ndarrays
Notes
-----
If X is an array then y needs to be given and the classifier is developed
directly over all the samples. If, otherwise, X is a list of arrays then
y is not needed and labels are generated by decomposing the arrays in X by
comparison of their computed pdfs.
"""
X_is_array = isinstance(X,np.ndarray)
X_is_list = isinstance(X,list)
if X_is_array:
assert isinstance(y,np.ndarray), "'X' is an array and thus 'y' needs to be an array!"
assert y.shape[0]==X.shape[0], "The array 'X' of shape {} is not matched by 'y' of shape {}!".format(X.shape,y.shape)
elif X_is_list:
assert all([isinstance(x,np.ndarray) for x in X]), "'X' is a list and all its entries need to be np.ndarrays! Got: {}".format([type(x) for x in X])
assert len(set([x.shape[1] for x in X]))==1, "All arrays in 'X' need to have the same number of features! Got: {}".format([x.shape[1] for x in X])
n_features = X[0].shape[1]
else:
raise ValueError("'X' input not understood. Needs to be an array of list of arrays!")
if X_is_array:
self.label_map=label_map
self.labels = np.unique(y)
self.idx_class = {k: np.where(y==k)[0] for k in self.labels}
for label in sorted(self.idx_class):
_km = copy.deepcopy(self.kmeans)
_km.fit(X[self.idx_class[label],:])
self.fitted_kmeans[label] = kmeans_wrapper(kmeans=_km)
self.centroids_[label] = _km.cluster_centers_
elif X_is_list:
Nstructures = len(X)
structure_ids = ["structure%s"%i for i in range(Nstructures)]
# fit structure pdfs
all_kmeans = [copy.deepcopy(self.kmeans).fit(X[i]) for i in range(Nstructures)]
# compressed and decomposed (cd) model
centroids_cd, appearances = self.decompose_kmeans(all_kmeans, structure_ids, n_features,
method=self.cluster["method"], metric=self.cluster["metric"],
threshold=self.cluster["threshold"], criterion=self.cluster["criterion"],
combine=self.cluster["combine"],
verbose=self.verbose)
N_cd = len(centroids_cd)
self.appearances = appearances.copy()
# re-fit models
kmeans_cd = []
_X = np.vstack(X)
if self.verbose:
print("number of resulting models: ",N_cd)
print("number of components in total: ",sum(c.shape[0] for c in centroids_cd))
for i in range(N_cd):
self.fitted_kmeans[i] = kmeans_wrapper(centroids_=centroids_cd[i])
self.centroids_[i] = centroids_cd[i]
else:
raise ValueError("Boink!")
@staticmethod
def decompose_kmeans(kmeans, structure_ids, n_features, method="average",
metric="euclidean", threshold=1e-6, criterion="distance",
combine="mean", verbose=False):
"""Decomposes K-Means models.
Parameters
----------
kmeans : list of KMeans instances
structure_ids : list of str
Names for the individual structures.
n_features : int
Number of features.
method : str, optional, default "average"
Method to use in scipy.cluster.hierarchy.linkage
metric: str, optional, default, "euclidean"
Metric to use in scipy.cluster.hierarchy.linkage.
threshold : float, optional, default 1e-6
Threshold to use in scipy.cluster.hierarchy.fcluster. The
smaller the value the more clusters will be found.
criterion : str, optional, default "distance"
Criterion to use in scipy.cluster.hierarchy.fcluster.
combine : str, optional, default "mean"
Specifies how Gaussians found to belong to the same cluster are to be
combined. Currently "mean" is the only recognized option.
Returns
-------
mus_cd : list of np.ndarrays of int
Each np.ndarray represents the collection of clusters
for a maximally decomposed set of centroids.
appearances : list of lists of str
Same order as mus_cd, indicating the origin of the collection
of clusters.
Example: Assume mus_cd = [np.array([0,1]), np.array([2])] and
appearances = [["structure0"], ["structure1", "structure2"]].
This means that structure1 and structure2 shared the same centroid
number 2 and structure0 was the only one for which centroids number
0 and 1 were found. For crystals that could mean that structure1 and
structure2 were really two files containing the same type of crystal.
"""
model_reference = {}
N = 0
all_covs = []
all_mus = []
for i, sid in enumerate(structure_ids):
model_reference[sid] = np.arange(N,N+kmeans[i].cluster_centers_.shape[0])
N += kmeans[i].cluster_centers_.shape[0]
mus = np.array([m for m in kmeans[i].cluster_centers_])
all_mus.append(mus)
all_mus = np.vstack(all_mus)
# find approximately unique components
p = all_mus
Z = hierarchy.linkage(p, method=method, metric=metric)
T = hierarchy.fcluster(Z, threshold, criterion=criterion) -1
# relabel clusters to keep original parameter ordering
T_map, _c = {}, 0
for _t in T:
if not _t in T_map:
T_map[_t] = _c
_c += 1
T = np.array([T_map[_t] for _t in T])
T_set = np.sort(np.unique(T))
# combine parameters and thus compress models
all_mus_clustered = []
for t in T_set:
if combine == "mean":
_mu = all_mus[T==t,:].mean(axis=0)
else:
raise ValueError("'combine' ({}) not understood!".format(combine))
all_mus_clustered.append(_mu)
all_mus_clustered = np.array(all_mus_clustered)
compressed_model_reference = {sid:np.sort(np.unique(T[vals])) for sid, vals in model_reference.items()}
if verbose:
print("model_reference:")
for sid in sorted(model_reference):
print(" initial ",model_reference[sid])
print(" compressed ",compressed_model_reference[sid])
# decompose by comparison
compressed_decomposed_models = get_decomposed_models(compressed_model_reference, verbose=verbose)
if verbose:
print("compressed_decomposed_models ",compressed_decomposed_models)
print("all_mus_clustered ",all_mus_clustered.shape)
# compressed and decomposed (cd) parameters
mus_cd = [all_mus_clustered[m,:] for m in compressed_decomposed_models]
# model origins
if verbose:
print("\nMaximally decomposed models:")
print("Components -> component appearances")
appearances = []
for _decomposed_model in compressed_decomposed_models:
_appearances = [_label for _label,_vals in compressed_model_reference.items()\
if any([_val in _decomposed_model for _val in _vals])]
if verbose:
print("%s -> %s"%(str(_decomposed_model), ", ".join(sorted(_appearances))))
appearances.append(_appearances)
return mus_cd, appearances
def predict(self, X, show=False, show_kwargs={}):
"""Predicts.
"""
isarray = isinstance(X,np.ndarray)
islist = isinstance(X,list) and all([isinstance(x,np.ndarray) for x in X])
if isarray:
p = np.array([self.fitted_kmeans[_k](X) for _k in sorted(self.fitted_kmeans)]).T
y = np.argmin(p,axis=1)
elif islist:
p = [np.array([self.fitted_kmeans[_k](x) for _k in sorted(self.fitted_kmeans)]).T for x in X]
y = [np.argmin(_p,axis=1) for _p in p]
else:
raise ValueError("X needs to be an array of a list of arrays!")
if show:
self.show(X,y, **show_kwargs)
return y
def show(self, X, y, axes=[0,1], title=None, xlim=(0,1), ylim=(0,1),
xlabel=None, ylabel=None, labelfs=16, tickfs=14, legendfs=12,
titlefs=18, data_labels=None, cmap=plt.cm.jet, figsize=(10,5)):
"""Plots."""
isarray = isinstance(X,np.ndarray)
islist = isinstance(X,list) and all([isinstance(x,np.ndarray) for x in X])
if islist:
_X = np.vstack(X)
else:
_X = np.copy(X)
uy = np.unique(y) if isarray else np.unique(np.hstack(y))
_y = np.copy(y) if isarray else np.hstack(y)
norm = mpl.colors.Normalize(vmin=uy.min(), vmax=uy.max())
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(121) if islist else fig.add_subplot(111)
ax.set_aspect("equal")
hs = [None for _uy in uy]
for i,_uy in enumerate(uy):
idx = np.where(_y==_uy)[0]
ax.scatter(_X[idx,axes[0]], _X[idx,axes[1]], label="class %i"%_uy,
alpha=.5, color=cmap(norm(_uy*np.ones(len(idx)))))
ax.scatter(self.centroids_[i][:,axes[0]], self.centroids_[i][:,axes[1]],
marker="+", edgecolor="r", color=cmap(norm(_uy*np.ones(len(self.centroids_[i])))),
)
if xlabel is None:
ax.set_xlabel("Feature {}".format(axes[0]), fontsize=labelfs)
else:
ax.set_xlabel(xlabel, fontsize=labelfs)
if ylabel is None:
ax.set_ylabel("Feature {}".format(axes[1]), fontsize=labelfs)
else:
ax.set_ylabel(ylabel, fontsize=labelfs)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_title("Inferred classes",fontsize=labelfs)
ax.tick_params(labelsize=tickfs)
ax.legend(loc=0, fontsize=legendfs)
if islist:
ax2 = fig.add_subplot(122)
ax2.set_aspect("equal")
norm = mpl.colors.Normalize(vmin=0, vmax=len(X))
hs = [None for _X in X]
for i,_X in enumerate(X):
if data_labels is None:
label = "trajectory #%i"%i
else:
label = data_labels[i]
ax2.scatter(_X[:,axes[0]], _X[:,axes[1]], label=label, c=cmap(norm(np.ones(_X.shape[0])*i)),
alpha=.5)
if xlabel is None:
ax2.set_xlabel("Feature {}".format(axes[0]), fontsize=labelfs)
else:
ax2.set_xlabel(xlabel, fontsize=labelfs)
if ylabel is None:
ax2.set_ylabel("Feature {}".format(axes[1]), fontsize=labelfs)
else:
ax2.set_ylabel(ylabel, fontsize=labelfs)
ax2.tick_params(labelsize=tickfs)
ax2.set_xlim(xlim)
ax2.set_ylim(ylim)
ax2.set_title("Trajectories", fontsize=labelfs)
ax2.legend(loc=0, fontsize=legendfs)
if not title is None:
plt.suptitle(title, fontsize=titlefs)
plt.tight_layout()
plt.show()
def explain_inference(self, abbreviate=True):
        assert not any([self.appearances is None, len(self.centroids_)==0]), "Class instance does not appear to have self.appearances and/or self.centroids_ set."
print("\nMaximally decomposed models:")
print("Components -> component appearances")
for i, _appearances in enumerate(self.appearances):
_decomposed_model = self.centroids_[i].shape if abbreviate else self.centroids_[i]
print("%s -> %s"%(", ".join(sorted(_appearances)),_decomposed_model))
def set_labels(self,labels):
"""Sets labels for the label_map.
Associates model identifier (integer value of self.centroids_)
with a string, e.g. name for a type of crystal.
Parameters
----------
labels : dict
Example: {"fcc":0, "fccVac":[1,2]}
Note: 'labels' can also just set strings for a subset of model identifiers
since not always all names which should be assigned are immediately obvious.
The remaining labels are set to string versions of integer values.
"""
assert isinstance(labels,dict)
assert all([isinstance(_k,str) for _k in labels.keys()])
assert all([isinstance(_v,(int,list,tuple)) for _v in labels.values()])
if self.label_map is None:
self.label_map = dict()
values = set()
for _k, _v in labels.items():
self.label_map[_k] = _v
if isinstance(_v,int):
values.add(_v)
else:
for _v2 in _v:
values.add(_v2)
for _k in set(self.centroids_.keys()).difference(values):
self.label_map[str(_k)] = _k
def assign_chemical_disorder_labels(atoms_dict, t_l_flat, Phi, mapper, species_flat, mapper_key=3,
count_params={"elements":["Al","Ni"]},
dis_elements=set(["Al","Ni"]), dis_label="gamma"):
idx_dis_t = np.array([i for i,v in enumerate(t_l_flat) \
if 'gamma' in v and not 'prime' in v],dtype=int)
idx_dis_elements = np.array([i for i,v in enumerate(count_params["elements"]) if v in dis_elements])
sorted_dis_elements = np.array(sorted(list(dis_elements)))
idx_Phi = np.where((Phi[:,mapper[mapper_key][idx_dis_elements]]>0).all(axis=1))[0]
idx_gamma = np.intersect1d(idx_dis_t,idx_Phi)
idx_rest = np.setdiff1d(idx_dis_t, idx_gamma) # also labeled 'gamma' but disobey element condition
t_l_flat[idx_gamma] = dis_label
for ix in idx_rest:
_phi = Phi[ix, mapper[mapper_key][idx_dis_elements]]
_phi /= _phi.sum()
_ix = np.argsort(_phi)[-1]
if np.isclose(_phi[_ix],1):
t_l_flat[ix] = sorted_dis_elements[_ix]
return t_l_flat
def batcher(X, t, n=1):
for i in range(1,len(X),n):
yield X[i:i+n,:], t[i:i+n]
def tf_softmax_dnn(X_in, t_in=None, mode="fit", path_ckpt="/tmp/dnn-softmax_model.ckpt",
learning_rate=.01, l1_scale=0.001, n_epochs=5, batch_size=75, n_print=5,
verbose=False, n_hidden1=300, n_hidden2=100, n_hidden3=75, n_hidden4=50,
n_hidden5=25, n_outputs=10, **kwargs):
tf.reset_default_graph()
n_samples, n_inputs = X_in.shape
# input and output nodes
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
t = tf.placeholder(tf.int64, shape=(None,) , name="t")
# weights initialization (to prevent vanishing gradient) - Xavier (logistic activation) initialization is the default
w_init = tf.contrib.layers.variance_scaling_initializer() # He initialization (ReLU activation)
# activation
activation = tf.nn.relu
# L1 regularization
my_dense_layer = partial(tf.layers.dense, activation=activation,
kernel_regularizer=tf.contrib.layers.l1_regularizer(l1_scale),
kernel_initializer=w_init)
# pre-packaged neural layer version
with tf.name_scope("dnn"):
# no batch normalization
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1", activation=activation,
kernel_initializer=w_init)
hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2", activation=activation,
kernel_initializer=w_init)
hidden3 = tf.layers.dense(hidden2, n_hidden3, name="hidden3", activation=activation,
kernel_initializer=w_init)
hidden4 = tf.layers.dense(hidden3, n_hidden4, name="hidden4", activation=activation,
kernel_initializer=w_init)
hidden5 = tf.layers.dense(hidden4, n_hidden5, name="hidden5", activation=activation,
kernel_initializer=w_init)
        logits = tf.layers.dense(hidden5, n_outputs, name="outputs")
# loss
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=t, logits=logits)
# without regularization
loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
optimizer = tf.train.AdamOptimizer(learning_rate)
# constant learning rate
update = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, t, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
# logging node
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
logdir = "tf_softmax/dnn-%s/" % (now,)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
if mode == "fit":
t0 = time()
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, t_batch in batcher(X_in, t_in, n=batch_size):
sess.run(update, feed_dict={X: X_batch, t: t_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, t:t_batch})
if verbose and (epoch%n_print==0 or epoch+1==n_epochs):
print("Epoch", epoch, "Train accuracy", acc_train)
save_path = saver.save(sess, path_ckpt)
if verbose:
print("training took %.3f s" % (time()-t0))
elif mode == "predict":
with tf.Session() as sess:
saver.restore(sess, path_ckpt)
return logits.eval(feed_dict={X:X_in})
else:
raise NotImplementedError
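# Minimal usage sketch for tf_softmax_dnn (illustrative; assumes MNIST-like inputs
# X_train/X_test of shape (N, 784) and integer class labels t_train):
#   tf_softmax_dnn(X_train, t_train, mode="fit", n_outputs=10, n_epochs=5, verbose=True)
#   logits = tf_softmax_dnn(X_test, mode="predict")
#   y_pred = np.argmax(logits, axis=1)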
def tf_softmax_dnn_cnn(X_in, t_in=None, mode="fit", path_ckpt="/tmp/dnn-softmax_model.ckpt",
learning_rate=.01, l1_scale=0.001, n_epochs=5, batch_size=75, n_print=5,
verbose=False, n_hidden=300, n_outputs=10, height=28, width=28, channels=1, **kwargs):
tf.reset_default_graph()
n_samples, n_inputs = X_in.shape
# input and output nodes
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])
t = tf.placeholder(tf.int64, shape=(None,) , name="t")
# weights initialization (to prevent vanishing gradient) - Xavier (logistic activation) initialization is the default
w_init = tf.contrib.layers.variance_scaling_initializer() # He initialization (ReLU activation)
# activation
activation = tf.nn.relu
# L1 regularization
my_dense_layer = partial(tf.layers.dense, activation=activation,
kernel_regularizer=tf.contrib.layers.l1_regularizer(l1_scale),
kernel_initializer=w_init)
with tf.name_scope("cnn"):
conv = tf.layers.conv2d(X_reshaped, filters=32, kernel_size=3, strides=[1,1], padding="SAME", name="conv") # strides=[1,2,2,1]
conv1 = tf.layers.conv2d(conv, filters=64, kernel_size=3, strides=[2,2], padding="SAME", name="conv1")
pool = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
pool_flat = tf.reshape(pool, shape=[-1, pool.get_shape()[1:4].num_elements()])
assert np.prod(pool.shape[1:]) == pool_flat.shape[1], "Shape mismatch pool (%s) != pool_flat (%s)" % (pool.shape, pool_flat.shape)
hidden = tf.layers.dense(pool_flat, n_hidden, activation=tf.nn.relu, name="hidden")
logits = tf.layers.dense(hidden, n_outputs, name="outputs")
# loss
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=t, logits=logits)
# without regularization
loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
optimizer = tf.train.AdamOptimizer(learning_rate)
# constant learning rate
update = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, t, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
# logging node
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
logdir = "tf_softmax/dnn-%s/" % (now,)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
if mode == "fit":
t0 = time()
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, t_batch in batcher(X_in, t_in, n=batch_size):
sess.run(update, feed_dict={X: X_batch, t: t_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, t:t_batch})
if verbose and (epoch%n_print==0 or epoch+1==n_epochs):
print("Epoch", epoch, "Train accuracy", acc_train)
save_path = saver.save(sess, path_ckpt)
if verbose:
print("training took %.3f s" % (time()-t0))
elif mode == "predict":
with tf.Session() as sess:
saver.restore(sess, path_ckpt)
return logits.eval(feed_dict={X:X_in})
else:
raise NotImplementedError
def tf_softmax_dnn_rnn(X_in, t_in=None, mode="fit", path_ckpt="/tmp/dnn-softmax_model.ckpt",
learning_rate=.01, l1_scale=0.001, n_epochs=5, batch_size=75, n_print=5,
verbose=False, n_neurons=300, n_outputs=10, n_steps=28, n_inputs=28, **kwargs):
tf.reset_default_graph()
n_samples, _n_inputs = X_in.shape
# input and output nodes
assert n_inputs*n_steps == _n_inputs
X_in = X_in.reshape((-1, n_steps, n_inputs))
X = tf.placeholder(tf.float32, shape=(None, n_steps, n_inputs), name="X")
#X_reshaped = tf.reshape(X, shape=[-1, n_steps, n_inputs,])
#X = tf.placeholder(tf.float32, [None, n_steps, n_inputs], name="X")
t = tf.placeholder(tf.int64, shape=(None,) , name="t")
with tf.name_scope("rnn"):
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
logits = tf.layers.dense(states, n_outputs, name="outputs")
# loss
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=t, logits=logits)
# without regularization
loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
optimizer = tf.train.AdamOptimizer(learning_rate)
# constant learning rate
update = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, t, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
# logging node
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
logdir = "tf_softmax/dnn-%s/" % (now,)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
if mode == "fit":
t0 = time()
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, t_batch in batcher(X_in, t_in, n=batch_size):
sess.run(update, feed_dict={X: X_batch, t: t_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, t:t_batch})
if verbose and (epoch%n_print==0 or epoch+1==n_epochs):
print("Epoch", epoch, "Train accuracy", acc_train)
save_path = saver.save(sess, path_ckpt)
if verbose:
print("training took %.3f s" % (time()-t0))
elif mode == "predict":
with tf.Session() as sess:
saver.restore(sess, path_ckpt)
return logits.eval(feed_dict={X:X_in})
else:
raise NotImplementedError
class DNNSoftmaxClassifier(sklearn.base.BaseEstimator, sklearn.base.ClassifierMixin):
dnn_params = dict(n_hidden1 = 300,
n_hidden2 = 100,
n_hidden3 = 75,
n_hidden4 = 50,
n_hidden5 = 25,
learning_rate = .01,
path_ckpt = "/tmp/dnn-softmax_model.ckpt",
l1_scale = 0.001,
n_epochs = 5,
batch_size = 75,
n_print = 5,
verbose = False,)
dnn_type = "simple"
dnn_impl = {"simple":tf_softmax_dnn,
"cnn":tf_softmax_dnn_cnn,
"rnn":tf_softmax_dnn_rnn}
def __init__(self, dnn_type=None, dnn_params=None):
if (not dnn_type is None) and (dnn_type in self.dnn_impl):
self.dnn_type = dnn_type
if (not dnn_params is None) and isinstance(dnn_params,dict):
self.dnn_params = dnn_params
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
X, y = sklearn.utils.validation.check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
sklearn.utils.multiclass.check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
classes_ = self.classes_
self.dnn_params["n_outputs"] = n_classes
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.dnn_impl[self.dnn_type](X, t_in=y, mode="fit", **self.dnn_params)
return self
def decision_function(self, X):
return self.dnn_impl[self.dnn_type](X, mode="predict", **self.dnn_params)
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e calculate the probability
of each class assuming it to be positive using the logistic function.
and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return sklearn.utils.extmath.softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
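# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes that the
# batcher() helper used by the tf_softmax_dnn* functions and the module-level
# numpy/sklearn/tensorflow imports are defined earlier in this file; the
# random data below only illustrates the sklearn-style fit/predict API.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    X_demo = np.random.rand(200, 784)            # MNIST-like flattened 28x28 inputs
    y_demo = np.random.randint(0, 10, size=200)  # ten integer class labels
    clf = DNNSoftmaxClassifier(dnn_type="simple")
    clf.fit(X_demo, y_demo)            # trains and writes the checkpoint to path_ckpt
    print(clf.predict(X_demo[:5]))     # restores the checkpoint and predicts labels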
|
[
"11818904+eschmidt42@users.noreply.github.com"
] |
11818904+eschmidt42@users.noreply.github.com
|
7775a4ca239d36b16639f56bbc191d07f9d8f049
|
d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4
|
/AtCoder/AGC-C/037probC.py
|
e22e44919bb25063bbc000a6d8946f829ba4983e
|
[] |
no_license
|
wattaihei/ProgrammingContest
|
0d34f42f60fa6693e04c933c978527ffaddceda7
|
c26de8d42790651aaee56df0956e0b206d1cceb4
|
refs/heads/master
| 2023-04-22T19:43:43.394907
| 2021-05-02T13:05:21
| 2021-05-02T13:05:21
| 264,400,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
R = [B[i]-A[i] for i in range(N)]
D = []
for i in range(N-1):
D.append(A[i-1]+A[i+1])
D.append(A[N-2]+A[0])
for i, r in enumerate(R):
|
[
"wattaihei.rapyuta@gmail.com"
] |
wattaihei.rapyuta@gmail.com
|
7689cbd6ca525cbb5c93bcf74ecb1f44600ec7cb
|
b3f5913b9b466ffbcabc0feaef291fa0346caf74
|
/src/Sheet_Squeeze.py
|
fbf721c518336ea5415f60814a18d03619ac197d
|
[] |
no_license
|
boigman/Sheet_Squeeze
|
ce600e88d057aa00d69a7ad46a15be1e2d35b652
|
c5cf73e077b441c5f90f670a2808a817b40c89d5
|
refs/heads/master
| 2022-04-28T16:21:59.362404
| 2020-05-06T14:03:55
| 2020-05-06T14:03:55
| 261,755,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,397
|
py
|
from PIL import Image
import png
#import ntpath
#import os
import sys
import time
white_row_limit = 5
vbar_row_limit = 10
infile = sys.argv[1]
print("Input file: "+infile.replace("\\\\","\\"))
outfile=infile.replace(".png","_sqz.png")
r=png.Reader(filename=infile)
xwidth, yheight, pixels, meta = r.asDirect()
#print("pixels type="+str(type(pixels)))
print("Converting pixels to list()...")
tpixels=list(pixels)
opixels=[]
oidx=0
white_row_count = 0
vbar_row_count = 0
min_row = 99999
max_row = -99999
min_vbar_row = 99999
max_vbar_row = -99999
line_break = 99999
print("Processing...")
#w = png.Writer(width=len(tpixels[0]), height=len(tpixels), greyscale=False, interlace=0, bitdepth=8)
#f=open(outfile+'x.png', 'wb')
#w.write(f, tpixels)
#f.close()
for xh in range(len(tpixels)):
#for xh in range(350):
white_count = 0
black_lines = []
white_lines = []
bline_len = 0
wline_len = 0
min_dark_col = 99999
max_dark_col = -99999
left_bar_count = 0
if xh > 1071 and xh < 1100:
xxx = 0
if xh == 1511:
xxx = 0
for xw in range(0,xwidth*3,3):
try:
# if tpixels[xh][xw]==255 and tpixels[xh][xw+1]==255 and tpixels[xh][xw+2]==255:
if tpixels[xh][xw]>250 and tpixels[xh][xw+1]>250 and tpixels[xh][xw+2]>250:
#jj = 1
white_count += 1
tpixels[xh][xw]=255
tpixels[xh][xw+1]=255
tpixels[xh][xw+2]=255
else:
min_dark_col = min(min_dark_col, xw)
max_dark_col = max(max_dark_col, xw)
# if (max_dark_col - min_dark_col)/3 > 5 or xw/3 > 50:
# break
if (tpixels[xh][xw]==255 and tpixels[xh][xw+1]==255 and tpixels[xh][xw+2]==255) or (tpixels[xh][xw]==0 and tpixels[xh][xw+1]==0 and tpixels[xh][xw+2]==0):
if tpixels[xh][xw]==255 and tpixels[xh][xw+1]==255 and tpixels[xh][xw+2]==255:
if bline_len>0:
black_lines.append(bline_len)
bline_len = 0
wline_len +=1
else:
if wline_len>0:
white_lines.append(wline_len)
wline_len = 0
bline_len +=1
else:
black_lines = []
white_lines = []
except:
print("Invalid Index: wh="+str(xh)+", xh="+str(xw))
# Detect dashed line (line break) - Lots of pure white and black segments approximately equal in number
if (len(black_lines)>50 and len(white_lines)>50 and abs(len(black_lines) - len(white_lines))<4
and black_lines[0]<20 and white_lines[0]<20):
print("Line break at: "+str(xh))
line_break = xh
white_count=xwidth
if white_count==xwidth:
# print('WhiteRow: '+str(xh))
white_row_count += 1
vbar_row_count = 0
if max_vbar_row > 0 and min_vbar_row < max_vbar_row:
print("VBar Rows "+str(min_vbar_row)+" - "+str(max_vbar_row)+ " squeezed out")
min_vbar_row = 99999
max_vbar_row = -99999
if white_row_count < white_row_limit:
opixels += [tuple(tpixels[xh])]
else:
min_row=min(min_row, xh)
max_row=max(max_row, xh)
# print("Row "+str(xh)+" over limit")
elif xwidth - white_count < 10 and max_dark_col/3 < 50 and (max_dark_col - min_dark_col) >= 0 and (max_dark_col - min_dark_col)/3 < 5 :
vbar_row_count += 1
white_row_count = 0
if max_row > 0 and min_row < max_row:
print("White Rows "+str(min_row)+" - "+str(max_row)+ " squeezed out")
min_row = 99999
max_row = -99999
if vbar_row_count < vbar_row_limit:
opixels += [tuple(tpixels[xh])]
else:
min_vbar_row=min(min_vbar_row, xh)
max_vbar_row=max(max_vbar_row, xh)
else:
white_row_count = 0
vbar_row_count = 0
if max_row > 0 and min_row < max_row:
print("White Rows "+str(min_row)+" - "+str(max_row)+ " squeezed out")
min_row = 99999
max_row = -99999
elif max_vbar_row > 0 and min_vbar_row < max_vbar_row:
print("VBar Rows "+str(min_vbar_row)+" - "+str(max_vbar_row)+ " squeezed out")
min_vbar_row = 99999
max_vbar_row = -99999
if line_break < xh: #Mark as line break line with red dash
for xw in range(0,10*3,3):
tpixels[xh][xw]=255
tpixels[xh][xw+1]=0
tpixels[xh][xw+2]=0
line_break = 99999
opixels += [tuple(tpixels[xh])]
if max_row > 0 and min_row < max_row:
print("White Rows "+str(min_row)+" - "+str(max_row)+ " squeezed out")
min_row = 99999
max_row = -99999
elif max_vbar_row > 0 and min_vbar_row < max_vbar_row:
print("VBar Rows "+str(min_vbar_row)+" - "+str(max_vbar_row)+ " squeezed out")
min_vbar_row = 99999
max_vbar_row = -99999
w = png.Writer(width=xwidth, height=len(opixels), greyscale=False, interlace=0, bitdepth=8)
f=open(outfile, 'wb')
w.write(f, opixels)
f.close()
print(str(len(opixels))+" lines written to: "+outfile.replace("\\\\","\\"))
print(str(len(tpixels)-len(opixels))+" lines squeezed out")
print("Finished")
time.sleep(5)
|
[
"Dave.Stauffer@spireenergy.com"
] |
Dave.Stauffer@spireenergy.com
|
80645dfd46c51e1594ab5bcf0406d091b133d28a
|
6023b767f104d51288fed3e872777f4b974f083d
|
/6603.py
|
0afb3699231152e97203bb622340a457cd869f0e
|
[] |
no_license
|
andyjung2104/BOJ-source-codes
|
daff7b804469473b898fb58500a1c8cff542b52e
|
a97b5e2b4bfb6d26fac67bb94d3d259836d875d3
|
refs/heads/main
| 2023-07-19T02:30:13.806978
| 2021-09-13T01:03:06
| 2021-09-13T01:03:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from itertools import combinations
import sys
while True:
a=list(map(int,sys.stdin.readline().split()))
if a[0]==0:break
n=a.pop(0)
y=combinations(a,6)
for i in y:print(*i)
print()
|
[
"noreply@github.com"
] |
andyjung2104.noreply@github.com
|
f531bdaa3135afa760dfda6ab0d89945b68fe682
|
139d4c998e7e06fd160792a485bfc20190c80df1
|
/aliceroughdraft.py
|
c76922b1c92b535ed9031e1b7c5b3c4d87c1f400
|
[] |
no_license
|
MatthewSpecht/memory-game
|
afcb0d6e8cdf973514834e334b67c445429aee03
|
dbf2f714953af499c1cc42fcbadf7712dd67fc18
|
refs/heads/main
| 2023-03-13T03:38:35.590864
| 2021-03-09T17:08:54
| 2021-03-09T17:08:54
| 334,207,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,361
|
py
|
import tkinter as tk
from tkinter import messagebox
import random
from tkinter import PhotoImage
root = tk.Tk()
root.title("Memory Game")
# root.geometery("550x550")
# matches = [PhotoImage(file = "fruit-pictures/apple.png"),PhotoImage(file = "fruit-pictures/orange.png"),PhotoImage(file = "fruit-pictures/banana.png"),PhotoImage(file = "fruit-pictures/grape.png"), PhotoImage(file = "fruit-pictures/peach.png"),PhotoImage(file = "fruit-pictures/pear.png"),PhotoImage(file = "fruit-pictures/watermelon.png"),PhotoImage(file = "fruit-pictures/strawberry.png")]*2
#matches = [PhotoImage(file = "fruit-pictures/banana.gif")]*16
matches = [1,2,3,4,5,6,7,8]*2  # uncommented so the script runs; reset() below rebuilds the same deck
random.shuffle(matches)
my_frame = tk.Frame(root)
my_frame.pack(pady=10)
count = 0
answer_list = []
answer_dict ={}
def button_click(b, i):
global count, answer_list, answer_dict
if b["text"] == '?' and count < 2:
b["text"] = matches[i]
        # b.photo = matches[i]  # keep a reference to it
# add num to answer list
answer_list.append(i)
# add button & num to answer dict
answer_dict[b] = matches[i]
#turn +1
count+=1
#print answer_list ##keeps track of tile
#print answer_dict ##keeps track of num on tile
if len(answer_list) == 2:
if matches[answer_list[0]] == matches[answer_list[1]]:
my_label.config(text="MATCH!")
for key in answer_dict:
key["state"] = "disabled"
count = 0
answer_list = []
answer_dict = {}
else:
my_label.config(text="NO!")
count = 0
answer_list = []
messagebox.showinfo("Incorrect!","Incorrect")
for key in answer_dict:
key["text"] = '?'
key.configure
answer_dict = {}
def reset():
    global matches  # reshuffle the shared deck so a new game gets a new layout
    matches = [1,2,3,4,5,6,7,8]*2
    random.shuffle(matches)
my_label.config(text="")
buttonlist = [b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15]
for button in buttonlist:
button.config(text='?', state="normal")
restart= tk.Button(my_frame, text='Restart', width= 6, height = 2, command = reset).grid(row=4, column=0, columnspan = 1, sticky = tk.W)
# x = PhotoImage(file = "fruit-pictures/banana.png")
b0= tk.Button(my_frame, text = '?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b0, 0))
b1= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b1, 1))
b2= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b2, 2))
b3= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b3, 3))
b4= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b4, 4))
b5= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b5, 5))
b6= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b6, 6))
b7= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b7, 7))
b8= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b8, 8))
b9= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b9, 9))
b10= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b10, 10))
b11= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b11, 11))
b12= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b12, 12))
b13= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b13, 13))
b14= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b14, 14))
b15= tk.Button(my_frame, text='?', font=("Helvetica", 10), bg = 'Red', command = lambda: button_click(b15, 15))
b0.grid(row=0, column=0, columnspan = 1, sticky = tk.W)
b1.grid(row=0, column=1, columnspan = 1, sticky = tk.W)
b2.grid(row=0, column=2, columnspan = 1, sticky = tk.W)
b3.grid(row=0, column=3, columnspan = 1, sticky = tk.W)
b4.grid(row=1, column=0, columnspan = 1, sticky = tk.W)
b5.grid(row=1, column=1, columnspan = 1, sticky = tk.W)
b6.grid(row=1, column=2, columnspan = 1, sticky = tk.W)
b7.grid(row=1, column=3, columnspan = 1, sticky = tk.W)
b8.grid(row=2, column=0, columnspan = 1, sticky = tk.W)
b9.grid(row=2, column=1, columnspan = 1, sticky = tk.W)
b10.grid(row=2, column=2, columnspan = 1, sticky = tk.W)
b11.grid(row=2, column=3, columnspan = 1, sticky = tk.W)
b12.grid(row=3, column=0, columnspan = 1, sticky = tk.W)
b13.grid(row=3, column=1, columnspan = 1, sticky = tk.W)
b14.grid(row=3, column=2, columnspan = 1, sticky = tk.W)
b15.grid(row=3, column=3, columnspan = 1, sticky = tk.W)
my_label = tk.Label(root, text = ' ')
my_label.pack(pady = 20)
root.mainloop()
|
[
"720azpeng@gmail.com"
] |
720azpeng@gmail.com
|
fe9e28083d4916e855a1cf96981aa7e1a10e9203
|
03c14cd6730d9c90f9ac79c516e0e0f2783a2662
|
/trap_rain_water_leetcode.py
|
528289d5d41e686bbbc81c822a761f31645265ef
|
[] |
no_license
|
ismailej/General-Programming
|
4458d70a31565331d2773207edc4df0ee69a753f
|
3c5822617a4b19422be9def02d657e09cf968524
|
refs/heads/master
| 2021-01-10T10:19:47.717434
| 2016-05-01T13:41:07
| 2016-05-01T13:41:07
| 53,746,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
class Solution(object):
def calculate(self, height, start, count, ref):
print('Reached calculate with start, end', start, count)
store = 0
#ref = height[start]
#print(ref)
i = start + 1
end = count - 1
while i <= end:
store += ref - height[i]
i += 1
#print(store)
#print('Returned value - ', store)
return store
def calculate_max_array(self, height):
i = len(height) - 1
elem_max = -1
max_array = [-1]*len(height)
while i >= 0:
if elem_max < height[i]:
max_array[i] = elem_max
elem_max = height[i]
else:
max_array[i] = elem_max
i -= 1
#print(max_array)
return max_array
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
if height == []:
return 0
start = 0
count = 1
length = len(height)
total = 0
max_array = self.calculate_max_array(height)
ref = min(height[start], max_array[start])
while(count < length):
if ref <= height[count]:
total += self.calculate(height, start, count, ref)
start = count
ref = min(height[start], max_array[start])
count += 1
return total
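# Quick sanity check (added; not part of the original submission). The classic
# LeetCode example should trap 6 units of water.
if __name__ == "__main__":
    print(Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))  # -> 6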
|
[
"ejismail@gmail.com"
] |
ejismail@gmail.com
|
83f27e5da39ecc5798b8676ef45a90581bb4630b
|
2c02ad2867e86aadc14fa63fb6f11b380b7a38b8
|
/src/draw_q.py
|
5182519501edb76b5c8ac5cee6e78ed6a57cf01c
|
[] |
no_license
|
xiecailang/leetcodes
|
8ac61846d1b9c1b2f9d1638d8dda7f04fc47a9b7
|
49f00cd47344c3861ae8a0b25db9f0a0a0a2ae3e
|
refs/heads/master
| 2020-03-27T19:21:18.834973
| 2018-10-30T03:33:57
| 2018-10-30T03:33:57
| 146,983,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,797
|
py
|
#画家小Q
n, m = 4, 4
str_ = []
str_.append(list('YXXB'))
str_.append(list('XYGX'))
str_.append(list('XBYY'))
str_.append(list('BXXY'))
cnt = 0
def findY(i, j):
if i >=0 and i < n and j >=0 and j < m and (str_[i][j] == 'Y' or str_[i][j] == 'G'):
if str_[i][j] == 'G':
str_[i][j] = 'B'
else:
str_[i][j] = 'X'
findY(i+1, j+1)
return
def findB(i, j):
if i >=0 and i < n and j >=0 and j < m and (str_[i][j] == 'B' or str_[i][j] == 'G'):
if str_[i][j] == 'G':
str_[i][j] = 'Y'
else:
str_[i][j] = 'X'
findB(i+1, j-1)
return
for i in range(n):
for j in range(m):
if str_[i][j] == 'Y':
findY(i, j)
cnt += 1
elif str_[i][j] == 'B':
findB(i, j)
cnt += 1
elif str_[i][j] == 'G':
findY(i, j)
cnt += 1
str_[i][j] = 'B'
findB(i, j)
cnt += 1
print(cnt)
cnt = 0
mark = [([0] * n) for i in range(m)]
for i in range(n):
for j in range(m):
if mark[i][j] == 2 or (mark[i][j] == 1 and str_[i][j] != 'G'):
continue
if str_[i][j] == 'Y':
p, q = i+1, j+1
mark[i][j] += 1
cnt += 1
while p < n and q < m:
if not mark[p][q]:
                    if str_[p][q] == 'Y' or str_[p][q] == 'G':
mark[p][q] += 1
else:
break
p += 1
q += 1
elif str_[i][j] == 'B':
p, q = i + 1, j - 1
mark[i][j] += 1
cnt += 1
            while p < n and q >= 0:  # p grows downwards, so bound it by n
if not mark[p][q]:
                    if str_[p][q] == 'B' or str_[p][q] == 'G':
mark[p][q] += 1
else:
break
p += 1
q -= 1
elif str_[i][j] == 'G':
p, q = i+1, j+1
if not mark[i][j]:
cnt += 2
elif mark[i][j] == 1:
cnt += 1
else:
continue
mark[i][j] += 1
while p < n and q < m:
if not mark[p][q]:
                    if str_[p][q] == 'Y' or str_[p][q] == 'G':
mark[p][q] += 1
else:
break
p += 1
q += 1
p, q = i + 1, j-1
            while p < n and q >= 0:  # p grows downwards, so bound it by n
if not mark[p][q]:
                    if str_[p][q] == 'B' or str_[p][q] == 'G':
mark[p][q] += 1
else:
break
p += 1
q -= 1
else:
mark[i][j] = 1
print(cnt)
|
[
"cailangxie@sina.com"
] |
cailangxie@sina.com
|
d554dc494835296bc0a6175544783f5452d6a9e1
|
83ecabbeea8b17a3fd9b8142062f09c76198e232
|
/wso2_apim_publisherclient/models/api_object_2.py
|
703f56a19f1ec2597a91ddf4689042a03983c124
|
[] |
no_license
|
junetigerlee/python-wso2-apim-publisherclient
|
387f581bb48645b35f256159cce0031babd493f0
|
5e1cadeab4eb37ebc93e46b45d6d1f98f4fdfde9
|
refs/heads/master
| 2021-01-01T16:11:45.362270
| 2017-07-25T06:20:46
| 2017-07-25T06:20:46
| 97,783,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,626
|
py
|
# coding: utf-8
"""
WSO2 API Manager - Publisher API
This specifies a **RESTful API** for WSO2 **API Manager** - Publisher. Please see [full swagger definition](https://raw.githubusercontent.com/wso2/carbon-apimgt/v6.0.4/components/apimgt/org.wso2.carbon.apimgt.rest.api.publisher/src/main/resources/publisher-api.yaml) of the API which is written using [swagger 2.0](http://swagger.io/) specification.
OpenAPI spec version: 0.11.0
Contact: architecture@wso2.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class APIObject2(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'description': 'str',
'context': 'str',
'version': 'str',
'provider': 'str',
'api_definition': 'str',
'wsdl_uri': 'str',
'status': 'str',
'response_caching': 'str',
'cache_timeout': 'int',
'destination_stats_enabled': 'str',
'is_default_version': 'bool',
'type': 'str',
'transport': 'list[str]',
'tags': 'list[str]',
'tiers': 'list[str]',
'max_tps': 'ApisMaxTps',
'thumbnail_uri': 'str',
'visibility': 'str',
'visible_roles': 'list[str]',
'visible_tenants': 'list[str]',
'endpoint_config': 'str',
'endpoint_security': 'ApisEndpointSecurity',
'gateway_environments': 'str',
'sequences': 'list[Sequence1]',
'subscription_availability': 'str',
'subscription_available_tenants': 'list[str]',
'business_information': 'ApisBusinessInformation',
'cors_configuration': 'ApisCorsConfiguration'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'context': 'context',
'version': 'version',
'provider': 'provider',
'api_definition': 'apiDefinition',
'wsdl_uri': 'wsdlUri',
'status': 'status',
'response_caching': 'responseCaching',
'cache_timeout': 'cacheTimeout',
'destination_stats_enabled': 'destinationStatsEnabled',
'is_default_version': 'isDefaultVersion',
'type': 'type',
'transport': 'transport',
'tags': 'tags',
'tiers': 'tiers',
'max_tps': 'maxTps',
'thumbnail_uri': 'thumbnailUri',
'visibility': 'visibility',
'visible_roles': 'visibleRoles',
'visible_tenants': 'visibleTenants',
'endpoint_config': 'endpointConfig',
'endpoint_security': 'endpointSecurity',
'gateway_environments': 'gatewayEnvironments',
'sequences': 'sequences',
'subscription_availability': 'subscriptionAvailability',
'subscription_available_tenants': 'subscriptionAvailableTenants',
'business_information': 'businessInformation',
'cors_configuration': 'corsConfiguration'
}
def __init__(self, id=None, name=None, description=None, context=None, version=None, provider=None, api_definition=None, wsdl_uri=None, status=None, response_caching=None, cache_timeout=None, destination_stats_enabled=None, is_default_version=None, type='HTTP', transport=None, tags=None, tiers=None, max_tps=None, thumbnail_uri=None, visibility=None, visible_roles=None, visible_tenants=None, endpoint_config=None, endpoint_security=None, gateway_environments=None, sequences=None, subscription_availability=None, subscription_available_tenants=None, business_information=None, cors_configuration=None):
"""
APIObject2 - a model defined in Swagger
"""
self._id = None
self._name = None
self._description = None
self._context = None
self._version = None
self._provider = None
self._api_definition = None
self._wsdl_uri = None
self._status = None
self._response_caching = None
self._cache_timeout = None
self._destination_stats_enabled = None
self._is_default_version = None
self._type = None
self._transport = None
self._tags = None
self._tiers = None
self._max_tps = None
self._thumbnail_uri = None
self._visibility = None
self._visible_roles = None
self._visible_tenants = None
self._endpoint_config = None
self._endpoint_security = None
self._gateway_environments = None
self._sequences = None
self._subscription_availability = None
self._subscription_available_tenants = None
self._business_information = None
self._cors_configuration = None
if id is not None:
self.id = id
self.name = name
if description is not None:
self.description = description
self.context = context
self.version = version
if provider is not None:
self.provider = provider
if api_definition is not None:
self.api_definition = api_definition
if wsdl_uri is not None:
self.wsdl_uri = wsdl_uri
if status is not None:
self.status = status
if response_caching is not None:
self.response_caching = response_caching
if cache_timeout is not None:
self.cache_timeout = cache_timeout
if destination_stats_enabled is not None:
self.destination_stats_enabled = destination_stats_enabled
self.is_default_version = is_default_version
self.type = type
self.transport = transport
if tags is not None:
self.tags = tags
self.tiers = tiers
if max_tps is not None:
self.max_tps = max_tps
if thumbnail_uri is not None:
self.thumbnail_uri = thumbnail_uri
self.visibility = visibility
if visible_roles is not None:
self.visible_roles = visible_roles
if visible_tenants is not None:
self.visible_tenants = visible_tenants
self.endpoint_config = endpoint_config
if endpoint_security is not None:
self.endpoint_security = endpoint_security
if gateway_environments is not None:
self.gateway_environments = gateway_environments
if sequences is not None:
self.sequences = sequences
if subscription_availability is not None:
self.subscription_availability = subscription_availability
if subscription_available_tenants is not None:
self.subscription_available_tenants = subscription_available_tenants
if business_information is not None:
self.business_information = business_information
if cors_configuration is not None:
self.cors_configuration = cors_configuration
@property
def id(self):
"""
Gets the id of this APIObject2.
UUID of the api registry artifact
:return: The id of this APIObject2.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this APIObject2.
UUID of the api registry artifact
:param id: The id of this APIObject2.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this APIObject2.
:return: The name of this APIObject2.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this APIObject2.
:param name: The name of this APIObject2.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def description(self):
"""
Gets the description of this APIObject2.
:return: The description of this APIObject2.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this APIObject2.
:param description: The description of this APIObject2.
:type: str
"""
self._description = description
@property
def context(self):
"""
Gets the context of this APIObject2.
:return: The context of this APIObject2.
:rtype: str
"""
return self._context
@context.setter
def context(self, context):
"""
Sets the context of this APIObject2.
:param context: The context of this APIObject2.
:type: str
"""
if context is None:
raise ValueError("Invalid value for `context`, must not be `None`")
self._context = context
@property
def version(self):
"""
Gets the version of this APIObject2.
:return: The version of this APIObject2.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this APIObject2.
:param version: The version of this APIObject2.
:type: str
"""
if version is None:
raise ValueError("Invalid value for `version`, must not be `None`")
self._version = version
@property
def provider(self):
"""
Gets the provider of this APIObject2.
        If the provider value is not given, the user invoking the API will be used as the provider.
:return: The provider of this APIObject2.
:rtype: str
"""
return self._provider
@provider.setter
def provider(self, provider):
"""
Sets the provider of this APIObject2.
        If the provider value is not given, the user invoking the API will be used as the provider.
:param provider: The provider of this APIObject2.
:type: str
"""
self._provider = provider
@property
def api_definition(self):
"""
Gets the api_definition of this APIObject2.
Swagger definition of the API which contains details about URI templates and scopes
:return: The api_definition of this APIObject2.
:rtype: str
"""
return self._api_definition
@api_definition.setter
def api_definition(self, api_definition):
"""
Sets the api_definition of this APIObject2.
Swagger definition of the API which contains details about URI templates and scopes
:param api_definition: The api_definition of this APIObject2.
:type: str
"""
self._api_definition = api_definition
@property
def wsdl_uri(self):
"""
Gets the wsdl_uri of this APIObject2.
WSDL URL if the API is based on a WSDL endpoint
:return: The wsdl_uri of this APIObject2.
:rtype: str
"""
return self._wsdl_uri
@wsdl_uri.setter
def wsdl_uri(self, wsdl_uri):
"""
Sets the wsdl_uri of this APIObject2.
WSDL URL if the API is based on a WSDL endpoint
:param wsdl_uri: The wsdl_uri of this APIObject2.
:type: str
"""
self._wsdl_uri = wsdl_uri
@property
def status(self):
"""
Gets the status of this APIObject2.
:return: The status of this APIObject2.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this APIObject2.
:param status: The status of this APIObject2.
:type: str
"""
self._status = status
@property
def response_caching(self):
"""
Gets the response_caching of this APIObject2.
:return: The response_caching of this APIObject2.
:rtype: str
"""
return self._response_caching
@response_caching.setter
def response_caching(self, response_caching):
"""
Sets the response_caching of this APIObject2.
:param response_caching: The response_caching of this APIObject2.
:type: str
"""
self._response_caching = response_caching
@property
def cache_timeout(self):
"""
Gets the cache_timeout of this APIObject2.
:return: The cache_timeout of this APIObject2.
:rtype: int
"""
return self._cache_timeout
@cache_timeout.setter
def cache_timeout(self, cache_timeout):
"""
Sets the cache_timeout of this APIObject2.
:param cache_timeout: The cache_timeout of this APIObject2.
:type: int
"""
self._cache_timeout = cache_timeout
@property
def destination_stats_enabled(self):
"""
Gets the destination_stats_enabled of this APIObject2.
:return: The destination_stats_enabled of this APIObject2.
:rtype: str
"""
return self._destination_stats_enabled
@destination_stats_enabled.setter
def destination_stats_enabled(self, destination_stats_enabled):
"""
Sets the destination_stats_enabled of this APIObject2.
:param destination_stats_enabled: The destination_stats_enabled of this APIObject2.
:type: str
"""
self._destination_stats_enabled = destination_stats_enabled
@property
def is_default_version(self):
"""
Gets the is_default_version of this APIObject2.
:return: The is_default_version of this APIObject2.
:rtype: bool
"""
return self._is_default_version
@is_default_version.setter
def is_default_version(self, is_default_version):
"""
Sets the is_default_version of this APIObject2.
:param is_default_version: The is_default_version of this APIObject2.
:type: bool
"""
if is_default_version is None:
raise ValueError("Invalid value for `is_default_version`, must not be `None`")
self._is_default_version = is_default_version
@property
def type(self):
"""
Gets the type of this APIObject2.
:return: The type of this APIObject2.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this APIObject2.
:param type: The type of this APIObject2.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
allowed_values = ["HTTP", "WS"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
@property
def transport(self):
"""
Gets the transport of this APIObject2.
Supported transports for the API (http and/or https).
:return: The transport of this APIObject2.
:rtype: list[str]
"""
return self._transport
@transport.setter
def transport(self, transport):
"""
Sets the transport of this APIObject2.
Supported transports for the API (http and/or https).
:param transport: The transport of this APIObject2.
:type: list[str]
"""
if transport is None:
raise ValueError("Invalid value for `transport`, must not be `None`")
self._transport = transport
@property
def tags(self):
"""
Gets the tags of this APIObject2.
:return: The tags of this APIObject2.
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this APIObject2.
:param tags: The tags of this APIObject2.
:type: list[str]
"""
self._tags = tags
@property
def tiers(self):
"""
Gets the tiers of this APIObject2.
:return: The tiers of this APIObject2.
:rtype: list[str]
"""
return self._tiers
@tiers.setter
def tiers(self, tiers):
"""
Sets the tiers of this APIObject2.
:param tiers: The tiers of this APIObject2.
:type: list[str]
"""
if tiers is None:
raise ValueError("Invalid value for `tiers`, must not be `None`")
self._tiers = tiers
@property
def max_tps(self):
"""
Gets the max_tps of this APIObject2.
:return: The max_tps of this APIObject2.
:rtype: ApisMaxTps
"""
return self._max_tps
@max_tps.setter
def max_tps(self, max_tps):
"""
Sets the max_tps of this APIObject2.
:param max_tps: The max_tps of this APIObject2.
:type: ApisMaxTps
"""
self._max_tps = max_tps
@property
def thumbnail_uri(self):
"""
Gets the thumbnail_uri of this APIObject2.
:return: The thumbnail_uri of this APIObject2.
:rtype: str
"""
return self._thumbnail_uri
@thumbnail_uri.setter
def thumbnail_uri(self, thumbnail_uri):
"""
Sets the thumbnail_uri of this APIObject2.
:param thumbnail_uri: The thumbnail_uri of this APIObject2.
:type: str
"""
self._thumbnail_uri = thumbnail_uri
@property
def visibility(self):
"""
Gets the visibility of this APIObject2.
:return: The visibility of this APIObject2.
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""
Sets the visibility of this APIObject2.
:param visibility: The visibility of this APIObject2.
:type: str
"""
if visibility is None:
raise ValueError("Invalid value for `visibility`, must not be `None`")
allowed_values = ["PUBLIC", "PRIVATE", "RESTRICTED", "CONTROLLED"]
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}"
.format(visibility, allowed_values)
)
self._visibility = visibility
@property
def visible_roles(self):
"""
Gets the visible_roles of this APIObject2.
:return: The visible_roles of this APIObject2.
:rtype: list[str]
"""
return self._visible_roles
@visible_roles.setter
def visible_roles(self, visible_roles):
"""
Sets the visible_roles of this APIObject2.
:param visible_roles: The visible_roles of this APIObject2.
:type: list[str]
"""
self._visible_roles = visible_roles
@property
def visible_tenants(self):
"""
Gets the visible_tenants of this APIObject2.
:return: The visible_tenants of this APIObject2.
:rtype: list[str]
"""
return self._visible_tenants
@visible_tenants.setter
def visible_tenants(self, visible_tenants):
"""
Sets the visible_tenants of this APIObject2.
:param visible_tenants: The visible_tenants of this APIObject2.
:type: list[str]
"""
self._visible_tenants = visible_tenants
@property
def endpoint_config(self):
"""
Gets the endpoint_config of this APIObject2.
:return: The endpoint_config of this APIObject2.
:rtype: str
"""
return self._endpoint_config
@endpoint_config.setter
def endpoint_config(self, endpoint_config):
"""
Sets the endpoint_config of this APIObject2.
:param endpoint_config: The endpoint_config of this APIObject2.
:type: str
"""
if endpoint_config is None:
raise ValueError("Invalid value for `endpoint_config`, must not be `None`")
self._endpoint_config = endpoint_config
@property
def endpoint_security(self):
"""
Gets the endpoint_security of this APIObject2.
:return: The endpoint_security of this APIObject2.
:rtype: ApisEndpointSecurity
"""
return self._endpoint_security
@endpoint_security.setter
def endpoint_security(self, endpoint_security):
"""
Sets the endpoint_security of this APIObject2.
:param endpoint_security: The endpoint_security of this APIObject2.
:type: ApisEndpointSecurity
"""
self._endpoint_security = endpoint_security
@property
def gateway_environments(self):
"""
Gets the gateway_environments of this APIObject2.
Comma separated list of gateway environments.
:return: The gateway_environments of this APIObject2.
:rtype: str
"""
return self._gateway_environments
@gateway_environments.setter
def gateway_environments(self, gateway_environments):
"""
Sets the gateway_environments of this APIObject2.
Comma separated list of gateway environments.
:param gateway_environments: The gateway_environments of this APIObject2.
:type: str
"""
self._gateway_environments = gateway_environments
@property
def sequences(self):
"""
Gets the sequences of this APIObject2.
:return: The sequences of this APIObject2.
:rtype: list[Sequence1]
"""
return self._sequences
@sequences.setter
def sequences(self, sequences):
"""
Sets the sequences of this APIObject2.
:param sequences: The sequences of this APIObject2.
:type: list[Sequence1]
"""
self._sequences = sequences
@property
def subscription_availability(self):
"""
Gets the subscription_availability of this APIObject2.
:return: The subscription_availability of this APIObject2.
:rtype: str
"""
return self._subscription_availability
@subscription_availability.setter
def subscription_availability(self, subscription_availability):
"""
Sets the subscription_availability of this APIObject2.
:param subscription_availability: The subscription_availability of this APIObject2.
:type: str
"""
allowed_values = ["current_tenant", "all_tenants", "specific_tenants"]
if subscription_availability not in allowed_values:
raise ValueError(
"Invalid value for `subscription_availability` ({0}), must be one of {1}"
.format(subscription_availability, allowed_values)
)
self._subscription_availability = subscription_availability
@property
def subscription_available_tenants(self):
"""
Gets the subscription_available_tenants of this APIObject2.
:return: The subscription_available_tenants of this APIObject2.
:rtype: list[str]
"""
return self._subscription_available_tenants
@subscription_available_tenants.setter
def subscription_available_tenants(self, subscription_available_tenants):
"""
Sets the subscription_available_tenants of this APIObject2.
:param subscription_available_tenants: The subscription_available_tenants of this APIObject2.
:type: list[str]
"""
self._subscription_available_tenants = subscription_available_tenants
@property
def business_information(self):
"""
Gets the business_information of this APIObject2.
:return: The business_information of this APIObject2.
:rtype: ApisBusinessInformation
"""
return self._business_information
@business_information.setter
def business_information(self, business_information):
"""
Sets the business_information of this APIObject2.
:param business_information: The business_information of this APIObject2.
:type: ApisBusinessInformation
"""
self._business_information = business_information
@property
def cors_configuration(self):
"""
Gets the cors_configuration of this APIObject2.
:return: The cors_configuration of this APIObject2.
:rtype: ApisCorsConfiguration
"""
return self._cors_configuration
@cors_configuration.setter
def cors_configuration(self, cors_configuration):
"""
Sets the cors_configuration of this APIObject2.
:param cors_configuration: The cors_configuration of this APIObject2.
:type: ApisCorsConfiguration
"""
self._cors_configuration = cors_configuration
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, APIObject2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
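# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; the field values below are made up
# and are not part of the generated client code).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    api = APIObject2(
        name="PizzaShackAPI",
        context="/pizzashack",
        version="1.0.0",
        is_default_version=False,
        transport=["http", "https"],
        tiers=["Unlimited"],
        visibility="PUBLIC",
        endpoint_config='{"production_endpoints": {"url": "https://example.com/api"}}',
    )
    print(api.to_str())  # pretty-printed dict of the model's attributes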
|
[
"junetigerlee@gmail.com"
] |
junetigerlee@gmail.com
|
78a8943f8ce2436866eaef9d626cb0df1c6b6eda
|
6258769282bb5d23c0ae12064c76597014f64463
|
/sorting/insertion_sort.py
|
9461306d62d3b90ce11fe8b1582bbe462b4fee61
|
[] |
no_license
|
PandaMia/Python
|
91c9ceb949791bc8d6d549aa69b380e5d18ea2d2
|
98a1cb0d970607b7160336c705ff9f0f4c8d1edc
|
refs/heads/master
| 2022-11-20T00:28:49.881340
| 2020-07-12T12:51:15
| 2020-07-12T12:51:15
| 276,862,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from random import randint
def insertion_sort(array):
for i in range(1, len(array)):
spam = array[i]
j = i
        while j > 0 and array[j - 1] > spam:  # check j first to avoid a needless negative-index read
array[j] = array[j - 1]
j -= 1
array[j] = spam
array = [randint(-10, 10) for i in range(10)]
print(array)
insertion_sort(array)
print(array)
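# Deterministic sanity check (added; not part of the original snippet):
nums = [5, 2, 4, 6, 1, 3]
insertion_sort(nums)
print(nums)  # -> [1, 2, 3, 4, 5, 6]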
|
[
"noreply@github.com"
] |
PandaMia.noreply@github.com
|
0afbee7eba27aebba2818dd3c350919085a06630
|
f6204b85b4b0662eecda8cfd77e41ef51a8e97fb
|
/projectpythonlaptophp/escapechar.py
|
c0751920dbe670df4d359ec05d254143853197b6
|
[] |
no_license
|
sergiosanchezbarradas/Udemy_Masterclass_Python
|
0084e781fac9db8cca2b0dd3afec3dd69f7a3ed7
|
da8fc6105a04379e4ddeaf9df782115462408153
|
refs/heads/master
| 2023-04-14T22:53:58.527942
| 2021-04-21T10:51:23
| 2021-04-21T10:51:23
| 336,501,693
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
splitstring = "This string has been\nsplit over\nseveral\nlines"
print(splitstring)
tabstring = "1\t25\t3\t"
print(tabstring)
print('The pet owner said "No,no, \'e\'s uh,....he\'s resting".')
#or
print("The owner said \"No, no, 'e's uh,...he's resting\".")
print("""The owner said hmmmm "No,no...'e's \
uh...he's resting".""")
triplequotessplit = """this has been
split
many
times"""
print(triplequotessplit)
print(r"C:\Users\sergio")
|
[
"sergio_sanchez_b@hotmail.com"
] |
sergio_sanchez_b@hotmail.com
|
e3dd261be5438e3b948ea7d2190b0fc491879986
|
9ba3e1a001ffa4d4a93387c8685b6e1f5b7c48dc
|
/code/other-sims/case4.py
|
b0c25dae51509b1f7b1aeb268ce5b52ffc320602
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
filecoin-project/consensus
|
b194e8f061ab541d7e6d20d760e89705993cabdc
|
8824ad5fb8948706995805692d594f6ccf199176
|
refs/heads/master
| 2022-03-28T21:35:04.670512
| 2020-05-29T19:40:44
| 2020-05-29T19:40:44
| 130,159,711
| 44
| 5
|
NOASSERTION
| 2020-03-19T02:37:58
| 2018-04-19T04:38:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
import numpy as np
import time
from math import floor
import multiprocessing as mp
nh=67
na=33
ntot=na+nh
heights=range(250,251,10)
e=1
p=float(e)/float(1*ntot)
sim=1000000
ec =[]
num=1
if e==1: num=77
if e==5: num=54
start_time = time.time()
# Step 1: Init multiprocessing.Pool()
pool = mp.Pool(mp.cpu_count())
for height in heights: # the adversary tries to keep maintaining two chains of the same weight and
#length "height", we try different heights and see the probability of succeeding. If this probability is
#small enough, we consider this height a good finality candidate.
win_ec = 0
longestfork =[]
for i in range(sim):
ch = np.random.binomial(nh, p, height)
ca = np.random.binomial(na, p, height)
# result of flipping a coin nha times, tested height times. (i.e. number of leaders
        # at each slot for both adversary and honest players)
j=0
w_h = 0
w_a = 0
praos_h=0
praos_a=0
j=0
        while j < height and ca[j] > 0:  # check the bound before indexing into ca
#determine if adversary or honest is winning
w_h+=1
w_a+=ca[j]#adv adds all blocks possible to its chain
praos_a
j+=1
if w_a>=w_h and w_a>0:
win_ec+=1
longestfork.append(j)
#print np.average(longestfork)
ec.append(float(win_ec)/float(sim))
# longestfork.sort()
# print ec,np.average(longestfork), np.median(longestfork),max(longestfork), sum(longestfork[-54:])
#before sorting, we group them by groups of num
stop = int(floor(sim/num)*num) #need to stop before the end of the longest fork
#if it is not a multiple of num
groupedfork=[ sum(longestfork[x:x+num]) for x in range(0, stop, num)]
    print(ec, np.average(groupedfork), np.median(groupedfork), max(groupedfork), len(groupedfork))
print("--- %s seconds ---" % (time.time() - start_time))
|
[
"sarah8@bitbucket.org"
] |
sarah8@bitbucket.org
|
110a6ab2cb322a10543de21c59650d94d221ef39
|
054c0bdf2ed7db0975828355562b7b0a7bad7e5c
|
/myapp/home/views.py
|
3b1cc4004190dd397091eceb3f851ab81243868c
|
[] |
no_license
|
fbbenod/Touristy-Information-System
|
d8024a5d9df0712d336d18cf1a2ac5b235b3b58e
|
8122a8b9f343fd952a052d2c435f914d0b82079c
|
refs/heads/master
| 2020-07-14T19:39:20.759408
| 2019-08-31T12:16:04
| 2019-08-31T12:16:04
| 205,385,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
from django.shortcuts import render
from .models import FirstContent, SecondContent,ThirdContent,Package,BodyContent1,\
BodyContent2, OurService1, OurService2,\
OurServiceImage, PopularDestination, HomeQuestion, OurPlaces, Footer
from django.views.generic import (
ListView,
DetailView
)
def index(request):
if request.method == 'GET':
firstcontent=FirstContent.objects.all()
secondcontent=SecondContent.objects.all()
thirdcontent = ThirdContent.objects.all()
bodycontent1= BodyContent1.objects.all()
bodycontent2= BodyContent2.objects.all()
populardestination = PopularDestination.objects.all()
ourservice1 = OurService1.objects.all()
ourservice2 = OurService2.objects.all()
ourserviceimage = OurServiceImage.objects.all()
package = Package.objects.all()
homequestion = HomeQuestion.objects.all()
ourplaces = OurPlaces.objects.all()
footer = Footer.objects.all()
context = {
'firstcontent': firstcontent,
'secondcontent': secondcontent,
'thirdcontent' : thirdcontent,
'bodycontent1' :bodycontent1,
'bodycontent2' :bodycontent2,
'populardestination': populardestination,
'ourservice1' : ourservice1,
'ourservice2' : ourservice2,
'ourserviceimage': ourserviceimage,
'package' : package,
'homequestion' :homequestion,
'ourplaces' : ourplaces,
'footer':footer
}
return render(request, 'home/index.html', context)
class PostListView(ListView):
model = FirstContent
template_name = 'home/index.html'
context_object_name = 'firstcontent'
def get_context_data(self, **kwargs):
context = super(PostListView, self).get_context_data(**kwargs)
context.update({
'secondcontent':SecondContent.objects.all(),
'thirdcontent': ThirdContent.objects.all(),
'bodycontent2': BodyContent2.objects.all(),
'bodycontent1': BodyContent1.objects.all(),
'populardestination': PopularDestination.objects.all(),
'ourservice1': OurService1.objects.all(),
'ourservice2': OurService2.objects.all(),
'ourserviceimage': OurServiceImage.objects.all(),
'package': Package.objects.all(),
'homequestion': HomeQuestion.objects.all(),
'ourplaces': OurPlaces.objects.all(),
'footer': Footer.objects.all(),
})
return context
class PostDetailView(DetailView):
model = PopularDestination
template_name = 'home/index2.html'
context_object_name = 'populardestinations'
|
[
"b3nodbanjara@gmail.com"
] |
b3nodbanjara@gmail.com
|
c576359869c6d2ae332e6910e94e31783d8ab1ec
|
c0d489046bc114672139873916a118a203c6f850
|
/Medium/1669. Merge In Between Linked Lists.py
|
d226397c60258d906ee028c656b938c7f36208fd
|
[] |
no_license
|
shifty049/LeetCode_Practice
|
165ada14a8fd436e9068bd94d6b82b1ed312013c
|
ca8be179282be86450c9959fb239466d152a55e5
|
refs/heads/master
| 2022-05-25T16:23:05.736852
| 2022-03-29T13:48:21
| 2022-03-29T13:48:21
| 249,737,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
class Solution:
def mergeInBetween(self, list1: ListNode, a: int, b: int, list2: ListNode) -> ListNode:
ix = 0
while True:
if ix == 0:
head = list1
if ix ==a-1:
lst1_head = list1
if ix == b +1:
lst1_tail = list1
break
list1 = list1.next
ix+=1
lst2_head = list2
while list2:
if not list2.next:
lst2_tail = list2
list2= list2.next
lst1_head.next = lst2_head
lst2_tail.next = lst1_tail
return head
#Runtime: 456 ms, faster than 51.12% of Python3 online submissions for Merge In Between Linked Lists.
#Memory Usage: 20.1 MB, less than 36.68% of Python3 online submissions for Merge In Between Linked Lists.
#Fu-Ti, Hsu
#shifty049@gmail.com
|
[
"shifty049@gmail.com"
] |
shifty049@gmail.com
|
de01fe52eb92564309ff7696b0b605bb26095c8d
|
9d844faeec40e4fd3dfaf7e79004831752090454
|
/kaplanmeier/helpers/savefig.py
|
198e47da65c7e032d87c9bf40dc11788474ffed1
|
[
"MIT"
] |
permissive
|
erdogant/kaplanmeier
|
8b498218f3105d905087aef713cfeb25d69fbf43
|
97c49d7c3f7c1385b895729a3dbffc2012cd50a2
|
refs/heads/master
| 2023-05-23T01:28:07.905957
| 2023-05-08T20:30:12
| 2023-05-08T20:30:12
| 231,418,764
| 24
| 9
|
MIT
| 2022-03-10T23:54:05
| 2020-01-02T16:25:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,668
|
py
|
""" This function saves figures in PNG format.
A=savefig(data, <optional>)
INPUT:
data: fig object
OPTIONAL
showprogress : Boolean [0,1]
[0]: No (default)
[1]: Yes
OUTPUT
BOOLEAN
   [0]: If not successful
   [1]: If successful
DESCRIPTION
   This function saves figures in PNG format.
EXAMPLE
%reset -f
print(os.getcwd())
from donutchart import donutchart
A = donutchart([15, 30, 45, 10],['aap','boom','mies','banaan'])
B = savefig(A,"c://temp//magweg//fig.png",showprogress=1)
SEE ALSO
"""
#print(__doc__)
#--------------------------------------------------------------------------
# Name : savefig.py
# Version : 1.0
# Author : E.Taskesen
# Date : Sep. 2017
#--------------------------------------------------------------------------
# Libraries
from os import mkdir
from os import path
#%% Main
def savefig(fig, filepath, dpi=100, transp=False, showprogress=0):
    out=0 # Returns 1 if successful
# Make dictionary to store Parameters
Param = {}
Param['showprogress'] = showprogress
Param['filepath'] = filepath
Param['dpi'] = dpi
Param['transp'] = transp
# Write figure to path
if Param['filepath']!="":
# Check dir
[getpath, getfilename] = path.split(Param['filepath'])
if path.exists(getpath)==False:
mkdir(getpath)
#end
#save file
#print(fig.canvas.get_supported_filetypes())
fig.savefig(Param['filepath'], dpi=Param['dpi'], transparent=Param['transp'], bbox_inches='tight')
out=1
return(out)
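#%% Hedged usage example (added; the demo figure and output path below are
#   placeholders, not taken from the original file).
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig_demo, ax_demo = plt.subplots()
    ax_demo.plot([0, 1, 2], [0, 1, 4])
    # savefig() creates the target directory if needed and writes a tight-bbox PNG
    savefig(fig_demo, "./tmp_savefig_demo/fig.png", dpi=150, transp=True)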
|
[
"erdogant@gmail.com"
] |
erdogant@gmail.com
|
c330eb6da60dfa3e0ce1b2b9441715280a20da9c
|
12662aff12b6651a3cc046950e5ea57f60dd0a09
|
/16. Numpy/Inner_and_outer.py
|
0587955f2ce1d316ab30af51dbd4c7e31e925f13
|
[] |
no_license
|
basakmugdha/HackerRank-Python-Practice
|
3c563d68e002c1b04dc59594f3c84070babf443a
|
fa41f263eb310755a1da1c9d6f2f74dc9e0329b5
|
refs/heads/master
| 2023-06-18T05:07:58.563967
| 2021-07-11T16:29:34
| 2021-07-11T16:29:34
| 354,798,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
import numpy as np
A = np.array(input().split(), int)
B = np.array(input().split(), int)
print(np.inner(A,B))
print(np.outer(A,B))
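# For example, with HackerRank's sample inputs "0 1" and "2 3" this prints
# 3 (the inner product) followed by [[0 0], [2 3]] (the outer product).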
|
[
"51905437+basakmugdha@users.noreply.github.com"
] |
51905437+basakmugdha@users.noreply.github.com
|
1294aa6cc306f575071bc36fb1e6ad6462da9268
|
3ae37808b1d2cee080d9dc6a8695ca822e02b9cd
|
/udemy/lazyprogrammer/linear-regression-python/lr_2d.py
|
a2682eb360002ca6324e3d34a0012e887d04d18b
|
[
"Apache-2.0"
] |
permissive
|
balazssimon/ml-playground
|
8dc9896a46bd468a74e0e04ba94889c009655216
|
c2eba497bebc53e5a03807bdd8873c55f0ec73e1
|
refs/heads/master
| 2023-08-11T04:36:11.084644
| 2019-07-30T14:39:23
| 2019-07-30T14:39:23
| 65,571,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
df = pd.read_csv("data_2d.csv", header=None)
X = df[[0,1]].values
y = df[2].values
w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
y_hat = np.dot(X, w)
d1 = y-y_hat
d2 = y-y.mean()
SS_res = d1.dot(d1)
SS_tot = d2.dot(d2)
R2 = 1-SS_res/SS_tot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:,0], X[:,1], y)
plt.show()
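# R-squared is computed above but never reported; printing it is a small
# addition (not in the original script):
print("r-squared:", R2)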
|
[
"balazssimon@gmail.com"
] |
balazssimon@gmail.com
|
0215b7bbb2f8facc2a6d7f58206630b3e43c303c
|
0a7b9005e93ad0ac846568d54d52c8faef8aa53b
|
/Old Files/Generation V3/ball_recognition_tensorflow/object_detection/eval_util.py
|
c1a09ec0e5d7677971b860159ce262ee101c5217
|
[] |
no_license
|
msumccoy/robotics
|
9d177710cd4e61c38fda26c0f72d8fbf25c7cfdd
|
63cec377ee7efb6e2297669a91e930f415ad8a6a
|
refs/heads/master
| 2021-12-12T16:25:33.840760
| 2021-11-12T19:20:43
| 2021-11-12T19:20:43
| 206,330,614
| 0
| 1
| null | 2021-11-12T19:20:44
| 2019-09-04T13:50:47
|
Python
|
UTF-8
|
Python
| false
| false
| 44,765
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import time
import numpy as np
from six.moves import range
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
slim = tf.contrib.slim
# A dictionary of metric names to classes that implement the metric. The classes
# in the dictionary must implement
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
'coco_detection_metrics':
coco_evaluation.CocoDetectionEvaluator,
'coco_mask_metrics':
coco_evaluation.CocoMaskEvaluator,
'oid_challenge_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
'oid_challenge_segmentation_metrics':
object_detection_evaluation
.OpenImagesInstanceSegmentationChallengeEvaluator,
'pascal_voc_detection_metrics':
object_detection_evaluation.PascalDetectionEvaluator,
'weighted_pascal_voc_detection_metrics':
object_detection_evaluation.WeightedPascalDetectionEvaluator,
'precision_at_recall_detection_metrics':
object_detection_evaluation.PrecisionAtRecallDetectionEvaluator,
'pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.PascalInstanceSegmentationEvaluator,
'weighted_pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
'oid_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
}
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
def write_metrics(metrics, global_step, summary_dir):
"""Write metrics to a summary directory.
Args:
metrics: A dictionary containing metric names and values.
global_step: Global step at which the metrics are computed.
summary_dir: Directory to write tensorflow summaries to.
"""
tf.logging.info('Writing metrics to tf summary.')
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
for key in sorted(metrics):
summary = tf.Summary(value=[
tf.Summary.Value(tag=key, simple_value=metrics[key]),
])
summary_writer.add_summary(summary, global_step)
tf.logging.info('%s: %f', key, metrics[key])
tf.logging.info('Metrics written to tf summary.')
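# Usage sketch (illustrative values only): a call such as
#   write_metrics({'DetectionBoxes_Precision/mAP': 0.42}, 1000, '/tmp/eval')
# emits one scalar summary per metric key for global step 1000 under /tmp/eval.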
# TODO(rathodv): Add tests.
def visualize_detection_results(result_dict,
tag,
global_step,
categories,
summary_dir='',
export_dir='',
agnostic_mode=False,
show_groundtruth=False,
groundtruth_box_visualization_color='black',
min_score_thresh=.5,
max_num_predictions=20,
skip_scores=False,
skip_labels=False,
keep_image_id_for_visualization_export=False):
"""Visualizes detection results and writes visualizations to image summaries.
This function visualizes an image with its detected bounding boxes and writes
to image summaries which can be viewed on tensorboard. It optionally also
writes images to a directory. In the case of missing entry in the label map,
unknown class name in the visualization is shown as "N/A".
Args:
result_dict: a dictionary holding groundtruth and detection
data corresponding to each image being evaluated. The following keys
are required:
'original_image': a numpy array representing the image with shape
[1, height, width, 3] or [1, height, width, 1]
'detection_boxes': a numpy array of shape [N, 4]
'detection_scores': a numpy array of shape [N]
'detection_classes': a numpy array of shape [N]
The following keys are optional:
'groundtruth_boxes': a numpy array of shape [N, 4]
'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score; for
      display, scores are assumed to be probabilities between 0 and 1.
tag: tensorboard tag (string) to associate with image.
global_step: global step at which the visualization are generated.
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
summary_dir: the output directory to which the image summaries are written.
export_dir: the output directory to which images are written. If this is
empty (default), then images are not exported.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not.
show_groundtruth: boolean (default: False) controlling whether to show
groundtruth boxes in addition to detected boxes
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
min_score_thresh: minimum score threshold for a box to be visualized
max_num_predictions: maximum number of detections to visualize
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
keep_image_id_for_visualization_export: whether to keep image identifier in
filename when exported to export_dir
Raises:
ValueError: if result_dict does not contain the expected keys (i.e.,
'original_image', 'detection_boxes', 'detection_scores',
'detection_classes')
"""
detection_fields = fields.DetectionResultFields
input_fields = fields.InputDataFields
if not set([
input_fields.original_image,
detection_fields.detection_boxes,
detection_fields.detection_scores,
detection_fields.detection_classes,
]).issubset(set(result_dict.keys())):
raise ValueError('result_dict does not contain all expected keys.')
if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
raise ValueError('If show_groundtruth is enabled, result_dict must contain '
'groundtruth_boxes.')
tf.logging.info('Creating detection visualizations.')
category_index = label_map_util.create_category_index(categories)
image = np.squeeze(result_dict[input_fields.original_image], axis=0)
if image.shape[2] == 1: # If one channel image, repeat in RGB.
image = np.tile(image, [1, 1, 3])
detection_boxes = result_dict[detection_fields.detection_boxes]
detection_scores = result_dict[detection_fields.detection_scores]
detection_classes = np.int32((result_dict[
detection_fields.detection_classes]))
detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
detection_masks = result_dict.get(detection_fields.detection_masks)
detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
# Plot groundtruth underneath detections
if show_groundtruth:
groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
vis_utils.visualize_boxes_and_labels_on_image_array(
image=image,
boxes=groundtruth_boxes,
classes=None,
scores=None,
category_index=category_index,
keypoints=groundtruth_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=None,
groundtruth_box_visualization_color=groundtruth_box_visualization_color)
vis_utils.visualize_boxes_and_labels_on_image_array(
image,
detection_boxes,
detection_classes,
detection_scores,
category_index,
instance_masks=detection_masks,
instance_boundaries=detection_boundaries,
keypoints=detection_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=max_num_predictions,
min_score_thresh=min_score_thresh,
agnostic_mode=agnostic_mode,
skip_scores=skip_scores,
skip_labels=skip_labels)
if export_dir:
    if (keep_image_id_for_visualization_export and
        result_dict[fields.InputDataFields().key]):
export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
tag, result_dict[fields.InputDataFields().key]))
else:
export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
vis_utils.save_image_array_as_png(image, export_path)
summary = tf.Summary(value=[
tf.Summary.Value(
tag=tag,
image=tf.Summary.Image(
encoded_image_string=vis_utils.encode_image_array_as_png_str(
image)))
])
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
summary_writer.add_summary(summary, global_step)
tf.logging.info('Detection visualizations written to summary with tag %s.',
tag)
def _run_checkpoint_once(tensor_dict,
evaluators=None,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None,
process_metrics_fn=None):
"""Evaluates metrics defined in evaluators and returns summaries.
This function loads the latest checkpoint in checkpoint_dirs and evaluates
all metrics defined in evaluators. The metrics are processed in batch by the
batch_processor.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
evaluators: a list of object of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
    batch_processor: a function taking four arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      4. counters: a dictionary holding 'success' and 'skipped' batch counts
By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
To skip an image, it suffices to return an empty dictionary in place of
result_dict.
checkpoint_dirs: list of directories to load into an EnsembleModel. If it
      has only one directory, EnsembleModel will not be used -- a
      DetectionModel will be instantiated directly. Not used if restore_fn
      is set.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: None, or a function that takes a tf.Session object and correctly
restores all necessary variables from the correct checkpoint file. If
None, attempts to restore from the first directory in checkpoint_dirs.
num_batches: the number of batches to use for evaluation.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
process_metrics_fn: a callback called with evaluation results after each
evaluation is done. It could be used e.g. to back up checkpoints with
best evaluation scores, or to call an external system to update evaluation
results in order to drive best hyper-parameter search. Parameters are:
int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,
str checkpoint_file path.
Returns:
global_step: the count of global steps.
all_evaluator_metrics: A dictionary containing metric names and values.
Raises:
ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
one element.
ValueError: if save_graph is True and save_graph_dir is not defined.
"""
if save_graph and not save_graph_dir:
raise ValueError('`save_graph_dir` must be defined.')
sess = tf.Session(master, graph=tf.get_default_graph())
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
checkpoint_file = None
if restore_fn:
restore_fn(sess)
else:
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
saver = tf.train.Saver(variables_to_restore)
saver.restore(sess, checkpoint_file)
if save_graph:
tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
counters = {'skipped': 0, 'success': 0}
aggregate_result_losses_dict = collections.defaultdict(list)
with tf.contrib.slim.queues.QueueRunners(sess):
try:
for batch in range(int(num_batches)):
if (batch + 1) % 100 == 0:
tf.logging.info('Running eval ops batch %d/%d', batch + 1,
num_batches)
if not batch_processor:
try:
if not losses_dict:
losses_dict = {}
result_dict, result_losses_dict = sess.run([tensor_dict,
losses_dict])
counters['success'] += 1
except tf.errors.InvalidArgumentError:
tf.logging.info('Skipping image')
counters['skipped'] += 1
result_dict = {}
else:
result_dict, result_losses_dict = batch_processor(
tensor_dict, sess, batch, counters, losses_dict=losses_dict)
if not result_dict:
continue
for key, value in iter(result_losses_dict.items()):
aggregate_result_losses_dict[key].append(value)
for evaluator in evaluators:
# TODO(b/65130867): Use image_id tensor once we fix the input data
# decoders to return correct image_id.
# TODO(akuznetsa): result_dict contains batches of images, while
# add_single_ground_truth_image_info expects a single image. Fix
if (isinstance(result_dict, dict) and
fields.InputDataFields.key in result_dict and
result_dict[fields.InputDataFields.key]):
image_id = result_dict[fields.InputDataFields.key]
else:
image_id = batch
evaluator.add_single_ground_truth_image_info(
image_id=image_id, groundtruth_dict=result_dict)
evaluator.add_single_detected_image_info(
image_id=image_id, detections_dict=result_dict)
tf.logging.info('Running eval batches done.')
except tf.errors.OutOfRangeError:
tf.logging.info('Done evaluating -- epoch limit reached')
finally:
# When done, ask the threads to stop.
tf.logging.info('# success: %d', counters['success'])
tf.logging.info('# skipped: %d', counters['skipped'])
all_evaluator_metrics = {}
  if eval_export_path:
for evaluator in evaluators:
if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
tf.logging.info('Started dumping to json file.')
evaluator.dump_detections_to_json_file(
json_output_path=eval_export_path)
tf.logging.info('Finished dumping to json file.')
for evaluator in evaluators:
metrics = evaluator.evaluate()
evaluator.clear()
if any(key in all_evaluator_metrics for key in metrics):
raise ValueError('Metric names between evaluators must not collide.')
all_evaluator_metrics.update(metrics)
global_step = tf.train.global_step(sess, tf.train.get_global_step())
for key, value in iter(aggregate_result_losses_dict.items()):
all_evaluator_metrics['Losses/' + key] = np.mean(value)
if process_metrics_fn and checkpoint_file:
m = re.search(r'model.ckpt-(\d+)$', checkpoint_file)
if not m:
tf.logging.error('Failed to parse checkpoint number from: %s',
checkpoint_file)
else:
checkpoint_number = int(m.group(1))
process_metrics_fn(checkpoint_number, all_evaluator_metrics,
checkpoint_file)
sess.close()
return (global_step, all_evaluator_metrics)
# TODO(rathodv): Add tests.
def repeated_checkpoint_run(tensor_dict,
summary_dir,
evaluators,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
eval_interval_secs=120,
max_number_of_evaluations=None,
max_evaluation_global_step=None,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None,
process_metrics_fn=None):
"""Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.
This function repeatedly loads a checkpoint and evaluates a desired
set of tensors (provided by tensor_dict) and hands the resulting numpy
arrays to a function result_processor which can be used to further
process/save/visualize the results.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
summary_dir: a directory to write metrics summaries.
evaluators: a list of object of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
batch_processor: a function taking three arguments:
1. tensor_dict: the same tensor_dict that is passed in as the first
argument to this function.
2. sess: a tensorflow session
3. batch_index: an integer representing the index of the batch amongst
all batches
By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
checkpoint_dirs: list of directories to load into a DetectionModel or an
EnsembleModel if restore_fn isn't set. Also used to determine when to run
next evaluation. Must have at least one element.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: a function that takes a tf.Session object and correctly restores
all necessary variables from the correct checkpoint file.
num_batches: the number of batches to use for evaluation.
eval_interval_secs: the number of seconds between each evaluation run.
max_number_of_evaluations: the max number of iterations of the evaluation.
If the value is left as None the evaluation continues indefinitely.
max_evaluation_global_step: global step when evaluation stops.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.
save_graph_dir: where to save on disk the Tensorflow graph. If store_graph
is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
process_metrics_fn: a callback called with evaluation results after each
evaluation is done. It could be used e.g. to back up checkpoints with
best evaluation scores, or to call an external system to update evaluation
results in order to drive best hyper-parameter search. Parameters are:
int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,
str checkpoint_file path.
Returns:
metrics: A dictionary containing metric names and values in the latest
evaluation.
Raises:
    ValueError: if max_number_of_evaluations is neither None nor a positive
      number.
ValueError: if checkpoint_dirs doesn't have at least one element.
"""
if max_number_of_evaluations and max_number_of_evaluations <= 0:
raise ValueError(
'`max_number_of_evaluations` must be either None or a positive number.')
if max_evaluation_global_step and max_evaluation_global_step <= 0:
raise ValueError(
'`max_evaluation_global_step` must be either None or positive.')
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
last_evaluated_model_path = None
number_of_evaluations = 0
while True:
start = time.time()
tf.logging.info('Starting evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
if not model_path:
tf.logging.info('No model found in %s. Will try again in %d seconds',
checkpoint_dirs[0], eval_interval_secs)
elif model_path == last_evaluated_model_path:
tf.logging.info('Found already evaluated checkpoint. Will try again in '
'%d seconds', eval_interval_secs)
else:
last_evaluated_model_path = model_path
global_step, metrics = _run_checkpoint_once(
tensor_dict,
evaluators,
batch_processor,
checkpoint_dirs,
variables_to_restore,
restore_fn,
num_batches,
master,
save_graph,
save_graph_dir,
losses_dict=losses_dict,
eval_export_path=eval_export_path,
process_metrics_fn=process_metrics_fn)
write_metrics(metrics, global_step, summary_dir)
if (max_evaluation_global_step and
global_step >= max_evaluation_global_step):
tf.logging.info('Finished evaluation!')
break
number_of_evaluations += 1
if (max_number_of_evaluations and
number_of_evaluations >= max_number_of_evaluations):
tf.logging.info('Finished evaluation!')
break
time_to_next_eval = start + eval_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
return metrics
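# Typical wiring sketch (placeholders, not a prescribed setup): assuming `tensor_dict`,
# `evaluators`, `restore_fn` and a training directory are prepared elsewhere,
#   metrics = repeated_checkpoint_run(
#       tensor_dict, summary_dir='/tmp/eval', evaluators=evaluators,
#       checkpoint_dirs=['/tmp/train'], restore_fn=restore_fn,
#       num_batches=500, eval_interval_secs=300)
# re-evaluates each new checkpoint and returns the metrics of the latest run.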
def _scale_box_to_absolute(args):
boxes, image_shape = args
return box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()
def _resize_detection_masks(args):
detection_boxes, detection_masks, image_shape = args
detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image_shape[0], image_shape[1])
return tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
def _resize_groundtruth_masks(args):
mask, image_shape = args
mask = tf.expand_dims(mask, 3)
mask = tf.image.resize_images(
mask,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(mask, 3), tf.uint8)
def _scale_keypoint_to_absolute(args):
keypoints, image_shape = args
return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])
def result_dict_for_single_example(image,
key,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
Args:
image: A single 4D uint8 image tensor of shape [1, H, W, C].
key: A single string tensor identifying the image.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized coordinates.
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
Returns:
A dictionary with:
'original_image': A [1, H, W, C] uint8 image tensor.
'key': A string tensor with image identifier.
'detection_boxes': [max_detections, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [max_detections] float32 tensor of scores.
'detection_classes': [max_detections] int64 tensor of 1-indexed classes.
'detection_masks': [max_detections, H, W] float32 tensor of binarized
masks, reframed to full image masks.
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
(Optional)
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
"""
if groundtruth:
max_gt_boxes = tf.shape(
groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
for gt_key in groundtruth:
# expand groundtruth dict along the batch dimension.
groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
for detection_key in detections:
detections[detection_key] = tf.expand_dims(
detections[detection_key][0], axis=0)
batched_output_dict = result_dict_for_batched_example(
image,
tf.expand_dims(key, 0),
detections,
groundtruth,
class_agnostic,
scale_to_absolute,
max_gt_boxes=max_gt_boxes)
exclude_keys = [
fields.InputDataFields.original_image,
fields.DetectionResultFields.num_detections,
fields.InputDataFields.num_groundtruth_boxes
]
output_dict = {
fields.InputDataFields.original_image:
batched_output_dict[fields.InputDataFields.original_image]
}
for key in batched_output_dict:
# remove the batch dimension.
if key not in exclude_keys:
output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
return output_dict
def result_dict_for_batched_example(images,
keys,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False,
original_image_spatial_shapes=None,
true_image_shapes=None,
max_gt_boxes=None):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
Args:
images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].
keys: A [batch_size] string tensor with image identifier.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor
of boxes, in normalized coordinates.
'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of
1-indexed classes.
'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of
bbox area. (Optional)
'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_difficult': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_group_of': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance
masks (Optional).
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
used to resize the image. When set to None, the image size is retained.
true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
containing the size of the unpadded original_image.
max_gt_boxes: [batch_size] tensor representing the maximum number of
groundtruth boxes to pad.
Returns:
A dictionary with:
'original_image': A [batch_size, H, W, C] uint8 image tensor.
'original_image_spatial_shape': A [batch_size, 2] tensor containing the
original image sizes.
'true_image_shape': A [batch_size, 3] tensor containing the size of
the unpadded original_image.
'key': A [batch_size] string tensor with image identifier.
'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,
in normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [batch_size, max_detections] float32 tensor of scores.
'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed
classes.
'detection_masks': [batch_size, max_detections, H, W] float32 tensor of
binarized masks, reframed to full image masks.
'num_detections': [batch_size] int64 tensor containing number of valid
detections.
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes. (Optional)
'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox
area. (Optional)
'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance masks
(Optional).
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
of groundtruth boxes per image.
Raises:
ValueError: if original_image_spatial_shape is not 2D int32 tensor of shape
[2].
ValueError: if true_image_shapes is not 2D int32 tensor of shape
[3].
"""
label_id_offset = 1 # Applying label id offset (b/63711816)
input_data_fields = fields.InputDataFields
if original_image_spatial_shapes is None:
original_image_spatial_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:3], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
if (len(original_image_spatial_shapes.shape) != 2 and
original_image_spatial_shapes.shape[1] != 2):
raise ValueError(
'`original_image_spatial_shape` should be a 2D tensor of shape '
'[batch_size, 2].')
if true_image_shapes is None:
true_image_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:4], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
if (len(true_image_shapes.shape) != 2
and true_image_shapes.shape[1] != 3):
raise ValueError('`true_image_shapes` should be a 2D tensor of '
'shape [batch_size, 3].')
output_dict = {
input_data_fields.original_image:
images,
input_data_fields.key:
keys,
input_data_fields.original_image_spatial_shape: (
original_image_spatial_shapes),
input_data_fields.true_image_shape:
true_image_shapes
}
detection_fields = fields.DetectionResultFields
detection_boxes = detections[detection_fields.detection_boxes]
detection_scores = detections[detection_fields.detection_scores]
num_detections = tf.cast(detections[detection_fields.num_detections],
dtype=tf.int32)
if class_agnostic:
detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
else:
detection_classes = (
tf.to_int64(detections[detection_fields.detection_classes]) +
label_id_offset)
if scale_to_absolute:
output_dict[detection_fields.detection_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[detection_boxes, original_image_spatial_shapes],
dtype=tf.float32))
else:
output_dict[detection_fields.detection_boxes] = detection_boxes
output_dict[detection_fields.detection_classes] = detection_classes
output_dict[detection_fields.detection_scores] = detection_scores
output_dict[detection_fields.num_detections] = num_detections
if detection_fields.detection_masks in detections:
detection_masks = detections[detection_fields.detection_masks]
# TODO(rathodv): This should be done in model's postprocess
# function ideally.
output_dict[detection_fields.detection_masks] = (
shape_utils.static_or_dynamic_map_fn(
_resize_detection_masks,
elems=[detection_boxes, detection_masks,
original_image_spatial_shapes],
dtype=tf.uint8))
if detection_fields.detection_keypoints in detections:
detection_keypoints = detections[detection_fields.detection_keypoints]
output_dict[detection_fields.detection_keypoints] = detection_keypoints
if scale_to_absolute:
output_dict[detection_fields.detection_keypoints] = (
shape_utils.static_or_dynamic_map_fn(
_scale_keypoint_to_absolute,
elems=[detection_keypoints, original_image_spatial_shapes],
dtype=tf.float32))
if groundtruth:
if max_gt_boxes is None:
if input_data_fields.num_groundtruth_boxes in groundtruth:
max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
else:
raise ValueError(
'max_gt_boxes must be provided when processing batched examples.')
if input_data_fields.groundtruth_instance_masks in groundtruth:
masks = groundtruth[input_data_fields.groundtruth_instance_masks]
groundtruth[input_data_fields.groundtruth_instance_masks] = (
shape_utils.static_or_dynamic_map_fn(
_resize_groundtruth_masks,
elems=[masks, original_image_spatial_shapes],
dtype=tf.uint8))
output_dict.update(groundtruth)
image_shape = tf.cast(tf.shape(images), tf.float32)
image_height, image_width = image_shape[1], image_shape[2]
def _scale_box_to_normalized_true_image(args):
"""Scale the box coordinates to be relative to the true image shape."""
boxes, true_image_shape = args
true_image_shape = tf.cast(true_image_shape, tf.float32)
true_height, true_width = true_image_shape[0], true_image_shape[1]
normalized_window = tf.stack([0.0, 0.0, true_height / image_height,
true_width / image_width])
return box_list_ops.change_coordinate_frame(
box_list.BoxList(boxes), normalized_window).get()
groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
groundtruth_boxes = shape_utils.static_or_dynamic_map_fn(
_scale_box_to_normalized_true_image,
elems=[groundtruth_boxes, true_image_shapes], dtype=tf.float32)
output_dict[input_data_fields.groundtruth_boxes] = groundtruth_boxes
if scale_to_absolute:
groundtruth_boxes = output_dict[input_data_fields.groundtruth_boxes]
output_dict[input_data_fields.groundtruth_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[groundtruth_boxes, original_image_spatial_shapes],
dtype=tf.float32))
# For class-agnostic models, groundtruth classes all become 1.
if class_agnostic:
groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
return output_dict
def get_evaluators(eval_config, categories, evaluator_options=None):
"""Returns the evaluator class according to eval_config, valid for categories.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
      evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
Returns:
    A list of instances of DetectionEvaluator.
Raises:
ValueError: if metric is not in the metric class dictionary.
"""
evaluator_options = evaluator_options or {}
eval_metric_fn_keys = eval_config.metrics_set
if not eval_metric_fn_keys:
eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
evaluators_list = []
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key
in evaluator_options else {})
evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
categories,
**kwargs_dict))
return evaluators_list
def get_eval_metric_ops_for_evaluators(eval_config,
categories,
eval_dict):
"""Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
eval_dict: An evaluation dictionary, returned from
result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can be
used as eval metric ops in tf.EstimatorSpec.
"""
eval_metric_ops = {}
evaluator_options = evaluator_options_from_eval_config(eval_config)
evaluators_list = get_evaluators(eval_config, categories, evaluator_options)
for evaluator in evaluators_list:
eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops(
eval_dict))
return eval_metric_ops
def evaluator_options_from_eval_config(eval_config):
"""Produces a dictionary of evaluation options for each eval metric.
Args:
eval_config: An `eval_pb2.EvalConfig`.
Returns:
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
      evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
"""
eval_metric_fn_keys = eval_config.metrics_set
evaluator_options = {}
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key in ('coco_detection_metrics', 'coco_mask_metrics'):
evaluator_options[eval_metric_fn_key] = {
'include_metrics_per_category': (
eval_config.include_metrics_per_category)
}
elif eval_metric_fn_key == 'precision_at_recall_detection_metrics':
evaluator_options[eval_metric_fn_key] = {
'recall_lower_bound': (eval_config.recall_lower_bound),
'recall_upper_bound': (eval_config.recall_upper_bound)
}
return evaluator_options
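# Wiring sketch (names are illustrative; `eval_config` is an eval_pb2.EvalConfig and
# `categories` a COCO-style category list built elsewhere):
#   options = evaluator_options_from_eval_config(eval_config)
#   evaluators = get_evaluators(eval_config, categories, options)
# get_eval_metric_ops_for_evaluators() performs these same two steps internally and
# returns estimator-compatible metric ops for a given eval_dict.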
|
[
"54901117+wykek@users.noreply.github.com"
] |
54901117+wykek@users.noreply.github.com
|
78894df958990f55a0db33727dca30b40db76d1d
|
3eab5f92b07fe3a7ca0c082cd65a7a31cc00f831
|
/utilities/welcome.py
|
e752282f2cd8783707e39b8f5dbb3b52e869b011
|
[] |
no_license
|
rodriguez-facundo/nwb-explorer
|
5cc287adf8f1b4509009ce1dec4feff9aaf723a5
|
3d04605ed30cebd4ff9db4e61dc2fc2cb269aeab
|
refs/heads/test-branch-001
| 2020-06-17T08:31:57.555113
| 2019-07-09T00:32:20
| 2019-07-09T00:32:20
| 195,862,486
| 0
| 0
| null | 2020-10-14T12:03:37
| 2019-07-08T18:04:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
donkey =f'''\033[0m {'_'*(58)}
( )
( \U0001F604 Welcome to \033[36;3mNWB-Explorer\033[0m development installation )
( )
( \U0001F463 We will execute the following steps: )
( )
( \U0001F40D Install Python requirements. )
( )
( \U0001F63C Clone some GitHub repositories )
( )
( \U0001F9D9 Setup a custom Geppetto Application: )
( )
( \U0001F41E Install frontend NPM packages. )
( )
( \U0001F9F1 Build frontend bundle. )
( )
( \U0001F316 Enable Jupyter extensions. )
( )
( \U0001F52C Test NWB-Explorer. )
( )
( \U0001F433 Wrap-up and tag the Docker image. )
( )
( \U0000231B The whole process takes between 3 to 5 minutes. )
( )
( \U0001F3C4 Thank you for using NWB-Explorer! )
({"_"*59})
o
o ^__^
o (oo)\_________
(__)\ )\\/\\
||------W |
|| ||
'''
|
[
"bruseghini.f@gmail.com"
] |
bruseghini.f@gmail.com
|
35d500ef398e45b60227585fdebc4d98ad493da8
|
7b1ae566c39aac0626c8c414a153895728c6d02a
|
/tester new.py
|
0d49cccfb4fdf61a53368419e9dbdf6645ebf415
|
[] |
no_license
|
seansliu/Twitter_Filter
|
a3f70f10a11c6b8b935b44593152d9528e38694f
|
ceca0d4252e35c18dfbbca74f4cc53156cca6fc0
|
refs/heads/master
| 2016-09-10T19:23:30.762719
| 2013-12-15T16:23:22
| 2013-12-15T16:23:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
import tweeter as t
def main():
# Phase 1 functions.
print 'Making tweets...'
tweets = t.make_tweets('some_tweets.txt')
tweets = tweets[0:100]
print ' Tweets made. \nAdding state and ZIP...'
t.add_geo(tweets)
print ' State and ZIP added. \nAdding sentiments...'
# Phase 2 functions.
t.add_sentiments(tweets)
print ' Sentiments added. \nWriting to new file...'
t.write_newtweets(tweets, 'newtweets.txt')
print ' File created. \n'
print 'Filtering tweets. If filter unwanted, do not enter anything.'
word = raw_input('Word filter: ')
state = raw_input('State filter: ')
zip = raw_input('ZIP filter: ')
ftweets = t.tweet_filter(tweets, word = word, state = state, zip = zip)
print 'Average sentiment of filtered tweets: ', t.avg_sentiment(ftweets)
print ''
# Extra credit below.
word = raw_input('Most positive state filter word: ')
print t.most_positive(tweets, word = word), '\n'
word = raw_input('Most negative state filter word: ')
print t.most_negative(tweets, word = word), '\n'
print 'Testing complete.'
main()
|
[
"seanliu216@gmail.com"
] |
seanliu216@gmail.com
|
ba432695aa9d5c0e423720d792084fef3f477aac
|
c956401119e44e41f3873b4734c857eda957e2cd
|
/programs/templates/ma2/ma2.py
|
caee4b1f0edb70879bcd4be3fbabca3b25a59657
|
[] |
no_license
|
zhekunz2/c4pp
|
6f689bce42215507d749b8b2be96c7f68ed8c49c
|
9f8054addb48d9440662d8c8f494359846423ffd
|
refs/heads/master
| 2020-09-24T06:17:29.866975
| 2019-12-03T17:41:25
| 2019-12-03T17:41:25
| 225,678,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,915
|
py
|
import pyro, numpy as np, torch, pyro.distributions as dist, torch.nn as nn
from pyro.optim import Adam
import torch.distributions.constraints as constraints
from pyro.infer import SVI
if pyro.__version__ > '0.1.2': from pyro.infer import Trace_ELBO
from pyro.contrib.autoguide import *
import math
def amb(x):
return x.data.numpy().tolist() if isinstance(x, torch.Tensor) else x
y= np.array([-1.28817828364, -0.89881365019, -1.50208718224, -0.53595539928, -0.67026167768, -0.835147405489, -0.656487360935, -1.64548643704, -0.114178263542, 0.156199482775, -0.124825179024, -0.49876451538, -0.4305749626, -0.728660555711, -0.433830846505, -2.13599811869, -1.85111280561, -1.08534800456, -1.56583461781, -1.66157341094, -2.59491987718, -0.805394755877, 0.43300875633, 1.27121596528, 0.0907573885568, -0.0406016925726, -1.60742934034, -2.43167179315, -1.65607518837, -0.824574055502, -1.30924277076, -1.43639799405, -2.26351673294, -1.69564187881, -1.39174067788, -0.489415522765, -0.119955400397, -0.710421943851, -3.15421837823, -2.22579409979, -1.71698730599, -2.58672035774, -1.95493881519, -0.900292934526, -0.128031056099, -0.0171343834403, -0.166018444639, -1.77431583497, -1.55935361193, -1.70925857447, -1.69305241292, -1.86069481994, -1.79249551908, -3.33651821096, -1.52094732495, -1.39086075024, 0.92314181119, -0.321359347491, -1.09559664272, -1.58497800893, -2.25886166527, -1.2290595633, -2.33362119818, -1.47194761375, -1.53759697603, -0.761777759618, -2.56142619304, -1.76090723359, 0.453782665069, 0.241805656945, -1.29707753606, -2.84638262607, -1.54169792533, -0.0538004852358, -0.544579283221, -1.4867085929, -1.4136817588, -1.0459515402, -0.448806727723, -0.398754129286, -0.797914494747, -0.932848087835, -1.64980338912, -1.12943338341, -2.37702591135, -3.16309526276, -1.90716345595, -0.992280587119, -0.651929425349, -0.906343126553, -0.311177158806, -0.651598236481, -0.93400696165, -1.78956543969, -1.57263239818, -0.389161381095, -1.28936689219, -1.06778572038, -1.40788906151, -1.1672517898, -2.69199192684, -2.41110419262, -3.22592010144, -2.16176652204, -1.94575111914, -2.36895195456, -1.6958388518, -2.39567613449, -2.33641302208, -2.58871617972, -1.26741878052, -1.40378916529, -0.673134049956, -1.36643293924, -1.25882179652, -1.32335468481, -2.47359424233, -1.06789246555, -0.0067803102322, -0.974168570546, -1.80354201192, -1.39032582725, 0.692057867602, 1.15660036291, 0.867635305076, -0.177578433807, -1.89955423067, -1.67579908991, -2.0937718522, -2.46548527862, -2.87628407821, -1.43645281878, -1.17513833731, -1.32782608875, -1.25687733585, -1.89226163339, -1.65078961752, -0.524502872992, -1.46371003895, -0.924427299351, -1.92758842003, -1.54364762602, -0.614534519322, -0.98251840227, 0.0542681165961, -1.11119214541, -1.06229654857, -0.614827553561, -0.713586466959, -0.73635027274, -1.75161658337, -1.03282522594, -1.21189237002, 0.0967693290224, 0.314023737324, 0.744891835557, -0.497985176747, -1.13912864292, -1.80358092595, -2.07052872981, -2.60625787903, -0.496613488914, -1.18244533012, -0.271533505929, -0.957096988747, -1.56511055989, -1.67149026644, -2.11285656834, -2.38901120054, -2.15372797834, -2.08725148282, -1.6845037273, -1.14596756901, -1.9934362037, -2.25722324988, -2.06626025294, -1.67047663698, -1.58753729901, -1.57775336918, -1.63882576381, -1.0383302488, -1.26396898732, -1.18196603952, -1.96070091666, -1.39970643996, 0.185384923343, 0.385010248637, -1.38083956552, -2.84493182625, -3.07169515732, -2.83486459413, -1.44819756536, -1.41775782062, -1.28210902012, -1.38876395567, -0.955123225697, 0.33895884407, 1.22046956648, -0.457501490715, -2.02427765906, -2.13621312586, -1.4070858944, -1.7992834282, -0.619022214569, -0.26352512754, -1.05973523375, -2.39056175402, -2.69434442286, -2.53776516232, -2.40341537091, -2.31305508468, -3.16378893106, -0.821310820135, 0.00693804176907, 0.238142407697, 0.195206506739, -0.561516468345, -1.79347587136, 
-0.417926001645, 0.459393068737, -0.258671471445, -1.37548138955, -2.54424952736, -2.24026246421, -2.78494823819, -1.82744538, -0.700413733102, -0.365332219389, -0.871156420364, -1.62312131811, -2.96509233778, -0.624404155507, -1.15076305795, -1.52251749114, -1.57870866347, -0.203202369295, -0.37493353849, 0.684072860564, 0.246609335439, -1.3345754953, -2.58956548475, -2.39223598495, -0.137936548144, 0.326089035369, 0.672703834, -1.01499404409, -2.36340341768, -2.55628335352, -1.28848116814, -1.65591954691, -1.23007279655, -1.08665078753, -1.46340325053, -1.4122988983, -1.0870266606, -1.01888701201, -1.33686230419, -1.80608653408, -2.72630670304, -2.37105682541, -1.32331104154, -1.80190624761, -2.15885772805, -0.793379112147, -1.05504769304, -1.28289420141, -0.839346210783, 0.0744103415615, -1.98734309688, -2.6569129188, -2.2386191751, -0.122329167174, 0.257983212579, -0.0917714070871, -0.63411610728, -2.62765906873, -2.17393596451, -2.55680364352, -0.437345005181, -0.297253582634, -1.13099116691, -1.10278395825, -1.65727357699, -1.17952743027, -1.58339519785, -1.58772996341, -1.62169862157, -1.40128685868, -1.60861350904, -1.0715477106, -2.79240726175, -2.01361120823, -2.19782653978, -1.45605516802, -1.02543928287, -1.37586123849, -1.16585011452, -1.01112744427, -0.124955261214, -0.741950130348, -0.406577969246, -1.42516401472, -1.80017108988, -0.528817527429, -0.0652627326947, -1.22486232445, -1.28012611832, -0.128951914581, -1.10054945646, -0.823297403364, -0.696786840412, -0.750347329395, -0.0151395361469, -0.100756916032, 0.402211624283, -1.46300556306, -1.53139389029, 0.128578415651, -1.31936359937, -0.502049351644, -0.379686054916, -1.08000935851, -1.47947123629, -2.91020918248, -1.95411632514, -0.799881278621, -0.207824485985, -0.684745888, -1.27098307018, -0.775560605957, -0.124789680101, 1.21886528096, 0.146928722799, -0.429434087789, -0.30801747701, 0.283329278984, -1.29880052415, -2.69669024132, -0.794801967046, -0.455489923065, -0.623749691476, 0.256373096743, -0.812144151093, -0.419790648897, -0.610628380319, -0.975656800725, -1.43945593204, -3.46713841146, -2.84327950019, -1.20319847727, -0.808682327575, -1.50842602792, -0.870848957067, -1.68724758181, -0.946248823711, -1.19464876461, -1.50047423405, -1.80899583185, -1.09666134063, -0.076451926968, -1.43375225906, -1.43112558998, -0.913936235936, -1.70954954074, -1.87482858818, -2.93515579919, -1.62000952425, -1.23852433684, -1.03514868547, -1.29851401096, -2.00985392933, -2.52847347233, -2.26228043026, -2.01667844017, -1.45664704538, -0.622258364283, -0.416109933379, -0.574205151249, -1.19439143722, -0.366367616172, -0.194520582137, 0.029627232661, -0.824146933657, -0.800332585759, -0.303342851003, -1.26941155483, -2.54050790903, -1.94114345627, -2.78766113545, -1.88674220614, -0.602204012034, -0.519633699669, -0.539703818077, -1.30916796065, -2.03898802372, -2.06896824014, -2.66117157499, -2.92175590401, -2.79409530976, -2.62572901375, -1.91416415658, -2.35001912648, -1.41438729769, -0.179139713514, -2.89379005154, -3.71251159448, -3.6111087692, -0.846343489268, -1.20588701694, -0.792402634636, -1.85966761315, -1.70904545043, -1.51171881561, -0.894128827824, -0.735009004484, -2.27514971687, -1.67986361464, -1.70837839372, -0.205759894069, -1.47637346987, -2.58109043424, -1.59475207267, -0.92053407106, -0.732807533246, -2.53269210708, -1.2110423345, -2.01549277894, -0.190212569334, -1.76946767565, 0.0146261767786, 0.00207824046943, -0.483866736662, -1.5570144038, -1.125697451, -1.99698575745, -1.79061888676, -1.39704321108, 
-1.54523824144, -0.54172843683, -0.727962218408, -1.52288105108, -2.31002217985, -3.36598071902, -1.86867725373, -1.79179493406, -1.75825765931, -2.02830210926, -2.21411386579, -2.07412156429, -1.65064047949, -1.36842009445, -0.849602004792, -0.825839578236, -1.0827243956, -1.93768059967, -1.38545885469, -2.39718990862, -2.29868869908, -0.753955071445, -1.52476749372, -0.993069735822, -1.00492005773, 0.00982874412454, -0.549297121687, -0.0962851062745, -1.40859942241, -2.14044230832, -1.07908958941, -1.28251982898, -1.19385261979, -2.84335277281, -2.14477668165, -1.40141304798, -1.06165727149, -2.56302753001, -4.05396608067, -2.57166008422, -2.01663979579, -0.144043891831, -1.27960695629, -1.32383189416, -1.64693553758, 0.18456900798, 0.496748385668, 0.452029025942, -1.91613624292, -1.63964438573, -1.81701602466, -1.06887247846, -0.847786067257, -0.837915690589, -0.945800119729, -1.34474066349, -2.05726917156, -1.66461458816, -0.576082033857, -2.4375456523, -2.52473777362, -1.96872391511, -1.09670095919, 0.315303997766, -0.214308904421, -0.969386733671, -0.626066792499, -2.41616480356, -1.47865144997, -1.3861885354, -0.815297665615, -1.99466111373, -1.4606246154, -2.19035813483, -3.24644292523, -2.93800660564, -2.26134902188, -2.98293772492, -1.58543090903, -1.8201497994, -2.27269460793, -1.16902516942, -0.892832559014, 0.447190014875, -0.266327520935, -2.20657814537, -2.38489501953, -2.77925678326, -3.20504466862, -1.61152356374, -1.17435813239, -1.12295842262, -2.516868514, -2.20157067513, -2.62294527227, -1.09695178606, -1.66406912364, -1.43161117267, -1.30724519595, -0.0827584872046, 0.720177165707, -1.61352766506, -1.10504731517, -0.402874236399, -0.372448204965, -0.697055036508, -1.11267799773, -1.08546367349, -0.942690390338, -0.432714724536, -1.42487518691, -1.63494207331, -0.671591410162, -0.693324706943, -0.866106707545, -1.32719600045, -2.28349889258, -2.09301825478, -0.561560977288, 0.555589089534, -0.69287130853, -0.931341647802, -0.867795407003, -0.70052170936, -0.459610725751, -0.528974736173, -0.156145084931, -1.50818792226, -2.9093057965, -2.32144730859, -2.63852406371, -2.66291301349, -1.8402626495, -1.56398665409, -0.387286539039, 0.526214694047, 0.28767578768, 0.699764921572, 0.333360945508, -1.0232802596, -2.40321999699, -1.05335978437, -0.889353786125, -1.80252429141, -2.49676952341, -0.776453884663, -0.148182607935, -1.55815418515, -1.49293373416, -1.68973627306, -0.865343203713, -1.89721215668, -1.3521765439, -1.15998018295, -0.979938337546, -1.42091698738, -0.645905833567, -0.515920918537, -1.44774396751, -0.810728687557, -0.0695483300609, -0.14928894577, -1.75720693074, -1.71791866332, -0.305335690969, 0.0631083224194, -0.121819731307, -0.62884744608, 0.101247783918, 0.161171297024, 0.183115750795, -2.4197979235, -2.94548298226, -2.34511337146, -1.13239209139, -0.746328343429, -0.0093829082104, -0.683867570062, -0.618098821889, -0.6233494396, -1.92097497511, -2.32016414836, -1.79139079583, -1.75464178414, -1.54718660013, -0.263858761795, -0.656569923354, -2.65631385162, -2.61458786174, -2.1057035288, -1.63041975039, -0.614911317595, -1.42167749482, -2.01311680592, -0.430263416279, 0.152372381471, -0.599377745282, -0.480170362573, -1.77763451432, -0.792563419732, -1.40957115244, -2.08859815905, -0.95856324177, -0.858334307966, -1.03885007804, -0.880069946346, -0.949815435201, -1.7145479582, -1.85033392923, -1.93619419086, -1.31729780691, -1.62076274039, 0.353842145448, 0.0935713579185, -0.525105033009, -1.07201668718, -2.11777949931, -2.31208027056, -2.63556287615, 
-2.1410362571, -0.889745634153, -1.00338198293, -0.905124214637, -1.90892854498, -0.679516151219, -0.0697464025947, 0.282115948148, -0.583632010053, -1.54688420164, -1.73858869483, -1.72954831265, -0.739848041102, -0.734084758427, -1.75334264831, -0.62368659661, -0.937470561718, -2.50077879944, -2.83438240112, -0.842679813472, -1.46639708608, -2.89602896898, -4.46721998179, -2.20375226758, -1.38858423414, -0.282431630336, -0.693707851525, -0.809119080883, -1.64436697412, -1.15239763516, -0.856810488132, -1.97511447109, -0.859963935154, -1.19703039997, -1.18907058669, -1.78013649931, -0.891060763371, -2.1425012242, -1.60136258739, -0.605867970456, -1.12931539841, -1.78175315189, -2.33958290898, -1.7271570232, -1.32264515126, -1.11753851901, -0.483841363052, 0.497244600547, -0.4289476144, -1.48049883537, -1.83403610786, -1.49097886074, -1.59612592534, -2.02080965045, -2.15236243595, -1.80104532285, -2.58146158535, -3.72198545557, -1.50130905766, -0.550126528747, 0.104920372141, -0.591974740737, -1.1874108699, -1.77138925197, -1.62150814293, -2.54740975978, -1.61384319624, -2.30447210457, -2.77607494858, -2.13275065999, -2.13465322616, -2.41613000943, -1.85384778241, -2.05245133085, -0.679274954559, -0.409218910197, 0.310272725645, -0.864658400615, 0.0605528009482, 0.20907084323, -1.4842824164, -0.467160922172, -0.847737546376, -0.889576174999, -1.05974405444, -1.46362888194, -1.08084540906, -1.8493333792, -1.01159429796, -0.683163541095, -0.495735902933, 0.335757916562, 0.308262683989, -0.452294283194, -1.03556574241, -0.999622500662, -1.23253798753, -0.545482333186, -1.42710220689, -2.27689311581, -1.70962089169, -0.69863137867, -1.43229407623, -1.41699049745, -1.31473437788, -0.436345732383, -0.556300278179, -1.11778728484, -1.58162699728, -2.01176803947, -1.32848031244, -0.343991624499, -2.22041537606, -2.74032622864, -2.22338939268, -1.4449883213, -0.337366255237, -0.814176028625, -0.630071650406, -1.14787243637, -0.128339439908, 0.827470342227, 0.37434613998, -2.41074138827, -2.10945970354, -1.30295295397, -0.866505220681, -1.16862969519, 0.135103939943, -0.0864227792678, -2.58283402065, -2.24756432931, -1.94366841263, -0.78296854649, -1.16173229828, -2.17299604909, -2.52108237117, -1.4264984798, -1.14379897021, 0.11866901983, -0.982926428419, -1.78311324247, -2.95214487065, -2.26284400662, -1.89348026099, -2.16114567944, -2.77751409618, -2.04530842361, -1.4165274172, -2.44085334543, -2.32157370223, -1.57211012046, -0.411537686242, -0.46244862687, -0.239228921094, 0.0584624012253, -1.00829161739, -0.821072747075, -0.393958377759, -0.503934384051, -1.36563053553, -2.10843378434, -0.682877666111, -1.37416250727, -2.03763372552, -2.05584729394, -2.75614471996, -2.72259317961, -2.1802912808, -3.01583739202, -3.32141290734, -1.8796369986, -0.995645334692, -0.662415439756, -0.926211356448, -1.470926628, -1.55631217981, -1.63750035791, -0.749625981013, -0.407268585023, -0.0206507381091, -0.784652888395, -0.964717008335, -3.05220648876, -2.95320149648, -0.873574849226, -2.047385356, -2.10158098041, -0.868480133617, -0.95140461349, 0.119620547893, -1.11973483903, -1.47984798232, -1.52333851493, -2.83605106214, -3.65690457846, -1.85764082307, -1.02779872428, -0.143223209722, -0.840547100655, -0.941414478832, -0.827634628781, -0.0117038168844, -1.29509971248, -0.698217121759, -0.215377799592, -2.05994136857, -2.76905877503, -0.590362378603, 1.2022265016, 0.37564081281, 0.625298124356, 1.02636341876, 1.01344406009, 0.112217147862, -0.480941398175, -0.668441616013, -1.34520183382, 0.0704388675702, 
-1.0787991139, 0.132802044339, 0.346596549171, 0.0178003917763, -0.99527089911, -1.83877027832, -1.49695148408, -2.4970869299, -2.72972187752, -2.6107715155, -2.56686675937, -1.85254550578, -1.76164513187, -0.923704665961, -0.990352891402, 0.188223328345, -1.64642838688, -2.09937924157, -1.75625984329, -1.50572996551, -1.51600315435, -1.12741576265, -0.715537495046, -2.01685019508, -2.42838540175, -0.991892833282, -1.12669942366, -0.370146527404, -1.61780637986, -1.00003509245, -0.378869967177, -1.35999613223, -2.19616360253, -0.510101904702, -0.74686161991, -0.806731024822, -0.523884534559, -0.431062920139, 0.0251041431125, -1.0031317561, -1.64956332691, -1.18767109118, -1.6555239111, -3.57516097627, -3.32110170139, -1.7528759845, -1.01273934349, -0.503387252073, -1.52715401199, -1.8552567692, -0.573089432577, -0.46198070483, -0.241442729855, -0.758738213928, -1.13376385399, -1.8541567977, -2.3827654496, -2.38284410701, -2.30532093064, -1.64397174719, -1.07922478624, 0.760926136688, -0.087614867278, -0.772643508543, -1.50843279058, -0.774090501416, -0.238840062214, -0.335678668261, -0.580574728976, -1.62353515368, -1.6972824555, -2.08630757807, -0.223385819112, -1.06841805774, -1.46253902461, -1.67568799451, -1.24394276736, -1.76776355295, -0.532529796952, -0.916519126128, -0.963651339225, -1.40771012456, -2.61049452077, -3.17887048353, -1.61119147437, -1.91838181074, -0.840476633234, -0.364698948293, -1.700915498, -1.57784401329, -0.401078430311, 0.330759045292, -1.33884984061, -0.244174850975, -0.387969386247, -1.5034518314, -2.01985409533, -1.30224545427, -2.0964539077, -2.59679131442, -0.470968546562, -0.265134179108, -1.81147934139, -1.66337012622, -1.4440349286, -0.952057797231, -1.04982847231, -0.770670360467, -1.2235665756, -1.26090091359, 0.319753918352, -0.732204423568, -1.38258556572, -3.04303202913, -3.2369140087, -1.61035050772, -1.92483870062, -3.02067819881, -2.29423998712, -1.62013289507, -2.3861463406, -2.53707545745, -2.48428177672, -0.888591054707, -1.96425746269], dtype=np.float32).reshape(1000,1)
y=torch.tensor(y)
T=1000
T=torch.tensor(T)
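# The model below is a second-order moving-average (MA(2)) time series: epsilon tracks the
# innovations and each y[t] is Normal(mu + theta1*eps[t-1] + theta2*eps[t-2], sigma), with
# Cauchy priors; `guide` further down is the variational family optimised by SVI.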
def model(y,T):
mu = pyro.sample('mu'.format(''), dist.Cauchy(torch.tensor(0.0)*torch.ones([amb(1)]),torch.tensor(2.5)*torch.ones([amb(1)])))
with pyro.iarange('theta_range_'.format('')):
theta = pyro.sample('theta'.format(''), dist.Cauchy(torch.tensor(0.0)*torch.ones([amb(2)]),torch.tensor(2.5)*torch.ones([amb(2)])))
epsilon = torch.zeros([amb(T)])
epsilon[1-1]=y[1-1]-mu
epsilon[2-1]=y[2-1]-mu-theta[1-1]*epsilon[1-1]
for t in range(3, T+1):
epsilon[t-1]=(y[t-1]-mu-theta[1-1]*epsilon[t-1-1]-theta[2-1]*epsilon[t-2-1])
sigma = pyro.sample('sigma'.format(''), dist.Cauchy(torch.tensor(0.0)*torch.ones([amb(1)]),torch.tensor(2.5)*torch.ones([amb(1)])))
for t in range(3, T+1):
        pyro.sample('obs_{0}_100'.format(t), dist.Normal(mu+theta[1-1]*epsilon[t-1-1]+theta[2-1]*epsilon[t-2-1],sigma), obs=y[t-1])
def guide(y,T):
arg_1 = pyro.param('arg_1', torch.ones((amb(1))), constraint=constraints.positive)
arg_2 = pyro.param('arg_2', torch.ones((amb(1))))
arg_3 = pyro.param('arg_3', torch.ones((amb(1))), constraint=constraints.positive)
mu = pyro.sample('mu'.format(''), dist.StudentT(df=arg_1,loc=arg_2,scale=arg_3))
arg_4 = pyro.param('arg_4', torch.ones((amb(2))), constraint=constraints.positive)
arg_5 = pyro.param('arg_5', torch.ones((amb(2))), constraint=constraints.positive)
with pyro.iarange('theta_prange'):
theta = pyro.sample('theta'.format(''), dist.Beta(arg_4,arg_5))
for t in range(3, T+1):
pass
arg_6 = pyro.param('arg_6', torch.ones((amb(1))), constraint=constraints.positive)
arg_7 = pyro.param('arg_7', torch.ones((amb(1))), constraint=constraints.positive)
sigma = pyro.sample('sigma'.format(''), dist.Gamma(arg_6,arg_7))
for t in range(3, T+1):
pass
pass
return { "mu": mu,"theta": theta,"sigma": sigma, }
optim = Adam({'lr': 0.05})
svi = SVI(model, guide, optim, loss=Trace_ELBO() if pyro.__version__ > '0.1.2' else 'ELBO')
for i in range(4000):
loss = svi.step(y,T)
if ((i % 1000) == 0):
print(loss)
for name in pyro.get_param_store().get_all_param_names():
print(('{0} : {1}'.format(name, pyro.param(name).data.numpy())))
print('mu_mean', np.array2string(dist.StudentT(pyro.param('arg_1'), pyro.param('arg_2'), pyro.param('arg_3')).mean.detach().numpy(), separator=','))
print('theta_mean', np.array2string(dist.Beta(pyro.param('arg_4'), pyro.param('arg_5')).mean.detach().numpy(), separator=','))
print('sigma_mean', np.array2string(dist.Gamma(pyro.param('arg_6'), pyro.param('arg_7')).mean.detach().numpy(), separator=','))
np.set_printoptions(threshold=np.inf)
with open('samples','w') as samplefile:
samplefile.write('mu:')
samplefile.write(np.array2string(np.array([guide(y,T)['mu'].data.numpy() for _ in range(1000)]), separator=',').replace('\n',''))
samplefile.write('\n')
samplefile.write('theta:')
samplefile.write(np.array2string(np.array([guide(y,T)['theta'].data.numpy() for _ in range(1000)]), separator=',').replace('\n',''))
samplefile.write('\n')
samplefile.write('sigma:')
samplefile.write(np.array2string(np.array([guide(y,T)['sigma'].data.numpy() for _ in range(1000)]), separator=',').replace('\n',''))
samplefile.write('\n')
|
[
"zhekunz2@Zhekuns-MacBook-Pro.local"
] |
zhekunz2@Zhekuns-MacBook-Pro.local
|
3b13833a39f9e9368ed077ab78b9898a7d50c57e
|
3255eb9597c65f7b4d3c882a86a4cc8c3283f828
|
/replaceText.py
|
415828feebe734175adfb486cef995604e8c7ed4
|
[] |
no_license
|
bsaakash/SummerInternship2018
|
c3069c94070f845e6680530027c2fcbdcf4d8f7a
|
fe14a2ffcd8f8ad9cdc5a046230b63db4d2b7f94
|
refs/heads/master
| 2020-03-21T16:00:17.999559
| 2018-08-02T00:02:37
| 2018-08-02T00:02:37
| 138,745,314
| 0
| 0
| null | 2018-07-10T14:20:26
| 2018-06-26T13:53:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
""" Write a python function to find and replace text in a file. This function takes three inputs - 1 string,
1 filename, and 1 number. It searches for the input string in the file, enclosed within '< >' and replaces ''
with the number passed as input. i.e., if the input string is 'var1', the filename is 'Input_File_1.txt', and the
input number is 12.34, the function searches for and replaces all instances of '' in the file 'Input_File_1.txt'
with 12.34. There should be no other changes to the file."""
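# Illustrative run (hypothetical values, not from the original spec):
#   file contains "mass = <var1> kg", string = 'var1', number = 12.34
#   after running, the file contains "mass = 12.34 kg"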
import re
print("Enter the File Name.")
fileName = input()
print("Enter the string to replace.")
changeString = input()
print("Enter the number to replace the string with.")
replace = float(input())
file = open(fileName)
fileData = str(file.read())
file.close()
stringRegex = re.compile(r"<"+changeString+r">")
newData = ""
for instance in stringRegex.findall(fileData):
newData= newData + fileData[0:fileData.index(instance)]+str(replace)
fileData = fileData[(fileData.index(instance)+len(instance)):]
newData = newData + fileData
file = open(fileName,"w")
file.write(newData)
file.close()
|
[
"noreply@github.com"
] |
bsaakash.noreply@github.com
|
df00a7056301265671659106b1ca0ef1e1c7d558
|
2745dfc935b023f21e8f2aabe98e22ca41d2fc82
|
/temp.py
|
73944875cf9b6f48bde226774eff1d066ce41f32
|
[] |
no_license
|
redteamcaliber/fireEye
|
5e5a78c2a6fb07ed7c29d0b467f0a3c0f3977378
|
1360ddc878dba29df3f6c0d12e8a399a08249abb
|
refs/heads/master
| 2020-05-29T12:24:46.167705
| 2016-03-10T06:59:54
| 2016-03-10T06:59:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
# -*- coding: utf-8 -*-
#post.py
import urllib,urllib2
import hmac
from test import insert_a_task
import re
import os
import random
# signature and communication
key = '123456'
msg = str(random.uniform(1,100))
sign = hmac.new(key,msg).hexdigest()
#url = 'http://localhost/test999.php'
url = 'http://wtf.thinkphp.com/index.php?m=index&c=tools&a=sureport'
values = {'msg':msg,'sign':sign,'qid':25}
data = urllib.urlencode(values)
req = urllib2.Request(url,data)
res = urllib2.urlopen(req).read()
print res
# task insert into mysql
#insert_a_task(b[0],b[1])
#cmd = 'python ./wyportmap/wyportmap.py ' + b[1] +' ' + b[0]
#os.system(cmd)
# select result from mysql and send to apache
# read a task and run it ,if done return 1 else return 0
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
f28f9bbd8171763ce2ea81c3645f32432618f581
|
2509b353c9180cfbdeea8e4841fb17bc8535802d
|
/flask_db.py
|
766ad06b9973a88048582c3e95258b67566b9460
|
[] |
no_license
|
aberle/Learning-Challenges
|
e2d7b0917958e478e39c51f1de314fba7e6e96cc
|
72b945abdc7f76cab14b440960b7e166b3086a27
|
refs/heads/master
| 2021-01-13T01:30:59.513710
| 2014-03-07T03:02:17
| 2014-03-07T03:02:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///flask.db'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return 'User:%s, Email:%s' % (self.username, self.email)
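# Usage sketch (assumption, not part of the original file): create the schema and add a row with
#   db.create_all(); db.session.add(User('alice', 'alice@example.com')); db.session.commit()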
|
[
"aberle.nick@gmail.com"
] |
aberle.nick@gmail.com
|
63c02af323bc75b27051e9b4e4cc7cb662253e65
|
42d9eb520b87c76fee45168ee3771bb50e705b07
|
/Web_Integration/face_api/request_draw.py
|
650491c32ea4935577cf47451ab352e1166c7eb4
|
[] |
no_license
|
LovepreetSingh-09/OpenCV
|
a6b57b1dffcad7f807edf5bd2cb05ea916aff498
|
02c1e2062287b60b13b01eb641ab834c3a3188f4
|
refs/heads/master
| 2022-11-06T22:23:31.234468
| 2020-07-17T19:10:24
| 2020-07-17T19:10:24
| 270,727,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
import cv2
import numpy as np
import requests
from matplotlib import pyplot as plt
def show_img_with_matplotlib(color_img, title, pos):
img_RGB = color_img[:, :, ::-1]
ax = plt.subplot(1, 1, pos)
plt.imshow(img_RGB)
plt.title(title)
plt.axis('off')
FACE_DETECTION_REST_API_URL = "http://localhost:5000/detect"
IMAGE_PATH = "test_face_processing.jpg"
image = open(IMAGE_PATH, "rb").read()
payload = {"image": image}
r = requests.post(FACE_DETECTION_REST_API_URL, files=payload)
print("status code: {}".format(r.status_code))
print("headers: {}".format(r.headers))
print("content: {}".format(r.json()))
json_data = r.json()
result = json_data['result']
image_array = np.asarray(bytearray(image), dtype=np.uint8)
img_opencv = cv2.imdecode(image_array, -1)
for face in result:
left, top, right, bottom = face['box']
cv2.rectangle(img_opencv, (left, top), (right, bottom), (0, 255, 255), 2)
cv2.circle(img_opencv, (left, top), 5, (0, 0, 255), -1)
cv2.circle(img_opencv, (right, bottom), 5, (255, 0, 0), -1)
fig = plt.figure(figsize=(8, 6))
plt.suptitle("Using face API", fontsize=14, fontweight='bold')
fig.patch.set_facecolor('silver')
show_img_with_matplotlib(img_opencv, "face detection", 1)
plt.show()
|
[
"slovepreet435@gmail.com"
] |
slovepreet435@gmail.com
|
3034f85c3d2b6f91c7e721a076903dfaa30112f3
|
68200741ecec49667620d1f3424852f0d7e99470
|
/plugins/Snoo/flair.py
|
fc3ae268dd6a6f6d956212b91db4a5414eb0b501
|
[] |
no_license
|
frumiousbandersnatch/sobrieti-plugins
|
30c1079f3027226465d8faec9e88d2bcbb5bdc7a
|
b627829c974f6cadce471e904dbbcca608158360
|
refs/heads/master
| 2022-05-28T10:48:30.309250
| 2022-04-16T01:57:11
| 2022-04-16T01:57:11
| 6,642,855
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
#!/usr/bin/env python
import json
import requests
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json; charset=UTF-8',
'user-agent': 'sobrietibot'
}
comment_urlpat = "https://www.reddit.com/user/{username}/comments/{subreddit}.json"
search_urlpat = "https://www.reddit.com/r/{subreddit}/search.json"
def get_recent_search(subreddit, username):
url = search_urlpat.format(**locals())
params = dict(sort='new', q="author:"+username, restrict_sr='on', t='all')
r = requests.get(url, params = params, headers=headers)
return r.json()
def get_recent_comments(subreddit, username):
url = comment_urlpat.format(**locals())
params = dict(sort='new')
r = requests.get(url, params = params, headers=headers)
return r.json()
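# Walk the reddit "Listing" JSON (data -> children -> each child's data) and return the
# requested field (author_flair_text by default) from the first entry whose subreddit
# and author both match.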
def get_data_entry(data, subreddit, username, entryname = 'author_flair_text'):
if data is None: return
data = data.get('data',None)
if data is None: return
children = data.get('children',[])
for child in children:
data = child.get('data',None)
if data is None: continue
sub = data.get('subreddit',None)
if sub != subreddit: continue
author = data.get('author',None)
if author != username: continue
return data.get(entryname, None)
return
def get_flair(subreddit, username):
res = get_recent_search(subreddit, username)
if not res:
res = get_recent_comments(subreddit, username)
if not res:
return
flair = get_data_entry(res, subreddit, username)
return flair
if __name__ == '__main__':
import sys
sub,user = sys.argv[1], sys.argv[2]
flair = get_flair(sub, user) or 'no flair'
print 'In %s, %s has flair: "%s"' % (sub, user, flair)
|
[
"frumious.irc@gmail.com"
] |
frumious.irc@gmail.com
|
698a7d4dc268ac9ca974a56bd1e79774c9989ff8
|
7e15a679d37e8d8449871c8f6f3f649d05b33b36
|
/web/ui_modules/__init__.py
|
b1c0a69df33032c18f535f065b303a88ebb878de
|
[] |
no_license
|
SuprDewd/gamma
|
a7514aaa86790e92df481da75c283279ac7a1678
|
98b291da26d8b54959134e493a25b73234d7df63
|
refs/heads/master
| 2021-01-17T16:59:05.247942
| 2014-03-22T23:09:28
| 2014-03-22T23:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
import os
import glob
import importlib
modules = {}
for f in glob.glob(os.path.dirname(__file__) + '/*.py'):
name = os.path.basename(f)[:-3]
if name == '__init__':
continue
module = importlib.import_module('ui_modules.' + name)
for k, v in module.__dict__.items():
if k.endswith('Module'):
exec('from %s import %s' % (name, k)) # ugh
modules[k[:-len('Module')]] = v
|
[
"suprdewd@gmail.com"
] |
suprdewd@gmail.com
|
052e50b5a5df887c17bdbb27be56c70b493aca1b
|
a41c3e646114d95482a9a71713151501d3b7d090
|
/Finite_fields&Signal_design/fundamental_tools/A_way_of_q_Field.py
|
5ce55f3415eb07da631205677d48ccebb3964ed1
|
[] |
no_license
|
ZckFreedom/Mathworks
|
574adbd81da4507724f0c4584254cbd5b6a67187
|
ce76c05cd9094f7e3378be1ef7478c7cb1492f0e
|
refs/heads/master
| 2021-08-22T02:55:09.698849
| 2020-09-25T04:12:18
| 2020-09-25T04:12:18
| 203,349,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,826
|
py
|
from Polynomials_Field import *
'''
With 1, θ, θ^2, ... as a basis, this class defines the basic field operations as well as
inversion and related computations.
'''
class Field_q:
def __init__(self, list1, h):
if not isinstance(h, PolynomialsField):
            raise FieldError('definition error: h must be a PolynomialsField')
if not h.is_irreducible():
            raise FieldError('h is not irreducible, so this is not a field')
p = h.get_home()
if isinstance(list1, int):
if list1 == 0:
self._body = PolynomialsField([0], p)
else:
g = PolynomialsField(list1, p)
self._body = g % h
elif isinstance(list1[0], int):
if list1 == [0]:
self._body = PolynomialsField([0], p)
else:
g = PolynomialsField(list1, p)
self._body = g % h
elif isinstance(list1, PolynomialsField):
if list1 == PolynomialsField([0], p):
self._body = PolynomialsField([0], p)
else:
g = PolynomialsField(list1, p)
self._body = g % h
self._character = p
self._mother = h
def __add__(self, other):
if self._mother != other.get_mother():
            raise FieldError('elements are not from the same field')
p = self._character
h = self._body.get_body().copy()
g = other.get_body().get_body().copy()
new_polynomial = PolynomialsField(h, p) + PolynomialsField(g, p)
new_coefficients = new_polynomial.get_body().copy()
return Field_q(new_coefficients, self._mother)
def __sub__(self, other):
if self._mother != other.get_mother():
            raise FieldError('elements are not from the same field')
p = self._character
h = self._body.get_body().copy()
g = other.get_body().get_body().copy()
new_polynomial = PolynomialsField(h, p) - PolynomialsField(g, p)
new_coefficients = new_polynomial.get_body().copy()
return Field_q(new_coefficients, self._mother)
def __mul__(self, other):
if self._mother != other.get_mother():
            raise FieldError('elements are not from the same field')
p = self._character
h = self._body.get_body().copy()
g = other.get_body().get_body().copy()
new_polynomial = PolynomialsField(h, p) * PolynomialsField(g, p)
new_coefficients = new_polynomial.get_body().copy()
return Field_q(new_coefficients, self._mother)
def __eq__(self, other):
if self._mother != other.get_mother():
            raise FieldError('elements are not from the same field')
return self._body == other.get_body()
def __pow__(self, p):
r = self._mother
g = Field_q([1], r)
h = Field_q(self._body.get_body().copy(), r)
while p > 0:
g *= h
p -= 1
return g
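    # Multiplicative order: smallest n >= 1 with self**n equal to the identity
    # (returns 0 for the zero element, which has no multiplicative order).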
def ord(self):
if self == Field_q([0], self.get_mother()):
return 0
r = self._mother
ids = Field_q([1], r)
h = Field_q(self._body.get_body().copy(), r)
g = Field_q([1], r)
n = 1
while True:
g = g * h
if g == ids:
return n
n += 1
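    # Inverse via the multiplicative order: since self**ord == 1, the inverse is self**(ord - 1).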
def inverse(self):
r = self._mother
h = Field_q(self._body.get_body().copy(), r)
g = Field_q([1], r)
n = self.ord()
while n > 1:
g = g * h
n -= 1
return g
    def __truediv__(self, other):
        the_inverse = other.inverse()
        return self*the_inverse
def get_mother(self):
return self._mother
def get_body(self):
return self._body
def get_character(self):
return self._character
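    # Field trace over GF(p): Tr(x) = x + x^p + x^(p^2) + ... + x^(p^(n-1)),
    # where n is the degree of the defining polynomial.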
def trace(self):
n = self.get_mother().get_deg()
p = self._character
h = Field_q([0], self.get_mother())
g = Field_q(self._body.get_body().copy(), self.get_mother())
while n > 0:
h += g
g = g**p
n -= 1
return h
def __str__(self):
k = self.get_body().get_deg()
list1 = self.get_body().get_body().copy()
a = list1.pop(0)
p = self.get_body().get_home()
str1 = ''
ids = Field_p(1, p)
zero = Field_p(0, p)
if k > 0:
if a == ids and k == 1:
str1 += 'θ'
k -= 1
elif a != ids and k == 1:
str1 += str(a) + '*θ'
k -= 1
elif a == ids and k != 1:
str1 += 'θ^' + str(k)
k -= 1
else:
str1 += str(a) + 'θ^' + str(k)
k -= 1
while k > 0:
a = list1.pop(0)
if a == zero:
k -= 1
continue
elif a == ids and k != 1:
str1 += '+' + 'θ^' + str(k)
k -= 1
elif a == ids and k == 1:
str1 += '+' + 'θ'
k -= 1
elif a != ids and k == 1:
str1 += '+' + str(a) + '*θ'
k -= 1
elif a != ids and k != 1:
str1 += '+' + str(a) + '*θ^' + str(k)
k -= 1
a = list1[0]
if a == zero:
pass
else:
str1 += '+' + str(a)
list1.clear()
return str1
elif k == 0:
list1.clear()
return str(self._body.get_body()[0])
elif k == -1:
list1.clear()
return str(0)
# h1 = [1, 0, 0, 1, 1]
# h2 = [1, 0]
# g1 = PolynomialsField(h1, 2)
# # print(g1.is_irreducible())
# f = Field_q(h2, g1)
# # print(f.trace().get_body())
# sequence_a = [1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0]
# for k in range(0, len(sequence_a)):
# sequence_a[k] = Field_q(sequence_a[k], g1)
# for k in range(0, len(sequence_a)):
# g = Field_q([0], g1)
# for t in range(0, len(sequence_a)):
# g += sequence_a[t] * f**(t*k)
# print(g)
#
# print(f)
|
[
"zhu2010_546@163.com"
] |
zhu2010_546@163.com
|
43e35b37ae4cd2c1a13852accc79600fe35aabde
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/webgl/src/resources/html5lib/src/html5lib/inputstream.py
|
edec132975d95602c261c56fe59183204cc70bdf
|
[
"LicenseRef-scancode-khronos",
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 27,795
|
py
|
import codecs
import re
import types
import sys
from constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from constants import encodings, ReparseException
import utils
#Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([str(item) for item in spaceCharacters])
asciiLettersBytes = frozenset([str(item) for item in asciiLetters])
asciiUppercaseBytes = frozenset([str(item) for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([">", "<"])
invalid_unicode_re = re.compile(u"[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile(ur"[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream:
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1,0] #chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos < self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
            rv.append(bufferedData[bufferOffset:
                                   bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return "".join(rv)
class HTMLInputStream:
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
#Craziness
if len(u"\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile(u"[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile(u"([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (codecName(encoding), "certain")
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
# Encoding Information
#Number of bytes to use when looking for a meta element with
#encoding information
self.numBytesMeta = 512
#Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
#Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
#Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
#Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
# Otherwise treat source as a string and convert to a file object
if isinstance(source, unicode):
source = source.encode('utf-8')
self.charEncoding = ("utf-8", "certain")
try:
from io import BytesIO
except:
# 2to3 converts this line to: from io import StringIO
from cStringIO import StringIO as BytesIO
stream = BytesIO(source)
if (not(hasattr(stream, "tell") and hasattr(stream, "seek")) or
stream is sys.stdin):
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
#First look for a BOM
#This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
#If there is no BOM need to look for meta elements with encoding
#information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
#Guess with chardet, if avaliable
if encoding is None and chardet:
confidence = "tentative"
try:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence="tentative"
encoding = self.defaultEncoding
#Substitute for equivalent encodings:
encodingSub = {"iso-8859-1":"windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException, "Encoding changed from %s to %s"%(self.charEncoding[0], newEncoding)
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count(u'\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind(u'\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line+1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
#Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub(u"\ufffd", data)
data = data.replace(u"\r\n", u"\n")
data = data.replace(u"\r", u"\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in xrange(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
#Someone picked the wrong compile option
#You lose
skip = False
import sys
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
#Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos+2]):
#We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos+2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite = False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = u"".join([u"\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = u"^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile(u"[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = u"".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class EncodingBytes(str):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
return str.__new__(self, value.lower())
def __init__(self, value):
self._position=-1
def __iter__(self):
return self
def next(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p]
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p+len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes)-1)
return True
else:
raise StopIteration
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
("<!--",self.handleComment),
("<meta",self.handleMeta),
("</",self.handlePossibleEndTag),
("<!",self.handleOther),
("<?",self.handleOther),
("<",self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing=False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo("-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
#if we have <meta not followed by a space so just keep going
return True
#We have a valid meta element we want to search for attributes
while True:
#Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == "charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == "content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
self.data.next()
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
#If the next byte is not an ascii letter either ignore this
#fragment (possible start tag case) or treat it according to
#handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == "<":
#return to the first step in the overall "two step" algorithm
#reprocessing the < byte
data.previous()
else:
#Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset("/"))
# Step 2
if c in (">", None):
return None
# Step 3
attrName = []
attrValue = []
#Step 4 attribute name
while True:
if c == "=" and attrName:
break
elif c in spaceCharactersBytes:
#Step 6!
c = data.skip()
c = data.next()
break
elif c in ("/", ">"):
return "".join(attrName), ""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c == None:
return None
else:
attrName.append(c)
#Step 5
c = data.next()
#Step 7
if c != "=":
data.previous()
return "".join(attrName), ""
#Step 8
data.next()
#Step 9
c = data.skip()
#Step 10
if c in ("'", '"'):
#10.1
quoteChar = c
while True:
#10.2
c = data.next()
#10.3
if c == quoteChar:
data.next()
return "".join(attrName), "".join(attrValue)
#10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
#10.5
else:
attrValue.append(c)
elif c == ">":
return "".join(attrName), ""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = data.next()
if c in spacesAngleBrackets:
return "".join(attrName), "".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
def __init__(self, data):
self.data = data
def parse(self):
try:
#Check if the attr name is charset
#otherwise return
self.data.jumpTo("charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == "=":
#If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
#Look for an encoding between matching quote marks
if self.data.currentByte in ('"', "'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
#Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
#Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if (encoding is not None and type(encoding) in types.StringTypes):
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
|
[
"jengelh@inai.de"
] |
jengelh@inai.de
|
c7dfeaa97bf0eb949d1a6d2ecdeeed03f601b720
|
bdb8848a5e44bc6ae6f7e8ecb0eb5a88f048c9f1
|
/To_Do.py
|
60ce32259f489783b59f352213508d048b101038
|
[] |
no_license
|
adiG48/TO-DO-LIST-PROGRAM-1
|
2dd4e576307bdf5d9e1578538ab25ce2cde9c102
|
9da60f1c094ce6ae2bb2ceb46a190632f64433d0
|
refs/heads/main
| 2023-01-30T12:23:22.428522
| 2020-12-11T10:10:25
| 2020-12-11T10:10:25
| 320,536,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
import PySimpleGUI as sg
from file import file_read, file_write
fname = "Text.txt"
tasks = file_read(fname)
layout = [
[sg.Text("ToDo List")],
[sg.InputText("", key='todo_item'), sg.Button(button_text="Add", key='add_save')],
[sg.Listbox(values=tasks, size=(40, 10), key="items"), sg.Button("Delete"), sg.Button("Edit"), sg.Button("Exit")]
]
window = sg.Window("ToDo App", layout)
while True:
events, values = window.Read()
if events == 'add_save':
tasks.append(values['todo_item'])
window.FindElement('items').Update(values=tasks)
window.FindElement('add_save').Update("Add")
window.FindElement('todo_item').Update('')
elif events == "Delete":
tasks.remove(values["items"][0])
window.FindElement('items').Update(values=tasks)
file_write(fname, tasks)
elif events == "Edit":
edit_val = values["items"][0]
tasks.remove(values["items"][0])
window.FindElement('items').Update(values=tasks)
window.FindElement('todo_item').Update(value=edit_val)
window.FindElement('add_save').Update("Save")
file_write(fname, tasks)
elif events == None or events == "Exit":
break
window.Close()
|
[
"noreply@github.com"
] |
adiG48.noreply@github.com
|
cae7bb386694e35e4716a59c8b4be78241ca9398
|
ceca10c49f709958535004770d1688f0ee04988a
|
/Django/DjangoNrps/fragment/migrations/0002_auto__add_field_gene_viewable.py
|
356a66fd8ae135bba78308d961f612f36d53e297
|
[] |
no_license
|
igemsoftware/Heidelberg_2013
|
13c20de982b51606367ee69f1ba71e957e2dfb87
|
85e4966d7ebf04173ae3db8d72f5b122d9a2475e
|
refs/heads/master
| 2020-04-04T09:00:38.005070
| 2013-10-28T15:26:45
| 2013-10-28T15:26:45
| 12,770,105
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,918
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Gene.viewable'
db.add_column(u'fragment_gene', 'viewable',
self.gf('django.db.models.fields.CharField')(default='L', max_length=1),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Gene.viewable'
db.delete_column(u'fragment_gene', 'viewable')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fragment.annotation': {
'Meta': {'object_name': 'Annotation'},
'gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'annotations'", 'to': u"orm['fragment.Gene']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '5120', 'blank': 'True'})
},
u'fragment.feature': {
'Meta': {'ordering': "['start']", 'object_name': 'Feature'},
'direction': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'end': ('django.db.models.fields.PositiveIntegerField', [], {}),
'gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'features'", 'to': u"orm['fragment.Gene']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.PositiveIntegerField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'fragment.gene': {
'Meta': {'object_name': 'Gene'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'sequence': ('django.db.models.fields.TextField', [], {'max_length': '500000'}),
'viewable': ('django.db.models.fields.CharField', [], {'default': "'L'", 'max_length': '1'})
},
u'fragment.qualifier': {
'Meta': {'object_name': 'Qualifier'},
'data': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'feature': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qualifiers'", 'to': u"orm['fragment.Feature']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'fragment.reference': {
'Meta': {'object_name': 'Reference'},
'authors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'references'", 'to': u"orm['fragment.Gene']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'medline_id': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'pubmed_id': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
}
}
complete_apps = ['fragment']
|
[
"nikos.ignatiadis01@gmail.com"
] |
nikos.ignatiadis01@gmail.com
|
407c6028f97b46141f099e0a26ec293cc0b975ea
|
68bf63745beb76f230fca9e23782d4e2acc91fac
|
/src/PidginCli/send.py
|
3ad973bce1ebbaac92d799dfb7143f948b53dd03
|
[
"Apache-2.0"
] |
permissive
|
tfga/pidginCli
|
4064e8756844190bdfcae4999b266b4ac22da8ce
|
91f4a22d0ca7108af7ac1372de12ea22fc7de081
|
refs/heads/master
| 2021-12-23T04:06:51.762882
| 2020-11-02T14:34:54
| 2020-11-09T18:55:10
| 81,127,521
| 4
| 3
|
Apache-2.0
| 2021-12-15T11:51:08
| 2017-02-06T20:07:28
|
Python
|
UTF-8
|
Python
| false
| false
| 240
|
py
|
# encoding: utf-8
from PidginCli.pidginCli import purple, account
# Send message
def send(msg, user):
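    # Create an IM conversation (type 1 in libpurple) with `user` on the configured
    # account and send `msg` through its IM handle.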
conv = purple.PurpleConversationNew(1, account, user)
im = purple.PurpleConvIm(conv)
purple.PurpleConvImSend(im, msg)
|
[
"thiagofga@gmail.com"
] |
thiagofga@gmail.com
|
b6c772dff8fac1b0e1adcfc9afe1571acbd74b5f
|
f1cb02057956e12c352a8df4ad935d56cb2426d5
|
/LeetCode/783. Minimum Distance Between BST Nodes/Solution.py
|
5ad0c386380db865bc897b2d1ac85f219bc9371c
|
[] |
no_license
|
nhatsmrt/AlgorithmPractice
|
191a6d816d98342d723e2ab740e9a7ac7beac4ac
|
f27ba208b97ed2d92b4c059848cc60f6b90ce75e
|
refs/heads/master
| 2023-06-10T18:28:45.876046
| 2023-05-26T07:46:42
| 2023-05-26T07:47:10
| 147,932,664
| 15
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List
class Solution:
def minDiffInBST(self, root: TreeNode) -> int:
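        # Inorder traversal of a BST yields values in sorted order, so the minimum
        # difference must occur between two adjacent values of that traversal.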
vals = []
self.inorder(root, vals)
ret = 1000000000
for i in range(len(vals) - 1):
ret = min(ret, vals[i + 1] - vals[i])
return ret
def inorder(self, node: TreeNode, ret: List[int]):
if node:
self.inorder(node.left, ret)
ret.append(node.val)
self.inorder(node.right, ret)
|
[
"nhatsmrt@uw.edu"
] |
nhatsmrt@uw.edu
|
a303ba8ab2ce0786a55da5529a6697c736cf51c8
|
3583a2f44149acc17cfd5e97f0f000b3a7ba94b8
|
/025/script.py
|
2589426ffe5be67e2930aad49200553aa960ff17
|
[] |
no_license
|
jmfennell/project_euler
|
5561438c96b010d02aa585fcc03a6c47bdbd5a1c
|
23edec8db3dcd3dad4607a6b916ac41f280d0ce2
|
refs/heads/master
| 2021-01-20T12:04:33.125362
| 2018-11-03T16:59:37
| 2018-11-03T16:59:37
| 20,556,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
#!/usr/bin/env python3
from functools import lru_cache
from itertools import count
length = 1000
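# fib() is memoised with lru_cache, so the recursion below stays linear in i;
# main() scans indices until the first Fibonacci number with `length` digits.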
@lru_cache(maxsize=None)
def fib(i):
if i <= 2:
return i
return fib(i-1) + fib(i-2)
def main():
for i in count():
l = str(fib(i))
if len(l) == length:
print(i+1)
break
if __name__ == '__main__':
main()
|
[
"j.fennell@pinbellcom.co.uk"
] |
j.fennell@pinbellcom.co.uk
|
1d1c44c8ffee81958006ceb271816629bba8cdcc
|
b5187b5ffd53a2cdc8ec6ed94effc39702c1ea31
|
/loyalty_app/loyalty/doctype/staff/test_staff.py
|
99156913065f175eb9514772eee1155414b32e63
|
[
"MIT"
] |
permissive
|
vignesharumainayagam/engagex-loyalty_app-backup-
|
946a7f75c5ae5cce33313142a0b4e6ba29d67cb6
|
4c326c5f7b22572146f0b946d6498e85ac22a143
|
refs/heads/master
| 2020-03-11T18:00:14.106005
| 2018-04-19T05:36:06
| 2018-04-19T05:36:06
| 130,163,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Loyalty and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestStaff(unittest.TestCase):
pass
|
[
"vigneshwaran@valiantsystems.com"
] |
vigneshwaran@valiantsystems.com
|
f8b2fd3a6f7e61063a3ef0e9d53bb2456ae68816
|
920baca8ee6cbd3c140cc54fada286ca7ecd20ed
|
/controlled_experiment/analysis_scripts/src/verification/verify_experiment.py
|
78177a4ff10103156c7dba085905031b3f445ca1
|
[
"MIT"
] |
permissive
|
lesunb/jss-2022-replication-package
|
79dffe25d469a5258d252214959aac5392a261da
|
35e2a0fddeedb8fdb5a1b1e4ad4b7a7658e39148
|
refs/heads/main
| 2023-04-07T08:10:19.818820
| 2022-05-03T22:40:43
| 2022-05-03T22:40:43
| 488,244,608
| 0
| 0
|
MIT
| 2022-05-03T21:40:28
| 2022-05-03T14:33:21
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 886
|
py
|
from utils import iter_map
from .verify_lab_samples_trials import check_mission_coordination
from .verify_task_execution import check_task_execution
def check_experiment(exec_code):
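    # Flatten the nested verification results into two lists of records: one per trial for
    # mission coordination, and one per (execution group, trial) for task execution.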
mc_verification = check_mission_coordination(exec_code)
mc_verification_list = []
te_verification_list = []
for trial, (property, value) in iter_map(mc_verification, 2):
trial_verification = { "trial": trial, "property": property, "result": value }
mc_verification_list.append(trial_verification)
te_verification = check_task_execution(exec_code)
for exec_group, (trial, (property, value)) in iter_map(te_verification, 3):
trial_exec_verification = { "exec_group": exec_group, "trial": trial, "property": property, "result": value }
te_verification_list.append(trial_exec_verification)
return mc_verification_list, te_verification_list
|
[
"vicenteromeiromoraes@gmail.com"
] |
vicenteromeiromoraes@gmail.com
|
29b4cdb20fae1c0f6d7dae7b4c9ad88447c69b9f
|
747f759311d404af31c0f80029e88098193f6269
|
/addons/etl_interface/etl/lib/etl/component/transform/data_filter.py
|
04246f50b258fe91bc33047f978f34bd4710ca6c
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958
| 2013-09-15T13:03:36
| 2013-09-15T13:03:36
| 9,648,444
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
/home/openerp/production/extra-addons/etl_interface/etl/lib/etl/component/transform/data_filter.py
|
[
"geerish@omerp.net"
] |
geerish@omerp.net
|
2efe39c54fddd62483270bd193e7f178b12f6cc2
|
f7a4cbd50e391af66b76d58994b50b6baaeb2255
|
/Problem 169/problem_169.py
|
a94299fa9b262469c60b7449be7621a167d6af82
|
[] |
no_license
|
orralacm/LeetCode
|
3754df2a220fbfa900185c5b3823d73613e462b3
|
869d539105da5af8452fa67f63bd6e408c1bf72a
|
refs/heads/main
| 2023-08-07T19:14:44.067421
| 2021-09-16T04:48:40
| 2021-09-16T04:48:40
| 380,321,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
#Given an array nums of size n, return the majority element.
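# Approach used below: count each element's occurrences with list.count() and keep the
# element with the highest count (quadratic, but fine for small inputs).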
nums = [2,2,1,1,1,2,2]
nums.sort()
n = 0
output = 0
for i in range (len(nums)) :
if (nums.count(nums[i]) > n) :
n = nums.count(nums[i])
print(n)
output = nums[i]
else :
continue
print(f"The majority element is: {output}")
|
[
"orrala@live.com.mx"
] |
orrala@live.com.mx
|
b249130aedf8b8dc019a2fd6eb0baf79b26eb634
|
a4a016e8bc1f077707733d7f404d5612deb6b4eb
|
/dash/bin/runxlrd.py
|
90f89fda49c01022ed296cc3fec54cac266e853b
|
[] |
no_license
|
naveenkonam/my_projects
|
e39749e99ccc59d2f2db5edac3b4602570f51612
|
64dae9eca6a402da378759668801597208df90e5
|
refs/heads/master
| 2023-03-04T10:26:50.003756
| 2021-02-20T14:13:52
| 2021-02-20T14:13:52
| 323,029,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,320
|
py
|
#!/home/konam/Documents/pythonsamples/dash/bin/python3
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys
import time
import glob
import traceback
import gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
sh.row_types(rowx)[nc-1]
sh.row_values(rowx)[nc-1]
sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows = sh.nrows
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n",
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(
fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot
import hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
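# Hedged usage note (not part of the original script): typical invocations, assuming the
# file is saved as runxlrd.py next to some workbooks; the .xls names below are illustrative.
#   python runxlrd.py ov pricing.xls         # quick overview of one workbook
#   python runxlrd.py -f1 show report.xls    # dump every cell, with formatting info loaded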
|
[
"naveenkonam@gmail.com"
] |
naveenkonam@gmail.com
|
33a102440c5c223b71051fb2300c6a994629c3cc
|
0e7af30dd1cecd193b81224360011758fe153e35
|
/Code/warp_testing.py
|
9465f0ec968da42be821eb0a3fb0095f3895972c
|
[] |
no_license
|
chittojnr/360VideoTruckCam
|
ad329c71db8fe5680361e3326343d5d456d48c2b
|
1ffdee7dbfa2eec7e0234ce39cdec0c54aa73e87
|
refs/heads/master
| 2023-03-24T06:48:51.569267
| 2018-06-08T09:03:09
| 2018-06-08T09:03:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,126
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 03:23:43 2018
@author: Etcyl
"""
# -*- coding: utf-8 -*-
# USAGE
# python realtime_stitching.py
# import the necessary packages
from imutils.video import VideoStream
import datetime
import imutils
import cv2
import numpy as np
H = np.matrix('0.7494203, 0.0063932, 118.9597; -0.156886, 0.9848, 2.9166; -0.000986577, 0.0002825271, 1')
# initialize the video streams and allow them to warmup
print("[INFO] starting cameras...")
leftStream = VideoStream(0).start()
rightStream = VideoStream(1).start()
#second_right = VideoStream(2).start()
while True:
imageB=leftStream.read()
imageA=rightStream.read()
#s_right=second_right.read()
imageB=imutils.resize(imageB, width=400)
imageA=imutils.resize(imageA, width=400)
#s_right=imutils.resize(s_right, width=400)
#result=stitcher.stitch([right, s_right])
#result=stitcher.stitch([left, right])
#result1=stitcher.stitch([left, result])
#h_mtx = stitcher.H
#B should be from the left frame
result = cv2.warpPerspective(imageA, H,
(imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
result2 = cv2.warpPerspective(imageB, H,
(imageB.shape[1] + imageA.shape[1], imageB.shape[0]))
result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
if result is None:
print("[INFO] homography could not be computed")
break
timestamp=datetime.datetime.now()
ts=timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
cv2.putText(result, ts, (10, result.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
cv2.putText(result2, ts, (10, result2.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
cv2.imshow("Combined", result)
cv2.imshow("Left frame", imageB)
cv2.imshow("Right frame", imageA)
# cv2.imshow("Second right frame", s_right)
cv2.imshow("Warped left", result2)
key = cv2.waitKey(1) & 0xFF
if key==ord("q"):
break
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
leftStream.stop()
rightStream.stop()
#second_right.stop()
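# Hedged sketch (not in the original script): one way the hard-coded H above could be
# estimated from an overlapping frame pair with ORB features; the function name, keypoint
# budget and RANSAC threshold are illustrative choices, not the project's actual method.
def estimate_homography(right_frame, left_frame):
    orb = cv2.ORB_create(1000)                              # detect up to 1000 keypoints
    kp_r, des_r = orb.detectAndCompute(right_frame, None)   # features in the frame to warp
    kp_l, des_l = orb.detectAndCompute(left_frame, None)    # features in the reference frame
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(matcher.match(des_r, des_l), key=lambda m: m.distance)[:100]
    src = np.float32([kp_r[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp_l[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    H_est, _mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    return H_est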
|
[
"noreply@github.com"
] |
chittojnr.noreply@github.com
|
724e56efe7cac39f00bc8a77242a56c6ad487a42
|
1eff1347aa8d7f36b71a4145c28d674036514d1a
|
/page/home.py
|
aeac96595c54a9fdaac66bdb97887bda1a5a4c7f
|
[] |
no_license
|
laoji888/automated_testing
|
84b6f8616476257eb4668756afaf7fa82b0611b9
|
fcc813bda079293a5041724de36babaa2b7f9a0e
|
refs/heads/master
| 2021-01-05T02:47:25.702181
| 2020-03-27T15:05:22
| 2020-03-27T15:05:22
| 240,850,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
from selenium import webdriver
from common.ui_base import base
from time import sleep
class Home(base):
def element(self, rows):
"""
获取元素信息
:param rows: 元素信息所在的行(索引)
:return:
"""
element = self.element_info("elements/csms_elements.xlsx", 0, rows, clos=1, ty=1)
return element
def login(self, name, pwd):
"""
登录到csms
:param name: 用户名
:param pwd: 密码
:return:
"""
self.open()
self.send_keys(name, *self.element(0))
self.send_keys(pwd, *self.element(1))
self.find_element_click(*self.element(2))
def log_in_again(self, name, pwd):
"""
在登录状态下重新登录
:param name: 用户名
:param pwd: 密码
:return:
"""
self.action_chains(*self.element(6))
self.action_chains(*self.element(4))
self.find_element_click(*self.element(5))
self.send_keys(name, *self.element(0))
self.send_keys(pwd, *self.element(1))
self.find_element_click(*self.element(2))
# Enter the workbench
def enter_workbench(self):
"""
登录后进入工作台
:return:
"""
self.find_element_click(*self.element(7))
def enter_Recruitment_management(self):
"""
鼠标悬停到合作伙伴管理后点击招募管理
:return:
"""
self.action_chains(*self.element(8))
self.find_element_click(*self.element(9))
sleep(2)
self.action_chains(*self.element(10))
self.find_element_click(*self.element(10))
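# Hedged usage sketch (not part of the original page object): how Home might be driven from
# a test; the no-argument constructor and the dummy credentials are assumptions about `base`.
if __name__ == "__main__":
    home = Home()                      # assumes base() can be constructed without arguments
    home.login("test_user", "123456")  # open the site and sign in
    home.enter_workbench()             # then switch to the workbench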
|
[
"xztlaoji@163.com"
] |
xztlaoji@163.com
|
171dc773c6e37191ecd6ac776dd6d79d45cc3742
|
469d509e41858c31cd06df4668b430a7fd618acd
|
/joonas/089.py
|
97857927692755cc1d86475e83238209e20d004e
|
[] |
no_license
|
joonas-yoon/ps4ct
|
a1d4a01da8804c72aeab56b5334d58ffe23f8913
|
2dab68e9dafc6fed33cb18c96a6c23985a17597d
|
refs/heads/main
| 2023-07-25T07:54:51.198751
| 2021-08-30T12:22:16
| 2021-08-30T12:22:16
| 323,814,120
| 0
| 0
| null | 2021-05-20T03:31:39
| 2020-12-23T05:46:43
|
C++
|
UTF-8
|
Python
| false
| false
| 165
|
py
|
def solution(n):
k = 1
a = []
while n >= k:
n -= k
a.insert(0, 2 ** ((n % (3 * k)) // k))
k *= 3
return ''.join(map(str, a))
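# Hedged check (not in the original file): the function enumerates strings whose digits are
# powers of two (1, 2, 4) -- effectively bijective base-3 with remapped digits -- so the
# first few values are '1', '2', '4', '11'.
if __name__ == "__main__":
    assert [solution(n) for n in range(1, 5)] == ['1', '2', '4', '11']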
|
[
"joonas.yoon@gmail.com"
] |
joonas.yoon@gmail.com
|
8dcb81a59cf89ec26f6c9dd5cec4de00091d8eea
|
9454c33413acdb78097f6c57dda145bbfba2d252
|
/test_rpg.py
|
5cc9a8a1181a5c46fbf150c2e82dee395a5ddf96
|
[] |
no_license
|
eximius8/knight-game
|
43b1d5939e78ce950db0215d59183714544fcb67
|
44bba2234522cb8376707fcea2e1d942418ef509
|
refs/heads/master
| 2023-07-08T23:27:20.336499
| 2021-08-09T06:29:34
| 2021-08-09T06:29:34
| 384,995,550
| 0
| 0
| null | 2021-08-09T06:29:35
| 2021-07-11T16:16:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
import unittest
from unittest import TestCase
from unittest.mock import patch
import importlib
rpg = importlib.import_module("main")
class RpgTestCase(TestCase):
"""Юнит тест для домашнего задания 1."""
def setUp(self) -> None:
"""Начальные условия для тестов."""
self.input = ""
self.victory_count = 0
self.fail_count = 0
def fake_io_with_asserts(self, *args):
"""Обработка print() и input() в программе с проверками результата."""
last_io = "".join(args)
if "БОЙ" in last_io:
self.input = "1"
elif "МЕЧ" in last_io:
self.input = "2"
elif "ПОБЕДА" in last_io:
self.assertEqual(rpg.monster_counter, 10)
self.assertTrue(rpg.hp > 0)
self.victory_count += 1
self.input = "\n"
elif "ПОРАЖЕНИЕ" in last_io:
self.assertTrue(rpg.monster_counter < 10)
self.assertTrue(rpg.hp <= 0)
self.fail_count += 1
self.input = "\n"
else:
self.input = "\n"
return last_io
def test_game_e2e(self):
"""Тест, выполняющий полностью прохождение игры."""
with patch("builtins.print", new=self.fake_io_with_asserts):
with patch("builtins.input", side_effect=lambda _: self.input):
with self.assertRaises(SystemExit):
rpg.game()
def test_game_e2e_until_at_least_one_victory(self):
"""Тест, проверяющий что в игру возможно когда-нибудь выиграть."""
with patch("builtins.print", new=self.fake_io_with_asserts):
with patch("builtins.input", side_effect=lambda _: self.input):
while self.victory_count == 0:
with self.assertRaises(SystemExit):
rpg.game()
self.assertEqual(self.victory_count, 1)
if __name__ == "__main__":
unittest.main()
|
[
"mikhail.trunov@gmail.com"
] |
mikhail.trunov@gmail.com
|
350f79b305a537f68059e5788baf3ab821036519
|
b888846d0c3f0451f04887a761cb823a881b3dc7
|
/.py
|
077cb2daaa76cdf9b20cbb8a88c85fa73ebdf7e4
|
[] |
no_license
|
bjamalbasha/dmart
|
5a5373f3e793cf63338b32818d74620c1e16179b
|
de5c12629de0a47733dc189de7c8cd98bbb8ae58
|
refs/heads/master
| 2021-01-13T02:14:58.445190
| 2015-01-29T07:34:45
| 2015-01-29T07:34:45
| 30,008,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
# Duplicate keys in a dict literal are legal; the later 'a': 3 silently replaces 'a': 1.
d = {'a': 1, 'b': 2, 'a': 3}
print(d['a'])  # prints 3
|
[
"bjamalbasha121@gmail.com"
] |
bjamalbasha121@gmail.com
|
9bae511953f0571839bde0acc0198b67814e55d2
|
77796d2d4e1fd931ef57620ac27a97cfd30158e5
|
/Python_Elasticsearch_Bulk_With_Geo.py
|
03ef0769db4a06d5cedd6ba04a8b958ff20403d1
|
[] |
no_license
|
florenefeyzi/Elastic_with_python
|
d8ac3a0748370e1b7e498d8206f7f8629488afef
|
4fbf0cf45523d7de068fb370c2fe590426ab6228
|
refs/heads/master
| 2023-03-20T19:21:58.306814
| 2019-05-20T07:10:49
| 2019-05-20T07:10:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import json
import time, datetime
import os
# es = Elasticsearch(['192.168.16.246:9200'], timeout=60, retry_on_timeout=True)
es = Elasticsearch(['192.168.2.12:9200'], timeout=60, retry_on_timeout=True)
request_body = {
"mappings": {
"_doc": {
"properties": {
"USE_DT": {
"type": "date"
},
"location": {
"type": "geo_point"
}
# "dest_port": {
# "type": "text"
# }
}
}
}
}
if __name__ == "__main__":
# res = es.indices.delete(index='seoul-metro-passenger-geo2')
es.indices.create(index='seoul-metro-passenger-geo', body=request_body)
es_data = []
if es is not None:
path_to_json = '/home/data/ressssssssssssssssss'
json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
for file in json_files:
es_data = []
with open(os.path.join(path_to_json, file), "r") as fr:
a = fr.readlines()
for i in a:
data = json.loads(i)
if 'xpoint_wgs' not in data or 'ypoint_wgs' not in data:
continue
data['location'] = {
'lat': float(data['xpoint_wgs']),
'lon': float(data['ypoint_wgs'])
}
action = {"_index": "seoul-metro-passenger-geo", "_type": "_doc", '_source': data}
es_data.append(action)
if len(es_data) > 500:
helpers.bulk(es, es_data, stats_only=False)
es_data = []
if len(es_data) > 0:
helpers.bulk(es, es_data, stats_only=False)
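# Hedged follow-up sketch (not part of the original loader): with the geo_point mapping
# defined above, the indexed documents can be filtered spatially; the bounding box around
# Seoul below is illustrative only.
sample_query = {
    "query": {
        "bool": {
            "filter": {
                "geo_bounding_box": {
                    "location": {
                        "top_left": {"lat": 37.7, "lon": 126.8},
                        "bottom_right": {"lat": 37.4, "lon": 127.2},
                    }
                }
            }
        }
    }
}
# res = es.search(index="seoul-metro-passenger-geo", body=sample_query)
# print(res["hits"]["total"])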
|
[
"Yu.david880@gmail.com"
] |
Yu.david880@gmail.com
|
4e36113442dabd353a6e1e7392f30fd0253a1c08
|
dc3f5701727d5623b3cfd9f8277b08c864925b7f
|
/working_with_files.py
|
864b7256a7aa13dd0465204c576ed8e7b2c0d767
|
[] |
no_license
|
kurskovp/cook_book
|
da051441d80938a471db0210d7a27e52cb2d864d
|
bcefb43ee8c7b9690fd275cff502303099b72202
|
refs/heads/master
| 2023-03-25T17:38:58.115408
| 2021-03-26T10:28:15
| 2021-03-26T10:28:15
| 351,729,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
# Task No. 3
my_file1 = open('text_1.txt', 'r')
content1 = my_file1.readlines()
print(f'Number of lines in file 1 - {len(content1)}')
my_file2 = open('text_2.txt', 'r')
content2 = my_file2.readlines()
print(f'Number of lines in file 2 - {len(content2)}')
my_file3 = open('text_3.txt', 'r')
content3 = my_file3.readlines()
print(f'Number of lines in file 3 - {len(content3)}')
my_file1.close()
my_file2.close()
my_file3.close()
# f = open('text_4.txt')
in_file = open('text_2.txt', 'r',)
indata = in_file.read()
out_file = open('text_4.txt', 'w', encoding='UTF-8')
out_file.write(indata + '\n')
out_file.close()
in_file.close()
in_file = open('text_1.txt', 'r',)
indata = in_file.read()
out_file = open('text_4.txt', 'a', encoding='UTF-8')
out_file.write(indata + '\n')
out_file.close()
in_file.close()
in_file = open('text_3.txt', 'r',)
indata = in_file.read()
out_file = open('text_4.txt', 'a', encoding='UTF-8')
out_file.write(indata + '\n')
out_file.close()
in_file.close()
print()
in_file = open('text_4.txt', 'r')
indata = in_file.read()
print(indata)
in_file.close()
# Could not work out how to do the second condition of the task - which function to use for it....
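# Hedged refactor sketch (not part of the original homework file): the three copy blocks
# above can be collapsed into one loop, and sorted() with a line-count key concatenates the
# files from shortest to longest -- assuming that is what the unclear second condition asks.
def merge_files(names, out_name='text_4.txt'):
    texts = {}
    for name in names:
        with open(name, 'r') as f:              # read each source file once
            texts[name] = f.read()
    ordered = sorted(names, key=lambda n: len(texts[n].splitlines()))
    with open(out_name, 'w', encoding='UTF-8') as out:
        for name in ordered:
            out.write(texts[name] + '\n')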
|
[
"kurskovp@gmail.com"
] |
kurskovp@gmail.com
|
61db38c6fff5ec768e392047ba9405acbc49c1da
|
ebd66a708457ed11c1063bd596727a5fd8c93d02
|
/Notepad++/小红书:最长不降子序列.py
|
7ef7d2913239d810955011cc599e79daaa2bb7b3
|
[] |
no_license
|
lovewyy/exam
|
f53647b2640b6d83326c8b77393f794e08845b41
|
f8e30e944110d3b21e8477769cf218879cbdd65b
|
refs/heads/master
| 2020-07-22T03:58:49.126161
| 2019-09-24T13:23:23
| 2019-09-24T13:23:23
| 207,066,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
import sys
n = int(sys.stdin.readline().strip())
a = []
for i in range(n):
line = sys.stdin.readline().strip()
a.append(list(map(int, line.split())))
def func1(a):
return a[1]
def func0(a):
return a[0]
a.sort(key = func1)
print(a)
a.sort(key = func0)
print(a)
b = []
for i in range(n):
b.append(a[i][1])
print(b)
b = [3, 2, 2, 1, 5]  # overwrite b with a fixed test case
print(b)
dp = [1 for _ in range(len(b))]
for i in range(len(b)):
    for j in range(i):
        if b[i] >= b[j]:
            # b[j] can precede b[i] in a non-decreasing subsequence, so try extending it
            dp[i] = max(dp[i], dp[j] + 1)
print(dp)
# the longest non-decreasing subsequence can end at any index, so the answer is the best dp value
print(max(dp))
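# Hedged alternative (not in the original submission): the same length can be computed in
# O(n log n) with a tails array; bisect_right keeps equal values, which is what makes the
# subsequence non-decreasing rather than strictly increasing.
from bisect import bisect_right
def longest_non_decreasing(seq):
    tails = []
    for x in seq:
        pos = bisect_right(tails, x)
        if pos == len(tails):
            tails.append(x)       # x extends the longest subsequence found so far
        else:
            tails[pos] = x        # x gives a smaller tail for subsequences of length pos + 1
    return len(tails)
print(longest_non_decreasing(b))  # 3 for b = [3, 2, 2, 1, 5]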
|
[
"lxclxc@126.com"
] |
lxclxc@126.com
|
6dba52157f9423fcce1040d984617cb4c4384391
|
760f612846c3c1f4cf8ccff929f93f469c993974
|
/IAM/req.py
|
0674695629581950b765305deb5f0da3f03c4829
|
[] |
no_license
|
keni-chi/AWS
|
da943b740f425c9ea0decb466eee1f90fb0479c7
|
cf5f89300fe29124bad6a0cfeb1b55bbb3943084
|
refs/heads/master
| 2022-12-13T23:47:35.043833
| 2022-12-02T14:29:08
| 2022-12-02T14:29:08
| 165,391,253
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
import requests
import json
from aws_requests_auth.aws_auth import AWSRequestsAuth
# STSToken
response = {"Credentials": {"AccessKeyId": "XXXXXXXX"}}
credentials = response['Credentials']
# request info
aws_host = 'XXXXXXXX.execute-api.ap-northeast-1.amazonaws.com'
url = 'https://' + aws_host + '/prod/functionA-1'
body_dict = {"k": "v"}
# execute
auth = AWSRequestsAuth(aws_access_key=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_host=aws_host,
aws_region='ap-northeast-1',
aws_service='execute-api')
headers = {'x-amz-security-token':credentials['SessionToken']}
body = json.dumps(body_dict)
# res = requests.get(url, auth=auth, headers=headers)
res = requests.post(url, data=body, auth=auth, headers=headers)
print("code: " + str(res.status_code)
print("content: " + res.text)
|
[
"neko9dog9access@gmail.com"
] |
neko9dog9access@gmail.com
|
529df2d660f2c0c964cbbcdc35d016c7da8030ef
|
a66b31a9de7def4de6b26ea705617fbb412dadb0
|
/COVID_19 detection/BERT/run_classifier.py
|
cc061cad00dfd025b743d503132841b9b61160c5
|
[] |
no_license
|
PHISSTOOD/COVID-19-rummor-detection
|
6e471c9ced78b9131c5cd4b868de229f67922b69
|
3cb6b1034a108d7b778bfb7c67ce659b8468a0a9
|
refs/heads/master
| 2022-07-21T16:56:02.822888
| 2020-05-17T15:30:38
| 2020-05-17T15:30:38
| 264,697,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,152
|
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", "C:\\Users\\PHISSTOOD\\Desktop\\Machine Learning\\Bert_data\\",
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", "C:\\Users\\PHISSTOOD\\Desktop\\Machine Learning\\Bert\\google\\bert_config.json",
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", "rumor", "The name of the task to train.")
flags.DEFINE_string("vocab_file", "C:\\Users\\PHISSTOOD\\Desktop\\Machine Learning\\Bert\\google\\vocab.txt",
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", "C:\\Users\\PHISSTOOD\\Desktop\\Machine Learning\\Bert_data\\output\\",
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", "C:\\Users\\PHISSTOOD\\Desktop\\Machine Learning\\Bert\\google\\bert_model.ckpt",
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", True, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", True,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 1e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
lines = []
for line in reader:
lines.append(line)
return lines
class RumorProcessor(DataProcessor):
"""
Rumor data processor
"""
def _read_csv(self, data_dir, file_name):
with tf.gfile.Open(data_dir + file_name, "r") as f:
reader = csv.reader(f, delimiter=",", quotechar=None)
lines = []
for line in reader:
lines.append(line)
return lines
def get_train_examples(self, data_dir):
lines = self._read_csv(data_dir, "train.csv")
examples = []
for (i, line) in enumerate(lines):
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
def get_dev_examples(self, data_dir):
lines = self._read_csv(data_dir, "dev.csv")
examples = []
for (i, line) in enumerate(lines):
guid = "dev-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
def get_test_examples(self, data_dir):
lines = self._read_csv(data_dir, "test.csv")
examples = []
for (i, line) in enumerate(lines):
guid = "test-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
def get_labels(self):
return ["0", "1"]
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"rumor": RumorProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
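# Hedged usage note (not part of the original runner): with the hard-coded Windows paths
# above replaced by real locations, a typical fine-tuning run looks like
#   python run_classifier.py --do_train=true --do_predict=true \
#       --data_dir=<csv dir> --vocab_file=<bert>/vocab.txt \
#       --bert_config_file=<bert>/bert_config.json \
#       --init_checkpoint=<bert>/bert_model.ckpt --output_dir=<out dir>
# task_name defaults to "rumor", which main() maps to RumorProcessor.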
|
[
"noreply@github.com"
] |
PHISSTOOD.noreply@github.com
|
7cecbeb3415d3934da46d952c6e585cd3932209e
|
da3e1c0a01c290593d9f6d378a5fb41e0cf0b237
|
/Scripts/remove_duplicate.py
|
d2c7e667179c80091759af0a9145d17611261e8b
|
[] |
no_license
|
phsiao91/python_learning
|
32741ae623e3d7b4ff8a0328e3ad1af596dcac81
|
216e7b22cb649aba9ea60c0cbe486b145764c692
|
refs/heads/master
| 2023-05-14T16:15:56.254399
| 2021-06-08T23:46:55
| 2021-06-08T23:46:55
| 332,631,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
numbers = [2, 6, 16, 8, 31, 41, 24, 6, 8, 45, 21]
uniques = []
for number in numbers:
if number not in uniques:
uniques.append(number)
print(uniques)
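# Hedged alternative (not in the original file): dict keys preserve insertion order in
# Python 3.7+, so the same order-preserving de-duplication fits on one line.
print(list(dict.fromkeys(numbers)))  # [2, 6, 16, 8, 31, 41, 24, 45, 21]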
|
[
"phinganhsiao@gmail.com"
] |
phinganhsiao@gmail.com
|
a3d53c5c1f2d259358462eef485a49d3241f9904
|
c3c996a134b0ab475f363c1bf644c9b6cecec42e
|
/leetcode/longest_substring_no_repeats.py
|
d41d5a0ebef9893cd442d68e6305f6710ac8758b
|
[] |
no_license
|
alexinkc/prog-problems
|
7ed424a3ce7ec5cf4f653a93a30e878e6e78a643
|
81d46d742e61215b90aee6865dc57ad622ef516c
|
refs/heads/master
| 2021-08-29T22:17:34.835624
| 2017-12-15T05:16:38
| 2017-12-15T05:16:38
| 114,326,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
class Solution(object):
"""
find the longest unique character substring in the string s. time complexity: O(n)
maintain a sliding range from i to j. Assume a longest substring from s[i]..s[j].
Store each character as a key in a dict, with
its respective value being its index in the string.
For s[j+1], two outcomes are possible:
1. The character is unique and not in the dict. increment j
2. the character is in the dict. take the max(i, dict[s[j+1]]) to find the correct i
Take the max of the maxLen value max(max, j-i+1)
store char in dict: dict[s[j+1]] = j+1
"""
@classmethod
def length_of_longest_substring(cls, given_str):
"""
:type s: str
:rtype: int
"""
max_len = 0
start = 0
letters = {}
for j, char in enumerate(given_str):
if given_str[j] in letters:
if start < letters[given_str[j]]:
start = letters[given_str[j]]
if max_len < j - start + 1:
max_len = j - start + 1
letters[given_str[j]] = j + 1
return max_len
if __name__ == '__main__':
SAMPLE = "pwwkew"
print(Solution.length_of_longest_substring(SAMPLE))
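# Hedged check (not in the original file): for "pwwkew" the longest run without repeats is
# "wke", so the call above prints 3; "abcabcbb" would likewise give 3 ("abc").
# assert Solution.length_of_longest_substring("abcabcbb") == 3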
|
[
"alexmclaurian@gmail.com"
] |
alexmclaurian@gmail.com
|
39d16424c56d0f0764dbf16f4320096eecf18022
|
eb84cb547ce9a43c5f2c7989bba84b6d74f3529e
|
/python/TTT/Players/minimax.py
|
5a7eb2e8c0489cc69677a4820a6625b5f2ab75b8
|
[] |
no_license
|
Keitling/algorithms
|
6a92b2dde68d717c03d9b1f4e29091124ef8318e
|
ad38a0341a37e7576b9ca014458f4cd24a5e2f85
|
refs/heads/master
| 2023-08-13T22:26:43.758938
| 2021-10-15T00:39:21
| 2021-10-15T00:39:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
"""
Minimax Tic-Tac-Toe player.
"""
import sys
sys.path.append('../GameLogic')
import gamelogic as provided
name = "Minimax"
# SCORING VALUES
SCORES = {provided.PLAYERX: 1,
provided.DRAW: 0,
provided.PLAYERO: -1}
def move_mm(board, player, trials=None):
"""
Make a move on the board.
"""
score = board.check_win()
if score is not None:
return SCORES[score], (-1, -1)
else:
moves = board.get_empty_squares()
compare = -2
best_move = (-1, -1)
best_score = -2
for amove in moves:
new_board = board.clone()
new_board.move(amove[0], amove[1], player)
result = move_mm(new_board, provided.switch_player(player))
value = result[0] * SCORES[player]
if value > 0:
return result[0], amove
else:
if value > compare:
compare = value
best_score = result[0]
best_move = amove
return best_score, best_move
def move(board, player, trials=None):
"""
Wrapper for move_mm. This function is what is called by
other modules.
"""
return move_mm(board, player)[1]
|
[
"nemesis4go10@hotmail.com"
] |
nemesis4go10@hotmail.com
|
9db1b1956d09e1f8786b792370c44b29fba8a2e2
|
6efdf80d8075b5d3fc95046307a23fbf0f239785
|
/doc/source/conf.py
|
62bf9866c6266839fd9f64e12db2f84369b40c33
|
[
"Apache-2.0"
] |
permissive
|
Wilbert-Zhang/nuclei-sdk
|
2e962be3219339f5e55d84b80a2cefbccda94ce0
|
74efdfb054a5d057e12609fd7fd9368e319eb6ef
|
refs/heads/master
| 2021-02-26T20:28:12.484593
| 2020-03-04T04:01:26
| 2020-03-04T04:02:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,243
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Nuclei SDK'
copyright = '2019-Present, Nuclei'
author = 'Nuclei'
# The short X.Y version
version = '0.2.0'
# The full version, including alpha/beta/rc tags
release = '0.2.0-alpha'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_rtd_theme',
'breathe',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'recommonmark'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# Show build timestamp
html_last_updated_fmt = ""
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_logo = 'asserts/images/nsdk_logo_small.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme_options = {
'logo_only': True,
'navigation_depth': 6
}
# -- Options for Breathe Project ---------------------------------------------
breathe_projects = {
}
breathe_default_project = ""
breathe_show_define_initializer = True
# -- Options for Latex output -------------------------------------------------
latex_logo = 'asserts/images/nsdk_logo_small.png'
latex_show_pagerefs = True
latex_toplevel_sectioning = 'chapter'
latex_show_urls = 'footnote'
rst_prolog = """
.. |nuclei_contact| replace:: email support@nucleisys.com
.. |NMSIS| replace:: `NMSIS`_
.. _NMSIS: https://github.com/Nuclei-Software/NMSIS
.. |nuclei_sdk| replace:: `Nuclei SDK`_
.. _Nuclei SDK: https://github.com/Nuclei-Software/Nuclei-SDK
.. |nuclei_download_center| replace:: `Nuclei Download Center`_
.. _Nuclei Download Center: https://nucleisys.com/download.php
.. |github| replace:: `Github`_
.. _Github: https://github.com
.. |gitee| replace:: `Gitee`_
.. _Gitee: https://gitee.com
.. |github_nuclei_sdk| replace:: `Nuclei SDK in Github`_
.. _Nuclei SDK in Github: https://github.com/Nuclei-Software/Nuclei-SDK
.. |gitee_nuclei_sdk| replace:: `Nuclei SDK in Gitee`_
.. _Nuclei SDK in Gitee: https://gitee.com/Nuclei-Software/Nuclei-SDK
.. |github_nuclei_sdk_release| replace:: `Nuclei SDK Release in Github`_
.. _Nuclei SDK Release in Github: https://github.com/Nuclei-Software/nuclei-sdk/releases
.. |teraterm| replace:: ``TeraTerm in Windows``
.. _TeraTerm in Windows: http://ttssh2.osdn.jp/
.. |minicom| replace:: ``Minicom in Linux``
.. _Minicom in Linux: https://help.ubuntu.com/community/Minicom
"""
rst_epilog = """
.. |nuclei_core| replace:: Nuclei N/NX Class Processors
.. |nuclei_ncore| replace:: Nuclei N Class Processors
.. |nuclei_nxcore| replace:: Nuclei NX Class Processors
.. |nmsis_support_cores| replace:: N200, N300, N600, NX600
.. |nmsis_core_defines| replace:: **NUCLEI_N200**, **NUCLEI_N300**, **NUCLEI_N600** or **NUCLEI_NX600**
"""
# -- Extension configuration -------------------------------------------------
def setup(app):
app.add_css_file("css/custom.css")
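# Hedged example (not in the original conf.py): if Doxygen XML were generated for the SDK,
# the empty breathe configuration above would typically be filled in along these lines;
# the project name and XML path are illustrative.
# breathe_projects = {"nuclei_sdk": "../build/doxygen/xml"}
# breathe_default_project = "nuclei_sdk"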
|
[
"hqfang@nucleisys.com"
] |
hqfang@nucleisys.com
|
95700aac6a23dba1d2ff5e33b581cf34c47ca05d
|
c91085f8311d5327f7256849a573e7923a729abd
|
/html/0090_code/util2.py
|
5eb3384c81d8112cfbc3f2640b5bf68f7ff99b45
|
[] |
no_license
|
yuichi110/www.yuichi.com-python
|
59903bd76b5ef70451377839f029618f5bb4b51c
|
440b11e27248bf775f30dde9b6abe0e62ea10e7a
|
refs/heads/master
| 2020-06-16T14:31:58.465782
| 2018-03-11T11:58:41
| 2018-03-11T11:58:41
| 94,149,071
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
def test():
print('test')
test()
|
[
"yito86@gmail.com"
] |
yito86@gmail.com
|
21470e9f54e3cb8b265549e85b3b6baecb59c5dd
|
9f25ac38773b5ccdc0247c9d43948d50e60ab97a
|
/content/test/gpu/gpu_tests/webcodecs_integration_test.py
|
6c42350ca65144ca9068a34c2a66514213254bdc
|
[
"BSD-3-Clause"
] |
permissive
|
liang0/chromium
|
e206553170eab7b4ac643ef7edc8cc57d4c74342
|
7a028876adcc46c7f7079f894a810ea1f511c3a7
|
refs/heads/main
| 2023-03-25T05:49:21.688462
| 2021-04-28T06:07:52
| 2021-04-28T06:07:52
| 362,370,889
| 1
| 0
|
BSD-3-Clause
| 2021-04-28T07:04:42
| 2021-04-28T07:04:41
| null |
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import time
from gpu_tests import gpu_integration_test
from gpu_tests import path_util
data_path = os.path.join(path_util.GetChromiumSrcDir(), 'content', 'test',
'data', 'gpu', 'webcodecs')
class WebCodecsIntegrationTest(gpu_integration_test.GpuIntegrationTest):
@classmethod
def Name(cls):
return 'webcodecs'
@classmethod
def GenerateGpuTests(cls, options):
yield ('WebCodecs_EncodeDecodeRender_h264_baseline',
'encode-decode-render.html', ('{ codec : "avc1.42001E" }'))
yield ('WebCodecs_EncodeDecodeRender_vp8', 'encode-decode-render.html',
('{ codec : "vp8" }'))
yield ('WebCodecs_EncodeDecodeRender_vp9', 'encode-decode-render.html',
('{ codec : "vp09.00.10.08" }'))
def RunActualGpuTest(self, test_path, *args):
url = self.UrlOfStaticFilePath(test_path)
tab = self.tab
arg_obj = args[0]
tab.Navigate(url)
tab.action_runner.WaitForJavaScriptCondition(
'document.readyState == "complete"', timeout=5)
tab.EvaluateJavaScript('TEST.run(' + str(arg_obj) + ')')
tab.action_runner.WaitForJavaScriptCondition('TEST.finished', timeout=60)
if not tab.EvaluateJavaScript('TEST.success'):
self.fail('Test failure:' + tab.EvaluateJavaScript('TEST.summary()'))
@classmethod
def SetUpProcess(cls):
super(WebCodecsIntegrationTest, cls).SetUpProcess()
cls.CustomizeBrowserArgs(['--enable-blink-features=WebCodecs'])
cls.StartBrowser()
cls.SetStaticServerDirs([data_path])
@classmethod
def ExpectationsFiles(cls):
return [
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test_expectations', 'webcodecs_expectations.txt')
]
def load_tests(loader, tests, pattern):
del loader, tests, pattern # Unused.
return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
|
[
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] |
chromium-scoped@luci-project-accounts.iam.gserviceaccount.com
|
2699df5e343f682772b4e50daf6a2313b2858c4a
|
429b1905ec09b4f3b1b640c64800286bc5bdd921
|
/single-server.py
|
d87c314e9027742938813b5869c1e067ffde6454
|
[] |
no_license
|
hariton594/web
|
57bd7b3032f3a5d4463a78dbd41baf5d400be159
|
072de8178b9963a663ba6387ef4c5c21cfb91f24
|
refs/heads/master
| 2021-01-10T11:33:23.458248
| 2016-03-16T17:31:36
| 2016-03-16T17:31:36
| 53,646,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
import socket
s = socket.socket()
s.bind(('', 2222))
s.listen(1)
conn, addr = s.accept()
while True:
data = conn.recv(1024)
    if data == b'close': break  # recv() returns bytes, so compare against a bytes literal
conn.send(data)
conn.close()
|
[
"hariton_stas@mail.ru"
] |
hariton_stas@mail.ru
|
82c0016fbcb397cc07b5f736ecc9521a7addc969
|
1361d634b8f5eeb060b0a25dada9e0d4d584beb6
|
/generate.py
|
31b85138277f0773c596b736acf5955f88cd7f08
|
[
"MIT"
] |
permissive
|
overskylab/python-auth_roundcube
|
5cee0fb74baf659f357d6cc6b823a07877c21718
|
f3af887c6717d45f1b7c5951d2ac91434ab40454
|
refs/heads/master
| 2021-01-11T07:16:19.967941
| 2016-11-03T07:10:21
| 2016-11-03T07:10:21
| 72,516,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,188
|
py
|
#!/usr/bin/python
from datetime import datetime, timedelta
import random, string, os
class Generate:
def __init__(self):
self.title = 'This class to generate password as your need'
def password(self, length=6, special=0):
if special == 1:
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
else:
chars = string.ascii_letters + string.digits
        random.seed(os.urandom(1024))
password = ''.join(random.choice(chars) for i in range(length))
return password
class vb_script:
user_list = []
def __init__(self):
self.title = 'This class to generate excel vb script'
self.code = ''
def add_user(self, username, password):
credential = {'username': username, 'password': password}
self.user_list.append(credential)
def gen_script(self, priviledge):
code = \
'''
Private Sub Workbook_Open()
Dim Edate As Date
'''
if priviledge == 1:
# For postmaster
time_result = datetime.now() + timedelta(days=+7)
else:
# For user
time_result = datetime.now() + timedelta(days=+5)
time_result = time_result.strftime("%d/%m")
code += ' Edate = Format("'+time_result+'", "DD/MM")'
code += \
'''
If Date > Edate Then
MsgBox ("This worksheet was valid upto " & Format(Edate, "dd-mmm") & " and will be closed")
ActiveWorkbook.Close savechanges:=False
End If
If Edate - Date < 30 Then
MsgBox ("This worksheet expires on " & Format(Edate, "dd-mmm") & " You have " & Edate - Date & " Days left ")
'''
for i in range (len(self.user_list)):
code += ' Range("B'+str(i+3)+'").value = "'+self.user_list[i]['username']+'"'+"\n"
code += ' Range("C'+str(i+3)+'").value = "'+self.user_list[i]['password']+'"'+"\n"
code += \
'''
End If
End Sub
'''
return code
#test = vb_script()
#test.add_user('user1','pass1')
#test.add_user('user2','pass2')
#print test.user_list
#priviledge = 0
#test.gen_script(priviledge)
#print test.code
#test = password()
#print test.generate(6)
|
[
"noreply@github.com"
] |
overskylab.noreply@github.com
|
4af6cca770e6d93be2a6efab6853f02acf4c0b95
|
996c920adbf99c78be2848e99707ee9a0db30d97
|
/gallery/urls.py
|
dc86b0e84b88991f852bd53bb631540612d99f3f
|
[] |
no_license
|
Bella-Amandine/gallery-app
|
4c991c5d4a819cbfda3fd046f01db039cee3faea
|
7dfba7e8b561bb9625bc5b9116d95d3c44e53854
|
refs/heads/master
| 2023-02-13T15:12:53.405938
| 2021-01-02T10:54:48
| 2021-01-02T10:54:48
| 322,631,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
"""gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('galleryapp.urls'))
]
|
[
"bellamandy45@gmail.com"
] |
bellamandy45@gmail.com
|
215d165cc62da96a342ace4a7d8752e088ad51b4
|
a9cf474a7a257e48258f08a73f021cf89f77adee
|
/samples.py
|
726e142c1b0955adcd981a7a987d96fbd4e60c18
|
[] |
no_license
|
mode89/vc
|
eb9fab15f31241570d799d9eef5a442dd366803b
|
cd205cf94b48b9fa7700dd6f7cceaf31f6f975b1
|
refs/heads/master
| 2020-12-09T09:30:04.997810
| 2015-11-27T09:47:29
| 2015-11-27T09:47:29
| 40,854,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
import glob
import numpy
import os
import wave
class Sample:
def __init__(self, path):
file = wave.open(path, "r")
frame_count = file.getnframes()
self.frames = numpy.fromstring(file.readframes(frame_count),
dtype=numpy.int16) / float(2 ** 15)
self.frame_rate = file.getframerate()
file.close()
self.pointer = 0
def read_frames(self, time):
count = int(time * self.frame_rate)
lower = self.pointer
upper = self.pointer + count
if upper > len(self.frames):
return None
else:
self.pointer = upper
return self.frames[lower:upper]
def reset(self):
self.pointer = 0
DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data")
MUSIC = list(Sample(path) for path in
glob.glob(os.path.join(DATA_DIR, "music*")))
CLAP = list(Sample(path) for path in
glob.glob(os.path.join(DATA_DIR, "clap*")))
COMPUTER = list(Sample(path) for path in
glob.glob(os.path.join(DATA_DIR, "computer*")))
NOISE = Sample(os.path.join(DATA_DIR, "noise.wav"))
|
[
"mode89@mail.ru"
] |
mode89@mail.ru
|
f9fd0bb6eed8ef61e24348be845175a2aa74a99f
|
f6b339dc252a314b8c0a87b085a9a36bdb2eac69
|
/blog/migrations/0001_initial.py
|
d410c11e2ad2a44db5c69ee58420ce6286203a15
|
[] |
no_license
|
schaggar/django3-personal-portfolio
|
f44b2ddec14fe54224d50e74d89751e2b24c3ed3
|
0816a24a816efd403dabff49e5e3799c95b9bd72
|
refs/heads/master
| 2022-12-06T07:51:47.726949
| 2020-08-22T14:24:53
| 2020-08-22T14:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
# Generated by Django 3.1 on 2020-08-20 14:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('date', models.DateField()),
],
),
]
|
[
"saravjeet.singh@hotmail.com"
] |
saravjeet.singh@hotmail.com
|
e5f9a83b0e3eee542832fe69f6f89ce5d6a43c19
|
c4444845012bb07ce6fdd2d5aed9725e31c0577a
|
/all_py/newegg_web.py
|
81bf8217f0a65e43ccc80c7d72f4ac834ff9e617
|
[] |
no_license
|
AzhaarMohidden/image_down_search
|
307494e5c7392b26ab1fa5b46de2852a067633a9
|
f6c13f0fbd8810ed1085ef0c1d1b455af33f9073
|
refs/heads/master
| 2020-11-29T13:15:38.485224
| 2019-12-25T15:47:26
| 2019-12-25T15:47:26
| 230,121,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
#my_url = 'https://vk.com/video?notsafe=1&q=anal'
my_url = 'https://www.porntrex.com/categories/anal/'
uClient = uReq(my_url)
page_html =uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")
containers = page_soup.findAll("div",{"class":"video-list"})
#containers = page_soup.findAll("div", {"class":"video_item _video_item ge_video_item_"})
out_filename = "newegg.csv"
headers= "URL"
f = open(out_filename, "w")
f.write(headers)
for x in range(len(containers)):
Img = containers[x].div.a.img["data-src"]
Img_url= str(Img).strip().replace("//","")
print(Img_url)
f.write(Img_url+ "\n")
f.close() # Close the file
|
[
"azhaarm94@gmail.com"
] |
azhaarm94@gmail.com
|
f6b9576f5930e6d0d3d09f7be33f07e1c47f70a9
|
35859245b22aa8439e8fd2a3dcb236df91face94
|
/poc1/poc_mancala_student_v3.py
|
93d6ca51c3a33aa0da57dc9a061a957b7cf8b873
|
[] |
no_license
|
victory118/foc_rice
|
dbf7a90a1e70baf268082621c47de05e9bd37f77
|
491f794e4f452490eefd38034ecd673459a04cd1
|
refs/heads/master
| 2021-10-11T16:31:55.734845
| 2019-01-28T15:56:33
| 2019-01-28T15:56:33
| 146,216,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,962
|
py
|
class SolitaireMancala:
    """
    Simple class that implements Solitaire Mancala
    """

    def __init__(self):
        self.board = [0]

    def set_board(self, configuration):
        """
        Take a sequence of initial configurations and update the board:
        house zero corresponds to the store and is on the right,
        houses are numbered in ascending order from right to left
        """
        self.board = list(configuration)
def __str__(self):
"""
Return string representation for Mancala board
"""
temp = list(self.board)
temp.reverse()
return str(temp)
def get_num_seeds(self, house_num):
"""
Return the number of seeds in given house on board
"""
return self.board[house_num]
def is_game_won(self):
"""
Check to see if all houses but house zero are empty
"""
return True
def is_legal_move(self, house_num):
"""
Check whether a given move is legal
"""
return True
def apply_move(self, house_num):
"""
Move all of the stones from house to lower/left houses
Last seed must be played in the store (house zero)
"""
pass
def choose_move(self):
"""
Return the house for the next shortest legal move
Shortest means legal move from house closest to store
Note that using a longer legal move would make smaller illegal
If no legal move, return house zero
"""
return 0
def plan_moves(self):
"""
Return sequence of shortest legal moves until none are available
Not used in GUI version, only for machine testing
"""
return []
# import test suite and run
import poc_mancala_testsuite_v2 as poc_mancala_testsuite
poc_mancala_testsuite.run_suite(SolitaireMancala)
|
[
"victory118@gmail.com"
] |
victory118@gmail.com
|
517d08e9b33117c08a698c910c44b1ed079c38e7
|
3c8cf9ff96cdc77cee8620a48bda9b3e911aea78
|
/testPositiveEntries.py
|
fde2a6a8ac93c562624cd667cd107487f18e8265
|
[] |
no_license
|
ssmoewe/BA_Python_Code
|
d5139593f80f048791343dfb225cc63f18250a36
|
0fa2a65c43baef1564698d810b22bcef36edc28a
|
refs/heads/master
| 2020-03-22T10:08:27.696849
| 2018-12-10T11:30:00
| 2018-12-10T11:30:00
| 139,883,448
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
import os
import csv
import sys
import time
directory = "C:\\Users\\Arne\\bwSyncAndShare\\Linus-BA-EDR-Data (Richard Jumar)\\EDR0006_2016_L4I_csv"
t1 = time.time()
allFiles = len([name for name in os.listdir(directory) if os.path.isfile(os.path.join(directory, name))])
iterator = 0
for filename in os.listdir(directory):
rf = open(os.path.join(directory, filename))
reader = csv.reader(rf, delimiter=";")
readData = list(reader)
calc = 0
first = []
found = False
for i in range(1, len(readData)):
if '-' in readData[i][2]:
found = True
calc += 1
first.append(i)
if found:
print("\n", filename, calc, first)
rf.close()
sys.stdout.write("\r{0} %".format(round(iterator/allFiles * 100, 2)))
sys.stdout.flush()
iterator += 1
t2 = time.time()
print("\nTook {} seconds".format(t2 - t1))
|
[
"linus.engler4@gmail.com"
] |
linus.engler4@gmail.com
|
665cb7c3db67b3b75d40dcf622a36c3a3bcc2d1b
|
2da6cab107e6189bf3f29d5029525b336547d408
|
/Assignment2.py
|
a9529df72054610d4b83f6503318769d3dd1b41b
|
[
"Apache-2.0"
] |
permissive
|
DHNGUYEN68/Application-of-Deep-Neural-Network
|
4e1047511c0c2ffa40b5fc6e4434829df2a20c35
|
9c609f30cb15da9fb39a3589b50cb2a3617cd946
|
refs/heads/master
| 2020-04-07T10:49:14.015929
| 2018-11-23T14:32:26
| 2018-11-23T14:32:26
| 158,301,613
| 0
| 0
|
Apache-2.0
| 2018-11-19T23:07:15
| 2018-11-19T23:07:15
| null |
UTF-8
|
Python
| false
| false
| 9,784
|
py
|
# coding: utf-8
# In[ ]:
import os
import sklearn
import pandas as pd
import numpy as np
import tensorflow.contrib.learn as skflow
from sklearn.cross_validation import KFold
from scipy.stats import zscore
from sklearn import metrics
from sklearn import preprocessing
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
path = "./data/"
# These four functions will help you, they were covered in class.
# Encode a text field to dummy variables
def encode_text_dummy(df,name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = "{}-{}".format(name,x)
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
# Encode a text field to a single index value
def encode_text_index(df,name):
le = preprocessing.LabelEncoder()
df[name] = le.fit_transform(df[name])
return le.classes_
# Encode a numeric field to Z-Scores
def encode_numeric_zscore(df,name,mean=None,sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name]-mean)/sd
# Encode a numeric field to fill missing values with the median.
def missing_median(df, name):
med = df[name].median()
df[name] = df[name].fillna(med)
# Convert a dataframe to x/y suitable for training.
def to_xy(df,target):
result = []
for x in df.columns:
if x != target:
result.append(x)
return df.as_matrix(result),df[target]
# Encode the toy dataset
def question1():
print()
print("***Question 1***")
path = "./data/"
filename_read = os.path.join(path,"toy1.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
filename_write = os.path.join(path,"submit-hanmingli-prog2q1.csv")
df['height'] = zscore(df['height'])
df['width'] = zscore(df['width'])
encode_numeric_zscore(df,'length')
encode_text_dummy(df,'metal')
encode_text_dummy(df,'shape')
df.to_csv(filename_write,index=False)
print("Wrote {} lines.".format(len(df)))
def question2():
print()
print("***Question 2***")
path = "./data/"
# Read dataset
filename_read = os.path.join(path,"submit-hanmingli-prog2q1.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
weight = encode_text_index(df,"weight")
# Create x(predictors) and y (expected outcome)
x,y = to_xy(df,'weight')
num_classes = len(weight)
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=45)
# Create a deep neural network with 3 hidden layers of 10, 20, 10
regressor = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes,
steps=10000)
# Early stopping
early_stop = skflow.monitors.ValidationMonitor(x_test, y_test,
early_stopping_rounds=10000, print_steps=100, n_classes=num_classes)
# Fit/train neural network
regressor.fit(x_train, y_train, monitor=early_stop)
# Measure accuracy
pred = regressor.predict(x_test)
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Final score (RMSE): {}".format(score))
def question3():
print()
print("***Question 3***")
path = "./data/"
filename_read = os.path.join(path,"toy1.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
filename_write = os.path.join(path,"submit-hanmingli-prog2q3.csv")
length_mean=df['length'].mean()
width_mean=df['width'].mean()
height_mean=df['height'].mean()
length_std=df['length'].std()
width_std=df['width'].std()
height_std=df['height'].std()
print("length: ({}, {})".format(length_mean,length_std))
print("width:({}, {})".format(width_mean,width_std))
print("height:({}, {})".format(height_mean,height_std))
    # Z-Score encode these using the mean/sd from the dataset (you got this in question 2)
testDF = pd.DataFrame([
{'length':1, 'width':2, 'height': 3},
{'length':3, 'width':2, 'height': 5},
{'length':4, 'width':1, 'height': 3}
])
encode_numeric_zscore(testDF,'length',mean=length_mean,sd=length_std)
encode_numeric_zscore(testDF,'width',mean=width_mean,sd=width_std)
encode_numeric_zscore(testDF,'height',mean=height_mean,sd=height_std)
print(testDF)
testDF.to_csv(filename_write,index=False)
def question4():
print()
print("***Question 4***")
path = "./data/"
filename_read = os.path.join(path,"iris.csv")
filename_write = os.path.join(path,"submit-hanmingli-prog2q4.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
name = ['species', 'sepal_l', 'sepal_w', 'petal_l','petal_w']
df = pd.DataFrame(df[name])
encode_numeric_zscore(df,'petal_l')
encode_numeric_zscore(df,'sepal_w')
encode_numeric_zscore(df,'sepal_l')
encode_text_dummy(df,"species")
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
x, y = to_xy(df,'petal_w')
# Cross validate
kf = KFold(len(x), n_folds=5)
oos_y = []
oos_pred = []
oos_x = []
fold = 1
for train, test in kf:
print("Fold #{}".format(fold))
fold+=1
x_train = x[train]
y_train = y[train]
x_test = x[test]
y_test = y[test]
# Create a deep neural network with 3 hidden layers of 10, 20, 10
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[10, 20, 10], steps=500)
# Early stopping
early_stop = skflow.monitors.ValidationMonitor(x_test, y_test,
early_stopping_rounds=200, print_steps=50)
# Fit/train neural network
regressor.fit(x_train, y_train, monitor=early_stop)
# Add the predictions to the oos prediction list
pred = regressor.predict(x_test)
oos_y.append(y_test)
oos_pred.append(pred)
oos_x.append(x_test)
# Measure accuracy
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Fold score (RMSE): {}".format(score))
# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
oos_x = np.concatenate(oos_x)
score = np.sqrt(metrics.mean_squared_error(oos_pred,oos_y))
print("Final, out of sample score (RMSE): {}".format(score))
# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oos_x = pd.DataFrame(oos_x)
oos_x.insert(3,'petal_w',oos_y[:])
oosDF = pd.concat([oos_x,oos_y, oos_pred],axis=1 )
oosDF.columns = ['sepal_l','sepal_w','petal_l','petal_w','species-Iris-setosa','species-Iris-versicolor','species-Iris-virginica',0,0]
oosDF.to_csv(filename_write,index=False)
def question5():
print()
print("***Question 5***")
filename_read = os.path.join(path,"auto-mpg.csv")
filename_write = os.path.join(path,"submit-hanmingli-prog2q5.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
# create feature vector
missing_median(df, 'horsepower')
encode_numeric_zscore(df, 'mpg')
encode_numeric_zscore(df, 'horsepower')
encode_numeric_zscore(df, 'weight')
encode_numeric_zscore(df, 'displacement')
encode_numeric_zscore(df, 'acceleration')
encode_numeric_zscore(df, 'origin')
tem=df['name']
df.drop('name',1,inplace=True)
# Shuffle
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
# Encode to a 2D matrix for training
x,y = to_xy(df,'cylinders')
# Cross validate
kf = KFold(len(x), n_folds=5)
oos_y = []
oos_pred = []
fold = 1
for train, test in kf:
print("Fold #{}".format(fold))
fold+=1
x_train = x[train]
y_train = y[train]
x_test = x[test]
y_test = y[test]
# Create a deep neural network with 3 hidden layers of 10, 20, 10
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=9,
steps=500)
# Early stopping
early_stop = skflow.monitors.ValidationMonitor(x_test, y_test,
early_stopping_rounds=200, print_steps=50, n_classes=9)
# Fit/train neural network
classifier.fit(x_train, y_train, monitor=early_stop)
# Add the predictions to the oos prediction list
pred = classifier.predict(x_test)
oos_y.append(y_test)
oos_pred.append(pred)
# Measure accuracy
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Fold score: {}".format(score))
# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
score = np.sqrt(metrics.mean_squared_error(oos_pred,oos_y))
print("Final, out of sample score: {}".format(score))
# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oos_y.columns = ['ideal']
oos_pred.columns = ['predict']
oosDF = pd.concat( [df, tem,oos_y, oos_pred],axis=1 )
oosDF.to_csv(filename_write,index=False)
question1()
question2()
question3()
question4()
question5()
# In[ ]:
# In[ ]:
# In[ ]:
|
[
"henryfeng1008@126.com"
] |
henryfeng1008@126.com
|
9b3e9a53a8ca23b453acfac6da8baab1f428b4f3
|
9214728da797cdad0300d854113ba41c7a522f9d
|
/Main.py
|
40305859e0d3651539c8eff04068cdcae005ba14
|
[] |
no_license
|
Muhammad-Osama31/pancake-Problem
|
f9fe37e990e33992426b2787ad32571ada7b8075
|
5edf50f6ad827c770e315c3f37f6b279ed22801f
|
refs/heads/master
| 2022-07-26T05:06:23.320333
| 2020-05-16T10:51:36
| 2020-05-16T10:51:36
| 264,415,918
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
if __name__ =='__main__':
# 0,8,7,6,5,4,3,2,1
goal = [0,1,2,4,5,6,7,8,9,10,11,12]
heuristic = Greedy(goal)
    searchProblem = pankcakeproblem([10,9,4,6,3,7,12,11,8,5,3,2,1], goal)
searchStrategy = GreedySearch(heuristic)
search = Search(searchProblem, searchStrategy)
result = search.solveProblem()
if result is not None:
search.printResult(result)
|
[
"noreply@github.com"
] |
Muhammad-Osama31.noreply@github.com
|
828f32e305d7ca3322babf7d52f911304cd842a2
|
7e160316663c37d23df9346e8390fcba5460ea00
|
/Django/crud_board/board/models.py
|
686a91ca6e3b02a6e62e4068db458885fd144693
|
[] |
no_license
|
calofmijuck/2018WaffleStudio
|
5f96594a2c2f5764f1cfb696ab03f3bddd14b0be
|
4ddc85916b6420dcd61f4ce76d5541e023b10874
|
refs/heads/master
| 2021-06-07T09:36:31.058129
| 2020-01-21T15:36:32
| 2020-01-21T15:36:32
| 150,717,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=140)
content = models.TextField()
author = models.CharField(max_length=40)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
hits = models.IntegerField(default=0)
# id = models.IntegerField()
password = models.CharField(max_length=20)
def get_absolute_url(self):
# hits += 1
return reverse('post-detail',
kwargs={'pk': self.pk})
def __str__(self):
# hits += 1
return f"({self.pk}) {self.title}"
def increase(self):
self.hits += 1
|
[
"noreply@github.com"
] |
calofmijuck.noreply@github.com
|
d47b3b64babd6d4f72e784d8842b05c14bdeb686
|
3627bf10d438665c5ac1320c4d91bf69821cb6e7
|
/main.py
|
a65b4e7332fec3dad9502ead6a6e445bd7963ffa
|
[
"WTFPL"
] |
permissive
|
Johan-Mi/PingPongBot
|
4dc79dab48ff1637f63300a23bf7c88e1bbdd265
|
60afe66544639de4fcb321aa48f7eb5264c7400b
|
refs/heads/master
| 2023-01-28T20:24:24.953225
| 2020-12-07T17:30:49
| 2020-12-07T17:30:49
| 278,606,177
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
#!/usr/bin/env python3
"""This module contains the ping pong bot, which responds to messages that
contain the word 'ping'."""
import re
import discord
client = discord.Client()
@client.event
async def on_ready():
"""Lets you know when the bot starts."""
print(f"Discord version: {discord.__version__}")
print(f"Logged in as {client.user}")
@client.event
async def on_message(message):
"""Responds when someone else sends a message."""
if message.author == client.user:
return
if "ping" in message.content.lower():
await message.channel.send(
re.sub("ping", "pong", message.content, flags=re.IGNORECASE))
def main():
"""Runs the bot with the token from the file called 'token'."""
with open("token") as token_file:
token = token_file.read()
client.run(token)
if __name__ == "__main__":
main()
|
[
"johanmilanov@outlook.com"
] |
johanmilanov@outlook.com
|
e6d69ed90a24e786c6d568dfc9b49d4cb145ac24
|
d3419120a66da711887697800a0aece10a009a52
|
/restapi-teach/backend/tests/cases/course2/conftest.py
|
e692acaa56a29d16a2ad03cc9eac896afb57bd83
|
[] |
no_license
|
zscdh1992/test
|
ca96f80e66d96f77883af4a4a9a067a39f6288b0
|
d88d99c327bf8aaaf2fc201fcf891ce9d05b7edd
|
refs/heads/master
| 2020-07-26T22:30:18.109138
| 2019-09-16T12:01:51
| 2019-09-16T12:01:51
| 208,784,364
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
import pytest
@pytest.fixture(scope='package',autouse=True)
def couse2(request):
print("*** !!! couse2 setting up ***")
def teardown():
print("*** !!! couse2 tear down ***")
request.addfinalizer(teardown)
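# Usage note (illustrative, not part of the original fixture): because the
# fixture is declared with scope='package' and autouse=True, pytest runs the
# set-up print once before the first test collected from this package and the
# registered teardown once after the last one, e.g.:
#
#   pytest tests/cases/course2 -s
#
# prints "couse2 setting up" first and "couse2 tear down" at the very end.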
|
[
"1002711754@qq.com"
] |
1002711754@qq.com
|
bfe37cde5dbbfe4bd88752353a379bbf9eec65f1
|
eadcdcdbe46a5208f163ef22e668e42d6fff94a6
|
/customSDK/servicefabric/models/setting_py3.py
|
8e51284c997f5b15f33472171b6c5f679e3b6861
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
yu-supersonic/service-fabric-cli
|
beed8c4b4d8a17a5fbcb5d0578a8e6c166dd9695
|
cc2838597e7d236852c6d95e1b5c54980e0fac96
|
refs/heads/master
| 2023-04-11T15:04:25.213876
| 2021-02-25T19:39:23
| 2021-02-25T19:39:23
| 348,077,179
| 0
| 0
|
NOASSERTION
| 2021-04-07T11:33:05
| 2021-03-15T18:16:11
| null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Setting(Model):
"""Describes a setting for the container. The setting file path can be fetched
from environment variable "Fabric_SettingPath". The path for Windows
container is "C:\\secrets". The path for Linux container is "/var/secrets".
:param type: The type of the setting being given in value. Possible values
include: 'ClearText', 'KeyVaultReference', 'SecretValueReference'. Default
value: "ClearText" .
:type type: str or ~azure.servicefabric.models.SettingType
:param name: The name of the setting.
:type name: str
:param value: The value of the setting, will be processed based on the
type provided.
:type value: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, type="ClearText", name: str=None, value: str=None, **kwargs) -> None:
super(Setting, self).__init__(**kwargs)
self.type = type
self.name = name
self.value = value
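# Minimal usage sketch (an illustration, not part of the generated file; the
# name and value below are made up):
#
#   setting = Setting(type="KeyVaultReference", name="DbPassword",
#                     value="https://myvault.vault.azure.net/secrets/db")
#
# msrest uses the _attribute_map above to serialize each attribute under its
# wire-format key and declared type.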
|
[
"noreply@github.com"
] |
yu-supersonic.noreply@github.com
|
0efce220f61576109fdfa2a5c69ec7994cf7825b
|
47aa27752421393451ebed3389b5f3a52a57577c
|
/src/Lib/test/test_asyncore.py
|
3018cf75cb7e1dcc6ed52e4e2713c0fa26e1209a
|
[
"MIT"
] |
permissive
|
NUS-ALSET/ace-react-redux-brython
|
e66db31046a6a3cd621e981977ed0ca9a8dddba9
|
d009490263c5716a145d9691cd59bfcd5aff837a
|
refs/heads/master
| 2021-08-08T08:59:27.632017
| 2017-11-10T01:34:18
| 2017-11-10T01:34:18
| 110,187,226
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,523
|
py
|
import asyncore
import unittest
import select
import os
import socket
import sys
import time
import warnings
import errno
import struct
from test import support
from test.support import TESTFN, run_unittest, unlink, HOST, HOSTv6
from io import BytesIO
from io import StringIO
try:
import threading
except ImportError:
threading = None
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.time()
while n > 0 and time.time() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
unlink(addr)
sock.bind(addr)
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event']
self.assertEqual(lines, expected)
def test_issue_8594(self):
# XXX - this test is supposed to be removed in next major Python
# version
d = asyncore.dispatcher(socket.socket())
# make sure the error message no longer refers to the socket
# object but the dispatcher instance instead
self.assertRaisesRegex(AttributeError, 'dispatcher instance',
getattr, d, 'foo')
# cheap inheritance with the underlying socket is supposed
# to still work but a DeprecationWarning is expected
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
family = d.family
self.assertEqual(family, socket.AF_INET)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket()
sock.settimeout(3)
port = support.bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket()
d.connect((HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
t.join()
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
class BaseTestHandler(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class BaseServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, family, addr, handler=BaseTestHandler):
asyncore.dispatcher.__init__(self)
self.create_socket(family)
self.set_reuse_addr()
bind_af_aware(self.socket, addr)
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self):
raise
class BaseClient(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
class BaseTestAPI:
def tearDown(self):
asyncore.close_all()
def loop_waiting_for_flag(self, instance, timeout=5):
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b'\0' * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
self.assertEqual(s.socket.family, self.family)
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
self.assertEqual(s.socket.type, socket.SOCK_STREAM | SOCK_NONBLOCK)
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(socket.error, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
sock = socket.socket(self.family)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
finally:
sock.close()
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_quick_connect(self):
# see: http://bugs.python.org/issue10340
if self.family in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
server = BaseServer(self.family, self.addr)
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
count=500))
t.start()
self.addCleanup(t.join)
s = socket.socket(self.family, socket.SOCK_STREAM)
s.settimeout(.2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except socket.error:
pass
finally:
s.close()
class TestAPI_UseIPv4Sockets(BaseTestAPI):
family = socket.AF_INET
addr = (HOST, 0)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
family = socket.AF_INET6
addr = (HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = support.TESTFN
def tearDown(self):
unlink(self.addr)
BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = True
if __name__ == "__main__":
unittest.main()
|
[
"chrisboesch@nus.edu.sg"
] |
chrisboesch@nus.edu.sg
|
53cb07a0d22ea3a2429216985c1150d2d1273144
|
09c7f9719517a2d7fee9a784276dcbc898ad8160
|
/beagleBoxRasp/beagleBoxRaspCode.py
|
d3fef1980a63372809d9d7dad1ecae437d3b9ed6
|
[] |
no_license
|
BeagleBox/beagleBox_Eletronica
|
3e89b86f8fe9be314af263d7d5fae04cc2f2e68c
|
13280c9dd27873b17512d8619688f6727ca00df9
|
refs/heads/master
| 2021-06-18T16:43:24.277618
| 2017-07-02T18:03:32
| 2017-07-02T18:03:32
| 93,672,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
# In this case I am sending an "Aviso" (warning) and a "Deslocamento" (displacement)
# message to identify the type of data being sent;
# the battery level and the scale readings are taken from the board itself,
# i.e. they are available at all times and do not come from the Arduino
import RPi.GPIO as GPIO
import time
import serial
# Configure the serial port and the transmission speed
ser = serial.Serial("/dev/ttyAMA0", 115200)
GPIO.setmode(GPIO.BOARD)
# Input for the scale signal
GPIO.setup(10, GPIO.IN)
# Input for the battery level signal
GPIO.setup(12, GPIO.IN)
ser.write(identificadorDeCaminho)
while(1):
balanca = GPIO.input(10)
nivelBateria = GPIO.input(12)
resposta = ser.readline()
    if (resposta == "Aviso"):
        aviso = ser.readline()  # START, END, OBSTRUCTED
    if (resposta == "Deslocamento"):
        deslocamento = ser.readline()  # number of times it moved along the X axis
if (trancar == 1) :
ser.write(trancar)
time.sleep(0.5)
|
[
"hdcjuninho@gmail.com"
] |
hdcjuninho@gmail.com
|
8e0fb7e19066bff63db68684d7a0451e3083fed4
|
e2bd9a2f5db45cee5456e7f48d7418af2d4081e4
|
/DynamicProgramming/LCS/PrintSCSuperSequence.py
|
2751b53c3e09578058e91ca0d5bf4d6569db1068
|
[
"MIT"
] |
permissive
|
26tanishabanik/Interview-Coding-Questions
|
9b46b9e943cc0547ea4a9d4af833f6ee24d4d616
|
dfed88c7067fcfcb3c1929730464ad12389d1da6
|
refs/heads/main
| 2023-08-17T05:41:54.617103
| 2021-10-05T07:08:02
| 2021-10-05T07:08:02
| 413,709,370
| 1
| 0
| null | 2021-10-05T07:03:56
| 2021-10-05T07:03:55
| null |
UTF-8
|
Python
| false
| false
| 1,503
|
py
|
'''
Given two strings str1 and str2, return the shortest string that has both str1 and str2 as subsequences.
If multiple answers exist, you may return any of them.
(A string S is a subsequence of string T if deleting some number of characters from T
(possibly 0, and the characters are chosen anywhere from T) results in the string S.)
Example 1:
Input: str1 = "abac", str2 = "cab"
Output: "cabac"
Explanation:
str1 = "abac" is a subsequence of "cabac" because we can delete the first "c".
str2 = "cab" is a subsequence of "cabac" because we can delete the last "ac".
The answer provided is the shortest such string that satisfies these properties.
'''
from icecream import ic
def PrintSCS(s1, s2, n, m):
dp = [[0 for _ in range(m+1)]for _ in range(n+1)]
for i in range(1, n+1):
for j in range(1, m+1):
if s1[i-1] == s2[j-1]:
dp[i][j] = 1+dp[i-1][j-1]
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
i = n
j = m
ans = ''
while i > 0 and j > 0:
if s1[i-1] == s2[j-1]:
ans += s2[j-1]
j -= 1
i -= 1
else:
if dp[i][j-1] < dp[i-1][j]:
ans += s1[i-1]
i -= 1
else:
ans += s2[j-1]
j -= 1
while i > 0:
ans += s1[i-1]
i -= 1
while j > 0:
ans += s2[j-1]
j -= 1
return ans[::-1]
print(PrintSCS(
"bbbaaaba",
"bbababbb", 8, 8))
|
[
"mahankalisaicharan@gmail.com"
] |
mahankalisaicharan@gmail.com
|
5d106286401eb861ce4ca131c415bf3aa36cf860
|
2a60d6095dad452eabcbf99583421435126c2a3c
|
/home/migrations/0097_auto_20180911_1919.py
|
411cd3b2c6c3b144769b696d28cccd5c8c702240
|
[] |
no_license
|
dentemm/healthhouse
|
7aa6dd68363f6ad0c76e3aaa83d74707930e9e34
|
da76f4406e9fb134ab8d94cf5f7f968523939b44
|
refs/heads/master
| 2023-03-19T21:08:07.136774
| 2023-03-12T19:45:23
| 2023-03-12T19:45:23
| 92,063,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
# Generated by Django 2.0.7 on 2018-09-11 19:19
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('home', '0096_auto_20180911_1917'),
]
operations = [
migrations.AlterField(
model_name='discoverypage',
name='content',
field=wagtail.core.fields.StreamField([('parallax', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(max_length=64)), ('info_text', wagtail.core.blocks.CharBlock(max_length=255)), ('background_color', wagtail.core.blocks.ChoiceBlock(choices=[('primary', 'HH blue'), ('white', 'White'), ('dark-blue', 'HH dark')])), ('image_position', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right')])), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.PageChooserBlock(), required=False)), ('external_links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.URLBlock())), ('external_links2', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.URLBlock(required=False)), ('url_title', wagtail.core.blocks.CharBlock(max_length=28, required=False))]), required=False))]))], null=True),
),
]
|
[
"tim.claes@me.com"
] |
tim.claes@me.com
|
4f431509568bd27c45f52f4280cc40dcc68d86dd
|
6bdb32ddbd72c4337dab12002ff05d6966538448
|
/gridpack_folder/mc_request/LHEProducer/Spin-2/BulkGraviton_WW_WhadWhad/BulkGraviton_WW_WhadWhad_narrow_M4500_13TeV-madgraph_cff.py
|
7e6dfa7d29a8f12fbc0e6cd0897dc8190314a5f5
|
[] |
no_license
|
cyrilbecot/DibosonBSMSignal_13TeV
|
71db480de274c893ba41453025d01bfafa19e340
|
d8e685c40b16cde68d25fef9af257c90bee635ba
|
refs/heads/master
| 2021-01-11T10:17:05.447035
| 2016-08-17T13:32:12
| 2016-08-17T13:32:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/master/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-2/BulkGraviton_WW_WhadWhad/BulkGraviton_WW_WhadWhad_narrow_M4500
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-2/BulkGraviton_WW_WhadWhad/narrow/v2/BulkGraviton_WW_WhadWhad_narrow_M4500_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
|
[
"syu@cern.ch"
] |
syu@cern.ch
|
66f5083717011c1209ab870fbda9e2804badb4e4
|
d091ab3f316066327c3daf66814803516e73be18
|
/AITrans_Competition_withRainbow/memory.py
|
8743ea3e7f306fa440ce1a1d2704247bba413101
|
[] |
no_license
|
lilyhpeng/AItrans-2019-MMGC
|
c53f1c3d6136ddd1ab248794a3a82b6b3ad972ba
|
d8fb03eb70be75684f1e3b0e4d31d29956269ef9
|
refs/heads/master
| 2023-06-08T18:09:47.515957
| 2021-06-27T07:52:57
| 2021-06-27T07:52:57
| 380,681,345
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,388
|
py
|
from collections import namedtuple
import numpy as np
import torch
Transition = namedtuple('Transition', ('timestep', 'state', 'action', 'reward', 'nonterminal'))
blank_trans = Transition(0, torch.zeros(84, 84, dtype=torch.uint8), None, 0, False)
# Segment tree data structure where parent node values are sum/max of children node values
class SegmentTree():
def __init__(self, size):
self.index = 0
self.size = size
self.full = False # Used to track actual capacity
self.sum_tree = np.zeros((2 * size - 1, ), dtype=np.float32) # Initialise fixed size tree with all (priority) zeros
self.data = np.array([None] * size) # Wrap-around cyclic buffer
self.max = 1 # Initial max value to return (1 = 1^ω)
# Propagates value up tree given a tree index
def _propagate(self, index, value):
parent = (index - 1) // 2
left, right = 2 * parent + 1, 2 * parent + 2
self.sum_tree[parent] = self.sum_tree[left] + self.sum_tree[right]
if parent != 0:
self._propagate(parent, value)
# Updates value given a tree index
def update(self, index, value):
self.sum_tree[index] = value # Set new value
self._propagate(index, value) # Propagate value
self.max = max(value, self.max)
def append(self, data, value):
self.data[self.index] = data # Store data in underlying data structure
self.update(self.index + self.size - 1, value) # Update tree
self.index = (self.index + 1) % self.size # Update index
self.full = self.full or self.index == 0 # Save when capacity reached
self.max = max(value, self.max)
# Searches for the location of a value in sum tree
def _retrieve(self, index, value):
left, right = 2 * index + 1, 2 * index + 2
if left >= len(self.sum_tree):
return index
elif value <= self.sum_tree[left]:
return self._retrieve(left, value)
else:
return self._retrieve(right, value - self.sum_tree[left])
# Searches for a value in sum tree and returns value, data index and tree index
def find(self, value):
index = self._retrieve(0, value) # Search for index of item from root
data_index = index - self.size + 1
return (self.sum_tree[index], data_index, index) # Return value, data index, tree index
# Returns data given a data index
def get(self, data_index):
return self.data[data_index % self.size]
def total(self):
return self.sum_tree[0]
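# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Minimal standalone use of the SegmentTree above, assuming priorities are plain
# floats. Kept commented out so the module's import-time behaviour is unchanged:
#   tree = SegmentTree(4)
#   for i, p in enumerate([0.1, 0.5, 0.2, 0.2]):
#       tree.append(('item%d' % i,), p)
#   value, data_index, tree_index = tree.find(0.55)  # cumulative mass 0.55 falls in slot 1
#   print(tree.get(data_index), tree.total())        # ('item1',) 1.0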
class ReplayMemory():
def __init__(self, args, capacity):
self.device = args.device
self.capacity = capacity
self.history = args.history_length
self.discount = args.discount
self.n = args.multi_step
        self.priority_weight = args.priority_weight # Initial importance sampling weight β, annealed to 1 over the course of training
self.priority_exponent = args.priority_exponent
self.t = 0 # Internal episode timestep counter
self.transitions = SegmentTree(capacity) # Store transitions in a wrap-around cyclic buffer within a sum tree for querying priorities
# Adds state and action at time t, reward and terminal at time t + 1
def append(self, state, action, reward, terminal):
# changed by penghuan on 20190527
# state = state[-1].mul(255).to(dtype=torch.uint8, device=torch.device('cpu')) # Only store last frame and discretise to save memory
self.transitions.append(Transition(self.t, state, action, reward, not terminal), self.transitions.max) # Store new transition with maximum priority
self.t = 0 if terminal else self.t + 1 # Start new episodes with t = 0
# Returns a transition with blank states where appropriate
def _get_transition(self, idx):
transition = np.array([None] * (self.history + self.n))
transition[self.history - 1] = self.transitions.get(idx)
for t in range(self.history - 2, -1, -1): # e.g. 2 1 0
if transition[t + 1].timestep == 0:
transition[t] = blank_trans # If future frame has timestep 0
else:
transition[t] = self.transitions.get(idx - self.history + 1 + t)
for t in range(self.history, self.history + self.n): # e.g. 4 5 6
if transition[t - 1].nonterminal:
transition[t] = self.transitions.get(idx - self.history + 1 + t)
else:
transition[t] = blank_trans # If prev (next) frame is terminal
return transition
# Returns a valid sample from a segment
def _get_sample_from_segment(self, segment, i):
valid = False
while not valid:
sample = np.random.uniform(i * segment, (i + 1) * segment) # Uniformly sample an element from within a segment
prob, idx, tree_idx = self.transitions.find(sample) # Retrieve sample from tree with un-normalised probability
            # Resample if the transition straddles the current index or has probability 0
if (self.transitions.index - idx) % self.capacity > self.n and (idx - self.transitions.index) % self.capacity >= self.history and prob != 0:
valid = True # Note that conditions are valid but extra conservative around buffer index 0
# Retrieve all required transition data (from t - h to t + n)
transition = self._get_transition(idx)
# Create un-discretised state and nth next state
data = [trans.state for trans in transition[:self.history]]
state = torch.stack(data).to(dtype=torch.float32, device=self.device).div(255)
next_state = torch.stack([trans.state for trans in transition[self.n:self.n + self.history]]).to(dtype=torch.float32, device=self.device).div(255)
# Discrete action to be used as index
action = torch.tensor([transition[self.history - 1].action], dtype=torch.int64, device=self.device)
# Calculate truncated n-step discounted return R^n = Σ_k=0->n-1 (γ^k)R_t+k+1 (note that invalid nth next states have reward 0)
R = torch.tensor([sum(self.discount ** n * transition[self.history + n - 1].reward for n in range(self.n))], dtype=torch.float32, device=self.device)
# Mask for non-terminal nth next states
nonterminal = torch.tensor([transition[self.history + self.n - 1].nonterminal], dtype=torch.float32, device=self.device)
return prob, idx, tree_idx, state, action, R, next_state, nonterminal
def sample(self, batch_size):
p_total = self.transitions.total() # Retrieve sum of all priorities (used to create a normalised probability distribution)
segment = p_total / batch_size # Batch size number of segments, based on sum over all probabilities
batch = [self._get_sample_from_segment(segment, i) for i in range(batch_size)] # Get batch of valid samples
probs, idxs, tree_idxs, states, actions, returns, next_states, nonterminals = zip(*batch)
states, next_states, = torch.stack(states), torch.stack(next_states)
actions, returns, nonterminals = torch.cat(actions), torch.cat(returns), torch.stack(nonterminals)
probs = np.array(probs, dtype=np.float32) / p_total # Calculate normalised probabilities
capacity = self.capacity if self.transitions.full else self.transitions.index
weights = (capacity * probs) ** -self.priority_weight # Compute importance-sampling weights w
weights = torch.tensor(weights / weights.max(), dtype=torch.float32, device=self.device) # Normalise by max importance-sampling weight from batch
return tree_idxs, states, actions, returns, next_states, nonterminals, weights
def update_priorities(self, idxs, priorities):
priorities = np.power(priorities, self.priority_exponent)
[self.transitions.update(idx, priority) for idx, priority in zip(idxs, priorities)]
# Set up internal state for iterator
def __iter__(self):
self.current_idx = 0
return self
# Return valid states for validation
def __next__(self):
if self.current_idx == self.capacity:
raise StopIteration
# Create stack of states
state_stack = [None] * self.history
state_stack[-1] = self.transitions.data[self.current_idx].state
prev_timestep = self.transitions.data[self.current_idx].timestep
for t in reversed(range(self.history - 1)):
if prev_timestep == 0:
state_stack[t] = blank_trans.state # If future frame has timestep 0
else:
state_stack[t] = self.transitions.data[self.current_idx + t - self.history + 1].state
prev_timestep -= 1
state = torch.stack(state_stack, 0).to(dtype=torch.float32, device=self.device).div_(255) # Agent will turn into batch
self.current_idx += 1
return state
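# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Typical way this buffer is driven from a prioritised-replay training loop; the
# `agent` object and its `learn` method are hypothetical placeholders:
#   mem = ReplayMemory(args, args.memory_capacity)
#   mem.append(state, action, reward, terminal)      # once per environment step
#   idxs, states, actions, returns, next_states, nonterminals, weights = mem.sample(args.batch_size)
#   td_errors = agent.learn(states, actions, returns, next_states, nonterminals, weights)
#   mem.update_priorities(idxs, td_errors)           # td_errors: array of |δ| used as new priorities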
|
[
"peng_h30@163.com"
] |
peng_h30@163.com
|
8e94d6b6e9eab0ee3e5aae875fea2da1e8c09c25
|
eddba20dfd883e6ac710fd7c7035a9da1a06ff96
|
/dataprep/CombineEventFiles.py
|
c7b30741a1fe034ab6c597b869f4a893230edaff
|
[] |
no_license
|
philippwindischhofer/HiggsPivoting
|
cc64ea6f75919c78a88ae4a182e30eb6f2669767
|
c103de67d4c8358aba698ecf4b1491c05b8f6494
|
refs/heads/paper
| 2022-11-27T18:03:17.491300
| 2020-04-15T14:39:43
| 2020-04-15T14:39:43
| 193,477,990
| 3
| 0
| null | 2022-11-21T21:31:39
| 2019-06-24T09:46:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,772
|
py
|
import os, glob
from argparse import ArgumentParser
from h5add import h5add
def IsGoodEventFile(eventfile):
    # An event file is considered good if pandas can read it back as an HDF dataframe.
    try:
        import pandas as pd
        pd.read_hdf(eventfile)
        return True
    except Exception:
        return False
def CombineEventFiles(indir, channel):
from CombineLumiFiles import IsGoodLumiFile
# the assumed event file names
eventfile = {"0lep": "events_0lep.h5",
"1lep": "events_1lep.h5"}
assert len(indir) == 1
indir = indir[0]
# first, look for all existing lumi files
# Note: this semi-automatic way of doing it is faster than simply
# lumifiles = glob.glob(os.path.join(indir, "**/lumi.conf"), recursive = True)
sub_dirs = glob.glob(os.path.join(indir, '*/'))
event_file_candidates = []
for sub_dir in sub_dirs:
eventfile_path = os.path.join(sub_dir, eventfile[channel])
# ignore any subdirectory that does not have a lumi file in it
if IsGoodLumiFile(os.path.join(sub_dir, "lumi.conf")) and IsGoodEventFile(eventfile_path):
event_file_candidates.append(eventfile_path)
else:
print("Warning: '{}' does not have a good lumi file or a corrupted event file, ignoring its events!".format(sub_dir))
print("have found {} good event files in this directory".format(len(event_file_candidates)))
# combine them together
output_file = os.path.join(indir, "events.h5")
h5add(output_file, event_file_candidates)
if __name__ == "__main__":
parser = ArgumentParser(description = "combine event files")
parser.add_argument("indir", nargs = '+', action = "store")
parser.add_argument("--channel", action = "store", default = "0lep")
args = vars(parser.parse_args())
CombineEventFiles(**args)
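# --- Illustrative usage (editor's addition, not part of the original file) ---
# Hypothetical invocation: merge the per-subdirectory event files found under a
# sample directory into a single events.h5 for the 0-lepton channel:
#   python CombineEventFiles.py /path/to/sample_dir --channel 0lep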
|
[
"philipp.windischhofer@cern.ch"
] |
philipp.windischhofer@cern.ch
|
1e380dd89f23fb303d9f546af9e4298c9b1f44de
|
cd784ca69d8cc3d1977cd86296052c556e7050ff
|
/week8/venv/bin/wheel
|
8dd655b5046ec634eebcb7dc6be179fbe01c0a29
|
[] |
no_license
|
bennerl/qbb2016-answers
|
15d7c67b927ecb4c175dcc8b1669e952c12cbefc
|
2868aae69f1105e6aa20fdb7e99c139f3ca88e82
|
refs/heads/master
| 2020-12-01T16:37:42.568744
| 2016-11-28T03:12:41
| 2016-11-28T03:12:41
| 74,932,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
#!/Users/cmdb/qbb2016-answers/week8/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"leif.benner@gmail.com"
] |
leif.benner@gmail.com
|