| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3-288 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-112 items) | license_type (string, 2 classes) | repo_name (string, 5-115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes, nullable) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, 128 to 8.19k chars) | authors (list, 1 item) | author_id (string, 1-132 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
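A minimal sketch of streaming rows with the schema above via the Hugging Face `datasets` library; the dataset id "user/python-files" is a placeholder, not this dataset's real name.

from datasets import load_dataset

# "user/python-files" is a hypothetical id standing in for this dataset.
ds = load_dataset("user/python-files", split="train", streaming=True)
for row in ds.take(1):
    print(row["repo_name"], row["path"], row["license_type"])
    print(row["content"][:200])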
4c85c67d781eb179e4dafca7cb96d60c8873cc0e
|
0b35072547001ebefa3fde2eea0eae30423e2190
|
/editregions/contrib/textfiles/admin.py
|
1bc746c9c1addc07aa4ae926a814393591df4afb
|
[
"BSD-2-Clause-Views",
"BSD-2-Clause"
] |
permissive
|
kezabelle/django-editregions
|
ceba5561a4768ccda9ccd279f3a6a35e11dbdfea
|
961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5
|
refs/heads/master
| 2020-06-06T19:40:36.682702
| 2015-01-21T14:33:52
| 2015-01-21T14:33:52
| 14,645,861
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,474
|
py
|
# -*- coding: utf-8 -*-
from functools import update_wrapper
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from django.core.exceptions import ValidationError, PermissionDenied
from django.http import HttpResponse
from django.template.defaultfilters import striptags
from django.template.loader import render_to_string
try:
from django.utils.text import Truncator
def truncate_words(s, num):
return Truncator(s).words(num, truncate='...')
except ImportError as e: # pragma: no cover
from django.utils.text import truncate_words
from editregions.admin.modeladmins import ChunkAdmin
from editregions.contrib.textfiles.utils import valid_md_file
from .models import Markdown
from .forms import MarkdownSelectionForm
class MarkdownAdmin(ChunkAdmin, ModelAdmin):
form = MarkdownSelectionForm
list_display = ['filepath', 'created', 'modified']
add_form_template = 'admin/editregions/markdown/change_form.html'
change_form_template = 'admin/editregions/markdown/change_form.html'
def render_into_region(self, obj, context, **kwargs):
return render_to_string('editregions/textfiles/markdown.html',
context_instance=context)
def render_into_summary(self, obj, context, **kwargs):
data = striptags(obj.rendered_content).strip()
if data:
return truncate_words(data, 50)
return '[missing content]'
def get_urls(self):
default_urls = super(MarkdownAdmin, self).get_urls()
from django.conf.urls import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = patterns('',
url(r'^preview/(.+)$', wrap(self.preview), name='%s_%s_preview' % info),
)
return urlpatterns + default_urls
def preview(self, request, target_file, extra_context=None):
if not self.has_add_permission(request):
raise PermissionDenied("Need add permission")
try:
valid_md_file(target_file)
except ValidationError:
raise PermissionDenied("Invalid file")
fake_obj = self.model(filepath=target_file)
return HttpResponse(fake_obj.rendered_content)
admin.site.register(Markdown, MarkdownAdmin)
|
[
"keryn@kerynknight.com"
] |
keryn@kerynknight.com
|
2ffcff7acb286a0ae13295300bd567c7f8e0cc56
|
b8467af3373374f54aef8e4a060eb9550028f298
|
/functionsprogram/myprograms.py
|
dcf9e8fa570248d0908469b331daa249eba8591c
|
[] |
no_license
|
luminartraining/luminarPythonNovember
|
6790acf957bba6ec47ab5c872f8f22b1c22d63f7
|
33f4627f89c281fca45f58d00a4e3f1f221144fc
|
refs/heads/master
| 2023-01-28T20:23:03.863310
| 2020-12-07T02:46:03
| 2020-12-07T02:46:03
| 315,807,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
#import modules
#module ? package?
#we have to import the matoperations.py module
import functionsprogram.matoperations as fun
res=fun.add(100,200)
print(res)
sub=fun.sub(200,100)
print(sub)
#create account in github
#git
|
[
"sajaykannan10@gmail.com"
] |
sajaykannan10@gmail.com
|
ab03d8f6384fcbd71e3cd2408f91f5910e8878b7
|
521c1beeb2776161ae6d550be35cd0c180887129
|
/customkeywords/tailProtocol.py
|
a1c19882a20fb2a2a23c6e113b2e1687b55fbdc7
|
[] |
no_license
|
elvis2workspace/CustomLibrary
|
601b552792ac2c33beeb709474f857c82793ac7e
|
6449eea8aa99ca1172f54b669d97703d36132ce3
|
refs/heads/master
| 2021-01-23T21:33:05.617871
| 2017-09-26T01:57:48
| 2017-09-26T01:57:48
| 58,983,388
| 0
| 1
| null | 2016-12-06T09:56:14
| 2016-05-17T02:22:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,144
|
py
|
# -*- coding: utf-8 -*-
"""
Created on 2015-05-08
@author: zhang.xiuhai
"""
import os
import hashlib
from twisted.internet.protocol import ServerFactory, ProcessProtocol
from twisted.protocols.basic import LineReceiver
from twisted.python import log
from twisted.internet import reactor, threads
class TailProtocol(object):
"""classdocs"""
def __init__(self, write_callback):
self.write = write_callback
def out_received(self, data):
self.write("Begin lastlog\n")
data = [line for line in data.split('\n') if not line.startswith('==')]
for d in data:
self.write(d + '\n')
self.write("End lastlog\n")
def processEnded(self, reason):
if reason.value.exitCode != 0:
log.msg(reason)
class HashCompute(object):
def __init__(self, path, write_callback):
self.path = path
self.write = write_callback
def blockingMethod(self):
        if not os.path.isfile(self.path):
            raise IOError('%s is not a file' % self.path)
        data = open(self.path).read()
# uncomment to add more delay
# import time
# time.sleep(10)
return hashlib.sha1(data).hexdigest()
def compute(self):
d = threads.deferToThread(self.blockingMethod)
d.addCallback(self.ret)
d.addErrback(self.err)
def ret(self, hdata):
self.write("File hash is : %s\n" % hdata)
def err(self, failure):
self.write("An error occured : %s\n" % failure.getErrorMessage())
class CmdProtocol(LineReceiver):
delimiter = '\n'
def processCmd(self, line):
if line.startswith('lastlog'):
tailProtocol = TailProtocol(self.transport.write)
# reactor.spawnProcess(tailProtocol, '/usr/bin/tail', args=['/usr/bin/tail', '-10', '/var/log/syslog'])
elif line.startswith('comphash'):
try:
useless, path = line.split(' ')
            except ValueError:
self.transport.write('Please provide a path.\n')
return
hc = HashCompute(path, self.transport.write)
hc.compute()
elif line.startswith('exit'):
self.transport.loseConnection()
else:
self.transport.write('Command not found.\n')
def connectionMade(self):
        self.client_ip = self.transport.getPeer().host
log.msg("Client connection from %s" % self.client_ip)
if len(self.factory.clients) >= self.factory.clients_max:
log.msg("Too many connections. bye !")
self.client_ip = None
self.transport.loseConnection()
else:
self.factory.clients.append(self.client_ip)
def connectionLost(self, reason):
log.msg('Lost client connection. Reason: %s' % reason)
if self.client_ip:
self.factory.clients.remove(self.client_ip)
def lineReceived(self, line):
log.msg('Cmd received from %s : %s' % (self.client_ip, line))
self.processCmd(line)
class MyFactory(ServerFactory):
protocol = CmdProtocol
def __init__(self, clients_max=10):
self.clients_max = clients_max
self.clients = []
|
[
"xiuhai5052@hotmail.com"
] |
xiuhai5052@hotmail.com
|
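The HashCompute class in the file above offloads a blocking file read with twisted.internet.threads.deferToThread. A self-contained sketch of that pattern follows; slow_hash, the sample path, and the printed messages are illustrative, not taken from the file.

import hashlib
from twisted.internet import reactor, threads

def slow_hash(path):
    # Blocking I/O; deferToThread runs it in the reactor's thread pool.
    with open(path, 'rb') as f:
        return hashlib.sha1(f.read()).hexdigest()

def on_result(digest):
    print("File hash is : %s" % digest)
    reactor.stop()

def on_error(failure):
    print("An error occurred : %s" % failure.getErrorMessage())
    reactor.stop()

d = threads.deferToThread(slow_hash, '/etc/hostname')  # hypothetical path
d.addCallback(on_result)
d.addErrback(on_error)
reactor.run()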
3c70d5350e30d99e67991d3efcbe1f1fce136e79
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/util/demangler/gnu/__init__.pyi
|
32ce2a5c514debd6436731a597a14186a1d74212
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
pyi
|
from .DemanglerParseException import DemanglerParseException as DemanglerParseException
from .GnuDemangler import GnuDemangler as GnuDemangler
from .GnuDemanglerNativeProcess import GnuDemanglerNativeProcess as GnuDemanglerNativeProcess
from .GnuDemanglerOptions import GnuDemanglerOptions as GnuDemanglerOptions
from .GnuDemanglerParser import GnuDemanglerParser as GnuDemanglerParser
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
8ff270a59500d90bbb8a3db156cc23e09b9628ce
|
0124528676ee3bbaec60df5d6950b408e6da37c8
|
/Projects/QTPy/adafruit-circuitpython-bundle-7.x-mpy-20220601/examples/lsm303_accel_fast.py
|
87d21c1b86f615c46bf8bd76cc4373c70703b763
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
land-boards/lb-boards
|
8127658dc537dcfde0bb59a5018ab75c3f0087f6
|
eeb98cc2003dac1924845d949f6f5bd387376568
|
refs/heads/master
| 2023-06-07T15:44:46.110742
| 2023-06-02T22:53:24
| 2023-06-02T22:53:24
| 4,847,305
| 10
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
""" Read data from the accelerometer and print it out, ASAP! """
import board
import adafruit_lsm303_accel
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_lsm303_accel.LSM303_Accel(i2c)
while True:
accel_x, accel_y, accel_z = sensor.acceleration
print("{0:10.3f} {1:10.3f} {2:10.3f}".format(accel_x, accel_y, accel_z))
|
[
"doug@douglasgilliland.com"
] |
doug@douglasgilliland.com
|
ef94ec0911459dc49f3392233ca8f7e4ed07ddc1
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/lib/python2.7/site-packages/openopt/kernel/oologfcn.py
|
e9819b86d856962730889b3c6c11f8a6131dfd5e
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
class OpenOptException(BaseException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
#pass
#def ooassert(cond, msg):
# assert cond, msg
def oowarn(msg):
s = oowarn.s + msg
print(s)
return s
oowarn.s = 'OpenOpt Warning: '
errSet = set()
def ooerr(msg):
s = ooerr.s + msg
if msg not in errSet:
print(s)
errSet.add(msg)
raise OpenOptException(msg)
ooerr.s = 'OpenOpt Error: '
ooerr.set = errSet
pwSet = set()
def ooPWarn(msg):
if msg in pwSet: return ''
pwSet.add(msg)
oowarn(msg)
return msg
ooPWarn.s = 'OpenOpt Warning: '
ooPWarn.set = pwSet
def ooinfo(msg):
s = ooinfo.s + msg
print(s)
return s
ooinfo.s = 'OpenOpt info: '
def oohint(msg):
s = oohint.s + msg
print(s)
return s
oohint.s = 'OpenOpt hint: '
def oodisp(msg):
print(msg)
return msg
oodisp.s = ''
def oodebugmsg(p, msg):
if p.debug:
print('OpenOpt debug msg: %s' % msg)
return msg
return ''
|
[
"noreply@github.com"
] |
wangyum.noreply@github.com
|
d1a6d4762ab2b6475a8e17c25b4e44c0fe9b6611
|
6bc0a7bbbe769192ff7a7c403d2c5086be1d186c
|
/main.py
|
570d04262295be9d63758e1cf20432c9c0e60610
|
[
"MIT"
] |
permissive
|
veryhannibal/pytorch-distributed-cifar
|
5c4c2aff27b16f4c6350e7e789f2edc9c50f8111
|
cc3528bb80c1bb498fbf717a30d6083ef1931fad
|
refs/heads/master
| 2022-03-12T21:26:06.125558
| 2019-11-21T11:58:02
| 2019-11-21T11:58:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,497
|
py
|
'''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import time
import argparse
from models.vgg import *
from models.dpn import *
from models.lenet import *
from models.senet import *
from models.pnasnet import *
from models.densenet import *
from models.googlenet import *
from models.shufflenet import *
from models.shufflenetv2 import *
from models.resnet import *
from models.resnext import *
from models.preact_resnet import *
from models.mobilenet import *
from models.mobilenetv2 import *
# from utils import progress_bar
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--data_dir', type=str)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--model', type=str)
parser.add_argument('--model_dir', type=str)
parser.add_argument('--lr_decay_step_size', type=int, default=0)
parser.add_argument('--lr_decay_factor', type=float, default=1.0)
parser.add_argument('--save_frequency', type=int, default=100)
parser.add_argument('--num_epochs', type=int)
parser.add_argument('--device_ids', nargs='+', type=int)
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
trainset = torchvision.datasets.CIFAR10(root=args.data_dir, train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)
# testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
# testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model: ' + args.model)
if args.model == 'vgg16':
net = VGG('VGG16')
elif args.model == 'vgg19':
net = VGG('VGG19')
elif args.model == 'resnet18':
net = ResNet18()
elif args.model == 'resnet34':
net = ResNet34()
elif args.model == 'resnet50':
net = ResNet50()
elif args.model == 'resnet101':
net = ResNet101()
elif args.model == 'resnet152':
net = ResNet152()
# net = PreActResNet18()
elif args.model == 'googlenet':
net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
net = net.to(device)
if device == 'cuda':
if args.device_ids is not None:
net = torch.nn.DataParallel(net, device_ids=args.device_ids)
else:
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
    assert os.path.isdir(args.model_dir), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(args.model_dir + '/ckpt.t7')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
if args.lr_decay_step_size > 0:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_decay_step_size, gamma=args.lr_decay_factor)
# Training
def train(epoch):
lr = scheduler.get_lr()[0] if args.lr_decay_step_size > 0 else args.lr
print('\nEpoch: %d\nlr = %g' % (epoch, lr))
net.train()
train_loss = 0
correct = 0
total = 0
time_used = 0.0
for batch_idx, (inputs, targets) in enumerate(trainloader):
start = time.time()
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
end = time.time()
time_used += end - start
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
# progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
# % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
print('[%d/%d] Loss: %.3f | Acc: %.3f%% (%d/%d) | Throughput: %.3f (images/sec)'
% (batch_idx + 1, len(trainloader), train_loss/(batch_idx+1),
100.*correct/total, correct, total, args.batch_size/(end-start+1e-6)))
print('\n[Epoch %d] Loss: %.3f | Acc: %.3f%% (%d/%d) | Throughput: %.3f (images/sec)'
% (epoch, train_loss/(len(trainloader)),
100.*correct/total, correct, total,
args.batch_size*len(trainloader)/(time_used+1e-6)))
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
# progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
# % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
print('\n[Evaluation] Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint.
acc = 100.*correct/total
if (epoch == start_epoch + args.num_epochs - 1) or \
(args.save_frequency > 0 and (epoch + 1) % args.save_frequency == 0):
# if acc > best_acc:
print('\nSaving..')
state = {
'net': net.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir(args.model_dir):
os.makedirs(args.model_dir)
torch.save(state, args.model_dir + '/' + str(epoch) + '-ckpt.t7')
if acc > best_acc:
best_acc = acc
for epoch in range(start_epoch, start_epoch + args.num_epochs):
train(epoch)
test(epoch)
if args.lr_decay_step_size > 0:
scheduler.step()
|
[
"kurisusnowdeng@gmail.com"
] |
kurisusnowdeng@gmail.com
|
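A hypothetical invocation of the training script above, using the argparse flags it defines (paths and values are illustrative):

python main.py --data_dir ./data --model resnet18 --num_epochs 200 \
    --batch_size 128 --lr 0.1 --lr_decay_step_size 100 --lr_decay_factor 0.1 \
    --save_frequency 50 --model_dir ./checkpoints --device_ids 0 1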
9f9628dbb17329bb8f0ea4ceb9b08d65ac60ec44
|
a3c8b67b91fac686aa98fd20179f8807be3ad4c0
|
/rsbroker/core/upstream.py
|
1ace31aa954eabd78a040a0b49ac38183023f813
|
[
"Apache-2.0"
] |
permissive
|
land-pack/RsBroker
|
d0c6c8fa13cec0c057d24b9f02b20e256b199737
|
d556fda09582e0540cac0eabc163a984e8fc1c44
|
refs/heads/master
| 2021-01-12T07:16:39.853668
| 2016-12-22T07:28:05
| 2016-12-22T07:28:05
| 76,930,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,322
|
py
|
import time
import logging
import ujson
from tornado import gen
from tornado import httpclient
from tornado import httputil
from tornado import ioloop
from tornado import websocket
try:
from util.tools import Log
except ImportError:
logger = logging.getLogger(__name__)
else:
logger = Log().getLog()
APPLICATION_JSON = 'application/json'
DEFAULT_CONNECT_TIMEOUT = 30
DEFAULT_REQUEST_TIMEOUT = 30
class WebSocketClient(object):
"""Base for web socket clients.
"""
DISCONNECTED = 0
CONNECTING = 1
CONNECTED = 2
def __init__(self, io_loop=None,
connect_timeout=DEFAULT_CONNECT_TIMEOUT,
request_timeout=DEFAULT_REQUEST_TIMEOUT):
self.connect_timeout = connect_timeout
self.request_timeout = request_timeout
self._io_loop = io_loop or ioloop.IOLoop.current()
self._ws_connection = None
self._connect_status = self.DISCONNECTED
def connect(self, url):
"""Connect to the server.
:param str url: server URL.
"""
self._connect_status = self.CONNECTING
headers = httputil.HTTPHeaders({'Content-Type': APPLICATION_JSON})
request = httpclient.HTTPRequest(url=url,
connect_timeout=self.connect_timeout,
request_timeout=self.request_timeout,
headers=headers)
ws_conn = websocket.WebSocketClientConnection(self._io_loop, request)
ws_conn.connect_future.add_done_callback(self._connect_callback)
def send(self, data):
"""Send message to the server
:param str data: message.
"""
if self._ws_connection:
self._ws_connection.write_message(ujson.dumps(data))
def close(self, reason=''):
"""Close connection.
"""
if self._connect_status != self.DISCONNECTED:
self._connect_status = self.DISCONNECTED
self._ws_connection and self._ws_connection.close()
self._ws_connection = None
self.on_connection_close(reason)
def _connect_callback(self, future):
if future.exception() is None:
self._connect_status = self.CONNECTED
self._ws_connection = future.result()
self.on_connection_success()
self._read_messages()
else:
self.close(future.exception())
def is_connected(self):
return self._ws_connection is not None
@gen.coroutine
def _read_messages(self):
while True:
msg = yield self._ws_connection.read_message()
if msg is None:
self.close()
break
self.on_message(msg)
def on_message(self, msg):
"""This is called when new message is available from the server.
:param str msg: server message.
"""
pass
def on_connection_success(self):
"""This is called on successful connection ot the server.
"""
pass
def on_connection_close(self, reason):
"""This is called when server closed the connection.
"""
pass
class RTCWebSocketClient(WebSocketClient):
    hb_msg = 'p'  # heartbeat
message = ''
heartbeat_interval = 3
def __init__(self, io_loop=None,
connect_timeout=DEFAULT_CONNECT_TIMEOUT,
request_timeout=DEFAULT_REQUEST_TIMEOUT):
self.connect_timeout = connect_timeout
self.request_timeout = request_timeout
self._io_loop = io_loop or ioloop.IOLoop.current()
self.ws_url = None
self.auto_reconnet = False
self.last_active_time = 0
self.pending_hb = None
super(RTCWebSocketClient, self).__init__(self._io_loop,
self.connect_timeout,
self.request_timeout)
def connect(self, url, auto_reconnet=True, reconnet_interval=10):
# self.url_template = url
# self.ws_url = url % self._node_id
self.ws_url = url
self.auto_reconnet = auto_reconnet
self.reconnect_interval = reconnet_interval
super(RTCWebSocketClient, self).connect(self.ws_url)
def send(self, msg):
super(RTCWebSocketClient, self).send(msg)
self.last_active_time = time.time()
def on_message(self, msg):
self.last_active_time = time.time()
self.dispatch(msg)
def on_connection_success(self):
logger.info('Connect ...')
self.last_active_time = time.time()
self.send_heartbeat()
def on_connection_close(self, reason):
logger.warning('Connection closed reason=%s' % (reason,))
self.pending_hb and self._io_loop.remove_timeout(self.pending_hb)
self.reconnect()
def reconnect(self):
logger.info('Reconnect')
# TODO when reconnect the room server has trigger,
# TODO the url should has new param ~~
# self.ws_url = self.ws_recovery_url % self._nod_id
logger.info("Send node id [%s] to remote server" % self._node_id)
# self.ws_url = self.url_template % self._node_id
if not self.is_connected() and self.auto_reconnet:
self._io_loop.call_later(self.reconnect_interval,
super(RTCWebSocketClient, self).connect, self.ws_url)
def send_heartbeat(self):
if self.is_connected():
now = time.time()
if (now > self.last_active_time + self.heartbeat_interval):
self.last_active_time = now
self.send(self.hb_msg)
self.pending_hb = self._io_loop.call_later(self.heartbeat_interval, self.send_heartbeat)
def dispatch(self, message):
"""
You must override this method!
"""
        print('message .......[%s]' % (message,))
def main():
io_loop = ioloop.IOLoop.instance()
client = RTCWebSocketClient(io_loop)
# ws_url = 'ws://127.0.0.1:8888/ws?ip=127.0.0.1&port=9001&mode=1'
ws_url = 'ws://echo.websocket.org'
client.connect(ws_url, auto_reconnet=True, reconnet_interval=10)
try:
io_loop.start()
except KeyboardInterrupt:
client.close()
if __name__ == '__main__':
main()
|
[
"ada.frank.ak@gmail.com"
] |
ada.frank.ak@gmail.com
|
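The dispatch docstring in the file above says subclasses must override it; a minimal sketch of doing so, assuming the module is importable as rsbroker.core.upstream (per its path in this row):

from tornado import ioloop
from rsbroker.core.upstream import RTCWebSocketClient

class EchoClient(RTCWebSocketClient):
    def dispatch(self, message):
        # Handle each server message; here we just echo it to stdout.
        print('got [%s]' % (message,))

io_loop = ioloop.IOLoop.instance()
client = EchoClient(io_loop)
client.connect('ws://echo.websocket.org', auto_reconnet=True, reconnet_interval=10)
io_loop.start()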
675b214d92c6d69d2cb94daf2533333331d29cb5
|
edf6a50044827f5f24b7fab4806dc25d8887c316
|
/news/migrations/0001_initial.py
|
aa5a3112ead155a681e1a9bdbfb815eb1a9690cf
|
[] |
no_license
|
zhcxk1998/AlgYun
|
4958fdf31bb9009cb6541c39f715410f569b84ff
|
c54b95af8a87a12848401c7088bb7748793e6b63
|
refs/heads/master
| 2020-03-09T04:30:35.322800
| 2018-04-08T01:33:29
| 2018-04-08T01:33:29
| 128,589,517
| 1
| 1
| null | 2018-04-08T02:40:27
| 2018-04-08T02:40:27
| null |
UTF-8
|
Python
| false
| false
| 2,345
|
py
|
# Generated by Django 2.0.3 on 2018-04-03 16:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256, verbose_name='标题')),
('slug', models.CharField(db_index=True, max_length=256, verbose_name='网址')),
('content', models.TextField(blank=True, default='', verbose_name='内容')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
('published', models.BooleanField(default=True, verbose_name='正式发布')),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
],
options={
'verbose_name': '教程',
'verbose_name_plural': '教程',
},
),
migrations.CreateModel(
name='Column',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='栏目名称')),
('slug', models.CharField(db_index=True, max_length=256, verbose_name='栏目网址')),
('intro', models.TextField(default='', verbose_name='栏目简介')),
],
options={
'verbose_name': '栏目',
'verbose_name_plural': '栏目',
'ordering': ['name'],
},
),
migrations.AddField(
model_name='article',
name='column',
field=models.ManyToManyField(to='news.Column', verbose_name='归属栏目'),
),
]
|
[
"dickqi87@gmail.com"
] |
dickqi87@gmail.com
|
9a04690951ef33d0a11036986d4b9e7e88d5b906
|
b039d4f7da5085a4e7d7491acb7bc04f7b896f24
|
/tests/network/test_fees.py
|
683cd8023b40e02b3ed8189ea6bf2916bdcc9128
|
[
"MIT"
] |
permissive
|
d4le/bit
|
c052cfadd79c52add99b6b576d2498119168c478
|
1153162f55c2ed1b007b237d4215d80ab7db429b
|
refs/heads/master
| 2021-01-22T20:43:57.543622
| 2017-03-16T05:15:18
| 2017-03-16T05:15:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
from time import sleep, time
import bit
from bit.network.fees import get_fee, get_fee_cached, set_fee_cache_time
def test_set_fee_cache_time():
original = bit.network.fees.DEFAULT_CACHE_TIME
set_fee_cache_time(30)
updated = bit.network.fees.DEFAULT_CACHE_TIME
assert original != updated
assert updated == 30
set_fee_cache_time(original)
def test_get_fee():
assert get_fee(fast=True) != get_fee(fast=False)
class TestFeeCache:
def test_fast(self):
sleep(0.2)
start_time = time()
set_fee_cache_time(0)
get_fee_cached(fast=True)
initial_time = time() - start_time
start_time = time()
set_fee_cache_time(600)
get_fee_cached(fast=True)
cached_time = time() - start_time
assert initial_time > cached_time
def test_hour(self):
sleep(0.2)
start_time = time()
set_fee_cache_time(0)
get_fee_cached(fast=False)
initial_time = time() - start_time
start_time = time()
set_fee_cache_time(600)
get_fee_cached(fast=False)
cached_time = time() - start_time
assert initial_time > cached_time
def test_expires(self):
sleep(0.2)
set_fee_cache_time(0)
get_fee_cached()
start_time = time()
set_fee_cache_time(600)
get_fee_cached()
cached_time = time() - start_time
sleep(0.2)
start_time = time()
set_fee_cache_time(0.1)
get_fee_cached()
update_time = time() - start_time
assert update_time > cached_time
|
[
"ofekmeister@gmail.com"
] |
ofekmeister@gmail.com
|
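The tests above exercise bit's time-based fee cache. A generic sketch of that TTL-caching pattern, not bit's actual implementation:

import time

def ttl_cache(ttl_seconds):
    """Cache a function's results for ttl_seconds per argument tuple."""
    def decorator(fn):
        cache = {}
        def wrapper(*args, **kwargs):
            key = (args, tuple(sorted(kwargs.items())))
            now = time.time()
            if key in cache and now - cache[key][1] < ttl_seconds:
                return cache[key][0]  # still fresh: return cached value
            value = fn(*args, **kwargs)
            cache[key] = (value, now)
            return value
        return wrapper
    return decorator

@ttl_cache(600)
def fetch_fee(fast=True):
    # Stand-in for an expensive network call.
    return 42 if fast else 7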
6d9d4d609a09d4ffcab3ad82b4ed5491f6b80906
|
26cf7d2d6c3d6d83b17e304aa94d5e8b3fac323a
|
/autoencoder.py
|
a2b7ddcca50d4ad7bc09af0e2d31abe70e518764
|
[] |
no_license
|
zbn123/Autoencoder
|
4ad8f48e33d9b228dceae6d919c3a5320318fc7e
|
13ebeb8f15edecbdd6e362e7769b69053c1f31e0
|
refs/heads/master
| 2021-07-19T08:46:28.559592
| 2017-10-26T20:08:34
| 2017-10-26T20:08:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,667
|
py
|
import tensorflow.contrib.slim as slim
import matplotlib.pyplot as plt
import cPickle as pickle
import tensorflow as tf
import numpy as np
import requests
import random
import time
import gzip
import os
batch_size = 5000
'''
Leaky RELU
'''
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x)
def encoder(x):
e_conv1 = slim.convolution(x, 32, 2, stride=2, activation_fn=tf.identity, normalizer_fn=slim.batch_norm, scope='e_conv1')
e_conv1 = lrelu(e_conv1)
print 'conv1: ', e_conv1
e_conv2 = slim.convolution(e_conv1, 64, 2, stride=2, activation_fn=tf.identity, normalizer_fn=slim.batch_norm, scope='e_conv2')
e_conv2 = lrelu(e_conv2)
print 'conv2: ', e_conv2
# convolutional layer with a leaky Relu activation
e_conv3 = slim.convolution(e_conv2, 128, 2, stride=2, activation_fn=tf.identity, normalizer_fn=slim.batch_norm, scope='e_conv3')
e_conv3 = lrelu(e_conv3)
print 'conv3: ', e_conv3
e_conv3_flat = tf.reshape(e_conv3, [batch_size, -1])
e_fc1 = slim.fully_connected(e_conv3_flat, 256, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='e_fc1')
e_fc1 = lrelu(e_fc1)
print 'fc1: ', e_fc1
e_fc2 = slim.fully_connected(e_fc1, 64, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='e_fc2')
e_fc2 = lrelu(e_fc2)
print 'fc2: ', e_fc2
e_fc3 = slim.fully_connected(e_fc2, 32, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='e_fc3')
e_fc3 = lrelu(e_fc3)
print 'fc3: ', e_fc3
e_fc4 = slim.fully_connected(e_fc3, 8, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='e_fc4')
e_fc4 = lrelu(e_fc4)
print 'fc4: ', e_fc4
return e_fc4
def decoder(x):
print
print 'x: ', x
d_fc1 = slim.fully_connected(x, 32, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='d_fc1')
d_fc1 = lrelu(d_fc1)
print 'd_fc1: ', d_fc1
    d_fc2 = slim.fully_connected(d_fc1, 64, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='d_fc2')
d_fc2 = lrelu(d_fc2)
print 'd_fc2: ', d_fc2
    d_fc3 = slim.fully_connected(d_fc2, 256, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='d_fc3')
d_fc3 = lrelu(d_fc3)
print 'd_fc3: ', d_fc3
d_fc3 = tf.reshape(d_fc3, [batch_size, 4, 4, 16])
print 'd_fc3: ', d_fc3
e_transpose_conv1 = slim.convolution2d_transpose(d_fc3, 64, 2, stride=2, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='e_transpose_conv1')
e_transpose_conv1 = lrelu(e_transpose_conv1)
print 'e_transpose_conv1: ', e_transpose_conv1
e_transpose_conv2 = slim.convolution2d_transpose(e_transpose_conv1, 32, 2, stride=2, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='e_transpose_conv2')
e_transpose_conv2 = lrelu(e_transpose_conv2)
print 'e_transpose_conv2: ', e_transpose_conv2
e_transpose_conv3 = slim.convolution2d_transpose(e_transpose_conv2, 1, 2, stride=2, normalizer_fn=slim.batch_norm, activation_fn=tf.identity, scope='e_transpose_conv3')
e_transpose_conv3 = lrelu(e_transpose_conv3)
e_transpose_conv3 = e_transpose_conv3[:,:28,:28,:]
print 'e_transpose_conv3: ', e_transpose_conv3
return e_transpose_conv3
def train(mnist_train, mnist_test):
with tf.Graph().as_default():
global_step = tf.Variable(0, trainable=False, name='global_step')
# placeholder for mnist images
images = tf.placeholder(tf.float32, [batch_size, 28, 28, 1])
        # encode images to an 8-dim vector
encoded = encoder(images)
        # decode the 8-dim vector to a (28,28) image
decoded = decoder(encoded)
loss = tf.nn.l2_loss(images - decoded)
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
# saver for the model
saver = tf.train.Saver(tf.all_variables())
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
try:
os.mkdir('images/')
except:
pass
try:
os.mkdir('checkpoint/')
except:
pass
ckpt = tf.train.get_checkpoint_state('checkpoint/')
if ckpt and ckpt.model_checkpoint_path:
try:
saver.restore(sess, ckpt.model_checkpoint_path)
print 'Model restored'
except:
print 'Could not restore model'
pass
step = 0
while True:
step += 1
# get random images from the training set
batch_images = random.sample(mnist_train, batch_size)
# send through the network
s = time.time()
_, loss_ = sess.run([train_op, loss], feed_dict={images: batch_images})
t = time.time()-s
print 'Step: ' + str(step) + ' Loss: ' + str(loss_) + ' time: ' + str(t)
if step%100 == 0:
print
print 'Saving model'
print
saver.save(sess, "checkpoint/checkpoint", global_step=global_step)
# get random images from the test set
batch_images = random.sample(mnist_test, batch_size)
# encode them using the encoder, then decode them
encode_decode = sess.run(decoded, feed_dict={images: batch_images})
# write out a few
c = 0
for real, dec in zip(batch_images, encode_decode):
dec, real = np.squeeze(dec), np.squeeze(real)
plt.imsave('images/'+str(step)+'_'+str(c)+'real.png', real)
plt.imsave('images/'+str(step)+'_'+str(c)+'dec.png', dec)
if c == 5:
break
c+=1
def main(argv=None):
# mnist data in gz format
url = 'http://deeplearning.net/data/mnist/mnist.pkl.gz'
# check if it's already downloaded
if not os.path.isfile('mnist.pkl.gz'):
print 'Downloading mnist...'
with open('mnist.pkl.gz', 'wb') as f:
r = requests.get(url)
if r.status_code == 200:
f.write(r.content)
else:
print 'Could not connect to ', url
print 'opening mnist'
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, val_set, test_set = pickle.load(f)
mnist_train = []
mnist_test = []
print 'Reading mnist...'
# reshape mnist to make it easier for understanding convs
for t,l in zip(*train_set):
mnist_train.append(np.reshape(t, (28,28,1)))
for t,l in zip(*val_set):
mnist_train.append(np.reshape(t, (28,28,1)))
for t,l in zip(*test_set):
mnist_test.append(np.reshape(t, (28,28,1)))
mnist_train = np.asarray(mnist_train)
mnist_test = np.asarray(mnist_test)
train(mnist_train, mnist_test)
if __name__ == '__main__':
tf.app.run()
|
[
"cameronfabbri@gmail.com"
] |
cameronfabbri@gmail.com
|
114fc87817862cd893476714a9ddd0fcef1b5e71
|
f4726db4ec192dee3709c6d474b1fb9a743e9d2f
|
/rllib/algorithms/mpc/abstract_solver.py
|
6e9395f231c134c5549b377604bf32b20c842991
|
[
"MIT"
] |
permissive
|
SamueleMeta/optimal_is
|
fc32dbec2eaa1ceb24a70a0c6c7d982bd7cb2c69
|
7d8e0041825acfa003874cd1ad2aec0581f6a9e1
|
refs/heads/master
| 2023-03-05T23:25:22.651481
| 2021-02-13T21:30:30
| 2021-02-13T21:30:30
| 323,657,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,597
|
py
|
"""MPC Algorithms."""
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
import torch.nn as nn
from rllib.dataset.utilities import stack_list_of_tuples
from rllib.util.multiprocessing import run_parallel_returns
from rllib.util.neural_networks.utilities import repeat_along_dimension
from rllib.util.rollout import rollout_actions
from rllib.util.value_estimation import discount_sum
class MPCSolver(nn.Module, metaclass=ABCMeta):
r"""Solve the discrete time trajectory optimization controller.
    .. math:: u_{0:H-1} = \arg\max \sum_{t=0}^{H-1} r(x_t, u_t) + final\_reward(x_H)
When called, it will return the sequence of actions that solves the problem.
Parameters
----------
dynamical_model: state transition model.
reward_model: reward model.
horizon: int.
Horizon to solve planning problem.
gamma: float, optional.
Discount factor.
scale: float, optional.
Scale of covariance matrix to sample.
num_iter: int, optional.
Number of iterations of solver method.
num_samples: int, optional.
Number of samples for shooting method.
termination_model: Callable, optional.
Termination condition.
terminal_reward: terminal reward model, optional.
warm_start: bool, optional.
Whether or not to start the optimization with a warm start.
default_action: str, optional.
Default action behavior.
num_cpu: int, optional.
Number of CPUs to run the solver.
"""
def __init__(
self,
dynamical_model,
reward_model,
horizon=25,
gamma=1.0,
num_iter=1,
num_samples=400,
termination_model=None,
scale=0.3,
terminal_reward=None,
warm_start=True,
clamp=True,
default_action="zero",
action_scale=1.0,
num_cpu=1,
*args,
**kwargs,
):
super().__init__()
self.dynamical_model = dynamical_model
self.reward_model = reward_model
self.termination_model = termination_model
assert self.dynamical_model.model_kind == "dynamics"
assert self.reward_model.model_kind == "rewards"
if self.termination_model is not None:
assert self.termination_model.model_kind == "termination"
self.horizon = horizon
self.gamma = gamma
self.num_iter = num_iter
self.num_samples = num_samples
self.terminal_reward = terminal_reward
self.warm_start = warm_start
self.default_action = default_action
self.dim_action = self.dynamical_model.dim_action[0]
self.mean = None
self._scale = scale
self.covariance = (scale ** 2) * torch.eye(self.dim_action).repeat(
self.horizon, 1, 1
)
if isinstance(action_scale, np.ndarray):
action_scale = torch.tensor(action_scale, dtype=torch.get_default_dtype())
elif not isinstance(action_scale, torch.Tensor):
action_scale = torch.full((self.dim_action,), action_scale)
if len(action_scale) < self.dim_action:
extra_dim = self.dim_action - len(action_scale)
action_scale = torch.cat((action_scale, torch.ones(extra_dim)))
self.action_scale = action_scale
self.clamp = clamp
self.num_cpu = num_cpu
def evaluate_action_sequence(self, action_sequence, state):
"""Evaluate action sequence by performing a rollout."""
trajectory = stack_list_of_tuples(
rollout_actions(
self.dynamical_model,
self.reward_model,
self.action_scale * action_sequence, # scale actions.
state,
self.termination_model,
),
dim=-2,
)
returns = discount_sum(trajectory.reward, self.gamma)
if self.terminal_reward:
terminal_reward = self.terminal_reward(trajectory.next_state[..., -1, :])
returns = returns + self.gamma ** self.horizon * terminal_reward
return returns
@abstractmethod
def get_candidate_action_sequence(self):
"""Get candidate actions."""
raise NotImplementedError
@abstractmethod
def get_best_action(self, action_sequence, returns):
"""Get best action."""
raise NotImplementedError
@abstractmethod
def update_sequence_generation(self, elite_actions):
"""Update sequence generation."""
raise NotImplementedError
def initialize_actions(self, batch_shape):
"""Initialize mean and covariance of action distribution."""
if self.warm_start and self.mean is not None:
next_mean = self.mean[1:, ..., :]
if self.default_action == "zero":
final_action = torch.zeros_like(self.mean[:1, ..., :])
elif self.default_action == "constant":
final_action = self.mean[-1:, ..., :]
elif self.default_action == "mean":
final_action = torch.mean(next_mean, dim=0, keepdim=True)
else:
raise NotImplementedError
self.mean = torch.cat((next_mean, final_action), dim=0)
else:
self.mean = torch.zeros(self.horizon, *batch_shape, self.dim_action)
self.covariance = (self._scale ** 2) * torch.eye(self.dim_action).repeat(
self.horizon, *batch_shape, 1, 1
)
def get_action_sequence_and_returns(
self, state, action_sequence, returns, process_nr=0
):
"""Get action_sequence and returns associated.
These are bundled for parallel execution.
The data inside action_sequence and returns will get modified.
"""
if self.num_cpu > 1:
# Multi-Processing inherits random state.
torch.manual_seed(int(1000 * time.time()))
action_sequence[:] = self.get_candidate_action_sequence()
returns[:] = self.evaluate_action_sequence(action_sequence, state)
def forward(self, state):
"""Return action that solves the MPC problem."""
self.dynamical_model.eval()
batch_shape = state.shape[:-1]
self.initialize_actions(batch_shape)
state = repeat_along_dimension(state, number=self.num_samples, dim=-2)
batch_actions = [
torch.randn(
(self.horizon,) + batch_shape + (self.num_samples, self.dim_action)
)
for _ in range(self.num_cpu)
]
batch_returns = [
torch.randn(batch_shape + (self.num_samples,)) for _ in range(self.num_cpu)
]
for action_, return_ in zip(batch_actions, batch_returns):
action_.share_memory_()
return_.share_memory_()
for _ in range(self.num_iter):
run_parallel_returns(
self.get_action_sequence_and_returns,
[
(state, batch_actions[rank], batch_returns[rank], rank)
for rank in range(self.num_cpu)
],
num_cpu=self.num_cpu,
)
action_sequence = torch.cat(batch_actions, dim=-2)
returns = torch.cat(batch_returns, dim=-1)
elite_actions = self.get_best_action(action_sequence, returns)
self.update_sequence_generation(elite_actions)
if self.clamp:
return self.mean.clamp(-1.0, 1.0)
return self.mean
def reset(self, warm_action=None):
"""Reset warm action."""
self.mean = warm_action
|
[
"metasamuele@gmail.com"
] |
metasamuele@gmail.com
|
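evaluate_action_sequence in the file above reduces a reward trajectory with discount_sum. A minimal sketch of the assumed semantics of such a helper (not rllib's exact implementation):

import torch

def discount_sum(rewards, gamma):
    # rewards: tensor with the horizon on the last dimension.
    # Returns sum_t gamma**t * rewards[..., t].
    horizon = rewards.shape[-1]
    discounts = gamma ** torch.arange(horizon, dtype=rewards.dtype)
    return (rewards * discounts).sum(dim=-1)

returns = discount_sum(torch.tensor([1.0, 1.0, 1.0]), gamma=0.9)  # 1 + 0.9 + 0.81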
c9ddc6020f6335f0c218bb5bf57f71ec81f55e3c
|
62179a165ec620ba967dbc20016e890978fbff50
|
/tests/torch/pruning/test_onnx_export.py
|
841d1b4a7fc6f0b959dc57c9259844df873540c2
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,169
|
py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tests.torch.helpers import load_exported_onnx_version
from tests.torch.pruning.helpers import BigPruningTestModel
from tests.torch.pruning.helpers import DiffConvsModel
from tests.torch.pruning.helpers import GroupNormModel
from tests.torch.pruning.helpers import PruningTestModelConcat
from tests.torch.pruning.helpers import PruningTestModelEltwise
from tests.torch.pruning.helpers import get_basic_pruning_config
pytestmark = pytest.mark.skip(reason="Export as actually deleting filters from the model is currently disabled.")
def find_value_by_name_in_list(obj_list, name):
for obj in obj_list:
if obj.name == name:
return obj
return None
def check_bias_and_weight_shape(node_name, onnx_model_proto, weight_shape, bias_shape):
node_weight = find_value_by_name_in_list(onnx_model_proto.graph.initializer, node_name + ".weight")
node_bias = find_value_by_name_in_list(onnx_model_proto.graph.initializer, node_name + ".bias")
assert node_weight.dims == weight_shape
assert node_bias.dims == bias_shape
def test_pruning_export_simple_model(tmp_path):
model = BigPruningTestModel()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["pruning_init"] = 0.5
nncf_config["compression"]["algorithm"] = "filter_pruning"
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
# Check that conv2 + BN were pruned by output filters
    # WARNING: starting from at least torch 1.7.0, torch.onnx.export fuses BN into the previous
    # convs if torch.onnx.export is done with `training=False`, so this test might fail.
check_bias_and_weight_shape("nncf_module.conv2", onnx_model_proto, [16, 16, 3, 3], [16])
check_bias_and_weight_shape("nncf_module.bn", onnx_model_proto, [16], [16])
# Check that up was pruned by input filters
check_bias_and_weight_shape("nncf_module.up", onnx_model_proto, [16, 32, 3, 3], [32])
# Check that conv3 was pruned by input filters
check_bias_and_weight_shape("nncf_module.conv3", onnx_model_proto, [1, 32, 5, 5], [1])
@pytest.mark.parametrize(
("prune_first", "ref_shapes"),
[
(False, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 32, 3, 3], [16]]]),
(True, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]], [[16, 32, 3, 3], [16]]]),
],
)
def test_pruning_export_concat_model(tmp_path, prune_first, ref_shapes):
model = PruningTestModelConcat()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["algorithm"] = "filter_pruning"
nncf_config["compression"]["params"]["prune_first_conv"] = prune_first
nncf_config["compression"]["pruning_init"] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
@pytest.mark.parametrize(
("prune_first", "ref_shapes"),
[
(False, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 3, 3], [16]]]),
(True, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]], [[16, 16, 3, 3], [16]]]),
],
)
def test_pruning_export_eltwise_model(tmp_path, prune_first, ref_shapes):
model = PruningTestModelEltwise()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["algorithm"] = "filter_pruning"
nncf_config["compression"]["params"]["prune_first_conv"] = prune_first
nncf_config["compression"]["pruning_init"] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
@pytest.mark.parametrize(
("prune_first", "ref_shapes"),
[
(False, [[[32, 1, 2, 2], [32]], [[32, 1, 1, 1], [32]], [[32, 32, 3, 3], [32]], [[16, 4, 1, 1], [16]]]),
(True, [[[16, 1, 2, 2], [16]], [[16, 1, 1, 1], [16]], [[32, 16, 3, 3], [32]], [[16, 4, 1, 1], [16]]]),
],
)
def test_pruning_export_diffconvs_model(tmp_path, prune_first, ref_shapes):
model = DiffConvsModel()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["algorithm"] = "filter_pruning"
nncf_config["compression"]["params"]["prune_first_conv"] = prune_first
nncf_config["compression"]["pruning_init"] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
def test_pruning_export_groupnorm_model(tmp_path):
model = GroupNormModel()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["algorithm"] = "filter_pruning"
nncf_config["compression"]["params"]["prune_first_conv"] = True
nncf_config["compression"]["pruning_init"] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
check_bias_and_weight_shape("nncf_module.conv1", onnx_model_proto, [8, 1, 1, 1], [8])
check_bias_and_weight_shape("nncf_module.conv2", onnx_model_proto, [16, 8, 1, 1], [16])
|
[
"noreply@github.com"
] |
openvinotoolkit.noreply@github.com
|
dc7b599f8daa973d59789d42ded36ded640ad411
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/16165060.py
|
6c65b7ec1a29a4f35185a3b5334145f51a786eb9
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/16165060.py generated: Wed, 25 Jan 2017 15:25:15
#
# Event Type: 16165060
#
# ASCII decay Descriptor: [ Xi_bc+ -> (D+ -> K- pi+ pi+) p+ K- ]cc
#
from Configurables import Generation
Generation().EventType = 16165060
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "GenXiccProduction"
Generation().PileUpTool = "FixedLuminosityForRareProcess"
from Configurables import GenXiccProduction
Generation().Special.addTool( GenXiccProduction )
Generation().Special.GenXiccProduction.BaryonState = "Xi_bc+"
Generation().Special.GenXiccProduction.Commands = ["mixevnt imix 1", "loggrade ivegasopen 0", "loggrade igrade 1", "vegasbin nvbin 300", "counter xmaxwgt 5000000", "confine pscutmin 0.0", "confine pscutmax 7.0"]
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Xibc_DpK,Kpipi=DecProdCut,m=6.9GeV,t=0.4ps.dec"
Generation().Special.CutTool = "XiccDaughtersInLHCb"
from Configurables import XiccDaughtersInLHCb
Generation().Special.addTool( XiccDaughtersInLHCb )
Generation().Special.XiccDaughtersInLHCb.BaryonState = Generation().Special.GenXiccProduction.BaryonState
from Configurables import LHCb__ParticlePropertySvc
LHCb__ParticlePropertySvc().Particles = [ " Xi_bc+ 532 5242 1.0 6.90000000 0.400000e-12 Xi_bc+ 5242 0.00000000", " Xi_bc~- 533 -5242 -1.0 6.90000000 0.400000e-12 anti-Xi_bc- -5242 0.00000000" ]
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
763f3843193e373ffd0a2229f3233e4ebeb3d73f
|
3d14a2263e7547f9d3b463a35e4b25e3abb67306
|
/ccal/get_vcf_info_ann.py
|
4e76f65f9a5dda963ac65d084997f3fcb9b8b31f
|
[
"MIT"
] |
permissive
|
alex-wenzel/ccal
|
ec91d214cd169913909de67a8592b9ce4a82af3f
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
refs/heads/master
| 2020-04-17T02:45:50.976156
| 2019-04-22T01:14:24
| 2019-04-22T01:14:24
| 166,151,841
| 0
| 0
|
MIT
| 2019-01-17T03:10:39
| 2019-01-17T03:10:38
| null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
from .get_vcf_info import get_vcf_info
from .VCF_ANN_FIELDS import VCF_ANN_FIELDS
def get_vcf_info_ann(info, field, n_ann=None):
ann = get_vcf_info(info, "ANN")
if ann:
field_index = VCF_ANN_FIELDS.index(field)
return [ann_.split(sep="|")[field_index] for ann_ in ann.split(sep=",")[:n_ann]]
else:
return []
|
[
"kwatme8@gmail.com"
] |
kwatme8@gmail.com
|
7675618bd38f2113e3e7c2c01134a738a88bf0fd
|
11b9e95ce3d73e62ac07893a36690cb3a0d9041b
|
/NeuralNetwork/随机森林(非神经网络)/RandomForest2.py
|
9bc8c512e9be0109525f67ca4ab60c14b7a1ff62
|
[] |
no_license
|
zhangjinzhi/SimpleTools
|
3fdd2005cb8cf52e8e75d8ec283090a628cebc59
|
be9e766d2e22124ce41bec254664b12fdfacaa4b
|
refs/heads/master
| 2020-12-30T11:51:27.116851
| 2018-01-30T09:47:01
| 2018-01-30T09:47:01
| 91,527,509
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
# coding=utf-8
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['is_train'] = np.random.uniform(0, 1, len(df)) <= .75
df['species'] = pd.Categorical.from_codes(iris.target, iris.target_names)
df.head()
train, test = df[df['is_train']==True], df[df['is_train']==False]
features = df.columns[:4]
clf = RandomForestClassifier(n_jobs=2)
y, _ = pd.factorize(train['species'])
clf.fit(train[features], y)
preds = iris.target_names[clf.predict(test[features])]
pd.crosstab(test['species'], preds, rownames=['actual'], colnames=['preds'])
|
[
"584392383@qq.com"
] |
584392383@qq.com
|
f03019f06080a632cd818000dfeb75a5995b4219
|
fd877cb919622d6a4efa305fb9eaec8a31e8dd37
|
/scripts/ua/rule13.py
|
45196c0db7db6b6593bc008e7f51e87aa635ae06
|
[
"MIT"
] |
permissive
|
NCiobo/iem
|
37df9bc466ffcbe4f6b1f9c29c6b5266559f200c
|
75da5e681b073c6047f5a2fb76721eaa0964c2ed
|
refs/heads/master
| 2021-01-23T09:39:33.090955
| 2017-09-05T16:34:12
| 2017-09-05T16:34:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,671
|
py
|
'''
Compute the difference between the 12 UTC 850 hPa temp and afternoon high
'''
from pyiem.datatypes import temperature
import psycopg2
import datetime
ASOS = psycopg2.connect(database='asos', host='iemdb', user='nobody')
acursor = ASOS.cursor()
POSTGIS = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
pcursor = POSTGIS.cursor()
data = [[] for _ in range(12)]
pcursor.execute("""
select valid, tmpc from raob_profile p JOIN raob_flights f on
(p.fid = f.fid) where f.station in ('KOAX', 'KOVN', 'KOMA') and
p.pressure = 850 and extract(hour from valid at time zone 'UTC') = 12
and tmpc > -40
ORDER by valid ASC
""")
for row in pcursor:
valid = row[0]
t850 = temperature(row[1], 'C')
acursor.execute("""SELECT max(tmpf) from t"""+ str(valid.year) +"""
WHERE station = 'OMA' and valid BETWEEN %s and %s
""", (valid, valid + datetime.timedelta(hours=12)))
row2 = acursor.fetchone()
if row2[0] is None:
continue
high = temperature(row2[0], 'F')
print valid.year, valid.month, high.value('C') - t850.value('C')
data[valid.month-1].append( high.value('C') - t850.value('C') )
import matplotlib.pyplot as plt
(fig, ax) = plt.subplots(1,1)
ax.plot([1,12], [13,13], '-', lw=1.5, color='green', zorder=1)
ax.boxplot(data)
ax.set_title("1960-2013 Omaha Daytime High Temp vs 12 UTC 850 hPa Temp")
ax.set_ylabel(r"Temperature Difference $^\circ$C")
ax.set_xticks(range(1,13))
ax.set_ylim(-20,25)
ax.set_xticklabels(("Jan", 'Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec') )
ax.grid(axis='y')
fig.savefig('test.ps')
import iemplot
iemplot.makefeature('test')
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
6d23619ee3008348e0c51778e10152aa5b555140
|
099172fda019272760aebaa7795def519907e152
|
/com.ibm.streamsx.topology/opt/python/packages/streamsx/topology/schema.py
|
480e8799768afddfd82f92c6a4705a006a30baf0
|
[
"Apache-2.0"
] |
permissive
|
rrea/streamsx.topology
|
58ffd48745e83cd6d054d50405e9f7bbbe73876d
|
d8828ccf3e32cb8a7629f9e325fc14f565cf913e
|
refs/heads/master
| 2020-12-11T07:16:53.701724
| 2016-08-12T19:36:42
| 2016-08-12T19:36:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,601
|
py
|
import enum
class StreamSchema(object) :
"""SPL stream schema."""
def __init__(self, schema):
self.__schema=schema.strip()
def schema(self):
        return self.__schema
def spl_json(self):
_splj = {}
_splj["type"] = 'spltype'
_splj["value"] = self.schema()
return _splj
def extend(self, schema):
"""
Extend a schema by another
"""
base = self.schema()
extends = schema.schema()
new_schema = base[:-1] + ',' + extends[6:]
return StreamSchema(new_schema)
# XML = StreamSchema("tuple<xml document>")
@enum.unique
class CommonSchema(enum.Enum):
"""
Common stream schemas for interoperability within Streams applications.
    Python - Stream contains Python objects
Json - Stream contains JSON objects. Streams with schema Json can be published and subscribed between Streams applications implemented in different languages.
String - Stream contains strings. Streams with schema String can be published and subscribed between Streams applications implemented in different languages.
Binary - Stream contains binary tuples. NOT YET SUPPORTED IN Python.
"""
Python = StreamSchema("tuple<blob __spl_po>")
Json = StreamSchema("tuple<rstring jsonString>")
String = StreamSchema("tuple<rstring string>")
Binary = StreamSchema("tuple<blob binary>")
def schema(self):
        return self.value.schema()
def spl_json(self):
return self.value.spl_json()
def extend(self, schema):
return self.value.extend(schema)
|
[
"debrunne@us.ibm.com"
] |
debrunne@us.ibm.com
|
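StreamSchema.extend above splices two SPL tuple types by dropping the base schema's trailing '>' and the extension's leading 'tuple<'. A short usage sketch, assuming the module is importable as streamsx.topology.schema (per its path in this row):

from streamsx.topology.schema import StreamSchema

base = StreamSchema('tuple<int32 a>')
extended = base.extend(StreamSchema('tuple<rstring b>'))
print(extended.schema())  # tuple<int32 a,rstring b>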
c3dbd2c439c3758ad3b7f4b9ce1618b504c41c15
|
b92eee41d665314bc42043d1ff46c608af5ffdfd
|
/sesion_2/prueba.2.py
|
aeeadbfa75b82afe081d82967acbd794d76c7081
|
[] |
no_license
|
badillosoft/python-economics
|
40efe8326558a8fb93f84fdbd2137428844ee5f3
|
82af43c7a47297ce186dc0e23e30620d46e6693a
|
refs/heads/master
| 2021-01-11T18:55:15.762752
| 2017-05-09T01:15:59
| 2017-05-09T01:15:59
| 79,656,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
# Unit import (importing specific elements)
from geg import suma, suma2
# from geg import *
print suma([1, 3, 5, 7, 9])
print suma([1, 76, 201, 76])
print suma([5])
|
[
"kmmx@hsoft.local"
] |
kmmx@hsoft.local
|
07522c3ec7506ead2e1af745eda523dcd66be75d
|
7d027557cee0ad5ae783480fe79c1b3d11c4eccb
|
/backend/delivery_order/migrations/0001_initial.py
|
0f19ff998f55d6c8023cdddbe7fe661b124db324
|
[] |
no_license
|
crowdbotics-apps/lol-21119
|
b284e53d4b7ebfa9e4c068807aff813f53871c1e
|
78965517edf60545b646e90e77a63aeb7c4515af
|
refs/heads/master
| 2022-12-24T13:05:52.179667
| 2020-10-05T17:54:53
| 2020-10-05T17:54:53
| 301,492,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,307
|
py
|
# Generated by Django 2.2.16 on 2020-10-05 17:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
("menu", "0001_initial"),
("delivery_user_profile", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Bill",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("total_amount", models.FloatField()),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"contact_info",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="bill_contact_info",
to="delivery_user_profile.ContactInfo",
),
),
(
"profile",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="bill_profile",
to="delivery_user_profile.Profile",
),
),
],
),
migrations.CreateModel(
name="PaymentMethod",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("detail", models.TextField()),
],
),
migrations.CreateModel(
name="Order",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("quantity", models.IntegerField()),
("total_price", models.FloatField()),
("status", models.CharField(max_length=20)),
("notes", models.TextField()),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"bill",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="order_bill",
to="delivery_order.Bill",
),
),
(
"item_variant",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="order_item_variant",
to="menu.ItemVariant",
),
),
(
"payment_method",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="order_payment_method",
to="delivery_order.PaymentMethod",
),
),
(
"profile",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="order_profile",
to="delivery_user_profile.Profile",
),
),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
d199b63471d48fe41ba0034f9a18d32ec3530fc2
|
0dfdeec83f0b4acd804b9a52212426e9045edfd2
|
/v13/addons/cristo_concern/models/dashboard.py
|
a62adf086f3cf39937c6a7ab9fea53b4303af462
|
[] |
no_license
|
cokotracy/Cristo
|
2c89d6789a901ec2f1e2f0eca7563d1cebee40cb
|
aea45d82d9b822c0ffe6275cfcce3eeb5283a152
|
refs/heads/main
| 2023-08-30T05:01:02.913064
| 2021-07-06T10:09:29
| 2021-07-06T10:09:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,787
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
class Member(models.Model):
_inherit = 'res.member'
@api.model
def get_user_member_details(self):
res = super(Member, self).get_user_member_details()
# Navigation
con_action_id = self.env.ref("cristo_concern.action_concern").id
con_menu_id = self.env.ref("cristo_concern.cristo_concern_main_menu").id
res[0].update({'con_action_id': con_action_id, 'con_menu_id': con_menu_id, })
if self.user_has_groups('cristo_concern.group_concern_user') or self.user_has_groups('cristo.group_role_cristo_bsa_super_admin'):
total_concern = self.env['cristo.concern'].search_count([])
if self.user_has_groups('cristo.group_role_cristo_religious_institute_admin'):
total_cal = self.env['cristo.concern'].search_count([('user_id','=',self.env.user.id),('institute_id','=',self.env.user.institute_id.id)])
elif self.user_has_groups('cristo.group_role_cristo_religious_province'):
total_cal = self.env['cristo.concern'].search_count([('user_id','=',self.env.user.id),('rel_province_id','=',self.env.user.rel_province_id.id)])
elif self.user_has_groups('cristo.group_role_cristo_religious_house'):
total_cal = self.env['cristo.concern'].search_count([('user_id','=',self.env.user.id),('community_id','=',self.env.user.community_id.id)])
elif self.user_has_groups('cristo.group_role_cristo_apostolic_institution'):
total_cal = self.env['cristo.concern'].search_count([('user_id','=',self.env.user.id),('institution_id','=',self.env.user.institution_id.id)])
res[0].update({'concern':1,'total_concern':total_concern})
return res
|
[
"charlesit333@gmail.com"
] |
charlesit333@gmail.com
|
43e00d123f74fceb0d9123050968b8d5a1ecd98c
|
a86293a2033c06410aa8ed19bcbce8ca55ea3c55
|
/src/client_libraries/python/microsoft/dynamics/customerinsights/api/models/workflow_refresh_schedule_py3.py
|
686a18f736ebacb397d4da542efc3475aa595443
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
ramotheonly/Dynamics365-CustomerInsights-Client-Libraries
|
a3ca28aa78d2b5509e65d9895ff4a0d42d05f611
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
refs/heads/main
| 2023-08-02T08:09:04.063030
| 2021-09-28T22:42:15
| 2021-09-28T22:42:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,286
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WorkflowRefreshSchedule(Model):
"""Represents a DAG refresh schedule.
Variables are only populated by the server, and will be ignored when
sending a request.
:param operation_type: Possible values include: 'none', 'ingestion',
'derivedEntity', 'dataPreparation', 'map', 'match', 'merge',
'profileStore', 'search', 'activity', 'attributeMeasures',
'entityMeasures', 'measures', 'segmentation', 'enrichment',
'intelligence', 'aiBuilder', 'insights', 'export', 'modelManagement',
'relationship', 'roleAssignment', 'analysis', 'all'
:type operation_type: str or
~microsoft.dynamics.customerinsights.api.models.enum
:param sub_type: Possible values include: 'templatedMeasures',
'createAnalysisModel', 'linkAnalysisModel'
:type sub_type: str or
~microsoft.dynamics.customerinsights.api.models.enum
:param identifiers: Gets the identifiers of the schedule
:type identifiers: list[str]
:param job_type: Possible values include: 'full', 'incremental'
:type job_type: str or
~microsoft.dynamics.customerinsights.api.models.enum
:ivar is_active: Gets a value indicating whether the schedule is active.
:vartype is_active: bool
:ivar timezone_id: Gets the ID of the timezone
:vartype timezone_id: str
:ivar cron_schedules: Gets the schedule in CRON format
:vartype cron_schedules: list[str]
:ivar schedule_id: Gets the ID of the schedule
:vartype schedule_id: str
:ivar instance_id: Gets the Customer Insights instance id associated with
this object.
:vartype instance_id: str
"""
_validation = {
'is_active': {'readonly': True},
'timezone_id': {'readonly': True},
'cron_schedules': {'readonly': True},
'schedule_id': {'readonly': True},
'instance_id': {'readonly': True},
}
_attribute_map = {
'operation_type': {'key': 'operationType', 'type': 'str'},
'sub_type': {'key': 'subType', 'type': 'str'},
'identifiers': {'key': 'identifiers', 'type': '[str]'},
'job_type': {'key': 'jobType', 'type': 'str'},
'is_active': {'key': 'isActive', 'type': 'bool'},
'timezone_id': {'key': 'timezoneId', 'type': 'str'},
'cron_schedules': {'key': 'cronSchedules', 'type': '[str]'},
'schedule_id': {'key': 'scheduleId', 'type': 'str'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
}
def __init__(self, *, operation_type=None, sub_type=None, identifiers=None, job_type=None, **kwargs) -> None:
super(WorkflowRefreshSchedule, self).__init__(**kwargs)
self.operation_type = operation_type
self.sub_type = sub_type
self.identifiers = identifiers
self.job_type = job_type
self.is_active = None
self.timezone_id = None
self.cron_schedules = None
self.schedule_id = None
self.instance_id = None
|
[
"michaelajohnston@mac.com"
] |
michaelajohnston@mac.com
|
4b20687780fb0fdf9ee2aa84da2e4f050fe6925d
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_pragma17.py
|
3146b2f412bfb79c7789014ace702fe920a73cb6
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=8
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=6
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma17.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
155563938f93e28b814ded6a968be8956f270d22
|
1287bbb696e240dd0b92d56d4fdf4246370f3e14
|
/numpy-demo.py
|
fd2619fdc2e82feac4224f8733d84b6b6ec43540
|
[] |
no_license
|
omerfarukcelenk/PythonCalismalari
|
ed0c204084860fddcb892e6edad84fdbc1ed38ec
|
28da12d7d042ec306f064fb1cc3a1a026cb57b74
|
refs/heads/main
| 2023-04-13T18:23:15.270020
| 2021-04-26T21:06:21
| 2021-04-26T21:06:21
| 361,893,918
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,151
|
py
|
import numpy as np
# 1- Create a numpy array holding the values (10,15,30,45,60).
result = np.array([10,15,30,45,60])
# 2- Create a numpy array with the numbers from 5 to 15.
result = np.arange(5,15)
# 3- Create a numpy array from 50 to 100 in steps of 5.
result = np.arange(50,100,5)
# 4- Create an array of 10 zeros.
result = np.zeros(10)
# 5- Create an array of 10 ones.
result = np.ones(10)
# 6- Generate 5 evenly spaced numbers between 0 and 100.
result = np.linspace(0,100,5)
# 7- Generate 5 random integers between 10 and 30.
result = np.random.randint(10,30,5)
# 8- Generate 10 numbers between -1 and 1 (note: randn actually samples a standard normal, which is unbounded).
result = np.random.randn(10)
# 9- Create a random (3x5) matrix with values between 10 and 50.
# result = np.random.randint(10,50,15).reshape(3,5)
# 10- Compute the row and column sums of the generated matrix.
matris = np.random.randint(-50,50,15).reshape(3,5)
# rowTotal = matris.sum(axis = 1)
# colTotal = matris.sum(axis = 0)
print(matris)
# print(rowTotal)
# print(colTotal)
# 11- What are the maximum, minimum and mean of the generated matrix?
result = matris.max()
result = matris.min()
result = matris.mean()
# 12- What is the index of the largest value in the matrix?
result = matris.argmax()
result = matris.argmin()
# 13- Select the first 3 elements of the array containing the numbers 10-20.
arr = np.arange(10,20)
print(arr)
result = arr[0:3]
# 14- Print the elements of the array in reverse.
result = arr[::-1]
# 15- Select the first row of the matrix.
result = matris[0]
# 16- Which element sits at row 2, column 3 of the matrix?
result = matris[1,2]
# 17- Select the first element of every row of the matrix.
result = matris[:,0]
# 18- Square every element of the matrix.
result = matris ** 2
# 19- Which of the matrix elements are positive even numbers?
# Use the range (-50,+50).
ciftler = matris[matris % 2 == 0]
result = ciftler[ciftler>0]
print(result)
|
[
"omerfar0133@gmail.com"
] |
omerfar0133@gmail.com
|
5f1384b0ccaccf589e497ee288792f747e49c667
|
e2860eb874db045fb8d0279566a935af907e5bdf
|
/keras01/keras24_cancer_binary.py
|
224913ac725bca03a09cec370f03a7af20a3ef3f
|
[] |
no_license
|
MinseokCHAE/Bitcamp2_new
|
dda7990907cb136c2e709a345eec634dfdb6ac02
|
849adb5a330b621f1c681f0b5e92005d1281a44d
|
refs/heads/main
| 2023-08-31T03:28:18.068561
| 2021-10-05T00:48:52
| 2021-10-05T00:48:52
| 390,228,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,010
|
py
|
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.callbacks import EarlyStopping
#1. Data
datasets = load_breast_cancer()
'''
print(datasets)
print(datasets.keys())
print(datasets.feature_names)
print(datasets.DESCR)
'''
x = datasets.data
y = datasets.target
'''
print(x.shape) # (569, 30)
print(y.shape) # (569, )
print(y[:20]) # [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]
print(np.unique(y)) # [0 1]
'''
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=66)
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
#2. Modeling
input = Input(shape=(30, ))
x = Dense(128, activation='relu')(input)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(32, activation='relu')(x)
output = Dense(1, activation='sigmoid')(x)
model = Model(inputs=input, outputs=output)
#3. Compile, train
es = EarlyStopping(monitor='loss', patience=5, mode='min', verbose=1)
num_epochs = 100
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['mse', 'accuracy'])
model.fit(x_train, y_train, epochs=num_epochs, batch_size=8,
validation_split=0.1, callbacks=[es])
#4. Evaluate, predict
loss = model.evaluate(x_test, y_test)
y_predict = model.predict(x_test[-5:-1])
print('epochs = ', num_epochs)
print('loss = ', loss[0])
print('mse = ', loss[1])
print('accuracy = ', loss[2])
print(y_test[-5:-1])
print(y_predict)
'''
epochs = 100
loss = 0.2840605676174164
mse = 0.0262874998152256
accuracy = 0.9736841917037964
[1 0 1 1]
[[1.0000000e+00]
[1.7312156e-16]
[1.0000000e+00]
[1.0000000e+00]]
'''
|
[
"minseok6739@naver.com"
] |
minseok6739@naver.com
|
962812ea95c14c421b5d385ad22db70190075490
|
3284c2a6e99c27c9a72de68f60284b7865a7fb0c
|
/Principles_of_Computing/poc_fifteen_testsuite.py
|
d67f71a5ed434da0c1ac0fb0994003f591b64614
|
[] |
no_license
|
bbusenius/Projects
|
e6c972a4356d1ece978b402004cd0bdf23279c98
|
57240764f64dd66ce69336450509e8556dbc597e
|
refs/heads/master
| 2022-09-17T17:18:08.884037
| 2022-08-27T21:39:55
| 2022-08-27T21:39:55
| 20,620,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,706
|
py
|
import random
from Fifteen import Puzzle
def test_lower_row_invariant(puzzle, start):
""" test lower row invariants """
assert start[0] > 1, "not an interior tile"
assert puzzle.lower_row_invariant(*start)
if start[1] == 0:
puzzle.solve_col0_tile(start[0])
else:
puzzle.solve_interior_tile(*start)
if start[1] > 0:
return puzzle.lower_row_invariant(start[0], start[1]-1)
else:
return puzzle.lower_row_invariant(start[0]-1, puzzle.get_width()-1)
def test_upper_row_invariants(puzzle, start):
""" test row0 and row1 invariants """
if start[0] == 1:
assert puzzle.row1_invariant(start[1])
puzzle.solve_row1_tile(start[1])
return puzzle.row0_invariant(start[1])
else:
assert puzzle.row0_invariant(start[1])
puzzle.solve_row0_tile(start[1])
return puzzle.row1_invariant(start[1]-1)
def test_2x2(puzzle, dummy=None):
""" test if puzzle's top-left corner is correct """
assert puzzle.row1_invariant(1)
puzzle.solve_2x2()
return is_correct(puzzle)
def test_game(puzzle, dummy=None):
""" complete puzzle test runner """
move_str = generate_random_move_str()
valid_moves = []
for move in move_str:
try:
puzzle.update_puzzle(move)
valid_moves.append(move)
except AssertionError:
pass # ignore invalid moves
print "puzzle string: %s\n" % "".join(valid_moves)
print puzzle
result = puzzle.solve_puzzle()
return is_correct(puzzle)
def run_test(puzzle, start, name, complete=False, stats=[0,0]):
""" run single test """
print "running test '%s'" % name
if complete:
test_func = test_game
else:
print puzzle
if start is None:
test_func = test_2x2
else:
test_func = test_lower_row_invariant if start[0] >= 2 else test_upper_row_invariants
if test_func(puzzle, start):
stats[0] += 1
print puzzle
print "test #%d: '%s' passed. total=%d/%d\n" % (sum(stats), name, stats[0], stats[1])
else:
stats[1] += 1
print puzzle
print "test #%d: '%s' failed. total=%d/%d\n" % (sum(stats), name, stats[0], stats[1])
def run_tests_interior():
""" interior test runner """
base = Puzzle(4, 5, [[10, 11, 12, 9, 8],
[7, 6, 5, 4, 3],
[2, 1, 0, 13, 14],
[15, 16, 17, 18, 19]])
obj = base.clone()
run_test(obj, (2,2), "interior same col")
obj = base.clone()
obj.set_number(1,1, 12)
obj.set_number(0,2, 6)
run_test(obj, (2,2), "interior half left")
obj = base.clone()
obj.set_number(1,3, 12)
obj.set_number(0,2, 4)
run_test(obj, (2,2), "interior half right")
obj = base.clone()
obj.set_number(0,0, 12)
obj.set_number(0,2, 10)
run_test(obj, (2,2), "interior upper left")
obj = base.clone()
obj.set_number(0,4, 12)
obj.set_number(0,2, 8)
run_test(obj, (2,2), "interior upper right")
obj = base.clone()
obj.set_number(2,0, 12)
obj.set_number(0,2, 2)
run_test(obj, (2,2), "interior same row")
obj = base.clone()
obj.set_number(2,1, 12)
obj.set_number(0,2, 1)
run_test(obj, (2,2), "interior short path")
def run_tests_col0():
""" column 0 test runner """
base = Puzzle(4, 5, [[10, 6, 5, 4, 3],
[2, 1, 8, 9, 7],
[0, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
obj = base.clone()
obj.set_number(1,0, 10)
obj.set_number(0,0, 2)
run_test(obj, (2,0), "col0 short path")
obj = base.clone()
run_test(obj, (2,0), "col0 upper left")
obj = base.clone()
obj.set_number(0,4, 10)
obj.set_number(0,0, 3)
run_test(obj, (2,0), "col0 upper right")
obj = base.clone()
obj.set_number(1,2, 10)
obj.set_number(0,0, 8)
run_test(obj, (2,0), "col0 half right")
obj = base.clone()
obj.set_number(1,1, 10)
obj.set_number(0,0, 1)
run_test(obj, (2,0), "col0 diagonal")
def run_tests_row1():
""" row 1 test runner """
base = Puzzle(4, 5, [[9, 4, 6, 5, 1],
[7, 3, 8, 2, 0],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
obj = base.clone()
run_test(obj, (1,4), "row1 upper left")
base = Puzzle(4, 5, [[4,7,2,6,9],
[5,3,8,1,0],
[10,11,12,13,14],
[15,16,17,18,19]])
obj = base.clone()
run_test(obj, (1,4), "row1 upper right")
obj = base.clone()
obj.set_number(1,0, 9)
obj.set_number(0,0, 7)
run_test(obj, (1,4), "row1 lower left")
obj = base.clone()
obj.set_number(1,4, 9)
obj.set_number(1,3, 0)
obj.set_number(0,0, 2)
obj.set_number(0,4, 4)
obj.set_number(0,1, 1)
run_test(obj, (1,3), "row1 lower half left")
obj = base.clone()
obj.set_number(1,4, 9)
obj.set_number(1,3, 0)
obj.set_number(1,2, 6)
obj.set_number(0,2, 8)
obj.set_number(0,4, 4)
obj.set_number(0,1, 1)
run_test(obj, (1,3), "row1 upper half left")
def run_tests_row0():
""" row 0 test runner """
base = Puzzle(4, 5, [[1, 5, 6, 0, 4],
[7, 3, 2, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
obj = base.clone()
run_test(obj, (0,3), "row0 lower half left")
obj = base.clone()
obj.set_number(0,1, 3)
obj.set_number(1,1, 5)
run_test(obj, (0,3), "row0 upper half left")
obj = base.clone()
obj.set_number(1,2, 3)
obj.set_number(1,1, 2)
run_test(obj, (0,3), "row0 diagonal")
obj = base.clone()
obj.set_number(1,0, 3)
obj.set_number(1,1, 7)
run_test(obj, (0,3), "row0 lower left")
obj = base.clone()
obj.set_number(0,0, 3)
obj.set_number(1,1, 1)
run_test(obj, (0,3),"row0 upper left")
obj = Puzzle(4, 5, [[1, 2, 0, 3, 4],
[6, 5, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
obj.solve_row0_tile(2)
def run_tests_2x2():
""" 2x2 test runner """
base = Puzzle(4, 5, [[1, 6, 2, 3, 4],
[5, 0, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
obj = base.clone()
run_test(obj, None, "2x2 #1")
obj = base.clone()
obj.set_number(0,0, 6)
obj.set_number(0,1, 5)
obj.set_number(1,0, 1)
obj.set_number(1,1, 0)
run_test(obj, None, "2x2 #2")
base = Puzzle(3, 3, [[4, 3, 2], [1, 0, 5], [6, 7, 8]])
obj = base.clone()
run_test(obj, None, "2x2 #3")
def run_tests_game():
""" complete game test runner """
sizes = [(2,2), (2,3), (3,2), (3,3), (5,4), (2,5), (5,2)]
for size in sizes:
run_test(Puzzle(*size), None, "random Puzzle(%d,%d)" % size, True)
def is_correct(puzzle):
for row in range(puzzle.get_height()):
for col in range(puzzle.get_width()):
if not puzzle.current_position(row, col) == (row, col):
return False
return True
def generate_random_move_str():
""" helper method to generate a random solvable puzzle """
    num = 100
    moves = list("r" * num + "l" * num + "u" * num + "d" * num)
    random.shuffle(moves)
    return "".join(moves)
run_tests_interior()
#run_tests_col0()
#run_tests_row1()
#run_tests_row0()
#run_tests_2x2()
#run_tests_game()
|
[
"bbusenius@users.noreply.github.com"
] |
bbusenius@users.noreply.github.com
|
c98db546b314ceed3b99d5a290116ca53e79c0be
|
5e45f1d1d9f58aa1456777b0d75334d6efd43840
|
/challenges/contests/code_forces/round310/b.py
|
ddeeef394ad7437d7d538332cc5b25d2b0c84464
|
[] |
no_license
|
missingdays/nerdy.school
|
604953dc9b3c38a0f71793f066ce2707aa980dae
|
051673e0ebc54bc2f7e96a6477697d1d528dc45c
|
refs/heads/master
| 2021-01-17T08:10:19.558851
| 2016-06-06T15:29:01
| 2016-06-06T15:29:01
| 59,897,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 missingdays <missingdays@missingdays>
#
# Distributed under terms of the MIT license.
"""
"""
def rotate(a, n):
clock = True
for i in range(n):
if clock:
if a[i] == n-1:
a[i] = 0
else:
a[i] += 1
else:
if a[i] == 0:
a[i] = n-1
else:
a[i] -= 1
clock = not clock
return a
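# Added worked example (illustrative): rotate([0, 1, 2], 3) gives [1, 0, 0];
# even positions step forward modulo n, odd positions step backward.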
def check(a):
for i in range(len(a)):
if a[i] != i:
return False
return True
n = int(input())
a = list(map(int, input().split()))
while a[0] != 0:
a = rotate(a, n)
yes = check(a)
if yes:
print("YES")
else:
print("NO")
|
[
"rebovykin@gmail.com"
] |
rebovykin@gmail.com
|
2d226f5e77c40290e347888ab04c3fd10c6a2a14
|
d92330be8ea281bdfefff5d17039b1a6d44057dc
|
/src/stiamro/startup.py
|
478af5d7aa11eaad8202de82c16120520a35f02a
|
[] |
no_license
|
avoinea/stiamro
|
2af6f2329abafb59b7e6b54abacb95c8f6b3d697
|
5ee6ec3b754a776cb87a9fa452e21cb2afbf38f9
|
refs/heads/master
| 2021-01-18T14:38:49.669592
| 2011-03-11T10:39:44
| 2011-03-11T10:39:44
| 1,397,488
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
import os
import sys
import code
import zdaemon.zdctl
import zope.app.wsgi
import zope.app.debug
def application_factory(global_conf, conf='zope.conf'):
zope_conf = os.path.join(global_conf['here'], conf)
return zope.app.wsgi.getWSGIApplication(zope_conf)
def interactive_debug_prompt(zope_conf='zope.conf'):
db = zope.app.wsgi.config(zope_conf)
debugger = zope.app.debug.Debugger.fromDatabase(db)
# Invoke an interactive interpreter shell
banner = ("Welcome to the interactive debug prompt.\n"
"The 'root' variable contains the ZODB root folder.\n"
"The 'app' variable contains the Debugger, 'app.publish(path)' "
"simulates a request.")
code.interact(banner=banner, local={'debugger': debugger,
'app': debugger,
'root': debugger.root()})
class ControllerCommands(zdaemon.zdctl.ZDCmd):
def do_debug(self, rest):
interactive_debug_prompt()
def help_debug(self):
print "debug -- Initialize the application, providing a debugger"
print " object at an interactive Python prompt."
def zdaemon_controller(zdaemon_conf='zdaemon.conf'):
args = ['-C', zdaemon_conf] + sys.argv[1:]
zdaemon.zdctl.main(args, options=None, cmdclass=ControllerCommands)
|
[
"alin@serenity.(none)"
] |
alin@serenity.(none)
|
0823d037b25810b90b0805adedcdad019270008c
|
301b039050c00a9efa4f3a5635e8b633f8adf988
|
/caffe2/experiments/python/SparseTransformer.py
|
c09bb09574798a2db7772eb526185e515e98feb4
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
sunpan822/caffe2
|
9704b6fe556d272fbedfd6edfdb796f6a8f02970
|
a3c56d892eb85054b4e7cbd1cf0a0d07422ae796
|
refs/heads/master
| 2020-04-12T14:31:45.919799
| 2019-04-19T04:10:40
| 2019-04-19T04:10:40
| 162,555,100
| 1
| 0
|
Apache-2.0
| 2018-12-20T09:14:48
| 2018-12-20T09:14:47
| null |
UTF-8
|
Python
| false
| false
| 6,362
|
py
|
## @package SparseTransformer
# Module caffe2.experiments.python.SparseTransformer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace
import scipy.sparse
class NetDefNode():
def __init__(self, name, optype, p=None, op=None):
self.name = name
self.optype = optype
self.ops = {}
self.prev = {}
self.insertInput(p)
self.visited = False
self.op = op
def insertInput(self, p):
"""
Insert input of this op
also maintain the output of previous op
p: a node or a list of node
"""
if isinstance(p, list):
for i in p:
self.prev[i.name] = i
i.ops[self.name] = self
elif isinstance(p, NetDefNode):
self.prev[p.name] = p
p.ops[self.name] = self
def deleteInput(self, p):
if isinstance(p, NetDefNode):
del self.prev[p.name]
del p.ops[self.name]
def maskNallocate(weight_name):
"""
Combine mask and weights
create wcsr, iw, jw, return their names
"""
w = workspace.FetchBlob(weight_name)
w_csr = scipy.sparse.csr_matrix(w)
wcsr = w_csr.data
iw = w_csr.indptr
jw = w_csr.indices
workspace.FeedBlob(weight_name + "wcsr", wcsr)
workspace.FeedBlob(weight_name + "iw", iw)
workspace.FeedBlob(weight_name + "jw", jw)
return weight_name + "wcsr", weight_name + "iw", weight_name + "jw"
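# --- Added illustrative sketch (not part of the original module) ---
# What the CSR split above produces for a small dense matrix; the example
# matrix is an assumption chosen only for illustration.
def _csr_demo():
    import numpy as np
    w = np.array([[0., 2., 0.], [1., 0., 3.]])
    w_csr = scipy.sparse.csr_matrix(w)
    # data: nonzero values; indptr: per-row offsets; indices: column ids.
    return w_csr.data, w_csr.indptr, w_csr.indices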
def transFCRelu(cur, id2node, name2id, ops, model):
"""
Add trans before and after this FC_Prune->(Relu)->FC_Prune chain.
"""
# 1. add trans before the start of this chain
# assuming that cur is a FC_Prune, and it has only one input
pre = cur.prev.itervalues().next()
# Create a node /op and insert it.
# TODO(wyiming): check whether it is correct here
current_blob = model.Transpose(cur.op.input[0], cur.op.input[0] + "_trans")
# print model.net.Proto()
trans_op = model.net.Proto().op[-1]
trans_node = NetDefNode(trans_op.output[0], "Transpose", pre, trans_op)
trans_node.visited = True
pre_new = trans_node
# 2. use while loop to visit the chain
while True:
# breakup with the parent
cur.deleteInput(pre)
if not (cur.optype == "FC_Prune" or cur.optype == "Relu"):
print("Reaching the end of the chain")
break
if len(cur.ops) > 1:
print("A FC/Relu giving more than 1 useful outputs")
if cur.optype == "FC_Prune":
op = cur.op
wcsr, iw, jw = maskNallocate(op.input[1])
bias_name = op.input[3]
# TODO(wyiming): create a new Op here
current_blob = model.FC_Sparse(current_blob,
cur.op.output[0] + "_Sparse",
wcsr, iw, jw, bias_name)
sps_op = model.net.Proto().op[-1]
sps_node = NetDefNode(cur.op.output[0] + "_Sparse",
"FC_Sparse",
pre_new, sps_op)
sps_node.visited = True
pre_new = sps_node
if cur.optype == "Relu":
op = cur.op
current_blob = model.Relu(current_blob, current_blob)
rel_op = model.net.Proto().op[-1]
rel_node = NetDefNode(str(current_blob), "Relu",
pre_new, rel_op)
rel_node.visited = True
pre_new = rel_node
cur.visited = True
pre = cur
flag = False
for _, temp in cur.ops.iteritems():
if temp.optype == "Relu" or temp.optype == "FC_Prune":
flag = True
cur = temp
if not flag:
# assume that there is only 1 output that is not PrintOP
cur = cur.ops.itervalues().next()
cur.deleteInput(pre)
print("No FC/RElu children")
print(cur.op.type)
break
# 3. add trans after this chain like 1.
current_blob = model.Transpose(current_blob, pre.op.output[0])
trans_op = model.net.Proto().op[-1]
trans_node = NetDefNode(str(current_blob), "Transpose", pre_new, trans_op)
trans_node.visited = True
cur.insertInput(trans_node)
print(cur.prev)
print(trans_node.ops)
def Prune2Sparse(cur, id2node, name2id, ops, model):
# Assume that FC and Relu takes in only 1 input;
# If not raise warning
if not cur.visited and cur.optype == "FC_Prune":
transFCRelu(cur, id2node, name2id, ops, model)
cur.visited = True
for name, n in cur.ops.iteritems():
Prune2Sparse(n, id2node, name2id, ops, model)
def net2list(net_root):
"""
Use topological order(BFS) to print the op of a net in a list
"""
bfs_queue = []
op_list = []
cur = net_root
for _, n in cur.ops.iteritems():
bfs_queue.append(n)
while bfs_queue:
node = bfs_queue[0]
bfs_queue = bfs_queue[1:]
op_list.append(node.op)
for _, n in node.ops.iteritems():
bfs_queue.append(n)
return op_list
def netbuilder(model):
print("Welcome to model checker")
proto = model.net.Proto()
net_name2id = {}
net_id2node = {}
net_root = NetDefNode("net_root", "root", None)
for op_id, op in enumerate(proto.op):
if op.type == "Print":
continue
op_name = '%s/%s (op#%d)' % (op.name, op.type, op_id) \
if op.name else '%s (op#%d)' % (op.type, op_id)
# print(op_name)
op_node = NetDefNode(op_name, op.type, op=op)
net_id2node[op_id] = op_node
if_has_layer_input = False
for input_name in op.input:
if input_name not in net_name2id:
# assume that un_occured name are non_layers
# TODO: write a non-layer checker and log it
continue
op_node.insertInput(net_id2node[net_name2id[input_name]])
if_has_layer_input = True
if not if_has_layer_input:
op_node.insertInput(net_root)
for output_name in op.output:
net_name2id[output_name] = op_id
return net_root, net_name2id, net_id2node
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
10d6c38f461f0064766acde067ce1501198f039b
|
b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e
|
/build/shogun_lib/applications/edrt/tutorial_examples/isomap.py
|
968afc975fb56b891080841483da3d0476df5337
|
[] |
no_license
|
behollis/muViewBranch
|
384f8f97f67723b2a4019294854969d6fc1f53e8
|
1d80914f57e47b3ad565c4696861f7b3213675e0
|
refs/heads/master
| 2021-01-10T13:22:28.580069
| 2015-10-27T21:43:20
| 2015-10-27T21:43:20
| 45,059,082
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
import modshogun as sg
import data
import numpy as np
# load data
feature_matrix = data.swissroll()
# create features instance
features = sg.RealFeatures(feature_matrix)
# create Isomap converter instance
converter = sg.Isomap()
# set target dimensionality
converter.set_target_dim(2)
# compute embedding with Isomap method
embedding = converter.embed(features)
# enable landmark approximation
converter.set_landmark(True)
# set number of landmarks
converter.set_landmark_number(100)
# set number of threads
converter.parallel.set_num_threads(2)
# compute approximate embedding
approx_embedding = converter.embed(features)
# disable landmark approximation
converter.set_landmark(False)
# compute cosine distance matrix 'manually'
N = features.get_num_vectors()
distance_matrix = np.zeros((N,N))
for i in range(N):
for j in range(N):
distance_matrix[i,j] = \
np.cos(np.linalg.norm(feature_matrix[:,i]-feature_matrix[:,j],2))
# create custom distance instance
distance = sg.CustomDistance(distance_matrix)
# construct embedding based on created distance
converter.embed_distance(distance)
|
[
"prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305"
] |
prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305
|
7e22946ff315b5f228a8591af59455b54238cba7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03151/s822034196.py
|
4313c1c0f2ac588afa27facef6408828aef26183
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
n = int(input())
a_list = list(map(int, input().split()))
b_list = list(map(int, input().split()))
if sum(a_list) < sum(b_list):
print(-1)
exit()
a_sub_b = [a_list[i] - b_list[i] for i in range(n)]
insufficients = [x for x in a_sub_b if x < 0]
cnt = len(insufficients)
sum_insuf = sum(insufficients)
for x in sorted(a_sub_b, reverse=True):
if sum_insuf >= 0:
break
sum_insuf += x
cnt += 1
print(cnt)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
788ba397c4eae832f0db90c5895d6a977650d8ce
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_8/crnkee002/question2.py
|
cb2b9f4a82509a3b49d881ae545369931ced4326
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
"""A8Q2 - Character Pairs
5/3/2012
CRNKEE002"""
def main():
message = input('Enter a message:\n')
print('Number of pairs:', double_chars(0, message))
def double_chars(pos, word):
if len(word) == 1 or len(word) == 0:
return 0
elif (word[pos] == word[pos+1]) and (pos+1 == len(word)-1):
return 1
elif (word[pos] == word[pos+1]) and (pos+1 < len(word)-1):
return 1 + double_chars(0, word[2::])
elif (word[pos] != word[pos+1]) and (pos+1 == len(word)-1):
return 0
elif (word[pos] != word[pos+1]) and (pos+1 < len(word)-1):
return double_chars(0, word[1::])
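# Added usage note (illustrative): double_chars(0, "aabbc") returns 2 for the
# "aa" and "bb" pairs; a matched pair recurses on word[2:], so pairs never overlap.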
if __name__ == '__main__':
main()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
d0cac88d4a8c7145000a7513167b23dddeb83fc1
|
f2f88a578165a764d2ebb4a022d19e2ea4cc9946
|
/pyvisdk/do/host_vmci_access_manager_access_spec.py
|
3be9649b6bb56dbd6ab8a434dee41655e7e84b7b
|
[
"MIT"
] |
permissive
|
pombredanne/pyvisdk
|
1ecc68a1bf264095f72f274c776e5868fb302673
|
de24eb4426eb76233dc2e57640d3274ffd304eb3
|
refs/heads/master
| 2021-01-21T16:18:39.233611
| 2014-07-28T19:50:38
| 2014-07-28T19:50:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostVmciAccessManagerAccessSpec(vim, *args, **kwargs):
'''The AccessSpec data object declares an update to the service access granted to
a VM. The given list of services will either be granted in addition to existing
services, replace the existing service or be revoked depending on the mode
specified. In case of a revoke, an empty or non-existing service list indicates
that all granted services should be revoked.'''
obj = vim.client.factory.create('ns0:HostVmciAccessManagerAccessSpec')
# do some validation checking...
if (len(args) + len(kwargs)) < 2:
        raise IndexError('Expected at least 2 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'mode', 'vm' ]
optional = [ 'services', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
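# Added usage sketch (hypothetical connection, VM reference and mode value,
# for illustration only):
#   spec = HostVmciAccessManagerAccessSpec(vim, 'grant', vm_ref,
#                                          services=['backup'])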
|
[
"guy@rzn.co.il"
] |
guy@rzn.co.il
|
1b172b649cf62bea7e4b718c542cb3c43cb28435
|
d38a37c6997f2282b2138fc0a74a82996940dade
|
/loginapp/views.py
|
62e1e750131eabd25d674f07cffda8e604f17a49
|
[] |
no_license
|
ethicalrushi/SignUp
|
9a295c0dcf20ea7eb47fe4af968b0c3d0caf6532
|
6deac079539bb9281f32f19e027d47436d59ce3d
|
refs/heads/master
| 2020-03-08T13:25:28.023977
| 2018-04-05T15:13:08
| 2018-04-05T15:13:08
| 128,157,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,319
|
py
|
from django.shortcuts import render
from .forms import UserForm
from .models import User
from django.contrib.auth.hashers import make_password, check_password
from django.http import HttpResponse
from rest_framework.response import Response
from rest_framework import status, viewsets
from .serializers import UserSerializer
from django.http import JsonResponse
import json
from django.views.generic.base import View
from django.urls import resolve
# Create your view
def register(request):
form = UserForm()
if request.method == 'POST':
form = UserForm(request.POST, request.FILES)
if form.is_valid():
password = form.cleaned_data['password']
print(password)
user = User()
user.password = make_password(password)
user.username = form.cleaned_data['username']
user.fullname = form.cleaned_data['fullname']
user.image = form.cleaned_data['image']
user.save()
print(user.password)
else:
print(form.errors)
return render(request,'register.html',{'form':form,})
else:
return render(request,'register.html',{'form':form,})
return render(request,'register.html',{'form':form,})
class DisplayViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class= UserSerializer
def dataview(request):
return render(request,'data.html')
|
[
"pupalerushikesh@gmail.com"
] |
pupalerushikesh@gmail.com
|
ba51dcc9f9c7d92cde27dcd455523ff50ad8d58e
|
8930d812d545e4a67be14b928212878befa1a535
|
/primes/euler0124.py
|
393a0d38105dda9afc7ccd0dea331099a4a39243
|
[] |
no_license
|
jwodder/euler
|
bd59935e9f359e8760b4140243c907a6c44247b8
|
7549bb38dba746a04dcaa021f0c7dc06342d078b
|
refs/heads/master
| 2020-12-22T20:20:29.311514
| 2016-08-06T01:28:33
| 2016-08-06T01:28:33
| 21,482,466
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,028
|
py
|
#!/usr/bin/python
r"""Ordered radicals
The radical of $n$, $rad(n)$, is the product of the distinct prime factors
of $n$. For example, $504 = 2^3\times 3^2\times 7$, so $rad(504) = 2\times
3\times 7 = 42$.
If we calculate $rad(n)$ for $1\leq n\leq 10$, then sort them on $rad(n)$,
and sorting on $n$ if the radical values are equal, we get:
**Unsorted** **Sorted**
$n$ $rad(n)$ $n$ $rad(n)$ $k$
1 1 1 1 1
2 2 2 2 2
3 3 4 2 3
4 2 8 2 4
5 5 3 3 5
6 6 9 3 6
7 7 5 5 7
8 2 6 6 8
9 3 7 7 9
10 10 10 10 10
Let $E(k)$ be the $k$th element in the sorted $n$ column; for example,
$E(4) = 8$ and $E(6) = 9$.
If $rad(n)$ is sorted for $1\leq n\leq 100000$, find $E(10000)$."""
import sys; sys.path.insert(1, sys.path[0] + '/..')
from eulerlib import primeIter, generateAsc
__tags__ = ['radical', 'prime numbers', 'ordering', 'factorization']
bound = 100000
index = 10000
def solve():
primes = tuple(primeIter(bound=bound))
seen = 0
def nextNodes(x):
rad, ps, nextI = x
if nextI < len(primes):
nextP = primes[nextI]
yield (rad*nextP, ps+[nextP], nextI+1)
if ps:
yield (rad//ps[-1] * nextP, ps[:-1]+[nextP], nextI+1)
for (rad, ps, nextI) in generateAsc([(1,[],0)], nextNodes):
expses = [rad]
for p in ps:
for x in expses[:]:
x *= p
while x <= bound:
expses.append(x)
x *= p
seen += len(expses)
if seen >= index:
expses.sort()
return expses[index - seen - 1]
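# --- Added illustrative helper (not part of the original solution) ---
# A direct (slow) radical computation matching the definition above; e.g.
# rad(504) == 42 because 504 = 2^3 * 3^2 * 7.
def rad(n):
    product, p = 1, 2
    while n > 1:
        if n % p == 0:
            product *= p
            while n % p == 0:
                n //= p
        p += 1
    return product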
if __name__ == '__main__':
print solve()
|
[
"jwodder@sdf.lonestar.org"
] |
jwodder@sdf.lonestar.org
|
9dfda40d614bb48a5fe3ab1fc73a182b42c25ced
|
6846a0469efc79b89edc8f856944d5a8005d7244
|
/id_0060.py
|
01b7c31aa52da90e0370908d86d70201e6913a1f
|
[] |
no_license
|
CGenie/project_euler
|
42cb966e13645339490046eb44a729660ae0c092
|
cc90edd061b0f4d9e076d5a684b842c202a6812a
|
refs/heads/master
| 2020-06-05T00:41:49.266961
| 2014-01-13T19:11:31
| 2014-01-13T19:11:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,663
|
py
|
#!/usr/bin/python2
# #####################################################################
# id_0060.py
#
# Przemyslaw Kaminski <cgenie@gmail.com>
# Time-stamp: <>
######################################################################
import pickle
from itertools import combinations
from id_0003 import is_prime
if __name__ == '__main__':
f = open("./primes.pickle")
lst_primes = pickle.load(f)
f.close()
n = 0
# for primes in combinations(lst_primes[1:500], 5):
# nice_four = True
# if n % 100 == 0:
# n = 1
# print "Testing " + str(primes)
# for doubles in combinations(primes, 2):
# p1 = int(''.join([str(doubles[0]), str(doubles[1])]))
# p2 = int(''.join([str(doubles[1]), str(doubles[0])]))
# if not p1 in lst_primes or not p2 in lst_primes:
# nice_four = False
# break
# if nice_four:
# print "A nice five has been found: " + str(primes)
# print "The sum is " + str(sum(primes))
# foo = input("Press enter to continue...")
# n += 1
join_nums = lambda x, y: int(''.join([str(x), str(y)]))
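    # Added note (illustrative): join_nums(3, 7) == 37, i.e. decimal concatenation.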
for idx, x in enumerate(lst_primes):
if x > 10000:
break
maxidx = idx
for a in xrange(5, maxidx):
pa = lst_primes[a]
for b in xrange(a, maxidx):
pb = lst_primes[b]
# check this pair first
pab = join_nums(pa, pb)
pba = join_nums(pb, pa)
#if pab in lst_primes and pba in lst_primes:
if is_prime(pab) and is_prime(pba):
print "%d, %d ok so far..." % (pa, pb)
for c in xrange(b, maxidx):
pc = lst_primes[c]
pac = join_nums(pa, pc)
pca = join_nums(pc, pa)
pbc = join_nums(pb, pc)
pcb = join_nums(pc, pb)
#print "Testing %d with pac = %d, pca = %d, pbc = %d, pcb = %d" % (pc, pac, pca, pbc, pcb)
#if pac in lst_primes and pca in lst_primes and pbc in lst_primes and pcb in lst_primes:
if is_prime(pac) and is_prime(pca) and is_prime(pbc) and is_prime(pcb):
print "%d, %d, %d ok so far..." % (pa, pb, pc)
for d in xrange(c, maxidx):
nice_four = True
pd = lst_primes[d]
pad = join_nums(pa, pd)
pda = join_nums(pd, pa)
pbd = join_nums(pb, pd)
pdb = join_nums(pd, pb)
pcd = join_nums(pc, pd)
pdc = join_nums(pd, pc)
#print "pad = %d, pda = %d, pbd = %d, pdb = %d, pcd = %d, pdc = %d" % (pad, pda, pbd, pdb, pcd, pdc)
for pp in [pad, pda, pbd, pdb, pcd, pdc]:
#if not pp in lst_primes:
if not is_prime(pp):
nice_four = False
break
if nice_four:
print "%d, %d, %d, %d ok so far..." % (pa, pb, pc, pd)
for e in xrange(d, maxidx):
nice_five = True
pe = lst_primes[e]
pae = join_nums(pa, pe)
pea = join_nums(pe, pa)
pbe = join_nums(pb, pe)
peb = join_nums(pe, pb)
pce = join_nums(pc, pe)
pec = join_nums(pe, pc)
pde = join_nums(pd, pe)
ped = join_nums(pe, pd)
for pp in [pae, pea, pbe, peb, pce, pec, pde, ped]:
#if not pp in lst_primes:
if not is_prime(pp):
nice_five = False
break
if nice_five:
print "A nice five has been found: %d, %d, %d, %d, %d" % (pa, pb, pc, pd, pe)
print "The sum is " + str(sum([pa, pb, pc, pd, pe]))
                                        foo = raw_input("Press enter to continue...")  # raw_input: don't eval the reply (Python 2)
|
[
"cgenie@gmail.com"
] |
cgenie@gmail.com
|
86d8efefc0aaa304a58e7c9b58202117545fbf48
|
03ac34ae59b3d85b1876a9ca61e08c0b7020537c
|
/myproject/pages/tests.py
|
49bde0bf4c1a592e8d6c4a196c1ff01db6c1fbf1
|
[] |
no_license
|
pramodkumarpanda/http-django.repo
|
1e94890a50ef59166cbe1919791cdac6ca73b21c
|
9551a5991f9dda76674a442598eb14c2cf2312cb
|
refs/heads/master
| 2020-07-02T10:32:58.668280
| 2019-10-21T15:28:37
| 2019-10-21T15:28:37
| 201,500,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, SimpleTestCase
# Create your tests here.
from django.http import HttpRequest
from django.urls import reverse
from . import views
from .models import Post
class HomePageTests(SimpleTestCase):
def test_home_page_status_code(self):
response = self.client.get('/')
self.assertEquals(response.status_code, 200)
def test_view_url_by_name(self):
response = self.client.get(reverse('home'))
self.assertEquals(response.status_code, 200)
def test_view_uses_correct_template(self):
response = self.client.get(reverse('home'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
def test_home_page_contains_correct_html(self):
response = self.client.get('/')
self.assertContains(response, '<h1>Homepage</h1>')
def test_home_page_does_not_contain_incorrect_html(self):
response = self.client.get('/')
self.assertNotContains(
response, 'Hi there! I should not be on the page.')
|
[
"user@localhost"
] |
user@localhost
|
1cca13ce33cc9925a5981731585f0970d3da571e
|
e3f3b986b256911e43496fe91c463f79dda9b334
|
/customauth/migrations/0006_remove_user_is_moderator.py
|
3692f715b5ea627df4ca42712fb605ba20be132a
|
[] |
no_license
|
itkpi/itkpimail
|
e2ca56849c1ca18dec0b9c7d661b3563ed1f2ffe
|
6622208ca36d322e61821935804b2367f956d0b6
|
refs/heads/master
| 2021-01-01T19:07:29.107033
| 2015-11-08T18:57:45
| 2015-11-08T18:57:45
| 34,176,219
| 3
| 1
| null | 2018-12-09T05:00:27
| 2015-04-18T17:52:20
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('customauth', '0005_user_is_moderator'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='is_moderator',
),
]
|
[
"roman.rader@gmail.com"
] |
roman.rader@gmail.com
|
35178571ff5eaafb96690f4636cddb7b8642190d
|
a4e4c3faa29043fc80f62a8442e2f8333cd23933
|
/U_Net/primitives.py
|
cee4c15d4e046dc3f71f456fa4a622db17cc0c77
|
[] |
no_license
|
FangYang970206/Intrinsic_Image
|
652ab87c2d95b400cf80c6a49d1863a40d1cba07
|
3b8ec261b7b3aeaa1c611473f53fb4e23b82893b
|
refs/heads/master
| 2023-01-21T05:18:40.748488
| 2020-11-24T02:22:00
| 2020-11-24T02:22:00
| 228,824,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
import sys, torch, torch.nn as nn, torch.nn.functional as F
from torch.autograd import Variable
def conv(in_channels, out_channels, kernel_size, stride, padding):
convolution = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
batch_norm = nn.BatchNorm2d(out_channels)
layer = nn.Sequential(convolution, batch_norm)
return layer
## Returns function to concatenate tensors.
## Used in skip layers to join encoder and decoder layers.
def join(ind):
return lambda x, y: torch.cat( (x,y), ind )
## channels : list of ints
## kernel_size : int
## padding : int
## stride_fn : fn(channel_index) --> int
## mult=1 if encoder, 2 if decoder
def build_encoder(channels, kernel_size, padding, stride_fn, mult=1):
layers = []
sys.stdout.write( ' %3d' % channels[0] )
for ind in range(len(channels)-1):
m = 1 if ind == 0 else mult
in_channels = channels[ind] * m
out_channels = channels[ind+1]
stride = stride_fn(ind)
sys.stdout.write( ' --> %3d' % out_channels )
if ind < len(channels)-2:
block = conv(in_channels, out_channels, kernel_size, stride, padding)
else:
block = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
layers.append(block)
sys.stdout.write('\n')
sys.stdout.flush()
return nn.ModuleList(layers)
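## --- Added illustrative sketch (not part of the original module) ---
## Example channel plan and stride function; the specific numbers are
## assumptions chosen only to show how build_encoder is called.
def _encoder_demo():
    channels = [3, 64, 128, 256]
    return build_encoder(channels, kernel_size=3, padding=1,
                         stride_fn=lambda ind: 2)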
|
[
"15270989505@163.com"
] |
15270989505@163.com
|
bf3e130b1bab3b6fcb044e25147092962b48fd91
|
1af49694004c6fbc31deada5618dae37255ce978
|
/tools/metrics/histograms/merge_xml.py
|
d522fae6d1f2e541fa077ec5b9d3ced366c2ce53
|
[
"LGPL-2.0-or-later",
"Zlib",
"BSD-3-Clause",
"MIT",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference",
"MPL-1.1",
"GPL-2.0-only",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown",
"APSL-2.0"
] |
permissive
|
sadrulhc/chromium
|
59682b173a00269ed036eee5ebfa317ba3a770cc
|
a4b950c23db47a0fdd63549cccf9ac8acd8e2c41
|
refs/heads/master
| 2023-02-02T07:59:20.295144
| 2020-12-01T21:32:32
| 2020-12-01T21:32:32
| 317,678,056
| 3
| 0
|
BSD-3-Clause
| 2020-12-01T21:56:26
| 2020-12-01T21:56:25
| null |
UTF-8
|
Python
| false
| false
| 7,451
|
py
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A script to merge multiple source xml files into a single histograms.xml."""
import argparse
import os
import sys
import xml.dom.minidom
import expand_owners
import extract_histograms
import histogram_configuration_model
import histogram_paths
import populate_enums
def GetElementsByTagName(trees, tag, depth=2):
"""Gets all elements with the specified tag from a set of DOM trees.
Args:
trees: A list of DOM trees.
tag: The tag of the elements to find.
depth: The depth in the trees by which a match should be found.
Returns:
A list of DOM nodes with the specified tag.
"""
iterator = extract_histograms.IterElementsWithTag
return list(e for t in trees for e in iterator(t, tag, depth))
def GetEnumsNodes(doc, trees):
"""Gets all enums from a set of DOM trees.
If trees contain ukm events, populates a list of ints to the
"UkmEventNameHash" enum where each value is a ukm event name hash truncated
to 31 bits and each label is the corresponding event name.
Args:
doc: The document to create the node in.
trees: A list of DOM trees.
Returns:
A list of enums DOM nodes.
"""
enums_list = GetElementsByTagName(trees, 'enums')
ukm_events = GetElementsByTagName(
GetElementsByTagName(trees, 'ukm-configuration'), 'event')
# Early return if there are no ukm events provided. MergeFiles have callers
# that do not pass ukm events so, in that case, we don't need to iterate
# through the enum list.
if not ukm_events:
return enums_list
for enums in enums_list:
populate_enums.PopulateEnumsWithUkmEvents(doc, enums, ukm_events)
return enums_list
def CombineHistogramsSorted(doc, trees):
"""Sorts histograms related nodes by name and returns the combined nodes.
This function sorts nodes including <histogram>, <variant> and
<histogram_suffix>. Then it returns one <histograms> that contains the
sorted <histogram> and <variant> nodes and the other <histogram_suffixes_list>
node containing all <histogram_suffixes> nodes.
Args:
doc: The document to create the node in.
trees: A list of DOM trees.
Returns:
A list containing the combined <histograms> node and the combined
<histogram_suffix_list> node.
"""
# Create the combined <histograms> tag.
combined_histograms = doc.createElement('histograms')
def SortByLowerCaseName(node):
return node.getAttribute('name').lower()
variants_nodes = GetElementsByTagName(trees, 'variants', depth=3)
sorted_variants = sorted(variants_nodes, key=SortByLowerCaseName)
histogram_nodes = GetElementsByTagName(trees, 'histogram', depth=3)
sorted_histograms = sorted(histogram_nodes, key=SortByLowerCaseName)
for variants in sorted_variants:
# Use unsafe version of `appendChild` function here because the safe one
# takes a lot longer (10000x) to append all children. The unsafe version
# is ok here because:
# 1. the node to be appended is a clean node.
# 2. The unsafe version only does fewer checks but not changing any
# behavior and it's documented to be usable if performance matters.
# See https://github.com/python/cpython/blob/2.7/Lib/xml/dom/minidom.py#L276.
xml.dom.minidom._append_child(combined_histograms, variants)
for histogram in sorted_histograms:
xml.dom.minidom._append_child(combined_histograms, histogram)
# Create the combined <histogram_suffixes_list> tag.
combined_histogram_suffixes_list = doc.createElement(
'histogram_suffixes_list')
histogram_suffixes_nodes = GetElementsByTagName(trees,
'histogram_suffixes',
depth=3)
sorted_histogram_suffixes = sorted(histogram_suffixes_nodes,
key=SortByLowerCaseName)
for histogram_suffixes in sorted_histogram_suffixes:
xml.dom.minidom._append_child(combined_histogram_suffixes_list,
histogram_suffixes)
return [combined_histograms, combined_histogram_suffixes_list]
def MakeNodeWithChildren(doc, tag, children):
"""Creates a DOM node with specified tag and child nodes.
Args:
doc: The document to create the node in.
tag: The tag to create the node with.
children: A list of DOM nodes to add as children.
Returns:
A DOM node.
"""
node = doc.createElement(tag)
for child in children:
node.appendChild(child)
return node
def MergeTrees(trees, should_expand_owners):
"""Merges a list of histograms.xml DOM trees.
Args:
trees: A list of histograms.xml DOM trees.
should_expand_owners: Whether we want to expand owners for histograms.
Returns:
A merged DOM tree.
"""
doc = xml.dom.minidom.Document()
doc.appendChild(
MakeNodeWithChildren(
doc,
'histogram-configuration',
# This can result in the merged document having multiple <enums> and
# similar sections, but scripts ignore these anyway.
GetEnumsNodes(doc, trees) +
# Sort the <histogram> and <histogram_suffixes> nodes by name and
# return the combined nodes.
CombineHistogramsSorted(doc, trees)))
# After using the unsafe version of appendChild, we see a regression when
# pretty-printing the merged |doc|. This might because the unsafe appendChild
# doesn't build indexes for later lookup. And thus, we need to convert the
# merged |doc| to a xml string and convert it back to force it to build
# indexes for the merged |doc|.
doc = xml.dom.minidom.parseString(doc.toxml())
  # Only perform fancy operations after |doc| becomes stable. This helps
  # improve the runtime performance.
if should_expand_owners:
for histograms in doc.getElementsByTagName('histograms'):
expand_owners.ExpandHistogramsOWNERS(histograms)
return doc
def MergeFiles(filenames=[], files=[], should_expand_owners=False):
"""Merges a list of histograms.xml files.
Args:
filenames: A list of histograms.xml filenames.
files: A list of histograms.xml file-like objects.
should_expand_owners: Whether we want to expand owners. By default, it's
false because most of the callers don't care about the owners for each
metadata.
Returns:
A merged DOM tree.
"""
all_files = files + [open(f) for f in filenames]
trees = [xml.dom.minidom.parse(f) for f in all_files]
return MergeTrees(trees, should_expand_owners)
def PrettyPrintMergedFiles(filenames=[], files=[]):
return histogram_configuration_model.PrettifyTree(
MergeFiles(filenames=filenames, files=files, should_expand_owners=True))
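def MergeFilesExample():
  """Added illustrative sketch: merge two in-memory histograms.xml fragments
  via the |files| argument of MergeFiles. The XML snippets are assumptions
  chosen only to show the call shape."""
  import io
  first = io.BytesIO(b'<histogram-configuration><histograms/>'
                     b'</histogram-configuration>')
  second = io.BytesIO(b'<histogram-configuration><enums/>'
                      b'</histogram-configuration>')
  return MergeFiles(files=[first, second])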
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output', required=True)
args = parser.parse_args()
with open(args.output, 'w') as f:
# This is run by
# https://source.chromium.org/chromium/chromium/src/+/master:tools/metrics/BUILD.gn;drc=573e48309695102dec2da1e8f806c18c3200d414;l=5
# to send the merged histograms.xml to the server side. Providing |UKM_XML|
# here is not to merge ukm.xml but to populate `UkmEventNameHash` enum
# values.
f.write(PrettyPrintMergedFiles(
histogram_paths.ALL_XMLS + [histogram_paths.UKM_XML]))
if __name__ == '__main__':
main()
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
51c20a8be35d9a81e63f1be880d978d0d13c9257
|
017033cc7094fd20334607e4c3e6e90bc5006687
|
/django/api/index/migrations/0032_auto_20210607_1433.py
|
92261d96b3f94ca8d42235adf992eaaf50474f0b
|
[] |
no_license
|
HellMenDos/DjangoDockerPostgresSocket.io
|
cadc8cbc5ec1cd84b1e2455361f9a04ac557c73c
|
88e4ff65cfc80df7932cffe23eee0ae221ec3519
|
refs/heads/master
| 2023-05-31T06:31:35.206756
| 2021-06-21T19:14:55
| 2021-06-21T19:14:55
| 367,129,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
# Generated by Django 3.1.4 on 2021-06-07 14:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0031_auto_20210607_1431'),
]
operations = [
migrations.AlterField(
model_name='userdata',
name='avatar',
field=models.ImageField(blank=True, default='', upload_to='static', verbose_name='фото'),
),
]
|
[
"poznkirill3@gmail.com"
] |
poznkirill3@gmail.com
|
b91955ad63dfda9a71f830d93717f0db2366bf70
|
7ec91f8b8342b1ab62d315424f43588a13dda307
|
/solu/221. Maximal Square.py
|
f17368e629bb9254ed8a5f5a492dfce8f0b97edc
|
[] |
no_license
|
coolmich/py-leetcode
|
bbd001a1cb41b13cd0515d1b764ec327dfaaa03c
|
3129438b032d3aeb87c6ac5c4733df0ebc1272ba
|
refs/heads/master
| 2020-05-21T08:44:46.564419
| 2016-09-15T15:45:08
| 2016-09-15T15:45:08
| 60,917,444
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
class Solution(object):
def maximalSquare(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not len(matrix): return 0
mat, maxi = [[0 for i in range(len(matrix[0]))] for j in range(len(matrix))], 0
        for c in range(len(matrix[0])):
            for r in range(len(matrix)):
                if not r or not c:
                    mat[r][c] = 1 if matrix[r][c] == '1' else 0
                elif matrix[r][c] == '1':
                    mat[r][c] = min(mat[r-1][c-1], mat[r][c-1], mat[r-1][c]) + 1
                # track the maximum for every cell; the original only updated
                # it on the inner branch, missing size-1 squares in row/col 0
                maxi = max(maxi, mat[r][c])
        return maxi**2
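# Illustrative check (not in the original file): each DP cell holds the side
# length of the largest all-'1' square whose bottom-right corner is that
# cell, so the answer is the squared maximum. For the classic example:
#   Solution().maximalSquare([["1","0","1","0","0"],
#                             ["1","0","1","1","1"],
#                             ["1","1","1","1","1"],
#                             ["1","0","0","1","0"]])  # -> 4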
|
[
"coolmich00@gmail.com"
] |
coolmich00@gmail.com
|
fc36f5d3bbf141bd5b97e9bfa970fa8c1f641c82
|
31fc068f935aa723a089eda3c8a639e1d9c4cee9
|
/jason.py
|
27b41fdd76c9d5384f1f606053da85231f1a696c
|
[] |
no_license
|
jaythaceo/Jaythaceo
|
31098a016c4b3453996ef89252f2d9a1f05e9c10
|
f4cea385318b0ff1708e3d35e96f4eb53925d8d0
|
refs/heads/master
| 2023-05-30T15:16:27.589337
| 2023-04-28T16:23:31
| 2023-04-28T16:23:31
| 157,614,343
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
# Lets make a bot that does some cool shit
from flask import Flask, render_template, request, jsonify
import aiml
import os
app = Flask(__name__)
@app.route("/")
def hello():
return render_template('chat.html')
@app.route("/ask", methods=['POST'])
def ask():
    # keep the message as str (encoding to bytes here breaks the string
    # comparisons and kernel.respond() under Python 3)
    message = request.form['messageText'].strip()
kernel = aiml.Kernel()
if os.path.isfile("bot_brain.brn"):
kernel.bootstrap(brainFile = "bot_brain.brn")
else:
kernel.bootstrap(learnFiles = os.path.abspath("aiml/std-startup.xml"), commands = "load aiml b")
kernel.saveBrain("bot_brain.brn")
# kernel now ready for use
    # handle the message once: the original wrapped this in a `while True`
    # loop (lifted from a console-bot example), which never terminated on
    # "save" because only the final branch returned
    if message == "quit":
        exit()
    elif message == "save":
        kernel.saveBrain("bot_brain.brn")
        # a response body (added here) so the view always returns something
        return jsonify({'status': 'OK', 'answer': 'brain saved'})
    else:
        bot_response = kernel.respond(message)
        return jsonify({'status': 'OK', 'answer': bot_response})
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
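# Illustrative usage (not in the original file): with the server running,
# the /ask endpoint accepts a form-encoded POST, e.g.
#   curl -X POST -d 'messageText=hello' http://localhost:5000/ask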
|
[
"jaythaceo@gmail.com"
] |
jaythaceo@gmail.com
|
88c1de86d4d9ba1db69c49098476c61e05196810
|
540b4199dd80228f1d84c9b687e974cfa2c289a2
|
/【Python+Dash快速web应用开发】系列文章/16 多页面应用/app2.py
|
4298790c10256c39aeb6b4f979952cf587c417be
|
[] |
no_license
|
CNFeffery/DataScienceStudyNotes
|
1186e26c88874b89b65f841af5f78dc49429e479
|
d45b42b49be04ba4add9cdd18b4787fb3c334b1f
|
refs/heads/master
| 2023-08-17T07:18:43.730916
| 2023-07-25T14:05:17
| 2023-07-25T14:05:17
| 206,516,448
| 1,141
| 485
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
app = dash.Dash(__name__)
app.layout = dbc.Container(
[
dcc.Location(id='url', refresh=False),
dbc.Row(
[
dbc.Col(
[
html.A('页面A', href='/pageA'),
html.Br(),
html.A('页面B', href='/pageB'),
html.Br(),
html.A('页面C', href='/pageC'),
],
width=2,
style={
'backgroundColor': '#eeeeee'
}
),
dbc.Col(
html.H3(id='render-page-content'),
width=10
)
]
)
],
style={
'paddingTop': '20px',
'height': '100vh',
        'width': '100vw'
}
)
@app.callback(
Output('render-page-content', 'children'),
Input('url', 'pathname')
)
def render_page_content(pathname):
if pathname == '/':
return '欢迎来到首页'
elif pathname == '/pageA':
return '欢迎来到页面A'
elif pathname == '/pageB':
return '欢迎来到页面B'
elif pathname == '/pageC':
return '欢迎来到页面C'
else:
return '当前页面不存在!'
if __name__ == '__main__':
app.run_server(debug=True)
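# Illustrative note (not in the original file): `python app2.py` serves on
# Dash's default http://127.0.0.1:8050; navigating to /pageA updates
# dcc.Location's pathname and the callback renders the matching text.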
|
[
"fefferypzy@gmail.com"
] |
fefferypzy@gmail.com
|
1c555d3c397b93d8929f1f3d32c733ad9362307a
|
28bf7793cde66074ac6cbe2c76df92bd4803dab9
|
/answers/Utkarsh Srivastava/Day 6/Question 1.py
|
51078cd3ed9f7841f0a3dc1a97b49dc757ef6903
|
[
"MIT"
] |
permissive
|
Codechef-SRM-NCR-Chapter/30-DaysOfCode-March-2021
|
2dee33e057ba22092795a6ecc6686a9d31607c9d
|
66c7d85025481074c93cfda7853b145c88a30da4
|
refs/heads/main
| 2023-05-29T10:33:31.795738
| 2021-06-10T14:57:30
| 2021-06-10T14:57:30
| 348,153,476
| 22
| 135
|
MIT
| 2021-06-10T14:57:31
| 2021-03-15T23:37:26
|
Java
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
candies = input("Enter Candies ").split()
max_candies = 0  # avoid shadowing the built-in max()
result = [0]*len(candies)
extra = int(input("Enter extra candies "))
for i in range(len(candies)):
  candies[i] = int(candies[i])
  if candies[i] > max_candies:
    max_candies = candies[i]
for i in range(len(candies)):
  result[i] = candies[i] + extra >= max_candies
print(result)
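# Illustrative run (not in the original file), mirroring LeetCode 1431:
#   Enter Candies 2 3 5 1 3
#   Enter extra candies 3
#   -> [True, True, True, False, True]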
|
[
"noreply@github.com"
] |
Codechef-SRM-NCR-Chapter.noreply@github.com
|
3cc37cf0d6c169ef8ca16a2422467566dd03733e
|
24e843a90a3b3a37cc4d76a207f41d1fc628c2e7
|
/python3/test141~225.py
|
8fb44d48461a29e5a31ea55c2269ee6e23ee6e0f
|
[] |
no_license
|
erikliu0801/leetcode
|
c595ea786716f7df86bd352c1e8d691f1870ec70
|
1de7bfe192324f9de28afa06b9539331c87d1346
|
refs/heads/master
| 2023-08-07T14:47:19.074076
| 2021-09-05T09:46:35
| 2021-09-05T09:46:35
| 224,321,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,035
|
py
|
#141
def hasCycle(head):
linked_list = []
while head != None:
if head in linked_list:
return True
linked_list.append(head)
head = head.next
return False
#168
def convertToTitle(n):
if n <= 0:
return
else:
digits = ['Z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y']
digits_sum, digits_num, digits_level = 0, 1, 0
while digits_sum < n:
digits_num *= 26
digits_level += 1
digits_sum += digits_num
if digits_sum == n:
return 'Z' * digits_level
else:
all_digits = ''
while digits_level > 0:
all_digits = digits[n % 26] + all_digits
if n % 26 == 0:
n = n//26 -1
else:
n = n//26
digits_level -= 1
return all_digits
#169
def majorityElement(nums):
for num in set(nums):
if nums.count(num) > len(nums)//2:
return num
#171
def titleToNumber(s):
def addSum(digits):
add_sum = 0
if digits > 0:
add_sum = 26**digits + addSum(digits-1)
return add_sum
digits = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
add_sum = addSum(len(s)-1) + 1
for i in range(len(s)):
add_sum += digits.index(s[-1-i]) * 26 ** i
return add_sum
#172
def trailingZeroes(n):
negative = False
if n < 0 and n % 2 != 0:
negative = True
n = abs(n)
five_scale = 5
five_scale_addnum = [1]
while five_scale < n:
five_scale *= 5
five_scale_addnum.append(five_scale_addnum[-1]*5 +1)
add_num = 0
for i in range(1,len(five_scale_addnum)+1):
add_num += (n // five_scale) * five_scale_addnum[-i]
n %= five_scale
five_scale //= 5
if negative == True:
return 0 - add_num
else:
return add_num
#202
def isHappy(n):
if n < 1:
return False
pre = list()
while n != 1:
if n in pre:
return False
pre.append(n)
cache = 0
for c in str(n):
cache += int(c)**2
n = cache
return True
# def main():
# print(isHappy(200))
#203
def removeElements(head, val):
if type(head) != ListNode:
return
while head.val == val:
if head.next != None:
head = head.next
else:
return
if head.next == None:
return head
l1 = removeElements(head.next, val)
if l1:
head.next = l1
else:
head.next = None
return head
#204
def countPrimes(n):
import time
now = time.time()
if n <= 2:
return 0
else:
primes = set({2})
x = set(range(3,n,2))
while len(x) != 0:
prime = min(x)
primes.add(prime)
x.remove(prime)#
x -= x.intersection(set(range(prime**2,n,prime))) ##
if prime**2 >= n:
break
primes |= x
print(time.time() - now)
return len(primes)
#205
def isIsomorphic(s, t):
    # completed from the original `pass` stub: two strings are isomorphic
    # iff the character pairing is one-to-one in both directions
    return len(set(s)) == len(set(t)) == len(set(zip(s, t)))
#206
def reverseList(head):
if type(head) != ListNode:
return
elif head.next == None:
return head
else:
node = head.next #n2
head.next = None #n1->X
while node.next: #if n3 exist
new_head = node.next #n3
node.next = head #n2->n1
if new_head.next != None:
head = node # n1 = n2
node = new_head # n2 = n3
else:
new_head.next = node #n3->n2
return new_head
node.next = head
return node
#217
def containsDuplicate(nums):
return len(set(nums)) != len(nums)
#219
def containsNearbyDuplicate(nums, k):
if len(set(nums)) == len(nums):
return False
c = list()
c_i_j = list()
for m, num in enumerate(nums):
if num in c:
c_i_j[c.index(num)].append(m)
else:
c.append(num)
c_i_j.append([m])
k_s = set()
for m in c_i_j:
for n, i in enumerate(m):
for j in m[n+1:]:
k_s.add(abs(j-i))
return k in k_s
def main():
input_nums = [[1,2,3,1], [1,0,1,1], [1,0,1,1], [1,0,1,1], [1,2,3,1,2,3], [1,2,3,1,2,3]]
input_k = [3, 1, 2, 3, 2, 3]
expected_output = [True, True, True, True, False, True]
for i in range(len(input_nums)):
if containsNearbyDuplicate(input_nums[i], input_k[i]) != expected_output[i]:
print("Wrong!!!")
print(containsNearbyDuplicate(input_nums[i], input_k[i]))
else:
print("Right")
# print(containsNearbyDuplicate(input_nums[-1], input_k[-1]))
if __name__ == '__main__':
main()
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def LinkedList2List(ListNode):
"""
ListNode: Head
rtype: List
"""
list1 = []
l0 = ListNode
while l0 != None:
list1.append(l0.val)
l0 = l0.next
return list1
def List2LinkedList(List):
"""
rtype: Head of ListNode
"""
l = ListNode(0)
l0 = l
for i in range(len(List)):
l.val = List[i]
if i + 1 != len(List):
l.next = ListNode(0)
l = l.next
return l0
#141
# input_linked_list = [[3,2,0,-4]]
# expected_output = [True]
# for i in range(len(input_linked_list)):
# if hasCycle(List2LinkedList(input_linked_list[i])) != expected_output[i]:
# print("Wrong!!!")
# print(hasCycle(List2LinkedList(input_linked_list[i])))
# else:
# print("Right")
# print(hasCycle(List2LinkedList(input_linked_list[-1])))
#168
# for i in range(1,28):
# print(convertToTitle(i))
# input_int = [1, 26, 28, 701, 702, 703, 17576, 18278, 18279]
# expected_output = ['A', 'Z', 'AB', 'ZY', 'ZZ', 'AAA', 'YYZ', 'ZZZ', 'AAAA']
# for i in range(len(input_int)):
# if convertToTitle(input_int[i]) != expected_output[i]:
# print("Wrong!!!")
# print(convertToTitle(input_int[i]))
# else:
# print("Right")
# print(convertToTitle())
#169
# print(majorityElement([2,2,1,1,1,2,2]))
#171
# print(titleToNumber('BA'))
#172
# for i in range(1,11):
# print(factorial(i))
# print(trailingZeroes(124))
#203
# print(LinkedList2List(removeElements(List2LinkedList([6,6,3,4,5,6,6]),6)))
#204
# print(countPrimes(1000000))
#205
# input_s = ['egg', 'foo', 'paper']
# input_t = ['add', 'bar', 'title']
# expected_output = [True, False, True]
# for i in range(len(input_s)):
# if isIsomorphic(input_s[i],input_t[i]) != expected_output[i]:
# print("Wrong!!!")
# print(isIsomorphic(input_s[i],input_t[i]))
# else:
# print("Right")
# print(isIsomorphic(input_s[-1],input_t[-1]))
#206
# print(LinkedList2List(reverseList(List2LinkedList([1,2]))))
#217
# print(containsDuplicate([1,2]))
#219
|
[
"erikliu0801@gmail.com"
] |
erikliu0801@gmail.com
|
e356b5be593efe2b242480222729f42b266cea26
|
99799383b4e618061fe9261aa70cfe420e02a5aa
|
/person/migrations/0008_person_datetime_updated.py
|
b93c4440a6d2a8a9406b93a49b315141c371f377
|
[
"MIT"
] |
permissive
|
openkamer/openkamer
|
f311a97d5c9e182eabd6602f42475e8e049912b0
|
bb99963c00ad90299deccd44d977c27aee7eb16c
|
refs/heads/master
| 2023-07-20T10:45:11.402427
| 2023-07-18T17:41:56
| 2023-07-18T17:41:56
| 57,322,204
| 62
| 7
|
MIT
| 2023-07-17T18:15:43
| 2016-04-28T17:43:23
|
Python
|
UTF-8
|
Python
| false
| false
| 397
|
py
|
# Generated by Django 2.2.8 on 2019-12-05 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('person', '0007_auto_20191205_1833'),
]
operations = [
migrations.AddField(
model_name='person',
name='datetime_updated',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"bart.romgens@gmail.com"
] |
bart.romgens@gmail.com
|
4ff6bf0f114dfc486ac2a7d447a98809d1b04a35
|
d63b1b36634b68070f6f3c017c0250a7ea646e6f
|
/SMC/GEM5/gem5/src/mem/ruby/structures/RubyPrefetcher.py
|
18bb3dc69472cf6d08ff90277396485033e73a76
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
jiwon-choe/Brown-SMCSim
|
ccf506d34d85fb3d085bf50ed47de8b4eeaee474
|
ff3d9334c1d5c8d6a00421848c0d51e50e6b67f8
|
refs/heads/master
| 2021-06-30T00:15:57.128209
| 2020-11-24T03:11:41
| 2020-11-24T03:11:41
| 192,596,189
| 15
| 8
|
MIT
| 2019-06-20T15:43:00
| 2019-06-18T18:53:40
|
C++
|
UTF-8
|
Python
| false
| false
| 2,437
|
py
|
# Copyright (c) 2012 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nilay Vaish
from m5.SimObject import SimObject
from System import System
from m5.params import *
from m5.proxy import *
class Prefetcher(SimObject):
type = 'Prefetcher'
cxx_class = 'Prefetcher'
cxx_header = "mem/ruby/structures/Prefetcher.hh"
num_streams = Param.UInt32(4,
"Number of prefetch streams to be allocated")
pf_per_stream = Param.UInt32(1, "Number of prefetches per stream")
unit_filter = Param.UInt32(8,
"Number of entries in the unit filter array")
nonunit_filter = Param.UInt32(8,
"Number of entries in the non-unit filter array")
train_misses = Param.UInt32(4, "")
num_startup_pfs = Param.UInt32(1, "")
cross_page = Param.Bool(False, """True if prefetched address can be on a
page different from the observed address""")
sys = Param.System(Parent.any, "System this prefetcher belongs to")
|
[
"brandnew7th@gmail.com"
] |
brandnew7th@gmail.com
|
6063712ab5545c979e943f37231f60c58696e514
|
1cc5d45273d008e97497dad9ec004505cc68c765
|
/cheatsheet/ops_doc-master/Service/cfaq/personalNoteBook/pythonLearn-decorator.py
|
79936304b8d47aa4fb0180ebc31d541b08fcf72f
|
[] |
no_license
|
wangfuli217/ld_note
|
6efb802989c3ea8acf031a10ccf8a8a27c679142
|
ad65bc3b711ec00844da7493fc55e5445d58639f
|
refs/heads/main
| 2023-08-26T19:26:45.861748
| 2023-03-25T08:13:19
| 2023-03-25T08:13:19
| 375,861,686
| 5
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
from functools import wraps
def memorization(func: object) -> object:
cache = {}
@wraps(func)
def wrapper(*args):
v = cache.get(args, None)
        if v is None:
cache[args] = func(*args)
return cache[args]
return wrapper
@memorization
def fibonacci(n: int) -> int:
global count
count += 1
if n < 2:
return n
return fibonacci(n-1) + fibonacci(n-2)
count = 0
if __name__ == '__main__':
for i in range(10):
num = fibonacci(i)
print(i, num, sep='->',end=';')
print('compute times: ',count)
# output:
#0->0;1->1;2->1;3->2;4->3;5->5;6->8;7->13;8->21;9->34;compute times: 10 with memorization
#0->0;1->1;2->1;3->2;4->3;5->5;6->8;7->13;8->21;9->34;compute times: 276 without memorization
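# Note (not in the original file): the standard library provides the same
# memoization via functools.lru_cache, e.g.
#   from functools import lru_cache
#   @lru_cache(maxsize=None)
#   def fibonacci(n): ...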
|
[
"wangfl217@126.com"
] |
wangfl217@126.com
|
ec8332242621c2a458f725a777fa1c7e23397c1c
|
998e1a1346f59c8e9b7669e7ebf716f9ac8cd117
|
/EVSCapp/EVSCApi/urls.py
|
a7f4daee03f8376cd0fe5a37f9a9cb4180942c30
|
[] |
no_license
|
meku54444026/EVSCProject
|
3415cf3b0abb682866fcca9bbebb32f37cb744c4
|
1bef5b405391409f27ea5948203c5e28fa1a28ff
|
refs/heads/master
| 2023-07-02T04:45:15.314439
| 2021-08-10T14:52:48
| 2021-08-10T14:52:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,339
|
py
|
from django.db import router
from django.urls import path
from django.urls.conf import include
from EVSCapp.EVSCApi.views import (RecordDetailAPIView,
LisVehicle,
VehicleDetailAPIView,
ReportRUDAPIView,
UpdateFcmTokenApiView,
ListFcmTokenDevices,
# fcm_token_detail
ListReport,
MyProfileLoadAPIView,
ListUser,
ListUserDetail,
RecordViewSet,
# RecordList,
list_records
)
from rest_framework.routers import DefaultRouter
from EVSCapp.EVSCApi import views as qv
router=DefaultRouter()
router.register("records",qv.RecordViewSet)
# router.register('devices', FCMDeviceAuthorizedViewSet)
urlpatterns = [
# path("",include(router.urls)),
path("",include(router.urls)),
path('rest-auth/',include("rest_auth.urls")),
    path('records/', list_records, name='list-records'),
path('records/<int:pk>/',RecordDetailAPIView.as_view(),name='list-detail'),
path("records/<int:pk>/report/", qv.ReportCreateAPiView.as_view(),name='create-report'),
path('vehicles/',LisVehicle.as_view(),name='list-vehicle'),
path('vehicles/<int:pk>/',VehicleDetailAPIView.as_view(),name='vehicle-detail'),
path('reports/',ListReport.as_view(),name='report-list'),
path('reports/<int:pk>',ReportRUDAPIView.as_view(),name='report-detail'),
path('devices/',ListFcmTokenDevices.as_view(),name='list-device-token'),
path('devices/<int:user>/',UpdateFcmTokenApiView.as_view(),name='create-device-token'),
path('user-profile/',MyProfileLoadAPIView.as_view(),name ='retriev-user-profile'),
path('users/',ListUser.as_view(), name ='users'),
path('users/<int:pk>',ListUserDetail.as_view(), name = 'user-detail')
# path('devices/<int:pk>',fcm_token_detail,name='create-device-token')
# path('records/<int:pk>/report',ReportCreateAPiView.as_view(),name='create-record')
]
|
[
"you@example.com"
] |
you@example.com
|
1b01557b777216b35b876eb5b76e8c11dcae98f7
|
51a37b7108f2f69a1377d98f714711af3c32d0df
|
/src/leetcode/P376.py
|
67230c5dd08e1fd4c1ad079f193aeec2d1ebc6e8
|
[] |
no_license
|
stupidchen/leetcode
|
1dd2683ba4b1c0382e9263547d6c623e4979a806
|
72d172ea25777980a49439042dbc39448fcad73d
|
refs/heads/master
| 2022-03-14T21:15:47.263954
| 2022-02-27T15:33:15
| 2022-02-27T15:33:15
| 55,680,865
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
class Solution:
def wiggleMaxLength(self, nums):
n = len(nums)
if n < 2:
return n
inc = None
t = 1
for i in range(1, n):
if inc is None:
if nums[i] == nums[i - 1]:
continue
else:
if nums[i] < nums[i - 1]:
inc = True
else:
inc = False
if (inc and nums[i] < nums[i - 1]) or (not inc and nums[i] > nums[i - 1]) :
t += 1
inc = not inc
return t
if __name__ == '__main__':
print(Solution().wiggleMaxLength([1,17,5,10,13,15,10,5,16,8]))
|
[
"stupidchen@foxmail.com"
] |
stupidchen@foxmail.com
|
11dcbec116bedb68d9ed2b8d582d4fa3c22d4ed6
|
e87403a46c10b0528ae3d51e9d316c6c92409e2c
|
/models/attention/encoders/bgru_encoder.py
|
1f4cbcc86a02f9905836b1f34c787648784c843e
|
[
"MIT"
] |
permissive
|
xwang0415/tensorflow_end2end_speech_recognition
|
662c9b899863f5595f903c4ce3b87e675e1d51a1
|
9d4661e9296b01d1116e82de823f398407207e1f
|
refs/heads/master
| 2021-01-21T20:53:51.563852
| 2017-06-19T06:52:37
| 2017-06-19T06:52:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,718
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Bidirectional GRU Encoder class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from .encoder_base import EncoderOutput, EncoderBase
class BGRUEncoder(EncoderBase):
"""Bidirectional GRU Encoder.
Args:
num_unit:
num_layer:
keep_prob_input:
keep_prob_hidden:
parameter_init:
clip_activation: not used
num_proj: not used
"""
def __init__(self,
num_unit,
num_layer,
keep_prob_input=1.0,
keep_prob_hidden=1.0,
parameter_init=0.1,
clip_activation=50, # not used
num_proj=None, # not used
name='bgru_encoder'):
EncoderBase.__init__(self, num_unit, num_layer, keep_prob_input,
keep_prob_hidden, parameter_init, clip_activation,
num_proj, name)
def _build(self, inputs, inputs_seq_len):
"""Construct Bidirectional GRU encoder.
Args:
inputs:
inputs_seq_len:
Returns:
EncoderOutput: A tuple of
`(outputs, final_state,
attention_values, attention_values_length)`
outputs:
final_state:
attention_values:
attention_values_length:
"""
self.inputs = inputs
self.inputs_seq_len = inputs_seq_len
# Input dropout
outputs = tf.nn.dropout(inputs,
self.keep_prob_input,
name='dropout_input')
# Hidden layers
for i_layer in range(self.num_layer):
with tf.name_scope('BiGRU_encoder_hidden' + str(i_layer + 1)):
initializer = tf.random_uniform_initializer(
minval=-self.parameter_init,
maxval=self.parameter_init)
with tf.variable_scope('GRU', initializer=initializer):
gru_fw = tf.contrib.rnn.GRUCell(self.num_unit)
gru_bw = tf.contrib.rnn.GRUCell(self.num_unit)
# Dropout (output)
gru_fw = tf.contrib.rnn.DropoutWrapper(
gru_fw,
output_keep_prob=self.keep_prob_hidden)
gru_bw = tf.contrib.rnn.DropoutWrapper(
gru_bw,
output_keep_prob=self.keep_prob_hidden)
# _init_state_fw = lstm_fw.zero_state(self.batch_size,
# tf.float32)
# _init_state_bw = lstm_bw.zero_state(self.batch_size,
# tf.float32)
# initial_state_fw=_init_state_fw,
# initial_state_bw=_init_state_bw,
# Stacking
(outputs_fw, outputs_bw), final_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=gru_fw,
cell_bw=gru_bw,
inputs=outputs,
sequence_length=inputs_seq_len,
dtype=tf.float32,
scope='BiGRU_' + str(i_layer + 1))
# Concatenate each direction
outputs = tf.concat(
axis=2, values=[outputs_fw, outputs_bw])
return EncoderOutput(outputs=outputs,
final_state=final_state,
attention_values=outputs,
attention_values_length=inputs_seq_len)
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
24387105dd66efbecce28fdcd23e85ae0ba6337e
|
d429c131df32789e11a98e9e965e652176fcee97
|
/443A - Anton and Letters.py
|
608e7a2cee9913d55638b75cd9c1c8131e89672c
|
[] |
no_license
|
shan-mathi/Codeforces
|
a11841a1ef1a1ef78e3d506d58d9fdf4439421bd
|
6f8166b79bea0eb1f575dbfc74c252ba71472c7e
|
refs/heads/main
| 2023-06-15T08:25:41.130432
| 2021-06-24T10:36:06
| 2021-06-24T10:36:06
| 341,176,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#108786380 Mar/01/2021 14:42UTC+5.5 Shan_XD 443A - Anton and Letters PyPy 3 Accepted 108 ms 0 KB
letters = input()  # renamed from `list` to avoid shadowing the built-in
if letters == '{}':
    print(0)
else:
    letters = letters[1:-1].split(',')
    letters = [i.strip() for i in letters]
    print(len(set(letters)))
|
[
"noreply@github.com"
] |
shan-mathi.noreply@github.com
|
1c79daaf5498a22d80f0c0867463ce97502692e9
|
3da991a057cd81de802c40da2edd640878685258
|
/caffe2/python/operator_test/ctc_beam_search_decoder_op_test.py
|
21ca68fe007addb4333d4e8913cecfb64e83a685
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
sjx0451/pytorch
|
9f5b1c0c7c874f9da72c0190dc131944ba828ab7
|
3544f60f7602081398ee62bc5d652a87f4743dab
|
refs/heads/master
| 2022-12-01T22:30:29.888370
| 2020-08-13T23:45:58
| 2020-08-13T23:48:31
| 287,421,291
| 2
| 0
|
NOASSERTION
| 2020-08-14T02:06:11
| 2020-08-14T02:06:11
| null |
UTF-8
|
Python
| false
| false
| 5,391
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.test_util import caffe2_flaky
from collections import defaultdict, Counter
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
DEFAULT_BEAM_WIDTH = 10
DEFAULT_PRUNE_THRESHOLD = 0.001
class TestCTCBeamSearchDecoderOp(serial.SerializedTestCase):
@given(
batch=st.sampled_from([1, 2, 4]),
max_time=st.sampled_from([1, 8, 64]),
alphabet_size=st.sampled_from([1, 2, 32, 128, 512]),
beam_width=st.sampled_from([1, 2, 16, None]),
num_candidates=st.sampled_from([1, 2]),
**hu.gcs_cpu_only
)
@settings(deadline=None, max_examples=30)
def test_ctc_beam_search_decoder(
self, batch, max_time, alphabet_size, beam_width, num_candidates, gc, dc
):
if not beam_width:
beam_width = DEFAULT_BEAM_WIDTH
op_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
['INPUTS', 'SEQ_LEN'],
['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],
num_candidates=num_candidates)
op_no_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
['INPUTS'],
['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],
num_candidates=num_candidates)
else:
num_candidates = min(num_candidates, beam_width)
op_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
['INPUTS', 'SEQ_LEN'],
['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],
beam_width=beam_width,
num_candidates=num_candidates)
op_no_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
['INPUTS'],
['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],
beam_width=beam_width,
num_candidates=num_candidates)
def input_generater():
inputs = np.random.rand(max_time, batch, alphabet_size)\
.astype(np.float32)
seq_len = np.random.randint(1, max_time + 1, size=batch)\
.astype(np.int32)
return inputs, seq_len
def ref_ctc_decoder(inputs, seq_len):
output_len = np.zeros(batch * num_candidates, dtype=np.int32)
output_prob = np.zeros(batch * num_candidates, dtype=np.float32)
val = np.array([]).astype(np.int32)
for i in range(batch):
Pb, Pnb = defaultdict(Counter), defaultdict(Counter)
Pb[0][()] = 1
Pnb[0][()] = 0
A_prev = [()]
ctc = inputs[:, i, :]
ctc = np.vstack((np.zeros(alphabet_size), ctc))
len_i = seq_len[i] if seq_len is not None else max_time
for t in range(1, len_i + 1):
pruned_alphabet = np.where(ctc[t] > DEFAULT_PRUNE_THRESHOLD)[0]
for l in A_prev:
for c in pruned_alphabet:
if c == 0:
Pb[t][l] += ctc[t][c] * (Pb[t - 1][l] + Pnb[t - 1][l])
else:
l_plus = l + (c,)
if len(l) > 0 and c == l[-1]:
Pnb[t][l_plus] += ctc[t][c] * Pb[t - 1][l]
Pnb[t][l] += ctc[t][c] * Pnb[t - 1][l]
else:
Pnb[t][l_plus] += \
ctc[t][c] * (Pb[t - 1][l] + Pnb[t - 1][l])
if l_plus not in A_prev:
Pb[t][l_plus] += \
ctc[t][0] * \
(Pb[t - 1][l_plus] + Pnb[t - 1][l_plus])
Pnb[t][l_plus] += ctc[t][c] * Pnb[t - 1][l_plus]
A_next = Pb[t] + Pnb[t]
A_prev = sorted(A_next, key=A_next.get, reverse=True)
A_prev = A_prev[:beam_width]
candidates = A_prev[:num_candidates]
index = 0
for candidate in candidates:
val = np.hstack((val, candidate))
output_len[i * num_candidates + index] = len(candidate)
output_prob[i * num_candidates + index] = Pb[t][candidate] + Pnb[t][candidate]
index += 1
return [output_len, val, output_prob]
def ref_ctc_decoder_max_time(inputs):
return ref_ctc_decoder(inputs, None)
inputs, seq_len = input_generater()
self.assertReferenceChecks(
device_option=gc,
op=op_seq_len,
inputs=[inputs, seq_len],
reference=ref_ctc_decoder,
)
self.assertReferenceChecks(
device_option=gc,
op=op_no_seq_len,
inputs=[inputs],
reference=ref_ctc_decoder_max_time,
)
if __name__ == "__main__":
import random
random.seed(2603)
unittest.main()
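# Note (not in the original file): ref_ctc_decoder above is a NumPy
# implementation of CTC prefix beam search, keeping per-prefix probabilities
# of blank-ending (Pb) and non-blank-ending (Pnb) paths and pruning to
# beam_width prefixes at each time step.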
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
42894b20a30101258c0c73f702b75506575bf3c4
|
e08801ffc8aa0e59ef88662ba529056a89d924ef
|
/examples/elk/lsda/elk_pp.py
|
13592bebd6ed836096d15314b3a173ff5bfc559f
|
[] |
no_license
|
chrinide/DFTtoolbox
|
a8c848849693426b82f4c329523cc8d82f4d39ac
|
dfe003507011ec14ef520df36d0da55f52dd0028
|
refs/heads/master
| 2021-04-15T14:28:40.593612
| 2017-12-13T23:00:11
| 2017-12-13T23:00:11
| 126,837,451
| 1
| 0
| null | 2018-03-26T14:00:25
| 2018-03-26T14:00:24
| null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
from DFTtoolbox.elk import postproc
import os
# Parameters =======================================
run_task=[1,2,3,4,5,6]
wkdir=os.path.dirname(os.path.realpath(__file__))
klabel=['$\Gamma$','X','W','K','$\Gamma$','L','U','W','L','K']
Ebound=[-5,5]
state_grp=[['1:1/1/a/a'],['2:2/2/a/a']]
# Main =============================================
print(wkdir)
pp=postproc(wkdir)
for task in run_task:
    if task == 1:
        pp.band_read()
    elif task == 2:
        pp.band_plot(klabel,Ebound)
    elif task == 3:
        pp.fatband_read()
    elif task == 4:
        pp.fatband_plot(state_grp,klabel,Ebound)
    elif task == 5:
        pp.pdos_read()
    elif task == 6:
        pp.pdos_plot(state_grp,Ebound)
|
[
"pipidog@gmail.com"
] |
pipidog@gmail.com
|
6d32704f852655a0cfb1ee6f5c83b7814aa83fcb
|
d748a68c9d9100cb2ad275ebf0fd161532dd8200
|
/cubicus/device.py
|
04e4204bac84a483be21a740aef3331e7875f0eb
|
[] |
no_license
|
drpancake/cubicus-daemon
|
aff192aa6e5b2ed97a5a34d5e1f3528d99bb4e71
|
feaa8009a1bfe25ef47ca198e1bc3783ad5b58fd
|
refs/heads/master
| 2021-01-02T09:27:10.321494
| 2012-05-01T21:31:09
| 2012-05-01T21:31:09
| 3,901,291
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,322
|
py
|
import os
import random
from cubicus.sock import SocketThread
from cubicus.models import Event
PAIRINGS_FILE = 'pairings.dat'
def display_pin(pin):
""" Kludge - PIL for now """
from PIL import Image, ImageDraw
im = Image.new('RGBA', (300, 100))
draw = ImageDraw.Draw(im)
draw.text((5, 5), pin)
im.show()
class DeviceSocketThread(SocketThread):
"""
Accepts a socket to a remote Cubicus device and services it
"""
def __init__(self, clientsocket):
SocketThread.__init__(self, clientsocket)
self._paired = False
self._challenged_guid = None
self._pin = None
# Subscribe to manager updates
self.manager.subscribe(self)
def notify(self, obj, name, new_value):
if name in ['current_context', 'current_application']:
self.send_state()
elif name == 'event':
event = new_value
if event.source != Event.DEVICE_EVENT:
# Event originated elsewhere, so forward it
# to the device
self.queue_message('event', event.to_json())
def allowed_types(self):
types = ['device_identify', 'state', 'event', 'pair_response']
return SocketThread.allowed_types(self) + types
def send_applications(self):
apps = map(lambda a: a.to_json(), self.manager.applications)
self.queue_message('applications', apps)
def handle_pair_response(self, pin):
if pin == self._pin:
# Successfully paired so store the GUID
fp = open(PAIRINGS_FILE, 'a')
fp.write('%s\n' % self._challenged_guid)
fp.close()
# Continue to next step
self._paired = True
self.send_applications()
self.send_state()
else:
self.queue_message('pair_failure')
self.stop()
def handle_device_identify(self, guid):
"""
Checks for existing pairing with the given GUID. If none
exists, initiate the pairing process. Once paired, queues
the remaining handshake messages
"""
assert self._paired is False
# Touch if its not there
if not os.path.isfile(PAIRINGS_FILE):
open(PAIRINGS_FILE, 'w').close()
fp = open(PAIRINGS_FILE, 'r')
s = fp.read()
fp.close()
pairs = s.split('\n')
if guid not in pairs:
# Unknown GUID so challenge for a random PIN number
self.log('Need to pair for "%s"' % guid)
self._challenged_guid = guid
self._pin = ''.join(map(str, [random.randint(0, 9)
for i in range(4)]))
display_pin(self._pin) # Display on host machine
self.queue_message('pair_request')
else:
# Already paired, continue to next step
self._paired = True
self.send_applications()
self.send_state()
def handle_state(self, state):
self.manager.current_application = state['current_application']
self.manager.current_context = state['current_context']
def handle_event(self, json_event):
event = Event.from_json(json_event)
event.source = Event.DEVICE_EVENT
self.manager.send_event(event)
|
[
"james.potter@gmail.com"
] |
james.potter@gmail.com
|
65afdd06bb2760a7cd754a6439f901e8c2c18c55
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp-obt/sblp_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=50/sched.py
|
906f21dc0c17a1a4da4f66497f1581033d82294a
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
-S 0 -X RUN -Q 0 -L 7 138 400
-S 0 -X RUN -Q 0 -L 7 119 400
-S 1 -X RUN -Q 1 -L 4 90 400
-S 1 -X RUN -Q 1 -L 4 72 250
-S 2 -X RUN -Q 2 -L 3 64 250
-S 2 -X RUN -Q 2 -L 3 62 250
-S 3 -X RUN -Q 3 -L 2 40 125
-S 3 -X RUN -Q 3 -L 2 35 150
-S 4 33 150
-S 4 31 150
-S 4 26 100
-S 5 24 150
-S 4 18 100
-S 5 16 175
-S 5 14 125
-S 5 5 100
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
1efe310b28c4e6a1bf1bfe0b321d4aecdd908515
|
c0aff8a6ea16a6921bdbb907e6769d229dcdb6cb
|
/src/push_server/ios/push_worker/apnslib/notify_mgr.py
|
e55b66465e5275213ab7b49bdf5df1d116cc4eb8
|
[] |
no_license
|
chwangbjtu/pushService
|
fe1d3f92ea9f1292603be41894f8496fb7c13fba
|
28a58bcba1522275d07bb20d41e8df5642955367
|
refs/heads/master
| 2021-01-11T18:17:30.263415
| 2016-09-27T10:43:08
| 2016-09-27T10:43:08
| 69,335,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
#-*- coding: utf-8 -*-
from __init__ import *
from connection import *
from tornado import log
import traceback
import etc
from statistics import Statistics
from tokens import TokenMgr
class APNSNotificationWrapper(object):
sandbox = True
payloads = None
connection = None
identifier = 1
error_at = 0
def __init__(self, certificate = None, sandbox = True):
self.connection = APNSConnection(certificate = certificate)
self.sandbox = sandbox
self.payloads = []
def append(self, payload = None):
if not isinstance(payload, APNSNotification):
raise APNSTypeError, "Unexpected argument type. Argument should be an instance of APNSNotification object"
payload.identify(self.identifier)
self.payloads.append(payload)
self.identifier += 1
def count(self):
return len(self.payloads)
def add_failed_num(self):
statistics_obj = Statistics()
statistics_obj.add_failed_num()
def clear(self):
self.identifier = 1
self.error_at = 0
self.payloads = []
def notify(self):
payloads = [o.payload() for o in self.payloads]
messages = []
if len(payloads) == 0:
return False
for p in payloads:
plen = len(p)
messages.append(struct.pack('%ds' % plen, p))
message = "".join(messages)
apnsConnection = self.connection
if self.sandbox != True:
apns_host = etc.APNS_HOST
else:
apns_host = etc.APNS_SANDBOX_HOST
apnsConnection.connect(apns_host, etc.APNS_PORT)
buf = apnsConnection.write(message)
apnsConnection.close()
if buf:
log.app_log.info("error occured")
self.add_failed_num()
self.error_handler(buf)
return True
def error_handler(self, buf):
try:
unpack_buf = struct.unpack("!BBI", buf)
if len(unpack_buf) == 3:
log.app_log.info("error :%s", unpack_buf)
error_at = unpack_buf[2]
error_no = unpack_buf[1]
start = error_at - self.error_at
if error_no == etc.SHUTDOWN or error_no == etc.NO_ERROR_HAPPENED or error_no == etc.PROCESSING_ERROR:
#start = start - 1
pass
else:
if error_no == etc.INVALID_TOKEN:
error_payload = self.payloads[start - 1]
error_token = error_payload.get_token()
log.app_log.info("invalid token:%s", error_token)
token_mgr = TokenMgr()
token_mgr.add_delete_token(error_token)
self.payloads = self.payloads[start:]
self.error_at = error_at
self.notify()
except Exception, e:
pass
|
[
"chwangbjtu@gmail.com"
] |
chwangbjtu@gmail.com
|
98ee322836ec34a3a4310506f52b914c4f8634ea
|
56bf1dbfa5d23257522fb03906e13c597a829ed3
|
/lib/wamp_components/analytics_component.py
|
09b1634f64ccea3c071b0d8e16dfe5a84f9696bd
|
[
"MIT"
] |
permissive
|
fendaq/SerpentAI
|
0417777bbc0fccb50df456d0ced1bce839aa3211
|
e9c147f33a790a9cd3e4ee631ddbf6bbf91c3921
|
refs/heads/master
| 2021-07-23T02:04:15.977726
| 2017-08-26T23:31:59
| 2017-08-26T23:31:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
import asyncio
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
from autobahn.wamp.types import RegisterOptions, SubscribeOptions
from autobahn.wamp import auth
from lib.config import config
import aioredis
import json
class AnalyticsComponent:
@classmethod
def run(cls):
print(f"Starting {cls.__name__}...")
url = "ws://%s:%s" % (config["analytics"]["host"], config["analytics"]["port"])
runner = ApplicationRunner(url=url, realm=config["analytics"]["realm"])
runner.run(AnalyticsWAMPComponent)
class AnalyticsWAMPComponent(ApplicationSession):
def __init__(self, c=None):
super().__init__(c)
def onConnect(self):
self.join(config["analytics"]["realm"], ["wampcra"], config["analytics"]["auth"]["username"])
def onDisconnect(self):
print("Disconnected from Crossbar!")
def onChallenge(self, challenge):
secret = config["analytics"]["auth"]["password"]
signature = auth.compute_wcs(secret.encode('utf8'), challenge.extra['challenge'].encode('utf8'))
return signature.decode('ascii')
async def onJoin(self, details):
self.redis_client = await self._initialize_redis_client()
while True:
redis_key, event = await self.redis_client.brpop("SERPENT:AISAAC_MAZE:EVENTS")
event = json.loads(event.decode("utf-8"))
topic = event.pop("project_key")
self.publish(topic, event)
async def _initialize_redis_client(self):
return await aioredis.create_redis(
(config["redis"]["host"], config["redis"]["port"]),
loop=asyncio.get_event_loop()
)
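# Illustrative note (not in the original file): the component is started with
#   AnalyticsComponent.run()
# which blocks, joins the Crossbar realm from config["analytics"], and
# relays events popped from the Redis list onto WAMP topics.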
|
[
"info@nicholasbrochu.com"
] |
info@nicholasbrochu.com
|
468e2fa105770bc6613f4c974b423ea70ccf8192
|
26b03741c4651eb2f076e51cd34d71f2fb826fcf
|
/dev/web/Rocky/src/accounts/migrations/0007_wallettransactions.py
|
ba8f123aea7966fa763c98b89bb2264471459679
|
[] |
no_license
|
sreeram315/Rocky---Hardware-project-ATmega-Django-mySQL
|
072fc8e9913f22dc0e47a73d29ace6ff1795ed0f
|
1ef82b1914bdfc57866e7420e12bf4318cf3f030
|
refs/heads/main
| 2023-06-04T16:01:51.176564
| 2021-06-21T22:18:01
| 2021-06-21T22:18:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
# Generated by Django 2.2.7 on 2020-04-03 05:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0006_userprofile_wallet_balance'),
]
operations = [
migrations.CreateModel(
name='WalletTransactions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('credit', models.IntegerField(default=0)),
('created_on', models.DateTimeField(auto_now_add=True, null=True)),
('updated_on', models.DateTimeField(auto_now=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='wallet', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Wallet Transaction',
'verbose_name_plural': 'Wallet Transactions',
'db_table': 'tbl_wallet_transactions',
},
),
]
|
[
"sreerammaram2@gmail.com"
] |
sreerammaram2@gmail.com
|
054b06d9579be1ed0da49ab1d44ab99a4bdf7eaf
|
27b86f422246a78704e0e84983b2630533a47db6
|
/docs/source/tutorials/src/hatch/solid_hatch_true_color.py
|
71612e3499d520bbc0a4d87c8b1813705602c924
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
import ezdxf
# hatch with true color requires DXF R2004 or later
doc = ezdxf.new("R2004")
msp = doc.modelspace()
# important: major axis >= minor axis (ratio <= 1.)
msp.add_ellipse((0, 0), major_axis=(0, 10), ratio=0.5)
hatch = msp.add_hatch() # use default ACI fill color
hatch.rgb = (211, 40, 215)
# every boundary path is a 2D element
edge_path = hatch.paths.add_edge_path()
# each edge path can contain line arc, ellipse and spline elements
# important: major axis >= minor axis (ratio <= 1.)
edge_path.add_ellipse((0, 0), major_axis=(0, 10), ratio=0.5)
doc.saveas("solid_rgb_hatch.dxf")
|
[
"me@mozman.at"
] |
me@mozman.at
|
daeecd30d81c8cbdc6492acdc2b0ed1ec95cae6e
|
c42142003122bc8172b00ccee0e733417d06fde0
|
/webstore/cart/models.py
|
187e1966774582896c01e0a3d38b75a74f863be0
|
[] |
no_license
|
sloniewski/django_webstore
|
53fbe52b7284220a106b7d96abcc06308e1d1b23
|
76b46396b6915e21d65e1ad0fbc8786d6f15b122
|
refs/heads/master
| 2021-10-10T16:32:09.942377
| 2019-01-13T22:37:44
| 2019-01-13T22:37:44
| 115,758,610
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,923
|
py
|
from decimal import Decimal, getcontext
from django.db import models
from django.db.models import Sum
from django.contrib.auth import get_user_model
from django.utils.functional import cached_property
from webstore.product.models import Product
User = get_user_model()
class CartManager(models.Manager):
def get_for_session(self, request):
session = request.session.session_key
cart = self.get_queryset().filter(
session=session).first()
return cart
class CartItem(models.Model):
"""
Reference table for m2m relation cart -> product.
Stores additional information about quantity.
"""
cart = models.ForeignKey(
'Cart',
on_delete=models.CASCADE,
)
product = models.ForeignKey(
Product,
on_delete=models.CASCADE,
)
quantity = models.PositiveIntegerField()
class Meta:
unique_together = [
('cart', 'product')
]
@property
def price(self):
return self.product.actual_price
@property
def weight(self):
return self.quantity * self.product.weight
def add_qty(self, qty):
self.quantity += qty
self.save()
def remove_qty(self, qty):
if self.quantity <= qty:
self.delete()
return None
self.quantity -= qty
self.save()
return self
@property
def value(self):
"""
Returns value of order-line.
"""
getcontext().prec = 4
# Order of multiplication is important, to call __mul__ of Cash class
price = self.product.actual_price
if price:
return price * self.quantity
return 0
class Cart(models.Model):
"""
Cart representation, has unique reference to session_key.
Does not store items, cart items are m2m relation to cart & product
"""
objects = CartManager()
session = models.CharField(
max_length=255,
unique=True
)
product = models.ManyToManyField(
Product,
through=CartItem,
)
created = models.DateTimeField(
auto_now_add=True,
)
def _get_item(self, item):
item = CartItem.objects.get(
product_id=item,
cart=self,
)
return item
def add_item(self, item, qty):
try:
cart_item = self._get_item(item)
cart_item.add_qty(qty)
except CartItem.DoesNotExist:
cart_item = CartItem.objects.create(
product_id=item,
cart=self,
quantity=qty,
)
return cart_item
def remove_item(self, item, qty):
try:
cart_item = self._get_item(item)
item = cart_item.remove_qty(qty)
if item is None:
return None
return item
except CartItem.DoesNotExist:
pass
def delete_item(self, item):
try:
cart_item = self._get_item(item)
cart_item.delete()
return True
except CartItem.DoesNotExist:
return True
@property
def item_count(self):
item_count = self.cartitem_set.aggregate(
Sum('quantity'))['quantity__sum']
if item_count is None:
return 0
return item_count
def get_items(self):
return self.cartitem_set.all().select_related('product')
@property
def value(self):
value = 0
# TODO should be aggregate
for item in self.cartitem_set.filter(quantity__gte=1):
value += item.value
return value
@property
def items(self):
return self.cartitem_set.all().select_related('product')
@cached_property
def weight(self):
weight = 0
for item in self.items:
weight += (item.product.weight * item.quantity)
return weight
|
[
"sloniewski.maciej@gmail.com"
] |
sloniewski.maciej@gmail.com
|
a66986685895e6214469af6309e06d1c7e0e0654
|
3a891a79be468621aae43defd9a5516f9763f36e
|
/desktop/core/ext-py/Django-1.11/tests/urlpatterns_reverse/test_localeregexprovider.py
|
401e9a1ad03f556c6b498ceba120ce6877d00bb2
|
[
"BSD-3-Clause",
"Python-2.0",
"Apache-2.0"
] |
permissive
|
oyorooms/hue
|
b53eb87f805063a90f957fd2e1733f21406269aa
|
4082346ef8d5e6a8365b05752be41186840dc868
|
refs/heads/master
| 2020-04-15T20:31:56.931218
| 2019-01-09T19:02:21
| 2019-01-09T19:05:36
| 164,998,117
| 4
| 2
|
Apache-2.0
| 2019-01-10T05:47:36
| 2019-01-10T05:47:36
| null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
from __future__ import unicode_literals
import os
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, mock, override_settings
from django.urls import LocaleRegexProvider
from django.urls.resolvers import LocaleRegexDescriptor
from django.utils import translation
from django.utils._os import upath
here = os.path.dirname(upath(os.path.abspath(__file__)))
@override_settings(LOCALE_PATHS=[os.path.join(here, 'translations', 'locale')])
class LocaleRegexProviderTests(SimpleTestCase):
def setUp(self):
translation.trans_real._translations = {}
def tearDown(self):
translation.trans_real._translations = {}
def test_translated_regex_compiled_per_language(self):
provider = LocaleRegexProvider(translation.gettext_lazy('^foo/$'))
with translation.override('de'):
de_compiled = provider.regex
# compiled only once per language
error = AssertionError('tried to compile url regex twice for the same language')
with mock.patch('django.urls.resolvers.re.compile', side_effect=error):
de_compiled_2 = provider.regex
with translation.override('fr'):
fr_compiled = provider.regex
self.assertEqual(fr_compiled.pattern, '^foo-fr/$')
self.assertEqual(de_compiled.pattern, '^foo-de/$')
self.assertEqual(de_compiled, de_compiled_2)
def test_nontranslated_regex_compiled_once(self):
provider = LocaleRegexProvider('^foo/$')
with translation.override('de'):
de_compiled = provider.regex
with translation.override('fr'):
# compiled only once, regardless of language
error = AssertionError('tried to compile non-translated url regex twice')
with mock.patch('django.urls.resolvers.re.compile', side_effect=error):
fr_compiled = provider.regex
self.assertEqual(de_compiled.pattern, '^foo/$')
self.assertEqual(fr_compiled.pattern, '^foo/$')
def test_regex_compile_error(self):
"""Regex errors are re-raised as ImproperlyConfigured."""
provider = LocaleRegexProvider('*')
msg = '"*" is not a valid regular expression: nothing to repeat'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
provider.regex
def test_access_locale_regex_descriptor(self):
self.assertIsInstance(LocaleRegexProvider.regex, LocaleRegexDescriptor)
|
[
"ranade@cloudera.com"
] |
ranade@cloudera.com
|
4750561ea9d7788e3f16bfbe1c96adc5a4de2664
|
4d892dc51e2dda0fcce246ac608fc4e0ce98c52b
|
/FirstStepsInPython/Basics/Lab2 Conditional Statements/06.AreaOfFigures.py
|
1f9ae6a548d4270bf0e1a3d28da1d38717d4e372
|
[
"MIT"
] |
permissive
|
inovei6un/SoftUni-Studies-1
|
510088ce65e2907c2755a15e427fd156909157f0
|
3837c2ea0cd782d3f79353e61945c08a53cd4a95
|
refs/heads/main
| 2023-08-14T16:44:15.823962
| 2021-10-03T17:30:48
| 2021-10-03T17:30:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
from math import pi
figure = input()
a = "square"
b = "rectangle"
c = "circle"
d = "triangle"
if figure == a:
side = float(input())
print(side * side)
elif figure == b:
side_a = float(input())
side_b = float(input())
print(side_a * side_b)
elif figure == c:
rad = float(input())
print(pi * rad * rad)
elif figure == d:
side_a = float(input())
side_b = float(input())
print((side_a * side_b) / 2)
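# Illustrative run (not in the original file):
#   input "circle" then 2 -> prints 12.566370614359172 (pi * 2 * 2)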
|
[
"lazar_off@yahoo.com"
] |
lazar_off@yahoo.com
|
71464c227b83bec13b1cda37a74689e9e64c894d
|
7b5c1352e1a4fb8352161cc135bfd1225a633828
|
/2017-cvr-tencent-final/src/ffm_gbdt/evaluate.py
|
eff22cd6da75a597c9be09fbed629e6051ba6cfe
|
[] |
no_license
|
zgcgreat/2017-cvr-tencent
|
b7f54ae8df55fbb30f2430f695a148844982aa3a
|
fe79d0756bbf862d45e63e35b7c28da8396bcbda
|
refs/heads/master
| 2021-04-03T08:32:33.651705
| 2018-07-17T08:36:53
| 2018-07-17T08:36:53
| 124,724,199
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,248
|
py
|
# _*_ coding: utf-8 _*_
import sys
from csv import DictReader
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
data_path = sys.argv[1]
result_path = sys.argv[2]
label_path = data_path + 'validation.csv'
predict_path = result_path + 'submission.csv'
label_reader = DictReader(open(label_path))
predict_reader = DictReader(open(predict_path))
count = 0
y_true = []
y_pred = []
y_scores = []
for t, row in enumerate(label_reader):
predict = predict_reader.__next__()
actual = float(row['Label'])
predicted = float(predict['Predicted'])
y_true.append(actual)
y_scores.append(predicted)
    # predictions at or above the 0.5 threshold are treated as clicks
if (predicted >= 0.5):
y_pred.append(1)
else:
y_pred.append(0)
count += 1
# compute the performance metrics (log loss is computed on the raw scores
# rather than the thresholded labels, which the original passed by mistake)
auc = roc_auc_score(y_true, y_scores)
logloss = log_loss(y_true, y_scores)
accuracy = accuracy_score(y_true, y_pred)
precision = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
print('Accuracy: {0} Precision: {1} Recall: {2} F1-Measure: {3}\n'.format(accuracy, precision, recall, f1))
print('logloss: {0} auc: {1}\n'.format(logloss, auc))
result = open(result_path + 'details.txt', 'w')
result.write('------------------------------------------------------------\n\n')
result.write('Total instances: {count}\n\n\nValidation File: {vafile}\n\nPrediction file: {prefile}\n\n'
.format(count=count, vafile=label_path, prefile=predict_path))
result.write(
'Accuracy: {0}\n\nPrecision: {1}\n\nRecall: {2}\n\nF1-Measure: {3}\n\n'.format(accuracy, precision, recall, f1))
result.write('logloss: {0}\n\nauc: {1}\n\n'.format(logloss, auc))
result.write('-------------------------------------------------------------\n\n')
result.close()
# write the results out as a CSV summary
statistics = open(result_path + 'result.csv', 'w')
statistics.writelines('Accuracy,Precision,Recall,F1-Measure,Logloss,AUC\n')
statistics.writelines('{0},{1},{2},{3},{4},{5}'.format(accuracy, precision, recall, f1, logloss, auc))
statistics.close()
|
[
"1107630485@qq.com"
] |
1107630485@qq.com
|
ac538fcd79a7e716accd2aa0b73d989b81b002af
|
12123592a54c4f292ed6a8df4bcc0df33e082206
|
/py3/pgms/sec4/Circle.py
|
fa62fb9969f45e1a99a84b59831899e078e263fc
|
[] |
no_license
|
alvinooo/advpython
|
b44b7322915f832c8dce72fe63ae6ac7c99ef3d4
|
df95e06fd7ba11b0d2329f4b113863a9c866fbae
|
refs/heads/master
| 2021-01-23T01:17:22.487514
| 2017-05-30T17:51:47
| 2017-05-30T17:51:47
| 92,860,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
# Circle.py - Circle module
import math
class Circle(object):
def __init__(self, radius=1):
self.__radius = radius
def getRadius(self): return self.__radius
def circum(self): return 2 * math.pi * self.__radius
def area(self): return math.pi * self.__radius ** 2
def __str__(self):
return "Circle: %s" %(self.__radius)
|
[
"alvin.heng@teradata.com"
] |
alvin.heng@teradata.com
|
c5ea92fa5595fba5d666432acdda222cf54fe4cb
|
5fe72bb13baf3649058ebe11aa86ad4fc56c69ed
|
/hard-gists/1f66e4d58074d64c8268/snippet.py
|
a669f0ac17f9e738398c8f61266bb602ed43eb70
|
[
"Apache-2.0"
] |
permissive
|
dockerizeme/dockerizeme
|
8825fed45ff0ce8fb1dbe34959237e8048900a29
|
408f3fa3d36542d8fc1236ba1cac804de6f14b0c
|
refs/heads/master
| 2022-12-10T09:30:51.029846
| 2020-09-02T13:34:49
| 2020-09-02T13:34:49
| 144,501,661
| 24
| 20
|
Apache-2.0
| 2022-11-21T12:34:29
| 2018-08-12T21:21:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
import argparse
import urllib.request
import os
import img2pdf
from os import walk
from os.path import join
from bs4 import BeautifulSoup
work_dir = os.path.dirname(__file__)
def download_images(url):
html = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(html, 'html.parser')  # explicit parser avoids bs4's parser warning
title = 'pdf_images' # soup.title.string
images = soup.findAll('img', {'class': 'slide_image'})
for image in images:
image_url = image.get('data-full').split('?')[0]
command = 'wget %s -P %s --quiet' % (image_url, title)
os.system(command)
convert_pdf(title)
def convert_pdf(url):
f = []
for (dirpath, dirnames, filenames) in walk(join(work_dir, url)):
f.extend(filenames)
break
f = ["%s/%s" % (url, x) for x in f]
print("Making pdf")
pdf_bytes = img2pdf.convert(f, dpi=300, x=None, y=None)
doc = open('presentation.pdf', 'wb')
doc.write(pdf_bytes)
doc.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("url", type=str,
help="download an slideshare presentation given the url")
args = parser.parse_args()
download_images(args.url)
os.system('rm -r pdf_images')
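# Illustrative usage (not in the original file): the script takes the
# presentation URL as its single positional argument, e.g.
#   python snippet.py https://www.slideshare.net/<user>/<deck>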
|
[
"42325807+dockerizeme@users.noreply.github.com"
] |
42325807+dockerizeme@users.noreply.github.com
|
4120f2826dcf9ed8b34f5ccdbaa5e04098ba005c
|
1677eaad65da601a3ac34bd6648c973ffd23c5a9
|
/test/test_recipients_api.py
|
3649e01a51dd5da0c43ad32857deb08372c0acba
|
[] |
no_license
|
jeffkynaston/sdk-spike-python
|
dc557cc1557387f8a126cd8e546201d141de535e
|
f9c65f578abb801ffe5389b2680f9c6ed1fcebd3
|
refs/heads/main
| 2023-07-10T00:58:13.864373
| 2021-08-05T21:38:07
| 2021-08-05T21:38:07
| 393,175,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,403
|
py
|
"""
Plastiq Public API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import unittest
import openapi_client
from openapi_client.api.recipients_api import RecipientsApi # noqa: E501
class TestRecipientsApi(unittest.TestCase):
"""RecipientsApi unit test stubs"""
def setUp(self):
self.api = RecipientsApi() # noqa: E501
def tearDown(self):
pass
def test_recipients_get(self):
"""Test case for recipients_get
Retrieve a paginated list of Recipients by query parameter(s) # noqa: E501
"""
pass
def test_recipients_id_delete(self):
"""Test case for recipients_id_delete
Delete a Recipient # noqa: E501
"""
pass
def test_recipients_id_get(self):
"""Test case for recipients_id_get
Retrieve a Recipient # noqa: E501
"""
pass
def test_recipients_id_patch(self):
"""Test case for recipients_id_patch
Update a Recipient # noqa: E501
"""
pass
def test_recipients_post(self):
"""Test case for recipients_post
Create a Recipient # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"jeff.kynaston@plastiq.com"
] |
jeff.kynaston@plastiq.com
|
bdfab790143c4ba126b8efec958c5486207c0a99
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf.0/gsn-edf_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=93/params.py
|
278fd438e6fdf9246abe78754f18f7426f4fa985
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.543786',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 93,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
2f71ff7ba4d2e8f7e6ddba2ab05056646a76a884
|
e669b3fe7da2698da4ce02e98325ce154d2aa546
|
/swaps/utils/api_signature_v2.py
|
63401bc3ab76d3f913b31820716826a8895d2d7b
|
[
"Apache-2.0"
] |
permissive
|
marcellinamichie291/cash_carry_leveraged_futures_arbitrageur
|
0834a911fdd6c9f1462f6f2f59926f715fc51461
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
refs/heads/main
| 2023-03-16T18:35:28.730554
| 2020-12-04T07:46:13
| 2020-12-04T07:46:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,850
|
py
|
import base64
import hashlib
import hmac
import datetime
from urllib import parse
import urllib.parse
from swaps.exception.huobi_api_exception import HuobiApiException
def create_signature_v2(api_key, secret_key, method, url, builder):
if api_key is None or secret_key is None or api_key == "" or secret_key == "":
raise HuobiApiException(HuobiApiException.KEY_MISSING, "API key and secret key are required")
timestamp = utc_now()
builder.put_url("accessKey", api_key)
builder.put_url("signatureVersion", "2.1")
builder.put_url("signatureMethod", "HmacSHA256")
builder.put_url("timestamp", timestamp)
host = urllib.parse.urlparse(url).hostname
path = urllib.parse.urlparse(url).path
    # sort the parameters by key:
    keys = sorted(builder.param_map.keys())
    # join as key=value pairs with '&', percent-encoding the values
    qs0 = '&'.join(['%s=%s' % (key, parse.quote(builder.param_map[key], safe='')) for key in keys])
    # join method, host, path and query string with '\n'
    payload0 = '%s\n%s\n%s\n%s' % (method, host, path, qs0)
    dig = hmac.new(secret_key.encode('utf-8'), msg=payload0.encode('utf-8'), digestmod=hashlib.sha256).digest()
    # base64-encode the HMAC digest
    s = base64.b64encode(dig).decode()
builder.put_url("signature", s)
builder.put_url("authType", "api")
params = {
"accessKey": api_key,
"signatureVersion": "2.1",
"signatureMethod": "HmacSHA256",
"timestamp": timestamp,
"signature":s,
"authType":"api"
}
builder.put_url("action", "req")
builder.put_url("ch", "auth")
builder.put_url("params", params)
"""
# for test
ret_maps = {
"action": "req",
"ch": "auth",
"params" : params
}
return json.dumps(ret_maps)
"""
def utc_now():
return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
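# Illustrative sketch (hypothetical values, not part of the original module):
# for a GET request to host api.hbdm.com and path /swap-notification, the
# pre-signing payload assembled above has the shape
#   "GET\napi.hbdm.com\n/swap-notification\naccessKey=...&signatureMethod=HmacSHA256&signatureVersion=2.1&timestamp=..."
# i.e. method, host, path and the sorted, URL-encoded query string joined by '\n'.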
|
[
"jare@coindexlabs.com"
] |
jare@coindexlabs.com
|
0014e1e799b36fa9daf5f3be780340dca0a2ac61
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_sparse_isnan_op.py
|
b807e6ba624452c55d74e668f661008d194d7a44
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 3,150
|
py
|
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
class TestSparseIsnan(unittest.TestCase):
"""
Test the API paddle.sparse.isnan on some sparse tensors.
x: sparse tensor, out: sparse tensor
"""
def to_sparse(self, x, format):
if format == 'coo':
return x.detach().to_sparse_coo(sparse_dim=x.ndim)
elif format == 'csr':
return x.detach().to_sparse_csr()
def check_result(self, x_shape, format, data_type="float32"):
raw_inp = np.random.randint(-100, 100, x_shape)
mask = np.random.randint(0, 2, x_shape)
inp_x = (raw_inp * mask).astype(data_type)
inp_x[inp_x > 0] = np.nan
np_out = np.isnan(inp_x[inp_x != 0])
dense_x = paddle.to_tensor(inp_x)
sp_x = self.to_sparse(dense_x, format)
sp_out = paddle.sparse.isnan(sp_x)
sp_out_values = sp_out.values().numpy()
np.testing.assert_allclose(np_out, sp_out_values, rtol=1e-05)
def test_isnan_shape(self):
self.check_result([20], 'coo')
self.check_result([4, 5], 'coo')
self.check_result([4, 5], 'csr')
self.check_result([8, 16, 32], 'coo')
self.check_result([8, 16, 32], 'csr')
def test_isnan_dtype(self):
self.check_result([4, 5], 'coo', "float32")
self.check_result([4, 5], 'csr', "float32")
self.check_result([8, 16, 32], 'coo', "float64")
self.check_result([8, 16, 32], 'csr', "float64")
class TestStatic(unittest.TestCase):
def test(self):
paddle.enable_static()
indices = paddle.static.data(
name='indices', shape=[2, 3], dtype='int32'
)
values = paddle.static.data(name='values', shape=[3], dtype='float32')
dense_shape = [3, 3]
sp_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
sp_y = paddle.sparse.isnan(sp_x)
out = sp_y.to_dense()
exe = paddle.static.Executor()
indices_data = [[0, 1, 2], [1, 2, 0]]
values_data = np.array([1.0, float("nan"), 3.0]).astype('float32')
fetch = exe.run(
feed={'indices': indices_data, 'values': values_data},
fetch_list=[out],
return_numpy=True,
)
correct_out = np.array(
[[False, False, False], [False, False, True], [False, False, False]]
).astype('float32')
np.testing.assert_allclose(correct_out, fetch[0], rtol=1e-5)
paddle.disable_static()
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
8cb3839fefedd518307f93a56d09d5034b97b681
|
472661f4a0094ce070ed9702da1d2e3e55f7cbe7
|
/data/io/convert_data_to_tfrecord_voc2012.py
|
e8bf49e6fc5c81a9acab30099d5e87774e780eee
|
[
"MIT"
] |
permissive
|
hasan-mh-aziz/RetinaNet_Tensorflow
|
917612d4d58308b8c8444a650e4c43eef291c722
|
d5d1103243816506f96d36f41f1fb0b56eeefcc1
|
refs/heads/master
| 2020-07-31T01:45:20.002881
| 2019-05-26T11:00:56
| 2019-05-26T11:00:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,125
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import sys
sys.path.append('../../')
import xml.etree.cElementTree as ET
import numpy as np
import tensorflow as tf
import glob
import cv2
from tqdm import tqdm
from libs.label_name_dict.label_dict import *
from help_utils.tools import *
tf.app.flags.DEFINE_string('VOC_dir', '/data/VOC2012/VOCdevkit/VOC2012/', 'Voc dir')
tf.app.flags.DEFINE_string('xml_dir', 'Annotations', 'xml dir')
tf.app.flags.DEFINE_string('image_dir', 'JPEGImages', 'image dir')
tf.app.flags.DEFINE_string('save_name', 'train2012', 'save name')
tf.app.flags.DEFINE_string('save_dir', '../tfrecord/', 'save dir')
tf.app.flags.DEFINE_string('img_format', '.jpg', 'format of image')
tf.app.flags.DEFINE_string('dataset', 'pascal', 'dataset')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def read_xml_gtbox_and_label(xml_path):
"""
:param xml_path: the path of voc xml
:return: a list contains gtboxes and labels, shape is [num_of_gtboxes, 5],
and has [xmin, ymin, xmax, ymax, label] in a per row
"""
tree = ET.parse(xml_path)
root = tree.getroot()
img_width = None
img_height = None
box_list = []
for child_of_root in root:
# if child_of_root.tag == 'filename':
# assert child_of_root.text == xml_path.split('/')[-1].split('.')[0] \
# + FLAGS.img_format, 'xml_name and img_name cannot match'
if child_of_root.tag == 'size':
for child_item in child_of_root:
if child_item.tag == 'width':
img_width = int(child_item.text)
if child_item.tag == 'height':
img_height = int(child_item.text)
if child_of_root.tag == 'object':
label = None
for child_item in child_of_root:
if child_item.tag == 'name':
label = NAME_LABEL_MAP[child_item.text]
if child_item.tag == 'bndbox':
tmp_box = [0, 0, 0, 0]
for node in child_item:
if node.tag == 'xmin':
tmp_box[0] = int(node.text)
if node.tag == 'ymin':
tmp_box[1] = int(node.text)
if node.tag == 'xmax':
tmp_box[2] = int(node.text)
if node.tag == 'ymax':
tmp_box[3] = int(node.text)
assert label is not None, 'label is none, error'
tmp_box.append(label)
box_list.append(tmp_box)
gtbox_label = np.array(box_list, dtype=np.int32)
return img_height, img_width, gtbox_label
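# Illustrative example (values are hypothetical, not from the dataset): for a
# 500x375 VOC image containing a single annotated object, the function above
# returns something like
#   (375, 500, np.array([[48, 240, 195, 371, 12]], dtype=np.int32))
# i.e. (img_height, img_width, rows of [xmin, ymin, xmax, ymax, label]).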
def convert_pascal_to_tfrecord():
xml_path = FLAGS.VOC_dir + FLAGS.xml_dir
image_path = FLAGS.VOC_dir + FLAGS.image_dir
save_path = FLAGS.save_dir + FLAGS.dataset + '_' + FLAGS.save_name + '.tfrecord'
mkdir(FLAGS.save_dir)
# writer_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
# writer = tf.python_io.TFRecordWriter(path=save_path, options=writer_options)
writer = tf.python_io.TFRecordWriter(path=save_path)
fr = open('/data/VOC2012/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt', 'r')
lines = fr.readlines()
real_cnt = 0
pbar = tqdm(glob.glob(xml_path + '/*.xml'))
for xml in pbar:
xml = xml.replace('\\', '/')
tmp = xml.split('/')[-1].split('.')[0] + "\n"
if tmp not in lines:
continue
img_name = xml.split('/')[-1].split('.')[0] + FLAGS.img_format
img_path = image_path + '/' + img_name
if not os.path.exists(img_path):
            print('{} does not exist!'.format(img_path))
continue
img_height, img_width, gtbox_label = read_xml_gtbox_and_label(xml)
# img = np.array(Image.open(img_path))
img = cv2.imread(img_path)[:, :, ::-1]
feature = tf.train.Features(feature={
# do not need encode() in linux
'img_name': _bytes_feature(img_name.encode()),
# 'img_name': _bytes_feature(img_name),
'img_height': _int64_feature(img_height),
'img_width': _int64_feature(img_width),
'img': _bytes_feature(img.tostring()),
'gtboxes_and_label': _bytes_feature(gtbox_label.tostring()),
'num_objects': _int64_feature(gtbox_label.shape[0])
})
example = tf.train.Example(features=feature)
writer.write(example.SerializeToString())
real_cnt += 1
pbar.set_description("Conversion progress")
print('\nConversion is complete! {} images.'.format(real_cnt))
if __name__ == '__main__':
# xml_path = '../data/dataset/VOCdevkit/VOC2007/Annotations/000005.xml'
# read_xml_gtbox_and_label(xml_path)
convert_pascal_to_tfrecord()
|
[
"yangxue@megvii.com"
] |
yangxue@megvii.com
|
1f07aa448ad1e6d68b20ec4e9f8479fc2df38a6e
|
ea04557e60fa600a19a2a47da78b0407cf7b3e17
|
/cms/cms/doctype/module_menu/test_module_menu.py
|
98c73da43b1ce3e1454a24e9870fea7408ac2018
|
[
"MIT"
] |
permissive
|
Nirchains/f-cms
|
ea5b5d09e492a0c3d6691b90454b01720894fc03
|
8cefaad087994ca3dad0b1c5fadb250904cdd2cb
|
refs/heads/master
| 2021-07-19T06:26:10.804498
| 2020-02-10T12:02:00
| 2020-02-10T12:02:00
| 167,004,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Pedro Antonio Fernández Gómez and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestModulemenu(unittest.TestCase):
pass
|
[
"nirchains@gmail.com"
] |
nirchains@gmail.com
|
1722fd154551780247f251986b64cf22acbcd063
|
15c9450e30742cfaad5d5ce88e86ff29749af975
|
/training/isotropic/train_auto_2stage.py
|
6f02f81ae96c2a869392b0a50364775ef77882e9
|
[
"BSD-2-Clause"
] |
permissive
|
constantinpape/CNNectome
|
4e4ed3987c7934c3f378f0758c5c545b4ea1ed54
|
102758cabd4bf9c149b9867709b0a8bea9222438
|
refs/heads/master
| 2021-04-15T13:10:17.213845
| 2018-03-09T20:47:21
| 2018-03-09T20:47:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,230
|
py
|
from __future__ import print_function
from gunpowder import *
from gunpowder.tensorflow import *
from training.gunpowder_wrappers import prepare_h5source
import malis
import os
import math
import json
import tensorflow as tf
def train_until(max_iteration, data_sources):
if tf.train.latest_checkpoint('.'):
trained_until = int(tf.train.latest_checkpoint('.').split('_')[-1])
else:
trained_until = 0
if trained_until >= max_iteration:
return
data_providers = []
fib25_dir = "/groups/saalfeld/home/funkej/workspace/projects/caffe/run/fib25/01_data/train"
if 'fib25h5' in data_sources:
for volume_name in ("tstvol-520-1", "tstvol-520-2", "trvol-250-1", "trvol-250-2"):
h5_source = Hdf5Source(os.path.join(fib25_dir, volume_name + '.hdf'),
datasets={VolumeTypes.RAW: 'volumes/raw',
VolumeTypes.GT_LABELS: 'volumes/labels/neuron_ids',
VolumeTypes.GT_MASK: 'volumes/labels/mask', },
volume_specs={
VolumeTypes.GT_MASK: VolumeSpec(interpolatable=False)
})
data_providers.append(h5_source)
fib19_dir = "/groups/saalfeld/saalfeldlab/larissa/fib19"
if 'fib19h5' in data_sources:
for volume_name in ("trvol-250", "trvol-600"):
h5_source = prepare_h5source(fib19_dir, volume_name)
data_providers.append(h5_source)
with open('net_io_names.json', 'r') as f:
net_io_names = json.load(f)
register_volume_type('RAW')
#register_volume_type('ALPHA_MASK')
register_volume_type('GT_LABELS')
register_volume_type('GT_MASK')
register_volume_type('GT_AFFINITIES')
#register_volume_type('GT_AFFINITIES_MASK')
register_volume_type('GT_SCALE')
register_volume_type('PREDICTED_AFFS_1')
register_volume_type('PREDICTED_AFFS_2')
register_volume_type('LOSS_GRADIENT_1')
register_volume_type('LOSS_GRADIENT_2')
voxel_size = Coordinate((8, 8, 8))
input_size = Coordinate((220,)*3) * voxel_size
output_1_size = Coordinate((132,)*3) * voxel_size
output_2_size = Coordinate((44,)*3) * voxel_size
#input_size = Coordinate((66, 228, 228))*(40,4,4)
#output_1_size = Coordinate((38, 140, 140))*(40,4,4)
#output_2_size = Coordinate((10, 52, 52))*(40,4,4)
request = BatchRequest()
request.add(VolumeTypes.RAW, input_size)
request.add(VolumeTypes.GT_LABELS, output_1_size)
request.add(VolumeTypes.GT_MASK, output_1_size)
request.add(VolumeTypes.GT_AFFINITIES, output_1_size)
#request.add(VolumeTypes.GT_AFFINITIES_MASK, output_1_size)
request.add(VolumeTypes.GT_SCALE, output_1_size)
snapshot_request = BatchRequest()
snapshot_request.add(VolumeTypes.RAW, input_size) # just to center the rest correctly
snapshot_request.add(VolumeTypes.PREDICTED_AFFS_1, output_1_size)
snapshot_request.add(VolumeTypes.PREDICTED_AFFS_2, output_2_size)
snapshot_request.add(VolumeTypes.LOSS_GRADIENT_1, output_1_size)
snapshot_request.add(VolumeTypes.LOSS_GRADIENT_2, output_2_size)
data_sources = tuple(
provider +
Normalize() +
Pad(
{
VolumeTypes.RAW: Coordinate((100, 100, 100)) * voxel_size,
VolumeTypes.GT_MASK: Coordinate((100, 100, 100)) * voxel_size
}
) +
RandomLocation() +
Reject()
for provider in data_providers
)
train_pipeline = (
data_sources +
RandomProvider() +
ElasticAugment([40, 40, 40], [2, 2, 2], [0, math.pi/2.0], prob_slip=0.01, prob_shift=0.05, max_misalign=1,
subsample=8) +
SimpleAugment() +
IntensityAugment(0.9, 1.1, -0.1, 0.1) +
IntensityScaleShift(2, -1) +
ZeroOutConstSections()+
GrowBoundary(steps=2) +
SplitAndRenumberSegmentationLabels() +
AddGtAffinities(
malis.mknhood3d()) +
BalanceLabels({
VolumeTypes.GT_AFFINITIES: VolumeTypes.GT_SCALE
},
{
VolumeTypes.GT_AFFINITIES: VolumeTypes.GT_MASK
})+
PreCache(
cache_size=40,
num_workers=10) +
Train(
'wnet',
optimizer=net_io_names['optimizer'],
loss=net_io_names['loss'],
summary=net_io_names['summary'],
log_dir='.log',
inputs={
net_io_names['raw']: VolumeTypes.RAW,
net_io_names['gt_affs_1']: VolumeTypes.GT_AFFINITIES,
net_io_names['loss_weights_1']: VolumeTypes.GT_SCALE,
},
outputs={
net_io_names['affs_1']: VolumeTypes.PREDICTED_AFFS_1,
net_io_names['affs_2']: VolumeTypes.PREDICTED_AFFS_2
},
gradients={
net_io_names['affs_1']: VolumeTypes.LOSS_GRADIENT_1,
net_io_names['affs_2']: VolumeTypes.LOSS_GRADIENT_2
}) +
IntensityScaleShift(0.5, 0.5) +
Snapshot({
VolumeTypes.RAW: 'volumes/raw',
VolumeTypes.GT_LABELS: 'volumes/labels/neuron_ids',
VolumeTypes.GT_AFFINITIES: 'volumes/labels/affinities',
VolumeTypes.PREDICTED_AFFS_1: 'volumes/labels/pred_affinities_1',
VolumeTypes.PREDICTED_AFFS_2: 'volumes/labels/pred_affinities_2',
VolumeTypes.LOSS_GRADIENT_1: 'volumes/loss_gradient_1',
VolumeTypes.LOSS_GRADIENT_2: 'volumes/loss_gradient_2',
},
every=500,
output_filename='batch_{iteration}.hdf',
additional_request=snapshot_request) +
PrintProfilingStats(every=1000)
)
print("Starting training...")
with build(train_pipeline) as b:
for i in range(max_iteration - trained_until):
b.request_batch(request)
print("Training finished")
if __name__ == "__main__":
set_verbose(False)
data_sources = ['fib25h5']
max_iteration = 400000
train_until(max_iteration, data_sources)
|
[
"heinrichl@janelia.hhmi.org"
] |
heinrichl@janelia.hhmi.org
|
4df487dabf6fc57d7a08692f301529248de7184c
|
bdff6688cee79226723fbcf9980c3757a55651b7
|
/algorithms/implementation/library_fine.py
|
c8265f6a9940b9b3618fc90b84b5b0ccf7ae488c
|
[] |
no_license
|
kruthar/hackerrank
|
1f151203c8f26c033585f30d2cf69a2b22dcaf71
|
ef81b2aa41a678ad6b0692f933f438a62b1d6b64
|
refs/heads/master
| 2016-08-10T07:15:19.165058
| 2016-02-26T17:48:58
| 2016-02-26T17:48:58
| 49,286,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
import sys
import datetime
actual = map(int, sys.stdin.next().strip().split(" "))
expected = map(int, sys.stdin.next().strip().split(" "))
actual_date = datetime.date(actual[2], actual[1], actual[0])
expected_date = datetime.date(expected[2], expected[1], expected[0])
diff = actual_date.toordinal() - expected_date.toordinal()
if diff <= 0:
print 0
elif actual_date.year == expected_date.year:
if actual_date.month == expected_date.month:
print 15 * diff
else:
print 500 * (actual_date.month - expected_date.month)
else:
print 10000
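# Worked example: a book returned on 2015-07-05 but due 2015-06-25 is late in
# the same year but a later month, so the fine is 500 * (7 - 6) = 500.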
|
[
"kruthar@gmail.com"
] |
kruthar@gmail.com
|
98f5338c30e8cc19a51612f76bb8ba0ad89b8674
|
fb54704d4a6f9475f42b85d8c470e3425b37dcae
|
/medium/ex402.py
|
70e030c095d74e75664d2cab7759e72aad5eef4e
|
[] |
no_license
|
ziyuan-shen/leetcode_algorithm_python_solution
|
b2784071a94b04e687fd536b57e8d5a9ec1a4c05
|
920b65db80031fad45d495431eda8d3fb4ef06e5
|
refs/heads/master
| 2021-06-27T05:19:47.774044
| 2021-02-04T09:47:30
| 2021-02-04T09:47:30
| 210,991,299
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
from collections import deque
class Solution:
def removeKdigits(self, num: str, k: int) -> str:
if len(num) == k:
return "0"
q = deque([0])
for i in range(1, k + 1):
while q and num[i] < num[q[-1]]:
q.pop()
q.append(i)
ans = str(num[q[0]])
q.popleft()
for i in range(k + 1, len(num)):
while q and num[i] < num[q[-1]]:
q.pop()
q.append(i)
ans += str(num[q[0]])
q.popleft()
return str(int(ans))
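# How this works (note, not part of the original file): the deque maintains a
# sliding-window minimum over windows of size k + 1, so each emitted digit is
# the smallest digit still reachable; int() strips leading zeros. For example:
#   Solution().removeKdigits("1432219", 3)  # -> "1219"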
|
[
"ziyuan.shen@duke.edu"
] |
ziyuan.shen@duke.edu
|
d0b2fdbcbc1ba73c2421753c096f41008cea2e13
|
61050d0d7f0c0a60474e4e85d30be4e5ea7c6b04
|
/vnf/dom/scattering_kernels/IsotropicElasticKernel.py
|
7416c4fba85409320daab5dcb70dafe5be561ba0
|
[] |
no_license
|
danse-inelastic/vnf
|
8173f06f32b4a2fa2b71fddfe0fecf9c19e05e9a
|
be989448577f14f424aca4ce852c7198304ca57b
|
refs/heads/master
| 2021-01-22T01:06:00.294100
| 2015-05-02T23:25:45
| 2015-05-02T23:25:45
| 34,947,878
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# kernel that scatters neutron isotropicall and elastically
from AbstractScatteringKernel import AbstractScatteringKernel as base, TableBase
class IsotropicElasticKernel(base):
# scattering_length = 1.0
def customizeLubanObjectDrawer(self, drawer):
drawer.sequence = ['properties']
# drawer.mold.sequence = ['scattering_length']
return
pass
InvBase = base.Inventory
class Inventory(InvBase):
# scattering_length = InvBase.d.float(name = 'scattering_length', default = 1.)
dbtablename = 'isotropicelastickernels'
pass
IsotropicElasticKernel.Inventory = Inventory
del Inventory
from _ import o2t
IsotropicElasticKernelTable = o2t(
IsotropicElasticKernel,
{'subclassFrom': TableBase},
)
# version
__id__ = "$Id$"
# End of file
|
[
"linjiao@caltech.edu"
] |
linjiao@caltech.edu
|
2a8d5dcb4c882246f14bbb1b9d05dfd6ac54fd4a
|
1cc8ecb740cb5550016bdaf18dab8b2651945ebc
|
/src/common/helpers/getrequest.py
|
9dfcbab4279e3a15c4ccf47d36fd363b48656c94
|
[] |
no_license
|
ShipraShalini/BidEngine
|
2e1b18c9a93e5be25422e3f521d17763d718c7a7
|
a6f28b8de7b0e3d8442f7a5a6ebc06b0b9c19cda
|
refs/heads/master
| 2021-01-10T15:43:20.510985
| 2017-12-12T13:40:43
| 2017-12-12T13:40:43
| 48,623,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
import json
def read_request(request):
user = request.user.get_username()
if request.method == "POST" or request.method == "DELETE" :
data = json.loads(request.body)
item_name = data['item']
try:
amount = data['amount']
except KeyError:
return user, item_name
else:
return user, item_name, amount
if request.method == "GET":
item_name = request.GET.get('item', None)
return user, item_name
if request.method == "PUT":
data = json.loads(request.body)
item_name = data['item']
del data['item']
return user, item_name, data
def values(item):
return item.item_name, item.created_at, item.status, item.seller, item.min_bid, item.sold_to
|
[
"code.shipra@gmail.com"
] |
code.shipra@gmail.com
|
7516b196903db3fd1f64e5811a200d7669055a8a
|
5f46ffd83e844df8e4aa4d8bd495f2653a924cad
|
/sessions/week_2/debug_example.py
|
bea50406ad8b5f1c174190dd63fdca72733e3732
|
[] |
no_license
|
mvwettum/basictrack-2020-2021-2b
|
33605b48a982f91ac84e19f64218b7b16b164175
|
3967efdb9b67aa07f4168f7358503a94eb1c4444
|
refs/heads/master
| 2023-04-03T14:50:25.915630
| 2021-04-22T15:54:30
| 2021-04-22T15:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
current_time_str = input("What is the current time (in hours 0-23)?")
wait_time_str = input("How many hours do you want to wait")
current_time_int = int(current_time_str)
wait_time_int = int(wait_time_str)
final_time_int = current_time_int + wait_time_int
final_answer = final_time_int % 24
print("The time after waiting is: ", final_answer)
|
[
"mail@vincentvelthuizen.com"
] |
mail@vincentvelthuizen.com
|
d3bc6232292463809ee1b24a4047b595b648e6c6
|
8ef5a09d76a11c56963f18e6a08474a1a8bafe3c
|
/leet_code/127. Word Ladder.py
|
e10d37091ad01c1102b61a505f8948cdbf7af7bf
|
[] |
no_license
|
roiei/algo
|
32c4677649c7666db148f6183fbfbf66c8b1969f
|
ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec
|
refs/heads/master
| 2022-04-01T19:21:27.768675
| 2022-02-19T06:15:29
| 2022-02-19T06:15:29
| 169,021,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
import time
from util.util_list import *
from util.util_tree import *
import copy
import collections
from typing import List
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
g = collections.defaultdict(list)
visited = set()
for word in wordList:
for i in range(len(word)):
key = word[:i] + '@' + word[i + 1:]
g[key] += word,
q = [(beginWord, 1)]
diff = 0
while q:
word, diff = q.pop(0)
if word == endWord:
break
for i in range(len(word)):
key = word[:i] + '@' + word[i + 1:]
for v in g[key]:
if v in visited:
continue
q += (v, diff + 1),
visited.add(v)
return diff if word == endWord else 0
stime = time.time()
#print(5 == Solution().ladderLength("hit", "cog", ["hot","dot","dog","lot","log","cog"]))
#print(0 == Solution().ladderLength("hit", "cog", ["hot","dot","dog","lot","log"]))
print(2 == Solution().ladderLength("hot", "dot", ["hot","dot","dog"]))
print('elapsed time: {} sec'.format(time.time() - stime))
|
[
"hyoukjea.son@hyundai.com"
] |
hyoukjea.son@hyundai.com
|
a8268005cae24a2d3f0293db9ac13fef78c391e0
|
1880e6a98d9c7957414392cad17cec7455ec84f6
|
/player71.py
|
62eb5f846a08ed9604bdd6eec8848c22d9236e2e
|
[] |
no_license
|
THABUULAGANATHAN/guviprojects
|
613759b96875005175db308f2dfcdecc355d7894
|
9eca3e84d227984c5e7a3a988d55674ec31dcd05
|
refs/heads/master
| 2022-01-16T17:09:05.390055
| 2019-07-19T12:54:57
| 2019-07-19T12:54:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
n=int(input())
li=[int(i) for i in input().split()]
for i in range(n-1):
if(li[i]<li[i+1]):
print(li[i+1],end=" ")
else:
print(li[i],end=" ")
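# Worked example (illustrative): n = 5 with the list "1 2 3 4 5" prints
# "2 3 4 5", since each step outputs the larger of two adjacent elements.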
|
[
"noreply@github.com"
] |
THABUULAGANATHAN.noreply@github.com
|
5d60548f1170918b905f952667c82e95e24761a3
|
d0df4037ac7cc1d229058ec46400bdb2c83599fb
|
/search_in_rotated_sorted_array_ii.py
|
c3f0fed03cfe683c8848c23429e7f8c015ec5b75
|
[] |
no_license
|
TechPuppies/leetcode-python
|
470505b4217b54ee9e5a7f559079bf684dd4b5d1
|
d3b5ef8ac49ec72213ad7d189f10a2818d7f0a89
|
refs/heads/master
| 2016-09-15T17:29:05.933313
| 2015-01-06T21:26:46
| 2015-01-06T21:26:46
| 25,418,174
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
# coding=utf-8
# AC Rate: 30.7%
# SOURCE URL: https://oj.leetcode.com/problems/search-in-rotated-sorted-array-ii/
#
# Follow up for "Search in Rotated Sorted Array":
# What if duplicates are allowed?
# Would this affect the run-time complexity? How and why?
# Write a function to determine if a given target is in the array.
#
class Solution:
# @param A a list of integers
# @param target an integer
# @return a boolean
    def search(self, A, target):
        # NOTE: the snippet is truncated here in the source; the body below is
        # a standard reconstruction (binary search that shrinks both ends when
        # duplicates hide which half is sorted), not the original author's code.
        lo, hi = 0, len(A) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if A[mid] == target:
                return True
            if A[lo] == A[mid] == A[hi]:
                lo, hi = lo + 1, hi - 1
            elif A[lo] <= A[mid]:
                if A[lo] <= target < A[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:
                if A[mid] < target <= A[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return False
|
[
"xinzhou918@gmail.com"
] |
xinzhou918@gmail.com
|
e41b8aaa05fc038f29ca34d6623ee20b20eee4d9
|
8a8b0267c4db8847a898ac73ccb6e78e1744e24c
|
/Python_Net_Programming/pnp-ex01/sync/client.py
|
828ad8fcea34b0480616031726d433815df0a484
|
[] |
no_license
|
entirelymagic/Link_Academy
|
41ba890df6793924d186ea94dc8d13b0636c6679
|
844c39ff1281fae8406cd1a0dc06afd357f0bef3
|
refs/heads/master
| 2023-06-07T03:17:00.527924
| 2021-07-03T09:59:25
| 2021-07-03T09:59:25
| 314,755,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
import socket
sClient = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sClient.connect(("localhost",8005))
print("Server said: ", sClient.recv(256).decode("utf-8"))
msg = input("msg: ")
sClient.send(bytes(msg,"utf-8"))
print("Server said: ", sClient.recv(256).decode("utf-8"))
sClient.close()
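# NOTE (assumption): a matching TCP server from the same exercise must already
# be listening on localhost:8005 and sending a greeting, otherwise connect()
# raises ConnectionRefusedError.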
|
[
"entirelymagic@gmail.com"
] |
entirelymagic@gmail.com
|
9005c88fe2b68a760c0615bfdf885573b6c96618
|
f24d16c0e064d1f77a09cc02217a6dfe9ee39d56
|
/pipeline/data_process.py
|
90837db91d24a7a228b1eb0243ba1a5a70990875
|
[] |
no_license
|
hbradlow/modelbuilder
|
92ec5fd92527f6989d43212dd6ffd27abcb4738c
|
e537fb37f7331bf50e0ea849bc097b996dbfdbdd
|
refs/heads/master
| 2020-05-20T00:17:37.352732
| 2012-11-13T23:44:11
| 2012-11-13T23:44:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,339
|
py
|
import numpy as np
import scipy.optimize
import math
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
"""
An arrow object for a matplotlib 3d plot.
Code from http://stackoverflow.com/questions/11140163/python-matplotlib-plotting-a-3d-cube-a-sphere-and-a-vector
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
def __repr__(self):
return self._verts3d
class Ellipse:
def __init__(self,origin=[0,0,0],radius=(0,0),angle=0):
self.origin = origin
self.radius = radius
self.angle = angle
self.axis = [0,0,1]
    def __repr__(self):
        return "center: " + str(self.origin) + ", radius: " + str(self.radius)
class Circle:
    # NOTE: this class was missing from the snippet but is used by CircleFit
    # below; reconstructed minimally from its call sites (assumption).
    def __init__(self, origin=[0, 0, 0], radius=0):
        self.origin = origin
        self.radius = radius
        self.axis = [0, 0, 1]
    def __repr__(self):
        return "center: " + str(self.origin) + ", radius: " + str(self.radius)
class Point:
def __init__(self,x=0,y=0,z=0,is_valid=True):
self.x = x
self.y = y
self.z = z
self.is_valid = is_valid
def set(self,l):
self.x = l[0]
self.y = l[1]
self.z = l[2]
def list(self):
return [self.x,self.y,self.z]
class CircleFit:
def __init__(self,points=[]):
self.circle = Circle()
self.points = []
for p in points:
self.points.append(Point(p[0],p[1],p[2]))
self.plane_to_xy_transform = None
self.flatten_transform = None
def process(self):
self.calculate_plane_to_xy_transform()
self.transform_data(self.plane_to_xy_transform)
self.calculate_flatten_transform()
self.transform_data(self.flatten_transform)
self.show()
self.calculate_best_fit_ellipse()
"""
self.transform_data(self.flatten_transform,inverse=True)
self.transform_data(self.plane_to_yz_transform,inverse=True)
"""
def transform_data(self,t,inverse=False):
def transform(t,v):
return np.dot(t,np.array(v)).tolist()
if inverse:
t = np.linalg.inv(np.array(t))
else:
t = np.array(t)
for (index, point) in enumerate(self.points):
self.points[index].set(transform(t,point.list()+[1]))
self.circle.origin = transform(t,self.circle.origin + [1])[0:3]
self.circle.axis = transform(t,self.circle.axis + [0])[0:3]
self.normal = transform(t,self.normal + [0])[0:3]
def best_fit_plane(self):
"""
Find the plane that best fits the set of translations
"""
def zeros(i):
return [0 for a in range(i)]
A = np.array([zeros(3) for j in range(3)])
b = np.array(zeros(3))
for point in self.points:
A = np.add(np.array([ [point.x*point.x, point.x*point.y, point.x],
[point.x*point.y, point.y*point.y, point.y],
[point.x, point.y, 1]]),A)
b = np.add(np.array([point.x*point.z,point.y*point.z,point.z]),b)
x = np.linalg.solve(A,b)
return x
def calculate_plane_to_xy_transform(self):
"""
Calculate the transform to rotate the plane of the circle into the yz plane.
"""
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
x = self.best_fit_plane()
normal = [x[0],x[1],-1]
self.normal = normal
from cgkit.cgtypes import quat, mat3, slerp
axis = np.cross(np.array(normal),np.array([0,0,1]))
angle = math.acos(np.dot(np.array(normal),np.array([0,0,1]))/np.linalg.norm(normal))
q = quat()
q = q.fromAngleAxis(angle,axis.tolist())
transform = [i for i in chunks(q.toMat4().toList(rowmajor=True),4)]
self.plane_to_xy_transform = transform
return transform
def calculate_flatten_transform(self):
"""
Calculate the transform to move all the translation points into the yz plane. Basically just remove the x values.
"""
def ave(l):
return reduce(lambda x,y: x+y,l)/len(l)
a = ave([point.z for point in self.points if point.is_valid])
transform = [ [1,0,0,0],
[0,1,0,0],
[0,0,1,-a],
[0,0,0,1]]
self.flatten_transform = transform
return transform
def calculate_best_fit_ellipse(self):
"""
http://math.stackexchange.com/questions/214661/circle-least-squares-fit
"""
A = []
b = []
def f(b,*args):
det = b[1]**2 - 4*b[0]*b[2]
if det > -.1:
return 999999
total = 0
for point in self.points:
total += np.dot(np.array([point.x**2,point.x*point.y,point.y**2,point.x,point.y,1]),np.array(b))**2
return total
        x = scipy.optimize.fmin(f, (1, 1, 1, 1, 1, 1))  # fmin returns the optimal parameter vector
        self.circle = Circle([0] + x.tolist()[0:2], x.tolist()[2])
self.circle.radius = math.sqrt(self.circle.radius + self.circle.origin[1]**2 + self.circle.origin[2]**2)
self.circle.axis = [1,0,0]
def show(self):
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.art3d as art3d
import matplotlib.patches
fig = plt.figure("Circle Fit")
ax = fig.add_subplot(111,projection="3d",aspect=1)
x = [];y = [];z = []
for point in self.points:
x.append(point.x)
y.append(point.y)
z.append(point.z)
ax.scatter(x,y,z,color="r",s=200)
ax.auto_scale_xyz([-.5, .5], [-.5, .5], [-0, 1])
circle_axis = Arrow3D((0,self.normal[0]),(0,self.normal[1]),(0,self.normal[2]),mutation_scale=20,lw=3,arrowstyle="-|>", color="g")
ax.add_artist(circle_axis)
plt.show()
|
[
"hbradlow@berkeley.edu"
] |
hbradlow@berkeley.edu
|
9ed43354fe92a243dd52ae9e8338df41be2e2346
|
58df224689ab08c99359b1a6077d2fba3728dc61
|
/lamda-ocr/merge-files/borb/toolkit/diff/pdf_diff.py
|
86117069066e1f49a4c64f9246aec7c34fa2a4c9
|
[] |
no_license
|
LIT-Midas/LITHackathon
|
2b286728c156d79d3f426f6d19b160a2a04690db
|
7b990483dd48b91cf3ec3452b78ab67770da71af
|
refs/heads/main
| 2023-08-13T05:22:59.373965
| 2021-08-16T01:09:49
| 2021-08-16T01:09:49
| 395,024,729
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,022
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This class was meant to perform a dictionary/array level comparison of PDF documents.
It makes it a lot easier to debug problems.
"""
import typing
from borb.io.filter.stream_decode_util import decode_stream
from borb.io.read.types import Decimal, Dictionary, List, Name, Stream
from borb.pdf.document import Document
class PDFDiff:
"""
This class was meant to perform a dictionary/array level comparison of PDF documents.
It makes it a lot easier to debug problems.
"""
def __init__(self, pdf_a: Document, pdf_b: Document):
self._document_a: Document = pdf_a
self._document_b: Document = pdf_b
self._already_compared: typing.List[int] = []
self._errors: typing.List[str] = []
def compare(self) -> None:
"""
This method compares the given PDF documents, logging any differences between them.
"""
self._compare(self._document_a, self._document_b, "", "")
@staticmethod
def _get_reference_or_none(obj) -> str:
try:
if obj.get_reference() is not None:
return "(%d 0 R)" % obj.get_reference().object_number
except:
pass
return ""
def _log_difference(self, error_msg: str) -> None:
print(error_msg)
self._errors.append(error_msg)
def _compare(self, a, b, path_to_a, path_to_b) -> None:
if id(a) in self._already_compared:
return
if id(b) in self._already_compared:
return
self._already_compared.append(id(a))
self._already_compared.append(id(b))
# check type
if a.__class__.__name__ != b.__class__.__name__:
self._log_difference(
"Class mismatch : %s %s <--> %s %s"
% (path_to_a, a.__class__.__name__, path_to_b, b.__class__.__name__)
)
if isinstance(a, Name):
if str(a) != str(b):
self._log_difference(
"Name mismatch : %s %s <--> %s %s"
% (path_to_a, str(a), path_to_b, str(b))
)
return
if isinstance(a, Decimal):
if int(a) != int(b):
self._log_difference(
"Value mismatch : %s %s <--> %s %s"
% (path_to_a, str(a), path_to_b, str(b))
)
# get references if they exist
ref_a = PDFDiff._get_reference_or_none(a)
        ref_b = PDFDiff._get_reference_or_none(b)
# compare streams
if isinstance(a, Stream):
decode_stream(a)
decode_stream(b)
if "DecodedBytes" not in a:
self._log_difference("Unable to decode Stream %s" % (path_to_a + ref_a))
if "DecodedBytes" not in b:
self._log_difference("Unable to decode Stream %s" % (path_to_b + ref_b))
dba: bytes = a["DecodedBytes"]
dbb: bytes = b["DecodedBytes"]
if len(dba) != len(dbb):
self._errors.append(
"Stream Length mismatch : %s %d <--> %s %d"
% (path_to_a + ref_a, len(a), path_to_b + ref_b, len(b))
)
else:
for i in range(0, len(dba)):
if dba[i] != dbb[i]:
self._errors.append(
"Stream content mismatch : %s %d <--> %s %d"
% (path_to_a + ref_a, i, path_to_b + ref_b, i)
)
# compare dictionary
if isinstance(a, Dictionary):
for k, v in a.items():
if k == "ID":
continue
if k == "Bytes":
continue
if k == "DecodedBytes":
continue
if isinstance(a, Stream) and k == "Length":
continue
if k not in b:
self._log_difference(
"Key absent/present mismatch : %s %s <--> %s %s"
% (path_to_a + ref_a, str(k), path_to_b + ref_b, None)
)
continue
self._compare(
a[k],
b[k],
path_to_a + "/" + str(k) + ref_a,
path_to_b + "/" + str(k) + ref_b,
)
return
# compare array
if isinstance(a, List):
if len(a) != len(b):
self._errors.append(
"Array Length mismatch : %s %d <--> %s %d"
% (path_to_a + ref_a, len(a), path_to_b + ref_b, len(b))
)
for i in range(0, min(len(a), len(b))):
self._compare(
a[i],
b[i],
path_to_a + ref_a + "/" + str(i),
path_to_b + ref_b + "/" + str(i),
)
return
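# Minimal usage sketch (not from the original module; assumes borb's PDF.loads
# reader, which returns a Document):
#
#   from borb.pdf.pdf import PDF
#   with open("a.pdf", "rb") as fa, open("b.pdf", "rb") as fb:
#       diff = PDFDiff(PDF.loads(fa), PDF.loads(fb))
#       diff.compare()  # differences are printed and collected in diff._errors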
|
[
"trevordino@gmail.com"
] |
trevordino@gmail.com
|
cf6f57b4c05d78d82369ca1c29ef07844bdec546
|
2d98f950a9bc701b360e3fd807bb07b85edabee9
|
/9/认识爬虫-课件-v1/Py爬虫课件/15/example-project/example/pipelines.py
|
4db113535b1e543050b79319adff4a9e45c4bba5
|
[] |
no_license
|
Ran-oops/python_notes2
|
eaf3e98ee460d0d63d2bf8881cacd10916baa902
|
3a1bf86a803c716f4ef4aeec53a69ebb3662cf49
|
refs/heads/master
| 2020-11-30T15:40:53.850721
| 2019-12-28T05:22:49
| 2019-12-28T05:22:49
| 230,429,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/topics/item-pipeline.html
from datetime import datetime
class ExamplePipeline(object):
def process_item(self, item, spider):
item["crawled"] = datetime.utcnow() # 获取utc时间
item["spider"] = spider.name # 爬虫名称
return item
|
[
"3460466167@qq.com"
] |
3460466167@qq.com
|
81fb2fce5bc257958679a04b84d127ffaecb919c
|
c9afaf387faf7c478e860f4ab5f087b254b5b87f
|
/main.py
|
b864b06dab49d5e7afad121326acdff0184725b9
|
[] |
no_license
|
INNFINITEMINDS/FitzHugh-Nagumo-Neuron-Model
|
e3a5c7b7187fe77e575f96c955cb4253d00bf4fb
|
b1afb1745e3773c1ff7913a12ed98679094f0c2c
|
refs/heads/master
| 2022-12-19T17:41:06.466021
| 2020-10-20T10:51:01
| 2020-10-20T10:51:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,582
|
py
|
import numpy as np
from src.model import FNNeuron
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
from scipy.fft import fft
from scipy import signal
def exp1():
exp_plot_dir = 'images/exp1'
dt = 0.001
niter = int(10e4)
b = 0.25
I_ext = 0.1
fn = FNNeuron(
dt,
niter
)
v = np.random.random()
w = np.random.random()
b_ = np.arange(10)*0.1
I_ = np.arange(30)*0.1
for b in tqdm(b_):
for I_ext in I_:
fn.set_b(b)
fn.set_v(v)
fn.set_w(w)
fn.set_I_ext(I_ext)
image_name = 'v_{val_v:.4f}_w_{val_w:.4f}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
val_v = v,
val_w = w,
val_b = b,
val_dt = dt,
val_I = I_ext,
n = niter
)
if not os.path.exists(exp_plot_dir):
os.mkdir(exp_plot_dir)
fn()
fn.plot(os.path.join(exp_plot_dir, image_name))
fn.reset()
def exp2():
exp_plot_dir = 'images/exp2'
dt = 0.001
niter = int(10e4)
b = -0.5
I_ext = 0
fn = FNNeuron(
dt,
niter
)
num_exp = 50
v = np.random.normal(0, 1, num_exp)
w = np.random.normal(0, 1, num_exp)
fig, axes = plt.subplots(1, 1,figsize = (5, 5))
for i in tqdm(range(num_exp)):
fn.set_b(b)
fn.set_v(v[i])
fn.set_w(w[i])
fn.set_I_ext(I_ext)
fn()
axes.plot(fn.v_hist, fn.w_hist)
axes.set_xlabel('voltage')
axes.set_ylabel('recovery variable')
axes.set_title('phase plot')
fn.reset()
if not os.path.exists(exp_plot_dir):
os.mkdir(exp_plot_dir)
image_name = 'case_1a_phase_plot_num_iter_{num}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
num = num_exp,
val_b = b,
val_dt = dt,
val_I = I_ext,
n = niter
)
fig.savefig(os.path.join(exp_plot_dir, image_name))
def exp3():
exp_plot_dir = 'images/exp3'
dt = 0.001
niter = int(10e4)
b = 0
I_ext = 0
fn = FNNeuron(
dt,
niter
)
V = np.arange(-10, 20)*0.1
w = 0
for v in tqdm(V):
fn.set_b(b)
fn.set_v(v)
fn.set_w(w)
fn.set_I_ext(I_ext)
image_name = 'case_1b_v_{val_v:.4f}_w_{val_w:.4f}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
val_v = v,
val_w = w,
val_b = b,
val_dt = dt,
val_I = I_ext,
n = niter
)
exp = 'b_{val_b:.4f}'.format(val_b=b)
dir = os.path.join(exp_plot_dir, exp)
if not os.path.exists(dir):
os.mkdir(os.path.join(dir))
fn()
fn.plot(os.path.join(dir,image_name))
fn.reset()
def exp4():
I_ext = np.arange(115, 285, 2)*0.01
b = 0.4
v = np.random.normal(0, 1)
w = np.random.normal(0, 1)
exp_plot_dir = 'images/exp4'
dt = 0.001
niter = int(10e5)
fn = FNNeuron(
dt,
niter
)
def is_periodic(samples, tol):
m = tol*max(samples)
t = (max(samples) - min(samples)) <= 0.25*max(samples)
return all(m <= d for d in samples) and t
osc_I = []
for I in I_ext:
fn.set_b(b)
fn.set_v(v)
fn.set_w(w)
fn.set_I_ext(I)
image_name = 'case_2a_v_{val_v:.4f}_w_{val_w:.4f}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
val_v = v,
val_w = w,
val_b = b,
val_dt = dt,
val_I = I,
n = niter
)
if not os.path.exists(exp_plot_dir):
os.mkdir(os.path.join(exp_plot_dir))
fn()
peaks, _ = signal.find_peaks(fn.v_hist)
heights = [fn.v_hist[p] for p in peaks]
print('\n')
print(I)
val = is_periodic(heights[1:], 0.75)
print(val)
if val:
osc_I.append(I)
fn.plot(os.path.join(exp_plot_dir, 'phase_'+image_name))
fn.reset()
return osc_I
def exp5():
exp_plot_dir = 'images/exp5'
dt = 0.001
niter = int(10e4)
b = 0.4
I_ext = 1.55
fn = FNNeuron(
dt,
niter
)
num_exp = 50
v = np.random.normal(0, 1, num_exp)
w = np.random.normal(0, 1, num_exp)
fig, axes = plt.subplots(1, 1,figsize = (5, 5))
for i in tqdm(range(num_exp)):
fn.set_b(b)
fn.set_v(v[i])
fn.set_w(w[i])
fn.set_I_ext(I_ext)
fn()
axes.plot(fn.v_hist, fn.w_hist, 'b')
axes.set_xlabel('voltage')
axes.set_ylabel('recovery variable')
axes.set_title('phase plot')
fn.reset()
fn.set_niter(int(10e5))
fn.set_b(b)
fn.set_v(np.random.normal(0, 1))
fn.set_w(np.random.normal(0, 1))
fn.set_I_ext(0.5)
fn(True)
axes.plot(fn.v_hist, fn.w_hist, 'r')
axes.set_xlabel('voltage')
axes.set_ylabel('recovery variable')
axes.set_title('phase plot')
fn.reset()
if not os.path.exists(exp_plot_dir):
os.mkdir(exp_plot_dir)
image_name = 'case_2ba_phase_plot_num_iter_{num}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
num = num_exp,
val_b = b,
val_dt = dt,
val_I = I_ext,
n = niter
)
fig.savefig(os.path.join(exp_plot_dir, image_name))
exp5()
#I = exp4()
#exp3()
#exp2()
#exp1()
|
[
"shandilya.shreyas@gmail.com"
] |
shandilya.shreyas@gmail.com
|
06a47fd3f3d748fea890d46dfe910447204d0544
|
0db19410e9751790af8ce4a0a9332293e379c02f
|
/mmpose/models/heads/heatmap_heads/__init__.py
|
b482216b36f61ceb66aae8974ae178a8455d5022
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpose
|
2c9986521d35eee35d822fb255e8e68486026d94
|
537bd8e543ab463fb55120d5caaa1ae22d6aaf06
|
refs/heads/main
| 2023-08-30T19:44:21.349410
| 2023-07-04T13:18:22
| 2023-07-04T13:18:22
| 278,003,645
| 4,037
| 1,171
|
Apache-2.0
| 2023-09-14T09:44:55
| 2020-07-08T06:02:55
|
Python
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ae_head import AssociativeEmbeddingHead
from .cid_head import CIDHead
from .cpm_head import CPMHead
from .heatmap_head import HeatmapHead
from .mspn_head import MSPNHead
from .vipnas_head import ViPNASHead
__all__ = [
'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead',
'AssociativeEmbeddingHead', 'CIDHead'
]
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
ded383f66dbaa90059ccca1ab9639a978ad264f9
|
78db5bc74181173f2d00bea409997a64b4682adf
|
/venv/lib/python3.9/site-packages/pip/_vendor/chardet/euctwprober.py
|
7dbc136e80b7e704891fc4fdde70bb8b6d72ba56
|
[
"MIT"
] |
permissive
|
CiscoDevNet/meraki-code
|
dfe680f077ebd053a3b663f1434f648f5a91b541
|
d031aab82e3fa5ce7cf57b257fef8c9a4c63d71e
|
refs/heads/master
| 2023-05-28T18:43:28.848983
| 2022-04-11T19:45:19
| 2022-04-11T19:45:19
| 188,288,487
| 67
| 60
|
MIT
| 2023-05-23T00:51:58
| 2019-05-23T18:43:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTW_SM_MODEL
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
super(EUCTWProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
self.distribution_analyzer = EUCTWDistributionAnalysis()
self.reset()
@property
def charset_name(self):
return "EUC-TW"
@property
def language(self):
return "Taiwan"
|
[
"agentle@cisco.com"
] |
agentle@cisco.com
|
36024e8c09241fb2a5405711eeb21edb7a07e067
|
60eb98538025c61cf94a91f6c96f9ee81dcd3fdf
|
/monai/handlers/confusion_matrix.py
|
368aacc6cbe04f1ed17742b90b4cd21c7b41fda1
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
gagandaroach/MONAI
|
167e7746995d4b6136731881e22ad4df333b16a9
|
79b83d9fac41efae9b90ed2f9ad078d6d664bf64
|
refs/heads/master
| 2023-06-02T19:54:47.737846
| 2021-06-24T18:34:02
| 2021-06-24T18:34:02
| 270,741,899
| 0
| 0
|
Apache-2.0
| 2020-06-08T16:29:32
| 2020-06-08T16:29:31
| null |
UTF-8
|
Python
| false
| false
| 3,205
|
py
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from monai.handlers.ignite_metric import IgniteMetric
from monai.metrics import ConfusionMatrixMetric
from monai.metrics.utils import MetricReduction
class ConfusionMatrix(IgniteMetric):
"""
    Compute confusion-matrix-related metrics from a full-size Tensor and collect averages over batch, class channels and iterations.
"""
def __init__(
self,
include_background: bool = True,
metric_name: str = "hit_rate",
output_transform: Callable = lambda x: x,
save_details: bool = True,
) -> None:
"""
Args:
include_background: whether to skip metric computation on the first channel of
the predicted output. Defaults to True.
metric_name: [``"sensitivity"``, ``"specificity"``, ``"precision"``, ``"negative predictive value"``,
``"miss rate"``, ``"fall out"``, ``"false discovery rate"``, ``"false omission rate"``,
``"prevalence threshold"``, ``"threat score"``, ``"accuracy"``, ``"balanced accuracy"``,
``"f1 score"``, ``"matthews correlation coefficient"``, ``"fowlkes mallows index"``,
``"informedness"``, ``"markedness"``]
Some of the metrics have multiple aliases (as shown in the wikipedia page aforementioned),
and you can also input those names instead.
output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
output_transform can be `lambda x: (x["pred"], x["label"])`.
save_details: whether to save metric computation details per image, for example: TP/TN/FP/FN of every image.
default to True, will save to `engine.state.metric_details` dict with the metric name as key.
See also:
:py:meth:`monai.metrics.confusion_matrix`
"""
metric_fn = ConfusionMatrixMetric(
include_background=include_background,
metric_name=metric_name,
compute_sample=False,
reduction=MetricReduction.MEAN,
)
self.metric_name = metric_name
super().__init__(
metric_fn=metric_fn,
output_transform=output_transform,
save_details=save_details,
)
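# Usage sketch (assumption, not part of this file): as with other MONAI Ignite
# metrics, an instance is attached to an ignite Engine, e.g.
#   metric = ConfusionMatrix(metric_name="sensitivity",
#                            output_transform=lambda x: (x["pred"], x["label"]))
#   metric.attach(engine, "sensitivity")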
|
[
"noreply@github.com"
] |
gagandaroach.noreply@github.com
|
1de3d74ecc579a3482423f8897072d74c418249d
|
e8ae11e5017507da59e2e92d423b6a1994490de4
|
/env/lib/python2.7/site-packages/azure/mgmt/network/models/virtual_network.py
|
d04dbdddf6025303d691acc85256825d49980cec
|
[] |
no_license
|
teopeurt/ansible-ubuntu-server
|
613d00cea28bc6531acf4a39aeeb9cd0baa2a391
|
b5b6127d2ee9723c5088443efe2ffb8ae30cfea7
|
refs/heads/master
| 2021-06-28T12:49:50.935753
| 2017-07-31T17:34:33
| 2017-07-31T17:34:33
| 98,912,808
| 0
| 1
| null | 2020-07-24T00:05:31
| 2017-07-31T17:32:56
|
Makefile
|
UTF-8
|
Python
| false
| false
| 3,797
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetwork(Resource):
"""
Virtual Network resource
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource Id
:type id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param address_space: Gets or sets AddressSpace that contains an array of
IP address ranges that can be used by subnets
:type address_space: :class:`AddressSpace
<azure.mgmt.network.models.AddressSpace>`
:param dhcp_options: Gets or sets DHCPOptions that contains an array of
DNS servers available to VMs deployed in the virtual network
:type dhcp_options: :class:`DhcpOptions
<azure.mgmt.network.models.DhcpOptions>`
:param subnets: Gets or sets List of subnets in a VirtualNetwork
:type subnets: list of :class:`Subnet <azure.mgmt.network.models.Subnet>`
:param resource_guid: Gets or sets resource guid property of the
VirtualNetwork resource
:type resource_guid: str
:param provisioning_state: Gets or sets Provisioning state of the
PublicIP resource Updating/Deleting/Failed
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, address_space=None, dhcp_options=None, subnets=None, resource_guid=None, provisioning_state=None, etag=None):
super(VirtualNetwork, self).__init__(id=id, location=location, tags=tags)
self.address_space = address_space
self.dhcp_options = dhcp_options
self.subnets = subnets
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
|
[
"me@teopeurt.com"
] |
me@teopeurt.com
|
1124f2ed2301c3d86e4644f9b6d42729ac140055
|
19a5937501ff40d53b69617d6b05484c2861c54b
|
/apps/urls.py
|
212155a4226cd217323270aab6a2cd9abf0d642c
|
[] |
no_license
|
pombredanne/abyss
|
44319541f614669861157955b5d4059fcf3f8aad
|
8de3f2438ad74ad4d2703ce0bb7ccf7672423820
|
refs/heads/master
| 2020-12-29T19:03:52.168087
| 2013-09-16T13:48:28
| 2013-09-16T13:48:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
from django.conf.urls import patterns, url
from apps import views
urlpatterns = patterns(
'',
url(r'^$', views.ListApp.as_view(), name='list-app'),
url(r'^create/$', views.CreateApp.as_view(), name='create-app'),
url(r'^run/$', views.Run.as_view(), name='run'),
url(r'^(?P<app_name>[\w-]+)/$',
views.AppDetail.as_view(), name='detail-app'),
url(r'^(?P<name>[\w-]+)/remove/$',
views.RemoveApp.as_view(), name='remove_app'),
url(r'^(?P<app_name>[\w-]+)/log/$', views.AppLog.as_view(),
name='app_log'),
url(r'^(?P<app_name>[\w-]+)/env/$', views.AppEnv.as_view(),
name='get-env'),
url(r'^(?P<app_name>[\w-]+)/teams/$',
views.AppTeams.as_view(), name='app-teams'),
url(r'^(?P<app_name>[\w-]+)/team/add/$',
views.AppAddTeam.as_view(), name='app-add-team'),
url(r'^(?P<app_name>[\w-]+)/units/$',
views.ChangeUnit.as_view(), name='change-units'),
)
|
[
"andrewsmedina@gmail.com"
] |
andrewsmedina@gmail.com
|
d6461e3ed1fa74dfbabee2a0c0d5db2f1b055f26
|
c9287937c4d7900d311640a2b16c08c42eedfe58
|
/tensorflow/python/distribute/mirrored_function_strategy.py
|
bbe52984d1eff41f9c4d304dfe927d7e70cfaddd
|
[
"Apache-2.0"
] |
permissive
|
Purplme/tensorflow
|
e868e9bf59cc8eb680f1c35bf0b8615ec2b68c62
|
d2d6c3f07a0b874e64a024c767deb7c9fb39b704
|
refs/heads/master
| 2022-11-23T23:38:00.243591
| 2020-07-16T06:20:19
| 2020-07-16T06:25:23
| 280,074,885
| 2
| 0
|
Apache-2.0
| 2020-07-16T06:39:14
| 2020-07-16T06:39:13
| null |
UTF-8
|
Python
| false
| false
| 7,594
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class MirroredFunctionStrategy implementing tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.util import nest
_replica_index = threading.local()
_replica_id_key = object()
def _replica_id_tensor():
return ops.get_default_graph().capture_call_time_value(
closure=lambda: constant_op.constant(_replica_index.current),
spec=tensor_spec.TensorSpec((), dtypes.int32),
key=_replica_id_key)
def _in_run():
return (hasattr(_replica_index, "current") and
_replica_index.current is not None)
def _outside_run_graph():
if hasattr(_replica_index, "graph_outside_run"):
return _replica_index.graph_outside_run
else:
return None
class MirroredFunctionStrategy(distribute_lib.Strategy):
"""Mirrors vars to distribute across multiple devices and machines.
This strategy uses one replica per device and sync replication for its
multi-GPU version. Unlike `tf.distribute.MirroredStrategy`, it creates a
function for a single replica, and calls that function repeatedly instead of
recording the operations for each replica separately.
"""
def __init__(self, devices=None):
"""Create an instance of `MirroredFunctionStrategy`.
Args:
devices: a list of device strings. If `None`, all available GPUs are
used. If no GPUs are found, CPU is used.
"""
extended = MirroredFunctionExtended(self, devices)
super(MirroredFunctionStrategy, self).__init__(extended)
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class MirroredFunctionExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of MirroredFunctionStrategy."""
def __init__(self, container_strategy, devices):
super(MirroredFunctionExtended, self).__init__(container_strategy)
if devices is None:
devices = mirrored_strategy.all_devices()
if not devices:
raise ValueError("Got an empty `devices` list. Please make sure the "
"`devices` you pass in is not empty.")
device_tuple = tuple(device_util.resolve(d) for d in devices)
assert len(set(device_tuple)) == len(device_tuple), (
"No duplicates allowed in `devices` argument: %s" % (devices,))
self._devices = device_tuple
self._retrace_functions_for_each_device = False
def _call_for_each_replica(self, fn, args, kwargs):
# For now, `fn` must be an @tf.function.
# TODO(josh11b): Relax this restriction? Main problem is if
# (a) executing eagerly, (b) `fn` not @tf.function, and
# (c) executed frequently.
assert isinstance(fn, def_function.Function)
if _outside_run_graph() is not None:
# Nested case, should just use outer function's context for things like
# the current replica index.
# TODO(josh11b): Test this case!
with MirroredFunctionReplicaContext(self._container_strategy()):
results = fn(*nest.map_structure(_unwrap_tensors, args),
**nest.map_structure(_unwrap_tensors, kwargs))
return nest.map_structure(_wrap_tensors, results)
_replica_index.graph_outside_run = ops.get_default_graph()
return_values = []
try:
with MirroredFunctionReplicaContext(self._container_strategy()):
for index, device in enumerate(self._devices):
_replica_index.current = index
with ops.device(device):
if context.executing_eagerly():
# NOTE: These functions need to execute concurrently if they
# use a collective op. This is a particular concern with eager
# execution.
with context.execution_mode(context.ASYNC):
return_values.append(
fn(*distribute_utils.select_replica(index, args),
**distribute_utils.select_replica(index, kwargs)))
else:
return_values.append(
fn(*distribute_utils.select_replica(index, args),
**distribute_utils.select_replica(index, kwargs)))
finally:
_replica_index.graph_outside_run = None
_replica_index.current = None
return distribute_utils.regroup(return_values)
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val.values
return (val,)
class FnMergedValue(object):
def __init__(self, value):
self._value = value
def _wrap_tensors(maybe_tensor):
if isinstance(maybe_tensor, ops.Tensor): # TODO(josh11b): or composite tensor?
return FnMergedValue(maybe_tensor)
return maybe_tensor
def _unwrap_tensors(maybe_wrapped):
if isinstance(maybe_wrapped, FnMergedValue):
return maybe_wrapped._value # pylint: disable=protected-access
return maybe_wrapped
class MirroredFunctionReplicaContext(distribute_lib.ReplicaContext):
"""ReplicaContext used in MirroredFunctionStrategy."""
def __init__(self, strategy):
distribute_lib.ReplicaContext.__init__(self, strategy, None)
@property
def _replica_id_in_sync_group(self):
return _replica_id_tensor()
@_replica_id_in_sync_group.setter
def _replica_id_in_sync_group(self, value):
assert value is None
def _merge_call(self, merge_fn, args, kwargs):
    # We wrap all args/kwargs with tensor values in a class that prevents them
    # from being used by anything other than MirroredFunctionStrategy APIs that
    # have been specifically written to recognize the wrapper and unwrap the
    # values (such as extended.reduce_to/update).
# TODO(josh11b): Should these set expand_composites=True?
args = nest.map_structure(_wrap_tensors, args)
kwargs = nest.map_structure(_wrap_tensors, kwargs)
# pylint: disable=protected-access
distribution_strategy_context._push_per_thread_mode(
distribution_strategy_context._CrossReplicaThreadMode(self._strategy))
try:
results = merge_fn(self._strategy, *args, **kwargs)
finally:
distribution_strategy_context._pop_per_thread_mode()
# pylint: enable=protected-access
return nest.map_structure(_unwrap_tensors, results)
@property
def devices(self):
raise RuntimeError("Can't get the devices for the current replica.")
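

# A hedged usage sketch (not part of the original module). The device list
# and step function below are illustrative assumptions; note that the `fn`
# passed in must already be a tf.function (see _call_for_each_replica), so
# we go through the V1 extended entry point rather than Strategy.run.
if __name__ == "__main__":
  strategy = MirroredFunctionStrategy(devices=["/cpu:0"])

  @def_function.function
  def _step(x):
    # The replica id is a call-time captured tensor, so the same traced
    # function serves every replica instead of retracing per device.
    replica_ctx = distribution_strategy_context.get_replica_context()
    return x * replica_ctx.replica_id_in_sync_group

  per_replica = strategy.extended.call_for_each_replica(
      _step, args=(constant_op.constant(2),))
  print(per_replica)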
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
d29f0e7212d54245a016a4c1738e9a56780420ae
|
f82c474bd7d5f60d976b14432a9f20d5e561ca4c
|
/low_level/tensors/E01_eval_tensors.py
|
4c5de2fabfddd42b36d5709c324d61965ec7b948
|
[] |
no_license
|
GlassyWing/tf-learn
|
1065551e27adf8a3f2b05e540e52d820e6b931d6
|
6733ac86cda430ecce13c8694c8bdfb79e8b70ad
|
refs/heads/master
| 2020-03-27T05:52:51.466385
| 2018-09-03T12:58:54
| 2018-09-03T12:58:54
| 146,059,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
import tensorflow as tf

# Graph mode (TF1): building ops does not compute anything yet.
constant = tf.constant([1, 2, 3])
tensor = constant * constant

sess = tf.Session()
with sess.as_default():
    # Tensor.eval() runs the graph in the current default session.
    print(tensor.eval())  # [1 4 9]
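
# A hedged modern equivalent (assumption: TF 2.x, where tf.Session is gone):
# eager execution evaluates tensors immediately, no default session needed.
#
#   constant = tf.constant([1, 2, 3])
#   print((constant * constant).numpy())  # -> [1 4 9]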
|
[
"1490215053@qq.com"
] |
1490215053@qq.com
|
6734850359cb4971c5ffeac81b4d804ea15b9c6a
|
b8faf65ea23a2d8b119b9522a0aa182e9f51d8b1
|
/vmraid/website/page_renderers/not_permitted_page.py
|
7acaf0baaf57f3b618f32d9267e2b99aa555c797
|
[
"MIT"
] |
permissive
|
vmraid/vmraid
|
a52868c57b1999a8d648441eb9cd05815204345d
|
3c2e2a952003ba7ea2cf13673b9e79e127f4166e
|
refs/heads/main
| 2022-07-29T18:59:28.585133
| 2022-04-22T08:02:52
| 2022-04-22T08:02:52
| 372,473,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
import vmraid
from vmraid import _
from vmraid.utils import cstr
from vmraid.website.page_renderers.template_page import TemplatePage
class NotPermittedPage(TemplatePage):
	def __init__(self, path=None, http_status_code=None, exception=""):
		vmraid.local.message = cstr(exception)
		super().__init__(path=path, http_status_code=http_status_code)
		# Always report 403 Forbidden, whatever status code was passed in.
		self.http_status_code = 403
def can_render(self):
return True
def render(self):
vmraid.local.message_title = _("Not Permitted")
vmraid.local.response["context"] = dict(
indicator_color="red", primary_action="/login", primary_label=_("Login"), fullpage=True
)
self.set_standard_path("message")
return super().render()
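
# Hedged usage sketch (illustrative; the renderer is normally constructed by
# vmraid's page-renderer dispatch rather than by hand):
#
#   page = NotPermittedPage(path="private-page", exception="Login required")
#   page.can_render()   # always True; this renderer is a fallback
#   page.render()       # 403 response using the standard "message" template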
|
[
"sowrisurya@outlook.com"
] |
sowrisurya@outlook.com
|
f1efb7f4fea5ef7e9421f004aabccd95b303d845
|
b345171a3968240caf135b8b9f2780324319cb22
|
/__init__.py
|
98b9499ad471ceefd2c1cd32ad977467eaff0937
|
[] |
no_license
|
pytsite/plugin-auth_google
|
ecd8bb9b8a2d59ed1fe1eb3515d2079b4359f03b
|
f22c90ac560d25c839db9b94cee6339e8681f299
|
refs/heads/master
| 2020-06-18T21:43:35.932709
| 2019-07-12T09:41:45
| 2019-07-12T09:41:45
| 74,938,727
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
"""PytSite Google Authentication Driver Plugin
"""
__author__ = 'Oleksandr Shepetko'
__email__ = 'a@shepetko.com'
__license__ = 'MIT'
# Public API
from . import _error as error
from ._api import get_client_id, get_authorization_url, get_client_secret, get_user_credentials, create_oauth2_flow
def plugin_load_wsgi():
from plugins import auth
from . import _driver
auth.register_auth_driver(_driver.Auth())
|
[
"a@shepetko.com"
] |
a@shepetko.com
|
211e94253365bf19b8a5e01da1a514175bac390e
|
4df948c31bde1b49c110820ecf8a38f949a78f62
|
/vta/python/vta/pkg_config.py
|
30b4808f5e2d8976d70677e197960aa94182db4d
|
[
"Apache-2.0"
] |
permissive
|
jroesch/tvm
|
40b4b8707177e3354c264ce31092721930ced376
|
c2b36154778503a509a70a3b5309b201969eccab
|
refs/heads/master
| 2021-12-19T03:38:13.732405
| 2018-10-22T16:31:59
| 2018-10-22T16:31:59
| 135,759,537
| 4
| 7
|
Apache-2.0
| 2021-06-17T07:22:42
| 2018-06-01T20:15:33
|
C++
|
UTF-8
|
Python
| false
| false
| 2,677
|
py
|
"""VTA Package configuration module
This module is dependency free and can be used to configure package.
"""
from __future__ import absolute_import as _abs
import json
import glob
class PkgConfig(object):
"""Simple package config tool for VTA.
    This is used to provide runtime-specific configurations.
Parameters
----------
cfg : dict
The config dictionary
proj_root : str
Path to the project root
"""
cfg_keys = [
"TARGET",
"HW_FREQ",
"HW_CLK_TARGET",
"HW_VER",
"LOG_INP_WIDTH",
"LOG_WGT_WIDTH",
"LOG_ACC_WIDTH",
"LOG_OUT_WIDTH",
"LOG_BATCH",
"LOG_BLOCK_IN",
"LOG_BLOCK_OUT",
"LOG_UOP_BUFF_SIZE",
"LOG_INP_BUFF_SIZE",
"LOG_WGT_BUFF_SIZE",
"LOG_ACC_BUFF_SIZE",
]
def __init__(self, cfg, proj_root):
# include path
self.include_path = [
"-I%s/include" % proj_root,
"-I%s/vta/include" % proj_root,
"-I%s/3rdparty/dlpack/include" % proj_root,
"-I%s/3rdparty/dmlc-core/include" % proj_root
]
# List of source files that can be used to build standalone library.
self.lib_source = []
self.lib_source += glob.glob("%s/vta/src/*.cc" % proj_root)
self.lib_source += glob.glob("%s/vta/src/%s/*.cc" % (proj_root, cfg["TARGET"]))
# macro keys
self.macro_defs = []
self.cfg_dict = {}
for key in self.cfg_keys:
self.macro_defs.append("-DVTA_%s=%s" % (key, str(cfg[key])))
self.cfg_dict[key] = cfg[key]
self.target = cfg["TARGET"]
if self.target == "pynq":
self.ldflags = [
"-L/usr/lib",
"-lsds_lib",
"-L/opt/python3.6/lib/python3.6/site-packages/pynq/drivers/",
"-L/opt/python3.6/lib/python3.6/site-packages/pynq/lib/",
"-l:libdma.so"]
else:
self.ldflags = []
@property
def cflags(self):
return self.include_path + self.macro_defs
@property
def cfg_json(self):
return json.dumps(self.cfg_dict, indent=2)
    def same_config(self, cfg):
        """Check whether cfg is the same as the current config.
        Parameters
        ----------
        cfg : dict
            The configuration to compare against.
        Returns
        -------
        equal : bool
            Whether the two configurations are the same.
        """
for k, v in self.cfg_dict.items():
if k not in cfg:
return False
if cfg[k] != v:
return False
return True
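

# A hedged usage sketch (not part of the original module). The cfg values
# below are illustrative assumptions; real values come from the VTA config
# json in the project root.
if __name__ == "__main__":
    _cfg = {key: 0 for key in PkgConfig.cfg_keys}
    _cfg["TARGET"] = "sim"
    _cfg["HW_VER"] = "0.0.0"
    pkg = PkgConfig(_cfg, proj_root=".")
    print(pkg.cflags)             # include paths plus -DVTA_* macro defs
    print(pkg.same_config(_cfg))  # True: same keys, same values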
|
[
"tqchen@users.noreply.github.com"
] |
tqchen@users.noreply.github.com
|
f4c54df91cedb4ab534312995cd85ac41bb8b565
|
d4bcb9cc3b6aa9f690be59f630778d512882d34d
|
/ht/conv_jacket.pyi
|
237e03450cf918d66fad5744e2f420949830a545
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
CalebBell/ht
|
ba31cd9a7a2b3cb83599770c81b343ea5c60fc23
|
d9385d98311debcc47def7f5fc093f2e0152d059
|
refs/heads/master
| 2023-08-03T10:45:09.062053
| 2023-07-25T05:36:50
| 2023-07-25T05:36:50
| 48,963,057
| 154
| 38
|
MIT
| 2023-04-20T01:39:50
| 2016-01-03T22:33:12
|
Python
|
UTF-8
|
Python
| false
| false
| 810
|
pyi
|
# DO NOT EDIT - AUTOMATICALLY GENERATED BY tests/make_test_stubs.py!
from typing import List
from typing import Optional
def Lehrer(
m: float,
Dtank: float,
Djacket: float,
H: float,
Dinlet: float,
rho: float,
Cp: float,
k: float,
mu: float,
muw: Optional[float] = ...,
isobaric_expansion: Optional[float] = ...,
dT: Optional[float] = ...,
inlettype: str = ...,
inletlocation: str = ...
) -> float: ...
def Stein_Schmidt(
m: float,
Dtank: float,
Djacket: float,
H: float,
Dinlet: float,
rho: float,
Cp: float,
k: float,
mu: float,
muw: Optional[float] = ...,
rhow: Optional[float] = ...,
inlettype: str = ...,
inletlocation: str = ...,
roughness: float = ...
) -> float: ...
__all__: List[str]
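
# Hedged illustration (not part of the generated stub): these signatures are
# ht's jacketed-vessel heat-transfer correlations, called roughly like
#
#   from ht import Lehrer
#   h = Lehrer(m=2.5, Dtank=1.0, Djacket=1.05, H=1.0, Dinlet=0.025,
#              rho=995., Cp=4178., k=0.615, mu=8.9e-4)
#
# All numeric values above are illustrative assumptions, not test data.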
|
[
"Caleb.Andrew.Bell@gmail.com"
] |
Caleb.Andrew.Bell@gmail.com
|
7744da668bfa14a29636e0a5e6d816ccf32ea932
|
ffc479dadf059388dad3a66c5f4662b113dc6285
|
/basics/psdemospliut.py
|
aae2237e7becdf9c66bfabf64d9db3040c555452
|
[] |
no_license
|
ravijaya/oct15-2020
|
fd87ee4f6aa7f0a63c77c8c470405eff479289b3
|
4fe4d4f2aac1f40349cec831c175652834b17b5d
|
refs/heads/main
| 2022-12-29T09:45:08.837682
| 2020-10-15T12:31:09
| 2020-10-15T12:31:09
| 304,318,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
s = 'root:x:0:0:root:/root:/bin/bash'
items = s.split(':')
print(items)
print()
print(s.split(':')[0]) # indexing
print()
print(s.split(':')[1:]) # slicing
print()
# iterator
for item in s.split(':')[1:]: # iteration
print(item)
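
# A small hedged extension (not in the original demo): str.join is the
# inverse of split, reassembling the fields with the same separator.
print(':'.join(items))  # root:x:0:0:root:/root:/bin/bash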
|
[
"ravi.goglobium@gmail.com"
] |
ravi.goglobium@gmail.com
|
94c538986256e0c9b56cfb18d0def97857f7224a
|
3006ba184fd85d9bfe64a2040683618d7aa24e54
|
/paylogic/settings_base.py
|
fb3a17cdd1544a5df7dd2096072c1f69f5589c59
|
[
"Apache-2.0"
] |
permissive
|
esjee/codereview
|
909ca5ecff6b5436b023c5e4e6872366db1a0c3f
|
a8fc0e0b51be18db387b8b915aeda3f63e37c04f
|
refs/heads/master
| 2021-01-24T03:49:09.315286
| 2014-07-14T11:50:52
| 2014-07-14T11:50:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,616
|
py
|
"""Django settings for django_gae2django project."""
# NOTE: Keep the settings.py in examples directories in sync with this one!
# from settings import *
import re
import os
import statsd
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Admin', 'admin@example.com'),
)
EMAIL_HOST = 'localhost'
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(ROOT, 'static')
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(ROOT, 'static'),
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'some-secret'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'gae2django.middleware.FixRequestUserMiddleware',
# Keep in mind, that CSRF protection is DISABLED!
'rietveld_helper.middleware.DisableCSRFMiddleware',
'rietveld_helper.middleware.AddUserToRequestMiddleware',
'django.middleware.doc.XViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth', # required by admin panel
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.static',
)
ROOT_URLCONF = 'paylogic.urls'
TEMPLATE_DIRS = (
os.path.join(ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django_openid_auth',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.staticfiles',
'django.contrib.messages',
'jquery',
'django_select2',
'gae2django',
'rietveld_helper',
'paylogic',
'codereview',
)
OPENID_CREATE_USERS = True
OPENID_SSO_SERVER_URL = 'https://google.com/accounts/o8/site-xrds?hd=paylogic.eu'
OPENID_USE_AS_ADMIN_LOGIN = False
LOGIN_URL = '/openid/login/'
LOGIN_REDIRECT_URL = '/'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_openid_auth.auth.OpenIDBackend',
)
# Set your DSN value
RAVEN_CONFIG = {
    'dsn': 'https://your-dsn',
}
# Add raven to the list of installed apps
INSTALLED_APPS = INSTALLED_APPS + (
'raven.contrib.django.raven_compat',
)
INTERNAL_IPS = ('127.0.0.1',)
AUTH_PROFILE_MODULE = 'codereview.Account'
LOGIN_REDIRECT_URL = '/'
# This won't work with gae2django.
RIETVELD_INCOMING_MAIL_ADDRESS = None
RIETVELD_REVISION = open(os.path.join(ROOT, 'VERSION')).read().strip()
UPLOAD_PY_SOURCE = os.path.join(ROOT, 'upload.py')
VCS = {
'hg': {
'base_dir': '/var/codereview/hg/',
        'regex': re.compile(r'^((ssh://code\.(?:example\.com)/)?/var/codereview/hg/|hg\+)(.+)$'),
'supports_direct_export': True,
'supports_simple_cloning': True,
'default_branch': 'default',
},
'bzr': {
'base_dir': '/var/codereview/bzr/',
        'regex': re.compile(r'^((ssh://code\.(?:example\.com)/)?/var/codereview/bzr/|bzr\+)(.+)$'),
'supports_direct_export': True,
'supports_simple_cloning': False,
'default_branch': 'trunk',
},
'git': {
'base_dir': '/var/codereview/git/',
        'regex': re.compile(r'^((ssh://code\.(?:example\.com)/)?/var/codereview/git/|git\+)(.+)$'),
'supports_direct_export': False,
'supports_simple_cloning': True,
'default_branch': 'master',
}
}
FEATURE_BRANCH_DEFAULT_PREFIX = 'hg+/var/codereview/hg/users/'
ORIGINAL_BRANCH_DEFAULT_PREFIX = 'hg+/var/hg/codereview/example.com#'
TEMP_FOLDER = '/var/tmp/codereview/'
FOGBUGZ_URL = 'https://fogbugz.example.com'
FOGBUGZ_TOKEN = 'fogbugz-token'
# Override this token in your settings_local.py file in order to use the
# API functions.
API_TOKEN = 'some-token'
FOGBUGZ_MERGEKEEPER_USER_ID = 999
FOGBUGZ_APPROVED_REVISION_FIELD_ID = "plugin_customfields_at_fogcreek_com_approvedxrevision"
FOGBUGZ_TARGET_BRANCH_FIELD_ID = "plugin_customfields_at_fogcreek_com_targetxbranch"
FOGBUGZ_ORIGINAL_BRANCH_FIELD_ID = "plugin_customfields_at_fogcreek_com_originalxbranch"
FOGBUGZ_FEATURE_BRANCH_FIELD_ID = "plugin_customfields_at_fogcreek_com_featurexbranch"
FOGBUGZ_CI_PROJECT_FIELD_ID = "cixproject"
CODEREVIEW_IGNORED_FILES = ['.hg_archival.txt']
CODEREVIEW_MAX_FILE_SIZE = 1024 * 1024
CODEREVIEW_VALIDATORS = [
]
CODEREVIEW_TARGET_BRANCH_CHOICES_GETTER = lambda ci_project, original_branch, branches: []
AUTO_RENDER_SELECT2_STATICS = False
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
},
'loggers': {
}
}
DEFAULT_MAIL_CC = 'fogbugz@example.com'
statsd.Connection.set_defaults(host='localhost', port=8125)
try:
from paylogic.settings_local import * # NOQA
except ImportError:
pass
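
# Hedged illustration (not part of the original settings): how the VCS
# regexes above decompose a repository spec. For the 'hg' entry:
#
#   m = VCS['hg']['regex'].match('hg+/var/codereview/hg/users/alice/repo')
#   m.group(3)  # -> '/var/codereview/hg/users/alice/repo'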
|
[
"bubenkoff@gmail.com"
] |
bubenkoff@gmail.com
|