blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0189a50f15557b57e5dd47c6ad38deada72bbd5f | 08e6b46769aa36da479f29ef345bdb15e5d0d102 | /admin_mysql_purge_binlog.py | 3cd6ca5344c7563aad947373aba1ea153792b032 | [] | no_license | speedocjx/lepus_python | 30a33852efdef5b24402cbe81b8d9798072f9309 | 27d61c154d4cde97e004e3851203420f77a63c5d | refs/heads/master | 2021-06-14T22:37:50.936419 | 2017-03-21T06:18:12 | 2017-03-21T06:18:12 | 70,157,618 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | #!//bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
path='.\include'
sys.path.insert(0,path)
import functions as func
from multiprocessing import Process;
def admin_mysql_purge_binlog(host,port,user,passwd,binlog_store_days):
datalist=[]
try:
connect=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=2,charset='utf8')
cur=connect.cursor()
connect.select_db('information_schema')
master_thread=cur.execute("select * from information_schema.processlist where COMMAND = 'Binlog Dump';")
datalist=[]
if master_thread >= 1:
now=datetime.datetime.now()
delta=datetime.timedelta(days=binlog_store_days)
n_days=now-delta
before_n_days= n_days.strftime('%Y-%m-%d %H:%M:%S')
cur.execute("purge binary logs before '%s'" %(before_n_days));
print ("mysql %s:%s binlog been purge" %(host,port) )
except MySQLdb.Error,e:
pass
print "Mysql Error %d: %s" %(e.args[0],e.args[1])
def main():
    # Purge binlogs on every monitored MySQL server that opted in
    # (binlog_auto_purge=1), spawning one worker process per server so a
    # slow or hung server cannot block the others.
    user = func.get_config('mysql_db','username')
    passwd = func.get_config('mysql_db','password')
    servers=func.mysql_query("select host,port,binlog_store_days from db_servers_mysql where is_delete=0 and monitor=1 and binlog_auto_purge=1;")
    if servers:
        print("%s: admin mysql purge binlog controller started." % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),));
        plist = []
        for row in servers:
            host=row[0]
            port=row[1]
            binlog_store_days=row[2]
            p = Process(target = admin_mysql_purge_binlog, args = (host,port,user,passwd,binlog_store_days))
            plist.append(p)
        for p in plist:
            p.start()
        # Hard 60-second budget for the whole batch: any worker still
        # running after the deadline is killed, then all are reaped.
        time.sleep(60)
        for p in plist:
            p.terminate()
        for p in plist:
            p.join()
        print("%s: admin mysql purge binlog controller finished." % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),))
if __name__=='__main__':
main()
| [
"changjingxiu1@163.com"
] | changjingxiu1@163.com |
3f6f08ef5f468cb2d54278ec26f951ab44045747 | 02508aa773dcbd9939eb879952ee2cb3dd90bcad | /test/test_static_runtime.py | 9b38a5a7e36a8c0e2dea89f3a54fcde58e30a11d | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | dhivyadharshin/pytorch | d8a3b7f3c03e21e776ea34788d13743467b738c8 | 6a170011876bb8bd1909e8f60fba1270ac7a5577 | refs/heads/master | 2023-07-18T07:31:52.918955 | 2021-08-17T18:12:01 | 2021-08-17T18:12:01 | 397,330,616 | 5 | 0 | NOASSERTION | 2021-08-17T18:12:02 | 2021-08-17T16:57:16 | null | UTF-8 | Python | false | false | 12,355 | py | import unittest
from typing import Dict, Optional
import numpy as np
import torch
from torch import nn
from torch.testing._internal.common_utils import TestCase, run_tests
class StaticModule:
    """Thin wrapper that runs a scripted module/graph through Static Runtime."""

    def __init__(self, scripted):
        # A scripted nn.Module carries its C++ module in `_c`; a bare
        # ScriptFunction only exposes its graph.
        source = scripted._c if hasattr(scripted, "_c") else scripted.graph
        self.static_module = torch._C._jit_to_static_module(source)

    def __call__(self, *args, **kwargs):
        # Static Runtime takes positional args as a tuple and, optionally,
        # keyword args as a separate dict.
        if kwargs:
            return self.static_module(args, kwargs)
        return self.static_module(args)

    def benchmark(self, args, kwargs, warmup_runs, main_runs):
        self.static_module.benchmark(args, kwargs, warmup_runs, main_runs)

    def benchmark_individual_ops(self, args, kwargs, warmup_runs, main_runs):
        runner = self.static_module.benchmark_individual_ops
        return runner(args, kwargs, warmup_runs, main_runs)
def linear_shim(
    input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None
) -> torch.Tensor:
    """Reference linear layer: input @ weight.T (+ bias)."""
    result = input.matmul(weight.t())
    if bias is None:
        return result
    return result + bias


# Route every nn.Linear call through the shim above.
torch.nn.functional.linear = linear_shim
class MultiHeadAttentionLayer(nn.Module):
    """Multi-head scaled dot-product attention (dropout and masking disabled)."""

    def __init__(self, hid_dim, n_heads, dropout, device):
        super().__init__()
        # The hidden size must split evenly across the heads.
        assert hid_dim % n_heads == 0
        self.hid_dim = hid_dim
        self.n_heads = n_heads
        self.head_dim = hid_dim // n_heads
        self.fc_q = nn.Linear(hid_dim, hid_dim)
        self.fc_k = nn.Linear(hid_dim, hid_dim)
        self.fc_v = nn.Linear(hid_dim, hid_dim)
        self.fc_o = nn.Linear(hid_dim, hid_dim)
        # self.dropout = nn.Dropout(dropout)
        # Scaling constant sqrt(head_dim), kept as a tensor on the target device.
        self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)

    def forward(self, query, key, value, mask):
        # query/key/value: (batch, seq, hid_dim) — implied by the view() calls
        # below.  `mask` is accepted but unused (masked_fill is commented out).
        batch_size = query.shape[0]
        Q = self.fc_q(query)
        K = self.fc_k(key)
        V = self.fc_v(value)
        # Split the hidden dim into heads: (batch, n_heads, seq, head_dim).
        Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        # Attention scores: Q @ K^T scaled by sqrt(head_dim).
        energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
        # energy = energy.masked_fill(mask == 0, -1e10)
        attention = torch.softmax(energy, dim=-1)
        # x = torch.matmul(self.dropout(attention), V)
        x = torch.matmul(attention, V)
        # Merge the heads back into (batch, seq, hid_dim).
        x = x.permute(0, 2, 1, 3).contiguous()
        x = x.view(batch_size, -1, self.hid_dim)
        x = self.fc_o(x)
        return x, attention
# Taken from https://github.com/facebookresearch/dlrm/blob/master/dlrm_s_pytorch.py
def create_mlp(ln, sigmoid_layer):
    """Build and TorchScript an MLP whose layer widths are given by ``ln``.

    Each Linear gets a normal-distributed init (std sqrt(2/(fan_in+fan_out))
    for weights, sqrt(1/fan_out) for biases).  The activation after layer
    ``sigmoid_layer`` is a Sigmoid; every other activation is a ReLU.
    """
    layers = nn.ModuleList()
    for i, (n, m) in enumerate(zip(ln[:-1], ln[1:])):
        linear = nn.Linear(int(n), int(m), bias=True)
        weight = np.random.normal(0.0, np.sqrt(2 / (m + n)), size=(m, n)).astype(np.float32)
        bias = np.random.normal(0.0, np.sqrt(1 / m), size=m).astype(np.float32)
        linear.weight.data = torch.tensor(weight, requires_grad=True)
        linear.bias.data = torch.tensor(bias, requires_grad=True)
        layers.append(linear)
        layers.append(nn.Sigmoid() if i == sigmoid_layer else nn.ReLU())
    with torch.no_grad():
        scripted = torch.jit.script(torch.nn.Sequential(*layers))
        scripted.eval()
    return scripted
def trivial_graph(a, b, c):
    """Return a + b*c plus a constant 2x2 tensor of threes (scriptable)."""
    threes = torch.tensor([[3, 3], [3, 3]])
    return a + b * c + threes
def loop_graph(a, b, iters: int):
    """Iteratively accumulate: start at a + 2b, then per round add b, double, subtract a."""
    acc = a + b * 2
    for _ in range(iters):
        acc = acc + b
        acc *= 2
        acc -= a
    return acc
def output_graph(a, b, c, iters: int):
    """Return {i: (a + b*c + threes) + i for i in range(iters)} (scriptable)."""
    threes = torch.tensor([[3, 3], [3, 3]])
    base = a + b * c + threes
    result: Dict[int, torch.Tensor] = {}
    for i in range(iters):
        result[i] = base + i
    return result
class SubModule(nn.Module):
    """Adds the constant attributes a + b (= 13) to the input."""

    def __init__(self):
        super(SubModule, self).__init__()
        self.a = 11
        self.b = 2

    def forward(self, x):
        offset = self.a + self.b
        return offset + x
class SubModule2(nn.Module):
    """Writes self.b on every forward, then returns a + b + x."""

    def __init__(self):
        super(SubModule2, self).__init__()
        self.a = 12
        self.b = 2

    def forward(self, x):
        # Attribute write inside forward: exercises prim::SetAttr in Static Runtime.
        self.b = 30
        offset = self.a + self.b
        return offset + x
class TestModule(nn.Module):
    """Composite module mixing its own attribute read/write with two submodules."""

    def __init__(self):
        super(TestModule, self).__init__()
        self.sub1 = SubModule()
        self.sub2 = SubModule2()
        self.a = 3
        self.b = 4

    def forward(self, x):
        # Overwrite self.b before it is read, as SubModule2 does internally.
        self.b = 20
        partial = self.sub1(x) + self.a + self.b
        return partial + self.sub2(x)
class TestStaticModule(TestCase):
    """Checks that Static Runtime execution (and fusion) matches TorchScript results."""

    def test_multihead_attention_layer(self):
        HID_DIM = 256
        QUERY_LEN = 8
        BATCH_SIZE = 128
        LAYERS = 3
        HEADS = 8
        DROPOUT = 0.1
        device = torch.device("cpu")
        attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
        with torch.no_grad():
            src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
        src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
        attention.eval()
        attention = torch.jit.script(attention)
        attention.eval()
        o_ref = attention(src, src, src, src_mask)
        attention_a = StaticModule(attention)
        # Same inputs through Static Runtime, both positionally and via kwargs.
        o_test = attention_a(src, src, src, src_mask)
        o_test_kw = attention_a(src, src, value=src, mask=src_mask)
        for a, b in zip(o_ref, o_test):
            torch.testing.assert_allclose(a, b)
        for a, b in zip(o_ref, o_test_kw):
            torch.testing.assert_allclose(a, b)

    def test_multihead_attention_layer_benchmark(self):
        HID_DIM = 256
        QUERY_LEN = 8
        BATCH_SIZE = 128
        LAYERS = 3
        HEADS = 8
        DROPOUT = 0.1
        device = torch.device("cpu")
        attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
        with torch.no_grad():
            src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
        src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
        attention.eval()
        attention = torch.jit.script(attention)
        attention_a = StaticModule(attention)
        # Smoke-test both benchmark entry points (2 warmup + 2 timed runs).
        attention_a.benchmark([src, src, src, src_mask], {}, 2, 2)
        metrics = attention_a.benchmark_individual_ops(
            [src, src, src, src_mask], {}, 2, 2
        )

    def test_mlp(self):
        # Arguments taken from benchmark script, ./bench/dlrm_s_benchmark.sh
        ln_bot = [512, 512, 64]
        sigmoid_bot = -1
        ln_top = [100, 1024, 1024, 1024, 1]
        sigmoid_top = 3
        bot_l = create_mlp(ln_bot, sigmoid_bot)
        bot_l_acc = StaticModule(bot_l)
        top_l = create_mlp(ln_top, sigmoid_top)
        top_l_acc = StaticModule(top_l)
        with torch.no_grad():
            bot_inp = torch.randn(2048, 512)  # torch.Size([2048, 512])
            top_inp = torch.randn(2048, 100)  # torch.Size([2048, 100])
        ref_bot = bot_l(bot_inp)
        acc_bot = bot_l_acc(bot_inp)[0]
        torch.testing.assert_allclose(acc_bot, ref_bot)
        ref_top = top_l(top_inp)
        acc_top = top_l_acc(top_inp)[0]
        torch.testing.assert_allclose(acc_top, ref_top)
        # Re-run with fresh inputs: exercises Static Runtime's buffer reuse
        # across successive invocations.
        for _ in range(5):
            with torch.no_grad():
                bot_inp = torch.randn(2048, 512)  # torch.Size([2048, 512])
                top_inp = torch.randn(2048, 100)  # torch.Size([2048, 100])
            ref_bot = bot_l(bot_inp)
            acc_bot = bot_l_acc(bot_inp)[0]
            torch.testing.assert_allclose(acc_bot, ref_bot)
            ref_top = top_l(top_inp)
            acc_top = top_l_acc(top_inp)[0]
            torch.testing.assert_allclose(acc_top, ref_top)

    def test_trivial_graph(self):
        s = torch.full((2, 2), 2)
        tg = torch.jit.script(trivial_graph)
        o_ref = tg(s, s, s)
        tg_a = StaticModule(tg)
        o_test = tg_a(s, s, s)[0]
        torch.testing.assert_allclose(o_ref, o_test)

    def test_leaky_relu(self):
        s = torch.randn(5, 5)
        tg = torch.jit.script(nn.LeakyReLU(0.1))
        o_ref = tg(s)
        tg_a = StaticModule(tg)
        o_test = tg_a(s)[0]
        torch.testing.assert_allclose(o_ref, o_test)

    def test_attr(self):
        """
        TorchScript IR of TestModule() after freezing:
        graph(%self : __torch__.test_static_runtime.___torch_mangle_0.TestModule,
              %x.1 : Tensor):
            %18 : int = prim::Constant[value=30]()
            %30 : int = prim::Constant[value=13]()
            %3 : int = prim::Constant[value=20]()
            %2 : int = prim::Constant[value=1]()
            %self.sub2.a : int = prim::Constant[value=12]()
            %self.a : int = prim::Constant[value=3]()
            = prim::SetAttr[name="b"](%self, %3)
            %17 : Tensor = aten::add(%x.1, %30, %2)
            %7 : Tensor = aten::add(%17, %self.a, %2)
            %b.1 : int = prim::GetAttr[name="b"](%self)
            %9 : Tensor = aten::add(%7, %b.1, %2)
            %sub2 : __torch__.test_static_runtime.___torch_mangle_2.SubModule2 = prim::GetAttr[name="sub2"](%self)
            = prim::SetAttr[name="b"](%sub2, %18)
            %b : int = prim::GetAttr[name="b"](%sub2)
            %22 : int = aten::add(%self.sub2.a, %b)
            %23 : Tensor = aten::add(%x.1, %22, %2)
            %12 : Tensor = aten::add(%9, %23, %2)
            return (%12)
        """
        # test prim::SetAttr and prim::GetAttr impl in Static Runtime
        m = TestModule()
        m.eval()
        input = torch.randn(2, 2)
        output_s = m.forward(input)
        ms = torch.jit.script(m)
        sm = StaticModule(ms)
        output_sm = sm(input)[0]
        torch.testing.assert_allclose(output_s, output_sm)
        # Benchmarks with positional and keyword calling conventions.
        sm.benchmark([input], {}, 2, 2)
        sm.benchmark_individual_ops([input], {}, 2, 2)
        sm.benchmark([], {"x": input}, 2, 2)
        sm.benchmark_individual_ops([], {"x": input}, 2, 2)

    @unittest.skip("Temporarily disabled")
    def test_fusion_trivial_graph(self):
        s = torch.full((2, 2), 2)
        tg = torch.jit.script(trivial_graph)
        o_ref = tg(s, s, s)
        # Fusion rewrites the graph in place; results must be unchanged.
        torch._C._fuse_to_static_module(tg.graph)
        assert "StaticSubgraph" in str(tg.graph)
        o_test = tg(s, s, s)
        torch.testing.assert_allclose(o_ref, o_test)

    @unittest.skip("Temporarily disabled")
    def test_fusion_multihead_attention_layer(self):
        HID_DIM = 256
        QUERY_LEN = 8
        BATCH_SIZE = 128
        LAYERS = 3
        HEADS = 8
        DROPOUT = 0.1
        device = torch.device("cpu")
        attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
        with torch.no_grad():
            src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
        src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
        attention.eval()
        attention = torch.jit.script(attention)
        attention.eval()
        o_ref = attention(src, src, src, src_mask)
        torch._C._fuse_to_static_module(attention._c)
        o_test = attention(src, src, src, src_mask)
        for a, b in zip(o_ref, o_test):
            torch.testing.assert_allclose(a, b)

    @unittest.skip("Temporarily disabled")
    def test_fusion_loop(self):
        a = torch.randn(5, 5)
        b = torch.randn(5, 5)
        c = 4
        lg = torch.jit.script(loop_graph)
        o_ref = lg(a, b, c)
        torch._C._fuse_to_static_module(lg.graph)
        assert "StaticSubgraph" in str(lg.graph)
        o_test = lg(a, b, c)
        torch.testing.assert_allclose(o_ref, o_test)

    @unittest.skip("Temporarily disabled")
    def test_fusion_outputs(self):
        a = torch.randn(2, 2)
        b = torch.randn(2, 2)
        c = 4
        og = torch.jit.script(output_graph)
        o_ref = og(a, b, b, c)
        torch._C._fuse_to_static_module(og.graph)
        assert "StaticSubgraph" in str(og.graph)
        o_test = og(a, b, b, c)
        for i in o_ref.keys():
            torch.testing.assert_allclose(o_ref[i], o_test[i])
if __name__ == "__main__":
run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
76428ce31dbddb8a14d11a3a0fa372bea28b6157 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_chart_doughnut01.py | 7048957c7e5110ea5f6179445573456bacf51b58 | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,121 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Reference .xlsx produced by Excel that the generated file must match.
        self.set_filename('chart_doughnut01.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'doughnut'})

        # Column A: category labels; column B: slice values.
        data = [
            [2, 4, 6],
            [60, 30, 10],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$3',
            'values': '=Sheet1!$B$1:$B$3',
        })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        # Byte/structure comparison against the Excel-authored reference.
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
ad358bf4d32aea80191fa3241fe832f390353029 | 41311e8bbed80e1f819157d24d7943c05ba6b2e6 | /ProblemSet1/loadWords/loadWords.py | 3a94e083edcd22587507bf4aeaa2b961c0adee1b | [] | no_license | tanglan2009/MITx6.00.2x_Introductin_Computational_Thinking_and_Data_Science | c0bb39cb0964014661823e1301f05af7837ff3c5 | 334726fca7f87eae55f5f45c3cdc4dbac02cfac4 | refs/heads/master | 2021-01-10T02:49:34.663406 | 2016-03-06T19:49:44 | 2016-03-06T19:49:44 | 53,272,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | import string
PATH_TO_FILE = 'words.txt'
def loadWords():
inFile = open(PATH_TO_FILE, 'r', 0)
line = inFile.readline()
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
loadWords()
# Uncomment the following function if you want to try the code template
def loadWords2():
try:
inFile = open(PATH_TO_FILE, 'r', 0)
#line of code to be added here#
except:
print "The wordlist doesn't exist; using some fruits for now"
return ['apple', 'orange', 'pear', 'lime', 'lemon', 'grape', 'pineapple']
line = inFile.readline()
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
PATH_TO_FILE = 'words2.txt'
loadWords2()
PATH_TO_FILE = 'doesntExist.txt'
loadWords2()
| [
"tanglan2009@gmail.com"
] | tanglan2009@gmail.com |
3c143d64c0a9ee24662b8197f431c8e59eaedb17 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/ui/shared/planet/pinContainers/CommandCenterContainer.py | 0f9f7fe007c59f3f24d139453aa6cabc5bd2683c | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,300 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\shared\planet\pinContainers\CommandCenterContainer.py
from carbonui.primitives.container import Container
from eve.client.script.ui.control.eveLabel import Label, EveLabelSmall
import evetypes
import uiprimitives
import uicontrols
import uix
import carbonui.const as uiconst
import util
import uicls
import blue
import uthread
import const
import uiutil
import eve.client.script.ui.control.entries as listentry
import localization
import eve.common.script.util.planetCommon as planetCommon
from .BasePinContainer import CaptionAndSubtext, BasePinContainer
from .StorageFacilityContainer import StorageFacilityContainer
from .. import planetCommon as planetCommonUI
class CommandCenterContainer(StorageFacilityContainer):
    """Pin-container window for a planetary Command Center pin.

    Extends the storage-facility window with a launch (export) panel, an
    upgrade panel and a colony-wide CPU/power overview.
    """
    __guid__ = 'planet.ui.CommandCenterContainer'
    default_name = 'CommandCenterContainer'
    INFO_CONT_HEIGHT = 110

    def _GetInfoCont(self):
        # Left column: storage capacity, next-launch countdown, transfer cooldown.
        self.storageGauge = uicls.Gauge(parent=self.infoContLeft, value=0.0, color=planetCommonUI.PLANET_COLOR_STORAGE, label=localization.GetByLabel('UI/PI/Common/Capacity'), top=0)
        self.launchTimeTxt = CaptionAndSubtext(parent=self.infoContLeft, caption=localization.GetByLabel('UI/PI/Common/NextLaunchTime'), top=45)
        self.cooldownTimer = CaptionAndSubtext(parent=self.infoContLeft, caption=localization.GetByLabel('UI/PI/Common/NextTransferAvailable'), top=80)
        # Right column: colony CPU/power usage and command center upgrade level.
        self.cpuGauge = uicls.Gauge(parent=self.infoContRight, value=0.0, color=planetCommonUI.PLANET_COLOR_CPU, top=0)
        self.powerGauge = uicls.Gauge(parent=self.infoContRight, value=0.0, color=planetCommonUI.PLANET_COLOR_POWER, top=40)
        self.upgradeLevelGauge = uicls.Gauge(parent=self.infoContRight, value=self._GetUpgradeLevelGaugeValue(), color=planetCommonUI.PLANET_COLOR_CURRLEVEL, backgroundColor=util.Color.GetGrayRGBA(0.5, 0.5), label=localization.GetByLabel('UI/PI/Common/UpgradeLevel'), top=80)
        # Tick marks dividing the gauge into the six possible upgrade levels.
        self.upgradeLevelGauge.ShowMarkers([0.167,
         0.333,
         0.5,
         0.667,
         0.833], color=util.Color.BLACK)

    def _UpdateInfoCont(self):
        # Countdown until the pin may launch again ('Now' once in the past).
        nextLaunchTime = self.pin.GetNextLaunchTime()
        if nextLaunchTime is not None and nextLaunchTime > blue.os.GetWallclockTime():
            nextLaunchTime = util.FmtTime(nextLaunchTime - blue.os.GetWallclockTime())
        else:
            nextLaunchTime = localization.GetByLabel('UI/Common/Now')
        self.launchTimeTxt.SetSubtext(nextLaunchTime)
        self.storageGauge.SetValue(float(self.pin.capacityUsed) / self.pin.GetCapacity())
        self.storageGauge.SetSubText(localization.GetByLabel('UI/PI/Common/StorageUsed', capacityUsed=self.pin.capacityUsed, capacityMax=self.pin.GetCapacity()))
        colony = sm.GetService('planetUI').GetCurrentPlanet().GetColony(self.pin.ownerID)
        if colony is None or colony.colonyData is None:
            raise RuntimeError('Unable to find colony to update info container')
        # CPU gauge: fraction of colony CPU supply in use (clamped to 100%).
        cpuUsage = colony.colonyData.GetColonyCpuUsage()
        cpuSupply = colony.colonyData.GetColonyCpuSupply()
        if cpuSupply > 0:
            percentage = min(1.0, float(cpuUsage) / cpuSupply)
        else:
            percentage = 0.0
        self.cpuGauge.SetValue(percentage)
        self.cpuGauge.SetText(localization.GetByLabel('UI/PI/Common/CPUPercentage', usedPercentage=percentage * 100))
        self.cpuGauge.SetSubText(localization.GetByLabel('UI/PI/Common/CPUUsed', teraFlopsUsed=int(cpuUsage), teraFlopsMax=cpuSupply))
        # Power gauge: same treatment for colony power.
        powerUsage = colony.colonyData.GetColonyPowerUsage()
        powerSupply = colony.colonyData.GetColonyPowerSupply()
        if powerSupply > 0:
            percentage = min(1.0, float(powerUsage) / powerSupply)
        else:
            percentage = 0.0
        self.powerGauge.SetValue(percentage)
        self.powerGauge.SetText(localization.GetByLabel('UI/PI/Common/PowerPercentage', usedPercentage=percentage * 100))
        self.powerGauge.SetSubText(localization.GetByLabel('UI/PI/Common/PowerUsed', megaWattsUsed=int(powerUsage), megaWattsMax=powerSupply))
        self.upgradeLevelGauge.SetValue(self._GetUpgradeLevelGaugeValue())
        # Cooldown until the next intra-planet transfer may run.
        if self.pin.lastRunTime is None or self.pin.lastRunTime <= blue.os.GetWallclockTime():
            self.cooldownTimer.SetSubtext(localization.GetByLabel('UI/Common/Now'))
        else:
            self.cooldownTimer.SetSubtext(util.FmtTime(self.pin.lastRunTime - blue.os.GetWallclockTime()))

    def _GetActionButtons(self):
        # Command-center specific panels, then the base container's buttons.
        btns = [util.KeyVal(id=planetCommonUI.PANEL_UPGRADE, panelCallback=self.PanelUpgrade), util.KeyVal(id=planetCommonUI.PANEL_LAUNCH, panelCallback=self.PanelLaunch), util.KeyVal(id=planetCommonUI.PANEL_STORAGE, panelCallback=self.PanelShowStorage)]
        btns.extend(BasePinContainer._GetActionButtons(self))
        return btns

    def _GetUpgradeLevelGaugeValue(self):
        # Gauge fraction for the current level; level 0 counts as one step.
        currLevel = self.planetUISvc.planet.GetCommandCenterLevel(session.charid)
        return float(currLevel + 1) / (planetCommonUI.PLANET_COMMANDCENTERMAXLEVEL + 1)

    def PanelLaunch(self):
        # Build the launch panel: storehouse contents on top, payload below.
        self.ResetPayloadContents()
        cont = Container(parent=self.actionCont, state=uiconst.UI_HIDDEN)
        topCont = Container(align=uiconst.TOTOP_PROP, height=0.5, parent=cont)
        bottomCont = Container(align=uiconst.TOTOP_PROP, height=0.5, parent=cont)
        self.contentsScroll = uicontrols.Scroll(parent=topCont, name='contentsScroll')
        self.costText = uicontrols.EveLabelMedium(parent=topCont, idx=0, align=uiconst.TOBOTTOM, state=uiconst.UI_DISABLED)
        manipBtns = [[localization.GetByLabel('UI/PI/Common/Add'), self._AddCommodities, None], [localization.GetByLabel('UI/PI/Common/Remove'), self._RemCommodities, None]]
        self.manipBtns = uicontrols.ButtonGroup(btns=manipBtns, parent=topCont, idx=0)
        self.payloadScroll = uicontrols.Scroll(parent=bottomCont, name='payloadScroll')
        self._ReloadScrolls()
        self.countdownCont = Container(parent=bottomCont, pos=(0, 0, 0, 35), align=uiconst.TOTOP, state=uiconst.UI_HIDDEN)
        btns = [[localization.GetByLabel('UI/PI/Common/GoForLaunch'), self._DoLaunch, None], [localization.GetByLabel('UI/PI/Common/ScrubLaunch'), self._CancelLaunch, None]]
        self.launchBtns = uicontrols.ButtonGroup(btns=btns, parent=bottomCont, idx=0)
        return cont

    def _ReloadScrolls(self):
        # Rebuild both scrolls: (icon, type name, quantity) rows, sorted by quantity.
        scrolllist = []
        for typeID, amount in self.contentsCommodities.iteritems():
            data = util.KeyVal()
            data.label = '<t>%s<t>%s' % (evetypes.GetName(typeID), amount)
            data.typeID = typeID
            data.itemID = None
            data.getIcon = True
            sortBy = amount
            scrolllist.append((sortBy, listentry.Get('Item', data=data)))
        scrolllist = uiutil.SortListOfTuples(scrolllist)
        self.contentsScroll.Load(contentList=scrolllist, noContentHint=localization.GetByLabel('UI/PI/Common/StorehouseIsEmpty'), headers=['', localization.GetByLabel('UI/PI/Common/Type'), localization.GetByLabel('UI/Common/Quantity')])
        scrolllist = []
        for typeID, amount in self.payloadCommodities.iteritems():
            data = util.KeyVal()
            data.label = '<t>%s<t>%s' % (evetypes.GetName(typeID), amount)
            data.typeID = typeID
            data.itemID = None
            data.getIcon = True
            sortBy = amount
            scrolllist.append((sortBy, listentry.Get('Item', data=data)))
        scrolllist = uiutil.SortListOfTuples(scrolllist)
        self.payloadScroll.Load(contentList=scrolllist, noContentHint=localization.GetByLabel('UI/PI/Common/PayloadIsEmpty'), headers=['', localization.GetByLabel('UI/PI/Common/Type'), localization.GetByLabel('UI/Common/Quantity')])
        # Export tax preview for the currently selected payload.
        self.costText.text = localization.GetByLabel('UI/PI/Common/LaunchCost', iskAmount=util.FmtISK(self.pin.GetExportTax(self.payloadCommodities)))

    def _DoLaunch(self, *args):
        # Validate, then hand the payload to the pin manager for export.
        if len(self.payloadCommodities) < 1:
            raise UserError('PleaseSelectCommoditiesToLaunch')
        if not self.pin.CanLaunch(self.payloadCommodities):
            raise UserError('CannotLaunchCommandPinNotReady')
        if sm.GetService('planetUI').GetCurrentPlanet().IsInEditMode():
            raise UserError('CannotLaunchInEditMode')
        if len(self.payloadCommodities) == 0:
            return
        sm.GetService('audio').SendUIEvent('wise:/msg_pi_spaceports_launch_play')
        try:
            self.planetUISvc.myPinManager.LaunchCommodities(self.pin.id, self.payloadCommodities)
        except UserError:
            # Launch rejected server-side: restore panel state, then re-raise.
            self.ResetPayloadContents()
            self._ReloadScrolls()
            raise
        finally:
            self._ToggleButtons()
            self._CancelLaunch()

    def _CancelLaunch(self, *args):
        # Rebuild the launch panel from scratch, discarding the staged payload.
        self.ShowPanel(self.PanelLaunch, planetCommonUI.PANEL_LAUNCH)

    def _ToggleButtons(self):
        # Flip visibility of the launch and add/remove button groups.
        if self.launchBtns.state == uiconst.UI_HIDDEN:
            self.launchBtns.state = uiconst.UI_PICKCHILDREN
        else:
            self.launchBtns.state = uiconst.UI_HIDDEN
        if self.manipBtns.state == uiconst.UI_HIDDEN:
            self.manipBtns.state = uiconst.UI_PICKCHILDREN
        else:
            self.manipBtns.state = uiconst.UI_HIDDEN

    def ResetPayloadContents(self):
        # Everything back in the storehouse; payload starts empty.
        self.contentsCommodities = self.pin.GetContents()
        self.payloadCommodities = {}

    def _AddCommodities(self, *args):
        # Move the selected contents rows (entire stacks) into the payload.
        selected = self.contentsScroll.GetSelected()
        toMove = {}
        for entry in selected:
            toMove[entry.typeID] = self.contentsCommodities[entry.typeID]
        for typeID, qty in toMove.iteritems():
            self.contentsCommodities[typeID] -= qty
            if self.contentsCommodities[typeID] <= 0:
                del self.contentsCommodities[typeID]
            if typeID not in self.payloadCommodities:
                self.payloadCommodities[typeID] = 0
            self.payloadCommodities[typeID] += qty
        self._ReloadScrolls()

    def _RemCommodities(self, *args):
        # Inverse of _AddCommodities: selected payload rows go back to contents.
        selected = self.payloadScroll.GetSelected()
        toMove = {}
        for entry in selected:
            toMove[entry.typeID] = self.payloadCommodities[entry.typeID]
        for typeID, qty in toMove.iteritems():
            self.payloadCommodities[typeID] -= qty
            if self.payloadCommodities[typeID] <= 0:
                del self.payloadCommodities[typeID]
            if typeID not in self.contentsCommodities:
                self.contentsCommodities[typeID] = 0
            self.contentsCommodities[typeID] += qty
        self._ReloadScrolls()

    def _DrawStoredCommoditiesIcons(self):
        # Command centers intentionally draw no stored-commodity icons
        # (overrides the StorageFacilityContainer behavior).
        pass

    def PanelUpgrade(self):
        # Build the upgrade panel: level selector bar plus power/CPU previews.
        cont = Container(parent=self.actionCont, state=uiconst.UI_HIDDEN)
        self.currLevel = self.planetUISvc.planet.GetCommandCenterLevel(session.charid)
        self.newLevel = self.currLevel
        self.currPowerOutput = self.pin.GetPowerOutput()
        self.maxPowerOutput = float(planetCommon.GetPowerOutput(level=planetCommonUI.PLANET_COMMANDCENTERMAXLEVEL))
        self.currCPUOutput = self.pin.GetCpuOutput()
        self.maxCPUOutput = float(planetCommon.GetCPUOutput(level=planetCommonUI.PLANET_COMMANDCENTERMAXLEVEL))
        colorDict = {uicls.ClickableBoxBar.COLOR_BELOWMINIMUM: planetCommonUI.PLANET_COLOR_CURRLEVEL,
         uicls.ClickableBoxBar.COLOR_SELECTED: planetCommonUI.PLANET_COLOR_UPGRADELEVEL,
         uicls.ClickableBoxBar.COLOR_UNSELECTED: util.Color.GetGrayRGBA(0.4, alpha=0.7),
         uicls.ClickableBoxBar.COLOR_ABOVEMAXIMUM: (1.0, 0.0, 0.0, 0.25)}
        boxBarCont = Container(parent=cont, align=uiconst.TOTOP, state=uiconst.UI_PICKCHILDREN, height=33)
        # The selectable maximum is capped by the character's upgrade skill.
        upgradeSkill = sm.GetService('skills').GetSkill(const.typeCommandCenterUpgrade)
        upgradeSkillLevel = 0
        if upgradeSkill is not None:
            upgradeSkillLevel = upgradeSkill.skillLevel
        boxBar = uicls.ClickableBoxBar(align=uiconst.CENTERTOP, width=280, height=16, parent=boxBarCont, numBoxes=6, boxValues=range(0, 6), boxWidth=45, boxHeight=14, readonly=False, backgroundColor=(0.0, 0.0, 0.0, 0.0), colorDict=colorDict, minimumValue=self.currLevel + 1, hintformat=None, maximumValue=upgradeSkillLevel, aboveMaxHint=localization.GetByLabel('UI/PI/Common/UpgradeFailedInsufficientSkill', skillName=evetypes.GetName(const.typeCommandCenterUpgrade)))
        boxBar.OnValueChanged = self.OnUpgradeBarValueChanged
        boxBar.OnAttemptBoxClicked = self.OnUpgradeBarBoxClicked
        self.upgradeText = EveLabelSmall(parent=boxBarCont, text=localization.GetByLabel('UI/PI/Common/NoUpgradeSelected'), align=uiconst.CENTERBOTTOM)
        if self.currLevel == planetCommonUI.PLANET_COMMANDCENTERMAXLEVEL:
            # Already maxed: no gauges or upgrade button needed.
            self.upgradeText.SetText(localization.GetByLabel('UI/PI/Common/MaximumUpgradeLevelReached'))
            return cont
        bottomCont = Container(name='bottomCont', align=uiconst.TOTOP, parent=cont, height=50, padTop=16)
        leftBottomCont = Container(name='leftBottomCont', align=uiconst.TOLEFT_PROP, width=0.5, parent=bottomCont)
        rightBottomCont = Container(name='rightBottomCont', align=uiconst.TOLEFT_PROP, width=0.5, parent=bottomCont)
        powerValue = float(self.currPowerOutput) / self.maxPowerOutput
        self.upgradePowerGauge = uicls.GaugeMultiValue(parent=leftBottomCont, value=0.0, colors=[planetCommonUI.PLANET_COLOR_POWER, planetCommonUI.PLANET_COLOR_POWERUPGRADE], values=[powerValue, 0.0], label=localization.GetByLabel('UI/PI/Common/PowerOutput'))
        self.upgradePowerGauge.ShowMarker(value=powerValue, color=util.Color.GetGrayRGBA(0.0, 0.5))
        self.costText = CaptionAndSubtext(parent=leftBottomCont, caption=localization.GetByLabel('UI/Common/Cost'), subtext=localization.GetByLabel('UI/PI/Common/NoCost'), top=42)
        cpuValue = float(self.currCPUOutput) / self.maxCPUOutput
        self.upgradeCPUGauge = uicls.GaugeMultiValue(parent=rightBottomCont, colors=[planetCommonUI.PLANET_COLOR_CPU, planetCommonUI.PLANET_COLOR_CPUUPGRADE], values=[cpuValue, 0.0], label=localization.GetByLabel('UI/PI/Common/CpuOutput'))
        self.upgradeCPUGauge.ShowMarker(value=cpuValue, color=util.Color.GetGrayRGBA(0.0, 0.5))
        btns = [(localization.GetByLabel('UI/PI/Common/Upgrade'), self._ApplyUpgrade, None)]
        btnGroup = uicontrols.ButtonGroup(btns=btns, parent=cont, line=False, alwaysLite=True)
        self.upgradeButton = btnGroup.GetBtnByLabel(localization.GetByLabel('UI/PI/Common/Upgrade'))
        # Disabled until a valid target level is picked on the bar.
        self.upgradeButton.Disable()
        return cont

    def OnUpgradeBarValueChanged(self, oldValue, newValue):
        # Preview the selected target level: skill check, gauges and ISK cost.
        self.newLevel = newValue
        txt = localization.GetByLabel('UI/PI/Common/UpgradeFromLevelXToY', currLevel=util.IntToRoman(self.currLevel + 1), newLevel=util.IntToRoman(self.newLevel + 1))
        skill = sm.GetService('skills').GetSkill(const.typeCommandCenterUpgrade)
        commandCenterSkillLevel = 0
        if skill is not None:
            commandCenterSkillLevel = skill.skillLevel
        if commandCenterSkillLevel < newValue:
            hint = localization.GetByLabel('UI/PI/Common/NeedSkillToUpgrade', skillLevel=util.IntToRoman(newValue), skillName=evetypes.GetName(const.typeCommandCenterUpgrade))
            txt = localization.GetByLabel('UI/PI/Common/InsufficientSkillForUpgrade')
            self.upgradeButton.Disable()
        else:
            hint = ''
            self.upgradeButton.Enable()
        self.upgradeText.SetText(txt)
        self.upgradeText.hint = hint
        newPowerOutput = planetCommon.GetPowerOutput(self.newLevel)
        self.upgradePowerGauge.SetValue(gaugeNum=1, value=newPowerOutput / self.maxPowerOutput)
        self.upgradePowerGauge.hint = self._GetPowerGaugeHint(newPowerOutput)
        self._SetPowerGaugeSubText(newPowerOutput)
        newCPUOutput = planetCommon.GetCPUOutput(self.newLevel)
        self.upgradeCPUGauge.SetValue(gaugeNum=1, value=newCPUOutput / self.maxCPUOutput)
        self.upgradeCPUGauge.hint = self._GetCPUGaugeHint(newCPUOutput)
        self._SetCPUGaugeSubText(newCPUOutput)
        iskCost = util.FmtISK(planetCommon.GetUpgradeCost(self.currLevel, self.newLevel), showFractionsAlways=0)
        self.costText.SetSubtext(iskCost)

    def _SetPowerGaugeSubText(self, newPowerOutput):
        # Show the power delta the selected upgrade would add.
        diff = newPowerOutput - self.currPowerOutput
        subText = '+%s MW' % diff
        self.upgradePowerGauge.SetSubText(subText)

    def _GetPowerGaugeHint(self, newOutput):
        return localization.GetByLabel('UI/PI/Common/UpgradeHintPower', current=self.currPowerOutput, after=newOutput)

    def _GetCPUGaugeHint(self, newOutput):
        return localization.GetByLabel('UI/PI/Common/UpgradeHintCPU', current=self.currCPUOutput, after=newOutput)

    def _SetCPUGaugeSubText(self, newCPUOutput):
        # Show the CPU delta the selected upgrade would add.
        diff = newCPUOutput - self.currCPUOutput
        subText = localization.GetByLabel('UI/PI/Common/CPUAdded', teraFlops=diff)
        self.upgradeCPUGauge.SetSubText(subText)

    def OnUpgradeBarBoxClicked(self, oldValue, newValue):
        # All clicks on the level bar are accepted; validation happens in
        # OnUpgradeBarValueChanged.
        return True

    def _ApplyUpgrade(self, *args):
        # Commit the selected upgrade level server-side and close the panel.
        self.planetUISvc.planet.UpgradeCommandCenter(self.pin.id, self.newLevel)
        sm.GetService('audio').SendUIEvent('wise:/msg_pi_upgrade_play')
        self.HideCurrentPanel()
| [
"le02005@163.com"
] | le02005@163.com |
e728f33e5d0da7256dab52088914b8554fcfb53b | 2fb0af0a30e3133ef4c5e649acd3f9911430062c | /src/otp/level/ZoneEntity.py | dd4b697f834eeffe4a6e4330b73a50fe508dda0f | [] | no_license | Teku16/Toontown-Crystal-Master | 4c01c0515f34a0e133441d2d1e9f9156ac267696 | 77a9345d52caa350ee0b1c7ad2b7461a3d6ed830 | refs/heads/master | 2020-05-20T06:02:58.106504 | 2015-07-25T07:23:59 | 2015-07-25T07:23:59 | 41,053,558 | 0 | 1 | null | 2015-08-19T18:51:11 | 2015-08-19T18:51:11 | null | UTF-8 | Python | false | false | 1,244 | py | import ZoneEntityBase
import BasicEntities
class ZoneEntity(ZoneEntityBase.ZoneEntityBase, BasicEntities.NodePathAttribs):
def __init__(self, level, entId):
ZoneEntityBase.ZoneEntityBase.__init__(self, level, entId)
self.nodePath = self.level.getZoneNode(self.entId)
if self.nodePath is None:
self.notify.error('zone %s not found in level model' % self.entId)
BasicEntities.NodePathAttribs.initNodePathAttribs(self, doReparent=0)
self.visibleZoneNums = {}
self.incrementRefCounts(self.visibility)
def destroy(self):
BasicEntities.NodePathAttribs.destroy(self)
ZoneEntityBase.ZoneEntityBase.destroy(self)
def getNodePath(self):
return self.nodePath
def getVisibleZoneNums(self):
return self.visibleZoneNums.keys()
def incrementRefCounts(self, zoneNumList):
for zoneNum in zoneNumList:
self.visibleZoneNums.setdefault(zoneNum, 0)
self.visibleZoneNums[zoneNum] += 1
def decrementRefCounts(self, zoneNumList):
for zoneNum in zoneNumList:
self.visibleZoneNums[zoneNum] -= 1
if self.visibleZoneNums[zoneNum] == 0:
del self.visibleZoneNums[zoneNum] | [
"vincentandrea15k@gmail.com"
] | vincentandrea15k@gmail.com |
cf1f4af51afa41b4dec936aee3e234c05d0c1381 | fbf8bbc67ee98632531bb79b0353b536427d7572 | /variables_and_scope/exe1_function.py | 6687c59c3573e3f5e48fd39f5c3f3b5cafce1c0a | [] | no_license | bartoszmaleta/3rd-Self-instructed-week | c0eea57a8b077d91fe09fe53c1109d3a79e3f37c | 4d00306e64ba2f7c2dd8213fd776ce8d3da142fc | refs/heads/master | 2020-08-20T22:31:17.675733 | 2019-10-28T12:24:22 | 2019-10-28T12:24:22 | 216,073,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def my_funcion(a):
b = a - 2
# print(b)
return b
c = 1
if c > 2:
d = my_funcion(5)
print(d)
# my_funcion(5)
| [
"bartosz.maleta@gmail.com"
] | bartosz.maleta@gmail.com |
6c365f68c5d1ed3c3e601bb023af3f91a5b78b92 | 14b44aa2b73fb3df08c9c085219ebfd320d5d63a | /register_service/venv/bin/jp.py | be778c22379e6ca2900eb32e774e96139bfddb56 | [] | no_license | sande2jm/CI-CD-Service | c46f95f380872e9aca02d5c5e5a88578ba6e88b0 | 34535e69a3c39a78cd1d1ca785587d5e78a03580 | refs/heads/master | 2020-03-27T02:16:00.843764 | 2018-08-25T00:28:32 | 2018-08-25T00:28:32 | 145,778,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | #!/Users/jacob/Desktop/_ML/CI-CD-Service/venv/bin/python3
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
parser.add_argument('-f', '--filename',
help=('The filename containing the input data. '
'If a filename is not given then data is '
'read from stdin.'))
parser.add_argument('--ast', action='store_true',
help=('Pretty print the AST, do not search the data.'))
args = parser.parse_args()
expression = args.expression
if args.ast:
# Only print the AST
expression = jmespath.compile(args.expression)
sys.stdout.write(pformat(expression.parsed))
sys.stdout.write('\n')
return 0
if args.filename:
with open(args.filename, 'r') as f:
data = json.load(f)
else:
data = sys.stdin.read()
data = json.loads(data)
try:
sys.stdout.write(json.dumps(
jmespath.search(expression, data), indent=4))
sys.stdout.write('\n')
except exceptions.ArityError as e:
sys.stderr.write("invalid-arity: %s\n" % e)
return 1
except exceptions.JMESPathTypeError as e:
sys.stderr.write("invalid-type: %s\n" % e)
return 1
except exceptions.UnknownFunctionError as e:
sys.stderr.write("unknown-function: %s\n" % e)
return 1
except exceptions.ParseError as e:
sys.stderr.write("syntax-error: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main())
| [
"sande2jm@gmail.com"
] | sande2jm@gmail.com |
08b8127958744b568e48462f5aed02e97b1ddad2 | 62bbef9472f343adea9804e29f403798434455df | /octaveimp/dictgen.py | 3a9e93641d4d9b0d46bbce71a0195c6000186de2 | [] | no_license | drdhaval2785/SamaasaClassification | d46658abce7ea7d7b6c89522ecc22a9d4391c011 | f71be804d2b6fb0ec370d0917adf1a58079df550 | refs/heads/master | 2020-12-24T08:55:08.230647 | 2016-08-07T09:17:39 | 2016-08-07T09:17:39 | 38,927,628 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,408 | py | # -*- coding: utf-8 -*-
import sys, re
import codecs
import string
import datetime
"""
Usage - python dictgen.py step2.csv step3.csv dict.txt class.txt
Creates a dictionary and index of unique words in step2.csv.
The replacements are stored in step3.csv.
dict.txt file has the dictionary.
class.txt file has unique classes.
"""
# Function to return timestamp
def timestamp():
return datetime.datetime.now()
def readcsv(csvfile):
output = []
for line in open(csvfile):
line = line.strip()
word,micro,macro = line.split(',')
output.append((word,micro,macro))
return output
def writedict(readcsvdata,dictfile):
output = []
diction = codecs.open(dictfile,'w','utf-8')
for (word,micro,macro) in readcsvdata:
output += word.split('-')
output = list(set(output))
diction.write('\n'.join(output))
diction.close()
def findindex(word,diction):
lendict = xrange(len(diction))
for i in lendict:
line = diction[i].strip()
if word == line:
return i
else:
return 0
def repdict(readcsvdata,step3file,dictfile,classfile):
step3 = codecs.open(step3file,'w','utf-8')
diction = codecs.open(dictfile,'r','utf-8').readlines()
log = codecs.open('log.txt','a','utf-8')
log.write('==========More than two parts in compound==========\n')
counter = 0
classtypes = []
for (word,micro,macro) in readcsvdata:
classtypes.append(macro)
classfout = codecs.open(classfile,'w','utf-8')
classtypes = list(set(classtypes))
classfout.write('\n'.join(classtypes))
classfout.close()
for (word,micro,macro) in readcsvdata:
wordsplit = word.split('-')
if len(wordsplit) == 2:
counter += 1
word1, word2 = word.split('-')
ind1 = findindex(word1,diction)
ind2 = findindex(word2,diction)
classrep = classtypes.index(macro)
step3.write(str(ind1)+','+str(ind2)+','+str(classrep)+'\n')
if counter % 100 == 0:
print counter
else:
log.write(word+','+micro+','+macro+'\n')
log.close()
step3.close()
if __name__=="__main__":
fin = sys.argv[1]
fout = sys.argv[2]
dictfile = sys.argv[3]
classfile = sys.argv[4]
readcsvdata = readcsv(fin)
print len(readcsvdata), "entries in step2.csv"
writedict(readcsvdata,dictfile)
repdict(readcsvdata,fout,dictfile,classfile)
step3data = codecs.open(fout,'r','utf-8').readlines()
print len(step3data), "entries in step3.csv"
classtypes = codecs.open(classfile,'r','utf-8').readlines()
print len(classtypes), "types of class in data" | [
"drdhaval2785@gmail.com"
] | drdhaval2785@gmail.com |
687ff0dd4534bcef0bd6a6f15d062ca2605815a3 | 83319338fd174cbbbc53e77ee4f5e8c959dd1efa | /Userregistration/settings.py | 258566225e98c1fe3b2909ebfa60f885aa33c384 | [] | no_license | syedarfa459/Django-User-Login | 9959a05ac03f14be79e90d929c6021a304665566 | 724aa4c3d98590731929be848f80e32005eb72c1 | refs/heads/master | 2023-01-02T10:18:54.366768 | 2020-10-29T05:43:22 | 2020-10-29T05:43:22 | 308,228,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,153 | py | """
Django settings for Userregistration project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e*s%@9_i3lr4*i8*eylfuh=@#quyao9po(!v5pcy+899*-hk)5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'userloginsystem.apps.UserloginsystemConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Userregistration.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Userregistration.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"arfashahji@gmail.com"
] | arfashahji@gmail.com |
0052186786f9135544da4bbd4cbbd0182d70d987 | 2c9db62ddaffd77c097b3da4990021270912ea40 | /프로그래머스/42626.py | 0fee2888d34a12c55c57058eb42adda113a2d477 | [] | no_license | nahyun119/algorithm | 9ae120fbe047819a74e06fc6879f55405bc9ea71 | 40e291305a4108266073d489e712787df1dbae4b | refs/heads/master | 2023-08-27T23:18:44.133748 | 2021-10-03T11:32:12 | 2021-10-03T11:32:12 | 326,661,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | import heapq
def solution(scoville, K):
answer = 0
q = []
for s in scoville:
heapq.heappush(q, s)
is_done = False
while q:
s = heapq.heappop(q)
if not q:
if s >= K:
is_done = True
break
if s < K: # 맨처음 원소가 작다면
s2 = heapq.heappop(q)
heapq.heappush(q, s + s2 * 2)
answer += 1
else:
is_done = True
break
if not is_done:
return -1
return answer | [
"nahyun858@gmail.com"
] | nahyun858@gmail.com |
fae041b6f545357627db8a4e4d1117a61ad3b850 | 507daab36fdc1be0008d5dbcdb4402e299f6da8a | /mysite/mysite/urls.py | 461a1e33e3c7ee75eabf8812862d0bc858b88bf1 | [] | no_license | atmosphere1365/mastered | 057cf80b9969bfba690aef4009e4f6b39703471d | b777728eb656b4fc209c1cb2592ed35fc0864b83 | refs/heads/master | 2020-09-16T09:35:50.941166 | 2019-11-24T11:24:43 | 2019-11-24T11:24:43 | 223,729,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('grappelli/', include('grappelli.urls')), # grappelli URLS
path('admin/', admin.site.urls),
path('', include('mainApp.urls')),
path('007', include('blog.urls')),
path('news/', include('news.urls')),
] | [
"you@example.com"
] | you@example.com |
6f1a8f80c6cc21deb42ee605ff08484974623be8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_interviewed.py | 661432027101ffd3f901f39af597b5a42f7dd1a0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
from xai.brain.wordbase.verbs._interview import _INTERVIEW
#calss header
class _INTERVIEWED(_INTERVIEW, ):
def __init__(self,):
_INTERVIEW.__init__(self)
self.name = "INTERVIEWED"
self.specie = 'verbs'
self.basic = "interview"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
3e0526a246b75f146753bdf4fc42abc23114aa4e | c87a510d79eb216a4602c8dac5165fb8f954b297 | /trunk/models/system.py | fd362ef1dee2a3e05bcc36c7807ef1d3f2513188 | [] | no_license | hanjiangfly/python-admin | cf77adfbb0a3ce870c3668a02b5314a7c8393748 | df69d300e4027b772b3fc839e5eb6f79e378ac53 | refs/heads/master | 2022-06-19T10:49:01.990119 | 2020-05-06T09:22:54 | 2020-05-06T09:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,525 | py | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
@Description: 系统相关的几张表结构
@Author: Zpp
@Date: 2019-09-05 15:57:55
@LastEditTime: 2020-05-06 16:28:05
@LastEditors: Zpp
'''
from models import db
import datetime
import json
InterfaceToMenu = db.Table(
'db_interface_to_menu',
db.Column('menu_id', db.String(36), db.ForeignKey('db_menu.menu_id', ondelete='CASCADE')),
db.Column('interface_id', db.String(36), db.ForeignKey('db_interface.interface_id', ondelete='CASCADE'))
)
class Admin(db.Model):
'''
管理员
'''
__tablename__ = 'db_admin'
id = db.Column(db.Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
admin_id = db.Column(db.String(36), index=True, nullable=False, unique=True)
username = db.Column(db.String(64), index=True, nullable=False, unique=True)
password = db.Column(db.String(32), nullable=False)
nickname = db.Column(db.String(64))
email = db.Column(db.String(255))
sex = db.Column(db.SmallInteger, default=1)
avatarUrl = db.Column(db.String(255))
is_disabled = db.Column(db.Boolean, index=True, default=False)
create_time = db.Column(db.DateTime, index=True, default=datetime.datetime.now)
update_time = db.Column(db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
role_id = db.Column(db.String(36), db.ForeignKey('db_role.role_id', ondelete='CASCADE'))
__table_args__ = {
'useexisting': True,
'mysql_engine': 'InnoDB'
}
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.admin_id)
def to_json(self):
dict = self.__dict__
if "_sa_instance_state" in dict:
del dict["_sa_instance_state"]
if "update_time" in dict:
dict["update_time"] = dict["update_time"].strftime('%Y-%m-%d %H:%M:%S')
if "create_time" in dict:
dict["create_time"] = dict["create_time"].strftime('%Y-%m-%d %H:%M:%S')
return dict
def __repr__(self):
return '<admin %r>' % self.username
class LoginLock(db.Model):
'''
登录锁定
'''
__tablename__ = 'db_login_lock'
id = db.Column(db.Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
lock_id = db.Column(db.String(36), index=True, nullable=False, unique=True)
user_id = db.Column(db.String(36), index=True, nullable=False)
flag = db.Column(db.Boolean, index=True, default=False) # 是否锁定
number = db.Column(db.Integer, primary_key=True, default=0)
ip = db.Column(db.String(36), index=True)
lock_time = db.Column(db.DateTime)
__table_args__ = {
'useexisting': True,
'mysql_engine': 'InnoDB'
}
def to_json(self):
dict = self.__dict__
if "_sa_instance_state" in dict:
del dict["_sa_instance_state"]
if "lock_time" in dict:
dict["lock_time"] = dict["lock_time"].strftime('%Y-%m-%d %H:%M:%S')
return dict
def __repr__(self):
return '<LoginLock %r>' % self.lock_id
class Role(db.Model):
'''
权限
'''
__tablename__ = 'db_role'
id = db.Column(db.Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
role_id = db.Column(db.String(36), index=True, nullable=False, unique=True)
name = db.Column(db.String(64), nullable=False, unique=True)
mark = db.Column(db.String(64), nullable=False, unique=True)
is_disabled = db.Column(db.Boolean, index=True, default=False)
role_list = db.Column(db.Text)
admins = db.relationship('Admin', backref='role')
__table_args__ = {
'useexisting': True,
'mysql_engine': 'InnoDB'
}
def to_json(self):
dict = self.__dict__
if "_sa_instance_state" in dict:
del dict["_sa_instance_state"]
if "role_list" in dict:
del dict["role_list"]
return dict
def __repr__(self):
return '<Role %r>' % self.name
class Menu(db.Model):
'''
菜单
'''
__tablename__ = 'db_menu'
id = db.Column(db.Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
menu_id = db.Column(db.String(36), index=True, nullable=False, unique=True)
pid = db.Column(db.String(36), nullable=False, index=True, default='0')
name = db.Column(db.String(64), index=True, nullable=False, unique=True)
title = db.Column(db.String(64), nullable=False, unique=True)
path = db.Column(db.String(255), nullable=False, unique=True)
icon = db.Column(db.String(255), nullable=False)
mark = db.Column(db.String(255), nullable=False, unique=True)
component = db.Column(db.String(255), nullable=False)
componentPath = db.Column(db.String(255), nullable=False)
cache = db.Column(db.Boolean, index=True, default=True)
sort = db.Column(db.SmallInteger, index=True, default=1)
is_disabled = db.Column(db.Boolean, index=True, default=False)
interfaces = db.relationship('Interface',
secondary=InterfaceToMenu,
backref=db.backref('db_interface', lazy='dynamic'),
lazy='dynamic')
__table_args__ = {
'useexisting': True,
'mysql_engine': 'InnoDB'
}
def to_json(self):
dict = self.__dict__
if "_sa_instance_state" in dict:
del dict["_sa_instance_state"]
return dict
def __repr__(self):
return '<Menu %r>' % self.title
class Interface(db.Model):
'''
接口
'''
__tablename__ = 'db_interface'
id = db.Column(db.Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
interface_id = db.Column(db.String(36), index=True, nullable=False, unique=True)
name = db.Column(db.String(64), index=True, nullable=False, unique=True)
path = db.Column(db.String(255), nullable=False, unique=True)
method = db.Column(db.String(36), nullable=False)
description = db.Column(db.String(255), nullable=False)
mark = db.Column(db.String(255), nullable=False, unique=True)
is_disabled = db.Column(db.Boolean, index=True, default=False)
forbidden = db.Column(db.Boolean, index=True, default=True)
__table_args__ = {
'useexisting': True,
'mysql_engine': 'InnoDB'
}
def to_json(self):
dict = self.__dict__
if "_sa_instance_state" in dict:
del dict["_sa_instance_state"]
return dict
def __repr__(self):
return '<Interface %r>' % self.name
class Document(db.Model):
'''
附件
'''
__tablename__ = 'db_document'
id = db.Column(db.Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
document_id = db.Column(db.String(36), index=True, nullable=False, unique=True)
admin_id = db.Column(db.String(36), index=True, nullable=False)
name = db.Column(db.String(64), index=True, nullable=False)
path = db.Column(db.String(255), nullable=False)
status = db.Column(db.SmallInteger, index=True, default=1) # 1=图片 2=附件 (其他的自己定义了)
ext = db.Column(db.String(64), nullable=False)
size = db.Column(db.Integer, nullable=False)
deleted = db.Column(db.Boolean, index=True, default=False) # True = 回收站
create_time = db.Column(db.DateTime, index=True, default=datetime.datetime.now)
folder_id = db.Column(db.String(36), db.ForeignKey('db_folder.folder_id', ondelete='CASCADE'))
__table_args__ = {
'useexisting': True,
'mysql_engine': 'InnoDB'
}
def to_json(self):
dict = self.__dict__
if "_sa_instance_state" in dict:
del dict["_sa_instance_state"]
if "create_time" in dict:
dict["create_time"] = dict["create_time"].strftime('%Y-%m-%d %H:%M:%S')
return dict
def __repr__(self):
return '<Document %r>' % self.name
class Folder(db.Model):
'''
文件夹
'''
__tablename__ = 'db_folder'
id = db.Column(db.Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
folder_id = db.Column(db.String(36), index=True, nullable=False, unique=True)
admin_id = db.Column(db.String(36), index=True)
pid = db.Column(db.String(36), nullable=False, index=True, default='0')
name = db.Column(db.String(36), index=True, nullable=False)
is_sys = db.Column(db.Boolean, index=True, default=True) # True = 系统文件夹
create_time = db.Column(db.DateTime, index=True, default=datetime.datetime.now)
documents = db.relationship('Document', backref='folder')
__table_args__ = {
'useexisting': True,
'mysql_engine': 'InnoDB'
}
def to_json(self):
dict = self.__dict__
if "_sa_instance_state" in dict:
del dict["_sa_instance_state"]
if "create_time" in dict:
dict["create_time"] = dict["create_time"].strftime('%Y-%m-%d %H:%M:%S')
return dict
def __repr__(self):
return '<Folder %r>' % self.name
class InitSql(db.Model):
'''
是否已经初始化数据库
'''
__tablename__ = 'db_init_sql'
id = db.Column(db.Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
isInit = db.Column(db.Boolean, index=True, default=True)
__table_args__ = {
'useexisting': True,
'mysql_engine': 'InnoDB'
}
| [
"375532103@qq.com"
] | 375532103@qq.com |
3c4f0d6329fae0254ec8bb1115b4712d8a53553f | a8314fb4e71a229f2288ca0588bbb3ebd58b7db0 | /leet/number_of_islands/test.py | 2920c806eccab6da138ebbf27ddc5c6f99a14a53 | [] | no_license | blhwong/algos_py | 6fc72f1c15fe04f760a199535a0df7769f6abbe6 | 9b54ad6512cf0464ecdd084d899454a99abd17b2 | refs/heads/master | 2023-08-30T17:45:51.862913 | 2023-07-24T18:56:38 | 2023-07-24T18:56:38 | 264,782,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | from leet.number_of_islands.main import Solution
s = Solution()
def test_1():
grid = [
['1','1','1','1','0'],
['1','1','0','1','0'],
['1','1','0','0','0'],
['0','0','0','0','0']
]
assert s.numIslands(grid) == 1
def test_2():
grid = [
['1', '1', '0', '0', '0'],
['1', '1', '0', '0', '0'],
['0', '0', '1', '0', '0'],
['0', '0', '0', '1', '1']
]
assert s.numIslands(grid) == 3
| [
"brandon@yerdle.com"
] | brandon@yerdle.com |
77e3a994f5aac16a79d46f57d851b6e0b920b3ba | eaa68c471c333336a7facad1ecb42f97aeca74f5 | /backend/msm_gtfrd051101_dev_14630/urls.py | 3c48dcc5318541339f45d7dca954cd710ed6dfa9 | [] | no_license | crowdbotics-apps/msm-gtfrd051101-dev-14630 | d1ba14f914db0ba5eb55a27f2828fa172c0c2a3a | 9e1ef45aec6d690b8279aac71242e664cd4055d9 | refs/heads/master | 2023-01-04T19:29:24.277234 | 2020-11-05T05:49:43 | 2020-11-05T05:49:43 | 310,202,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | """msm_gtfrd051101_dev_14630 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "MSM-gtfrd051101"
admin.site.site_title = "MSM-gtfrd051101 Admin Portal"
admin.site.index_title = "MSM-gtfrd051101 Admin"
# swagger
api_info = openapi.Info(
title="MSM-gtfrd051101 API",
default_version="v1",
description="API documentation for MSM-gtfrd051101 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
92fad4e71acbdd48c5803a52796be580d18dab58 | 407e2e0448c92cb258c4b8c57f7b023efcbbd878 | /MachineLearning/DeepLearningNN_KerasTensorFlow/env4keras2/lib/python3.9/site-packages/qtpy/QtWebEngineWidgets.py | 69f40d9e067a6ce159fdb5a7f338eccaec47aa68 | [] | no_license | KPAdhikari/PythonStuff | f017aa8aa1ad26673263e7dc31761c46039df8c4 | bea3a58792270650b5df4da7367686e2a9a76dbf | refs/heads/master | 2022-10-19T05:04:40.454436 | 2022-02-28T06:02:11 | 2022-02-28T06:02:11 | 98,236,478 | 1 | 1 | null | 2022-09-30T18:59:18 | 2017-07-24T21:28:31 | Python | UTF-8 | Python | false | false | 1,846 | py | #
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides QtWebEngineWidgets classes and functions.
"""
from . import PYQT5, PYQT6, PYSIDE2, PYSIDE6, PythonQtError
# To test if we are using WebEngine or WebKit
WEBENGINE = True
if PYQT5:
try:
from PyQt5.QtWebEngineWidgets import QWebEnginePage
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWebEngineWidgets import QWebEngineSettings
# Based on the work at https://github.com/spyder-ide/qtpy/pull/203
from PyQt5.QtWebEngineWidgets import QWebEngineProfile
except ImportError:
from PyQt5.QtWebKitWidgets import QWebPage as QWebEnginePage
from PyQt5.QtWebKitWidgets import QWebView as QWebEngineView
from PyQt5.QtWebKit import QWebSettings as QWebEngineSettings
WEBENGINE = False
elif PYQT6:
from PyQt6.QtWebEngineWidgets import *
from PyQt6.QtWebEngineCore import QWebEnginePage
from PyQt6.QtWebEngineCore import QWebEngineSettings
from PyQt6.QtWebEngineCore import QWebEngineProfile
elif PYSIDE6:
from PySide6.QtWebEngineWidgets import *
from PySide6.QtWebEngineCore import QWebEnginePage
from PySide6.QtWebEngineCore import QWebEngineSettings
from PySide6.QtWebEngineCore import QWebEngineProfile
elif PYSIDE2:
from PySide2.QtWebEngineWidgets import QWebEnginePage
from PySide2.QtWebEngineWidgets import QWebEngineView
from PySide2.QtWebEngineWidgets import QWebEngineSettings
# Based on the work at https://github.com/spyder-ide/qtpy/pull/203
from PySide2.QtWebEngineWidgets import QWebEngineProfile
else:
raise PythonQtError('No Qt bindings could be found')
| [
"kpadhikari@MyMacs-MacBook-Air.local"
] | kpadhikari@MyMacs-MacBook-Air.local |
0e0887a54e76dea68a4c769949c2d9f6ddbe309c | 8b3ca44ee3d990233e74655b7131d616094f70c2 | /experiments/runtime/drug_sensitivity_gdsc/gaussian_laplace_inversegaussian.py | f9bb4292ab74f6fba25423f0efab7ffe599636c1 | [] | no_license | zshwuhan/BMF_Priors | 8b8c54271285a72d2085a56a9475c0756f375e67 | 6a600da1c41f1ccde2f2ba99298b40e68fb9910a | refs/heads/master | 2021-05-13T19:10:07.203215 | 2017-12-01T13:30:21 | 2017-12-01T13:30:21 | 116,883,181 | 1 | 0 | null | 2018-01-09T23:36:13 | 2018-01-09T23:36:13 | null | UTF-8 | Python | false | false | 905 | py | '''
Measure runtime on the GDSC drug sensitivity dataset, with the Gaussian + Laplace + IG model.
'''
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_laplace_inversegaussian import BMF_Gaussian_Laplace_IG
from BMF_Priors.data.drug_sensitivity.load_data import load_gdsc_ic50_integer
from BMF_Priors.experiments.runtime.runtime_experiment import measure_runtime
''' Run the experiment. '''
R, M = load_gdsc_ic50_integer()
model_class = BMF_Gaussian_Laplace_IG
values_K = [5, 10, 20, 50]
settings = {
'R': R,
'M': M,
'hyperparameters': { 'alpha':1., 'beta':1. },
'init': 'random',
'iterations': 100,
}
fout = './results/times_gaussian_laplace_ig.txt'
times_per_iteration = measure_runtime(values_K, model_class, settings, fout)
print zip(values_K, times_per_iteration) | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
b3d59022b1fba8710896fc6c879618c2dcb414f9 | ab8b066221447d9dc8b02f5fa83a561e164a82c0 | /variation/src/analyzePhenotype.py | 9821cef5ea9c041c4509993f2562e96f1ed1849a | [] | no_license | polyactis/gwasmodules | 52729435a91933cfd8409973e1afda5514615d93 | b9333b85daed71032a1cba766585d0be1986ffdb | refs/heads/master | 2021-01-17T12:16:19.813899 | 2015-03-14T00:30:40 | 2015-03-14T00:30:40 | 32,554,385 | 0 | 1 | null | 2020-07-24T15:51:39 | 2015-03-20T00:50:11 | Python | UTF-8 | Python | false | false | 62,764 | py | """
This class provides functionality (in python) to evaluate which transformation to choose.
"""
import phenotypeData, gwaResults, gc, plotResults
import math
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
import pylab as plt
def drawHistogram(phed, p_i, title = None , pdfFile = None, pngFile = None,withLabels=False):
plt.figure(figsize=(5,4))
plt.axes([0.14,0.13,0.81,0.83])
if withLabels:
label = loadHistogramLabels()[p_i]
plt.xlabel(label)
pi_old = p_i
p_i = phed.getPhenIndex(p_i)
phenValues = []
for values in phed.phenotypeValues:
if values[p_i] != 'NA':
if int(pi_old)==272:
phenValues.append(100*float(values[p_i]))
else:
phenValues.append(float(values[p_i]))
minVal = min(phenValues)
maxVal = max(phenValues)
x_range = maxVal - minVal
histRes= plt.hist(phenValues, bins = len(phenValues)/4)
y_max = max(histRes[0])
plt.axis([minVal-0.035*x_range,maxVal+0.035*x_range,-0.035*y_max,1.1*y_max])
num_phen_vals = len(phenValues)
#plt.text(maxVal-0.7*x_range,1.02*y_max,"Number of values: "+str(num_phen_vals),size="x-small")
print max(histRes[0])
plt.ylabel("Frequency")
#if title:
# plt.title(title)
if pdfFile:
plt.savefig(pdfFile, format = "pdf")
if pngFile:
plt.savefig(pngFile, format = "png",dpi=300)
elif not pdfFile:
plt.show()
plt.clf()
def _getQuantiles_(scores, numQuantiles):
scores.sort()
quantiles = []
for i in range(1, numQuantiles + 1):
j = int(len(scores) * i / (numQuantiles + 2))
quantiles.append(scores[j])
return quantiles
def __getExpectedPvalueQuantiles__(numQuantiles):
quantiles = []
for i in range(1, numQuantiles + 1):
quantiles.append(float(i) / (numQuantiles + 2))
return quantiles
def _calcMedian_(scores,exp_median=0.5):
scores.sort()
median = scores[len(scores)/2]
return (exp_median-median)
def drawQQPlot(results, numQuantiles, phenName = None, pdfFile = None, pngFile = None, perm_pvalues=None, isBinary=False, **kwargs):
    """
    Draw a p-value QQ-plot (observed vs. expected quantiles on the raw
    0..1 scale) for a list of GWA results.

    results      -- list of result objects exposing .scores (p-values).
    numQuantiles -- number of quantile points per curve.
    perm_pvalues -- optional permuted p-values; when given, the first
                    result (assumed to be KW) is compared against their
                    empirical quantiles instead of the uniform ones.
    isBinary     -- relabels the "KW" curve as "Fisher" (binary phenotypes).
    kwargs['resultTypes'] -- one "<method>_<dataset>" label per result.

    Returns (areas, medians): per result, the estimated area between the
    observed and expected curves, and the median deviation.
    """
    # Bug fix: 'matplotlib' was referenced below but never bound at module
    # level (the module-level import is commented out); import it locally.
    import matplotlib.font_manager
    plt.figure(figsize=(4, 3.5))
    plt.axes([0.15, 0.14, 0.82, 0.82])
    plt.plot([0, 1], [0, 1], "k", label="Expected")  # diagonal = no inflation
    areas = []
    medians = []
    for j in range(0, len(results)):
        result = results[j]
        # Keep only the method part of the "<method>_<dataset>" label.
        label = kwargs['resultTypes'][j].split("_")[0]
        if label == "KW" and isBinary:
            label = "Fisher"  # binary phenotypes are analyzed with Fisher's exact test
        newScores = result.scores[:]
        quantiles = _getQuantiles_(newScores, numQuantiles)
        if perm_pvalues and j == 0:  # j==0 is KW..
            # Use the permutation null: empirical quantiles and their median.
            expQuantiles = _getQuantiles_(perm_pvalues, numQuantiles)
            q_i = numQuantiles // 2  # floor division (was "/": breaks under true division)
            if numQuantiles % 2 == 0:  # even: average the two middle quantiles
                exp_median = (expQuantiles[q_i - 1] + expQuantiles[q_i]) / 2.0
            else:  # odd
                exp_median = expQuantiles[q_i]
        else:
            exp_median = 0.5
            expQuantiles = __getExpectedPvalueQuantiles__(numQuantiles)
        area = _estAreaBetweenCurves_(quantiles, expQuantiles)
        median = _calcMedian_(newScores, exp_median)
        plt.plot(expQuantiles, quantiles, label = label + ", A=" + str(round(area, 3)) + ", M=" + str(round(median, 3)))
        areas.append(area)
        medians.append(median)
    fontProp = matplotlib.font_manager.FontProperties(size=8)
    plt.legend(loc = 2, numpoints = 4, handlelen = 0.05, markerscale = 1, prop=fontProp, pad=0.018)
    plt.axis([-0.01, 1.01, -0.01, 1.01])
    plt.xlabel("Expected $p$-value")
    plt.ylabel("Observed $p$-value")
    if pdfFile:
        plt.savefig(pdfFile, format = "pdf")
    if pngFile:
        plt.savefig(pngFile, format = "png", dpi=300)
    elif not pdfFile:
        plt.show()
    plt.clf()
    return (areas, medians)
def loadHistogramLabels():
    """
    Load x-axis labels for phenotype histograms from a hard-coded CSV file.

    Returns a dict mapping phenotype id (int, parsed from the leading
    "<id>_..." token of the first column) to the label string in the
    second column.  The header row (first column == "Phenotype") is skipped.
    """
    import csv
    labels = {}
    filename = "/Users/bjarnivilhjalmsson/Projects/Data/phenotypes/x_axis_label_old.csv"
    f = open(filename)
    try:
        for line in csv.reader(f):
            if line[0] == "Phenotype":
                continue
            # renamed from "id" to avoid shadowing the builtin
            phen_id = int((line[0].split("_"))[0])
            labels[phen_id] = line[1]
    finally:
        # Bug fix: the original leaked the file handle if a row failed to
        # parse; always close it.
        f.close()
    return labels
def _estAreaBetweenCurves_(quantiles, expQuantiles):
    """
    Trapezoid-rule estimate of the area between the observed quantile curve
    and the expected one.

    Per segment the contribution is width * |d_right + d_left| / 2, so
    deviations of opposite sign within one segment partially cancel.
    """
    total = 0
    for idx in range(0, len(quantiles) - 1):
        width = expQuantiles[idx + 1] - expQuantiles[idx]
        total += width * (abs(quantiles[idx + 1] - expQuantiles[idx + 1] + quantiles[idx] - expQuantiles[idx])) / 2.0
    return total
def _calcKS_(scores, exp_scores=None):
    """
    Kolmogorov-Smirnov test of `scores`, delegated to R via rpy.

    Compares `scores` against `exp_scores` when given, otherwise against
    the uniform(0,1) distribution ("punif").  Returns a dict containing at
    least "D" (the KS statistic; -1 if the R call failed) and, on success,
    "p.value".
    """
    ret = {}
    ret["D"] = -1  # fallback if rpy is unavailable or the call fails
    try:
        from rpy import r
        if exp_scores:
            res = r.ks_test(scores, exp_scores)
        else:
            res = r.ks_test(scores, "punif")
        # R returns the statistic as a {"D": value} mapping; reuse it and
        # attach the p-value alongside.
        ret = res["statistic"]
        ret["p.value"] = res["p.value"]
    except Exception, message:
        print "Calculating KS failed??", message
    return ret
def _getLogQuantilesMaxVal_(scores, maxScore=None):
    """
    Determine the -log10 axis extent implied by the data relative to
    `maxScore`.

    Sorts `scores` (p-values) in place, scans past every value whose
    -log10 exceeds maxScore, and returns log10(N / (i + 1)) where i is the
    index of the first value at or below that threshold (or the last index
    if all values exceed it).
    """
    scores.sort()
    idx = 0
    neg_log = -math.log(scores[idx], 10)
    # Advance while the -log10 p-value is still above the requested cap.
    while idx < len(scores) - 1 and neg_log > maxScore:
        idx += 1
        neg_log = -math.log(scores[idx], 10)
    return math.log(len(scores) / float(idx + 1), 10)
def _getLogQuantiles_(scores, numDots, maxVal=None):
    """
    Return numDots -log10 quantiles of `scores`, log-spaced up to maxVal.

    Sorts `scores` in place.  Dot i (i = 0..numDots-1) samples the order
    statistic of rank round(10**(-(i/(numDots-1)) * maxVal) * N); the rank
    is clamped so the index never drops below 0 (small ranks round to 0
    for the last dots).
    """
    scores.sort()
    n = len(scores)
    denom = numDots - 1
    quantiles = []
    for i in range(0, numDots):
        rank = int(round(math.pow(10, -(float(i) / denom) * maxVal) * n))
        idx = max(rank - 1, 0)  # clamp to a valid index
        quantiles.append(-math.log10(scores[idx]))
    return quantiles
def __getExpectedLogQuantiles__(numDots, maxVal):
    """
    Expected -log10 quantiles under uniformity:
    (i / (numDots + 2)) * maxVal for i = 1..numDots.
    """
    return [(float(i) / (numDots + 2.0)) * maxVal for i in range(1, numDots + 1)]
def _estLogSlope_(ys, xs=None):
    """
    Estimate the average log-log slope of observed p-values (`ys`) against
    expected ones (1000 quantiles of `xs`, or uniform quantiles when `xs`
    is not given).  Quantiles with expected value >= 1.0 are skipped to
    avoid dividing by log(1) = 0.
    """
    if xs:
        expected = _getQuantiles_(xs, 1000)
    else:
        expected = __getExpectedPvalueQuantiles__(1000)
    observed = _getQuantiles_(ys, 1000)
    b_sum = 0.0
    num_valid = 0
    for x, y in zip(expected, observed):
        if x < 1.0:
            b_sum += math.log(y, 10) / math.log(x, 10)
            num_valid += 1
    return b_sum / num_valid
def drawLogQQPlot(results, numDots, maxVal, phenName = None, pdfFile = None, pngFile = None, perm_pvalues=None, isBinary=False, **kwargs):
    """
    Draw a -log10 QQ-plot for a list of GWA results.

    results      -- list of result objects exposing .scores (p-values).
    numDots      -- number of (log-spaced) quantile dots per curve.
    maxVal       -- requested -log10 axis extent; capped at log10(N) since
                    quantiles beyond rank 1/N cannot be displayed.
    perm_pvalues -- optional permuted p-values; when given, the first
                    result (assumed KW) is compared against them.
    isBinary     -- relabels the "KW" curve as "Fisher".
    kwargs['resultTypes'] -- one "<method>_<dataset>" label per result.

    Returns (ds, areas, slopes): per result, the KS statistic D, the area
    between observed and expected curves, and the estimated log-log slope.
    """
    # Bug fix: 'matplotlib' was referenced below but never bound at module
    # level (the module-level import is commented out); import it locally.
    import matplotlib.font_manager
    plt.figure(figsize=(4, 3.5))
    plt.axes([0.15, 0.14, 0.82, 0.82])
    maxVal = min(math.log10(len(results[0].scores)), maxVal)
    minVal = (1.0 / numDots) * maxVal
    valRange = maxVal - minVal
    plt.plot([minVal, maxVal], [minVal, maxVal], "k", label="Expected")
    maxObsVals = []
    areas = []
    ds = []
    slopes = []
    for j in range(0, len(results)):
        result = results[j]
        # Keep only the method part of the "<method>_<dataset>" label.
        label = kwargs['resultTypes'][j].split("_")[0]
        if label == "KW" and isBinary:
            label = "Fisher"  # binary phenotypes are analyzed with Fisher's exact test
        if perm_pvalues and j == 0:  # j==0 is KW..
            # Compare against the permutation null distribution.
            exp_maxVal = _getLogQuantilesMaxVal_(perm_pvalues[:], maxVal)
            expQuantiles = _getLogQuantiles_(perm_pvalues[:], numDots, exp_maxVal)
            ks_res = _calcKS_(result.scores, perm_pvalues)
            quantiles = _getLogQuantiles_(result.scores[:], numDots, exp_maxVal)
            slope = _estLogSlope_(result.scores[:], perm_pvalues)
        else:
            quantiles = _getLogQuantiles_(result.scores[:], numDots, maxVal)
            expQuantiles = __getExpectedLogQuantiles__(numDots, maxVal)
            ks_res = _calcKS_(result.scores)
            slope = _estLogSlope_(result.scores[:])
        area = _estAreaBetweenCurves_(quantiles, expQuantiles)
        areas.append(area)
        slopes.append(slope)
        ds.append(ks_res["D"])
        plt.plot(expQuantiles, quantiles, label = label + ", D=" + str(round(ks_res["D"], 3)) + ", S=" + str(round(slope, 3)))
        maxObsVals.append(max(quantiles))
    maxObsVal = max(maxObsVals)
    obsValRange = maxObsVal - minVal
    plt.axis([minVal - 0.025 * valRange, maxVal + 0.025 * valRange, minVal - 0.025 * obsValRange, maxObsVal + 0.025 * obsValRange])
    plt.ylabel("Observed $-log_{10}(p$-value$)$")
    plt.xlabel("Expected $-log_{10}(p$-value$)$")
    fontProp = matplotlib.font_manager.FontProperties(size=8)
    plt.legend(loc = 2, numpoints = 4, handlelen = 0.05, markerscale = 1, prop=fontProp, pad=0.018)
    if pdfFile:
        plt.savefig(pdfFile, format = "pdf")
    if pngFile:
        plt.savefig(pngFile, format = "png", dpi=300)
    elif not pdfFile:
        plt.show()
    plt.clf()
    return (ds, areas, slopes)
def drawPermLogQQPlot(results, permutedResultsList, numDots=1000, maxVal=5, phenName = None, pdfFile = None, pngFile=None, **kwargs):
    """
    Draw a -log10 QQ-plot of `results` overlaid on permutation-derived
    null curves.  Draws into the current pylab figure (no figure is
    created here).

    results             -- result objects exposing .scores and .name.
    permutedResultsList -- list of (permutedResults, label, color) tuples;
                           every permuted curve is drawn in `color`, with
                           one legend entry per tuple.
    Saves to pdfFile/pngFile when given, otherwise shows interactively.
    """
    # Bug fix: 'matplotlib' was referenced below but never bound at module
    # level (the module-level import is commented out); import it locally.
    import matplotlib.font_manager
    # Quantiles beyond rank 1/N cannot be displayed, so cap the extent.
    maxVal = min(math.log10(len(results[0].scores)), maxVal)
    minVal = (1.0 / numDots) * maxVal
    valRange = maxVal - minVal
    plt.plot([minVal, maxVal], [minVal, maxVal], "k", label="Expected")
    maxObsVals = []
    for (permutedResults, label, color) in permutedResultsList:
        for result in permutedResults:
            quantiles = _getLogQuantiles_(result.scores[:], numDots, maxVal)
            expQuantiles = __getExpectedLogQuantiles__(numDots, maxVal)
            plt.plot(expQuantiles, quantiles, color=color)
            maxObsVals.append(max(quantiles))
        # Re-plot the last permuted curve with a label so the legend gets
        # exactly one entry per permutation group (assumes a non-empty group).
        plt.plot(expQuantiles, quantiles, label=label, color=color)
    for result in results:
        ks_res = _calcKS_(result.scores)
        quantiles = _getLogQuantiles_(result.scores[:], numDots, maxVal)
        expQuantiles = __getExpectedLogQuantiles__(numDots, maxVal)
        area = _estAreaBetweenCurves_(quantiles, expQuantiles)
        plt.plot(expQuantiles, quantiles, label = result.name + ", A=" + str(round(area, 2)) + ", D=" + str(round(ks_res["D"], 3)))
        maxObsVals.append(max(quantiles))
    maxObsVal = max(maxObsVals)
    obsValRange = maxObsVal - minVal
    plt.axis([minVal - 0.025 * valRange, maxVal + 0.025 * valRange, minVal - 0.025 * obsValRange, maxObsVal + 0.025 * obsValRange])
    if phenName:
        plt.title(phenName)
    fontProp = matplotlib.font_manager.FontProperties(size=10)
    plt.legend(loc = 2, numpoints = 2, handlelen = 0.01, markerscale = 0.5, prop=fontProp, pad=0.1)
    if pdfFile:
        plt.savefig(pdfFile, format = "pdf")
    if pngFile:
        plt.savefig(pngFile, format = "png")
    elif not pdfFile:
        plt.show()
    plt.clf()
def _drawPowerQQPlots_(phenotypeIndices=None,res_path="/Network/Data/250k/tmp-bvilhjal/power_analysis/results/",runId="gwPlot"):
    """
    Draws all the GWA plots for 6 methods.

    For each phenotype, loads the KW p-value files produced for several
    dataset subsets (full data, 192/96-accession overlaps, latitude
    subsets) and draws a combined log QQ-plot via drawPermLogQQPlot().
    The permutation-curve machinery below is currently disabled (all the
    perm* lists are empty).  `runId` is currently unused.
    """
    import plotResults
    phenotypeFile = "/Network/Data/250k/dataFreeze_080608/phenotypes_all_raw_120308.tsv"
    phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter='\t')
    if not phenotypeIndices:
        phenotypeIndices = phed.phenIds  # default: plot every phenotype
    #mainRTs = ["","_original192", "_original192_inverse","_original96","_original96_inverse"] #FIXME: add full result
    #mainLabels = ["Full data", "192 acc. overlap", "192 acc. complement", "96 acc. overlap", "96 acc. complement"]
    mainRTs = ["","_original192","_original96", "_latitude60","_latitude55", "_original192_latitude60", "_original192_latitude55"] #FIXME: add full result
    mainLabels = ["Full data", "192 acc. overlap", "96 acc. overlap", "latitude < 60", "Latitude < 55", "192 acc. overl. and lat. < 60", "192 acc. overl. and lat. < 55"]
    # Permutation comparisons are switched off: all of these are empty.
    permRTs = []#["permTest","permTest"]
    colors = []#[[0.6,0.8,0.6],[0.6,0.6,0.8]]
    perm_counts = []#[10,10]
    perm_sample_sizes = []#[65 ,112] #[170,96] #
    permLabels = []#["random 65","random 112"]
    for p_i in phenotypeIndices:
        mainResults = []
        phenName = phed.getPhenotypeName(p_i)
        pdfFile = res_path+phenName+"_log_QQplot.pdf"
        pngFile = res_path+phenName+"_log_QQplot.png"
        for i in range(0,len(mainRTs)):
            mainRT = mainRTs[i]
            name = mainLabels[i]
            filename = res_path+"KW_raw"+mainRT+"_"+phenName+".pvals"
            rt = gwaResults.ResultType(resultType="KW",name=name)
            print "Loading",filename
            result = gwaResults.Result(filename, name=name, resultType=rt)
            mainResults.append(result)
        permResultsList = []
        for i in range(0,len(permRTs)):  # no-op while permRTs is empty
            permResults = []
            permRT = permRTs[i]
            for j in range(0,perm_counts[i]):
                filename = res_path+"KW_raw_"+permRT+"_"+phenName+"_r"+str(perm_sample_sizes[i])+"_"+str(j)+".pvals"
                rt = gwaResults.ResultType(resultType="KW",name=permRT)
                print "Loading",filename
                result = gwaResults.Result(filename, name=permRT, resultType=rt)
                permResults.append(result)
            permResultsList.append((permResults,permLabels[i],colors[i]))
        drawPermLogQQPlot(mainResults, permResultsList, phenName = phenName,pdfFile=pdfFile,pngFile=pngFile)
        gc.collect() #Calling garbage collector, in an attempt to clean up memory..
        #plotResults.plotResult(result,pdfFile,pngFile,ylab="- log10 pvalue",plotBonferroni=True)
def _test_():
    """
    Ad-hoc test: draw labelled histograms for phenotype indices 77-79,
    restricted to the first-192 ecotype set.  Output goes to a hard-coded
    temp directory.
    """
    phenotypeFile = "/Users/bjarnivilhjalmsson/Projects/Data/phenotypes/phenotypes_all_raw_081009.tsv"
    print "Loading phenotype data"
    phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
    # Restrict to the core set of 192 accessions.
    ecotypes = map(str,phenotypeData._getFirst192Ecotypes_())
    phed.filterAccessions(ecotypes)
    tmp_dir = "/Users/bjarnivilhjalmsson/tmp/"
    filename = tmp_dir+"hist_old"
    for p_i in range(77,80):
        phenName = phed.getPhenotypeName(p_i)
        drawHistogram(phed, p_i, title = phenName,pdfFile = filename+"_"+str(p_i)+".pdf", pngFile = filename+"_"+str(p_i)+".png",withLabels=True)
        # phed.logTransform(p_i)
        # drawHistogram(phed, p_i, title = phenName)
def _loadEmmaData_(phed, phenotypeIndices):
    """
    Load Emma GWA result files (raw, log-transformed and rank-transformed
    datasets) for the given phenotype indices.

    Returns (results_map, resultTypes_map), both keyed by phenotype index:
    results_map[i] is a list of gwaResults.EmmaResult objects and
    resultTypes_map[i] the matching "<method>_<dataset>" name strings.
    Missing/unreadable files are skipped with a message.
    """
    res_path = "/Network/Data/250k/tmp-bvilhjal/"
    resultsDirs = [res_path + "emma_results/", res_path + "emma_results/", res_path + "emma_results/"]
    methods = ["Emma", "Emma","Emma",]
    datasetNames = ["raw", "logTransform", "ranks",]
    #mrIndex = 1 #The guiding (main) result
    results_map = {}
    resultTypes_map = {}
    for i in phenotypeIndices:
        try:
            # File names use sanitized phenotype names ('/' and '*' are not
            # path-safe).
            phenName = phed.getPhenotypeName(i)
            phenName = phenName.replace("/", "_div_")
            phenName = phenName.replace("*", "_star_")
            phenIndex = phed.getPhenIndex(i)
            results = []
            resultTypes = []
            for j in range(0, len(methods)):
                #if not (methods[j]=="Emma" and phed.isBinary(i)):
                resultFile = resultsDirs[j] + methods[j] + "_" + datasetNames[j] + "_" + phenName + ".pvals"
                try:
                    print "Loading result file", resultFile
                    result = gwaResults.EmmaResult(resultFile, name = methods[j] + "_" + datasetNames[j] + "_" + phenName)
                    results.append(result)
                    resultTypes.append(methods[j] + "_" + datasetNames[j])
                except Exception:
                    print "Couldn't load", resultFile
            results_map[i] = results
            resultTypes_map[i] = resultTypes
        except Exception:
            print "Couldn't load the result file"
    gc.collect() #Calling garbage collector, in an attempt to clean up memory..
    return (results_map, resultTypes_map)
def _loadData_(phed, phenotypeIndices,res_path="/Network/Data/250k/tmp-bvilhjal/"):
    """
    Load KW (raw) and Emma (raw, log-transformed) GWA result files for the
    given phenotype indices, applying a per-method minimum-MARF filter
    (none for KW, 0.1 for the Emma results).

    Returns (results_map, resultTypes_map), both keyed by phenotype index:
    results_map[i] is a list of gwaResults.Result objects and
    resultTypes_map[i] the matching "<method>_<dataset>" name strings.
    Missing/unreadable files are skipped with a message.
    """
    resultsDirs = [res_path + "kw_results/", res_path + "emma_results/", res_path + "emma_results/",]
    methods = ["KW", "Emma", "Emma",]
    datasetNames = ["raw", "raw", "logTransform",]
    mafCutoffs = [0, 0.1, 0.1,]
    #mrIndex = 1 #The guiding (main) result
    results_map = {}
    resultTypes_map = {}
    for i in phenotypeIndices:
        try:
            # File names use sanitized phenotype names ('/' and '*' are not
            # path-safe).
            phenName = phed.getPhenotypeName(i)
            phenName = phenName.replace("/", "_div_")
            phenName = phenName.replace("*", "_star_")
            phenIndex = phed.getPhenIndex(i)
            results = []
            resultTypes = []
            for j in range(0, len(methods)):
                #if not (methods[j]=="Emma" and phed.isBinary(i)):
                resultFile = resultsDirs[j] + methods[j] + "_" + datasetNames[j] + "_" + phenName + ".pvals"
                try:
                    print "Loading result file", resultFile
                    result = gwaResults.Result(resultFile, name = methods[j] + "_" + datasetNames[j] + "_" + phenName)
                    result.filterMARF(minMaf = mafCutoffs[j])
                    results.append(result)
                    resultTypes.append(methods[j] + "_" + datasetNames[j])
                except Exception:
                    print "Couldn't load", resultFile
            results_map[i] = results
            resultTypes_map[i] = resultTypes
        except Exception:
            print "Couldn't load the result file"
    gc.collect() #Calling garbage collector, in an attempt to clean up memory..
    return (results_map, resultTypes_map)
def _getPermPvalues_(phenName,perm_pval_dir="/Users/bjarnivilhjalmsson/Projects/Data/gwas_results/perm_tests/"):
perm_pvals = []
filename = perm_pval_dir+"KW_perm_n200_f1_"+phenName+".perm.pvals" #FIXME finish
print "Getting permuted p-values:",filename
f = open(filename,"r")
lines = f.readlines()
for line in lines:
pval_str_lst = line.split(",")
pvals = map(float,pval_str_lst)
for pval in pvals:
perm_pvals.append(pval)
return perm_pvals
def _testQQplot_(includeEmmaInBinary=False,usePvalueFiles=True):
    """
    Analysis driver: for each selected phenotype draw histograms (raw,
    log-transformed, and log-after-SD-shift), p-value and log QQ-plots of
    the KW/Emma results, and genome-wide plots; finally dump the
    confounding statistics (D, A, B, M, S) to a text file.  All paths and
    phenotype indices are hard-coded.
    """
    resdir = "/Users/bjarni/tmp/"
    #resdir = "/Network/Data/250k/tmp-bvilhjal/phenotype_analyzis/"
    #resdir = "/Network/Data/250k/tmp-bvilhjal/qq_plots/ver_4/"
    phenotypeFile = "/Network/Data/250k/dataFreeze_011209/phenotypes_all_raw_042109.tsv"
    print "Loading phenotype data"
    phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
    # Restrict to the core set of 192 accessions.
    first_192_acc = map(str,phenotypeData._getFirst192Ecotypes_())
    print first_192_acc
    phed.filterAccessions(first_192_acc)
    #phed2 = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
    phenotypeIndices = [264]#phenotypeData.categories_2_phenotypes[1]#+phenotypeData.categories_2_phenotypes[1]+phenotypeData.categories_2_phenotypes[4]+phenotypeData.categories_2_phenotypes[2] #[272,273,274,277,278,279,280,281,282,283]
    #(results_map, resultTypes_map) = _loadData_(phed, phenotypeIndices)
    q_pvalues = None
    stat_dict = {}  # phenotype index -> {"A","M","A2","D","S"} statistics
    for p_i in phenotypeIndices:
        (results_map, resultTypes_map) = _loadData_(phed, [p_i])
        #try:
        phenName = phed.getPhenotypeName(p_i)
        print phed.countValues(p_i),"phenotype values found."
        phenNamePrint = " ".join(phenName.split("_")[1:])
        print "\nWorking on phenotype",phenName
        if usePvalueFiles:
            q_pvalues = _getPermPvalues_(phenName)
            print len(q_pvalues),"permuted pvalues found"
        valCount = phed.countValues(p_i)
        print valCount,"values found."
        if (not phed.isBinary(p_i)) or includeEmmaInBinary:
            histogramFile = resdir + phenName +"_hist.pdf"
            histogramFile_png = resdir + phenName +"_hist.png"
            drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile , pngFile = histogramFile_png)
            # If the plain log-transform fails (presumably due to
            # non-positive values -- TODO confirm), shift by an SD-scaled
            # constant and retry.
            if phed.logTransform(p_i):
                histogramFile = resdir + phenName + "_hist_logTransformed.pdf"
                histogramFile_png = resdir + phenName + "_hist_logTransformed.png"
                drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile, pngFile = histogramFile_png)
            elif not phed.isBinary(p_i):
                print "adding scaled const."
                phed.addSDscaledConstant(p_i)
                if phed.logTransform(p_i):
                    histogramFile = resdir + phenName + "_hist_logTransformed_const.pdf"
                    histogramFile_png = resdir + phenName + "_hist_logTransformed_const.png"
                    drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile, pngFile = histogramFile_png)
        # phed2.naOutliers(p_i,10)
        # histogramFile = resdir + phenName + "_hist_noOutliers.pdf"
        # histogramFile_png = resdir + phenName + "_hist_noOutliers.png"
        # drawHistogram(phed2, p_i, title = phenName, pdfFile = histogramFile, pngFile = histogramFile_png)
        # if phed2.logTransform(p_i):
        #	histogramFile = resdir + phenName + "_hist_logTransformed_noOutliers.pdf"
        #	histogramFile_png = resdir + phenName + "_hist_logTransformed_noOutliers.png"
        #	drawHistogram(phed2, p_i, title = phenName, pdfFile = histogramFile, pngFile = histogramFile_png)
        results = results_map[p_i]
        resultTypes = resultTypes_map[p_i]
        qqplotFile = resdir + phenName + "_qqplot.pdf"
        qqplotFile_png = resdir + phenName + "_qqplot.png"
        s_dict={}
        (As,Ms)=drawQQPlot(results, 1000, phenName = phenNamePrint, isBinary=phed.isBinary(p_i), resultTypes = resultTypes, pdfFile=qqplotFile, pngFile=qqplotFile_png, perm_pvalues = q_pvalues)
        s_dict["A"]=As
        s_dict["M"]=Ms
        qqplotFile = resdir + phenName + "_qqplot_log.pdf"
        qqplotFile_png = resdir + phenName + "_qqplot_log.png"
        (ds,areas,slopes) = drawLogQQPlot(results, 1000,5, phenName = phenNamePrint, isBinary=phed.isBinary(p_i), resultTypes = resultTypes, pdfFile=qqplotFile, pngFile=qqplotFile_png, perm_pvalues = q_pvalues)
        s_dict["A2"]=areas
        s_dict["D"]=ds
        s_dict["S"]=slopes
        stat_dict[p_i] = s_dict
        for i in range(0,len(results)):
            result = results[i]
            if i==1:
                print "Emma"
                #result.filterMARF(0.1)
            # Genome-wide plot of -log10 p-values.
            result.negLogTransform()
            pngFile = resdir + phenName + "_gwplot_" +resultTypes[i]+".png"
            pdfFile = resdir + phenName + "_gwplot_" +resultTypes[i]+".pdf"
            plotResults.plotResult(result,pdfFile=pdfFile,pngFile=pngFile,percentile=90,type="pvals", plotBonferroni=True,minScore=1.2)
        #except Exception:
        #	print "\nPhenotype index", p_i, "failed."
        del results_map
        gc.collect() #Calling garbage collector, in an attempt to clean up memory..
    print stat_dict
    # Write one "phenotype, method" row of statistics per phenotype/method.
    stat_file_name = resdir + "confounding_stat_4.txt"
    f = open(stat_file_name,"w")
    methods = ["KW","Emma"]
    f.write("phenotype_name, method_name, is_binary, D, A, B, M, S\n")
    for p_i in phenotypeIndices:
        if stat_dict.has_key(p_i):
            s_dict = stat_dict[p_i]
            phenName = phed.getPhenotypeName(p_i)
            phenName = " ".join(phenName.split("_")[1:])
            for i in range(0,len(methods)):
                st = phenName+", "+methods[i]+", "+str(phed.isBinary(p_i))+", "+str(s_dict["D"][i])+", "+str(s_dict["A"][i])+", "+str(s_dict["A2"][i])+", "+str(s_dict["M"][i])+", "+str(s_dict["S"][i])+"\n"
                f.write(st)
    f.close()
def plot_all_phenotype_analysis(includeEmmaInBinary=False,usePvalueFiles=True):
    """
    Batch analysis driver (PNG output only): for each phenotype index draw
    histograms (raw / log / log-after-SD-shift), p-value and log QQ-plots
    of the KW/Emma results, and genome-wide plots; finally write the
    confounding statistics (D, A, B, M, S) to a summary file.  All paths
    and phenotype indices are hard-coded.
    """
    resdir = "/Users/bjarnivilhjalmsson/Projects/Data/gwas_results/analysis/" #"/Users/bjarnivilhjalmsson/tmp/"
    phenotypeFile = "/Users/bjarnivilhjalmsson/Projects/Data/phenotypes/phenotypes_all_raw_081009.tsv"
    print "Loading phenotype data"
    phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
    phenotypeIndices = [5,161,167,170,173,176]#phed.phenIds
    q_pvalues = None
    stat_dict = {}  # phenotype index -> {"A","M","A2","D","S"} statistics
    for p_i in phenotypeIndices:
        (results_map, resultTypes_map) = _loadData_(phed, [p_i],res_path="/Users/bjarnivilhjalmsson/Projects/Data/gwas_results/")
        try:
            phenName = phed.getPhenotypeName(p_i)
            print phed.countValues(p_i),"phenotype values found."
            phenNamePrint = " ".join(phenName.split("_")[1:])
            print "\nWorking on phenotype",phenName
            if usePvalueFiles:
                q_pvalues = _getPermPvalues_(phenName)
                print len(q_pvalues),"permuted pvalues found"
            valCount = phed.countValues(p_i)
            print valCount,"values found."
            if (not phed.isBinary(p_i)) or includeEmmaInBinary:
                histogramFile = None #resdir + phenName +"_hist.pdf"
                histogramFile_png = resdir + phenName +"_hist.png"
                drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile , pngFile = histogramFile_png)
                # If the plain log-transform fails, shift by an SD-scaled
                # constant and retry.
                if phed.logTransform(p_i):
                    histogramFile = None #resdir + phenName + "_hist_logTransformed.pdf"
                    histogramFile_png = resdir + phenName + "_hist_logTransformed.png"
                    drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile, pngFile = histogramFile_png)
                elif not phed.isBinary(p_i):
                    print "adding scaled const."
                    phed.addSDscaledConstant(p_i)
                    if phed.logTransform(p_i):
                        histogramFile = None #resdir + phenName + "_hist_logTransformed_const.pdf"
                        histogramFile_png = resdir + phenName + "_hist_logTransformed_const.png"
                        drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile, pngFile = histogramFile_png)
            results = results_map[p_i]
            resultTypes = resultTypes_map[p_i]
            qqplotFile = None #resdir + phenName + "_qqplot.pdf"
            qqplotFile_png = resdir + phenName + "_qqplot.png"
            s_dict={}
            (As,Ms)=drawQQPlot(results, 1000, phenName = phenNamePrint, isBinary=phed.isBinary(p_i),
                    resultTypes = resultTypes, pdfFile=qqplotFile, pngFile=qqplotFile_png, perm_pvalues = q_pvalues)
            s_dict["A"]=As
            s_dict["M"]=Ms
            qqplotFile = None #resdir + phenName + "_qqplot_log.pdf"
            qqplotFile_png = resdir + phenName + "_qqplot_log.png"
            (ds,areas,slopes) = drawLogQQPlot(results, 1000,5, phenName = phenNamePrint, isBinary=phed.isBinary(p_i),
                    resultTypes = resultTypes, pdfFile=qqplotFile, pngFile=qqplotFile_png, perm_pvalues = q_pvalues)
            s_dict["A2"]=areas
            s_dict["D"]=ds
            s_dict["S"]=slopes
            stat_dict[p_i] = s_dict
        except Exception, err_str:
            print "\nPhenotype index", p_i, "failed when drawing QQ-plots or histograms:",err_str
        # NOTE(review): `results`/`resultTypes`/`phenName` are only bound if
        # the try-block got far enough; an early failure above would raise
        # NameError here.
        for i in range(0,len(results)):
            try:
                result = results[i]
                if i>0:
                    print "Emma"
                    result.filterMARF(0.1)
                # Genome-wide plot of -log10 p-values.
                result.negLogTransform()
                pngFile = resdir + phenName + "_gwplot_" +resultTypes[i]+".png"
                #pdfFile = resdir + phenName + "_gwplot_" +resultTypes[i]+".pdf"
                plotResults.plotResult(result,pdfFile=None,pngFile=pngFile,percentile=90,type="pvals", maxScore=None,plotBonferroni=True,minScore=1.2)
            except Exception, err_str:
                print "\nPhenotype index", p_i,result.resultType.name, "failed when drawing association scores:",err_str
        del results_map
        gc.collect() #Calling garbage collector, in an attempt to clean up memory..
    print stat_dict
    # Write one "phenotype, method" row of statistics per phenotype/method.
    stat_file_name = resdir + "confounding_stat_4.txt"
    f = open(stat_file_name,"w")
    methods = ["KW","Emma"]
    f.write("phenotype_name, method_name, is_binary, D, A, B, M, S\n")
    for p_i in phenotypeIndices:
        if stat_dict.has_key(p_i):
            s_dict = stat_dict[p_i]
            phenName = phed.getPhenotypeName(p_i)
            phenName = " ".join(phenName.split("_")[1:])
            for i in range(0,len(methods)):
                st = phenName+", "+methods[i]+", "+str(phed.isBinary(p_i))+", "+str(s_dict["D"][i])+", "+str(s_dict["A"][i])+", "+str(s_dict["A2"][i])+", "+str(s_dict["M"][i])+", "+str(s_dict["S"][i])+"\n"
                f.write(st)
    f.close()
def drawMAFPlots(includeEmmaInBinary=True,usePvalueFiles=True):
    """
    Aggregate the top 1% of -log10 p-values across phenotypes and plot
    them against MAF, one figure for KW and one for Emma.  Assumes
    _loadData_ returns exactly two results per phenotype (KW first, Emma
    second).  The includeEmmaInBinary/usePvalueFiles parameters are
    currently unused.
    """
    resdir = "/Users/bjarni/tmp/"
    #resdir = "/Network/Data/250k/tmp-bvilhjal/phenotype_analyzis/"
    #resdir = "/Network/Data/250k/tmp-bvilhjal/qq_plots/ver_1/"
    phenotypeFile = "/Network/Data/250k/dataFreeze_011209/phenotypes_all_raw_042109.tsv"
    print "Loading phenotype data"
    phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
    # Restrict to the core set of 192 accessions.
    first_192_acc = map(str,phenotypeData._getFirst192Ecotypes_())
    print first_192_acc
    phed.filterAccessions(first_192_acc)
    phenotypeIndices = phenotypeData.categories_2_phenotypes[1]#+phenotypeData.categories_2_phenotypes[1]+phenotypeData.categories_2_phenotypes[4]+phenotypeData.categories_2_phenotypes[2] #[272,273,274,277,278,279,280,281,282,283]
    maf_list = [[],[]]     # [KW mafs, Emma mafs], pooled over phenotypes
    scores_list = [[],[]]  # [KW scores, Emma scores], pooled over phenotypes
    for p_i in phenotypeIndices:
        (results_map, resultTypes_map) = _loadData_(phed, [p_i])
        results = results_map[p_i]
        del results_map
        for i in range(0,len(results)):
            result = results[i]
            result.negLogTransform()
            result.filterPercentile(0.99)  # keep only the top 1% of scores
            scores_list[i]+=result.scores
            maf_list[i]+=result.mafs
        #try:
        phenName = phed.getPhenotypeName(p_i)
        print phed.countValues(p_i),"phenotype values found."
        phenNamePrint = " ".join(phenName.split("_")[1:])
        print "\nWorking on phenotype",phenName
        valCount = phed.countValues(p_i)
        print valCount,"values found."
        gc.collect() #Calling garbage collector, in an attempt to clean up memory..
    # KW panel; the dash-dot line at 6.64 is a significance threshold --
    # presumably Bonferroni-like, TODO confirm.
    plt.figure(figsize=(7.5,4))
    plt.axes([0.07,0.10,0.90,0.88])
    plt.plot(maf_list[0],scores_list[0],".",alpha=0.8)
    plt.plot([-2,100],[6.64,6.64],"k-.")
    plt.axis([-2,100,1,25])
    plotFile = resdir+"KW_MAF"
    plt.ylabel("-log$_{10}$(p-value)")
    plt.xlabel("MAF")
    plt.savefig(plotFile+".png", format = "png",dpi=300)
    plt.savefig(plotFile+".pdf", format = "pdf",dpi=300)
    plt.clf()
    # Emma panel.
    plt.axes([0.07,0.10,0.90,0.88])
    plt.plot(maf_list[1],scores_list[1],".",alpha=0.8)
    plt.plot([-2,100],[6.64,6.64],"k-.")
    plt.axis([-2,100,1,25])
    plotFile = resdir+"Emma_MAF"
    plt.ylabel("-log$_{10}$(p-value)")
    plt.xlabel("MAF")
    plt.savefig(plotFile+".png", format = "png",dpi=300)
    plt.savefig(plotFile+".pdf", format = "pdf",dpi=300)
def drawEmmaPlots(result, pngFile = None, **kwargs):
    """
    Diagnostic scatter plots for a single Emma result.

    Produces six panels, each with MARF on the x-axis and a colorbar:
      *_pval_*      -- scores, colored by maximum likelihood (ml)
      *_ml_*        -- ml, colored by score
      *_vg_*        -- genetic variance component, colored by score
      *_ve_*        -- error variance component, colored by score
      *_v_*         -- vg/(vg+ve), colored by score
      *_var_expl_*  -- variance explained, colored by score
    Panels are saved to "<pngFile>_<panel>_<kwargs['resultType']>.png"
    when pngFile is given.  The x-axis limits assume MARF in [0, 0.5].
    """
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    # Sort all per-SNP statistics jointly by score, then transpose back to
    # per-statistic lists: res indices are 0=score, 1=marf, 2=maf,
    # 3=var_expl, 4=ml, 5=vg, 6=ve.
    res_list = zip(result.scores,result.marfs,result.mafs,result.var_expl,result.ml,result.vg,result.ve)
    res_list.sort()
    res = map(list,zip(*res_list))
    #plot likelihood VS. pvalue
    plt.figure(figsize=(14,5))
    plt.axes([0.04,0.05,0.95,0.92])
    plt.scatter(result.marfs,result.scores,c=result.ml,alpha=0.5)
    y_max = max(result.scores)
    y_min = min(result.scores)
    y_range = y_max - y_min
    padding = 0.03  # fractional axis padding, reused by all panels below
    plt.axis([-0.5*padding,0.5*(1+padding),y_min-y_range*padding,y_max+y_range*padding])
    plt.colorbar()
    if pngFile:
        label = kwargs['resultType']
        ml_pngFile = pngFile+"_pval_"+label+".png"
        plt.savefig(ml_pngFile, format = "png")
    plt.clf()
    # ml vs. MARF, colored by score.
    plt.axes([0.04,0.05,0.95,0.92])
    plt.scatter(result.marfs,result.ml,c=result.scores,alpha=0.5)
    y_max = max(result.ml)
    y_min = min(result.ml)
    y_range = y_max - y_min
    plt.axis([-0.5*padding,0.5*(1+padding),y_min-y_range*padding,y_max+y_range*padding])
    plt.colorbar()
    if pngFile:
        label = kwargs['resultType']
        ml_pngFile = pngFile+"_ml_"+label+".png"
        plt.savefig(ml_pngFile, format = "png")
    plt.clf()
    # vg (genetic variance) vs. MARF, colored by score.
    plt.axes([0.04,0.05,0.95,0.92])
    plt.scatter(res[1],res[5],c=res[0],alpha=0.5)
    y_max = max(res[5])
    y_min = min(res[5])
    y_range = y_max - y_min
    plt.axis([-0.5*padding,0.5*(1+padding),y_min-y_range*padding,y_max+y_range*padding])
    plt.colorbar()
    if pngFile:
        label = kwargs['resultType']
        vg_pngFile = pngFile+"_vg_"+label+".png"
        plt.savefig(vg_pngFile, format = "png")
    plt.clf()
    # ve (error variance) vs. MARF, colored by score.
    plt.axes([0.04,0.05,0.95,0.92])
    plt.scatter(res[1],res[6],c=res[0],alpha=0.5)
    y_max = max(res[6])
    y_min = min(res[6])
    y_range = y_max - y_min
    plt.axis([-0.5*padding,0.5*(1+padding),y_min-y_range*padding,y_max+y_range*padding])
    plt.colorbar()
    if pngFile:
        label = kwargs['resultType']
        ve_pngFile = pngFile+"_ve_"+label+".png"
        plt.savefig(ve_pngFile, format = "png")
    plt.clf()
    # vg/(vg+ve) vs. MARF, colored by score.
    import numpy as np
    plt.axes([0.04,0.05,0.95,0.92])
    vg = np.array(res[5])
    ve = np.array(res[6])
    v = vg/(vg+ve)
    plt.scatter(res[1],v,c=res[0],alpha=0.5)
    y_max = max(v)
    y_min = min(v)
    y_range = y_max - y_min
    plt.axis([-0.5*padding,0.5*(1+padding),y_min-y_range*padding,y_max+y_range*padding])
    plt.colorbar()
    if pngFile:
        label = kwargs['resultType']
        ve_pngFile = pngFile+"_v_"+label+".png"
        plt.savefig(ve_pngFile, format = "png")
    plt.clf()
    # Variance explained vs. MARF, colored by score.
    plt.axes([0.04,0.05,0.95,0.92])
    plt.scatter(res[1],res[3],c=res[0],alpha=0.5)
    y_max = max(res[3])
    y_min = min(res[3])
    y_range = y_max - y_min
    plt.axis([-0.5*padding,0.5*(1+padding),y_min-y_range*padding,y_max+y_range*padding])
    plt.colorbar()
    if pngFile:
        label = kwargs['resultType']
        ve_pngFile = pngFile+"_var_expl_"+label+".png"
        plt.savefig(ve_pngFile, format = "png")
    plt.clf()
def emmaAnalysisPlots(usePvalueFiles=False):
    """
    Compare Emma results across phenotype transformations (raw,
    log-transform, ranks): draws histograms, QQ-plots, score-correlation
    plots between the transformations, Emma diagnostic panels and
    genome-wide plots, then writes the confounding statistics to a summary
    file.  All paths and phenotype indices are hard-coded.
    """
    import scipy.stats as st
    import numpy as np
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    resdir = "/Users/bjarni/tmp/"
    #resdir = "/Network/Data/250k/tmp-bvilhjal/phenotype_analyzis/"
    #resdir = "/Network/Data/250k/tmp-bvilhjal/qq_plots/ver_1/"
    phenotypeFile = "/Network/Data/250k/dataFreeze_011209/phenotypes_all_raw_042109.tsv"
    print "Loading phenotype data"
    phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
    phenotypeIndices = [277,282,283]#+phenotypeData.categories_2_phenotypes[1]+phenotypeData.categories_2_phenotypes[2]+phenotypeData.categories_2_phenotypes[3]+phenotypeData.categories_2_phenotypes[4]
    #(results_map, resultTypes_map) = _loadData_(phed, phenotypeIndices)
    q_pvalues = None
    stat_dict = {}  # phenotype index -> {"A","M","A2","D","S"} statistics
    for p_i in phenotypeIndices:
        (results_map, resultTypes_map) = _loadEmmaData_(phed, [p_i])
        #try:
        phenName = phed.getPhenotypeName(p_i)
        phenNamePrint = " ".join(phenName.split("_")[1:])
        print "\nWorking on phenotype",phenName
        if usePvalueFiles:
            q_pvalues = _getPermPvalues_(phenName)
            print len(q_pvalues),"permuted pvalues found"
        valCount = phed.countValues(p_i)
        print valCount,"values found."
        if not phed.isBinary(p_i):
            histogramFile = resdir + phenName +"_hist.pdf"
            histogramFile_png = resdir + phenName +"_hist.png"
            drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile , pngFile = histogramFile_png)
            # If the plain log-transform fails, shift by an SD-scaled
            # constant and retry.
            if phed.logTransform(p_i):
                histogramFile = resdir + phenName + "_hist_logTransformed.pdf"
                histogramFile_png = resdir + phenName + "_hist_logTransformed.png"
                drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile, pngFile = histogramFile_png)
            elif not phed.isBinary(p_i):
                print "adding scaled const."
                phed.addSDscaledConstant(p_i)
                if phed.logTransform(p_i):
                    histogramFile = resdir + phenName + "_hist_logTransformed_const.pdf"
                    histogramFile_png = resdir + phenName + "_hist_logTransformed_const.png"
                    drawHistogram(phed, p_i, title = phenNamePrint, pdfFile=histogramFile, pngFile = histogramFile_png)
        # phed2.naOutliers(p_i,10)
        # histogramFile = resdir + phenName + "_hist_noOutliers.pdf"
        # histogramFile_png = resdir + phenName + "_hist_noOutliers.png"
        # drawHistogram(phed2, p_i, title = phenName, pdfFile = histogramFile, pngFile = histogramFile_png)
        # if phed2.logTransform(p_i):
        #	histogramFile = resdir + phenName + "_hist_logTransformed_noOutliers.pdf"
        #	histogramFile_png = resdir + phenName + "_hist_logTransformed_noOutliers.png"
        #	drawHistogram(phed2, p_i, title = phenName, pdfFile = histogramFile, pngFile = histogramFile_png)
        results = results_map[p_i]
        resultTypes = resultTypes_map[p_i]
        s_dict={}
        #s_dict["ml_pval_cor"]=mpc
        #(mpc) = drawEmmaPlots(results,pngFile=pngFile,resultTypes=resultTypes)
        #s_dict["ml_pval_cor"]=mpc
        qqplotFile = resdir + phenName + "_qqplot.pdf"
        qqplotFile_png = resdir + phenName + "_qqplot.png"
        s_dict={}
        (As,Ms)=drawQQPlot(results, 1000, phenName = phenNamePrint, isBinary=phed.isBinary(p_i), resultTypes = resultTypes, pdfFile=qqplotFile, pngFile=qqplotFile_png, perm_pvalues = q_pvalues)
        s_dict["A"]=As
        s_dict["M"]=Ms
        qqplotFile = resdir + phenName + "_qqplot_log.pdf"
        qqplotFile_png = resdir + phenName + "_qqplot_log.png"
        (ds,areas,slopes) = drawLogQQPlot(results, 1000,5, phenName = phenNamePrint, isBinary=phed.isBinary(p_i), resultTypes = resultTypes, pdfFile=qqplotFile, pngFile=qqplotFile_png, perm_pvalues = q_pvalues)
        s_dict["A2"]=areas
        s_dict["D"]=ds
        s_dict["S"]=slopes
        stat_dict[p_i] = s_dict
        # Correlate the -log10 scores between transformations:
        # results[0]=raw, results[1]=logTransform, results[2]=ranks
        # (the datasets loaded by _loadEmmaData_).
        results[0].negLogTransform()
        results[1].negLogTransform()
        results[2].negLogTransform()
        x = np.array(results[0].scores)
        y = np.array(results[1].scores)
        z = np.array(results[2].scores)
        plt.figure(figsize=(7,6))
        plt.plot(x,y,"k.",label="correlation is: "+str(round(st.pearsonr(x,y)[0],4)))
        plt.legend(loc = 2, numpoints = 4, handlelen = 0.05, markerscale = 1,pad=0.018)
        pngFile = resdir + phenName + "_corr_raw_logTransform.png"
        if pngFile:
            plt.savefig(pngFile, format = "png")
        plt.clf()
        plt.plot(x,z,"k.",label="correlation is: "+str(round(st.pearsonr(x,z)[0],4)))
        plt.legend(loc = 2, numpoints = 4, handlelen = 0.05, markerscale = 1,pad=0.018)
        pngFile = resdir + phenName + "_corr_raw_ranks.png"
        if pngFile:
            plt.savefig(pngFile, format = "png")
        plt.clf()
        for i in range(0,len(results)):
            result = results[i]
            print resultTypes[i]
            pngFile = resdir + phenName + "_fit"
            drawEmmaPlots(result,pngFile=pngFile,resultType=resultTypes[i])
            pngFile = resdir + phenName + "_gwplot_" +resultTypes[i]+".png"
            pdfFile = resdir + phenName + "_gwplot_" +resultTypes[i]+".pdf"
            plotResults.plotResult(result,pdfFile=pdfFile,pngFile=pngFile,percentile=96,type="pvals", plotBonferroni=True,minScore=1)
        #except Exception:
        #	print "\nPhenotype index", p_i, "failed."
        del results_map
        gc.collect() #Calling garbage collector, in an attempt to clean up memory..
    print stat_dict
    # Write one "phenotype, method" row of statistics per phenotype/method.
    stat_file_name = resdir + "confounding_stat_4.txt"
    f = open(stat_file_name,"w")
    methods = ["KW","Emma"]
    f.write("phenotype_name, method_name, is_binary, D, A, B, M, S\n")
    for p_i in phenotypeIndices:
        if stat_dict.has_key(p_i):
            s_dict = stat_dict[p_i]
            phenName = phed.getPhenotypeName(p_i)
            phenName = " ".join(phenName.split("_")[1:])
            for i in range(0,len(methods)):
                st = phenName+", "+methods[i]+", "+str(phed.isBinary(p_i))+", "+str(s_dict["D"][i])+", "+str(s_dict["A"][i])+", "+str(s_dict["A2"][i])+", "+str(s_dict["M"][i])+", "+str(s_dict["S"][i])+"\n"
                f.write(st)
    f.close()
def _countVals_():
resdir = "/Network/Data/250k/tmp-bvilhjal/phenotype_analyzis/"
phenotypeFile = "/Network/Data/250k/dataFreeze_011209/phenotypes_all_raw_012509.tsv"
print "Loading phenotype data"
phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
phenotypeIndices = phenotypeData.categories_2_phenotypes[1]+phenotypeData.categories_2_phenotypes[2]+phenotypeData.categories_2_phenotypes[3]+phenotypeData.categories_2_phenotypes[4]
print "total # of phenotypes:", phed.countPhenotypes()
print "# of phenotypes analyzed:", len(phenotypeIndices)
totalCounts = []
for p_i in phenotypeIndices:
valCount = phed.countValues(p_i)
totalCounts.append(valCount)
snpsDataFile="/Network/Data/250k/dataFreeze_011209/250K_f13_012509.csv"
import dataParsers,snpsdata
snpsds = dataParsers.parseCSVData(snpsDataFile, format=1, deliminator=",")#,debug=True)
snpsd = snpsdata.SNPsDataSet(snpsds,[1,2,3,4,5])
phed.removeAccessionsNotInSNPsData(snpsd)
overlappingCounts = []
for p_i in phenotypeIndices:
valCount = phed.countValues(p_i)
overlappingCounts.append(valCount)
#ecotypes_192 = phenotypeData._getFirst192Ecotypes_()
ecotypes_192 = _get192Ecotypes_()
ecotypes_192 = [str(e) for e in ecotypes_192]
print "len(ecotypes_192):",len(ecotypes_192)
print ecotypes_192
phed.filterAccessions(ecotypes_192)
filename = resdir+"phen_value_count_new_data_012509_v2.txt"
f = open(filename,"w")
f.write("Phenotype, total_count, overlapping_count, 192_overlap_count\n")
for i in range(0,len(phenotypeIndices)):
p_i = phenotypeIndices[i]
try:
phenName = phed.getPhenotypeName(p_i)
valCount = phed.countValues(p_i)
f.write(str(phenName)+", "+str(totalCounts[i])+", "+str(overlappingCounts[i])+", "+str(valCount)+"\n")
except Exception:
print "\nPhenotype index", p_i, "failed."
f.close()
def _get192Ecotypes_():
resdir = "/Network/Data/250k/tmp-bvilhjal/phenotype_analyzis/"
phenotypeFile = "/Network/Data/250k/dataFreeze_011209/phenotypes_all_raw_012509.tsv"
print "Loading phenotype data"
phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
phenotypeIndices = phenotypeData.categories_2_phenotypes[1]+phenotypeData.categories_2_phenotypes[2]+phenotypeData.categories_2_phenotypes[3]+phenotypeData.categories_2_phenotypes[4]
total_accessions = set()
for p_i in phenotypeIndices:
if not p_i in [5,6,7]:
accessions = phed.getAccessionsWithValues(p_i)
total_accessions = total_accessions.union(accessions)
ecotypes_192 = phenotypeData._getFirst192Ecotypes_()
ecotypes_192 = [str(e) for e in ecotypes_192]
print "len(ecotypes_192):",len(ecotypes_192)
#print ecotypes_192
phed.filterAccessions(ecotypes_192)
for p_i in [5,6,7]:
accessions = phed.getAccessionsWithValues(p_i)
total_accessions = total_accessions.union(accessions)
total_accessions = list(total_accessions)
print len(total_accessions)
total_accessions.sort()
print total_accessions
ecotype_info_dict = phenotypeData._getEcotypeIdInfoDict_()
ets = []
i = 0
for et in total_accessions:
et = int(et)
if ecotype_info_dict.has_key(et):
print str(et)+", "+str(ecotype_info_dict[et][0])+", "+str(ecotype_info_dict[et][1])
i += 1
ets.append(et)
else:
print et,"is missing in genotype data."
print i
return ets
def _plotConfoundingStats_():
#import pylab as plt
resdir = "/Users/bjarnivilhjalmsson/Projects/Data/gwas_results/perm_tests/"
stat_file_dir = "/Users/bjarnivilhjalmsson/tmp/"
phenotypeFile = "/Users/bjarnivilhjalmsson/Projects/Data/phenotypes/phenotypes_all_raw_042109.tsv"
print "Loading phenotype data"
phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
phenotypeIndices = phenotypeData.categories_2_phenotypes[1]+phenotypeData.categories_2_phenotypes[2]+phenotypeData.categories_2_phenotypes[3]+phenotypeData.categories_2_phenotypes[4]
m_pvals = {}
a_pvals = {}
ks_pvals = {}
s_pvals = {}
for p_i in phenotypeIndices:
#if not phed.isBinary(p_i):
phenName = phed.getPhenotypeName(p_i)
print "Loading permutation stats data for",phenName
filename = resdir+"KW_perm_n1000_f0.01_"+phenName+".perm.stat.txt"
f = open(filename,"r")
lines = f.readlines()
pvals = (lines[-1].strip()).split(',')
m_pvals[p_i] = -math.log10(float(pvals[0].split(" ")[-1]))
a_pvals[p_i] = -math.log10(float(pvals[1]))
ks_pvals[p_i] = -math.log10(float(pvals[2]))
s_pvals[p_i] = -math.log10(float(pvals[3]))
x_ticks = []
s_ticks = []
x_pos = 1
for cat in [1,2,3,4]:
for p_i in phenotypeData.categories_2_phenotypes[cat]:
p_name = phed.getPhenotypeName(p_i,niceName=True)
if p_name in ["avrPphB","avrRpm1","avrB","avrRpt2"]:
s_ticks.append("$\mathit{A"+p_name[1:]+"}$")
else:
s_ticks.append(p_name)
#plt.text(x_pos+shift,min_stat-0.1*stat_range,p_i,rotation="vertical",size="xx-small")
x_ticks.append(x_pos-0.5)
x_pos += 1
x_pos = x_pos+1
plt.figure(figsize=(11,8))
x_pos = 0
plt.subplot(411)
colors = {1:"b",2:"r",3:"g",4:"c"}
categories_names = {1:"Flowering time",2:"Developmental",3:"Defense",4:"Ionomics"}
categories_pos_corr = {1:-2,2:-3,3:-4,4:0}
for i in [1,2,3,4]:
phenotypeIndices = phenotypeData.categories_2_phenotypes[i]
newPhenotypeIndices = []
for p_i in phenotypeIndices:
#if not phed.isBinary(p_i):
newPhenotypeIndices.append(p_i)
phenotypeIndices = newPhenotypeIndices
m_list = []
for p_i in phenotypeIndices:
m_list.append(m_pvals[p_i])
plt.bar(range(x_pos,len(m_list)+x_pos),m_list,color = colors[i])
plt.text(x_pos+len(m_list)/2-4+categories_pos_corr[i],4.26,categories_names[i])
x_pos = x_pos+len(m_list)+1
plt.plot([0-0.02*(x_pos-1),1.02*(x_pos-1)],[-math.log10(0.05),-math.log10(0.05)],"k-.")
plt.axis([0-0.02*(x_pos-1),1.02*(x_pos-1),-0.08,4.08])
#plt.xticks(x_ticks,s_ticks,size="x-small",rotation="vertical")
plt.ylabel("M: -log$_{10}$(p)")
#plt.savefig(stat_file_dir+"confounding_M_pvalues.png", format = "png")
#plt.clf()
#plt.axes([.06,.16,.91,.81])
x_pos = 0
plt.subplot(412)
for i in [1,2,3,4]:
phenotypeIndices = phenotypeData.categories_2_phenotypes[i]
newPhenotypeIndices = []
for p_i in phenotypeIndices:
#if not phed.isBinary(p_i):
newPhenotypeIndices.append(p_i)
phenotypeIndices = newPhenotypeIndices
a_list = []
for p_i in phenotypeIndices:
a_list.append(a_pvals[p_i])
plt.bar(range(x_pos,len(a_list)+x_pos),a_list,color = colors[i])
x_pos = x_pos+len(a_list)+1
plt.plot([0-0.02*(x_pos-1),1.02*(x_pos-1)],[-math.log10(0.05),-math.log10(0.05)],"k-.")
plt.axis([0-0.02*(x_pos-1),1.02*(x_pos-1),-0.08,4.08])
#plt.xticks(x_ticks,s_ticks,size="x-small",rotation="vertical")
plt.ylabel("A: -log$_{10}$(p)")
#plt.savefig(stat_file_dir+"confounding_A_pvalues.png", format = "png")
#plt.clf()
#plt.axes([.06,.16,.91,.81])
x_pos = 0
plt.subplot(413)
for i in [1,2,3,4]:
phenotypeIndices = phenotypeData.categories_2_phenotypes[i]
newPhenotypeIndices = []
for p_i in phenotypeIndices:
#if not phed.isBinary(p_i):
newPhenotypeIndices.append(p_i)
phenotypeIndices = newPhenotypeIndices
a_list = []
for p_i in phenotypeIndices:
a_list.append(ks_pvals[p_i])
plt.bar(range(x_pos,len(a_list)+x_pos),a_list,color = colors[i])
x_pos = x_pos+len(a_list)+1
plt.plot([0-0.02*(x_pos-1),1.02*(x_pos-1)],[-math.log10(0.05),-math.log10(0.05)],"k-.")
plt.axis([0-0.02*(x_pos-1),1.02*(x_pos-1),-0.08,4.08])
#plt.xticks(x_ticks,s_ticks,size="x-small",rotation="vertical")
plt.ylabel("D: -log$_{10}$(p)")
#plt.savefig(stat_file_dir+"confounding_KS_pvalues.png", format = "png")
#plt.clf()
#plt.axes([.06,.16,.91,.81])
x_pos = 0
plt.subplot(414)
for i in [1,2,3,4]:
phenotypeIndices = phenotypeData.categories_2_phenotypes[i]
newPhenotypeIndices = []
for p_i in phenotypeIndices:
#if not phed.isBinary(p_i):
newPhenotypeIndices.append(p_i)
phenotypeIndices = newPhenotypeIndices
a_list = []
for p_i in phenotypeIndices:
a_list.append(s_pvals[p_i])
plt.bar(range(x_pos,len(a_list)+x_pos),a_list,color = colors[i])
x_pos = x_pos+len(a_list)+1
plt.plot([0-0.02*(x_pos-1),1.02*(x_pos-1)],[-math.log10(0.05),-math.log10(0.05)],"k-.")
plt.axis([0-0.02*(x_pos-1),1.02*(x_pos-1),-0.08,4.08])
plt.xticks(x_ticks,s_ticks,size="x-small",rotation="vertical")
plt.subplots_adjust(right=0.99)
plt.subplots_adjust(left=0.05)
plt.subplots_adjust(bottom=0.15)
plt.subplots_adjust(top=0.96)
plt.ylabel("S: -log$_{10}$(p)")
plt.savefig(stat_file_dir+"confounding_pvalues.pdf", format = "pdf")
plt.clf()
x_ticks = []
s_ticks = []
x_pos = 1
for cat in [1,2,3,4]:
for p_i in phenotypeData.categories_2_phenotypes[cat]:
s_ticks.append(phed.getPhenotypeName(p_i))
#plt.text(x_pos+shift,min_stat-0.1*stat_range,p_i,rotation="vertical",size="xx-small")
x_ticks.append(x_pos-0.5)
x_pos += 1
x_pos = x_pos+1
#print m_pvals, a_pvals, ks_pvals, s_pvals
import numpy as np
for i in [1,2,3,4]:
phenotypeIndices = phenotypeData.categories_2_phenotypes[i]
x_ticks = []
s_ticks = []
x_pos = 1
for p_i in phenotypeData.categories_2_phenotypes[cat]:
s_ticks.append(phed.getPhenotypeName(p_i))
#plt.text(x_pos+shift,min_stat-0.1*stat_range,p_i,rotation="vertical",size="xx-small")
x_ticks.append(x_pos)
x_pos += 1
val_list = []
for d in [m_pvals, a_pvals, ks_pvals, s_pvals]:
s_list = []
for p_i in phenotypeIndices:
s_list.append(d[p_i])
val_list.append(s_list)
a = np.array(val_list)
print a
#plt.matshow(a,vmin=0, vmax=1,extent=None)
#plt.xticks(x_ticks,s_ticks,size="x-small",rotation=90)
#plt.colorbar()
#plt.show()
def _plotConfoundingStats2_():
resdir = "/Network/Data/250k/tmp-bvilhjal/perm_tests/"
stat_file_dir = "/Users/bjarni/tmp/"
stats_dict = {1:{},2:{},3:{},4:{}}
for cat in [1,2,3,4]:
f = open(stat_file_dir+"confounding_stat_"+str(cat)+".txt","r")
lines = f.readlines()
p_dict = {}
m_dict = {}
for line in lines[1:]:
p_name = line[0].strip()
line = line.strip()
line = line.split(",")
#"phenotype_name, method_name, is_binary, D, A, B, M, S\n"
method = line[1].strip()
m_dict[method] = {}
m_dict[method]["is_binary"] = (line[2].strip()=="True")
m_dict[method]["D"]= float(line[3])
m_dict[method]["A"]= float(line[4])
m_dict[method]["B"]= float(line[5])
m_dict[method]["M"]= float(line[6])
m_dict[method]["S"]= float(line[7])-1.0
p_dict[line[0].strip()] = m_dict.copy()
stats_dict[cat] = p_dict
print stats_dict[1]
print stats_dict[2]
print stats_dict[3]
print stats_dict[4]
#import pylab as plt
figure = plt.figure(figsize=(14,8))
axes = plt.Axes(figure, [.06,.15,.91,.81])
figure.add_axes(axes)
x_pos = 0
colors = {1:"b",2:"r",3:"g",4:"y"}
max_stats = []
min_stats = []
for cat in [1,2,3,4]:
phenotypes_dict = stats_dict[cat]
phenotypes = phenotypes_dict.keys()
phenotypes.sort()
s_list = {0:[],1:[]}
for p_i in phenotypes:
if not phenotypes_dict[p_i]["KW"]["is_binary"]:
s_list[0].append(phenotypes_dict[p_i]["KW"]["B"])
s_list[1].append(phenotypes_dict[p_i]["Emma"]["B"])
max_stats.append(max(max(s_list[0]),max(s_list[1])))
min_stats.append(min(min(s_list[0]),min(s_list[1])))
for method in [0,1]:
plt.bar(range(method+x_pos,method+2*len(s_list[method])+x_pos,2),s_list[method],color = colors[1+2*((cat-1)%2)+(method)%2])
x_pos = x_pos+2*len(s_list[method])+1
max_stat = max(max_stats)
min_stat = min(0,min(min_stats))
print min_stat
stat_range = max_stat-min_stat
plt.axis([0-0.02*(x_pos-1),1.02*(x_pos-1),min_stat-stat_range*0.02,max_stat+stat_range*0.02])
x_ticks = []
s_ticks = []
x_pos = 1
for cat in [1,2,3,4]:
shift = 0
phenotypes_dict = stats_dict[cat]
phenotypes = phenotypes_dict.keys()
phenotypes.sort()
for p_i in phenotypes:
if not phenotypes_dict[p_i]["KW"]["is_binary"]:
s_ticks.append(p_i)
#plt.text(x_pos+shift,min_stat-0.1*stat_range,p_i,rotation="vertical",size="xx-small")
x_ticks.append(x_pos+shift)
shift += 2
x_pos = x_pos+shift+1
plt.xticks(x_ticks,s_ticks,size="x-small",rotation="vertical")
plt.ylabel("Area between expected and observed $log(p)$ curve.")
#plt.ylabel("Area between expected and observed p-value curve.")
#plt.ylabel("Kolmogorov-Smirnov $D$-statistic")
#plt.ylabel("(est. slope of $log(p)$)$- 1$")
#plt.ylabel("(median p-value)$- 0.5$")
#plt.show()
plt.savefig(stat_file_dir+"confounding_B.png", format = "png")
plt.clf()
def _plotConfoundingStats3_():
resdir = "/Users/bjarnivilhjalmsson/Projects/Data/gwas_results/perm_tests/"
stat_file_dir = "/Users/bjarnivilhjalmsson/tmp/"
phenotypeFile = "/Users/bjarnivilhjalmsson/Projects/Data/phenotypes/phenotypes_all_raw_042109.tsv"
phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter='\t')
name_dict = phed.getPhenotypeNiceNamesDict()
stats_dict = {1:{},2:{},3:{},4:{}}
for cat in [1,2,3,4]:
f = open(stat_file_dir+"confounding_stat_"+str(cat)+".txt","r")
lines = f.readlines()
p_dict = {}
m_dict = {}
for line in lines[1:]:
p_name = line[0].strip()
line = line.strip()
line = line.split(",")
#"phenotype_name, method_name, is_binary, D, A, B, M, S\n"
method = line[1].strip()
m_dict[method] = {}
m_dict[method]["is_binary"] = (line[2].strip()=="True")
m_dict[method]["D"]= float(line[3])
m_dict[method]["A"]= float(line[4])
m_dict[method]["B"]= float(line[5])
m_dict[method]["M"]= float(line[6])
m_dict[method]["S"]= float(line[7])-1.0
p_dict[line[0].strip()] = m_dict.copy()
stats_dict[cat] = p_dict
print stats_dict[1]
print stats_dict[2]
print stats_dict[3]
print stats_dict[4]
#import pylab as plt
figure = plt.figure(figsize=(18,14))
#axes = plt.Axes(figure, [.06,.15,.91,.81])
#figure.add_axes(axes)
colors = {1:"b",2:"r",3:"g",4:"y"}
stat_ylabs = ["(median p-value)$- 0.5$","Area between expected and observed p-value curve.","Kolmogorov-Smirnov $D$-statistic","(Estimated slope of $log(p)$)$- 1$"]
stat_types = ["M","A","D","S"]
for i in range(0,4):
x_pos = 0
max_stats = []
min_stats = []
plt.subplot(411+i)
for cat in [1,2,3,4]:
phenotypes_dict = stats_dict[cat]
phenotypes = phenotypes_dict.keys()
phenotypes.sort()
s_list = {0:[],1:[]}
for p_i in phenotypes:
if phenotypes_dict[p_i]["KW"]["is_binary"]:
s_list[0].append(phenotypes_dict[p_i]["KW"][stat_types[i]])
s_list[1].append(phenotypes_dict[p_i]["Emma"][stat_types[i]])
if len(s_list[0]):
max_stats.append(max(max(s_list[0]),max(s_list[1])))
min_stats.append(min(min(s_list[0]),min(s_list[1])))
for method in [0,1]:
plt.bar(range(method+x_pos,method+2*len(s_list[method])+x_pos,2),s_list[method],color = colors[1+2*((cat-1)%2)+(method)%2])
x_pos = x_pos+2*len(s_list[method])+1
max_stat = max(max_stats)
min_stat = min(0,min(min_stats))
print min_stat
stat_range = max_stat-min_stat
plt.axis([0-0.02*(x_pos-1),1.02*(x_pos-1),min_stat-stat_range*0.02,max_stat+stat_range*0.02])
x_ticks = []
s_ticks = []
x_pos = 1
for cat in [1,2,3,4]:
shift = 0
phenotypes_dict = stats_dict[cat]
phenotypes = phenotypes_dict.keys()
phenotypes.sort()
for p_i in phenotypes:
if phenotypes_dict[p_i]["KW"]["is_binary"]:
if i==3:
p_name = name_dict[p_i]
if p_name in ["avrPphB","avrRpm1","avrB","avrRpt2"]:
s_ticks.append("$\mathit{A"+p_name[1:]+"}$")
else:
s_ticks.append(p_name)
else:
s_ticks.append("")
#plt.text(x_pos+shift,min_stat-0.1*stat_range,p_i,rotation="vertical",size="xx-small")
x_ticks.append(x_pos+shift)
shift += 2
if shift:
x_pos = x_pos+shift+1
plt.xticks(x_ticks,s_ticks,rotation="vertical")
plt.ylabel(stat_ylabs[i],size="small")
plt.subplots_adjust(right=0.99)
plt.subplots_adjust(left=0.04)
plt.subplots_adjust(bottom=0.12)
plt.subplots_adjust(top=0.99)
#plt.show()
#plt.savefig(stat_file_dir+"confounding_quantitative.pdf", format = "pdf")
plt.savefig(stat_file_dir+"confounding_binary.pdf", format = "pdf")
plt.clf()
def _plotConfoundingStats4_():
resdir = "/Network/Data/250k/tmp-bvilhjal/perm_tests/"
stat_file_dir = "/Users/bjarni/tmp/"
stats_dict = {1:{},2:{},3:{},4:{}}
for cat in [1,2,3,4]:
f = open(stat_file_dir+"confounding_stat_"+str(cat)+".txt","r")
lines = f.readlines()
p_dict = {}
m_dict = {}
for line in lines[1:]:
p_name = line[0].strip()
line = line.strip()
line = line.split(",")
#"phenotype_name, method_name, is_binary, D, A, B, M, S\n"
method = line[1].strip()
m_dict[method] = {}
m_dict[method]["is_binary"] = (line[2].strip()=="True")
m_dict[method]["D"]= float(line[3])
m_dict[method]["A"]= float(line[4])
m_dict[method]["B"]= float(line[5])
m_dict[method]["M"]= float(line[6])
m_dict[method]["S"]= float(line[7])-1.0
p_dict[line[0].strip()] = m_dict.copy()
stats_dict[cat] = p_dict
print stats_dict[1]
print stats_dict[2]
print stats_dict[3]
print stats_dict[4]
#import pylab as plt
figure = plt.figure(figsize=(14,8))
axes = plt.Axes(figure, [.06,.15,.91,.81])
figure.add_axes(axes)
x_pos = 0
colors = {1:"b",2:"r",3:"g",4:"y"}
max_stats = []
min_stats = []
for cat in [1,2,3,4]:
phenotypes_dict = stats_dict[cat]
phenotypes = phenotypes_dict.keys()
phenotypes.sort()
s_list = {0:[],1:[]}
for p_i in phenotypes:
s_list[0].append(phenotypes_dict[p_i]["KW"]["B"])
s_list[1].append(phenotypes_dict[p_i]["Emma"]["B"])
max_stats.append(max(s_list[0]))
min_stats.append(min(s_list[0]))
plt.bar(range(x_pos,len(s_list[0])+x_pos),s_list[0],color = colors[1+2*((cat-1)%2)])
x_pos = x_pos+len(s_list[0])+1
max_stat = max(max_stats)
min_stat = min(0,min(min_stats))
print min_stat
stat_range = max_stat-min_stat
plt.axis([0-0.02*(x_pos-1),1.02*(x_pos-1),min_stat-stat_range*0.02,max_stat+stat_range*0.02])
x_ticks = []
s_ticks = []
x_pos = 1
for cat in [1,2,3,4]:
shift = 0
phenotypes_dict = stats_dict[cat]
phenotypes = phenotypes_dict.keys()
phenotypes.sort()
for p_i in phenotypes:
s_ticks.append(p_i)
#plt.text(x_pos+shift,min_stat-0.1*stat_range,p_i,rotation="vertical",size="xx-small")
x_ticks.append(x_pos+shift-0.5)
shift += 1
x_pos = x_pos+shift+1
plt.xticks(x_ticks,s_ticks,size="small",rotation="vertical")
plt.ylabel("Area between expected and observed $log(p)$ curve.")
#plt.ylabel("Area between expected and observed p-value curve.")
#plt.ylabel("Kolmogorov-Smirnov $D$-statistic")
#plt.ylabel("(est. slope of $log(p)$)$- 1$")
#plt.ylabel("(median p-value)$- 0.5$")
#plt.show()
plt.savefig(stat_file_dir+"confounding_KWonly_B.png", format = "png")
plt.clf()
def _plotRobustnessTests_():
import csv
resdir = "/Network/Data/250k/tmp-bvilhjal/robustness_test/"
fig_dir = "/Users/bjarni/tmp/"
phenotypeFile = "/Network/Data/250k/dataFreeze_011209/phenotypes_all_raw_012509.tsv"
print "Loading phenotype data"
phed = phenotypeData.readPhenotypeFile(phenotypeFile, delimiter = '\t')
phenotypeIndices = phenotypeData.categories_2_phenotypes[1]+phenotypeData.categories_2_phenotypes[2]+phenotypeData.categories_2_phenotypes[3]+phenotypeData.categories_2_phenotypes[4]
#First KW
emma_sd_dict = {}
emma_sd_list = []
emma_log_pvalues = []
found_phenotypes = []
for p_i in phenotypeIndices:
try:
phenName = phed.getPhenotypeName(p_i)
filename = resdir+"KW_rob_f1_"+phenName+".rob.log_pvals_sd"
print "Loading", filename, "..."
reader = csv.reader(open(filename, "rb"))
reader.next()
for row in reader:
emma_log_pvalues.append(float(row[0]))
emma_sd_list.append(float(row[1]))
found_phenotypes.append(p_i)
except Exception:
print p_i,"failed."
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
xs = np.array(emma_log_pvalues)
ys = np.array(emma_sd_list)
print len(emma_sd_list),len(emma_log_pvalues)
xmin = xs.min()
xmax = xs.max()
ymin = ys.min()
ymax = ys.max()
#plt.subplots_adjust(hspace=0.5)
#plt.subplot(121)
plt.hexbin(xs,ys,bins='log',cmap=cm.jet)
plt.axis([xmin, xmax, ymin, ymax])
cb = plt.colorbar()
cb.set_label('$log_{10}(N)$')
plt.ylabel("SD$(log(p))$")
plt.xlabel("$-log(p)$")
plt.savefig(fig_dir+"KW_overall_robustness.png", format = "png")
plt.savefig(fig_dir+"KW_overall_robustness.pdf", format = "pdf")
plt.clf()
kw_xs = emma_log_pvalues
kw_ys = emma_sd_list
emma_sd_dict = {}
emma_sd_list = []
emma_log_pvalues = []
found_phenotypes = []
for p_i in phenotypeIndices:
try:
phenName = phed.getPhenotypeName(p_i)
filename = resdir+"Emma_rob_f1_"+phenName+".rob.log_pvals_sd"
print "Loading", filename, "..."
reader = csv.reader(open(filename, "rb"))
reader.next()
for row in reader:
emma_log_pvalues.append(float(row[0]))
emma_sd_list.append(float(row[1]))
found_phenotypes.append(p_i)
except Exception:
print p_i,"failed."
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
xs = np.array(emma_log_pvalues)
ys = np.array(emma_sd_list)
print len(emma_sd_list),len(emma_log_pvalues)
xmin = xs.min()
xmax = xs.max()
ymin = ys.min()
ymax = ys.max()
#plt.subplots_adjust(hspace=0.5)
#plt.subplot(121)
plt.hexbin(xs,ys,bins='log',cmap=cm.jet)
plt.axis([xmin, xmax, ymin, ymax])
cb = plt.colorbar()
cb.set_label('$log_{10}(N)$')
plt.ylabel("SD$(log(p))$")
plt.xlabel("$-log(p)$")
plt.savefig(fig_dir+"Emma_overall_robustness.png", format = "png",dpi=300)
plt.clf()
xs = emma_log_pvalues
ys = emma_sd_list
print len(kw_xs), len(xs)
figure = plt.figure(figsize=(6,4.5))
plt.axes([0.10, 0.10,0.88, 0.88])
plt.plot(kw_xs,kw_ys,".",markersize=2,color="#aa5555")
plt.plot(xs,ys,".",markersize=2,color="#5555aa")
min_x = min(min(kw_xs),min(xs))
max_x = max(max(kw_xs),max(xs))
delta_x = max_x - min_x
num_bins = 20
kw_bins = [[] for i in range(0,num_bins+1)]
emma_bins = [[] for i in range(0,num_bins+1)]
for i in range(0,len(kw_xs)):
x = kw_xs[i]
bin_nr = int(num_bins*(x-min_x)/delta_x)
kw_bins[bin_nr].append(kw_ys[i])
for i in range(0,len(xs)):
x = xs[i]
bin_nr = int(num_bins*(x-min_x)/delta_x)
emma_bins[bin_nr].append(ys[i])
kw_means = []
emma_means = []
kw_xvals = []
emma_xvals = []
for i in range(0,num_bins):
xval = xmin+delta_x*((i+0.5)/num_bins)
if len(kw_bins[i])>4:
kw_means.append(sum(kw_bins[i])/len(kw_bins[i]))
kw_xvals.append(xval)
if len(emma_bins[i])>4:
emma_means.append(sum(emma_bins[i])/len(emma_bins[i]))
emma_xvals.append(xval)
min_y = min(min(kw_ys),min(ys))
max_y = max(max(kw_ys),max(ys))
delta_y = max_y-min_y
plt.plot(kw_xvals,kw_means,"r",label="Wilcoxon")
plt.plot(emma_xvals,emma_means,"b",label="EMMA")
plt.axis([min_x-0.025*delta_x, max_x+0.025*delta_x, min_y-0.025*delta_y, max_y+0.025*delta_y])
plt.ylabel("SD$(log(p))$")
plt.xlabel("$-log(p)$")
fontProp = matplotlib.font_manager.FontProperties(size=10)
plt.legend(numpoints=1,handlelen=0.005,prop=fontProp)
plt.savefig(fig_dir+"Overall_robustness.png", format = "png",dpi=300)
plt.savefig(fig_dir+"Overall_robustness.pdf", format = "pdf",dpi=300)
plt.clf()
def _phenotype_correlation_(phenotype_file="/Users/bjarnivilhjalmsson/Projects/Data/phenotypes/phenotypes_all_raw_042109.tsv", only_107=True, only_192_accessions=True):
resdir = "/Users/bjarni/tmp/"
phed = phenotypeData.readPhenotypeFile(phenotype_file, delimiter = '\t')
first_192_acc = map(str,phenotypeData._getFirst192Ecotypes_())
phed.filterAccessions(first_192_acc)
#FIXME: remove values for which we don't have genotype data??
#What phenotyps should we use.. (All 107)
phenotypeIndices = phenotypeData.categories_2_phenotypes[1]+phenotypeData.categories_2_phenotypes[2]+phenotypeData.categories_2_phenotypes[3]+phenotypeData.categories_2_phenotypes[4]
num_phens = len(phenotypeIndices)
#phen_labels = phed.getPhenotypeNiceNamesDict()
phen_names = []
for i in phenotypeIndices:
name = phed.getPhenotypeName(i, niceName=True)
if name in ['avrRpm1','avrRpt2','avrPphB','avrB']:
name ="$\mathit{A"+name[1:]+"}$"
phen_names.append(name)
import numpy as np
correlation_matrix = np.arange(num_phens*num_phens,dtype=float).reshape(num_phens,num_phens)
for i, pi_1 in enumerate(phenotypeIndices):
vals_1 = phed.getPhenVals(pi_1,noNAs=False)
for j, pi_2 in enumerate(phenotypeIndices):
vals_2 = phed.getPhenVals(pi_2,noNAs=False)
v1 = []
v2 = []
for k in range(0,len(vals_1)):
if vals_1[k]!="NA" and vals_2[k]!="NA":
v1.append(vals_1[k])
v2.append(vals_2[k])
#print np.corrcoef(v1,v2)
correlation_matrix[i,j] = float(np.corrcoef(v1,v2)[0,1])
print correlation_matrix
#Draw matrix
plt.figure(figsize=(19,17))
plt.axes([0.08,0.08,0.94,0.91])
plt.pcolor(correlation_matrix)
plt.axis([-0.01*num_phens,1.01*num_phens,-0.01*num_phens,1.01*num_phens])
cb = plt.colorbar(fraction=0.082,pad=0.02,shrink=0.4)
cb.set_label("Correlation coefficient")
phen_pos = []
for i in range(num_phens):
phen_pos.append(i+0.5)
yticks,labels = plt.yticks(phen_pos,phen_names,size="small")
for i in range(num_phens):
pi = phenotypeIndices[i]
if pi in phenotypeData.categories_2_phenotypes[1]:
yticks[i].label1.set_color("blue")
elif pi in phenotypeData.categories_2_phenotypes[2]:
yticks[i].label1.set_color("red")
elif pi in phenotypeData.categories_2_phenotypes[3]:
yticks[i].label1.set_color("green")
elif pi in phenotypeData.categories_2_phenotypes[4]:
yticks[i].label1.set_color("purple")
yticks,labels = plt.xticks(phen_pos,phen_names,size="small",rotation=90)
for i in range(num_phens):
pi = phenotypeIndices[i]
if pi in phenotypeData.categories_2_phenotypes[1]:
yticks[i].label1.set_color("blue")
elif pi in phenotypeData.categories_2_phenotypes[2]:
yticks[i].label1.set_color("red")
elif pi in phenotypeData.categories_2_phenotypes[3]:
yticks[i].label1.set_color("green")
elif pi in phenotypeData.categories_2_phenotypes[4]:
yticks[i].label1.set_color("purple")
plt.savefig("/Users/bjarnivilhjalmsson/tmp/test.pdf")
print "arg"
f = open("/tmp/test.csv","w")
st = "phenotype\phenotype,"+(",".join(phen_names))+"\n"
f.write(st)
for i in range(num_phens):
st = phen_names[i]
for j in range(num_phens):
st += ","+str(correlation_matrix[i,j])
st += "\n"
f.write(st)
f.close()
#Cluster phenotypes
#Reorder correlations
if __name__ == '__main__':
#_get192Ecotypes_()
#_countVals_()
#emmaAnalysisPlots()
#plot_all_phenotype_analysis(includeEmmaInBinary=True)
#_drawPowerQQPlots_([5])
#_test_()
#_plotConfoundingStats_()
#_plotRobustnessTests_()
#drawMAFPlots()
#_test_()
#_phenotype_correlation_()
#loadHistogramLabels()
print "Done!"
| [
"tom@phantom"
] | tom@phantom |
691326cf620d757d749fd0cea91b4c52d295a97c | 78d17c3a7332be85078b513eee02f7ae4f18b3db | /lintcode/best_time_to_buy_and_sell_stockIII.py | 98aa9d43dbefced47f890d948e90f8c6c0446edd | [] | no_license | yuhanlyu/coding-challenge | c28f6e26acedf41cef85519aea93e554b43c7e8e | 9ff860c38751f5f80dfb177aa0d1f250692c0500 | refs/heads/master | 2021-01-22T21:59:27.278815 | 2017-11-26T07:34:04 | 2017-11-26T07:34:04 | 85,498,747 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | class Solution:
"""
@param prices: Given an integer array
@return: Maximum profit
"""
def maxProfit(self, prices):
buy1, buy2, sell1, sell2 = -2 ** 32, -2 ** 32, 0, 0
for price in prices:
buy2, sell2 = max(buy2, sell1 - price), max(sell2, buy2 + price)
buy1, sell1 = max(buy1, - price), max(sell1, buy1 + price)
return sell2
| [
"yuhanlyu@gmail.com"
] | yuhanlyu@gmail.com |
f8ac9401d36296f4a1f0612d0736446bc3cdb4ca | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2849/60673/267341.py | eaf3ff20eb6312f521c762fbe44ab3b471ce01c9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | inp = int(input())
nums = input().split(" ")
res = -1
for i in range(inp):
nums[i]=int(nums[i])
allposs = []
for i in range(1,min(nums)):
res = i
for j in range(inp):
if(j%i!=0):
res = -1
break
if(res!=-1):
allposs.append(res)
print (max(allposs)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
7643cfd178b387b8b06cff656e50bb6d918ebaaa | 471c3ad9912423763295c353c3dcbb7e2e74b818 | /seqmod/modules/encoder.py | d9a24cc8d3e8309c5359bdf3fba1972041ed9d17 | [] | no_license | mikekestemont/seqmod | a3bfcbf4c7418005e71cb5381c30a837cff88aec | 7d8c976a03836fcf347395c192987dba531c2144 | refs/heads/master | 2021-01-21T22:05:18.518306 | 2017-06-21T10:26:39 | 2017-06-21T10:26:39 | 95,155,787 | 0 | 0 | null | 2017-06-22T20:55:51 | 2017-06-22T20:55:51 | null | UTF-8 | Python | false | false | 3,168 | py |
import torch
import torch.nn as nn
from torch.autograd import Variable
import seqmod.utils as u
class Encoder(nn.Module):
"""
RNN Encoder that computes a sentence matrix representation
of the input using an RNN.
"""
def __init__(self, in_dim, hid_dim, num_layers, cell,
dropout=0.0, bidi=True):
self.cell = cell
self.num_layers = num_layers
self.num_dirs = 2 if bidi else 1
self.bidi = bidi
self.hid_dim = hid_dim // self.num_dirs
assert hid_dim % self.num_dirs == 0, \
"Hidden dimension must be even for BiRNNs"
super(Encoder, self).__init__()
self.rnn = getattr(nn, cell)(in_dim, self.hid_dim,
num_layers=self.num_layers,
dropout=dropout,
bidirectional=self.bidi)
def init_hidden_for(self, inp):
batch = inp.size(1)
size = (self.num_dirs * self.num_layers, batch, self.hid_dim)
h_0 = Variable(inp.data.new(*size).zero_(), requires_grad=False)
if self.cell.startswith('LSTM'):
c_0 = Variable(inp.data.new(*size).zero_(), requires_grad=False)
return h_0, c_0
else:
return h_0
def forward(self, inp, hidden=None, compute_mask=False, mask_symbol=None):
"""
Paremeters:
-----------
inp: torch.Tensor (seq_len x batch x emb_dim)
hidden: tuple (h_0, c_0)
h_0: ((num_layers * num_dirs) x batch x hid_dim)
n_0: ((num_layers * num_dirs) x batch x hid_dim)
Returns: output, (h_t, c_t)
--------
output: (seq_len x batch x hidden_size * num_directions)
h_t: (num_layers x batch x hidden_size * num_directions)
c_t: (num_layers x batch x hidden_size * num_directions)
"""
if compute_mask: # fixme, somehow not working
seqlen, batch, _ = inp.size()
outs, hidden = [], hidden or self.init_hidden_for(inp)
for inp_t in inp.chunk(seqlen):
out_t, hidden = self.rnn(inp_t, hidden)
mask_t = inp_t.data.squeeze(0).eq(mask_symbol).nonzero()
if mask_t.nelement() > 0:
mask_t = mask_t.squeeze(1)
if self.cell.startswith('LSTM'):
hidden[0].data.index_fill_(1, mask_t, 0)
hidden[1].data.index_fill_(1, mask_t, 0)
else:
hidden.data.index_fill_(1, mask_t, 0)
outs.append(out_t)
outs = torch.cat(outs)
else:
outs, hidden = self.rnn(inp, hidden or self.init_hidden_for(inp))
if self.bidi:
# BiRNN encoder outputs (num_layers * 2 x batch x hid_dim)
# but decoder expects (num_layers x batch x hid_dim * 2)
if self.cell.startswith('LSTM'):
hidden = (u.repackage_bidi(hidden[0]),
u.repackage_bidi(hidden[1]))
else:
hidden = u.repackage_bidi(hidden)
return outs, hidden
| [
"enrique.manjavacas@gmail.com"
] | enrique.manjavacas@gmail.com |
2534b2fee587754dc076a37e9e6e118570ce4945 | d7be997e821923c296e6c74699515b4f62bd4e5f | /lunchbreak/lunch/__init__.py | 150d6f295419e00936ce6130ad86ec89d24197a2 | [] | no_license | ssprasad100/Lunchbreak_backend_again | 1563f5b3590f19f1caf827c8be7cdae9f30b40b9 | 31c99fbf13ea1ae85bfdac94d052c7475f422b38 | refs/heads/master | 2020-05-05T02:14:50.312761 | 2019-04-03T04:16:20 | 2019-04-03T04:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | default_app_config = 'lunch.apps.LunchConfig'
from . import transformations # noqa
| [
"andreas@backx.org"
] | andreas@backx.org |
0bb3df088c46abb2f9a2f47d4e354b816b0342e3 | 780b01976dad99c7c2ed948b8473aa4e2d0404ba | /backtester/reports/campaign_report.py | 8e64e3371ffa925313d9cc8be657c6b8328cc0db | [] | no_license | trendmanagement/tmqrexo_alexveden | a8ad699c2c3df4ce283346d287aff4364059a351 | 4d92e2ee2bc97ea2fcf075382d4a5f80ce3d72e4 | refs/heads/master | 2021-03-16T08:38:00.518593 | 2019-01-23T08:30:18 | 2019-01-23T08:30:18 | 56,336,692 | 1 | 1 | null | 2019-01-22T14:21:03 | 2016-04-15T17:05:53 | Python | UTF-8 | Python | false | false | 15,113 | py | import os
import sys
import warnings
from collections import OrderedDict
from datetime import datetime, date
import pandas as pd
from exobuilder.data.exceptions import QuoteNotFoundException
from tradingcore.campaign import Campaign
from exobuilder.data.assetindex_mongo import AssetIndexMongo
from exobuilder.data.datasource_mongo import DataSourceMongo
from exobuilder.data.exostorage import EXOStorage
from tradingcore.campaign_bridge import ALPHA_NEW_PREFIX
#import matplotlib.pyplot as plt
#
# Warnings messages formatting
#
#def custom_formatwarning(msg, *a):
def custom_formatwarning(msg, category, filename, lineno, line=''):
# ignore everything except the message
return str(msg) + '\n'
warnings.formatwarning = custom_formatwarning
def ipython_info():
ip = False
if 'ipykernel' in sys.modules:
ip = 'notebook'
elif 'IPython' in sys.modules:
ip = 'terminal'
return ip
COMMISSION_PER_CONTRACT = 3.0
class CampaignReport:
    """Builds and prints status/PnL reports for a trading campaign.

    Loads the campaign and its alpha ("swarm") series from EXO storage,
    aggregates per-alpha equity/delta/costs into ``self.campaign_stats``
    (a DataFrame indexed by date) and exposes ``report_*`` helpers that
    print exposure, position, trade and PnL summaries.
    """
    def __init__(self, campaign_name, datasource=None, **kwargs):
        """Load the campaign and pre-compute the campaign statistics frame.

        :param campaign_name: name of the campaign in EXO storage.
        :param datasource: DataSourceMongo-like object; also supplies
            ``exostorage`` when no explicit ``exo_storage`` kwarg is given.
        :param kwargs: ``exo_storage`` (storage override), ``raise_exceptions``
            (raise instead of warn on a missing campaign),
            ``pnl_settlement_ndays`` (window for settlement PnL, default 10).
        """
        self.datasource = datasource
        storage = kwargs.get('exo_storage', False)
        raise_exc = kwargs.get('raise_exceptions', False)
        self.pnl_settlement_ndays = kwargs.get('pnl_settlement_ndays', 10)
        # Settlement PnL requires position access through the datasource, so
        # it is only computed when the storage comes from the datasource.
        if not storage:
            storage = self.datasource.exostorage
            calc_settlements = True
        else:
            calc_settlements = False
        campaign_dict = storage.campaign_load(campaign_name)
        if campaign_dict is None:
            if raise_exc:
                raise Exception("Campaign not found: " + campaign_name)
            else:
                warnings.warn("Campaign not found: " + campaign_name)
                return
        self.last_date = None
        self.prev_date = None
        self.cmp = Campaign(campaign_dict, self.datasource)
        self.campaign_name = campaign_name
        self.swarms_data = storage.swarms_data(self.cmp.alphas_list(), load_v2_alphas=True)
        self.isok = True
        # NOTE: the name campaign_dict is reused here as a fresh accumulator
        # (per-alpha equity series); the loaded campaign definition above is
        # already consumed by the Campaign constructor.
        campaign_dict = {}
        campaign_deltas_dict = {}
        campaign_costs_dict = {}
        for alpha_name, swm_exposure_dict in self.cmp.alphas.items():
            swarm_name = alpha_name
            series = self.swarms_data[swarm_name]['swarm_series']
            # Clip each alpha to its active window; open-ended by default.
            date_begin = swm_exposure_dict.get('begin', datetime(1900, 1, 1))
            date_end = swm_exposure_dict.get('end', datetime(2100, 1, 1))
            # NOTE(review): ``.ix`` is removed in modern pandas — this code
            # targets a legacy pandas version.
            campaign_dict[swarm_name] = series['equity'].ix[date_begin:date_end] * swm_exposure_dict['qty']
            campaign_deltas_dict[swarm_name] = series['delta'].ix[date_begin:date_end] * swm_exposure_dict['qty']
            campaign_costs_dict[swarm_name] = series['costs'].ix[date_begin:date_end] * swm_exposure_dict['qty']
        # Equity: forward-fill gaps, then chain daily changes so alphas with
        # different date ranges can be summed without level jumps.
        campaign_equity = pd.DataFrame(campaign_dict).ffill().diff().cumsum().sum(axis=1)
        campaign_deltas = pd.DataFrame(campaign_deltas_dict).sum(axis=1)
        campaign_costs = pd.DataFrame(campaign_costs_dict).sum(axis=1)
        result_dict = {
            'Equity': campaign_equity,
            'Change': campaign_equity.diff(),
            'Delta': campaign_deltas,
            'Costs': campaign_costs
        }
        if calc_settlements:
            # Day-over-day settlement PnL (plus costs) over the last
            # pnl_settlement_ndays index dates.
            campaign_settle_chg = pd.Series(index=campaign_equity.index)
            prev_idx_dt = None
            for idx_dt in campaign_settle_chg.index[-self.pnl_settlement_ndays:]:
                if prev_idx_dt is None:
                    prev_idx_dt = idx_dt
                    continue
                try:
                    diff = self.cmp.positions_at_date(prev_idx_dt, idx_dt).pnl_settlement - self.cmp.positions_at_date(
                        prev_idx_dt).pnl_settlement
                except QuoteNotFoundException:
                    diff = float('nan')
                campaign_settle_chg[idx_dt] = diff + campaign_costs[idx_dt]
                prev_idx_dt = idx_dt
            result_dict['SettleChange'] = campaign_settle_chg
        self.campaign_stats = pd.DataFrame(result_dict)
    def check_swarms_integrity(self):
        """Validate that all active alphas are up to date and aligned.

        Prints/warns about delayed, stale or misaligned alpha series and
        records the campaign-wide last/previous decision datetimes on
        ``self.last_date`` / ``self.prev_date``.

        :return: True when every active alpha passed all checks.
        """
        isok = True
        last_date = datetime(1900, 1, 1)
        prev_date = datetime(1900, 1, 1)
        decision_time = datetime(1900, 1, 1, 0, 0, 0)
        # Pass 1: find the most recent (and second most recent) quote date
        # across all alphas.
        for k, v in self.swarms_data.items():
            seriesdf = v['swarm_series']
            last_date = max(last_date, seriesdf.index[-1])
            prev_date = max(prev_date, seriesdf.index[-2])
        alphas_alignment = {}
        # Pass 2: compare each active alpha against the campaign-wide dates.
        for k, v in self.swarms_data.items():
            # Skip integrity checks for inactive alphas
            if not self.cmp.alpha_is_active(k, last_date):
                continue
            seriesdf = v['swarm_series']
            alphas_alignment[k] = seriesdf['exposure']
            if k.startswith(ALPHA_NEW_PREFIX):
                # Skip V2 alphas
                if (last_date - seriesdf.index[-1]).days > 0:
                    warnings.warn('[DELAYED] {0}: {1}'.format(k, seriesdf.index[-1]))
                    isok = False
                continue
            # V1 alphas: instrument name is the prefix before the first '_'.
            instrument = k.split('_')[0]
            asset_info = self.datasource.assetindex.get_instrument_info(instrument)
            exec_time, decision_time = AssetIndexMongo.get_exec_time(datetime.now(), asset_info)
            if (last_date - v['last_date']).days > 0:
                warnings.warn('[DELAYED] {0}: {1}'.format(k, v['last_date']))
                isok = False
            elif datetime.now() > decision_time and (datetime.now() - v['last_date']).days > 0:
                warnings.warn('[NOT_ACTUAL] {0}: {1}'.format(k, v['last_date']))
                isok = False
            elif (prev_date - seriesdf.index[-2]).days > 0:
                warnings.warn('[ERR_PREVDAY] {0}: {1}'.format(k, seriesdf.index[-2]))
                isok = False
        print('Last quote date: {0} Pevious date: {1}'.format(last_date, prev_date))
        aligment_df = pd.concat(alphas_alignment, axis=1)
        from IPython.display import display, HTML
        # Any NaN in the last 5 days of the joined exposure frame means the
        # alpha indexes do not line up (data holes).
        if aligment_df.tail(5).isnull().sum().sum() > 0:
            warnings.warn("Alphas of the campaign are not properly aligned, data holes or inconsistent index detected!")
            isok = False
            with pd.option_context('display.max_rows', None):
                print('Exposure alignment (past 5 days):')
                _alignment_df1 = aligment_df.tail(5)
                not_aligned = _alignment_df1.isnull().any(axis=0)
                display(_alignment_df1[not_aligned.index[not_aligned]].T.sort_index())
        if isok:
            print('Alphas seems to be valid')
        else:
            warnings.warn("Some alphas corrupted!")
        # Combine the quote dates with the (last seen) decision time so
        # position lookups use the intraday decision timestamp.
        self.last_date = datetime.combine(last_date.date(), decision_time.time())
        self.prev_date = datetime.combine(prev_date.date(), decision_time.time())
        return isok
    def report_exo_exposure(self):
        """Print per-EXO exposure for the last and previous decision dates."""
        exos = OrderedDict()
        for exo_name, exp_dict in self.cmp.exo_positions(self.last_date).items():
            edic = exos.setdefault(exo_name, {'LastDate': 0.0, 'PrevDate': 0.0})
            edic['LastDate'] = exp_dict['exposure']
        for exo_name, exp_dict in self.cmp.exo_positions(self.prev_date).items():
            edic = exos.setdefault(exo_name, {'LastDate': 0.0, 'PrevDate': 0.0})
            edic['PrevDate'] = exp_dict['exposure']
        print("\n\nEXO Exposure report")
        with pd.option_context('display.max_rows', None):
            print(pd.DataFrame(exos).T.sort_index())
    def report_alpha_exposure(self):
        """Print per-alpha exposure (V1 and bridged V2) for last/prev dates."""
        pd.set_option('display.max_colwidth', 90)
        pd.set_option('display.width', 1000)
        alphas = OrderedDict()
        for alpha_name, exp_dict in self.cmp.alphas_positions(self.last_date).items():
            edic = alphas.setdefault(alpha_name, {'LastDate': 0.0, 'PrevDate': 0.0})
            edic['LastDate'] = exp_dict['exposure']
        for alpha_name, exp_dict in self.cmp.alphas_positions(self.prev_date).items():
            edic = alphas.setdefault(alpha_name, {'LastDate': 0.0, 'PrevDate': 0.0})
            edic['PrevDate'] = exp_dict['exposure']
        #
        # Add bridged alpha v2 exposure to the report
        #
        for k, v in self.swarms_data.items():
            # Skip integrity checks for inactive alphas
            if not self.cmp.alpha_is_active(k, self.last_date):
                continue
            if not k.startswith(ALPHA_NEW_PREFIX):
                continue
            # V2 exposure is stored per-leg; sum legs and re-key by calendar
            # date so it can be looked up with .date().
            exposure_series = v['exposure'].sum(axis=1).copy()
            exposure_series.index = exposure_series.index.map(lambda d: date(d.year, d.month, d.day))
            alphas[k] = {'LastDate': exposure_series.get(self.last_date.date(), float('nan')) * self.cmp.alphas[k]['qty'],
                         'PrevDate': exposure_series.get(self.prev_date.date(), float('nan')) * self.cmp.alphas[k]['qty']}
        print("\n\nAlphas Exposure report")
        with pd.option_context('display.max_rows', None):
            print(pd.DataFrame(alphas).T.sort_index())
    def report_positions(self):
        """Print net contract positions and the implied trades between the
        previous and last decision dates."""
        pos_last = self.cmp.positions_at_date(self.last_date)
        pos_prev = self.cmp.positions_at_date(self.prev_date)
        positions = OrderedDict()
        for contract, exp_dict in pos_last.netpositions.items():
            try:
                # Round quantity to 2 decimals and drop flat positions.
                q = round(exp_dict['qty']*100)/100
                if q == 0:
                    continue
                edic = positions.setdefault(contract.name, {'LastDate': 0.0, 'PrevDate': 0.0, 'Contract': contract})
                edic['LastDate'] = q
            except QuoteNotFoundException:
                warnings.warn("QuoteNotFound for: {0}".format(contract.name))
        for contract, exp_dict in pos_prev.netpositions.items():
            try:
                q = round(exp_dict['qty'] * 100) / 100
                if q == 0:
                    continue
                edic = positions.setdefault(contract.name, {'LastDate': 0.0, 'PrevDate': 0.0, 'Contract': contract})
                edic['PrevDate'] = q
            except QuoteNotFoundException:
                warnings.warn("QuoteNotFound for: {0}".format(contract.name))
        with pd.option_context('display.max_rows', None):
            print("\n\nPositions Exposure report")
            df = pd.DataFrame(positions).T.sort_index()
            if len(df) > 0:
                print(df[['LastDate', 'PrevDate']])
            else:
                print('No positions opened')
            print("\nTrades report")
            if len(df) > 0:
                # A trade is any non-zero day-over-day quantity change.
                df['Qty'] = df['LastDate'] - df['PrevDate']
                df['Price'] = df['Contract'].apply(lambda x: x.price)
                trades_df = df[df['Qty'] != 0]
                if len(trades_df) > 0:
                    print(trades_df[['Qty', 'Price']])
                else:
                    print("No trades occurred")
            else:
                print("No trades occurred")
    def report_pnl(self):
        """Print the tail of the campaign statistics (settlement window)."""
        print(self.campaign_stats.tail(self.pnl_settlement_ndays))
    def report_export(self):
        """Dump campaign statistics to export/campaigns/<name>.csv and show
        a download link when running inside a Jupyter notebook."""
        from IPython.display import display, HTML
        if not os.path.exists('export'):
            os.mkdir('export')
        if not os.path.exists(os.path.join('export', 'campaigns')):
            os.mkdir(os.path.join('export', 'campaigns'))
        fn = os.path.join('export', 'campaigns', self.campaign_name + '.csv')
        self.campaign_stats.to_csv(fn)
        if ipython_info() == 'notebook':
            link = '<a href="{0}" target="_blank">Download CSV: {1}</a>'.format(fn, self.campaign_name)
            display(HTML(link))
        else:
            print("File saved to: {0}".format(fn))
    def calculate_performance_fee(self, starting_capital=50000, dollar_costs=3, performance_fee=0.2, fixed_mgmt_fee=0, plot_graph=False):
        """Simulate equity after transaction costs, a monthly high-water-mark
        performance fee and a fixed monthly management fee.

        :param starting_capital: initial account value added to the equity.
        :param dollar_costs: per-contract commission used for the re-costing.
        :param performance_fee: fraction of new highs charged monthly.
        :param fixed_mgmt_fee: flat fee charged every month.
        :param plot_graph: also plot the equity and fee curves.
        :return: DataFrame with original/costed/all-inclusive equity and
            the cumulative cost and fee series.
        """
        eq = self.campaign_stats.Equity.fillna(0.0)
        costs_sum = self.campaign_stats['Costs'].cumsum()
        equity_without_costs = (eq - costs_sum)
        #
        # Calculating equity with new costs
        #
        # Original costs were booked at COMMISSION_PER_CONTRACT ($3), so
        # dividing by 3.0 recovers the number of contracts traded.
        ncontracts_traded = (self.campaign_stats['Costs'] / 3.0).abs()
        new_costs = ncontracts_traded * -abs(dollar_costs)
        new_equity = equity_without_costs + new_costs.cumsum() + starting_capital
        #
        # Calculation of the performance fees (with high-water mark)
        #
        monthly_eq = new_equity.resample('M').last()
        monthly_high_watermark = monthly_eq.expanding().max().shift()
        # Skip periods when equity closed lower than previous month's high-water mark
        performance_fee_base = monthly_eq - monthly_high_watermark
        performance_fee_base[performance_fee_base <= 0] = 0
        performance_fee = performance_fee_base * -abs(performance_fee)
        management_fee = pd.Series(-abs(fixed_mgmt_fee), index=performance_fee.index)
        # Spread the month-end fees back onto the daily index.
        performance_fees_sum = performance_fee.cumsum().reindex(eq.index, method='ffill')
        management_fee_sum = management_fee.cumsum().reindex(eq.index, method='ffill')
        performance_fee_equity = new_equity + performance_fees_sum.fillna(0.0) + management_fee_sum.fillna(0.0)
        df_result = pd.DataFrame({
            "equity_original": eq + starting_capital,
            "equity_with_costs": new_equity,
            "equity_all_included": performance_fee_equity,
            "costs_sum": new_costs.cumsum(),
            'performance_fee_sum': performance_fees_sum,
            'management_fee_sum': management_fee_sum,
            'ncontracts_traded': ncontracts_traded,
            'costs': new_costs,
            'delta': self.campaign_stats['Delta'],
        })
        if plot_graph:
            df_result[["equity_original", "equity_with_costs", "equity_all_included"]].plot()
            #plt.figure()
            df_result[["costs_sum", 'performance_fee_sum', 'management_fee_sum']].plot()
        return df_result
    def report_all(self):
        """Run every report in order: integrity, exposures, positions, PnL,
        CSV export."""
        self.check_swarms_integrity()
        self.report_exo_exposure()
        self.report_alpha_exposure()
        self.report_positions()
        self.report_pnl()
        self.report_export()
if __name__ == '__main__':
    # Ad-hoc manual run against the live Mongo storage; settings supply the
    # MONGO_CONNSTR / MONGO_EXO_DB connection constants.
    from scripts.settings import *
    # from backtester.reports.campaign_report import CampaignReport
    assetindex = AssetIndexMongo(MONGO_CONNSTR, MONGO_EXO_DB)
    storage = EXOStorage(MONGO_CONNSTR, MONGO_EXO_DB)
    #datasource = DataSourceSQL(SQL_HOST, SQL_USER, SQL_PASS, assetindex, 3, 20, storage)
    # Limit how many futures/options chains the datasource loads per request.
    futures_limit = 4
    options_limit = 20
    datasource = DataSourceMongo(MONGO_CONNSTR, MONGO_EXO_DB, assetindex, futures_limit, options_limit, storage)
    #rpt = CampaignReport('ZN_Bidirectional_W_Risk_Reversals V1', datasource)
    #rpt.report_all()
    # Smoke-test: load one known campaign and evaluate its positions.
    campaign_dict = storage.campaign_load('ZN_Bidirectional_W_Risk_Reversals V1')
    cmp = Campaign(campaign_dict, datasource)
    cmp.positions_at_date()
    pass
"i@alexveden.com"
] | i@alexveden.com |
c72dcbc745508edb37b6bfd454e99223aa888a0d | 94a511b7b31858d383be63ee5a3c1d3272bb6bf3 | /week_2/2_12.py | c2c728ccca1175842f85ad3c14238b72a14c012f | [] | no_license | kolevatov/python_lessons | 90bb3c2139e23cfc0f25f993c4ee636737e7daa8 | ff9290d87a5bdc20ddfb7109015ddb48429a4dd8 | refs/heads/master | 2021-01-13T01:18:15.997795 | 2018-03-06T15:03:03 | 2018-03-06T15:03:03 | 81,450,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | # Шашки
# На доске стоит белая шашка. Требуется определить,
# может ли она попасть в заданную клетку, делая ходы по правилам
# (не превращаясь в дамку). Белые шашки могут ходить по черным клеткам
# по диагонали вверх-влево или вверх-вправо. Ходов может быть несколько!
# Формат ввода
# Вводится клетка, где стоит шашка, а затем клетка, куда шашка должна попасть
# Каждая клетка описывается номером вертикали, а затем номером горизонтали.
# Формат вывода
# Выведите слово YES (заглавными буквами), если шашка может попасть из
# начальной клетки в указанную, и NO в противном случае.
# Примечания
# Доска имеет размер 8x8, вертикали и горизонтали нумеруются числами от 1 до 8
# начиная с левого нижнего угла. Исходная и конечная клетки не совпадают.
def reachable(x1, y1, x2, y2):
    """True if a white checker can get from (x1, y1) to (x2, y2).

    A white piece moves one square diagonally up-left or up-right per move
    (never becoming a king), so the target is reachable exactly when it lies
    on a square of the same colour, strictly above the start, and its
    horizontal offset does not exceed the number of rows climbed.
    """
    # Both squares must have the same (x + y) parity, i.e. the same colour.
    if (x1 + y1) % 2 != (x2 + y2) % 2:
        return False
    # The piece only ever moves up the board.
    if y2 <= y1:
        return False
    # Each of the (y2 - y1) moves shifts the file by exactly one square.
    # BUG FIX: the original compared the *signed* difference (x2 - x1),
    # which wrongly answered YES for unreachable squares far to the left.
    return abs(x2 - x1) <= y2 - y1


if __name__ == '__main__':
    X1 = int(input())
    Y1 = int(input())
    X2 = int(input())
    Y2 = int(input())
    print('YES' if reachable(X1, Y1, X2, Y2) else 'NO')
| [
"kolevatov@bpcbt.com"
] | kolevatov@bpcbt.com |
396f618507110469fb3700af03b351713222ae05 | dded9fb6567928952a283fc1c6db6a5a860bc1a6 | /nerodia/elements/d_list.py | a4897b8ded9bf4603dc2794d59fdbd088c35962b | [
"MIT"
] | permissive | watir/nerodia | 08b84aca4b72eae37e983006c15b824412335553 | 7e020f115b324ad62fe7800f3e1ec9cc8b25fcfe | refs/heads/master | 2023-04-15T20:02:34.833489 | 2023-04-06T23:46:14 | 2023-04-06T23:46:14 | 87,383,565 | 88 | 14 | MIT | 2023-04-06T23:42:29 | 2017-04-06T03:43:47 | Python | UTF-8 | Python | false | false | 311 | py | import six
from .html_elements import HTMLElement
from ..meta_elements import MetaHTMLElement
@six.add_metaclass(MetaHTMLElement)
class DList(HTMLElement):
    def to_dict(self):
        """Map each term (<dt>) text to the matching description (<dd>) text."""
        return {term.text: desc.text for term, desc in zip(self.dts(), self.dds())}
| [
"lucast1533@gmail.com"
] | lucast1533@gmail.com |
0a01821ba0aab521ea8241cf8c72d240e40870cf | 4a90ed83fce4632d47f7eb2997eb742d0230c7e2 | /tests/brython_jinja2/test_context.py | 7ccd7b92e76b4fef6102670685bbc548234ba4ae | [
"BSD-3-Clause"
] | permissive | jonathanverner/brython-jinja2 | 943b0eecc435ee5551ee464e3134164aad7aef27 | cec6e16de1750203a858d0acf590f230fc3bf848 | refs/heads/master | 2022-12-14T15:04:11.668530 | 2017-09-29T02:18:08 | 2017-09-29T02:18:08 | 100,823,068 | 2 | 0 | BSD-3-Clause | 2021-06-01T21:48:13 | 2017-08-19T21:47:17 | Python | UTF-8 | Python | false | false | 875 | py | import asyncio
import pytest
from brython_jinja2.context import Context
def test_extension():
    parent = Context()
    parent.a = 10
    parent.c = 30
    child = Context(base=parent)
    # Lookups missing on the child fall through to the parent context.
    assert child.a == 10
    # The _get helper resolves through the parent chain as well.
    assert child._get('a') == 10
    # Writing on the child shadows, but never mutates, the parent.
    child.a = 20
    assert child.a == 20
    assert parent.a == 10
    # Resolution is recursive through any number of ancestors.
    grandchild = Context(base=child)
    assert grandchild.c == 30
    assert grandchild.a == 20
def test_future(event_loop):
    """A future stored on a Context only becomes visible once it resolves."""
    asyncio.set_event_loop(event_loop)
    ctx = Context()
    # BUG FIX: ``asyncio.async`` cannot even be parsed on Python 3.7+
    # (``async`` became a keyword); ``asyncio.ensure_future`` is the
    # long-standing replacement with identical semantics.
    fut = asyncio.ensure_future(asyncio.sleep(0.1, result=3))
    ctx.test = fut
    # While pending, the attribute must not be observable.
    assert hasattr(ctx, 'test') is False
    event_loop.run_until_complete(fut)
    # Once resolved, the attribute exposes the future's result.
    assert ctx.test == 3
"jonathan.verner@matfyz.cz"
] | jonathan.verner@matfyz.cz |
3887e47d266aa37eeacf4a8d0fe2ecb63c05ffc8 | 342a3af41306cf607eb49bde49348926c6dcd73b | /Packages/Dead/demo/Lib/geoparse.py | d9d4d2a17606539c34e5ed8e9a9790b3b39a6a6c | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] | permissive | CDAT/cdat | 17e5de69a0d9d3bb5b79aaaecb4198ae9d92ed47 | 5133560c0c049b5c93ee321ba0af494253b44f91 | refs/heads/master | 2022-06-05T02:41:12.155720 | 2022-05-18T22:31:18 | 2022-05-18T22:31:18 | 6,660,536 | 72 | 17 | NOASSERTION | 2022-05-18T22:31:19 | 2012-11-12T20:58:18 | Fortran | UTF-8 | Python | false | false | 1,158 | py | #############################################################################
# File: geoparse.py #
# Author: Velimir Mlaker, mlaker1@llnl.gov #
# Date: 05-Aug-2005 #
# Desc: Parsers for geometry string. #
# KNOWN BUG: Only works on form wxh+x+y, i.e. with '+' for x and #
# y coords. It will fail if using '-' for x and y. #
#############################################################################
# Extract width from the geometry string.
def get_w(geo):
    """Return the width component of a 'wxh+x+y' geometry string."""
    width_end = geo.find('x')
    return geo[:width_end]
# Extract height from the geometry string.
def get_h(geo):
    """Return the height component of a 'wxh+x+y' geometry string."""
    start = geo.find('x') + 1
    end = geo.find('+')
    return geo[start:end]
# Extract X and Y from the geometry string.
def get_xy(geo):
    """Return everything after the first '+', i.e. the 'x+y' tail."""
    return geo[geo.find('+') + 1:]
# Extract X from the geometry string.
def get_x(geo):
    """Return the X offset of a 'wxh+x+y' geometry string."""
    tail = geo[geo.find('+') + 1:]  # the 'x+y' part
    return tail[:tail.find('+')]
# Extract Y from the geometry string.
def get_y(geo):
    """Return the Y offset of a 'wxh+x+y' geometry string."""
    tail = geo[geo.find('+') + 1:]  # the 'x+y' part
    return tail[tail.find('+') + 1:]
| [
"doutriaux1@meryem.llnl.gov"
] | doutriaux1@meryem.llnl.gov |
aeaf592f72b9d0bb705e1c4cdd9ed1b97ee88ac1 | 20810657fed82d4fce65487a83e8b057da6dd794 | /python1/validate_input.py | 130007da5b8ee27bb6ceb9d662bc17c7f8aff8f4 | [] | no_license | jmwoloso/Python_1 | a2ddc7e2a3e9caeefe21c90c618c0c91871239b7 | ce49a142f96bca889684261f997c6ac667bd15ce | refs/heads/master | 2016-09-05T11:13:24.296676 | 2015-06-08T17:35:51 | 2015-06-08T17:35:51 | 37,082,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | #!/usr/local/bin/python3
"""Validate user input."""
# Keep prompting until the user types one of the two accepted answers.
while True:
    s = input("Type 'yes' or 'no': ")
    if s in ('yes', 'no'):
        break
    print("Wrong! Try again.")
print(s)
"jmwoloso@asu.edu"
] | jmwoloso@asu.edu |
de53fed39a76f4a71654451bf454b8401756b843 | 5a0f6aeb6147115a8318d5f517cc62f01cfd3f1c | /python/example_publish.py | c621fbd7d9fe24deb9f6f8ab9f14f6b5e2950d00 | [
"MIT"
] | permissive | magictimelapse/mqtt-iota | 2ec61b31281af91dd9f18b9f764d4c5b8789c0a5 | d5c06c4193ca1519c110856c1967dfea01ed9050 | refs/heads/master | 2020-04-14T11:50:53.276082 | 2019-01-02T18:48:42 | 2019-01-02T18:48:42 | 163,824,361 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | #!/usr/bin/env python
import paho.mqtt.client as paho
import json
# MQTT broker endpoint (local Mosquitto-style broker, default port).
broker = 'localhost'
port = 1883
client = paho.Client()
client.connect(broker,port)
# Example sensor payload to be serialized and published.
data = {'temperature':22, 'humidity': 15}
# stringify the json data:
stringified_data = json.dumps(data, separators=(',',':'))
# ret holds paho's publish result; presumably inspected by the caller —
# NOTE(review): no error handling is done here.
ret = client.publish('sensors/data',stringified_data)
| [
"michael.rissi@gmail.com"
] | michael.rissi@gmail.com |
2c118bb2cfb4390ce34d88766238155d957b2817 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_8/kpprya001/question2.py | 35d0a565502a114e8a6ac7400df28b01ba4b664a | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | word = input("Enter a message:\n")
string1 = word
pairs = 0
def count_characters(string1,pairs):
if(string1[1:2]!=""):
if(string1[0:1]==string1[1:2]):
pairs += 1
return count_characters(string1[2:],pairs)
else:
return count_characters(string1[2:],pairs)
return pairs
print("Number of pairs:",count_characters(string1,pairs))
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
c67f3c2d5b47b1adbad2d799d01f3bb7792dc3a5 | 52855d750ccd5f2a89e960a2cd03365a3daf4959 | /ABC/ABC118_A.py | ad973aa79ebf7f7aab79ef27a1e9bd77a051dff0 | [] | no_license | takuwaaan/Atcoder_Study | b15d4f3d15d48abb06895d5938bf8ab53fb73c08 | 6fd772c09c7816d147abdc50669ec2bbc1bc4a57 | refs/heads/master | 2021-03-10T18:56:04.416805 | 2020-03-30T22:36:49 | 2020-03-30T22:36:49 | 246,477,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | A, B = map(int, input().split())
ans = (A + B if B % A == 0 else B - A)
print(ans)
| [
"takutotakuwan@gmail.com"
] | takutotakuwan@gmail.com |
870d8432b7bd7ef532bd4460e8619c7b327af57e | ff441ab720623b522ba522d0fbd6232215a1dc85 | /src/CreateFakeDataForTesting.py | 266512ed534858b229c871fdc88ad1b49d1f56fa | [] | no_license | LeeHuangChen/2017_12_21_ADDA_ExecutionWrapper_versionControlled | 0872dbb76598b4fb0695fcd8a27a12566fd7c96f | 04e6117de3164b12815d195831d6e591d321038f | refs/heads/master | 2021-09-06T22:53:25.693378 | 2018-02-12T23:52:42 | 2018-02-12T23:52:42 | 115,046,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | import os, random
def random_AA_seq(length):
    """Return a random amino-acid sequence of the given length (20-letter alphabet)."""
    alphabet = 'ACDEFGHIKLMNPQRSTVWY'
    return ''.join(random.choice(alphabet) for _ in range(length))
def appendFile(filepath, message):
    """Append *message* to the file at *filepath* (created if missing)."""
    with open(filepath, "a") as handle:
        handle.write(message)
def toFastaSeq(name, seq, taxa):
    """Format one FASTA record: '>name [taxa]' header, sequence, blank line."""
    return ">{0} [{1}]\n{2}\n\n".format(name, taxa, seq)
# Output location and generation parameters for the synthetic data set.
CreateFilename = "Test30AA.fasta"
CreateFolder = "Sequences/"
filepath = os.path.join(CreateFolder, CreateFilename)
proteinLength = 30
ABFuseCount = 1
BAFuseCount = 1
ACount = 20
BCount = 20


def main(out_path=filepath):
    """Write a synthetic FASTA test set to *out_path*.

    Generates two random base proteins (A, B), their half-and-half fusions
    (AB, BA) and many unfused copies of each, all tagged 'test taxa'.
    """
    A = random_AA_seq(proteinLength)
    B = random_AA_seq(proteinLength)
    # Truncate (or create) the output file; close the handle right away
    # instead of leaking it as the original did.
    open(out_path, "w").close()
    length = len(A)
    # BUG FIX: Python 3 "/" returns a float, which is not a valid slice
    # index; integer division is required for the fusion midpoint.
    mid = length // 2
    for i in range(ABFuseCount):
        appendFile(out_path, toFastaSeq("AB" + str(i + 1), A[0:mid] + B[mid:length], "test taxa"))
    for i in range(BAFuseCount):
        appendFile(out_path, toFastaSeq("BA" + str(i + 1), B[0:mid] + A[mid:length], "test taxa"))
    for i in range(ACount):
        appendFile(out_path, toFastaSeq("A" + str(i + 1), A, "test taxa"))
    for i in range(BCount):
        appendFile(out_path, toFastaSeq("B" + str(i + 1), B, "test taxa"))


if __name__ == "__main__":
    main()
"lhc1@rice.edu"
] | lhc1@rice.edu |
d6fc4824750ae5886e59f819e7cd36f2ad6e2073 | 714f0c4a37771b98de3cb817c1950fd08b18a8eb | /WebFrame/WebFrame.py | 9eb045e30a633f1ff641dfabdb4ea0efbb120a55 | [] | no_license | haoen110/http-server | 8543fd6e3cebc63f1390468e44b032c3b0f493fd | 809dcbeed4d2cd10c0f91a5c9b247984e1d28625 | refs/heads/master | 2020-09-14T14:58:17.213936 | 2019-11-21T11:49:37 | 2019-11-21T11:49:37 | 223,162,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | # coding = utf - 8
from socket import *
import time
from setting import *
from urls import *
from views import *
class Application(object):
    """Minimal web-framework backend.

    Listens on ``frame_addr`` (from setting) for requests forwarded by the
    HTTP server: receives a method and a path, answers with a status string
    followed by the response body.
    """

    def __init__(self):
        self.sockfd = socket()
        # Reuse the address so an immediate restart does not fail with
        # "address already in use".
        self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.sockfd.bind(frame_addr)

    def start(self):
        """Accept connections forever, answering one request per connection."""
        self.sockfd.listen(5)
        while True:
            connfd, addr = self.sockfd.accept()
            # First message: the request method.
            method = connfd.recv(128).decode()
            # Second message: the request path.
            path = connfd.recv(128).decode()
            if method == "GET":
                # Static pages by extension, dynamic views otherwise.
                if path == '/' or path[-5:] == '.html':
                    status, response_body = self.get_html(path)
                else:
                    status, response_body = self.get_data(path)
                # Send status then body; the short pause keeps the two sends
                # from being merged into a single TCP segment on the peer.
                connfd.send(status.encode())
                time.sleep(0.05)
                connfd.send(response_body.encode())
            elif method == "POST":
                pass

    def get_html(self, path):
        """Return (status, body) for a static page request.

        '/' maps to index.html; anything else is resolved under STATIC_DIR.
        """
        if path == '/':  # home page
            get_file = STATIC_DIR + '/index.html'
        else:
            get_file = STATIC_DIR + path
        try:
            # BUG FIX: the original leaked the file handle and returned from
            # a ``finally`` block (which silently swallows exceptions); the
            # context manager closes the file and keeps error flow explicit.
            with open(get_file) as f:
                return '200', f.read()
        except IOError:
            return '404', '===Sorry not found the page==='

    def get_data(self, path):
        """Return (status, body) produced by the view mapped to *path* in urls."""
        for url, handler in urls:
            if path == url:
                response_body = handler()
                return "200", response_body
        return "404", "Not Found data"
if __name__ == "__main__":
    app = Application()
    app.start()  # start the framework and wait for requests
"haoenwei@outlook.com"
] | haoenwei@outlook.com |
c7ce27bd37e52a5d67cf6f140e15d898a4b4a87f | cadfcc0879aa94cc0a5b4b4993bf9bcbddbf424d | /is_livraison_16831/urls.py | 40ce640e380c9119a0a6eeed1b119514dd4a30de | [] | no_license | crowdbotics-apps/is-livraison-16831 | 72ab3f68a4cd0a10c7ad2bf4d3e22f07168dfd90 | 5105eda2fcd9ae1ab27cccbc3aa4b547dc9cb839 | refs/heads/master | 2023-05-29T11:31:43.194218 | 2020-05-10T08:23:05 | 2020-05-10T08:23:05 | 262,741,914 | 0 | 0 | null | 2021-06-12T03:02:34 | 2020-05-10T08:19:28 | Python | UTF-8 | Python | false | false | 2,430 | py | """is_livraison_16831 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    # App routes: each app exposes its REST API under api/v1/ and its
    # regular views under its own prefix.
    path("api/v1/", include("delivery_order.api.v1.urls")),
    path("delivery_order/", include("delivery_order.urls")),
    path("api/v1/", include("driver.api.v1.urls")),
    path("driver/", include("driver.urls")),
    path("home/", include("home.urls")),
    path("api/v1/", include("menu.api.v1.urls")),
    path("menu/", include("menu.urls")),
    path("api/v1/", include("delivery_user_profile.api.v1.urls")),
    path("delivery_user_profile/", include("delivery_user_profile.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "IS Livraison"
admin.site.site_title = "IS Livraison Admin Portal"
admin.site.index_title = "IS Livraison Admin"
# swagger
# Interactive API schema (drf-yasg); visible to authenticated users only.
schema_view = get_schema_view(
    openapi.Info(
        title="IS Livraison API",
        default_version="v1",
        description="API documentation for IS Livraison App",
    ),
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a309fd2997f541b938f725a4a3034e26bdaf4f12 | 5bc589ae06cd11da557bb84553fcb10330abf980 | /warunek.py | 6506a250c40625aa10f344b3f7863c5ab714fda7 | [] | no_license | keinam53/Zrozumiec_Programowanie | b06d8260a4f227f521d0b762b9cbfb0ad8a17989 | 5da9fc9eaaedd6962b225517c4c02297ae18c800 | refs/heads/master | 2023-06-04T17:31:10.114679 | 2021-07-03T19:23:39 | 2021-07-03T19:23:39 | 358,726,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | # jablka = 3
# banany = 4.5
# gruszki = 3
# print(f"Czy jabłka są droższe od bananów?\t\t\t\t {jablka>banany}")
# print(f"czy gruszki mają taką samą cenę jak banany?\t\t{gruszki==banany}")
#
# result = jablka==banany
# print(type(result))
# name = "Mariusz"
# result = name =="Mariusz"
# print(result)
# name = input("Jak masz na imię? ")
# porownanie = name == "Mariusz"
# print(f"Twoje imię to Mariusz?\t\t\t {porownanie}")
# shopping_list = ["Mąka","Jogurt"]
# my_list = ["Czekolada","Marchewka","chleb"]
# print(f"{shopping_list} > {my_list} -> {shopping_list>my_list}")
# zad1
# ceny = []
# price = float(input("Podaj cenę pierwszego produktu "))
# ceny.append(price)
# price = float(input("Podaj cenę drugiego produktu "))
# ceny.append(price)
# price = float(input("Podaj cenę trzeciego produktu "))
# ceny.append(price)
#
# print("Porównanie cen")
# print(f"Produkt 1 jest droższy do 2? {ceny[0]>ceny[1]}")
# print(f"Produkt 3 jest droższy do 1? {ceny[2]>ceny[0]}")
# print(f"Produkt 2 jest droższy do 3? {ceny[1]>ceny[2]}")
# zad2
# shopping_elements = input("Podaj listę zakupów rozdzielając elementy przecinkami ")
# shopping_list = shopping_elements.split(",")
# long = len(shopping_list) > 4
# print(f"Czy uważam, że lista zakupów jest długa?\t {long}")
#zad3
def zysk_z_lokaty(wartosc_pocz, oprocentowanie, czas):
    """Compound-interest deposit calculator.

    :param wartosc_pocz: initial deposit.
    :param oprocentowanie: yearly interest rate in percent.
    :param czas: duration in years (yearly capitalisation).
    :return: (final value, profit, profit as a percentage of the deposit).
    """
    wartosc_koncowa = wartosc_pocz * (1 + oprocentowanie / 100) ** czas
    zysk = wartosc_koncowa - wartosc_pocz
    # BUG FIX: the original computed (final / initial) * 100, which is
    # always >= 100 % for any non-negative rate, so the ">= 10 %" question
    # was always answered True. The profit percentage is profit / initial.
    zysk_w_proc = zysk / wartosc_pocz * 100
    return wartosc_koncowa, zysk, zysk_w_proc


if __name__ == '__main__':
    print("Kalkulator oprocentowania")
    wartosc_pocz = float(input("Jaką wartość wpłaciłeś? "))
    oprocentowanie = float(input("Jakie jest oprocentowanie? "))
    czas = float(input("Ile lat trwa lokata? "))
    wartosc_koncowa, zysk, zysk_w_proc = zysk_z_lokaty(wartosc_pocz, oprocentowanie, czas)
    print(f"Zysk na lokacie wyniesie {zysk} zł")
    print(f"Czy zysk na lokacie będzie większy niż 10%\t{zysk_w_proc >= 10}")
| [
"mariusz.baran536@gmail.com"
] | mariusz.baran536@gmail.com |
f07a987ce8d79b004f67e63ac870971f4bc7b1b7 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part001088.py | d44bfe80ca43817a7cad73759d1a8d2507d6d110 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,978 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher87108(CommutativeMatcher):
    """Auto-generated matchpy many-to-one matcher (Rubi integration rules).

    Machine-generated code — do not edit by hand; the nested state machine in
    ``get_match_iter`` mirrors the pattern automaton states noted in the
    ``# State NNNNN`` comments.
    """
    _instance = None
    # Pattern table: id -> (index, multiset of subpattern ids, star variables).
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i3.1.2.0', 1, 1, S(0)), Add)
        ]),
        1: (1, Multiset({1: 1}), [
            (VariableWithCount('i3.1.2.0', 1, 1, S(0)), Add)
        ]),
        2: (2, Multiset({2: 1}), [
            (VariableWithCount('i3.1.2.0', 1, 1, S(0)), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        """Seed the matcher with the empty (None) subject."""
        self.add_subject(None)
    @staticmethod
    def get():
        """Return the process-wide singleton instance, creating it lazily."""
        if CommutativeMatcher87108._instance is None:
            CommutativeMatcher87108._instance = CommutativeMatcher87108()
        return CommutativeMatcher87108._instance
    @staticmethod
    def get_match_iter(subject):
        """Yield (pattern_index, substitution) pairs matching *subject*."""
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 87107
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i3.1.2.1.0', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 87109
            if len(subjects) >= 1 and isinstance(subjects[0], Pow):
                tmp2 = subjects.popleft()
                subjects3 = deque(tmp2._args)
                # State 87110
                if len(subjects3) >= 1:
                    tmp4 = subjects3.popleft()
                    subst2 = Substitution(subst1)
                    try:
                        subst2.try_add_variable('i3.1.2.1.1', tmp4)
                    except ValueError:
                        pass
                    else:
                        pass
                        # State 87111
                        if len(subjects3) >= 1:
                            tmp6 = subjects3.popleft()
                            subst3 = Substitution(subst2)
                            try:
                                subst3.try_add_variable('i3.1.2.1.2', tmp6)
                            except ValueError:
                                pass
                            else:
                                pass
                                # State 87112
                                if len(subjects3) == 0:
                                    pass
                                    # State 87113
                                    if len(subjects) == 0:
                                        pass
                                        # 0: d*x**n /; (cons_f2) and (cons_f3) and (cons_f8) and (cons_f29) and (cons_f5) and (cons_f1575)
                                        yield 0, subst3
                                        # 1: d*x**n /; (cons_f2) and (cons_f3) and (cons_f8) and (cons_f29) and (cons_f4) and (cons_f5) and (cons_f1497)
                                        yield 1, subst3
                                        # 2: d*x**n /; (cons_f29) and (cons_f4) and (cons_f70) and (cons_f71)
                                        yield 2, subst3
                            subjects3.appendleft(tmp6)
                    subjects3.appendleft(tmp4)
                subjects.appendleft(tmp2)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp8 = subjects.popleft()
            associative1 = tmp8
            associative_type1 = type(tmp8)
            subjects9 = deque(tmp8._args)
            matcher = CommutativeMatcher87115.get()
            tmp10 = subjects9
            subjects9 = []
            for s in tmp10:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp10, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 87120
                    if len(subjects) == 0:
                        pass
                        # 0: d*x**n /; (cons_f2) and (cons_f3) and (cons_f8) and (cons_f29) and (cons_f5) and (cons_f1575)
                        yield 0, subst1
                        # 1: d*x**n /; (cons_f2) and (cons_f3) and (cons_f8) and (cons_f29) and (cons_f4) and (cons_f5) and (cons_f1497)
                        yield 1, subst1
                        # 2: d*x**n /; (cons_f29) and (cons_f4) and (cons_f70) and (cons_f71)
                        yield 2, subst1
            subjects.appendleft(tmp8)
        return
        yield
from .generated_part001089 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
8eadc0f79a7b8575a34f645f947de6a744f43919 | c9f67529e10eb85195126cfa9ada2e80a834d373 | /lib/python3.5/site-packages/torch/distributions/chi2.py | ff789c17370fb76cf21ee26e22e1025d3ff63a63 | [
"Apache-2.0"
] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 759 | py | from torch.distributions import constraints
from torch.distributions.gamma import Gamma
class Chi2(Gamma):
    r"""
    Chi-squared distribution parameterized by the shape parameter `df`
    (degrees of freedom).

    Equivalent to ``Gamma(concentration=0.5 * df, rate=0.5)``.

    Example::

        >>> m = Chi2(torch.tensor([1.0]))
        >>> m.sample()  # Chi2 distributed with shape df=1
        0.1046
        [torch.FloatTensor of size 1]

    Args:
        df (float or Tensor): shape parameter of the distribution
    """
    arg_constraints = {'df': constraints.positive}

    def __init__(self, df, validate_args=None):
        # A Chi2 with df degrees of freedom is a Gamma(df / 2, 1 / 2).
        half = 0.5
        super(Chi2, self).__init__(half * df, half, validate_args=validate_args)

    @property
    def df(self):
        # Invert the construction above: concentration == 0.5 * df.
        return 2 * self.concentration
| [
"chilung.cs06g@nctu.edu.tw"
] | chilung.cs06g@nctu.edu.tw |
7bfed9146727f6bf33c206145d54f8f7e691afc7 | cf457dacc75ade598d52a4cfd58c2120192da84c | /Python1808/第一阶段/打飞机/game01/04-动画效果.py | ff0c9a113d67985f54c3802c8f4eb93e0598e79f | [] | no_license | LYblogs/python | b62608d73eb0a5a19306cabd4fd5706806fd557b | 1ee0bcecc3a78c6d2b191600872a1177e9c8df60 | refs/heads/master | 2020-04-12T10:12:01.478382 | 2018-12-19T10:59:47 | 2018-12-19T10:59:47 | 162,422,214 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | """__author__ = 余婷"""
"""
动画原理:不断的刷新界面上的内容(一帧一帧的画)
"""
import pygame
from random import randint
def static_page(screen):
    """Render the static page content: the fixed 'Welcome' caption."""
    label_font = pygame.font.SysFont('Times', 40)
    label = label_font.render('Welcome', True, (0, 0, 0))
    screen.blit(label, (200, 200))
def animation_title(screen):
    """Draw the 'Python' title in a newly randomized RGB color each call."""
    title_font = pygame.font.SysFont('Times', 40)
    random_color = (randint(0, 255), randint(0, 255), randint(0, 255))
    title_surface = title_font.render('Python', True, random_color)
    screen.blit(title_surface, (100, 100))
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((600, 400))
    screen.fill((255, 255, 255))
    static_page(screen)
    pygame.display.flip()
    while True:
        # The body of this for-loop only runs when events are pending.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Shut pygame down cleanly, then leave via SystemExit.
                # (The original called exit(), which is injected by the
                # `site` module and is not guaranteed to exist.)
                pygame.quit()
                raise SystemExit
        # Everything below draws one frame of the animation.
        # Block the thread for the given number of milliseconds
        # (1000 ms == 1 s) so the CPU rests between frames.
        pygame.time.delay(60)
        # Clear the previous frame before drawing the new one.
        screen.fill((255, 255, 255))
        static_page(screen)
        animation_title(screen)
        # Push the freshly drawn frame to the display.
        pygame.display.update()
| [
"2271032145@qq.com"
] | 2271032145@qq.com |
8a19f905ec49cb35ed33920e86912dccf5e9c127 | f2a2f41641eb56a17009294ff100dc9b39cb774b | /old_session/session_1/_188/_188_best_time_to_buy_and_sell_stock_4.py | e9c2d60469b92cb5e4467a4d0984402cb459fd56 | [] | no_license | YJL33/LeetCode | 0e837a419d11d44239d1a692140a1468f6a7d9bf | b4da922c4e8406c486760639b71e3ec50283ca43 | refs/heads/master | 2022-08-13T01:46:14.976758 | 2022-07-24T03:59:52 | 2022-07-24T04:11:32 | 52,939,733 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | """
188. Best Time to Buy and Sell Stock IV
Total Accepted: 35261
Total Submissions: 150017
Difficulty: Hard
Contributors: Admin
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time
(ie, you must sell the stock before you buy again).
"""
class Solution(object):
    """LeetCode 188: max profit from at most k buy/sell transactions."""

    def maxProfit(self, k, prices):
        """
        :type k: int
        :type prices: List[int]
        :rtype: int

        dp[i][j]: max profit up to day j using at most i transactions.
        dp[i][j] = max(dp[i][j-1], prices[j] + max(dp[i-1][jj] - prices[jj]))
        with dp[0][j] == dp[i][0] == 0.
        """
        n = len(prices)
        # Nothing to trade with fewer than two prices or zero transactions.
        # (The original used py2-only xrange and would IndexError for k < 0.)
        if n <= 1 or k <= 0:
            return 0
        # At most n // 2 complete transactions fit into n days, so
        # k >= n // 2 is equivalent to unlimited transactions: take
        # every day-over-day rise.  Floor division keeps the original
        # Python-2 semantics of n / 2.
        if k >= n // 2:
            return sum(max(prices[i] - prices[i - 1], 0) for i in range(1, n))
        dp = [[0] * n for _ in range(k + 1)]
        for i in range(1, k + 1):
            # best_buy tracks max(dp[i-1][jj] - prices[jj]) for jj < j.
            best_buy = dp[i - 1][0] - prices[0]
            for j in range(1, n):
                dp[i][j] = max(dp[i][j - 1], prices[j] + best_buy)
                best_buy = max(best_buy, dp[i - 1][j] - prices[j])
        return dp[k][n - 1]
| [
"yunjun.l33@gmail.com"
] | yunjun.l33@gmail.com |
39f7c7acf75f6b9a74c34de56a6e06a69f0ccd96 | 93e9bbcdd981a6ec08644e76ee914e42709579af | /backtracking/526_Beautiful_Arrangement.py | 44a4d02c074bc1b7a25249856c55e2ac84376c9c | [] | no_license | vsdrun/lc_public | 57aa418a8349629494782f1a009c1a8751ffe81d | 6350568d16b0f8c49a020f055bb6d72e2705ea56 | refs/heads/master | 2020-05-31T11:23:28.448602 | 2019-10-02T21:00:57 | 2019-10-02T21:00:57 | 190,259,739 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/beautiful-arrangement/description/
Suppose you have N integers from 1 to N.
We define a beautiful arrangement as an array that is constructed by
these N numbers successfully if one of the following is true for
the ith position (1 <= i <= N) in this array:
The number at the ith position is divisible by i.
OR
i is divisible by the number at the ith position.
Now given N, how many beautiful arrangements can you construct?
Example 1:
Input: 2
Output: 2
Explanation:
The first beautiful arrangement is [1, 2]:
Number at the 1st position (i=1) is 1, and 1 is divisible by i (i=1).
Number at the 2nd position (i=2) is 2, and 2 is divisible by i (i=2).
The second beautiful arrangement is [2, 1]:
Number at the 1st position (i=1) is 2, and 2 is divisible by i (i=1).
Number at the 2nd position (i=2) is 1, and i (i=2) is divisible by 1.
Note:
N is a positive integer and will not exceed 15.
"""
cache = {}
class Solution(object):
    """LeetCode 526: count permutations P of 1..N where, for every
    1-based position i, P[i] % i == 0 or i % P[i] == 0."""

    def countArrangement(self, N):
        """
        :type N: int
        :rtype: int
        """
        # Per-call memo keyed by (position, remaining values).  This
        # replaces the module-level `cache`, which grew without bound
        # and leaked state across Solution instances.
        memo = {}

        def helper(i, remaining):
            # Fill positions from N down to 1; `remaining` holds the
            # values not yet placed.  Position 1 accepts any value,
            # because x % 1 == 0 for every x.
            if i == 1:
                return 1
            key = (i, remaining)
            if key in memo:
                return memo[key]
            total = 0
            for j, x in enumerate(remaining):
                if x % i == 0 or i % x == 0:
                    total += helper(i - 1, remaining[:j] + remaining[j + 1:])
            memo[key] = total
            return total

        return helper(N, tuple(range(1, N + 1)))
def build_input():
    """Return the sample problem size fed to the demo entry point."""
    sample_n = 2
    return sample_n
if __name__ == "__main__":
    b = build_input()
    s = Solution()
    result = s.countArrangement(b)
    # Number of beautiful arrangements for N == 2 (expected: 2).
    print(result)
| [
"vsdmars@gmail.com"
] | vsdmars@gmail.com |
a9ba0366d74eb0694176c79d13f83932130643ee | 3a1fea0fdd27baa6b63941f71b29eb04061678c6 | /src/ch10/rtda/heap/ConstantPool.py | a3c1c48aa318b84890d2ffb6c8daf0408f4390cf | [] | no_license | sumerzhang/JVMByPython | 56a7a896e43b7a5020559c0740ebe61d608a9f2a | 1554cf62f47a2c6eb10fe09c7216518416bb65bc | refs/heads/master | 2022-12-02T17:21:11.020486 | 2020-08-18T06:57:10 | 2020-08-18T06:57:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: ConstantPool.py
@time: 2019/9/16 17:25
@desc: 运行时常量池,主要存放两类信息:字面量和符号引用,字面量包括整数、浮点数和字符串字面量;
符号引用包括类符号引用、字段符号引用、方法符号引用和接口符号引用。
"""
class ConstantPool:
    """Run-time constant pool.

    Holds two kinds of entries: literals (integers, floats, longs,
    doubles and string literals) and symbolic references (class, field,
    method and interface-method references).
    """

    def __init__(self, clazz, consts):
        self._class = clazz
        self.consts = consts

    @staticmethod
    def new_constant_pool(clazz, cfConstantPool):
        """Convert a class-file constant pool into a run-time pool.

        `cfConstantPool` is a ch10.classfile.ConstantPool.  (The original
        module-level import used for the annotation both interrupted the
        class body and shadowed this run-time class's name; the imports
        are kept method-local instead.)
        """
        from ch10.classfile.CpNumeric import ConstantDoubleInfo, ConstantLongInfo, ConstantFloatInfo, ConstantIntegerInfo
        from ch10.classfile.ConstantStringInfo import ConstantStringInfo
        from ch10.classfile.ConstantClassInfo import ConstantClassInfo
        from ch10.classfile.ConstantMemberRefInfo import ConstantFieldRefInfo, ConstantMethodRefInfo, \
            ConstantInterfaceMethodRefInfo
        from ch10.rtda.heap.CpClassRef import ClassRef
        from ch10.rtda.heap.CpFieldRef import FieldRef
        from ch10.rtda.heap.CpMethodRef import MethodRef
        from ch10.rtda.heap.CpInterfaceMethodRef import InterfaceMethodRef

        cp_count = len(cfConstantPool.cp)
        consts = [None] * cp_count
        rt_constant_pool = ConstantPool(clazz, consts)
        # Constant-pool index 0 is unused, hence the range starts at 1.
        for i in range(1, cp_count):
            cp_info = cfConstantPool.cp[i]
            if isinstance(cp_info, (ConstantIntegerInfo, ConstantFloatInfo,
                                    ConstantLongInfo, ConstantDoubleInfo)):
                # Numeric literals are stored directly by value.
                consts[i] = cp_info.val
            elif isinstance(cp_info, ConstantStringInfo):
                consts[i] = str(cp_info)
            elif isinstance(cp_info, ConstantClassInfo):
                consts[i] = ClassRef(rt_constant_pool, cp_info)
            elif isinstance(cp_info, ConstantFieldRefInfo):
                consts[i] = FieldRef(rt_constant_pool, cp_info)
            elif isinstance(cp_info, ConstantMethodRefInfo):
                consts[i] = MethodRef(rt_constant_pool, cp_info)
            elif isinstance(cp_info, ConstantInterfaceMethodRefInfo):
                consts[i] = InterfaceMethodRef(rt_constant_pool, cp_info)
        return rt_constant_pool

    def get_class(self):
        return self._class

    def get_constant(self, index):
        """Return the constant stored at `index`; raise if the slot is empty."""
        c = self.consts[index]
        if c is not None:
            return c
        raise RuntimeError("No constants at index {0}".format(index))
| [
"huruifeng1202@163.com"
] | huruifeng1202@163.com |
6d13df9f1490f790caa07de014986fd9c92569f8 | d0e953d791920b508104d5f3ca298eab2f6e7bea | /面向对象编程/test5.py | 4ec9b113ff3b5cd1eaafc7fdb07d9173c3233d98 | [] | no_license | JKFjkf/Practise | 97ebabc376e0929f50fd542d0ede77739e3f9088 | 3371d5cc878bdb64f645311f2eb097f59c492c3c | refs/heads/master | 2023-07-03T21:28:06.873370 | 2021-08-11T12:19:39 | 2021-08-11T12:19:39 | 394,978,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | class Father():
def __init__(self):
self.money = 0
def action(self):
print("调用了父类方法")
class Son(Father):
    """Subclass of Father that overrides both `money` and `action`."""

    def __init__(self):
        # Run the parent initializer first (it sets money to 0), then
        # shadow the value; the original skipped super().__init__(),
        # which would silently drop any future parent state.
        super(Son, self).__init__()
        self.money = 1000

    def action(self):
        # Overrides Father.action.
        print("子类重写父类方法")
son = Son()  # Son inherits all of Father's attributes and methods
son.action()  # Son calls its own overriding action(), not Father's
print(son.money)  # Son's own 1000, not the parent's 0
"1920578919@qq.com"
] | 1920578919@qq.com |
db76ae5a9d42054ec2eb762f17657fd4835fe398 | b019cb48889c67b1818605154e757dfeba626cf5 | /Lecon_un/06_02_1.py | d4a889965401a77a1e6a8055206be5a82ad17a3c | [] | no_license | citroen8897/Python_2 | 0ee5d46501bb8b42fe2ed686fbffc98843c8f046 | 035a6384e857221eca0f9d88fb3758313998d5f9 | refs/heads/main | 2023-03-02T20:47:42.150668 | 2021-02-14T23:03:48 | 2021-02-14T23:03:48 | 336,619,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | class A:
    def __init__(self, name, years):
        """Store the given name and age and announce construction."""
        self.name = name
        self.years = years
        print('Hello class A!')
    def test(self):
        """Print a greeting built from the instance's name and years."""
        print(f'Hello {self.name}\nYou are {self.years} years...')
    def test_2(self):
        """Prompt (in Russian) for a new name and age, then re-greet.

        Note: after this call `years` holds a string, since input()
        returns str.
        """
        self.name = input('Введите имя: ')
        self.years = input('Ваш возраст: ')
        self.test()
    def test_3(self):
        """Print `surname`, which must have been assigned externally.

        Raises AttributeError if no `surname` attribute was set; the
        demo code below assigns it before calling this method.
        """
        print(self.surname)
t_1 = A('Ivan', 25)
t_1.test()
t_1.test_2()  # interactively replaces name/years via input()
print(t_1.name)
t_1.name = 'Vasya'  # attributes can be reassigned from outside the class
print(t_1.name)
t_1.surname = 'Jackson'  # dynamically attach a brand-new attribute
print(t_1.surname)
t_1.test_3()  # works only because `surname` was just assigned
| [
"citroen8897@gmail.com"
] | citroen8897@gmail.com |
98dead09577ada0f018d2403a2bcaa0dac33096a | c0c3f303ff6407f055bb24b4d13e3a4a3e796988 | /peachpy/encoder.py | 5353c1f42a4182be6dee5a674f49bd91de441ec4 | [
"BSD-2-Clause"
] | permissive | lemire/PeachPy | d610700476c9ff805fa5dd0d3554b21ecbfef012 | 650a4d866bd67d007371effdc7c096788f0acf20 | refs/heads/master | 2023-08-18T23:50:35.026362 | 2015-09-17T05:24:18 | 2015-09-17T05:24:18 | 42,653,254 | 1 | 1 | null | 2015-09-17T11:49:26 | 2015-09-17T11:49:26 | null | UTF-8 | Python | false | false | 7,810 | py | # This file is part of Peach-Py package and is licensed under the Simplified BSD license.
# See license.rst for the full text of the license.
from peachpy.abi import Endianness
class Encoder:
    """Encodes fixed-width integers and strings into ``bytearray`` objects.

    The constructor binds the endianness-neutral methods (``int16``,
    ``uint16``, ..., ``uint64``) to the matching little- or big-endian
    static implementations and, when ``bitness`` is supplied, binds
    ``signed_offset``/``unsigned_offset`` to the 32- or 64-bit encoders.
    """

    def __init__(self, endianness, bitness=None):
        """Create an encoder for the given endianness and optional bitness.

        :param endianness: Endianness.Little or Endianness.Big.
        :param bitness: 32 or 64; required only for offset encoding.
        """
        assert endianness in {Endianness.Little, Endianness.Big}
        if endianness == Endianness.Little:
            self.int16 = Encoder.int16le
            self.uint16 = Encoder.uint16le
            self.int32 = Encoder.int32le
            self.uint32 = Encoder.uint32le
            self.int64 = Encoder.int64le
            self.uint64 = Encoder.uint64le
        else:
            self.int16 = Encoder.int16be
            self.uint16 = Encoder.uint16be
            self.int32 = Encoder.int32be
            self.uint32 = Encoder.uint32be
            self.int64 = Encoder.int64be
            self.uint64 = Encoder.uint64be
        self.bitness = bitness
        if bitness is not None:
            assert bitness in {32, 64}, "Only 32-bit and 64-bit encoders are supported"
            if bitness == 32:
                self.signed_offset = self.int32
                self.unsigned_offset = self.uint32
            else:
                self.signed_offset = self.int64
                self.unsigned_offset = self.uint64

    @staticmethod
    def int8(n):
        """Converts signed 8-bit integer to bytearray representation"""
        assert -128 <= n <= 127, "%u can not be represented as an 8-bit signed integer" % n
        return bytearray([n & 0xFF])

    @staticmethod
    def uint8(n):
        """Converts unsigned 8-bit integer to bytearray representation"""
        assert 0 <= n <= 255, "%u can not be represented as an 8-bit unsigned integer" % n
        return bytearray([n])

    @staticmethod
    def int16le(n):
        """Converts signed 16-bit integer to little-endian bytearray representation"""
        assert -32768 <= n <= 32767, "%u can not be represented as a 16-bit signed integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF])

    @staticmethod
    def int16be(n):
        """Converts signed 16-bit integer to big-endian bytearray representation"""
        assert -32768 <= n <= 32767, "%u can not be represented as a 16-bit signed integer" % n
        # Mask the high byte too: Python's arithmetic shift keeps the sign
        # for negative n, and bytearray() rejects the unmasked "n >> 8"
        # (the original raised ValueError for every negative input).
        return bytearray([(n >> 8) & 0xFF, n & 0xFF])

    @staticmethod
    def uint16le(n):
        """Converts unsigned 16-bit integer to little-endian bytearray representation"""
        assert 0 <= n <= 65535, "%u can not be represented as a 16-bit unsigned integer" % n
        return bytearray([n & 0xFF, n >> 8])

    @staticmethod
    def uint16be(n):
        """Converts unsigned 16-bit integer to big-endian bytearray representation"""
        assert 0 <= n <= 65535, "%u can not be represented as a 16-bit unsigned integer" % n
        return bytearray([n >> 8, n & 0xFF])

    @staticmethod
    def int32le(n):
        """Converts signed 32-bit integer to little-endian bytearray representation"""
        assert -2147483648 <= n <= 2147483647, "%u can not be represented as a 32-bit signed integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF, (n >> 16) & 0xFF, (n >> 24) & 0xFF])

    @staticmethod
    def int32be(n):
        """Converts signed 32-bit integer to big-endian bytearray representation"""
        assert -2147483648 <= n <= 2147483647, "%u can not be represented as a 32-bit signed integer" % n
        return bytearray([(n >> 24) & 0xFF, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF])

    @staticmethod
    def uint32le(n):
        """Converts unsigned 32-bit integer to little-endian bytearray representation"""
        assert 0 <= n <= 4294967295, "%u can not be represented as a 32-bit unsigned integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF, (n >> 16) & 0xFF, n >> 24])

    @staticmethod
    def uint32be(n):
        """Converts unsigned 32-bit integer to big-endian bytearray representation"""
        assert 0 <= n <= 4294967295, "%u can not be represented as a 32-bit unsigned integer" % n
        return bytearray([n >> 24, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF])

    @staticmethod
    def int64le(n):
        """Converts signed 64-bit integer to little-endian bytearray representation"""
        assert -9223372036854775808 <= n <= 9223372036854775807, \
            "%u can not be represented as a 64-bit signed integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF, (n >> 16) & 0xFF, (n >> 24) & 0xFF,
                          (n >> 32) & 0xFF, (n >> 40) & 0xFF, (n >> 48) & 0xFF, (n >> 56) & 0xFF])

    @staticmethod
    def int64be(n):
        """Converts signed 64-bit integer to big-endian bytearray representation"""
        assert -9223372036854775808 <= n <= 9223372036854775807, \
            "%u can not be represented as a 64-bit signed integer" % n
        return bytearray([(n >> 56) & 0xFF, (n >> 48) & 0xFF, (n >> 40) & 0xFF, (n >> 32) & 0xFF,
                          (n >> 24) & 0xFF, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF])

    @staticmethod
    def uint64le(n):
        """Converts unsigned 64-bit integer to little-endian bytearray representation"""
        assert 0 <= n <= 18446744073709551615, "%u can not be represented as a 64-bit unsigned integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF, (n >> 16) & 0xFF, (n >> 24) & 0xFF,
                          (n >> 32) & 0xFF, (n >> 40) & 0xFF, (n >> 48) & 0xFF, (n >> 56) & 0xFF])

    @staticmethod
    def uint64be(n):
        """Converts unsigned 64-bit integer to big-endian bytearray representation"""
        assert 0 <= n <= 18446744073709551615, "%u can not be represented as a 64-bit unsigned integer" % n
        return bytearray([(n >> 56) & 0xFF, (n >> 48) & 0xFF, (n >> 40) & 0xFF, (n >> 32) & 0xFF,
                          (n >> 24) & 0xFF, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF])

    # The following six methods are placeholders: __init__ rebinds them on
    # every instance to the le/be static implementations selected above.

    def int16(self, n):
        """Converts signed 16-bit integer to bytearray representation according to encoder endianness"""
        pass

    def uint16(self, n):
        """Converts unsigned 16-bit integer to bytearray representation according to encoder endianness"""
        pass

    def int32(self, n):
        """Converts signed 32-bit integer to bytearray representation according to encoder endianness"""
        pass

    def uint32(self, n):
        """Converts unsigned 32-bit integer to bytearray representation according to encoder endianness"""
        pass

    def int64(self, n):
        """Converts signed 64-bit integer to bytearray representation according to encoder endianness"""
        pass

    def uint64(self, n):
        """Converts unsigned 64-bit integer to bytearray representation according to encoder endianness"""
        pass

    @staticmethod
    def fixed_string(string, size):
        """Converts string to fixed-length bytearray representation.

        The result is always a bytearray of exactly `size` bytes, padded
        with NUL bytes; None encodes as all zeros.
        """
        from numbers import Integral
        # Integral covers both int and (on Python 2) long; the original
        # test against the bare name `long` raised NameError on Python 3.
        assert isinstance(size, Integral) and size > 0, "size %u is not a positive integer" % size
        if string is None:
            return bytearray(size)
        import codecs
        byte_string = codecs.encode(string, "utf8")
        if len(byte_string) > size:
            raise ValueError("The length of %s exceeds the target %d" % (string, size))
        # Normalize to bytearray (the original mixed bytes and bytearray
        # depending on the branch) and pad up to the requested size.
        return bytearray(byte_string) + bytearray(size - len(byte_string))

    def signed_offset(self, n):
        """Converts signed integer offset to bytearray representation according to encoder bitness and endianness"""
        raise ValueError("Can not encode signed offset: encoder bitness not specified")

    def unsigned_offset(self, n):
        """Converts unsigned integer offset to bytearray representation according to encoder bitness and endianness"""
        raise ValueError("Can not encode unsigned offset: encoder bitness not specified")
| [
"maratek@gmail.com"
] | maratek@gmail.com |
57108dea039fca66dfd1781469adfe35ca38ca46 | 72612d94e07649586dda53c94a058a26af5ed3e6 | /amr_maldi_ml/deprecated/mean_intensities.py | e2e3b8e8f74586a0b063ad466cd78fa16eb5bbf1 | [
"BSD-3-Clause"
] | permissive | SanmiAndreSofa/maldi_amr | 91e88d0a23d2cb1e5007f73a8ba04be6828d6b6e | cc084d73a2d14c5936878e609f6d44fad0b524c7 | refs/heads/master | 2023-08-06T10:26:58.989597 | 2021-10-04T09:12:05 | 2021-10-04T09:12:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,091 | py | """Calculate mean intensity values of a given scenario for both classes."""
import argparse
import dotenv
import json
import logging
import pathlib
import os
import sys
import numpy as np
from maldi_learn.driams import DRIAMSDatasetExplorer
from maldi_learn.driams import DRIAMSLabelEncoder
from maldi_learn.driams import load_driams_dataset
from maldi_learn.utilities import stratify_by_species_and_label
from models import load_pipeline
from utilities import generate_output_filename
dotenv.load_dotenv()  # loads a local .env so DRIAMS_ROOT (below) can be read
DRIAMS_ROOT = os.getenv('DRIAMS_ROOT')
# These parameters should remain fixed for this particular
# experiment. We always train on the same data set, using
# *all* available years.
site = 'DRIAMS-A'
years = ['2015', '2016', '2017', '2018']
if __name__ == '__main__':
    # Basic log configuration to ensure that we see where the process
    # spends most of its time.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(message)s'
    )
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'INPUT',
        type=str,
        help='Input file',
        nargs='+',
    )
    name = 'mean_intensities'
    parser.add_argument(
        '-o', '--output',
        default=pathlib.Path(__file__).resolve().parent.parent / 'results'
        / name,
        type=str,
        help='Output path for storing the results.'
    )
    parser.add_argument(
        '-f', '--force',
        action='store_true',
        help='If set, overwrites all files. Else, skips existing files.'
    )
    args = parser.parse_args()
    # Create the output directory for storing all results of the
    # individual combinations.
    os.makedirs(args.output, exist_ok=True)
    # Keeps track of the parameters used for all scenarios. This ensures
    # that the user does not call this script for incomparable scenarios
    # that would lead to inconsistent results.
    all_antibiotics = []
    all_sites = []
    all_years = []
    all_seeds = []
    all_models = []
    all_species = []
    all_metadata_versions = []
    # Will contain the mean of all intensities over all scenarios of
    # this run.
    all_mean_intensities = {}
    for f in args.INPUT:
        pipeline, data = load_pipeline(f)
        # Extract the required parameters to build the exact scenario
        # used in the input file.
        antibiotic = data['antibiotic']
        site = data['site']
        years = data['years']
        model = data['model']
        seed = data['seed']
        species = data['species']
        logging.info(f'Site: {site}')
        logging.info(f'Years: {years}')
        logging.info(f'Seed: {seed}')
        explorer = DRIAMSDatasetExplorer(DRIAMS_ROOT)
        metadata_fingerprints = explorer.metadata_fingerprints(site)
        driams_dataset = load_driams_dataset(
            DRIAMS_ROOT,
            site,
            years,
            species=species,
            antibiotics=antibiotic, # Only a single one for this run
            encoder=DRIAMSLabelEncoder(),
            handle_missing_resistance_measurements='remove_if_all_missing',
            spectra_type='binned_6000',
        )
        logging.info(f'Loaded data set for {species} and {antibiotic}')
        # Create feature matrix from the binned spectra. We only need to
        # consider the second column of each spectrum for this.
        X = np.asarray([spectrum.intensities for spectrum in driams_dataset.X])
        logging.info('Finished vectorisation')
        # Stratified train--test split
        train_index, test_index = stratify_by_species_and_label(
            driams_dataset.y,
            antibiotic=antibiotic,
            random_state=seed,
        )
        logging.info('Finished stratification')
        y = driams_dataset.to_numpy(antibiotic)
        X_train, y_train = X[train_index], y[train_index]
        mean_intensities = {}
        # Pretend that we do not know the labels; there should be only
        # two, but this script actually does not care.
        for l in np.unique(y_train):
            spectra = X_train[y_train == l]
            mean_intensities[str(l)] = np.mean(spectra, axis=0)
            # Do *not* convert to a list yet: keeping the np.array makes
            # the accumulation (sums) just below straightforward.
            if str(l) in all_mean_intensities:
                all_mean_intensities[str(l)] += mean_intensities[str(l)]
            else:
                all_mean_intensities[str(l)] = mean_intensities[str(l)]
            # Convert to list in order to ensure proper serialisation
            # later on. This is not the most elegant thing.
            mean_intensities[str(l)] = mean_intensities[str(l)].tolist()
        if years not in all_years:
            all_years.append(years)
        all_antibiotics.append(antibiotic)
        all_sites.append(site)
        all_seeds.append(seed)
        all_species.append(species)
        all_models.append(model)
        all_metadata_versions.append(metadata_fingerprints)
        # Reduce the output and only report the relevant parts. We do
        # not need information about the model, for example, because
        # no model was involved in the training. It is purely needed
        # for the output name generation, though.
        output = {
            'site': site,
            'years': years,
            'seed': seed,
            'antibiotic': antibiotic,
            'species': species,
            'model': model,
            'metadata_versions': metadata_fingerprints,
            'mean_intensities': mean_intensities,
        }
        output_filename = generate_output_filename(
            args.output,
            output
        )
        if not os.path.exists(output_filename) or args.force:
            logging.info(f'Saving {os.path.basename(output_filename)}')
            with open(output_filename, 'w') as f:
                json.dump(output, f, indent=4)
        else:
            logging.warning(
                f'Skipping {output_filename} because it already exists.'
            )
    # With multiple inputs, additionally write the across-run average
    # per class (only valid for a single scenario; checked below).
    if len(args.INPUT) > 1:
        mean_intensities = {}
        for l in all_mean_intensities:
            mean_intensities[l] = all_mean_intensities[l] / len(args.INPUT)
            # NOTE(review): leftover debug output -- consider removing or
            # demoting to logging.debug.
            print(len(mean_intensities[l]))
            print(sum(mean_intensities[l]))
            mean_intensities[l] = mean_intensities[l].ravel().tolist()
        sites = list(set(all_sites))
        antibiotics = list(set(all_antibiotics))
        species = list(set(all_species))
        models = list(set(all_models))
        # Stop if files from more than one antibiotics-species-model scenario
        # were given as input.
        if any([len(l) > 1 for l in [all_years,
                                     sites,
                                     antibiotics,
                                     species,
                                     models]]):
            logging.warning(
                'Cannot include more than one scenario in average '
                'intensity calculation.')
            sys.exit(0)
        output = {
            'site': sites[0],
            'years': all_years[0],
            'seed': all_seeds,
            'antibiotic': antibiotics[0],
            'species': species[0],
            'model': models[0],
            'mean_intensities': mean_intensities,
        }
        output_print = output.copy()
        output_print['seed'] = '-'.join([str(seed) for seed in all_seeds])
        output_filename = generate_output_filename(
            args.output,
            output_print,
            suffix='mean_intensities',
        )
        if not os.path.exists(output_filename) or args.force:
            logging.info(f'Saving {os.path.basename(output_filename)}')
            with open(output_filename, 'w') as f:
                json.dump(output, f, indent=4)
        else:
            logging.warning(
                f'Skipping {output_filename} because it already exists.'
            )
| [
"bastian.rieck@bsse.ethz.ch"
] | bastian.rieck@bsse.ethz.ch |
6e90f2dd477e7c47bc2b9d1496d59b963db72248 | 97f88c3382903ea93391e67523744e4c8aba5214 | /2018_cfg.py | ce7bddea6f14a0730474802697bbe677b48d661a | [] | no_license | diemort/pps-quick-test | 60194090be5de3ec3ae4e9a164051b1f8628d8ca | 0b599608689bb2bfde13b41581cf269d29f7a685 | refs/heads/master | 2023-07-15T19:41:22.552022 | 2019-12-12T16:19:46 | 2019-12-12T16:19:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
# CMSSW process configured for the Run2_2018 era.
process = cms.Process('TEST', eras.Run2_2018)
from base import *
SetDefaults(process)
# Alternative xrootd-less input kept for reference (same file GUID).
#process.source.fileNames = cms.untracked.vstring("/store/data/Run2018D/ZeroBias/RAW/v1/000/320/688/00000/601A721D-AD95-E811-B21A-FA163E28A50A.root")
process.source.fileNames = cms.untracked.vstring("root://eoscms.cern.ch//eos/cms/store/group/phys_pps/sw_test_input/601A721D-AD95-E811-B21A-FA163E28A50A.root")
# Roman-pot IDs consumed by the proton-reconstruction plotter; the
# suffixes mirror the attribute names (45/56, N/F) -- presumably
# sector and near/far pot, confirm against the CTPPS configuration.
process.ctppsProtonReconstructionPlotter.rpId_45_F = 23
process.ctppsProtonReconstructionPlotter.rpId_45_N = 3
process.ctppsProtonReconstructionPlotter.rpId_56_N = 103
process.ctppsProtonReconstructionPlotter.rpId_56_F = 123
process.ctppsProtonReconstructionPlotter.outputFile = "2018_reco_plots.root"
| [
"jan.kaspar@cern.ch"
] | jan.kaspar@cern.ch |
546ebf18503cfe45d623bc097095c2ce40c8e910 | 905da4dc6a829845dba931c18517a4d8b38cc163 | /docs/conf.py | 7743f60c6077222d7cbbde52915a7d7598bbb39f | [
"BSD-2-Clause"
] | permissive | interrogator/drum | 3995cefa9b0e7751b149355a4e19c7a1863549cd | 7a25c574941f9da8b89b0ae162b205f0e3fd5eba | refs/heads/master | 2020-04-29T12:14:24.490049 | 2019-03-20T23:46:35 | 2019-03-20T23:46:35 | 176,129,523 | 0 | 0 | BSD-2-Clause | 2019-03-17T16:34:44 | 2019-03-17T16:34:43 | null | UTF-8 | Python | false | false | 149 | py | from __future__ import unicode_literals
# This file is automatically generated via sphinx-me
# One statement per line (PEP 8) instead of a semicolon-joined pair.
from sphinx_me import setup_conf

setup_conf(globals())
| [
"steve@jupo.org"
] | steve@jupo.org |
3f8c40b2684440089f9b5afdd7e2549878803c84 | 575ab9f0027d82a26041f37a443cda16cf010379 | /DeepLearning/ReinforcementLearning/__init__.py | bf5fdbd2a46e8c62f704b5dcdc0cce5d7ddbd326 | [] | no_license | huning2009/MachineLearning | ca665d63c6924d6229bcdea09d0e9fe715d2d1c8 | 016b98eae7e31aab4e2ca5a14e86150f31e97bba | refs/heads/master | 2022-04-08T06:01:05.370260 | 2020-03-12T04:24:16 | 2020-03-12T04:24:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | #-*- coding:utf-8 _*-
"""
@author:charlesXu
@file: __init__.py.py
@desc:
@time: 2018/03/26
""" | [
"charlesxu86@163.com"
] | charlesxu86@163.com |
321503ad2d0b60331c3b699e7713e9775b995570 | 592498a0e22897dcc460c165b4c330b94808b714 | /9000번/9656_돌 게임 2.py | 42c39e9c69e7e35dc98e8bf3285314e7fb09d23e | [] | no_license | atom015/py_boj | abb3850469b39d0004f996e04aa7aa449b71b1d6 | 42b737c7c9d7ec59d8abedf2918e4ab4c86cb01d | refs/heads/master | 2022-12-18T08:14:51.277802 | 2020-09-24T15:44:52 | 2020-09-24T15:44:52 | 179,933,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | n = int(input())
print("SK" if n % 2 == 0 else "CY")
| [
"zeezlelove@gmail.com"
] | zeezlelove@gmail.com |
906fb2da3da63cf9e71cc8a9c157b1c6b497cd0b | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc039/A/4882474.py | c2ea3729c7876f55faa106340633522238bf101b | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | A,B,C = map(int,input().split())
print(2*A*B+2*A*C+2*B*C) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
5f873cf2ba2a4f40f7e64fce82de01710715ef70 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/ListDataServiceApisRequest.py | db47ea5b60789ea1e5ddd70c65b2e677d34e8dad | [
"Apache-2.0"
] | permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 2,519 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class ListDataServiceApisRequest(RpcRequest):
    """RPC request for the DataWorks ``ListDataServiceApis`` operation.

    Every query parameter travels in the POST body and is exposed
    through a matching getter/setter pair below.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'ListDataServiceApis', 'dide')
        self.set_method('POST')
        # Attach endpoint data only when the base class exposes the slots.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_ApiNameKeyword(self):
        return self.get_body_params().get('ApiNameKeyword')

    def set_ApiNameKeyword(self, ApiNameKeyword):
        self.add_body_params('ApiNameKeyword', ApiNameKeyword)

    def get_ApiPathKeyword(self):
        return self.get_body_params().get('ApiPathKeyword')

    def set_ApiPathKeyword(self, ApiPathKeyword):
        self.add_body_params('ApiPathKeyword', ApiPathKeyword)

    def get_CreatorId(self):
        return self.get_body_params().get('CreatorId')

    def set_CreatorId(self, CreatorId):
        self.add_body_params('CreatorId', CreatorId)

    def get_PageNumber(self):
        return self.get_body_params().get('PageNumber')

    def set_PageNumber(self, PageNumber):
        self.add_body_params('PageNumber', PageNumber)

    def get_PageSize(self):
        return self.get_body_params().get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_body_params('PageSize', PageSize)

    def get_TenantId(self):
        return self.get_body_params().get('TenantId')

    def set_TenantId(self, TenantId):
        self.add_body_params('TenantId', TenantId)

    def get_ProjectId(self):
        return self.get_body_params().get('ProjectId')

    def set_ProjectId(self, ProjectId):
        self.add_body_params('ProjectId', ProjectId)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
2672a1e1230d39d2ec8eb0b54f8fc7cc59b208b4 | 5726f72427fa9f9881c4610749427c33dba84714 | /ginga/misc/plugins/Operations.py | 2ecb4a0b6c5e85c40d2218c28935e527e530d044 | [
"BSD-3-Clause"
] | permissive | saimn/ginga | ff73829b540dfb53c06c38b482e09d877e36887f | 9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455 | refs/heads/master | 2020-12-24T23:10:25.305394 | 2016-08-29T20:34:17 | 2016-08-29T20:34:17 | 12,879,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,681 | py | #
# Operations.py -- Operations management plugin for Ginga viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import GingaPlugin
from ginga.misc import Bunch
from ginga.gw import Widgets
class Operations(GingaPlugin.GlobalPlugin):
    """
    This plugin defines the GUI for managing local plugins, AKA "operations".
    By replacing or subclassing this plugin you can customize the way
    the reference viewer starts and manages operations.
    """

    def __init__(self, fv):
        """Load plugin preferences and subscribe to shell channel/operation events."""
        # superclass defines some variables for us, like logger
        super(Operations, self).__init__(fv)

        prefs = self.fv.get_preferences()
        self.settings = prefs.createCategory('plugin_Operations')
        self.settings.addDefaults(show_channel_control=True,
                                  use_popup_menu=True,
                                  focuscolor="lightgreen")
        self.settings.load(onError='silent')

        # Keep our widgets in sync with channel/operation lifecycle events.
        fv.add_callback('add-channel', self.add_channel_cb)
        fv.add_callback('delete-channel', self.delete_channel_cb)
        fv.add_callback('channel-change', self.change_channel_cb)
        fv.add_callback('add-operation', self.add_operation_cb)

        self.operations = list(fv.get_operations())
        # Background color used to highlight the focused plugin's tray label.
        self.focuscolor = self.settings.get('focuscolor', "lightgreen")
        # Overwritten from settings in build_gui().
        self.use_popup = True

    def build_gui(self, container):
        """Build the channel selector, operation launcher and plugin tray."""
        hbox = Widgets.HBox()
        hbox.set_border_width(0)
        hbox.set_spacing(2)

        # Channel selector combo box.
        cbox1 = Widgets.ComboBox()
        self.w.channel = cbox1
        cbox1.set_tooltip("Select a channel")
        cbox1.add_callback('activated', self.channel_select_cb)
        if self.settings.get('show_channel_control', True):
            hbox.add_widget(cbox1, stretch=0)

        # Operation launcher: either a popup menu button or a combo + "Go".
        self.use_popup = self.settings.get('use_popup_menu', True)
        if self.use_popup:
            opmenu = Widgets.Menu()
            btn = Widgets.Button("Operation")
        else:
            opmenu = Widgets.ComboBox()
            opmenu.set_tooltip("Select an operation")
            hbox.add_widget(opmenu, stretch=0)
            btn = Widgets.Button("Go")
        self.w.operation = opmenu
        btn.add_callback('activated', self.invoke_op_cb)
        btn.set_tooltip("Invoke operation")
        self.w.opbtn = btn
        hbox.add_widget(btn, stretch=0)

        # Tray holding one clickable label per active plugin.
        self.w.optray = Widgets.HBox()
        self.w.optray.set_border_width(0)
        self.w.optray.set_spacing(2)
        hbox.add_widget(self.w.optray, stretch=1)

        container.add_widget(hbox, stretch=0)

    def add_channel_cb(self, viewer, channel):
        """Shell callback: register a new channel and hook its plugin manager."""
        chname = channel.name
        self.w.channel.insert_alpha(chname)

        pl_mgr = channel.opmon
        pl_mgr.add_callback('activate-plugin', self.activate_plugin_cb)
        pl_mgr.add_callback('deactivate-plugin', self.deactivate_plugin_cb)
        pl_mgr.add_callback('focus-plugin', self.focus_plugin_cb)
        pl_mgr.add_callback('unfocus-plugin', self.unfocus_plugin_cb)

        self.logger.debug("added channel %s" % (chname))

    def delete_channel_cb(self, viewer, channel):
        """Shell callback: remove a deleted channel from the selector."""
        chname = channel.name
        self.w.channel.delete_alpha(chname)
        self.logger.debug("deleted channel %s" % (chname))

    def start(self):
        """Populate the controls from channels/operations that already exist."""
        # get the list of channels and populate our channel control
        names = self.fv.get_channelNames()
        for name in names:
            channel = self.fv.get_channelInfo(name)
            self.add_channel_cb(self.fv, channel)

        # get the list of local plugins and populate our operation control
        operations = self.fv.get_operations()
        for opname in operations:
            self.add_operation_cb(self.fv, opname)

    def add_operation_cb(self, viewer, opname):
        """Shell callback: add an operation to the launcher menu/combo box."""
        opmenu = self.w.operation
        if self.use_popup:
            item = opmenu.add_name(opname)
            item.add_callback('activated',
                              lambda *args: self.start_operation_cb(opname))
        else:
            opmenu.insert_alpha(opname)

    def start_operation_cb(self, name):
        """Start the named local plugin on the currently selected channel."""
        self.logger.debug("invoking operation menu")
        idx = self.w.channel.get_index()
        chname = str(self.w.channel.get_alpha(idx))
        self.fv.error_wrap(self.fv.start_local_plugin, chname, name, None)

    def channel_select_cb(self, widget, index):
        """Combo box callback: switch the viewer to the selected channel."""
        if index >= 0:
            chname = self.fv.get_channelNames()[index]
            self.logger.debug("Channel changed, index=%d chname=%s" % (
                index, chname))
            self.fv.change_channel(chname)

    def change_channel_cb(self, viewer, channel):
        """Shell callback: reflect a channel change in our selector."""
        # Update the channel control
        self.w.channel.show_text(channel.name)

    def invoke_op_cb(self, btn_w):
        """Launcher button callback: pop up the menu or start the chosen op."""
        self.logger.debug("invoking operation menu")
        menu = self.w.operation
        if self.use_popup:
            menu.popup(btn_w)
        else:
            idx = menu.get_index()
            opname = str(menu.get_alpha(idx))
            self.start_operation_cb(opname)

    def activate_plugin_cb(self, pl_mgr, bnch):
        """Plugin-manager callback: add a tray label with a context menu."""
        lname = bnch.pInfo.name.lower()
        menu = Widgets.Menu()
        item = menu.add_name("Focus")
        item.add_callback('activated', lambda *args: pl_mgr.set_focus(lname))
        item = menu.add_name("Unfocus")
        item.add_callback('activated', lambda *args: pl_mgr.clear_focus(lname))
        item = menu.add_name("Stop")
        item.add_callback('activated', lambda *args: pl_mgr.deactivate(lname))

        lblname = bnch.lblname
        lbl = Widgets.Label(lblname, halign='center', style='clickable',
                            menu=menu)
        lbl.set_tooltip("Right click for menu")
        self.w.optray.add_widget(lbl, stretch=0)

        # Left click focuses the plugin directly.
        lbl.add_callback('activated', lambda w: pl_mgr.set_focus(lname))
        bnch.setvals(widget=lbl, label=lbl, menu=menu)

    def deactivate_plugin_cb(self, pl_mgr, bnch):
        """Plugin-manager callback: remove the plugin's tray label."""
        if bnch.widget is not None:
            self.logger.debug("removing widget from taskbar")
            self.w.optray.remove(bnch.widget)
        bnch.widget = None
        bnch.label = None

    def focus_plugin_cb(self, pl_mgr, bnch):
        """Plugin-manager callback: highlight the focused plugin's label."""
        self.logger.debug("highlighting widget")
        if bnch.label is not None:
            bnch.label.set_color(bg=self.focuscolor)

    def unfocus_plugin_cb(self, pl_mgr, bnch):
        """Plugin-manager callback: restore the label's neutral color."""
        self.logger.debug("unhighlighting widget")
        if bnch.label is not None:
            bnch.label.set_color(bg='grey')

    def __str__(self):
        # Name used by the plugin registry.
        return 'operations'
#END
| [
"eric@naoj.org"
] | eric@naoj.org |
b62bc2db0a6ee6b5f3cb7e989a85c62fde7672cf | eebafeddcdbb520ab2afcac4e9d7dd75c58318af | /APO/prep_data/featurize_dihedrals.py | affde20d60e2e428fb455bda72382b599b473c49 | [
"MIT",
"CC-BY-4.0"
] | permissive | choderalab/SETD8-materials | 0e91f1c7c0348d4aa100df6bc33b16ab3ab96555 | 60a03632c8667ca91514f41a48cb27a255a47821 | refs/heads/master | 2021-09-21T04:12:22.596465 | 2018-08-20T00:36:45 | 2018-08-20T00:36:45 | 145,294,223 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import pyemma
import mdtraj as md
import glob
import numpy as np
fnames = glob.glob('data_cut_start_noH_stride10/*/*.h5')
traj = md.load(fnames[0])
top = traj.top
feat = pyemma.coordinates.featurizer(top)
feat.add_backbone_torsions(cossin = True)
feat.add_chi1_torsions(cossin = True)
source = pyemma.coordinates.source(fnames, features = feat)
X = source.get_output()
for i in range(len(X)):
x = X[i]
np.save('data_cut_start_noH_stride10_featurized/dih/%d.npy' % x, x)
np.save('data_cut_start_noH_stride10_featurized/dih_comb/X.npy', X)
| [
"rafwiewiora@gmail.com"
] | rafwiewiora@gmail.com |
cee870482805f37c8d778e6d1300cb40e5facb92 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2587/60693/238740.py | ee8c7a54d56f3a4755d187c9812d54cb532e7ef9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | pnum=int(input())
points=[]
for i in range(pnum):
co=list(map(int,input().split(',')))
points.append(co)
steps=0
for i in range(pnum-1):
pax,pay=points[i][0],points[i][1]
pbx,pby=points[i+1][0],points[i+1][1]
disx,disy=abs(pbx-pax),abs(pby-pay)
steps+=max(disx,disy)
print(steps) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
e913cf4c9f8ca280ae64674e4b5b530734accc8b | c61c27a778f0d11502acbd76ec69e77745c920ee | /go/apps/rapidsms/vumi_app.py | 85c7b8b0548cce8db5dd3a694e30fb48ea960d8e | [
"BSD-2-Clause"
] | permissive | ChrisNolan1992/vumi-go | baf852b2b8a85aa5f3d43b1362409cddc407d4b1 | be8d358a0a6efc0799c758644b6c8759a22db180 | refs/heads/master | 2020-12-31T03:56:45.262961 | 2014-08-19T13:56:08 | 2014-08-19T13:56:08 | 23,417,739 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,883 | py | # -*- test-case-name: go.apps.rapidsms.tests.test_vumi_app -*-
# -*- coding: utf-8 -*-
"""Vumi Go application worker for RapidSMS."""
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.application.rapidsms_relay import RapidSMSRelay
from vumi import log
from go.vumitools.app_worker import (
GoApplicationMixin, GoApplicationConfigMixin, GoWorkerConfigData)
class RapidSMSConfig(RapidSMSRelay.CONFIG_CLASS, GoApplicationConfigMixin):
pass
class RapidSMSApplication(GoApplicationMixin, RapidSMSRelay):
    """Vumi Go application worker wrapping the stock RapidSMS relay."""

    CONFIG_CLASS = RapidSMSConfig
    worker_name = 'rapidsms_application'

    # Basic AUTH uses colon to combine the username and password so don't use
    # colon as the separator.
    AUTH_SEP = "@"

    @inlineCallbacks
    def setup_application(self):
        """Start the relay, then bring up the Go worker machinery."""
        yield super(RapidSMSApplication, self).setup_application()
        yield self._go_setup_worker()

    @inlineCallbacks
    def teardown_application(self):
        """Tear down the relay, then the Go worker machinery."""
        yield super(RapidSMSApplication, self).teardown_application()
        yield self._go_teardown_worker()

    @classmethod
    def vumi_username_for_conversation(cls, conversation):
        """Encode account key and conversation key into one basic-auth username."""
        return cls.AUTH_SEP.join(
            [conversation.user_account.key, conversation.key])

    def get_config_data_for_conversation(self, conversation):
        """Build the dynamic worker config for a conversation.

        Uses the first configured API token (if any) as the vumi password.
        """
        dynamic_config = conversation.config.get('rapidsms', {}).copy()
        dynamic_config["vumi_auth_method"] = "basic"
        dynamic_config["vumi_username"] = self.vumi_username_for_conversation(
            conversation)
        auth_config = conversation.config.get('auth_tokens', {})
        api_tokens = auth_config.get("api_tokens", [])
        dynamic_config["vumi_password"] = api_tokens[0] if api_tokens else None
        dynamic_config["conversation"] = conversation
        return GoWorkerConfigData(self.config, dynamic_config)

    @inlineCallbacks
    def get_ctxt_config(self, ctxt):
        """Resolve a config from a request context's basic-auth username.

        The username encodes ``<account_key>@<conversation_key>`` (see
        vumi_username_for_conversation).  Raises ValueError on a missing or
        malformed username or an unknown conversation.
        """
        username = getattr(ctxt, 'username', None)
        if username is None:
            raise ValueError("No username provided for retrieving"
                             " RapidSMS conversation.")
        user_account_key, _, conversation_key = username.partition(
            self.AUTH_SEP)
        if not user_account_key or not conversation_key:
            raise ValueError("Invalid username for RapidSMS conversation.")
        conv = yield self.get_conversation(user_account_key, conversation_key)
        if conv is None:
            log.warning("Cannot find conversation '%s' for user '%s'." % (
                conversation_key, user_account_key))
            # NOTE(review): 'retrieiving' typo in this message; left as-is
            # since callers/tests may match on the exact text.
            raise ValueError("No conversation found for retrieiving"
                             " RapidSMS configuration.")
        config = yield self.get_config_for_conversation(conv)
        returnValue(config)

    def get_config(self, msg, ctxt=None):
        """Return config from a message if given, else from the context."""
        if msg is not None:
            return self.get_message_config(msg)
        elif ctxt is not None:
            return self.get_ctxt_config(ctxt)
        else:
            raise ValueError("No msg or context provided for"
                             " retrieving a RapidSMS config.")

    def send_rapidsms_nonreply(self, to_addr, content, config, endpoint):
        """Call .send_to() for a message from RapidSMS that is not a reply.

        This overrides the base method and adds conversation metadata.
        """
        helper_metadata = {}
        config.conversation.set_go_helper_metadata(helper_metadata)
        return self.send_to(to_addr, content, endpoint=endpoint,
                            helper_metadata=helper_metadata)

    def process_command_start(self, user_account_key, conversation_key):
        """Log the start command, then delegate to the mixin implementation."""
        log.info("Starting RapidSMS conversation (key: %r)." %
                 (conversation_key,))
        return super(RapidSMSApplication, self).process_command_start(
            user_account_key, conversation_key)
| [
"hodgestar@gmail.com"
] | hodgestar@gmail.com |
91eac5619a238e229992da106d3038013dd9373e | caf8cbcafd448a301997770165b323438d119f5e | /.history/mercari/mercari_search_20201124184532.py | 60067c2157e28c296d99956f5b4b379b1aeb19a2 | [
"MIT"
] | permissive | KustomApe/nerdape | 03e0691f675f13ce2aefa46ee230111247e90c72 | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | refs/heads/main | 2023-01-23T10:13:26.584386 | 2020-11-28T22:29:49 | 2020-11-28T22:29:49 | 309,897,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas as pd
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import PyQt5
import time
"""[Initial Settings]
初期設定
"""
options = webdriver.ChromeOptions()
options.add_argument('--headeless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
"""[CSS Selector Settings]
CSSセレクターの設定
"""
PAGER = "li.pager-next a"
word = input("検索したいキーワードを入力してください:")
while True:
if PAGER:
res = browser.get("https://www.mercari.com/jp/search/?page="+str(n)+"&keyword="+word)
df_main = pd.DataFrame(columns=['在庫有無','タイトル','値段','URL'])
df_graf = pd.DataFrame(columns=['SOLD','PRICE'])
browser.get(res)
item_boxlist = browser.find_elements_by_css_selector(".items-box")
for item_box in item_boxlist:
try:
if len(item_box.find_elements_by_css_selector(".item-sold-out-badge")) > 0:
sold = "SOLD"
else:
sold = "NOT SOLD"
sub_title = item_box.find_element_by_class_name("items-box-body")
title = sub_title.find_element_by_tag_name("h3").text
item_price = item_box.find_element_by_css_selector(".items-box-price")
price_text = item_price.text
price_text = re.sub(r",", "", price_text).lstrip("¥ ")
price_text_int = int(price_text)
print(price_text_int)
url = item_box.find_element_by_tag_name("a").get_attribute("href")
data = pd.Series( [ sold,title,price_text_int,url ], index=df_main.columns )
grdata = pd.Series( [ sold,price_text_int ], index=df_graf.columns )
df_main = df_main.append( data, ignore_index=True )
df_graf = df_graf.append( grdata, ignore_index=True )
except Exception as e:
print(e)
btn = browser.find_element_by_css_selector(PAGER).get_attribute('href')
n += 1
print('next url:{}'.format(btn))
time.sleep(3)
browser.get(btn)
print('Moving to next page...')
else:
print('No items anymore...')
break
print(df_main)
sns.stripplot(x='SOLD', y='PRICE', data=df_graf)
plt.show()
sns.pairplot(df_graf,hue="SOLD")
plt.show()
print('Writing out to CSV file...')
df_main.to_csv("pricedata.csv", encoding="utf_8_sig")
print("Done") | [
"kustomape@gmail.com"
] | kustomape@gmail.com |
787612f8fc43ee97d1b22bd8b708397309dc26ec | c3a61c9420c941722bad57a8cbcb7a58e3072012 | /sb3.py | 4375afbe9250308a448a01ec8cb8320797ed787d | [] | no_license | vwxyzjn/microrts-sb3 | e18db9427fd7cd3622c1356437c8fdbcbf796e19 | 72f4382f2926e3de61671d943b625391c8cc98f6 | refs/heads/master | 2023-08-22T14:54:27.869007 | 2021-09-20T01:59:28 | 2021-09-20T01:59:28 | 408,281,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | import wandb
from sb3_contrib import MaskablePPO
from sb3_contrib.common.wrappers import ActionMasker
from stable_baselines3.common.vec_env import VecVideoRecorder, VecMonitor
from wandb.integration.sb3 import WandbCallback
from gym_microrts import microrts_ai
from gym_microrts.envs.new_vec_env import MicroRTSGridModeVecEnv
import numpy as np
import gym
def mask_fn(env: gym.Env) -> np.ndarray:
    """Fetch the current invalid-action mask from the wrapped environment."""
    # Uncomment to make masking a no-op
    # return np.ones_like(env.action_mask)
    mask = env.get_action_mask()
    return mask
def get_wrapper(env: gym.Env) -> gym.Env:
    """Wrap *env* with ActionMasker so MaskablePPO can query action masks."""
    masked_env = ActionMasker(env, mask_fn)
    return masked_env
# CONSISTENCY FIX: the config logged to wandb previously claimed 8 Breakout
# envs while the run actually used 24 MicroRTS bot envs; record the real
# settings once and derive the run from them so tracked metadata is truthful.
config = {
    "total_timesteps": int(100e6),
    "num_envs": 24,
    "env_name": "MicroRTSGridModeVecEnv-basesWorkers16x16",
}
run = wandb.init(
    project="sb3",
    config=config,
    sync_tensorboard=True,  # auto-upload sb3's tensorboard metrics
    monitor_gym=True,  # auto-upload the videos of agents playing the game
    save_code=True,  # optional
)
num_envs = config["num_envs"]
envs = MicroRTSGridModeVecEnv(
    num_selfplay_envs=0,
    num_bot_envs=num_envs,
    partial_obs=False,
    max_steps=2000,
    render_theme=2,
    # num_envs - 6 coacAI opponents plus two each of the weaker baselines.
    ai2s=[microrts_ai.coacAI for _ in range(num_envs - 6)]
    + [microrts_ai.randomBiasedAI for _ in range(min(num_envs, 2))]
    + [microrts_ai.lightRushAI for _ in range(min(num_envs, 2))]
    + [microrts_ai.workerRushAI for _ in range(min(num_envs, 2))],
    map_path="maps/16x16/basesWorkers16x16.xml",
    reward_weight=np.array([10.0, 1.0, 1.0, 0.2, 1.0, 4.0]),
)
envs = VecMonitor(envs)  # episode statistics for logging
envs = VecVideoRecorder(envs, "videos", record_video_trigger=lambda x: x % 100000 == 0, video_length=2000)  # record videos
model = MaskablePPO(
    "CnnPolicy",
    envs,
    n_steps=128,
    n_epochs=4,
    learning_rate=lambda progression: 2.5e-4 * progression,  # linear decay
    ent_coef=0.01,
    clip_range=0.1,
    batch_size=256,
    verbose=1,
    tensorboard_log=f"runs",
)
model.learn(
    total_timesteps=config["total_timesteps"],
    callback=WandbCallback(
        gradient_save_freq=1000,
        model_save_path=f"models/{run.id}",
    ),
)
"costa.huang@outlook.com"
] | costa.huang@outlook.com |
1218380ea622e8b2a378a0b78684ff6c0fa38617 | 66862c422fda8b0de8c4a6f9d24eced028805283 | /slambook2/3rdparty/opencv-3.3.0/modules/ts/misc/summary.py | 96826998507bf5e5af019fbdb1b222cbe317cb6a | [
"MIT",
"BSD-3-Clause"
] | permissive | zhh2005757/slambook2_in_Docker | 57ed4af958b730e6f767cd202717e28144107cdb | f0e71327d196cdad3b3c10d96eacdf95240d528b | refs/heads/main | 2023-09-01T03:26:37.542232 | 2021-10-27T11:45:47 | 2021-10-27T11:45:47 | 416,666,234 | 17 | 6 | MIT | 2021-10-13T09:51:00 | 2021-10-13T09:12:15 | null | UTF-8 | Python | false | false | 13,963 | py | #!/usr/bin/env python
import testlog_parser, sys, os, xml, glob, re
from table_formatter import *
from optparse import OptionParser
# Helpers for natural ("alphanumeric") sorting of test names, with OpenCV
# matrix type codes (e.g. "8UC3") mapped to a single sortable integer.
numeric_re = re.compile(r"(\d+)")
cvtype_re = re.compile(r"(8U|8S|16U|16S|32S|32F|64F)C(\d{1,3})")
cvtypes = { '8U': 0, '8S': 1, '16U': 2, '16S': 3, '32S': 4, '32F': 5, '64F': 6 }


def convert(text):
    """Return int(text) for digit runs, the text itself otherwise."""
    # IDIOM FIX: these three were lambdas bound to names (PEP 8 E731);
    # plain defs behave identically and are debuggable/picklable.
    return int(text) if text.isdigit() else text


def keyselector(a):
    """Replace cv type codes like '32FC2' with ' <rank> ' for sorting.

    Rank = depth index (unknown depths rank 7) + (channels - 1) * 8.
    """
    return cvtype_re.sub(
        lambda match: " " + str(cvtypes.get(match.group(1), 7)
                                + (int(match.group(2)) - 1) * 8) + " ", a)


def alphanum_keyselector(key):
    """Sort key: split on digit runs so 'test2' orders before 'test10'."""
    return [convert(c) for c in numeric_re.split(keyselector(key))]
def getSetName(tset, idx, columns, short = True):
    """Build a display title for test set *tset* (a ``(filename, tests)`` pair).

    A caller-supplied alias from *columns* takes precedence; when *short* is
    false the alias is combined with the multi-line name derived from the log
    filename, separated by a dashed rule sized to the widest alias line.
    """
    prefix = columns[idx] if columns and len(columns) > idx else None
    if short and prefix:
        return prefix
    name = tset[0].replace(".xml", "").replace("_", "\n")
    if not prefix:
        return name
    widest = len(max(prefix.split("\n"), key=len))
    rule = "-" * int(widest * 1.5)
    return "\n".join([prefix, rule, name])
if __name__ == "__main__":
    # Python 2 script: compares metric values across several performance-test
    # XML logs and renders a comparison table (console, HTML or moinwiki).
    if len(sys.argv) < 2:
        print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml [<log_name2>.xml ...]"
        exit(0)

    parser = OptionParser()
    parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar="FMT", default="auto")
    parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
    parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), mks, ns or ticks)", metavar="UNITS", default="ms")
    parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
    parser.add_option("", "--module", dest="module", default=None, metavar="NAME", help="module prefix for test names")
    parser.add_option("", "--columns", dest="columns", default=None, metavar="NAMES", help="comma-separated list of column aliases")
    parser.add_option("", "--no-relatives", action="store_false", dest="calc_relatives", default=True, help="do not output relative values")
    parser.add_option("", "--with-cycles-reduction", action="store_true", dest="calc_cr", default=False, help="output cycle reduction percentages")
    parser.add_option("", "--with-score", action="store_true", dest="calc_score", default=False, help="output automatic classification of speedups")
    parser.add_option("", "--progress", action="store_true", dest="progress_mode", default=False, help="enable progress mode")
    parser.add_option("", "--regressions", dest="regressions", default=None, metavar="LIST", help="comma-separated custom regressions map: \"[r][c]#current-#reference\" (indexes of columns are 0-based, \"r\" - reverse flag, \"c\" - color flag for base data)")
    parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
    parser.add_option("", "--match", dest="match", default=None)
    parser.add_option("", "--match-replace", dest="match_replace", default="")
    parser.add_option("", "--regressions-only", dest="regressionsOnly", default=None, metavar="X-FACTOR", help="show only tests with performance regressions not")
    parser.add_option("", "--intersect-logs", dest="intersect_logs", default=False, help="show only tests present in all log files")
    (options, args) = parser.parse_args()

    options.generateHtml = detectHtmlOutputType(options.format)
    if options.metric not in metrix_table:
        options.metric = "gmean"
    # Relative ('%') and cycle-reduction ('$') metrics are already relative,
    # so the extra comparison columns would be meaningless for them.
    if options.metric.endswith("%") or options.metric.endswith("$"):
        options.calc_relatives = False
        options.calc_cr = False
    if options.columns:
        options.columns = [s.strip().replace("\\n", "\n") for s in options.columns.split(",")]

    if options.regressions:
        assert not options.progress_mode, 'unsupported mode'

        def parseRegressionColumn(s):
            """ Format: '[r][c]<uint>-<uint>' """
            reverse = s.startswith('r')
            if reverse:
                s = s[1:]
            addColor = s.startswith('c')
            if addColor:
                s = s[1:]
            parts = s.split('-', 1)
            link = (int(parts[0]), int(parts[1]), reverse, addColor)
            assert link[0] != link[1]
            return link

        options.regressions = [parseRegressionColumn(s) for s in options.regressions.split(',')]

    # expand wildcards and filter duplicates
    files = []
    seen = set()
    for arg in args:
        if ("*" in arg) or ("?" in arg):
            flist = [os.path.abspath(f) for f in glob.glob(arg)]
            flist = sorted(flist, key= lambda text: str(text).replace("M", "_"))
            files.extend([ x for x in flist if x not in seen and not seen.add(x)])
        else:
            fname = os.path.abspath(arg)
            if fname not in seen and not seen.add(fname):
                files.append(fname)

    # read all passed files
    test_sets = []
    for arg in files:
        try:
            tests = testlog_parser.parseLogFile(arg)
            if options.filter:
                expr = re.compile(options.filter)
                tests = [t for t in tests if expr.search(str(t))]
            if options.match:
                tests = [t for t in tests if t.get("status") != "notrun"]
            if tests:
                test_sets.append((os.path.basename(arg), tests))
        except IOError as err:
            sys.stderr.write("IOError reading \"" + arg + "\" - " + str(err) + os.linesep)
        except xml.parsers.expat.ExpatError as err:
            sys.stderr.write("ExpatError reading \"" + arg + "\" - " + str(err) + os.linesep)

    if not test_sets:
        sys.stderr.write("Error: no test data found" + os.linesep)
        quit()

    setsCount = len(test_sets)

    # Default regression map: compare every log against the first one
    # (or against the previous log in progress mode, encoded as -1).
    if options.regressions is None:
        reference = -1 if options.progress_mode else 0
        options.regressions = [(i, reference, False, True) for i in range(1, len(test_sets))]

    for link in options.regressions:
        (i, ref, reverse, addColor) = link
        assert i >= 0 and i < setsCount
        assert ref < setsCount

    # find matches
    test_cases = {}

    name_extractor = lambda name: str(name)
    if options.match:
        reg = re.compile(options.match)
        name_extractor = lambda name: reg.sub(options.match_replace, str(name))

    for i in range(setsCount):
        for case in test_sets[i][1]:
            name = name_extractor(case)
            if options.module:
                name = options.module + "::" + name
            if name not in test_cases:
                test_cases[name] = [None] * setsCount
            test_cases[name][i] = case

    # build table
    getter = metrix_table[options.metric][1]
    getter_score = metrix_table["score"][1] if options.calc_score else None
    getter_p = metrix_table[options.metric + "%"][1] if options.calc_relatives else None
    getter_cr = metrix_table[options.metric + "$"][1] if options.calc_cr else None
    tbl = table(metrix_table[options.metric][0])

    # header
    tbl.newColumn("name", "Name of Test", align = "left", cssclass = "col_name")
    for i in range(setsCount):
        tbl.newColumn(str(i), getSetName(test_sets[i], i, options.columns, False), align = "center")

    def addHeaderColumns(suffix, description, cssclass):
        # One extra comparison column per configured regression link.
        for link in options.regressions:
            (i, ref, reverse, addColor) = link
            if reverse:
                i, ref = ref, i
            current_set = test_sets[i]
            current = getSetName(current_set, i, options.columns)
            if ref >= 0:
                reference_set = test_sets[ref]
                reference = getSetName(reference_set, ref, options.columns)
            else:
                reference = 'previous'
            tbl.newColumn(str(i) + '-' + str(ref) + suffix, '%s\nvs\n%s\n(%s)' % (current, reference, description), align='center', cssclass=cssclass)

    if options.calc_cr:
        addHeaderColumns(suffix='$', description='cycles reduction', cssclass='col_cr')
    if options.calc_relatives:
        addHeaderColumns(suffix='%', description='x-factor', cssclass='col_rel')
    if options.calc_score:
        addHeaderColumns(suffix='S', description='score', cssclass='col_name')

    # rows
    prevGroupName = None
    needNewRow = True
    lastRow = None
    for name in sorted(test_cases.iterkeys(), key=alphanum_keyselector):
        cases = test_cases[name]
        if needNewRow:
            lastRow = tbl.newRow()
            if not options.showall:
                # Row is only kept once a real value lands in it.
                needNewRow = False
        tbl.newCell("name", name)

        # Mark the first row of each test group for CSS styling.
        groupName = next(c for c in cases if c).shortName()
        if groupName != prevGroupName:
            prop = lastRow.props.get("cssclass", "")
            if "firstingroup" not in prop:
                lastRow.props["cssclass"] = prop + " firstingroup"
            prevGroupName = groupName

        for i in range(setsCount):
            case = cases[i]
            if case is None:
                if options.intersect_logs:
                    # Drop rows missing from any log when intersecting.
                    needNewRow = False
                    break

                tbl.newCell(str(i), "-")
            else:
                status = case.get("status")
                if status != "run":
                    tbl.newCell(str(i), status, color="red")
                else:
                    val = getter(case, cases[0], options.units)
                    if val:
                        needNewRow = True
                    tbl.newCell(str(i), formatValue(val, options.metric, options.units), val)

        if needNewRow:
            for link in options.regressions:
                (i, reference, reverse, addColor) = link
                if reverse:
                    i, reference = reference, i
                tblCellID = str(i) + '-' + str(reference)
                case = cases[i]
                if case is None:
                    if options.calc_relatives:
                        tbl.newCell(tblCellID + "%", "-")
                    if options.calc_cr:
                        tbl.newCell(tblCellID + "$", "-")
                    if options.calc_score:
                        # NOTE(review): suffix looks like it should be "S"
                        # (as used elsewhere for score cells); left unchanged.
                        tbl.newCell(tblCellID + "$", "-")
                else:
                    status = case.get("status")
                    if status != "run":
                        tbl.newCell(str(i), status, color="red")
                        if status != "notrun":
                            needNewRow = True
                        if options.calc_relatives:
                            tbl.newCell(tblCellID + "%", "-", color="red")
                        if options.calc_cr:
                            tbl.newCell(tblCellID + "$", "-", color="red")
                        if options.calc_score:
                            tbl.newCell(tblCellID + "S", "-", color="red")
                    else:
                        val = getter(case, cases[0], options.units)

                        def getRegression(fn):
                            # Compare against the fixed reference column, or
                            # walk backwards to the nearest run in progress mode.
                            if fn and val:
                                for j in reversed(range(i)) if reference < 0 else [reference]:
                                    r = cases[j]
                                    if r is not None and r.get("status") == 'run':
                                        return fn(case, r, options.units)

                        valp = getRegression(getter_p) if options.calc_relatives or options.progress_mode else None
                        valcr = getRegression(getter_cr) if options.calc_cr else None
                        val_score = getRegression(getter_score) if options.calc_score else None
                        # Color by x-factor: >1.05 speedup, <0.95 slowdown.
                        if not valp:
                            color = None
                        elif valp > 1.05:
                            color = 'green'
                        elif valp < 0.95:
                            color = 'red'
                        else:
                            color = None
                        if addColor:
                            if not reverse:
                                tbl.newCell(str(i), formatValue(val, options.metric, options.units), val, color=color)
                            else:
                                r = cases[reference]
                                if r is not None and r.get("status") == 'run':
                                    val = getter(r, cases[0], options.units)
                                    tbl.newCell(str(reference), formatValue(val, options.metric, options.units), val, color=color)
                        if options.calc_relatives:
                            tbl.newCell(tblCellID + "%", formatValue(valp, "%"), valp, color=color, bold=color)
                        if options.calc_cr:
                            tbl.newCell(tblCellID + "$", formatValue(valcr, "$"), valcr, color=color, bold=color)
                        if options.calc_score:
                            tbl.newCell(tblCellID + "S", formatValue(val_score, "S"), val_score, color = color, bold = color)

    if not needNewRow:
        tbl.trimLastRow()

    if options.regressionsOnly:
        # Keep only rows whose last regression column is below the threshold.
        for r in reversed(range(len(tbl.rows))):
            for i in range(1, len(options.regressions) + 1):
                val = tbl.rows[r].cells[len(tbl.rows[r].cells) - i].value
                if val is not None and val < float(options.regressionsOnly):
                    break
            else:
                tbl.rows.pop(r)

    # output table
    if options.generateHtml:
        if options.format == "moinwiki":
            tbl.htmlPrintTable(sys.stdout, True)
        else:
            htmlPrintHeader(sys.stdout, "Summary report for %s tests from %s test logs" % (len(test_cases), setsCount))
            tbl.htmlPrintTable(sys.stdout)
            htmlPrintFooter(sys.stdout)
    else:
        tbl.consolePrintTable(sys.stdout)

    if options.regressionsOnly:
        sys.exit(len(tbl.rows))
| [
"594353397@qq.com"
] | 594353397@qq.com |
a3a8a56f4fea1271db284ab6c7e85df83934a615 | e780a5bd72f98ca2513c993d64a85b08578166a6 | /buildout-cache/eggs/Products.Archetypes-1.15-py2.7.egg/Products/Archetypes/BaseObject.py | 0359330d276e3e34176865053bd92e22a9fc9663 | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,273 | py | import sys
from App.class_init import InitializeClass
from Products.Archetypes import PloneMessageFactory as _
from Products.Archetypes.debug import log_exc
from Products.Archetypes.utils import DisplayList
from Products.Archetypes.utils import mapply
from Products.Archetypes.utils import fixSchema
from Products.Archetypes.utils import shasattr
from Products.Archetypes.Field import StringField
from Products.Archetypes.Field import TextField
from Products.Archetypes.Renderer import renderer
from Products.Archetypes.Schema import Schema
from Products.Archetypes.Schema import getSchemata
from Products.Archetypes.Widget import IdWidget
from Products.Archetypes.Widget import StringWidget
from Products.Archetypes.Marshall import RFC822Marshaller
from Products.Archetypes.interfaces import IBaseObject
from Products.Archetypes.interfaces import IReferenceable
from Products.Archetypes.interfaces import IReferenceField
from Products.Archetypes.interfaces import ISchema
from Products.Archetypes.interfaces.field import IFileField
from Products.Archetypes.validator import AttributeValidator
from Products.Archetypes.config import ATTRIBUTE_SECURITY
from Products.Archetypes.config import RENAME_AFTER_CREATION_ATTEMPTS
from Products.Archetypes.event import ObjectInitializedEvent
from Products.Archetypes.event import ObjectEditedEvent
from Products.Archetypes.interfaces import IMultiPageSchema
from Products.Archetypes.interfaces import IObjectPreValidation
from Products.Archetypes.interfaces import IObjectPostValidation
from AccessControl import ClassSecurityInfo
from AccessControl import Unauthorized
from AccessControl.Permissions import copy_or_move as permission_copy_or_move
from Acquisition import aq_base
from Acquisition import aq_inner
from Acquisition import aq_parent
from Acquisition import ImplicitAcquisitionWrapper
from Acquisition import ExplicitAcquisitionWrapper
from Acquisition import Explicit
from ComputedAttribute import ComputedAttribute
from ZODB.POSException import ConflictError
import transaction
from Products.CMFCore import permissions
from Products.CMFCore.utils import getToolByName
from Referenceable import Referenceable
from ZPublisher import xmlrpc
from webdav.NullResource import NullResource
from zope import event
from zope.interface import implementer, Interface, providedBy
from zope.component import getSiteManager
from zope.component import subscribers
from zope.component import queryUtility
# Import conditionally, so we don't introduce a hard depdendency
try:
from plone.i18n.normalizer.interfaces import IUserPreferredURLNormalizer
from plone.i18n.normalizer.interfaces import IURLNormalizer
URL_NORMALIZER = True
except ImportError:
URL_NORMALIZER = False
try:
from plone.locking.interfaces import ILockable
HAS_LOCKING = True
except ImportError:
HAS_LOCKING = False
_marker = []
# Minimal base schema shared by all Archetypes content: a writable short name
# (id) and a required, searchable title.  The RFC822 marshaller makes objects
# (de)serializable over WebDAV/FTP.
content_type = Schema((

    StringField(
        name='id',
        required=0,  # Still actually required, but the widget will
                     # supply the missing value on non-submits
        mode='rw',
        permission=permission_copy_or_move,
        accessor='getId',
        mutator='setId',
        default=None,
        widget=IdWidget(
            label=_(u'label_short_name', default=u'Short name'),
            description=_(u'help_shortname',
                          default=u'Should not contain spaces, underscores or mixed case. '
                                  'Short Name is part of the item\'s web address.'),
            visible={'view': 'invisible'}
        ),
    ),

    StringField(
        name='title',
        required=1,
        searchable=1,
        default='',
        accessor='Title',
        widget=StringWidget(
            label_msgid='label_title',
            visible={'view': 'invisible'},
            i18n_domain='plone',
        ),
    ),

), marshall=RFC822Marshaller())
@implementer(IBaseObject, IReferenceable)
class BaseObject(Referenceable):
    """Base class for schema-driven Archetypes content objects.

    Provides the machinery shared by every Archetypes content type:
    schema-based field access and mutation, form processing and
    validation, searchable-text extraction for indexing, schema
    migration, and traversal to transient sub-objects.  Meant to be
    subclassed by concrete content types, not used directly.

    NOTE: legacy Zope 2 / Python 2 code (``print >> out``,
    ``raise X, y``, ``basestring``/``unicode``) -- not Python 3 safe.
    """
    security = ClassSecurityInfo()
    # Protect AttributeStorage-based attributes. See the docstring of
    # AttributeValidator for the low-down.
    if ATTRIBUTE_SECURITY:
        attr_security = AttributeValidator()
        security.setDefaultAccess(attr_security)
        # Delete so it cannot be accessed anymore.
        del attr_security
    # Class-level schema; instances normally look it up via Schema().
    schema = content_type
    # Cached schema signature, set by initializeArchetype()/_updateSchema().
    _signature = None
    installMode = ['type', 'actions', 'indexes']
    _at_rename_after_creation = False  # rename object according to title?
    def __init__(self, oid, **kwargs):
        """Store the given id on the new instance (kwargs are ignored here)."""
        self.id = oid
    security.declareProtected(permissions.ModifyPortalContent,
                              'initializeArchetype')
    def initializeArchetype(self, **kwargs):
        # Called by the generated add* factory in types tool.
        try:
            self.initializeLayers()
            self.markCreationFlag()
            self.setDefaults()
            if kwargs:
                kwargs['_initializing_'] = True
                self.edit(**kwargs)
            self._signature = self.Schema().signature()
        except (ConflictError, KeyboardInterrupt):
            raise
        except:
            # Deliberate best-effort: initialization problems are logged,
            # not propagated.
            log_exc()
    security.declarePrivate('manage_afterAdd')
    def manage_afterAdd(self, item, container):
        """Zope hook: (re)initialize schema layers after the object is added."""
        __traceback_info__ = (self, item, container)
        Referenceable.manage_afterAdd(self, item, container)
        self.initializeLayers(item, container)
    security.declarePrivate('manage_afterClone')
    def manage_afterClone(self, item):
        """Zope hook: delegate clone handling to Referenceable."""
        __traceback_info__ = (self, item)
        Referenceable.manage_afterClone(self, item)
    security.declarePrivate('manage_beforeDelete')
    def manage_beforeDelete(self, item, container):
        """Zope hook: tear down schema layers before the object is removed."""
        __traceback_info__ = (self, item, container)
        self.cleanupLayers(item, container)
        Referenceable.manage_beforeDelete(self, item, container)
    security.declarePrivate('initializeLayers')
    def initializeLayers(self, item=None, container=None):
        """Initialize all storage/layer objects registered on the schema."""
        self.Schema().initializeLayers(self, item, container)
    security.declarePrivate('cleanupLayers')
    def cleanupLayers(self, item=None, container=None):
        """Clean up all storage/layer objects registered on the schema."""
        self.Schema().cleanupLayers(self, item, container)
    security.declareProtected(permissions.View, 'title_or_id')
    def title_or_id(self):
        """Returns the title if it is not blank and the id otherwise.
        """
        if shasattr(self, 'Title'):
            if callable(self.Title):
                return self.Title() or self.getId()
        return self.getId()
    security.declareProtected(permissions.View, 'getId')
    def getId(self):
        # Gets the object id.
        return self.id
    security.declareProtected(permissions.ModifyPortalContent, 'setId')
    def setId(self, value):
        # Sets the object id.
        # avoid CopyError in OFS.CopySupport.manage_renameObject(),
        # see http://dev.plone.org/ticket/8338
        value = value.strip()
        if value != self.getId():
            parent = aq_parent(aq_inner(self))
            if parent is not None:
                # See Referenceable, keep refs on what is a move/rename
                self._v_cp_refs = 1
                # We can't rename if the object is locked
                if HAS_LOCKING:
                    lockable = ILockable(self)
                    was_locked = False
                    if lockable.locked():
                        was_locked = True
                        lockable.unlock()
                    parent.manage_renameObject(self.id, value)
                    if was_locked:
                        lockable.lock()
                else:
                    parent.manage_renameObject(self.id, value)
        self._setId(value)
    security.declareProtected(permissions.View, 'Type')
    def Type(self):
        # Dublin Core element - Object type.
        #
        # This method is redefined in ExtensibleMetadata but we need this
        # at the object level (i.e. with or without metadata) to interact
        # with the uid catalog.
        if shasattr(self, 'getTypeInfo'):
            ti = self.getTypeInfo()
            if ti is not None:
                return ti.Title()
        return self.meta_type
    security.declareProtected(permissions.View, 'getField')
    def getField(self, key, wrapped=False):
        # Returns a field object.
        # NOTE(review): the ``wrapped`` argument is accepted but ignored
        # here; use getWrappedField() for an acquisition-wrapped field.
        return self.Schema().get(key)
    security.declareProtected(permissions.View, 'getWrappedField')
    def getWrappedField(self, key):
        # Gets a field by id which is explicitly wrapped.
        # XXX Maybe we should subclass field from Acquisition.Explicit?
        return ExplicitAcquisitionWrapper(self.getField(key), self)
    security.declareProtected(permissions.View, 'getDefault')
    def getDefault(self, field):
        # Return the default value of a field.
        field = self.getField(field)
        return field.getDefault(self)
    security.declareProtected(permissions.View, 'isBinary')
    def isBinary(self, key):
        # Return whether a field contains binary data.
        field = self.getField(key)
        if IFileField.providedBy(field):
            value = field.getBaseUnit(self)
            return value.isBinary()
        mimetype = self.getContentType(key)
        if mimetype and shasattr(mimetype, 'binary'):
            return mimetype.binary
        elif mimetype and mimetype.find('text') >= 0:
            return 0
        # Default to "binary" when the mimetype gives no better answer.
        return 1
    security.declareProtected(permissions.View, 'isTransformable')
    def isTransformable(self, name):
        # Returns whether a field is transformable.
        field = self.getField(name)
        return isinstance(field, TextField) or not self.isBinary(name)
    security.declareProtected(permissions.View, 'widget')
    def widget(self, field_name, mode="view", field=None, **kwargs):
        # Returns the rendered widget.
        if field is None:
            field = self.Schema()[field_name]
        widget = field.widget
        return renderer.render(field_name, mode, widget, self, field=field,
                               **kwargs)
    security.declareProtected(permissions.View, 'getFilename')
    def getFilename(self, key=None):
        # Returns the filename from a field (primary field when key is None).
        value = None
        if key is None:
            field = self.getPrimaryField()
        else:
            field = self.getField(key) or getattr(self, key, None)
        if field and shasattr(field, 'getFilename'):
            return field.getFilename(self)
        return value
    security.declareProtected(permissions.View, 'getContentType')
    def getContentType(self, key=None):
        # Returns the content type from a field (primary field when key is
        # None); falls back to 'text/plain'.
        value = 'text/plain'
        if key is None:
            field = self.getPrimaryField()
        else:
            field = self.getField(key) or getattr(self, key, None)
        if field and shasattr(field, 'getContentType'):
            return field.getContentType(self)
        return value
    # Backward compatibility
    # Note: ComputedAttribute should never be protected by a security
    # declaration! See http://dev.plone.org/archetypes/ticket/712
    content_type = ComputedAttribute(getContentType, 1)
    # XXX Where's get_content_type comes from??? There's no trace at both
    # Zope and CMF. It should be removed ASAP!
    security.declareProtected(permissions.View, 'get_content_type')
    get_content_type = getContentType
    security.declareProtected(permissions.ModifyPortalContent,
                              'setContentType')
    def setContentType(self, value, key=None):
        # Sets the content type of a field.
        if key is None:
            field = self.getPrimaryField()
        else:
            field = self.getField(key) or getattr(self, key, None)
        if field and IFileField.providedBy(field):
            field.setContentType(self, value)
    security.declareProtected(permissions.ModifyPortalContent, 'setFilename')
    def setFilename(self, value, key=None):
        # Sets the filename of a field.
        if key is None:
            field = self.getPrimaryField()
        else:
            field = self.getField(key) or getattr(self, key, None)
        if field and IFileField.providedBy(field):
            field.setFilename(self, value)
    security.declareProtected(permissions.View, 'getPrimaryField')
    def getPrimaryField(self):
        # The primary field is some object that responds to
        # PUT/manage_FTPget events.
        fields = self.Schema().filterFields(primary=1)
        if fields:
            return fields[0]
        return None
    security.declareProtected(permissions.View, 'get_portal_metadata')
    def get_portal_metadata(self, field):
        # Returns the portal_metadata vocabulary/enforce pair for a field.
        pmt = getToolByName(self, 'portal_metadata')
        policy = None
        try:
            schema = getattr(pmt, 'DCMI', None)
            spec = schema.getElementSpec(field.accessor)
            policy = spec.getPolicy(self.portal_type)
        except (ConflictError, KeyboardInterrupt):
            raise
        except:
            log_exc()
            return None, False
        if not policy:
            # Fall back to the type-independent default policy.
            policy = spec.getPolicy(None)
        return DisplayList(map(lambda x: (x, x), policy.allowedVocabulary())), \
               policy.enforceVocabulary()
    security.declareProtected(permissions.View, 'Vocabulary')
    def Vocabulary(self, key):
        # Returns the vocabulary for a specified field.
        vocab, enforce = None, 0
        field = self.getField(key)
        if field:
            if field.isMetadata:
                vocab, enforce = self.get_portal_metadata(field)
            if vocab is None:
                vocab, enforce = field.Vocabulary(self), \
                                 field.enforceVocabulary
        if vocab is None:
            vocab = DisplayList()
        return vocab, enforce
    def __getitem__(self, key):
        """Overloads the object's item access.
        """
        # Don't allow key access to hidden attributes
        if key.startswith('_'):
            raise Unauthorized, key
        schema = self.Schema()
        keys = schema.keys()
        if key not in keys and not key.startswith('_'):
            # XXX Fix this in AT 1.4
            value = getattr(aq_inner(self).aq_explicit, key, _marker) or \
                getattr(aq_parent(aq_inner(self)).aq_explicit, key, _marker)
            if value is _marker:
                raise KeyError, key
            else:
                return value
        field = schema[key]
        accessor = field.getEditAccessor(self)
        if not accessor:
            accessor = field.getAccessor(self)
        # This is the access mode used by external editor. We need the
        # handling provided by BaseUnit when its available
        kw = {'raw': 1, 'field': field.__name__}
        value = mapply(accessor, **kw)
        return value
    security.declarePrivate('setDefaults')
    def setDefaults(self):
        # Sets the field values to the default values.
        self.Schema().setDefaults(self)
    security.declareProtected(permissions.ModifyPortalContent, 'update')
    def update(self, **kwargs):
        # Changes the values of the field and reindex the object.
        initializing = kwargs.get('_initializing_', False)
        if initializing:
            del kwargs['_initializing_']
        self.Schema().updateAll(self, **kwargs)
        self._p_changed = 1
        if not initializing:
            # Avoid double indexing during initialization.
            self.reindexObject()
    security.declareProtected(permissions.ModifyPortalContent, 'edit')
    # ``edit`` is an alias for ``update`` (same signature and behavior).
    edit = update
    security.declareProtected(permissions.View,
                              'validate_field')
    def validate_field(self, name, value, errors):
        # Field's validate hook.
        #
        # Write a method: validate_foo(new_value) -> "error" or None
        # If there is a validate method defined for a given field invoke
        # it by name
        # name -- the name to register errors under
        # value -- the proposed new value
        # errors -- dict to record errors in
        methodName = "validate_%s" % name
        result = None
        if shasattr(self, methodName):
            method = getattr(self, methodName)
            result = method(value)
            if result is not None:
                errors[name] = result
        return result
    # Pre/post validate hooks that will need to write errors
    # into the errors dict directly using errors[fieldname] = ""
    security.declareProtected(permissions.View, 'pre_validate')
    def pre_validate(self, REQUEST=None, errors=None):
        """Hook run before schema validation; subclasses may record errors."""
        pass
    security.declareProtected(permissions.View, 'post_validate')
    def post_validate(self, REQUEST=None, errors=None):
        """Hook run after schema validation; subclasses may record errors."""
        pass
    security.declareProtected(permissions.View, 'validate')
    def validate(self, REQUEST=None, errors=None, data=None, metadata=None):
        # Validates the form data from the request.
        if errors is None:
            errors = {}
        self.pre_validate(REQUEST, errors)
        for pre_validator in subscribers((self,), IObjectPreValidation):
            pre_errors = pre_validator(REQUEST)
            if pre_errors is not None:
                for field_name, error_message in pre_errors.items():
                    if field_name in errors:
                        errors[field_name] += " %s" % error_message
                    else:
                        errors[field_name] = error_message
        if errors:
            # Short-circuit: skip schema validation when pre-validation
            # already produced errors.
            return errors
        self.Schema().validate(instance=self, REQUEST=REQUEST,
                               errors=errors, data=data, metadata=metadata)
        self.post_validate(REQUEST, errors)
        for post_validator in subscribers((self,), IObjectPostValidation):
            post_errors = post_validator(REQUEST)
            if post_errors is not None:
                for field_name, error_message in post_errors.items():
                    if field_name in errors:
                        errors[field_name] += " %s" % error_message
                    else:
                        errors[field_name] = error_message
        return errors
    security.declareProtected(permissions.View, 'SearchableText')
    def SearchableText(self):
        # All fields marked as 'searchable' are concatenated together
        # here for indexing purpose.
        data = []
        for field in self.Schema().fields():
            if not field.searchable:
                continue
            if IReferenceField.providedBy(field):
                # waking instances is cheaper than processing a potentially
                # huge vocabulary for getting the title, therefore we handle
                # reference fields separately
                objs = field.get(self)
                if not isinstance(objs, (list, tuple)):
                    objs = [objs]
                datum = ' '.join([o.Title() for o in objs])
                data.append(datum)
            else:
                method = field.getIndexAccessor(self)
                try:
                    datum = method(mimetype="text/plain")
                except TypeError:
                    # Retry in case typeerror was raised because accessor doesn't
                    # handle the mimetype argument
                    try:
                        datum = method()
                    except (ConflictError, KeyboardInterrupt):
                        raise
                    except:
                        continue
                if datum:
                    vocab = field.Vocabulary(self)
                    if isinstance(datum, (list, tuple)):
                        # Unmangle vocabulary: we index key AND value
                        vocab_values = map(
                            lambda value, vocab=vocab: vocab.getValue(value, ''), datum)
                        datum = list(datum)
                        datum.extend(vocab_values)
                        datum = ' '.join(datum)
                    elif isinstance(datum, basestring):
                        if isinstance(datum, unicode):
                            datum = datum.encode('utf-8')
                        value = vocab.getValue(datum, '')
                        if isinstance(value, unicode):
                            value = value.encode('utf-8')
                        datum = "%s %s" % (datum, value, )
                    if isinstance(datum, unicode):
                        datum = datum.encode('utf-8')
                    data.append(str(datum))
        data = ' '.join(data)
        return data
    security.declareProtected(permissions.View, 'getCharset')
    def getCharset(self):
        # Returns the site default charset, or utf-8.
        return 'utf-8'
    security.declareProtected(permissions.View, 'get_size')
    def get_size(self):
        # Used for FTP and apparently the ZMI now too.
        # Sum of the sizes reported by every schema field.
        size = 0
        for field in self.Schema().fields():
            size += field.get_size(self)
        return size
    security.declarePrivate('_processForm')
    def _processForm(self, data=1, metadata=None, REQUEST=None, values=None):
        """Apply submitted form values to the object's fields via widgets."""
        request = REQUEST or self.REQUEST
        if values:
            form = values
        else:
            form = request.form
        fieldset = form.get('fieldset', None)
        schema = self.Schema()
        schemata = self.Schemata()
        fields = []
        if not IMultiPageSchema.providedBy(self):
            fields = schema.fields()
        elif fieldset is not None:
            fields = schemata[fieldset].fields()
        else:
            if data:
                fields += schema.filterFields(isMetadata=0)
            if metadata:
                fields += schema.filterFields(isMetadata=1)
        # NOTE(review): form_keys is computed but never used below.
        form_keys = form.keys()
        for field in fields:
            # Delegate to the widget for processing of the form
            # element. This means that if the widget needs _n_
            # fields under a naming convention it can handle this
            # internally. The calling API is process_form(instance,
            # field, form) where instance should rarely be needed,
            # field is the field object and form is the dict. of
            # kv_pairs from the REQUEST
            ##
            # The product of the widgets processing should be:
            # (value, **kwargs) which will be passed to the mutator
            # or None which will simply pass
            if not field.writeable(self):
                # If the field has no 'w' in mode, or the user doesn't
                # have the required permission, or the mutator doesn't
                # exist just bail out.
                continue
            try:
                # Pass validating=False to inform the widget that we
                # aren't in the validation phase, IOW, the returned
                # data will be forwarded to the storage
                result = field.widget.process_form(self, field, form,
                                                   empty_marker=_marker,
                                                   validating=False)
            except TypeError:
                # Support for old-style process_form methods
                result = field.widget.process_form(self, field, form,
                                                   empty_marker=_marker)
            if result is _marker or result is None:
                continue
            # Set things by calling the mutator
            mutator = field.getMutator(self)
            __traceback_info__ = (self, field, mutator)
            result[1]['field'] = field.__name__
            mapply(mutator, result[0], **result[1])
        self.reindexObject()
    security.declareProtected(permissions.ModifyPortalContent, 'processForm')
    def processForm(self, data=1, metadata=0, REQUEST=None, values=None):
        # Processes the schema looking for data in the form.
        is_new_object = self.checkCreationFlag()
        self._processForm(data=data, metadata=metadata,
                          REQUEST=REQUEST, values=values)
        if self._at_rename_after_creation and is_new_object:
            self._renameAfterCreation(check_auto_id=True)
        self.unmarkCreationFlag()
        # Post create/edit hooks
        if is_new_object:
            event.notify(ObjectInitializedEvent(self))
            self.at_post_create_script()
        else:
            event.notify(ObjectEditedEvent(self))
            self.at_post_edit_script()
    # This method is only called once after object creation.
    security.declarePrivate('at_post_create_script')
    def at_post_create_script(self):
        """Hook: override in subclasses for one-time post-creation work."""
        pass
    # This method is called after every subsequent edit
    security.declarePrivate('at_post_edit_script')
    def at_post_edit_script(self):
        """Hook: override in subclasses for post-edit work."""
        pass
    security.declareProtected(permissions.ModifyPortalContent,
                              'markCreationFlag')
    def markCreationFlag(self):
        # Sets flag on the instance to indicate that the object hasn't been
        # saved properly (unset in content_edit).
        #
        # This will only be done if a REQUEST is present to ensure that objects
        # created programmatically are considered fully created.
        req = getattr(self, 'REQUEST', None)
        if shasattr(req, 'get'):
            if req.get('SCHEMA_UPDATE', None) is not None:
                # Schema update in progress -- do not reset the flag.
                return
            meth = req.get('REQUEST_METHOD', None)
            # Ensure that we have an HTTP request, if you're creating an
            # object with something other than a GET or POST, then we assume
            # you are making a complete object.
            if meth in ('GET', 'POST', 'PUT', 'MKCOL'):
                self._at_creation_flag = True
    security.declareProtected(permissions.ModifyPortalContent,
                              'unmarkCreationFlag')
    def unmarkCreationFlag(self):
        # Removes the creation flag.
        if shasattr(aq_inner(self), '_at_creation_flag'):
            self._at_creation_flag = False
    security.declareProtected(permissions.ModifyPortalContent,
                              'checkCreationFlag')
    def checkCreationFlag(self):
        # Returns True if the object has not been fully saved, False otherwise.
        return getattr(aq_base(self), '_at_creation_flag', False)
    def generateNewId(self):
        # Suggest an id for this object.
        # This id is used when automatically renaming an object after creation.
        title = self.Title()
        # Can't work w/o a title
        if not title:
            return None
        # Don't do anything without the plone.i18n package
        if not URL_NORMALIZER:
            return None
        if not isinstance(title, unicode):
            title = unicode(title, 'utf-8')
        request = getattr(self, 'REQUEST', None)
        if request is not None:
            return IUserPreferredURLNormalizer(request).normalize(title)
        return queryUtility(IURLNormalizer).normalize(title)
    security.declarePrivate('_renameAfterCreation')
    def _renameAfterCreation(self, check_auto_id=False):
        # Renames an object like its normalized title.
        old_id = self.getId()
        if check_auto_id and not self._isIDAutoGenerated(old_id):
            # No auto generated id
            return False
        new_id = self.generateNewId()
        if new_id is None:
            return False
        invalid_id = True
        check_id = getattr(self, 'check_id', None)
        if check_id is not None:
            invalid_id = check_id(new_id, required=1)
        # If check_id told us no, or if it was not found, make sure we have an
        # id unique in the parent folder.
        if invalid_id:
            unique_id = self._findUniqueId(new_id)
            if unique_id is not None:
                if check_id is None or check_id(new_id, required=1):
                    new_id = unique_id
                    invalid_id = False
        if not invalid_id:
            # Can't rename without a subtransaction commit when using
            # portal_factory!
            transaction.savepoint(optimistic=True)
            self.setId(new_id)
            return new_id
        return False
    security.declarePrivate('_findUniqueId')
    def _findUniqueId(self, id):
        # Find a unique id in the parent folder, based on the given id, by
        # appending -n, where n is a number between 1 and the constant
        # RENAME_AFTER_CREATION_ATTEMPTS, set in config.py. If no id can be
        # found, return None.
        check_id = getattr(self, 'check_id', None)
        if check_id is None:
            parent = aq_parent(aq_inner(self))
            parent_ids = parent.objectIds()
            # Without a check_id script, "invalid" simply means "taken".
            check_id = lambda id, required: id in parent_ids
        invalid_id = check_id(id, required=1)
        if not invalid_id:
            return id
        idx = 1
        while idx <= RENAME_AFTER_CREATION_ATTEMPTS:
            new_id = "%s-%d" % (id, idx)
            if not check_id(new_id, required=1):
                return new_id
            idx += 1
        return None
    security.declarePrivate('_isIDAutoGenerated')
    def _isIDAutoGenerated(self, id):
        # Avoid busting setDefaults if we don't have a proper acquisition
        # context.
        plone_tool = getToolByName(self, 'plone_utils', None)
        if plone_tool is not None and \
           shasattr(plone_tool, 'isIDAutoGenerated'):
            return plone_tool.isIDAutoGenerated(id)
        return False
    security.declareProtected(permissions.View, 'Schemata')
    def Schemata(self):
        # Returns the Schemata for the Object.
        return getSchemata(self)
    def Schema(self):
        # Return a (wrapped) schema instance for this object instance.
        return ImplicitAcquisitionWrapper(ISchema(self), self)
    security.declarePrivate('_isSchemaCurrent')
    def _isSchemaCurrent(self):
        # Determines whether the current object's schema is up to date.
        return self._signature == self.Schema().signature()
    security.declarePrivate('_updateSchema')
    def _updateSchema(self, excluded_fields=None, out=None,
                      remove_instance_schemas=False):
        """Updates an object's schema when the class schema changes.
        For each field we use the existing accessor to get its value,
        then we re-initialize the class, then use the new schema
        mutator for each field to set the values again.
        We also copy over any class methods to handle product
        refreshes gracefully (when a product refreshes, you end up
        with both the old version of the class and the new in memory
        at the same time -- you really should restart zope after doing
        a schema update).
        """
        if excluded_fields is None:
            excluded_fields = []
        if out is not None:
            print >> out, 'Updating %s' % (self.getId())
        if remove_instance_schemas and 'schema' in self.__dict__:
            if out is not None:
                print >> out, 'Removing schema from instance dict.'
            del self.schema
        new_schema = self.Schema()
        # Read all the old values into a dict
        values = {}
        mimes = {}
        for f in new_schema.fields():
            name = f.getName()
            if name in excluded_fields:
                continue
            if f.type == "reference":
                continue
            try:
                values[name] = self._migrateGetValue(name, new_schema)
            except ValueError:
                if out is not None:
                    print >> out, ('Unable to get %s.%s'
                                   % (str(self.getId()), name))
            else:
                if shasattr(f, 'getContentType'):
                    mimes[name] = f.getContentType(self)
        obj_class = self.__class__
        current_class = getattr(sys.modules[self.__module__],
                                self.__class__.__name__)
        if obj_class.schema != current_class.schema:
            # XXX This is kind of brutish. We do this to make sure that old
            # class instances have the proper methods after a refresh. The
            # best thing to do is to restart Zope after doing an update, and
            # the old versions of the class will disappear.
            for k in current_class.__dict__.keys():
                obj_class.__dict__[k] = current_class.__dict__[k]
        # Set a request variable to avoid resetting the newly created flag
        req = getattr(self, 'REQUEST', None)
        if req is not None:
            req.set('SCHEMA_UPDATE', '1')
        self.initializeArchetype()
        for f in new_schema.fields():
            name = f.getName()
            kw = {}
            if name not in excluded_fields and name in values:
                if name in mimes:
                    kw['mimetype'] = mimes[name]
                try:
                    self._migrateSetValue(name, values[name], **kw)
                except ValueError:
                    if out is not None:
                        print >> out, ('Unable to set %s.%s to '
                                       '%s' % (str(self.getId()),
                                               name, str(values[name])))
        # Make sure the changes are persisted
        self._p_changed = 1
        if out is not None:
            return out
    security.declarePrivate('_migrateGetValue')
    def _migrateGetValue(self, name, new_schema=None):
        # Try to get a value from an object using a variety of methods.
        schema = self.Schema()
        # Migrate pre-AT 1.3 schemas.
        schema = fixSchema(schema)
        # First see if the new field name is managed by the current schema
        field = schema.get(getattr(new_schema.get(
            name, None), 'old_field_name', name), None)
        if field:
            # At very first try to use the BaseUnit itself
            try:
                if IFileField.providedBy(field):
                    return field.getBaseUnit(self)
            except (ConflictError, KeyboardInterrupt):
                raise
            except:
                pass
            # First try the edit accessor
            try:
                editAccessor = field.getEditAccessor(self)
                if editAccessor:
                    return editAccessor()
            except (ConflictError, KeyboardInterrupt):
                raise
            except:
                pass
            # No luck -- now try the accessor
            try:
                accessor = field.getAccessor(self)
                if accessor:
                    return accessor()
            except (ConflictError, KeyboardInterrupt):
                raise
            except:
                pass
            # No luck use standard method to get the value
            return field.get(self)
        # Still no luck -- try to get the value directly
        # this part should be remove because for some fields this will fail
        # if you get the value directly for example for FixPointField
        # stored value is (0,0) but the input value is a string.
        # at this time FixPointField fails if he got a tuple as input value
        # Because of this line value = value.replace(',','.')
        try:
            return self[field.getName()]
        except (ConflictError, KeyboardInterrupt):
            raise
        except:
            pass
        # Nope -- see if the new accessor method is present
        # in the current object.
        if new_schema:
            new_field = new_schema.get(name)
            # Try the new edit accessor
            try:
                editAccessor = new_field.getEditAccessor(self)
                if editAccessor:
                    return editAccessor()
            except (ConflictError, KeyboardInterrupt):
                raise
            except:
                pass
            # Nope -- now try the accessor
            try:
                accessor = new_field.getAccessor(self)
                if accessor:
                    return accessor()
            except (ConflictError, KeyboardInterrupt):
                raise
            except:
                pass
            # Still no luck -- try to get the value directly using the new name
            try:
                return self[new_field.getName()]
            except (ConflictError, KeyboardInterrupt):
                raise
            except:
                pass
        # Nope -- now see if the current object has an attribute
        # with the same name
        # as the new field
        if shasattr(self, name):
            return getattr(self, name)
        raise ValueError, 'name = %s' % (name)
    security.declarePrivate('_migrateSetValue')
    def _migrateSetValue(self, name, value, old_schema=None, **kw):
        # Try to set an object value using a variety of methods.
        schema = self.Schema()
        # Migrate pre-AT 1.3 schemas.
        schema = fixSchema(schema)
        field = schema.get(name, None)
        # Try using the field's mutator
        if field:
            mutator = field.getMutator(self)
            if mutator is not None:
                try:
                    args = [value]
                    mapply(mutator, *args, **kw)
                    return
                except (ConflictError, KeyboardInterrupt):
                    raise
                except:
                    log_exc()
        else:
            # Try setting an existing attribute
            if shasattr(self, name):
                setattr(self, name, value)
                return
        raise ValueError, 'name = %s, value = %s' % (name, value)
    security.declareProtected(permissions.View, 'isTemporary')
    def isTemporary(self):
        # Checks to see if we are created as temporary object by
        # portal factory.
        parent = aq_parent(aq_inner(self))
        return shasattr(parent, 'meta_type') and \
            parent.meta_type == 'TempFolder'
    security.declareProtected(permissions.View, 'getFolderWhenPortalFactory')
    def getFolderWhenPortalFactory(self):
        # Returns the folder where this object was created temporarily.
        ctx = aq_inner(self)
        if not ctx.isTemporary():
            # Not a temporary object!
            return aq_parent(ctx)
        utool = getToolByName(self, 'portal_url')
        portal_object = utool.getPortalObject()
        while ctx.getId() != 'portal_factory':
            # Find the portal factory object
            if ctx == portal_object:
                # uups, shouldn't happen!
                return ctx
            ctx = aq_parent(ctx)
        # ctx is now the portal_factory in our parent folder
        return aq_parent(ctx)
    #
    # Subobject Access
    #
    # Some temporary objects could be set by fields (for instance
    # additional images that may result from the transformation of
    # a PDF field to html).
    #
    # Those objects are specific to a session.
    #
    security.declareProtected(permissions.ModifyPortalContent,
                              'addSubObjects')
    def addSubObjects(self, objects, REQUEST=None):
        # Adds a dictionary of objects to a volatile attribute.
        if objects:
            storage = getattr(aq_base(self), '_v_at_subobjects', None)
            if storage is None:
                setattr(self, '_v_at_subobjects', {})
                storage = getattr(aq_base(self), '_v_at_subobjects')
            for name, obj in objects.items():
                # Store unwrapped to avoid persisting acquisition wrappers.
                storage[name] = aq_base(obj)
    security.declareProtected(permissions.View, 'getSubObject')
    def getSubObject(self, name, REQUEST, RESPONSE=None):
        # Gets a dictionary of objects from a volatile attribute.
        storage = getattr(aq_base(self), '_v_at_subobjects', None)
        if storage is None:
            return None
        data = storage.get(name, None)
        if data is None:
            return None
        mtr = self.mimetypes_registry
        mt = mtr.classify(data, filename=name)
        return Wrapper(data, name, str(mt) or 'application/octet-stream').__of__(self)
    def __bobo_traverse__(self, REQUEST, name):
        # Allows transparent access to session subobjects.
        #
        # Sometimes, the request doesn't have a response, e.g. when
        # PageTemplates traverse through the object path, they pass in
        # a phony request (a dict).
        RESPONSE = getattr(REQUEST, 'RESPONSE', None)
        # Is it a registered sub object
        data = self.getSubObject(name, REQUEST, RESPONSE)
        if data is not None:
            return data
        # Or a standard attribute (maybe acquired...)
        target = None
        method = REQUEST.get('REQUEST_METHOD', 'GET').upper()
        # Logic from "ZPublisher.BaseRequest.BaseRequest.traverse"
        # to check whether this is a browser request
        if (len(REQUEST.get('TraversalRequestNameStack', ())) == 0 and
            not (method in ('GET', 'HEAD', 'POST') and not
                 isinstance(RESPONSE, xmlrpc.Response))):
            if shasattr(self, name):
                target = getattr(self, name)
        else:
            if shasattr(self, name):  # attributes of self come first
                target = getattr(self, name)
            else:  # then views
                gsm = getSiteManager()
                factory = gsm.adapters.lookup(
                    (providedBy(self), providedBy(REQUEST)), Interface, name
                )
                if factory is not None:
                    # We don't return the view, we raise an
                    # AttributeError instead (below)
                    target = None
                else:  # then acquired attributes
                    target = getattr(self, name, None)
        if target is not None:
            return target
        elif (method not in ('GET', 'POST') and not
              isinstance(RESPONSE, xmlrpc.Response) and
              REQUEST.maybe_webdav_client):
            # WebDAV client asking for a not-yet-existing resource.
            return NullResource(self, name, REQUEST).__of__(self)
        else:
            # Raising AttributeError will look up views for us
            raise AttributeError(name)
# Apply the ClassSecurityInfo declarations made on BaseObject above.
InitializeClass(BaseObject)
class Wrapper(Explicit):
    """Explicitly-acquired wrapper serving a transient sub-object.

    Holds a raw payload together with the filename and mimetype needed to
    publish it; calling the instance writes the response headers (when a
    response is available) and returns the payload.
    """

    # Payload access is unrestricted for publishing purposes.
    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, data, filename, mimetype):
        # Stash the payload and its serving metadata.
        self._data = data
        self._filename = filename
        self._mimetype = mimetype

    def __call__(self, REQUEST=None, RESPONSE=None):
        """Publish the wrapped data; set headers when a response exists."""
        response = RESPONSE
        if response is None:
            # Fall back to the request's response (raises if REQUEST is None,
            # matching the original behavior).
            response = REQUEST.RESPONSE
        if response is not None:
            response.setHeader('Content-type', str(self._mimetype))
            response.setHeader('Content-Disposition',
                               'inline;filename=%s' % self._filename)
            response.setHeader('Content-Length', len(self._data))
        return self._data
# Convenience alias: the minimal schema that every BaseObject carries.
MinimalSchema = BaseObject.schema

__all__ = ('BaseObject', 'MinimalSchema')
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com |
bcd7cdf5f1d890ad89e2d3e52a30b2bfaf74c894 | c6d4fa98b739a64bb55a8750b4aecd0fc0b105fd | /ScanPi/QRbytes2/170.py | d2146bccd06e7a996200e1bdc39ff5a14f567951 | [] | no_license | NUSTEM-UK/Heart-of-Maker-Faire | de2c2f223c76f54a8b4c460530e56a5c74b65ca3 | fa5a1661c63dac3ae982ed080d80d8da0480ed4e | refs/heads/master | 2021-06-18T13:14:38.204811 | 2017-07-18T13:47:49 | 2017-07-18T13:47:49 | 73,701,984 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,746 | py | data = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFF, 0xF0, 0x00,
0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFF,
0xF0, 0x00, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0xFF, 0xF0, 0x00, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0xFF, 0xF0, 0x00, 0xFC, 0x0F, 0xFF, 0xFF,
0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFF, 0xF0, 0x00, 0xFC, 0x0F,
0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFF, 0xF0, 0x00,
0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0xFC,
0x00, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xC0, 0xFC, 0x00, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0x00, 0x0F, 0xC0, 0x00,
0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0x00, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x3F,
0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0xFC,
0x00, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F,
0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC,
0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x03, 0xFF, 0xC0,
0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x03,
0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x00,
0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0xFC,
0x00, 0x00, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xC0, 0x00,
0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x00,
0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC,
0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF,
0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F,
0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0,
0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC,
0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xFF, 0xFF, 0x00,
0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xFF,
0xFF, 0x00, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF,
0x03, 0xFF, 0xFF, 0x00, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC,
0x0F, 0xFF, 0x03, 0xFF, 0xFF, 0x00, 0x0F, 0xFF, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xFF, 0xFF, 0x00, 0x0F, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xFF, 0xFF, 0x00,
0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0x00, 0x0F,
0xFF, 0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0x00, 0x0F, 0xFF, 0x00,
0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0x00, 0x0F,
0xFF, 0x00, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xFF,
0x03, 0xF0, 0x00, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xFC,
0x0F, 0xFF, 0x03, 0xF0, 0x00, 0xFC, 0x0F, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x0F,
0xFF, 0xFC, 0x0F, 0xFF, 0x03, 0xF0, 0x00, 0xFC, 0x0F, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x00,
0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xFF, 0x03, 0xF0, 0x00, 0xFC,
0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xFF, 0x03, 0xF0,
0x00, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xFF,
0x03, 0xF0, 0x00, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xF0, 0x3F, 0xFF, 0xF0, 0x3F, 0x00,
0x00, 0x3F, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xF0, 0x3F, 0xFF, 0xF0,
0x3F, 0x00, 0x00, 0x3F, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xF0, 0x3F,
0xFF, 0xF0, 0x3F, 0x00, 0x00, 0x3F, 0x00, 0x0F, 0xFF, 0xFC,
0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xF0, 0x3F, 0xFF, 0xF0, 0x3F, 0x00, 0x00, 0x3F, 0x00, 0x0F,
0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xF0, 0x3F, 0xFF, 0xF0, 0x3F, 0x00, 0x00, 0x3F,
0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xF0, 0x3F, 0xFF, 0xF0, 0x3F, 0x00,
0x00, 0x3F, 0x00, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x03, 0xFF,
0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0x03, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xC0, 0xFC,
0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0x03, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF,
0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xFF, 0x00, 0x00, 0x00,
0x03, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xFF, 0x00,
0x00, 0x00, 0x03, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x03, 0xFF,
0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x00, 0x03,
0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x03, 0xF0,
0x00, 0x03, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0,
0x03, 0xF0, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xC0, 0x03, 0xF0, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x00, 0x03, 0xF0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x00, 0x03,
0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xFF, 0xC0, 0xFF, 0xFF,
0xFF, 0x00, 0x00, 0x3F, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xFF, 0xC0,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x3F, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03,
0xFF, 0xC0, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x3F, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0x03, 0xFF, 0xC0, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x3F,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0x03, 0xFF, 0xC0, 0xFF, 0xFF, 0xFF, 0x00,
0x00, 0x3F, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xFF, 0xC0, 0xFF, 0xFF,
0xFF, 0x00, 0x00, 0x3F, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03,
0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xC0, 0x03, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03,
0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xFF, 0x00, 0x00,
0x00, 0x03, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC,
0x0F, 0xFF, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xF0, 0x3F, 0x00,
0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xF0,
0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF,
0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC,
0x0F, 0xFF, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFF, 0xF0, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFF, 0xF0, 0x00, 0xFF, 0xFF, 0xFF, 0x00,
0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF, 0xF0, 0x00, 0xFF, 0xFF,
0xFF, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF, 0xF0, 0x00,
0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF,
0xF0, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFF, 0xF0, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xC0, 0x00, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xC0,
0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC,
0x0F, 0xC0, 0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x00,
0x3F, 0xFC, 0x0F, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xC0,
0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xFF, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03,
0xFF, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xFF, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x3F, 0xFC,
0x0F, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x00,
0x3F, 0xFC, 0x0F, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x3F,
0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03,
0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x3F,
0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
]
| [
"lists@quernstone.com"
] | lists@quernstone.com |
1a0155105047bf78d9a8c8e733b0206d8aa10225 | 7813f41039c4fc96c56849792d4a411688696fd9 | /12.py | 9b2f1b5f7c2df2c6fc7d7f250b907cba32047d6a | [] | no_license | vkurup/project-euler | 497e00d4b7e22cfc27feb06837f87fa7ba0d3158 | fb357b9c4a8ba681fa1b638d4e82223502be5687 | refs/heads/master | 2021-01-02T22:58:49.737821 | 2012-04-29T10:31:20 | 2012-04-29T10:31:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #!/usr/bin/env python2
# triangle numbers: 1, 3, 6, 10, 15, 21, 28, 36, 45
# What is the value of the first triangle number to have over 500
# divisors?
import math
def divisors(n):
    """Return the sorted, de-duplicated divisors of n.

    Walks candidates up to sqrt(n) and records each divisor together
    with its complementary factor n / candidate; the set removes the
    duplicate that appears when n is a perfect square.
    """
    found = set()
    for candidate in range(1, int(math.sqrt(n)) + 1):
        if n % candidate == 0:
            found.add(candidate)
            found.add(n / candidate)
    return sorted(found)
def odd(n):
    """Return True when n is not evenly divisible by 2."""
    return bool(n % 2)
def first_triangle_with_n_divisors(n):
    """Return the first triangle number with more than n divisors.

    Bug fix: the previous version skipped every odd triangle number
    (via ``if odd(next_triangle): continue``) and never examined the
    first triangle number 1, so small inputs gave wrong answers
    (e.g. n=1 returned 6 instead of 3).  Every triangle number is now
    tested in order, starting from 1.
    """
    def _divisor_count(m):
        # Count divisors in complementary pairs up to sqrt(m); a
        # perfect-square root contributes only one divisor.
        count = 0
        d = 1
        while d * d <= m:
            if m % d == 0:
                count += 1 if d * d == m else 2
            d += 1
        return count

    i = 0
    triangle = 0
    while True:
        i += 1
        triangle += i
        if _divisor_count(triangle) > n:
            return triangle
# Python 2 script entry point: print the Project Euler #12 answer.
print "answer = ", first_triangle_with_n_divisors(500)
| [
"vinod@kurup.com"
] | vinod@kurup.com |
1be71155adf6f95d31377bd9bbad76fdcef9006f | d93159d0784fc489a5066d3ee592e6c9563b228b | /CondCore/RunInfoPlugins/test/inspect.py | 7bcf713503af8db60a91ef3b1e1c9d820f80d829 | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 752 | py | import os,sys, DLFCN
sys.setdlopenflags(DLFCN.RTLD_GLOBAL+DLFCN.RTLD_LAZY)
from pluginCondDBPyInterface import *
# Framework bootstrap side effect (must run before any DB access).
a = FWIncantation()
# Point CORAL at the shared CMS credential store on AFS.
os.putenv("CORAL_AUTH_PATH","/afs/cern.ch/cms/DB/conddb")
rdbms = RDBMS()
# Conditions DB holding the run-info payloads, plus the PopCon log DB.
dbName = "oracle://cms_orcoff_prod/CMS_COND_21X_RUN_INFO"
logName = "oracle://cms_orcoff_prod/CMS_COND_21X_POPCONLOG"
rdbms.setLogger(logName)
from CondCore.Utilities import iovInspector as inspect
db = rdbms.getDB(dbName)
tags = db.allTags()
tag = 'l1triggerscaler_test_v2'
try :
    # Show the last PopCon log state for the tag, then dump its IOVs
    # and the selected summary columns for each payload.
    log = db.lastLogEntry(tag)
    print log.getState()
    iov = inspect.Iov(db,tag)
    print iov.list()
    for x in iov.summaries():
        print x[1],x[3]
#    print iov.trend("",[0,2,12])
except RuntimeError :
    # The tag may have no IOV payloads yet (or be unreadable).
    print " no iov? in", tag
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
8000208a5a8cd01f009a2662de9c7ec575a7e34c | eb0882f59cecc12c043764f2d51ab3cad1c1b5a0 | /example_designs/test/test_bist.py | ad07927f6d25640a21525b04befbff9ce7e7fb7c | [
"BSD-2-Clause"
] | permissive | mithro/litesata | 10a664a6d8895f64c3847612947b77e350d6aa29 | 410a73e89ee27c65915f0212f676f50719f4978c | refs/heads/master | 2021-09-02T03:14:41.917515 | 2017-12-29T22:01:58 | 2017-12-29T22:01:58 | 115,756,739 | 1 | 0 | null | 2017-12-29T21:58:20 | 2017-12-29T21:58:20 | null | UTF-8 | Python | false | false | 8,771 | py | import time
import argparse
import random as rand
from collections import OrderedDict
from litex.soc.tools.remote import RemoteClient
KB = 1024
MB = 1024*KB
GB = 1024*MB
logical_sector_size = 512
class Timer:
    """Wall-clock stopwatch.

    ``value`` is None until stop() has been called; afterwards it holds
    the elapsed seconds, clamped to at least one microsecond so that
    later throughput divisions never divide by zero.
    """
    def __init__(self):
        self.value = None

    def start(self):
        self._start = time.time()

    def stop(self):
        self._stop = time.time()
        elapsed = self._stop - self._start
        self.value = max(elapsed, 1/1000000)
class LiteSATABISTUnitDriver:
    """Host-side driver for one BIST unit CSR group.

    Binds the unit's control/status registers as attributes and runs
    one pass over the drive, returning abort status, error count and
    throughput (bytes/s, using logical_sector_size per sector).
    """
    def __init__(self, regs, constants, name):
        self.regs = regs
        self.name = name
        # System clock frequency converts the hardware cycle counter
        # into seconds.
        self.frequency = constants.system_clock_frequency
        self.time = 0
        # Bind each CSR "<name>_<field>" as self.<field> for brevity.
        for s in ["start", "sector", "count", "loops", "random", "done", "aborted", "errors", "cycles"]:
            setattr(self, s, getattr(regs, name + "_" + s))

    def run(self, sector, count, loops, random, blocking=True, hw_timer=True):
        """Run one BIST pass; return (aborted, errors, speed).

        sector/count/loops/random are written to the unit's CSRs
        before the start pulse; with blocking=True we poll ``done``
        until the hardware finishes.  hw_timer=False falls back to
        the (less precise) host-side Timer measurement.
        """
        self.sector.write(sector)
        self.count.write(count)
        self.loops.write(loops)
        self.random.write(random)
        timer = Timer()
        timer.start()
        self.start.write(1)
        if blocking:
            while (self.done.read() == 0):
                pass
        timer.stop()
        aborted = self.aborted.read()
        if not aborted:
            if hw_timer:
                # Hardware cycle count / clock frequency: immune to
                # host scheduling jitter.
                self.time = self.cycles.read()/self.frequency
            else:
                self.time = timer.value
            speed = (loops*count*logical_sector_size)/self.time
            errors = self.errors.read()
        else:
            # An aborted run has no meaningful speed; errors = -1
            # flags the failure to callers.
            speed = 0
            errors = -1
        return (aborted, errors, speed)
class LiteSATABISTGeneratorDriver(LiteSATABISTUnitDriver):
    """Unit driver bound to the "<name>_generator" CSR group."""
    def __init__(self, regs, constants, name):
        super().__init__(regs, constants, name + "_generator")
class LiteSATABISTCheckerDriver(LiteSATABISTUnitDriver):
    """Unit driver bound to the "<name>_checker" CSR group."""
    def __init__(self, regs, constants, name):
        super().__init__(regs, constants, name + "_checker")
class LiteSATABISTIdentifyDriver:
    """Driver for the BIST identify unit.

    Triggers the identify command, drains the response words from the
    unit's FIFO and decodes serial number, firmware revision, model
    number, capacity and link capabilities from them.
    """
    def __init__(self, regs, constants, name):
        self.regs = regs
        self.name = name
        # Bind each "<name>_identify_<field>" CSR as self.<field>.
        # Note: data_width is bound but not used in this class.
        for s in ["start", "done", "data_width", "source_valid", "source_ready", "source_data"]:
            setattr(self, s, getattr(regs, name + "_identify_" + s))
        # Decoded 16-bit words from the last identify response.
        self.data = []

    def read_fifo(self):
        # Drain the FIFO; each 32-bit dword carries two 16-bit words,
        # least-significant word first.
        self.data = []
        while self.source_valid.read():
            dword = self.source_data.read()
            word_lsb = dword & 0xffff
            word_msb = (dword >> 16) & 0xffff
            self.data += [word_lsb, word_msb]
            # Acknowledge the dword so the FIFO advances.
            self.source_ready.write(1)

    def run(self, blocking=True):
        """Issue the identify command; with blocking=True also wait,
        read back the response and decode it."""
        self.read_fifo() # flush the fifo before we start
        self.start.write(1)
        if blocking:
            while (self.done.read() == 0):
                pass
            self.read_fifo()
            self.decode()

    def decode(self):
        """Decode self.data following the ATA IDENTIFY word layout.

        ATA strings pack two characters per 16-bit word, high byte
        first, hence the big-endian to_bytes conversion.
        """
        self.serial_number = ""
        for i, word in enumerate(self.data[10:20]):
            s = word.to_bytes(2, byteorder='big').decode("utf-8")
            self.serial_number += s
        self.firmware_revision = ""
        for i, word in enumerate(self.data[23:27]):
            s = word.to_bytes(2, byteorder='big').decode("utf-8")
            self.firmware_revision += s
        self.model_number = ""
        # NOTE(review): the ATA model number spans words 27..46
        # inclusive (20 words / 40 chars); this slice stops at word 45
        # and drops the last two characters -- confirm intent.
        for i, word in enumerate(self.data[27:46]):
            s = word.to_bytes(2, byteorder='big').decode("utf-8")
            self.model_number += s
        # 48-bit LBA sector count assembled from words 100..103.
        self.total_sectors = self.data[100]
        self.total_sectors += (self.data[101] << 16)
        self.total_sectors += (self.data[102] << 32)
        self.total_sectors += (self.data[103] << 48)
        # Capability flags: word 76 bits 1-3 (SATA generations) and
        # word 83 bit 10 (48-bit LBA).
        self.capabilities = OrderedDict()
        self.capabilities["SATA Gen1"] = (self.data[76] >> 1) & 0x1
        self.capabilities["SATA Gen2"] = (self.data[76] >> 2) & 0x1
        self.capabilities["SATA Gen3"] = (self.data[76] >> 3) & 0x1
        self.capabilities["48 bits LBA supported"] = (self.data[83] >> 10) & 0x1

    def hdd_info(self):
        """Print a human-readable summary of the decoded identify data."""
        info = "Serial Number: " + self.serial_number + "\n"
        info += "Firmware Revision: " + self.firmware_revision + "\n"
        info += "Model Number: " + self.model_number + "\n"
        info += "Capacity: {:3.2f} GB\n".format((self.total_sectors*logical_sector_size)/GB)
        for k, v in self.capabilities.items():
            info += k + ": " + str(v) + "\n"
        print(info, end="")
def _get_args():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description="""\
SATA BIST utility.
""")
parser.add_argument("-s", "--transfer_size", default=1024, help="transfer sizes (in KB, up to 16MB)")
parser.add_argument("-l", "--total_length", default=256, help="total transfer length (in MB, up to HDD capacity)")
parser.add_argument("-n", "--loops", default=1, help="number of loop per transfer (allow more precision on speed calculation for small transfers)")
parser.add_argument("-r", "--random", action="store_true", help="use random data")
parser.add_argument("-c", "--continuous", action="store_true", help="continuous mode (Escape to exit)")
parser.add_argument("-i", "--identify", action="store_true", help="only run identify")
parser.add_argument("-t", "--software_timer", action="store_true", help="use software timer")
parser.add_argument("-a", "--random_addressing", action="store_true", help="use random addressing")
parser.add_argument("-d", "--delayed_read", action="store_true", help="read after total length has been written")
return parser.parse_args()
if __name__ == "__main__":
    # Entry point: identify the drive, then (unless --identify) run the
    # generator/checker BIST loops and report per-chunk throughput.
    args = _get_args()
    wb = RemoteClient()
    wb.open()
    # # #
    identify = LiteSATABISTIdentifyDriver(wb.regs, wb.constants, "sata_bist")
    generator = LiteSATABISTGeneratorDriver(wb.regs, wb.constants, "sata_bist")
    checker = LiteSATABISTCheckerDriver(wb.regs, wb.constants, "sata_bist")
    identify.run()
    identify.hdd_info()
    if not int(args.identify):
        count = int(args.transfer_size)*KB//logical_sector_size
        loops = int(args.loops)
        length = int(args.total_length)*MB
        random = int(args.random)
        continuous = int(args.continuous)
        sw_timer = int(args.software_timer)
        random_addressing = int(args.random_addressing)
        write_and_read_sequence = {"write": 1, "read": 1}
        write_sequence = {"write": 1, "read": 0}
        read_sequence = {"write": 0, "read": 1}
        # With --delayed_read: first write the whole length, then read it back.
        if int(args.delayed_read):
            sequences = [write_sequence, read_sequence]
        else:
            sequences = [write_and_read_sequence]
        for sequence in sequences:
            sector = 0
            run_sectors = 0
            try:
                while ((run_sectors*logical_sector_size < length) or continuous) and (sector < identify.total_sectors):
                    retry = 0
                    if sequence["write"]:
                        # generator (write data to HDD)
                        write_done = False
                        while not write_done:
                            write_aborted, write_errors, write_speed = generator.run(sector, count, loops, random, True, not sw_timer)
                            write_done = not write_aborted
                            if not write_done:
                                retry += 1
                    else:
                        # BUGFIX: was "write_error, write_speed = 0, 0", which left
                        # write_errors undefined and raised NameError in the
                        # read-only pass of --delayed_read (used in print() below).
                        write_errors, write_speed = 0, 0
                    if sequence["read"]:
                        # checker (read and check data from HDD)
                        read_done = False
                        while not read_done:
                            read_aborted, read_errors, read_speed = checker.run(sector, count, loops, random, True, not sw_timer)
                            read_done = not read_aborted
                            if not read_done:
                                retry += 1
                    else:
                        read_errors, read_speed = 0, 0
                    ratio = identify.data_width.read()//32
                    print("sector={:d} wr_speed={:4.2f}MB/s rd_speed={:4.2f}MB/s errors={:d} retry={:d} ({:d}MB)".format(
                        sector,
                        write_speed/MB*ratio,
                        read_speed/MB*ratio,
                        write_errors + read_errors,
                        retry,
                        int(run_sectors*logical_sector_size/MB)*ratio))
                    if random_addressing:
                        # NOTE(review): "rand" is presumably the random module imported
                        # under another name at the top of the file ("random" itself is
                        # shadowed by the int flag above) -- confirm the import exists.
                        sector = rand.randint(0, identify.total_sectors//(256*2))*256
                    else:
                        sector += count
                    run_sectors += count
            except KeyboardInterrupt:
                pass
    # # #
    wb.close()
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
fc6f84346460d776b8a6cbd2b89fb45637a259b6 | 321e58ab3e6b2385bb3549aaaefd56a58c2a51e7 | /python/tests/tokyoperf_test.py | 04b1583da6c773994ccef0548f9749143f7f2646 | [] | no_license | alexmadon/atpic_photosharing | 7829118d032344bd9a67818cd50e2c27a228d028 | 9fdddeb78548dadf946b1951aea0d0632e979156 | refs/heads/master | 2020-06-02T15:00:29.282979 | 2017-06-12T17:09:52 | 2017-06-12T17:09:52 | 94,095,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # -*- coding: utf-8 -*-
"""
Unit tests for Tokyo Cabinet
Compare the speed of the different drivers
Conclusion:
google tc is 5X faster than atpic ctypes implementation
"""
import unittest
import os
import time
import atpic.tokyoctypes as tc
import tc as tc2
import tokyo.cabinet as tc3
from pyrant import Tyrant, Q
"""
Compares speed of atpic.tokyo and tc
"""
import psyco # package python-psyco
psyco.full()
class testTdbOpen(unittest.TestCase):
    """Benchmark the atpic ctypes-based Tokyo Cabinet table driver.

    Writes one million small records into a fresh table database and prints
    the elapsed wall-clock time (Python 2 code base).
    """
    def setUp(self):
        # Start every run from an empty database file.
        print "setUp"
        self.dbfile="casketperf.tct"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def tearDown(self):
        # Remove the database created by the benchmark.
        print "tearDown"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def testall(self):
        print "Hiiiii"
# tdb=tc.Tdb()
        # Open (and create) the table database in write mode.
        tdb=tc.TDB(self.dbfile, tc.TDBOWRITER | tc.TDBOCREAT)
        print tdb
        print tdb.tdb
        print "number of records in db: %s" % tdb.rnum()
        time1=time.time()
# tdb.tranbegin()
        # NOTE(review): "age" is an int here but a string in the benchmarks
        # below -- confirm whether that skews the comparison.
        for i in range(1,1000000):
            tdb.put("key%s" %i ,{"firstname":"Alex","lastname":"Madon","age":34})
# tdb.trancommit()
        time2=time.time()
        dt=time2-time1
        print "Atpic tc: %s" % dt
class testTdbOpen2(unittest.TestCase):
    """Benchmark the PyTC ("Google") Tokyo Cabinet table driver.

    Same one-million-record workload as testTdbOpen so timings compare.
    """
    def setUp(self):
        # Start every run from an empty database file.
        print "setUp"
        self.dbfile="casketperf.tct"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def tearDown(self):
        print "tearDown"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def testall(self):
        print "Hiiiii"
# tdb=tc.Tdb()
        # NOTE(review): the open flags come from the atpic "tc" module while
        # the database object comes from "tc2" -- verify the flag values match.
        tdb=tc2.TDB(self.dbfile, tc.TDBOWRITER | tc.TDBOCREAT)
# print "number of records in db: %s" % tdb.rnum()
        time1=time.time()
        for i in range(1,1000000):
            tdb.put("key%s" %i ,{"firstname":"Alex","lastname":"Madon","age":"34"})
        time2=time.time()
        dt=time2-time1
        print "Google tc: %s" % dt
class testTdbOpen3(unittest.TestCase):
    """Uses http://bitbucket.org/lasizoillo/tokyocabinet/"""
    # Benchmark of the bitbucket "tokyo.cabinet" driver (imported as tc3)
    # with the same workload as the other benchmarks above.
    def setUp(self):
        # Note: uses a different file name than the other benchmarks.
        print "setUp"
        self.dbfile="casketperf3.tct"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def tearDown(self):
        print "tearDown"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def testall(self):
        print "Hiiiii3"
# tdb=tc.Tdb()
        # This driver separates construction from opening the file.
        tdb = tc3.TableDB()
        tdb.open(self.dbfile, tc.TDBOWRITER | tc.TDBOCREAT)
# print "number of records in db: %s" % tdb.rnum()
        time1=time.time()
        for i in range(1,1000000):
            tdb.put("key%s" %i ,{"firstname":"Alex","lastname":"Madon","age":"34"})
        time2=time.time()
        dt=time2-time1
        print "bitbucket tc: %s" % dt
class testTdbTyrant(unittest.TestCase):
    """ You need to start tyrant:
    ttserver test.tct
    """
    # Disabled benchmark (renamed NOtestall so unittest skips it): measures
    # pyrant writes against a locally running Tokyo Tyrant server.
    def NOtestall(self):
        t = Tyrant(host='127.0.0.1', port=1978)
        time1=time.time()
        # Only 10k records here (network round-trips are much slower).
        for i in range(1,10000):
            key="key%s" %i
            t[key]={"firstname":"Alex","lastname":"Madon","age":"34"}
        time2=time.time()
        dt=time2-time1
        print "Tyran tc: %s" % dt
# Discover and run all TestCase benchmarks when executed directly.
if __name__=="__main__":
    unittest.main()
| [
"alex.madon@gmail.com"
] | alex.madon@gmail.com |
a4f63492557f24270930521e041d540a6598d393 | 0178e6a705ee8aa6bb0b0a8512bf5184a9d00ded | /Sungjin/Bruteforce/1436.py | f234d2c7ee02122bc2abe3d99724475881e3efb1 | [] | no_license | comojin1994/Algorithm_Study | 0379d513abf30e3f55d6a013e90329bfdfa5adcc | 965c97a9b858565c68ac029f852a1c2218369e0b | refs/heads/master | 2021-08-08T14:55:15.220412 | 2021-07-06T11:54:33 | 2021-07-06T11:54:33 | 206,978,984 | 0 | 1 | null | 2020-05-14T14:06:46 | 2019-09-07T14:23:31 | Python | UTF-8 | Python | false | false | 205 | py | import sys
# Rebind input to the faster sys.stdin.readline (deliberately shadows the
# builtin -- a common competitive-programming idiom).
input = sys.stdin.readline
N = int(input())
# Count upward from 666 and print the N-th number whose decimal
# representation contains the substring "666" (Baekjoon 1436).
word = '666'
cnt = 0
num = 666
while True:
    if word in str(num):
        cnt += 1
    # Stop once the N-th qualifying number has been found.
    if cnt == N:
        print(num)
        break
num += 1 | [
"comojin1994@gmail.com"
] | comojin1994@gmail.com |
d7cae89b1fe581e2ffda58f40388a10988806fa0 | 5da023dcc3ea1a4ad5d92c610de4aed981d6acf6 | /day05/migrations/0002_auto_20200917_0101.py | 8140298f7df4d9be525d5f37994e1ef4e7e77bae | [] | no_license | zwy-888/drf03 | a2bf9deea7badc3f070bd2515b1d273b71df0909 | 53e512376e2a52fea6978cbe30376fddb950cfbe | refs/heads/master | 2022-12-18T14:55:50.728061 | 2020-09-17T06:02:51 | 2020-09-17T06:02:51 | 295,579,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | # Generated by Django 3.0 on 2020-09-16 17:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: give Employee2 human-readable admin names."""
    dependencies = [
        ('day05', '0001_initial'),
    ]
    operations = [
        # Only model Meta options change ('员工' means "employee"); this does
        # not alter the database schema.
        migrations.AlterModelOptions(
            name='employee2',
            options={'verbose_name': '员工', 'verbose_name_plural': '员工'},
        ),
    ]
| [
"l"
] | l |
ef6a5849becb23b9f3c407f12ac2a9e8c0e7e182 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-oms/huaweicloudsdkoms/v2/model/task_group_dst_node_resp.py | e8c3ba6f581dcca705484bddfff656f83caf8ce8 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,971 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class TaskGroupDstNodeResp:
    """Destination-node description of an OMS migration task group.

    Auto-generated SDK model: ``openapi_types`` maps attribute names to their
    declared types and ``attribute_map`` maps them to the JSON keys used on
    the wire.  Attributes listed in ``sensitive_list`` are masked by
    :meth:`to_dict`.
    """

    sensitive_list = []

    openapi_types = {
        'bucket': 'str',
        'region': 'str',
        'save_prefix': 'str'
    }

    attribute_map = {
        'bucket': 'bucket',
        'region': 'region',
        'save_prefix': 'save_prefix'
    }

    def __init__(self, bucket=None, region=None, save_prefix=None):
        """TaskGroupDstNodeResp

        :param bucket: name of the destination bucket
        :type bucket: str
        :param region: region the destination bucket lives in
        :type region: str
        :param save_prefix: path prefix prepended to each object key (the
            combined key must not exceed 1024 characters)
        :type save_prefix: str
        """
        self._bucket = None
        self._region = None
        self._save_prefix = None
        self.discriminator = None
        # Route only non-None values through the property setters so that
        # omitted arguments keep their None placeholder.
        for attr, value in (('bucket', bucket),
                            ('region', region),
                            ('save_prefix', save_prefix)):
            if value is not None:
                setattr(self, attr, value)

    @property
    def bucket(self):
        """Name of the destination bucket."""
        return self._bucket

    @bucket.setter
    def bucket(self, bucket):
        """Set the name of the destination bucket."""
        self._bucket = bucket

    @property
    def region(self):
        """Region the destination bucket lives in."""
        return self._region

    @region.setter
    def region(self, region):
        """Set the region of the destination bucket."""
        self._region = region

    @property
    def save_prefix(self):
        """Path prefix prepended to object keys in the destination bucket."""
        return self._save_prefix

    @save_prefix.setter
    def save_prefix(self, save_prefix):
        """Set the destination path prefix."""
        self._save_prefix = save_prefix

    def to_dict(self):
        """Return the model as a plain dict, masking sensitive attributes."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Instances compare equal when all attributes match."""
        if not isinstance(other, TaskGroupDstNodeResp):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
53156ad796a1dc172b66692e05d181102af1ebdc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02957/s228425638.py | 1e2c730b208275c68561a4e2d64ea2cf336e38f8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | A, B = map(int, input().split())
# A and B are read on the (metadata-fused) line above; K is their sum.
K = A + B
# A number equidistant from A and B exists only when their sum is even;
# it is then the average (A+B)//2, otherwise report IMPOSSIBLE.
if K%2 == 0:
    print(K//2)
else:
    print("IMPOSSIBLE")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6010bd42f0854269a3b919f84f88b5380165c751 | 485cf3c70fcaa68689a2b690b6465f1d6bcf21bd | /string_process/30_decorator_test_1.py | 3722bcbbd44c4c78e85834f45eaf86b6b10019b7 | [] | no_license | lxz0503/study_20190608 | 5ffe08c4704bb00ad8d1980baf16b8f5e7135ff4 | 47c37798140883b8d6dc21ec5da5bc7a20988ce9 | refs/heads/master | 2022-12-23T17:23:45.039015 | 2021-06-23T14:50:19 | 2021-06-23T14:50:19 | 190,884,812 | 1 | 3 | null | 2022-12-15T23:17:33 | 2019-06-08T12:22:56 | Python | UTF-8 | Python | false | false | 1,748 | py | # http://c.biancheng.net/view/2270.html refer to this link
# A decorator is itself a function.  It must not modify the code inside the
# decorated function, and it must not change how the function is called --
# that is the rule.  Think of it as wrapping one function around another to
# add behaviour, e.g. when you need extra functionality but cannot edit the
# function body.
# decorator = higher-order function + nested function + closure
# Decorator for a fixed parameter list:
# import time
#
# def deco(f): # 以函数名作为参数,就是高阶函数 high level function
# def wrapper(a,b): # 函数嵌套 function nesting
# start_time = time.time()
# f(a,b)
# end_time = time.time()
# execution_time = (end_time - start_time)*1000
# print("time is %d ms" % execution_time)
# return wrapper # 返回值是嵌套函数的函数名
#
# @deco
# def f(a,b):
# print("be on")
# time.sleep(1)
# print("result is %d" %(a+b)) # 执行完这一步,跳转到def wrapper(a,b)里面的f(a,b)
#
# if __name__ == '__main__':
# f(3,4) # 此处设置断点,查看函数如何执行,顺序是先执行deco()函数,在执行f(a,b)之前跳转到def f(a,b)函数里面
# 无固定参数的装饰器
import time
def deco(f):
    """Timing decorator: report the wrapped call's duration in milliseconds.

    Accepts any positional/keyword arguments and forwards them unchanged.
    BUGFIX: the wrapped function's return value is now propagated; the
    original wrapper discarded it, so decorated functions always returned
    None.  Callers that ignored the return value are unaffected.
    """
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = f(*args, **kwargs)
        end_time = time.time()
        execution_time = (end_time - start_time) * 1000
        print("time is %d ms" % execution_time)
        return result
    return wrapper
@deco
def f(a, b):
    """Demo function: sleeps 1s then prints the sum of a and b (timed by @deco)."""
    print("be on")
    time.sleep(1)
    print("result is %d" %(a+b))
@deco
def f2(a, b, c):
    """Demo with a different arity: shows *args/**kwargs forwarding in @deco."""
    print("be on")
    time.sleep(1)
    print("result is %d" %(a+b+c))
#
if __name__ == '__main__':
f2(3,4,5)
f(3,4) | [
"lxz_20081025@163.com"
] | lxz_20081025@163.com |
dcb848e9f8e9bdc7e8cfdd036e3b5d1bb2d2f373 | 077e5ab67f2936b0aa531b8ee177ecf83a0a2e18 | /实例/4、sqlite3/Alchemy.py | 7a31d9808ded210b4e777112dda0dde8c0ee12c4 | [] | no_license | MKDevil/Python | 43ef6ebcc6a800b09b4fb570ef1401add410c51a | 17b8c0bdd2e5a534b89cdec0eb51bfcc17c91839 | refs/heads/master | 2022-02-02T08:34:06.622336 | 2022-01-28T08:52:25 | 2022-01-28T08:52:25 | 163,807,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | '''
Author: MK_Devil
Date: 2022-01-13 11:13:09
LastEditTime: 2022-01-14 14:13:31
LastEditors: MK_Devil
'''
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sqlite3
# Open a connection to the SQLite database file (created if it doesn't exist).
conn = sqlite3.connect(r'.\实例\4、sqlite3\Alchemy.db')
# Create a cursor for executing SQL statements.
# NOTE(review): neither the cursor nor the connection is ever closed in this
# script; consider conn.close() (or a context manager) once work is done.
cur = conn.cursor()
# Query and print every row (kept for reference):
# cur.execute(r'select * from material')
# print(cur.fetchall())
| [
"MK_Devil@163.com"
] | MK_Devil@163.com |
e1d3a7a6134ce85427165e02d10c1fba15cdbe2b | 377d86194fd6d23c8ef3df3e6f7d90092dd8f9b4 | /workout_tracker/manage.py | 76a004d4e4b649ce5ac4f7982c4afddcbc13aa8f | [
"MIT"
] | permissive | e-dang/Workout-Tracker | f20f44b012e895244bad413a46103415ffae5732 | 00a27597ea628cff62b320d616f56b2df4f344a0 | refs/heads/master | 2022-12-28T07:49:34.179307 | 2020-10-12T20:48:28 | 2020-10-12T20:48:28 | 293,937,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Fall back to the project settings module unless one is already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'workout_tracker.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        # Chain the original ImportError so the real cause stays visible.
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point: only run when executed as a script.
if __name__ == '__main__':
    main()
| [
"edang830@gmail.com"
] | edang830@gmail.com |
b34d92c61e668e83b5eee607b7d5a8f484f82506 | 571a89f94f3ebd9ec8e6b618cddb7d05811e0d62 | /abc141/e/main.py | 45d936346360e80af662a6a165b30c1659e9bbe2 | [] | no_license | ryu19-1/atcoder_python | 57de9e1db8ff13a107b5861f8f6a231e40366313 | cc24b3c2895aad71d40cefbb8e2893dc397b8f4f | refs/heads/master | 2023-05-10T05:32:16.507207 | 2021-05-19T17:48:10 | 2021-05-19T17:48:10 | 368,954,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # Z-algorithmで解いてみる
def solve(N, S):
    """Return the longest length L such that some substring of length L
    occurs at least twice in S without the two occurrences overlapping
    (AtCoder ABC141 E).

    For every start position h the Z-array of the suffix T = S[h:] is built;
    Z[i] is the length of the common prefix of T and T[i:].  A match is only
    usable when Z[i] <= i, i.e. the two occurrences do not overlap.
    O(N^2) overall, which fits the problem constraints.

    Wrapping the original flat script in a function keeps the command-line
    behaviour identical while making the logic importable and testable.
    """
    ans = 0
    for h in range(N):
        T = S[h:]
        M = len(T)
        Z = [0] * M
        c = 0  # start of the rightmost Z-box computed so far
        for i in range(1, M):
            l = i - c  # offset of i inside the Z-box starting at c
            if i + Z[l] < c + Z[c]:
                # Entirely inside the box: reuse the mirrored value.
                Z[i] = Z[l]
            else:
                # Extend the match past the box by direct comparison.
                j = max(0, c + Z[c] - i)
                while i + j < M and T[j] == T[i + j]:
                    j += 1
                Z[i] = j
                c = i
            if i >= Z[i]:  # only non-overlapping occurrences count
                ans = max(ans, Z[i])
    return ans


if __name__ == "__main__":
    N = int(input())
    S = input()
    print(solve(N, S))
| [
"ryu1007kami@gmail.com"
] | ryu1007kami@gmail.com |
e85a3f6d0b811cbb9545632cd020fd96c7bd4d2f | fff54b01b46cef0bbc70a6469c88c01c82af5a57 | /programming/language/python3/python3-qt5/actions.py | a166a816f0eb75c9660606d54552c2b738686a23 | [] | no_license | LimeLinux/Packages | e51deae6c0d1406e31f06caa5aaa7749466bef0b | d492e075d8b051df68b98c315ad0628e33a8fac4 | refs/heads/master | 2021-01-11T12:37:22.150638 | 2018-08-30T18:24:32 | 2018-08-30T18:24:32 | 77,054,292 | 5 | 19 | null | 2018-02-02T17:24:06 | 2016-12-21T13:33:45 | Python | UTF-8 | Python | false | false | 1,558 | py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir="PyQt5_gpl-%s" % get.srcVERSION()
def setup():
pythonmodules.run("configure.py --confirm-license \
--qsci-api \
--sip /usr/bin/sip \
--qmake='/usr/bin/qmake' \
--destdir='/usr/lib/python3.6/site-packages' \
--sip-incdir='/usr/include/python3.6m' \
CFLAGS='%s' CXXFLAGS='%s'" % (get.CFLAGS(), get.CXXFLAGS()), pyVer = "3")
shelltools.system("find -name 'Makefile' | xargs sed -i 's|-Wl,-rpath,/usr/lib||g;s|-Wl,-rpath,.* ||g'")
def build():
autotools.make()
def install():
shelltools.cd("%s/PyQt5_gpl-%s" % (get.workDIR(),get.srcVERSION()))
autotools.rawInstall("-C pyrcc DESTDIR=%(DESTDIR)s INSTALL_ROOT=%(DESTDIR)s" % {'DESTDIR':get.installDIR()})
autotools.rawInstall("-C pylupdate DESTDIR=%(DESTDIR)s INSTALL_ROOT=%(DESTDIR)s" % {'DESTDIR':get.installDIR()})
autotools.rawInstall("DESTDIR=%(DESTDIR)s INSTALL_ROOT=%(DESTDIR)s" % {'DESTDIR':get.installDIR()})
#pisitools.dohtml("doc/html/*")
pisitools.dodoc("NEWS", "README","LICENSE*")
| [
"ergunsalman@hotmail.com"
] | ergunsalman@hotmail.com |
d86de11a83b6047dbc1dfc7aaddb0246e1930762 | 430791dcde1596a554984e38c554a367b85c9951 | /classes_and_instances/exercises/account.py | 66b666b637c2f594a8d2e92bc5a0a9e2e627fe1c | [] | no_license | mialskywalker/PythonOOP | 5fa8606cfe7c9ceb72ada8e62ff89513bac10d32 | e7f2d5f46983e9c8c50d9356497fcc9ed9f6d4dc | refs/heads/master | 2023-04-06T05:43:13.408741 | 2021-04-03T13:22:13 | 2021-04-03T13:22:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | class Account:
def __init__(self, id, name, balance=0):
self.id = id
self.name = name
self.balance = balance
def credit(self, amount):
self.balance += amount
return self.balance
def debit(self, amount):
if amount > self.balance:
return "Amount exceeded balance"
else:
self.balance -= amount
return self.balance
def info(self):
return f"User {self.name} with account {self.id} has {self.balance} balance"
account = Account(1234, "George", 1000)
print(account.credit(500))
print(account.debit(1500))
print(account.info())
account = Account(5411256, "Peter")
print(account.debit(500))
print(account.credit(1000))
print(account.debit(500))
print(account.info())
| [
"kalqga123@gmail.com"
] | kalqga123@gmail.com |
2e4b626d7a5009c01affd1651a2db5942cd936bd | d1ef84d05beedc811161314800193ded398bff07 | /tests/views/test_user_login.py | b65e0155aebd90b9a9ed87c86cd81702f49cffb2 | [
"MIT"
] | permissive | spookey/observatory | 8f4a98aeb214182124bc6a4ab6d1ddac697cd0bc | be5cc92f53f12e6341e7e3040f26360e54cfdf7d | refs/heads/master | 2023-04-22T03:31:34.879735 | 2021-01-16T17:50:07 | 2021-01-16T17:50:07 | 224,500,136 | 0 | 0 | MIT | 2021-05-12T03:53:02 | 2019-11-27T19:11:24 | Python | UTF-8 | Python | false | false | 3,122 | py | from flask import url_for
from flask_login import current_user
from pytest import mark
from tests.conftest import USER_PASS
ENDPOINT = 'user.login'
@mark.usefixtures('session')
class TestUserLogin:
    """View tests for the user login endpoint: URL, form markup and auth flow."""
    @staticmethod
    @mark.usefixtures('ctx_app')
    def test_url():
        # The login route is registered under /user/login.
        assert url_for(ENDPOINT) == '/user/login'
    @staticmethod
    def test_basic_form(visitor):
        # The page's last form posts back to the login endpoint itself.
        res = visitor(ENDPOINT)
        form = res.soup.select('form')[-1]
        assert form.attrs['method'].lower() == 'post'
        assert form.attrs['action'] == url_for(ENDPOINT, _external=True)
    @staticmethod
    def test_form_fields(visitor):
        # The form exposes exactly username/password/remember/submit inputs.
        res = visitor(ENDPOINT)
        form = res.soup.select('form')[-1]
        fields = [
            (inpt.attrs.get('name'), inpt.attrs.get('type'))
            for inpt in form.select('input,button')
        ]
        assert fields == [
            ('username', 'text'),
            ('password', 'password'),
            ('remember', 'checkbox'),
            ('submit', 'submit'),
        ]
    @staticmethod
    def test_form_wrong(visitor, gen_user):
        # Wrong password (the stored hash is posted, not the password):
        # the form re-renders with username kept and password cleared,
        # and the user stays anonymous.
        assert current_user.is_authenticated is False
        user = gen_user()
        res = visitor(
            ENDPOINT,
            method='post',
            data={
                'username': user.username,
                'password': user.pw_hash,
                'remember': True,
                'submit': True,
            },
        )
        form = res.soup.select('form')[-1]
        for sel, exp in [
            ('#username', user.username),
            ('#password', ''),
            ('#remember', 'True'),
        ]:
            assert form.select(sel)[-1].attrs['value'] == exp
        assert current_user.is_authenticated is False
    @staticmethod
    def test_form_login(visitor, gen_user):
        # Correct credentials: 302 redirect to the user home page and the
        # session user becomes authenticated.
        assert current_user.is_authenticated is False
        user = gen_user(password=USER_PASS)
        home_url = url_for('user.home', _external=True)
        res = visitor(
            ENDPOINT,
            method='post',
            data={
                'username': user.username,
                'password': USER_PASS,
                'remember': True,
                'submit': True,
            },
            code=302,
        )
        assert res.request.headers['Location'] == home_url
        assert current_user == user
        assert current_user.is_authenticated is True
        assert current_user.username == user.username
    @staticmethod
    def test_form_login_next(visitor, gen_user):
        # A ?next= query parameter overrides the default redirect target.
        assert current_user.is_authenticated is False
        user = gen_user(password=USER_PASS)
        next_url = url_for('side.favicon', _external=True)
        res = visitor(
            ENDPOINT,
            method='post',
            data={
                'username': user.username,
                'password': USER_PASS,
                'remember': True,
                'submit': True,
            },
            query_string={'next': next_url},
            code=302,
        )
        assert res.request.headers['Location'] == next_url
        assert current_user.is_authenticated is True
| [
"frieder.griesshammer@der-beweis.de"
] | frieder.griesshammer@der-beweis.de |
cc39d1f7c55512474c929007c0a06a66c8e2a5d6 | 378eea7cbb49d52c13c3bd0bb86bc93fc93d3d56 | /100Days/Day13/process_ex3.py | 26f733e1574e399722e49c9603b1001315678062 | [] | no_license | Zpadger/Python | b9e54524841e14d05e8f52b829c8c99c91e308b8 | f13da6d074afac50396621c9df780bf5ca30ce6b | refs/heads/master | 2020-08-16T01:10:00.534615 | 2020-04-12T15:15:53 | 2020-04-12T15:15:53 | 172,426,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # 进程间通信
from multiprocessing import Process
from time import sleep
counter = 0
def sub_task(string):
    """Print `string` repeatedly until this process's counter reaches 10."""
    global counter
    # NOTE(review): each child process receives its own copy of `counter`
    # (module globals are not shared across processes), so both workers print
    # 10 characters each rather than 10 in total -- which is what this demo
    # about (the lack of) inter-process state sharing illustrates.
    while counter<10:
        print(string,end='',flush=True)
        counter += 1
        sleep(0.01)
def main():
    """Spawn two worker processes printing 'Ping' and 'Pong' concurrently."""
    Process(target=sub_task,args=('Ping',)).start()
    Process(target=sub_task,args=('Pong',)).start()
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | Zpadger.noreply@github.com |
077aef3b071444c6c746bae01a97d8a8d91de4e3 | be5ea20226c37d81f1ccb2f704d8825d36e88765 | /05. Inheritance/LAB/03_hierarchical_inheritance/cat.py | d8e9e9ab312bd902582d72bbd922c2385e74aa58 | [] | no_license | dimDamyanov/PythonOOP | 3845e450e5a48fef4f70a186664e07c0cd60e09b | 723204f5b7e953874fac9314e48eb1d1628d6ff5 | refs/heads/main | 2023-04-07T18:00:36.735248 | 2021-04-19T20:57:14 | 2021-04-19T20:57:14 | 341,329,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from .animal import Animal
class Cat(Animal):
    """Concrete Animal in the hierarchical-inheritance exercise."""
    @staticmethod
    def meow():
        """Return the sound a cat makes."""
        return 'meowing...'
| [
"dim.damianov@gmail.com"
] | dim.damianov@gmail.com |
b5e17615285ca3c14fc8f5b3719570fdd384c7b7 | 795c2d7e2188f2ecb3e72bbb4053726856009c0d | /ctrl/cmorph/old/cmorph_extract_asia_ctrl.py | 2963d584be19522945de2a631d07ec2836a70891 | [
"Apache-2.0"
] | permissive | markmuetz/cosmic | 3a4ef310cb9cb92b81ff57b74bb1511841f790a5 | f215c499bfc8f1d717dea6aa78a58632a4e89113 | refs/heads/master | 2023-08-01T10:55:52.596575 | 2021-09-20T19:26:33 | 2021-09-20T19:26:33 | 217,045,140 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from pathlib import Path
# Job-control configuration consumed by the cosmic submission tooling.
SCRIPT_PATH = 'cmorph_extract_asia.py'
BASEDIR = Path('/gws/nopw/j04/cosmic/mmuetz/data/cmorph_data')
# One extraction job per CMORPH data year (1998..2019).
years = range(1998, 2020)
CONFIG_KEYS = years
# bsub (LSF) submission options applied to every job.
BSUB_KWARGS = {
    'job_name': 'cmorph_cv',
    'queue': 'new_users',
    'max_runtime': '00:30',
}
SCRIPT_ARGS = {}
# Map each config key (year as string) to the year argument for the script.
for year in CONFIG_KEYS:
    SCRIPT_ARGS[str(year)] = year
| [
"markmuetz@gmail.com"
] | markmuetz@gmail.com |
5b6406cdc05d879e8f344ee3f33b1933286a85aa | d3e51b088f77ccd7ad21393136731667c4c91282 | /doc/source/usage_np.py | 5271c67d9107890a5a8b6d2ee4b918af7cc62567 | [
"MIT"
] | permissive | ForeverWintr/function-pipe | 2283e99902cdfd30b5ebdb26c928a545cdc36968 | 36f8653dbc6916c6e60d4a405a547a5a3ddf395f | refs/heads/master | 2021-07-04T03:32:27.723479 | 2017-01-24T17:13:50 | 2017-01-24T17:13:50 | 80,170,802 | 0 | 0 | null | 2017-01-27T00:56:12 | 2017-01-27T00:56:12 | null | UTF-8 | Python | false | false | 5,604 | py |
import sys
import argparse
from functools import reduce
import numpy as np
import function_pipe as fpn
class PixelFontInput(fpn.PipeNodeInput):
    """Shared pipeline input: base glyph shape, pixel character and scale."""
    # Unscaled glyph size (rows, cols).
    SHAPE = (5,5)
    def __init__(self, pixel='*', scale=1):
        super().__init__()
        self.scale = scale
        self.pixel = pixel
@fpn.pipe_node
def frame(**kwargs):
    """Create an empty (all-False) glyph canvas scaled up from SHAPE."""
    pfi = kwargs[fpn.PN_INPUT]
    shape = tuple(s * pfi.scale for s in pfi.SHAPE)
    return np.zeros(shape=shape, dtype=bool)
@fpn.pipe_node
def v_line(**kwargs):
    """Return a copy of the predecessor canvas with its left column set."""
    pfi = kwargs[fpn.PN_INPUT]
    m = kwargs[fpn.PREDECESSOR_RETURN].copy()
    m[:, slice(0, pfi.scale)] = True
    return m
@fpn.pipe_node
def h_line(**kwargs):
    """Return a copy of the predecessor canvas with its top row set."""
    pfi = kwargs[fpn.PN_INPUT]
    m = kwargs[fpn.PREDECESSOR_RETURN].copy()
    m[slice(0, pfi.scale), :] = True
    return m
@fpn.pipe_node_factory
def v_shift(steps, **kwargs):
    """Roll the canvas vertically by `steps` glyph cells (wraps around)."""
    # Shifting a full-height vertical line vertically is a no-op; reject it.
    if kwargs[fpn.PREDECESSOR_PN].unwrap == v_line.unwrap:
        raise Exception('cannot v_shift a v_line')
    pfi = kwargs[fpn.PN_INPUT]
    return np.roll(kwargs[fpn.PREDECESSOR_RETURN], pfi.scale * steps, axis=0)
@fpn.pipe_node_factory
def h_shift(steps, **kwargs):
    """Roll the canvas horizontally by `steps` glyph cells (wraps around)."""
    # Shifting a full-width horizontal line horizontally is a no-op; reject it.
    if kwargs[fpn.PREDECESSOR_PN].unwrap == h_line.unwrap:
        raise Exception('cannot h_shift an h_line')
    pfi = kwargs[fpn.PN_INPUT]
    return np.roll(kwargs[fpn.PREDECESSOR_RETURN], pfi.scale * steps, axis=1)
@fpn.pipe_node_factory
def flip(*coords, **kwargs):
    """Invert the pixels of the given (x, y) glyph cells."""
    pfi = kwargs[fpn.PN_INPUT]
    m = kwargs[fpn.PREDECESSOR_RETURN].copy()
    for coord in coords: # x, y pairs
        start = [i * pfi.scale for i in coord]
        end = [i + pfi.scale for i in start]
        # Rows are indexed by y, columns by x.
        iloc = slice(start[1], end[1]), slice(start[0], end[0])
        m[iloc] = ~m[iloc]
    return m
@fpn.pipe_node_factory
def union(*args, **kwargs):
    """Element-wise OR of all given canvases."""
    return reduce(np.logical_or, args)
@fpn.pipe_node_factory
def intersect(*args, **kwargs):
    """Element-wise AND of all given canvases."""
    return reduce(np.logical_and, args)
@fpn.pipe_node_factory
def concat(*args, **kwargs):
    """Join canvases horizontally with a one-cell-wide blank spacer between."""
    pfi = kwargs[fpn.PN_INPUT]
    space = np.zeros(shape=(pfi.SHAPE[0] * pfi.scale, 1 * pfi.scale),
            dtype=bool)
    concat = lambda x, y: np.concatenate((x, space, y), axis=1)
    return reduce(concat, args)
@fpn.pipe_node
def display(**kwargs):
    """Print the canvas (pixel char for True, space for False); pass it on."""
    pfi = kwargs[fpn.PN_INPUT]
    m = kwargs[fpn.PREDECESSOR_RETURN]
    for row in m:
        for pixel in row:
            if pixel:
                print(pfi.pixel, end='')
            else:
                print(' ', end='')
        print()
    return m
# font based on http://www.dafont.com/visitor.font
chars = {
'_' : frame,
'.' : frame | flip((2,4)),
'p' : union(
frame | v_line,
frame | h_line,
frame | h_line | v_shift(2),
) | flip((4,0), (4,1)),
'y' : (frame | h_line | v_shift(2) |
flip((0,0), (0,1), (2,3), (2,4), (4,0), (4,1))),
'0' : union(
frame | v_line,
frame | v_line | h_shift(-1),
frame | h_line,
frame | h_line | v_shift(-1),
),
'1' : frame | v_line | h_shift(2) | flip((1,0)),
'2' : union(
frame | h_line,
frame | h_line | v_shift(2),
frame | h_line | v_shift(4),
) | flip((4, 1), (0, 3)),
'3' : union(
frame | h_line,
frame | h_line | v_shift(-1),
frame | v_line | h_shift(4),
) | flip((2, 2), (3, 2)),
'4' : union(
frame | h_line | v_shift(2),
frame | v_line | h_shift(-1),
) | flip((0, 0), (0, 1)),
'5' : union(
frame | h_line,
frame | h_line | v_shift(2),
frame | h_line | v_shift(-1),
) | flip((0, 1), (4, 3)),
'6' : union(
frame | h_line,
frame | h_line | v_shift(2),
frame | h_line | v_shift(-1),
frame | v_line,
) | flip((4, 3)),
#---------------------------------------------------------------------------
'a' : union(
frame | v_line,
frame | v_line | h_shift(-1),
frame | h_line,
frame | h_line | v_shift(2),
),
'b' : union(
frame | v_line,
frame | v_line | h_shift(-1),
frame | h_line,
frame | h_line | v_shift(-1),
frame | h_line | v_shift(2),
) | flip((4,0), (4,4)),
'h' : union(
frame | v_line,
frame | v_line | h_shift(-1),
frame | h_line | v_shift(-3),
),
'i' : union(
frame | h_line,
frame | h_line | v_shift(-1),
frame | v_line | h_shift(2),
),
'o' : union(
frame | v_line,
frame | v_line | h_shift(-1),
frame | h_line,
frame | h_line | v_shift(-1),
),
}
def msg_display_pipeline(msg):
get_char = lambda char: chars.get(char.lower(), chars['_'])
return concat(*tuple(map(get_char, msg))) | display
def version_banner(args):
p = argparse.ArgumentParser(
description='Display the Python version in a banner',
)
p.add_argument('--pixel', default='*',
help=('Set the character used for each pixel of the banner.')
)
p.add_argument('--scale', default=1, type=int,
help=('Set the pixel scale for the banner.')
)
ns = p.parse_args(args)
assert len(ns.pixel) == 1
assert ns.scale > 0
# get pipeline function
msg = 'py%s.%s.%s' % sys.version_info[:3]
f = msg_display_pipeline(msg)
pfi = PixelFontInput(pixel=ns.pixel, scale=ns.scale)
f[pfi]
if __name__ == '__main__':
version_banner(sys.argv[1:])
| [
"ariza@flexatone.com"
] | ariza@flexatone.com |
239da3b98f1e473cb137fac0bdb1d421e1cd0590 | b394bb6bd3e8848688b525f55e82962f152c1bb3 | /demos/upload/linear_systems/Elimination Matrices II.py | c988e68a06b987615742f555122112da1b5d72cc | [] | no_license | lukeolson/cs450-f20-demos | 02c2431d7696348cf9ca1ab67bdd5c44a97ac38b | 040e7dfa15c68f7f426cf69655cb600926f9f626 | refs/heads/master | 2023-01-22T19:12:33.394521 | 2020-12-03T19:48:18 | 2020-12-03T19:48:18 | 288,542,898 | 5 | 10 | null | 2020-10-05T19:39:07 | 2020-08-18T19:13:52 | null | UTF-8 | Python | false | false | 502 | py | #!/usr/bin/env python
# coding: utf-8
# # Behavior of Elimination Matrices
# In[3]:
import numpy as np
# In[30]:
n = 4
# ----------------
# Let's create some elimination matrices:
# In[40]:
M1 = np.eye(n)
M1[1,0] = 0.5
M1
# In[41]:
M2 = np.eye(n)
M2[3,0] = 4
M2
# In[42]:
M3 = np.eye(n)
M3[2,1] = 1.3
M3
# -------------------
# Now play around with them:
# In[43]:
M1.dot(M2)
# In[44]:
M2.dot(M1)
# In[45]:
M1.dot(M2).dot(M3)
# BUT:
# In[47]:
M3.dot(M1).dot(M2)
| [
"luke.olson@gmail.com"
] | luke.olson@gmail.com |
1ab2b623e06bba1dcbe4c824cacaa054bbb4d5a7 | 288952acfb81b217ac9cdc920c65d00aad1146c4 | /vendor/views.py | 6af0d4958fc56c1d2f724cd0268e0317aebb1e4c | [] | no_license | turamant/CompShop | 054e16cf929976b7246897cdfdcd6bc5dc984bbe | b30e17e6eabfa1f4b4a5ccbb74da81b53a926f82 | refs/heads/main | 2023-09-03T12:03:02.833691 | 2021-10-28T15:49:22 | 2021-10-28T15:49:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from django.contrib.auth import login
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import redirect, render
from vendor.models import Vendor
def become_vendor(request):
    """Register a new user account and attach a Vendor profile to it.

    GET renders an empty sign-up form.  POST validates the submitted
    form, saves the new user, logs them in, creates the matching
    ``Vendor`` row, and redirects to the front page.  On validation
    errors the bound form is re-rendered with its error messages.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            # The Vendor row only needs to exist; the returned instance
            # was never used, so don't bind it to an unused local.
            Vendor.objects.create(name=user.username, created_by=user)
            return redirect('frontpage')
    else:
        form = UserCreationForm()
    return render(request, 'vendor/become_vendor.html', {'form': form})
| [
"tur1amant@gmail.com"
] | tur1amant@gmail.com |
01e9ab4a6e000a580c29b7ed2c47633aa2770d19 | 5d34003423b4bcf641cb31b3d58c062d2011c7b7 | /venv/lib/python3.6/site-packages/panflute/utils.py | 73d5795605bcf6ed4e139db515ca7db7d58a0bc2 | [] | no_license | feiwl/Coding | a9f84cb867f7a84f0924b848a412dc1bedbb7d84 | 85973fe3d839b65f4f0b73c35ca0d0134588a76d | refs/heads/main | 2023-02-17T21:54:04.441162 | 2021-01-07T04:25:19 | 2021-01-07T04:25:19 | 327,518,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,463 | py | """
Auxiliary functions that have no dependencies
"""
# ---------------------------
# Imports
# ---------------------------
from collections import OrderedDict
import sys
import os.path as p
from importlib import import_module
# ---------------------------
# Functions
# ---------------------------
def get_caller_name():
    '''Get the name of the calling Element.

    Walks up the call stack looking for the nearest ``__init__`` frame
    whose ``self`` is not one of the internal *Container classes, and
    returns that class name.  Falls back to ``'Panflute'`` when the
    stack is exhausted without finding one.
    '''
    # References:
    # https://jugad2.blogspot.com/2015/09/find-caller-and-callers-caller-of.html
    # https://stackoverflow.com/a/47956089/3977107
    # https://stackoverflow.com/a/11799376/3977107
    pos = 1
    while True:
        pos += 1
        try:
            # sys._getframe raises ValueError once `pos` is deeper than
            # the actual call stack -- that is the loop's exit condition.
            callingframe = sys._getframe(pos)
        except ValueError:
            return 'Panflute'
        #print(pos, callingframe.f_code.co_name, file=sys.stderr)
        if callingframe.f_code.co_name == '__init__':
            class_name = callingframe.f_locals['self'].__class__.__name__
            # Skip helper container classes and keep climbing until a
            # real Element's __init__ is found.
            if 'Container' not in class_name:
                return class_name
def check_type(value, oktypes):
    """Return *value* if it is an instance of *oktypes*, else raise TypeError.

    Zero-argument callables are invoked first, which allows writing
    e.g. ``Space`` instead of ``Space()``.

    Raises
    ------
    TypeError
        If the (possibly called) value is not an instance of *oktypes*;
        the message names the Element that received the bad value.
    """
    # This allows 'Space' instead of 'Space()'
    if callable(value):
        value = value()
    if isinstance(value, oktypes):
        return value
    # Invalid type: build a readable error naming the offending Element.
    caller = get_caller_name()
    tag = type(value).__name__
    msg = '\n\nElement "{}" received "{}" but expected {}\n'.format(caller, tag, oktypes)
    raise TypeError(msg)
def check_group(value, group):
    """Return *value* unchanged if it is a member of *group*.

    Raises
    ------
    TypeError
        If *value* does not belong to *group*.
    """
    if value in group:
        return value
    raise TypeError(
        'element {} not in group {}'.format(type(value).__name__, repr(group)))
def encode_dict(tag, content):
    """Build the order-preserving ``{"t": tag, "c": content}`` mapping
    used by the pandoc JSON representation."""
    encoded = OrderedDict()
    encoded["t"] = tag
    encoded["c"] = content
    return encoded
# ---------------------------
# Classes
# ---------------------------
class ContextImport:
    """
    Context manager that imports a module, optionally with an extra
    directory temporarily prepended to ``sys.path``.

    Example:

        >>> # /path/dir/fi.py
        >>> with ContextImport('/path/dir/fi.py') as module:
        >>>     # prepends '/path/dir' to sys.path
        >>>     # module = import_module('fi')
        >>>     module.main()

        >>> with ContextImport('dir.fi', '/path') as module:
        >>>     # prepends '/path' to sys.path
        >>>     # module = import_module('dir.fi')
        >>>     module.main()
    """

    def __init__(self, module, extra_dir=None):
        """
        :param module: str
            module spec for import, or a file path (only the basename
            without its ``.py`` suffix is imported)
        :param extra_dir: str or None
            directory to prepend to ``sys.path`` while inside the
            context.  When None, it defaults to the directory part of
            *module* (if any); a plain module spec leaves sys.path
            untouched.
        """
        base = p.basename(module)
        self.module = base[:-3] if base.endswith('.py') else base
        if extra_dir is None and module != base:
            extra_dir = p.dirname(module)
        self.extra_dir = extra_dir

    def __enter__(self):
        if self.extra_dir is not None:
            sys.path.insert(0, self.extra_dir)
        return import_module(self.module)

    def __exit__(self, exc_type, exc_value, traceback):
        if self.extra_dir is not None:
            sys.path.pop(0)
| [
"feiwl8378@163.com"
] | feiwl8378@163.com |
65b3d0fc12f1eea856eb29b11f3d9dbbd33601c2 | 65ffed7634bb8f4fdb063d7c1baf2f0028c3bea4 | /NLI-VC-Thesaurus/lib/python3.6/site-packages/numpy/lib/nanfunctions.py | cfbdba7bbb1af92b23a8fee55997d795b290aae1 | [] | no_license | ygherman/NLI-VC-Thesaurus | b94325dcbdf25d9756ed54e8b1414568e8dc4250 | 9c1355447b7a885cff7b1a76a0124ddf9bc4cca6 | refs/heads/master | 2021-08-16T22:52:15.818540 | 2017-11-20T13:07:50 | 2017-11-20T13:08:24 | 108,041,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,823 | py | """
Functions that ignore NaN.
Functions
---------
- `nanmin` -- minimum non-NaN value
- `nanmax` -- maximum non-NaN value
- `nanargmin` -- index of minimum non-NaN value
- `nanargmax` -- index of maximum non-NaN value
- `nansum` -- sum of non-NaN values
- `nanprod` -- product of non-NaN values
- `nancumsum` -- cumulative sum of non-NaN values
- `nancumprod` -- cumulative product of non-NaN values
- `nanmean` -- mean of non-NaN values
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.lib.function_base import _ureduce as _ureduce
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
'nancumsum', 'nancumprod'
]
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
a = np.array(a, subok=True, copy=True)
if a.dtype == np.object_:
# object arrays do not support `isnan` (gh-9009), so make a guess
mask = a != a
elif issubclass(a.dtype.type, np.inexact):
mask = np.isnan(a)
else:
mask = None
if mask is not None:
np.copyto(a, val, where=mask)
return a, mask
def _copyto(a, val, mask):
"""
Replace values in `a` with NaN where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
"""
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
is done in place. If `a` is a scalar, then its type is preserved in the
output. If out is None, then then a is used instead so that the
division is in place. Note that this is only called with `a` an inexact
type.
Parameters
----------
a : {ndarray, numpy scalar}
Numerator. Expected to be of inexact type but not checked.
b : {ndarray, numpy scalar}
Denominator.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
Returns
-------
ret : {ndarray, numpy scalar}
The return value is a/b. If `a` was an ndarray the division is done
in place. If `a` is a numpy scalar, the division preserves its type.
"""
with np.errstate(invalid='ignore', divide='ignore'):
if isinstance(a, np.ndarray):
if out is None:
return np.divide(a, b, out=a, casting='unsafe')
else:
return np.divide(a, b, out=out, casting='unsafe')
else:
if out is None:
return a.dtype.type(a / b)
else:
# This is questionable, but currently a numpy scalar can
# be output to a zero dimensional array.
return np.divide(a, b, out=out, casting='unsafe')
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Return the minimum of an array, or along an axis, ignoring NaNs.

    When an all-NaN slice is encountered, a ``RuntimeWarning`` is
    raised and NaN is returned for that slice.  For integer input this
    is equivalent to ``np.min``.

    Parameters
    ----------
    a : array_like
        Array whose minimum is desired; converted if not already one.
    axis : int, optional
        Axis along which to take the minimum; by default the flattened
        array is used.
    out : ndarray, optional
        Alternate output array with the expected output's shape; the
        type is cast if necessary.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  Any non-default value is forwarded to
        the reduction of ndarray sub-classes, which may raise if they
        do not implement `keepdims`.

    Returns
    -------
    nanmin : ndarray
        Same dtype as `a`, with `axis` removed (an ndarray scalar for
        0-d input or ``axis=None``).

    See Also
    --------
    nanmax, amin, fmin, minimum, isnan, isfinite
    """
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is np.ndarray and a.dtype != np.object_:
        # Fast path: np.fmin already ignores NaNs.  Not safe for
        # ndarray subclasses or object arrays (gh-9009, gh-8975).
        res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
        if np.isnan(res).any():
            warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
        return res
    # Safe path: mask NaNs with +inf so they can never win the minimum.
    a, mask = _replace_nan(a, +np.inf)
    res = np.amin(a, axis=axis, out=out, **kwargs)
    if mask is None:
        return res
    # Slices that were entirely NaN must come back as NaN, with a warning.
    mask = np.all(mask, axis=axis, **kwargs)
    if np.any(mask):
        res = _copyto(res, np.nan, mask)
        warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
    return res
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Return the maximum of an array, or along an axis, ignoring NaNs.

    When an all-NaN slice is encountered, a ``RuntimeWarning`` is
    raised and NaN is returned for that slice.  For integer input this
    is equivalent to ``np.max``.

    Parameters
    ----------
    a : array_like
        Array whose maximum is desired; converted if not already one.
    axis : int, optional
        Axis along which to take the maximum; by default the flattened
        array is used.
    out : ndarray, optional
        Alternate output array with the expected output's shape; the
        type is cast if necessary.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  Any non-default value is forwarded to
        the reduction of ndarray sub-classes, which may raise if they
        do not implement `keepdims`.

    Returns
    -------
    nanmax : ndarray
        Same dtype as `a`, with `axis` removed (an ndarray scalar for
        0-d input or ``axis=None``).

    See Also
    --------
    nanmin, amax, fmax, maximum, isnan, isfinite
    """
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is np.ndarray and a.dtype != np.object_:
        # Fast path: np.fmax already ignores NaNs.  Not safe for
        # ndarray subclasses or object arrays (gh-9009, gh-8975).
        res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
        if np.isnan(res).any():
            warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
        return res
    # Safe path: mask NaNs with -inf so they can never win the maximum.
    a, mask = _replace_nan(a, -np.inf)
    res = np.amax(a, axis=axis, out=out, **kwargs)
    if mask is None:
        return res
    # Slices that were entirely NaN must come back as NaN, with a warning.
    mask = np.all(mask, axis=axis, **kwargs)
    if np.any(mask):
        res = _copyto(res, np.nan, mask)
        warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
    return res
def nanargmin(a, axis=None):
    """
    Indices of the minimum values along an axis, ignoring NaNs.

    For all-NaN slices a ``ValueError`` is raised.  Warning: the
    results cannot be trusted if a slice contains only NaNs and Infs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate; flattened input by default.

    Returns
    -------
    index_array : ndarray
        Array of indices, or a single index value.

    See Also
    --------
    argmin, nanargmax
    """
    # NaNs become +inf, so they can never be selected as the minimum --
    # unless the whole slice was NaN, which is detected below.
    filled, nan_mask = _replace_nan(a, np.inf)
    indices = np.argmin(filled, axis=axis)
    if nan_mask is not None and np.any(np.all(nan_mask, axis=axis)):
        raise ValueError("All-NaN slice encountered")
    return indices
def nanargmax(a, axis=None):
    """
    Indices of the maximum values along an axis, ignoring NaNs.

    For all-NaN slices a ``ValueError`` is raised.  Warning: the
    results cannot be trusted if a slice contains only NaNs and -Infs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate; flattened input by default.

    Returns
    -------
    index_array : ndarray
        Array of indices, or a single index value.

    See Also
    --------
    argmax, nanargmin
    """
    # NaNs become -inf, so they can never be selected as the maximum --
    # unless the whole slice was NaN, which is detected below.
    filled, nan_mask = _replace_nan(a, -np.inf)
    indices = np.argmax(filled, axis=axis)
    if nan_mask is not None and np.any(np.all(nan_mask, axis=axis)):
        raise ValueError("All-NaN slice encountered")
    return indices
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Sum of array elements over a given axis, treating NaNs as zero.

    In NumPy versions <= 1.8.0, NaN was returned for slices that are
    all-NaN or empty; later versions return zero.  Note that if both
    +inf and -inf are present the result is NaN.

    Parameters
    ----------
    a : array_like
        Array whose sum is desired; converted if not already one.
    axis : int, optional
        Axis along which to sum; by default the flattened array is
        summed.
    dtype : data-type, optional
        Type of the result and of the accumulator; defaults to the
        dtype of `a` (upcast to the platform (u)intp for narrower
        integer types).  Must be inexact for inexact input.
    out : ndarray, optional
        Alternate output array with the expected output's shape.
        Casting NaN to integer can yield unexpected results.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  Any non-default value is forwarded to
        the `sum` method of ndarray sub-classes.

    Returns
    -------
    nansum : ndarray
        The sum, with NaNs counted as zero.

    See Also
    --------
    numpy.sum, isnan, isfinite
    """
    # Zero is the additive identity: replacing NaNs with 0 and summing
    # normally yields the NaN-ignoring sum.
    filled, _ = _replace_nan(a, 0)
    return np.sum(filled, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Product of array elements over a given axis, treating NaNs as one.

    One is returned for slices that are all-NaN or empty.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    a : array_like
        Array whose product is desired; converted if not already one.
    axis : int, optional
        Axis along which to multiply; by default the flattened array
        is used.
    dtype : data-type, optional
        Type of the result and of the accumulator; defaults to the
        dtype of `a` (upcast to the platform (u)intp for narrower
        integer types).  Must be inexact for inexact input.
    out : ndarray, optional
        Alternate output array with the expected output's shape.
        Casting NaN to integer can yield unexpected results.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against the original array.

    Returns
    -------
    nanprod : ndarray
        The product, with NaNs counted as one.

    See Also
    --------
    numpy.prod, isnan
    """
    # One is the multiplicative identity: replacing NaNs with 1 and
    # multiplying normally yields the NaN-ignoring product.
    filled, _ = _replace_nan(a, 1)
    return np.prod(filled, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nancumsum(a, axis=None, dtype=None, out=None):
    """
    Cumulative sum over a given axis, treating NaNs as zero.

    The running sum does not change when NaNs are encountered; leading
    NaNs are replaced by zeros, and zeros are returned for slices that
    are all-NaN or empty.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which to accumulate; by default the flattened array
        is used.
    dtype : dtype, optional
        Type of the result and of the accumulator; defaults to the
        dtype of `a` (upcast to the default platform integer for
        narrower integer types).
    out : ndarray, optional
        Alternate output array with the expected output's shape and
        buffer length; the type is cast if necessary.

    Returns
    -------
    nancumsum : ndarray
        Same size as `a`, and same shape as `a` when `axis` is not
        None or `a` is 1-d.

    See Also
    --------
    numpy.cumsum, isnan
    """
    # Zeroing the NaNs leaves the running sum untouched at those spots.
    filled, _ = _replace_nan(a, 0)
    return np.cumsum(filled, axis=axis, dtype=dtype, out=out)
def nancumprod(a, axis=None, dtype=None, out=None):
    """
    Cumulative product over a given axis, treating NaNs as one.

    The running product does not change when NaNs are encountered;
    leading NaNs are replaced by ones, and ones are returned for
    slices that are all-NaN or empty.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which to accumulate; by default the input is
        flattened.
    dtype : dtype, optional
        Type of the result and of the accumulator; defaults to the
        dtype of `a` (upcast to the default platform integer for
        narrower integer types).
    out : ndarray, optional
        Alternate output array with the expected output's shape and
        buffer length; the resulting values are cast if necessary.

    Returns
    -------
    nancumprod : ndarray
        A new array holding the result, unless `out` was given.

    See Also
    --------
    numpy.cumprod, isnan
    """
    # Setting NaNs to one leaves the running product untouched there.
    filled, _ = _replace_nan(a, 1)
    return np.cumprod(filled, axis=axis, dtype=dtype, out=out)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Arithmetic mean along the specified axis, ignoring NaNs.

    NaN is returned (with a `RuntimeWarning`) for slices containing
    only NaNs.  `float64` intermediates and return values are used for
    integer input.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Array whose mean is desired; converted if not already one.
    axis : int, optional
        Axis along which to average; by default the flattened array is
        averaged.
    dtype : data-type, optional
        Type to use in computing the mean; must be inexact when `a` is
        inexact.
    out : ndarray, optional
        Alternate output array with the expected output's shape; must
        be inexact when `a` is inexact.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  Any non-default value is forwarded to
        the `mean`/`sum` methods of ndarray sub-classes.

    Returns
    -------
    m : ndarray
        The NaN-ignoring mean: the sum of the non-NaN elements divided
        by their count, per slice.

    Raises
    ------
    TypeError
        If `a` is inexact but `dtype` or `out` is not.

    See Also
    --------
    average, mean, var, nanvar
    """
    arr, mask = _replace_nan(a, 0)
    if mask is None:
        # NaNs impossible (integer/bool input): a plain mean suffices.
        return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)

    if dtype is not None:
        dtype = np.dtype(dtype)
        if not issubclass(dtype.type, np.inexact):
            raise TypeError("If a is inexact, then dtype must be inexact")
    if out is not None and not issubclass(out.dtype.type, np.inexact):
        raise TypeError("If a is inexact, then out must be inexact")

    # Sum with NaNs zeroed, then divide by the per-slice count of
    # non-NaN entries; 0/0 yields NaN for all-NaN slices.
    cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims)
    tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    avg = _divide_by_count(tot, cnt, out=out)

    if (cnt == 0).any():
        # NaN is the only possible bad value, so warning is the only
        # further action needed for empty slices.
        warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
    return avg
def _nanmedian1d(arr1d, overwrite_input=False):
"""
Private function for rank 1 arrays. Compute the median ignoring NaNs.
See nanmedian for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
return np.nan
elif s.size == 0:
return np.median(arr1d, overwrite_input=overwrite_input)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
return np.median(x[:-s.size], overwrite_input=True)
def _nanmedian(a, axis=None, out=None, overwrite_input=False):
    """
    NaN-ignoring median without extended-axis or keepdims support.

    Those features are layered on top of this function via _ureduce.
    See nanmedian for parameter usage.
    """
    if axis is None or a.ndim == 1:
        med = _nanmedian1d(a.ravel(), overwrite_input)
        if out is None:
            return med
        out[...] = med
        return out
    # For short reduction axes, sort+indexing beats the high overhead
    # of apply_along_axis (benchmarked with shuffled (50, 50, x) arrays
    # containing a few NaNs).
    if a.shape[axis] < 600:
        return _nanmedian_small(a, axis, out, overwrite_input)
    result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
    if out is not None:
        out[...] = result
    return result
def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
"""
sort + indexing median, faster for small medians along multiple
dimensions due to the high overhead of apply_along_axis
see nanmedian for parameter usage
"""
a = np.ma.masked_array(a, np.isnan(a))
m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
for i in range(np.count_nonzero(m.mask.ravel())):
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
if out is not None:
out[...] = m.filled(np.nan)
return out
return m.filled(np.nan)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
    """
    Compute the median along the specified axis, while ignoring NaNs.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed.  By default the
        median of the flattened array is computed.  A sequence of axes is
        supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output (the dtype is cast if necessary).
    overwrite_input : bool, optional
        If True, the memory of `a` may be reused (and its contents
        destroyed) by the computation; `a` must then already be an
        ndarray.  The input should be treated as undefined afterwards.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against the original `a`.  A non-default value is
        forwarded (for empty inputs) to the underlying array's `mean`,
        which must therefore accept a `keepdims` kwarg or a RuntimeError
        is raised.

    Returns
    -------
    median : ndarray
        A new array holding the result, unless `out` was given, in which
        case `out` is returned.  Integer input and floats smaller than
        ``float64`` produce ``np.float64`` output; otherwise the output
        dtype matches the input.

    See Also
    --------
    mean, median, percentile

    Notes
    -----
    For a vector ``V`` of length ``N``, the median is the middle value of
    a sorted copy of ``V`` when ``N`` is odd, and the mean of the two
    middle values when ``N`` is even -- computed over the non-NaN elements
    only.

    Examples
    --------
    >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
    >>> a[0, 1] = np.nan
    >>> np.median(a)
    nan
    >>> np.nanmedian(a)
    3.0
    >>> np.nanmedian(a, axis=0)
    array([ 6.5,  2. ,  2.5])
    """
    a = np.asanyarray(a)
    # Empty inputs trip up apply_along_axis inside _nanmedian, so dispatch
    # them to nanmean, which already produces the expected empty result.
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)
    reduced, kd_shape = _ureduce(a, func=_nanmedian, axis=axis, out=out,
                                 overwrite_input=overwrite_input)
    if keepdims and keepdims is not np._NoValue:
        return reduced.reshape(kd_shape)
    return reduced
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                  interpolation='linear', keepdims=np._NoValue):
    """
    Compute the qth percentile of the data along the specified axis,
    while ignoring NaN values.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile(s) to compute; each must lie in [0, 100] inclusive.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the percentiles are computed.  By default
        the flattened array is used.  A sequence of axes is supported
        since version 1.9.0.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output (the dtype is cast if necessary).
    overwrite_input : bool, optional
        If True, the memory of `a` may be reused (and its contents
        destroyed) by the computation; make no assumptions about `a`
        afterwards.  Has no effect unless `a` is already an array.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Strategy used when the desired quantile falls between two data
        points ``i < j``:

        * linear: ``i + (j - i) * fraction`` with ``fraction`` the
          fractional part of the index surrounded by ``i`` and ``j``.
        * lower: ``i``.
        * higher: ``j``.
        * nearest: ``i`` or ``j``, whichever is nearest.
        * midpoint: ``(i + j) / 2``.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  A non-default value is forwarded (for
        empty inputs) to the underlying array's `mean`, which must then
        accept a `keepdims` kwarg or a RuntimeError is raised.

    Returns
    -------
    percentile : scalar or ndarray
        A scalar for a single percentile with ``axis=None``; otherwise an
        array whose first axis enumerates the percentiles and whose
        remaining axes are those left after reducing `a`.  Integer input
        and floats smaller than ``float64`` produce ``float64`` output.
        `out` is returned when given.

    See Also
    --------
    nanmean, nanmedian, percentile, median, mean

    Notes
    -----
    The ``q``-th percentile of a vector ``V`` is the value ``q/100`` of
    the way from the minimum to the maximum in a sorted copy of ``V``,
    with `interpolation` deciding ties.  ``q=50`` is the median, ``q=0``
    the minimum and ``q=100`` the maximum.

    Examples
    --------
    >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
    >>> a[0][1] = np.nan
    >>> np.percentile(a, 50)
    nan
    >>> np.nanpercentile(a, 50)
    3.5
    >>> np.nanpercentile(a, 50, axis=0)
    array([ 6.5,  2. ,  2.5])
    """
    a = np.asanyarray(a)
    q = np.asanyarray(q)
    # Empty inputs trip up apply_along_axis inside _nanpercentile, so
    # dispatch them to nanmean, which handles the empty case correctly.
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)
    reduced, kd_shape = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out,
                                 overwrite_input=overwrite_input,
                                 interpolation=interpolation)
    if keepdims and keepdims is not np._NoValue:
        # With a vector q, the percentile axis leads the kept dimensions.
        shape = kd_shape if q.ndim == 0 else [len(q)] + kd_shape
        return reduced.reshape(shape)
    return reduced
def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                   interpolation='linear'):
    """
    NaN-ignoring percentile without extended-axis or keepdims support.

    `nanpercentile` layers those features on top via `_ureduce`; see it
    for parameter semantics.
    """
    if axis is None or a.ndim == 1:
        result = _nanpercentile1d(a.ravel(), q, overwrite_input, interpolation)
    else:
        result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis deposits each slice's percentiles along the
        # collapsed axis; move that axis to the front to match
        # np.percentile's convention when q is a sequence.
        if q.ndim != 0:
            result = np.rollaxis(result, axis)
    if out is not None:
        out[...] = result
    return result
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
"""
Private function for rank 1 arrays. Compute percentile ignoring
NaNs.
See nanpercentile for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
if q.ndim == 0:
return np.nan
else:
return np.nan * np.ones((len(q),))
elif s.size == 0:
return np.percentile(arr1d, q, overwrite_input=overwrite_input,
interpolation=interpolation)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
return np.percentile(x[:-s.size], q, overwrite_input=True,
interpolation=interpolation)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
    """
    Compute the variance along the specified axis, while ignoring NaNs.

    Returns the variance of the array elements, a measure of the spread of
    a distribution. The variance is computed for the flattened array by
    default, otherwise over the specified axis.

    For all-NaN slices or slices with zero degrees of freedom, NaN is
    returned and a `RuntimeWarning` is raised.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Array containing numbers whose variance is desired. If `a` is not an
        array, a conversion is attempted.
    axis : int, optional
        Axis along which the variance is computed. The default is to compute
        the variance of the flattened array.
    dtype : data-type, optional
        Type to use in computing the variance. For arrays of integer type
        the default is `float32`; for arrays of float types it is the same as
        the array type.
    out : ndarray, optional
        Alternate output array in which to place the result. It must have
        the same shape as the expected output, but the type is cast if
        necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used in the calculation is
        ``N - ddof``, where ``N`` represents the number of non-NaN
        elements. By default `ddof` is zero.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.

    Returns
    -------
    variance : ndarray, see dtype parameter above
        If `out` is None, return a new array containing the variance,
        otherwise return a reference to the output array. If ddof is >= the
        number of non-NaN elements in a slice or the slice contains only
        NaNs, then the result for that slice is NaN.

    See Also
    --------
    std : Standard deviation
    mean : Average
    var : Variance while not ignoring NaNs
    nanstd, nanmean
    numpy.doc.ufuncs : Section "Output arguments"

    Notes
    -----
    The variance is the average of the squared deviations from the mean,
    i.e., ``var = mean(abs(x - x.mean())**2)``.

    The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
    If, however, `ddof` is specified, the divisor ``N - ddof`` is used
    instead. In standard statistical practice, ``ddof=1`` provides an
    unbiased estimator of the variance of a hypothetical infinite
    population. ``ddof=0`` provides a maximum likelihood estimate of the
    variance for normally distributed variables.

    Note that for complex numbers, the absolute value is taken before
    squaring, so that the result is always real and nonnegative.

    For floating-point input, the variance is computed using the same
    precision the input has. Depending on the input data, this can cause
    the results to be inaccurate, especially for `float32` (see example
    below). Specifying a higher-accuracy accumulator using the ``dtype``
    keyword can alleviate this issue.

    For this function to work on sub-classes of ndarray, they must define
    `sum` with the kwarg `keepdims`

    Examples
    --------
    >>> a = np.array([[1, np.nan], [3, 4]])
    >>> np.var(a)
    1.5555555555555554
    >>> np.nanvar(a, axis=0)
    array([ 1.,  0.])
    >>> np.nanvar(a, axis=1)
    array([ 0.,  0.25])
    """
    # Zero out the NaN slots so plain sums can be used; `mask` marks where
    # they were (None means the input contained no NaNs at all).
    arr, mask = _replace_nan(a, 0)
    if mask is None:
        # NaN-free input: the ordinary variance gives the same answer.
        return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
                      keepdims=keepdims)
    if dtype is not None:
        dtype = np.dtype(dtype)
    if dtype is not None and not issubclass(dtype.type, np.inexact):
        raise TypeError("If a is inexact, then dtype must be inexact")
    if out is not None and not issubclass(out.dtype.type, np.inexact):
        raise TypeError("If a is inexact, then out must be inexact")
    # Compute mean
    if type(arr) is np.matrix:
        _keepdims = np._NoValue
    else:
        _keepdims = True
    # we need to special case matrix for reverse compatibility
    # in order for this to work, these sums need to be called with
    # keepdims=True, however matrix now raises an error in this case, but
    # the reason that it drops the keepdims kwarg is to force keepdims=True
    # so this used to work by serendipity.
    cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims)
    avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims)
    avg = _divide_by_count(avg, cnt)  # per-slice mean over non-NaN counts
    # Compute squared deviation from mean.
    np.subtract(arr, avg, out=arr, casting='unsafe')
    # Re-zero the original NaN slots so they contribute nothing below.
    arr = _copyto(arr, 0, mask)
    if issubclass(arr.dtype.type, np.complexfloating):
        # |z|^2 = z * conj(z); take the real part to keep the result real.
        sqr = np.multiply(arr, arr.conj(), out=arr).real
    else:
        sqr = np.multiply(arr, arr, out=arr)
    # Compute variance.
    var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    if var.ndim < cnt.ndim:
        # Subclasses of ndarray may ignore keepdims, so check here.
        cnt = cnt.squeeze(axis)
    dof = cnt - ddof
    var = _divide_by_count(var, dof)
    isbad = (dof <= 0)
    if np.any(isbad):
        warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, stacklevel=2)
        # NaN, inf, or negative numbers are all possible bad
        # values, so explicitly replace them with NaN.
        var = _copyto(var, np.nan, isbad)
    return var
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
    """
    Compute the standard deviation along the specified axis, while
    ignoring NaNs.

    The standard deviation of the non-NaN elements is computed over the
    flattened array by default, otherwise over the given axis.  For
    all-NaN slices or slices with zero degrees of freedom, NaN is
    returned and a `RuntimeWarning` is raised.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Values whose non-NaN standard deviation is computed.
    axis : int, optional
        Axis along which to compute; default is over the flattened array.
    dtype : dtype, optional
        Accumulator type.  Integer input defaults to float64; float input
        keeps its own type.
    out : ndarray, optional
        Alternative output array with the expected output's shape; the
        computed values are cast to its dtype if necessary.
    ddof : int, optional
        Delta Degrees of Freedom: the divisor is ``N - ddof`` with ``N``
        the number of non-NaN elements.  Defaults to zero.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  A non-default value is passed through
        as-is to the sub-class machinery, which must then accept a
        `keepdims` kwarg or a RuntimeError is raised.

    Returns
    -------
    standard_deviation : ndarray, see dtype parameter above.
        A new array (or `out` when given) holding the standard deviation.
        Slices with only NaNs, or with ddof >= the number of non-NaN
        elements, yield NaN.

    See Also
    --------
    var, mean, std
    nanvar, nanmean
    numpy.doc.ufuncs : Section "Output arguments"

    Notes
    -----
    This is the square root of `nanvar`: even with ``ddof=1`` the result
    is not an unbiased estimator of the standard deviation itself.  For
    complex input, the absolute value is squared so the result is real
    and nonnegative.  Precision follows the input dtype; pass a
    higher-accuracy `dtype` to mitigate float32 round-off.

    Examples
    --------
    >>> a = np.array([[1, np.nan], [3, 4]])
    >>> np.nanstd(a)
    1.247219128924647
    >>> np.nanstd(a, axis=0)
    array([ 1.,  0.])
    >>> np.nanstd(a, axis=1)
    array([ 0.,  0.5])
    """
    variance = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                      keepdims=keepdims)
    if isinstance(variance, np.ndarray):
        # In-place square root reuses the variance buffer (which is `out`
        # when one was supplied), so `out` ends up holding the std.
        return np.sqrt(variance, out=variance)
    # Scalar / 0-d path: keep the dtype of the variance result.
    return variance.dtype.type(np.sqrt(variance))
| [
"gh.gherman@gmail.com"
] | gh.gherman@gmail.com |
257e1b6036b865b0f7704c9e85a02415ed2160b6 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/predaci.py | ff955d595760aa90d5826445d9f40489e524f567 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('RogePAV2.py', 2), ('WestJIT.py', 5)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
3858c9e67177860c510d2efa26a438da7c05c325 | 1986f044d6476fab476a9b5eb9a95cc30d6a8eac | /Chapter07/pygal_2.py | 276356af1474708c96e1e1f0a2509f75e8be4717 | [
"MIT"
] | permissive | PacktPublishing/Mastering-Python-Networking | 711f47ecff9ca2fec51f948badff22cd8c73ada4 | 52a2827919db1773f66700f3946390f200bd6dab | refs/heads/master | 2023-02-08T01:39:44.670413 | 2023-01-30T09:03:30 | 2023-01-30T09:03:30 | 82,666,812 | 138 | 127 | MIT | 2020-11-05T11:34:15 | 2017-02-21T10:25:34 | Python | UTF-8 | Python | false | false | 263 | py | #!/usr/bin/env python3
import pygal

# Build a pie chart summarising traffic share per protocol and render it
# to an SVG file.
line_chart = pygal.Pie()
line_chart.title = "Protocol Breakdown"
for protocol, share in (('TCP', 15), ('UDP', 30), ('ICMP', 45), ('Others', 10)):
    line_chart.add(protocol, share)
line_chart.render_to_file('pygal_example_3.svg')
| [
"echou@yahoo.com"
] | echou@yahoo.com |
40842236d29a3fe65789a8875561df71b77fc1bd | 113d0858a2476e5bd1b39ff25e41da33970b5dda | /blendernc/nodes/outputs/BlenderNC_NT_preloader.py | 9dddc69f3a5ec5b4c18f41c9326904028d0d8055 | [
"MIT"
] | permissive | peterhob/blendernc | aa52d387b74720f90950b8fb2df23f041ce25588 | 61eef2670fd299637633060ad5597bc3c6b53a02 | refs/heads/master | 2023-06-13T08:33:59.694357 | 2021-07-05T10:57:51 | 2021-07-05T10:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | #!/usr/bin/env python3
# Imports
import bpy
class BlenderNC_NT_preloader(bpy.types.Node):
    """A netcdf node"""
    # NOTE: the docstring above doubles as the node's description string
    # in Blender's UI, so its text is kept verbatim.
    # Unique identifier Blender uses to register this node type.
    bl_idname = "netCDFPreloadNode"
    # Human-readable name shown in the node editor.
    bl_label = "Load netCDF"
    # Icon identifier.
    bl_icon = "SOUND"
    blb_type = "NETCDF"
    # TODO: this node should accept a datacube as input and cache all of
    # its images on disk for faster import and animation playback.

    def init(self, context):
        # Called when a new node instance is created; sockets would be
        # declared here, but this node defines none yet.
        pass

    def copy(self, node):
        # Called to initialise a node duplicated from an existing one.
        print("Copying from node ", node)

    def free(self):
        # Called just before the node is removed, for cleanup.
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        # Buttons drawn on the node body itself.
        layout.label(text="INFO: Work in progress", icon="INFO")
        # Disabled prototype UI for picking a file/variable/frame range and
        # launching the preloader operator:
        # scene = context.scene
        # if scene.nc_dictionary:
        #     layout.prop(self, "file_name")
        # else:
        #     layout.label(text="No netcdf loaded")
        # if self.file_name:
        #     layout.prop(self, "var_name")
        # if self.var_name:
        #     layout.prop(self, "frame_start")
        #     layout.prop(self, "frame_end")
        #     if self.frame_end > self.frame_start:
        #         op = layout.operator("blendernc.preloader",
        #                              icon="FILE_REFRESH",)
        #         op.file_name = self.file_name
        #         op.var_name = self.var_name
        #         op.frame_start = self.frame_start
        #         op.frame_end = self.frame_end
        #     else:
        #         layout.label(text="Cannot preload!")

    def draw_buttons_ext(self, context, layout):
        # Detail buttons in the sidebar; Blender falls back to
        # draw_buttons when this is not defined, so keep it a no-op.
        pass

    def draw_label(self):
        # Dynamic node label (an explicit user label still overrides it).
        return "Load netCDF"

    def update_value(self, context):
        # Property-update callback: simply re-run the node update.
        self.update()

    def update(self):
        pass
| [
"josue.martinezmoreno@anu.edu.au"
] | josue.martinezmoreno@anu.edu.au |
5aadf807bfd7c7562417684c05b510a369cbab93 | 8d47af9482444b07b52cf44cebcaf4b992df4d09 | /agents/31_StochasticMaxStochasticDeletionPRB/StochasticMaxStochasticDeletionPRB.py | 2e72a6673349262ca21c05cd26f5be8d587ef0ce | [] | no_license | w0lv3r1nix/retro-agents | f4dbce2db558c880b161062796e5397be65bdd10 | c7f93a737dc6c6fc5d8343c099e14bd2bc97aaf1 | refs/heads/master | 2020-08-01T01:19:41.660018 | 2018-06-13T04:28:09 | 2018-06-13T04:28:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,087 | py | import numpy as np
import random
from math import sqrt
from anyrl.rollouts import PrioritizedReplayBuffer
class StochasticMaxStochasticDeletionPRB(PrioritizedReplayBuffer):
    """
    A prioritized replay buffer with stochastic maximum collection,
    stochastic deletion and loss-proportional sampling.
    """
    def __init__(self, capacity, alpha, beta, first_max=1, epsilon=0):
        # capacity: maximum number of stored transitions.
        # alpha / beta / epsilon: standard PER priority exponent,
        #   importance-sampling exponent and priority offset -- presumably
        #   consumed by anyrl's PrioritizedReplayBuffer via
        #   _process_weight; confirm against the anyrl source.
        # first_max: priority used for the very first weight-less adds.
        self.capacity = capacity
        self.alpha = alpha
        self.beta = beta
        self.epsilon = epsilon
        self.transitions = []
        # Ring buffer of per-transition priorities, index-aligned with
        # self.transitions at insertion time.
        self.errors = CustomFloatBuffer(capacity)
        # Priority used when a sample arrives without an explicit weight;
        # presumably updated elsewhere to track the largest weight seen.
        self._max_weight_arg = first_max
    def add_sample(self, sample, init_weight=None):
        """
        Add a sample to the buffer.
        When new samples are added without an explicit initial weight, the
        maximum weight argument ever seen is used. When the buffer is empty,
        first_max is used.
        """
        if init_weight is None:
            new_error = self._process_weight(self._max_weight_arg)
            self.transitions.append(sample)
            self.errors.append(new_error)
        else:
            new_error = self._process_weight(init_weight)
            # "Stochastic max" collection: accept with probability equal to
            # the sample's priority relative to the current maximum.
            # NOTE(review): if self.errors is empty, max() is 0 and this
            # divides by zero -- presumably the first samples are always
            # added with init_weight=None; confirm.
            if random.random() < new_error / self.errors.max():
                self.transitions.append(sample)
                self.errors.append(new_error)
        # Stochastic deletion: evict a transition sampled in inverse
        # proportion to its priority.
        # NOTE(review): only the transition list shrinks here, while
        # self.errors evicts its own oldest entry on append, so transition
        # and error indices can drift out of sync -- verify intended.
        while len(self.transitions) > self.capacity:
            del self.transitions[self.errors.inverse_sample(1)[0]]
class CustomFloatBuffer:
    """A fixed-capacity ring-buffer of positive floats with weighted sampling.

    Values are grouped into bins of ~sqrt(capacity) elements whose sums are
    cached in ``_bin_sums``, so sampling in (inverse) proportion to the
    stored values needs only a two-level categorical draw instead of a full
    prefix-sum scan.  Running min/max/sum are maintained incrementally.
    """
    def __init__(self, capacity, dtype='float64'):
        self._capacity = capacity
        self._start = 0   # raw index of the logically-first (oldest) element
        self._used = 0    # number of live entries
        self._buffer = np.zeros((capacity,), dtype=dtype)
        self._bin_size = int(sqrt(capacity))
        num_bins = capacity // self._bin_size
        if num_bins * self._bin_size < capacity:
            num_bins += 1  # last (short) bin holds the remainder
        self._bin_sums = np.zeros((num_bins,), dtype=dtype)
        # Cached extrema; values are asserted > 0, so 0 means "empty".
        self._min = 0
        self._min_id = 0  # raw buffer index of the current minimum
        self._max = 0

    def append(self, value):
        """
        Add a value to the end of the buffer.
        If the buffer is full, the first (oldest) value is removed.
        """
        idx = (self._start + self._used) % self._capacity
        if self._used < self._capacity:
            self._used += 1
        else:
            self._start = (self._start + 1) % self._capacity
        self._set_idx(idx, value)

    def sample(self, num_values):
        """
        Sample indices (with replacement) in proportion to their value.

        Returns:
          A tuple (indices, probs): logical indices (0 == oldest entry)
          and the per-draw selection probabilities.
        """
        assert self._used >= num_values
        res = []
        probs = []
        bin_probs = self._bin_sums / np.sum(self._bin_sums)
        while len(res) < num_values:
            # Two-level draw: pick a bin by its cached sum, then an
            # element within the bin by its value.
            bin_idx = np.random.choice(len(self._bin_sums), p=bin_probs)
            bin_values = self._bin(bin_idx)
            sub_probs = bin_values / np.sum(bin_values)
            sub_idx = np.random.choice(len(bin_values), p=sub_probs)
            idx = bin_idx * self._bin_size + sub_idx
            res.append(idx)
            probs.append(bin_probs[bin_idx] * sub_probs[sub_idx])
        return (np.array(list(res)) - self._start) % self._capacity, np.array(probs)

    def inverse_sample(self, num_values):
        """
        Sample indices (with replacement) in inverse proportion to their
        value, using the weighting $e^{-x}$.

        Returns:
          Array of logical indices sampled.
        """
        assert self._used >= num_values
        res = []
        e_neg_bin_sums = np.exp(-1 * self._bin_sums)
        bin_probs = e_neg_bin_sums / np.sum(e_neg_bin_sums)
        while len(res) < num_values:
            bin_idx = np.random.choice(len(self._bin_sums), p=bin_probs)
            bin_values = self._bin(bin_idx)
            e_neg_bin_values = np.exp(-1 * bin_values)
            sub_probs = e_neg_bin_values / np.sum(e_neg_bin_values)
            sub_idx = np.random.choice(len(bin_values), p=sub_probs)
            idx = bin_idx * self._bin_size + sub_idx
            res.append(idx)
        return (np.array(list(res)) - self._start) % self._capacity

    def set_value(self, idx, value):
        """Set the value at the given logical index."""
        idx = (idx + self._start) % self._capacity
        self._set_idx(idx, value)

    def min(self):
        """Get the minimum value in the buffer."""
        return self._min

    def max(self):
        """Get the maximum value in the buffer."""
        return self._max

    def min_id(self):
        """Get the index of the minimum value in the buffer.

        NOTE(review): unlike sample()/inverse_sample(), this index is NOT
        shifted by the ring-buffer start offset -- confirm callers expect
        a raw index here.
        """
        return self._min_id

    def sum(self):
        """Get the sum of the values in the buffer."""
        return np.sum(self._bin_sums)

    def _set_idx(self, idx, value):
        """Write `value` at raw index `idx`, updating bin sums and extrema."""
        assert not np.isnan(value)
        assert value > 0
        needs_recompute_min = False
        if self._min == self._buffer[idx]:
            # Overwriting (an occurrence of) the current minimum: a full
            # rescan is required once the new value is in place.
            needs_recompute_min = True
        elif value < self._min:
            self._min = value
            # Bug fix: keep the cached index in sync with the new minimum
            # (previously only the value was updated, leaving _min_id
            # pointing at a stale location).
            self._min_id = idx
        needs_recompute_max = False
        if self._max == self._buffer[idx]:
            needs_recompute_max = True
        elif value > self._max:
            self._max = value
        bin_idx = idx // self._bin_size
        self._buffer[idx] = value
        self._bin_sums[bin_idx] = np.sum(self._bin(bin_idx))
        if needs_recompute_min:
            self._recompute_min()
        if needs_recompute_max:
            self._recompute_max()

    def _bin(self, bin_idx):
        """Return a view of the raw values belonging to bin `bin_idx`."""
        if bin_idx == len(self._bin_sums) - 1:
            return self._buffer[self._bin_size * bin_idx:]
        return self._buffer[self._bin_size * bin_idx: self._bin_size * (bin_idx + 1)]

    def _recompute_min(self):
        """Rescan the live region for the minimum value and its raw index."""
        if self._used < self._capacity:
            self._min_id = np.argmin(self._buffer[:self._used])
        else:
            self._min_id = np.argmin(self._buffer)
        self._min = self._buffer[self._min_id]

    def _recompute_max(self):
        """Rescan the live region for the maximum value."""
        if self._used < self._capacity:
            self._max = np.max(self._buffer[:self._used])
        else:
            self._max = np.max(self._buffer)
| [
"seungjaeryanlee@gmail.com"
] | seungjaeryanlee@gmail.com |
a76830600e020b9c16b8bc42c056f49e358bb5a0 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/functiongraph/mvc/LazySaveableXML.pyi | 0ac3ad1a85c90eb8dc01e18e730a2294cdc70003 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | pyi | from typing import List
import ghidra.app.plugin.core.functiongraph.mvc
import ghidra.util
import java.lang
import org.jdom
class LazySaveableXML(ghidra.app.plugin.core.functiongraph.mvc.SaveableXML):
    """Type stub for Ghidra's LazySaveableXML (FunctionGraph MVC package).

    Auto-generated .pyi stub: bodies are elided (``...``) and exist only to
    describe the Java class's method signatures to Python tooling.
    """
    def __init__(self): ...
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    def getElement(self) -> org.jdom.Element: ...
    def getObjectStorageFields(self) -> List[java.lang.Class]: ...
    def getSchemaVersion(self) -> int: ...
    def hashCode(self) -> int: ...
    def isEmpty(self) -> bool: ...
    def isPrivate(self) -> bool: ...
    def isUpgradeable(self, __a0: int) -> bool: ...
    # java.lang.Object monitor methods inherited through the Java bridge.
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    def restore(self, __a0: ghidra.util.ObjectStorage) -> None: ...
    def save(self, __a0: ghidra.util.ObjectStorage) -> None: ...
    def toString(self) -> unicode: ...
    def upgrade(self, __a0: ghidra.util.ObjectStorage, __a1: int, __a2: ghidra.util.ObjectStorage) -> bool: ...
    # Overloads of java.lang.Object.wait.
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
    # Property view of isEmpty().
    @property
    def empty(self) -> bool: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
de252d7d6be728353eb46d94dd4648cc6fa950b6 | 046c1141399890afa13fd243e55da3dbf31085c5 | /corl/wc_test/test1.py | dcc721a40aa8d1c33ced4c39281c74deb2bacee8 | [] | no_license | carusyte/tflab | 1d0edf87282352aeb5a38b83c58ab9c0189bbb1a | 2324c3b0ad22d28c50a4fd8db56e36a2836735c3 | refs/heads/master | 2021-05-12T06:58:26.270868 | 2019-03-24T14:57:44 | 2019-03-24T14:57:44 | 117,232,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,248 | py | from __future__ import print_function
# Path hack.
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
import tensorflow as tf
# pylint: disable-msg=E0401
from model import base as model0
from wc_data import base as data0
from time import strftime
import os
import numpy as np
import math
N_TEST = 100          # number of samples held out for the test set
TEST_INTERVAL = 50    # training batches between test-set evaluations
LAYER_WIDTH = 256     # RNN cell width (hidden units per layer)
MAX_STEP = 30         # maximum sequence length in time steps
TIME_SHIFT = 2        # window shift passed to the DataLoader -- confirm exact semantics
LEARNING_RATE = 1e-3  # optimizer learning rate
USE_PEEPHOLES = True  # enable LSTM peephole connections
TIED = False          # tie cell weights (model-specific flag) -- confirm meaning
LOG_DIR = 'logdir'    # root directory for TensorBoard logs / checkpoints
# pylint: disable-msg=E0601
def collect_summary(sess, model, base_dir):
    """
    Attach TensorBoard writers and build the merged summary op.

    Creates one FileWriter for training and one for testing (both bound to
    the session graph), registers a scalar summary that tracks the square
    root of the model cost, and returns
    ``(merged_summary_op, train_writer, test_writer)``.
    """
    train_writer = tf.summary.FileWriter(base_dir + "/train", sess.graph)
    test_writer = tf.summary.FileWriter(base_dir + "/test", sess.graph)
    with tf.name_scope("Basic"):
        # sqrt of the (MSE) cost, i.e. the mean difference in data units.
        tf.summary.scalar("Mean_Diff", tf.sqrt(model.cost))
    return tf.summary.merge_all(), train_writer, test_writer
def run():
    """Train SRnnRegressorV2 on batches from the DataLoader.

    Evaluates on a fixed test set at the top of every epoch (every
    TEST_INTERVAL training batches), writes TensorBoard summaries, and
    checkpoints after each epoch. Side effects: deletes and recreates a
    run-specific directory under LOG_DIR.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    loader = data0.DataLoader(TIME_SHIFT)
    print('{} loading test data...'.format(strftime("%H:%M:%S")))
    tuuids, tdata, tvals, tseqlen = loader.loadTestSet(MAX_STEP, N_TEST)
    print('input shape: {}'.format(tdata.shape))
    print('target shape: {}'.format(tvals.shape))
    featSize = tdata.shape[2]
    data = tf.placeholder(tf.float32, [None, MAX_STEP, featSize], "input")
    target = tf.placeholder(tf.float32, [None], "target")
    seqlen = tf.placeholder(tf.int32, [None], "seqlen")
    with tf.Session() as sess:
        model = model0.SRnnRegressorV2(
            data=data,
            target=target,
            seqlen=seqlen,
            cell='grid3lstm',
            use_peepholes=USE_PEEPHOLES,
            tied=TIED,
            layer_width=LAYER_WIDTH,
            learning_rate=LEARNING_RATE)
        model_name = model.getName()
        f = __file__
        # Bare script name (no directory, no .py) for the log folder name.
        fbase = f[f.rfind('/')+1:f.rindex('.py')]
        base_dir = '{}/{}_{}/{}'.format(LOG_DIR, fbase,
                                        model_name, strftime("%Y%m%d_%H%M%S"))
        print('{} using model: {}'.format(strftime("%H:%M:%S"), model_name))
        if tf.gfile.Exists(base_dir):
            tf.gfile.DeleteRecursively(base_dir)
        tf.gfile.MakeDirs(base_dir)
        sess.run(tf.global_variables_initializer())
        summary, train_writer, test_writer = collect_summary(
            sess, model, base_dir)
        saver = tf.train.Saver()
        bno = 0
        epoch = 0
        summary_str = None
        while True:
            bno = epoch*TEST_INTERVAL
            # Evaluate on the held-out test set at the top of each epoch.
            print('{} running on test set...'.format(strftime("%H:%M:%S")))
            feeds = {data: tdata, target: tvals, seqlen: tseqlen}
            mse, worst, test_summary_str = sess.run(
                [model.cost, model.worst, summary], feeds)
            bidx, max_diff, predict, actual = worst[0], worst[1], worst[2], worst[3]
            print('{} Epoch {} diff {:3.5f} max_diff {:3.4f} predict {} actual {} uuid {}'.format(
                strftime("%H:%M:%S"), epoch, math.sqrt(mse), max_diff, predict, actual, tuuids[bidx]))
            summary_str = None
            fin = False
            for _ in range(TEST_INTERVAL):
                bno = bno+1
                print('{} loading training data for batch {}...'.format(
                    strftime("%H:%M:%S"), bno))
                _, trdata, trvals, trseqlen = loader.loadTrainingData(
                    bno, MAX_STEP)
                if len(trdata) > 0:
                    print('{} training...'.format(strftime("%H:%M:%S")))
                else:
                    print('{} end of training data, finish training.'.format(
                        strftime("%H:%M:%S")))
                    fin = True
                    break
                feeds = {data: trdata, target: trvals, seqlen: trseqlen}
                # Drop the optimize op's (None) result; keep summary and worst.
                summary_str, worst = sess.run(
                    [summary, model.worst, model.optimize], feeds)[:-1]
                bidx, max_diff, predict, actual = worst[0], worst[1], worst[2], worst[3]
                print('{} bno {} max_diff {:3.4f} predict {} actual {}'.format(
                    strftime("%H:%M:%S"), bno, max_diff, predict, actual))
                train_writer.add_summary(summary_str, bno)
                test_writer.add_summary(test_summary_str, bno)
                train_writer.flush()
                test_writer.flush()
            checkpoint_file = os.path.join(base_dir, 'model.ckpt')
            saver.save(sess, checkpoint_file, global_step=bno)
            epoch += 1
            if fin:
                break
        # Final evaluation on the test set after training data is exhausted.
        print('{} running on test set...'.format(strftime("%H:%M:%S")))
        feeds = {data: tdata, target: tvals, seqlen: tseqlen}
        mse, worst, test_summary_str = sess.run(
            [model.cost, model.worst, summary], feeds)
        bidx, max_diff, predict, actual = worst[0], worst[1], worst[2], worst[3]
        print('{} Epoch {} diff {:3.5f} max_diff {:3.4f} predict {} actual {} uuid {}'.format(
            strftime("%H:%M:%S"), epoch, math.sqrt(mse), max_diff, predict, actual, tuuids[bidx]))
        # BUG FIX: summary_str is None when the training data ran out before
        # any optimize step of the final epoch; add_summary(None) would raise.
        if summary_str is not None:
            train_writer.add_summary(summary_str, bno)
        test_writer.add_summary(test_summary_str, bno)
        train_writer.flush()
        test_writer.flush()
if __name__ == '__main__':
    # Script entry point: train and evaluate the regressor.
    run()
| [
"carusyte@163.com"
] | carusyte@163.com |
d547ea2ba4d64c65dab0c2bb26a6c4c03f3992af | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/storage/v20190601/list_storage_account_sas.py | d80c546cf9140068777a371bf7a7064bb5ddf67a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,714 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'ListStorageAccountSASResult',
'AwaitableListStorageAccountSASResult',
'list_storage_account_sas',
]
# NOTE: code generated by the Pulumi SDK Generator; keep edits minimal.
@pulumi.output_type
class ListStorageAccountSASResult:
    """
    The List SAS credentials operation response.
    """
    def __init__(__self__, account_sas_token=None):
        # Defensive type check emitted by the code generator; pulumi.set
        # stores the value on the output-type instance.
        if account_sas_token and not isinstance(account_sas_token, str):
            raise TypeError("Expected argument 'account_sas_token' to be a str")
        pulumi.set(__self__, "account_sas_token", account_sas_token)

    @property
    @pulumi.getter(name="accountSasToken")
    def account_sas_token(self) -> str:
        """
        List SAS credentials of storage account.
        """
        return pulumi.get(self, "account_sas_token")
class AwaitableListStorageAccountSASResult(ListStorageAccountSASResult):
    """Awaitable wrapper: lets callers `await` the result; it resolves
    immediately because the value is already materialized."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator so __await__ is valid.
        if False:
            yield self
        return ListStorageAccountSASResult(
            account_sas_token=self.account_sas_token)
def list_storage_account_sas(account_name: Optional[str] = None,
                             i_p_address_or_range: Optional[str] = None,
                             key_to_sign: Optional[str] = None,
                             permissions: Optional[Union[str, 'Permissions']] = None,
                             protocols: Optional['HttpProtocol'] = None,
                             resource_group_name: Optional[str] = None,
                             resource_types: Optional[Union[str, 'SignedResourceTypes']] = None,
                             services: Optional[Union[str, 'Services']] = None,
                             shared_access_expiry_time: Optional[str] = None,
                             shared_access_start_time: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListStorageAccountSASResult:
    """List the SAS credentials of a storage account.

    :param str account_name: Storage account name (3-24 chars, lower-case letters and digits).
    :param str i_p_address_or_range: IP address or range from which to accept requests.
    :param str key_to_sign: The key to sign the account SAS token with.
    :param Union[str, 'Permissions'] permissions: Signed permissions: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u), Process (p).
    :param 'HttpProtocol' protocols: Protocol permitted for a request made with the account SAS.
    :param str resource_group_name: Resource group name (case insensitive).
    :param Union[str, 'SignedResourceTypes'] resource_types: Signed resource types: Service (s), Container (c), Object (o).
    :param Union[str, 'Services'] services: Signed services: Blob (b), Queue (q), Table (t), File (f).
    :param str shared_access_expiry_time: Time at which the signature becomes invalid.
    :param str shared_access_start_time: Time at which the SAS becomes valid.
    """
    __args__ = {
        'accountName': account_name,
        'iPAddressOrRange': i_p_address_or_range,
        'keyToSign': key_to_sign,
        'permissions': permissions,
        'protocols': protocols,
        'resourceGroupName': resource_group_name,
        'resourceTypes': resource_types,
        'services': services,
        'sharedAccessExpiryTime': shared_access_expiry_time,
        'sharedAccessStartTime': shared_access_start_time,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('azure-native:storage/v20190601:listStorageAccountSAS', __args__, opts=opts, typ=ListStorageAccountSASResult).value
    return AwaitableListStorageAccountSASResult(
        account_sas_token=result.account_sas_token)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
6021843176ab0f3c5177da202fa8526cd66f5582 | 633ab8880dc367feefdb6ef565ed0e70a4094bc1 | /10001-11000/10953.py | 9c3a615dcf5c0ebcae823dc2f97ebf611b933531 | [] | no_license | winston1214/baekjoon | 2e9740ee2824d7777f6e64d50087b5c040baf2c6 | 20125255cd5b359023a6297f3761b2db1057d67d | refs/heads/master | 2023-03-04T09:07:27.688072 | 2021-02-16T13:51:49 | 2021-02-16T13:51:49 | 284,832,623 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # @Author YoungMinKim
# baekjoon 10953: read N, then N lines of comma-separated pairs "a,b";
# print the sum of each pair.
for _ in range(int(input())):
    lhs, rhs = map(int, input().split(','))
    print(lhs + rhs)
"winston1214@naver.com"
] | winston1214@naver.com |
1e1a343dc5ae02796835273443f4f1bc06e98ac6 | 04f4f051ebbbcf5fdd4ffe4a8d24697c2dc55735 | /virtual/bin/alembic | 3694c1a6b7c9f67c9c6ab9abf81897de9d5ed23d | [
"MIT"
] | permissive | Pnshimiye/blog-app | 854a9ddea8387989ca487d308badc000d261fbd4 | ae6d7e7118d798c3f90cea660e13ba58ae649b63 | refs/heads/master | 2020-04-26T09:03:21.936209 | 2019-03-06T07:22:24 | 2019-03-06T07:22:24 | 173,442,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | #!/home/pauline/Documents/core-projects/Self_Pitch/virtual/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper that dispatches to the Alembic CLI.
import re
import sys
from alembic.config import main
if __name__ == '__main__':
    # setuptools convention: strip a trailing '-script.py(w)' or '.exe'
    # suffix from argv[0] so the CLI sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"pnshimiye@gmail.com"
] | pnshimiye@gmail.com | |
c2ad586c7ddc7ca370f5fc12f0ae2ffcbbefdb8b | cf1d45f536a5922f1a15e0340b6ccb6f2124b7f8 | /main.py | 8f91e2f4ea22e418d3c5123d8ad21225c1f6ab77 | [] | no_license | purpleyoung/A-star-maze | 4a658c633dcf5cab0cd28cf9f16647c16e35d78c | 60a1250da044366e332e19abf2bc80490d71fbe1 | refs/heads/main | 2023-02-01T00:09:19.225081 | 2020-12-08T09:02:43 | 2020-12-08T09:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import time
from opt import Node, FQueue, h, g, valid_neighbors, add_close
from maze import maze
import argparse
# Command-line options for the maze window geometry and the A* heuristic weight.
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--width", help="window width", default=7, type=int)
parser.add_argument("-he", "--height", help="window height", default=5, type=int)
parser.add_argument("-u", "--unit", help="window unit", default=50, type=int)
# h_weight > 1 biases the search toward the goal (weighted A*).
parser.add_argument("-hw", "--h_weight", help="weight for h function", default=2, type=float)
args = parser.parse_args()
def loop(q: FQueue) -> Node:
    """Run the A* search loop until the maze's end node is popped.

    Repeatedly takes the lowest-f node from `q`, closes it, and relaxes its
    valid neighbors. Returns the end node; the path can be recovered by
    following each node's `pre` link backwards.
    """
    while True:
        time.sleep(0.1)  # slow the search so the GUI animation is visible
        n = q.get()
        add_close(n)
        if maze.is_end_node(n):
            found = n
            break
        neighbors = valid_neighbors(n, q, maze)
        for n_ in neighbors:
            if n_.pre != n:
                new_path_g = g(n, n_)
                # Relax: keep the cheaper known path cost to this neighbor.
                if n_.g > new_path_g:
                    n_.pre = n
                    n_.g = new_path_g
                    # f = weighted heuristic + path cost so far
                    f = h(n_, maze.e_node, weight=args.h_weight) + n_.g
                    q.put(f, n_)
                    maze.add_f(f, n_)
    return found
def main(event=None):
    """Run the search from the maze's start node; bound to the space key."""
    q = FQueue()
    q.put(0, maze.s_node)
    n = loop(q)
    maze.backward(n)  # follow pre-links back to draw the final path

maze.build(args.width, args.height, args.unit)
maze.bind('<space>', main)  # press space to start the search
maze.mainloop()
| [
"morvanzhou@hotmail.com"
] | morvanzhou@hotmail.com |
aed31ff15357a0315ac03bf1a02ac63151ca3b76 | ebb0bd236009cb203035ab31ce136df45584ad35 | /resources/admin.py | 8283c744d589b640f060e0139d45490978ec158b | [] | no_license | the-akira/Django-GraphQL-Tutorial | 7a798b0fdfc4c1d8c7cb47ac31a71ec1ccae29b6 | 74c543467fd0e93b75411f34215513fa3d498dfd | refs/heads/master | 2023-04-28T22:28:11.051719 | 2021-09-25T03:16:14 | 2021-09-25T03:16:14 | 248,372,724 | 2 | 0 | null | 2023-04-21T20:53:09 | 2020-03-19T00:21:32 | Python | UTF-8 | Python | false | false | 150 | py | from django.contrib import admin
from resources.models import Artista, Disco
# Register the app's models with the Django admin site.
admin.site.register(Artista)
admin.site.register(Disco)
"gabrielfelippe90@gmail.com"
] | gabrielfelippe90@gmail.com |
ced4d8baa28a26a118b0acb4155d12e6cdef3d5f | 8c39ba92cc71ff78242477d3256f6ee3daa872c7 | /conans/test/unittests/util/test_encrypt.py | 749a81bd3de44ecdc928a477a0923d413788d743 | [
"MIT"
] | permissive | conan-io/conan | eb4427e534a0edbb1fb06c753d5d9587faaef93c | bac455d1329b6744cdc41747354a727c9233179f | refs/heads/release/2.0 | 2023-09-03T18:51:54.345761 | 2023-09-03T17:30:43 | 2023-09-03T17:30:43 | 47,190,624 | 7,754 | 1,182 | MIT | 2023-09-14T15:16:09 | 2015-12-01T13:17:02 | Python | UTF-8 | Python | false | false | 1,463 | py | import uuid
import pytest
from conans.util import encrypt
def test_encryp_basic():
    """encode/decode round-trip for a plain ASCII message."""
    key = str(uuid.uuid4())
    message = 'simple data ascii string'
    data = encrypt.encode(message, key)
    assert type(message) == type(data)
    # Encoding must actually transform the message (was asserted twice;
    # the duplicate copy-paste line is removed).
    assert message != data
    decoded = encrypt.decode(data, key)
    # Fixed copy-paste: compare against the *decoded* value's type.
    assert type(message) == type(decoded)
    assert message == decoded
def test_encrypt_unicode():
    """encode/decode round-trip for a non-ASCII (unicode) message."""
    key = str(uuid.uuid4())
    message_enc = b'espa\xc3\xb1a\xe2\x82\xac$'  # Conan codebase allows only ASCII source files
    message = message_enc.decode('utf-8')
    data = encrypt.encode(message, key)
    assert type(message) == type(data)
    assert message != data
    decoded = encrypt.decode(data, key)
    # Fixed copy-paste: compare against the *decoded* value's type.
    assert type(message) == type(decoded)
    assert message == decoded
def test_key_unicode():
    """Round-trip still works when the *key* contains non-ASCII characters."""
    key = b'espa\xc3\xb1a\xe2\x82\xac$'.decode('utf-8')  # codebase allows only ASCII files
    message = 'the message'
    data = encrypt.encode(message, key)
    assert type(message) == type(data)
    assert message != data
    decoded = encrypt.decode(data, key)
    # Fixed copy-paste: compare against the *decoded* value's type.
    assert type(message) == type(decoded)
    assert message == decoded
def test_key_empty():
    """Empty keys, or keys with only non-ascii chars, must be rejected."""
    bad_keys = ('', b'\xc3\xb1\xe2\x82\xac'.decode('utf-8'))
    for bad_key in bad_keys:
        with pytest.raises(AssertionError):
            encrypt.encode('message', bad_key)
| [
"noreply@github.com"
] | conan-io.noreply@github.com |
049931e552b123be245a259edb76d7fa1ba55f9b | 6733716dcdcacfcc739ae5c4af976db81ead852b | /ROOT/Project/functions/rootHist_TXT/func/D1H_rootHist_TXT_conversion_largeBin.py | 846d3c15ee2fe360a962709ea5b6f410983abfb4 | [] | no_license | StudyGroupPKU/fruit_team | 45202a058d59057081670db97b9229ee720fa77e | 9f9f673f5ce22ce6d25736871f3d7a5bd232c29d | refs/heads/master | 2021-01-24T08:15:37.909327 | 2018-05-11T08:53:06 | 2018-05-11T08:53:06 | 122,975,404 | 0 | 5 | null | 2018-04-05T02:37:14 | 2018-02-26T13:41:24 | Python | UTF-8 | Python | false | false | 5,645 | py | #Author : Junho LEE
#input/output txt format :: Nth_bin Start_of_bin End_of_bin Entry
#filename :: D1H_rootHist_TXT_conversion.py
def D1H_roothist_to_txt_largeBin(filename, outputpath = ''):
    """Dump every TH1 histogram in a ROOT file to per-histogram text files.

    Output row format: <bin_number> <bin_low_edge> <bin_high_edge> <content>

    Args:
        filename: path to the input .root file ('~', absolute, or relative).
        outputpath: optional output directory; NOTE the files are currently
            written to the working directory as '<hist>_hist_largeBin.txt'
            regardless (the resolved path is only printed).

    Returns:
        List of the text filenames that were written.
    """
    from ROOT import TFile, TCanvas, TPad
    import os
    # --- normalize the input path to an absolute path ---
    if(filename[0]=="/"):
        filename = filename
    elif(filename[0] == '~'):
        filename = filename.replace("~",os.environ['HOME'])
    else:
        filename = os.getcwd() + "/" + filename  # get the path included filename
    loca=len(filename)
    for i in range (1,len(filename)+1):  # find the last "/" location
        if(filename[-i] == "/"):
            loca = i-1
            break
    FILENAME = filename.replace(filename[:-loca],"")  # shortened filename, path excluded
    filetxt = filename.replace(".root","")
    filetxt = filetxt.replace("//","/")
    if(outputpath==''):
        pass
    else:
        # Resolve the requested output directory (absolute / home / relative).
        if(outputpath[0] == "/"):
            filetxt = outputpath + "/" + FILENAME.replace(".root","")
        elif(outputpath[0] == "~"):
            filetxt = outputpath.replace("~",os.environ['HOME']) + "/" + FILENAME.replace(".root","")
        else:
            filetxt = os.getcwd() + "/" + outputpath + "/" + FILENAME.replace(".root","")
        filetxt = filetxt.replace("//","/")
    print(filetxt)
    f = TFile(filename,"READ")  # open the ROOT file read-only
    dirlist = f.GetListOfKeys()
    ITER = dirlist.MakeIterator()
    key = ITER.Next()
    LIST = []
    while key:  # collect the names of all TH1-derived histograms
        # BUG FIX: str.index() raises ValueError when the substring is absent
        # (e.g. for TH2/TTree keys); str.find() returns -1 as the `> -1`
        # comparison intended.
        if key.GetClassName().find("TH1") > -1:
            LIST.append(key.ReadObj().GetName())
        key = ITER.Next()
    OutputList = []
    for ijk in range(0,len(LIST)):
        hist = f.Get(LIST[ijk])
        Nbin = hist.GetNbinsX()
        Filetxt = LIST[ijk] + "_hist_largeBin.txt"
        # BUG FIX: close each output file when done (the original opened one
        # handle per histogram but closed only the last one after the loop).
        with open(Filetxt,"w+") as wf:
            for ii in range(1,Nbin+1):
                bin_num = ii
                bin_l = hist.GetBinLowEdge(ii)
                bin_width = hist.GetBinWidth(ii)
                bin_h = bin_l + bin_width
                binCont = hist.GetBinContent(ii)
                wf.write("%i %f %f %f\n" %(bin_num,bin_l,bin_h,binCont))
        OutputList.append(Filetxt)
        print(Filetxt, "is generated")
    f.Close()
    return OutputList
def D1H_txt_to_roothist(filename, outputpath=''):
    """Rebuild a TH1D histogram from a text dump and save it to '<name>_F.root'.

    Input row format: <bin_number> <bin_low_edge> <bin_high_edge> <content>
    (as produced by D1H_roothist_to_txt_largeBin).

    Args:
        filename: path to the input .txt file (absolute or relative).
        outputpath: optional output directory ('' -> alongside the input).

    Returns:
        Path of the .root file that was written.
    """
    from ROOT import TFile, TCanvas, TPad, TH1D, TLatex, TStyle, gStyle, TText, gPad, TPaveText
    import os
    can = TCanvas("can","can",200,10,500,500)
    # --- normalize the input path to an absolute path ---
    if(filename[0]=="/"):
        filename = filename
    else:
        filename = os.getcwd() + "/" + filename  # get the path included filename
    loca=len(filename)
    for i in range (1,len(filename)+1):  # find the last "/" location
        if(filename[-i] == "/"):
            loca = i-1
            break
    FILENAME = filename.replace(filename[:-loca],"")  # shortened filename
    fileroot = filename.replace(".txt","_F.root")
    fileroot = fileroot.replace("//","/")
    f = open(filename,"r")
    lineList = f.readlines()
    Nbin = (len(lineList))  # one row per bin
    # Histogram range: low edge of the first row, high edge of the last row.
    Line_string = str(lineList[0])
    _,bin_init,_,_ = Line_string.split(); bin_init = float(bin_init)
    Line_string = str(lineList[len(lineList)-1])
    _,_,bin_final,_ = Line_string.split(); bin_final = float(bin_final)
    f.seek(0)  # rewind; rows are re-read one by one below
    hist = TH1D("hist","hist",Nbin,bin_init,bin_final)
    total_e = 0
    for i in range(1,Nbin+1):
        Line_string = str(f.readline())
        _,_,_,bin_c = Line_string.split()
        bin_c = float(bin_c)
        hist.SetBinContent(i,bin_c)
        total_e = total_e + bin_c
    f.close()  # BUG FIX: the text file handle was never closed
    total_e = int(total_e)
    hist.Draw()
    text = TText(hist.GetXaxis().GetBinCenter(2), hist.GetYaxis().GetBinCenter(1), "Recycled. Total Entry : %i" %total_e)
    text.SetTextFont(10)
    text.Draw()
    gPad.Update()
    can.Update()
    if(outputpath==''):
        wf = TFile(fileroot,"RECREATE")
        print(fileroot, " root file is generated !!!")
    else:
        # Resolve the requested output directory (absolute / home / relative).
        if(outputpath[0] == "/"):
            fileroot = outputpath + "/" + FILENAME.replace(".txt","_F.root")
        elif(outputpath[0] == "~"):
            fileroot = outputpath.replace("~",os.environ['HOME']) + "/" + FILENAME.replace(".txt","_F.root")
        else:
            fileroot = os.getcwd() + "/" + outputpath + "/" + FILENAME.replace(".txt","_F.root")
        fileroot = fileroot.replace("//","/")
        wf = TFile(fileroot,"RECREATE")
        print(fileroot, " root file is generated !!!")
    hist.Write()
    wf.Close()
    fileroot = fileroot.replace("//","/")
    return fileroot
def main():
    """Demo: dump the histograms of a sample ROOT file to text files."""
    # BUG FIX: the original called D1H_roothist_to_txt, which is not defined
    # in this module (the function here is the *_largeBin variant) and
    # raised NameError at runtime.
    D1H_roothist_to_txt_largeBin("/Users/leejunho/Desktop/git/python3Env/group_study/fruit_team/ROOT/Project/root_generator/root3_sin.root")

if __name__=="__main__":
    main()
| [
"skyblue1293@naver.com"
] | skyblue1293@naver.com |
4270325a4df263a86fc3fec300870a7c3a26fb92 | 81cf04ed71fb1e141d7531e9cc9a82138adf1903 | /tensorflow_federated/python/research/gans/experiments/emnist/emnist_data_utils_test.py | 6ef867732cc5aefe49d2935761b1b59c50b2ba85 | [
"Apache-2.0"
] | permissive | tf-encrypted/federated | 1671fcae7c939dbe142f78c97ac43c1329db870c | 7797df103bf965a9d0cd70e20ae61066650382d9 | refs/heads/master | 2021-03-09T17:08:47.491876 | 2020-05-06T18:06:59 | 2020-05-27T20:37:50 | 246,360,226 | 1 | 2 | Apache-2.0 | 2020-05-08T02:43:41 | 2020-03-10T17:06:06 | Python | UTF-8 | Python | false | false | 3,770 | py | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Federated EMNIST dataset utilities."""
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.research.gans.experiments.emnist import emnist_data_utils
BATCH_SIZE = 7  # batch size used by preprocess_img_dataset in these tests
def _summarize_model(model):
model.summary()
print('\n\n\n')
def _get_example_client_dataset():
  """Return the tf.data.Dataset of one synthetic EMNIST client."""
  client_data = tff.simulation.datasets.emnist.get_synthetic(num_clients=1)
  return client_data.create_tf_dataset_for_client(client_data.client_ids[0])
def _get_example_client_dataset_containing_lowercase():
  """Return one real federated-EMNIST client's dataset (digits + letters),
  whose labels include lowercase letters."""
  _, client_data = tff.simulation.datasets.emnist.load_data(only_digits=False)
  return client_data.create_tf_dataset_for_client(client_data.client_ids[0])
class EmnistTest(tf.test.TestCase):
  """Tests for emnist_data_utils.preprocess_img_dataset."""

  def test_preprocessed_img_inversion(self):
    """Pixel polarity of preprocessed images follows `invert_imagery`."""
    raw_images_ds = _get_example_client_dataset()
    # Inversion turned off, average pixel is dark.
    standard_images_ds = emnist_data_utils.preprocess_img_dataset(
        raw_images_ds, invert_imagery=False, batch_size=BATCH_SIZE)
    for batch in iter(standard_images_ds):
      for image in batch:
        self.assertLessEqual(np.average(image), -0.7)
    # Inversion turned on, average pixel is light.
    inverted_images_ds = emnist_data_utils.preprocess_img_dataset(
        raw_images_ds, invert_imagery=True, batch_size=BATCH_SIZE)
    for batch in iter(inverted_images_ds):
      for image in batch:
        self.assertGreaterEqual(np.average(image), 0.7)

  def test_preprocessed_img_labels_are_case_agnostic(self):
    """Lowercase letter labels are folded onto the uppercase range."""
    raw_images_ds = _get_example_client_dataset_containing_lowercase()
    raw_ds_iterator = iter(raw_images_ds)
    # The first element in the raw dataset is an uppercase 'I' (label is 18).
    self.assertEqual(next(raw_ds_iterator)['label'].numpy(), 18)
    # The second element in the raw dataset is an uppercase 'C' (label is 12).
    self.assertEqual(next(raw_ds_iterator)['label'].numpy(), 12)
    # The third element in the raw dataset is a lowercase 'd'.
    # NOTE(review): this comment originally said "label is 39" but the
    # assertion checks 47 — confirm against the federated EMNIST label map.
    self.assertEqual(next(raw_ds_iterator)['label'].numpy(), 47)
    processed_ds = emnist_data_utils.preprocess_img_dataset(
        raw_images_ds, include_label=True, batch_size=BATCH_SIZE, shuffle=False)
    _, label_batch = next(iter(processed_ds))
    processed_label_iterator = iter(label_batch)
    # The first element (in first batch) in the processed dataset has a case
    # agnostic label of 18 (i.e., assert that value remains unchanged).
    self.assertEqual(next(processed_label_iterator).numpy(), 18)
    # The second element (in first batch) in the processed dataset has a case
    # agnostic label of 12 (i.e., assert that value remains unchanged).
    self.assertEqual(next(processed_label_iterator).numpy(), 12)
    # The third element (in first batch) in the processed dataset should now
    # have a case agnostic label of 47 - 26 = 21.
    self.assertEqual(next(processed_label_iterator).numpy(), 47 - 26)
    # All processed labels must fall in the case-agnostic range [0, 36].
    for _, label_batch in iter(processed_ds):
      for label in label_batch:
        self.assertGreaterEqual(label, 0)
        self.assertLessEqual(label, 36)
if __name__ == '__main__':
  # The preprocessing code relies on TF2 eager-style behavior.
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
aa32e29c4b9131c0574be5bfb4f0959e114502b6 | 2d21730e5625cde259703bf6df8f74aef5cc6a58 | /tests/test_augments.py | 8f5896508facc2bdacf51cab344e9aadfc96375e | [
"MIT"
] | permissive | pattonw/augment | b0ea35bd774c760320a74e35d59c5ba54ccf3266 | 3643873d7b818da417c3d4fbf662bc36164ca10c | refs/heads/master | 2023-04-07T14:19:32.229258 | 2023-03-22T16:07:10 | 2023-03-22T16:07:10 | 202,191,635 | 0 | 0 | MIT | 2019-08-13T17:20:10 | 2019-08-13T17:20:09 | null | UTF-8 | Python | false | false | 856 | py | from augment import (
create_elastic_transformation,
create_identity_transformation,
create_rotation_transformation,
)
import numpy as np
def test_basics():
    """Smoke-test the three transformation factories on a 2x2 grid."""
    rotation = create_rotation_transformation((2, 2), 90)
    identity = create_identity_transformation((2, 2))
    elastic = create_elastic_transformation((2, 2), (1, 1), 5)  # must construct without error

    expected_rotation = np.array(
        [
            [[0.27703846, 1.171035], [-1.171035, -0.27703846]],
            [[1.171035, -0.27703846], [0.27703846, -1.171035]],
        ],
        dtype=np.float32,
    )
    expected_identity = np.array(
        [[[0.0, 0.0], [1.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
    )

    assert np.isclose(rotation, expected_rotation).all()
    assert np.isclose(identity, expected_identity).all()
| [
"pattonw@hhmi.org"
] | pattonw@hhmi.org |
e5914b3dbc97d8b3fbbab18f94c22d54dbd77dd8 | 03bbb27095d2afc03d0bd1a62caa91356292e5d5 | /MapMyPlaylist/MapMyPlaylist/urls.py | a86722dce18423a3169e51caedf2e8e7a5614f81 | [] | no_license | LindaAlblas/MapMyPlaylist | d624728daa6be2bed04018cdf1ab4dea609e9605 | 2e5b7a9c10231369d2c7722a4d8ae35dc8ac2f8f | refs/heads/master | 2020-12-24T22:39:34.805944 | 2013-02-20T15:21:27 | 2013-02-20T15:21:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'MapMyPlaylist.views.home', name='home'),
    # url(r'^MapMyPlaylist/', include('MapMyPlaylist.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    # Route /findartist/<artistName>/ (word characters and spaces) to the
    # artist lookup view; the named group is passed as `artistName`.
    url(r'^findartist/(?P<artistName>[\w ]+)/$', 'findartist.views.artistQuery'),
)
| [
"finlaymccourt@gmail.com"
] | finlaymccourt@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.