blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6d39bfe89b71cb0c05cdf5b5824bde77c2647498
|
fca3fe7557c00a379e90cda8016a8719ca57fe28
|
/jexp/tests.py
|
32a07ef74a77e0e252e4893441888e74bb850e8b
|
[
"BSD-3-Clause"
] |
permissive
|
mhluongo/jexp
|
d8a4db0a2d4f0f5f70471c2e36ecc22c8835b73e
|
e23b375c00bb62cab9671bc76250023125b4e60f
|
refs/heads/master
| 2021-01-13T01:15:08.980614
| 2011-09-02T00:11:13
| 2011-09-02T00:11:13
| 2,158,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from nose.tools import eq_
from jexp import J

# Two symbolic atoms shared by every test below.
a = J('a')
b = J('b')


# --- logical operator tests ---

def test_or():
    expr = a | b
    eq_(str(expr), '(a||b)')


def test_and():
    expr = a & b
    eq_(str(expr), '(a&&b)')


def test_inv():
    expr = ~a
    eq_(str(expr), '(!a)')


# --- math operator tests ---

def test_negate():
    expr = -a
    eq_(str(expr), '(-a)')
|
[
"mhluongo@gmail.com"
] |
mhluongo@gmail.com
|
808213727226448e77ae3540979e0a54ba99ac8c
|
29d6101cc76550b3dbb47e885a6c160f46551bc1
|
/test
|
61dbb6fd4297fef87fa9d79ea8c095ff1b07c43c
|
[] |
no_license
|
awesomebjt/lpic-self-tests
|
b7dcc4062550b6ec06ef20ecb3c31c976ce46b32
|
bd772b1e25549e96caf02671b882212a3ab0cc13
|
refs/heads/master
| 2023-07-12T02:42:21.945693
| 2020-08-27T02:13:42
| 2020-08-27T02:13:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
#!/usr/bin/python
"""Interactive quiz runner: loads a JSON self-test file (first CLI
argument), asks every question once in random order, and prints a grade.

The JSON file is a list of objects with 'Q' (question) and 'A' (answer).
"""
import json
from random import randint
import sys

try:
    # Use a context manager so the file handle is closed promptly
    # instead of leaking until interpreter exit.
    with open(sys.argv[1], 'r') as quiz_file:
        content = json.load(quiz_file)
except Exception as e:
    print("Failed to load self-test. Did you provide the right file name as the first argument?")
    raise e

total = 0
correct = 0
# Pop a random question each round so every question is asked exactly once.
while len(content) > 0:
    q = content.pop(randint(0, len(content) - 1))
    total += 1
    print(q['Q'])
    a = input("# ")
    if a == q['A']:
        correct += 1

# Guard against an empty quiz file: total == 0 would divide by zero.
grade = int((correct / total) * 100) if total else 0
print("Total: {}\tCorrect: {}\tGrade: {}%".format(
    total,
    correct,
    grade))
|
[
"bjt@rabidquill.com"
] |
bjt@rabidquill.com
|
|
725d9518757190bbff990c8383cf7ec9d56e3cc5
|
c0d537532f11cf742493093c3c325b4625fdc6e4
|
/Q4/HW3_Q4.py
|
ca25aabc6ff5e1c28eeec4da92089f07eb0f066c
|
[] |
no_license
|
plusbzz/cs224w-hw3
|
c9b4296425f467e203d12e4008b871d6dd89333f
|
7d513c991ff6e16433b6a4241950a2a3b2c15a96
|
refs/heads/master
| 2016-09-06T06:56:45.495051
| 2013-11-07T05:35:46
| 2013-11-07T05:35:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,469
|
py
|
# Homework 3: Question 4
from snap import *
from random import sample,choice
from ggplot import *
# Target graph size: node and edge counts matching the Oregon AS dataset.
N=10670
M=22002
# NOTE(review): `arange` is not defined by the explicit imports above —
# presumably re-exported by one of the star imports (numpy, via snap or
# ggplot); confirm, otherwise this line raises NameError.
nodes=arange(N)
# Number of BFS start nodes used when sampling the full diameter.
dia_sample = 20
# Creating graphs
# Create a random Gnm network
g_nm = PUNGraph_New()
for i in nodes: g_nm.AddNode(i)
# Keep adding random edges until exactly M edges exist; AddEdge on an
# existing pair simply does not increase the edge count.
while True:
    s,t = sample(nodes,2)
    g_nm.AddEdge(s,t)
    if g_nm.GetEdges() == M: break
# No-op expression — presumably leftover from an interactive session.
g_nm.GetNodes(),g_nm.GetEdges()
# Save graph
SaveEdgeList_PUNGraph(g_nm,"Gnm.txt")
# Create a graph G_pa with preferential attachment
# Start with a complete graph of 40 nodes
N_init = 40
edges = []
g_pa = PUNGraph_New()
for n in xrange(N_init):
    g_pa.AddNode(n)
    for m in xrange(n):
        g_pa.AddEdge(m,n)
        edges.append((m,n))
# Each new node attaches to 2 endpoints chosen uniformly from the edge
# list — endpoints of many edges are picked more often, which is the
# preferential-attachment bias.
for n in nodes[N_init:]:
    g_pa.AddNode(n)
    for i in xrange(2):
        m = choice(choice(edges))
        g_pa.AddEdge(m,n)
        edges.append((m,n))
    if g_pa.GetEdges() == M: break
# No-op expression — presumably leftover from an interactive session.
g_pa.GetNodes(),g_pa.GetEdges()
SaveEdgeList_PUNGraph(g_pa,"Gpa.txt")
# Load Autonomous network graph
g_as = LoadEdgeList_PUNGraph("oregon1_010331.txt")
SaveEdgeList_PUNGraph(g_as,"Gas.txt")
# Q4.1) Deletion experiments for failure vs attack
# Failure deletion
def failure1(graph,batchsize,percent):
    """Random-failure experiment: delete uniformly random nodes in
    batches of `batchsize` until `percent`% of the graph is removed.

    Returns two parallel lists: percentage of nodes deleted, and the
    BFS-sampled full diameter after each batch (index 0 = untouched graph).
    """
    n_total = graph.GetNodes()
    stop_at = (percent * n_total) / 100
    deleted = 0
    pct_deleted = [0]
    diameters = [GetBfsFullDiam_PUNGraph(graph, dia_sample)]
    alive = set(range(n_total))
    while True:
        # Remove one random batch of surviving nodes.
        for victim in sample(alive, batchsize):
            graph.DelNode(victim)
            alive.remove(victim)
        deleted += batchsize
        pct_deleted.append((100.0 * deleted) / n_total)
        diameters.append(GetBfsFullDiam_PUNGraph(graph, dia_sample))
        if deleted >= stop_at: break
    return pct_deleted, diameters
# Attack deletion
def attack1(graph,batchsize,percent):
    """Targeted-attack experiment: repeatedly delete the current
    highest-degree node, `batchsize` nodes per step, until `percent`%
    of the graph is removed.

    Returns two parallel lists: percentage of nodes deleted, and the
    BFS-sampled full diameter after each batch (index 0 = untouched graph).
    """
    n_total = graph.GetNodes()
    stop_at = (percent * n_total) / 100
    deleted = 0
    pct_deleted = [0]
    diameters = [GetBfsFullDiam_PUNGraph(graph, dia_sample)]
    alive = set(range(n_total))
    while True:
        for _ in xrange(batchsize):
            # Hub is recomputed after every deletion.
            hub = GetMxDegNId_PUNGraph(graph)
            graph.DelNode(hub)
            alive.remove(hub)
        deleted += batchsize
        pct_deleted.append((100.0 * deleted) / n_total)
        diameters.append(GetBfsFullDiam_PUNGraph(graph, dia_sample))
        if deleted >= stop_at: break
    return pct_deleted, diameters
# Plot for average diameter vs. deleted nodes
def plots(X,Y,xlab,ylab,tpref,failure_func,attack_func):
g_nm = LoadEdgeListStr_PUNGraph("Gnm.txt")
f_g_nm_x,f_g_nm_y = failure_func(g_nm,X,Y)
g_as = LoadEdgeListStr_PUNGraph("Gas.txt")
f_g_as_x,f_g_as_y = failure_func(g_as,X,Y)
g_pa = LoadEdgeListStr_PUNGraph("Gpa.txt")
f_g_pa_x,f_g_pa_y = failure_func(g_pa,X,Y)
g_nm = LoadEdgeListStr_PUNGraph("Gnm.txt")
a_g_nm_x,a_g_nm_y = attack_func(g_nm,X,Y)
g_as = LoadEdgeListStr_PUNGraph("Gas.txt")
a_g_as_x,a_g_as_y = attack_func(g_as,X,Y)
g_pa = LoadEdgeListStr_PUNGraph("Gpa.txt")
a_g_pa_x,a_g_pa_y = attack_func(g_pa,X,Y)
p = plt.plot(f_g_as_x,f_g_as_y,'-o',f_g_nm_x,f_g_nm_y,'-x',f_g_pa_x,f_g_pa_y,'-+',
a_g_as_x,a_g_as_y,'-.',a_g_nm_x,a_g_nm_y,'--',a_g_pa_x,a_g_pa_y,'-4',
lw=1,mew=2)
p = plt.legend(("Failure: AS","Failure: NM","Failure: PA",
"Attack: AS","Attack: NM","Attack: PA"),loc="best")
p = plt.title(tpref + ': ' + ylab + " vs. " + xlab)
p = plt.xlabel(xlab)
p = plt.ylabel(ylab)
# Scenario 1: X = N/100, Y = 50
# Batch size = 1% of the nodes, stop after deleting 50% of the graph.
# (Python 2 integer division keeps X an int.)
X = N/100
Y = 50
plots(X,Y,"Percent of deleted nodes","Average sampled diameter","Q4.1)X=N/100,Y=50",
      failure1,attack1)
# Scenario 2: X = N/1000, Y = 2
# Finer-grained: batch size = 0.1% of the nodes, stop at 2% deleted.
X = N/1000
Y = 2
plots(X,Y,"Percent of deleted nodes","Average sampled diameter","Q4.1)X=N/1000,Y=2",
      failure1,attack1)
# Q4.2) Change in size of largest connected component
# Failure deletion
def failure2(graph,batchsize,percent):
    """Random-failure experiment for Q4.2: delete uniformly random
    nodes in batches of `batchsize` until `percent`% of the graph is
    removed.

    Returns two parallel lists: percentage of nodes deleted, and the
    fraction of nodes in the largest weakly connected component after
    each batch (index 0 = untouched graph).
    """
    n_total = graph.GetNodes()
    stop_at = (percent * n_total) / 100
    deleted = 0
    pct_deleted = [0]
    lcc_sizes = [float(GetMxWccSz_PUNGraph(graph))]
    alive = set(range(n_total))
    while True:
        # Remove one random batch of surviving nodes.
        for victim in sample(alive, batchsize):
            graph.DelNode(victim)
            alive.remove(victim)
        deleted += batchsize
        pct_deleted.append((100.0 * deleted) / n_total)
        lcc_sizes.append(float(GetMxWccSz_PUNGraph(graph)))
        if deleted >= stop_at: break
    return pct_deleted, lcc_sizes
# Attack deletion
def attack2(graph,batchsize,percent):
    """Targeted-attack experiment for Q4.2: repeatedly delete the
    current highest-degree node, `batchsize` nodes per step, until
    `percent`% of the graph is removed.

    Returns two parallel lists: percentage of nodes deleted, and the
    fraction of nodes in the largest weakly connected component after
    each batch (index 0 = untouched graph).
    """
    n_total = graph.GetNodes()
    stop_at = (percent * n_total) / 100
    deleted = 0
    pct_deleted = [0]
    lcc_sizes = [float(GetMxWccSz_PUNGraph(graph))]
    alive = set(range(n_total))
    while True:
        for _ in xrange(batchsize):
            # Hub is recomputed after every deletion.
            hub = GetMxDegNId_PUNGraph(graph)
            graph.DelNode(hub)
            alive.remove(hub)
        deleted += batchsize
        pct_deleted.append((100.0 * deleted) / n_total)
        lcc_sizes.append(float(GetMxWccSz_PUNGraph(graph)))
        if deleted >= stop_at: break
    return pct_deleted, lcc_sizes
# Plots of fraction in largest connected component vs. percent deleted nodes
# Batch size = 1% of the nodes, stop after deleting 50% of the graph.
X = N/100
Y = 50
plots(X,Y,"Percent of deleted nodes","Fraction of nodes in LCC","Q4.2)X=N/100,Y=50",
      failure2,attack2)
|
[
"plusbzz@gmail.com"
] |
plusbzz@gmail.com
|
59accba5a656d5b413c7c3ad528bee9b9a83ad95
|
9025c27655e2f150d01e64ce0826df8166ac6813
|
/core/urls.py
|
a1c84250501f6e331d1daaab5d0a66f5b2db6bbf
|
[] |
no_license
|
kairat3/bella-plain
|
02dd219f6bf087c99772490a32d61cd242a18f28
|
1950fd46dc53b800461f6077af3044bdfcf8300c
|
refs/heads/master
| 2023-07-13T05:06:17.575811
| 2021-08-19T14:05:29
| 2021-08-19T14:05:29
| 393,064,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from product.views import ProductApiView
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi

# drf-yasg schema view: serves the auto-generated OpenAPI description of
# the API; publicly accessible to anyone (AllowAny).
schema_view = get_schema_view(
    openapi.Info(
        title="Bella API",
        default_version='v1',
        description="Test description",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="sunerisestudiopro@gmail.com"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)

# Router generates the standard CRUD routes for the products viewset.
router = DefaultRouter()
router.register('products', ProductApiView)

# NOTE(review): several includes are mounted at the root prefix '' —
# Django resolves patterns in order, so earlier entries shadow later
# ones when route names collide; keep this ordering intact.
urlpatterns = [
    path('', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('docs/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    path('', include('account.urls')),
    path('', include('product.urls')),
    path('admin/', admin.site.urls),
    path('', include(router.urls)),
    path('', include('info.urls')),
    path('', include('news.urls')),
    path('', include('cart.urls')),
]

# Serve static and media files through Django only in development.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"jvckmiller@gmail.com"
] |
jvckmiller@gmail.com
|
2c2aebeebd8ad4a79fc47d44907d6a0fd9cdc88d
|
f68c7045d39039bcc58b8d096aca7edf433429ca
|
/env/bin/easy_install
|
2a206bc40153a884b403668cb00e8f28646c0b1c
|
[
"MIT"
] |
permissive
|
kelvin-daniel/instagram
|
beca157eb4eb1130ebd86825a9f99d96b903da02
|
2ede5319266f4312a9440d4985d098bc7545c2ae
|
refs/heads/master
| 2022-12-30T17:31:37.451798
| 2020-10-26T09:02:55
| 2020-10-26T09:02:55
| 304,535,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
#!/home/kevon/Documents/moringa_school/Django/instagram/env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script shim for easy_install; edits
# here are normally overwritten when the virtualenv is rebuilt.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip a trailing '-script.pyw'/'.exe' suffix so argv[0] matches the
    # plain command name setuptools expects on all platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"kaymutor@gmail.com"
] |
kaymutor@gmail.com
|
|
0cca7a3e106b4584b3b916276cb3e704eb75122f
|
e66efe2baf16c48398087e1e2322ae4e8e77b5f1
|
/deepbrain/train/segmenter/count_frequency.py
|
858fb7942997b6c83726df7342a20dcfe6b2a46e
|
[
"MIT"
] |
permissive
|
codewithkaranjeswani/deepbrain
|
b43e72e95c185dd96ec78f92f42afd7741fac75c
|
ac16db831ba0fb213c08b4449657f5895b136324
|
refs/heads/master
| 2022-11-25T11:12:41.954520
| 2020-08-03T15:52:48
| 2020-08-03T15:52:48
| 284,741,744
| 0
| 0
|
MIT
| 2020-08-03T15:50:43
| 2020-08-03T15:50:43
| null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
# Count class frequency to deal with unbalance
import tensorflow as tf
import os
import nibabel as nib
import numpy as np
import random
import re
from skimage.transform import resize
from pathlib import Path
from const import *
# CSF: 1, 2, 23, 24, 0, 18 -> 1
# WM: 16, 17 -> 2
# GM: Rest -> 3
# Brain Stem: 7 -> 4
# Cerebellum WM: 12, 13 -> 5
# Cerebellum GM: 10, 11, 36, 37, 38 -> 6
def shrink_labels(labels):
    """Collapse MALPEM label ids into 6 coarse tissue classes, in place.

    Mapping (see comment block above): CSF -> 1, WM -> 2, GM (anything
    not otherwise listed) -> 3, brain stem -> 4, cerebellum WM -> 5,
    cerebellum GM -> 6. Mutates and returns `labels`.
    """
    groups = (
        (1, [1, 2, 23, 24, 0, 18]),    # CSF
        (2, [16, 17]),                 # WM
        (4, [7]),                      # brain stem
        (5, [12, 13]),                 # cerebellum WM
        (6, [10, 11, 36, 37, 38]),     # cerebellum GM
    )
    # Compute every membership mask against the ORIGINAL values first,
    # so the assignments below cannot interfere with one another.
    masks = [(cls, np.isin(labels, ids)) for cls, ids in groups]
    known = np.zeros(labels.shape, dtype=bool)
    for _, mask in masks:
        known |= mask
    for cls, mask in masks:
        labels[mask] = cls
    labels[~known] = 3  # GM: everything not otherwise classified
    return labels
def run():
    """Tally voxel counts per coarse tissue class across the dataset.

    Walks every MALPEM label volume, keeps only voxels inside the
    matching brain mask, collapses the labels with shrink_labels(), and
    accumulates a {class: voxel count} dict which is printed at the end.
    """
    root = ADNI_DATASET_DIR
    label_dir = Path(os.path.join(root, "masks", "malpem"))
    brain_dir = Path(os.path.join(root, "masks", "brain_masks"))
    counts = {}
    processed = 0
    for fname in os.listdir(label_dir):
        # Brain-mask files share the label file name minus a 7-char prefix.
        mask_name = fname[7:]
        label_img = nib.load(str(label_dir / fname))
        brain_mask = (nib.load(str(brain_dir / mask_name)).get_fdata().squeeze()) == 1
        voxels = label_img.get_fdata().astype(np.uint8).squeeze()
        assert voxels.shape == brain_mask.shape
        voxels = shrink_labels(voxels[brain_mask])
        hist = np.bincount(voxels)
        present = np.nonzero(hist)[0]
        processed += 1
        if processed % 100 == 0:
            print("Processed {}".format(processed))
        for cls, cnt in zip(present, hist[present]):
            counts[cls] = counts.get(cls, 0) + cnt
    print(counts)
if __name__ == "__main__":
    run()
|
[
"i.itzcovich@gmail.com"
] |
i.itzcovich@gmail.com
|
edbda326ea8cc86ed561de36cac7f9cfb7b215e5
|
97763df96bc21d91e46e3a98f9ee2b55f557035e
|
/qa/rpc-tests/wallet.py
|
096b0a373b81c77ca811d0f202f25b8aea30c591
|
[
"MIT"
] |
permissive
|
jaagcoin/JAAGCoin-Core
|
2f0138c38e28b98878bbcd5f011ab84d1441bb57
|
87073dbff406e2d95a6e9d81521973c3c8cef350
|
refs/heads/master
| 2020-03-26T05:34:39.790028
| 2018-08-30T15:46:16
| 2018-08-30T15:46:16
| 144,563,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,705
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
    """Functional test of basic wallet RPC behaviour on a regtest network.

    Covers balances and sends, unspent-output locking, raw transactions,
    rebroadcast, -walletbroadcast, address/key import, unicode account
    labels, maintenance flags and long unconfirmed chains. The statement
    order below is load-bearing: each step assumes the exact chain and
    wallet state produced by the previous ones.
    """

    def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
        """Return curr_balance after asserting the fee was in range"""
        fee = balance_with_fee - curr_balance
        assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
        return curr_balance

    def __init__(self):
        # Fresh chain; four nodes, alternating HD and non-HD wallets.
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
        self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]

    def setup_network(self, split=False):
        # Only three nodes start here; the fourth is launched later in
        # run_test to exercise transaction rebroadcast.
        self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3], redirect_stderr=True)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.is_network_split=False
        self.sync_all()

    def run_test (self):
        """Drive the whole wallet scenario end to end."""
        # Check that there's no UTXO on none of the nodes
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        print("Mining blocks...")
        self.nodes[0].generate(1)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 500)
        assert_equal(walletinfo['balance'], 0)

        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 500)
        assert_equal(self.nodes[1].getbalance(), 500)
        assert_equal(self.nodes[2].getbalance(), 0)

        # Check that only first and second nodes have UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 1)
        assert_equal(len(self.nodes[1].listunspent()), 1)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        # Send 210 JAAG from 0 to 2 using sendtoaddress call.
        # Second transaction will be child of first, and will require a fee
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)

        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all()

        # Exercise locking of unspent outputs
        unspent_0 = self.nodes[2].listunspent()[0]
        unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
        self.nodes[2].lockunspent(False, [unspent_0])
        assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
        assert_equal([unspent_0], self.nodes[2].listlockunspent())
        self.nodes[2].lockunspent(True, [unspent_0])
        assert_equal(len(self.nodes[2].listlockunspent()), 0)

        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all()

        # node0 should end up with 1000 JAAG in block rewards plus fees, but
        # minus the 210 plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 1000-210)
        assert_equal(self.nodes[2].getbalance(), 210)

        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)

        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
            outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))

        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True, False, True)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True, False, True)

        # Have node1 mine a block to confirm transactions:
        self.nodes[1].generate(1)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 1000)
        assert_equal(self.nodes[2].getbalance("from1"), 1000-210)

        # Send 100 JAAG normal
        address = self.nodes[0].getnewaddress("test")
        fee_per_byte = Decimal('0.00001') / 1000
        self.nodes[2].settxfee(fee_per_byte * 1000)
        txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), Decimal('100'))

        # Send 100 JAAG with subtract fee from amount
        txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('100')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))

        # Sendmany 100 JAAG
        txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [])
        self.nodes[2].generate(1)
        self.sync_all()
        node_0_bal += Decimal('100')
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), node_0_bal)

        # Sendmany 100 JAAG with subtract fee from amount
        txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [address])
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('100')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))

        # Test ResendWalletTransactions:
        # Create a couple of transactions, then start up a fourth
        # node (nodes[3]) and ask nodes[0] to rebroadcast.
        # EXPECT: nodes[3] should have those transactions in its mempool.
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        sync_mempools(self.nodes)

        self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3], redirect_stderr=True))
        connect_nodes_bi(self.nodes, 0, 3)
        sync_blocks(self.nodes)

        relayed = self.nodes[0].resendwallettransactions()
        assert_equal(set(relayed), {txid1, txid2})
        sync_mempools(self.nodes)

        assert(txid1 in self.nodes[3].getrawmempool())

        # Exercise balance rpcs
        assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
        assert_equal(self.nodes[0].getunconfirmedbalance(), 1)

        #check if we can list zero value tx as available coins
        #1. create rawtx
        #2. hex-changed one output to 0.0
        #3. sign and send
        #4. check if recipient (node0) can list the zero value tx
        usp = self.nodes[1].listunspent()
        inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
        signedRawTx = self.nodes[1].signrawtransaction(rawTx)
        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
        zeroValueTxid= decRawTx['txid']
        sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])

        self.sync_all()
        self.nodes[1].generate(1) #mine a block
        self.sync_all()

        unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
        found = False
        for uTx in unspentTxs:
            if uTx['txid'] == zeroValueTxid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert(found)

        #do some -walletbroadcast tests
        stop_nodes(self.nodes)
        self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.sync_all()

        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        self.nodes[1].generate(1) #mine a block, tx should not be in there
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted

        #now broadcast from another node, mine a block, sync, and check the balance
        self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        node_2_bal += 2
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        #create another tx
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)

        #restart the nodes with -walletbroadcast=1
        stop_nodes(self.nodes)
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        sync_blocks(self.nodes)

        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        node_2_bal += 2

        #tx should be added to balance because after restarting the nodes tx should be broadcastet
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        #send a tx with value in a string (PR#6380 +)
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-2'))

        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        #check if JSON parser can handle scientific notation in strings
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        try:
            txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
        except JSONRPCException as e:
            assert("Invalid amount" in e.error['message'])
        else:
            raise AssertionError("Must not parse invalid amounts")

        try:
            self.nodes[0].generate("2")
            raise AssertionError("Must not accept strings as numeric")
        except JSONRPCException as e:
            assert("not an integer" in e.error['message'])

        # Import address and private key to check correct behavior of spendable unspents
        # 1. Send some coins to generate new UTXO
        address_to_import = self.nodes[2].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address_to_import, 1)
        self.nodes[0].generate(1)
        self.sync_all()

        # 2. Import address from node2 to node1
        self.nodes[1].importaddress(address_to_import)

        # 3. Validate that the imported address is watch-only on node1
        assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])

        # 4. Check that the unspents after import are not spendable
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": False})

        # 5. Import private key of the previously imported address on node1
        priv_key = self.nodes[2].dumpprivkey(address_to_import)
        self.nodes[1].importprivkey(priv_key)

        # 6. Check that the unspents are now spendable on node1
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": True})

        # Mine a block from node0 to an address from node1
        cbAddr = self.nodes[1].getnewaddress()
        blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
        cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
        self.sync_all()

        # Check that the txid and balance is found by node1
        self.nodes[1].gettransaction(cbTxId)

        # check if wallet or blockchain maintenance changes the balance
        self.sync_all()
        blocks = self.nodes[0].generate(2)
        self.sync_all()
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
        block_count = self.nodes[0].getblockcount()

        # Check modes:
        # - True: unicode escaped as \u....
        # - False: unicode directly as UTF-8
        for mode in [True, False]:
            self.nodes[0].ensure_ascii = mode
            # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
            for s in [u'рыба', u'𝅘𝅥𝅯']:
                addr = self.nodes[0].getaccountaddress(s)
                label = self.nodes[0].getaccount(addr)
                assert_equal(label, s)
                assert(s in self.nodes[0].listaccounts().keys())
        self.nodes[0].ensure_ascii = True # restore to default

        # maintenance tests
        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
            # '-salvagewallet',
        ]
        chainlimit = 6
        for m in maintenance:
            print("check " + m)
            stop_nodes(self.nodes)
            # set lower ancestor limit for later
            self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3)
            while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
                # reindex will leave rpc warm up "early"; Wait for it to finish
                time.sleep(0.1)
            # Balances must be identical after every maintenance restart.
            assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])

        # Exercise listsinceblock with the last two blocks
        coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
        assert_equal(coinbase_tx_1["lastblock"], blocks[1])
        assert_equal(len(coinbase_tx_1["transactions"]), 1)
        assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
        assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)

        # ==Check that wallet prefers to use coins that don't exceed mempool limits =====

        # Get all non-zero utxos together
        chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
        singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
        self.nodes[0].generate(1)
        node0_balance = self.nodes[0].getbalance()
        # Split into two chains
        rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
        self.nodes[0].generate(1)

        # Make a long chain of unconfirmed payments without hitting mempool limit
        # Each tx we make leaves only one output of change on a chain 1 longer
        # Since the amount to send is always much less than the outputs, we only ever need one output
        # So we should be able to generate exactly chainlimit txs for each original output
        sending_addr = self.nodes[1].getnewaddress()
        txid_list = []
        for i in range(chainlimit*2):
            txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
        assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
        assert_equal(len(txid_list), chainlimit*2)

        # Without walletrejectlongchains, we will still generate a txid
        # The tx will be stored in the wallet but not accepted to the mempool
        extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
        assert(extra_txid not in self.nodes[0].getrawmempool())
        assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()])
        self.nodes[0].abandontransaction(extra_txid)
        total_txs = len(self.nodes[0].listtransactions("*",99999))

        # Try with walletrejectlongchains
        # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
        stop_node(self.nodes[0],0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])

        # wait for loadmempool
        timeout = 10
        while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
            time.sleep(0.5)
            timeout -= 0.5
        assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)

        node0_balance = self.nodes[0].getbalance()
        # With walletrejectlongchains we will not create the tx and store it in our wallet.
        assert_raises_message(JSONRPCException, "mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))

        # Verify nothing new in wallet
        assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
if __name__ == '__main__':
    # Run the wallet test suite when executed directly.
    WalletTest().main()
|
[
"dmitriy@Dmitriys-iMac.local"
] |
dmitriy@Dmitriys-iMac.local
|
35509fe6b955bd2603e79013b82691a6ac50a9c7
|
1fa21cd2c288a9f87295631e10f747fe075a1502
|
/Trip Planner APIs/trip.py
|
fe94241adebdc4d7399310e515b6760df7830685
|
[] |
no_license
|
bmrn/TfNSW_APIs
|
4bc22e800796f848ff5f1ced2c04dd56a0666472
|
b4cbe176ce811698739b5fd33517fb36edbfa68d
|
refs/heads/master
| 2021-01-19T22:55:33.722331
| 2017-04-28T06:33:23
| 2017-04-28T06:33:23
| 88,893,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,853
|
py
|
from urllib.parse import urlencode
import requests
import json
import tssetup
from pprint import pprint

# Queries the TfNSW (Transport for NSW) Trip Planner open-data API for
# trips between two stops and prints a per-leg summary of each journey.
api_key = tssetup.getKey()
base_url = "https://api.transport.nsw.gov.au/v1/tp/"
query_type = "trip?"
#initialise query param dictionary
qdict = {}
#add parameters
qdict["outputFormat"] = "rapidJSON"
qdict["coordOutputFormat"] = "EPSG:4326"
qdict["depArrMacro"] = "dep" #dep after or arr before
qdict["itdDate"] = "20170707"
qdict["itdTime"] = "1200"
qdict["type_origin"] = "any"
qdict["name_origin"] = "10101331" #get location/stop id from stop_finder.py
qdict["type_destination"] = "any"
qdict["name_destination"] = "10102027"
qdict["calcNumberOfTrips"] = 5
qdict["wheelchair"] = "" #or "on"
qdict["TfNSWSF"] = "true"
qdict["version"] = "10.2.1.15"
#encode params as querystring
qstring = urlencode(qdict)
#buildurl
urlsend = base_url + query_type + qstring
print(urlsend)
#get authentication
headers = {'Authorization': 'apikey ' + api_key, 'Accept': 'application/json'}
response = requests.get(urlsend, headers=headers)
#decode response and convert to JSON format
respdict = json.loads(response.content.decode('utf-8'))
#simple example to look at data
# NOTE(review): indexes straight into 'journeys'/'legs' and the
# '...TimeEstimated' fields — presumably always present in rapidJSON
# responses; confirm against the API, otherwise a KeyError is possible.
for x in range(len(respdict["journeys"])):
    print("********* TRIP " + str(x+1) + " *********")
    for y in range(len(respdict["journeys"][x]["legs"])):
        print("LEG " + str(y+1) + "")
        # duration is reported in seconds; show minutes
        print("Duration " + str(respdict["journeys"][x]["legs"][y]["duration"]/60) + " mins", end="\n")
        print(respdict["journeys"][x]["legs"][y]["origin"]["departureTimeEstimated"], end="\t")
        print(respdict["journeys"][x]["legs"][y]["origin"]["name"], end="\n")
        print(respdict["journeys"][x]["legs"][y]["destination"]["arrivalTimeEstimated"], end="\t")
        print(respdict["journeys"][x]["legs"][y]["destination"]["name"], end="\n")
        print("\t\t")
|
[
"ausben@gmail.com"
] |
ausben@gmail.com
|
0a4d9c5a7f1c40f757a2731830ff440ee111ecd1
|
03b7430a0fbba63c0d5712ed6539caa807cb8d05
|
/problem1e.py
|
920b116d89aa792337950f565febeb39b2f85035
|
[
"MIT"
] |
permissive
|
yasserglez/monkeys-typing
|
7e0dcecfea3bc7401fbd8d229180b8277dc0b6e0
|
c7d15802cedd29dc0a194612d3777a656cb43caa
|
refs/heads/master
| 2020-04-06T04:31:39.337137
| 2015-08-08T18:23:37
| 2015-08-08T18:23:37
| 18,028,427
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
from monkeys import compute_freq_tab, write_freq_tab

# Corpus the frequency tables are built from.
SOURCE_TEXT = 'books/agnes_grey.txt'

# Build and persist the 2nd-order frequency table.
freq_tab_2nd_order = compute_freq_tab(2, SOURCE_TEXT)
write_freq_tab(freq_tab_2nd_order, 'agnes_grey_2nd_order.json')

# Build and persist the 3rd-order frequency table.
freq_tab_3rd_order = compute_freq_tab(3, SOURCE_TEXT)
write_freq_tab(freq_tab_3rd_order, 'agnes_grey_3rd_order.json')
|
[
"ygonzalezfernandez@gmail.com"
] |
ygonzalezfernandez@gmail.com
|
0639bb8bfaf5d12027ea12b6ee6bbe9dec7363a0
|
6b7176e32e8e6b105d5ad8b4bda038ad9ae6a281
|
/P25034-zhaojie/week-11/homework.py
|
31a196df26a7d4580903c49e0900fa52b26d02c2
|
[
"Apache-2.0"
] |
permissive
|
xiaohh2016/python-25
|
20c7e0a157c4be5707891d1839644e015b28dbb4
|
8981ba89bfb32754c3f9c881ee8fcaf13332ce51
|
refs/heads/master
| 2021-01-05T18:50:53.838337
| 2020-02-12T08:46:53
| 2020-02-12T08:46:53
| 241,107,053
| 0
| 0
|
Apache-2.0
| 2020-02-17T12:52:31
| 2020-02-17T12:52:31
| null |
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
#!/usr/bin/env python
# encoding:utf-8
# file: homework.py
# 自己实现python自带的map、zip和filter函数
# 还没学到 yield语法不熟 先简单实现
# Re-implementation of the built-in map().
def my_map(*args):
    """Yield func(a, b, ...) for values drawn in lockstep from the iterables.

    args = (func, iterable1, iterable2, ...).  Iteration stops at the
    shortest iterable, like the built-in map().  Generalized from the
    original: accepts any iterables (generators included), not only
    indexable sequences.
    """
    if len(args) < 2:
        # Preserve the original behaviour: print a message instead of raising.
        # Because this function is a generator, the message only appears when
        # the generator is first iterated.
        print('map()至少需要两个参数')
    else:
        func = args[0]
        iterators = [iter(seq) for seq in args[1:]]
        while True:
            try:
                values = [next(it) for it in iterators]
            except StopIteration:
                # One input is exhausted -> stop, mirroring built-in map().
                return
            yield func(*values)
# Re-implementation of the built-in zip().
def my_zip(*args):
    """Yield tuples pairing the i-th element of every argument.

    Stops at the shortest argument, like the built-in zip().  Generalized
    from the original: accepts any iterables, not only sized sequences.
    """
    if not len(args):
        # Match the original: zip() with no arguments yields nothing.
        return tuple()
    iterators = [iter(seq) for seq in args]
    while True:
        row = []
        for it in iterators:
            try:
                row.append(next(it))
            except StopIteration:
                # Shortest input exhausted -> stop yielding.
                return
        yield tuple(row)
# Re-implementation of the built-in filter().
def my_filter(func, itr):
    """Yield the items of itr for which func(item) is truthy.

    When func is None the item's own truthiness is used, exactly like the
    built-in filter().
    """
    predicate = bool if func is None else func
    for item in itr:
        if predicate(item):
            yield item
# Test helper: addition.
def func1(x, y):
    """Return the sum of x and y."""
    return x + y
# Test helper: squaring.
def func2(x):
    """Return x squared."""
    return x ** 2
# Test helper: predicate selecting numbers greater than 100.
def func3(x):
    """Return True when x is strictly greater than 100, else False."""
    # `x > 100` already evaluates to a bool; the original ternary
    # (`True if x > 100 else False`) was redundant.
    return x > 100
if __name__ == '__main__':
    # Exercise the re-implementations, then print the built-ins' output as a
    # reference group for eyeball comparison.
    xs = [3, 2, 3]
    ys = [6, 5]
    sample = [0, 201, 1, 2, 3, 100, 101]
    print(list(my_map(func1, xs, ys)))
    print(list(my_zip([1, 2, 3], [4, 5], 'abcdefg')))
    print(list(my_filter(func3, sample)))
    print(list(my_zip()))
    print(list(my_filter(None, sample)))
    print('-------- 对照组 --------')
    print(list(map(func1, xs, ys)))
    print(list(zip([1, 2, 3], [4, 5], 'abcdefg')))
    print(list(filter(func3, sample)))
    print(list(zip()))
    print(list(filter(None, sample)))
|
[
"jasonz666@qq.com"
] |
jasonz666@qq.com
|
fb997a333c66b348a4b2be51acc74d9b48a24bd6
|
47a211616a0fd746c1738aac8ab00cb758d6b057
|
/Flask/Scripts/rst2xml.py
|
04f24d4bafeabae2bcd193f648d3307ea9da0421
|
[] |
no_license
|
yan-egorov/flask_bro
|
49b8b43ae5d113fd776adda6a65214b334daf63b
|
5e233a5665c9948fc22d7d185c6d43b50b58fe5c
|
refs/heads/master
| 2021-01-18T20:30:05.722883
| 2015-06-10T07:40:45
| 2015-06-10T07:40:45
| 37,182,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!C:\Users\rusegy\Documents\Python Scripts\TrueStories\Flask\Scripts\python.exe

# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""

# Best-effort locale setup so command-line argument decoding matches the
# user's environment; failures are deliberately ignored.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

_DESCRIPTION_PREFIX = ('Generates Docutils-native XML from standalone '
                       'reStructuredText sources. ')

# Hand everything (argv parsing, I/O, error reporting) to the Publisher.
publish_cmdline(writer_name='xml',
                description=_DESCRIPTION_PREFIX + default_description)
|
[
"egorov.yan@gmail.com"
] |
egorov.yan@gmail.com
|
9524b3b82893f96b5401f320e1e5ef79be1a59ef
|
523db0f90e8069311705173cfcfdfb2c4417ae44
|
/06_port_scan/w8ay.py
|
a95691477badbd9c9fae4083fb5c2c06b3319282
|
[] |
no_license
|
ClayAndMore/py_scanner
|
b351fbc23fdc2d797fcc527472561333423d44f7
|
b21c1ae9ae8d9a6dc32841ec62bcd7cc40e0531f
|
refs/heads/master
| 2020-06-05T13:20:27.787920
| 2019-06-19T02:45:15
| 2019-06-19T02:45:15
| 192,449,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
w8ayScan -- entry point tying together port scanning, web CMS
fingerprinting and crawling for a single target site.

Name:w8ayScan
Author:w8ay
Copyright (c) 2017
'''
import sys
from lib.core.Spider import SpiderMain
from lib.core import webcms, common, PortScan

# Python 2 only: force the default string encoding to UTF-8 so mixed
# str/unicode operations on scraped pages don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')


def main():
    """Run port scan, CMS fingerprinting and crawl against the target."""
    # Target site; NOTE(review): hard-coded — presumably intended to be
    # replaced or turned into a CLI argument. TODO confirm.
    root = "https://shiyanlou.com"
    threadNum = 10  # worker-thread count passed to the scanners below
    # Resolve the hostname once and port-scan the resulting IP.
    ip = common.gethostbyname(root)
    print "IP:",ip
    print "Start Port Scan:"
    pp = PortScan.PortScan(ip)
    pp.work()
    # webcms: fingerprint the CMS running on the target site.
    ww = webcms.webcms(root,threadNum)
    ww.run()
    # spider: crawl the site starting from the root URL.
    w8 = SpiderMain(root,threadNum)
    w8.craw()


if __name__ == '__main__':
    main()
|
[
"wangyu1@antiy.com"
] |
wangyu1@antiy.com
|
e6acc1a14b714638e4d8eb6b3210b8ad4b35a3c2
|
37069009dd428ce59819ffea2fcffc07dda6e712
|
/django_analyze/migrations/0068_auto__add_field_genotype_max_memory_usage.py
|
550ac7b81c79b27de932d2c0ecb1788805c93c03
|
[] |
no_license
|
chrisspen/django-analyze
|
829f560d7c5f2fb1c19fc07bc77cb1a83238e696
|
421ee35235f76ff8657f7befe5212acd7ccf3989
|
refs/heads/master
| 2020-04-28T15:42:51.773823
| 2015-04-18T14:50:02
| 2015-04-18T14:50:02
| 14,995,029
| 2
| 2
| null | 2014-07-07T12:39:22
| 2013-12-06T22:26:29
|
Python
|
UTF-8
|
Python
| false
| false
| 18,209
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add the nullable `max_memory_usage` column to `django_analyze_genotype`.

    Auto-generated South schema migration; the frozen `models` dict below is
    a snapshot of the app's models at generation time and is not hand-edited.
    """

    def forwards(self, orm):
        """Apply the migration: add the new column."""
        # Adding field 'Genotype.max_memory_usage'
        db.add_column(u'django_analyze_genotype', 'max_memory_usage',
                      self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop the column again."""
        # Deleting field 'Genotype.max_memory_usage'
        db.delete_column(u'django_analyze_genotype', 'max_memory_usage')

    # Frozen ORM snapshot South uses to build the `orm` object above.
    models = {
        'django_analyze.epoche': {
            'Meta': {'ordering': "('genome', '-index')", 'unique_together': "(('genome', 'index'),)", 'object_name': 'Epoche'},
            'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'epoches'", 'to': "orm['django_analyze.Genome']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'max_fitness': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'mean_fitness': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'min_fitness': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'oldest_epoche_of_creation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'django_analyze.gene': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('genome', 'name'),)", 'object_name': 'Gene'},
            'coverage_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'default': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'exploration_priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'genes'", 'to': "orm['django_analyze.Genome']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_increment': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'max_value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'max_value_observed': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'min_value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'min_value_observed': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'mutation_weight': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'django_analyze.genedependency': {
            'Meta': {'unique_together': "(('gene', 'dependee_gene', 'dependee_value'),)", 'object_name': 'GeneDependency'},
            'dependee_gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependents'", 'to': "orm['django_analyze.Gene']"}),
            'dependee_value': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependencies'", 'to': "orm['django_analyze.Gene']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'positive': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'django_analyze.genestatistics': {
            'Meta': {'ordering': "('genome', 'gene', '-mean_fitness')", 'object_name': 'GeneStatistics', 'managed': 'False'},
            'gene': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_analyze.Gene']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'gene_id'"}),
            'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gene_statistics'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'genome_id'", 'to': "orm['django_analyze.Genome']"}),
            'genotype_count': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'primary_key': 'True'}),
            'max_fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mean_fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'min_fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
        },
        'django_analyze.genome': {
            'Meta': {'object_name': 'Genome'},
            '_epoche': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'current_genome'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['django_analyze.Epoche']"}),
            'delete_inferiors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'elite_ratio': ('django.db.models.fields.FloatField', [], {'default': '0.1'}),
            'epoche': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'epoche_stall': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
            'epoches_since_improvement': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'error_report': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'evaluating_part': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'evaluation_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300'}),
            'evaluator': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'evolution_start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'evolving': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'max_species': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
            'maximum_evaluated_population': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1000'}),
            'maximum_population': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
            'min_fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mutation_rate': ('django.db.models.fields.FloatField', [], {'default': '0.1'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'production_at_best': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'production_evaluation_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'production_genotype': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'production_genomes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['django_analyze.Genotype']"}),
            'production_genotype_auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ratio_evaluated': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        'django_analyze.genotype': {
            'Meta': {'ordering': "('-fitness',)", 'unique_together': "(('genome', 'fingerprint'),)", 'object_name': 'Genotype', 'index_together': "(('valid', 'fresh', 'fitness'), ('genome', 'fresh'))"},
            'accuracy': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'complete_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'complete_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'epoche': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'genotypes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['django_analyze.Epoche']"}),
            'epoche_of_creation': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'epoche_of_evaluation': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'evaluating': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'evaluating_pid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'fingerprint': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '700', 'null': 'True', 'db_column': "'fingerprint'", 'blank': 'True'}),
            'fingerprint_fresh': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'fitness_evaluation_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'fitness_evaluation_datetime_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'fresh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'gene_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'generation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'genotypes'", 'to': "orm['django_analyze.Genome']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immortal': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'max_memory_usage': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'mean_absolute_error': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'mean_evaluation_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mean_memory_usage': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'memory_usage_samples': ('picklefield.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
            'ontime_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'ontime_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'production_complete_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'production_complete_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'production_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'production_evaluating': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'production_evaluating_pid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'production_evaluation_end_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'production_evaluation_start_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'production_fresh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'production_ontime_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'production_ontime_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'production_success_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'production_success_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'production_total_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'production_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'species': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'genotypes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['django_analyze.Species']"}),
            'success_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'success_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'total_evaluation_seconds': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'total_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'})
        },
        'django_analyze.genotypegene': {
            'Meta': {'ordering': "('gene__name',)", 'unique_together': "(('genotype', 'gene'),)", 'object_name': 'GenotypeGene'},
            '_value': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'db_column': "'value'"}),
            '_value_genome': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_analyze.Genome']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gene_values'", 'to': "orm['django_analyze.Gene']"}),
            'genotype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'genes'", 'to': "orm['django_analyze.Genotype']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'django_analyze.genotypegeneillegal': {
            'Meta': {'object_name': 'GenotypeGeneIllegal', 'managed': 'False'},
            'gene_value': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_analyze.GenotypeGene']", 'on_delete': 'models.DO_NOTHING', 'primary_key': 'True', 'db_column': "'illegal_genotypegene_id'"}),
            'genotype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'illegal_gene_values'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'illegal_genotype_id'", 'to': "orm['django_analyze.Genotype']"}),
            'illegal_gene_name': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
        },
        u'django_analyze.genotypegenemissing': {
            'Meta': {'object_name': 'GenotypeGeneMissing', 'managed': 'False'},
            'default': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'gene': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_analyze.Gene']", 'on_delete': 'models.DO_NOTHING', 'primary_key': 'True', 'db_column': "'gene_id'"}),
            'gene_name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'genotype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missing_gene_values'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'genotype_id'", 'to': "orm['django_analyze.Genotype']"})
        },
        'django_analyze.species': {
            'Meta': {'ordering': "('genome', 'index')", 'unique_together': "(('genome', 'index'),)", 'object_name': 'Species', 'index_together': "(('genome', 'index'),)"},
            'centroid': ('picklefield.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
            'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'species'", 'to': "orm['django_analyze.Genome']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'population': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['django_analyze']
|
[
"chris@coronis"
] |
chris@coronis
|
25bd69a2f916412574ed02402bb69fe8bb639fc1
|
a1bfa15fdb28c2eb4f46c6a694dd310e0a174846
|
/jpg2mojo.py
|
c128a00be0cb268eea657c795fa607d5b2657c2a
|
[] |
no_license
|
Rhoana/MojoToolkit
|
2971f6634adbcf40a5b8658b29de7fb6215498c2
|
c64e6d0c266dbb61105a8cadda16db7a2f76e0eb
|
refs/heads/master
| 2020-12-21T12:06:05.149710
| 2017-09-18T16:42:31
| 2017-09-18T16:42:31
| 73,499,035
| 0
| 1
| null | 2016-11-11T17:49:48
| 2016-11-11T17:49:47
| null |
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
#!/usr/bin/python
# Stack a folder of JPEG slices into a Mojo volume, either as a plain image
# stack or (with -c / -o) as a color-encoded segmentation.
import os
import cv2
import glob
import argparse
import numpy as np
from toMojo.np2imgo import Imgo
from toMojo.np2sego import Sego

# CLI help strings; NOTE(review): `help` shadows the built-in of the same name.
help = {
    'out': 'output mojo parent (default mojo)',
    'jpg2mojo': 'Stack all jpgs into a mojo folder!',
    'jpgs': 'input folder with all jpgs (default jpgs)',
    't': 'datatype for output file (default uint8)',
    'c': '-c enables -t uint32 (and default -o bgr)',
    'o': 'Little Endian channel order as rgba,bgr (default none)',
}
paths = {}
stack = {}
# Channel letter -> plane index used when packing color pixels into integers.
# NOTE(review): cv2.imread returns planes in BGR order, so 'r' -> 0 selects
# the blue plane — confirm this mapping is intentional.
rgba = {
    'r': 0,
    'g': 1,
    'b': 2,
    'a': 3
}

parser = argparse.ArgumentParser(description=help['jpg2mojo'])
parser.add_argument('-t', metavar='string', default='uint8', help=help['t'])
parser.add_argument('-o', metavar='string', default='', help=help['o'])
parser.add_argument('jpgs', default='jpgs', nargs='?', help=help['jpgs'])
parser.add_argument('out', default='mojo', nargs='?', help=help['out'])
parser.add_argument('-c', help=help['c'], action='store_true')
# attain all arguments
args = vars(parser.parse_args())
for key in ['jpgs', 'out']:
    # Expand ~ and resolve to absolute paths for input and output folders.
    paths[key] = os.path.realpath(os.path.expanduser(args[key]))
[order, color, dtype] = [args['o'], args['c'], args['t']]
# Set color datatype: -c implies 32-bit output and a default channel order.
if color:
    dtype = 'uint32'
    order = order or 'bgr'
# Resolve the dtype name string to the actual numpy type object.
dtype = getattr(np,dtype)
# read all jpgs in jpgs folder (sorted so slice order is deterministic)
search = os.path.join(paths['jpgs'],'*.jpg')
stack = sorted(glob.glob(search))
# Size input files: volume shape is (slices, height, width).
sliceShape = cv2.imread(stack[0], 0).shape
shape = (len(stack),) + sliceShape
# Open an output file: Sego (segmentation) when a channel order is given,
# otherwise Imgo (grayscale image stack).
outfile = Imgo(paths['out'])
if order:
    outfile = Sego(paths['out'])
# Add each jpg file as a slice.
# NOTE(review): loop variable `file` shadows the Python 2 built-in.
for zi, file in enumerate(stack):
    written = np.zeros(sliceShape,dtype=dtype)
    if not order:
        # Grayscale read, cast straight to the target dtype.
        written = cv2.imread(file, 0).astype(dtype)
    else:
        # pixel to integer: pack the selected channels into one integer per
        # pixel, each successive channel shifted by 8 more bits (×256).
        volume = cv2.imread(file)
        for ci, char in enumerate(order):
            colorbyte = volume[:, :, rgba[char]] * (256 ** ci)
            written = written + colorbyte
    # Write as image or segmentation
    outfile.run(written,zi)
# Write metadata to output file
outfile.save(shape)
|
[
"thejohnhoffer@coxgpu04.rc.fas.harvard.edu"
] |
thejohnhoffer@coxgpu04.rc.fas.harvard.edu
|
fd8cfb47b2d8e17dae6ea7bb6a37a38a95978a58
|
ef5f8a1d7b098391b5e5fce57edc83870204fe69
|
/albert_model/clue_classifier_utils_char_no_space.py
|
b1755d70cbfbb75c08b321f41ecb2ab40f4d9ea6
|
[
"Apache-2.0"
] |
permissive
|
guome/subchar-transformers
|
9829ded6c312adabf481c11ea25a2eaa069a1aaa
|
54c3bfb5c197946fa5a8b6ed5524b81284259613
|
refs/heads/master
| 2022-07-04T16:21:12.589815
| 2020-05-13T12:49:54
| 2020-05-13T12:49:54
| 263,630,138
| 1
| 0
| null | 2020-05-13T12:57:25
| 2020-05-13T12:57:24
| null |
UTF-8
|
Python
| false
| false
| 21,002
|
py
|
# -*- coding: utf-8 -*-
# @Author: bo.shi
# @Date: 2019-12-01 22:28:41
# @Last Modified by: bo.shi
# @Last Modified time: 2019-12-02 18:36:50
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for GLUE classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import csv
import os
import six
import tensorflow as tf
def convert_to_unicode(text):
    """Convert `text` to Unicode (if it's not already), assuming utf-8 input."""
    # Guard-clause form of the original Py3/Py2 branch ladder.
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the raw fields of one example.

        Args:
          guid: Unique id for the example.
          text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
          text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
          label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.label = label
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
class PaddingInputExample(object):
    """Fake example so the num input examples is a multiple of the batch size.

    When running eval/predict on the TPU, we need to pad the number of examples
    to be a multiple of the batch size, because the TPU requires a fixed batch
    size. The alternative is to drop the last batch, which is bad because it means
    the entire output data won't be generated.

    We use this class instead of `None` because treating `None` as padding
    batches could cause silent errors.
    """
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def __init__(self, args):
        # Runtime configuration object consulted by subclasses (e.g.
        # do_lower_case, max_sent_length).
        self.args = args

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, delimiter="\t", quotechar=None):
        """Reads a delimiter-separated value file into a list of rows."""
        with tf.gfile.Open(input_file, "r") as f:
            return list(csv.reader(f, delimiter=delimiter, quotechar=quotechar))

    @classmethod
    def _read_txt(cls, input_file):
        """Reads a text file whose fields are separated by "_!_"."""
        with tf.gfile.Open(input_file, "r") as f:
            return [line.strip().split("_!_") for line in f.readlines()]

    @classmethod
    def _read_json(cls, input_file):
        """Reads a file containing one JSON object per line."""
        with tf.gfile.Open(input_file, "r") as f:
            return [json.loads(line.strip()) for line in f.readlines()]
class XnliProcessor(DataProcessor):
    """Processor for the XNLI data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "train.json"))
        return self._create_examples(records, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "dev.json"))
        return self._create_examples(records, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "test.json"))
        return self._create_examples(records, "test")

    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed JSON records."""
        examples = []
        for idx, record in enumerate(lines):
            if set_type != 'test':
                label = convert_to_unicode(record['label'])
            else:
                # Test data is unlabeled; use a fixed placeholder class.
                label = 'contradiction'
            examples.append(InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=convert_to_unicode(record['premise']),
                text_b=convert_to_unicode(record['hypo']),
                label=label))
        return examples

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
class TnewsProcessor(DataProcessor):
    """Processor for the TNEWS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "train.json"))
        return self._create_examples(records, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "dev.json"))
        return self._create_examples(records, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "test.json"))
        return self._create_examples(records, "test")

    def get_labels(self):
        """See base class."""
        # Label ids are "100".."116" with "105" and "111" absent.
        return [str(100 + i) for i in range(17) if i not in (5, 11)]

    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed JSON records."""
        examples = []
        for idx, record in enumerate(lines):
            sentence = record['sentence'].strip()
            # Optional truncation / lower-casing, driven by the args object.
            if hasattr(self.args, "max_sent_length"):
                sentence = sentence[: self.args.max_sent_length]
            if self.args.do_lower_case:
                sentence = sentence.lower()
            if set_type != 'test':
                label = convert_to_unicode(record['label'])
            else:
                # Placeholder label for the unlabeled test split.
                label = "100"
            examples.append(InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=convert_to_unicode(sentence),
                text_b=None,
                label=label))
        return examples
class iFLYTEKDataProcessor(DataProcessor):
    """Processor for the iFLYTEK data set."""

    def __init__(self, args):
        # Base class already stores args; the extra assignment mirrors the
        # original implementation.
        super(iFLYTEKDataProcessor, self).__init__(args)
        self.args = args

    def get_train_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "train.json"))
        return self._create_examples(records, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "dev.json"))
        return self._create_examples(records, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "test.json"))
        return self._create_examples(records, "test")

    def get_labels(self):
        """See base class."""
        return [str(i) for i in range(119)]

    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed JSON records."""
        examples = []
        for idx, record in enumerate(lines):
            sentence = record['sentence'].strip()
            # Optional truncation / lower-casing, driven by the args object.
            if hasattr(self.args, "max_sent_length"):
                sentence = sentence[: self.args.max_sent_length]
            if self.args.do_lower_case:
                sentence = sentence.lower()
            text_a = convert_to_unicode(sentence)
            text_b = None
            if set_type != 'test':
                label = convert_to_unicode(record['label'])
            else:
                # Placeholder label for the unlabeled test split.
                label = "0"
            examples.append(InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=text_a, text_b=text_b, label=label))
            if idx < 5:
                # Echo the first few examples for a quick sanity check.
                print(text_a)
                print(text_b)
        return examples
class ChnSentiCorpDataProcessor(DataProcessor):
"""Processor for the iFLYTEKData data set (GLUE version)."""
def __init__(self, args):
super(ChnSentiCorpDataProcessor, self).__init__(args)
self.args = args
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "test.json")), "test")
def get_labels(self):
"""See base class."""
labels = []
for i in range(2):
labels.append(str(i))
return labels
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
# dict_char2comp = json.load(open("./resources/char2comp.json", "r"))
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line['sentence'].strip()
if hasattr(self.args, "max_sent_length"):
text_a = text_a[: self.args.max_sent_length]
if self.args.do_lower_case:
text_a = text_a.lower()
# print(text_a)
text_a = convert_to_unicode(text_a)
text_b = None
label = convert_to_unicode(line['label']) if set_type != 'test' else "0"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
if i < 5:
print(text_a)
print(text_b)
return examples
class LCQMCProcessor(DataProcessor):
"""Processor for the internal data set. sentence pair classification"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "test.json")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line['sentence1'].strip()
if hasattr(self.args, "max_sent_length"):
text_a = text_a[: self.args.max_sent_length]
if self.args.do_lower_case:
text_a = text_a.lower()
text_a = convert_to_unicode(text_a)
text_b = line['sentence2'].strip()
if hasattr(self.args, "max_sent_length"):
text_b = text_b[: self.args.max_sent_length]
if self.args.do_lower_case:
text_b = text_b.lower()
text_b = convert_to_unicode(text_b)
label = convert_to_unicode(line['label']) if set_type != 'test' else '0'
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
if i < 5:
print(text_a)
print(text_b)
return examples
class AFQMCProcessor(DataProcessor):
"""Processor for the internal data set. sentence pair classification"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "test.json")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = convert_to_unicode(line['sentence1'])
text_b = convert_to_unicode(line['sentence2'])
label = convert_to_unicode(line['label']) if set_type != 'test' else '0'
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class CMNLIProcessor(DataProcessor):
"""Processor for the CMNLI data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples_json(os.path.join(data_dir, "train.json"), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples_json(os.path.join(data_dir, "dev.json"), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples_json(os.path.join(data_dir, "test.json"), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples_json(self, file_name, set_type):
"""Creates examples for the training and dev sets."""
examples = []
lines = tf.gfile.Open(file_name, "r")
index = 0
for line in lines:
line_obj = json.loads(line)
index = index + 1
guid = "%s-%s" % (set_type, index)
text_a = convert_to_unicode(line_obj["sentence1"])
text_b = convert_to_unicode(line_obj["sentence2"])
label = convert_to_unicode(line_obj["label"]) if set_type != 'test' else 'neutral'
if label != "-":
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class CslProcessor(DataProcessor):
"""Processor for the CSL data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "test.json")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = convert_to_unicode(" ".join(line['keyword']))
text_b = convert_to_unicode(line['abst'])
label = convert_to_unicode(line['label']) if set_type != 'test' else '0'
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WSCProcessor(DataProcessor):
"""Processor for the internal data set. sentence pair classification"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "test.json")), "test")
def get_labels(self):
"""See base class."""
return ["true", "false"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = convert_to_unicode(line['text'])
text_a_list = list(text_a)
target = line['target']
query = target['span1_text']
query_idx = target['span1_index']
pronoun = target['span2_text']
pronoun_idx = target['span2_index']
assert text_a[pronoun_idx: (pronoun_idx + len(pronoun))
] == pronoun, "pronoun: {}".format(pronoun)
assert text_a[query_idx: (query_idx + len(query))] == query, "query: {}".format(query)
if pronoun_idx > query_idx:
text_a_list.insert(query_idx, "_")
text_a_list.insert(query_idx + len(query) + 1, "_")
text_a_list.insert(pronoun_idx + 2, "[")
text_a_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]")
else:
text_a_list.insert(pronoun_idx, "[")
text_a_list.insert(pronoun_idx + len(pronoun) + 1, "]")
text_a_list.insert(query_idx + 2, "_")
text_a_list.insert(query_idx + len(query) + 2 + 1, "_")
text_a = "".join(text_a_list)
if set_type == "test":
label = "true"
else:
label = line['label']
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class COPAProcessor(DataProcessor):
"""Processor for the internal data set. sentence pair classification"""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
# dev_0827.tsv
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "test.json")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
@classmethod
def _create_examples_one(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid1 = "%s-%s" % (set_type, i)
# try:
if line['question'] == 'cause':
text_a = convert_to_unicode(line['premise'] + '原因是什么呢?' + line['choice0'])
text_b = convert_to_unicode(line['premise'] + '原因是什么呢?' + line['choice1'])
else:
text_a = convert_to_unicode(line['premise'] + '造成了什么影响呢?' + line['choice0'])
text_b = convert_to_unicode(line['premise'] + '造成了什么影响呢?' + line['choice1'])
label = convert_to_unicode(str(1 if line['label'] == 0 else 0)) if set_type != 'test' else '0'
examples.append(
InputExample(guid=guid1, text_a=text_a, text_b=text_b, label=label))
# except Exception as e:
# print('###error.i:',e, i, line)
return examples
@classmethod
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
i = 2 * i
guid1 = "%s-%s" % (set_type, i)
guid2 = "%s-%s" % (set_type, i + 1)
# try:
premise = convert_to_unicode(line['premise'])
choice0 = convert_to_unicode(line['choice0'])
label = convert_to_unicode(str(1 if line['label'] == 0 else 0)) if set_type != 'test' else '0'
#text_a2 = convert_to_unicode(line['premise'])
choice1 = convert_to_unicode(line['choice1'])
label2 = convert_to_unicode(
str(0 if line['label'] == 0 else 1)) if set_type != 'test' else '0'
if line['question'] == 'effect':
text_a = premise
text_b = choice0
text_a2 = premise
text_b2 = choice1
elif line['question'] == 'cause':
text_a = choice0
text_b = premise
text_a2 = choice1
text_b2 = premise
else:
print('wrong format!!')
return None
examples.append(
InputExample(guid=guid1, text_a=text_a, text_b=text_b, label=label))
examples.append(
InputExample(guid=guid2, text_a=text_a2, text_b=text_b2, label=label2))
# except Exception as e:
# print('###error.i:',e, i, line)
return examples
|
[
"michael_wzhu91@163.com"
] |
michael_wzhu91@163.com
|
c67c4e7d3e4988859508f7b101f56a874364ee59
|
1add9e012d8b61f17ca992f0c157ee0a5d1b7860
|
/env/bin/pyhtmlizer
|
5689466410d24f4b1c2200f96904416ace10fb6b
|
[] |
no_license
|
dims337/chat-app
|
3b0cd7dd4c2b22ba71219fba181ae35e91e7b3db
|
042594f1f5785feeb3a06f7c204fa726ae2b2352
|
refs/heads/master
| 2020-04-11T17:09:17.406649
| 2018-12-21T04:02:46
| 2018-12-21T04:02:46
| 161,949,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
#!/Users/dimsontenke/Desktop/chat_app/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twisted.scripts.htmlizer import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"dimsontenke@dimsons-MacBook-Pro.local"
] |
dimsontenke@dimsons-MacBook-Pro.local
|
|
fe2dc08589eec0c27d13129f015869399ee3dae0
|
4bb72ba6ee6ed3ad887b799b27434946a92ff9d2
|
/algo/CryptoSystem.py
|
07c2ccc7e3f2ab9244ec99bd40722b77700c684c
|
[] |
no_license
|
Libi92/ECCBDD
|
33de3d9b2a91d671304f3e5bc6b134e7046d55f8
|
baa7b2c9177c6110e1cfa57bea6c936b30a4985a
|
refs/heads/master
| 2020-03-22T10:04:52.317899
| 2018-07-08T07:24:56
| 2018-07-08T07:24:56
| 139,879,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,516
|
py
|
import datetime
from functools import reduce
from algo import curve
from algo.ecc import EC
from algo.logger import Logger
from algo.point import Point
PXY_MATRIX_SIZE = 5
class CryptoSystem:
def __init__(self, g, ec):
self.g = g
self.ec = ec
def bit_invert(self, b):
inv = map(lambda x: '0' if x == '1' else '1', b)
return reduce(lambda x, y: x + y, inv)
def constructPxPyMetrix(self, decimal_list):
pxy_list = []
list_5 = []
for i in range(len(decimal_list)):
if i != 0 and i % 5 == 0:
pxy_list.append(list_5)
list_5 = []
py = i
px = decimal_list[i] + i
list_5.append(Point(px, py))
pxy_list.append(list_5)
return pxy_list
def get_gMatrix(self):
return [self.ec.mul(self.g, i) for i in range(1, 6)]
def add(self, a, b):
return [self.ec.add(m, n) for m, n in zip(a, b)]
def sub(self, a, b):
return [self.ec.sub(m, n) for m, n in zip(a, b)]
def matrixShiftAdd(self, a_list, b):
c_list = []
for a in a_list:
c = self.add(a, b)
b.append(b.pop(0))
c_list.append(c)
return c_list
def matrixShiftSub(self, a_list, b):
c_list = []
for a in a_list:
c = self.sub(a, b)
b.append(b.pop(0))
c_list.append(c)
return c_list
def print_matrix(self, matrix):
for x in matrix:
Logger.log(str(x.x) + ', ' + str(x.y))
def extractPx(self, pxy_list):
extracted = []
for list_5 in pxy_list:
ext = map(lambda p: Point(p.x - p.y, p.y), list_5)
extracted.append(list(ext))
return extracted
def encode(self, message):
start_time = datetime.datetime.now().microsecond
eq_ascii = [ord(x) for x in message]
Logger.log('ascii: ', eq_ascii)
bin_array = [format(x, '08b') for x in eq_ascii]
num_append = len(bin_array) % PXY_MATRIX_SIZE
if num_append != 0:
num_append = PXY_MATRIX_SIZE - num_append
for i in range(num_append):
bin_array.append(format(0, '08b'))
Logger.log('binary: ', bin_array)
inv_array = [self.bit_invert(b) for b in bin_array]
Logger.log('inverse binary: ', inv_array)
decimal_arr = [int(x, 2) for x in inv_array]
Logger.log('decimal: ', decimal_arr)
pxy_matrix = self.constructPxPyMetrix(decimal_arr)
Logger.log('PxPy (5x2)matrix: ', pxy_matrix)
g_matrix = self.get_gMatrix()
Logger.log('(5x2)g matrix: ')
self.print_matrix(g_matrix)
mapped_list = self.matrixShiftAdd(pxy_matrix, g_matrix)
Logger.log('encoded matrix: ')
for x in mapped_list: self.print_matrix(x)
end_time = datetime.datetime.now().microsecond
execution_time = end_time - start_time
Logger.log("Encoding time: {} μs".format(execution_time))
return mapped_list
def decode(self, encoded_list):
start_time = datetime.datetime.now().microsecond
g_matrix = self.get_gMatrix()
subs_matrix = self.matrixShiftSub(encoded_list, g_matrix)
Logger.log('Subtracted Matrix: ')
for x in subs_matrix: self.print_matrix(x)
extracted = self.extractPx(subs_matrix)
Logger.log('Px Extracted: ')
for x in extracted: self.print_matrix(x)
temp = []
for x in extracted: temp.extend(x)
extracted = temp
bin_array = [self.frmt(x) for x in extracted]
Logger.log(bin_array)
inv_bits = [self.bit_invert(b) for b in bin_array]
decimal_arr = [int(x, 2) for x in inv_bits]
Logger.log(decimal_arr)
chars = [chr(d) for d in decimal_arr]
plain_text = reduce(lambda x, y: x + y, chars)
end_time = datetime.datetime.now().microsecond
execution_time = end_time - start_time
Logger.log("Decoding time: {} μs".format(execution_time))
return plain_text
def frmt(self, X):
Logger.log(X, display=True)
return format(int(X.x), '08b')
if __name__ == '__main__':
plain_text = input("Enter your message: ")
curve = curve.P256
g = Point(curve.gy, curve.gy)
ec = EC(curve.a, curve.b, curve.p)
crypto = CryptoSystem(g, ec)
encoded = crypto.encode(plain_text)
decoded = crypto.decode(encoded)
print(decoded)
|
[
"libinbabup@hotmail.com"
] |
libinbabup@hotmail.com
|
a2dd70fc69879a4648eb45dac4bea8dae1233790
|
d83118503614bb83ad8edb72dda7f449a1226f8b
|
/src/dprj/platinumegg/app/cabaret/views/application/effect.py
|
40b158532e97911174a83a5334610da7b7a1310a
|
[] |
no_license
|
hitandaway100/caba
|
686fe4390e182e158cd9714c90024a082deb8c69
|
492bf477ac00c380f2b2758c86b46aa7e58bbad9
|
refs/heads/master
| 2021-08-23T05:59:28.910129
| 2017-12-03T19:03:15
| 2017-12-03T19:03:15
| 112,512,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54,649
|
py
|
# -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.apphandler import AppHandler
from platinumegg.app.cabaret.util.api import BackendApi
import settings
from platinumegg.app.cabaret.util.url_maker import UrlMaker
from platinumegg.lib.pljson import Json
import settings_sub
from urlparse import urlparse
import urllib
from defines import Defines
from platinumegg.app.cabaret.util.scout import ScoutEventNone
from platinumegg.app.cabaret.util.card import CardUtil
from platinumegg.lib.opensocial.util import OSAUtil
from platinumegg.app.cabaret.util.present import PresentSet
import datetime
from platinumegg.app.cabaret.util.datetime_util import DateTimeUtil
from platinumegg.app.cabaret.util.rediscache import LoginBonusTimeLimitedAnimationSet
from platinumegg.app.cabaret.views.application.loginbonus.base import LoginBonusHandler
class Handler(AppHandler):
"""演出のパラメータを取得.
"""
@classmethod
def get_default_status(cls):
"""デフォルトで返すHttpStatus.
"""
return 500
def processError(self, error_message):
self.response.set_status(500)
self.response.end()
def __sendErrorResponse(self, status):
self.response.set_status(status)
self.response.end()
def checkUser(self):
pass
def check_process_pre(self):
if settings_sub.IS_LOCAL:
return True
elif self.osa_util.is_dbg_user:
pass
elif not settings_sub.IS_DEV and self.osa_util.viewer_id in ('10814964', '11404810', '39121', '12852359', '1412759', '11830507', '11467913', '10128761', '11868885', '434009', '23427632', '10918839', '21655464', '17279084', '24500573', '28774432', '11739356','2588824','28978730','20174324'):
pass
elif not self.checkMaintenance():
return False
return True
def process(self):
args = self.getUrlArgs('/effect/')
ope = args.get(0)
f = getattr(self, 'proc_%s' % ope, None)
if f is None:
self.__sendErrorResponse(404)
return
f(args)
def writeResponseBody(self, params):
if self.isUsePCEffect():
body = Json.encode({
'flashVars' : self.makeFlashVars(params)
})
else:
body = Json.encode(params)
self.response.set_header('Content-Type', 'plain/text')
self.response.set_status(200)
self.response.send(body)
def proc_battle(self, args):
"""バトル演出.
"""
model_mgr = self.getModelMgr()
v_player = self.getViewerPlayer(True)
if v_player is None:
# 結果が存在しない.
self.osa_util.logger.error('Player is None. opensocial_viewer_id=%s' % self.osa_util.viewer_id)
self.__sendErrorResponse(404)
return
# 結果データ.
battleresult = BackendApi.get_battleresult(model_mgr, v_player.id, using=settings.DB_READONLY)
if battleresult is None or not battleresult.anim:
# 結果が存在しない.
self.osa_util.logger.error('result is None')
self.__sendErrorResponse(404)
return
# 演出用パラメータ.
animationdata = battleresult.anim
params = animationdata.to_animation_data(self)
if BackendApi.get_current_battleevent_master(model_mgr, using=settings.DB_READONLY):
params['feverFlag'] = 0 # イベントでは表示しない.
urldata = urlparse(self.url_cgi)
url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
url = url + UrlMaker.battleresultanim()
url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
params['backUrl'] = url
self.writeResponseBody(params)
def proc_battleevent(self, args):
"""イベントバトル演出.
"""
model_mgr = self.getModelMgr()
v_player = self.getViewerPlayer(True)
if v_player is None:
# 結果が存在しない.
self.osa_util.logger.error('Player is None. opensocial_viewer_id=%s' % self.osa_util.viewer_id)
self.__sendErrorResponse(404)
return
uid = v_player.id
try:
eventid = int(args.get(1))
except:
# 引数がおかしい.
self.osa_util.logger.error('Invalid arguments')
self.__sendErrorResponse(400)
return
# 結果データ.
battleresult = BackendApi.get_battleevent_battleresult(model_mgr, eventid, uid, using=settings.DB_READONLY)
if battleresult is None or not battleresult.anim:
# 結果が存在しない.
self.osa_util.logger.error('result is None')
self.__sendErrorResponse(404)
return
# 演出用パラメータ.
animationdata = battleresult.anim
params = animationdata.to_animation_data(self)
params['feverFlag'] = 0 # イベントでは表示しない.
rarity = args.getInt(2)
piecenumber = args.getInt(3)
is_complete = args.getInt(4)
urldata = urlparse(self.url_cgi)
url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
url = url + UrlMaker.battleevent_battleresultanim(eventid, rarity, piecenumber, is_complete)
url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
params['backUrl'] = url
self.writeResponseBody(params)
def proc_scout(self, args):
"""スカウト演出.
"""
try:
scoutid = int(args.get(1))
scoutkey = urllib.unquote(args.get(2))
except:
# 引数がおかしい.
self.osa_util.logger.error('Invalid arguments')
self.__sendErrorResponse(400)
return
v_player = self.getViewerPlayer()
model_mgr = self.getModelMgr()
uid = v_player.id
using = settings.DB_READONLY
# 進行情報.
playdata = BackendApi.get_scoutprogress(model_mgr, uid, [scoutid], using=using).get(scoutid, None)
if playdata is None or playdata.alreadykey != scoutkey:
# DBからとり直すべき.
playdata = BackendApi.get_scoutprogress(model_mgr, uid, [scoutid], using=settings.DB_DEFAULT, reflesh=True).get(scoutid, None)
if playdata is None or playdata.alreadykey != scoutkey:
self.osa_util.logger.error('Not Found')
self.__sendErrorResponse(404)
return
eventlist = playdata.result.get('event', [])
if eventlist:
# ここで必要なのははじめの1件.
event = eventlist[0]
else:
# なにも起きなかった.
event = ScoutEventNone.create()
eventKind = event.get_type()
backUrl = None
# イベント毎の設定.
if eventKind == Defines.ScoutEventType.NONE:
# そのままもう一回.
backUrl = UrlMaker.scoutdo(scoutid, playdata.confirmkey)
elif eventKind in (Defines.ScoutEventType.LEVELUP, Defines.ScoutEventType.COMPLETE, Defines.ScoutEventType.HAPPENING):
# 結果表示へ.
backUrl = UrlMaker.scoutresultanim(scoutid, scoutkey, 0)
# 結果表示へ.
backUrl = backUrl or UrlMaker.scoutresult(scoutid, scoutkey)
# 演出のパラメータ.
scoutmaster = BackendApi.get_scouts(model_mgr, [scoutid], using=using)[0]
resultlist = playdata.result.get('result', [])
params = BackendApi.make_scoutanim_params(self, scoutmaster, eventlist, resultlist)
if params is None:
self.osa_util.logger.error('Not Found')
self.__sendErrorResponse(404)
return
urldata = urlparse(self.url_cgi)
url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
url = self.osa_util.makeLinkUrl(self.addTimeStamp(url + backUrl))
params['backUrl'] = url
self.writeResponseBody(params)
def __make_eventscoutanim_params(self, stagemaster, playdata, backUrl):
"""スカウトイベント演出.
"""
eventlist = playdata.result.get('event', [])
# 演出のパラメータ.
resultlist = playdata.result.get('result', [])
params = BackendApi.make_scoutanim_params(self, stagemaster, eventlist, resultlist, feveretime=getattr(playdata, 'feveretime', None))
if params is None:
self.osa_util.logger.error('Not Found')
self.__sendErrorResponse(404)
return
urldata = urlparse(self.url_cgi)
url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
url = self.osa_util.makeLinkUrl(self.addTimeStamp(url + backUrl))
params['backUrl'] = url
return params
def proc_scoutevent(self, args):
"""スカウトイベント演出.
"""
try:
stageid = int(args.get(1))
scoutkey = urllib.unquote(args.get(2))
except:
# 引数がおかしい.
self.osa_util.logger.error('Invalid arguments')
self.__sendErrorResponse(400)
return
v_player = self.getViewerPlayer()
model_mgr = self.getModelMgr()
using = settings.DB_READONLY
eventmaster = BackendApi.get_current_scouteventmaster(model_mgr, using=using)
if eventmaster is None:
# 引数がおかしい.
self.osa_util.logger.error('Event Not Found')
self.__sendErrorResponse(404)
return
mid = eventmaster.id
# 進行情報.
playdata = BackendApi.get_event_playdata(model_mgr, mid, v_player.id, using)
if playdata is None or playdata.alreadykey != scoutkey:
self.osa_util.logger.error('Not Found')
self.__sendErrorResponse(404)
return
eventlist = playdata.result.get('event', [])
if eventlist:
# ここで必要なのははじめの1件.
event = eventlist[0]
else:
# なにも起きなかった.
event = ScoutEventNone.create()
eventKind = event.get_type()
backUrl = None
# イベント毎の設定.
if eventKind == Defines.ScoutEventType.NONE:
# そのままもう一回.
backUrl = UrlMaker.scouteventdo(stageid, playdata.confirmkey)
else:
if playdata.result.get('feverstart'):
# フィーバー演出
backUrl = UrlMaker.scouteventfever(stageid, scoutkey)
elif playdata.result.get('lovetime_start'):
# 逢引タイム演出.
backUrl = UrlMaker.scouteventlovetime(stageid, scoutkey)
elif eventKind in (Defines.ScoutEventType.LEVELUP, Defines.ScoutEventType.COMPLETE, Defines.ScoutEventType.HAPPENING):
# 結果表示へ.
backUrl = UrlMaker.scouteventresultanim(stageid, scoutkey, 0)
# 結果表示へ.
backUrl = backUrl or UrlMaker.scouteventresult(stageid, scoutkey)
stagemaster = BackendApi.get_event_stage(model_mgr, stageid, using=using)
params = self.__make_eventscoutanim_params(stagemaster, playdata, backUrl)
if self.response.isEnd:
return
self.writeResponseBody(params)
def proc_raideventscout(self, args):
"""スカウトイベント演出.
"""
try:
stageid = int(args.get(1))
scoutkey = urllib.unquote(args.get(2))
except:
# 引数がおかしい.
self.osa_util.logger.error('Invalid arguments')
self.__sendErrorResponse(400)
return
v_player = self.getViewerPlayer()
uid = v_player.id
model_mgr = self.getModelMgr()
using = settings.DB_READONLY
eventmaster = BackendApi.get_current_raideventmaster(model_mgr, using=using)
if eventmaster is None:
# 引数がおかしい.
self.osa_util.logger.error('Event Not Found')
self.__sendErrorResponse(404)
return
mid = eventmaster.id
# 進行情報.
playdata = BackendApi.get_raideventstage_playdata(model_mgr, mid, uid, using)
if playdata is None or playdata.alreadykey != scoutkey:
self.osa_util.logger.error('Not Found')
self.__sendErrorResponse(404)
return
eventlist = playdata.result.get('event', [])
if eventlist:
# ここで必要なのははじめの1件.
event = eventlist[0]
else:
# なにも起きなかった.
event = ScoutEventNone.create()
eventKind = event.get_type()
backUrl = None
# イベント毎の設定.
if eventKind == Defines.ScoutEventType.NONE:
# そのままもう一回.
backUrl = UrlMaker.raidevent_scoutdo(stageid, playdata.confirmkey)
elif eventKind in (Defines.ScoutEventType.LEVELUP, Defines.ScoutEventType.COMPLETE, Defines.ScoutEventType.HAPPENING):
# 結果表示へ.
backUrl = UrlMaker.raidevent_scoutresultanim(stageid, scoutkey, 0)
# 結果表示へ.
backUrl = backUrl or UrlMaker.raidevent_scoutresult(stageid, scoutkey)
stagemaster = BackendApi.get_raidevent_stagemaster(model_mgr, stageid, using=using)
params = self.__make_eventscoutanim_params(stagemaster, playdata, backUrl)
if self.response.isEnd:
return
self.writeResponseBody(params)
def proc_produceeventscout(self, args):
"""プロデュースイベントのスカウトイベント演出.
"""
try:
stageid = int(args.get(1))
scoutkey = urllib.unquote(args.get(2))
except:
# 引数がおかしい.
self.osa_util.logger.error('Invalid arguments')
self.__sendErrorResponse(400)
return
v_player = self.getViewerPlayer()
uid = v_player.id
model_mgr = self.getModelMgr()
using = settings.DB_READONLY
eventmaster = BackendApi.get_current_produce_event_master(model_mgr, using=using)
if eventmaster is None:
# 引数がおかしい.
self.osa_util.logger.error('Event Not Found')
self.__sendErrorResponse(404)
return
mid = eventmaster.id
# 進行情報.
playdata = BackendApi.get_raideventstage_playdata(model_mgr, mid, uid, using)
playdata = BackendApi.get_produceeventstage_playdata(model_mgr, mid, uid, using)
if playdata is None or playdata.alreadykey != scoutkey:
self.osa_util.logger.error('Not Found')
self.__sendErrorResponse(404)
return
eventlist = playdata.result.get('event', [])
if eventlist:
# ここで必要なのははじめの1件.
event = eventlist[0]
else:
# なにも起きなかった.
event = ScoutEventNone.create()
eventKind = event.get_type()
backUrl = None
# イベント毎の設定.
if eventKind == Defines.ScoutEventType.NONE:
# そのままもう一回.
backUrl = UrlMaker.produceevent_scoutdo(stageid, playdata.confirmkey)
elif eventKind in (Defines.ScoutEventType.LEVELUP, Defines.ScoutEventType.COMPLETE, Defines.ScoutEventType.HAPPENING):
# 結果表示へ.
backUrl = UrlMaker.produceevent_scoutresultanim(stageid, scoutkey, 0)
# 結果表示へ.
backUrl = backUrl or UrlMaker.produceevent_scoutresult(stageid, scoutkey)
stagemaster = BackendApi.get_produceevent_stagemaster(model_mgr, stageid, using=using)
params = self.__make_eventscoutanim_params(stagemaster, playdata, backUrl)
if self.response.isEnd:
return
self.writeResponseBody(params)
    def proc_gacha(self, args):
        """Gacha (lottery) result animation.

        args[1] = gacha master id, args[2] = URL-quoted request key,
        args[3] = optional page (paged fukubukuro results),
        args[4]/args[5] are read by specific consume types below.
        Builds per-consume-type animation parameters (card images, texts,
        rarity flags, back URL) and writes them as the response body.
        """
        CONTENT_NUM_PER_PAGE = 10
        try:
            mid = int(args.get(1))
            reqkey = urllib.unquote(args.get(2))
            page = int(args.get(3) or 0)
        except:
            # Bad arguments.
            self.osa_util.logger.error('Invalid arguments')
            self.__sendErrorResponse(400)
            return
        model_mgr = self.getModelMgr()
        using = settings.DB_READONLY
        v_player = self.getViewerPlayer()
        uid = v_player.id
        gachamaster = BackendApi.get_gachamaster(model_mgr, mid, using)
        playdata = None
        gachamasterstep = None
        if gachamaster:
            # Step gacha: resolve the master that tracks the step state.
            if gachamaster.stepsid > 0:
                if gachamaster.stepsid != gachamaster.id:
                    gachamasterstep = BackendApi.get_gachamaster(model_mgr, gachamaster.stepsid, using=using)
                    if gachamasterstep is None:
                        self.osa_util.logger.error('Not Found')
                        self.__sendErrorResponse(404)
                        return
                else:
                    gachamasterstep = gachamaster
            playdata = BackendApi.get_gachaplaydata(model_mgr, uid, [gachamaster.boxid], using=using).get(gachamaster.boxid)
        if playdata is None or not playdata.result:
            # No result to display.
            self.osa_util.logger.error('Not Found')
            self.__sendErrorResponse(404)
            return
        # Card-text format depends on the consume type.
        if gachamaster.consumetype == Defines.GachaConsumeType.RANKING:
            cardtextformat_getter = lambda master : Defines.EffectTextFormat.RANKINGGACHA_CARDTEXT
        else:
            cardtextformat_getter = lambda master : Defines.EffectTextFormat.GACHA_CARDTEXT if master.ckind == Defines.CardKind.NORMAL else Defines.EffectTextFormat.GACHA_ITEMTEXT
        sep = Defines.ANIMATION_SEPARATE_STRING
        urlsep = Defines.ANIMATION_URLSEPARATE_STRING
        newFlag = []
        rarityFlag = []
        cardText = []
        image = []
        pointlist = []
        expectation = []
        is_first = page == 0
        is_last = True
        # Cards obtained in this draw.
        resultlist = playdata.result['result'] if isinstance(playdata.result, dict) else playdata.result
        if gachamaster.consumetype in (Defines.GachaConsumeType.FUKUBUKURO, Defines.GachaConsumeType.FUKUBUKURO2016, Defines.GachaConsumeType.FUKUBUKURO2017):
            # Fukubukuro results are paged CONTENT_NUM_PER_PAGE at a time.
            page_last = int((len(resultlist) + CONTENT_NUM_PER_PAGE - 1) / CONTENT_NUM_PER_PAGE) - 1
            page = min(page, page_last)
            offset = page * CONTENT_NUM_PER_PAGE
            resultlist = resultlist[offset:(offset+CONTENT_NUM_PER_PAGE)]
            is_last = page == page_last
        if gachamaster.consumetype == Defines.GachaConsumeType.FIXEDSR:
            try:
                gachamorecast = int(args.get(5))
            except:
                self.osa_util.logger.error('Invalid arguments')
                self.__sendErrorResponse(400)
                return
            if gachamorecast == 0:
                # First pass: skip the guaranteed-rarity cards (shown later).
                resultlist = resultlist[gachamaster.rarity_fixed_num:]
        cardidlist = [data['id'] for data in resultlist]
        cardmasters = BackendApi.get_cardmasters(cardidlist, model_mgr, using=settings.DB_READONLY)
        groupidlist = [data['group'] for data in resultlist]
        groupmaster_dict = BackendApi.get_gachagroupmaster_dict(model_mgr, groupidlist, using=settings.DB_READONLY)
        # Thumbnail/text getters vary per consume type.
        rarityFlag_getter = None
        if gachamaster.consumetype == Defines.GachaConsumeType.CHRISTMAS:
            image_getter = lambda idx,master:(CardUtil.makeThumbnailUrlIcon(master) if idx < gachamaster.continuity-1 else CardUtil.makeThumbnailUrlMiddle(master))
            cardtext_getter = lambda idx,master:master.name
        elif gachamaster.consumetype in (Defines.GachaConsumeType.FUKUBUKURO, Defines.GachaConsumeType.FUKUBUKURO2016, Defines.GachaConsumeType.FUKUBUKURO2017):
            image_getter = lambda idx,master:CardUtil.makeThumbnailUrlMiddle(master)
            cardtext_getter = lambda idx,master:master.name
        elif gachamaster.consumetype == Defines.GachaConsumeType.XMAS_OMAKE:
            image_getter = lambda idx,master:CardUtil.makeThumbnailUrlIcon(master)
            cardtext_getter = lambda idx,master:master.name
        elif gachamaster.consumetype == Defines.GachaConsumeType.SCOUTEVENT and Defines.SCOUTEVENTGACHA_USE_EXCLUSIVE_USE_EFFECT:
            image_getter = lambda idx,master:CardUtil.makeThumbnailUrlMiddle(master)
            cardtext_getter = lambda idx,master:(cardtextformat_getter(master) % master.name)
        else:
            image_getter = lambda idx,master:self.makeAppLinkUrlImg(CardUtil.makeThumbnailUrlMiddle(master))
            cardtext_getter = lambda idx,master:(cardtextformat_getter(master) % master.name)
        rarityFlag_getter = rarityFlag_getter or (lambda master:'1' if Defines.Rarity.SUPERRARE <= master.rare else '0')
        # Collect per-card display data and track the best rarity drawn.
        max_rare = Defines.Rarity.NORMAL
        for idx,data in enumerate(resultlist):
            master = cardmasters[data['id']]
            groupmaster = groupmaster_dict.get(data['group'])
            newFlag.append(str(int(bool(data['is_new']))))
            cardText.append(cardtext_getter(idx, master))
            image.append(image_getter(idx, master))
            pointlist.append(str(data['point']))
            expectation.append(str(groupmaster.expectation) if groupmaster else str(Defines.RankingGachaExpect.LOW))
            rarityFlag.append(rarityFlag_getter(master))
            if max_rare < master.rare:
                max_rare = master.rare
        v_player = self.getViewerPlayer()
        # Seat-gacha information.
        seatmodels = BackendApi.get_gachaseatmodels_by_gachamaster(model_mgr, uid, gachamasterstep or gachamaster, do_get_result=False, using=settings.DB_READONLY)
        urldata = urlparse(self.url_cgi)
        urlhead = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
        if seatmodels.get('playdata'):
            # Continue to the seat animation.
            url = urlhead + UrlMaker.gachaseatanim(gachamaster.id, reqkey)
        else:
            url = urlhead + UrlMaker.gacharesult(gachamaster.id, reqkey)
        backUrl = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
        params = {
            'newFlag': sep.join(newFlag),
            'cardText' : sep.join(cardText),
            'image' : urlsep.join(image),
        }
        # Consume-type specific parameter tweaks.
        if gachamaster.consumetype == Defines.GachaConsumeType.CHRISTMAS:
            params['logoPre'] = self.url_static + 'effect/sp/v2/gachaxmas/data/'
            params['pre'] = self.url_static_img
            params['cardText'] = cardText[-1]
        elif gachamaster.consumetype == Defines.GachaConsumeType.RANKING:
            params.update({
                'point' : sep.join(pointlist),
                'expectation' : sep.join(expectation),
                'pre' : self.url_static + 'img/sp/large/gacha/ranking/rank_01/', # TODO: should be read from the DB.
                'logo_img' : 'event_logo.png',
                'logo_w_img' : 'event_logo_w.png',
            })
        elif gachamaster.consumetype == Defines.GachaConsumeType.SCOUTEVENT and Defines.SCOUTEVENTGACHA_USE_EXCLUSIVE_USE_EFFECT:
            eventmaster = BackendApi.get_current_present_scouteventmaster(model_mgr, using=settings.DB_READONLY)
            if Defines.SCOUTEVENTGACHA_FOR_VALENTINE:
                params.update({
                    'pre' : self.url_static_img,
                    'effectPre' : self.url_static + 'effect/sp/v2/gachascev/data/scev_25/',
                    'cardText' : params['cardText'].replace('が入店しました', ''), # quick hack: fixing the js/flash would be a much bigger job.
                })
            else:
                params.update({
                    'imagePre' : self.url_static_img,
                    'rarityFlag' : sep.join(rarityFlag),
                    'logoPre' : self.makeAppLinkUrlImg('event/scevent/%s/gacha/' % eventmaster.codename),
                })
        elif gachamaster.consumetype in (Defines.GachaConsumeType.FUKUBUKURO, Defines.GachaConsumeType.FUKUBUKURO2016, Defines.GachaConsumeType.FUKUBUKURO2017):
            url = None
            if is_last:
                # Last page: append the bonus ("omake") item thumbnails.
                if isinstance(playdata.result, dict) and playdata.result.get('omake'):
                    prizelist = BackendApi.get_prizelist(model_mgr, playdata.result['omake'], using=settings.DB_READONLY)
                    presentlist = BackendApi.create_present_by_prize(model_mgr, v_player.id, prizelist, 0, using=settings.DB_READONLY, do_set_save=False)
                    presentsetlist = PresentSet.presentToPresentSet(model_mgr, presentlist, using=settings.DB_READONLY)
                    thumblist = []
                    omakeindexes = []
                    for presentset in presentsetlist:
                        if presentset.present.itype in (Defines.ItemType.GOLD, Defines.ItemType.GACHA_PT):
                            num = 1
                        else:
                            num = presentset.num
                        # De-duplicate thumbnails; reference them by index.
                        if presentset.itemthumbnail in thumblist:
                            idx = thumblist.index(presentset.itemthumbnail)
                        else:
                            idx = len(thumblist)
                            thumblist.append(presentset.itemthumbnail)
                        omakeindexes.extend([str(idx)] * num)
                    if thumblist:
                        params.update({
                            'itemImage' : urlsep.join(thumblist),
                            'itemImageIdx' : sep.join(omakeindexes),
                        })
            else:
                # Not the last page: link to the next page of the animation.
                url = urlhead + UrlMaker.gachaanimsub(gachamaster.id)
                url = OSAUtil.addQuery(url, Defines.URLQUERY_PAGE, page + 1)
                url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
            params.update({
                'skipUrl': backUrl,
                'pre' : self.url_static_img,
                # April ver.
                #'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201604/data/',
                #'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201605/data/',
                # 'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201607/data/',
                # 'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201608/data/',
                'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201701/data/',
                'isFirst' : is_first,
                'isLast' : is_last,
                'n' : gachamaster.continuity,
                'rarityFlag' : sep.join(rarityFlag),
            })
            del params['cardText']
            backUrl = url or backUrl
        elif gachamaster.consumetype == Defines.GachaConsumeType.SR_SSR_PROBABILITY_UP or gachamaster.consumetype == Defines.GachaConsumeType.PTCHANGE:
            # If the trade shop is open, forward the lottery point.
            if gachamaster.trade_shop_master_id is not None and 0 < gachamaster.trade_shop_master_id:
                try:
                    lottery_point = int(args.get(4))
                    url = urlhead + UrlMaker.gacharesult(gachamaster.id, reqkey, lottery_point=lottery_point)
                except:
                    # Bad arguments.
                    self.osa_util.logger.error('Invalid arguments')
                    self.__sendErrorResponse(400)
                    return
            else:
                url = urlhead + UrlMaker.gacharesult(gachamaster.id, reqkey)
            # Rebuild the back URL.
            backUrl = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
        elif gachamaster.consumetype == Defines.GachaConsumeType.FIXEDSR:
            try:
                gachamorecast = int(args.get(5))
            except:
                self.osa_util.logger.error('Invalid arguments')
                self.__sendErrorResponse(400)
                return
            if gachamorecast == 0:
                url = urlhead + UrlMaker.gachamorecast(gachamaster.id, reqkey)
                backUrl = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
            else:
                # Second pass: show the guaranteed (rarity-fixed) card.
                if 0 < gachamaster.rarity_fixed_num:
                    fixed_card_id = cardidlist[0]
                    card = BackendApi.get_cardmasters([fixed_card_id], model_mgr).get(fixed_card_id)
                    backUrl = self.makeAppLinkUrl(UrlMaker.gacharesult(gachamaster.id, reqkey))
                    params = {
                        'cardText': Defines.EffectTextFormat.GACHA_CARDTEXT % card.name,
                        'image': self.makeAppLinkUrlImg(CardUtil.makeThumbnailUrlMiddle(card)),
                        'pre': 'img/',
                    }
                else:
                    self.osa_util.logger.error('Not set Gachamaster.rarity_fixed_num')
                    self.__sendErrorResponse(400)
                    return
        elif gachamaster.consumetype == Defines.GachaConsumeType.XMAS_OMAKE:
            params = {
                'pre' : self.url_static_img,
                'logoPre' : self.url_static + 'effect/sp/v2/gachaxmas2015/',
                'image' : urlsep.join(image),
                'newFlag': sep.join(newFlag)
            }
        params['backUrl'] = backUrl
        self.writeResponseBody(params)
    def proc_panelmission(self, args):
        """Panel-mission clear animation.

        args[1] = panel number. Renders the panel image, the missions
        cleared this time, and — when the panel is complete — the prize
        and a preview of the next panel. 400 on any inconsistency.
        """
        try:
            panel = int(args.get(1))
        except:
            # Bad arguments.
            self.osa_util.logger.error('Invalid arguments')
            self.__sendErrorResponse(400)
            return
        model_mgr = self.getModelMgr()
        using = settings.DB_READONLY
        # Master data for this panel.
        panelmaster = None
        if panel:
            panelmaster = BackendApi.get_panelmission_panelmaster(model_mgr, panel, using=using)
        if panelmaster is None:
            self.osa_util.logger.error('Illigal panel number')
            self.__sendErrorResponse(400)
            return
        v_player = self.getViewerPlayer()
        uid = v_player.id
        now = OSAUtil.get_now()
        # Player progress for this panel.
        panelplaydata = BackendApi.get_panelmission_data(model_mgr, uid, panel, using=using, get_instance=False)
        if panelplaydata is None:
            self.osa_util.logger.error('Illigal panel number')
            self.__sendErrorResponse(400)
            return
        # Animation parameters (logoPre history kept for reference).
        params = {
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201412/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201505/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201508/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201512/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201602/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201604/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201606/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201607/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201610/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201612/',
            'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201702/',
            'pre' : self.url_static_img,
            'panel' : panel,
            'bg' : panelmaster.image,
        }
        # Mission masters of this panel.
        missionmaster_list = BackendApi.get_panelmission_missionmaster_by_panelid(model_mgr, panel, using=using)
        # True while every mission turns out to be cleared.
        is_allend = True
        # Missions cleared this time = those sharing the most recent
        # clear timestamp (max_time).
        max_time = None
        clearlist = []
        missionmaster_dict = {}
        for missionmaster in missionmaster_list:
            number = missionmaster.number
            missionmaster_dict[number] = missionmaster
            idx = number - 1
            data = panelplaydata.get_data(number)
            rtime = data['rtime']
            if now < rtime:
                # Not yet cleared: show the pending image and name.
                params['m%d' % idx] = missionmaster.image_pre
                params['mtext%d' % idx] = missionmaster.name
                is_allend = False
                continue
            elif max_time and rtime < max_time:
                # Cleared earlier than the current newest batch.
                continue
            elif max_time is None or max_time < rtime:
                # Newer clear time found: restart the batch.
                max_time = rtime
                clearlist = []
            clearlist.append(str(idx))
        if not clearlist:
            self.osa_util.logger.error('You can not view the effect.')
            self.__sendErrorResponse(400)
            return
        params['clear'] = ','.join(clearlist)
        # Images and names of the missions cleared this time.
        for idx in clearlist:
            missionmaster = missionmaster_dict[int(idx) + 1]
            params['m%s' % idx] = missionmaster.image_pre
            params['mtext%s' % idx] = missionmaster.name
        if is_allend:
            # Panel complete: show the prize card image and name.
            prizelist = BackendApi.get_prizelist(model_mgr, panelmaster.prizes, using=using)
            if not prizelist:
                self.osa_util.logger.error('prize none.')
                self.__sendErrorResponse(400)
                return
            presentlist = BackendApi.create_present_by_prize(model_mgr, uid, prizelist, 0, using=using, do_set_save=False)
            presentset = PresentSet.presentToPresentSet(model_mgr, presentlist[:1], using=using)[0]
            params['card'] = presentset.itemthumbnail_middle
            params['cname'] = presentset.itemname
            # Preview of the next panel, if any.
            next_panelmaster = BackendApi.get_panelmission_panelmaster(model_mgr, panel + 1, using=using)
            if next_panelmaster:
                next_panelmissionmaster_list = BackendApi.get_panelmission_missionmaster_by_panelid(model_mgr, next_panelmaster.id, using=using)
                for next_panelmissionmaster in next_panelmissionmaster_list:
                    idx = next_panelmissionmaster.number - 1
                    params['next%s' % idx] = next_panelmissionmaster.image_pre
        urldata = urlparse(self.url_cgi)
        url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
        url = url + UrlMaker.panelmissiontop()
        url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
        params['backUrl'] = url
        self.writeResponseBody(params)
    def proc_loginbonustimelimited(self, args):
        """Time-limited login-bonus animation.

        args[1] = login-bonus master id, args[2] = flag: chain into the
        regular login-bonus animation afterwards. The URL query may carry
        a comma-separated id list to chain several bonus animations.
        """
        mid = args.getInt(1)
        loginbonus = args.getInt(2)
        str_midlist = self.request.get(Defines.URLQUERY_ID) or ''
        midlist = [int(str_mid) for str_mid in str_midlist.split(',') if str_mid.isdigit()]
        model_mgr = self.getModelMgr()
        now = OSAUtil.get_now()
        master = BackendApi.get_loginbonustimelimitedmaster(model_mgr, mid, using=settings.DB_READONLY)
        if master is None:
            self.osa_util.logger.error('masterdata is not found.')
            self.__sendErrorResponse(400)
            return
        # Player info.
        v_player = self.getViewerPlayer()
        if BackendApi.check_lead_loginbonustimelimited(model_mgr, v_player.id, now):
            # Bonus not received yet — nothing to animate.
            self.osa_util.logger.error('not received.')
            self.__sendErrorResponse(400)
            return
        logindata = BackendApi.get_logintimelimited_data(model_mgr, v_player.id, mid, using=settings.DB_READONLY)
        if logindata is None:
            self.osa_util.logger.error('logindata is None.')
            self.__sendErrorResponse(400)
            return
        # Select which bonuses to display (four bonuses starting one
        # before the current day's bonus).
        table = BackendApi.get_loginbonustimelimiteddaysmaster_day_table_by_timelimitedmid(model_mgr, mid, using=settings.DB_READONLY)
        params = {
            'pre' : self.url_static_img,
        }
        # Configuration for this bonus campaign.
        config = BackendApi.get_current_loginbonustimelimitedconfig(model_mgr, using=settings.DB_READONLY)
        config_data = dict(config.getDataList()).get(master.id)
        # Effect-specific parameter builder (default: common builder).
        making_functions = {
            'monthly_login' : self.__makeMonthlyLoginBonusParams,
        }
        func = making_functions.get(master.effectname, self.__makeCommonLoginBonusParams)
        tmp, cur_bonusmaster, next_bonusmaster = func(master, logindata, table, config_data)
        params.update(**tmp)
        # Item obtained today (name, day number).
        if cur_bonusmaster:
            params['td'] = cur_bonusmaster.day
            params['tt'] = self.getBonusItemText(cur_bonusmaster)
        else:
            # No animation needed.
            self.osa_util.logger.error('can not view the effect.')
            self.__sendErrorResponse(400)
            return
        if next_bonusmaster:
            params['nt'] = self.getBonusItemText(next_bonusmaster)
        # Destination after the animation.
        url = None
        if mid in midlist:
            next_idx = midlist.index(mid)+1
            if next_idx < len(midlist):
                # There is another bonus animation queued.
                url = UrlMaker.loginbonustimelimitedanim(midlist[next_idx], loginbonus)
                url = OSAUtil.addQuery(url, Defines.URLQUERY_ID, str_midlist)
        if url is None:
            if loginbonus:
                # Regular login bonus comes next.
                url = UrlMaker.loginbonusanim()
            else:
                url = LoginBonusHandler.getEffectBackUrl(self)
        # Anniversary-countdown effects need the day split into digits.
        anniversary_data = {}
        if master.effectname == 'countdown_login_2ndanniversary':
            anniversary_data = {
                'ten_digit': params['day'] / 10,
                'one_digit': params['day'] % 10,
            }
        elif master.effectname == 'countdown_login_3rdanniversary':
            anniversary_data = {
                'one_digit': params['day'] % 10,
                'predata': self.url_static + 'effect/sp/v2/countdown_login_3rdanniversary/data/'
            }
        params.update(anniversary_data)
        urldata = urlparse(self.url_cgi)
        urlhead = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
        url = urlhead + url
        url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
        params['backUrl'] = url
        self.writeResponseBody(params)
def __makeCommonLoginBonusParams(self, master, logindata, day_table, config_data):
"""共通のログインボーナス演出パラメータ.
"""
VIEW_ITEM_NUM_MAX_TABLE = {
Defines.LoginBonusTimeLimitedType.TOTAL : 4,
Defines.LoginBonusTimeLimitedType.FIXATION : 6,
Defines.LoginBonusTimeLimitedType.MONTHLY : 3,
}
VIEW_ITEM_NUM_MAX_TABLE_BY_EFFECTNAME = {
'hinamatsuri_login' : 4,
'countdown_login_2ndanniversary' : 4,
'countdown_login_3rdanniversary' : 4,
'2nd_anniversary_login' : 4,
'3rd_anniversary_login' : 4,
'valentine2016' : 6,
'end_of_year_countdown' : 3,
'newyear_login' : 7,
'newbie_login' : 7,
}
item_num_max = VIEW_ITEM_NUM_MAX_TABLE_BY_EFFECTNAME.get(master.effectname, VIEW_ITEM_NUM_MAX_TABLE[master.lbtype])
model_mgr = self.getModelMgr()
cur_day = logindata.days
params = {}
cur_bonusmaster = None
next_bonusmaster = None
mid = master.id
days = day_table.keys()
days.sort()
tmp_days = list(set(days + [cur_day]))
tmp_days.sort()
start = max(0, min(tmp_days.index(cur_day) - 1, len(days) - item_num_max))
bonusmidlist = []
has_next = False
for day in days[start:]:
if not day_table.has_key(day):
continue
elif len(bonusmidlist) == item_num_max:
has_next = True
break
bonusmidlist.append(day_table[day])
bonusmaster_list = BackendApi.get_loginbonustimelimiteddaysmaster_by_idlist(model_mgr, bonusmidlist, using=settings.DB_READONLY)
params.update(has_next=has_next)
if master.lbtype == Defines.LoginBonusTimeLimitedType.FIXATION:
min_time = DateTimeUtil.strToDateTime(logindata.lbtltime.strftime("%Y%m01"), "%Y%m%d") - datetime.timedelta(seconds=1)
min_time = DateTimeUtil.toLoginTime(min_time)
receive_flags = BackendApi.get_loginbonustimelimited_fixation_received_dates(logindata.uid, mid, min_time).keys()
params['logoPre'] = self.url_static + 'effect/sp/v2/%s/data/' % master.effectname
else:
params['logoPre'] = self.url_static + 'effect/sp/v2/%s/data/' % master.effectname
receive_flags = None
make_date_string = {
Defines.LoginBonusTimeLimitedType.FIXATION : lambda x:u'%s月%s日' % (logindata.lbtltime.month, x),
Defines.LoginBonusTimeLimitedType.MONTHLY : lambda x:u'%s日' % (logindata.lbtltime.month, x),
}.get(master.lbtype, lambda x:'%d日目' % x)
#アイテム一覧(日数と画像URL).
bonusmaster_list.sort(key=lambda x:x.day)
for idx, bonusmaster in enumerate(bonusmaster_list):
params['i%d' % idx] = bonusmaster.thumb
params['d%d' % idx] = bonusmaster.day
params['date%d' % idx] = make_date_string(bonusmaster.day)
if cur_day == bonusmaster.day:
cur_bonusmaster = bonusmaster
params['idx'] = idx
elif cur_bonusmaster and not next_bonusmaster:
next_bonusmaster = bonusmaster
if receive_flags is not None:
params['f%d' % idx] = 1 if bonusmaster.day in receive_flags else 0
# 最終日までの日数.
td = config_data['etime'] - logindata.lbtltime
params['day'] = td.days
if next_bonusmaster and 0 < td.days:
params['idxnext'] = params['idx'] + 1
if master.lbtype == Defines.LoginBonusTimeLimitedType.TOTAL:
for i in xrange(params['idx']):
params['f%d' % i] = 1
def getEffectDBValue(attname, default):
v = getattr(cur_bonusmaster, attname, '') if cur_bonusmaster else ''
return v or default
# 演出用文言.
params['logo'] = master.logo
params['preEffect'] = self.url_static_img + master.img_effect
params['bg'] = getEffectDBValue(u'bg', u'bg.png')
params['tlogo'] = getEffectDBValue(u'text_logo', master.text_logo)
params['t0'] = getEffectDBValue(u'text_start', master.text_start)
params['t1'] = getEffectDBValue(u'text_itemlist', master.text_itemlist)
params['t2'] = getEffectDBValue(u'text_itemget', master.text_itemget)
params['t3'] = getEffectDBValue(u'text_itemnext', master.text_itemnext)
params['t4'] = getEffectDBValue(u'text_end', master.text_end)
if cur_bonusmaster:
params['ix'] = cur_bonusmaster.item_x
params['iy'] = cur_bonusmaster.item_y
params['gx'] = cur_bonusmaster.item_x
params['gy'] = cur_bonusmaster.item_y
return params, cur_bonusmaster, next_bonusmaster
    def __makeMonthlyLoginBonusParams(self, master, logindata, day_table, config_data):
        """Build parameters for the month-end ('monthly_login') roulette effect.

        Returns (params, cur_bonusmaster, next_bonusmaster). The item list
        is de-duplicated by prize id (max ITEM_NUM_MAX entries) and the
        roulette spins LOOP_CNT full turns before landing on today's item.
        """
        LOOP_CNT = 3
        ITEM_NUM_MAX = 3
        model_mgr = self.getModelMgr()
        mid = master.id
        cur_day = logindata.days
        params = {}
        params['logoPre'] = self.url_static + 'effect/sp/v2/monthly_login/data/default/' # TODO: should come from master data.
        # The following day.
        tomorrow = logindata.lbtltime + datetime.timedelta(days=1)
        # Month-end handling is special.
        bonusmaster_list = BackendApi.get_loginbonustimelimiteddaysmaster_by_idlist(model_mgr, day_table.values(), using=settings.DB_READONLY)
        bonusmaster_list.sort(key=lambda x:x.id)
        cur_bonusmaster = BackendApi.get_loginbonustimelimiteddaysmaster(model_mgr, mid, cur_day, using=settings.DB_READONLY)
        next_bonusmaster = None
        if config_data['stime'] <= tomorrow < config_data['etime']:
            # Tomorrow is still inside the campaign period.
            next_bonusmaster = BackendApi.get_loginbonustimelimiteddaysmaster(model_mgr, mid, tomorrow.day, using=settings.DB_READONLY)
        cur_prizeid = cur_bonusmaster.prizes[0] if cur_bonusmaster and cur_bonusmaster.prizes else 0
        next_prizeid = next_bonusmaster.prizes[0] if next_bonusmaster and next_bonusmaster.prizes else 0
        prizeidlist = []
        for bonusmaster in bonusmaster_list:
            if not bonusmaster.prizes:
                continue
            prizeid = bonusmaster.prizes[0]
            if prizeid in prizeidlist:
                continue
            idx = len(prizeidlist)
            params['i%d' % idx] = bonusmaster.thumb
            prizeidlist.append(prizeid)
            if ITEM_NUM_MAX <= len(prizeidlist):
                break
        idx = prizeidlist.index(cur_prizeid)
        params['idx'] = idx
        if next_prizeid:
            params['idxnext'] = prizeidlist.index(next_prizeid)
        params['rouletteCnt'] = LOOP_CNT * ITEM_NUM_MAX + idx
        return params, cur_bonusmaster, next_bonusmaster
def getBonusItemText(self, master):
"""ログインボーナスのテキストを作成
"""
if LoginBonusTimeLimitedAnimationSet.exists(master.mid, master.day):
items = LoginBonusTimeLimitedAnimationSet.get(master.mid, master.day)
else:
model_mgr = self.getModelMgr()
prizelist = BackendApi.get_prizelist(model_mgr, master.prizes, using=settings.DB_READONLY)
prizeinfo = BackendApi.make_prizeinfo(self, prizelist, using=settings.DB_READONLY)
items = [listitem['text'] for listitem in prizeinfo['listitem_list']]
LoginBonusTimeLimitedAnimationSet.save(master.mid, master.day, items)
return Defines.STR_AND.join(items)
#==============================================================
# イベントシナリオ.
def proc_eventscenario(self, args):
"""イベントシナリオ.
"""
number = args.getInt(1)
edt = args.get(2) or ''
backUrl = '/'.join(args.args[3:])
model_mgr = self.getModelMgr()
data = BackendApi.get_eventscenario_by_number(model_mgr, number, using=settings.DB_READONLY)
if not data:
self.osa_util.logger.error('the scenario is not found...%s' % number)
self.__sendErrorResponse(404)
return
urldata = urlparse(self.url_cgi)
urlhead = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
url = '%s/%s' % (urlhead, backUrl)
url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
img_pre = self.url_static_img + (data.get('thumb') or 'event/scenario/%d/' % number)
params = {
'backUrl' : url,
'pre' : img_pre,
'edt' : edt,
}
params.update(data)
self.writeResponseBody(params)
#==============================================================
# 双六.
    def proc_sugoroku(self, args):
        """Sugoroku (board-game) login-bonus animation.

        args[1] = login-bonus master id, args[2] = optional page (one page
        per map crossed). Replays the squares the player landed on and the
        prizes collected along the way.
        """
        mid = args.getInt(1)
        if mid is None:
            self.__sendErrorResponse(404)
            return
        page = args.getInt(2) or 0
        model_mgr = self.getModelMgr()
        # Player info.
        v_player = self.getViewerPlayer()
        viewer_id = v_player.id
        # Fetch the stored roll result.
        logindata = BackendApi.get_loginbonus_sugoroku_playerdata(model_mgr, viewer_id, mid, using=settings.DB_DEFAULT)
        if logindata is None:
            self.__sendErrorResponse(404)
            return
        # Squares landed on during this roll.
        squares_id_list = logindata.result.get('square_id_list')
        squares_master_list = BackendApi.get_loginbonus_sugoroku_map_squares_master_list_by_id(model_mgr, squares_id_list, using=settings.DB_READONLY)
        squares_master_dict = dict([(squares_master.id, squares_master) for squares_master in squares_master_list])
        # Keep only the squares belonging to the requested page's map
        # (pages advance each time the path crosses into a new map).
        page_cnt = 0
        arr = []
        mapid = None
        for squares_id in squares_id_list:
            squares_master = squares_master_dict[squares_id]
            if mapid is None:
                mapid = squares_master.mid
            elif mapid != squares_master.mid:
                page_cnt += 1
                if page < page_cnt:
                    # Include the first square of the following map too.
                    arr.append(squares_master)
                    break
                mapid = squares_master.mid
            if page_cnt == page:
                arr.append(squares_master)
        squares_master_list = arr
        # Map master for the page being shown.
        mapmaster = BackendApi.get_loginbonus_sugoroku_map_master(model_mgr, mapid, using=settings.DB_READONLY)
        # Animation parameters.
        params = dict(
            backUrl = self.request.get('backUrl'),
            logoPre = self.url_static_img + 'sugo6/{}/'.format(mapmaster.effectname),
            pre = self.url_static_img,
            lt = 0,
        )
        # Prizes: de-duplicated prize-id lists referenced by index.
        prizeidlist_list = []
        message_items = []
        def get_prize_number(prizeidlist):
            # Return the index of prizeidlist, appending it if unseen.
            if prizeidlist in prizeidlist_list:
                return prizeidlist_list.index(prizeidlist)
            else:
                prizeidlist_list.append(prizeidlist)
                return len(prizeidlist_list) - 1
        # Current position.
        if 0 < page:
            params['continue'] = '1'
            params['cp'] = 0
        else:
            squares_master = squares_master_list.pop(0)
            params['cp'] = squares_master.number
            if len(squares_id_list) == 1:
                # Player did not move.
                if squares_master.last:
                    # Goal square: map-completion prize.
                    params['completeitem'] = get_prize_number(mapmaster.prize)
                    message_items.append(params['completeitem'])
                else:
                    # Rest (skip-a-turn) square.
                    params['lt'] = logindata.lose_turns + 1
        # Full map layout: event type/value and prize per square.
        map_squares_master_list = BackendApi.get_loginbonus_sugoroku_map_squares_master_by_mapid(model_mgr, mapid, using=settings.DB_READONLY)
        for squares_master in map_squares_master_list:
            number = squares_master.number
            params['et{}'.format(number)] = squares_master.event_type
            params['ev{}'.format(number)] = squares_master.event_value
            if squares_master.prize:
                params['ei{}'.format(number)] = get_prize_number(squares_master.prize)
        # Squares stepped on, translated to positions on this map.
        params['pn'] = len(squares_master_list)
        pre_event_type = Defines.SugorokuMapEventType.NONE
        for i,squares_master in enumerate(squares_master_list):
            if squares_master.mid == mapid:
                params['p{}'.format(i)] = squares_master.number
                if squares_master.prize:
                    message_items.append(get_prize_number(squares_master.prize))
            elif pre_event_type == Defines.SugorokuMapEventType.BACK:
                # Moved back onto the previous map.
                pre_map_squares_master_list = BackendApi.get_loginbonus_sugoroku_map_squares_master_by_mapid(model_mgr, squares_master.mid, using=settings.DB_READONLY)
                params['p{}'.format(i)] = squares_master.number - len(pre_map_squares_master_list)
            else:
                # Moved forward onto the next map.
                params['p{}'.format(i)] = len(map_squares_master_list) + squares_master.number
            pre_event_type = squares_master.event_type
        # Items.
        params['in'] = len(prizeidlist_list)
        for i,prizeidlist in enumerate(prizeidlist_list):
            # Item display data.
            if i in message_items:
                prizelist = BackendApi.get_prizelist(model_mgr, prizeidlist, using=settings.DB_READONLY)
                prizeinfo = BackendApi.make_prizeinfo(self, prizelist, using=settings.DB_READONLY)
                # Item name text.
                params['in{}'.format(i)] = Defines.STR_AND.join([listitem['text'] for listitem in prizeinfo['listitem_list']])
            else:
                prizelist = BackendApi.get_prizelist(model_mgr, [prizeidlist[0]], using=settings.DB_READONLY)
                prizeinfo = BackendApi.make_prizeinfo(self, prizelist, using=settings.DB_READONLY)
                # Item thumbnail (relative to the 'pre' prefix).
                params['i{}'.format(i)] = prizeinfo['listitem_list'][0]['thumbUrl'].replace(params['pre'], '')
        self.writeResponseBody(params)
def main(request):
    """Entry point: delegate the incoming request to the Handler class."""
    return Handler.run(request)
|
[
"shangye@mail.com"
] |
shangye@mail.com
|
a8a1af44b4ff29b22520121f30295c8ebe1d693f
|
554ec84f23825452f7692f91f742bdc81fa50e84
|
/chatbot_27549/urls.py
|
7d1264887b9b6eb6dad7fc662d8571cc66eddd66
|
[] |
no_license
|
crowdbotics-apps/chatbot-27549
|
a7806af210b6e7ccdfb3db3dbaaac9e9dcb5a5af
|
0e615cbb191a8d91e2874e7329b059193a8ad625
|
refs/heads/master
| 2023-05-26T13:30:53.116812
| 2021-05-29T07:24:50
| 2021-05-29T07:24:50
| 371,908,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,022
|
py
|
"""chatbot_27549 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Chatbot"
admin.site.site_title = "Chatbot Admin Portal"
admin.site.index_title = "Chatbot Admin"
# swagger
api_info = openapi.Info(
title="Chatbot API",
default_version="v1",
description="API documentation for Chatbot App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
faccd9f59e65cffd749ab558cda959576930e26c
|
0ea5d0f75e7cb9f8a7fd213e2eb4f29a339ea285
|
/wnfportal_python/wnfportal_dm_konten.py
|
a209429b9d0e3bab64b37d89220a66b699158bda
|
[] |
no_license
|
wnf58/wnfportal
|
1d5d7ba8e5b63b69feb016e57fc2ee7efccc8400
|
2d593fdd9266f44d60297f7f96b6b4a2c4c7ea98
|
refs/heads/master
| 2022-12-17T03:08:06.001400
| 2020-09-26T11:51:59
| 2020-09-26T11:51:59
| 115,601,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,164
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
import fdb
import os
import configparser
import wnfportal_dm_datenbank
import wnfportal_tools as T
import time
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.barcharts import VerticalBarChart
from reportlab.graphics.charts.piecharts import Pie
from reportlab.graphics import renderPM
class dmKonten(wnfportal_dm_datenbank.dmDatenbank):
    def __init__(self):
        """Initialise the base data module and select the KuB ini file."""
        wnfportal_dm_datenbank.dmDatenbank.__init__(self)
        self.setIniDatei('wnfKuB.ini')
        # self.setIniDatei('wnfKITAOffice.ini')
def summeAlleKonten(self):
aSQL = """
SELECT SUM(BETRAG) FROM KO_KUBEA
"""
return self.sqlSumme(aSQL)
def summeProjekt(self, aProjekt_ID):
aSQL = "SELECT SUM(BETRAG) FROM KO_KUBEA WHERE PROJEKT_ID=%s" % (aProjekt_ID)
print(aSQL)
return self.sqlSumme(aSQL)
def summeProjektWintergarten(self):
aProjekt_ID = self.getProjekt_ID_Wintergarten_2017()
return self.summeProjekt(aProjekt_ID)
def listeAlleKonten(self):
aSQL = """
SELECT E.KUB_ID,K.KURZ,SUM(E.BETRAG)
FROM KO_KUBEA E
LEFT JOIN KO_KUB K ON K.ID=E.KUB_ID
GROUP BY K.KURZ,E.KUB_ID
HAVING SUM(E.BETRAG)<>0
ORDER BY K.KURZ
"""
cur = self.sqlOpen(aSQL)
if (cur == None):
return []
konten = []
for row in cur:
k = {'konto_id': row[0], 'konto': row[1], 'saldo': T.sDM(row[2])}
# print k
konten.append(k)
return konten
def jsonAlleKonten(self):
j = {'summe': T.sDM(self.summeAlleKonten()), 'konten': self.listeAlleKonten()}
return j
    def listeLetzteEA(self):
        """Bookings of roughly the last eight weeks.

        Returns (total, rows); each row is a dict with both raw and
        display-formatted fields (ttmmjj, datumkurz, betrag via T.sDM).
        """
        aSumme = 0
        aSQL = """
        SELECT E.ID,E.DATUM,E.KURZ,E.BETRAG
        FROM KO_KUBEA E
        WHERE E.DATUM >= %s
        ORDER BY E.DATUM DESC,E.KURZ
        """
        aSQL = aSQL % (T.wnfDateToSQL(T.wnfTagVorVor8Wochen()))
        print(aSQL)  # debug output
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return aSumme, []
        ea = []
        for row in cur:
            s = "%s | %s" % (row[1].strftime("%d.%m.%y"), row[2])
            ttmmjj = "%s" % (row[1].strftime("%d.%m.%y"))
            # print (s)
            k = {'konto_ea_id': row[0],
                 'datum': str(row[1]),
                 'ttmmjj': ttmmjj,
                 'kurz': row[2],
                 'betrag': T.sDM(row[3]),
                 'datumkurz': s}
            aSumme = aSumme + row[3]
            print(k)
            ea.append(k)
        return aSumme, ea
    def listeProjekt(self, aProjekt_ID):
        """All single bookings of one project, newest first.

        Returns (total, rows) in the same row format as listeLetzteEA.
        """
        print(aProjekt_ID)
        aSumme = 0
        aSQL = """
        SELECT E.ID,E.DATUM,E.KURZ,E.BETRAG
        FROM KO_KUBEA E
        WHERE E.PROJEKT_ID = %d
        ORDER BY E.DATUM DESC,E.KURZ
        """
        aSQL = aSQL % (aProjekt_ID)
        print(aSQL)  # debug output
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return aSumme, []
        ea = []
        for row in cur:
            s = "%s | %s" % (row[1].strftime("%d.%m.%y"), row[2])
            ttmmjj = "%s" % (row[1].strftime("%d.%m.%y"))
            # print (s)
            k = {'konto_ea_id': row[0],
                 'datum': str(row[1]),
                 'ttmmjj': ttmmjj,
                 'kurz': row[2],
                 'betrag': T.sDM(row[3]),
                 'datumkurz': s}
            aSumme = aSumme + row[3]
            print(k)
            ea.append(k)
        return aSumme, ea
    def listeProjektK(self, aProjekt_ID):
        """Project bookings grouped by category (KO_KUBKAT).

        Per category: the latest booking date and the category total.
        Returns (total, rows); row keys mirror listeProjekt for reuse in the
        same HTML renderer.
        """
        print(aProjekt_ID)
        aSumme = 0
        aSQL = """
        SELECT K.ID,MAX(E.DATUM),K.KURZ,SUM(E.BETRAG)
        FROM KO_KUBEA E
        LEFT JOIN KO_KUBKAT K ON K.ID=E.KAT_ID
        WHERE E.PROJEKT_ID = %d
        AND NOT E.KAT_ID IS NULL
        GROUP BY K.ID,K.KURZ
        ORDER BY 4,K.KURZ
        """
        aSQL = aSQL % (aProjekt_ID)
        print(aSQL)  # debug output
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return aSumme, []
        ea = []
        for row in cur:
            s = "%s | %s" % (row[1].strftime("%d.%m.%y"), row[2])
            ttmmjj = "%s" % (row[1].strftime("%d.%m.%y"))
            # print (s)
            k = {'konto_ea_id': row[0],
                 'datum': str(row[1]),
                 'ttmmjj': ttmmjj,
                 'kurz': row[2],
                 'betrag': T.sDM(row[3]),
                 'datumkurz': s}
            aSumme = aSumme + row[3]
            print(k)
            ea.append(k)
        return aSumme, ea
def jsonLetzteEA(self):
aSumme, ea = self.listeLetzteEA()
j = {'summe': T.sDM(aSumme), 'ea': ea}
return j
    def jsonListEA(self):
        """All bookings since the first day of the previous month as dicts."""
        aSQL = """
        SELECT E.ID,E.DATUM,E.KURZ, E.BEZ, E.BETRAG
        FROM KO_KUBEA E
        WHERE E.DATUM >= %s
        ORDER BY E.DATUM DESC,E.KURZ
        """
        aSQL = aSQL % (T.wnfDateToSQL(T.wnfErsterTagVormonat()))
        # print(aSQL)
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return []
        ea = []
        for row in cur:
            # s / ttmmjj are debug leftovers, currently unused
            s = "%s | %s" % (row[1].strftime("%d.%m.%y"), row[2])
            ttmmjj = "%s" % (row[1].strftime("%d.%m.%y"))
            # print s
            k = {'id': row[0],
                 'datum': str(row[1]),
                 'kurz': row[2],
                 'bez': row[3],
                 'betrag': str(row[4])}
            # print(k)
            ea.append(k)
        # print(ea)
        return ea
    def jsonListEASkip(self, aFirst, aSkip):
        """Paged booking list using Firebird's FIRST/SKIP syntax.

        aFirst: page size; aSkip: number of rows to skip (offset).
        """
        aSQL = """
        SELECT FIRST %s SKIP %s E.ID,E.DATUM,E.KURZ, E.BEZ, E.BETRAG
        FROM KO_KUBEA E
        ORDER BY E.DATUM DESC,E.KURZ
        """
        aSQL = aSQL % (aFirst, aSkip)
        # print(aSQL)
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return []
        ea = []
        for row in cur:
            # s / ttmmjj are debug leftovers, currently unused
            s = "%s | %s" % (row[1].strftime("%d.%m.%y"), row[2])
            ttmmjj = "%s" % (row[1].strftime("%d.%m.%y"))
            # print s
            k = {'id': row[0],
                 'datum': str(row[1]),
                 'kurz': row[2],
                 'bez': row[3],
                 'betrag': str(row[4])}
            # print(k)
            ea.append(k)
        print(ea)
        return ea
def jsonDetailEA(self, id):
aSQL = """
SELECT E.ID,E.DATUM,E.KURZ, E.BEZ, E.BETRAG
FROM KO_KUBEA E
WHERE E.ID = %s
ORDER BY E.DATUM DESC,E.KURZ
"""
aSQL = aSQL % (id)
# print(aSQL)
cur = self.sqlOpen(aSQL)
if (cur == None):
return []
for row in cur:
s = "%s | %s" % (row[1].strftime("%d.%m.%y"), row[2])
ttmmjj = "%s" % (row[1].strftime("%d.%m.%y"))
# print s
k = {'id': row[0],
'datum': str(row[1]),
'kurz': row[2],
'bez': row[3],
'betrag': str(row[4])}
print(k)
return k
    def jsonListKonten(self):
        """Per-account totals with the date of the most recent booking.

        Accounts whose balance is exactly zero are filtered out by HAVING.
        """
        aSQL = """
        SELECT
        K.ID,
        MAX(E.DATUM),
        K.KURZ,
        SUM(E.BETRAG)
        FROM KO_KUBEA E
        LEFT JOIN KO_KUB K ON K.ID=E.KUB_ID
        GROUP BY K.KURZ,K.ID
        HAVING SUM(E.BETRAG)<>0
        ORDER BY K.KURZ
        """
        # print(aSQL)
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return []
        ea = []
        for row in cur:
            k = {'id': row[0],
                 'datum': str(row[1]),
                 'kurz': row[2],
                 'betrag': str(row[3])}
            # print(k)
            ea.append(k)
        print(ea)
        return ea
def jsonKontostandSumme(self):
aSQL = """
SELECT
SUM(E.BETRAG)
FROM KO_KUBEA E
"""
# print(aSQL)
cur = self.sqlOpen(aSQL)
if (cur == None):
return {}
for row in cur:
k = [{'summe': str(row[0])}]
print(k)
return k
    def htmlLetzteEA(self):
        """HTML table of the recent bookings with a total row at the bottom."""
        aSumme, ea = self.listeLetzteEA()
        s = ''
        for l in ea:
            datum = l['ttmmjj']  # .encode('utf-8')
            kurz = l['kurz']  # .encode('utf-8')
            betrag = l['betrag']
            # append one 3-column row per booking
            s = '%s <tr><td class=table-3c-spalte1>%s</td><td class=table-3c-spalte2>%s</td><td class=table-3c-spalte3>%s</td></tr>' % (
                s, datum, kurz, betrag)
        return ("<table>"
                "<tr><th class=table-3c-spalte1>Datum</th><th class=table-3c-spalte2>Bezeichnung</th><th class=table-3c-spalte3>Betrag</th></tr>"
                "%s"
                "<tr><th class=table-3c-spalte1></th><th class=table-3c-spalte2>Summe</th><th class=table-3c-spalte3>%s</th></tr>"
                "</table>") % (s, T.sDM(aSumme))
    def htmlEAMonatlich(self):
        """HTML table of the recurring expenses and their monthly averages."""
        aSumme, ea = self.analyseEAMonatlich()
        s = ''
        for l in ea:
            kurz = l['kurz']  # .encode('utf-8')
            betrag = l['betrag']
            durchschnitt = l['durchschnitt']
            s = '%s <tr><td class=table-3c-spalte1>%s</td><td class=table-3c-spalte3>%.2f</td><td class=table-3c-spalte3>%.2f</td></tr>' % (
                s, kurz, betrag, durchschnitt)
        return ("<table>"
                "<tr><th class=table-3c-spalte1>Bezeichnung</th><th class=table-3c-spalte3>Betrag</th><th class=table-3c-spalte3>Durchschnitt</th></tr>"
                "%s"
                "<tr><th class=table-3c-spalte1></th><th class=table-3c-spalte2>Summe</th><th class=table-3c-spalte3>%s</th></tr>"
                "</table>") % (s, T.sDM(aSumme))
    def htmldiagrammVonBis(self, aVon, aBis, dn):
        """Render a pie chart PNG for the date range and return img + table HTML.

        dn is the PNG base name (without extension).
        NOTE(review): the output directory is hard-coded to one developer's
        machine — should come from configuration.
        """
        aSumme, aData, aLabels, aRecord = self.analyseAusgabenVonBis(aVon, aBis)
        p = '/home/wnf/Entwicklung/PycharmProjects/wnfportal/wnfportal_python/www/img/'
        self.diagrammKostenartVonBis(p, dn, aData, aLabels)
        s = ''
        for l in aRecord:
            aLabel = l['ID']
            kurz = l['kurz']  # .encode('utf-8')
            betrag = l['sDM']
            s = '%s <tr><td class=table-3c-spalte1>%s</td><td class=table-3c-spalte2>%s</td><td class=table-3c-spalte3>%s</td></tr>' % (
                s, aLabel, kurz, betrag)
        tabelle = ("<table>"
                   "<tr><th class=table-3c-spalte1>Kurz</th><th class=table-3c-spalte2>Bezeichnung</th><th class=table-3c-spalte3>Betrag</th></tr>"
                   "%s"
                   "<tr><th class=table-3c-spalte1></th><th class=table-3c-spalte2>Summe</th><th class=table-3c-spalte3>%s</th></tr>"
                   "</table>") % (s, T.sDM(aSumme))
        return ('<img src="img/%s.png" alt="Diagramm"> %s' % (dn, tabelle))
def htmldiagrammLetzterMonat(self):
aVon = T.wnfDateToSQL(T.wnfErsterTagVormonat())
aBis = T.wnfDateToSQL(T.wnfLetzterTagVormonat())
dn = 'kreis_vormonat'
return self.htmldiagrammVonBis(aVon, aBis, dn)
def htmldiagrammLetzte12Monate(self):
aVon = T.wnfDateToSQL(T.wnfErsterVor12Monaten())
aBis = T.wnfDateToSQL(T.wnfHeute())
dn = 'kreis_12Monate'
return self.htmldiagrammVonBis(aVon, aBis, dn)
def htmldiagrammDieserMonat(self):
aVon = T.wnfDateToSQL(T.wnfErsterDieserMonat())
aBis = T.wnfDateToSQL(T.wnfLetzterDieserMonat())
dn = 'kreis_diesermonat'
return self.htmldiagrammVonBis(aVon, aBis, dn)
    def csvKontoVerlauf(self, dn):
        """Write the month-by-month balance history to a CSV file.

        Rewrites the file at most once per minute (simple mtime throttle).
        For each month boundary the cumulative balance before that date is
        queried and appended as one CSV line.
        """
        # Die Datei wird nur alle Minute neu geschrieben
        if os.path.exists(dn):
            if (time.time() - os.path.getmtime(dn) < 60):
                return
            os.remove(dn)
        print(dn)
        with open(dn, 'x') as out:
            s = 'Datum,Kontostand'
            out.write(s + '\n')
        # alle Monate: determine the overall date range of the bookings
        aSQL = 'SELECT MIN(E.DATUM),MAX(E.DATUM) FROM KO_KUBEA E'
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return
        for row in cur:
            aVon = row[0]
            aBis = row[1]
        while (aVon < aBis):
            aVon = T.ersterNaechsterMonat(aVon)
            # print(aVon)
            aSQL = """
            SELECT
            SUM(E.BETRAG)
            FROM KO_KUBEA E
            WHERE E.DATUM < '%s'
            """ % (aVon)
            # print(aSQL)
            cur = self.sqlOpen(aSQL)
            if (cur == None):
                return
            for row in cur:
                betrag = row[0]
            s = aVon.strftime("%Y/%m/%d")
            s = "%s,%s" % (s, betrag)
            print(s)
            with open(dn, 'a') as out:
                out.write(s + '\n')
        return
def listeAlleJahreEA(self):
aSumme = 0
aAnzJahre = 0
aSQL = """
SELECT
EXTRACT(YEAR FROM E.DATUM) AS JAHR,
SUM(E.BETRAG),
SUM(CASE WHEN E.BETRAG > 0 THEN E.BETRAG END) AS Einnahme,
SUM(CASE WHEN E.BETRAG < 0 THEN E.BETRAG END) AS Ausgabe
FROM KO_KUBEA E
WHERE E.IGNORIEREN = 0
GROUP BY EXTRACT(YEAR FROM E.DATUM)
ORDER BY 1 DESC
"""
# print aSQL
cur = self.sqlOpen(aSQL)
if (cur == None):
return aSumme, []
ea = []
for row in cur:
s = "%s | %20s" % (row[0], T.sDM(row[1]))
# print s
k = {'jahr': row[0],
'betrag': row[1],
'sDM': T.sDM(row[1]),
'sDME': T.sDM(row[2]),
'sDMA': T.sDM(row[3])
}
aSumme = aSumme + row[1]
aAnzJahre = aAnzJahre + 1
# print k
ea.append(k)
if aAnzJahre > 0:
aSumme = aSumme / aAnzJahre
return aAnzJahre, aSumme, ea
def listeKostenartVonBis(self, aVon, aBis):
aSQL = """
SELECT ABS(SUM(E.BETRAG)),K.KURZ,K.ID
FROM KO_KUBEA E
LEFT JOIN KO_KUBKST K ON K.ID=E.KST_ID
WHERE E.IGNORIEREN = 0
AND NOT E.KST_ID IS NULL
AND E.BETRAG < 0
AND E.DATUM BETWEEN %s AND %s
GROUP BY K.KURZ,K.ID
ORDER BY 2
"""
aSQL = aSQL % (aVon, aBis)
print(aSQL)
cur = self.sqlOpen(aSQL)
if (cur == None):
return [], []
ea = []
kst = []
aRecord = []
aSumme = 0
for row in cur:
# s = "%s | %20s" % (row[0], T.sDM(row[1]))
# print s
ea.append(round(row[0]))
aSumme = aSumme + row[0]
kst.append(row[1])
k = {'betrag': row[0],
'sDM': T.sDM(row[0]),
'kurz': row[1],
'ID': row[2]
}
# print(aSumme)
aRecord.append(k)
return aSumme, ea, kst, aRecord
    def analyseAusgabenVonBis10Prozent(self, aKst_ID, aKst_Kurz, aVon, aBis, a10Prozent):
        """Break one cost centre down by category for the pie chart.

        Categories above the a10Prozent threshold get their own slice
        ('<kst> - <kat>'); everything below is folded into one rest slice.
        Despite the name, the threshold is whatever the caller passes
        (currently 5% of the total, see analyseAusgabenVonBis).
        """
        aSQL = """
        SELECT SUM(ABS(E.BETRAG)),K.KURZ,K.ID
        FROM KO_KUBEA E
        LEFT JOIN KO_KUBKAT K ON K.ID=E.KAT_ID
        WHERE E.IGNORIEREN = 0
        AND E.KST_ID = %d
        AND E.BETRAG < 0
        AND E.DATUM BETWEEN %s AND %s
        GROUP BY K.KURZ,K.ID
        ORDER BY 2
        """
        aSQL = aSQL % (aKst_ID, aVon, aBis)
        print(aSQL)
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return []
        aRec = []
        aRest = 0
        for row in cur:
            if (row[0] > a10Prozent):
                x = {'betrag': row[0],
                     'sDM': T.sDM(row[0]),
                     'kurz': '%s - %s' % (aKst_Kurz, row[1]),
                     'ID': aKst_ID
                     }
                aRec.append(x)
            else:
                aRest = aRest + row[0]
        if aRest > 0:
            x = {'betrag': aRest,
                 'sDM': T.sDM(aRest),
                 'kurz': '%s' % (aKst_Kurz),
                 'ID': aKst_ID
                 }
            aRec.append(x)
        return aRec
    def analyseAusgabenVonBis(self, aVon, aBis):
        """
        Alle EA bis 10 % zusammenfassen

        Expense analysis for a date range: cost centres below the threshold
        become single slices, larger ones are split per category via
        analyseAusgabenVonBis10Prozent. Returns (total, amounts, labels,
        records). Note: a10Prozent is actually total/20 (5%), not 10%.
        """
        aSQL = """
        SELECT ABS(SUM(E.BETRAG)),K.KURZ,K.ID
        FROM KO_KUBEA E
        LEFT JOIN KO_KUBKST K ON K.ID=E.KST_ID
        WHERE E.IGNORIEREN = 0
        AND NOT E.KST_ID IS NULL
        AND E.BETRAG < 0
        AND E.DATUM BETWEEN %s AND %s
        GROUP BY K.KURZ,K.ID
        ORDER BY 2
        """
        aSQL = aSQL % (aVon, aBis)
        print(aSQL)
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return 0, [], [], []
        ea = []
        kst = []
        aRecord = []
        aRecKst = []
        aSumme = 0
        for row in cur:
            k = {'betrag': row[0],
                 'kurz': row[1],
                 'KST_ID': row[2]
                 }
            aSumme = aSumme + row[0]
            aRecKst.append(k)
        a10Prozent = aSumme / 20
        print(aSumme, a10Prozent)
        for k in aRecKst:
            if (k['betrag'] < a10Prozent):
                x = {'betrag': k['betrag'],
                     'sDM': T.sDM(k['betrag']),
                     'kurz': k['kurz'],
                     'ID': k['KST_ID']
                     }
                aRecord.append(x)
            else:
                rx = self.analyseAusgabenVonBis10Prozent(k['KST_ID'], k['kurz'], aVon, aBis, a10Prozent)
                for x in rx:
                    aRecord.append(x)
        print(aRecord)
        for x in aRecord:
            ea.append(round(x['betrag']))
            kst.append(x['kurz'])
        print(kst)
        print(ea)
        self.closeConnection()
        return aSumme, ea, kst, aRecord
def analyseEAMonatlich(self):
"""
Alle EA mit Monatlich <> 0 zusammenfassen
"""
aSQL = """
SELECT SUM(E.BETRAG),K.KURZ,K.ID,E.MONATLICH,COUNT(*)
FROM KO_KUBEA E
LEFT JOIN KO_KUBKST K ON K.ID=E.KST_ID
WHERE E.IGNORIEREN = 0
AND E.MONATLICH <> 0
AND NOT E.KST_ID IS NULL
GROUP BY K.KURZ,K.ID,E.MONATLICH
ORDER BY 2
"""
aSQL = """
SELECT ABS(SUM(E.BETRAG)),E.KURZ,E.MONATLICH,COUNT(*)
FROM KO_KUBEA E
WHERE E.IGNORIEREN = 0
AND E.MONATLICH <> 0
AND E.BETRAG<0
GROUP BY E.KURZ,E.MONATLICH
ORDER BY 1,2
"""
print(aSQL)
cur = self.sqlOpen(aSQL)
if (cur == None):
return 0, [], [], []
aRecord = []
aSumme = 0
for row in cur:
# s = "%s | %20s" % (row[0], T.sDM(row[1]))
# print s
aDurchschnitt = row[0] / row[2] / row[3]
k = {'betrag': row[0],
'kurz': row[1],
'monatlich': row[2],
'anzahl': row[3],
'durchschnitt': aDurchschnitt
}
aSumme = aSumme + aDurchschnitt
# print(aSumme, k)
aRecord.append(k)
return aSumme, aRecord
def listeAlleMonateEA(self):
aSumme = 0
aAnzMonate = 0
aSQL = """
SELECT
EXTRACT(YEAR FROM E.DATUM) AS JAHR,
EXTRACT(MONTH FROM E.DATUM) AS MONAT,
SUM(E.BETRAG),
SUM(CASE WHEN E.BETRAG > 0 THEN E.BETRAG END) AS Einnahme,
SUM(CASE WHEN E.BETRAG < 0 THEN E.BETRAG END) AS Ausgabe
FROM KO_KUBEA E
WHERE E.IGNORIEREN = 0
GROUP BY EXTRACT(YEAR FROM E.DATUM),EXTRACT(MONTH FROM E.DATUM)
ORDER BY 1 DESC,2 DESC
"""
# print(aSQL)
cur = self.sqlOpen(aSQL)
if (cur == None):
return aAnzMonate, aSumme, []
ea = []
for row in cur:
s = "%s/%s | %20s" % (row[0],row[1], T.sDM(row[2]))
# print(s)
k = {'jahr': row[0],
'monat': row[1],
'betrag': row[2],
'sDM': T.sDM(row[2]),
'sDME': T.sDM(row[3]),
'sDMA': T.sDM(row[4])
}
aSumme = aSumme + row[1]
aAnzMonate = aAnzMonate + 1
# print(k)
ea.append(k)
if aAnzMonate > 0:
aSumme = aSumme / aAnzMonate
return aAnzMonate, aSumme, ea
    def openKontoverlauf(self):
        """Open a cursor over per-month sums in chronological order."""
        aSQL = """
        SELECT
        EXTRACT(YEAR FROM E.DATUM) AS JAHR,
        EXTRACT(MONTH FROM E.DATUM) AS MONAT,
        SUM(E.BETRAG)
        FROM KO_KUBEA E
        WHERE E.IGNORIEREN = 0
        GROUP BY EXTRACT(YEAR FROM E.DATUM),EXTRACT(MONTH FROM E.DATUM)
        ORDER BY 1,2
        """
        # print(aSQL)
        return self.sqlOpen(aSQL)
    def openAlleMonateEinkommen(self):
        """Open a cursor over monthly income, split by person.

        NOTE(review): KST_ID 11 and KAT_IDs 22/24 are hard-coded database
        ids (salary categories of the two household members) — confirm.
        """
        aSQL = """
        SELECT
        FIRST 240
        EXTRACT(YEAR FROM E.DATUM) AS JAHR,
        EXTRACT(MONTH FROM E.DATUM) AS MONAT,
        SUM(E.BETRAG),
        SUM(CASE WHEN E.KAT_ID = 22 THEN E.BETRAG END) AS Uwe,
        SUM(CASE WHEN E.KAT_ID = 24 THEN E.BETRAG END) AS Sabine
        FROM KO_KUBEA E
        WHERE E.IGNORIEREN = 0
        AND E.BETRAG > 1000
        AND E.KST_ID = 11
        AND E.KAT_ID IN (22,24)
        GROUP BY EXTRACT(YEAR FROM E.DATUM),EXTRACT(MONTH FROM E.DATUM)
        ORDER BY 1 DESC,2 DESC
        """
        # print(aSQL)
        return self.sqlOpen(aSQL)
    def openAlleJahreEinkommen(self):
        """Open a cursor over yearly income, split by person (see
        openAlleMonateEinkommen for the hard-coded id caveat)."""
        aSQL = """
        SELECT
        EXTRACT(YEAR FROM E.DATUM) AS JAHR,
        SUM(E.BETRAG),
        SUM(CASE WHEN E.KAT_ID = 22 THEN E.BETRAG END) AS Uwe,
        SUM(CASE WHEN E.KAT_ID = 24 THEN E.BETRAG END) AS Sabine
        FROM KO_KUBEA E
        WHERE E.IGNORIEREN = 0
        AND E.KST_ID = 11
        AND E.KAT_ID IN (22,24)
        GROUP BY EXTRACT(YEAR FROM E.DATUM)
        ORDER BY 1 DESC,2 DESC
        """
        # print(aSQL)
        return self.sqlOpen(aSQL)
    def chartjsKontoverlauf(self):
        """Build Chart.js label/data strings of the cumulative balance.

        Walks month boundaries from the first to the last booking and issues
        one SUM query per month. Returns two comma-joined strings
        (labels like 'MM/YYYY', and the running balances).
        """
        aLabels = ''
        aDaten = ''
        # alle Monate: overall date range of the bookings
        aSQL = 'SELECT MIN(E.DATUM),MAX(E.DATUM) FROM KO_KUBEA E'
        cur = self.sqlOpen(aSQL)
        if (cur == None):
            return aLabels, aDaten
        for row in cur:
            aVon = row[0]
            aBis = row[1]
        while (aVon < aBis):
            aVon = T.ersterNaechsterMonat(aVon)
            # print(aVon)
            aSQL = """
            SELECT
            SUM(E.BETRAG)
            FROM KO_KUBEA E
            WHERE E.DATUM < '%s'
            """ % (aVon)
            # print(aSQL)
            cur = self.sqlOpen(aSQL)
            if (cur == None):
                return aLabels, aDaten
            for row in cur:
                betrag = row[0]
            s = aVon.strftime("%m/%Y")
            print(s)
            if aLabels != '':
                aLabels = aLabels + ', '
            aLabels = ("%s'%s'") % (aLabels, s)
            if aDaten != '':
                aDaten = aDaten + ', '
            aDaten = ("%s %s") % (aDaten, betrag)
        return aLabels, aDaten
    def chartjsAlleMonateEinkommen(self):
        """
        aLabels = "'Jan', 'Feb', 'Mar'"
        aEKU = " 1000 , 1500 , 2000"
        aEKS = " 4000 , 5000 , 6000"

        Chart.js strings for monthly income per person. The cursor is newest
        first; prepending reverses into chronological order.
        """
        aLabels = ''
        aEKU = ''
        aEKS = ''
        cur = self.openAlleMonateEinkommen()
        if (cur == None):
            return aLabels, aEKU, aEKS
        for row in cur:
            if aLabels != '':
                aLabels = ', ' + aLabels
            aLabels = ("'%s/%s'%s") % (row[1], row[0], aLabels)
            if aEKU != '':
                aEKU = ', ' + aEKU
            aEKU = ("%s %s") % (row[3], aEKU)
            if aEKS != '':
                aEKS = ', ' + aEKS
            aEKS = ("%s %s") % (row[4], aEKS)
        return aLabels, aEKU, aEKS
    def chartjsAlleJahreEinkommen(self):
        """
        aLabels = "'Jan', 'Feb', 'Mar'"
        aEKU = " 1000 , 1500 , 2000"
        aEKS = " 4000 , 5000 , 6000"

        Chart.js strings for yearly income per person; prepending reverses
        the DESC cursor into chronological order.
        """
        aLabels = ''
        aEKU = ''
        aEKS = ''
        cur = self.openAlleJahreEinkommen()
        if (cur == None):
            return aLabels, aEKU, aEKS
        for row in cur:
            if aLabels != '':
                aLabels = ', ' + aLabels
            aLabels = ("'%s'%s") % (row[0], aLabels)
            if aEKU != '':
                aEKU = ', ' + aEKU
            aEKU = ("%s %s") % (row[2], aEKU)
            if aEKS != '':
                aEKS = ', ' + aEKS
            aEKS = ("%s %s") % (row[3], aEKS)
        return aLabels, aEKU, aEKS
    def listeAlleMonateEinkommen(self):
        """Monthly income rows per person.

        Returns (month_count, average, rows); row keys: jahr, monat, betrag,
        sDMG (total), sDMU, sDMS (per person).
        """
        aSumme = 0
        aAnzMonate = 0
        cur = self.openAlleMonateEinkommen()
        if (cur == None):
            return aAnzMonate, aSumme, []
        ea = []
        for row in cur:
            # debug leftover; note it formats row[1] (the month) as currency
            s = "%s | %20s" % (row[0], T.sDM(row[1]))
            # print s
            k = {'jahr': row[0],
                 'monat': row[1],
                 'betrag': row[2],
                 'sDMG': T.sDM(row[2]),
                 'sDMU': T.sDM(row[3]),
                 'sDMS': T.sDM(row[4])
                 }
            aSumme = aSumme + row[2]
            aAnzMonate = aAnzMonate + 1
            # print k
            ea.append(k)
        if aAnzMonate > 0:
            aSumme = aSumme / aAnzMonate
        return aAnzMonate, aSumme, ea
    def diagrammKostenartVonBis(self, aPfad, aDateiname, aData, aLabels):
        """Render a ReportLab pie chart PNG into aPfad/aDateiname.png."""
        d = Drawing(800, 800)
        pie = Pie()
        # centre and radius of the pie inside the 800x800 drawing
        pie.x = 360
        pie.y = 360
        pie.xradius = 300
        pie.yradius = 300
        pie.data = aData
        pie.labels = aLabels
        pie.slices.strokeWidth = 0.5
        # pie.slices[3].popout = 20
        d.add(pie)
        d.save(formats=['png'], outDir=aPfad, fnRoot=aDateiname)
def diagrammAlleJahreEA(self, aPngDateiname):
# Festlegen der Gesamtgröße in Pixel
d = Drawing(800, 600)
# Anlegen des Diagramms
diagramm = VerticalBarChart()
# Positionierung und Größe des Diagramms
diagramm.x = 50
diagramm.y = 50
diagramm.width = 700
diagramm.height = 500
# Holen der Daten
daten = []
jahre = []
aAnzJahre, aSumme, ea = self.listeAlleJahreEA()
print(ea)
for x in ea:
print
x['betrag'], x['jahr']
daten.append(float(x['betrag']))
jahre.append(str(x['jahr']))
ymin = min(daten)
ymax = max(daten)
# Daten für das Diagramm müssen als Liste von Tupeln vorliegen
daten = [tuple(daten)]
print(daten)
print(jahre)
# return False
# Hinzufügen der Daten
diagramm.data = daten
# Y-Achse (in ReportLab „valueAxis“) formatieren
diagramm.valueAxis.valueMin = ymin
diagramm.valueAxis.valueMax = ymax
diagramm.valueAxis.valueStep = 2000
# X-Achse (in ReportLab „categoryAxis“) formatieren
diagramm.categoryAxis.categoryNames = jahre
# Diagramm zeichnen
d.add(diagramm)
# ... und speichernhttp://www.reportlab.com/software/opensource/rl-toolkit/guide/
print(aPngDateiname)
renderPM.drawToFile(d, aPngDateiname, 'PNG')
def diagrammAlleMonateEinkommen(self, aPngDateiname):
# Festlegen der Gesamtgröße in Pixel
d = Drawing(800, 600)
# Anlegen des Diagramms
diagramm = VerticalBarChart()
# Positionierung und Größe des Diagramms
diagramm.x = 50
diagramm.y = 50
diagramm.width = 700
diagramm.height = 500
# Holen der Daten
daten = []
jahre = []
aAnzJahre, aSumme, ea = self.listeAlleMonateEinkommen()
print(ea)
for x in ea:
# print (x)
x['betrag'], x['jahr']
daten.append(float(x['betrag']))
jahre.append(str(x['jahr']))
ymin = min(daten)
ymax = max(daten)
# Daten für das Diagramm müssen als Liste von Tupeln vorliegen
daten = [tuple(daten)]
print(daten)
print(jahre)
# return False
# Hinzufügen der Daten
diagramm.data = daten
# Y-Achse (in ReportLab „valueAxis“) formatieren
diagramm.valueAxis.valueMin = ymin
diagramm.valueAxis.valueMax = ymax
diagramm.valueAxis.valueStep = 2000
# X-Achse (in ReportLab „categoryAxis“) formatieren
diagramm.categoryAxis.categoryNames = jahre
# Diagramm zeichnen
d.add(diagramm)
# ... und speichernhttp://www.reportlab.com/software/opensource/rl-toolkit/guide/
print(aPngDateiname)
renderPM.drawToFile(d, aPngDateiname, 'PNG')
def jsonAlleJahreEA(self):
aSumme, ea = self.listeAlleJahreEA()
j = {'summe': T.sDM(aSumme), 'ea': ea}
return j
def htmlAlleJahreEA(self):
aAnzahl, aSumme, ea = self.listeAlleJahreEA()
s = ''
for l in ea:
jahr = l['jahr']
# print type(konto),konto
betrag = l['betrag']
sSaldo = l['sDM']
sDME = l['sDME']
sDMA = l['sDMA']
if (betrag < 0):
aKlasse = 'class=table-right-currency-red'
else:
aKlasse = 'class=table-right-currency'
# print type(konto),konto
s = '%s <tr><td class=table-left>%s</td><td class=table-right-currency>%s</td><td class=table-right-currency>%s</td><td %s>%s</td></tr>' % (
s, jahr, sDME, sDMA, aKlasse, sSaldo)
return ("<table>"
"<tr><th class=table-left>Jahr</th><th class=table-right-currency>Einnahmen</th><th class=table-right-currency>Ausgaben</th><th class=table-right-currency>Saldo</th></tr>"
"%s"
"<tr><th class=table-left>Durchschnitt für %d Jahre</th><th class=table-right-currency></th><th class=table-right-currency></th><th class=table-right-currency>%s</th></tr>"
"</table>") % (s, aAnzahl, T.sDM(aSumme))
return ("<table>"
"<tr><th class=table-left>Jahr</th><th class=table-right-currency>Saldo</th></tr>"
"%s"
"<tr><th class=table-left>Durchschnitt</th><th class=table-right-currency>%s</th></tr>"
"</table>") % (s, T.sDM(aSumme))
    def htmlAlleMonateEA(self):
        """HTML table of monthly saldo/income/expenses with an average row.

        The income cell links to the per-month detail page monatea/<y>/<m>/.
        """
        aAnzahl, aSumme, ea = self.listeAlleMonateEA()
        # print(type(ea))
        s = ''
        for l in ea:
            # print(l)
            monat = "%2d/%d" % (l['monat'], l['jahr'])
            betrag = l['betrag']
            sSaldo = l['sDM']
            sDME = l['sDME']
            sDMA = l['sDMA']
            if (betrag < 0):
                aKlasse = 'class=table-right-currency-red'
            else:
                aKlasse = 'class=table-right-currency'
            # wrap the income value in a drill-down link
            sDME = '<a href="monatea/%d/%d/">%s</a>' % (l['jahr'], l['monat'], sDME)
            s = '%s <tr>' \
                '<td class=table-left>%s</td>' \
                '<td %s>%s</td>' \
                '<td class=table-right-currency>%s</td>' \
                '<td class=table-right-currency>%s</td>' \
                '</tr>' \
                % (s, monat, aKlasse, sSaldo, sDME, sDMA)
            # print(s)
        return ("<table>"
                "<tr><th class=table-left>Monat</th><th class=table-right-currency>Saldo</th><th class=table-right-currency>Einnahmen</th><th class=table-right-currency>Ausgaben</th></tr>"
                "%s"
                "<tr><th class=table-left>Durchschnitt für %d Monate</th><th class=table-right-currency></th><th class=table-right-currency></th><th class=table-right-currency>%s</th></tr>"
                "</table>") % (s, aAnzahl, T.sDM(aSumme))
    def htmlAlleMonateEinkommen(self):
        """HTML table of monthly income per person with an average row."""
        aAnzahl, aSumme, ea = self.listeAlleMonateEinkommen()
        s = ''
        for l in ea:
            monat = "%2d/%d" % (l['monat'], l['jahr'])
            betrag = l['betrag']
            sDMG = l['sDMG']
            sDMU = l['sDMU']
            sDMS = l['sDMS']
            if (betrag < 0):
                aKlasse = 'class=table-right-currency-red'
            else:
                aKlasse = 'class=table-right-currency'
            s = '%s <tr><td class=table-left>%s</td><td class=table-right-currency>%s</td><td class=table-right-currency>%s</td><td %s>%s</td></tr>' % (
                s, monat, sDMU, sDMS, aKlasse, sDMG)
        return ("<table>"
                "<tr><th class=table-left>Monat</th><th class=table-right-currency>Uwe</th><th class=table-right-currency>Sabine</th><th class=table-right-currency>Gesamt</th></tr>"
                "%s"
                "<tr><th class=table-left>Durchschnitt für %d Monate</th><th class=table-right-currency></th><th class=table-right-currency></th><th class=table-right-currency>%s</th></tr>"
                "</table>") % (s, aAnzahl, T.sDM(aSumme))
def getProjekt_ID(self, aKurz):
aSQL = "SELECT MAX(ID) FROM KO_KUBPROJEKT P WHERE P.KURZ='%s'"
aSQL = aSQL % (aKurz)
print(aSQL)
cur = self.sqlOpen(aSQL)
if (cur == None):
return 0
else:
for row in cur:
if (row[0]):
return row[0]
else:
return 0
    def getProjekt_ID_Wintergarten_2017(self):
        """Id of the fixed 'Wintergarten 2017' project (0 if missing)."""
        return self.getProjekt_ID('Wintergarten 2017')
    def htmlProjekt(self, aProjekt_ID):
        """HTML table of all single bookings of one project plus total row."""
        aSumme, ea = self.listeProjekt(aProjekt_ID)
        s = ''
        for l in ea:
            datum = l['ttmmjj']  # .encode('utf-8')
            kurz = l['kurz']  # .encode('utf-8')
            betrag = l['betrag']
            s = '%s <tr><td class=table-3c-spalte1>%s</td><td class=table-3c-spalte2>%s</td><td class=table-3c-spalte3>%s</td></tr>' % (
                s, datum, kurz, betrag)
        return ("<table>"
                "<tr><th class=table-3c-spalte1>Datum</th><th class=table-3c-spalte2>Bezeichnung</th><th class=table-3c-spalte3>Betrag</th></tr>"
                "%s"
                "<tr><th class=table-3c-spalte1></th><th class=table-3c-spalte2>Summe</th><th class=table-3c-spalte3>%s</th></tr>"
                "</table>") % (s, T.sDM(aSumme))
    def htmlProjektK(self, aProjekt_ID):
        """HTML table of one project's bookings grouped by category."""
        aSumme, ea = self.listeProjektK(aProjekt_ID)
        s = ''
        for l in ea:
            datum = l['ttmmjj']  # .encode('utf-8')
            kurz = l['kurz']  # .encode('utf-8')
            betrag = l['betrag']
            s = '%s <tr><td class=table-3c-spalte1>%s</td><td class=table-3c-spalte2>%s</td><td class=table-3c-spalte3>%s</td></tr>' % (
                s, datum, kurz, betrag)
        return ("<table>"
                "<tr><th class=table-3c-spalte1>Datum</th><th class=table-3c-spalte2>Bezeichnung</th><th class=table-3c-spalte3>Betrag</th></tr>"
                "%s"
                "<tr><th class=table-3c-spalte1></th><th class=table-3c-spalte2>Summe</th><th class=table-3c-spalte3>%s</th></tr>"
                "</table>") % (s, T.sDM(aSumme))
def htmlProjektWintergarten2017(self):
aProjekt_ID = self.getProjekt_ID_Wintergarten_2017()
return self.htmlProjekt(aProjekt_ID)
def htmlProjektWintergarten2017K(self):
aProjekt_ID = self.getProjekt_ID_Wintergarten_2017()
return self.htmlProjektK(aProjekt_ID)
def main():
    """Manual smoke test: print the monthly overview as an HTML table."""
    konten = dmKonten()
    print(konten.htmlAlleMonateEA())
    # Other entry points kept for reference:
    #   konten.chartjsKontoverlauf(), konten.jsonAlleKonten(),
    #   konten.htmldiagrammLetzterMonat(), konten.htmlProjektWintergarten2017(),
    #   konten.csvKontoVerlauf('<path>/kontoverlauf.csv'),
    #   konten.diagrammAlleJahreEA('<path>/diagramm_alle_jahre.png')
    return 0
# Run the smoke test only when executed as a script.
if __name__ == '__main__':
    main()
|
[
"dev@wlsoft.de"
] |
dev@wlsoft.de
|
52361d8d4a6f74b1bd33bc572eee6582ff87237f
|
92a34017b2c604e3e1d5af6347f98cd05e8d0cb7
|
/orders/urls.py
|
3fbadc5750edf20f37089f51c4735544a62a7205
|
[] |
no_license
|
dandani-cs/swishtest
|
76b7c2e04193c749bef1b31e24fdd53a61707341
|
983e07930265690918d9d60b7ca972c2547d95bf
|
refs/heads/master
| 2023-02-18T03:25:23.652843
| 2020-12-12T12:54:15
| 2020-12-12T12:54:15
| 316,511,893
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from django.urls import path
from .views import OrderListView, OrderCreateView
# Order routes: list view at the collection root, creation under /new.
urlpatterns = [
    path("", OrderListView.as_view(), name="order_view"),
    path("new", OrderCreateView.as_view(), name="order_new")
]
|
[
"dani.cstech@gmail.com"
] |
dani.cstech@gmail.com
|
e9645d989c183c9dbac600d419ff4e6142b1a50e
|
0c6fb8fcc31eb658561ee7e244e8f0f7c8d72d6e
|
/build/ros_arduino_firmware/catkin_generated/pkg.installspace.context.pc.py
|
499a2c796c3c70019a20c66822f8a3cea544edbd
|
[] |
no_license
|
tutlebotmitica/mybot_ws
|
f71fac8e41a061d132694eb4f24f8375fd5eba8d
|
4bb2003069709a29f0868c5fc404da01cddd0af3
|
refs/heads/master
| 2021-08-18T21:41:13.355546
| 2017-11-24T00:43:04
| 2017-11-24T00:43:04
| 111,839,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by catkin from pkg.context.pc.in — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ros_arduino_firmware"
PROJECT_SPACE_DIR = "/home/turtlebot/mybot_ws/install"
PROJECT_VERSION = "0.2.0"
|
[
"paul@modulab.ro"
] |
paul@modulab.ro
|
2cdced4eecc20d7bb193be9283df32bad706371b
|
841af9f674d910f1135b65a1e46f4a422d13a6db
|
/test_only_modified.py
|
6626cf6ccb89b554c250e63db1192603bc0e1db2
|
[] |
no_license
|
betterteam/InterSec_simulation
|
07e95215abbab4722226355261b9909c4a8d8308
|
bfccec4e58b0fe79313993cb6cad33ce53a7f672
|
refs/heads/master
| 2021-01-15T22:18:47.528359
| 2018-01-10T07:57:19
| 2018-01-10T07:57:19
| 99,894,685
| 3
| 0
| null | 2017-12-15T15:39:54
| 2017-08-10T07:30:28
|
Python
|
UTF-8
|
Python
| false
| false
| 23,851
|
py
|
# Final version of traffic signal
import sys
import numpy as np
from PyQt5.QtWidgets import QWidget, QApplication, QMainWindow, QFrame, QDesktopWidget
from PyQt5.QtGui import QPainter, QColor, QPen, QBrush
from PyQt5.QtCore import Qt, QTimer, QTime
import math
class Position:
    """Mutable 2-D point with x/y coordinates (pixels)."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
class Speed:
    """Mutable 2-D velocity vector (pixels per tick)."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
class Size:
    """Rectangular vehicle footprint; default 5x10 pixels."""

    def __init__(self, x=5, y=10):
        self.x = x
        self.y = y
class Vehicle:
    """A vehicle with a position, a velocity and a rectangular size.

    moveNext() advances one simulation tick; the x coordinate wraps back to
    0 once it passes the 600 px wide scene (y is not wrapped).
    """

    def __init__(self):
        self._position = Position()
        self._speed = Speed()
        self._size = Size()

    def setPosition(self, position):
        self._position = position

    def getPosition(self):
        return self._position

    def setSpeed(self, speed):
        self._speed = speed

    def getSpeed(self):
        return self._speed

    def setSize(self, size):
        self._size = size

    def getSize(self):
        return self._size

    def moveNext(self):
        pos, vel = self._position, self._speed
        pos.x += vel.x
        pos.y += vel.y
        if pos.x > 600:  # wrap at the right edge of the scene
            pos.x = 0
class Example(QWidget):
    def __init__(self, vehicles_N, vehicles_W, vehicles_E):
        """Set up the window and start the repaint timer.

        vehicles_N/W/E: vehicle lists approaching from north/west/east.
        """
        super().__init__()
        self.vehicles_N = vehicles_N
        self.vehicles_W = vehicles_W
        self.vehicles_E = vehicles_E
        self.initUI()
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(1000/60)#repaint ~60x per second (interval in ms, not "1 s" as the old comment claimed)
        self.t = QTime()
        self.t.start()
        self.show()
    def initUI(self):
        """Create the window and all per-vehicle bookkeeping state.

        self.grid maps each 10x10 px cell of the 270..320 intersection square
        to True (free) / False (occupied); drawVehicles reads and writes it.
        """
        self.setGeometry(300, 300, 600, 600)
        self.setWindowTitle("Koku's Simulation")
        self.ti = 0
        # per-vehicle geometry slots (bezier parameter, rotation, corners)
        self.beze_t = []
        self.r = []
        self.up_left_x = []
        self.up_left_y = []
        self.down_left_x = []
        self.down_left_y = []
        self.up_right_x = []
        self.up_right_y = []
        self.down_right_x = []
        self.down_right_y = []
        for i in range(10):
            self.beze_t.append(0)
            self.r.append(0)
            self.up_left_x.append(0)
            self.up_left_y.append(0)
            self.down_left_x.append(0)
            self.down_left_y.append(0)
            self.up_right_x.append(0)
            self.up_right_y.append(0)
            self.down_right_x.append(0)
            self.down_right_y.append(0)
        # traffic light phase flags, kept in sync by drawSignals_0
        self.single_0_0 = True
        self.single_0_1 = True
        self.collision_check = []
        self.collision_check_N = []
        self.collision_check_S = []
        self.collision_check_W = []
        self.collision_check_E = []
        self.grid = {}
        for i in range(270, 330, 10):
            for j in range(270, 330, 10):
                self.grid[(i, j)] = True
    def paintEvent(self, e):
        """Repaint the whole scene: road markings, signals, then vehicles."""
        # print("!")
        qp = QPainter(self)
        self.drawLines(qp)
        self.drawSignals_0(qp)
        self.drawVehicles(qp)
    def drawLines(self, qp):
        """Draw the crossroad: solid outer road edges, dotted lane dividers.

        The two roads are 60 px wide (270..330) and cross in the centre of
        the 600x600 window.
        """
        # print(self.t.elapsed())
        pen = QPen(Qt.black, 2, Qt.SolidLine)
        pen_dash = QPen(Qt.black, 2, Qt.DotLine)
        # Vertical road
        qp.setPen(pen)
        qp.drawLine(270, 0, 270, 600)
        # with grids ##################
        # qp.drawLine(280, 0, 280, 600)
        # qp.drawLine(290, 0, 290, 600)
        # qp.drawLine(300, 0, 300, 600)
        # qp.drawLine(310, 0, 310, 600)
        # qp.drawLine(320, 0, 320, 600)
        # with grids ##################
        qp.drawLine(330, 0, 330, 600)
        qp.drawLine(300, 0, 300, 270)
        qp.drawLine(300, 330, 300, 600)
        qp.setPen(pen_dash)
        qp.drawLine(280, 330, 280, 600)
        qp.drawLine(290, 330, 290, 600)
        qp.drawLine(310, 330, 310, 600)
        qp.drawLine(320, 330, 320, 600)
        qp.drawLine(280, 0, 280, 270)
        qp.drawLine(290, 0, 290, 270)
        qp.drawLine(310, 0, 310, 270)
        qp.drawLine(320, 0, 320, 270)
        # Horizontal road (original comment said "Tropical")
        qp.setPen(pen)
        qp.drawLine(0, 270, 600, 270)
        # with grids ##################
        # qp.drawLine(0, 280, 600, 280)
        # qp.drawLine(0, 290, 600, 290)
        # qp.drawLine(0, 300, 600, 300)
        # qp.drawLine(0, 310, 600, 310)
        # qp.drawLine(0, 320, 600, 320)
        # with grids ##################
        qp.drawLine(0, 330, 600, 330)
        qp.drawLine(0, 300, 270, 300)
        qp.drawLine(330, 300, 600, 300)
        qp.setPen(pen_dash)
        qp.drawLine(0, 280, 270, 280)
        qp.drawLine(0, 290, 270, 290)
        qp.drawLine(0, 310, 270, 310)
        qp.drawLine(0, 320, 270, 320)
        qp.drawLine(330, 280, 600, 280)
        qp.drawLine(330, 290, 600, 290)
        qp.drawLine(330, 310, 600, 310)
        qp.drawLine(330, 320, 600, 320)
def drawSignals_0(self, qp):
#print(self.t.elapsed())
if 1000 < self.t.elapsed() < 2000:
qp.setPen(Qt.black)
qp.setBrush(Qt.red)
qp.drawEllipse(272, 262, 6, 6)
qp.drawEllipse(282, 262, 6, 6)
qp.drawEllipse(292, 262, 6, 6)
qp.setBrush(Qt.green)
qp.drawEllipse(332, 272, 6, 6)
qp.drawEllipse(332, 282, 6, 6)
qp.drawEllipse(332, 292, 6, 6)
qp.setBrush(Qt.red)
qp.drawEllipse(302, 332, 6, 6)
qp.drawEllipse(312, 332, 6, 6)
qp.drawEllipse(322, 332, 6, 6)
qp.setBrush(Qt.green)
qp.drawEllipse(262, 302, 6, 6)
qp.drawEllipse(262, 312, 6, 6)
qp.drawEllipse(262, 322, 6, 6)
self.single_0_0 = False
self.single_0_1 = True
else:
qp.setPen(Qt.black)
qp.setBrush(Qt.green)
qp.drawEllipse(272, 262, 6, 6)
qp.drawEllipse(282, 262, 6, 6)
qp.drawEllipse(292, 262, 6, 6)
qp.setBrush(Qt.red)
qp.drawEllipse(332, 272, 6, 6)
qp.drawEllipse(332, 282, 6, 6)
qp.drawEllipse(332, 292, 6, 6)
qp.setBrush(Qt.green)
qp.drawEllipse(302, 332, 6, 6)
qp.drawEllipse(312, 332, 6, 6)
qp.drawEllipse(322, 332, 6, 6)
qp.setBrush(Qt.red)
qp.drawEllipse(262, 302, 6, 6)
qp.drawEllipse(262, 312, 6, 6)
qp.drawEllipse(262, 322, 6, 6)
self.single_0_0 = True
self.single_0_1 = False
def coordinate_up_left_x(self, po_x, r):
return po_x - 5 * math.cos(math.radians(r))
def coordinate_up_left_y(self, po_y):
return po_y
def coordinate_up_right_x(self, po_x, r):
return po_x + 10 * math.cos(math.radians(r))
def coordinate_up_right_y(self, po_y):
return po_y
def coordinate_down_left_x(self, po_x, r):
return po_x - 5 * math.cos(math.radians(r))
def coordinate_down_left_y(self, po_y, r):
return po_y + 5 * math.sin(math.radians(r)) + 10 * math.cos(math.radians(r))
def coordinate_down_right_x(self, po_x, r):
return po_x + 10 * math.cos(math.radians(r))
def coordinate_down_right_y(self, po_y, r):
return po_y + 10 * math.sin(math.radians(r)) + 5 * math.cos(math.radians(r))
    def drawVehicles(self, qp):
        """Advance and repaint every simulated vehicle for one animation tick.

        Handles the northbound queue, the westbound queue (which turns south
        through the intersection along a Bezier curve) and the single
        eastbound vehicle, then resets the per-tick collision lists and marks
        the 6x6 intersection occupancy grid free again.

        NOTE(review): several loops shadow the enumerate index ``i`` with
        ``for i in range(11)``, and the code mixes the module-level
        ``vehicles_N`` with ``self.vehicles_N`` — presumably the same list
        object; confirm before refactoring.
        """
        qp.setPen(Qt.black)
        qp.setBrush(Qt.green)
        # # Vehicles from North
        for i, veh in enumerate(vehicles_N):
            # Stop (redraw in place) if the next step would hit the vehicle ahead.
            if (veh.getPosition().x + veh.getSpeed().x, veh.getPosition().y + veh.getSpeed().y) in self.collision_check_N:
                qp.drawRect(veh.getPosition().x, veh.getPosition().y, veh.getSize().x, veh.getSize().y)
                for i in range(11):
                    self.collision_check_N.append((veh.getPosition().x, veh.getPosition().y - i))
            else:
                # About to cross the northbound stop line at y == 260.
                if veh.getPosition().y + veh.getSpeed().y > 260 and veh.getPosition().y <= 260:
                    # single_0_1 means north/south signal is red: hold position.
                    if self.single_0_1:
                        qp.drawRect(veh.getPosition().x, veh.getPosition().y, veh.getSize().x, veh.getSize().y)
                        for i in range(11):
                            self.collision_check_N.append((veh.getPosition().x, veh.getPosition().y - i))
                    else:
                        # Green: enter only if the grid cells ahead are free.
                        if veh.getPosition().y <= 270:
                            if self.grid[((veh.getPosition().x + veh.getSpeed().x) // 10 * 10,
                                          (veh.getPosition().y + veh.getSpeed().y + veh.getSize().y) // 10 * 10)] and \
                                    self.grid[((veh.getPosition().x + veh.getSpeed().x + veh.getSize().x) // 10 * 10,
                                               (veh.getPosition().y + veh.getSpeed().y + veh.getSize().y) // 10 * 10)]:
                                veh.getPosition().y += veh.getSpeed().y
                                qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10)
                                for i in range(11):
                                    self.collision_check_N.append((veh.getPosition().x, veh.getPosition().y - i))
                                self.grid[(veh.getPosition().x // 10 * 10, (veh.getPosition().y + veh.getSize().y) // 10 * 10)] = False
                                self.grid[((veh.getPosition().x + veh.getSize().x) // 10 * 10, (veh.getPosition().y + veh.getSize().y) // 10 * 10)] = False
                        else:
                            # NOTE(review): reached only if y > 270 here, but the
                            # outer condition forces y <= 260 — looks unreachable.
                            try:
                                if self.grid[((veh.getPosition().x + veh.getSpeed().x) // 10 * 10,
                                              (veh.getPosition().y + veh.getSpeed().y) // 10 * 10)] and \
                                        self.grid[((veh.getPosition().x + veh.getSpeed().x + veh.getSize().x) // 10 * 10,
                                                   (veh.getPosition().y + veh.getSpeed().y) // 10 * 10)] and \
                                        self.grid[((veh.getPosition().x + veh.getSpeed().x) // 10 * 10,
                                                   (veh.getPosition().y + veh.getSpeed().y + veh.getSize().y) // 10 * 10)] and \
                                        self.grid[((veh.getPosition().x + veh.getSpeed().x + veh.getSize().x) // 10 * 10,
                                                   (veh.getPosition().y + veh.getSpeed().y + veh.getSize().y) // 10 * 10)]:
                                    self.vehicles_N[i].getPosition().y += veh.getSpeed().y
                                    self.grid[((veh.getPosition().x + veh.getSpeed().x) // 10 * 10,
                                               (veh.getPosition().y + veh.getSpeed().y) // 10 * 10)] = False
                                    self.grid[((veh.getPosition().x + veh.getSpeed().x + veh.getSize().x) // 10 * 10,
                                               (veh.getPosition().y + veh.getSpeed().y) // 10 * 10)] = False
                                    self.grid[((veh.getPosition().x + veh.getSpeed().x) // 10 * 10,
                                               (veh.getPosition().y + veh.getSpeed().y + veh.getSize().y) // 10 * 10)] = False
                                    self.grid[((veh.getPosition().x + veh.getSpeed().x + veh.getSize().x) // 10 * 10,
                                               (veh.getPosition().y + veh.getSpeed().y + veh.getSize().y) // 10 * 10)] = False
                                    if self.vehicles_N[i].getPosition().y > 600:
                                        self.vehicles_N[i].getPosition().y = 0
                                    qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10)
                                    for i in range(11):
                                        self.collision_check_N.append((veh.getPosition().x, veh.getPosition().y - i))
                            except KeyError:
                                # Outside the intersection grid: just keep moving.
                                self.vehicles_N[i].getPosition().y += veh.getSpeed().y
                                if self.vehicles_N[i].getPosition().y > 600:
                                    self.vehicles_N[i].getPosition().y = 0
                                qp.drawRect(self.vehicles_N[i].getPosition().x, self.vehicles_N[i].getPosition().y, 5, 10)
                else:
                    # print(self.single_0_1)
                    # Normal cruising: move south, wrap around at the bottom edge.
                    veh.getPosition().y += veh.getSpeed().y
                    if veh.getPosition().y > 600:
                        veh.getPosition().y = 0
                    # print(self.t.elapsed())
                    qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10)
                    for i in range(11):
                        self.collision_check_N.append((veh.getPosition().x, veh.getPosition().y - i))
        #print(self.collision_check)
        # Vehicles from West
        for i, veh in enumerate(vehicles_W):
            # Check if there are vehicles ahead. If true, stop
            if (veh.getPosition().x + veh.getSpeed().x, veh.getPosition().y + veh.getSpeed().y) in self.collision_check_W:
                qp.drawRect(veh.getPosition().x, veh.getPosition().y, veh.getSize().x, veh.getSize().y)
                # Make the room not available for other vehicles
                for j in range(11):
                    self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y))
            # Move forward
            else:
                # Just before the intersection
                if veh.getPosition().x + 10 + 2 > 270 and veh.getPosition().x <= 270 - 10:
                    # Check traffic signal. True, then stop before entering.
                    if self.single_0_0:
                        qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5)
                        for j in range(11):
                            self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y))
                    # Enter intersection
                    else:
                        veh.getPosition().x += 2
                        qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5)
                        for j in range(11):
                            self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y))
                        # Light up the grids in the intersection
                        # Up left
                        if (veh.getPosition().x // 10 * 10, veh.getPosition().y // 10 * 10) in self.grid:
                            self.grid[(veh.getPosition().x // 10 * 10, veh.getPosition().y // 10 * 10)] = False
                            #print('success, x:', veh.getPosition().x)
                        # Up right
                        if ((veh.getPosition().x + 10) // 10 * 10, veh.getPosition().y // 10 * 10) in self.grid:
                            self.grid[((veh.getPosition().x + 10) // 10 * 10, veh.getPosition().y // 10 * 10)] = False
                            #print('success, x:', veh.getPosition().x)
                        # Down left
                        if (veh.getPosition().x // 10 * 10, (veh.getPosition().y) // 10 * 10) in self.grid:
                            self.grid[(veh.getPosition().x // 10 * 10, (veh.getPosition().y + 5) // 10 * 10)] = False
                            #print('success, x:', veh.getPosition().x)
                        # Down right
                        if ((veh.getPosition().x + 10) // 10 * 10, (veh.getPosition().y) // 10 * 10) in self.grid:
                            self.grid[((veh.getPosition().x + 10) // 10 * 10, (veh.getPosition().y + 5) // 10 * 10)] = False
                            #print('success, x:', veh.getPosition().x)
                # Already in the intersection
                else:
                    if 270 < veh.getPosition().x < 328 and veh.getPosition().y < 330:
                        qp.save()
                        qp.translate(veh.getPosition().x, veh.getPosition().y)
                        # Calculate rotation angle
                        if (((veh.getPosition().x - 270 + 3) / 60) * 90 > 15):
                            self.r[i] = ((veh.getPosition().x - 270 + 3) / 60) * 90
                            qp.rotate(self.r[i])
                        else:
                            self.r[i] = 0
                            qp.rotate(self.r[i])
                        qp.translate(-veh.getPosition().x, -veh.getPosition().y)
                        # Calculate trajectory by using Bezier Curve
                        x = pow(1 - (self.beze_t[i] / 60), 2) * 272 + 2 * (self.beze_t[i] / 60) * (
                            1 - self.beze_t[i] / 60) * 330 + pow(
                            self.beze_t[i] / 60, 2) * 330
                        y = pow(1 - (self.beze_t[i] / 60), 2) * 273 + 2 * (self.beze_t[i] / 60) * (
                            1 - self.beze_t[i] / 60) * 273 + pow(
                            self.beze_t[i] / 60, 2) * 330
                        veh.setPosition(Position(x, y))
                        self.beze_t[i] += 2
                        qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5)
                        for j in range(11):
                            self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y))
                        qp.restore()
                        # Calculate the big Square's coordinate
                        self.up_left_x[i] = self.coordinate_up_left_x(veh.getPosition().x, self.r[i])
                        self.up_left_y[i] = self.coordinate_up_left_y(veh.getPosition().y)
                        self.down_left_x[i] = self.coordinate_down_left_x(veh.getPosition().x, self.r[i])
                        self.down_left_y[i] = self.coordinate_down_left_y(veh.getPosition().y, self.r[i])
                        self.up_right_x[i] = self.coordinate_up_right_x(veh.getPosition().x, self.r[i])
                        self.up_right_y[i] = self.coordinate_up_right_y(veh.getPosition().y)
                        self.down_right_x[i] = self.coordinate_down_right_x(veh.getPosition().x, self.r[i])
                        self.down_right_y[i] = self.coordinate_down_right_y(veh.getPosition().y, self.r[i])
                        # Up left
                        if (self.up_left_x[i] // 10 * 10, self.up_left_y[i] // 10 * 10) in self.grid:
                            self.grid[(self.up_left_x[i] // 10 * 10, self.up_left_y[i] // 10 * 10)] = False
                            # print('success')
                        # Up right
                        if ((self.up_right_x[i]) // 10 * 10, self.up_right_y[i] // 10 * 10) in self.grid:
                            self.grid[((self.up_right_x[i]) // 10 * 10, self.up_right_y[i] // 10 * 10)] = False
                            # print('success')
                        # Down left
                        if (self.down_left_x[i] // 10 * 10, (self.down_left_y[i]) // 10 * 10) in self.grid:
                            self.grid[(self.down_left_x[i] // 10 * 10, (self.down_left_y[i]) // 10 * 10)] = False
                            # print('success')
                        # Down right
                        if ((self.down_right_x[i]) // 10 * 10, (self.down_right_y[i]) // 10 * 10) in self.grid:
                            self.grid[((self.down_right_x[i]) // 10 * 10, (self.down_right_y[i]) // 10 * 10)] = False
                            # print('success')
                    # Already left intersection
                    elif 328 <= veh.getPosition().x and veh.getPosition().y < 600:
                        qp.save()
                        qp.translate(veh.getPosition().x, veh.getPosition().y)
                        qp.rotate(90)
                        qp.translate(-veh.getPosition().x, -veh.getPosition().y)
                        veh.getPosition().y += 2
                        qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5)
                        for j in range(11):
                            self.collision_check_W.append((veh.getPosition().x, veh.getPosition().y - j))
                        qp.restore()
                    # Already left screen
                    elif veh.getPosition().y >= 600:
                        veh.getPosition().x = 0
                        veh.getPosition().y = 273
                        self.beze_t[i] = 0
                        qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5)
                        for j in range(11):
                            self.collision_check_W.append((veh.getPosition().x, veh.getPosition().y - j))
                    # Move horizontal direction(across X_axis)
                    else:
                        veh.getPosition().x += 2
                        qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5)
                        for j in range(11):
                            self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y))
        # Vehicle2
        # if self.single_0_0:
        #     qp.drawRect(self.vehicles_E[0].getPosition().x, self.vehicles_E[0].getPosition().y, 10, 5)
        # else:
        # Single eastbound vehicle: moves left 3px/tick when its grid cells are free.
        try:
            if self.grid[((self.vehicles_E[0].getPosition().x - 5) // 10 * 10, self.vehicles_E[0].getPosition().y // 10 * 10)] and \
                    self.grid[((self.vehicles_E[0].getPosition().x + 10 - 5) // 10 * 10, self.vehicles_E[0].getPosition().y // 10 * 10)] and \
                    self.grid[((self.vehicles_E[0].getPosition().x - 5) // 10 * 10, (self.vehicles_E[0].getPosition().y + 5) // 10 * 10)] and \
                    self.grid[((self.vehicles_E[0].getPosition().x + 10 - 5) // 10 * 10, (self.vehicles_E[0].getPosition().y + 5) // 10 * 10)]:
                self.vehicles_E[0].getPosition().x -= 3
                if self.vehicles_E[0].getPosition().x < 0:
                    self.vehicles_E[0].getPosition().x = 600
                qp.drawPoint(self.vehicles_E[0].getPosition().x + 1, self.vehicles_E[0].getPosition().y - 1)
                qp.drawRect(self.vehicles_E[0].getPosition().x, self.vehicles_E[0].getPosition().y, 10, 5)
            else:
                qp.drawPoint(self.vehicles_E[0].getPosition().x + 1, self.vehicles_E[0].getPosition().y - 1)
                qp.drawRect(self.vehicles_E[0].getPosition().x, self.vehicles_E[0].getPosition().y, 10, 5)
        except KeyError:
            # Outside the grid: keep moving, wrap around at the left edge.
            self.vehicles_E[0].getPosition().x -= 3
            if self.vehicles_E[0].getPosition().x < 0:
                self.vehicles_E[0].getPosition().x = 600
            qp.drawPoint(self.vehicles_E[0].getPosition().x + 1, self.vehicles_E[0].getPosition().y - 1)
            qp.drawRect(self.vehicles_E[0].getPosition().x, self.vehicles_E[0].getPosition().y, 10, 5)
        # Reset per-tick bookkeeping and free the whole intersection grid.
        self.collision_check = []
        self.collision_check_N = []
        self.collision_check_S = []
        self.collision_check_W = []
        self.collision_check_E = []
        for i in range(270, 330, 10):
            for j in range(270, 330, 10):
                self.grid[(i, j)] = True
        self.ti += 10
        if self.ti > 700:
            self.ti = 0
        # print(self.t.elapsed())
        self.t.restart()
if __name__ == '__main__':
    app = QApplication(sys.argv)

    def _make_fleet(count, position_of, speed_xy, size_xy):
        """Build *count* vehicles; position_of(i) gives the i-th start point."""
        fleet = []
        for idx in range(count):
            car = Vehicle()
            car.setPosition(Position(*position_of(idx)))
            car.setSpeed(Speed(*speed_xy))
            car.setSize(Size(*size_xy))
            fleet.append(car)
        return fleet

    # Five northbound vehicles stacked 10px apart above the top edge.
    vehicles_N = _make_fleet(5, lambda i: (313, 0 - i * 10), (0, 2), (5, 10))
    # Nine westbound vehicles queued 10px apart off the left edge.
    vehicles_W = _make_fleet(9, lambda i: (0 - i * 10, 273), (2, 0), (10, 5))
    # One eastbound vehicle entering from the right edge.
    vehicles_E = _make_fleet(1, lambda i: (600, 302), (2, 0), (10, 5))
    ex = Example(vehicles_N, vehicles_W, vehicles_E)
    sys.exit(app.exec_())
|
[
"better@opossum.itolab.nitech.ac.jp"
] |
better@opossum.itolab.nitech.ac.jp
|
af5d3531a0c3b27b202c1ef66223d898bd77ec13
|
008aada8c0e718e0220eabc5b54732a1e1b07f97
|
/sergeant/connector/_connector.py
|
ee1985d5cf05a1683d5b4b588c6a582648b9599b
|
[
"MIT"
] |
permissive
|
gabriel-yahav/sergeant
|
59259a92c4c072e317d82022f19b440b21d2c294
|
0de9bfb4fdca62f061d6588c6839c4491c5d4f9b
|
refs/heads/master
| 2022-09-30T04:38:48.414842
| 2020-05-26T10:28:50
| 2020-05-26T10:28:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
import typing
class Lock:
    """Abstract distributed-lock interface.

    Concrete connector implementations override every method; calling any of
    them on the base class raises NotImplementedError.
    """

    def acquire(self, timeout: typing.Optional[float] = None, check_interval: float = 1.0, ttl: int = 60) -> bool:
        """Try to take the lock, polling every *check_interval* seconds."""
        raise NotImplementedError()

    def release(self) -> bool:
        """Release the lock if held."""
        raise NotImplementedError()

    def is_locked(self) -> bool:
        """Report whether the lock is currently held by anyone."""
        raise NotImplementedError()

    def set_ttl(self, ttl: int) -> bool:
        """Set the lock's remaining time-to-live in seconds."""
        raise NotImplementedError()

    def get_ttl(self) -> typing.Optional[int]:
        """Return the lock's remaining time-to-live, or None."""
        raise NotImplementedError()
class Connector:
    """Abstract key-value + queue backend used by sergeant workers.

    Subclasses set *name* and implement every method; the base class only
    defines the contract and raises NotImplementedError.
    """

    # Backend identifier, e.g. 'redis' — set by concrete subclasses.
    name: str

    def key_set(self, key: str, value: bytes) -> bool:
        """Store *value* under *key*; return True on success."""
        raise NotImplementedError()

    def key_get(self, key: str) -> typing.Optional[bytes]:
        """Return the value stored under *key*, or None if absent."""
        raise NotImplementedError()

    def key_delete(self, key: str) -> bool:
        """Delete *key*; return True if something was removed."""
        raise NotImplementedError()

    def queue_pop(self, queue_name: str) -> typing.Optional[bytes]:
        """Pop one item from the queue, or None if it is empty."""
        raise NotImplementedError()

    def queue_pop_bulk(self, queue_name: str, number_of_items: int) -> typing.List[bytes]:
        """Pop up to *number_of_items* items from the queue."""
        raise NotImplementedError()

    def queue_push(self, queue_name: str, item: bytes, priority: str = 'NORMAL') -> bool:
        """Push one item onto the queue with the given priority."""
        raise NotImplementedError()

    def queue_push_bulk(self, queue_name: str, items: typing.Iterable[bytes], priority: str = 'NORMAL') -> bool:
        """Push many items onto the queue with the given priority."""
        raise NotImplementedError()

    def queue_length(self, queue_name: str) -> int:
        """Return the number of items currently queued."""
        raise NotImplementedError()

    def queue_delete(self, queue_name: str) -> bool:
        """Remove the queue and all of its items."""
        raise NotImplementedError()

    def lock(self, name: str) -> Lock:
        """Return a Lock object scoped to *name*."""
        raise NotImplementedError()
|
[
"gal@intsights.com"
] |
gal@intsights.com
|
beb223699fadcff443ec1b36fb64cecf67b2359c
|
b5d0a6254b54c0a778181a67bcda14cc6663e871
|
/0-notes/job-search/Cracking the Coding Interview/C10SortingSearching/questions/10.5-question.py
|
5ec618baaa19cdb2c7b27b33ac1bfb9f081b82c6
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Web-Dev-Collaborative/Lambda-Final-Backup
|
113e719a76a144b86d06f3a412afe4b02689cad7
|
e9ab84928faa8364bacd863009ae9aec01ff9d1e
|
refs/heads/master
| 2023-06-07T15:34:00.682815
| 2021-04-17T01:53:14
| 2021-04-17T01:53:14
| 358,899,122
| 0
| 0
|
MIT
| 2023-05-30T04:03:16
| 2021-04-17T14:24:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
# Sparse Search
# Given a sorted array of strings that is interspersed with empty strings,
# write a method to find the location of a given string.
# EXAMPLE: INPUT: ball, {"at", "", "", "", "ball", "", "", "car", "", "", "dad", "", ""}
# OUTPUT: 4
# time complexity: O()
# space complexity: O()
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
3e9e5f2e2ab82da05314d2f3be4cd0c514ea872e
|
b10bae1f94f00404d8d21a563d7626486aeebe74
|
/users/models.py
|
ce385919d08af645716f84c4baa3931bbe72a02d
|
[] |
no_license
|
brotherchris65/newspaper
|
d93b91f618cc33c627f5e584fccd91bdcb6fb75c
|
7dd5f5c3e09fc38183684d9a30e01f5e1debc2ee
|
refs/heads/master
| 2021-09-28T14:20:35.043218
| 2019-12-11T01:42:05
| 2019-12-11T01:42:05
| 227,240,504
| 0
| 0
| null | 2021-09-22T18:10:49
| 2019-12-11T00:18:19
|
Python
|
UTF-8
|
Python
| false
| false
| 202
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
    """Project user model extending Django's AbstractUser with an age field."""

    # Optional: existing accounts may not have supplied an age.
    age = models.PositiveIntegerField(null=True, blank=True)
|
[
"brotherchris65@gmail.com"
] |
brotherchris65@gmail.com
|
4090524e51e82ff0144e1f510597cdaf59752ac9
|
08d5ac4947860068f3c79edd6eed7493cee29f95
|
/theano_train.py
|
54672bc9bc60d8e2e33a5ff5e6c0dc50ddded584
|
[] |
no_license
|
MrsJLW/nslkdd-deep
|
9babc0821524247bdd4bd2afa1c197392af5ab3c
|
462705bb0f42328673a3fe6d0caf0e75802e9b87
|
refs/heads/master
| 2020-03-29T13:27:54.058568
| 2016-04-11T18:01:31
| 2016-04-11T18:01:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,049
|
py
|
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, confusion_matrix
import numpy as np
import sklearn
import sklearn.datasets
import matplotlib
import theano
import theano.tensor as T
import timeit
import time
from theano import pp
import data as d
from theano import config
from collections import Counter
# Fixed seed so weight initialisation is reproducible across runs.
np.random.seed(0)
class_weight = []
# Global network state mutated by add_layer() and build_model().
layer_stack, w_stack, b_stack, dw_stack, db_stack = [], [], [], [], []
# Hyperparameters: mini-batch size, input/output dimensionality.
num_examples = 40
nn_input_dim = 200
nn_output_dim = 5
# three layers in a conical structure
nn_hdims = [500, 100]
batch_per_epoch = 3000
num_passes = 2
# Number of hidden layers (the softmax output layer is added on top).
n_layers = 2
# Learning rate and L2 regularisation strength.
epsilon = np.float32(0.06)
reg_lambda = np.float32(0.0001)
def activate(name, values):
    """Apply the activation function *name* to the symbolic tensor *values*.

    Args:
        name: one of 'sigmoid', 'tanh', 'relu' or 'softmax'.
        values: a Theano symbolic expression.

    Returns:
        The activated symbolic expression.

    Raises:
        ValueError: if *name* is not a recognised activation.
    """
    if name == 'sigmoid':
        return T.nnet.sigmoid(values)
    elif name == 'tanh':
        return T.tanh(values)
    elif name == 'relu':
        # Element-wise max(values, 0) expressed with T.switch.
        return T.switch(values > 0, values, 0)
    elif name == 'softmax':
        return T.nnet.softmax(values)
    # Previously an unknown name silently returned None, which only surfaced
    # later as a confusing graph-construction error; fail fast instead.
    raise ValueError('unknown activation: %r' % (name,))
def add_layer(activation, dim):
    """Append a fully connected layer of width *dim* to the global network.

    The first call wires the layer to the shared input ``X``; later calls
    chain onto the previous layer's output.  The new weight and bias shared
    variables are registered in the global ``w_stack`` / ``b_stack``.
    """
    if layer_stack:
        source = layer_stack[-1]
        fan_in = b_stack[-1].get_value().shape[0]
    else:
        source = X
        fan_in = nn_input_dim
    suffix = str(len(layer_stack) + 1)
    W = theano.shared(np.random.randn(fan_in, dim).astype('float32'),
                      name='W' + suffix)
    b = theano.shared(np.zeros(dim).astype('float32'), name='b' + suffix)
    layer_stack.append(activate(activation, source.dot(W) + b))
    w_stack.append(W)
    b_stack.append(b)
# Shared variables holding the current mini-batch (inputs, one-hot labels,
# per-example class weights); refilled by build_model() before each step.
X = theano.shared(np.array(np.random.randn(200, 2000), config.floatX))
y = theano.shared(np.array(np.random.randn(200, 5), config.floatX))
c_w = theano.shared(np.array(np.random.randn(200), config.floatX))
# W1 = theano.shared(np.random.randn(nn_input_dim, nn_hdim1).astype('float32'), name='W1')
# b1 = theano.shared(np.zeros(nn_hdim1).astype('float32'), name='b1')
# W2 = theano.shared(np.random.randn(nn_hdim1, nn_hdim2).astype('float32'), name='W2')
# b2 = theano.shared(np.zeros(nn_hdim2).astype('float32'), name='b2')
# W3 = theano.shared(np.random.randn(nn_hdim2, nn_output_dim).astype('float32'), name='W3')
# b3 = theano.shared(np.zeros(nn_output_dim).astype('float32'), name='b3')
# params=[W1,b1,W2,b2,W3,b3]
# params holds references to the (mutable) stacks, so layers added below are visible.
params = [w_stack, b_stack]
# Network: two sigmoid hidden layers followed by a softmax output layer.
add_layer('sigmoid', nn_hdims[0])
add_layer('sigmoid', nn_hdims[1])
add_layer('softmax', nn_output_dim)
# L2 penalty over the three weight matrices.
loss_reg = 1. / num_examples * reg_lambda / 2 * (
    T.sum(T.sqr(w_stack[-3])) + T.sum(T.sqr(w_stack[-2])) + T.sum(T.sqr(w_stack[-1])))
# Cross-entropy loss plus regularisation; prediction is the argmax class.
loss = ((T.nnet.categorical_crossentropy(layer_stack[-1], y)).mean()) + loss_reg
prediction = T.argmax(layer_stack[-1], axis=1)
# dW4 = T.grad(loss, w_stack[-1])
# db4 = T.grad(loss, b_stack[-1])
# dW3 = T.grad(loss, w_stack[-2])
# db3 = T.grad(loss, b_stack[-2])
# dW2 = T.grad(loss, w_stack[-3])
# db2 = T.grad(loss, b_stack[-3])
# dW1 = T.grad(loss, w_stack[-4])
# db1 = T.grad(loss, b_stack[-4])
# Gradients for every layer, ordered oldest-to-newest to match the stacks.
dw_stack = [T.grad(loss, w_stack[-n_layers+i-1]) for i in range(0, n_layers + 1)]
db_stack = [T.grad(loss, b_stack[-n_layers+i-1]) for i in range(0, n_layers + 1)]
forward_prop = theano.function([], layer_stack[-1])
calculate_loss = theano.function([], loss)
predict = theano.function([], prediction)
# u = ((w_stack[-1], w_stack[-1] - epsilon * dW4),
# (w_stack[-2], w_stack[-2] - epsilon * dW3),
# (w_stack[-3], w_stack[-3] - epsilon * dW2),
# (w_stack[-4], w_stack[-4] - epsilon * dW1),
# (b_stack[-1], b_stack[-1] - epsilon * db4),
# (b_stack[-2], b_stack[-2] - epsilon * db3),
# (b_stack[-3], b_stack[-3] - epsilon * db2),
# (b_stack[-4], b_stack[-4] - epsilon * db1))
# SGD update pairs (param, param - lr * grad) for all weights then all biases.
u = tuple([tuple([w_stack[-i], w_stack[-i] - epsilon * dw_stack[n_layers - i + 1]]) for i in range(1, n_layers+2)])
u += tuple([tuple([b_stack[-i], b_stack[-i] - epsilon * db_stack[n_layers - i + 1]]) for i in range(1, n_layers+2)])
gradient_step = theano.function(
    [],
    updates=u)
def build_model(num_passes=5, print_loss=False):
    """Train the global network with mini-batch SGD.

    Re-initialises all weights (Xavier-style scaling) and biases, then runs
    *num_passes* epochs of *batch_per_epoch* gradient steps, loading a fresh
    batch into the shared variables before every step.
    """
    np.random.seed(0)
    # Re-initialise: randn scaled by 1/sqrt(fan_in), zero biases.
    w_stack[-3].set_value((np.random.randn(nn_input_dim, nn_hdims[0]) / np.sqrt(nn_input_dim)).astype('float32'))
    b_stack[-3].set_value(np.zeros(nn_hdims[0]).astype('float32'))
    w_stack[-2].set_value((np.random.randn(nn_hdims[0], nn_hdims[1]) / np.sqrt(nn_hdims[0])).astype('float32'))
    b_stack[-2].set_value(np.zeros(nn_hdims[1]).astype('float32'))
    # w_stack[-1].set_value((np.random.randn(nn_hdims[1], nn_hdims[2]) / np.sqrt(nn_hdims[1])).astype('float32'))
    # b_stack[-1].set_value(np.zeros(nn_hdims[2]).astype('float32'))
    w_stack[-1].set_value((np.random.randn(nn_hdims[1], nn_output_dim) / np.sqrt(nn_hdims[1])).astype('float32'))
    b_stack[-1].set_value(np.zeros(nn_output_dim).astype('float32'))
    for i in range(0, num_passes):
        for j in range(batch_per_epoch):
            # Load the next training batch into the shared variables.
            a, b, c = d.train_batch_data(40)
            X.set_value(a.astype('float32'))
            y.set_value(b.astype('float32'))
            c_w.set_value(c.astype('float32'))
            if j % 500 == 0:
                # print(debug(),end='\n',flush=True)
                print(calculate_loss(), end=' ', flush=True)
            gradient_step()
        print()
        if print_loss and i % 1 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss()))
# Train, persist the learned parameters, then evaluate on the test split.
build_model(num_passes=2, print_loss=True)
# save model
np.savez('out/model.npz', params=[[x.get_value() for x in params[0]], [x.get_value() for x in params[1]]])
# Evaluate: load the full test set into the shared inputs and predict.
a, b = d.fulldata('test')
X.set_value(a.astype('float32'))
y.set_value(b.astype('float32'))
predicted = predict()
# Labels come one-hot encoded; recover the class indices.
actual = [np.argmax(x) for x in b]
print(accuracy_score(predicted, actual))
print(precision_score(actual, predicted, average='macro'))
print(confusion_matrix(actual, predicted))
|
[
"rajarsheem@gmail.com"
] |
rajarsheem@gmail.com
|
c40f6a5a4f47f0180df1f9da49f262d8982ae040
|
728bd80edb381d74b5d19b0d2bbd5f0c7e00412e
|
/partenaire/admin.py
|
e4d1b140508662c013b2e393f60d3fdb997f44dc
|
[] |
no_license
|
guilavogui24/applisigre
|
16b45b4448975c7872223b911745ca02d8c64a84
|
39572782b2646543129208503f8c47a5813223ce
|
refs/heads/master
| 2023-07-12T06:31:54.882558
| 2021-08-06T13:28:55
| 2021-08-06T13:28:55
| 393,380,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
from django.contrib import admin
from .models import Partenaire
from .models import TypePartenaire
from .models import FormeJuridique
from .models import Categories
from .models import SousCategorie
class SousCategorieAdmin(admin.ModelAdmin):
    """Admin options for SousCategorie: show name and category, search by name."""
    # NOTE(review): 'Categories' must match a field or callable on the
    # SousCategorie model (or this admin) for list_display to resolve — confirm.
    list_display = ('nom', 'Categories')
    search_fields = ['nom']
# Register your models here.
admin.site.register(Partenaire)
admin.site.register(TypePartenaire)
admin.site.register(FormeJuridique)
admin.site.register(Categories)
# SousCategorie uses the customised admin class.
admin.site.register(SousCategorie, SousCategorieAdmin)
|
[
"guilavoguijoseph@gmail.com"
] |
guilavoguijoseph@gmail.com
|
8efde5c2d68e9d8fcd988306f044844b4c316c77
|
c3796ebebb42e55878556a53abad1a2e18fa4020
|
/test/functional/wallet_resendwallettransactions.py
|
1d73e04f39a1b4d65c9ca45043ad36d0c6b036b9
|
[
"MIT"
] |
permissive
|
lycion/genex-project
|
a9e54d22138ca81339f76bba166aa9f366fa9dd8
|
fc103e93ee274dc57179d01c32b0235b29e364ca
|
refs/heads/master
| 2020-03-29T02:18:33.445995
| 2018-08-07T23:56:27
| 2018-08-07T23:56:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Genex Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resendwallettransactions RPC."""
from test_framework.test_framework import GenexTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class ResendWalletTransactionsTest(GenexTestFramework):
    """Functional test for the ``resendwallettransactions`` RPC."""
    def set_test_params(self):
        # One node started with wallet broadcasting disabled so the first
        # RPC call below can assert the expected error.
        self.num_nodes = 1
        self.extra_args = [['--walletbroadcast=false']]
    def run_test(self):
        # Should raise RPC_WALLET_ERROR (-4) if walletbroadcast is disabled.
        assert_raises_rpc_error(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast", self.nodes[0].resendwallettransactions)
        # Should return an empty array if there aren't unconfirmed wallet transactions.
        # Restart with default args (broadcasting enabled).
        self.stop_node(0)
        self.start_node(0, extra_args=[])
        assert_equal(self.nodes[0].resendwallettransactions(), [])
        # Should return an array with the unconfirmed wallet transaction.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        assert_equal(self.nodes[0].resendwallettransactions(), [txid])
if __name__ == '__main__':
    # Run the functional test via the framework's standard entry point.
    ResendWalletTransactionsTest().main()
|
[
"40029035+genexcore@users.noreply.github.com"
] |
40029035+genexcore@users.noreply.github.com
|
5b645591d5f4113d5f9bc03b5fb8121abefcbe03
|
47c12b42843d08a10655006fba6fb0c662e7bf62
|
/yonep.py
|
92b1c822b8ae4f9d036e2f38a53af06c6ffd61e1
|
[] |
no_license
|
easthgs/git-test
|
6325a8ba1bc77bec6a4b2d54fc84d08498d4b67e
|
34de227bbf017f6b78b5bcf2cd4a7a47069c797a
|
refs/heads/master
| 2021-01-10T12:23:10.331150
| 2016-02-20T17:55:13
| 2016-02-20T17:55:13
| 52,128,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from flask import Flask, render_template, url_for, redirect, request
app = Flask(__name__)
@app.route('/')
@app.route('/<name>')
def yo(name=None):
    """Render the greeting page; *name* comes from the URL (None for '/')."""
    return render_template('yo.html', name=name)
@app.route('/add', methods=['POST'])
def add():
    """Accept the posted form and redirect to the personalised greeting."""
    return redirect(url_for('yo', name=request.form['name']))
if __name__ == '__main__':
    # Development server only; debug=True enables the reloader and debugger.
    app.run(debug=True)
|
[
"eastnep@yahoo.co.jp"
] |
eastnep@yahoo.co.jp
|
f625664d16d2ea3be1e1bf2f040c0452b92aaf29
|
47541875c7be36ce612c382b7f98c92173c7144c
|
/WarDrivePiCar/Tests/test_main.py
|
47e6e1b2adc29e0bdcc292ffbd93dc17fff20045
|
[] |
no_license
|
MorenoB/WarDrivePi
|
72b6373796e9b6a5ff5c8841154da556b9471906
|
b4a29774de033df9f50043c6275a13d7a9d186cc
|
refs/heads/master
| 2021-01-17T15:26:16.733107
| 2017-01-27T11:47:12
| 2017-01-27T11:47:12
| 69,866,547
| 1
| 0
| null | 2017-01-27T11:47:13
| 2016-10-03T11:59:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
# Test files need to re register themselves for when using shell
import sys
import os
sys.path.insert(1, os.path.abspath(os.path.dirname(__file__)))
from unittest import TestCase
from program import Program
from Util.testing import TestThread
import sys
import time
import os
class TestMain(TestCase):
    # Integration test (Python 2 — note the statement-form prints): drives the
    # main Program loop with simulated phone data and simulated keyboard input.
    def test_movement(self):
        program = Program()
        # Force in some mock-up location data for the Phone module
        current_dir = os.path.abspath(os.path.dirname(__file__))
        # NOTE(review): file handles opened below are never closed — acceptable
        # for a short-lived test process, but consider using 'with'.
        file_path = os.path.join(current_dir, 'simulated_location_input.txt')
        location_mockup_data = open(file_path, 'r').read()
        file_path = os.path.join(current_dir, 'simulated_sensor_input.txt')
        sensor_mockup_data = open(file_path, 'r').read()
        program.force_phone_handler_input(location_data=location_mockup_data, sensor_data=sensor_mockup_data)
        # Start the main program
        new_thread = TestThread(program)
        new_thread.start()
        print "Simulating Keyboard Input..."
        # Redirect stdin so the program reads scripted key presses.
        file_path = os.path.join(current_dir, 'simulatedInput.txt')
        sys.stdin = open(file_path, 'r')
        time.sleep(1)
        print "Simulating Keyboard Interrupt..."
        program.stop()
        # Give the worker thread a moment to wind down before asserting.
        time.sleep(1)
        print "Checking if program is done..."
        self.assertEquals(program.is_running(), False)
|
[
"moreno_bralts@hotmail.com"
] |
moreno_bralts@hotmail.com
|
aa3069e85491124d364115e57d1a97e1ff6dbda7
|
e2589896ad0e629d933f1e9e03f9963eb922664a
|
/backend/cool_dust_27675/wsgi.py
|
297564f38beadc76f1ea37eeabd22b393dcbc0c4
|
[] |
no_license
|
crowdbotics-apps/cool-dust-27675
|
89b947ddd6c87d70febeb2af15ffab3706b6cc13
|
f2fa1d6f4206955173a2ebf1b0f824ee5d184d1a
|
refs/heads/master
| 2023-05-08T10:46:07.382608
| 2021-06-02T06:44:22
| 2021-06-02T06:44:22
| 373,066,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
WSGI config for cool_dust_27675 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cool_dust_27675.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
2de0a3a5bc6d4a723ceea97771879e67868cf611
|
0543e8a6c4c45a80c3e11bbd6df694a63ad0155d
|
/diapers/migrations/0004_auto_20150822_1627.py
|
b4144d1ff6d3d09f9af4eea806abf61a693331fa
|
[] |
no_license
|
asorokoumov/compare
|
758f0cc22607db51022386d64f21c29610622b02
|
221aef6024f3c63c1a1d01f13f03166ce5b02a54
|
refs/heads/master
| 2022-08-09T16:05:33.342400
| 2019-05-08T09:54:48
| 2019-05-08T09:54:48
| 41,490,630
| 0
| 0
| null | 2022-07-06T19:18:37
| 2015-08-27T14:13:05
|
CSS
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated no-op migration: depends on 0003 but performs no schema
    # changes (empty operations list).
    dependencies = [
        ('diapers', '0003_auto_20150822_1624'),
    ]
    operations = [
    ]
|
[
"sorokoumov.anton@gmail.com"
] |
sorokoumov.anton@gmail.com
|
b12892a96f4b48796a35f6700c11b1ce1875c2cf
|
94c8dd4126da6e9fe9acb2d1769e1c24abe195d3
|
/test/python/circuit/library/test_phase_estimation.py
|
8bf3d15d9ea0a395cd1d2ede7c122fdb666605b4
|
[
"Apache-2.0"
] |
permissive
|
levbishop/qiskit-terra
|
a75c2f96586768c12b51a117f9ccb7398b52843d
|
98130dd6158d1f1474e44dd5aeacbc619174ad63
|
refs/heads/master
| 2023-07-19T19:00:53.483204
| 2021-04-20T16:30:16
| 2021-04-20T16:30:16
| 181,052,828
| 1
| 0
|
Apache-2.0
| 2019-06-05T15:32:13
| 2019-04-12T17:20:54
|
Python
|
UTF-8
|
Python
| false
| false
| 5,238
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test library of phase estimation circuits."""
import unittest
import numpy as np
from qiskit.test.base import QiskitTestCase
from qiskit import BasicAer, execute
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library import PhaseEstimation, QFT
from qiskit.quantum_info import Statevector
class TestPhaseEstimation(QiskitTestCase):
    """Test the phase estimation circuit."""
    def assertPhaseEstimationIsCorrect(self, pec: QuantumCircuit, eigenstate: QuantumCircuit,
                                       phase_as_binary: str):
        r"""Assert that the phase estimation circuit implements the correct transformation.
        Applying the phase estimation circuit on a target register which holds the eigenstate
        :math:`|u\rangle` (say the last register), the final state should be
        .. math::
            |\phi_1\rangle \cdots |\phi_t\rangle |u\rangle
        where the eigenvalue is written as :math:`e^{2\pi i \phi}` and the angle is represented
        in binary fraction, i.e. :math:`\phi = 0.\phi_1 \ldots \phi_t`.
        Args:
            pec: The circuit implementing the phase estimation circuit.
            eigenstate: The eigenstate as circuit.
            phase_as_binary: The phase of the eigenvalue in a binary fraction. E.g. if the
                phase is 0.25, the binary fraction is '01' as 0.01 = 0 * 0.5 + 1 * 0.25 = 0.25.
        """
        # the target state
        eigenstate_as_vector = Statevector.from_instruction(eigenstate).data
        reference = eigenstate_as_vector
        zero, one = [1, 0], [0, 1]
        # Tensor the evaluation qubits (little-endian, hence reversed) onto the state.
        for qubit in phase_as_binary[::-1]:
            reference = np.kron(reference, zero if qubit == '0' else one)
        # the simulated state
        circuit = QuantumCircuit(pec.num_qubits)
        circuit.compose(eigenstate,
                        list(range(pec.num_qubits - eigenstate.num_qubits, pec.num_qubits)),
                        inplace=True)
        circuit.compose(pec, inplace=True)
        # TODO use Statevector for simulation once Qiskit/qiskit-terra#4681 is resolved
        # actual = Statevector.from_instruction(circuit).data
        backend = BasicAer.get_backend('statevector_simulator')
        actual = execute(circuit, backend).result().get_statevector()
        np.testing.assert_almost_equal(reference, actual)
    def test_phase_estimation(self):
        """Test the standard phase estimation circuit."""
        with self.subTest('U=S, psi=|1>'):
            unitary = QuantumCircuit(1)
            unitary.s(0)
            eigenstate = QuantumCircuit(1)
            eigenstate.x(0)
            # eigenvalue is 1j = exp(2j pi 0.25) thus phi = 0.25 = 0.0100 = '0100'
            # using four digits as 4 evaluation qubits are used
            phase_as_binary = '0100'
            pec = PhaseEstimation(4, unitary)
            self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary)
        with self.subTest('U=SZ, psi=|11>'):
            unitary = QuantumCircuit(2)
            unitary.z(0)
            unitary.s(1)
            eigenstate = QuantumCircuit(2)
            eigenstate.x([0, 1])
            # eigenvalue is -1j = exp(2j pi 0.75) thus phi = 0.75 = 0.110 = '110'
            # using three digits as 3 evaluation qubits are used
            phase_as_binary = '110'
            pec = PhaseEstimation(3, unitary)
            self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary)
        with self.subTest('a 3-q unitary'):
            unitary = QuantumCircuit(3)
            unitary.x([0, 1, 2])
            unitary.cz(0, 1)
            unitary.h(2)
            unitary.ccx(0, 1, 2)
            unitary.h(2)
            eigenstate = QuantumCircuit(3)
            eigenstate.h(0)
            eigenstate.cx(0, 1)
            eigenstate.cx(0, 2)
            # the unitary acts as identity on the eigenstate, thus the phase is 0
            phase_as_binary = '00'
            pec = PhaseEstimation(2, unitary)
            self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary)
    def test_phase_estimation_iqft_setting(self):
        """Test default and custom setting of the QFT circuit."""
        unitary = QuantumCircuit(1)
        unitary.s(0)
        with self.subTest('default QFT'):
            pec = PhaseEstimation(3, unitary)
            expected_qft = QFT(3, inverse=True, do_swaps=False).reverse_bits()
            self.assertEqual(pec.data[-1][0].definition, expected_qft)
        with self.subTest('custom QFT'):
            iqft = QFT(3, approximation_degree=2).inverse()
            pec = PhaseEstimation(3, unitary, iqft=iqft)
            self.assertEqual(pec.data[-1][0].definition, iqft)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b13d0766ba150b96be1c6b817e88795d4963c5de
|
bbab26a8d60b9b1c3e0037596165d3639a5e18ae
|
/assign2.py
|
797216f15613313e889f7b7f082da1c2bdbe46a0
|
[] |
no_license
|
MidhaTahir/-Python-
|
200d6c83c8066392143e7297659ea9ecc5b57b79
|
b054e4bc82289051e4f96b9b03f8402993b42a38
|
refs/heads/master
| 2021-07-19T20:13:37.920052
| 2020-06-24T18:31:05
| 2020-06-24T18:31:05
| 186,156,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
import random


def summarize(values):
    """Return (minimum, min_pos, maximum, max_pos, mean) for a non-empty sequence.

    Positions are 1-based and refer to the first occurrence of the value,
    matching the original ``list.index`` behaviour.
    """
    minimum = min(values)
    maximum = max(values)
    # Use the built-in sum(); the original shadowed the builtin with a
    # manual accumulator loop (`sum = 0`).
    mean = sum(values) / len(values)
    return (minimum, values.index(minimum) + 1,
            maximum, values.index(maximum) + 1,
            mean)


def main():
    """Generate 10 random ints in [1, 100] and print their min/max/mean stats."""
    arr = [random.randint(1, 100) for _ in range(10)]
    print(arr)
    minimum, min_pos, maximum, max_pos, mean = summarize(arr)
    print("The minimum number in random list is " + str(minimum))
    print("The minimum number position is " + str(min_pos))
    print("The maximum number in random list is " + str(maximum))
    print("The maximum number position is " + str(max_pos))
    print(mean)


if __name__ == "__main__":
    main()
|
[
"midhatahirkhan2011@gmail.com"
] |
midhatahirkhan2011@gmail.com
|
fedd1e48e973b16043436c9c51aa37b3063a283e
|
1f2df4dfed4af1485fefab0118dd6abd437de4de
|
/listings/migrations/0001_initial.py
|
7dc50d07d346be74a195cb7f75a96f3f72bf4ebd
|
[] |
no_license
|
Rhillx/IH_project
|
90bc0ecaa200a2fb51e520dd75f6485cb21e6f17
|
4a51311e24456e0aefec16872f340685ec7dca74
|
refs/heads/master
| 2020-04-25T18:55:04.270785
| 2019-03-02T05:56:23
| 2019-03-02T05:56:23
| 173,000,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
# Generated by Django 2.1.7 on 2019-02-21 22:48
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.7 makemigrations (2019-02-21). Do not
    # hand-edit an applied migration; create a follow-up migration for any
    # schema change instead.
    initial = True
    dependencies = [
        ('realtors', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('address', models.CharField(max_length=200)),
                ('city', models.CharField(max_length=100)),
                ('state', models.CharField(max_length=100)),
                ('zipcode', models.CharField(max_length=20)),
                ('description', models.TextField(blank=True)),
                ('price', models.IntegerField()),
                ('bedrooms', models.IntegerField()),
                ('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
                ('garage', models.IntegerField(default=0)),
                ('sqft', models.IntegerField()),
                ('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
                ('photo_1', models.ImageField(upload_to='photos/%Y/%m/%d/')),
                ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('is_published', models.BooleanField(default=True)),
                ('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.Realtor')),
            ],
        ),
    ]
|
[
"rhillz718@gmail.com"
] |
rhillz718@gmail.com
|
f8b918dbc080c727941fe32353727591500f3f2d
|
5c61851a03dd1ac98d03c2e98f27487f188ff00f
|
/{{cookiecutter.repo_name}}/manage.py
|
13bffdcfd10dc0e98343059f47512923a6698335
|
[
"BSD-3-Clause"
] |
permissive
|
tony/cookiecutter-flask-pythonic
|
e7208a8fc9ccbde10e541f8e657dbf4da7b388b3
|
d1274ec5d5b72cab128e593ed78de88c29bd54b5
|
refs/heads/master
| 2023-05-29T20:49:21.927268
| 2021-10-05T12:39:04
| 2021-10-05T12:39:04
| 35,064,692
| 39
| 4
| null | 2023-05-01T21:06:54
| 2015-05-04T22:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from flask_script import Manager
from {{ cookiecutter.repo_name }} import {{ cookiecutter.repo_name | capitalize }}
"""If not using Flask-Script::
app = {{ cookiecutter.repo_name | capitalize }}.from_cli(sys.argv[1:])
Does the trick for retrieving an application object using
pure argparse. But let's hook into Flask-Script's CLI argparse
instance.
"""
def app_wrapper(*args, **kwargs):
    """App factory returns the :class:`flask.Flask` via ``__call__``,
    but because of the way :class:`flask_script.Manager` handles
    accepting app objects, this wrapper returns the flask object directly.
    :returns: Flask object build from CLI
    :rtype: :class:`flask.Flask`
    """
    return {{ cookiecutter.repo_name | capitalize }}.from_file(*args, **kwargs).app
manager = Manager(app_wrapper)
manager.add_option('-c', '--config', dest='config', required=False)
@manager.command
def run_server(*args, **kwargs):
    """Run the application's built-in development server."""
    {{ cookiecutter.repo_name | capitalize }}.from_file().run()
@manager.command
def testing(*args, **kwargs):
    """Point the user at the project's test runners."""
    print('Run "./run-tests.py" or "python setup.py test".')
if __name__ == "__main__":
    # NOTE(review): calling run_server() directly bypasses Flask-Script's
    # argument parsing (including the -c/--config option registered above);
    # presumably this should be manager.run() -- confirm intended behaviour
    # of the template.
    run_server()
|
[
"tony@git-pull.com"
] |
tony@git-pull.com
|
6fb69cae212e1193fbae6999d71ad04fb456e8f7
|
be77e3ff1de69b11a427309ad5e953dfdbdb55a2
|
/main.py
|
7b29ea9b11bb750109abb536c2465b092280ee36
|
[] |
no_license
|
Richard98PL/tibiaAntyLogout
|
58bfef476453ae021d69ebc7785eac6a1b47d947
|
b790f9ffb756624c1e6d71506f15e8f9dda390cb
|
refs/heads/main
| 2023-08-26T22:12:29.090560
| 2021-11-01T16:52:21
| 2021-11-01T16:52:21
| 423,093,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
from pynput.keyboard import Key, Controller
import win32gui
import win32con
import re
import time
import datetime
from random import randint
keyboard = Controller()
class WindowMgr:
    """Encapsulates some calls to the winapi for window management."""
    def __init__(self):
        """Start with no window handle selected."""
        self._handle = None
    def find_window(self, class_name, window_name=None):
        """Find a window by its class_name (and optional exact title)."""
        self._handle = win32gui.FindWindow(class_name, window_name)
    def _window_enum_callback(self, hwnd, wildcard):
        """Pass to win32gui.EnumWindows() to check all the opened windows."""
        if re.match(wildcard, str(win32gui.GetWindowText(hwnd))) is not None:
            self._handle = hwnd
    def find_window_wildcard(self, wildcard):
        """Find a window whose title matches the wildcard regex.

        Leaves ``_handle`` as None when no window matches.
        """
        self._handle = None
        win32gui.EnumWindows(self._window_enum_callback, wildcard)
    def set_foreground(self):
        """Put the window in the foreground."""
        win32gui.SetForegroundWindow(self._handle)
    def get_rectangle(self):
        """Return the window's (left, top, right, bottom) screen rectangle.

        Bug fix: the original called GetWindowRect and discarded the result,
        so this method always returned None.
        """
        return win32gui.GetWindowRect(self._handle)
def antyLogout():
    # Remember the window that currently has focus so it can be restored.
    currentWindowManager = WindowMgr()
    currentWindow = win32gui.GetForegroundWindow()
    currentWindowManager._handle = currentWindow
    # Locate the Tibia client window by its title prefix.
    tibiaWindowManager = WindowMgr()
    tibiaWindowManager.find_window_wildcard("Tibia - *")
    # Only switch focus if Tibia is not already the active window.
    if currentWindowManager._handle != tibiaWindowManager._handle:
        win32gui.ShowWindow(tibiaWindowManager._handle, win32con.SW_MAXIMIZE)
        tibiaWindowManager.set_foreground()
    # Send Ctrl+Up then Ctrl+Down -- presumably this rotates the character in
    # place without moving it, which resets the client's idle-logout timer
    # (TODO confirm against the game's key bindings).
    keyboard.press(Key.ctrl)
    movementKeys = [Key.up, Key.down]
    for key in movementKeys:
        keyboard.tap(key)
        time.sleep( randint(15,31) / 1000)  # 15-31 ms random delay between taps
    keyboard.release(Key.ctrl)
    # Log the time of this keep-alive tick.
    now = datetime.datetime.now()
    print(now.hour, now.minute, now.second)
    # Restore the previously focused window if we had to switch away from it.
    if currentWindowManager._handle != tibiaWindowManager._handle:
        win32gui.ShowWindow(tibiaWindowManager._handle, win32con.SW_MINIMIZE)
        currentWindowManager.set_foreground()
# Fire once immediately, then roughly every six minutes (plus jitter).
antyLogout()
while True:
    time.sleep(6*60 + randint(0,13))
    antyLogout()
|
[
"noreply@github.com"
] |
noreply@github.com
|
c85864db8a16d494aabf4adc4a5f186176ac2e22
|
1cd904a429a2392f47762a037e7ac8545cd673f1
|
/homework_hse/homework03/homework03.py
|
79b8c6afc95b99b94d14f1dbddd140ce4a529c33
|
[] |
no_license
|
annnyway/homework
|
e0bce8af87fc6100f20ce736e7082721005f4842
|
4fd3aa4279560d7a56af99d24636128e520945df
|
refs/heads/master
| 2021-07-12T16:06:44.210852
| 2019-03-03T20:40:05
| 2019-03-03T20:40:05
| 148,026,613
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,018
|
py
|
import random
def pick_random_word(filename):
    """Return a random whitespace-separated word read from *filename*.

    Shared helper: the original repeated the same open/read/choice
    boilerplate in every part-of-speech function.
    """
    with open(filename, 'r') as f:
        return random.choice(f.read().split())


def adjective():
    """Return a random adjective."""
    return pick_random_word('adjectives.tsv')


def noun():
    """Return a random noun."""
    return pick_random_word('nouns.tsv')


def transitive_verb():
    """Return a random transitive verb (base form)."""
    return pick_random_word('trans_verbs.tsv')


def intransitive_verb():
    """Return a random intransitive verb (base form)."""
    return pick_random_word('intrans_verbs.tsv')
def third_person_singular(verb):
    """Return the third-person singular ('he/she/it') form of *verb*.

    Rules (same as the original duplicated logic): consonant + 'y' becomes
    'ies'; sibilant endings (ss/x/z/ch/sh) take 'es'; everything else takes
    's'.  Verbs ending in 'o' (go -> goes) are not special-cased, matching
    the original behaviour.
    """
    vowels = 'aeiou'
    if verb.endswith('y') and verb[-2] not in vowels:
        return verb[0:-1] + 'ies'
    if verb.endswith(('ss', 'x', 'z', 'ch', 'sh')):
        return verb + 'es'
    return verb + 's'


def transitive_verb_with_s():
    """Return a random transitive verb inflected for third-person singular."""
    with open('trans_verbs.tsv', 'r') as f:
        verbs = f.read().split()
    return third_person_singular(random.choice(verbs))


def intransitive_verb_with_s():
    """Return a random intransitive verb inflected for third-person singular."""
    with open('intrans_verbs.tsv', 'r') as f:
        verbs = f.read().split()
    return third_person_singular(random.choice(verbs))
def adverb():
    """Return a random adverb from adverbs.tsv."""
    with open('adverbs.tsv', 'r') as source:
        candidates = source.read().split()
    return random.choice(candidates)


def comparative_adjective():
    """Return a random comparative adjective from comparative_adjectives.tsv."""
    with open('comparative_adjectives.tsv', 'r') as source:
        candidates = source.read().split()
    return random.choice(candidates)
def affirmative_sentence():
    """Build an affirmative sentence with a relative clause."""
    return (f"{adjective().capitalize()} {noun()} {transitive_verb_with_s()} "
            f"{noun()} that {intransitive_verb_with_s()} {adverb()}.")


def interrogative_sentence():
    """Build a past-tense yes/no question."""
    return f"Did {noun()} {transitive_verb()} {adjective()} {noun()} yesterday?"


def negative_sentence():
    """Build a negated future-tense sentence."""
    return f"{adjective().capitalize()} {noun()} will not {transitive_verb()} {noun()}."


def conditional_sentence():
    """Build a first-conditional sentence."""
    return (f"If {noun()} {transitive_verb_with_s()} {noun()}, "
            f"{noun()} will {intransitive_verb()} {comparative_adjective()}.")


def imperative_sentence():
    """Build a polite imperative sentence."""
    return f"{transitive_verb().capitalize()} the {adjective()} {noun()}, please."
def random_sentences():
    """Return one sentence of each of the five types, shuffled in place."""
    pool = [
        affirmative_sentence(),
        interrogative_sentence(),
        negative_sentence(),
        conditional_sentence(),
        imperative_sentence(),
    ]
    random.shuffle(pool)
    return pool
def main():
    """Write the shuffled sentences to random_sentences.txt, one per line."""
    with open('random_sentences.txt', 'w') as out:
        out.writelines(sentence + '\n' for sentence in random_sentences())
    return 0


if __name__ == '__main__':
    main()
|
[
"nuticbooms@gmail.com"
] |
nuticbooms@gmail.com
|
fc751a4c8a4c39cb45786c720a5ed4aa6a9bfb76
|
5cd9518f9a869a355019c743a5c2e65b0f3c50ba
|
/problem4.py
|
a96d8f797444928c1432d958ac34449608efcd17
|
[] |
no_license
|
astan54321/PA3
|
0d2ce6a1e9bce4ae9cafcedb930459e6b0bf66ee
|
13d4048c12ea9b42cf9990389cf7894b2430cb3c
|
refs/heads/master
| 2023-01-06T09:51:21.428543
| 2020-10-15T08:17:30
| 2020-10-15T08:17:30
| 303,866,798
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
from mrjob.job import MRJob
from mrjob.step import MRStep
import re
WORD_RE = re.compile(r"[\w']+")
class MRMostUsedWord(MRJob):
    """Two-step MapReduce job emitting the ten most frequent words."""
    def steps(self):
        return [
            MRStep(mapper=self.mapper_get_words,
                   combiner=self.combiner_count_words,
                   reducer=self.reducer_count_words),
            MRStep(reducer=self.reducer_find_max_word)
        ]
    def mapper_get_words(self, _, line):
        # yield each word in the line, lowercased for case-insensitive counting
        for word in WORD_RE.findall(line):
            word = word.lower()
            yield (word, 1)
    def combiner_count_words(self, word, counts):
        # optimization: sum the words we've seen so far
        yield (word, sum(counts))
    def reducer_count_words(self, word, counts):
        # send all (num_occurrences, word) pairs to the same reducer.
        # num_occurrences is first so tuples sort by count.
        yield None, (sum(counts), word)
    def reducer_find_max_word(self, _, word_count_pairs):
        # Emit the ten most frequent (count, word) pairs, highest count first.
        # Bug fix: the original max()-and-remove loop was O(n^2) and raised
        # ValueError (max() of an empty list) whenever fewer than ten distinct
        # words existed; sorting once and slicing handles both cases.
        for pair in sorted(word_count_pairs, reverse=True)[:10]:
            yield pair
if __name__ == '__main__':
    MRMostUsedWord.run()
|
[
"astan54321@gmail.com"
] |
astan54321@gmail.com
|
aebbafdcebef5f7edbb5985af8c57816dee40ee3
|
a8e132c3cb716021064ad30b3d5a61a093d8ae6d
|
/Tugas 1/Find GCD of a Number/Find GCD of a Number.py
|
c0eecb79b70470313db3e3ae0e98674f3fa75f5c
|
[] |
no_license
|
Ngurah30/Kriptoanalisis
|
2ad971eaec14c9967c27a4ec8270bc0cc4bd0dcd
|
e3a0e26f4005b060b9f8833525151ad4616ccaa4
|
refs/heads/main
| 2023-03-30T12:41:59.854953
| 2021-03-22T11:31:38
| 2021-03-22T11:31:38
| 343,434,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Iterative Euclidean algorithm (equivalent to the recursive formulation)
def gcd(x, y):
    """Return the greatest common divisor of x and y."""
    while y != 0:
        x, y = y, x % y
    return x
x =int (input ("Masukkan bilangan pertama : ")) # read the first number
y =int (input ("Masukkan bilangan kedua : ")) # read the second number
bil = gcd(x, y) # call gcd to compute the result
# Prompt/output text is user-facing Indonesian; "Faktor persekutuan
# terbesarnya adalah" = "The greatest common factor is".
print("Faktor persekutuan terbesarnya adalah ", bil)
|
[
"noreply@github.com"
] |
noreply@github.com
|
b9d3d581f68fcf8fdc2b060485db026dfba0d13f
|
7cb7553a04ce0eace07f331a493b96f2bdb2fd26
|
/week13_H.W/IDS_20200529-8.py
|
fbc674aded1485154d1c326dc00a9f40c3df7241
|
[] |
no_license
|
hjkyuit1226/Introduction-to-DS
|
2308f0780e2c6944f98ba3c0a559bca96b23bfbf
|
1cf26d1c7b080bb518f2f131c26091461a844fd3
|
refs/heads/master
| 2021-02-19T15:32:10.344138
| 2020-06-06T19:04:13
| 2020-06-06T19:04:13
| 245,314,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
def get_fahrenheit(x):
    """Convert a temperature *x* from Celsius to the Fahrenheit scale."""
    return x * 9 / 5 + 32


print(get_fahrenheit(25))
|
[
"noreply@github.com"
] |
noreply@github.com
|
8041a1fb64300f05fe3fc25480204f1c264af1d5
|
4c205e65142adcde0e55693e63aadc0f32b585a7
|
/Appliction_Generate_ETL/Fact/fact_commande_vente.py
|
2de5b3fc4f7fd4dfd663b5fbe28c2c9805ee8b0a
|
[] |
no_license
|
HamzaLebcir/Generic-DW-ERP-realization
|
3d42809a48c012b2cbc96860350a5aa61529ddb3
|
e13de06158fbbc55b8760485131951aa7006d5ca
|
refs/heads/main
| 2023-01-30T04:51:52.603268
| 2020-11-26T11:48:48
| 2020-11-26T11:48:48
| 316,214,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
from configparser import ConfigParser
config = ConfigParser()  # module-level parser shared by all instances; re-read in each __init__
class fact_commande_vente:
    """Builds the ETL statement loading the sales-order fact table
    (Fact_Commande_Vente) from a linked server via OPENQUERY.

    Column expressions, the source table and the linked-server name all come
    from an INI config file.
    """
    def __init__(self, file_config):
        """Read the Fact_Commande_Vente column mappings from *file_config*."""
        config.read(file_config, encoding='utf-8-sig')
        # Source-column expressions, one per fact-table column.
        self.ID = config.get('Fact_Commande_Vente','ID')
        self.Key_Produit = config.get('Fact_Commande_Vente','Key_Produit')
        self.Key_Client = config.get('Fact_Commande_Vente','Key_Client')
        self.Key_DC = config.get('Fact_Commande_Vente','Key_DC')
        self.ID_Magasin =config.get('Fact_Commande_Vente','ID_Magasin')
        self.Description = config.get('Fact_Commande_Vente','Description')
        self.Quantite = config.get('Fact_Commande_Vente','Quantite')
        self.Prix_unitaire = config.get('Fact_Commande_Vente','Prix_unitaire')
        self.Prix_HT = config.get('Fact_Commande_Vente','Prix_HT')
        self.Remise = config.get('Fact_Commande_Vente','Remise')
        self.Prix_Total = config.get('Fact_Commande_Vente','Prix_Total')
        self.Date_commande =config.get('Fact_Commande_Vente','Date_commande')
        self.Table = config.get('Fact_Commande_Vente','Table')
        self.server = config.get('Linked_server','server')
    def ETL(self):
        """Return the INSERT ... SELECT ... OPENQUERY statement as a string.

        Returns just a newline when the ID mapping is empty (fact disabled).
        NOTE(review): the SQL is assembled by plain string concatenation from
        config values -- acceptable only if the config file is fully trusted;
        confirm before pointing this at user-editable configuration.
        """
        if(self.ID==""):
            return("\n")
        else:
            return(
            "INSERT INTO Fact_Commande_Vente(ID,Key_Produit,Key_Client,Key_DC,ID_Magasin,Description,Quantite,Prix_unitaire,Prix_HT,Remise,Prix_total,Date_Commande) \n"
            +"SELECT ID,Key_Produit,Key_Client,Key_DC,ID_Magasin,Description,Quantite,Prix_unitaire,Prix_HT,Remise,Prix_total,Date_Commande \n"
            +"FROM OPENQUERY ("+self.server+",\n'select "
            +self.ID +" as ID, "
            +self.Key_Produit+ " as Key_Produit, "
            +self.Key_Client+ " as Key_Client, "
            +self.Key_DC+ " as Key_DC, "
            +self.ID_Magasin+ " as ID_Magasin, "
            +self.Description+ " as Description, "
            +self.Quantite+ " as Quantite, "
            +self.Prix_unitaire+ " as Prix_unitaire, "
            +self.Prix_HT+ " as Prix_HT, "
            +self.Remise+ " as Remise, "
            +self.Prix_Total+ " as Prix_Total,"
            +self.Date_commande+ " as Date_Commande \n"
            +"FROM " +self.Table+ "'); \n"
            )
|
[
"lebcirhamza7@gmail.com"
] |
lebcirhamza7@gmail.com
|
816a6e3ddb957c0b742e5e7ca3543a7b6de38cf7
|
fff4db9bd3408d881168a4838bd3d342b6415583
|
/codesort/tests/test_data/sorted_1.py
|
27b8982e7e8ff7740a54d846e70e116697b5bb1d
|
[
"MIT"
] |
permissive
|
dougthor42/CodeSort
|
853da348ddaa7461b6374fce7d4c62d66e437a12
|
4e4b5b862b903a258433cfd399db124a5abfa67e
|
refs/heads/master
| 2021-01-01T20:01:05.040710
| 2014-10-09T00:32:13
| 2014-10-09T00:32:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
# -*- coding: utf-8 -*-
# NOTE(review): this file is fixture data for the codesort test suite
# (tests/test_data/sorted_1.py); its exact content and ordering are
# presumably the expected "sorted" output the tests compare against --
# confirm before reformatting or reordering anything below.
"""
Docstring!
"""
from __future__ import print_function, division
def ClassA(object):
    """ ClassA, for sorting! """
    def __init__(self):
        """ Nothing needed here! """
        pass
    def _private_a(self):
        """ And things! """
        pass
    def _private_b(self):
        """ Stuff! """
        pass
    def public_a(self):
        """ I'm tired... """
        pass
    def public_b(self):
        """ That's all for now """
        pass
    def public_c(self):
        """ More stuff! """
        pass
class ClassB(object):
    """ ClassA, for sorting! """
    def __init__(self):
        """ Nothing needed here! """
        pass
    def _private_a(self):
        """ And things! """
        pass
    def _private_b(self):
        """ Stuff! """
        pass
    def public_a(self):
        """ I'm tired... """
        pass
    def public_b(self):
        """ That's all for now """
        pass
    def public_c(self):
        """ More stuff! """
        pass
def module_func_a(a):
    """ I hope this works """
    pass
def module_func_b(a, b):
    """ Please don't hate me """
    pass
def module_func_c(c):
    """ But in the original file, it's not! """
    pass
if __name__ == "__main__":
    """ This should be last """
    pass
# A comment that starts the orphaned module code
x = 5
y = 27
print(x+y)
|
[
"dougthor42@users.noreply.github.com"
] |
dougthor42@users.noreply.github.com
|
f08c6bfd9d1ae8773db3560b2864739bc460de2e
|
4716314a21040600df9c186eefa9182ad6aab9d7
|
/civil_war/settings.py
|
b81ccbfb701633189dc2ec406661c89e9c140834
|
[] |
no_license
|
muilee/politician_analytics
|
164a7127ca5fe74a399cf83abc8b25d2e64bff24
|
54b2761f880e5f441f299909266ebbfb4e4a5a29
|
refs/heads/master
| 2021-05-12T13:43:24.811319
| 2018-01-31T12:21:14
| 2018-01-31T12:21:14
| 116,940,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
"""
Django settings for civil_war project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control while DEBUG is on and
# ALLOWED_HOSTS accepts every host -- fine for local development only. Rotate
# the key (e.g. load it from the environment) and restrict ALLOWED_HOSTS
# before any real deployment.
SECRET_KEY = '&=a0pel91(_ih8hjypmo^@t^(htn3&^i1c%ka3)@7&$2nm@ep$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'politician',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'civil_war.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'civil_war.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"matt@localhost.localdomain"
] |
matt@localhost.localdomain
|
b94eb3cd9714f1550d11a2faa1808f08db720be0
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/surface/storage/delete.py
|
b0dd92d45fc1d77f4de21763de0131975f546827
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,897
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list Cloud Storage objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.storage import storage_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.storage import expansion
from googlecloudsdk.command_lib.storage import flags
from googlecloudsdk.command_lib.storage import storage_parallel
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
@base.Hidden
@base.Deprecate(is_removed=False, warning='This command is deprecated. '
'Use `gcloud alpha storage rm` instead.')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Delete(base.Command):
"""Delete Cloud Storage objects and buckets."""
detailed_help = {
'DESCRIPTION': """\
*{command}* lets you delete Cloud Storage objects and buckets. You can
specify one or more paths (including wildcards) and all matching objects
and buckets will be deleted.
""",
'EXAMPLES': """\
To delete an object, run:
$ *{command}* gs://mybucket/a.txt
To delete all objects in a directory, run:
$ *{command}* gs://mybucket/remote-dir/*
The above command will delete all objects under remote-dir/ but not its sub-directories.
To delete a directory and all its objects and subdirectories, run:
$ *{command}* --recursive gs://mybucket/remote-dir
$ *{command}* gs://mybucket/remote-dir/**
To delete all objects and subdirectories of a directory, without deleting the directory
itself, run:
$ *{command}* --recursive gs://mybucket/remote-dir/*
or
$ *{command}* gs://mybucket/remote-dir/**
To delete all objects and directories in a bucket without deleting the bucket itself, run:
$ *{command}* gs://mybucket/**
To delete all text files in a bucket or a directory, run:
$ *{command}* gs://mybucket/*.txt
$ *{command}* gs://mybucket/remote-dir/*.txt
To go beyond directory boundary and delete all text files in a bucket or a directory, run:
$ *{command}* gs://mybucket/**/*.txt
$ *{command}* gs://mybucket/remote-dir/**/*.txt
To delete a bucket, run:
$ *{command}* gs://mybucket
You can use wildcards in bucket names. To delete all buckets with prefix of `my`, run:
$ *{command}* --recursive gs://my*
""",
}
  @staticmethod
  def Args(parser):
    parser.add_argument(
        'path',
        nargs='+',
        help='The path of objects and directories to delete. The path must '
        'begin with gs:// and may or may not contain wildcard characters.')
    parser.add_argument(
        '--recursive',
        action='store_true',
        help='Recursively delete the contents of any directories that match '
        'the path expression.')
    # Hidden tuning knob: parallelism for the object-delete phase.
    parser.add_argument(
        '--num-threads',
        type=int,
        hidden=True,
        default=16,
        help='The number of threads to use for the delete.')
    flags.add_additional_headers_flag(parser)
  def Run(self, args):
    """Expand the given paths, confirm with the user, then delete objects and buckets."""
    paths = args.path or ['gs://']
    expander = expansion.GCSPathExpander()
    objects, dirs = expander.ExpandPaths(paths)
    if dirs and not args.recursive:
      raise exceptions.RequiredArgumentException(
          '--recursive',
          'Source path matches directories but --recursive was not specified.')
    # A matched directory with an empty object name is a whole bucket; every
    # matched directory is also re-expanded with a '**' suffix so its
    # contents are deleted.
    buckets = []
    dir_paths = []
    for d in dirs:
      obj_ref = storage_util.ObjectReference.FromUrl(d, allow_empty_object=True)
      if not obj_ref.name:
        buckets.append(obj_ref.bucket_ref)
      dir_paths.append(d + '**')
    sub_objects, _ = expander.ExpandPaths(dir_paths)
    objects.update(sub_objects)
    tasks = []
    for o in sorted(objects):
      tasks.append(storage_parallel.ObjectDeleteTask(
          storage_util.ObjectReference.FromUrl(o)))
    if buckets:
      # Extra warnings and confirmation if any buckets will be deleted.
      log.warning('Deleting a bucket is irreversible and makes that bucket '
                  'name available for others to claim.')
      message = 'This command will delete the following buckets:\n  '
      message += '\n  '.join([b.bucket for b in buckets])
      console_io.PromptContinue(
          message=message, throw_if_unattended=True, cancel_on_no=True)
    # TODO(b/120033753): Handle long lists of items.
    message = 'You are about to delete the following:'
    message += ''.join(['\n  ' + b.ToUrl() for b in buckets])
    message += ''.join(['\n  ' + t.obj_ref.ToUrl() for t in tasks])
    console_io.PromptContinue(
        message=message, throw_if_unattended=True, cancel_on_no=True)
    # Objects are deleted in parallel first; buckets are deleted afterwards,
    # once their contents are gone.
    storage_parallel.ExecuteTasks(tasks, num_threads=args.num_threads,
                                  progress_bar_label='Deleting Files')
    log.status.write(
        'Deleted [{}] file{}.\n'.format(
            len(tasks), 's' if len(tasks) > 1 else ''))
    storage_client = storage_api.StorageClient()
    for b in buckets:
      storage_client.DeleteBucket(b)
      log.DeletedResource(b.ToUrl(), kind='bucket')
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
2cd11da1a7669bc9c9ef03e9c656328abf9a4495
|
f32f45a84f296392fa5433402bf126885fb2df23
|
/learningTemplates/basic_app/urls.py
|
ae5ad4e2a7806bef5b64fb1740555b78e54b3277
|
[] |
no_license
|
samjonescode/Python-Anywhere-First-Deployment
|
8878f2b4d3c25a4b6f05795cd712c1def58f03ed
|
8608956168363442988acc3468886131787db4d6
|
refs/heads/master
| 2021-10-22T23:32:56.691641
| 2019-03-13T17:08:50
| 2019-03-13T17:08:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from django.urls import path
from basic_app import views
# template tagging: app_name sets the URL namespace, so templates can use
# {% url 'basic_app:index' %} etc.
app_name = 'basic_app'
urlpatterns = [
    path('index/',views.index,name='index'),
    path('relative/',views.relative,name='relative'),
    path('other/',views.other, name='other'),
]
|
[
"sjonesmusician@gmail.com"
] |
sjonesmusician@gmail.com
|
7ce22c0d3c1840a8b15bf306b73f4eaade25b3f0
|
c80ae9c1decd51252f8d623ded02b1003eb369c2
|
/web/movies/migrations/0003_auto_20170827_1727.py
|
27edc80f52dabe8be01eff1bc0d86b5e77aa52e9
|
[] |
no_license
|
wudizhangzhi/demo
|
5fa605709fb6f3d90b530d65149298d98b45c7fd
|
679933b7fdacbf7942c1cceb4f69da8b9e7d4bdd
|
refs/heads/master
| 2021-01-20T12:42:40.050376
| 2017-09-30T02:15:11
| 2017-09-30T02:15:11
| 90,399,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-27 09:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.4 makemigrations (2017-08-27); do not
    # hand-edit an applied migration. The byte-string choice labels are
    # UTF-8 encoded Chinese category names (e.g. b'\xe7\x94\xb5\xe5\xbd\xb1'
    # decodes to "movie") -- presumably rendered via decoding in the UI.
    dependencies = [
        ('movies', '0002_auto_20170826_1532'),
    ]
    operations = [
        migrations.AddField(
            model_name='movies',
            name='category',
            field=models.IntegerField(choices=[(0, b'\xe7\x94\xb5\xe5\xbd\xb1'), (1, b'\xe7\x94\xb5\xe8\xa7\x86\xe5\x89\xa7'), (2, b'\xe7\xbb\xbc\xe8\x89\xba')], default=0),
        ),
        migrations.AlterField(
            model_name='movies',
            name='film_type',
            field=models.IntegerField(choices=[(0, b'\xe6\xad\xa3\xe7\x89\x87'), (1, b'\xe9\xa2\x84\xe5\x91\x8a')], default=0),
        ),
    ]
|
[
"zhangzhichao@promote.cache-dns.local"
] |
zhangzhichao@promote.cache-dns.local
|
6faafc33a8fa0bcb600ee25a8837c929a209e065
|
9054a65b931267d15d74ad992f947d348e0a039f
|
/hw0/code/python_tutorial.py
|
2f781d96bfa2f67f823357da87ca917398da18dd
|
[] |
no_license
|
saunair/Autonomy
|
e0ba30b3e6301477efabc125880fcd7ee47bc0de
|
01621595250daeb858ddd096dfa67b8e1d4fe0c5
|
refs/heads/master
| 2021-06-13T07:09:04.409729
| 2017-02-24T02:09:47
| 2017-02-24T02:09:47
| 81,028,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
#!/usr/bin/env python
import numpy as np
import operator
def print_list(l):
    """Print a list using the default repr formatting (Python 2 print)."""
    print l
def sort_manual(shops):
    """Print the shops sorted by rating, descending, via a hand-written bubble sort."""
    shops_sorted = []
    values = [ v for v in shops.values() ]
    #TODO: Here implement manual sorting using loops
    # Bubble-sort the ratings into descending order.
    for i in range(0, len(values)):
        for j in range(0, len(values)-i-1):
            if values[j] < values[j+1]:
                values[j], values[j+1] = values[j+1], values[j]
    #print values
    # Place each [name, rating] pair at the index of its rating in the
    # sorted list.
    # NOTE(review): values.index() returns the FIRST match, so two shops
    # with identical ratings would collide (one overwrites the other) --
    # works here only because all ratings are distinct; confirm before reuse.
    shops_sorted = len(values)*[0]
    i = 0
    for kk in shops.keys():
        shops_sorted[values.index(shops[kk])] = [kk,shops[kk]]
    print 'Manual sorting result:'
    print_list(shops_sorted)
def sort_python(shops):
    """Print the shops sorted by rating (descending) using built-in sorted().

    Prints a list of (name, rating) tuples; returns None.
    """
    # Sort dict items by the value (index 1 of each (key, value) pair).
    shops_sorted = sorted(shops.items(), key=operator.itemgetter(1), reverse=True)
    #print shops_sorted
    #TODO: Here implement sorting using pythons build in sorting functions
    #shops_sorted = [ [k,v] for k, v in shops.items() ]
    print 'Python sorting result: '
    print_list(shops_sorted)
def sort_numpy(shops):
    """Print the shops sorted by rating (descending) using numpy argsort.

    Prints a list of [name, rating-as-string] pairs (numpy coerces the mixed
    items array to strings); returns None.
    """
    shops_sorted = []
    # TODO: Here implement sorting using numpys build-in sorting function
    x = np.array(shops)
    y = np.array(shops.items())
    print 'Numpy sorting result: '
    # argsort on column 1 (ratings), reversed for descending order.
    y = y[y[:,1].argsort()[::-1]]
    shops_sorted = y.tolist()
    print_list(shops_sorted)
def main():
    """Build the demo shop->rating table and run all three sort variants."""
    shops = {}
    shops['21st Street'] = 0.9
    shops['Voluto'] = 0.6
    shops['Coffee Tree'] = 0.45
    shops['Tazza D\' Oro'] = 0.75
    shops['Espresso a Mano'] = 0.95
    shops['Crazy Mocha'] = 0.35
    shops['Commonplace'] = 0.5
    sort_manual(shops)
    sort_python(shops)
    sort_numpy(shops)
if __name__ == "__main__":
main()
|
[
"snnair@andrew.cmu.edu"
] |
snnair@andrew.cmu.edu
|
939c05361fb679aea62692cdc376799186c3289d
|
2bbb75129fa4e1d28b7fcfe58f585bcbfdc33dde
|
/lib/models/__init__.py
|
98f6d9847c0667734e0d47537fc46076fd0f5152
|
[] |
no_license
|
Gyyz/Targeted-Sentiment-analysis-with-memory-network-attention
|
1165ba850fd1a61a1ddbfd0d8a1ec4fa408ecf92
|
2208188eb6bd150e739acfd6b16ec810eac15e43
|
refs/heads/master
| 2021-06-22T10:09:52.490474
| 2017-08-10T08:27:58
| 2017-08-10T08:27:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
from nn import NN
import rnn
from sentiment import *
|
[
"yuz@fff"
] |
yuz@fff
|
c19e0105d28469432092df67fa6a94f2cb27922c
|
4a6f3bc478dcd0c32f376bd0369940526a0242c9
|
/main.py
|
71941dc9451847800a7fe93dae92f57aa8973e66
|
[
"Apache-2.0"
] |
permissive
|
1999foxes/python-gobang-bilibililive
|
179495d2f7c8094a490dcb90a386a42cffafe61b
|
efc1f631a8c9686177b92a3e54c183ee227a45d2
|
refs/heads/master
| 2023-02-24T14:46:29.337799
| 2021-01-31T18:00:25
| 2021-01-31T18:00:25
| 334,715,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,352
|
py
|
# _*_ encoding:utf-8 _*_
import os
def installlibs():
    """Best-effort install of the game's third-party dependencies via pip3.

    Shells out to ``pip3 install <lib>`` for each required package and prints
    a status line. Failures inside a single install are reported but not
    re-raised, so the caller can still retry the imports.
    """
    libs = {"numpy", "requests", "pygame"}
    try:
        for lib in libs:
            os.system("pip3 install " + lib)
        print("Successful")
    except Exception as err:
        # Bug fix: the original printed the Exception *class* instead of the
        # caught error instance.
        print("Failed:", err)
# Import the game's dependencies; on the first ImportError, install the
# missing third-party packages and retry the imports once.  (The original
# swallowed every exception and never re-imported, leaving the names unbound.)
try:
    import pygame
    from pygame.locals import *
    import sys
    import math
    import storn
    from socket import *
    import select
    import chessboard
    import time
    import danmuji
except ImportError:
    installlibs()
    import pygame
    from pygame.locals import *
    import sys
    import math
    import storn
    from socket import *
    import select
    import chessboard
    import time
    import danmuji
roomID = '646'
pygame.init()
bg_size = 615, 615
WHITE = (255, 255, 255)
font1 = pygame.font.Font('font/12345.TTF', 35)
win_text = font1.render(u"黑棋胜利", True, WHITE)
win_text_rect = win_text.get_rect()
win_text_rect.left, win_text_rect.top = (bg_size[0] - win_text_rect.width) // 2, \
(bg_size[1] - win_text_rect.height) // 2
lose_text = font1.render(u"白棋胜利", True, WHITE)
lose_text_rect = lose_text.get_rect()
lose_text_rect.left, lose_text_rect.top = (bg_size[0] - lose_text_rect.width) // 2, \
(bg_size[1] - lose_text_rect.height) // 2
class StateMachine():
    """Game state machine for a bilibili-live gobang (five-in-a-row) game.

    Moves for each side come either from live-chat danmaku, from the local
    mouse, or from the board's built-in AI, depending on the player setting
    ('all' / 'any' / a nickname / 'ai' / 'mouse').
    """
    def __init__(self):
        """Set up state constants, players, timers, the board and the danmaku feed."""
        # state constant
        self.BLACKTURN = 'BLACKTURN'
        self.WHITETURN = 'WHITETURN'
        self.BLACKWIN = 'BLACKWIN'
        self.WHITEWIN = 'WHITEWIN'
        self.GAMEOVER = 'GAMEOVER'
        # current state
        self.state = self.GAMEOVER
        # Players ('all' or 'any' or some nickname, or 'ai' for white)
        self.black = 'all'
        self.white = 'mouse'
        # deadlines
        self.deadline = 0
        self.allDeadline = 0
        self.promptCountdown = 0
        # new chess data, [[pos1, num1], [pos2, num2], ...]
        self.data = []
        # chessboard
        self.board = chessboard.Chessboard()
        # render screen
        self.screen = pygame.display.set_mode(bg_size)
        # danmuji
        self.dmj = danmuji.Gift(roomID)
        self.dmj.run()
        print('hello')
    def newGame(self):
        """Reset the board and start a fresh game with black to move."""
        self.state = self.BLACKTURN
        self.setDeadline()
        self.board.clear()
    def setDeadline(self):
        """Arm the per-turn timers: 120 s overall, 60 s for vote collection."""
        self.deadline = time.time() + 120
        self.allDeadline = time.time() + 60
    def player(self):
        """Return the controller of the side to move (None outside of a turn)."""
        if self.state == self.WHITETURN:
            return self.white
        elif self.state == self.BLACKTURN:
            return self.black
    def nextTurn(self):
        """Commit the top-voted move, check for a win, and switch sides."""
        # add chess
        print('data =', self.data)
        print('add Chess to', self.data[0][0])
        self.board.addChess(self.data[0][0])
        # check who wins
        if self.board.isWin():
            if self.state == self.WHITETURN:
                self.state = self.WHITEWIN
            else:
                self.state = self.BLACKWIN
        # init for next turn
        if self.state == self.WHITETURN:
            self.state = self.BLACKTURN
        elif self.state == self.BLACKTURN:
            self.state = self.WHITETURN
        self.setDeadline()
        self.data = []
    def addData(self, pos):
        """Count one vote for *pos*, creating its entry on first sight."""
        for i in self.data:
            if i[0] == pos:
                i[1] += 1
                return
        self.data.append([pos, 1])
    def getData(self):
        """Collect candidate moves for the current player.

        'ai' asks the board, 'mouse' is handled by the event loop, anything
        else drains the danmaku list (filtered by nickname and board validity)
        and keeps self.data sorted by vote count, descending.
        """
        # get data from danmuji
        if self.player() == 'ai':
            self.data.append([self.board.ai(), 1])
        elif self.player() == 'mouse':
            return
        else:
            self.dmj.lock.acquire()
            for danmu in self.dmj.danmuList:
                if (self.player() == 'all' or self.player() == 'any' or self.player() == danmu[0]) and self.board.is_valid(danmu[1]):
                    self.addData(danmu[1])
            self.dmj.danmuList = []
            self.dmj.lock.release()
            self.data.sort(key=lambda a:a[1], reverse = True)
    def update(self):
        """One tick: restart after game-over, or gather votes and advance.

        A side that produces no move before its deadline forfeits the game.
        """
        if self.state == self.GAMEOVER or self.state == self.WHITEWIN or self.state == self.BLACKWIN:
            if self.promptCountdown == 0:
                self.promptCountdown = time.time() + 10
            elif time.time() > self.promptCountdown:
                self.promptCountdown = 0
                self.newGame()
        else:
            self.getData()
            if len(self.data) == 0:
                if time.time() > self.deadline or (self.player() == 'all' and time.time() > self.allDeadline):
                    if self.state == self.BLACKTURN:
                        self.state = self.WHITEWIN
                    else:
                        self.state = self.BLACKWIN
            elif self.player() != 'all' or time.time() > self.allDeadline:
                self.nextTurn()
    def renderScreen(self):
        """Redraw board, stones, win banner, countdown and vote counts, then flip."""
        # draw the chessboard background
        screen.blit(bg_image, (0, 0))
        for i in self.board.black_chesses:
            screen.blit(i.image, i.image_rect())
        for i in self.board.white_chesses:
            screen.blit(i.image, i.image_rect())
        # draw gameover prompt
        if self.state == self.BLACKWIN:
            screen.blit(win_text, win_text_rect)
        elif self.state == self.WHITEWIN:
            screen.blit(lose_text, lose_text_rect)
        # draw countdown
        if self.player() == 'all':
            text_countdown = font1.render('倒计时:'+str(int(self.allDeadline - time.time())), True, WHITE)
        else:
            text_countdown = font1.render('倒计时:'+str(int(self.deadline - time.time())), True, WHITE)
        text_countdown_rect = text_countdown.get_rect()
        text_countdown_rect.left, text_countdown_rect.top = (20, 0)
        screen.blit(text_countdown, text_countdown_rect)
        # draw player 'all' statistic
        if self.player() == 'all':
            for danmu in self.data:
                tmp = font1.render(str(danmu[1]), True, WHITE)
                tmp_rect = tmp.get_rect()
                tmp_rect.left, tmp_rect.top = self.board.getPixel(danmu[0])
                tmp_rect.left -= tmp_rect.width/2
                tmp_rect.top -= 20
                screen.blit(tmp, tmp_rect)
        pygame.display.flip()
clock = pygame.time.Clock()
screen = pygame.display.set_mode(bg_size)
pygame.display.set_caption('五子棋')
bg_image = pygame.image.load('image/bg.png').convert_alpha() # 背景图片
def main():
    """Run the game loop: render, update the state machine, handle events.

    Mouse clicks are forwarded as moves when the side to move is controlled
    by 'mouse'; closing the window quits the process.
    """
    state_machine = StateMachine()
    state_machine.newGame()
    running = True
    while running:
        state_machine.renderScreen()
        state_machine.update()
        for event in pygame.event.get():
            if event.type == QUIT:
                # Bug fix: the original called client_socket.close() /
                # server_socket.close() here, but neither name is defined
                # anywhere in this file — quitting raised NameError.
                pygame.quit()
                sys.exit()
            if event.type == MOUSEBUTTONDOWN:
                if event.button == 1:
                    print('click', event.pos)
                    if state_machine.player() == 'mouse' and state_machine.board.is_valid(state_machine.board.getPos(event.pos)):
                        state_machine.data = []
                        state_machine.data.append([state_machine.board.getPos(event.pos), 1])
                        state_machine.nextTurn()
        clock.tick(60)  # cap the loop at 60 FPS
clock.tick(60)
if __name__ == '__main__':
main()
|
[
"hl1999@yeah.net"
] |
hl1999@yeah.net
|
0e162d2c63ae0f43fffc22bc0537ef43c0b7ff22
|
baa4b4e6a4aae7df3236cc97f600262f969fe827
|
/project/server.py
|
39d0f9cd9df5c6768b9fbdd29f1e2e3bac6fc873
|
[] |
no_license
|
elihan27/CS-131
|
97517d86d0d92033ab207185d2a64cdbe55cfa50
|
088d05a138c9f7383faa048a500748f43e6c789b
|
refs/heads/master
| 2020-04-21T23:13:11.225262
| 2019-02-10T03:37:02
| 2019-02-10T03:37:02
| 169,938,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,303
|
py
|
import asyncio
import string
import aiohttp
import sys
import time
import logging
#As of 9:30 AM, 06/08/2018:
#has implemented "WHATSAT", "IAMAT", and "AT", dictionary, servers talking to each other
#need to implement: logger, buffer
server_info={}
#buffer=[]
portnumbers= {
'Goloman': 12717,
'Hands': 12718,
'Holiday': 12719,
'Welsh': 12720,
'Wilkes': 12721
}
logs={
'Goloman': 'Goloman.log',
'Hands': 'Hands.log',
'Holiday': 'Holiday.log',
'Welsh': 'Welsh.log',
'Wilkes': 'Wilkes.log'
}
comm_network = {'Goloman': ['Hands','Holiday', 'Wilkes'],
'Hands': ['Goloman', 'Wilkes'],
'Holiday': ['Goloman','Welsh', 'Wilkes'],
'Welsh':['Holiday'],
'Wilkes':['Goloman', 'Hands', 'Holiday']
}
def error(arg):
    """Report a malformed command/argument and terminate the process.

    Prints the offending value prefixed with "? " and exits with status 0
    (matching the original behavior).
    """
    print("? ", arg)
    raise SystemExit(0)
def messenger(client_id):
    """Build the canonical "AT ..." response line for *client_id*.

    The line is assembled from the client's entry in the module-level
    server_info table, logged to the owning server's log file, and returned
    (terminated with CRLF).
    """
    record = server_info[client_id]
    parts = ('AT', record['last_serv'], record['time_diff'], client_id,
             record['location'], record['time_stamp'])
    message = ' '.join(parts) + '\r\n'
    logging.basicConfig(filename=logs[record['last_serv']], level=logging.INFO)
    logging.info(message)
    return message
def update_server(client_id, server, location, time_stamp):
    """Record the latest position report for *client_id* in server_info.

    Overwrites any previous entry and stores the clock-skew string
    (now - time_stamp), prefixed with '+' when positive; negative values
    already carry '-' from str().
    """
    skew = time.time() - float(time_stamp)
    prefix = "+" if skew > 0 else ""
    server_info[client_id] = {
        'last_serv': server,
        'location': location,
        'time_stamp': time_stamp,
        'time_diff': prefix + str(skew),
    }
async def send_data(loop, serv_id, message):
    """Flood *message* to every neighbor of *serv_id* in the comm_network graph.

    Each neighbor is contacted on localhost at its configured port; a refused
    connection is logged and skipped rather than raised.
    """
    for server in comm_network[serv_id]:
        try:
            coro = await (loop).create_connection(lambda:ClientProtocol(message,loop), '0.0.0.0', portnumbers[server])
            logging.basicConfig(filename=logs[serv_id],level=logging.INFO)
            logging.info("Opened connection to " +server)
            logging.info("Sent: "+ message)
            logging.info("Closed connection to " +server)
        except ConnectionRefusedError:
            # Neighbor is down — best effort, keep flooding the rest.
            logging.basicConfig(filename=logs[serv_id],level=logging.INFO)
            logging.info("Could not connect to " +server)
async def fetch(session, url):
    """GET *url* with the aiohttp *session* and return the response body as text."""
    async with session.get(url) as response:
        return await response.text()
async def called(location, radius, bound, proto):
    """Query Google Places near *location* and write at most *bound* results
    back on *proto*'s transport.

    *location* is an ISO-6709-style "+lat+lon" string; it is split at the
    second sign character into "lat,lon" for the API. The JSON response is
    truncated textually by counting "}," / "{" boundary line pairs — this
    relies on the API's exact pretty-printed layout.
    """
    async with aiohttp.ClientSession() as session:
        # Find the start of the longitude component (first +/- after index 0).
        size =len(location)
        i=0
        while (i!=size):
            if ((location[i]=='+' or location[i]=='-') and i!=0):
                break
            i=i+1
        true_location = location[:i]+ "," + location[i+1:]
        info = await fetch(session, "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location="+ true_location + "&radius=" + radius + "&key=AIzaSyD8h2eN8gP1G5wWc8FKc55sd93aEA8DrJI")
        # Count result-object boundaries until *bound* entries have been seen.
        i=0
        j=0
        lines = info.split("\n")
        length=len(lines)
        while((i+1)!=length):
            if ((lines[i]== " },") and (lines[i+1]==" {")):
                j=j+1
                # print(j)
            if (j==bound):
                break;
            i=i+1
        final = ""
        if (j==bound):
            # Truncated: keep everything up to the bound-th entry and reclose it.
            final="\n".join(lines[:i])+"\n }\n"
        else:
            # Fewer than *bound* results — return the full response.
            final = info
        print(final)
        proto.transport.write(final.encode())
class ClientProtocol(asyncio.Protocol):
    """Fire-and-forget client: sends one message as soon as the connection opens."""
    def __init__(self, message, loop):
        # message: text to transmit; loop: owning event loop (kept for reference).
        self.message = message
        self.loop = loop
    def connection_made(self, transport):
        """Write the pending message immediately on connect."""
        transport.write(self.message.encode())
        print('Connection made: {!r}'.format(self.message))
class ServerProtocol(asyncio.Protocol):
    """Server side of the proxy-herd protocol.

    Parses three commands from each received datagram:
      IAMAT  <client> <+lat+lon> <timestamp>           — position report
      WHATSAT <client> <radius-km> <max-results>       — Places lookup
      AT     <server> <skew> <client> <loc> <timestamp> — inter-server flood
    Anything else (or malformed arguments) goes through error().
    """
    def __init__(self, serv_id, loop):
        self.serv_id=serv_id
        self.loop = loop
        self.portnumber= portnumbers[serv_id]
        self.network = comm_network[serv_id]
    def connection_made(self, transport):
        """Remember the transport/peer and log the new connection."""
        self.transport = transport
        self.peername = transport.get_extra_info('peername')
        print('Connection from {}'.format(self.peername))
        logging.basicConfig(filename=logs[self.serv_id],level=logging.INFO)
        logging.info('Connection from {}'.format(self.peername))
    def data_received(self, data):
        """Tokenize one message on whitespace and dispatch on the command word."""
        message = data.decode()
        print('Data received: {!r}'.format(message))
        logging.basicConfig(filename=logs[self.serv_id],level=logging.INFO)
        logging.info('Data received: {!r}'.format(message))
        # Manual whitespace tokenizer (equivalent to message.split()).
        size = len(message)
        words=[]
        word=""
        i=0
        while (i!=size):
            if (message[i] in string.whitespace):
                if (word!= ""):
                    words.append(word)
                    word=""
                i=i+1
            else:
                word+=message[i]
                i=i+1
        if (words[0]=="IAMAT"):
            if (len(words)!=4): #arg length error
                error(message)
            if (len(words[2])==1):
                error(message) #invalid location error
            # Locate the sign that separates latitude from longitude.
            i =(words[2])[1:].find("-")
            j=(words[2])[1:].find("+")
            if ((i==-1) and (j==-1)):
                error(message)
            if (i==-1):
                # Bug fix: original wrote `i==j` (a no-op comparison) instead of
                # assigning the '+' separator index when no '-' was found.
                i = j
            # Validate that both coordinates and the timestamp parse as floats.
            try:
                x=float(words[2][:i+1])
                y=float(words[2][i+1:])
                z=float(words[3])
            except ValueError:
                error(message)
            return_message=""
            client_id=words[1]
            if (server_info.get(client_id, -1)!=-1): #if it does exist
                if (float(words[3])<= float(server_info[client_id]['time_stamp'])):
                    # Stale report — answer from the stored record, don't flood.
                    return_message=messenger(client_id)
                else:
                    update_server(client_id, self.serv_id, words[2], words[3])
                    return_message=messenger(client_id)
                    asyncio.ensure_future(send_data(self.loop, self.serv_id, return_message))
            else:
                update_server(client_id, self.serv_id, words[2], words[3])
                return_message=messenger(client_id)
                asyncio.ensure_future(send_data(self.loop, self.serv_id, return_message))
            self.transport.write(return_message.encode())
        elif(words[0]=="WHATSAT"):
            if (len(words)!=4): #arg length error
                error(message)
            # radius must be 0..50 km, result bound 0..20.
            try:
                temp1=float(words[2])
                temp2=int(words[3])
                if (not((temp1<=50) and (temp1>=0) and (temp2>=0) and (temp2<=20))):
                    error(message)
            except ValueError:
                error(message)
            client_id=words[1]
            return_message=messenger(client_id)
            radius=words[2]
            bound =int(words[3])
            print('Send: {!r}'.format(return_message))
            self.transport.write(return_message.encode())
            asyncio.ensure_future(called(server_info[client_id]['location'],radius,bound, self))
        elif(words[0]=="AT"):
            if (len(words)!=6): #arg length error
                error(message)
            client_id=words[3]
            if (server_info.get(client_id, -1)!=-1):
                try:
                    if(float(words[5])<= float(server_info[client_id]['time_stamp'])):
                        return_message=messenger(client_id)
                        print('Send: {!r}'.format(return_message))
                        self.transport.write(return_message.encode())
                except ValueError:
                    error(message)
            else:
                # First time we hear of this client: store it, keep the
                # originating server's skew, and keep flooding.
                update_server(client_id, words[1], words[4], words[5])
                server_info[client_id]['time_diff']=words[2]
                return_message=messenger(client_id)
                print('Send: {!r}'.format(return_message))
                asyncio.ensure_future(send_data(self.loop, self.serv_id, return_message))
        else:
            error(message)
    def connection_lost(self, exc):
        """Log the disconnect and close our side of the transport."""
        print('Lost connection of {}'.format(self.peername))
        self.transport.close()
def main():
    """Start the server named on the command line and run it forever.

    argv[1] must be one of the known server names (portnumbers keys);
    anything else reports an error and exits.
    """
    if (len(sys.argv) <2):
        error(sys.argv)
    serv_id = sys.argv[1]
    loop = asyncio.get_event_loop()
    if (portnumbers.get(serv_id, -1)!=-1):
        coro = loop.create_server(lambda:ServerProtocol(serv_id, loop), '0.0.0.0', portnumbers[serv_id])
    else:
        error(sys.argv)
    server = loop.run_until_complete(coro)
    loop = asyncio.get_event_loop()
    # Each client connection will create a new protocol instance
    # The IP address or hostname can be used.
    # 127.0.0.1 is intended for the 'localhost' loopback devices.
    # If you have multiple NIC(Network Interface Card)s, you may specify the specific IP address to be used (listen).
    # 0.0.0.0 is to use any available NIC device.
    # Serve requests until Ctrl+C is pressed
    print('Serving on {}'.format(server.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
if __name__=="__main__":
main()
|
[
"ehan2016@gmail.com"
] |
ehan2016@gmail.com
|
de555bff1a32f8e3a9d93047cae238e8a3aa29ce
|
b0a9e9df50749086a7db7e135b2582d87cb8dbc2
|
/energy_estimation/nd/fhc/svm/hadron/sample_weighted_estimator_2d.py
|
aac40196ce6aae3c0ebd3a02e163dc5b2568c47b
|
[] |
no_license
|
kaikai581/sklearn-nova
|
6ff204bfb4351395c8029b80fedfbb6b8702f06d
|
e16644c0651123fe3ebdf42a9fb3dc50fbd2c2de
|
refs/heads/master
| 2020-03-08T17:26:46.222451
| 2020-01-07T01:12:36
| 2020-01-07T01:12:36
| 128,268,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,781
|
py
|
#!/usr/bin/env python
"""
This script weights each point with inverse square root of its true hadronic
energy when fitting. This is equivalent to modifying the SVM loss function.
Two predictors are used, namely calibrated hadronic energy, and one swappable
varisble.
"""
from __future__ import print_function
print(__doc__)
from array import array
from matplotlib.colors import LogNorm
from ROOT import *
from root_numpy import root2array
from scipy.spatial.distance import pdist
from scipy.stats import tstd, skew, kurtosis
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.svm import SVR
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
def fit_with_subdata(var, cpar, gpar, scaledown, offset):
    """Train a sample-weighted RBF-SVR hadronic-energy estimator on a data subset.

    Regressors are calehad plus the swappable variable *var*; the target is
    trueenu - recoemu (true hadronic energy). Each sample is weighted by the
    inverse of |target| (1 for zero targets). *scaledown*/*offset* select every
    scaledown-th event starting at *offset*. The fitted scaler and model are
    pickled under models/2d/, and per-event resolutions are written to a ROOT
    tree under output_root_files/2d/.
    """
    # retrieve training data and official reco hadronic energy for comparison
    cut = 'mustopz<1275&&isnumucc==1'
    X = root2array('../training_data.root',
                   branches=['calehad', var],
                   selection=cut,
                   step=scaledown, start=offset)
    # View the structured array as a plain 2-column float32 matrix.
    X = X.view(np.float32).reshape(X.shape + (-1,))
    recoemu_official = root2array('../training_data.root', branches='recoemu',
                                  selection=cut,
                                  step=scaledown, start=offset)
    trueenu = root2array('../training_data.root', branches='trueenu',
                         selection=cut,
                         step=scaledown, start=offset)
    y = trueenu - recoemu_official
    yoff = root2array('../training_data.root', branches='recoehad',
                      selection=cut,
                      step=scaledown, start=offset)
    # rescale the regressors
    scaler = preprocessing.StandardScaler().fit(X)
    # calculate the mean pairwise squared distance between regressors
    Xstd = scaler.transform(X)
    if gpar == 'auto':
        # Heuristic gamma = 1 / mean pairwise squared distance.
        mean_squared_dist = np.mean(pdist(Xstd, 'sqeuclidean'))
        gpar = '{0:.3g}'.format(1./mean_squared_dist)
    # save the scaler
    os.system('mkdir -p models/2d')
    joblib.dump(scaler, 'models/2d/sample_weighted_hadronic_scaler_{}_c{}g{}step{}offset{}.pkl'.format(var, cpar, gpar, scaledown, offset))
    # make an array for sample weights
    swei = np.copy(y)
    #~ swei[y != 0] = 1./np.sqrt(np.abs(swei[y != 0]))
    swei[y != 0] = 1./np.abs(swei[y != 0])
    swei[y == 0.] = 1.
    # train svm with standardized regressors
    svr = SVR(kernel='rbf', C=float(cpar), gamma=float(gpar), verbose=True)
    y_pred = svr.fit(Xstd, y, swei).predict(Xstd)
    # save the model
    joblib.dump(svr, 'models/2d/sample_weighted_hadronic_energy_estimator_{}_c{}g{}step{}offset{}.pkl'.format(var, cpar, gpar, scaledown, offset))
    # make plots
    #~ plt.figure(1)
    #~ xbin = np.linspace(-.05,2,80)
    #~ ybin = np.linspace(-.2,5,80)
    #~ plt.hist2d(X,y,[xbin,ybin], norm=LogNorm())
    #~ plt.colorbar()
    #~ plt.scatter(X, y_pred, s=2, c='red', alpha=0.5)
    # save plots
    #~ os.system('mkdir -p plots/2d')
    #~ plt.savefig('plots/2d/sample_weighted_estimator_overlaid_on_data_{}_c{}g{}step{}offset{}.pdf'.format(var, cpar, gpar, scaledown, offset))
    # estimate various reco values
    yest = y_pred
    # Fractional resolutions for the SVM estimate and the official reco.
    rest = (yest-y)/y
    roff = (yoff-y)/y
    # save root file
    os.system('mkdir -p output_root_files/2d')
    toutf = TFile('output_root_files/2d/sample_weighted_resolution_{}_c{}g{}step{}offset{}.root'.format(var, cpar, gpar, scaledown, offset), 'recreate')
    tr = TTree( 'tr', 'resolution tree' )
    r1 = array( 'f', [ 0. ] )
    r2 = array( 'f', [ 0. ] )
    svmehad = array( 'f', [ 0. ] )
    offehad = array( 'f', [ 0. ] )
    trueehad = array( 'f', [ 0. ] )
    tr.Branch( 'rest', r1, 'rest/F' )
    tr.Branch( 'roff', r2, 'roff/F' )
    tr.Branch('svmehad', svmehad, 'svmehad/F')
    tr.Branch('offehad', offehad, 'offehad/F')
    tr.Branch('trueehad', trueehad, 'trueehad/F')
    # Fill one tree entry per event.
    for i in range(len(rest)):
        r1[0] = rest[i]
        r2[0] = roff[i]
        svmehad[0] = yest[i]
        offehad[0] = yoff[i]
        trueehad[0] = y[i]
        tr.Fill()
    tr.Write()
    toutf.Close()
if __name__ == '__main__':
    # list of second variable
    varlist = ['cvnpi0', 'cvnchargedpion', 'cvnneutron', 'cvnproton', 'npng']
    # parse command line arguments
    parser = argparse.ArgumentParser(description='Hadronic energy SVM with sample weights.')
    parser.add_argument('-c','--cpar',type=str,default='100')
    parser.add_argument('-g','--gpar',type=str,default='auto')
    parser.add_argument('-s','--step',type=int,default='500')
    parser.add_argument('-o','--offset',type=int,default='0')
    parser.add_argument('-v','--variable',type=int,default='0')
    args = parser.parse_args()
    # specified parameters
    cpar = args.cpar
    gpar = args.gpar
    scaledown = args.step
    offset = args.offset
    # -v selects the swappable second regressor by index into varlist.
    var = varlist[args.variable]
    # fit model with arguments
    fit_with_subdata(var, cpar, gpar, scaledown, offset)
|
[
"shihkailin78@gmail.com"
] |
shihkailin78@gmail.com
|
9ff2745dddde91c4bb375c36b46f51ff6af9493f
|
fe19282d91746bd21d1daed624d6d5102a871d8d
|
/assign5/models.py
|
05082fd5e43de3bfec644355f7fc8ca81186f2b1
|
[] |
no_license
|
vivekpradhan/autograder
|
344410aa8f7fa8c93137ec56762b949be77c24ae
|
0cd8c24d0ba1c8f2af129c1c2a7bd7a7bbe0853d
|
refs/heads/master
| 2021-01-24T10:34:04.424425
| 2017-10-09T12:07:32
| 2017-10-09T12:07:32
| 69,992,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Submission(models.Model):
    """A student's uploaded assignment submission."""
    # Student's university EID.
    eid = models.CharField(max_length=10, blank=False)
    # The uploaded file itself.
    document = models.FileField(upload_to='media/')
    uploaded_at = models.DateTimeField(auto_now_add=True)
    assignment_num = models.IntegerField(default = 0)
    password = models.CharField(max_length=10, default='')
class SubResults(models.Model):
    """Autograder result record for one submission (one row per grading run)."""
    password = models.CharField(max_length=10, default='')
    processed_at = models.DateTimeField(auto_now_add=True)
    assignment_num = models.IntegerField(default = 0)
    # Filesystem path of the submission's pom.xml.
    pomlocation = models.CharField(max_length=500, blank=False)
    eid = models.CharField(max_length=10, blank=False)
    # Progress flags for each grading stage.
    mvnstarted = models.BooleanField(default=False)
    mvnended = models.BooleanField(default=False)
    hadoopstarted = models.BooleanField(default=False)
    hadoopended = models.BooleanField(default=False)
    foundoutput = models.BooleanField(default=False)
    # -999 means "not measured yet".
    numberoflines = models.IntegerField(default=-999)
    numberofkeymatches = models.IntegerField(default=-999)
    events_sorted = models.BooleanField(default=False)
    features_sorted = models.BooleanField(default=False)
    completed = models.BooleanField(default=False)
    errors = models.CharField(max_length=500, default='')
    def convert_to_string(self):
        """Render this result as an HTML fragment (list items in a #output div)."""
        output = "<div id='output'>"
        output+= '<li> Autograder started at '+ str(self.processed_at) + ' </li>\n'
        #output+= '<li> EID: '+str(self.eid) + ' </li>\n'
        output+= '<li> mvn clean package started?: ' +str(self.mvnstarted)+'</li>\n'
        output+= '<li> mvn clean package ended?: ' +str(self.mvnended)+'</li>\n'
        output+= '<li> hadoop job started?: ' +str(self.hadoopstarted)+'</li>\n'
        output+= '<li> hadoop job ended?: ' +str(self.hadoopended)+'</li>\n'
        output+= '<li> found output file?: '+str(self.foundoutput)+ '</li>\n'
        output+= '<li> Number of lines in output is '+str(self.numberoflines)+' of xx</li>\n'
        output+= '<li> Number of matching keys in output is '+str(self.numberofkeymatches)+' of xx</li>\n'
        output+= '<li> Events sorted? : '+str(self.events_sorted)+'</li>\n'
        output+= '<li> Features sorted? : '+str(self.features_sorted)+'</li>\n'
        output+= '<li> Autograder is finished? '+str(self.completed)+'</li>\n'
        output+= '<li> ERRORS: '+str(self.errors)+'</li>\n'
        output+= '</div>'
        return output
|
[
"vivkripra@gmail.com"
] |
vivkripra@gmail.com
|
cc22681d605c52facf8d17b0ff1cd2612d797397
|
eb5ab5ce3763f5e5b80a38f77ee98b7b954d726e
|
/cciaa/portlet/calendar/tests/base.py
|
34f3751741116d33cc8d925131230f0af4066c0c
|
[] |
no_license
|
PloneGov-IT/cciaa.portlet.calendar
|
088e400c567066ce486f6dfb6eaa2f482abd0471
|
81aa9deb5e082520604946387a6a60dd229db21a
|
refs/heads/master
| 2021-01-21T19:28:56.055750
| 2013-11-20T16:48:08
| 2013-11-20T16:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
@onsetup
def setup_product():
    """Set up additional products and ZCML required to test this product.

    The @onsetup decorator causes the execution of this body to be deferred
    until the setup of the Plone site testing layer.
    """
    # Load the ZCML configuration for this package and its dependencies
    fiveconfigure.debug_mode = True
    import cciaa.portlet.calendar
    zcml.load_config('configure.zcml', cciaa.portlet.calendar)
    fiveconfigure.debug_mode = False
    # We need to tell the testing framework that these products
    # should be available. This can't happen until after we have loaded
    # the ZCML.
    ztc.installPackage('cciaa.portlet.calendar')
# The order here is important: We first call the deferred function and then
# let PloneTestCase install it during Plone site setup
setup_product()
ptc.setupPloneSite(products=['cciaa.portlet.calendar'])
class TestCase(ptc.PloneTestCase):
    """Base class for unit test cases of this package (Plone test sandbox)."""
class FunctionalTestCase(ptc.FunctionalTestCase):
    """Base class for functional (doc-)tests of this package."""
|
[
"keul@db7f04ef-aaf3-0310-a811-c281ed44c4ad"
] |
keul@db7f04ef-aaf3-0310-a811-c281ed44c4ad
|
7397068550e96dd401ecb0de352f442531574858
|
78a4379f22f1a1f8b801a26c4bb5357bdce24cb6
|
/apps/pay/migrations/0001_initial.py
|
c0f63180f3030475c4991ea0eef92a94c718e380
|
[] |
no_license
|
18801166104/TravelWebSite
|
706890ecc901b7fea277dd5794d81c0e54424e98
|
131399ecc3df3344105677b9e6745fb2f23750e7
|
refs/heads/master
| 2020-04-30T15:12:34.745958
| 2019-03-21T09:38:16
| 2019-03-21T09:38:16
| 176,461,032
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
# Generated by Django 2.1.7 on 2019-02-22 13:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the pay app: creates the OrderItems table.

    Each row records one purchased item of an order, denormalized (name,
    price, quantity, image, product id) and keyed to the order by its
    order-number string.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='OrderItems',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('good_name', models.CharField(default='', max_length=30, verbose_name='购买商品名称')),
                ('good_num', models.IntegerField(default=1, verbose_name='购买数量')),
                ('good_price', models.FloatField(default=0, verbose_name='单价')),
                ('good_image', models.ImageField(default='', upload_to='', verbose_name='商品主图')),
                ('good_id', models.IntegerField(default=0, verbose_name='商品id')),
                ('order_num', models.CharField(max_length=25, verbose_name='订单号')),
            ],
            options={
                'verbose_name': '用户购买商品信息',
                'verbose_name_plural': '用户购买商品信息',
            },
        ),
    ]
|
[
"75197440@qq.com"
] |
75197440@qq.com
|
8072e2425675f6565ceecb16805e4ef341e0456e
|
4cb37eaa0bdcf002c26aba4656df1287ce3cfe56
|
/main/settings.py
|
6abbf86caad648c174cc9b0af91a9fcf6aa0f5f6
|
[] |
no_license
|
jiuniuone/app-mall-backend
|
86ef3ec8cd78e019d90c8885173b68a6ecfdf6ca
|
07dcb5840bc0d5f2d98522c5eb3d33de74bdcb6f
|
refs/heads/master
| 2020-03-26T07:27:43.297704
| 2018-08-21T05:10:30
| 2018-08-21T05:10:30
| 144,655,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,210
|
py
|
import os
import socket
import sys
import time
from unipath import Path
APP_NAME = 'mall'
FUNCTION_NAME = "shopping mall"
DEBUG = socket.gethostname() not in ['public', 'stage']
# DEBUG = False
SHOW_SQL = 'runserver' in sys.argv
if DEBUG: SHOW_SQL = False
BASE_DIR = Path(__file__).ancestor(2)
SECRET_KEY = 'i%25adry^l0r87l+228213a^%67q015z7j9^uc96jm=n%%0e^l'
ALLOWED_HOSTS = ["*"]
ROOT_URLCONF = 'main.urls'
WSGI_APPLICATION = 'main.wsgi.application'
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
DATE_TIME_FORMAT = '%Y-%m-%d'
PAGINATE_BY = 10
START_TIME = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
AUTH_USER_MODEL = f'{APP_NAME}.User'
LOGIN_URL = f'/{APP_NAME}/user/login/'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
f'{APP_NAME}.middlewares.HttpsCheckMiddleware',
f'{APP_NAME}.middlewares.LogMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': [BASE_DIR.child('templates')],
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'acmin.processors.extra_context'
],
},
},
]
def rotating_handler(name):
    """Logging-config dict for a file handler named *name* that rotates at midnight.

    Keeps 100 daily backups under /var/log/<APP_NAME>/.
    """
    log_path = f'/var/log/{APP_NAME}/{name}.log'
    return {
        'level': 'DEBUG',
        'filters': ['f1'],
        'formatter': 'simple',
        'class': 'logging.handlers.TimedRotatingFileHandler',
        'when': 'midnight',
        'interval': 1,
        'backupCount': 100,
        'filename': log_path,
    }
def file_handler(name):
    """Logging-config dict for a plain (non-rotating) file handler named *name*."""
    log_path = f'/var/log/{APP_NAME}/{name}.log'
    return {
        'level': 'DEBUG',
        'filters': ['f1'],
        'formatter': 'simple',
        'class': 'logging.FileHandler',
        'filename': log_path,
    }
def console_handler():
    """Logging-config dict for a DEBUG-level stream (console) handler."""
    return {
        'level': 'DEBUG',
        'filters': ['f1'],
        'formatter': 'simple',
        'class': 'logging.StreamHandler',
    }
def get_log_setting(debug):
    """Build the Django LOGGING dict.

    One file handler and one INFO logger per module in [APP_NAME], plus a
    shared console handler; the 'f1' filter enables records only when the
    runtime DEBUG flag matches *debug*.
    """
    log_modules = [APP_NAME]
    handlers = {name: file_handler(name) for name in log_modules}
    handlers['console'] = console_handler()
    loggers = {
        name: {'level': 'INFO', 'handlers': ['console', name]}
        for name in log_modules
    }
    return {
        'version': 1,
        'disable_existing_loggers': True,
        'filters': {'f1': {'()': 'django.utils.log.RequireDebug' + str(debug)}},
        'formatters': {'simple': {'format': '%(levelname)s %(asctime)s %(message)s'}},
        'handlers': handlers,
        'loggers': loggers,
    }
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'acmin',
APP_NAME
]
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },
]
CACHALOT_UNCACHABLE_TABLES = (
'django_migrations',
)
from acmin.utils import get_ip, is_windows
MEDIA_ROOT = "e:/var/www/media/" if is_windows() else "/var/www/media/"
MEDIA_URL = f'http://{get_ip()}/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR.child(APP_NAME, "static")
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
USE_SQLITE3 = True
if DEBUG:
name = 'test' if 'test' in sys.argv else 'app'
if USE_SQLITE3:
DATABASES = {'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR.child(f'{APP_NAME}.db'),
'TEST_NAME': BASE_DIR.child(f'{APP_NAME}-test.db'),
}}
else:
DATABASES = {'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': APP_NAME,
'USER': APP_NAME,
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '5432',
}}
else:
DATABASES = {'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': APP_NAME,
'USER': APP_NAME,
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '5432',
}}
LOGGING = get_log_setting(DEBUG)
|
[
"hikelee@gmail.com"
] |
hikelee@gmail.com
|
66bd585e840a22d0e4615b07176d861991dd6612
|
da617dbc147d7720490866b279068cdef87e00c9
|
/q7.py
|
510b9e8113cec4c9b681288211d223574fb7efc6
|
[] |
no_license
|
Shailaj97/Python-Assignment-1
|
444750a816bd593fdafb9eebdd9b6b5dd646915f
|
55e11b541fbc517da360afc41ea915a1a9af43ea
|
refs/heads/main
| 2023-07-17T06:10:59.775212
| 2021-09-01T04:32:43
| 2021-09-01T04:32:43
| 401,912,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
""" Write a Python program to combine two lists into a dictionary, where the elements of the first one serve as the keys and the elements of the second one serve as the values. The values of the first list need to be unique and hashable.
Sample Output:
Original lists:
['a', 'b', 'c', 'd', 'e', 'f']
[1, 2, 3, 4, 5]
Combine the values of the said two lists into a dictionary:
{'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
"""
# Keys for the combined dictionary (one more key than there are values).
list1 = ['a', 'b', 'c', 'd', 'e', 'f']
# Values to pair with the keys positionally.
list2 = [1, 2, 3, 4, 5]
# zip stops at the shorter list, so the surplus key 'f' is dropped.
a = {key: value for key, value in zip(list1, list2)}
print(str(a))
|
[
"noreply@github.com"
] |
noreply@github.com
|
dff8c7bdbed01f6050d2849d52ffd0a9b798193a
|
bcf09471588530a543b1eca8fd2938ee10c1a9c5
|
/api-service/api/service.py
|
320dbfe7cf29e4250393d9fe4930558d7373abd8
|
[] |
no_license
|
dlops-io/video-in-out
|
34bf5ded0a3d48a678cd4f47f4edade11ef6131a
|
c2cd180e5f00f67d9fcce532b728a92950ae25aa
|
refs/heads/main
| 2023-08-20T13:21:37.479774
| 2021-10-25T12:48:55
| 2021-10-25T12:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
# NOTE(review): Request, JSONResponse and StaticFiles are imported but never
# used in this module -- candidates for removal.
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.staticfiles import StaticFiles
from api.routers import video
# Setup FastAPI app
app = FastAPI(
    title="API Service",
    description="API Service",
    version="0.1.0"
)
# Enable CORSMiddleware
# Wide-open CORS: any origin/method/header, credentials disabled.  Fine for
# an internal/dev service; revisit before exposing this API publicly.
app.add_middleware(
    CORSMiddleware,
    allow_credentials=False,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# Routes
@app.get(
    "/",
    summary="Index",
    description="Root api"
)
async def get_index():
    # Simple liveness endpoint confirming the server is up.
    return {
        "message": "API Server Running!"
    }
# Additional routers here
app.include_router(video.router)
|
[
"shivasj@gmail.com"
] |
shivasj@gmail.com
|
28b5d104d8e8eb1a445ac2a3359068286e7227cf
|
dde3f20bc5d50bb87f735f88645a07d9789ada2a
|
/contrib/seeds/generate-seeds.py
|
885030bca90708d6cc21d0386331627e2401a544
|
[
"MIT"
] |
permissive
|
wai1496/Quaz
|
6c96dd0009df9254127d4e37d5398e0e09b355c9
|
8cea70d86e0b38cd580a8de457d1890b4710c0d2
|
refs/heads/master
| 2021-04-15T03:37:17.426013
| 2018-03-16T08:43:39
| 2018-03-16T08:43:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,366
|
py
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 representation.

    Supported forms: '<base32>.onion' (OnionCat mapping), dotted IPv4
    (embedded as ::ffff:a.b.c.d), plain IPv6, and the legacy
    '0xDDBBCCAA' little-endian IPv4 form.

    Raises ValueError for an invalid onion address or unparseable input.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: previously raised with '%s' % s where `s` was an
            # undefined name, turning the intended ValueError into a
            # NameError.  Use the actual input address instead.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec into (ipv6_bytes, port).

    Accepts '[ipv6]', '[ipv6]:port', bare ipv6, 'host', and 'host:port';
    `defaultport` is used when no port is given.  The host part is
    converted with name_to_ipv6().
    """
    # FIX: raw string -- '\[' and '\]' are invalid escapes in a plain
    # string literal (DeprecationWarning / SyntaxWarning on modern
    # Python).  The regex pattern itself is unchanged.
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1: # ipv6, no port
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    # Emit the C array `structname` of SeedSpec6 entries to stream `g`,
    # one entry per non-empty, non-comment line read from stream `f`.
    # `defaultport` is applied when a line does not specify a port.
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    first = True
    for line in f:
        # Strip trailing '#' comments, then surrounding whitespace.
        comment = line.find('#')
        if comment != -1:
            line = line[0:comment]
        line = line.strip()
        if not line:
            continue
        # Comma-separate entries; no trailing comma after the last one.
        if not first:
            g.write(',\n')
        first = False
        (host,port) = parse_spec(line, defaultport)
        # Render the 16 address bytes as a C initializer list.
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write(' {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    # Read nodes_main.txt / nodes_test.txt from the directory given on the
    # command line and emit the chainparamsseeds.h header on stdout.
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef QUAZ_CHAINPARAMSSEEDS_H\n')
    g.write('#define QUAZ_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the quaz network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    # Main and test networks use different default ports (9992 / 19992).
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 9992)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 19992)
    g.write('#endif // QUAZ_CHAINPARAMSSEEDS_H\n')
|
[
"devquaz@gmail.com"
] |
devquaz@gmail.com
|
b60bee7fa7a89fdb6ab3d2d5194fd86e56b4801b
|
0f0a7e594c53acbce5e93fad653abed2a3d02466
|
/zhidao/middlewares.py
|
190647c84d6e3a0b2e42f946e2fa9359d599844a
|
[] |
no_license
|
ElliottYan/crawler
|
de1e5a7ae1dbf0bd3bd3faa9224180ebb7051964
|
cab3e078ec85b13219bee836fa136c2f43eb4f2f
|
refs/heads/master
| 2020-03-24T06:12:58.864182
| 2018-07-27T03:05:52
| 2018-07-27T03:05:52
| 142,520,505
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,880
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class ZhidaoSpiderMiddleware(object):
    """Scrapy spider middleware for the zhidao project.

    BUG FIX: the process_* hooks were missing the `self` parameter.
    Scrapy invokes them as bound methods, so the first real argument
    (response / start_requests) was consumed as `self` and every call
    failed with a TypeError.  Signatures now match Scrapy's spider
    middleware template.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        """Called for each response that goes through the spider middleware
        and into the spider.  Should return None or raise an exception."""
        return None

    def process_spider_output(self, response, result, spider):
        """Called with the results returned from the Spider, after it has
        processed the response.  Must yield Request, dict or Item objects."""
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        """Called when a spider or process_spider_input() method (from other
        spider middleware) raises an exception.  Should return either None
        or an iterable of Response, dict or Item objects."""
        pass

    def process_start_requests(self, start_requests, spider):
        """Called with the start requests of the spider; works like
        process_spider_output() except that it has no associated response.
        Must yield only requests (not items)."""
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
|
[
"elliottyan37@gmail.com"
] |
elliottyan37@gmail.com
|
8602f77cf80a23e828ccb71e0edd430dbdbb77c2
|
b97fd4dd496456bbbeda8d774bb179f1b03b0dba
|
/day11/part2.py
|
c85016a17ab75f04ef04b920eb6f2beffec81e34
|
[] |
no_license
|
woranov/aoc2020
|
7666cabc96267936fda04a58244193db19e9489e
|
671ef866e0c81a5f70898a7e6ec725b1f78378fb
|
refs/heads/master
| 2023-02-11T19:03:01.437839
| 2020-12-25T15:29:46
| 2020-12-25T15:29:46
| 317,584,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,312
|
py
|
import functools
import itertools
_TESTCASE = """\
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
""".strip().splitlines()
def compute(data):
"""
>>> compute(_TESTCASE)
26
"""
grid = [*map(list, data)]
rows = len(grid)
cols = len(grid[0])
# doing some memoization but its pretty pointless
@functools.lru_cache(maxsize=None)
def get_directions(r, c):
return {
"↓": tuple(zip(itertools.repeat(r), range(c + 1, cols))),
"↑": tuple(zip(itertools.repeat(r), range(c - 1, -1, -1))),
"→": tuple(zip(range(r + 1, rows), itertools.repeat(c))),
"←": tuple(zip(range(r - 1, -1, -1), itertools.repeat(c))),
"↗": tuple(zip(range(r + 1, rows), range(c + 1, cols))),
"↖": tuple(zip(range(r + 1, rows), range(c - 1, -1, -1))),
"↙": tuple(zip(range(r - 1, -1, -1), range(c + 1, cols))),
"↘": tuple(zip(range(r - 1, -1, -1), range(c - 1, -1, -1))),
}
non_floor_seats = [(r, c) for r in range(rows) for c in range(cols) if grid[r][c] != "."]
while True:
new_grid = [row[:] for row in grid]
for r_idx, c_idx in non_floor_seats:
sym = new_grid[r_idx][c_idx]
if sym == ".":
continue
else:
count = 0
for direction in get_directions(r_idx, c_idx).values():
for nb_r, nb_c in direction:
if grid[nb_r][nb_c] == ".":
continue
elif grid[nb_r][nb_c] == "#":
count += 1
break
if sym == "L" and count == 0:
new_grid[r_idx][c_idx] = "#"
elif sym == "#" and count >= 5:
new_grid[r_idx][c_idx] = "L"
if new_grid == grid:
break
else:
grid = new_grid
return sum(seat == "#" for row in grid for seat in row)
def main():
    """Run compute() on the puzzle input stored next to this script."""
    import pathlib
    input_file = pathlib.Path(__file__).with_name("input.txt")
    puzzle_lines = input_file.read_text().strip().splitlines()
    print(compute(puzzle_lines))
|
[
"wor4nov@gmail.com"
] |
wor4nov@gmail.com
|
9bf4e35570827087d92050fcab5d0ddbc721c47b
|
e8469a38083f28633b74cfd2cf4399aac11a6514
|
/ver.1/selectors.py
|
4197f85bcdd90c359aaf3cf34c0d506aec0db4a4
|
[] |
no_license
|
JadenHeo/FO4-data-crawling
|
e67ff2b611e9f3b5c83b3e237e5d06065094d091
|
e1d4be6a144985d953c9e57b2aef409d7eddea8a
|
refs/heads/main
| 2023-08-07T17:59:28.884544
| 2021-10-04T07:57:22
| 2021-10-04T07:57:22
| 395,150,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,343
|
py
|
selector = {"name" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_name > div.name",
"pay" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_name > div.side_utils > div",
"position" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_ab > span > span.txt",
"live up" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_ab > span > span.live.up",
"birth" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_etc > span.etc.birth",
"height" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_etc > span.etc.height",
"weight" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_etc > span.etc.weight",
"physical" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_etc > span.etc.physical",
"skill" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_etc > span.etc.skill > span",
"foot" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_etc > span.etc.foot",
"class" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_etc > span.etc.season",
"team" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_team > div.etc.team",
"nation" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.info_line.info_team > div.etc.nation",
"speciality" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.info_wrap > div.skill_wrap > span",
"position_overalls" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_header > div.ovr_set > div",
"club_history" : "#middle > div > div > div:nth-child(4) > div:nth-child(1) > div.content.data_detail_club > div.data_table > ul > li > div",
"stat" : "#middle > div > div > div:nth-child(2) > div.content.data_detail > div > div.content_bottom"
}
position_overalls = ["ST", "LW", "CF", "RW", "CAM", "LM", "CM", "RM", "CDM", "LWB", "CB", "RWB", "LB", "SW", "RB", "GK"]
position_index = {"GK" : 0, "SW" : 1, "RWB" : 2, "RB" : 3, "RCB" : 4, "CB" : 5, "LCB" : 6, "LB" : 7, "LWB" : 8,
"RDM" : 9, "CDM" : 10, "LDM" : 11, "RM" : 12, "RCM" : 13, "CM" : 14, "LCM" : 15, "LM" : 16, "RAM" : 17,
"CAM" : 18, "LAM" : 19, "RF" : 20, "CF" : 21, "LF" : 22, "RW" : 23, "RT" : 24, "ST" : 25, "LT" : 26, "LW" : 27}
|
[
"hhj801@gmail.com"
] |
hhj801@gmail.com
|
425b4486fa1fd6169f158a9b5cac855c66475095
|
7d5d8492c2d88b88bdc57e3c32db038a7e7e7924
|
/IPCC-CMIP5/bias_correction/bc_extract_gcm.py
|
ef32727acb01fb3b19decbfd3c17f5d35dbfc4ff
|
[] |
no_license
|
CIAT-DAPA/dapa-climate-change
|
80ab6318d660a010efcd4ad942664c57431c8cce
|
2480332e9d61a862fe5aeacf6f82ef0a1febe8d4
|
refs/heads/master
| 2023-08-17T04:14:49.626909
| 2023-08-15T00:39:58
| 2023-08-15T00:39:58
| 39,960,256
| 15
| 17
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,770
|
py
|
# ---------------------------------------------------------------------------------
# Author: Jaime Tarapues
# Date: September 23th, 2014
# Updated: July 28th, 2014
# Purpose: Purpose: extract values daily data of cmip5
# ----------------------------------------------------------------------------------
import os, sys, string,glob, shutil
# python D:\jetarapues\_scripts\bc_extract_gcm.py T:\gcm\cmip5\raw\daily\rcp45\gfdl_esm2m\r1i1p1\pr_day_GFDL-ESM2M_rcp45_r1i1p1_20060101-21001231.nc D:\jetarapues\Request\Request_cnavarro\bc\tes.tab 2006 2100 -72.301412 5.339301 YES cdo
#Syntax
if len(sys.argv) < 8:
os.system('cls')
print "\n Too few args"
print " Syntax : <Extract_MaskGCM.py> <dirout> <mask> <dataset> <sres> <resolution> <models> <periods> <variable> <ascii> <descfile>"
print " - ie: "
sys.exit(1)
#Set variables
ifile = sys.argv[1]
odat = sys.argv[2]
yi = sys.argv[3]
yf = sys.argv[4]
lon=sys.argv[5]
lat=sys.argv[6]
vartype=sys.argv[7]
dircdo = sys.argv[8]
# Clean screen
os.system('cls')
name=os.path.basename(ifile)
if not os.path.exists(odat):
# print '\n...Extracting',name,'lon:'+str(lon)+' lat:'+lat,'Date:'+str(yi)+'-'+str(yf),'\n'
if vartype == 'NO':
os.system(dircdo+" -s -outputtab,date,value -remapnn,lon="+str(lon)+"_lat="+lat+' -selyear,'+str(yi)+'/'+str(yf)+' '+ifile+" > "+odat)
else:
var=name.split("_")[0]
if var == 'hur':
os.system(dircdo+" -s -outputtab,date,value -remapnn,lon="+str(lon)+"_lat="+lat+' -selyear,'+str(yi)+'/'+str(yf)+' -selname,'+var+' -sellevel,85000 '+ifile+" > "+odat)
else:
os.system(dircdo+" -s -outputtab,date,value -remapnn,lon="+str(lon)+"_lat="+lat+' -selyear,'+str(yi)+'/'+str(yf)+' -selname,'+var+' '+ifile+" > "+odat)
# else:
# print '\t...Extracted by coordinate',name
|
[
"jaime.tm8@gmail.com"
] |
jaime.tm8@gmail.com
|
8b97fb6b8a7718a7b273586c5c11230785335bf5
|
51d348426c6e5fa79f2e77baf59bdbf8357d9f12
|
/week10/Инфоматрикс/d.массивы/1.py
|
39e914161d08fae03f9bd90984ada04bfe926359
|
[] |
no_license
|
Zhansayaas/webdev
|
c01325b13abf92cef13138d7ffc123cf9bc4f81a
|
dd054d0bcafc498eccc5f4626ab45fd8b46b3a3f
|
refs/heads/main
| 2023-04-10T23:33:30.469465
| 2021-04-17T10:21:53
| 2021-04-17T10:21:53
| 322,049,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
# Read a count and a whitespace-separated list, then echo the elements at
# even indices (0, 2, 4, ...) each followed by a single space.
size = int(input())
tokens = input().split()
for idx in range(0, size, 2):
    print(tokens[idx], end=' ')
|
[
"noreply@github.com"
] |
noreply@github.com
|
e14ac3d06dfe4effe84493b1c1438edb268348ab
|
ab692ff0773367a0190309d3e7c3785a46a205d3
|
/main_finetune_imagenet.py
|
ffeb8bccfc2c0e2dac3ba392891c83eea971c116
|
[
"MIT"
] |
permissive
|
sAviOr287/imagenet_ICLR
|
f6da8149bf4280c923c584c4e6aade81fc469cf4
|
1ac83d799f5335355161156aa9bba63e0d82a063
|
refs/heads/main
| 2023-01-13T02:45:53.450571
| 2020-11-15T22:12:36
| 2020-11-15T22:12:36
| 313,132,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,010
|
py
|
import argparse
import os
import random
import shutil
import time
import warnings
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from tensorboardX import SummaryWriter
from pprint import pprint
# All lowercase, callable, non-dunder attributes of torchvision.models --
# i.e. the available architecture constructors (e.g. 'resnet18').
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
# Command-line interface; mirrors the official PyTorch ImageNet example with
# extra flags for gradient accumulation (--grad_loop) and resuming a pruned
# network (--resume_pruned).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_pruned', default='', type=str, metavar='PATH',
                    help='path to latest pruned network (default: none)')
# resume_pruned
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
parser.add_argument('--grad_loop', default=1, type=int,
                    help='GPU id to use.')
# Best top-1 validation accuracy so far; updated via `global` in main_worker().
best_acc1 = 0
# TensorBoard writer; created in main_worker() when resuming a pruned model.
writer = None
def main():
    # Parse CLI args, normalize the learning rate for gradient accumulation,
    # and dispatch either a multiprocessing-distributed launch (one process
    # per GPU) or a single main_worker call.
    args = parser.parse_args()
    # Scale the LR down by the accumulation factor: train() only steps the
    # optimizer every args.grad_loop mini-batches, so gradients sum up.
    args.lr /= args.grad_loop
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Set up model, data and optimizer on one (possibly distributed) worker
    and run the train/validate epoch loop.

    gpu: GPU index for this worker (None -> CPU / DataParallel path).
    ngpus_per_node: GPUs on this node; used to derive the global rank and to
        split batch size / workers in the distributed case.
    args: parsed CLI namespace; mutated in place (gpu, save_dir, rank, ...).

    BUG FIX: removed two leftover `import pdb ; pdb.set_trace()` breakpoints
    that froze the --pretrained and from-scratch model-creation paths, and
    fixed the resume_pruned "not found" message to report the right path.
    """
    global best_acc1, writer
    args.gpu = gpu
    args.save_dir = None
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    def forward_pre_hook(m, x):
        # Re-apply the pruning mask before every forward pass so optimizer
        # updates cannot resurrect pruned weights.
        m.mask.requires_grad_(False)
        mask = m.mask
        m.weight.data.mul_(mask.to(m.weight.get_device()))
    if args.resume_pruned:
        if os.path.isfile(args.resume_pruned):
            print("=> loading checkpoint '{}'".format(args.resume_pruned))
            checkpoint = torch.load(args.resume_pruned)
            model = checkpoint['net'].cpu()
            masks = checkpoint['mask']
            ratio = checkpoint['ratio']
            print("=> Ratios:")
            pprint(ratio)
            print("Loaded check point from %s." % args.resume_pruned)
            print('=> Registering masks for each layer')
            for m in model.modules():
                if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                    m.mask = nn.Parameter(masks[m]).requires_grad_(False).cpu()
                    m.register_forward_pre_hook(forward_pre_hook)
            # Save alongside the pruned checkpoint.
            args.save_dir = os.path.join(*args.resume_pruned.split('/')[:-1])
            writer = SummaryWriter(args.save_dir)
            print('=> Will save to %s.' % args.save_dir)
        else:
            # FIX: was printing args.resume here instead of args.resume_pruned.
            print("=> no checkpoint found at '{}'".format(args.resume_pruned))
    elif args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        def to_cpu(m):
            # Recursively move tensors inside nested dict/list structures to
            # the CPU so the loaded checkpoint stops pinning GPU memory.
            if isinstance(m, dict):
                for k in m.keys():
                    m[k] = to_cpu(m[k])
                return m
            elif isinstance(m, list):
                return [to_cpu(_) for _ in m]
            elif isinstance(m, torch.Tensor):
                return m.cpu()
            else:
                return m
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            checkpoint['state_dict'] = to_cpu(checkpoint['state_dict'])
            checkpoint['optimizer'] = to_cpu(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            del checkpoint
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train_loss, train_top1, train_top5 = train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        acc1, val_loss, val_top1, val_top5 = validate(val_loader, model, criterion, args)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        # Only the rank-0 process per node writes checkpoints in the
        # multiprocessing-distributed case.
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }, is_best, args=args)
        if writer is not None:
            writer.add_scalar('train/loss', train_loss.avg, epoch)
            writer.add_scalar('train/top1', train_top1.avg, epoch)
            writer.add_scalar('train/top5', train_top5.avg, epoch)
            writer.add_scalar('val/loss', val_loss.avg, epoch)
            writer.add_scalar('val/top1', val_top1.avg, epoch)
            writer.add_scalar('val/top5', val_top5.avg, epoch)
writer.add_scalar('val/top5', val_top5.avg, epoch)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch; returns the (losses, top1, top5) meters.

    Gradients are accumulated over args.grad_loop mini-batches before each
    optimizer step (main() divides args.lr by grad_loop to compensate).

    CLEANUP: removed the dead accumulators all_loss / counts / acc_top1 /
    acc_top5 / all_samples, which were written but never read.
    """
    # Recompute the decayed LR only to display it in the progress prefix;
    # adjust_learning_rate() is what actually updates the optimizer.
    total = args.epochs
    intv = total // 3
    lr = args.lr * (0.1 ** (epoch // intv))
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
                             top5, prefix="[lr={}, grad_loop={}] Epoch: [{}]".format(lr, args.grad_loop, epoch))
    # switch to train mode
    model.train()
    end = time.time()
    optimizer.zero_grad()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        # accumulate gradients; step only every grad_loop mini-batches
        loss.backward()
        if (i+1) % args.grad_loop == 0:
            optimizer.step()
            optimizer.zero_grad()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.print(i)
    return losses, top1, top5
def validate(val_loader, model, criterion, args):
    """Evaluate *model* over val_loader; returns (top1_avg, losses, top1, top5)."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
                             prefix='Test: ')
    # Evaluation mode: disables dropout / uses running BN statistics.
    model.eval()
    with torch.no_grad():
        tic = time.time()
        for step, (images, labels) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                labels = labels.cuda(args.gpu, non_blocking=True)
            # Forward pass and loss only -- no gradients in eval.
            logits = model(images)
            loss = criterion(logits, labels)
            acc1, acc5 = accuracy(logits, labels, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            batch_time.update(time.time() - tic)
            tic = time.time()
            if step % args.print_freq == 0:
                progress.print(step)
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg, losses, top1, top5
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', args=None):
    """Persist *state* to `filename` (inside args.save_dir when set) and, when
    *is_best*, also copy it to model_best.pth.tar in the same location."""
    if args.save_dir is not None:
        filename = os.path.join(args.save_dir, filename)
    torch.save(state, filename)
    if not is_best:
        return
    best_location = 'model_best.pth.tar'
    if args.save_dir is not None:
        best_location = os.path.join(args.save_dir, 'model_best.pth.tar')
    shutil.copyfile(filename, best_location)
class AverageMeter(object):
    """Tracks the most recent value plus a running (sum / count) average."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt  # format spec used when rendering val/avg in __str__
        self.reset()

    def reset(self):
        # Clear all accumulated statistics.
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        # Record a new observation `val` representing `n` samples.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Prints a '<prefix>[batch/total]' line followed by each meter's summary."""

    def __init__(self, num_batches, *meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def print(self, batch):
        pieces = [self.prefix + self.batch_fmtstr.format(batch)]
        pieces.extend(str(meter) for meter in self.meters)
        print('\t'.join(pieces))

    def _get_batch_fmtstr(self, num_batches):
        # Right-align the batch index to the width of the total count.
        width = len(str(num_batches // 1))
        spec = '{:' + str(width) + 'd}'
        return '[' + spec + '/' + spec.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10x every
    args.epochs // 3 epochs (three equal decay stages per run)."""
    decay_interval = args.epochs // 3
    new_lr = args.lr * (0.1 ** (epoch // decay_interval))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    # output: class scores, shape (batch, num_classes); target: labels, shape (batch,)
    # Returns one 1-element percentage tensor per requested k.
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # indices of the maxk highest-scoring classes per sample: (batch, maxk)
        _, pred = output.topk(maxk, 1, True, True)
        # transpose to (maxk, batch) so row r holds every sample's r-th guess
        pred = pred.t()
        # broadcast-compare each guess row against the labels
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # a sample counts as correct@k if ANY of its first k guesses matched
            correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
            # convert the raw hit count into a percentage of the batch
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
if __name__ == '__main__':
main()
|
[
"jeanfrancois287@hotmail.fr"
] |
jeanfrancois287@hotmail.fr
|
24a9afca2f817f33c7ce171ef49ab354c5dd6efc
|
d925eb9cf278a67c7714ffa26f25060ae176cb1a
|
/09_ingredient_splitter_v3.py
|
0cd41a40b08cf10dc8fe487ef18f3d9e721adf97
|
[] |
no_license
|
wenqitoh/Recipe-Moderniser
|
bf188697706d69e836396fbf99e7708962ec9f96
|
e257b35f82b70d15bda812dca774f089ce40e68e
|
refs/heads/main
| 2023-06-13T23:24:50.556741
| 2021-07-11T05:04:02
| 2021-07-11T05:04:02
| 372,673,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,474
|
py
|
"""Further version of ingredient splitter which splits the ingredients from one lin of input
into quantity, unit, and ingredient
version 3 - testing on full recipe
created by Wen-Qi Toh
7/7/21"""
import re # this is the Regular Expression module
# ingredient has mixed fraction followed by unit and ingredient
full_recipe = [
"1 1/2 ml flour",
"3/4 cup milk",
"1 cup flour",
"2 tablespoons white sugar",
"1 3/4 cups flour",
"1.5 tsp baking powder",
"pinch of cinnamon"
]
# the regex format below is expecting: number <space> number
mixed_regex = r"\d{1,3}\s\d{1,3}\/\d{1,3}"
# \d for a digit, /d{1,3} allows 1-3 digits, /s for space, \/ for divide
for recipe_line in full_recipe:
recipe_line = recipe_line.strip()
# get amount
if re.match(mixed_regex, recipe_line): # checking for mixed fraction
# get mixed number by matching the regex
pre_mixed_num = re.match(mixed_regex, recipe_line)
mixed_num = pre_mixed_num.group()
# .group returns the part of the string where there was a match
# replace the space in the mixed number with '+' sign
amount = mixed_num.replace(" ","+")
# changes the string into a float using python's evaluation method
amount = eval(amount)
# get unit and ingredient
compile_regex = re.compile(mixed_regex)
# compiles the regex into a string object - so we can search for patterns
unit_ingredient = re.split(compile_regex, recipe_line)
# produces the recipe line unit and amount as a list
unit_ingredient = (unit_ingredient[1]).strip()
# removes the extra white space before and after the unit
# 2nd element in list, converting into a string
else:
# splits the line at the first space
get_amount = recipe_line.split(" ", 1)
try:
amount = eval(get_amount[0]) # convert amount to float if possible
except NameError:
amount = get_amount[0]
unit_ingredient = get_amount[1]
# get unit and ingredient
# splits the string into a list containing just the unit and ingredient
get_unit = unit_ingredient.split(" ", 1)
unit = get_unit[0] # making the 1st item in the list 'unit'
ingredient = get_unit[1] # making the 2nd item in the list 'ingredient'
# all 3 elements of original reciple line now broken into the 3 variables
print("{} {} {}".format(amount, unit, ingredient))
|
[
"tohw@middleton.school.nz"
] |
tohw@middleton.school.nz
|
a7ba513d9dbf489347919fd23a3cec055f45149c
|
e4bf5f3ad3beecf288507470b20ae30849effdb1
|
/node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/kerberos/build/config.gypi
|
fbceeb251c1b9d148e8e13e1a6ab16086dcc9128
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
JohnnyLamb/scotch_jwt_token
|
6356e30026db88fdac64484e4ca0770b40d0701f
|
505db6a587deaefb75df7ad24b718bbac72472db
|
refs/heads/master
| 2021-01-10T16:36:39.379628
| 2015-10-17T23:32:49
| 2015-10-17T23:32:49
| 44,454,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,717
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/johnny/.node-gyp/0.12.7",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/johnny/.npm-init.js",
"userconfig": "/Users/johnny/.npmrc",
"node_version": "0.12.7",
"user": "",
"save": "true",
"editor": "vi",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/johnny/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.11.3 node/v0.12.7 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/3q/_3b6n01s6yd817hd60lsckk80000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
|
[
"agnusjack@gmail.com"
] |
agnusjack@gmail.com
|
114910137765ee9246494ef8b775990951da0d1f
|
b321ca6310cd84bd8603fa9685365bb2a4acc945
|
/公司真题/拼多多/phone_number.py
|
144534cc23631ee5da9b7f732598e83ae9e6c492
|
[] |
no_license
|
baixiaoyanvision/python-algorithm
|
71b2fdf7d6b57be8a2960c44160f2a7459e153ae
|
6cbb61213af8264e083af1994522929fb7711616
|
refs/heads/master
| 2020-08-27T03:41:08.332322
| 2019-10-02T13:28:49
| 2019-10-02T13:28:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
# Practice problem: given an N-digit string, make K digits equal to some
# digit already present, paying |old - new| per change.  Print the minimum
# total cost and the lexicographically smallest resulting string.
# line1 = input()
# line2 = input()
line1 = '6 5'  # sample input hard-coded instead of stdin: "N K"
line2 = '787585'  # the digit string
N, K = [int(i) for i in line1.split()]
line2 = [int(i) for i in line2]  # reuse the name as a list of digit ints
result = []  # candidate output strings achieving the current best cost
line2_set = set(line2)  # only digits already present are tried as targets
min_money = 99999999  # sentinel "infinity" for the best cost found so far
for val in line2_set:
    # cost of converting each position to the target digit `val`
    sub_vals = [abs(val - number) for number in line2]
    # positions ordered from cheapest to most expensive to convert
    sort_sub_vals = sorted( list(range(len(sub_vals))), key=lambda x: sub_vals[x] )
    # total cost of converting the K cheapest positions
    pay_money = sum([sub_vals[i] for i in sort_sub_vals[:K]])
    # cost of the K-th chosen conversion; positions tied at this cost are
    # interchangeable, which the loop below exploits for tie-breaking
    equal_val = sub_vals[sort_sub_vals[K-1]]
    copy_line2 = line2[:]
    for i in sort_sub_vals[:K-1]:
        copy_line2[i] = val
    # among positions tied at `equal_val`, convert the right-most one
    # NOTE(review): presumably this tie-break yields the smallest string;
    # confirm against the original problem statement
    last_change = None
    for i in range(len(copy_line2)):
        if abs(copy_line2[i]-val) == equal_val:
            last_change = i
    copy_line2[last_change] = val
    copy_line2 = [str(i) for i in copy_line2]
    copy_line2 = ''.join(copy_line2)
    if pay_money > min_money:
        continue
    elif pay_money < min_money:
        result = []  # strictly better cost: discard previous candidates
        result.append(copy_line2)
        min_money = pay_money
    else:
        result.append(copy_line2)  # tie on cost: keep as extra candidate
result = sorted(result)  # lexicographically smallest candidate first
print(min_money)
print(result[0])
|
[
"18310523922@163.com"
] |
18310523922@163.com
|
74d689c8c85d5d2561a6abc2a06ba077a7496e0e
|
0fa82ccc0b93944c4cbb8255834b019cf16d128d
|
/Az/temp.py
|
caf3bc211fbf8fccda75e10e1fee9d32caddc4ec
|
[] |
no_license
|
Akashdeepsingh1/project
|
6ad477088a3cae2d7eea818a7bd50a2495ce3ba8
|
bdebc6271b39d7260f6ab5bca37ab4036400258f
|
refs/heads/master
| 2022-12-13T23:09:35.782820
| 2020-08-27T14:22:37
| 2020-08-27T14:22:37
| 279,722,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
def mincostTickets(days, costs):
    """Minimum cost to cover all travel days with 1/7/30-day passes.

    days  -- travel days, each in 1..365
    costs -- [cost_1day, cost_7day, cost_30day]
    """
    last = max(days)
    travel = set(days)  # O(1) membership instead of O(n) list scans
    dp = [0] * (last + 1)
    for day in range(1, last + 1):
        if day not in travel:
            dp[day] = dp[day - 1]
        else:
            # Clamp the look-back at day 0.  The original indexed dp[day-7]
            # / dp[day-30] with a negative index, silently wrapping into the
            # untouched zero tail of a fixed 366-slot table -- correct only
            # by accident and wrong for travel days near the end of the year.
            dp[day] = min(dp[day - 1] + costs[0],
                          dp[max(0, day - 7)] + costs[1],
                          dp[max(0, day - 30)] + costs[2])
    return dp[last]
def mincostTickets2(days, costs):
    """Bottom-up DP over every calendar day up to the last travel day.

    Bug fix: the original charged for EVERY day up to max(days); a
    non-travel day must simply inherit dp[i-1] at no cost (the original
    printed 17 instead of 11 on the sample input).
    """
    last = max(days)
    travel = set(days)
    dp = [0] * (last + 1)
    for i in range(1, last + 1):
        if i not in travel:
            dp[i] = dp[i - 1]
        else:
            # max(0, ...) also removes the accidental negative indexing
            dp[i] = min(dp[i - 1] + costs[0],
                        dp[max(0, i - 7)] + costs[1],
                        dp[max(0, i - 30)] + costs[2])
    return dp[last]
days = [1,4,6,7,8,20]
costs= [2,7,15]
print (mincostTickets2 (days, costs))
|
[
"Akashdeep_S@Dell.com"
] |
Akashdeep_S@Dell.com
|
7df32dbb48957659309409618ea967eed738d6a8
|
ff1dbdb9baed0be909aaf60b7b78bef9441bfcd9
|
/review_homework/ifstatement_review.py
|
330b7c81edabb8d705433d5648705605630043d7
|
[] |
no_license
|
singh-sonali/AP-Compsci
|
ab2c923c79e67eebc021168c720de10618c58583
|
1f26e40b88532cfb9aee39d18fb2ea254d8a1083
|
refs/heads/master
| 2020-03-28T06:43:33.963516
| 2019-03-20T12:04:25
| 2019-03-20T12:04:25
| 147,855,508
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,381
|
py
|
# Partner 1:
# Partner 2:
''' Instructions:
Work with a partner to complete these tasks. Assume that all variables are declared; you need only write the if-statement using the variables indicated in the description. Write your solution below the commented description.
'''
''' 1.
Variable grade is a character. If it is an A, print good work.
'''
''' 2.
Variable yards is an int. If it is less than 17, multiply yards by 2.
'''
''' 3.
Variable success is a boolean. If something is a success, print congratulations.
'''
''' 4.
Variable word is a String. If the string's second letter is 'f', print fun.
'''
''' 5.
Variable temp is a float. Variable celsius is a boolean. If celsius is true, convert to fahrenheit, storing the result in temp. F = 1.8C + 32.
'''
''' 6.
Variable numItems is an int. Variable averageCost and totalCost are floats. If there are items, calculate the average cost. If there are no items, print no items.
'''
''' 7.
Variable pollution is a float. Variable cutoff is a float. If pollution is less than the cutoff, print safe condition. If pollution is greater than or equal to cutoff, print unsafe condition.
'''
''' 8.
Variable score is a float, and grade is a char. Store the appropriate letter grade in the grade variable according to this chart.
F: <60; B: 80-89; D: 60-69; A: 90-100; C: 70-79.
'''
''' 9.
Variable letter is a char. If it is a lowercase letter, print lowercase. If it is an uppercase, print uppercase. If it is 0-9, print digit. If it is none of these, print symbol.
'''
''' 10.
Variable neighbors is an int. Determine where you live based on your neighbors.
50+: city; 25+: suburbia; 1+: rural; 0: middle of nowhere.
'''
''' 11.
Variables doesSignificantWork, makesBreakthrough, and nobelPrizeCandidate are booleans. A nobel prize winner does significant work and makes a break through. Store true in nobelPrizeCandidate if they merit the award and false if they don't.
'''
''' 12.
Variable tax is a boolean, price and taxRate are floats. If there is tax, update price to reflect the tax you must pay.
'''
''' 13.
Variable word and type are Strings. Determine (not super accurately) what kind of word it is by looking at how it ends.
-ly: adverb; -ing; gerund; -s: plural; something else: error
'''
''' 14.
If integer variable currentNumber is odd, change its value so that it is now 3 times currentNumber plus 1, otherwise change its value so that it is now half of currentNumber (rounded down when currentNumber is odd).
'''
''' 15.
Assign true to the boolean variable leapYear if the integer variable year is a leap year. (A leap year is a multiple of 4, and if it is a multiple of 100, it must also be a multiple of 400.)
'''
''' 16.
Determine the smallest of three ints, a, b and c. Store the smallest one of the three in int result.
'''
''' 17.
If an int, number, is even, a multiple of 5, and in the range of -100 to 100, then it is a special number. Store whether a number is special or not in the boolean variable special.
'''
''' 18.
Variable letter is a char. Determine if the character is a vowel or not by storing a letter code in the int variable code.
a/e/o/u/i: 1; y: -1; everything else: 0
'''
''' 19.
Given a string dayOfWeek, determine if it is the weekend. Store the result in boolean isWeekend.
'''
''' 20.
Given a String variable month, store the number of days in the given month in integer variable numDays.
'''
''' 21.
Three integers, angle1, angle2, and angle3, supposedly made a triangle. Store whether the three given angles make a valid triangle in boolean variable validTriangle.
'''
''' 22.
Given an integer, electricity, determine someone's monthly electric bill, float payment, following the rubric below.
First 50 units: 50 cents/unit
Next 100 units: 75 cents/unit
Next 100 units: 1.20/unit
For units above 250: 1.50/unit, plus an additional 20% surcharge.
'''
''' 23.
String, greeting, stores a greeting. String language stores the language. If the language is English, greeting is Hello. If the language is French, the greeting is Bonjour. If the language is Spanish, the greeting is Hola. If the language is something else, the greeting is something of your choice.
'''
''' 24.
Generate a phrase and store it in String phrase, given an int number and a String noun. Here are some sample phrases:
number: 5; noun: dog; phrase: 5 dogs
number: 1; noun: cat; phrase: 1 cat
number: 0; noun: elephant; phrase: 0 elephants
number: 3; noun: human; phrase: 3 humans
number: 3; noun: home; phrase: 3 homes
'''
''' 25.
If a string, userInput, is bacon, print out, "Why did you type bacon?". If it is not bacon, print out, "I like bacon."
'''
''' 26.
Come up with your own creative tasks someone could complete to practice if-statements. Also provide solutions.
'''
''' Task 1:
'''
# solution
''' Task 2:
'''
# solution
''' Task 3:
'''
# solution
''' Sources
http://www.bowdoin.edu/~ltoma/teaching/cs107/spring05/Lectures/allif.pdf
http://www.codeforwin.in/2015/05/if-else-programming-practice.html
Ben Dreier for pointing out some creative boolean solutions.
'''
|
[
"ssingh20@choate.edu"
] |
ssingh20@choate.edu
|
25055392f3e0fab759ac48b34d2c85f1944c0249
|
dbbb304265437b2d591d6d42953efc96bd8b4a24
|
/blog/views.py
|
c19f66271a7b0a97d3aca9fd694b2641d6ce7c38
|
[
"Apache-2.0"
] |
permissive
|
echessa/django-ex
|
647edbea8a9dd7159420af96dd3414bdaaf8aff3
|
33758d254886d3298acba20c0d231bcd6bb6c09a
|
refs/heads/master
| 2020-12-02T21:12:24.753773
| 2017-07-05T09:09:18
| 2017-07-05T09:09:18
| 96,271,914
| 1
| 1
| null | 2017-07-05T03:11:15
| 2017-07-05T03:11:15
| null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
from django.shortcuts import render, redirect
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from .forms import PostForm
from .models import Post
def post_list(request):
    """Render the list of all posts published up to now, newest first."""
    published = Post.objects.filter(
        published_date__lte=timezone.now()
    ).order_by('-published_date')
    return render(request, 'blog/post_list.html', {'posts': published})
def post_detail(request, pk):
    """Render the detail page for one post; responds 404 when `pk` is unknown."""
    return render(
        request,
        'blog/post_detail.html',
        {'post': get_object_or_404(Post, pk=pk)},
    )
def post_new(request):
    """Create a post: show an empty form on GET, save and publish on valid POST.

    An invalid POST falls through and re-renders the bound form with errors.
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # author and publication time are set server-side, not by the form
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm()
    return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
    """Edit an existing post; responds 404 when `pk` is unknown.

    GET renders the form pre-filled from the instance; a valid POST saves
    and re-publishes the post (published_date is reset to now).
    """
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            # mirrors post_new: ownership and timestamp are set server-side
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'blog/post_edit.html', {'form': form})
|
[
"jokhessa@yahoo.com"
] |
jokhessa@yahoo.com
|
0312b66b3e7867e0ab472288a6c5e79ae7d68dd8
|
dacdc8eca8258abc9c82b3a87d97f6a461503edf
|
/data/process_data.py
|
5318bb50a55db8751ba09ebc42d4a40d09cfc52d
|
[] |
no_license
|
eherdter/MessageClassifier
|
ce8f6503c9980068f038e1aa777c9c6a99921bf6
|
449431fab917b9294acce82977eb1d7c44e42bb3
|
refs/heads/master
| 2020-06-12T12:10:07.883068
| 2019-07-10T19:15:54
| 2019-07-10T19:15:54
| 194,294,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
    """Load the messages and categories CSVs and join them on their 'id'.

    Returns: pandas.DataFrame indexed by id, category columns first.
    """
    messages = pd.read_csv(messages_filepath, dtype=str, encoding='latin1')
    categories = pd.read_csv(categories_filepath, dtype=str, encoding='latin1')
    # inner-structure: categories provides the index, messages are joined in
    return categories.set_index('id').join(messages.set_index('id'))
def clean_data(df):
    """Tidy the joined messages/categories frame.

    - expands the ';'-separated `categories` column into one binary (0/1)
      column per category
    - drops duplicate rows
    - drops the 'child_alone' category (it has no positive examples)
    - maps the out-of-vocabulary value 2 in 'related' to 0

    Returns: pandas.DataFrame
    """
    categories = df.categories.str.split(';', expand=True)
    # header entries look like 'related-1' -> column name 'related'
    categories.columns = categories.iloc[0].apply(lambda x: x[:len(x) - 2])
    for column in categories:
        # keep only the trailing flag character of each 'name-flag' entry
        categories[column] = categories[column].astype(str).str[-1].astype(int)
    df = df.drop(columns=['categories'])
    df = pd.concat([categories, df], axis=1)
    df = df.drop_duplicates()
    df = df.drop(['child_alone'], axis=1)
    # Bug fix: the original used chained assignment
    # (df['related'][mask] = 0), which raises SettingWithCopyWarning and
    # may silently fail to write; .loc assigns reliably in place.
    df.loc[df['related'] == 2, 'related'] = 0
    return df
def save_data(df, database_filepath):
    """Persist `df` to the 'messages' table of a SQLite database.

    Any existing 'messages' table is replaced; the DataFrame index is not
    stored.  Always returns None.
    """
    engine = create_engine('sqlite:///' + database_filepath)
    df.to_sql('messages', con=engine, if_exists='replace', index=False)
    return None
def main():
    """CLI entry point: load, clean, and store the disaster-response data.

    Expects exactly three positional arguments: the messages CSV path, the
    categories CSV path, and the output SQLite database path; prints usage
    instructions otherwise.
    """
    if len(sys.argv) == 4:
        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
        print('Loading data...\n    MESSAGES: {}\n    CATEGORIES: {}'
              .format(messages_filepath, categories_filepath))
        df = load_data(messages_filepath, categories_filepath)
        print('Cleaning data...')
        df = clean_data(df)
        print('Saving data...\n    DATABASE: {}'.format(database_filepath))
        save_data(df, database_filepath)
        print('Cleaned data saved to database!')
    else:
        print('Please provide the filepaths of the messages and categories '\
              'datasets as the first and second argument respectively, as '\
              'well as the filepath of the database to save the cleaned data '\
              'to as the third argument. \n\nExample: python process_data.py '\
              'disaster_messages.csv disaster_categories.csv '\
              'DisasterResponse.db')
|
[
"eherdter@mail.usf.edu"
] |
eherdter@mail.usf.edu
|
e7336bb7129d0dba26d5d33533f5b0997f133562
|
b53869d9c0b38ecc5f2bef40f03e0146d05c67c5
|
/send_mail/core/kafka_connect.py
|
420f3ba573dc426a1325a1038a87f916c876b163
|
[] |
no_license
|
manhcuong2801/send_sms
|
564435736e163b41b06c6d53c79d41ac9ca1886d
|
8734ffa74ddf723852d01a4b4ecc9dc3dd4accef
|
refs/heads/master
| 2023-04-03T13:35:43.433808
| 2021-04-23T12:42:59
| 2021-04-24T03:52:22
| 360,880,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
import json as j
from django.conf import settings as s
from kafka import KafkaConsumer as KC, KafkaProducer as KP
class KafkaConnector:
    """Factory for kafka-python consumers/producers preconfigured for this app.

    Values are (de)serialized as JSON by default; pass
    ``is_handle_json=False`` to exchange raw bytes instead.
    """

    # broker address comes from Django settings (KAFKA_SERVER)
    _bootstrap_server = s.KAFKA_SERVER
    # defaults used when the caller does not name a topic / consumer group
    _topic = "topic_example"
    _group_id = "group_example"

    def get_consumer(
        self,
        topic: str = _topic,
        group_id: str = _group_id,
        is_handle_json: bool = True,
    ):
        """Return a KafkaConsumer subscribed to `topic`.

        When is_handle_json is True, each message value is parsed with
        json.loads before being handed to the caller.
        """
        if not is_handle_json:
            return KC(
                topic, group_id=group_id, bootstrap_servers=[self._bootstrap_server]
            )
        return KC(
            topic,
            group_id=group_id,
            bootstrap_servers=[self._bootstrap_server],
            value_deserializer=lambda v: j.loads(v),
        )

    def get_producer(self, is_handle_json: bool = True):
        """Return a KafkaProducer; JSON-encodes values unless disabled."""
        if not is_handle_json:
            return KP(bootstrap_servers=[self._bootstrap_server])
        return KP(
            bootstrap_servers=[self._bootstrap_server],
            value_serializer=lambda v: j.dumps(v).encode("utf-8"),
        )

    def send_message_to_topic(
        self,
        topic: str,
        bytes_msg: bytes = b"",
        json_msg: dict = {},
        is_handle_json: bool = True,
    ):
        """Send one message to `topic` and return the resulting future.

        bytes_msg is used in raw mode, json_msg in JSON mode.
        NOTE(review): the mutable default json_msg={} is shared across
        calls; it is only read here, but confirm no caller mutates it.
        """
        if not is_handle_json:
            producer = self.get_producer(is_handle_json=False)
            future = producer.send(topic, bytes_msg)
        else:
            producer = self.get_producer()
            future = producer.send(topic, json_msg)
        return future
|
[
"cuongle@tamdongtam.vn"
] |
cuongle@tamdongtam.vn
|
c246469f12df1abde9b82eeadc65ac655fed42e2
|
add649416e475ef5febb207ec3c90ef504a4d5b7
|
/Marie Laure/django/mycalendar2/mycalendar2/wsgi.py
|
11e4e77fa13f33700e778d5217741c38e3dbf3d6
|
[] |
no_license
|
juliencampus/python
|
e3bc77453d449533db02bc69376ea6d1a4f2e9ba
|
323013fa32be74571ccd665cd3faa74ff8a905f3
|
refs/heads/main
| 2022-12-29T09:09:08.917610
| 2020-10-23T14:55:14
| 2020-10-23T14:55:14
| 303,637,772
| 0
| 4
| null | 2020-10-21T11:12:06
| 2020-10-13T08:29:19
|
Python
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for mycalendar2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mycalendar2.settings')
application = get_wsgi_application()
|
[
"marie-laure.riedinger@le-campus-numerique.fr"
] |
marie-laure.riedinger@le-campus-numerique.fr
|
34fc9717d6ba5477e1aa8e8cc9c71b46b8ee7fd2
|
2f2feae3dee5847edbf95c1eeb14e656490dae35
|
/2022/day_13_distress_signal_1.py
|
e89f9fb5f20ecbd78b7b38f8d58eca40028031af
|
[] |
no_license
|
olga3n/adventofcode
|
32597e9044e11384452410b7a7dda339faf75f32
|
490a385fb8f1c45d22deb27bf21891e193fe58a2
|
refs/heads/master
| 2023-01-07T09:19:04.090030
| 2022-12-25T13:31:22
| 2022-12-25T13:31:22
| 163,669,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,209
|
py
|
#!/usr/bin/env python3
import sys
import json
from typing import Iterable, List, Any, Tuple, Optional
def is_right_order(left: List[Any], right: List[Any]) -> Optional[bool]:
    """Compare two packet lists per the AoC 2022 day-13 rules.

    Returns True when left < right, False when left > right, and None when
    the two compare equal (the caller keeps scanning the outer lists).
    """
    for pos in range(max(len(left), len(right))):
        if pos >= len(left):
            return True   # left ran out first -> right order
        if pos >= len(right):
            return False  # right ran out first -> wrong order
        a, b = left[pos], right[pos]
        if isinstance(a, int) and isinstance(b, int):
            if a != b:
                return a < b
            continue
        # promote a lone int to a one-element list and recurse on the pair
        verdict = is_right_order(
            [a] if isinstance(a, int) else a,
            [b] if isinstance(b, int) else b,
        )
        if verdict is not None:
            return verdict
    return None
def build_pairs(data: Iterable[str]) -> Iterable[Tuple[List[Any], List[Any]]]:
    """Yield consecutive non-blank lines of `data` as parsed JSON pairs.

    Blank lines separate pairs and are skipped; a trailing unpaired line
    is silently dropped.
    """
    pending = None
    for raw in data:
        if not raw.strip():
            continue
        if pending is None:
            pending = raw
        else:
            yield json.loads(pending), json.loads(raw)
            pending = None
def right_order_pairs(data: Iterable[str]) -> int:
    """Sum the 1-based indices of input pairs that are already ordered."""
    total = 0
    for position, (left, right) in enumerate(build_pairs(data), start=1):
        # is_right_order returns True/False/None; only True counts
        if is_right_order(left, right):
            total += position
    return total
def test_right_order_pairs():
data = [
'[1,1,3,1,1]',
'[1,1,5,1,1]',
'',
'[[1],[2,3,4]]',
'[[1],4]',
'',
'[9]',
'[[8,7,6]]',
'',
'[[4,4],4,4]',
'[[4,4],4,4,4]',
'',
'[7,7,7,7]',
'[7,7,7]',
'',
'[]',
'[3]',
'',
'[[[]]]',
'[[]]',
'',
'[1,[2,[3,[4,[5,6,7]]]],8,9]',
'[1,[2,[3,[4,[5,6,0]]]],8,9]'
]
assert right_order_pairs(data) == 13
def main():
    """Read the packet pairs from stdin and print the part-1 answer."""
    # sys.stdin is itself an iterable of lines, as right_order_pairs expects
    data = sys.stdin
    result = right_order_pairs(data)
    print(result)
|
[
"olga3n@gmail.com"
] |
olga3n@gmail.com
|
0d9c11e159814d4603e92a7da12ac0993d494912
|
db012fc9716bb27e9ff22078bc17edb7745b3f13
|
/examples/plot_classifier_dl85_plot_tree.py
|
c1d3f1657cd0965bb5b814f5562272bc9936fd33
|
[
"MIT"
] |
permissive
|
VivanVatsa/pydl8.5
|
df15b99c313da29b32a0571c59e90225aaa71566
|
8686839d000e47375b5ed70ad42828e6c3eef6e6
|
refs/heads/master
| 2023-08-14T11:25:06.511972
| 2021-09-21T18:50:13
| 2021-09-21T18:50:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
"""
==============================================
DL85Classifier example to export tree as image
==============================================
"""
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from dl85 import DL85Classifier
import graphviz
print("######################################################################\n"
"# DL8.5 default classifier #\n"
"######################################################################")
# read the dataset and split into features and targets
dataset = np.genfromtxt("../datasets/anneal.txt", delimiter=' ')
X, y = dataset[:, 1:], dataset[:, 0]
# split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
clf = DL85Classifier(max_depth=2)
clf.fit(X, y)
y_pred = clf.predict(X_test)
# show results
print("Model built in", round(clf.runtime_, 4), "seconds")
print("Found tree:", clf.tree_)
print("Confusion Matrix below\n", confusion_matrix(y_test, y_pred))
print("Accuracy on training set =", round(clf.accuracy_, 4))
print("Accuracy on test set =", round(accuracy_score(y_test, y_pred), 4))
# print the tree
dot = clf.export_graphviz()
graph = graphviz.Source(dot, format="png")
graph.render("plots/anneal_odt")
|
[
"aglingael@gmail.com"
] |
aglingael@gmail.com
|
c0d29ea3e56d0a9a1129476105c243a8a2566772
|
8d2a124753905fb0455f624b7c76792c32fac070
|
/pytnon-month01/周六练习-practice on saturday/独立完成/OOP-fanb-1_student_manager_system.py
|
370a4186757ac84e2f949eca27cb01e393c5348c
|
[] |
no_license
|
Jeremy277/exercise
|
f38e4f19aae074c804d265f6a1c49709fd2cae15
|
a72dd82eb2424e4ae18e2f3e9cc66fc4762ec8fa
|
refs/heads/master
| 2020-07-27T09:14:00.286145
| 2019-09-17T11:31:44
| 2019-09-17T11:31:44
| 209,041,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,533
|
py
|
#学生信息管理系统:
# 数据模型类:StudentModel
# 数据:编号 id,姓名 name,年龄 age,成绩 score
class StudentModel:
    """Plain data holder for one student record."""

    def __init__(self, name, age, score, id=0):
        # id stays 0 until the manager assigns a real one
        self.id = id
        self.name = name
        self.age = age
        self.score = score
# 逻辑控制类:StudentManagerController
# 数据:学生列表 __stu_list
# (#私有属性,提供只读)
# 行为:获取列表 stu_list,添加学生 add_student,删除学生remove_student,
# 修改学生update_student,根据成绩排序order_by_score。
class StudentManagerController:
    """Owns the student list and implements add/remove/update/sort logic."""

    # class-wide counter used to hand out unique, increasing student ids
    __stu_id = 1000

    def __init__(self):
        self.__stu_list = []

    @property
    def stu_list(self):
        """Read-only access to the internal student list."""
        return self.__stu_list

    def add_student(self, stu):
        """Assign the next id to `stu` and append it to the list."""
        StudentManagerController.__stu_id += 1
        stu.id = StudentManagerController.__stu_id
        self.__stu_list.append(stu)

    def remove_student(self, id):
        """Delete the student whose id matches; True on success, None otherwise."""
        for candidate in self.__stu_list:
            if candidate.id == id:
                self.__stu_list.remove(candidate)
                return True

    def update_student(self, stu):
        """Copy name/age/score from `stu` onto the record sharing its id.

        Returns True when a matching record was found, None otherwise.
        """
        for existing in self.__stu_list:
            if existing.id == stu.id:
                existing.name = stu.name
                existing.age = stu.age
                existing.score = stu.score
                return True

    def order_by_score(self):
        """Sort the list in place by score, ascending (exchange sort)."""
        n = len(self.__stu_list)
        for i in range(n - 1):
            for j in range(i + 1, n):
                if self.__stu_list[i].score > self.__stu_list[j].score:
                    self.__stu_list[i], self.__stu_list[j] = (
                        self.__stu_list[j], self.__stu_list[i])
# 界面视图类:StudentManagerView
# 数据:逻辑控制对象__manager
# 行为:显示菜单__display_menu,选择菜单项__select_menu_item,入口逻辑main,
# 输入学生__input_students,输出学生__output_students,
# 删除学生__delete_student,修改学生信息__modify_student
class StudentManagerView():
def __init__(self):
self.__manager = StudentManagerController()
def __display_menu(self):
print('''
学生信息管理系统1.0
+-----------------------+
| 0)退出管理系统 |
| 1)添加学生信息 |
| 2)显示学生信息 |
| 3)删除学生信息 |
| 4)修改学生信息 |
| 5)按照成绩排序 |
+-----------------------+
''')
def main(self):
choice = None
while choice != 0:
self.__display_menu()
choice = input('请输入选项:')
if choice == '0':
print('谢谢使用,退出!')
break
elif choice == '1':
self.__input_students()
elif choice == '2':
self.__output_students()
elif choice == '3':
self.__delete_student()
elif choice == '4':
self.__modify_student()
elif choice == '5':
self.__sort_by_score()
else:
print('请重新输入选项!')
def __input_students(self):
name = input('请输入学生姓名:')
age = int(input('请输入学生年龄:'))
score = int(input('请输入学生成绩:'))
stu = StudentModel(name,age,score)
self.__manager.add_student(stu)
print('添加学生信息成功!')
def __output_students(self):
print('学生信息:')
for item in self.__manager.stu_list:
print(item.id,item.name,item.age,item.score)
def __delete_student(self):
stu_id = int(input('请输入学生编号:'))
if self.__manager.remove_student(stu_id):
print('删除学生信息成功!')
else:
print('删除学生信息失败!')
def __modify_student(self):
id = int(input('请输入需要修改的学生ID:'))
name = input('请输入修改后学生姓名:')
age = int(input('请输入修改后学生年龄:'))
score = int(input('请输入修改后学生成绩:'))
stu = StudentModel(name, age, score, id)
if self.__manager.update_student(stu):
print('修改学生信息成功!')
else:
print('修改学生信息失败!')
def __sort_by_score(self):
self.__manager.order_by_score()
print('排序成功!')
view = StudentManagerView()
view.main()
#1.测试逻辑控制代码
#测试添加学员
# manger = StudentManagerController()
# s01 = StudentModel('许瑶',18,98)
# s02 = StudentModel('许仙',16,99)
# s03 = StudentModel('小青',15,79)
# s04 = StudentModel('姐夫',15,79)
# manger.add_student(s01)
# manger.add_student(s02)
# manger.add_student(s03)
# manger.add_student(s04)
# for item in manger.stu_list:
# print(item.id,item.name,item.age,item.score)
# # #manger.stu_list列表 保存学生对象
# # print(manger.stu_list[1].name)
# # #测试删除学员
# manger.remove_student(1004)
# for item in manger.stu_list:
# print('删除后:',item.id,item.name)
# # #测试修改学员
# manger.update_student(StudentModel('娘子',19,80,1001))
# for item in manger.stu_list:
# print('修改后:',item.id,item.name,item.age,item.score)
# # #测试按成绩排序
# manger.order_by_score()
# for item in manger.stu_list:
# print('按分数升序排列:',item.id,item.name,item.age,item.score)
|
[
"13572093824@163.com"
] |
13572093824@163.com
|
98cbe8d9f4b12239cf1d517d7cd9c165e3ac2876
|
83df3d7773a4c7c3da8824cd3e6b3f0d6e6cfa07
|
/world.py
|
92f49a7703868c98e84f84d8b7d82c02437552a6
|
[] |
no_license
|
azmzing/c
|
d4f8690ae0ad43c6441b6559995042ebe730108e
|
69b2511683d709961034ec46814400bccae0ae5b
|
refs/heads/master
| 2020-03-15T23:42:45.118141
| 2018-05-07T06:13:12
| 2018-05-07T06:13:12
| 132,399,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16
|
py
|
print"b hellow
|
[
"amazing.zanjun@outlook.com"
] |
amazing.zanjun@outlook.com
|
ef3863b249697253ae98c02f073111c5d9fb56a5
|
2406724e872efc10c25fff5fb4b551c65cf4f298
|
/Codes/Pong_game.py
|
48b58da41228fb79140ac11d9c18925f1ca9ffb7
|
[] |
no_license
|
Pradhyuman12/Beginner-Hacktoberfest
|
c6d57b692f34f19b3334a7b364ca084f2b85bf95
|
5e0184d0825e5bafbb4625c31396bca9e6792c7a
|
refs/heads/main
| 2023-08-30T11:31:58.281561
| 2021-10-29T18:19:26
| 2021-10-29T18:19:26
| 412,496,299
| 0
| 0
| null | 2021-10-01T14:20:41
| 2021-10-01T14:20:40
| null |
UTF-8
|
Python
| false
| false
| 2,339
|
py
|
import turtle
wn = turtle.Screen()
wn.title("Pong Game")
wn.bgcolor("black")
wn.setup(width=800, height=600)
wn.tracer()
#Score
score_a = 0
score_b = 0
#Paddle A
paddle_a = turtle.Turtle()
paddle_a.speed(0)
paddle_a.shape("square")
paddle_a.color("white")
paddle_a.shapesize(stretch_wid=5, stretch_len=1)
paddle_a.penup()
paddle_a.goto(-350, 0)
#Paddle B
paddle_b = turtle.Turtle()
paddle_b.speed(0)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.shapesize(stretch_wid=5, stretch_len=1)
paddle_b.penup()
paddle_b.goto(350, 0)
#Ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("square")
ball.color("white")
ball.penup()
ball.goto(0, 0)
ball.dx = 2
ball.dy = -2
# Pen
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Player A : 0 Player B : 0", align="center", font=("Courier", 24, "normal"))
#Function
def paddle_a_up():
    """Raise the left paddle by 20 pixels."""
    paddle_a.sety(paddle_a.ycor() + 20)
def paddle_a_down():
    """Lower the left paddle by 20 pixels."""
    paddle_a.sety(paddle_a.ycor() - 20)
def paddle_b_up():
    """Raise the right paddle by 20 pixels."""
    paddle_b.sety(paddle_b.ycor() + 20)
def paddle_b_down():
    """Lower the right paddle by 20 pixels."""
    paddle_b.sety(paddle_b.ycor() - 20)
#Keyboard binding
# Keyboard bindings: w/s move the left paddle, Up/Down move the right one.
wn.listen()
wn.onkeypress(paddle_a_up, "w")
wn.onkeypress(paddle_a_down, "s")
wn.onkeypress(paddle_b_up, "Up")
wn.onkeypress(paddle_b_down, "Down")
#Main game loop
#Main game loop (runs forever; one iteration per frame)
while True:
    wn.update()
    #MOVE THE BALL one step along its velocity vector
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    # Border checking: bounce off the top and bottom walls
    if ball.ycor() > 290:
        ball.sety(290)
        ball.dy *= -1
    if ball.ycor() < -290:
        ball.sety(-290)
        ball.dy *= -1
    # Ball passed the right paddle: player A scores; reset ball to centre
    # and send it back the other way, then redraw the scoreboard.
    if ball.xcor() > 390:
        ball.goto(0,0)
        ball.dx *= -1
        score_a += 1
        pen.clear()
        pen.write("Player A : {}  Player B : {}".format(score_a, score_b), align="center", font=("Courier", 24, "normal"))
    # Ball passed the left paddle: player B scores.
    if ball.xcor() < -390:
        ball.goto(0,0)
        ball.dx *= -1
        score_b += 1
        pen.clear()
        pen.write("Player A : {}  Player B : {}".format(score_a, score_b), align="center", font=("Courier", 24, "normal"))
    # Paddle collisions: when the ball is inside the x-window next to a
    # paddle and within 40 px of the paddle centre, reflect it.
    if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 40 and ball.ycor() > paddle_b.ycor() -40):
        ball.setx(340)
        ball.dx *= -1
    if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 40 and ball.ycor() > paddle_a.ycor() -40):
        ball.setx(-340)
        ball.dx *= -1
|
[
"noreply@github.com"
] |
noreply@github.com
|
00333130e10a7ca7f1207ec2896cc61f017a3839
|
24e2b10c6d13deac27bd4b8af07907de4c9e8bcd
|
/traingle.py
|
413d3bf8ba2ac1ff1e4a678063e1efb7d2d49f1a
|
[] |
no_license
|
adilreza/docker-env-var-python
|
a5b2e0269511d588bc2de30ab35f09a86138419a
|
156d3fa9b3fc25e56a40edc091818d6c86ba017b
|
refs/heads/main
| 2023-03-29T12:02:06.158341
| 2021-04-08T12:25:56
| 2021-04-08T12:25:56
| 355,896,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import os
def triangle(n):
k = n - 1
for i in range(0, n):
for j in range(0, k):
print(end=" ")
k = k - 1
for j in range(0, i+1):
print("* ", end="")
print("\r")
# Pyramid height comes from the TR_NUMBER environment variable.
# NOTE(review): if TR_NUMBER is unset, os.environ.get returns None and
# int(None) raises TypeError — a default value may be wanted; confirm.
n = os.environ.get('TR_NUMBER')
# TR_NUMBER=8 python3 traingle.py
# docker run -e TR_NUMBER=7 bff2ba77dbd1
nn = int(n)
triangle(nn)
|
[
"adil.reza@selise.ch"
] |
adil.reza@selise.ch
|
edbc5843172b296c275bf4d38092d8dabd6213fe
|
bd3b1eaedfd0aab45880c100b86bc4714149f5cd
|
/student/dyp1/11.py
|
c6e63aa6b223b8b5cdbb13353fe5872beeeea0a7
|
[] |
no_license
|
ophwsjtu18/ohw19f
|
a008cd7b171cd89fa116718e2a5a5eabc9f7a93e
|
96dedf53a056fbb4d07c2e2d37d502171a6554a6
|
refs/heads/master
| 2020-08-08T12:59:38.875197
| 2020-04-01T10:38:14
| 2020-04-01T10:38:14
| 213,835,959
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
import numpy as np
import cv2
capture = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('C:\\Users\\DING-DING\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('C:\\Users\\DING-DING\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\cv2\\data\\haarcascade_eye.xml')
while(True):
ret, frame = capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
img = cv2.rectangle(gray,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
def hhh( lists ):
for (x,y,w,h) in lists:
a = x
for num in range(1,4):
for num in range(1,4):
cv2.rectangle(img,(x,y),(x+int(w/3),y+int(h/3)),(255,0,0),2)
x+=int(w/3)
x=a
y+=int(h/3)
hhh(faces)
cv2.imshow('frame',gray)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
noreply@github.com
|
9ee04dc56eec32ca912fa8b81136a49356550e03
|
dfbe04629c68e49c0671b0ed1890d82d96180164
|
/graphsage/datafetcher.py
|
28421aa404bbc8ea1a9ba08303cc535fc7c67f45
|
[] |
no_license
|
bluelancer/MLAdv2020Proj
|
417d9c3853297541d48036f3af5999aea68465ad
|
7057492f7b4f171aa1bdf5a6f792c5fbdc679d35
|
refs/heads/main
| 2023-04-30T02:47:49.581387
| 2021-01-12T11:47:17
| 2021-01-12T11:47:17
| 325,494,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,263
|
py
|
from collections import defaultdict
from scipy.io import mmread
import numpy as np
class DataFetcher:
    """Load graph datasets from the local ``../dataset_UNRL`` tree.

    The citation loaders return ``(feat_data, labels, adj_lists, link_list)``:
    a dense node-feature matrix, int64 class labels, symmetric adjacency
    sets keyed by node index, and positive link tuples ``(src, dst, 1)``.
    """
    def __init__(self):
        return
    def load(self, dataset):
        # Dispatch by dataset name; silently returns None for other names.
        if dataset == 'cora':
            return self.load_cora()
        elif dataset == 'pubmed':
            return self.load_pubmed()
    def load_cora(self):
        """Parse cora.content (features + labels) and cora.cites (edges)."""
        # Cora sizes are fixed: 2708 papers, 1433 bag-of-words features.
        num_nodes = 2708
        num_feats = 1433
        feat_data = np.zeros((num_nodes, num_feats))
        labels = np.empty((num_nodes, 1), dtype=np.int64)
        node_map = {}   # paper id string -> row index
        label_map = {}  # class name -> integer label
        with open("../dataset_UNRL/citation/cora/cora.content") as fp:
            for i, line in enumerate(fp):
                # line = <paper id> <1433 binary features> <class label>
                info = line.strip().split()
                for j in range(len(info) - 2):
                    feat_data[i, j] = float(info[j + 1])
                # feat_data[i,:] = map(float(), info[1:-1])
                node_map[info[0]] = i
                if not info[-1] in label_map:
                    label_map[info[-1]] = len(label_map)
                labels[i] = label_map[info[-1]]
        adj_lists = defaultdict(set)
        link_list = []
        with open("../dataset_UNRL/citation/cora/cora.cites") as fp:
            # Edges are stored both ways in adj_lists (undirected graph).
            for i, line in enumerate(fp):
                info = line.strip().split()
                paper1 = node_map[info[0]]
                paper2 = node_map[info[1]]
                adj_lists[paper1].add(paper2)
                adj_lists[paper2].add(paper1)
                link_list.append((paper1, paper2, 1))
        return feat_data, labels, adj_lists, link_list
    def load_pubmed(self):
        """Parse the tab-separated Pubmed-Diabetes node and edge files."""
        # hardcoded for simplicity...
        num_nodes = 19717
        num_feats = 500
        feat_data = np.zeros((num_nodes, num_feats))
        labels = np.empty((num_nodes, 1), dtype=np.int64)
        node_map = {}
        with open("../dataset_UNRL/citation/pubmed-data/Pubmed-Diabetes.NODE.paper.tab") as fp:
            fp.readline()
            # Second header line declares the feature columns ("numeric:<word>:...").
            feat_map = {entry.split(":")[1]: i - 1 for i, entry in enumerate(fp.readline().split("\t"))}
            for i, line in enumerate(fp):
                info = line.split("\t")
                node_map[info[0]] = i
                # Label field looks like "label=<1..3>"; shift to 0-based.
                labels[i] = int(info[1].split("=")[1]) - 1
                for word_info in info[2:-1]:
                    word_info = word_info.split("=")
                    feat_data[i][feat_map[word_info[0]]] = float(word_info[1])
        adj_lists = defaultdict(set)
        with open("../dataset_UNRL/citation/pubmed-data/Pubmed-Diabetes.DIRECTED.cites.tab") as fp:
            fp.readline()
            fp.readline()
            link_list = []
            # Edges are directed in the file but stored symmetrically here.
            for line in fp:
                info = line.strip().split("\t")
                paper1 = node_map[info[1].split(":")[1]]
                paper2 = node_map[info[-1].split(":")[1]]
                adj_lists[paper1].add(paper2)
                adj_lists[paper2].add(paper1)
                link_list.append((paper1, paper2, 1))
        return feat_data, labels, adj_lists, link_list
    def load_blogcata(self):
        """Read the BlogCatalog adjacency matrix (MatrixMarket format)."""
        filename = '../dataset_UNRL/soc/soc-BlogCatalog/soc-BlogCatalog.mtx'
        link_matrix = mmread(filename)
        return link_matrix
if __name__ == '__main__':
    # Smoke test: load the BlogCatalog matrix when run as a script.
    datafetcher = DataFetcher()
    datafetcher.load_blogcata()
|
[
"zhaoyu@zhaoyudeMacBook-Pro.local"
] |
zhaoyu@zhaoyudeMacBook-Pro.local
|
a6d438db0bc275cba915649a275ab52409197ac7
|
059b6f2963515af4ee4a5342f45ab05a4f431b60
|
/string reverse.py
|
2e5ff44a2f7f05c080f8e1c3b9e5f2ab8162ad8f
|
[] |
no_license
|
Surya-Narayanan0503/Python-Programs
|
70c20c0b1e07420b3058bbdd6ac88bcb9b1c273a
|
2fcdc31ed3c4cc068f2a23ef02a465673c0700e7
|
refs/heads/master
| 2020-06-20T02:40:16.488314
| 2019-07-19T08:49:07
| 2019-07-19T08:49:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
# Read one line from stdin and print it reversed (negative-step slice).
n=input()
i=n[::-1]
print(i)
|
[
"noreply@github.com"
] |
noreply@github.com
|
1f54af48b0de5de3deb1326d6dfc2e3b9b08012e
|
7246faf9a222269ce2612613f58dc5ff19091f10
|
/baekjoon/3000~5999/4949_균형잡힌세상.py
|
69e300ec26003ff839d8917a542427b2e7f68cc4
|
[] |
no_license
|
gusdn3477/Algorithm_Study
|
87a2eb72a8488d9263a86db70dadc7944434d41d
|
3fefe1dcb40122157845ffc542f41cb097711cc8
|
refs/heads/main
| 2023-08-30T12:18:21.412945
| 2021-09-28T13:00:11
| 2021-09-28T13:00:11
| 308,364,230
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
# Baekjoon 4949 "Balanced World": for each input line, report whether its
# round and square brackets are balanced; a single "." terminates input.
a = input()
while a != '.':
    poc = []  # stack of currently open brackets (plus a sentinel on error)
    for i in range(len(a)):
        if a[i] == '(' or a[i] == '[':
            poc.append(a[i])
        if a[i] == ')':
            # Mismatch or nothing open: push the offender so the stack is
            # non-empty (forcing "no") and stop scanning this line.
            if not poc or poc[-1] != '(':
                poc.append(a[i])
                break
            if poc[-1] == '(':
                poc.pop()
        if a[i] == ']':
            if not poc or poc[-1] != '[':
                poc.append(a[i])
                break
            if poc[-1] == '[':
                poc.pop()
    # Balanced iff every opener was matched (empty stack, no early break).
    if not poc:
        print("yes")
    else:
        print("no")
    poc.clear()
    a = input()
|
[
"gusdn3477@naver.com"
] |
gusdn3477@naver.com
|
c4d7e36734ac5ab04af3745ef7239a1f37d315fd
|
1005b44f86523c377b607c69d96a0f6cbfbd62c7
|
/Weather ToPy_bot/bot.py
|
9cb6e7c3d32246ca654b13e6d664c8f6a65b966c
|
[] |
no_license
|
Tofan93/Python
|
7e3308b99ea8f0db374a74d778d0fe6229bc0f59
|
20c0085bf8592bc2bee6ca35360701054993fd2f
|
refs/heads/master
| 2022-11-11T17:53:13.606553
| 2020-07-09T06:31:24
| 2020-07-09T06:31:24
| 258,720,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
# Telegram weather bot: treats any text message as a city name, fetches the
# current weather via OpenWeatherMap and replies in Russian.
import pyowm
import telebot
# SECURITY: the OWM API key and Telegram bot token are hard-coded below;
# they should be moved to environment variables before publishing.
owm = pyowm.OWM('b819a91869f45bb714b2cb0a0c647732', language = 'ru')
bot = telebot.TeleBot('1223510132:AAEg4XkxdnSa92GP4dfcamjb3uznTB9rkzM')
@bot.message_handler(content_types=['text'])
def send_echo(message):
    # Look up the city named in the message and build the reply text.
    obs = owm.weather_at_place(message.text)
    city = obs.get_weather()
    temp = city.get_temperature('celsius')['temp']
    # NOTE(review): the status line and the temperature line are joined
    # without any separator — a newline between them was likely intended.
    answer = 'В городе ' + message.text + ' сейчас ' + city.get_detailed_status()
    answer += 'Температура в районе ' + str(round(temp)) + ' градусов' + '\n\n'
    # Clothing advice by temperature band.
    if temp<10:
        answer += 'Очень холодно, оденься потеплее))'
    elif temp<17:
        answer += 'Прохладно, лучше оденься:)'
    else:
        answer += 'Не холодно, хоть в трусах иди:)'
    bot.send_message(message.chat.id, answer)
bot.polling(none_stop = True)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6696d49ec45e3e7ddf84f6655775d70902a6d38f
|
9fbec0955358e0dc3c24e2b7e5e7aeb4fa12f963
|
/info.py
|
5eacb8d09d67a42912218c1f4e3c65871f39fe3c
|
[] |
no_license
|
AyselHavutcu/FlaskBlog
|
9b0142d7a400a6b73f9367d42a09e421e1ab1a0b
|
820e1e4740eb1f31ed302b208f61af0ba32f8f14
|
refs/heads/master
| 2020-07-26T01:20:28.171975
| 2019-09-14T18:30:30
| 2019-09-14T18:30:30
| 208,485,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,188
|
py
|
# Flask app bootstrap (third-party deps: flask, flask_mysqldb, wtforms, passlib).
from flask import Flask,render_template,flash,redirect,url_for,session,logging,request  # brings up the web server
from flask_mysqldb import MySQL  # MySQL integration
# Form building blocks. FIX: the original imported the non-existent name
# "From" (a typo for "Form"), which raises ImportError at startup.
from wtforms import Form,StringField,PasswordField,validators
from passlib.hash import sha256_crypt  # password hashing
app = Flask(__name__)
#every python file is in fact a module
#we can use these python files in two ways
#1: define functions inside the file and call them later from python with
#ordinary function calls
#2: import the file as a module from another python file, taking only the
#function definitions while the top-level call sites do not execute
#when the file is run from the terminal, __name__ equals "__main__";
#otherwise it was imported as a module and the guarded calls must not run
@app.route("/") # decorator run for every request to the site root
def index():
    # Handles the request made above; we must return a response.
    numbers = [1,2,3,4,5]
    return render_template("index.html",numbers = numbers)
#we can define additional request handlers as well
@app.route("/about")
def about():
    """Render the static about page."""
    return render_template("about.html")
# Dynamic URL: the <string:id> path segment is handed to the view as `id`.
@app.route("/article/<string:id>")
def detail(id):
    """Return a plain-text response echoing the requested article id."""
    label = "Article ID:"
    return label + id
if __name__ == "__main__":
    app.run(debug=True)
    # debug=True enables the debugger/reloader, so any error during
    # development is shown as an interactive traceback page
# The Jinja templates combine HTML/CSS/Bootstrap markup with Python values:
# a view function produces a value, hands it to render_template(), and the
# rendered template is returned as the HTTP response.
# This is why templates must be rendered rather than served as static files —
# rendering is what binds Python values into the page.
[
"42868243+AyselHavutcu@users.noreply.github.com"
] |
42868243+AyselHavutcu@users.noreply.github.com
|
9c7357576d312b577fde01d5955822e944b46c7b
|
d0f11aa36b8c594a09aa06ff15080d508e2f294c
|
/leecode/1-500/401-500/472-连接词.py
|
4edb1540db15225aeb711ca0bd0954fa23641a7b
|
[] |
no_license
|
saycmily/vtk-and-python
|
153c1fe9953fce685903f938e174d3719eada0f5
|
5045d7c44a5af5c16df5a3b72c157e9a2928a563
|
refs/heads/master
| 2023-01-28T14:02:59.970115
| 2021-04-28T09:03:32
| 2021-04-28T09:03:32
| 161,468,316
| 1
| 1
| null | 2023-01-12T05:59:39
| 2018-12-12T10:00:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
class Solution:
    def findAllConcatenatedWordsInADict(self, words):
        """Return the words that can be built by concatenating shorter
        words from the same list (LeetCode 472), using a dict-based trie.

        Note: sorts *words* in place by length, shortest first; results
        are returned in that processing order.
        """
        def can_segment(word, trie):
            # True if *word* splits entirely into words already in *trie*.
            if not word:
                return True
            node = trie
            for idx, ch in enumerate(word):
                node = node.get(ch, None)
                if not node:
                    return False
                # A stored word ends here: try to segment the remainder.
                if '#' in node and can_segment(word[idx + 1:], trie):
                    return True
            return False
        def add_word(word, node):
            # Insert *word*; the '#' key marks end-of-word.
            for ch in word:
                node = node.setdefault(ch, {})
            node['#'] = {}
        words.sort(key=len)
        trie = {}
        found = []
        for word in words:
            if not word:
                continue
            if can_segment(word, trie):
                found.append(word)
            else:
                add_word(word, trie)
        return found
|
[
"1786386686@qq.com"
] |
1786386686@qq.com
|
92c7ef3d0dd3f76c0176fecc684bb6e824e0fa9f
|
1521645e97dfa364ce4ecc34ef97aa00510a5d7c
|
/cart/test_forms.py
|
aa48460ebb4a3c7ccf62ff39049c915e09f89007
|
[] |
no_license
|
kajamiko/u_m_website
|
0327903646ae21a024e0d95df937c49605c9e615
|
158953e5e375856c80ab34859c581b628681657e
|
refs/heads/master
| 2022-12-14T03:38:42.702543
| 2018-10-23T16:08:30
| 2018-10-23T16:08:30
| 139,005,832
| 0
| 0
| null | 2022-12-08T02:48:16
| 2018-06-28T10:53:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
from django.test import TestCase
from .forms import CartAddTicketForm
from django.contrib.auth.models import User
class TestTicketForm(TestCase):
    """Validation rules of CartAddTicketForm: 'donation' is required,
    'update' is optional (the form serves both add and update flows)."""
    def test_for_updating(self):
        """
        Normal, as from a real view
        """
        form = CartAddTicketForm({'donation': 20, 'update': True})
        self.assertTrue(form.is_valid())
    def test_for_update_with_no_donation_value(self):
        """
        Donation value is required
        """
        form = CartAddTicketForm({'update': True})
        self.assertFalse(form.is_valid())
    def test_for_add_with_no_update(self):
        """
        Update is not required, as it is also an add form
        """
        form = CartAddTicketForm({'donation': 20})
        self.assertTrue(form.is_valid())
|
[
"kajaths@gmail.com"
] |
kajaths@gmail.com
|
05fa6c5fd8aacf359f1d9088e7f7c6cdb1a8d9ab
|
08fe9b6afba5708f8d51d79c1d503b8e87ff96a8
|
/Sets/symmetric_diff.py
|
844629f4671501b4b5d6caaf92c07ccc6faf3d96
|
[] |
no_license
|
Harsh-2909/HackerRank-Python-Solutions
|
667e83290c1421150b8ce782a2ffad82b84c4f57
|
2a6c5f013870791eb45c34e470c2a797a49f4cc1
|
refs/heads/master
| 2020-05-30T21:54:37.185556
| 2019-08-10T06:17:10
| 2019-08-10T06:17:10
| 189,983,460
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# HackerRank "Symmetric Difference": read two integer sets (each preceded
# by its size) and print their symmetric difference in ascending order.
# NOTE: the size lines m and n are consumed but never used.
m = int(input())
m_set = set(map(int, input().split()))
n = int(input())
n_set = set(map(int, input().split()))
# Elements in exactly one of the sets (equivalent to m_set ^ n_set).
m_set, n_set = list(m_set - n_set), list(n_set - m_set)
l = [*m_set, *n_set]
l.sort()
for i in l:
    print(i)
|
[
"harsh29092000@gmail.com"
] |
harsh29092000@gmail.com
|
3351932d3d3a75e35b6b1fcbd967fa8b054bd65b
|
13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab
|
/home--tommy--mypy/mypy/lib/python2.7/site-packages/theano/sandbox/cuda/tests/test_tensor_op.py
|
cb9162354ac7fa9120cf4dd3b05d616e784e0f36
|
[
"Unlicense"
] |
permissive
|
tommybutler/mlearnpy2
|
8ec52bcd03208c9771d8d02ede8eaa91a95bda30
|
9e5d377d0242ac5eb1e82a357e6701095a8ca1ff
|
refs/heads/master
| 2022-10-24T23:30:18.705329
| 2022-10-17T15:41:37
| 2022-10-17T15:41:37
| 118,529,175
| 0
| 2
|
Unlicense
| 2022-10-15T23:32:18
| 2018-01-22T23:27:10
|
Python
|
UTF-8
|
Python
| false
| false
| 5,283
|
py
|
"""
This file test tensor op that should also operate on CudaNdaray.
"""
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
from nose_parameterized import parameterized
import numpy
import theano
from theano import tensor
import theano.tensor as T
import theano.tests.unittest_tools as utt
# Skip test if cuda_ndarray is not available.
import theano.sandbox.cuda as cuda
from theano.tensor.nnet.tests import test_conv3d2d
# Skip the whole module when CUDA support is not built/available.
if cuda.cuda_available is False:
    raise SkipTest('Optional package cuda disabled')
# Build paired compilation modes with/without the gpu optimizer; under
# FAST_COMPILE, upgrade to FAST_RUN so GPU opts actually fire.
if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
    mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
    mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_shape_i():
    """x.shape[1] on a CudaNdarray input returns the right size and
    compiles to a single Shape_i node (outside FAST_COMPILE)."""
    x = cuda.ftensor3()
    v = cuda.CudaNdarray(numpy.zeros((3, 4, 5), dtype='float32'))
    f = theano.function([x], x.shape[1])
    topo = f.maker.fgraph.toposort()
    assert f(v) == 4
    if theano.config.mode != 'FAST_COMPILE':
        assert len(topo) == 1
        assert isinstance(topo[0].op, T.opt.Shape_i)
def test_shape():
    """x.shape on a CudaNdarray compiles to three Shape_i nodes feeding a
    MakeVector (outside FAST_COMPILE) and returns the full shape."""
    x = cuda.ftensor3()
    v = cuda.CudaNdarray(numpy.zeros((3, 4, 5), dtype='float32'))
    f = theano.function([x], x.shape)
    topo = f.maker.fgraph.toposort()
    assert numpy.all(f(v) == (3, 4, 5))
    if theano.config.mode != 'FAST_COMPILE':
        assert len(topo) == 4
        assert isinstance(topo[0].op, T.opt.Shape_i)
        assert isinstance(topo[1].op, T.opt.Shape_i)
        assert isinstance(topo[2].op, T.opt.Shape_i)
        assert isinstance(topo[3].op, T.opt.MakeVector)
def test_softmax_optimizations():
    """The gpu optimizer rewrites crossentropy(softmax(x), one_of_n) into
    the fused GPU crossentropy-softmax-argmax op."""
    from theano.tensor.nnet.nnet import softmax, crossentropy_categorical_1hot
    x = tensor.fmatrix('x')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot
    op(x, one_of_n)
    fgraph = theano.gof.FunctionGraph(
        [x, one_of_n],
        [op(softmax(x), one_of_n)])
    assert fgraph.outputs[0].owner.op == op
    mode_with_gpu.optimizer.optimize(fgraph)
    # After optimization the output chain is OutputGuard -> host_from_gpu
    # -> fused GPU crossentropy op.
    assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
    assert fgraph.outputs[0].owner.inputs[0].owner.op == cuda.host_from_gpu
    assert fgraph.outputs[0].owner.inputs[0].owner.inputs[0].owner.op == cuda.nnet.gpu_crossentropy_softmax_argmax_1hot_with_bias
def test_may_share_memory_cuda():
    """may_share_memory on CudaNdarrays: identity, views and reshapes of
    the same array share memory; distinct arrays and numpy arrays do not.
    Also checks the raise_other_type behaviour for non-array arguments."""
    from theano.misc.may_share_memory import may_share_memory
    a = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
    b = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
    na = numpy.zeros((3, 4))
    nb = numpy.zeros((3, 4))
    va = a.view()
    vb = b.view()
    ra = a.reshape((4, 3))
    rb = b.reshape((4, 3))
    # can't test the transpose as ta._strides = is not implemented
    # manual transpose of a
    # ta = a.reshape((4,3))
    # ta._strides = (ta._strides[1],ta._strides[0])#not implemented
    # elem_size=elem_size = numpy.zeros(0,dtype=a.dtype).dtype.itemsize
    # ta.gpudata += ta.size*elem_size
    # Each pair is checked in both argument orders (must be symmetric).
    for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
                        (a, na, False), (b, nb, False),
                        (na, b, False), (nb, a, False),
                        (a, va, True), (b, vb, True),
                        (va, b, False), (a, vb, False),
                        (a, ra, True), (b, rb, True),
                        (ra, b, False), (a, rb, False), ]:
        assert may_share_memory(a_, b_) == rep
        assert may_share_memory(b_, a_) == rep
    # test that it raise error when needed.
    for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
        assert may_share_memory(a_, b_, False) == rep
        assert may_share_memory(b_, a_, False) == rep
        try:
            may_share_memory(a_, b_)
            raise Exception("An error was expected")
        except TypeError:
            pass
        try:
            may_share_memory(b_, a_)
            raise Exception("An error was expected")
        except TypeError:
            pass
def test_deepcopy():
    """An identity function must return a COPY of its CudaNdarray input
    (not the same object), under both the c and python linkers."""
    a = cuda.fmatrix()
    a_v = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
    # We force the c code to check that we generate c code
    mode = theano.Mode("c", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
    # We force the python linker as the default code should work for this op
    mode = theano.Mode("py", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
def test_get_diagonal_subtensor_view():
    """Re-run the conv3d2d diagonal-subtensor test with CudaNdarray storage."""
    test_conv3d2d.test_get_diagonal_subtensor_view(wrap=cuda.CudaNdarray)
@parameterized.expand(('valid', 'full'), utt.custom_name_func)
def test_conv3d(border_mode):
    """Run the shared conv3d check on the GPU for both border modes."""
    test_conv3d2d.check_conv3d(border_mode=border_mode,
                               mode=mode_with_gpu,
                               shared=cuda.shared_constructor)
|
[
"tbutler.github@internetalias.net"
] |
tbutler.github@internetalias.net
|
713b479653ed7764eabad8e061233c7fc1086f24
|
0c2ca3b3c7f307c29f45957e87ed940c23571fae
|
/fhirclient/models/bodysite_tests.py
|
a3aaa3593967b5390640ec04095fcc47317b4e4a
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
myungchoi/client-py-1.0.3
|
49c3d15b8dfb845e7cbc933084ed5fcc37e7c4ed
|
08e4e5828fb461c105907fd454b19dfc8463aad8
|
refs/heads/master
| 2021-06-25T04:36:26.952685
| 2021-02-11T16:27:26
| 2021-02-11T16:27:26
| 209,669,881
| 0
| 0
|
NOASSERTION
| 2021-03-20T01:45:42
| 2019-09-20T00:11:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,663
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-03-23.
# 2016, SMART Health IT.
import os
import io
import unittest
import json
from . import bodysite
from .fhirdate import FHIRDate
class BodySiteTests(unittest.TestCase):
    """Round-trip (JSON -> model -> JSON -> model) tests for the generated
    FHIR BodySite resource class."""
    def instantiate_from(self, filename):
        # Fixtures live under $FHIR_UNITTEST_DATADIR (or the CWD if unset).
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("BodySite", js["resourceType"])
        return bodysite.BodySite(js)
    def testBodySite1(self):
        # Parse the example fixture, serialize it back and re-parse; both
        # instances must expose identical field values.
        inst = self.instantiate_from("bodysite-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a BodySite instance")
        self.implBodySite1(inst)
        js = inst.as_json()
        self.assertEqual("BodySite", js["resourceType"])
        inst2 = bodysite.BodySite(js)
        self.implBodySite1(inst2)
    def implBodySite1(self, inst):
        # Field-by-field expectations for the bodysite-example fixture.
        self.assertEqual(inst.code.coding[0].code, "53120007")
        self.assertEqual(inst.code.coding[0].display, "Arm")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.text, "Arm")
        self.assertEqual(inst.description, "front of upper left arm directly below the tattoo")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.identifier[0].system, "http://www.acmehosp.com/bodysites")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "12345")
        self.assertEqual(inst.image[0].contentType, "image/png;base64")
        self.assertEqual(inst.image[0].title, "ARM")
        self.assertEqual(inst.modifier[0].coding[0].code, "419161000")
        self.assertEqual(inst.modifier[0].coding[0].display, "Unilateral left")
        self.assertEqual(inst.modifier[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.modifier[0].text, "Left")
        self.assertEqual(inst.modifier[1].coding[0].code, "261183002")
        self.assertEqual(inst.modifier[1].coding[0].display, "Upper")
        self.assertEqual(inst.modifier[1].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.modifier[1].text, "Upper")
        self.assertEqual(inst.modifier[2].coding[0].code, "255549009")
        self.assertEqual(inst.modifier[2].coding[0].display, "Anterior")
        self.assertEqual(inst.modifier[2].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.modifier[2].text, "Anterior")
        self.assertEqual(inst.text.status, "generated")
|
[
"myungchoi@gmail.com"
] |
myungchoi@gmail.com
|
c57c61d86c5572b1948a7a0502578e0809ad170e
|
e36472948f74fd5ed35fc64801a59db4efa27070
|
/part_1/04_6_test.py
|
174139579a5653b3edaa5a336c79a87ac1f5214f
|
[] |
no_license
|
anton1k/python_crash_course
|
051aad7c5a043830d8cc9e5fd314f568bf0f4a53
|
80f302074e5fef48fb40e72f7d79ab4b8658b38a
|
refs/heads/master
| 2020-07-18T23:28:00.871466
| 2019-09-04T14:06:12
| 2019-09-04T14:06:12
| 206,333,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
# Odd integers 1..19: print the whole list, then each value on its own line.
square = list(range(1, 21, 2))
print(square)
for value in square:
    print(value)
|
[
"40913464+anton1k@users.noreply.github.com"
] |
40913464+anton1k@users.noreply.github.com
|
9f704f4065654f070e858bc08858abfdfaeb1745
|
796198b4613ae30ff7735d7a8473064b8ecb0247
|
/abc140/D.py
|
cc3f77d7f92a52f460d82787bdd62c4304943b30
|
[] |
no_license
|
Tomoki-Kikuta/atcoder
|
993cb13ae30435d02ea2e743cf3cead1a7882830
|
97b886de867575084bd1a70310a2a9c1c514befe
|
refs/heads/master
| 2021-07-16T15:14:00.706609
| 2020-06-29T06:15:13
| 2020-06-29T06:15:13
| 184,001,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
# AtCoder ABC140 D: n people, k operations, s = direction string.
n,k = map(int,input().split())
s = input()
# h = number of adjacent equal-character pairs (current happiness).
h = 0
for i in range(n-1):
    if s[i] == s[i+1]:
        h+=1
# Each operation can raise the count by at most 2; cap at the maximum n-1.
print(min([n-1,h+2*k]))
|
[
"tomoki0819@akane.waseda.jp"
] |
tomoki0819@akane.waseda.jp
|
2602f0d5d20194361a9b3aaf0ea2586b950fa49b
|
5af72fd35f3f967be5b6c195eaedd8739df3ee47
|
/SnakeBlock.py
|
fb9c5afcab27983b4ec802adfbba6fe9c6d19186
|
[] |
no_license
|
tomluko/PythonSnake
|
0d43e0d7ba37fd4ca29b37856294d5f63502412b
|
e336cf1853fa060f3b33445d485a2b353bb0dccc
|
refs/heads/master
| 2021-01-10T02:43:08.590884
| 2015-06-02T19:52:54
| 2015-06-02T19:52:54
| 36,740,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
import Block
#snakes body segment
class SnakeBlock(Block.Block):
    """One green segment of the snake's body: always solid, never removable."""
    def __init__(self, size, position):
        # Base Block signature: (solid, removable, position, size, color).
        super(SnakeBlock, self).__init__(True, False, position, size, (0, 153, 0))
|
[
"tomazasax@yahoo.com"
] |
tomazasax@yahoo.com
|
4d5620e88c6380c33efa91a55293aac691fa505e
|
64cacb589af13865d8c1aef9b27e65bfed742967
|
/md_image_backup_py3.py
|
576aa084a8a8b6120e98074c4de692836ce5a214
|
[] |
no_license
|
eggfly/markdown-img-backup
|
6f984a380ca837b69446f3e805cfeba19a8dd40f
|
6d0e5bb2e679b081ca0b960869d87a17b5e1cdda
|
refs/heads/master
| 2022-01-11T17:53:49.165213
| 2018-12-26T00:30:34
| 2018-12-26T00:30:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
# coding=utf-8
import sys
import os
import re
import requests
import urllib.request
def backup():
    """Back up the images of one markdown file (argv[1]) or, when no
    argument is given, of every .md file under the configured directory.

    NOTE(review): '你的markdown文件路径' means "your markdown file path" —
    it is a placeholder the user must replace; confirm before running.
    """
    try:
        # Back up the images of the file named on the command line.
        download(str('你的markdown文件路径' + sys.argv[1]))
    except IndexError:
        # No argument: back up every .md file under the folder.
        search('你的markdown文件路径', '.md')
def search(path, word):
    """Recursively walk *path* and back up images from every file whose
    name contains *word* (e.g. '.md')."""
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isfile(full) and word in entry:
            print(full)
            download(str(full))
        elif os.path.isdir(full):
            search(full, word)
def download(file_path):
    """Download every image referenced by the markdown file *file_path*
    into the local ``img/`` directory.

    Image links are matched with the ``![alt](url)`` pattern; each image is
    saved as ``<md-filename>_<index>_<alt><original-basename>``.

    Fix over the original: both the markdown file and the image output
    files are now opened with ``with`` blocks, so the handles are closed
    even when a download or write raises (the original leaked them).
    """
    # filename = "test"
    name = file_path.split(u"\\")
    filename = name[-1]
    with open(file_path, 'rb') as f_md:
        # all text of md file
        text = f_md.read().decode('utf-8')
    # regex for markdown image links: ![quote](url)
    result = re.findall('!\[(.*?)\]\((.*?)\)', text)
    print(result)
    for i in range(len(result)):
        img_quote = result[i][0]
        img_url = result[i][1]
        # download img
        request = urllib.request.Request(img_url)
        response = urllib.request.urlopen(request)
        img_contents = response.read()
        # img name spell
        urlname = img_url.split(u"/")
        img_name = filename + '_' + \
            str(i) + '_' + img_quote + str(urlname[len(urlname) - 1])
        print (img_name + '~~~' + img_url)
        # write to file
        with open('img/' + img_name, 'wb') as f_img:
            f_img.write(img_contents)
backup()
|
[
"hj531@live.com"
] |
hj531@live.com
|
c49836f2e56130e13c2dacd1c19ed24793523e5b
|
16f3cffc0e62d660103ada3185112caa9ee2ad4f
|
/marksheet.py
|
dabf959855ebef4b01e86ce099927e2117779242
|
[] |
no_license
|
hirak0373/AI-Practices
|
5601adcf91427cf57de4703b16b794e931c4b05b
|
4c23dc90c193f64f7511f1c1b9fc8cf79c776998
|
refs/heads/master
| 2020-06-25T10:03:15.001939
| 2019-07-28T11:25:42
| 2019-07-28T11:25:42
| 199,278,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# Console mark sheet: read five subject marks (out of 425 total), then
# print the total, the percentage and a letter grade.
#
# Fixes over the original:
#  * the failing branch compared the raw TOTAL (`obtain < 40`) instead of
#    the percentage, so percentages between ~9.4% and 40% printed no grade;
#  * the boundary checks left gaps (e.g. 79.99 < per1 < 80 matched nothing);
#    chained elif lower bounds close them, and `else` catches everything left.
eng = input("Enter marks of English: ")
bio = input("Enter marks of Biology: ")
chem = input("Enter marks of Chemistry: ")
pak = input("Enter marks of Pakistan Sudies: ")
sin = input("Enter marks of Sindhi: ")
obtain = int(eng) + int(bio) + int(chem) + int(pak) + int(sin)
print(obtain)
per1 = obtain / 425 * 100  # marks are out of 425
print("your percentage is: " + str(per1))
if per1 >= 80:
    print("Grade: A+")
elif per1 >= 70:
    print("Grade: A")
elif per1 >= 60:
    print("Grade: B")
elif per1 >= 50:
    print("Grade: C")
elif per1 >= 40:
    print("Grade: D")
else:
    print("Grade: fail")
|
[
"Hirak0373"
] |
Hirak0373
|
1c69ce72d7668c9f655b4077f3fe6fef28f0a157
|
eeaeb0f39262fa04233614ce377ac9dcd04f9b5e
|
/pyQt 5/my library pyqt5/QComboBox.py
|
a7d29c80b1d80e2e2fc041a97ce4e8fc09f32595
|
[] |
no_license
|
akashian4/python_example
|
70811300c97c3e9874b2c6f47a05569e7451e4df
|
64d57bfd9fe2d5ce71db3e1a03f0e6c4dfe3497f
|
refs/heads/main
| 2023-08-12T10:03:21.417469
| 2021-09-30T19:41:03
| 2021-09-30T19:41:03
| 412,203,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
###QComboBox
import sys
from PyQt5.QtWidgets import QWidget,QLabel,QApplication,QComboBox
class F(QWidget):
    """Demo window: a QComboBox whose current selection is echoed into a
    QLabel and printed to stdout."""
    def __init__(self):
        super().__init__()
        self.setUI()
    def setUI(self):
        # Label that mirrors the current combo selection.
        self.lbl = QLabel(" ", self)
        self.lbl.resize(50,20)
        self.lbl.move(50, 150)
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('faradars')
        self.com1=QComboBox(self)
        # NOTE(review): addItem's second argument is the userData slot;
        # passing `self` there looks unintended — confirm.
        self.com1.addItem("python",self)
        self.com1.addItem("java",self)
        self.com1.addItem("C++",self)
        self.com1.addItem("PyQt5",self)
        self.com1.move(100,100)
        self.com1.resize(200,20)
        # 'activated' fires when the user picks an item.
        self.com1.activated.connect(lambda : self.combItem())
        self.show()
    def combItem(self):
        # Read the current text/index, log them and update the label.
        text=self.com1.currentText()
        index=self.com1.currentIndex()
        print(text,' ',index)
        self.lbl.setText(text)
if __name__ == '__main__':
    # Standard PyQt bootstrap: create the app, show the window, run the loop.
    app = QApplication(sys.argv)
    ex = F()
    sys.exit(app.exec_())
|
[
"akashian4@gmail.com"
] |
akashian4@gmail.com
|
27687dfa574c3d83cfd55d79f9b60849782917ff
|
09a32f98165c20fbc0c519f826cbd445b1906d54
|
/merge_opt.py
|
03c8ad504fd12016c93a0f874d11647442ea508f
|
[] |
no_license
|
huuthai37/LSTM-Consensus
|
d2ec2d63073a1a996c827c1f71a8edc4e763a8dc
|
97233cf0a4176b0a0fc69c4c7051a5289628e48f
|
refs/heads/master
| 2020-03-17T05:53:33.663364
| 2018-07-13T04:12:50
| 2018-07-13T04:12:50
| 133,332,710
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,447
|
py
|
# Python 2 script (uses print statements): parse command-line options.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', help='Dataset', default='ucf101')
parser.add_argument('-s', '--segment', help='Number of segments', default=3, type=int)
# NOTE(review): the help text 'Number of classes' looks copy/pasted from
# another flag; -debug is used as a boolean (1 = on) below — confirm.
parser.add_argument('-debug', '--debug', help='Number of classes', default=1, type=int)
args = parser.parse_args()
print args
import cv2
import os
import sys
import random
import numpy as np
import config
import pickle
# Lay tuy chon
dataset = args.dataset
num_seq = args.segment
if args.debug == 1:
debug = True
else:
debug = False
# Cau hinh folder du lieu
server = config.server()
data_input_folder = config.data_input_path()
output_path = config.data_output_path()
out_file_folder = r'{}database/'.format(output_path)
data_file = r'{}data-{}-{}.pickle'.format(out_file_folder,dataset,num_seq)
count = 0
with open(data_file,'rb') as f1:
data = pickle.load(f1)
length_data = len(data)
data_folder_opt = r'{}{}-opt/'.format(output_path,dataset)
data_folder_seq_opt = r'{}{}-seq-opt/'.format(output_path,dataset)
if not os.path.isdir(data_folder_seq_opt + 'u'):
os.makedirs(data_folder_seq_opt + 'u') # tao data_folder_seq_opt + 'u'/
print 'Create directory ' + data_folder_seq_opt + 'u'
if not os.path.isdir(data_folder_seq_opt + 'v'):
os.makedirs(data_folder_seq_opt + 'v') # tao data_folder_seq_opt + 'v'/
print 'Create directory ' + data_folder_seq_opt + 'v'
for l in range(length_data):
path_video = data[l][0]
render_opt = data[l][1]
name_video = path_video.split('/')[1]
u = data_folder_opt + 'u/' + name_video + '/frame'
v = data_folder_opt + 'v/' + name_video + '/frame'
if not os.path.isdir(data_folder_seq_opt + 'u/' + name_video):
os.makedirs(data_folder_seq_opt + 'u/' + name_video) # tao data_folder_seq_opt + 'u/' + name_video/
print 'Create directory ' + data_folder_seq_opt + 'u/' + name_video
if not os.path.isdir(data_folder_seq_opt + 'v/' + name_video):
os.makedirs(data_folder_seq_opt + 'v/' + name_video) # tao data_folder_seq_opt + 'v/' + name_video/
print 'Create directory ' + data_folder_seq_opt + 'v/' + name_video
return_data = []
if (render_opt[0] >= 0):
render = render_opt
else:
render = [render_opt[1]]
len_render_opt = len(render)
for k in range(len_render_opt):
nstack_u = np.zeros((2560,340))
nstack_v = np.zeros((2560,340))
for i in range(10):
img_u = cv2.imread(u + str(render[k] + 5 + i).zfill(6) + '.jpg', 0)
img_v = cv2.imread(v + str(render[k] + 5 + i).zfill(6) + '.jpg', 0)
# img_u = np.ones((240,320))
# img_v = np.ones((240,320))
if (img_u is None) | (img_v is None):
print 'Error render optical flow'
print(u + str(render[k] + 5 + i).zfill(6) + '.jpg')
sys.exit()
hh, ww = img_u.shape
if (hh != 256) | (ww != 340):
img_u = cv2.resize(img_u, (340, 256))
img_v = cv2.resize(img_v, (340, 256))
nstack_u[(256*i):(256*(i+1)),:] = img_u
nstack_v[(256*i):(256*(i+1)),:] = img_v
os.chdir(data_folder_seq_opt + 'u/' + name_video)
cv2.imwrite('{}.jpg'.format(k),nstack_u)
os.chdir(data_folder_seq_opt + 'v/' + name_video)
cv2.imwrite('{}.jpg'.format(k),nstack_v)
if l%1000 == 0:
print l
|
[
"huuthai37@gmail.com"
] |
huuthai37@gmail.com
|
c269309d5a7e596f3b4d827d0729f1e6c2e2640b
|
5bcbf7fc2fd10bfd51df37aa806cc2305d0fe077
|
/mongokit_ng/schema_document.py
|
0a0063bf73fc7e9d786e72cf9a10b383720c63aa
|
[
"MIT"
] |
permissive
|
Windfarer/mongokit-ng
|
29f9e10af48af77a44e829a2910c88f92af1f218
|
52844c9242c4483694d4f3f65cf0d70a13df1c35
|
refs/heads/master
| 2023-08-20T02:39:45.240670
| 2020-02-20T03:37:05
| 2020-02-20T03:37:05
| 216,472,772
| 15
| 3
|
MIT
| 2023-09-14T17:11:44
| 2019-10-21T03:47:34
|
Python
|
UTF-8
|
Python
| false
| false
| 42,541
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bson
import datetime
import logging
from copy import deepcopy
log = logging.getLogger(__name__)
from .operators import SchemaOperator, IS
from .helpers import DotCollapsedDict
from .helpers import DotExpandedDict
from .helpers import i18nDotedDict
from .helpers import DotedDict
__all__ = [
'AuthorizedTypeError',
'BadKeyError',
'CustomType',
'DefaultFieldTypeError',
'DotCollapsedDict',
'DotedDict',
'DotExpandedDict',
'DuplicateDefaultValueError',
'DuplicateRequiredError',
'i18n',
'i18nError',
'ModifierOperatorError',
'RequireFieldError',
'SchemaDocument',
'SchemaDocumentError',
'SchemaProperties',
'SchemaTypeError',
'Set',
'StructureError',
'ValidationError',
]
class CustomType(object):
init_type = None
mongo_type = None
python_type = None
def __init__(self):
if self.mongo_type is None:
raise TypeError("`mongo_type` property must be specify in %s" %
self.__class__.__name__)
if self.python_type is None:
raise TypeError("`python_type` property must be specify in %s" %
self.__class__.__name__)
def to_bson(self, value):
"""convert type to a mongodb type"""
raise NotImplementedError
def to_python(self, value):
"""convert type to a mongodb type"""
raise NotImplementedError
def validate(self, value, path):
"""
This method is optional. It add a validation layer.
This method is been called in Document.validate()
value: the value of the field
path: the field name (ie, 'foo' or 'foo.bar' if nested)
"""
pass
# field wich does not need to be declared into the structure
STRUCTURE_KEYWORDS = []
class SchemaDocumentError(Exception):
pass
class RequireFieldError(SchemaDocumentError):
pass
class StructureError(SchemaDocumentError):
pass
class BadKeyError(SchemaDocumentError):
pass
class AuthorizedTypeError(SchemaDocumentError):
pass
class ValidationError(SchemaDocumentError):
pass
class DuplicateRequiredError(SchemaDocumentError):
pass
class DuplicateDefaultValueError(SchemaDocumentError):
pass
class ModifierOperatorError(SchemaDocumentError):
pass
class SchemaTypeError(SchemaDocumentError):
pass
class DefaultFieldTypeError(SchemaDocumentError):
pass
class i18nError(SchemaDocumentError):
pass
class DeprecationError(Exception):
pass
class DuplicateI18nError(Exception):
pass
class SchemaProperties(type):
def __new__(mcs, name, bases, attrs):
attrs['_protected_field_names'] = set(
['_protected_field_names', '_namespaces', '_required_namespace'])
for base in bases:
parent = base.__mro__[0]
if not hasattr(parent, 'structure'):
continue
if parent.structure is not None:
#parent = parent()
if parent.structure:
if 'structure' not in attrs and parent.structure:
attrs['structure'] = parent.structure.copy()
else:
obj_structure = attrs.get('structure', {}).copy()
attrs['structure'] = parent.structure.copy()
attrs['structure'].update(obj_structure)
if parent.required_fields:
attrs['required_fields'] = list(set(
attrs.get('required_fields', [])+parent.required_fields))
if parent.default_values:
obj_default_values = attrs.get('default_values', {}).copy()
attrs['default_values'] = parent.default_values.copy()
attrs['default_values'].update(obj_default_values)
if parent.validators:
obj_validators = attrs.get('validators', {}).copy()
attrs['validators'] = parent.validators.copy()
attrs['validators'].update(obj_validators)
if parent.i18n:
attrs['i18n'] = list(set(
attrs.get('i18n', [])+parent.i18n))
if attrs.get('authorized_types'):
attrs['authorized_types'] = list(set(parent.authorized_types).union(set(attrs['authorized_types'])))
for mro in bases[0].__mro__:
attrs['_protected_field_names'] = attrs['_protected_field_names'].union(list(mro.__dict__))
attrs['_protected_field_names'] = list(attrs['_protected_field_names'])
if attrs.get('structure') and name not in \
["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
base = bases[0]
if not attrs.get('authorized_types'):
attrs['authorized_types'] = base.authorized_types
base._validate_structure(attrs['structure'], name, attrs.get('authorized_types'))
attrs['_namespaces'] = list(base._SchemaDocument__walk_dict(attrs['structure']))
if [1 for i in attrs['_namespaces'] if type(i) is type]:
raise DeprecationError("%s: types are not allowed as structure key anymore" % name)
mcs._validate_descriptors(attrs)
## building required fields namespace
attrs['_required_namespace'] = set([])
for rf in attrs.get('required_fields', []):
splited_rf = rf.split('.')
for index in range(len(splited_rf)):
attrs['_required_namespace'].add(".".join(splited_rf[:index+1]))
attrs['_collapsed_struct'] = DotCollapsedDict(attrs['structure'], remove_under_type=True)
elif attrs.get('structure') is not None and name not in \
["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
attrs['_collapsed_struct'] = {}
attrs['_i18n_namespace'] = []
if attrs.get('i18n'):
attrs['_i18n_namespace'] = set(['.'.join(i.split('.')[:-1]) for i in attrs['i18n']])
return type.__new__(mcs, name, bases, attrs)
@classmethod
def _validate_descriptors(mcs, attrs):
# TODO i18n validator
for dv in attrs.get('default_values', {}):
if not dv in attrs['_namespaces']:
raise ValueError("Error in default_values: can't find %s in structure" % dv)
for required in attrs.get('required_fields', []):
if required not in attrs['_namespaces']:
raise ValueError("Error in required_fields: can't find %s in structure" % required)
for validator in attrs.get('validators', {}):
if validator not in attrs['_namespaces']:
raise ValueError("Error in validators: can't find %s in structure" % validator)
# required_field
if attrs.get('required_fields'):
if len(attrs['required_fields']) != len(set(attrs['required_fields'])):
raise DuplicateRequiredError("duplicate required_fields : %s" % attrs['required_fields'])
# i18n
if attrs.get('i18n'):
if len(attrs['i18n']) != len(set(attrs['i18n'])):
raise DuplicateI18nError("duplicated i18n : %s" % attrs['i18n'])
for _i18n in attrs['i18n']:
if _i18n not in attrs['_namespaces']:
raise ValueError("Error in i18n: can't find {} in structure".format(_i18n))
class SchemaDocument(dict, metaclass=SchemaProperties):
"""
A SchemaDocument is dictionary with a building structured schema
The validate method will check that the document match the underling
structure. A structure must be specify in each SchemaDocument.
>>> class TestDoc(SchemaDocument):
... structure = {
... "foo":str,
... "bar":int,
... "nested":{
... "bla":float}}
`str`, `int`, `float` are python types listed in `mongokit.authorized_types`.
>>> doc = TestDoc()
>>> doc
{'foo': None, 'bar': None, 'nested': {'bla': None}}
A SchemaDocument works just like dict:
>>> doc['bar'] = 3
>>> doc['foo'] = "test"
We can describe fields as required with the required attribute:
>>> TestDoc.required_fields = ['bar', 'nested.bla']
>>> doc = TestDoc()
>>> doc['bar'] = 2
Validation is made with the `validate()` method:
>>> doc.validate() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
RequireFieldError: nested.bla is required
Default values can be set by using the attribute default_values :
>>> TestDoc.default_values = {"bar":3, "nested.bla":2.0}
>>> doc = TestDoc()
>>> doc
{'foo': None, 'bar': 3, 'nested': {'bla': 2.0}}
>>> doc.validate()
Validators can be added in order to validate some values :
>>> TestDoc.validators = {"bar":lambda x: x>0, "nested.bla": lambda x: x<0}
>>> doc = TestDoc()
>>> doc['bar'] = 3
>>> doc['nested']['bla'] = 2.0
>>> doc.validate()
Traceback (most recent call last):
...
ValidationError: nested.bla does not pass the validator <lambda>
If you want to use the dot notation (ala json), you must set the
`use_dot_notation` attribute to True:
>>> class TestDotNotation(SchemaDocument):
... structure = {
... "foo":{ "bar":str}
... }
... use_dot_notation=True
>>> doc = TestDotNotation()
>>> doc.foo.bar = u"bla"
>>> doc
{"foo":{"bar":u"bla}}
"""
structure = None
required_fields = []
default_values = {}
validators = {}
i18n = []
raise_validation_errors = True
skip_validation = False
# if you want to have all schemaless benefits (default False but should change)
# warning, if use_schemaless is True, Migration features can not be used.
use_schemaless = False
# If you want to use the dot notation, set this to True:
use_dot_notation = False
dot_notation_warning = False
authorized_types = [
type(None),
bool,
int,
float,
str,
list,
dict,
bytes,
datetime.datetime,
CustomType,
]
def __init__(self, doc=None, gen_skel=True, _gen_auth_types=True, _validate=True, lang='en', fallback_lang='en'):
"""
doc : a dictionary
gen_skel : if True, generate automatically the skeleton of the doc
filled with NoneType each time validate() is called. Note that
if doc is not {}, gen_skel is always False. If gen_skel is False,
default_values cannot be filled.
gen_auth_types: if True, generate automatically the self.authorized_types
attribute from self.authorized_types
"""
super(SchemaDocument, self).__init__()
if self.structure is None:
self.structure = {}
self._current_lang = lang
self._fallback_lang = fallback_lang
self.validation_errors = {}
# init
if doc:
for k, v in doc.items():
self[k] = v
gen_skel = False
if gen_skel:
self.generate_skeleton()
if self.default_values:
self._set_default_fields(self, self.structure)
else:
self._process_custom_type('python', self, self.structure)
if self.use_dot_notation:
self.__generate_doted_dict(self, self.structure)
if self.i18n:
self._make_i18n()
def generate_skeleton(self):
"""
validate and generate the skeleton of the document
from the structure (unknown values are set to None)
"""
self.__generate_skeleton(self, self.structure)
def validate(self):
"""
validate the document.
This method will verify if :
* the doc follow the structure,
* all required fields are filled
Additionally, this method will process all
validators.
"""
if self.validators:
self._process_validators(self, self.structure)
self._process_custom_type('bson', self, self.structure)
self._validate_doc(self, self.structure)
self._process_custom_type('python', self, self.structure)
if self.required_fields:
self._validate_required(self, self.structure)
def __setattr__(self, key, value):
if key not in self._protected_field_names and self.use_dot_notation and key in self:
if isinstance(self.structure[key], i18n):
self[key][self._current_lang] = value
else:
self[key] = value
else:
if self.dot_notation_warning and not key.startswith('_') and key not in \
['db', 'collection', 'versioning_collection', 'connection', 'fs']:
log.warning("dot notation: {} was not found in structure. Add it as attribute instead".format(key))
dict.__setattr__(self, key, value)
def __getattr__(self, key):
if key not in self._protected_field_names and self.use_dot_notation and key in self:
if isinstance(self[key], i18n):
if self._current_lang not in self[key]:
return self[key].get(self._fallback_lang)
return self[key][self._current_lang]
return self[key]
else:
return dict.__getattribute__(self, key)
#
# Public API end
#
@classmethod
def __walk_dict(cls, dic):
# thanks jean_b for the patch
for key, value in list(dic.items()):
if isinstance(value, dict) and len(value):
if type(key) is type:
yield '$%s' % key.__name__
else:
yield key
for child_key in cls.__walk_dict(value):
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
#if type(child_key) is type:
# new_child_key = "$%s" % child_key.__name__
#else:
if type(child_key) is not type:
new_child_key = child_key
yield '%s.%s' % (new_key, new_child_key)
elif type(key) is type:
yield '$%s' % key.__name__
# elif isinstance(value, list) and len(value):
# if isinstance(value[0], dict):
# for child_key in cls.__walk_dict(value[0]):
# #if type(key) is type:
# # new_key = "$%s" % key.__name__
# #else:
# if type(key) is not type:
# new_key = key
# #if type(child_key) is type:
# # new_child_key = "$%s" % child_key.__name__
# #else:
# if type(child_key) is not type:
# new_child_key = child_key
# yield '%s.%s' % (new_key, new_child_key)
# else:
# if type(key) is not type:
# yield key
# #else:
# # yield ""
else:
if type(key) is not type:
yield key
#else:
# yield ""
@classmethod
def _validate_structure(cls, structure, name, authorized_types):
"""
validate if all fields in self.structure are in authorized types.
"""
##############
def __validate_structure(struct, name, _authorized):
if type(struct) is type:
if struct not in authorized_types:
if struct not in authorized_types:
raise StructureError("%s: %s is not an authorized type" % (name, struct))
elif isinstance(struct, dict):
for key in struct:
if isinstance(key, str):
if "." in key:
raise BadKeyError("%s: %s must not contain '.'" % (name, key))
if key.startswith('$'):
raise BadKeyError("%s: %s must not start with '$'" % (name, key))
elif type(key) is type:
if not key in authorized_types:
raise AuthorizedTypeError("%s: %s is not an authorized type" % (name, key))
else:
raise StructureError("%s: %s must be a str or a type" % (name, key))
if struct[key] is None:
pass
elif isinstance(struct[key], dict):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], list):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], tuple):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], CustomType):
__validate_structure(struct[key].mongo_type, name, authorized_types)
elif isinstance(struct[key], SchemaProperties):
pass
elif isinstance(struct[key], SchemaOperator):
__validate_structure(struct[key], name, authorized_types)
elif hasattr(struct[key], 'structure'):
__validate_structure(struct[key], name, authorized_types)
elif struct[key] not in authorized_types:
ok = False
for auth_type in authorized_types:
if struct[key] is None:
ok = True
else:
try:
if isinstance(struct[key], auth_type) or issubclass(struct[key], auth_type):
ok = True
except TypeError:
raise TypeError("%s: %s is not a type" % (name, struct[key]))
if not ok:
raise StructureError(
"%s: %s is not an authorized type" % (name, struct[key]))
elif isinstance(struct, list) or isinstance(struct, tuple):
for item in struct:
__validate_structure(item, name, authorized_types)
elif isinstance(struct, SchemaOperator):
if isinstance(struct, IS):
for operand in struct:
if type(operand) not in authorized_types:
raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
name, operand, struct, type(operand).__name__))
else:
for operand in struct:
if operand not in authorized_types:
raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
name, operand, struct, type(operand).__name__))
elif isinstance(struct, SchemaProperties):
pass
else:
ok = False
for auth_type in authorized_types:
if isinstance(struct, auth_type):
ok = True
if not ok:
raise StructureError("%s: %s is not an authorized_types" % (name, struct))
#################
if structure is None:
raise StructureError("%s.structure must not be None" % name)
if not isinstance(structure, dict):
raise StructureError("%s.structure must be a dict instance" % name)
__validate_structure(structure, name, authorized_types)
def _raise_exception(self, exception, field, message):
if self.raise_validation_errors:
raise exception(message)
else:
if not field in self.validation_errors:
self.validation_errors[field] = []
self.validation_errors[field].append(exception(message))
def _validate_doc(self, doc, struct, path=""):
"""
check if doc field types match the doc field structure
"""
if type(struct) is type or struct is None:
if struct is None:
if type(doc) not in self.authorized_types:
self._raise_exception(AuthorizedTypeError, type(doc).__name__,
"%s is not an authorized types" % type(doc).__name__)
elif not isinstance(doc, struct) and doc is not None:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, struct.__name__, type(doc).__name__))
elif isinstance(struct, CustomType):
if not isinstance(doc, struct.mongo_type) and doc is not None:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, struct.mongo_type.__name__, type(doc).__name__))
struct.validate(doc, path=path)
elif isinstance(struct, SchemaOperator):
if not struct.validate(doc) and doc is not None:
if isinstance(struct, IS):
self._raise_exception(SchemaTypeError, path,
"%s must be in %s not %s" % (path, struct._operands, doc))
else:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (path, struct, type(doc).__name__))
elif isinstance(struct, dict):
if not isinstance(doc, type(struct)):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, type(struct).__name__, type(doc).__name__))
struct_length = len(struct) if not '_id' in struct else len(struct) - 1
if len(doc) != struct_length:
struct_doc_diff = list(set(struct).difference(set(doc)))
if struct_doc_diff:
for field in struct_doc_diff:
if (type(field) is not type) and (not self.use_schemaless):
self._raise_exception(StructureError, None,
"missed fields %s in %s" % (struct_doc_diff, type(doc).__name__))
else:
struct_struct_diff = list(set(doc).difference(set(struct)))
bad_fields = [s for s in struct_struct_diff if s not in STRUCTURE_KEYWORDS]
if bad_fields and not self.use_schemaless:
self._raise_exception(StructureError, None,
"unknown fields %s in %s" % (bad_fields, type(doc).__name__))
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
if new_key.split('.')[-1].startswith("$"):
for doc_key in doc:
if not isinstance(doc_key, key):
self._raise_exception(SchemaTypeError, path,
"key of %s must be an instance of %s not %s" % (
path, key.__name__, type(doc_key).__name__))
self._validate_doc(doc[doc_key], struct[key], new_path)
else:
if key in doc:
self._validate_doc(doc[key], struct[key], new_path)
elif isinstance(struct, list):
if not isinstance(doc, list) and not isinstance(doc, tuple):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of list not %s" % (path, type(doc).__name__))
if not len(struct):
struct = None
else:
struct = struct[0]
for obj in doc:
self._validate_doc(obj, struct, path)
elif isinstance(struct, tuple):
if not isinstance(doc, list) and not isinstance(doc, tuple):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of list not %s" % (
path, type(doc).__name__))
if len(doc) != len(struct):
self._raise_exception(SchemaTypeError, path, "%s must have %s items not %s" % (
path, len(struct), len(doc)))
for i in range(len(struct)):
self._validate_doc(doc[i], struct[i], path)
def _process_validators(self, doc, _struct, _path=""):
doted_doc = DotCollapsedDict(doc)
for key, validators in self.validators.items():
if key in doted_doc and doted_doc[key] is not None:
if not hasattr(validators, "__iter__"):
validators = [validators]
for validator in validators:
try:
if not validator(doted_doc[key]):
raise ValidationError("%s does not pass the validator " + validator.__name__)
except Exception as e:
self._raise_exception(ValidationError, key,
str(e) % key)
def _process_custom_type(self, target, doc, struct, path="", root_path=""):
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
# if the value is a dict, we have a another structure to validate
#
#
# It is not a dict nor a list but a simple key:value
#
if isinstance(struct[key], CustomType):
if target == 'bson':
if key in doc:
if struct[key].python_type is not None:
if not isinstance(doc[key], struct[key].python_type) and doc[key] is not None:
self._raise_exception(SchemaTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key].python_type.__name__,
type(doc[key]).__name__))
doc[key] = struct[key].to_bson(doc[key])
else:
if key in doc:
doc[key] = struct[key].to_python(doc[key])
elif isinstance(struct[key], dict):
if doc: # we don't need to process an empty doc
if type(key) is type:
for doc_key in doc: # process type's key such {str:int}...
self._process_custom_type(target, doc[doc_key], struct[key], new_path, root_path)
else:
if key in doc: # we don't care about missing fields
self._process_custom_type(target, doc[key], struct[key], new_path, root_path)
#
# If the struct is a list, we have to validate all values into it
#
elif type(struct[key]) is list:
#
# check if the list must not be null
#
if struct[key]:
l_objs = []
if isinstance(struct[key][0], CustomType):
for obj in doc[key]:
if target == 'bson':
if struct[key][0].python_type is not None:
if not isinstance(obj, struct[key][0].python_type) and obj is not None:
self._raise_exception(SchemaTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key][0].python_type.__name__,
type(obj).__name__))
obj = struct[key][0].to_bson(obj)
else:
obj = struct[key][0].to_python(obj)
l_objs.append(obj)
doc[key] = l_objs
elif isinstance(struct[key][0], dict):
if doc.get(key):
for obj in doc[key]:
self._process_custom_type(target, obj, struct[key][0], new_path, root_path)
def _set_default_fields(self, doc, struct, path=""):
# TODO check this out, this method must be restructured
for key in struct:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
# default_values :
# if the value is None, check if a default value exist.
# if exists, and it is a function then call it otherwise,
# juste feed it
#
if type(key) is not type:
if doc[key] is None and new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if isinstance(struct[key], CustomType):
if not isinstance(new_value, struct[key].python_type):
self._raise_exception(DefaultFieldTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key].python_type.__name__,
type(new_value).__name__))
doc[key] = new_value
#
# if the value is a dict, we have a another structure to validate
#
if isinstance(struct[key], dict) and new_path not in self.i18n:
#
# if the dict is still empty into the document we build
# it with None values
#
if len(struct[key]) and not [i for i in list(struct[key].keys()) if type(i) is type]:
self._set_default_fields(doc[key], struct[key], new_path)
else:
if new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
doc[key] = new_value
elif isinstance(struct[key], list):
if new_path in self.default_values:
for new_value in self.default_values[new_path]:
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if isinstance(struct[key][0], CustomType):
if not isinstance(new_value, struct[key][0].python_type):
self._raise_exception(DefaultFieldTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key][0].python_type.__name__,
type(new_value).__name__))
doc[key].append(new_value)
else: # what else
if new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if new_path in self.i18n:
doc[key] = i18n(
field_type=struct[key],
field_name=key
)
doc[key].update(new_value)
else:
doc[key] = new_value
def _validate_required(self, doc, _struct, _path="", _root_path=""):
doted_struct = DotCollapsedDict(self.structure)
doted_doc = DotCollapsedDict(doc, reference=doted_struct)
for req in self.required_fields:
if doted_doc.get(req) is None and doted_struct.get(req) is not dict:
if not isinstance(doted_struct.get(req), CustomType):
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif isinstance(doted_struct.get(req), CustomType) and doted_struct[req].mongo_type is not dict:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif doted_doc.get(req) == []:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif doted_doc.get(req) == {}:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
def __generate_skeleton(self, doc, struct, path=""):
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
# Automatique generate the skeleton with NoneType
#
if type(key) is not type and key not in doc:
if isinstance(struct[key], dict):
if type(struct[key]) is dict and self.use_dot_notation:
if new_path in self._i18n_namespace:
doc[key] = i18nDotedDict(doc.get(key, {}), self)
else:
doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
else:
if callable(struct[key]):
doc[key] = struct[key]()
else:
doc[key] = type(struct[key])()
elif struct[key] is dict:
doc[key] = {}
elif isinstance(struct[key], list):
doc[key] = type(struct[key])()
elif isinstance(struct[key], CustomType):
if struct[key].init_type is not None:
doc[key] = struct[key].init_type()
else:
doc[key] = None
elif struct[key] is list:
doc[key] = []
elif isinstance(struct[key], tuple):
doc[key] = [None for _ in range(len(struct[key]))]
else:
doc[key] = None
#
# if the value is a dict, we have a another structure to validate
#
if isinstance(struct[key], dict) and type(key) is not type:
self.__generate_skeleton(doc[key], struct[key], new_path)
def __generate_doted_dict(self, doc, struct, path=""):
for key in struct:
#
# Automatique generate the skeleton with NoneType
#
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
if type(key) is not type: # and key not in doc:
if isinstance(struct[key], dict):
if type(struct[key]) is dict:
if new_path in self._i18n_namespace:
doc[key] = i18nDotedDict(doc.get(key, {}), self)
else:
doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
#
# if the value is a dict, we have a another structure to validate
#
if isinstance(struct[key], dict) and type(key) is not type:
self.__generate_doted_dict(doc[key], struct[key], new_path)
def _make_i18n(self):
doted_dict = DotCollapsedDict(self.structure)
for field in self.i18n:
if field not in doted_dict:
self._raise_exception(ValidationError, field,
"%s not found in structure" % field)
if not isinstance(doted_dict[field], i18n):
doted_dict[field] = i18n(
field_type=doted_dict[field],
field_name=field
)
self.structure.update(DotExpandedDict(doted_dict))
def set_lang(self, lang):
self._current_lang = lang
def get_lang(self):
return self._current_lang
class i18n(dict, CustomType):
""" CustomType to deal with i18n """
mongo_type = list
def __init__(self, field_type=None, field_name=None):
super(i18n, self).__init__()
self.python_type = self.__class__
self._field_type = field_type
self._field_name = field_name
def __call__(self):
return i18n(self._field_type, self._field_name)
def to_bson(self, value):
if value is not None:
for l, v in value.items():
if isinstance(v, list) and isinstance(self._field_type, list):
for i in v:
if not isinstance(i, self._field_type[0]):
raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
self._field_name, l, self._field_type[0], type(i).__name__))
else:
if not isinstance(v, self._field_type):
raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
self._field_name, l, self._field_type, type(v).__name__))
return [{'lang': l, 'value': v} for l, v in value.items()]
def to_python(self, value):
if value is not None:
i18n_dict = self.__class__(self._field_type)
for i in value:
i18n_dict[i['lang']] = i['value']
return i18n_dict
class Set(CustomType):
""" SET custom type to handle python set() type """
init_type = set
mongo_type = list
python_type = set
def __init__(self, structure_type=None):
super(Set, self).__init__()
self._structure_type = structure_type
def to_bson(self, value):
if value is not None:
return list(value)
def to_python(self, value):
if value is not None:
return set(value)
def validate(self, value, path):
if value is not None and self._structure_type is not None:
for val in value:
if not isinstance(val, self._structure_type):
raise ValueError('%s must be an instance of %s not %s' %
(path, self._structure_type.__name__, type(val).__name__))
|
[
"windfarer@gmail.com"
] |
windfarer@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.