| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
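The rows that follow are flattened records of this schema. As a quick sanity check of the column names, here is a minimal sketch for streaming a few records with the Hugging Face `datasets` library; it assumes the rows are published as a dataset repository, and `org/code-files` is a hypothetical placeholder for its name.

```python
from itertools import islice

from datasets import load_dataset  # assumes the rows are available as a Hugging Face dataset

# "org/code-files" is a hypothetical placeholder; the source does not name the dataset repository.
ds = load_dataset("org/code-files", split="train", streaming=True)

for row in islice(ds, 3):
    # Each record pairs repository metadata with the stored source file.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:120])  # preview of the stored file contents
```

Streaming keeps memory bounded even though individual `content` values can reach 10.3 MB.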
113af3e207e4b01797c11ec0d406ac5a136b56c2
|
801418efbd049078c8aad4cd17297f3ece571412
|
/temp/toy/python/238. Product of Array Except Self.py
|
d7da2b067439b8c2b107a462617c0fb4b8eac579
|
[] |
no_license
|
xixihaha1995/CS61B_SP19_SP20
|
2b654f0c864a80a0462fdd4b1561bdc697a8c1e2
|
7d6599596f7f49b38f1c256ece006b94555c1900
|
refs/heads/master
| 2023-01-01T18:41:48.027058
| 2020-10-29T04:50:01
| 2020-10-29T04:50:01
| 240,976,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from typing import List

class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
res, p, q = [1], 1, 1
for i in range(len(nums)-1):
p *= nums[i]
res.append(p)
for i in range(len(nums)-1, 0, -1):
q *= nums[i]
res[i-1] *= q
return res
|
[
"wulicheneason@gmail.com"
] |
wulicheneason@gmail.com
|
30dfff8bcb7876a52d6a99c2cd8349866f1eb587
|
9da79476a3002a4af98cc76effdabdbec9613adf
|
/Extended_Methods/Heuristic_2/GLOBAL_VAR.py
|
3bb5558e36235af3c35eb3a14bab112ea60dec5a
|
[
"CC-BY-4.0"
] |
permissive
|
heyuan7676/ts_eQTLs
|
1cb0517dbe1faac616fef6e5ebc87ffb6d47899a
|
62b04f5477183f5c0cb60f21264b3147fd8bd82a
|
refs/heads/master
| 2021-07-15T13:23:20.491904
| 2020-10-09T16:03:53
| 2020-10-09T16:03:53
| 211,209,498
| 20
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,354
|
py
|
import os
import sys
import numpy as np
import pandas as pd
import pdb
r2 = '1'
FDR = 0.05
fmDir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/FL/coph'
ll_dir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/LL'
prefix = 'v8_cbset_95_allPairs_filteredGenes.ciseQTL_results.complete_filteredSNPs.LDblocks_%s' % str(r2)
LDprefix = '_LD1'
#FMfn = 'SparseMF_coph_%s_topPair_K30_a11_l110' % prefix.replace(r2, '0.2')
#FMfn = 'SparseMF_coph_%s_topPair_K25_a125_l15000' % prefix
#LMfn= '%s%s_Loadings_beta_BH_corrected_alpha%s' % (FMfn, LDprefix, str(FDR))
#LMfn = '%s%s_Loadings_projection' % (FMfn, LDprefix)
FMfn = 'Thresholding_Refined'
if 1:
FOLDS = 100
PROP = 0.5
PVALUE = 0.001
N1 = 5
LMfn = 'ts_closeToTop_FOLDS%d_PROP%s_PVALUE%s_N1%d' % (FOLDS, str(PROP), str(PVALUE), N1)
bg_cluster_id = 0
inputdatadir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/cbset_datasets/input_pairs'
inputdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/cbset_datasets/input_pairs_fitModel'
inputdatafn = 'v8_cbset_95_allPairs_filteredGenes.ciseQTL_results.complete.tss_distance.txt'
pairdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/pairSets'
#pairdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/pairSets_0907'
allSNPfeaturedir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/annotations/allPairs'
SNPfeaturedir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/annotations/cbset_pairs'
datasetName = 'v8_cbset_95_allPairs_filteredGenes.ciseQTL_results.complete.SNP_loc'
activeSNPdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/SNP/SNPset_active'
sigSNPfeaturedir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/SNP/SNPset_features'
activeSNPfeaturedir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/SNP/SNPset_active_features'
active_proportion = 0.0
gsea_dir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/enrichmentTest/GSEA/'
gsea_file_used = 'c5.bp.v6.2.symbols.gmt.txt'
def get_tis_groups():
tissue_groups = [[x for x in tissues if 'Adipose' in x],
['Adrenal_Gland'],
[x for x in tissues if 'Artery' in x],
[x for x in tissues if 'Brain' in x],
['Cells_EBV-transformed_lymphocytes'],
['Cells_Cultured_fibroblasts'],
[x for x in tissues if 'Colon' in x],
[x for x in tissues if 'Esophagus' in x],
[x for x in tissues if 'Heart' in x],
['Kidney_Cortex'],
['Liver'],
['Lung'],
['Minor_Salivary_Gland'],
['Muscle_Skeletal'],
['Nerve_Tibial'],
['Ovary'],
['Pancreas'],
['Pituitary'],
['Prostate'],
[x for x in tissues if 'Skin' in x],
['Small_Intestine_Terminal_Ileum'],
['Spleen'],
['Stomach'],
['Testis'],
['Thyroid'],
['Uterus'],
['Vagina'],
['Whole_Blood']]
return tissue_groups
tissues = pd.read_csv('tissues.txt', sep='\t', header=None)
tissues = np.array(tissues[0])
Comp_tissues = get_tis_groups()
|
[
"yuanhe777tt@hotmail.com"
] |
yuanhe777tt@hotmail.com
|
f481ea5b3e5bc16aabe05bdd0923e9f989d4f03d
|
a9f508078a6950ec72c7bd4fbf6b2cac664c31a9
|
/proxypool/importer.py
|
976e76680792e8db97cea4bf72fea86830e54f56
|
[] |
no_license
|
0x1un/proxypool-china
|
6bcaf7bd3ed7fa440bc6ef132dada7e1cc0185d7
|
b480ffe62c802c579e754fcb0bcceca8fe246a74
|
refs/heads/master
| 2020-03-20T09:48:47.860726
| 2018-06-16T13:36:51
| 2018-06-16T13:36:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
from proxypool.db import RedisClient
conn = RedisClient()
#Proxy import function
def set(proxy):
result = conn.add(proxy)
print(proxy)
print('录入成功' if result else '录入失败')
#Manually import proxies
def scan():
print('请输入代理, 输入exit退出读入')
while True:
proxy = input()
if proxy == 'exit':
break
set(proxy)
if __name__ == '__main__':
scan()
|
[
"aumujun@gmail.com"
] |
aumujun@gmail.com
|
32b8a244de29e4e047b0af3c9a8d1030db2a951d
|
4d06cc4dc18358741cc30a68a97c3a5e0755ad40
|
/image_recovery/imglib.py
|
765d5fceb6e2c400085c1918e84b6fa67532c62f
|
[] |
no_license
|
maridonskova/color_image_recovery_ozon_masters_nla_project
|
047e755f074a0dca04cfe6b9d476dd65c5dadca8
|
17760cb93c36d9a192b99e746d7fdcbbab8734f3
|
refs/heads/master
| 2022-04-06T17:20:30.479329
| 2019-12-20T18:50:40
| 2019-12-20T18:50:40
| 215,392,053
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,005
|
py
|
# ================================
import numpy as np
import cv2
from itertools import product
# ================================
def img2qm(filepath: str) -> np.array:
"""
Reads image from file in filesystem and transforms it to NumPy 3-tensor
Parameters:
----------------
filepath: str
Path to file
Returns:
----------------
img: np.array
3-tensor representing the image. Last axis has dimension 4, and contains color channels: (0, R, G, B)
"""
img = cv2.cvtColor(cv2.imread(filepath), cv2.COLOR_BGR2RGB).astype(np.float64)/255.0
return np.concatenate([
np.zeros((*img.shape[:2], 1)),
img[:, :, ]
], axis=2)
# ================================
def add_random_missing_pixels(img: np.array, q: float, mode: str = "uniform",
random_state: int = None, **kwargs) -> (np.array, np.array):
"""
Randomly removes pixels from picture in selected fashion, returning altered picture and
boolean mask
Parameters:
----------------
img: np.array
3-tensor representing the image
q: 0.0 <= float <= 1.0
proportion of pixels to erase
mode: str
Pixels removal fashion. Should be one of:
uniform: each pixel has equal probability to get erased
square: erases square randomly positioned within picture. Square area is q*(picure area)
Returns:
----------------
imgx: np.array
3-tensor representing the image with pixels erased
mask: np.array
boolean 2-array, True values correspond to pixels that have not been erased
Raises:
----------------
ValueError:
if q not in [0.0, 1.0] or tensor last axis has dimension different from 4
"""
    # Reject out-of-range erase proportions and images without the expected 4 channels.
    if not (0.0 <= q <= 1.0) or img.shape[2] != 4:
        raise ValueError("Wrong tensor shape or erased pixels proportion not in [0, 1]")
else:
mask = np.zeros(img.shape[:2], dtype=np.bool)
np.random.seed(random_state)
if mode == "uniform":
idxs = np.random.choice(np.prod(img.shape[:2]), size=int(np.prod(img.shape[:2])*q), replace=False)
mask[[x // img.shape[1] for x in idxs], [x % img.shape[1] for x in idxs]] = True
# ================================
elif mode == "normal_clusters":
n_clusters = kwargs.get("n_clusters", 3)
stdd = kwargs.get("std", min(img.shape[:2])/10)
max_tries = kwargs.get("max_tries", 10)
cluster_centers = np.array([
np.random.randint(img.shape[0], size=n_clusters),
np.random.randint(img.shape[1], size=n_clusters)
]).T
pix_prop = 0.0
tries = 0
while (tries < max_tries) and (pix_prop < q):
new_pixs = np.concatenate([
np.random.multivariate_normal(xcl, np.eye(2) * stdd ** 2,
size=int(np.ceil(img.size * 0.1 / n_clusters)))
for xcl in cluster_centers
]).astype(np.int32)
new_pixs[:, 0] = np.clip(new_pixs[:, 0], 0, img.shape[0] - 1)
new_pixs[:, 1] = np.clip(new_pixs[:, 1], 0, img.shape[1] - 1)
mask[new_pixs[:, 0], new_pixs[:, 1]] = True
pix_prop = mask.sum()/mask.size
tries += 1
# ================================
elif mode == "square":
sqsz = int(np.sqrt(np.prod(img.shape[:2])*q))
startpos = (np.random.choice(img.shape[0] - sqsz), np.random.choice(img.shape[1] - sqsz))
mask[startpos[0]:(startpos[0] + sqsz), startpos[1]:(startpos[1] + sqsz)] = True
else:
raise ValueError(f"Unknown option {mode}")
imgx = img.copy()
imgx[np.tile(mask[:, :, None], (1, 1, 4))] = 0.0
return imgx, ~mask
|
[
"ayiserov@gmail.com"
] |
ayiserov@gmail.com
|
0758d42be8c51765633d9a7297cba762e8b04a25
|
cb55abd80671e898f08e07710bd87c72ba559477
|
/backend/provider/sendgridservice.py
|
23c8e65589e43d8381fa04ceaaad61cb32086739
|
[
"MIT"
] |
permissive
|
laughinging/yaes
|
ef307a27806ebbd9e6bb0f318825b7bdf4ad25b3
|
0893f7848ee0530fa6c3bd553f89aa430f9b2f02
|
refs/heads/master
| 2020-03-18T05:14:14.684146
| 2018-05-30T12:29:55
| 2018-05-30T12:29:55
| 134,331,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,356
|
py
|
import os
import logging
import sendgrid
from set_up import sg_client
from sendgrid.helpers.mail import *
from backend.provider.provider_exceptions import *
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class SendgridMail(object):
SENDGRID_ERROR = {
400: "BAD REQUEST",
401: "UNAUTHORIZED: You do not have authorization to make the request.",
403: "FORBIDDEN",
404: "NOT FOUND : The resource you tried to locate could not be found or does not exist.",
405: "METHOD NOT ALLOWED",
413: "PAYLOAD TOO LARGE: The JSON payload you have included in your request is too large.",
415: "UNSUPPORTED MEDIA TYPE",
429: "TOO MANY REQUESTS: The number of requests you have made exceeds SendGrid’s rate limitations",
500: "SERVER UNAVAILABLE: An error occurred on a SendGrid server.",
503: "SERVICE NOT AVAILABLE: The SendGrid v3 Web API is not available."
}
def __init__(self):
self.client = sg_client
def send_mail(self, **kwargs):
from_email = Email(kwargs['sender'])
to_email = Email(kwargs['recipient'])
subject = kwargs['subject']
content = Content("text/plain", kwargs['body'])
mail = Mail(from_email, subject, to_email, content)
logger.info('Attempt to send an email with sendgrid')
try:
response = self.client.client.mail.send.post(request_body=mail.get())
except Exception as e:
if e.status_code in (400, 403, 404, 405, 413, 415, 429):
message = "SendGrid Client Error {}: {}".format(e.status_code,
self.SENDGRID_ERROR[e.status_code])
logger.exception(message)
raise ClientError(message)
elif e.status_code in (401, 500, 503):
message = "SendGrid Server Error {}: {}".format(e.status_code,
self.SENDGRID_ERROR[e.status_code])
logger.exception(message)
raise ProviderServerError(message)
if __name__ == "__main__":
SendgridMail().send_mail(
sender="test@test.com",
recipient="test@test.com",
subject="test",
body="This is a test email."
)
|
[
"qianyunguo@gmail.com"
] |
qianyunguo@gmail.com
|
a262ae2e16a1c482655881051aec3552ba5cef76
|
3e8f63c0e45de6df395f41c62889330ad1a2f839
|
/lesson10/basic/warehouse.py
|
551ee21a9ee3bdfb327fde49840ae8bccb54269e
|
[] |
no_license
|
lvshaokang/python
|
336de2ec7d532fc777bf221dece307ee6625562f
|
71807622f19922e1ea718c38c544fc77d666d274
|
refs/heads/master
| 2020-03-28T19:41:44.530856
| 2018-10-01T14:56:45
| 2018-10-01T14:56:45
| 149,000,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
class WarehouseManageSys:
def __init__(self):
self.item_detail = {"老坛酸菜": 5, "红烧牛肉": 4, "酸辣粉": 6, "拉面": 7, "老干妈": 10, "乌江": 2, "王中王": 2, "蒜肠": 12, "淀粉肠": 8}
def get_item_list(self, item_type):
pm_list = ["老坛酸菜", "红烧牛肉", "酸辣粉", "拉面"]
zc_list = ["老干妈", "乌江"]
xc_list = ["王中王", "蒜肠", "淀粉肠"]
if item_type == "pm":
return pm_list
elif item_type == "zc":
return zc_list
elif item_type == "xc":
return xc_list
|
[
"lvshaokang@hotmail.com"
] |
lvshaokang@hotmail.com
|
a3048e66ffb33cc2d58ad1fbd2181763d17a7bc4
|
37e95a54c78b2cad61a8977833c3b69a9d757a5c
|
/Excepciones4.py
|
73dba6ebeb5910b26a297d8e9511abc603c9b74b
|
[] |
no_license
|
solrac205/curso_python
|
b8ab869440887523a0be56fc9b5ab4f53921ac98
|
c372dde614918a66e323da0ce16184f7d84c3a1e
|
refs/heads/master
| 2023-02-06T01:43:13.612237
| 2020-12-31T03:25:09
| 2020-12-31T03:25:09
| 325,693,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
print("******************************************************")
print("Curso No. 23 Excepciones" )
print("******************************************************")
def divide():
try:
op1=(float(input("Ingrese el primer valor a operar: ")))
op2=(float(input("Ingrese el segundo valor a operar: ")))
print("La división es: " + str(op1/op2))
except ValueError:
print("El valor introducido es erróneo")
except ZeroDivisionError:
print("No se puede dividir entre 0!")
print("Cálculo finalizado...")
def divide2():
try:
op1=(float(input("Ingrese el primer valor a operar: ")))
op2=(float(input("Ingrese el segundo valor a operar: ")))
print("La división es: " + str(op1/op2))
except ValueError:
print("El valor introducido es erróneo")
except ZeroDivisionError:
print("No se puede dividir entre 0!")
    #the finally clause always runs,
    #even if an exception was caught.
finally:
print("Cálculo finalizado...")
#if a try block has a finally but no except clause, the finally still runs,
#but the program then crashes because the previously detected error propagates.
def divide3():
try:
op1=(float(input("Ingrese el primer valor a operar: ")))
op2=(float(input("Ingrese el segundo valor a operar: ")))
print("La división es: " + str(op1/op2))
except:
print("se registro un error")
print("Cálculo finalizado...")
def EvaluaEdad(edad):
if edad<0:
raise ZeroDivisionError("No se permiten edades negativas...")
if edad <20:
return "Eres muy Joven"
elif edad < 40:
return "Eres Joven"
elif edad < 65:
return "Eres maduro"
elif edad < 100:
return "Cuidate..."
import math
def CalculaRaiz(num1):
if num1 < 0:
raise ValueError("Error en valor, este no puede ser negativo")
else:
return math.sqrt(num1)
op1=(int(input("introduce un número: ")))
try:
print(CalculaRaiz(op1))
except ValueError as ErrorDeNumeroNegativo:
print(ErrorDeNumeroNegativo)
print("finalizo programa")
|
[
"carlos_205ram@hotmail.com"
] |
carlos_205ram@hotmail.com
|
75d2f93063a4feaf6b869a50b0e5a88d40500e00
|
2bcf18252fa9144ece3e824834ac0e117ad0bdf3
|
/httpy/tags/0.7/tests/TestCaseHttpy.py
|
08a1fc6dd3fb6eb41284fefc3f7dc8c1602cb96c
|
[] |
no_license
|
chadwhitacre/public
|
32f65ba8e35d38c69ed4d0edd333283a239c5e1d
|
0c67fd7ec8bce1d8c56c7ff3506f31a99362b502
|
refs/heads/master
| 2021-05-10T14:32:03.016683
| 2010-05-13T18:24:20
| 2010-05-13T18:24:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,199
|
py
|
"""The idea and code for running a test._server in another thread are from the
standard library's test/test_socke._server.py.
TODO: This is out of date now that we are using asyncore (via httpy._zope._server).
"""
import asyncore
import os
import select
import socket
import threading
import time
import unittest
from httpy._zope.server.taskthreads import ThreadedTaskDispatcher
from httpy._zope.server.tests.asyncerror import AsyncoreErrorHook
from httpy.Config import Config
from httpy.Request import Request, ZopeRequest
from httpy.Server import Server
td = ThreadedTaskDispatcher()
opts = [ '--mode', 'development'
, '--sockfam', 'AF_INET'
, '--root', 'root'
, '--address', ':65370'
, '--verbosity', '99'
#, '--apps', '/' discover automatically
]
class TestCaseHttpy(unittest.TestCase, AsyncoreErrorHook):
# unittest.TestCase hooks
# =======================
want_config = False
def setUp(self):
self.scrubenv()
# [re]build a temporary website tree in ./root
self.removeTestSite()
self.buildTestSite()
if self.server:
self.startServer()
if self.want_config:
self.config = Config()
def tearDown(self):
if self.server:
self.stopServer()
self.removeTestSite()
self.restoreenv()
# server support
# ==============
server = False # Override to True if your subclass needs a server
def startServer(self):
if len(asyncore.socket_map) != 1:
# Let sockets die off.
# TODO tests should be more careful to clear the socket map.
asyncore.poll(0.1)
self.orig_map_size = len(asyncore.socket_map)
#self.hook_asyncore_error()
config = Config(opts)
self._server = Server(config, threads=4)
self._server.accept_connections()
self.port = self._server.socket.getsockname()[1]
self.run_loop = 1
self.counter = 0
self.thread_started = threading.Event()
self.thread = threading.Thread(target=self.loop)
self.thread.setDaemon(True)
self.thread.start()
self.thread_started.wait(10.0)
self.assert_(self.thread_started.isSet())
def stopServer(self):
self.run_loop = 0
self.thread.join()
td.shutdown()
self._server.close()
# Make sure all sockets get closed by asyncore normally.
timeout = time.time() + 5
while 1:
if len(asyncore.socket_map) == self.orig_map_size:
# Clean!
break
if time.time() >= timeout:
self.fail('Leaked a socket: %s' % `asyncore.socket_map`)
asyncore.poll(0.1)
#self.unhook_asyncore_error()
def loop(self):
self.thread_started.set()
while self.run_loop:
self.counter = self.counter + 1
asyncore.poll(0.1)
# environment
# ===========
def scrubenv(self):
save = {}
for opt in Config.options:
envvar = 'HTTPY_%s' % opt.upper()
if os.environ.has_key(envvar):
save[envvar] = os.environ[envvar]
del os.environ[envvar]
self.env = save
def restoreenv(self):
for k, v in self.env.items():
os.environ[k] = v
self.env = {}
# test site
# =========
# testsite is a list of strings and tuples. If a string, it is interpreted
# as a path to a directory that should be created. If a tuple, the first
# element is a path to a file, the second is the contents of the file.
# We do it this way to ease cross-platform testing.
#
# siteroot is the filesystem path under which to create the test site.
siteroot = 'root'
testsite = []
def buildTestSite(self):
"""Build the site described in self.testsite
"""
os.mkdir(self.siteroot)
for item in self.testsite:
if isinstance(item, basestring):
path = self.convert_path(item.lstrip('/'))
path = os.sep.join([self.siteroot, path])
os.mkdir(path)
elif isinstance(item, tuple):
filepath, contents = item
path = self.convert_path(filepath.lstrip('/'))
path = os.sep.join([self.siteroot, path])
file(path, 'w').write(contents)
def removeTestSite(self):
if os.path.isfile('httpy.conf'):
os.remove('httpy.conf')
if not os.path.isdir(self.siteroot):
return
for root, dirs, files in os.walk(self.siteroot, topdown=False):
for name in dirs:
os.rmdir(os.path.join(root, name))
for name in files:
os.remove(os.path.join(root, name))
os.rmdir(self.siteroot)
def convert_path(self, path):
"""Given a Unix path, convert it for the current platform.
"""
return os.sep.join(path.split('/'))
def convert_paths(self, paths):
"""Given a tuple of Unix paths, convert them for the current platform.
"""
return tuple([self.convert_path(p) for p in paths])
# utils
# =====
@staticmethod
def neuter_traceback(tb):
"""Given a traceback, return just the system-independent lines.
"""
tb_list = tb.split(os.linesep)
if not tb_list[-1]:
tb_list = tb_list[:-1]
neutered = []
for i in range(0,len(tb_list),2):
neutered.append(tb_list[i])
neutered.append(tb_list[-1])
return os.linesep.join(neutered)
@staticmethod
def dict2tuple(d):
return tuple(sorted(d.iteritems()))
@staticmethod
def make_request(uri, headers=None, Zope=False):
if headers is None:
headers = {}
request = ZopeRequest()
request.received("GET %s HTTP/1.1\r\n" % uri)
for header in headers.items():
request.received("%s: %s\r\n" % header)
request.received('\r\n')
if Zope:
return request
else:
return Request(request)
|
[
"chad@zetaweb.com"
] |
chad@zetaweb.com
|
861bd0f912326d66adf883ca271ce7af6319eb44
|
21906e3edd0cebc04a43f198fce4946c4cf3cf4f
|
/main_opencv.py
|
842ea1a4595658042f44649009e93cde77649e9a
|
[] |
no_license
|
chincherpa/slit_scan_image
|
f675a070b56b9f7b5f26d2d6fb53d11e827f721a
|
f5868d43296467f72ea33754a33f21640d75b1bf
|
refs/heads/master
| 2020-06-29T21:57:47.557903
| 2019-08-05T10:56:15
| 2019-08-05T10:56:15
| 200,635,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
import os
import sys
import numpy as np
import cv2
this_path = os.path.dirname(os.path.realpath(__file__))
filename = 'big_buck_bunny_720p_5mb.mp4'
path_to_file = os.path.join(this_path, filename)
output_filename = os.path.splitext(os.path.basename(path_to_file))[0] + '.png'
clip = cv2.VideoCapture(path_to_file)
first_frame = clip.read()
height, width, dpth = first_frame[1].shape
slitwidth = 1
slitpoint = width // 2
# np.zeros is how we generate an empty ndarray
img = np.zeros((height, 1, dpth), dtype='uint8')
while True:
frame = clip.read()
if frame[0] is False:
break
frame = np.array(frame[1])
slit = frame[:,slitpoint:slitpoint+slitwidth,:]
img = np.hstack((img, slit))
cv2.imshow("Frames", img)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
output = cv2.imwrite(os.path.join(this_path, output_filename), img)
clip.release()
|
[
"accounts@mail.de"
] |
accounts@mail.de
|
6651590162295a4127549a42719282d946ad2af4
|
a679c7624dd97779858bb7695b9e113bde09b6c6
|
/Python/Medium/firstNonRepeatedChar.py
|
cb6b0102b768d492ec5a2e63051bc8e09b366610
|
[] |
no_license
|
lgminh/Challenges
|
8f2fe24bae97a0343a6ccfa20ffcfa95b92469e7
|
1da5c2b7b49f36f2e8d8786ed54834e9af34086c
|
refs/heads/master
| 2021-10-29T01:34:40.692029
| 2019-04-25T09:10:41
| 2019-04-25T09:10:41
| 75,462,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
from string import ascii_lowercase
def firstNonRepeatedChar(s):
x = {}
for i in ascii_lowercase:
x[i] = []
for idx, c in enumerate(s):
x[c].append(idx)
for k,v in x.items():
if len(v) != 1:
del x[k]
print min(x.values())
for k,v in x.items():
if min(x.values())[0] == v[0]:
return k
if __name__ == '__main__':
print firstNonRepeatedChar('sdadsaasuwqf')
|
[
"minhlg@ahamove.com"
] |
minhlg@ahamove.com
|
41b8a35c5311c10f292a99c2ef0c63c9c5713fa9
|
0a5aeb6d170e14fc53d07c0523d1b995db1fd341
|
/Lab 7/file sequential.py
|
2f54749f2e24d636a6a30e96c47e57cb0177c025
|
[] |
no_license
|
iweyy/WIA2004-Operating-Systems
|
f664f8b9a32654de7ab1887131410dd69475edca
|
3106393626ee05171637404cee68fc4e8c2acee2
|
refs/heads/main
| 2023-08-08T14:26:11.961866
| 2021-06-01T15:55:43
| 2021-06-01T15:55:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
maximum = 50
files = [0]*maximum
repeat = 1
while repeat == 1:
start = int(input (f"Enter the starting block of the files (0-{maximum-1}): "))
while start<0 or start>=maximum:
if start>=maximum:
print ("Exceed maximum number of file")
if start<0:
print ("Cannot be a negative number")
start = int(input ("Enter the starting block of the files: "))
length = int(input ("Enter the length of the files: "))
while length<0 or length+start>maximum:
if length+start>maximum:
print ("Exceed maximum number of file")
if length<0:
print ("Cannot be less of equal; to 0")
length = int(input ("Enter the length of the files: "))
count = 0
for i in range (length):
if files[start+i] == 0:
count += 1
if count == length:
for i in range (length):
files[start+i] = 1
print (f"files[{start+i}] = 1")
print("The file is allocated to the disk")
else:
print("The file is not allocated to the disk")
repeat = 3
while repeat == 3:
ans = input("Do you want to enter more files? (Yes/No): ")
if (ans.lower() == "yes"):
repeat = 1
elif (ans.lower() == "no"):
repeat = 0
else:
print("Invalid answer.")
repeat = 3
print("Files Allocated are :")
for i in range (maximum):
print (f"files[{i}] = {files[i]}")
|
[
"megathilmi49@gmail.com"
] |
megathilmi49@gmail.com
|
1b498a36f1e5cddb3c338c90afdb44c34630961f
|
794543da14ede49acde50acfac76681e87f31673
|
/src/training_scripts/sprp_onmt_copy_512/validate.py
|
58e8a364d3f0da808802f433ecd18845a2bb7706
|
[] |
no_license
|
roeeaharoni/sprp-acl2018
|
0f404dd27e5ea09f427df920e3d47b0d45d6c5d7
|
2d215999cd72cc0f59d7a6733e1b1f1d7ea54777
|
refs/heads/master
| 2020-03-07T23:33:59.874847
| 2019-02-15T10:06:01
| 2019-02-15T10:06:01
| 127,784,457
| 16
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
from src import evaluate
import os
def main():
model_name = 'sprp_onmt_copy_512'
base_path = '/home/nlp/aharonr6'
moses_path = base_path + '/git/mosesdecoder'
test_dirs_path_prefix = base_path + '/git/Split-and-Rephrase/evaluation-directories/validation/'
# the file containing the ids of the test sentences
test_sent_ids_path = base_path + '/git/Split-and-Rephrase/benchmark/complex-sents/validation.id'
# a directory that will hold single sentence files for the hypotheses
test_hypothesis_sents_dir = base_path + '/git/phrasing/models/{}/validation_complex_output_sents/'.format(model_name)
if not os.path.exists(test_hypothesis_sents_dir):
os.mkdir(test_hypothesis_sents_dir)
test_target = base_path + '/git/phrasing/models/{}/validation.complex.unique.output'.format(model_name)
print 'starting multi-ref evaluation...'
avg_bleu, avg_tokens_per_sent, avg_simple_sents_per_complex = evaluate.evaluate_avg_concat_bleu(moses_path,
test_sent_ids_path,
test_hypothesis_sents_dir,
test_target,
test_dirs_path_prefix,
splitter='. ')
print 'avg BLEU:{} avg tokens/sent: {} avg split: {}'.format(avg_bleu, avg_tokens_per_sent,
avg_simple_sents_per_complex)
return
if __name__ == '__main__':
main()
|
[
"roee.aharoni@gmail.com"
] |
roee.aharoni@gmail.com
|
3c0d2f6529512fe74b919bbd685a3ca9f69943c6
|
00c56919bc5919b2f728b3d631ad4b2d2fdb14fa
|
/missingvalues/Student/missingvalues_student.py
|
29efce54c92d59c33e70bcedcbc0eb810628f351
|
[] |
no_license
|
Tcintra/Missing-Values-and-Feature-Engineering
|
b108299176826a1124af52bd0102edcc11ed362b
|
f0b751253ce921b3e22d9310413a204517dfa212
|
refs/heads/master
| 2020-06-12T15:15:36.480988
| 2019-07-11T23:28:36
| 2019-07-11T23:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
"""
Author : Thomas Cintra and Yun Zhang
Class : CS 181R
Date : 2019 June 20
Description : Credit Score analysis
Name :
Homework 3
"""
# seaborn module
import seaborn as sns
# python modules
import os
# numpy module
import numpy as np
# pandas module
import pandas as pd
# matplotlib module
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
# import scikit learn module
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
from sklearn.dummy import DummyClassifier
from sklearn.preprocessing import LabelEncoder
path = os.path.join("..", "Data")
def main():
print()
|
[
"noreply@github.com"
] |
Tcintra.noreply@github.com
|
25efd543c95c0f31ed446fd5997a5882b21497e1
|
b992ccd52327348af6d647b078ce43a356be5ff4
|
/ScrapyTest/ScrapyTest/pipelines.py
|
3ea8bba409b53ba8500899bad694824cfe4d854e
|
[] |
no_license
|
moritzwilksch/ScrapyProjects
|
b22ddd259484bf3604bba64eb64df00f2fb2443e
|
a29a07b760defd398a44048cb92e0d4b7f623d9c
|
refs/heads/main
| 2023-01-12T17:01:54.746172
| 2020-11-22T13:55:12
| 2020-11-22T13:55:12
| 315,046,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exporters import JsonItemExporter
import re
class ScrapytestPipeline:
def open_spider(self, spider):
self.file = open("exportdata.json", 'w+b')
self.exporter = JsonItemExporter(self.file)
self.exporter.start_exporting()
def close_spider(self, spider):
self.exporter.finish_exporting()
self.file.close()
def process_item(self, item, spider):
# dirty_preis = item['preis']
# match = re.match(r'\d+\.\d+', dirty_preis)
# item['preis'] = match.group().replace(".", "") if match else "0"
self.exporter.export_item(item)
return item
|
[
"moritzwilksch@gmail.com"
] |
moritzwilksch@gmail.com
|
e074302c25447ad18fcf0611616ce9b72342db7e
|
276c023c6c051611724eca97595511d422152f4b
|
/tests/test_server.py
|
22863dfb74ece7697161d765f8d914eaec4cdb39
|
[
"MIT"
] |
permissive
|
perrinjerome/cmake-language-server
|
2aa1a03ee2a4b1df8acdee953da1fb7b3270c36d
|
66af586b2aa4da1a21b54e566f81d250acb0a848
|
refs/heads/master
| 2020-12-07T08:15:00.568358
| 2020-01-03T17:17:26
| 2020-01-03T17:17:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,732
|
py
|
from concurrent import futures
from pathlib import Path
from typing import Optional
from pygls.features import (COMPLETION, FORMATTING, HOVER, INITIALIZE,
TEXT_DOCUMENT_DID_OPEN)
from pygls.server import LanguageServer
from pygls.types import (CompletionContext, CompletionParams,
CompletionTriggerKind, DidOpenTextDocumentParams,
DocumentFormattingParams, FormattingOptions,
InitializeParams, Position, TextDocumentIdentifier,
TextDocumentItem, TextDocumentPositionParams)
CALL_TIMEOUT = 2
def _init(client: LanguageServer, root: Path):
retry = 3
while retry > 0:
try:
client.lsp.send_request(
INITIALIZE,
InitializeParams(
process_id=1234, root_uri=root.as_uri(),
capabilities=None)).result(timeout=CALL_TIMEOUT)
except futures.TimeoutError:
retry -= 1
else:
break
def _open(client: LanguageServer, path: Path, text: Optional[str] = None):
if text is None:
with open(path) as fp:
text = fp.read()
client.lsp.notify(
TEXT_DOCUMENT_DID_OPEN,
DidOpenTextDocumentParams(
TextDocumentItem(path.as_uri(), 'cmake', 1, text)))
def test_initialize(client_server, datadir):
client, server = client_server
assert server._api is None
_init(client, datadir)
assert server._api is not None
def test_completions_invoked(client_server, datadir):
client, server = client_server
_init(client, datadir)
path = datadir / 'CMakeLists.txt'
_open(client, path, 'projec')
response = client.lsp.send_request(
COMPLETION,
CompletionParams(TextDocumentIdentifier(path.as_uri()), Position(
0, 6), CompletionContext(
CompletionTriggerKind.Invoked))).result(timeout=CALL_TIMEOUT)
item = next(filter(lambda x: x.label == 'project', response.items), None)
assert item is not None
assert '<PROJECT-NAME>' in item.documentation
def test_completions_triggercharacter_variable(client_server, datadir):
client, server = client_server
_init(client, datadir)
path = datadir / 'CMakeLists.txt'
_open(client, path, '${')
response = client.lsp.send_request(
COMPLETION,
CompletionParams(
TextDocumentIdentifier(path.as_uri()), Position(0, 2),
CompletionContext(CompletionTriggerKind.TriggerCharacter,
'{'))).result(timeout=CALL_TIMEOUT)
assert 'PROJECT_VERSION' in [x.label for x in response.items]
def test_completions_triggercharacter_module(client_server, datadir):
client, server = client_server
_init(client, datadir)
path = datadir / 'CMakeLists.txt'
_open(client, path, 'include(')
response = client.lsp.send_request(
COMPLETION,
CompletionParams(
TextDocumentIdentifier(path.as_uri()), Position(0, 8),
CompletionContext(CompletionTriggerKind.TriggerCharacter,
'('))).result(timeout=CALL_TIMEOUT)
assert 'GoogleTest' in [x.label for x in response.items]
def test_completions_triggercharacter_package(client_server, datadir):
client, server = client_server
_init(client, datadir)
path = datadir / 'CMakeLists.txt'
_open(client, path, 'find_package(')
response = client.lsp.send_request(
COMPLETION,
CompletionParams(
TextDocumentIdentifier(path.as_uri()), Position(0, 13),
CompletionContext(CompletionTriggerKind.TriggerCharacter,
'('))).result(timeout=CALL_TIMEOUT)
assert 'Boost' in [x.label for x in response.items]
def test_formatting(client_server, datadir):
client, server = client_server
_init(client, datadir)
path = datadir / 'CMakeLists.txt'
_open(client, path, 'a ( b c ) ')
response = client.lsp.send_request(
FORMATTING,
DocumentFormattingParams(TextDocumentIdentifier(path.as_uri()),
FormattingOptions(
2, True))).result(timeout=CALL_TIMEOUT)
assert response[0].newText == 'a(b c)\n'
def test_hover(client_server, datadir):
client, server = client_server
_init(client, datadir)
path = datadir / 'CMakeLists.txt'
_open(client, path, 'project()')
response = client.lsp.send_request(
HOVER,
TextDocumentPositionParams(TextDocumentIdentifier(path.as_uri()),
Position())).result(timeout=CALL_TIMEOUT)
assert '<PROJECT-NAME>' in response.contents.value
|
[
"regen100@users.noreply.github.com"
] |
regen100@users.noreply.github.com
|
bce22db2adda5234a705ff0d1fb719565b3bddd8
|
9692a20a1e7a224a72785e4495f31421639b9f3b
|
/frex/pipeline_stages/filters/candidate_filterer.py
|
2d79e3b31e1ec3776b5978e1f52488af2826dfdb
|
[] |
no_license
|
solashirai/FREx
|
6b0cb040930761a0e269f4591d7dde36e3f636d1
|
36ad09a0cb0020661ee990c7800bafd110e2ec04
|
refs/heads/master
| 2023-08-14T08:49:49.270281
| 2021-09-29T14:58:23
| 2021-09-29T14:58:23
| 291,760,109
| 0
| 0
| null | 2021-09-24T22:41:19
| 2020-08-31T15:57:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,535
|
py
|
from abc import abstractmethod
from typing import Generator, Optional, Any
from frex.models import Explanation, Candidate
from frex.pipeline_stages import PipelineStage
class CandidateFilterer(PipelineStage):
"""
CandidateFilterer is a PipelineStage that determines whether input candidates should be removed from consideration
or continue on through the FREx Pipeline.
A new CandidateFilterer class can be minimally defined by creating a new subclass of CandidateFilterer and
defining the filter() function.
"""
def __init__(
self, *, filter_explanation: Explanation, filter_score: float = 0, **kwargs
):
"""
:param filter_explanation: The explanation to add to the Candidate if it passes the filter function.
:param filter_score: The score to apply to the candidate if it passes the filter. This is 0 by default.
"""
self.filter_explanation = filter_explanation
self.filter_score = filter_score
@abstractmethod
def filter(self, *, candidate: Candidate) -> bool:
"""
A filter to determine whether or not the current candidate is suitable to move on through the Pipeline.
This function should return True when the candidate should be removed and False when it should continue on.
:param candidate: A domain-specific candidate to filter
:return: True if the candidate should be removed, False if it should be kept and passed on to later stages.
"""
pass
def __call__(
self, *, candidates: Generator[Candidate, None, None], context: Any
) -> Generator[Candidate, None, None]:
"""
        For each candidate being yielded by the Generator, apply a filtering function to decide whether or not
to yield the candidate forward to the next PipelineStage.
:param candidates: A Generator yielding candidates. In the setup of a FREx Pipeline, this is typically another
PipelineStage that is yielding candidates into the next stage.
:param context: The current context being used to execute the Pipeline.
:return: A Generator, yielding updated Candidate objects that have not been caught by this stage's
filtering function.
"""
for candidate in candidates:
if not self.filter(candidate=candidate):
candidate.applied_explanations.append(self.filter_explanation)
candidate.applied_scores.append(self.filter_score)
yield candidate
|
[
"solashakashirai@gmail.com"
] |
solashakashirai@gmail.com
|
c7fa88552bf8e2d09b066cfbefe9f1deb2738348
|
b4b5b755eb767c8b8224df7d05f94ab49e9eae1d
|
/lib/model/test.py
|
635fdbb590850b0c96dd8f9556abb5e419b7d099
|
[
"MIT"
] |
permissive
|
107618024/Windows-Faster-RCNN-TensorFlow
|
8aa18d96df569251eeebec7c877bc2904e590035
|
95e73edffd0c0a556a2de8b832db53509d3db1f9
|
refs/heads/master
| 2020-06-22T19:00:17.663216
| 2019-07-09T17:06:49
| 2019-07-09T17:06:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,246
|
py
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
from utils.timer import Timer
from utils.blob import im_list_to_blob
from model.config import cfg, get_output_dir
from model.bbox_transform import clip_boxes, bbox_transform_inv
from model.nms_wrapper import nms
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im):
blobs, im_scales = _get_blobs(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs['data']
blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)
_, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])
boxes = rois[:, 1:5] / im_scales[0]
scores = np.reshape(scores, [scores.shape[0], -1])
bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
return scores, pred_boxes
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
for cls_ind in range(num_classes):
for im_ind in range(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
inds = np.where((x2 > x1) & (y2 > y1))[0]
dets = dets[inds,:]
if dets == []:
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.3):
np.random.seed(cfg.RNG_SEED)
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
output_dir = get_output_dir(imdb, weights_filename)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
for i in range(num_images):
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(sess, net, im)
_t['im_detect'].toc()
_t['misc'].tic()
# skip j = 0, because it's the background class
for j in range(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
all_boxes[j][i] = cls_dets
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
|
[
"32533059+gwspotex@users.noreply.github.com"
] |
32533059+gwspotex@users.noreply.github.com
|
e827ef9de12fa0211e6677aa82084594cd16d444
|
6b76819d395bb76591fc12e9de83161b37d61672
|
/woot/apps/expt/management/commands/step02_zmod.py
|
f30ef4f4d650e4b9e4688253eed2cfb7feb067a9
|
[] |
no_license
|
NicholasPiano/img
|
8426530512ee80a4ed746874c4219b1de56acbfd
|
3a91c65c3c9680ba7ed7c94308a721dd0cff9ad5
|
refs/heads/master
| 2020-05-18T15:48:50.566974
| 2015-07-16T00:01:17
| 2015-07-16T00:01:17
| 38,632,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
# expt.command: step02_zmod
# django
from django.core.management.base import BaseCommand, CommandError
# local
from apps.img.models import Composite
from apps.expt.util import *
# util
from optparse import make_option
spacer = ' ' * 20
### Command
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--expt', # option that will appear in cmd
action='store', # no idea
dest='expt', # refer to this in options variable
default='050714-test', # some default
help='Name of the experiment to import' # who cares
),
make_option('--series', # option that will appear in cmd
action='store', # no idea
dest='series', # refer to this in options variable
default='13', # some default
help='Name of the series' # who cares
),
)
args = ''
help = ''
def handle(self, *args, **options):
'''
1. What does this script do?
> Make images that can be recognized by CellProfiler by multiplying smoothed GFP with the flattened Brightfield
2. What data structures are input?
> Channel
3. What data structures are output?
> Channel
4. Is this stage repeated/one-time?
> One-time
Steps:
1. Select composite
2. Call pmod mod on composite
3. Run
'''
# 1. select composite
composite = Composite.objects.get(experiment__name=options['expt'], series__name=options['series'])
# 2. Call pmod mod
mod = composite.mods.create(id_token=generate_id_token('img', 'Mod'), algorithm='mod_zmod')
# 3. Run mod
print('step02 | processing mod_zmod...', end='\r')
mod.run()
print('step02 | processing mod_zmod... done.{}'.format(spacer))
|
[
"nicholas.d.piano@gmail.com"
] |
nicholas.d.piano@gmail.com
|
a65b97abab28068022d29611ca85cf695c6bb4c6
|
6e8eef0a0523382c3505e8f1d8f390e1a10afd07
|
/scraper/ku_scraper.py
|
8682d97d937d478bdbee43b21ec5371937887e38
|
[
"MIT"
] |
permissive
|
israfelsr/course-summarization
|
a81edcfb23ca17edf4fdc0e01255f0e92887aeec
|
0a3ec432d3690167f036dc5b01625908f37a1da3
|
refs/heads/master
| 2023-08-22T22:47:35.500127
| 2021-09-23T11:41:39
| 2021-09-23T11:41:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,995
|
py
|
# Python Imports
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import time
from tqdm import tqdm, trange
# Local Imports
from scraper.utils import *
class KuScraper():
def __init__(self):
self.masters_url = 'https://studies.ku.dk/masters/programmes/'
self.university_name = 'University of Copenhagen'
self.university_id = 4
def update_programs_info(self):
browser = connect_to_browser()
browser.get(self.masters_url)
cookies_reject = browser.find_element_by_class_name("btn.btn-default.btn-lg")
browser.execute_script("$(arguments[0]).click();", cookies_reject)
boxes = browser.find_elements_by_class_name("boxlink")
programs_list = []
programs_urls = []
for box in tqdm(boxes, desc='Scraping programs:'):
time.sleep(0.5)
programs_list.append(box.text.lower())
programs_urls.append(box.get_attribute("href"))
grade = ["msc"]*len(programs_list)
data = {'program': programs_list, 'grade': grade, 'url': programs_urls}
self.programs_info = pd.DataFrame(data=data)
browser.quit()
def extract_courses_with_link(self, browser, courses_list, courses_url):
links = browser.find_elements_by_tag_name("a")
for link in links:
url = link.get_attribute("href")
name = link.text.lower()
try:
if _is_course(name, url):
if (name not in courses_list) and (url not in courses_url):
courses_list.append(name)
courses_url.append(url)
except:
continue
return courses_list, courses_url
def extract_specialisation(self, browser, program, program_url, courses_list,
courses_url, special_layouts):
banned = ["global health", "it and cognition", "anthropology"]
if program in special_layouts:
specialisations = browser.find_elements_by_css_selector("li.active a")
elif program not in banned:
browser.get(program_url + 'specialisations/')
specialisations = browser.find_elements_by_css_selector("li.active li a")
else:
if program == "global health":
courses_list, courses_url = self.extract_courses_with_link(browser,
courses_list,
courses_url)
return courses_list, courses_url
for specialisation in specialisations:
url = specialisation.get_attribute("href")
subbrowser = connect_to_browser()
subbrowser.get(url)
courses_list, courses_url = self.extract_courses_with_link(subbrowser,
courses_list,
courses_url)
subbrowser.quit()
return courses_list, courses_url
def update_programs_courses(self):
self.programs_courses = pd.DataFrame(columns=['program', 'course', 'url'])
browser = connect_to_browser()
browser.get(self.masters_url)
cookies_reject = browser.find_element_by_class_name("btn.btn-default.btn-lg")
browser.execute_script("$(arguments[0]).click();", cookies_reject)
special_layouts = ["medicinal chemistry", "bioinformatics",
"pharmaceutical sciences"]
for i in trange(len(self.programs_info), desc='Scraping programs courses'):
program, _, program_url = self.programs_info.iloc[i]
program_url += 'programme-structure/'
browser.get(program_url)
courses_list = []
courses_url = []
if _has_specialisations(browser) or program in special_layouts:
courses_list, courses_url = self.extract_specialisation(browser, program,
program_url,
courses_list,
courses_url,
special_layouts)
else:
courses_list, courses_url = self.extract_courses_with_link(browser,
courses_list,
courses_url)
program_list = [program] * len(courses_list)
df = pd.DataFrame(data={'program':program_list, 'course':courses_list,
'url': courses_url})
self.programs_courses = self.programs_courses.append(df)
browser.quit()
# TODO: extend to the courses with only a table but no links
# TODO: delete programs without courses in the programs_info (?)
def extract_course_description(self, browser):
course_html = BeautifulSoup(browser.page_source, 'html.parser')
content = course_html.find("div", {"id": "course-content"})
description = course_html.find("div", {"id": "course-description"})
text = content.find_all(text=True)
text += description.find_all(text=True)
output = ''
for txt in text:
output += '{} '.format(txt)
return output
def update_courses_info(self):
self.courses_info = pd.DataFrame(columns=['course', 'program', 'description'])
browser = connect_to_browser()
browser.get(self.masters_url)
cookies_reject = browser.find_element_by_class_name("btn.btn-default.btn-lg")
browser.execute_script("$(arguments[0]).click();", cookies_reject)
for i in trange(len(self.programs_courses), desc='Scraping courses info'):
program_name, course_name, course_url = self.programs_courses.iloc[i]
browser.get(course_url)
df = self.courses_info.loc[self.courses_info.course == course_name]
if not df.empty: continue
try:
output = self.extract_course_description(browser)
df = pd.DataFrame(data={'course':[course_name], 'program':[program_name],
'description':[output]})
self.courses_info = self.courses_info.append(df)
except:
try:
options = browser.find_elements_by_css_selector("ul.list-unstyled a")
course_url = options[-1].get_attribute("href")
browser.get(course_url)
output = self.extract_course_description(browser)
df = pd.DataFrame(data={'course':[course_name], 'program':[program_name],
'description':[output]})
self.courses_info = self.courses_info.append(df)
except:
self.programs_courses.loc[(self.programs_courses.program == program_name) \
& (self.programs_courses.course == course_name),
'url'] = np.nan
print("Problem adding course:", course_url)
continue
browser.quit()
# AUTO SCRAPER
def update(self):
work_time = time.localtime()
print('Starting update at {}:{}'.format(work_time.tm_hour, work_time.tm_min))
print('This will take several minutes')
self.update_programs_info()
print('1/3 Updates Finished')
self.update_programs_courses()
print('2/3 Updates Finished')
self.update_courses_info()
print('3/3 Updates Finished')
work_time = time.localtime()
print('Finished at {}:{}'.format(work_time.tm_hour, work_time.tm_min))
def export_data(self):
programs_info = self.programs_info.drop(labels='url',
axis=1).dropna().reset_index(drop=True)
programs_info.insert(1, 'university_id', self.university_id)
programs_courses = self.programs_courses.drop(labels='url',
axis=1).dropna().reset_index(drop=True)
programs_courses.insert(2, 'university_id', self.university_id)
courses_info = self.courses_info.reset_index(drop=True)
courses_info.insert(2, 'university_id', self.university_id)
data = {'programs_info': programs_info,
'courses_info': courses_info,
'programs_courses': programs_courses}
return data
# KU.UTILS
def _is_course(name, url):
banned = ['see the course catalogue', 'course catalog', 'see the courses', '',
'the course catalogue', 'thesis 45 ects', 'master thesis project',
"master’s thesis 30 ects", "master's thesis", "master’s thesis", " ",
"medicinal chemistry courses in the ucph course catalogue"]
if "kurser.ku" in url:
if url != "https://kurser.ku.dk/":
if name not in banned:
return True
return False
def _has_specialisations(browser):
left_menu = browser.find_elements_by_css_selector("ul.nav#leftmenu a")
for item in left_menu:
if item.text == 'Specialisations':
return True
body_links = browser.find_elements_by_css_selector("div.col-xs-12.col-sm-8.col-md-6.main-content a")
for link in body_links:
try:
if "specialisation" in link.get_attribute("href"):
return True
except:
continue
return False
|
[
"israfel.sr@gmail.com"
] |
israfel.sr@gmail.com
|
8268a59d42801e2ee2cd8a1c58941a45940bc16a
|
f6a3de837ac401c464ada6d980b084425ef45791
|
/alexa-iot/device-broker.py
|
df6cfdf87a18510e1d4b32847420767a3692e0d9
|
[] |
no_license
|
johanlu4st/python-alexa-skills
|
4f9a0bf39c0d94cac9ef3318b9e094f2da275560
|
e71f5d4e1f49469dd9321fafbc166d2e65509ef8
|
refs/heads/master
| 2020-09-14T09:54:49.013271
| 2018-05-11T11:22:34
| 2018-05-11T11:22:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
from flask import Flask, render_template
from flask_mqtt import Mqtt
from pymongo import MongoClient
import datetime
app = Flask(__name__)
client = MongoClient('mongodb://pyiot:password123456@ds133166.mlab.com:33166/pyiot-stackup')
db = client['pyiot-stackup']
app = Flask(__name__)
app.config['MQTT_BROKER_URL'] = 'm12.cloudmqtt.com'
app.config['MQTT_BROKER_PORT'] = 13743
app.config['MQTT_USERNAME'] = 'kqcqutsu'
app.config['MQTT_PASSWORD'] = 'MP86zXZ6Zkds'
app.config['MQTT_REFRESH_TIME'] = 1.0 # refresh time in seconds
mqtt = Mqtt(app)
mqtt.subscribe('room/temp')
@mqtt.on_message()
def handle_mqtt_message(client, userdata, message):
data = dict(
topic=message.topic,
payload=message.payload.decode()
)
print(data['topic'])
print(data['payload'])
tempValue = {
"value" : data['payload'],
"modified-date" : datetime.datetime.utcnow()
}
temp = db.temperature
temperValue_id = temp.insert_one(tempValue).inserted_id
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5050, debug=False)
|
[
"bunnyppl@gmail.com"
] |
bunnyppl@gmail.com
|
3931ee5baf8bc1af8cebd1dfcacdfcb03668a43e
|
6cf909646199c6aa4e7a32a2f804c750013b4d2d
|
/property crriter.py
|
7343516c2d36acb6d67f5eb1d26bb246d3fe478a
|
[] |
no_license
|
LJ0401/pizzapy
|
3613dbba44e243fcb52672ee15389d07e4f7a7f5
|
21f76af524a822a841a7ab7ee385263ce4ad49b0
|
refs/heads/master
| 2021-01-22T21:37:22.828565
| 2017-04-13T14:21:28
| 2017-04-13T14:21:28
| 85,444,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
#Property Critter
#Demonstrates properties
class Critter(object):
"""A virtual pet"""
    def __init__(self, name):
print("A new critter has been born!")
self._name = name
@property
def name(self):
return self._name
    #Access the property
def talk(self):
print("\nHi, I'm", self.name)
#Main program
crit = Critter("Poochie")
crit.talk()
#Give the critter a new name
print("\nAttempting to change my critter's name to Randoph...")
crit.name = "Randoph"
print("My critter's name is:", end=" ")
print(crit.name)
#Change the name to the empty string
print("\nAttempting to change my critter's name to the empty string.")
crit.name =""
print("My critter's name is:", end=" ")
print(crit.name)
input("\n\nPress the enter key to exit.")
|
[
"1351507858@qq.com"
] |
1351507858@qq.com
|
1515e9a592a06f8c05dc0dec9c9fd9eb17031857
|
15563e9aff20ceeb813bc89f02b7832f5fef8a89
|
/tests/test_filerecorder.py
|
988a9258575b12423828e9d590bc5d36d8a43e80
|
[
"BSD-3-Clause"
] |
permissive
|
openxc/openxc-python
|
5cc9a3b6ddf9ce7ecf6bca3163c306c0d5f4fbd9
|
5341180fea6c364027dedc9bc4c8027b2831325f
|
refs/heads/master
| 2022-05-12T22:26:54.176224
| 2021-08-11T14:35:16
| 2021-08-11T14:35:16
| 6,508,031
| 91
| 33
|
BSD-3-Clause
| 2022-03-24T11:23:19
| 2012-11-02T15:20:10
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
import unittest
from openxc.sinks import FileRecorderSink
class FileRecorderSinkTest(unittest.TestCase):
def test_create(self):
FileRecorderSink()
|
[
"chris.peplin@rhubarbtech.com"
] |
chris.peplin@rhubarbtech.com
|
3da95531c372cce9a2250fcbe7c834b331cfe810
|
22f4146b560571bfc646b7f0b500a4540f0db936
|
/Exercises/hand.py
|
3849da666c9b6fa084d014601ac091b1b7fdd5e7
|
[] |
no_license
|
xFluke/MITx-6.00.1x
|
86f3801593ce0dadfd468b039731b70c9e23a660
|
a973bddeae9312a936f5989bb124728b044f34a6
|
refs/heads/master
| 2020-03-28T16:36:08.046350
| 2018-09-13T23:56:34
| 2018-09-13T23:56:34
| 148,711,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,136
|
py
|
import random
class Hand(object):
def __init__(self, n):
'''
Initialize a Hand.
n: integer, the size of the hand.
'''
assert type(n) == int
self.HAND_SIZE = n
self.VOWELS = 'aeiou'
self.CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
# Deal a new hand
self.dealNewHand()
def dealNewHand(self):
'''
Deals a new hand, and sets the hand attribute to the new hand.
'''
# Set self.hand to a new, empty dictionary
self.hand = {}
# Build the hand
numVowels = self.HAND_SIZE // 3
for i in range(numVowels):
x = self.VOWELS[random.randrange(0,len(self.VOWELS))]
self.hand[x] = self.hand.get(x, 0) + 1
for i in range(numVowels, self.HAND_SIZE):
x = self.CONSONANTS[random.randrange(0,len(self.CONSONANTS))]
self.hand[x] = self.hand.get(x, 0) + 1
def setDummyHand(self, handString):
'''
Allows you to set a dummy hand. Useful for testing your implementation.
handString: A string of letters you wish to be in the hand. Length of this
string must be equal to self.HAND_SIZE.
        This method sets the hand attribute to a dictionary
containing the letters of handString.
'''
assert len(handString) == self.HAND_SIZE, "Length of handString ({0}) must equal length of HAND_SIZE ({1})".format(len(handString), self.HAND_SIZE)
self.hand = {}
for char in handString:
self.hand[char] = self.hand.get(char, 0) + 1
def calculateLen(self):
'''
Calculate the length of the hand.
'''
ans = 0
for k in self.hand:
ans += self.hand[k]
return ans
def __str__(self):
'''
Display a string representation of the hand.
'''
output = ''
hand_keys = sorted(self.hand.keys())
for letter in hand_keys:
for j in range(self.hand[letter]):
output += letter
return output
def update(self, word):
"""
Does not assume that self.hand has all the letters in word.
Updates the hand: if self.hand does have all the letters to make
the word, modifies self.hand by using up the letters in the given word.
Returns True if the word was able to be made with the letter in
the hand; False otherwise.
word: string
returns: Boolean (if the word was or was not made)
"""
copy_Hand = self.hand.copy()
for letter in word:
if letter in copy_Hand:
copy_Hand[letter] = copy_Hand.get(letter) - 1
                if copy_Hand[letter] == 0:
                    # the letter is fully used up; another occurrence of it will
                    # now fail the membership check above and return False
                    del copy_Hand[letter]
else:
return False
self.hand = copy_Hand
return True
myHand = Hand(7)
print(myHand)
print(myHand.calculateLen())
myHand.setDummyHand('aazzmsp')
print(myHand)
print(myHand.calculateLen())
myHand.update('za')
print(myHand)
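# Illustrative extra check (not part of the original exercise): with the
# update() fix above, a word that needs more copies of a letter than the hand
# holds is rejected instead of being silently accepted.
print(myHand.update('zz'))  # only one 'z' is left at this point, so this prints False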
|
[
"noreply@github.com"
] |
xFluke.noreply@github.com
|
d393bbe033abd2b8272cc0170156ef42703866a4
|
12342d6bf6635bf2bfc734f3a8e9bcb40e8782ce
|
/day5_2017.py
|
5b129fa811622a09731c9bd23ae465913e23c384
|
[] |
no_license
|
ngocmtu/adventofcode
|
6d06d008eae4b07b303dcf814a2e2ba42d599909
|
cde11d15284faa0433e3a62fc69fca7c9b93aaba
|
refs/heads/master
| 2020-03-23T08:01:23.707800
| 2018-07-24T18:49:00
| 2018-07-24T18:49:00
| 141,303,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# s = [int(line) for line in open('input.txt','r').readlines()]
# s= [0,3,0,1,-3]
def solve_part1(s):
new_pos = 0
cur_pos = 0
count = 0
while True:
if new_pos >= len(s) or new_pos < 0:
return count
else:
count += 1
cur_pos = new_pos
step = s[cur_pos]
new_pos = cur_pos + step
s[cur_pos] += 1
def solve_part2(s):
new_pos = 0
cur_pos = 0
count = 0
while True:
if new_pos >= len(s) or new_pos < 0:
return count
else:
count += 1
cur_pos = new_pos
step = s[cur_pos]
new_pos = cur_pos + step
s[cur_pos] += 1 if s[cur_pos] <3 else -1
s = [0, 3, 0, 1, -3]  # sample input (see the commented-out lines above for the file-based version)
print(str(solve_part1(list(s))))  # pass a copy so part 2 sees the original offsets; sample answer: 5
print(str(solve_part2(list(s))))  # sample answer: 10
|
[
"noreply@github.com"
] |
ngocmtu.noreply@github.com
|
2ac108f270cf5ffa0bfbca7755b958d446b3a030
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/01_netCDF_extraction/merra902Combine/21-tideGauge.py
|
784ddb0d0f655471f76357e1f1df6c7540900599
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,374
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 21
y = 22
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
tg_name = str(tg)+"_"+tg_name;
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
16bf0ef9ec53acb6b4376b1146bb236b50565626
|
fddad101c7be2fcbc05131081e708f31948c002f
|
/329. Longest Increasing Path in a Matrix/answer_bfs.py
|
a9141a61f5be8c4c3d3ff273a059e79b03652077
|
[] |
no_license
|
LennyDuan/AlgorithmPython
|
a10c9278c676829ab5a284a618f6352414888061
|
523c11e8a5728168c4978c5a332e7e9bc4533ef7
|
refs/heads/master
| 2021-07-16T12:31:08.284846
| 2021-03-28T20:31:28
| 2021-03-28T20:31:28
| 244,040,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
def longestIncreasingPath(matrix) -> int:
    # Plain DFS from every cell. Correct, but exponential in the worst case
    # because cells are re-explored without memoisation.
    if not matrix:
        return 0
    res = 0
    rows, cols = len(matrix), len(matrix[0])
    directions = ((0, 1), (0, -1), (1, 0), (-1, 0))
    def traverse(i, j):
        best = 1
        for di, dj in directions:
            next_i, next_j = i + di, j + dj
            if 0 <= next_i < rows and 0 <= next_j < cols:
                if matrix[next_i][next_j] > matrix[i][j]:
                    best = max(best, 1 + traverse(next_i, next_j))
        return best
    for row in range(rows):
        for col in range(cols):
            res = max(traverse(row, col), res)
    return res
nums = [
    [3, 4, 5],
    [3, 2, 6],
    [2, 2, 1]
]
print(longestIncreasingPath(nums))  # 4 (the path 3 -> 4 -> 5 -> 6)
|
[
"hod8@aber.ac.uk"
] |
hod8@aber.ac.uk
|
abe3042622f37a3eb01601fb6fef3fa398f676a7
|
6438528fd3b380b1f5dcb94f4ccb18dc9add06e2
|
/py/size.py
|
fa1f0756b62d1a4ec6dbc5ac640d3b20c49d444f
|
[] |
no_license
|
JanStoeckmann/point_cloud_denoising
|
df4b6ce61095bbcd2a7c48485debef946654dacf
|
2da1a93b30d83541f8388df3cf609809bcb49fb5
|
refs/heads/main
| 2023-02-07T14:22:20.094419
| 2020-12-21T14:38:18
| 2020-12-21T14:38:18
| 322,682,380
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
#! /usr/bin/python
import sys
import numpy as np
import os
def load_pcd(input):
file1 = open(input, 'r')
Lines = file1.readlines()
cloud = []
for line in Lines:
if (line[0].isnumeric()) or (line[0] == "-"):
point = (line[0:-1]).split(" ", 2)
cloud.append([float(point[0]), float(point[1]), float(point[2])])
return cloud
def cloud_size(input):
    # load_pcd() already parses the file, so reuse it here
    cloud = load_pcd(input)
    return len(cloud)
|
[
"jansto@web.de"
] |
jansto@web.de
|
0588bf589dc53ee0422f074a5ff5c91ed6377dba
|
d8e0d76faf67f5f466aa72b5515b1e84f30f2750
|
/resources/spotipy/util.py
|
1ca2a057fd7fc8906f4f1923abdf7c1202422a46
|
[] |
no_license
|
NicolasHaeffner/spotimark
|
edf23a1a1ce7256dc8c5e2b6e73e3ad5c4f54441
|
cea4067f4aca0f78e1c996e5a72bbe3ae5eae66d
|
refs/heads/master
| 2021-05-13T16:41:10.508334
| 2018-01-09T10:07:05
| 2018-01-09T10:07:05
| 116,799,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,981
|
py
|
# shows a user's playlists (need to be authenticated via oauth)
from __future__ import print_function
from . import oauth2
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse as urlparse
import threading
from time import sleep
class TokenHandler(BaseHTTPRequestHandler):
global path
path = False
def do_GET(self):
global path
print("Just received a GET request")
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b'You may close the browser now!')
path = self.path
# parsed_path = urlparse(self.path)
# print('=====================================')
# print(parsed_path)
# try:
# params = dict([p.split('=') for p in parsed_path[4].split('&')])
# except:
# params = {}
return
def log_request(self, code=None, size=None):
pass
def log_message(self, format, *args):
print('Message')
def prompt_for_user_token(username, cachepath=None, scope=None, client_id=None,
client_secret=None, ip=None, port=None):
# redirect_uri = 'http://localhost:12345/'
redirect_uri = 'http://' + ip + ':' + port + '/'
# print('The redirect uri is: ' + redirect_uri)
scope = 'user-read-playback-state user-modify-playback-state playlist-read-private playlist-read-collaborative playlist-modify-public playlist-modify-private user-follow-modify user-follow-read user-library-read user-library-modify user-read-private user-read-email user-read-birthdate user-top-read'
if not cachepath:
cachepath = ".cache-" + username
# request the token
sp_oauth = oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri,
scope=scope, cache_path=cachepath + '/' + username + '.cache')
# try to get a valid token for this user, from the cache,
# if not in the cache, the create a new (this will send
# the user to a web page where they can authorize this app)
token_info = sp_oauth.get_cached_token()
if not token_info:
server = HTTPServer((ip, int(port)), TokenHandler)
t = threading.Thread(target=server.handle_request)
        t.daemon = True  # daemon thread, so it will not block interpreter exit
t.start()
auth_url = sp_oauth.get_authorize_url()
if ip == 'localhost':
webbrowser.open(auth_url)
print("Opened %s in your browser" % auth_url)
else:
print("Please navigate here: %s" % auth_url)
while not path:
print('ConnectControl: wait for token')
sleep(1)
response = 'http://' + ip + ':' + port + path
code = sp_oauth.parse_response_code(response)
token_info = sp_oauth.get_access_token(code)
# Auth'ed API request
if token_info:
return token_info['access_token']
else:
return None
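# Illustrative usage sketch (not part of the original module). Every value
# below is a placeholder; a real caller supplies its own Spotify credentials
# and the ip/port the redirect listener should bind to.
if __name__ == '__main__':
    token = prompt_for_user_token('some-user',
                                  cachepath='.cache',
                                  client_id='<client-id>',
                                  client_secret='<client-secret>',
                                  ip='localhost', port='12345')
    print('Got token:', bool(token))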
|
[
"nicolas.haeffner@me.com"
] |
nicolas.haeffner@me.com
|
7e4604149921d96cee1e6d69a3b590c27c1da8f1
|
fb5fe577eaf98073ebe458ee51b3ef288e956353
|
/week5/9.py
|
ab76ec63b57426393f2e57bfd99d4a6eb4908de2
|
[] |
no_license
|
askarakshabayev/PP2_2021
|
f41b9703411602877c037f4a2099eb29f262125c
|
366793190ae7e6461e659697bfbefed28ae5e26e
|
refs/heads/master
| 2023-04-09T11:59:32.046566
| 2021-04-17T07:51:40
| 2021-04-17T07:51:40
| 334,356,647
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import os
import shutil
# dir_path = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test3"
# os.makedirs(dir_path)
# src = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test3/input.txt"
# dst = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test3/input_1.txt"
# os.rename(src, dst)
src = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test_3"
dst = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test_33"
# shutil.move(src, dst)
# shutil.copytree(src, dst)
# shutil.rmtree(src)
print(os.listdir(dst))
|
[
"askar.akshabayev@gmail.com"
] |
askar.akshabayev@gmail.com
|
6d8269da5291ae9167422229e4273a6016767dd4
|
843bb82a466376ca0a74a6e28bffa8bf43df72b8
|
/covid_tracker/users/migrations/0001_initial.py
|
a6439db3963e263f35577e52584547014c08f095
|
[] |
no_license
|
heyswatisrivastava/covid_tracker
|
a27210db400a83a466d21258fa6e4c062ac46c1f
|
843efed94d1df47d19a00f7d1fb7d3e40086c9a6
|
refs/heads/main
| 2023-05-10T12:04:57.788635
| 2021-06-28T11:11:34
| 2021-06-28T11:11:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
# Generated by Django 3.2.4 on 2021-06-27 06:38
from django.db import migrations, models
import django.db.models.deletion
import django_mysql.models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('mobile_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
('pincode', models.IntegerField()),
],
options={
'db_table': 'users',
},
),
migrations.CreateModel(
name='UserSymptom',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symptoms', django_mysql.models.ListCharField(models.CharField(max_length=255), max_length=255, size=None)),
('travel_hostory', models.BooleanField(default=False)),
('covid_contact', models.BooleanField(default=False)),
('covid_risk', models.IntegerField(default=5)),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='users.user')),
],
options={
'db_table': 'user_symptoms',
},
),
]
|
[
"swatisrivastava162@gmail.com"
] |
swatisrivastava162@gmail.com
|
bb085c931ea83f30ef777b2ca2a79f3eddced1d0
|
1953ad2d8cc8a36e29d3d48e5458aeb69bf17bdd
|
/[9372]패션왕 신해빈.py
|
8d1c75cf0e13cc09638262f1edfb0beec15e5d53
|
[] |
no_license
|
wookkl/backjoon-problemsolving
|
8b75ac4575ffdc15615bc5672f1d5358ac3016a4
|
fbe7e051c7513f52b2ac26472dfc34955013549d
|
refs/heads/master
| 2023-04-10T22:33:49.614340
| 2021-04-25T00:50:47
| 2021-04-25T00:50:47
| 219,535,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
for _ in range(int(input())):
items = [input().split() for i in range(int(input()))]
d = {k: 0 for _, k in items}
res = 1
for v, k in items:
d[k] += 1
for v in d.values():
res *= (v + 1)
print(res - 1)
|
[
"wjddnr315@gmail.com"
] |
wjddnr315@gmail.com
|
67c00ebedf8cd9fafcd55f4e5118aa391ff74616
|
a397c77c92522252a7333aa712949b8001e7f443
|
/freebot/modules/life.py
|
a9be7d70ec8a94577154af42daee6c072e0a2c1b
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
vashadow/Kopachris
|
a7b3fa64cd0b45afc12f389c61770c5c1a13d6da
|
fa791f00df9b5e332b82cd39f9ceb704579218b9
|
refs/heads/master
| 2023-09-04T10:30:10.761319
| 2021-09-26T03:23:31
| 2021-09-26T03:23:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
import re
## Description stored in db.bot_modules
description = "The answer"
## Prefix stored in db.bot_modules
## Each module should have its own prefix for bot_vars entries
prefix = "42_"
## Event type handled by this module
event_type = "PRIVMSG"
## Additional global vars
#H_HTTP = {'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'}
#G_URL = 'http://www.google.com/search?q={}&btnI'
def init(db):
pass
def remove(db):
pass
def run(bot, event, db):
m = event.message.lower()
regex = re.compile('[!"#$%&\'()*+,./:;<=>?@\\^_`{|}~-]')
mr = regex.sub('', m).split()
ms = set(mr)
q = {'what', 'the', 'life', 'universe', 'everything'}
q2 = {'meaning', 'question', 'answer'}
if q < ms and len(ms.intersection(q2)) == 1:
bot.bot_reply(event, "Everyone knows it's 42, duh!")
|
[
"chris@WS01-Chris.kopachris.net"
] |
chris@WS01-Chris.kopachris.net
|
8679eb15e7abddc2ffc51114e648c08423ab7ebd
|
2aec9c5e8c72b731d3abf22f2a407fe09c1cde09
|
/QDS_Test/case/dbwytest.py
|
22710e1c97b825043ebe5514995dd8e8038a0300
|
[] |
no_license
|
jiangyg/ZWFproject
|
8b24cc34970ae0a9c2a2b0039dc527c83a5862b5
|
aa35bc59566d92721f23d2dd00b0febd268ac2dd
|
refs/heads/master
| 2020-09-26T17:01:00.229380
| 2019-11-15T13:16:21
| 2019-11-15T13:16:21
| 226,297,631
| 0
| 1
| null | 2019-12-06T09:55:37
| 2019-12-06T09:55:36
| null |
UTF-8
|
Python
| false
| false
| 3,860
|
py
|
# coding=utf-8
import time
import logging
from selenium.webdriver import ActionChains
from utils.mytestcase import MyTestCase
from utils.logincookie import DengLuPage
from utils.random import unicode
from utils.screenshort import get_screenshort
class DbWyTest(MyTestCase):
"""担保无忧测试集"""
def test_dbwy(self):
"""担保无忧测试"""
# logging.basicConfig(filename='../LOG/' + __name__ + '.log',
# format='[%(asctime)s-%(filename)s-%(levelname)s: %(message)s]', level=logging.DEBUG,
# filemode='a', datefmt='%Y-%m-%d%I:%M:%S %p')
dl = DengLuPage(self.driver)
        # the official recommendation is find_element(By.*("")) instead of find_element_by_*("")
# self.driver.find_element_by_id()
# self.driver.find_element()
dl.login()
time.sleep(2)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > h3 > span")).perform()
time.sleep(2)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > div > ul:nth-child(1) > li:nth-child(1) > h3 > a")).perform()
ActionChains(self.driver).release()
self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > div > ul:nth-child(1) > li:nth-child(1) > div > dl:nth-child(3) > dd > a:nth-child(2)").click()
        # get the handles of all open windows
windows = self.driver.window_handles
        # switch to the most recently opened window
self.driver.switch_to.window(windows[-1])
time.sleep(2)
self.driver.set_window_size(1920, 1080)
time.sleep(3)
self.assertIn("商标担保注册|商标注册费用|商标申请流程-权大师", self.driver.title)
print(self.driver.title)
        # abwy registration
self.driver.find_element_by_css_selector(
"body > div.section-product.width1200 > dl > dd > div.cont-serviceItems > table > tbody > tr > td.td-cont > ul > li:nth-child(2)").click()
for a in self.driver.find_elements_by_css_selector("#total-price"):
print("费用总计:"+a.text)
aa=a.text
self.driver.find_element_by_css_selector(
"body > div.section-product.width1200 > dl > dd > div.cont-btnBuy > a.btn.btn-next.buynow").click()
self.driver.find_element_by_name("ownerContactPerson").send_keys("{}".format(unicode()))
self.driver.find_element_by_name("ownerContactPhone").send_keys("15624992498")
self.driver.find_element_by_name("contactMail").send_keys("145647@qq.com")
self.driver.find_element_by_css_selector("#remark").send_keys(time.strftime("%Y-%m-%d_%H-%M-%S") + "测试订单")
get_screenshort(self.driver, "test_dbwy.png")
for i in self.driver.find_elements_by_css_selector("body > div.myOrder-wrap > div.section-myorder.orderinfo-wrap.width1200 > div:nth-child(6) > div.last-pay.personal-last-pay > ul > li.row-sense > em > i"):
print("总价:"+i.text)
ii=i.text
self.assertIn(aa,ii)
print("价格一致")
self.driver.find_element_by_css_selector(
"body > div.myOrder-wrap > div.section-myorder.orderinfo-wrap.width1200 > div:nth-child(6) > div.btns > a.btn-next.submitOrder").click()
for o in self.driver.find_elements_by_class_name("payable"):
print("订单提交成功,应付金额:"+o.text)
oo=o.text
self.assertIn(oo,ii)
print("测试通过")
self.driver.find_element_by_css_selector("#alisubmit").click()
|
[
"34021500@qq.com"
] |
34021500@qq.com
|
238056c98f81b88787366589bde5eb878cd01528
|
a93af8744c2a7c6fd0b8e353c9a6a6af563376e1
|
/venv/exporter.py
|
6ad74bd3e42333b0d0897cbb1b5b72ddd6c93337
|
[] |
no_license
|
ohjiwoo123/Python-JobScrapper
|
9189cba701cac1d7ee52932ed2e2cde2f81f7d63
|
e2591233b0c9c1473ce7682b41b6959b20944965
|
refs/heads/main
| 2023-06-02T22:06:20.385638
| 2021-06-20T13:23:05
| 2021-06-20T13:23:05
| 373,442,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
import csv
def save_to_file(jobs):
file = open("jobs.csv",mode ="w")
writer = csv.writer(file)
writer.writerow(["title","company","location","link"])
for job in jobs:
writer.writerow(list(job.values()))
return
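# Illustrative usage (not part of the original scraper): each job dict is
# expected to carry title/company/location/link keys, matching the header row
# written above. The sample values are placeholders.
if __name__ == "__main__":
    sample_jobs = [{
        "title": "Python Developer",
        "company": "Acme",
        "location": "Seoul",
        "link": "https://example.com/job/1",
    }]
    save_to_file(sample_jobs)  # writes jobs.csv in the current directory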
|
[
"ohjiwoo123@naver.com"
] |
ohjiwoo123@naver.com
|
ba3c5206735c47b78b510cda18d0093d61793a4f
|
5822544fcead26c64e6d05ba57ba2ab0fb446b39
|
/Program/FunctionList.py
|
a3c63b08a3e4b2af213ea65bd5f0989db3e1c811
|
[] |
no_license
|
R-second/PyAlgorithmProblem
|
7ba72b152b9f1356c448fb292aea5380c86e8b84
|
32ba15b9d9459731ee6dc5553bbaa756114c5323
|
refs/heads/master
| 2020-05-20T00:01:16.972241
| 2019-05-11T11:31:42
| 2019-05-11T11:31:42
| 185,279,094
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
# FunctionList.py
import tkinter
import PrimeFactor
import Fibonacci
import Gcd
import MaxMin
import Sort
import Calendar
class FunctionList:
    # set the algorithm display names in functionList
functionList = ["素数判定", "フィボナッチ数列の出力", "最大公約数出力", "最大値判定", "ソート", "万年カレンダー"]
@classmethod
def functionMain(cls, num):
        # create the sub-window
application2 = tkinter.Tk()
application2.title("subWindow")
        # instantiate a different algorithm window depending on the argument num
if num == 0:
subWindow = PrimeFactor.PrimeFactor(application2)
elif num == 1:
subWindow = Fibonacci.Fibonacci(application2)
elif num == 2:
subWindow = Gcd.Gcd(application2)
elif num == 3:
subWindow = MaxMin.MaxMin(application2)
elif num == 4:
subWindow = Sort.Sort(application2)
else:
subWindow = Calendar.Calendar(application2)
application2.protocol("WM_DELETE_WINDOW", subWindow.quit)
application2.mainloop()
|
[
"R-second@yamadarigatsu-no-MacBook-Air.local"
] |
R-second@yamadarigatsu-no-MacBook-Air.local
|
11e357c42da450eb40840e5bb31b4c09f2f26c89
|
edb9121984ef9d6647ea7462c1401bdc6b9451e7
|
/grid_modules/__init__.py
|
1712334588c3eebf6859a51bae6265f6929e4b86
|
[] |
no_license
|
yang0110/controllable_agent
|
eceb6b17da4ea22cf8de753dd5260da37b2a403c
|
befeb7386d22ef8806725740ee08cbf9c87a5a0d
|
refs/heads/main
| 2023-08-25T21:25:49.977459
| 2021-10-14T13:08:53
| 2021-10-14T13:08:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
# from grid_modules.common import MDP
|
[
"ahmed.touati90@gmail.com"
] |
ahmed.touati90@gmail.com
|
2dd2ab1673c7b063eaa173cb49cb2e9ec01c412c
|
5aa3e81a9b7b251ee510208aab5f6a000c8d9e67
|
/Selenium/21 Comment(未完成).py
|
4223aeb1e7f17659ff81a1494ec9171ae95d40f6
|
[] |
no_license
|
PhenomK/Project
|
d99aae43d837863e86a8aee56eb92ec08c632f2b
|
7a5a1481ff29023c0a2d5cbcac24d1d7ccf9c684
|
refs/heads/master
| 2021-09-05T01:31:08.874681
| 2018-01-23T12:38:10
| 2018-01-23T12:38:10
| 104,861,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
# Log in
driver = webdriver.Chrome()
driver.get("http://172.16.29.37:8080/user/login") #测试网址
elem1 = driver.find_element_by_id("user_slug")
elem1.send_keys("chrome") #用户名
elem2 = driver.find_element_by_id("pwd")
elem2.send_keys("123456") #密码
button = driver.find_element_by_id("submit_button")
button.click()
time.sleep(2)
# Navigate to the personal files page
elem3 = driver.find_element_by_link_text("个人文件")
elem3.click()
time.sleep(1)
driver.refresh()
# Create a new document to comment on
time.sleep(1)
above = driver.find_element_by_id("addfile")  # "New" menu
ActionChains(driver).move_to_element(above).perform()
time.sleep(1)
elem4 = driver.find_element_by_class_name("addword")  # "New document"
elem4.click()
time.sleep(1)
ActionChains(driver).move_by_offset(xoffset=400,yoffset=400).perform()
elem5 = driver.find_element_by_class_name("box")
elem5.send_keys("评论测试文档")
elem6 = driver.find_element_by_css_selector(".sure")
elem6.click()
time.sleep(2)
# Add a comment
elem7 = driver.find_element_by_xpath("a//[@class='display-name' and @title='评论测试文档.docx'/../../../..]").find_element_by_class_name("item-checkbox")
elem7.click()
time.sleep(1)
elem8 = driver.find_element_by_id("review_contBox")
elem8.click()
time.sleep(1)
elem9 = driver.find_element_by_class_name("review_text")
elem9.send_keys("评论测试")
elem10 = driver.find_element_by_id("review_message_submit")
elem10.click()
|
[
"Ghope.plus@gmail.com"
] |
Ghope.plus@gmail.com
|
18f607375d344ca11cc2a0c33fc9166c84602bde
|
58f314bc2df12c3c3b1ce7eacd5baaf60193008b
|
/tests/song_test.py
|
de5dcf007fcd024d43318168e73a2645656fb53c
|
[] |
no_license
|
portypy/Caraoke_bar_w2
|
e75fe57d357fe6bebf5b9b6372cbf0e6ec357796
|
3010052e1c371b345be31c7ce72ffdb48fc47658
|
refs/heads/main
| 2023-01-13T13:33:17.367773
| 2020-11-28T23:41:31
| 2020-11-28T23:41:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
import unittest
from src.song import Song
class TestSong(unittest.TestCase):
def setUp(self):
self.song_1 = Song("Gotta Go", "Agnostic Front", 3.2)
self.song_2 = Song("On My Radio", "Selecter", 3.52)
self.song_3 = Song("Divorce a I'ltalienne", "Mungo's Hifi", 3.46)
def test_song_has_title(self):
self.assertEqual("Gotta Go", self.song_1.title)
def test_song_has_artist_name(self):
self.assertEqual("Agnostic Front", self.song_1.artist)
def test_song_has_duration(self):
self.assertEqual(True,isinstance(self.song_2.duration,float))
|
[
"macdorphoto@yahoo.com"
] |
macdorphoto@yahoo.com
|
5edaa1b154eb40102fe6ec6a4a37b893c4eab07f
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/hv572GaPtbqwhJpTb_2.py
|
8e0bb6a39e996aa650ed4adf5f67abcc31d4539a
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,831
|
py
|
"""
In this challenge, you must think about words as elastics. What happens when
you stretch an elastic by applying a constant traction force at both ends? Every
part (or letter, in this case) of the elastic will expand, with the minimum
expansion at the ends, and the maximum expansion in the center.
If the word has an odd length, the effective central character of the word
will be the pivot that splits the word into two halves.
"ABC" -> Left = "A" | Center = "B" | Right = "C"
If the word has an even length, you will consider two parts of equal length,
with the last character of the left half and the first character of the right
half being the center.
"ABCD" -> Left = "AB" | Right = "CD"
You will represent the expansion of a letter repeating it as many times as its
numeric position (so counting the indexes from/to 1, and not from 0 as usual)
in its half, with a crescent order in the left half and a decrescent order in
the right half.
Word = "ANNA"
Left = "AN"
Right = "NA"
Left = "A" * 1 + "N" * 2 = "ANN"
Right = "N" * 2 + "A" * 1 = "NNA"
Word = Left + Right = "ANNNNA"
If the word has an odd length, the pivot (the central character) will be the
peak (as to say, the highest value) that delimits the two halves of the word.
Word = "KAYAK"
Left = "K" * 1 + "A" * 2 = "KAA"
Pivot = "Y" * 3 = "YYY"
Right = "A" * 2 + "K" * 1 = "AAK"
Word = Left + Pivot + Right = "KAAYYYAAK"
Given a `word`, implement a function that returns the elasticized version of
the word as a string.
### Examples
elasticize("ANNA") ➞ "ANNNNA"
elasticize("KAYAK") ➞ "KAAYYYAAK"
elasticize("X") ➞ "X"
### Notes
* For words with fewer than three characters, the function must return the same word (no traction applicable).
* Remember, in the left half characters are counted from 1 up to the center, and in the right half they count back down to 1.
"""
def elasticize(word):
def is_even(n):
return n%2==0
def first_half(word, n):
l8rs = {}
for num in range(n):
l8rs[num] = word[num] * (num+1)
return l8rs
def last_half(word, n):
l8rs = {}
y = 1
while len(word) - y > n-1:
l8rs[y] = word[len(word)-y]*y
y += 1
return l8rs
def combine(fh, lh):
lst = []
for key in sorted(list(fh.keys())):
lst.append(fh[key])
for key in reversed(sorted(list(lh.keys()))):
lst.append(lh[key])
return lst
if len(word) < 3:
return word
if is_even(len(word)) == False:
x = 0
y = 1
while x != len(word) - y:
x += 1
y += 1
middle = x
else:
middle = int(len(word)/2)
fh = first_half(word, middle)
lh = last_half(word, middle)
combined = combine(fh, lh)
return ''.join(combined)
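# Quick sanity check of the examples documented above (an illustrative
# addition, not part of the original submission).
assert elasticize("ANNA") == "ANNNNA"
assert elasticize("KAYAK") == "KAAYYYAAK"
assert elasticize("X") == "X"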
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
729ab8bbd28235101a138824f2811f233210925f
|
a4f6e1caef32ddfd45fdf5475fbec24020e8be19
|
/KNN from scratch for mushroom dataset.py
|
4451375f94947ed573902870c40da4a363849fcd
|
[] |
no_license
|
rishabkr/KNN-classifier-from-scratch-for-Mushroom-Dataset
|
1c4ef8740ad63f7c5c8b0a7774478d15de654c6a
|
3fe49a331ac45346c719ff8ca433838fe3605b66
|
refs/heads/master
| 2020-12-29T16:11:21.315946
| 2020-02-06T10:41:59
| 2020-02-06T10:41:59
| 238,664,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,587
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pprint import pprint
import random
import math
from collections import Counter
class KNNClassifier:
def __init__(self):
pass
def train_test_split(self, dataframe,test_size):
dataframe_size=len(dataframe)
if isinstance(test_size,float):#if test size is passed as a proportion
test_size=round(test_size*dataframe_size)
#pick random samples from the data for train test split
indexes=dataframe.index.tolist()
test_indices=random.sample(population=indexes,k=test_size)
#now putting the values of train and test data into the respective df's
test_dataframe=dataframe.loc[test_indices]
cropped_dataframe=dataframe.drop(test_indices)
train_dataframe=cropped_dataframe
return train_dataframe,test_dataframe
def minkowski(self,test_value,p):
if(p==2):
distance=np.sum((self.train_values - test_value)**2,axis=1)
return distance
elif(p==1):
distance=np.sum(abs(self.train_values - test_value),axis=1)
return distance
def KNeighbors(self, k, test_value,p=2):
neighbors=[]
train_length=self.train_values.shape[0]
if(p==2):
distance=self.minkowski(test_value,p=2)
elif(p==1):
distance=self.minkowski(test_value,p=1)
k_neighbors=np.argsort(distance)
k_neighbors=k_neighbors[:k]
return k_neighbors
def find_majority(self, k_index):
ans = Counter(self.train_labels[k_index]).most_common()
return ans[0][0]
def train(self, train_path):
df=pd.read_csv(train_path,header=None)
letters={"a":int(ord('a')),"b":int(ord('b')),"c":int(ord('c')),"d":int(ord('d')),"e":int(ord('e')),"f":int(ord('f')),"g":int(ord('g')),"h":int(ord('h'))
,"i":int(ord('i')),"j":int(ord('j')),"k":int(ord('k')),"l":int(ord('l')),"m":int(ord('m')),"n":int(ord('n')),"o":int(ord('o')),"p":int(ord('p')),"q":int(ord('q')),"r":int(ord('r'))
,"s":int(ord('s')),"t":int(ord('t')),"u":int(ord('u')),"v":int(ord('v')),"w":int(ord('w')),"x":int(ord('x')),"y":int(ord('y')),"z":int(ord('z'))}
for column in df.columns:
df[column]=df[column].replace(to_replace ="?",value =df[column].mode()[0])
for column in df.columns:
df[column]=df[column].replace(to_replace=letters)
df = df.apply(pd.to_numeric)
train_df,val_df=self.train_test_split(df,0.3)
train_digits=train_df.to_numpy()
train_digits=np.array(train_digits)
val_digits=val_df.to_numpy()
val_digits=np.array(val_digits)
self.train_values=train_digits[:,1:]
self.train_labels=train_digits[:,0]
self.val_values=val_digits[:,1:]
self.val_labels=val_digits[:,0]
def predict(self, test_path):
df_test=pd.read_csv(test_path,header=None)
letters={"a":int(ord('a')),"b":int(ord('b')),"c":int(ord('c')),"d":int(ord('d')),"e":int(ord('e')),"f":int(ord('f')),"g":int(ord('g')),"h":int(ord('h'))
,"i":int(ord('i')),"j":int(ord('j')),"k":int(ord('k')),"l":int(ord('l')),"m":int(ord('m')),"n":int(ord('n')),"o":int(ord('o')),"p":int(ord('p')),"q":int(ord('q')),"r":int(ord('r'))
,"s":int(ord('s')),"t":int(ord('t')),"u":int(ord('u')),"v":int(ord('v')),"w":int(ord('w')),"x":int(ord('x')),"y":int(ord('y')),"z":int(ord('z'))}
for column in df_test.columns:
df_test[column]=df_test[column].replace(to_replace ="?",value =df_test[column].mode()[0])
for column in df_test.columns:
df_test[column]=df_test[column].replace(to_replace=letters)
test_vals=df_test.to_numpy()
test_vals=np.array(test_vals)
prediction=[]
length=test_vals.shape[0]
for i in range(length):
k_index=self.KNeighbors(5,test_vals[i])
result=self.find_majority(k_index)
prediction.append(result)
            # if i % 10 == 0:
            #     print(chr(result))
predictions=[]
for i in range(0,len(prediction)):
predictions.append(chr(prediction[i]))
return predictions
if __name__ == '__main__':
knn = KNNClassifier()
knn.train("train.csv")
preds = knn.predict("test.csv")
print("Done Testing")
df_labels=pd.read_csv("test_labels.csv", header=None)
label_vals=df_labels.iloc[:, 0].to_numpy()
label_vals=np.array(label_vals)
#print(preds.shape)
#print(label_vals.shape)
preds=np.array(preds)
acc = np.sum(preds == label_vals)/preds.shape[0]
print(acc)
# df_test_labels=pd.read_csv("test_labels.csv",header=None)
# # In[254]:
# for column in df_test_labels.columns:
# df_test_labels[column]=df_test_labels[column].replace(to_replace=letters)
# # In[255]:
# test_vals=df_test.to_numpy()
# test_vals=np.array(test_vals)
# label_vals=df_test_labels.to_numpy()
# label_vals=np.array(label_vals)
# # In[256]:
# def KNeighbors(k,train_values,train_labels,test_value):
# neighbors=[]
# train_length=train_values.shape[0]
# distance=np.sum((train_values - test_value)**2,axis=1)
# k_neighbors=np.argsort(distance)
# k_neighbors=k_neighbors[:k]
# return k_neighbors
# # In[257]:
# def find_majority(k_index,train_labels):
# from collections import Counter
# ans = Counter(train_labels[k_index]).most_common()
# return ans[0][0]
# # In[258]:
# predictions=[]
# length=test_vals.shape[0]
# for i in range(length):
# k_index=KNeighbors(6,train_values,train_labels,test_vals[i])
# result=find_majority(k_index,train_labels)
# predictions.append(result)
# predictions
# # In[ ]:
# # In[259]:
# cnt=0
# for i in range(0,length):
# if(predictions[i]==label_vals[i]):
# cnt+=1
# print(cnt/label_vals.shape[0])
# # In[260]:
# from sklearn.neighbors import KNeighborsClassifier
# classifier=KNeighborsClassifier(n_neighbors=6,metric='minkowski',p=2)
# classifier.fit(train_values,train_labels)
# # In[261]:
# y_pred=classifier.predict(test_vals)
# # In[262]:
# cnt=0
# for i in range(0,length):
# if(y_pred[i]==label_vals[i]):
# cnt+=1
# print(cnt/label_vals.shape[0])
# # In[281]:
# from sklearn.metrics import confusion_matrix
# y_pred=y_pred.reshape(1000,1)
|
[
"noreply@github.com"
] |
rishabkr.noreply@github.com
|
6c166f5f43ac352167d81f6b03513dddb17c94c5
|
d8f78d99e742806c963981ed227174ce16533b70
|
/ABC051/c.py
|
44e516975f8b8ef61484ae59e992d94dfda17eeb
|
[] |
no_license
|
pekopekojun/atcoder
|
28e3ad4e8a0741e31fbfa4ff6d6a29b964ef67c8
|
56bcad4855c11b12fcc8f11a65c33c501da3dff2
|
refs/heads/master
| 2023-03-04T00:44:15.742378
| 2021-02-12T14:53:06
| 2021-02-12T14:53:06
| 336,311,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
sx, sy, tx, ty = map(int, input().split())
dx = tx-sx
dy = ty-sy
print("U"*dy + "R"*dx, end="")
print("D"*dy + "L"*dx, end="")
print("L" + "U"*(dy+1) + "R"*(dx+1) + "D", end="")
print("R" + "D"*(dy+1) + "L"*(dx+1) + "U", end="")
|
[
"jun805@gmail.com"
] |
jun805@gmail.com
|
117b4771a177a59bd2de0e47c5d4fb55e40f5dcf
|
39373e3f2a4e1a70cdf6e5cbbe189c2748f2b8b5
|
/Project2A/wrapper.py
|
d8b77ad60b37346858e3d3be2269570fd9284aaa
|
[] |
no_license
|
justkk/Penn-Computer-Vision
|
d00a7c3af2364643ac4a15f222a5f3656fdacf75
|
cb3305ec3a2cbed6f1aeadb534eb7ebea26c5472
|
refs/heads/main
| 2023-02-13T10:55:50.812795
| 2021-01-13T20:45:33
| 2021-01-13T20:45:33
| 314,481,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,021
|
py
|
from morph_tri import *
from click_correspondences import *
import imageio
sourceImage = np.array(Image.open("cat.jpeg"))
destImage = np.array(Image.open("tiger.png"))
#sourceImage = np.array(Image.open("leo.jpg"))
#destImage = np.array(Image.open("sachin.png"))
sourcePoints, destinationPoints = click_correspondences(sourceImage, destImage)
##
#ex1
'''
sourcePoints = np.array([[ 73.34256952 , 8.7195157 ],
[107.2168735, 27.77381168],
[136.85688947, 16.12951969],
[164.37976145, 35.18381568],
[209.89835742, 21.95166569],
[194.01977743, 70.11669165],
[188.72691743, 103.99099562],
[162.79190345, 133.1017256 ],
[126.27116948, 143.68744559],
[ 91.33829351, 133.1017256 ],
[ 70.69613953, 99.22742163],
[ 68.57899553, 64.29454565],
[ 97.16043951, 80.17312564],
[116.74402149, 83.87812764],
[115.15616349, 100.28599363],
[ 96.63115351, 99.22742163],
[144.26689347, 87.05384364],
[160.67475945, 88.11241563],
[162.26261745, 100.81527962],
[144.26689347, 103.99099562],
[118.33187949, 119.34028961],
[132.62260148, 119.34028961],
[ 0. , 0. ],
[256. , 256. ],
[256. , 0. ],
[ 0. , 256. ]]
)
destinationPoints = np.array([[ 47.7227213, 9.2488017 ],
[ 95.35846126, 36.77167368],
[137.70134123, 35.71310168],
[194.33494318, 48.41596567],
[232.97282115, 27.24452568],
[204.39137717, 80.17312564],
[222.38710116, 119.86957561],
[210.21352317, 175.97389156],
[116.00061524, 228.90249152],
[ 41.3712893 , 191.32318555],
[ 22.84627932, 139.98244359],
[ 40.8420033 , 84.40741364],
[ 80.00916727, 96.58099163],
[103.29775126, 97.11027763],
[ 95.88774726, 121.98671961],
[ 76.83345128, 111.93028562],
[156.75563721, 100.28599363],
[178.98564919, 104.52028162],
[176.8685052 , 118.28171761],
[158.34349521, 121.45743361],
[ 99.59274926, 165.38817157],
[138.23062723, 168.56388757],
[ 0. , 0. ],
[256. , 256. ],
[256. , 0. ],
[ 0. , 256. ]])
'''
'''
sourcePoints = np.array ([[120.22083907, 178.34197527],
[123.39655507, 122.23765931],
[144.56799505, 53.43047937],
[194.32087901, 19.55617539],
[252.54233896, 7.9118834 ],
[308.64665492, 23.79046339],
[348.87239089, 39.66904338],
[392.27384285, 108.47622332],
[393.33241485, 188.92769526],
[140.33370705, 276.78917119],
[161.50514704, 327.60062715],
[195.37945101 ,365.70921912],
[252.54233896, 397.46637909],
[306.52951092, 374.17779511],
[349.93096289, 333.95205914],
[375.33669087, 274.67202719],
[119.16226707, 229.15343123],
[383.80526686, 231.27057523],
[154.09514304, 211.15770724],
[197.49659501, 181.51769127],
[239.83947497, 209.04056324],
[272.65520695, 211.15770724],
[318.17380291, 183.63483526],
[354.16525088, 214.33342324],
[172.09086703, 231.27057523],
[209.140887 , 242.91486722],
[292.76807493 ,236.56343522],
[324.52523491, 240.79772322],
[219.72660699, 265.1448792 ],
[199.61373901, 289.49203518],
[191.14516301 ,319.13205116],
[293.82664693, 267.2620232 ],
[317.11523091, 286.31631918],
[322.40809091, 319.13205116],
[215.49231899 ,315.95633516],
[259.95234296 ,308.54633116],
[295.94379093, 317.01490716],
[255.71805496, 342.42063514],
[ 0. , 0. ],
[512. , 512. ],
[512. , 0. ],
[ 0. , 512. ]])
destinationPoints = np.array([[111.32402259, 227.03628723],
[ 98.6211586, 141.2919553 ],
[121.90974258, 75.66049135],
[171.66262654 , 17.4390314 ],
[249.99695448 , 2.61902341],
[328.33128241 , 17.4390314 ],
[379.14273837 , 60.84048336],
[411.95847035 ,127.53051931],
[407.72418235 ,220.68485523],
[138.84689456, 364.65064712],
[164.25262254, 404.87638309],
[207.65407451, 438.75068706],
[270.10982246, 458.86355504],
[336.79985841, 435.57497106],
[374.90845038, 397.46637909],
[395.02131836, 351.94778313],
[ 97.5625866 , 301.13632717],
[410.89989835, 288.43346318],
[153.66690255, 247.14915521],
[204.47835851, 214.33342324],
[258.46553047, 248.20772721],
[291.28126244, 250.32487121],
[330.44842641, 210.09913524],
[370.67416238, 240.79772322],
[182.24834653, 274.67202719],
[225.64979849, 276.78917119],
[313.51127442, 276.78917119],
[354.79558239, 271.49631119],
[218.2397945 , 328.65919915],
[200.24407052, 361.47493112],
[194.95121052, 390.0563751 ],
[328.33128241, 327.60062715],
[349.5027224 , 342.42063514],
[354.79558239, 362.53350312],
[229.88408649, 376.29493911],
[275.40268246, 367.82636312],
[323.03842242, 371.00207911],
[282.81268645, 398.52495109],
[ 0. , 0. ],
[512. , 512. ],
[512. , 0. ],
[ 0. , 512. ]])
'''
print(sourcePoints)
print(destinationPoints)
w = np.arange(0, 1.1, 0.1)
morphed_set = morph_tri(sourceImage, destImage, sourcePoints, destinationPoints, w, w)
res_list = []
k = 0
while k < morphed_set.shape[0]:
res_list.append(morphed_set[k, :, :, :])
k += 1
imageio.mimsave('./morph_1.gif', res_list)
|
[
"nikhilt@nikhils-MacBook-Pro.local"
] |
nikhilt@nikhils-MacBook-Pro.local
|
1c37e0a39fb706b42f03597283cf1a50dd03b413
|
f9888153e33dc29324d868ca7f1f0ec14b6b4bd4
|
/aireal/roof/view_results.py
|
e0d9c783b3c96475a711cee90d1d2ea8e98a11d0
|
[] |
no_license
|
gsvigruha/images
|
a029a51bf7bbcc5384ddb34c26e52eaf98261e04
|
6aac3deca36b09049f9f403ba438fdb7a98ee92e
|
refs/heads/master
| 2021-07-21T06:34:57.383519
| 2020-05-25T01:32:20
| 2020-05-25T01:32:20
| 173,651,473
| 0
| 0
| null | 2020-05-25T01:32:21
| 2019-03-04T01:18:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,163
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
from matplotlib.pyplot import figure
import tensorflow as tf
from images.aireal.roof.image_loader import LABELS, TRAIN, BATCH_SIZE
def feature_iter_1(test_file_list, model_file):
sess = tf.Session('', tf.Graph())
with sess.graph.as_default():
train_images=[]
model = tf.keras.models.load_model(model_file)
for filename in test_file_list:
rgb_image = tf.read_file(TRAIN + filename + ".jpg")
rgb_image_decoded = tf.image.decode_jpeg(rgb_image, channels=3)
rgb_image_decoded = tf.to_float(rgb_image_decoded) / 255.0
cir_fn = tf.strings.regex_replace(filename, '2005050310033_78642723578549', '2005050310034_78642723578549_CIR')
cir_image = tf.read_file(TRAIN + cir_fn + ".jpg")
cir_image_decoded = tf.image.decode_jpeg(cir_image, channels=3)
cir_image_decoded = tf.to_float(cir_image_decoded) / 255.0
train_image_decoded = tf.concat([rgb_image_decoded, cir_image_decoded], axis=2)
input_tf = sess.run(tf.stack([train_image_decoded], axis=0))
train_images.append(model.predict(input_tf))
sess.close()
return np.squeeze(np.stack(train_images, axis=0))
def feature_iter_2(test_file_list, model_file, output_dirs):
sess = tf.Session('', tf.Graph())
with sess.graph.as_default():
train_images=[]
model = tf.keras.models.load_model(model_file)
for filename in test_file_list:
rgb_image = tf.read_file(TRAIN + filename + ".jpg")
rgb_image_decoded = tf.image.decode_jpeg(rgb_image, channels=3)
rgb_image_decoded = tf.to_float(rgb_image_decoded) / 255.0
cir_fn = tf.strings.regex_replace(filename, '2005050310033_78642723578549', '2005050310034_78642723578549_CIR')
cir_image = tf.read_file(TRAIN + cir_fn + ".jpg")
cir_image_decoded = tf.image.decode_jpeg(cir_image, channels=3)
cir_image_decoded = tf.to_float(cir_image_decoded) / 255.0
feature_tensors = [rgb_image_decoded, cir_image_decoded]
for output_dir in output_dirs:
prev_image = tf.read_file(TRAIN + output_dir + filename + "_roof_output.png")
prev_image_decoded = tf.image.decode_jpeg(prev_image, channels=1)
prev_image_decoded = tf.to_float(prev_image_decoded) / 255.0
feature_tensors.append(prev_image_decoded)
train_image_decoded = tf.concat(feature_tensors, axis=2)
input_tf = sess.run(tf.stack([train_image_decoded], axis=0))
train_images.append(model.predict(input_tf))
sess.close()
return np.squeeze(np.stack(train_images, axis=0))
def show(test_file_list, y):
f = figure(num=None, figsize=(16, 32), dpi=80, facecolor='w', edgecolor='k')
N = len(test_file_list)
for i in range(0, N):
name = test_file_list[i]
print(name)
f.add_subplot(N,3,i*3+1)
plt.imshow(np.squeeze(y[i]), cmap='gray', vmin=0, vmax=1)
img=mpimg.imread('/home/gsvigruha/aireal/Classification/'+name+'.jpg')
f.add_subplot(N,3,i*3+2)
plt.imshow(img)
img_s=mpimg.imread('/home/gsvigruha/aireal/Classification/'+name+'_shapes.png')
f.add_subplot(N,3,i*3+3)
plt.imshow(img_s)
plt.show()
|
[
"noreply@github.com"
] |
gsvigruha.noreply@github.com
|
07c1baec5353d639e78ede2a77b169e7d80091d1
|
a5dd79505b3e9d0c089f623d479aac75da47a426
|
/Scatterplot Matrix.py
|
84372143a36c5f711f8b684da4d0ebe8cc459554
|
[] |
no_license
|
BsRam07/Data-Visualization---Python
|
e0cbbaa32baeaea4697a9cfcf4393ece7a3c636c
|
d4978b7699589e54e4c8ca9147b2d4b979bad80e
|
refs/heads/master
| 2020-04-25T20:16:38.595310
| 2019-02-28T05:34:44
| 2019-02-28T05:34:44
| 173,047,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as stats
import seaborn as sns
sns.set()
df = sns.load_dataset("iris")
sns.pairplot(df, hue="species", size=2.5)
plt.show()
|
[
"noreply@github.com"
] |
BsRam07.noreply@github.com
|
9d503b337aa4b2aae5d968b8480f250c62f28706
|
4ac006cf216a2aac21dfdbf66db51b195066676f
|
/Proj5/tracking_sp16/bustersAgents.py
|
3482936d7e35ce0949627de786d0accfea16849a
|
[] |
no_license
|
Snedakerwalker1/cs188
|
af98c3549ee0dede3bc546f265f97966c65ac5cc
|
9b7662b03a0be57e1702d454472990ec0b4036fa
|
refs/heads/master
| 2020-06-12T02:44:06.673108
| 2019-07-23T19:21:17
| 2019-07-23T19:21:17
| 194,171,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,441
|
py
|
# bustersAgents.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import util
from game import Agent
from game import Directions
from keyboardAgents import KeyboardAgent
import inference
import busters
class NullGraphics:
"Placeholder for graphics"
def initialize(self, state, isBlue = False):
pass
def update(self, state):
pass
def pause(self):
pass
def draw(self, state):
pass
def updateDistributions(self, dist):
pass
def finish(self):
pass
class KeyboardInference(inference.InferenceModule):
"""
Basic inference module for use with the keyboard.
"""
def initializeUniformly(self, gameState):
"Begin with a uniform distribution over ghost positions."
self.beliefs = util.Counter()
for p in self.legalPositions: self.beliefs[p] = 1.0
self.beliefs.normalize()
def observeUpdate(self, observation, gameState):
noisyDistance = observation
pacmanPosition = gameState.getPacmanPosition()
allPossible = util.Counter()
for p in self.legalPositions:
trueDistance = util.manhattanDistance(p, pacmanPosition)
if noisyDistance != None and \
busters.getObservationProbability(noisyDistance, trueDistance) > 0:
allPossible[p] = 1.0
allPossible.normalize()
self.beliefs = allPossible
def elapseTime(self, gameState):
pass
def getBeliefDistribution(self):
return self.beliefs
class BustersAgent:
"An agent that tracks and displays its beliefs about ghost positions."
def __init__( self, index = 0, inference = "ExactInference", ghostAgents = None, observeEnable = True, elapseTimeEnable = True):
inferenceType = util.lookup(inference, globals())
self.inferenceModules = [inferenceType(a) for a in ghostAgents]
self.observeEnable = observeEnable
self.elapseTimeEnable = elapseTimeEnable
def registerInitialState(self, gameState):
"Initializes beliefs and inference modules"
import __main__
self.display = __main__._display
for inference in self.inferenceModules:
inference.initialize(gameState)
self.ghostBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
self.firstMove = True
def observationFunction(self, gameState):
"Removes the ghost states from the gameState"
agents = gameState.data.agentStates
gameState.data.agentStates = [agents[0]] + [None for i in range(1, len(agents))]
return gameState
def getAction(self, gameState):
"Updates beliefs, then chooses an action based on updated beliefs."
for index, inf in enumerate(self.inferenceModules):
if not self.firstMove and self.elapseTimeEnable:
inf.elapseTime(gameState)
self.firstMove = False
if self.observeEnable:
inf.observe(gameState)
self.ghostBeliefs[index] = inf.getBeliefDistribution()
self.display.updateDistributions(self.ghostBeliefs)
return self.chooseAction(gameState)
def chooseAction(self, gameState):
"By default, a BustersAgent just stops. This should be overridden."
return Directions.STOP
class BustersKeyboardAgent(BustersAgent, KeyboardAgent):
"An agent controlled by the keyboard that displays beliefs about ghost positions."
def __init__(self, index = 0, inference = "KeyboardInference", ghostAgents = None):
KeyboardAgent.__init__(self, index)
BustersAgent.__init__(self, index, inference, ghostAgents)
def getAction(self, gameState):
return BustersAgent.getAction(self, gameState)
def chooseAction(self, gameState):
return KeyboardAgent.getAction(self, gameState)
from distanceCalculator import Distancer
from game import Actions
from game import Directions
class GreedyBustersAgent(BustersAgent):
"An agent that charges the closest ghost."
def registerInitialState(self, gameState):
"Pre-computes the distance between every two points."
BustersAgent.registerInitialState(self, gameState)
self.distancer = Distancer(gameState.data.layout, False)
def chooseAction(self, gameState):
"""
First computes the most likely position of each ghost that has
not yet been captured, then chooses an action that brings
Pacman closest to the closest ghost (according to mazeDistance!).
"""
pacmanPosition = gameState.getPacmanPosition()
legal = [a for a in gameState.getLegalPacmanActions()]
livingGhosts = gameState.getLivingGhosts()
livingGhostPositionDistributions = \
[beliefs for i, beliefs in enumerate(self.ghostBeliefs)
if livingGhosts[i+1]]
"*** YOUR CODE HERE ***"
"first lets find the most likely position of each of the remaining ghosts."
ghostpositions = []
#print livingGhostPositionDistributions
for distribution in livingGhostPositionDistributions:
pos = (0.0,0.0)
value = -float('inf')
for spot, prob in distribution.iteritems():
if prob >= value:
value = prob
pos = spot
#print probposition
ghostpositions.append(pos)
#print ghostpositions
"now use these positions to find the action that brings packman toward the closest ghost"
actionPair = (float('inf'), Directions.STOP)
for ghostPos in ghostpositions:
newactionPair = min((self.distancer.getDistance(ghostPos, Actions.getSuccessor(pacmanPosition,a)), a) for a in legal)
actionPair = min(actionPair, newactionPair)
return actionPair[1]
|
[
"wsnedaker@berkeley.edu"
] |
wsnedaker@berkeley.edu
|
72228f507a4ac8d98397a992ca802e652f3d5c8f
|
2207cf4fb992b0cb106e2daf5fc912f23d538d0d
|
/src/catalog/serializers.py
|
1e85a0316ce6f1e7fa4b866254126cb6dd9a095a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
litedesk/litedesk-webserver-provision
|
95bc75f61532c5f1c7cb21fb5372ff288999689e
|
1576b9d3e5e2e64d1136d276767c2710cfb1938f
|
refs/heads/master
| 2021-05-15T01:35:31.984067
| 2020-08-18T10:55:20
| 2020-08-18T10:55:20
| 25,595,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
import models
class OfferSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='offer-detail')
class Meta:
model = models.Offer
fields = ('url', 'name', 'currency', 'price', 'setup_price', 'status')
read_only_fields = ('name', 'asset', 'currency', )
|
[
"raphael@lullis.net"
] |
raphael@lullis.net
|
f382e321982d7046239348d2a2c1037f961777ea
|
012d619f174805e5aef31d3a51e5954542e5f270
|
/Dj030101/Dj030101/urls.py
|
81417811b7d92bc96bd871880e68821a0346eb75
|
[] |
no_license
|
thunderdrum/self-service-supermarket
|
8174a591bb91f5e67b86a51af56784b458788cf1
|
6e98511e37f09547f7f625626abd11fdedc41ab9
|
refs/heads/master
| 2022-04-01T21:43:21.988989
| 2020-01-23T03:05:57
| 2020-01-23T03:05:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
""" Dj030101 URL Configuration
The 'urlpatterns' list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from shop import views as shop_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', shop_views.index, name='index'),
path('login/', shop_views.login, name='login'),
path('cashier/', shop_views.cashier, name='cashier'),
path('cashier/add_product/', shop_views.add_product, name="add_product"),
path('cashier/delete_product/', shop_views.delete_product, name="delete_product"),
path('cashier/get_return/', shop_views.get_return_money, name="get_return"),
path('cashier/cancel/', shop_views.cashier_cancel, name="cashier_cancel"),
path('cashier/submit/', shop_views.cashier_submit, name="cashier_submit"),
path('main/', shop_views.main, name='main'),
path('main/sales_query/', shop_views.sales_query, name='sales_query'),
path('main/salesdetail_query/', shop_views.salesdetail_query, name='salesdetail_query'),
]
|
[
"noreply@github.com"
] |
thunderdrum.noreply@github.com
|
4f0706689cac0ecb6adf7fe4ca0138f3f97c9ef1
|
7a77660d3205640c049cd2802954faaa819ce5b3
|
/Visualize.py
|
e329f25aede26bc2afcd8f91e1d8ea46ddc593b3
|
[] |
no_license
|
jagan-hazard/Convolutional-Neural-Network
|
b197c2e0277751a874a2bf00f752e5cd0e0faf31
|
7fb24b9a8578a69cbfa52f1a3cb896532265669b
|
refs/heads/master
| 2020-03-20T08:04:24.625616
| 2018-06-14T04:00:20
| 2018-06-14T04:00:20
| 137,298,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,490
|
py
|
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.models import model_from_json
import numpy as np
from keras.utils.np_utils import to_categorical
from keras.utils import plot_model
import matplotlib.pyplot as plt
import math
import cv2
import os
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = '/home/jagan/Desktop/visualize/data/train'
validation_data_dir = '/home/jagan/Desktop/visualize/data/validation'
nb_train_samples = 3000
nb_validation_samples = 600
epochs = 1
batch_size = 150
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2 )
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
history=model.fit_generator(
train_generator,
epochs=epochs,
steps_per_epoch=nb_train_samples // batch_size,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
#history = model.fit(epochs, batch_size)
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
model.save_weights('iter_20_16_cnn32_64_128_fc256.h5')
print("Saved model to disk")
#plot_model(model, to_file='model.png')
#print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# summarize history for accuracy
x=history.history['acc']
y=history.history['val_acc']
#plt.plot(history.history['acc'])
#plt.plot(history.history['val_acc'])
plt.plot(x,'bs',y,'g^')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# summarize history for loss
a=history.history['loss']
b=history.history['val_loss']
#plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.plot(a,'bs',b,'g^')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# Training with callbacks
model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
model.layers[0].output_shape
model.layers[0].get_weights()
np.shape(model.layers[0].get_weights()[0])
model.layers[0].trainable
#hist = model.fit(X_train, y_train, batch_size=16, nb_epoch=num_epoch, verbose=1, validation_data=(X_test, y_test),callbacks=callbacks_list)
test_image = cv2.imread('/home/jagan/Desktop/1.jpg') #provide the image we want to visualize at each layer
#test_image=cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
test_image=cv2.resize(test_image,(150,150))
test_image = np.array(test_image)
test_image = test_image.astype('float32')
test_image /= 255
print (test_image.shape)
test_image= np.expand_dims(test_image, axis=0)
#test_image=np.rollaxis(test_image,2,0)
print (test_image.shape)
# Predicting the test image
print((model.predict(test_image)))
print(model.predict_classes(test_image))
def get_featuremaps(model, layer_idx, test_image):
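# Build a Keras backend function mapping (input image, learning phase) to the activations of layer `layer_idx`; it is called with learning phase 0 (test mode).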
get_activations = K.function([model.layers[0].input, K.learning_phase()],[model.layers[layer_idx].output,])
activations = get_activations([test_image,0])
return activations
layer_num = 0 # Enter the layer number to visualize (zero indexing); 0 is only a placeholder
filter_num = 0 # Enter the filter number (zero indexing); 0 is only a placeholder
activations = get_featuremaps(model, int(layer_num),test_image)
print (np.shape(activations))
feature_maps = activations[0][0]
print (np.shape(feature_maps))
#For all the filters
fig=plt.figure(figsize=(30,30))
plt.imshow(feature_maps[:,:,filter_num])#,cmap='gray'
plt.savefig("feature_maps:{}".format(layer_num))
#plt.savefig("featurmemaps-layer-{}".format(layer_num) + "-filternum-{}".format(filter_num)+'.jpg')
num_of_featuremaps=feature_maps.shape[2]
fig=plt.figure(figsize=(30,30))
plt.title("featuremaps-layer-{}".format(layer_num))
subplot_num=int(np.ceil(np.sqrt(num_of_featuremaps)))
c=int(num_of_featuremaps)
print (c)
for i in range(c):
ax = fig.add_subplot(subplot_num, subplot_num, i+1)
#ax.imshow(output_image[0,:,:,i],interpolation='nearest' ) #to see the first filter
ax.imshow(feature_maps[:,:,i])#,cmap='gray'
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.show()
fig.savefig("featuremaps-layer-{}".format(layer_num) + '.jpg')
|
[
"noreply@github.com"
] |
jagan-hazard.noreply@github.com
|
807e0194c93653a7d59c4c00259d0f1ece84c914
|
43fd8d4a7395ec3f5ff85d06da045d8646775d51
|
/06 - Extração de Informação/main.py
|
dc5c5590d20b141da3d483c024cb6e566218a3fb
|
[] |
no_license
|
odvieira/db201901
|
0725136b819b0515185bbb90511fb8a2ea07123f
|
c42a7971c5add37265fdb3a1192f3a4d821d9835
|
refs/heads/master
| 2020-05-14T11:17:47.500534
| 2019-05-16T02:14:18
| 2019-05-16T02:14:18
| 181,775,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,857
|
py
|
#!/usr/bin/env python3
from psycopg2 import extras, connect
from os import system
from bs4 import BeautifulSoup
import requests, xml.etree.cElementTree as ET
import urllib.request, json, wikipedia
if __name__ == "__main__":
credentials = "dbname='1901EquipePGDR' user='1901EquipePGDR' host='200.134.10.32' password='793953'"
with connect(credentials) as connection:
with connection.cursor(cursor_factory=extras.DictCursor) as cur:
# Header so that searches return English results
headers = {"Accept-Language": "en-US, en;q=0.5"}
query_artistas = 'SELECT * FROM ArtistasMusicais'
cur.execute(query_artistas)
# Collect the artists into a list
lista_artistas = []
for artista_musical in cur.fetchall():
if(artista_musical[0].isdigit()):
continue
lista_artistas.append(artista_musical[0])
counter = 1
total = len(lista_artistas)
# Create the XML tree and iterate over the artists
xml_art = ET.Element("ArtistasMusicais")
for nome_artista in lista_artistas:
print('REALIZANDO BUSCA '+str(counter)+'/'+str(total)+' (ARTISTA): ' + nome_artista)
url = requests.get("https://en.wikipedia.org/wiki/"+nome_artista,headers=headers)
data = url.text
soup = BeautifulSoup(data,"lxml")
info_box_wiki = soup.findAll("table", {"class": "infobox"})
if len(info_box_wiki) < 1:
continue
# Artist/group name
nome_banda = info_box_wiki[0].find("th").find(text=True)
itunes_link = ''
url_alt = "https://itunes.apple.com/search?term="+nome_banda.replace(' ','+')+"&entity=musicArtist"
try:
with urllib.request.urlopen(url_alt) as asd:
data = json.loads(asd.read().decode())
itunes_link = data['results'][0]['artistLinkUrl']
except:
itunes_link = ''
origem = ''
generos = ''
for text in info_box_wiki[0].findAll("tr"):
if len(text.findAll("td")) < 1:
continue
# City/country of origin
if 'Origin' in str(text):
cell_origin = text.findAll("td")
for cell in cell_origin[0].contents:
try:
origem += cell.find(text=True)
except:
origem += cell
# Musical genres
if 'Genres' in str(text):
cell_genres = text.findAll("td")
for cell in cell_genres[0].findAll("a"):
if '[' in str(cell) and ']' in str(cell):
continue
try:
generos += cell.find(text=True)+','
except:
generos += cell+','
generos = generos[:-1]
system('clear')
counter+=1
ET.SubElement(xml_art, "Artista", uri=nome_artista,nome=nome_banda,
origem=origem,generos=generos,link_itunes=itunes_link)
tree = ET.ElementTree(xml_art)
tree.write("music.xml")
# End of music section
# Start of movies section
query_filmes = 'SELECT * FROM Filme'
cur.execute(query_filmes)
# Collect the movies into a list
lista_filmes = []
for filme in cur.fetchall():
if(filme[0].isdigit()):
continue
lista_filmes.append(filme[0])
# Create the XML tree and iterate over the movies
xml_mov = ET.Element("Filmes")
counter = 1
total = len(lista_filmes)
for id_filme in lista_filmes:
print('REALIZANDO BUSCA '+str(counter)+'/'+str(total)+' (FILME): ' + id_filme)
# REQUEST IMDB
url = requests.get('https://www.imdb.com/title/'+id_filme,headers=headers)
data = url.text
soup = BeautifulSoup(data,"lxml")
info_imdb = soup.findAll("div", {"class": "title_wrapper"})
# Movie title
nome = info_imdb[0].findAll("h1")
nome = nome[0].find(text=True)
####################################
try:
wiki_page = wikipedia.search(nome)[0]
resumo_filme = wikipedia.page(title=wiki_page).summary
resumo_filme = resumo_filme.replace('\n',' ')
except:
resumo_filme = ''
####################################
# Genres and release date
gen_lan = info_imdb[0].findAll("a")
lista_generos = []
for link in gen_lan:
if "genres" in str(link):
lista_generos.append(link.find(text=True))
elif 'releaseinfo' in str(link):
data_lancamento = link.find(text=True)
# Director(s)
info_diretor = soup.findAll("div", {"class": "credit_summary_item"})
link_diretor = info_diretor[0].findAll("a")
lista_diretores = []
for link in link_diretor:
if "name" in str(link):
lista_diretores.append(link.find(text=True))
genero = ''
for x in lista_generos:
genero += x+','
genero = genero[:-1]
diretor = ''
for x in lista_diretores:
diretor += x+','
diretor = diretor[:-1]
data_lancamento = data_lancamento.replace('(Brazil)','')
system('clear')
counter+=1
ET.SubElement(xml_mov, "Filme", uri=id_filme,nome=nome,resumo=resumo_filme,
diretor=diretor,generos=genero,data_lancamento=data_lancamento)
tree = ET.ElementTree(xml_mov)
tree.write("movie.xml")
|
[
"rpasserino75@gmail.com"
] |
rpasserino75@gmail.com
|
78a5d26431e1c5228b1257e275fe51fbf8ab3863
|
09f49c5dae6a3abe0e8fc15437539f93dd3844fc
|
/common/vp_mail/publish_notes.py
|
a457bebf4a684419b1e93db8b223bf0679e9116b
|
[] |
no_license
|
JAZimmermann/samples
|
aeacfdc8281862c299ec950a2621e57dadc3110a
|
969c90d764f1df266aeea7c47cac02ef93a3d43d
|
refs/heads/master
| 2021-01-11T08:07:21.672933
| 2016-09-22T05:03:57
| 2016-09-22T05:03:57
| 68,860,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,507
|
py
|
#
# Copyright (c) [2014] John Zimmermann
#
# $URL$
# $Date: 2014-09-04$
# $Revision: 1.0$
# $Author: johnz $
#
class PublishNotes(object):
'''
main class for gathering publish notes from user by providing
a prompt dialog based on what application they are in, ie. maya or mobu
'''
def __init__(self):
'''
initialize instance
'''
self._win_title = "Publish Notes"
self._win_msg = "Enter publish notes / updates."
self._application = None
self.notes = None
# attempt to determine current application
self._determine_application()
def _determine_application(self):
'''
attempt to determine what is current application before
proceeding to get notes input from user
'''
if not self._check_for_maya() and not self._check_for_mobu():
raise ImportError("Unable to ascertain / import current "
+ "application as Maya or MotionBuilder. "
+ "Make sure application is correct.")
if self._application.lower() == "maya":
self._get_maya_notes()
if self._application.lower() == "mobu":
self._get_mobu_notes()
def _check_for_maya(self):
'''
try to determine if current application is maya related
'''
found = False
try:
import maya.cmds as mc
self._application = mc.about(query=True, application=True)
found = True
except:
pass
return found
def _check_for_mobu(self):
'''
try to determine if current application is motionbuilder related
'''
found = False
try:
import re
from pyfbsdk import FBSystem
mobu_patt = re.compile("motionbuilder", re.IGNORECASE)
if mobu_patt.search(FBSystem().ApplicationPath):
self._application = "mobu"
found = True
except:
pass
return found
def _get_maya_notes(self):
'''
prompt for and retrieve publish notes from user in maya
'''
import maya.cmds as mc
confirm = mc.promptDialog(
title=self._win_title,
messageAlign="center",
message=self._win_msg,
button=["OK", "Cancel"],
defaultButton="OK",
cancelButton="Cancel",
dismissString="Cancel"
)
if confirm == "OK":
self.notes = mc.promptDialog(query=True, text=True)
def _get_mobu_notes(self):
'''
prompt for and retrieve publish notes from user in motionbuilder
'''
from pyfbsdk import FBMessageBoxGetUserValue, FBPopupInputType
cancelBtn = 0
confirm, notes = FBMessageBoxGetUserValue(self._win_title,
self._win_msg,
"",
FBPopupInputType.kFBPopupString,
"Ok", "Cancel", None,
1, cancelBtn)
print confirm, notes
if confirm == 1:
self.notes = notes
|
[
"john.zimm.zimmermann@gmail.com"
] |
john.zimm.zimmermann@gmail.com
|
bd5348b75265914180c074a798ffa40de4204954
|
126a8cc805589b9313e9a67b1f50eefb24b6370f
|
/arp_mon_win.py
|
2a19c4dcce8abe9d093b6e56d0cd45dd756f74a1
|
[] |
no_license
|
stevery/coco
|
73bb3a8bc2e4ccfd1c924cb289fefeadf86720a5
|
9341f4d86faa6fd2abbbe346e4ca04ce2c973b55
|
refs/heads/master
| 2021-08-16T09:00:31.449415
| 2017-11-19T13:21:16
| 2017-11-19T13:21:16
| 110,894,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
#-*- encoding:utf8 -*-
import subprocess
import re
from time import sleep
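# Poll the Windows ARP table ("arp -a") every 30 seconds; within each snapshot, warn when the same MAC address is listed for more than one IP on an interface (a possible sign of ARP spoofing).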
while True:
arp_r = subprocess.check_output(["arp","-a"],
stderr=subprocess.STDOUT,
shell=True)
my_arp = {}
a = arp_r.split('\r\n\r\n')
for i in a:
tmp_list = i.strip().split('\r\n')
if re.search("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", tmp_list[0]):
tmp_interface = re.search("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", tmp_list[0]).group()
my_arp[tmp_interface] = {}
for j in tmp_list[2:]:
try:
tmp_body = j.strip().split()
if tmp_body[1] in my_arp[tmp_interface]:
print 'arp spoofing found {}:{}'.format(tmp_body[0],tmp_body[1])
print my_arp[tmp_interface][tmp_body[1]]
else:
my_arp[tmp_interface].update({tmp_body[1]:tmp_body[0]})
except:
pass
sleep(30)
|
[
"triptokyw@gmail.com"
] |
triptokyw@gmail.com
|
424a153fb67403733012e88be8f95f8f6783f4bc
|
4872375eeb0b2a45c0d3046bbfb5cd2d202b2295
|
/quiz.py
|
e89862b18df06fdbf7fe361b76ea3a36a1613f18
|
[
"MIT"
] |
permissive
|
ash018/discordQuizBot
|
ee3aae7171220f39bd9a0bb057c2fa5eab017dd5
|
b00441553bbbeeab2c4da0264eeed8480a33c3a1
|
refs/heads/master
| 2020-04-13T09:00:18.410702
| 2018-12-25T17:05:28
| 2018-12-25T17:05:28
| 163,098,967
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,308
|
py
|
# -*- coding: utf-8 -*-
"""
Quiz / Question classes for quizbot.
@author: drkatnz
"""
import asyncio
import random
import re
import os
#todo: probably need to remove punctuation from answers
class Quiz:
def __init__(self, client, win_limit=10, hint_time=30):
#initialises the quiz
self.__running = False
self.current_question = None
self._win_limit = win_limit
self._hint_time = hint_time
self._questions = []
self._asked = []
self.scores = {}
self._client = client
self._quiz_channel = None
self._cancel_callback = True
#load in some questions
datafiles = os.listdir('quizdata')
for df in datafiles:
filepath = 'quizdata' + os.path.sep + df
self._load_questions(filepath)
print('Loaded: ' + filepath)
print('Quiz data loading complete.\n')
def _load_questions(self, question_file):
# loads in the questions for the quiz
with open(question_file, encoding='utf-8',errors='replace') as qfile:
lines = qfile.readlines()
question = None
category = None
answer = None
regex = None
position = 0
while position < len(lines):
if lines[position].strip().startswith('#'):
#skip
position += 1
continue
if lines[position].strip() == '': #blank line
#add question
if question is not None and answer is not None:
q = Question(question=question, answer=answer,
category=category, regex=regex)
self._questions.append(q)
#reset everything
question = None
category = None
answer = None
regex = None
position += 1
continue
if lines[position].strip().lower().startswith('category'):
category = lines[position].strip()[lines[position].find(':') + 1:].strip()
elif lines[position].strip().lower().startswith('question'):
question = lines[position].strip()[lines[position].find(':') + 1:].strip()
elif lines[position].strip().lower().startswith('answer'):
answer = lines[position].strip()[lines[position].find(':') + 1:].strip()
elif lines[position].strip().lower().startswith('regexp'):
regex = lines[position].strip()[lines[position].find(':') + 1:].strip()
#else ignore
position += 1
def started(self):
#finds out whether a quiz is running
return self.__running
def question_in_progress(self):
#finds out whether a question is currently in progress
return self.current_question is not None
async def _hint(self, hint_question, hint_number):
#offers a hint to the user
if self.__running and self.current_question is not None:
await asyncio.sleep(self._hint_time)
if (self.current_question == hint_question
and self._cancel_callback == False):
if (hint_number >= 5):
await self.next_question(self._channel)
return
hint = self.current_question.get_hint(hint_number)
await self._client.send_message(self._channel, 'Hint {}: {}'.format(hint_number, hint), tts=True)
if hint_number < 5:
await self._hint(hint_question, hint_number + 1)
async def start(self, channel):
#starts the quiz in the given channel.
if self.__running:
#don't start again
await self._client.send_message(channel,
'Quiz already started in channel {}, you can stop it with !stop or !halt'.format(self._channel.name), tts=True)
else:
await self.reset()
self._channel = channel
await self._client.send_message(self._channel, '@here Quiz starting in 10 seconds...', tts=True)
await asyncio.sleep(10)
self.__running = True
await self.ask_question()
async def reset(self):
if self.__running:
#stop
await self.stop()
#reset the scores
self.current_question = None
self._cancel_callback = True
self.__running = False
self._questions.extend(self._asked)
self._asked = []
self.scores = {}
async def stop(self):
#stops the quiz from running
if self.__running:
#print results
#stop quiz
await self._client.send_message(self._channel, 'Quiz stopping.', tts=True)
if(self.current_question is not None):
await self._client.send_message(self._channel,
'The answer to the current question is: {}'.format(self.current_question.get_answer()), tts=True)
await self.print_scores()
self.current_question = None
self._cancel_callback = True
self.__running = False
else:
await self._client.send_message(self._channel, 'No quiz running, start one with !ask or !quiz', tts=True)
async def ask_question(self):
#asks a question in the quiz
if self.__running:
#grab a random question
qpos = random.randint(0,len(self._questions) - 1)
self.current_question = self._questions[qpos]
self._questions.remove(self.current_question)
self._asked.append(self.current_question)
await self._client.send_message(self._channel,
'Question {}: {}'.format(len(self._asked), self.current_question.ask_question()), tts=True)
self._cancel_callback = False
await self._hint(self.current_question, 1)
async def next_question(self, channel):
#moves to the next question
if self.__running:
if channel == self._channel:
await self._client.send_message(self._channel,
'Moving onto next question. The answer I was looking for was: {}'.format(self.current_question.get_answer()), tts=True)
self.current_question = None
self._cancel_callback = True
await self.ask_question()
async def answer_question(self, message):
#checks the answer to a question
if self.__running and self.current_question is not None:
if message.channel != self._channel:
return
if self.current_question.answer_correct(message.content):
#record success
self._cancel_callback = True
if message.author.name in self.scores:
self.scores[message.author.name] += 1
else:
self.scores[message.author.name] = 1
await self._client.send_message(self._channel,
'Well done, {}, the correct answer was: {}'.format(message.author.name, self.current_question.get_answer()), tts=True)
self.current_question = None
#check win
if self.scores[message.author.name] == self._win_limit:
await self.print_scores()
await self._client.send_message(self._channel, '{} has won! Congratulations.'.format(message.author.name), tts=True)
self._questions.extend(self._asked)
self._asked = []
self.__running = False
#print totals?
elif len(self._asked) % 5 == 0:
await self.print_scores()
await self.ask_question()
async def print_scores(self):
#prints out a table of scores.
if self.__running:
await self._client.send_message(self._channel,'Current quiz results:', tts=True)
else:
await self._client.send_message(self._channel,'Most recent quiz results:', tts=True)
highest = 0
for name in self.scores:
await self._client.send_message(self._channel,'{}:\t{}'.format(name,self.scores[name]), tts=True)
if self.scores[name] > highest:
highest = self.scores[name]
if len(self.scores) == 0:
await self._client.send_message(self._channel,'No results to display.', tts=True)
leaders = []
for name in self.scores:
if self.scores[name] == highest:
leaders.append(name)
if len(leaders) > 0:
if len(leaders) == 1:
await self._client.send_message(self._channel,'Current leader: {}'.format(leaders[0]), tts=True)
else:
await self._client.send_message(self._channel,'Current leaders: {}'.format(leaders), tts=True)
class Question:
# A question in a quiz
def __init__(self, question, answer, category=None, author=None, regex=None):
self.question = question
self.answer = answer
self.author = author
self.regex = regex
self.category = category
self._hints = 0
def ask_question(self):
# gets a pretty formatted version of the question.
question_text = ''
if self.category is not None:
question_text+='({}) '.format(self.category)
else:
question_text+='(General) '
if self.author is not None:
question_text+='Posed by {}. '.format(self.author)
question_text += self.question
return question_text
def answer_correct(self, answer):
#checks if an answer is correct or not.
#should check regex
if self.regex is not None:
match = re.fullmatch(self.regex.strip(),answer.strip())
return match is not None
#else just string match
return answer.lower().strip() == self.answer.lower().strip()
def get_hint(self, hint_number):
# gets a formatted hint for the question
hint = []
for i in range(len(self.answer)):
if i % 5 < hint_number:
hint = hint + list(self.answer[i])
else:
if self.answer[i] == ' ':
hint += ' '
else:
hint += '-'
return ''.join(hint)
def get_answer(self):
# gets the expected answer
return self.answer
|
[
"sadatakash018@gmail.com"
] |
sadatakash018@gmail.com
|
79d8384fe316eec7a4be279f68c1b2184e764229
|
757b2b6bfadc89c13aff30575445dc210529a384
|
/src/optimize_nn.py
|
48415572591084666117a31a386dfe9e90c7967a
|
[
"BSD-3-Clause"
] |
permissive
|
pgniewko/Protein-Secondary-Structure-Prediction
|
15d5b204787604f4a5dd0764d606a24649d5c9e2
|
5fdc58d7b9d59e314f873eb7784b16b5539d2df9
|
refs/heads/master
| 2018-09-08T00:28:08.313884
| 2018-06-04T21:30:55
| 2018-06-04T21:30:55
| 109,162,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,595
|
py
|
#! /usr/bin/env python
# BioE 134, Fall 2017
# Author: Pawel Gniewek (pawel.gniewek@berkeley.edu)
# License: BSD
#
# Point to an input file (../data/db/aa_w5_a3.dat), and sec.str. classes file (../data/db/ss_a3.dat)
# Usage: ./optimize_clf.py ../data/db/aa_w5_a3.dat ../data/db/ss_a3.dat
from __future__ import print_function
import sys
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
from utils import svd_pca, cross_decomp
if __name__ == "__main__":
# Read the data
X = np.loadtxt(sys.argv[1])
Y = np.loadtxt(sys.argv[2])
# X = cross_decomp(X, Y, 12)
X = svd_pca(X, 50)
clf = MLPClassifier(activation='logistic',solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(25,11,3))
scores = cross_val_score(clf, X, Y, cv=5, scoring='accuracy')
print("Accuracy: %0.2f (+/- %0.2f) [ %s ]" % (scores.mean(), scores.std(), "MLPClassifier: CV=5"))
# Set the parameters by cross-validation
parameters = [ {'activation':['logistic','relu','tanh'],\
'solver':['lbfgs','sgd','adam'], \
'learning_rate':['constant','adaptive'],\
'hidden_layer_sizes':[(100,),(50,50),(50,25,3),(25,11,5)] } ]
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.5, random_state=0)
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV( MLPClassifier() , parameters,\
cv=2, scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
|
[
"gniewko.pablo@gmail.com"
] |
gniewko.pablo@gmail.com
|
36dc2128246ab9955f721cf3b4751c1493ded948
|
17c90beebbe2551255eacd009e0033f738d265f0
|
/python/02-Linux_loop_update/full-app-bootload.py
|
ac2b7454de4b926c7dc551cdd65f9435cda9c36a
|
[] |
no_license
|
xiangliangliang/python
|
533e766da1551f37695b44df8bbbddd807ede63c
|
8519c53582ddb82ec106d96b2a47e61258498825
|
refs/heads/master
| 2020-04-17T20:41:27.536570
| 2019-02-12T07:45:49
| 2019-02-12T07:45:49
| 166,916,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,401
|
py
|
'''
To make sure this script runs correctly, set things up as follows:
1. Put the Linux serial port in the first tab
2. Put the device being upgraded in the second tab
3. Put the power-supply serial port in the third tab
'''
# $language = "python"
# $interface = "1.0"
import re
import time
import datetime
import string
import random
filename = re.sub(r'[^0-9]', '_', str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), 0)
file_path = crt.Dialog.FileOpenDialog(title='Please select a text file', defaultFilename=filename+'_log.log',filter = 'Log Files (*.log)|*.log')
def main():
upgrade_SW = ['app_16.bin','app_17.bin','app_18.bin','app_19.bin','app_20.bin','app_22.bin','app_23.bin','app_24.bin','app_25.bin','app_26.bin']
upgrade_FULL_SW = ['app_full_16.bin','app_full_17.bin','app_full_18.bin','app_full_19.bin','app_full_20.bin','app_full_22.bin','app_full_23.bin','app_full_24.bin','app_full_25.bin','app_full_26.bin']
upgrade_boot=['boot_8M.bin','boot_8M.bin']
check_version = ['00.01.24','00.01.24','00.01.24','00.01.24','00.01.24','00.01.24','00.01.24','00.01.24','00.01.24']
#check_version = ['00.01.16','00.01.17','00.01.18','00.01.19','00.01.20','00.01.21','00.01.22','00.01.23','00.01.24']
crt.Screen.Synchronous = False
i=0
boot_count=0
while 1:
initialTab = crt.GetScriptTab()
tab_1 = crt.GetTab(1)
tab_1.Activate()
tab_1.Screen.Send('\r\n')
#tab_1.Screen.Send("./sample_upgrade "+sw+ '\r\n') # -------------- change this as needed
if (boot_count%2) == 0:
tab_1.Screen.Send("boot_8M.bin" + '\r\n') # upgrade the 8M bootloader
boot_count = boot_count +1
filep = open(file_path, 'a+')
filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[i]+' PASS'+'\r\n')
i = i+1
else:
tab_1.Screen.Send("boot_4M.bin" + '\r\n')
filep = open(file_path, 'a+')
filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[i]+' PASS'+'\r\n')
i = i+1
time.sleep(4)
tab_3 = crt.GetTab(3)
tab_3.Activate()
time.sleep(2)
tab_3.Screen.Send('\r\n')
tab_3.Screen.Send('\r\n')
on = str.upper('out')+'1'
off = str.upper('out')+'0'
tab_3.Screen.Send(off +'\r\n\r')
time.sleep(5)
tab_3.Screen.Send(on +'\r\n\r')
for i in range(8): #----------- randomly upgrade the app 8 times
#升级app
tab_1 = crt.GetTab(1)
tab_1.Activate()
tab_1.Screen.Send('\r\n')
time.sleep(2)
#tab_1.Screen.Send("./sample_upgrade "+sw+ '\r\n') # -------------- change this as needed
a = random.randint(0, len(check_version) - 1) # keep the index valid for both upgrade_SW and check_version
tab_1.Screen.Send(upgrade_SW[a]+ '\r\n') # -------------- upgrade the app
time.sleep(2) # -------------- change this as needed
# Reboot the platform; the DC power source serial port must be in the third tab
tab_3 = crt.GetTab(3)
tab_3.Activate()
time.sleep(2)
tab_3.Screen.Send('\r\n')
tab_3.Screen.Send('\r\n')
on = str.upper('out')+'1'
off = str.upper('out')+'0'
tab_3.Screen.Send(off +'\r\n\r')
time.sleep(5)
tab_3.Screen.Send(on +'\r\n\r')
# Open the C201-D serial port; it must be in the second tab
tab_2 = crt.GetTab(2)
tab_2.Activate()
time.sleep(5)
tab_2.Screen.Send('\r\n')
tab_2.Screen.Send('getVersion'+'\r\n')
version_result = tab_2.Screen.WaitForString('command_getVersion',5)
if version_result == 1:
current_sw = tab_2.Screen.ReadString('CPU0').strip()
current_sw = current_sw[:8]
#crt.Dialog.MessageBox(current_sw)
time.sleep(2)
#crt.Dialog.MessageBox(check_version)
else:
crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
time.sleep(1)
break
if (current_sw == check_version[a]):
filep = open(file_path, 'a+')
filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[i]+' PASS'+'\r\n')
i = i+1
else:
filep = open(file_path, 'a+')
filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[i]+' Fail'+'\r\n')
i = i+1
crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
time.sleep(1)
break
tab_1 = crt.GetTab(1)
tab_1.Activate()
tab_1.Screen.Send('\r\n')
#tab_1.Screen.Send("./sample_upgrade "+sw+ '\r\n') # -------------- change this as needed
tab_1.Screen.Send("app_25.bin" + '\r\n') # upgrade app_25.bin
time.sleep(2) # ------ change this as needed
tab_3 = crt.GetTab(3)
tab_3.Activate()
time.sleep(2)
tab_3.Screen.Send('\r\n')
tab_3.Screen.Send('\r\n')
on = str.upper('out')+'1'
off = str.upper('out')+'0'
tab_3.Screen.Send(off +'\r\n\r')
time.sleep(5)
tab_3.Screen.Send(on +'\r\n\r')
tab_2 = crt.GetTab(2)
tab_2.Activate()
time.sleep(5)
tab_2.Screen.Send('\r\n')
tab_2.Screen.Send('getVersion'+'\r\n')
version_result = tab_2.Screen.WaitForString('command_getVersion',5)
if version_result == 1:
current_sw = tab_2.Screen.ReadString('CPU0').strip()
current_sw = current_sw[:8]
#crt.Dialog.MessageBox(current_sw)
time.sleep(2)
#crt.Dialog.MessageBox(check_version)
else:
crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
time.sleep(1)
break
if (current_sw == '00.01.25'):
filep = open(file_path, 'a+')
filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+'00.01.25 app'+' PASS'+'\r\n')
i = i+1
else:
filep = open(file_path, 'a+')
filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+'00.01.25 app'+' Fail'+'\r\n')
i = i+1
crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
time.sleep(1)
break
tab_1 = crt.GetTab(1)
tab_1.Activate()
tab_1.Screen.Send('\r\n')
#tab_1.Screen.Send("./sample_upgrade "+sw+ '\r\n') # -------------- change this as needed
ij = random.randint(0, len(check_version) - 1) # keep the index valid for both upgrade_FULL_SW and check_version
tab_1.Screen.Send(upgrade_FULL_SW[ij] + '\r\n') # upgrade one randomly chosen full image .bin
time.sleep(2) # -------------- change this as needed
tab_3 = crt.GetTab(3)
tab_3.Activate()
time.sleep(2)
tab_3.Screen.Send('\r\n')
tab_3.Screen.Send('\r\n')
on = str.upper('out')+'1'
off = str.upper('out')+'0'
tab_3.Screen.Send(off +'\r\n\r')
time.sleep(5)
tab_3.Screen.Send(on +'\r\n\r')
tab_2 = crt.GetTab(2)
tab_2.Activate()
time.sleep(5)
tab_2.Screen.Send('\r\n')
tab_2.Screen.Send('getVersion'+'\r\n')
version_result = tab_2.Screen.WaitForString('command_getVersion',5)
if version_result == 1:
current_sw = tab_2.Screen.ReadString('CPU0').strip()
current_sw = current_sw[:8]
#crt.Dialog.MessageBox(current_sw)
time.sleep(2)
#crt.Dialog.MessageBox(check_version)
else:
crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
time.sleep(1)
break
if (current_sw == check_version[ij]):
filep = open(file_path, 'a+')
filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[ij]+' PASS'+'\r\n')
i = i+1
else:
filep = open(file_path, 'a+')
filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[ij]+' Fail'+'\r\n')
i = i+1
crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
time.sleep(1)
break
main()
|
[
"284604666@qq.com"
] |
284604666@qq.com
|
7d39d82e21a62f66317e371a926cb296d6850163
|
687684850a677f9cfd40077d036a9e25abb6ed51
|
/pms/core/migrations/0001_initial.py
|
4261d4b42f9d61542c04c42383f3c42fb9323791
|
[] |
no_license
|
PhoenixCSCD/pms_backend
|
0f0889a70f58c477d5dc2ee8feec2c0d348e6276
|
bbf08354a0cc7d98f63408be460ae0522dedf96e
|
refs/heads/production
| 2022-12-11T01:08:41.065726
| 2020-07-23T10:25:01
| 2020-07-23T10:25:01
| 244,187,846
| 0
| 1
| null | 2022-12-08T10:57:17
| 2020-03-01T16:56:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,114
|
py
|
# Generated by Django 3.0.8 on 2020-07-18 10:37
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='Allergy',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Branch',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Drug',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('name', models.CharField(max_length=250)),
('selling_price', models.DecimalField(decimal_places=2, max_digits=19)),
('cost_price_per_pack', models.DecimalField(decimal_places=2, max_digits=19)),
('quantity_per_pack', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='User',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('email', models.CharField(max_length=50, unique=True)),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
('date_of_birth', models.DateField()),
('phone_number', models.CharField(max_length=100)),
('is_staff', models.BooleanField(default=False)),
('avatar', models.URLField(null=True)),
('branches', models.ManyToManyField(related_name='users', to='core.Branch')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
[
"developer.akabojohnkennedy@gmail.com"
] |
developer.akabojohnkennedy@gmail.com
|
c100093dfec1709874cac7a3d2d395b9a4d1626c
|
b5bc88b6fc90a9910387ae32e4152859eae57116
|
/src/lib/trains/base_trainer.py
|
bcedf49fb5c5b7a1a918dc83bbaac8e7261c850c
|
[
"MIT"
] |
permissive
|
Frankhe303/GGNet
|
f0344f005bbb9cfa869d62980751df8ad9789ba4
|
5fd113711960200929b979724f2d9b5647b4719e
|
refs/heads/main
| 2023-04-13T23:53:07.135492
| 2021-04-16T01:38:54
| 2021-04-16T01:38:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,008
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
from progress.bar import Bar
from models.data_parallel import DataParallel
from utils.utils import AverageMeter
class ModleWithLoss(torch.nn.Module):
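"""Bundle a network with its loss: forward(batch) returns the final output, the total loss, and the per-term loss stats."""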
def __init__(self, model, loss,opt):
super(ModleWithLoss, self).__init__()
self.model = model
self.loss = loss
self.opt = opt
def forward(self, batch):
outputs = self.model(batch['input'])
loss, loss_stats = self.loss(outputs, batch)
return outputs[-1], loss, loss_stats
class BaseTrainer(object):
def __init__(
self, opt, model, optimizer=None):
self.opt = opt
self.optimizer = optimizer
self.loss_stats, self.loss = self._get_losses(opt)
self.model_with_loss = ModleWithLoss(model, self.loss, self.opt)
def set_device(self, gpus, chunk_sizes, device):
if len(gpus) > 1:
self.model_with_loss = DataParallel(
self.model_with_loss, device_ids=gpus,
chunk_sizes=chunk_sizes).to(device)
else:
self.model_with_loss = self.model_with_loss.to(device)
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device=device, non_blocking=True)
def run_epoch(self, phase, epoch, data_loader):
model_with_loss = self.model_with_loss
if phase == 'train':
model_with_loss.train()
else:
if len(self.opt.gpus) > 1:
model_with_loss = self.model_with_loss.module
model_with_loss.eval()
torch.cuda.empty_cache()
opt = self.opt
results = {}
data_time, batch_time = AverageMeter(), AverageMeter()
avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
end = time.time()
for iter_id, batch in enumerate(data_loader):
if iter_id >= num_iters:
break
data_time.update(time.time() - end)
for k in batch:
if k != 'meta':
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
output, loss, loss_stats = model_with_loss(batch)
loss = loss.mean()
if phase == 'train':
self.optimizer.zero_grad()
loss.backward()
for key, value in model_with_loss.named_parameters():
if value.grad is None:
print(key)
assert False
elif (value.grad == 0).all():
print(key, "none")
self.optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(
epoch, iter_id, num_iters, phase=phase,
total=bar.elapsed_td, eta=bar.eta_td)
for l in avg_loss_stats:
avg_loss_stats[l].update(
loss_stats[l].mean().item(), batch['input'].size(0))
Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
if not opt.hide_data_time:
Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
'|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
if opt.print_iter > 0:
if iter_id % opt.print_iter == 0:
print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
else:
bar.next()
if opt.test:
self.save_result(output, batch, results)
del output, loss, loss_stats
bar.finish()
ret = {k: v.avg for k, v in avg_loss_stats.items()}
ret['time'] = bar.elapsed_td.total_seconds() / 60.
return ret, results
def save_result(self, output, batch, results):
raise NotImplementedError
def _get_losses(self, opt):
raise NotImplementedError
def val(self, epoch, data_loader):
return self.run_epoch('val', epoch, data_loader)
def train(self, epoch, data_loader):
return self.run_epoch('train', epoch, data_loader)
|
[
"973162258@qq.com"
] |
973162258@qq.com
|
d26c162667d757caa31ccc5aa285f4f67e0e43f7
|
597e9361ba5508e1252a595791c7739ab7b7bf6d
|
/venv/Scripts/pip-script.py
|
550cd4c7602fded3c3fff7c55bfebf6c8340020c
|
[] |
no_license
|
Shivani-781/Sorting_Algorithms_Python_Implementation
|
ec1d2598322665f94a059a3ac8bc2074db814a56
|
ddaa2b47b6ed7fe9e65e259d47fa3a73d5f255ad
|
refs/heads/master
| 2022-12-06T14:41:42.282435
| 2020-08-29T18:58:34
| 2020-08-29T18:58:34
| 291,290,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
#!"C:\Users\Shivani Chauhan\PycharmProjects\Sorting_Algorithms_Python_Implementation\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"shivanichauhan781@gmail.com"
] |
shivanichauhan781@gmail.com
|
327169a1cb6be4099ccb7f13fab70dfa92f4742e
|
7deda84f7a280f5a0ee69b98c6a6e7a2225dab24
|
/Receptionist/migrations/0027_package_manage_reception.py
|
45248c462110a952feffbb09a7008787a2c97129
|
[] |
no_license
|
Cornex-Inc/Coffee
|
476e30f29412373fb847b2d518331e6c6b9fdbbf
|
fcd86f20152e2b0905f223ff0e40b1881db634cf
|
refs/heads/master
| 2023-01-13T01:56:52.755527
| 2020-06-08T02:59:18
| 2020-06-08T02:59:18
| 240,187,025
| 0
| 0
| null | 2023-01-05T23:58:52
| 2020-02-13T05:47:41
|
Python
|
UTF-8
|
Python
| false
| false
| 549
|
py
|
# Generated by Django 2.1.15 on 2020-05-19 15:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Receptionist', '0026_package_manage_grouping'),
]
operations = [
migrations.AddField(
model_name='package_manage',
name='reception',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.DO_NOTHING, to='Receptionist.Reception'),
preserve_default=False,
),
]
|
[
"khm4321@naver.com"
] |
khm4321@naver.com
|
845f77bc8d39737647f4a55d183df4f8f7afdbf3
|
43aeee48c1f6fc468a43f9bb0d4edae8ee0dbee1
|
/LPTW-SRC/例3_21.py
|
8430bd36f542e524ac1f1798a936dc9eba351ed6
|
[] |
no_license
|
wiky2/mytestproject
|
f694cf71dd3031e4597086f3bc90d246c4b26298
|
e7b79df6304476d76e87f9e8a262f304b30ca312
|
refs/heads/master
| 2021-09-07T20:54:19.569970
| 2018-02-28T23:39:00
| 2018-02-28T23:39:00
| 100,296,844
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
# This loop makes sure the user enters an integer greater than 2 as the number of judges
while True:
try:
n = int(input('请输入评委人数:'))
if n <= 2:
print('评委人数太少,必须多于2个人。')
else:
# Exit the loop once an integer greater than 2 has been entered
break
except:
pass
# Holds all the judges' scores
scores = []
for i in range(n):
# This while loop makes sure the user enters a number between 0 and 100
while True:
try:
score = input('请输入第{0}个评委的分数:'.format(i+1))
# Convert the string to a float
score = float(score)
# Make sure the number is between 0 and 100
assert 0<=score<=100
scores.append(score)
# If the input is valid, leave the while loop and move on to the next judge's score
break
except:
print('分数错误')
# Find and remove the highest and lowest scores
highest = max(scores)
lowest = min(scores)
scores.remove(highest)
scores.remove(lowest)
# Compute the average, rounded to 2 decimal places
finalScore = round(sum(scores)/len(scores), 2)
formatter = '去掉一个最高分{0}\n去掉一个最低分{1}\n最后得分{2}'
print(formatter.format(highest, lowest, finalScore))
|
[
"jerry_136510@aliyun.com"
] |
jerry_136510@aliyun.com
|
a734b3373cd26121facac575215b185ffb3f1f82
|
caa70852a42cc70ef81573539da1f3efedc9d0e5
|
/venv/bin/easy_install
|
94a82250d36e04e5e6e7dafdd4e3c20e0b4119cd
|
[] |
no_license
|
nejelnejel/bubble_sort_2
|
62aa4977f606ae2d6055c4cad9853393d415c93a
|
7fa37e55aa1ce7dd9b422688ad6a3b2b87283ed3
|
refs/heads/master
| 2020-11-26T12:01:03.276878
| 2019-12-19T14:38:08
| 2019-12-19T14:38:08
| 229,065,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
#!/home/rent/PycharmProjects/sphinx/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"nejelnejel@gmail.com"
] |
nejelnejel@gmail.com
|
|
fce8102b6fabca507bec27dbceb55dbf1eaf4c0c
|
87e80e3e91dbb23b857b8fd44427bb03e3a0be29
|
/API/decision.py
|
38eb0adc8f8a33d738917e1901d6ca2c0503017e
|
[
"Unlicense"
] |
permissive
|
ClementRoyer/TwitchAFK-API
|
fc68da62bd6393d155a4ff6523f886636db0aec5
|
92164d776930bd6c8e371a5d2b8ef7fe07b76ea9
|
refs/heads/master
| 2023-01-22T09:00:07.838758
| 2020-12-07T22:01:59
| 2020-12-07T22:01:59
| 318,622,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
import time
def betStrategy(driver, bet):
output = {
"choice": "",
"amount": 0
}
output['choice'] = "A" if bet.coteA > bet.coteB else "B" # Take the most advantageous odds
output['amount'] = min(round(int("".join(bet.amount.split())) * 0.05), 250000) # Caped to 250 000, bet 5% of the balance
return output
|
[
"clement.royer@epitech.eu"
] |
clement.royer@epitech.eu
|
8c9c989a540b08b69f3d5efe25cb428555c4f3ac
|
f756d72da9a7a8b05399d7982ad83ab71170e3ce
|
/test_scraping.py
|
04dcbb7f21c523a5a0aa983b920849402b7cdba3
|
[] |
no_license
|
polmuz/GoGetWeb
|
c0fddda946d3950fc606af3b1bd0148f88589723
|
36a369c648e61c953437a2d7ee1a8017d7bb5636
|
refs/heads/master
| 2021-01-15T15:25:54.672987
| 2016-08-22T21:22:45
| 2016-08-22T21:22:45
| 63,362,626
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
# -*- coding: utf-8 -*-
import unittest
from unittest.mock import patch, MagicMock
from scraping import get_webpage_content, extract_xpaths, Not200Exception
class TestGetWebpageContent(unittest.TestCase):
@patch('scraping.requests.get')
def test_base(self, requests_get_mock):
expected_content = b'<h1>Chuck Norris</h1>'
requests_get_mock.return_value = MagicMock(
content=expected_content,
status_code=200
)
content = get_webpage_content("http://test.url")
self.assertEqual(expected_content, content)
@patch('scraping.requests.get')
def test_status_not_ok(self, requests_get_mock):
requests_get_mock.return_value = MagicMock(
content=b'Not Found',
status_code=404
)
with self.assertRaises(Not200Exception):
content = get_webpage_content("http://test.url")
@patch('scraping.requests.get')
def test_requests_exception(self, requests_get_mock):
requests_get_mock.side_effect = Exception("Requests Exception!")
with self.assertRaises(Exception):
content = get_webpage_content("http://test.url")
class TestExtractXpath(unittest.TestCase):
def test_base(self):
content = """
<html>
<body>
<h1>Title!</h1>
<p>Bla</p>
</body>
</html>
"""
xpaths = {
"title": "//h1"
}
extracted = extract_xpaths(content, xpaths)
self.assertEqual(extracted, {"title": "Title!"})
def test_multiple_elements(self):
content = """
<html>
<body>
<h1>Title!</h1>
<p>Bla</p>
<p>Ble</p>
</body>
</html>
"""
xpaths = {
"description": "//p"
}
extracted = extract_xpaths(content, xpaths)
self.assertEqual(extracted, {"description": "Bla\nBle"})
|
[
"pablomouzo@gmail.com"
] |
pablomouzo@gmail.com
|
8046f0cbdb65b170b5dabaff186ad81fb6a24843
|
f3a73a2db52b9ec1b052d5f8de362991a19af1ba
|
/singletone_decorator.py
|
031b40a63e1ebe570022ea0cac4e4b52af249363
|
[] |
no_license
|
mikaevnikita/python
|
ffc9fe9a295f3291379482d08f72bac8fd3d98c1
|
ddd7a4f51ad1d1433b41c7e58db2227f41d19400
|
refs/heads/master
| 2021-09-02T02:27:26.029084
| 2017-12-29T18:39:40
| 2017-12-29T18:39:40
| 115,745,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
import functools
def singleton(cls):
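"""Class decorator: create the decorated class once and return that cached instance on every subsequent call."""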
instance = None
@functools.wraps(cls)
def inner(*args,**kwargs):
nonlocal instance
if instance is None:
instance = cls(*args,**kwargs)
return instance
return inner
@singleton
class Noop:
pass
a=Noop()
b=Noop()
print(id(a),id(b))
|
[
"n.v.mikaev@gmail.com"
] |
n.v.mikaev@gmail.com
|
7bdb37872ffc8f66ece48618e51d91c6e015762c
|
d8e9a2dfedaace3b96a7a4c3d105c06950b52e3a
|
/profesores/admin.py
|
6a0b853bfda8230e3084aa45aac60fe562f2d11b
|
[] |
no_license
|
HeribertoLara/DjangoTareaSem4
|
a55ee33427407564588a7933fd80ea1f2661859d
|
c3c0c50f8cf81c352603f803a727c2d201e90bd2
|
refs/heads/master
| 2023-01-20T03:28:14.173094
| 2020-12-02T05:05:10
| 2020-12-02T05:05:10
| 315,838,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from django.contrib import admin
from profesores.models import Profesor
# Register your models here.
admin.site.register(Profesor)
|
[
"ing.quimico.heriberto.lara@gmil.com"
] |
ing.quimico.heriberto.lara@gmil.com
|
d50c94eacde9d6b5811b845d34524432308590f4
|
b7d4bd854e0052c6f7ee8b8a42fa1145de76a61f
|
/src/7_3/7_3_test.py
|
89c48b13edd8b0a319c02587782f62dc7ed53ce0
|
[] |
no_license
|
WeslyG/labs
|
b22f61512293a2e00545b7ee02df1bf4a62961f1
|
976c06a3b2e10082638ae6b0cf55b8400fe2d4ab
|
refs/heads/master
| 2022-12-25T21:16:24.604494
| 2020-10-01T12:32:15
| 2020-10-01T12:32:15
| 297,427,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from s7_3 import number_is_even
def test_s7_3():
assert number_is_even(7) is False
assert number_is_even(6) is True
assert number_is_even(0) is True
|
[
"weslyg22@gmail.com"
] |
weslyg22@gmail.com
|
b8992e4670a1ea151e7a9491438ccf75e9e869fb
|
accf5e4bb9b0d0b0fe2a1ef900fcb9c726f664ba
|
/Network/MixAttNet.py
|
c142ac0ad2bf20b933716f7501d4068ada17799e
|
[] |
no_license
|
pdedumast/FetalCPSeg
|
43eab35dc3379e69818fa9e203f83442e4a4e8c6
|
713dc1b88ed42e4e5cdbc5b876449660e533cccb
|
refs/heads/master
| 2022-11-19T20:47:29.161663
| 2020-07-28T09:43:23
| 2020-07-28T09:43:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,306
|
py
|
import torch
from torch import nn
from torch.nn import functional as F
def convolution_block(in_chan, out_chan, ksize=3, pad=1, stride=1, bias=False):
"""
Convolution Block
Convolution + Normalization + NonLinear
"""
return nn.Sequential(
nn.Conv3d(in_chan, out_chan, kernel_size=ksize, padding=pad, stride=stride, bias=bias),
nn.BatchNorm3d(out_chan),
nn.PReLU()
)
def up_sample3d(x, t, mode="trilinear"):
"""
3D Up Sampling
"""
return F.interpolate(x, t.size()[2:], mode=mode, align_corners=False)
class ResStage(nn.Module):
"""
3D Res stage
"""
def __init__(self, in_chan, out_chan, stride=1):
super(ResStage, self).__init__()
self.conv1 = convolution_block(in_chan, out_chan, stride=stride)
self.conv2 = nn.Sequential(
nn.Conv3d(out_chan, out_chan, kernel_size=3, padding=1),
nn.BatchNorm3d(out_chan)
)
self.non_linear = nn.PReLU()
self.down_sample = nn.Sequential(
nn.Conv3d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm3d(out_chan))
def forward(self, x):
out = self.conv2(self.conv1(x))
shortcut = self.down_sample(x)
out = self.non_linear(out + shortcut)
return out
def down_stage(in_chan, out_chan):
return nn.Sequential(
nn.Conv3d(in_chan, out_chan, kernel_size=1, bias=False),
nn.BatchNorm3d(out_chan),
nn.PReLU()
)
class MixBlock(nn.Module):
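"""
Mixed-kernel block: four parallel convolutions with kernel sizes 3/5/7/9, each producing
out_chan // 4 channels, whose batch-normalized outputs are concatenated and passed through PReLU.
"""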
def __init__(self, in_chan, out_chan):
super(MixBlock, self).__init__()
self.conv1 = nn.Conv3d(in_chan, out_chan // 4, 3, padding=1, bias=False)
self.conv3 = nn.Conv3d(in_chan, out_chan // 4, 5, padding=2, bias=False)
self.conv5 = nn.Conv3d(in_chan, out_chan // 4, 7, padding=3, bias=False)
self.conv7 = nn.Conv3d(in_chan, out_chan // 4, 9, padding=4, bias=False)
self.bn1 = nn.BatchNorm3d(out_chan // 4)
self.bn3 = nn.BatchNorm3d(out_chan // 4)
self.bn5 = nn.BatchNorm3d(out_chan // 4)
self.bn7 = nn.BatchNorm3d(out_chan // 4)
self.nonlinear = nn.PReLU()
def forward(self, x):
k1 = self.bn1(self.conv1(x))
k3 = self.bn3(self.conv3(x))
k5 = self.bn5(self.conv5(x))
k7 = self.bn7(self.conv7(x))
return self.nonlinear(torch.cat((k1, k3, k5, k7), dim=1))
class Attention(nn.Module):
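"""
Attention block: two stacked MixBlocks produce a sigmoid attention map that re-weights
the input features; forward returns the attended feature map and the attention map.
"""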
def __init__(self, in_chan, out_chan):
super(Attention, self).__init__()
self.mix1 = MixBlock(in_chan, out_chan)
self.conv1 = nn.Conv3d(out_chan, out_chan, kernel_size=1)
self.mix2 = MixBlock(out_chan, out_chan)
self.conv2 = nn.Conv3d(out_chan, out_chan, kernel_size=1)
self.norm1 = nn.BatchNorm3d(out_chan)
self.norm2 = nn.BatchNorm3d(out_chan)
self.relu = nn.PReLU()
def forward(self, x):
shortcut = x
mix1 = self.conv1(self.mix1(x))
mix2 = self.mix2(mix1)
att_map = F.sigmoid(self.conv2(mix2))
out = self.norm1(x*att_map) + self.norm2(shortcut)
return self.relu(out), att_map
def out_stage(in_chan, out_chan):
return nn.Sequential(
nn.Conv3d(in_chan, out_chan, kernel_size=3, padding=1),
nn.BatchNorm3d(out_chan),
nn.PReLU(),
nn.Conv3d(out_chan, 1, kernel_size=1)
)
class MixAttNet(nn.Module):
def __init__(self):
super(MixAttNet, self).__init__()
self.init_block = convolution_block(1, 16)
self.enc1 = ResStage(16, 16, 1)
self.enc2 = ResStage(16, 32, 2)
self.enc3 = ResStage(32, 64, 2)
self.enc4 = ResStage(64, 128, 2)
self.enc5 = ResStage(128, 128, 2)
self.dec4 = ResStage(128+128, 64)
self.dec3 = ResStage(64+64, 32)
self.dec2 = ResStage(32+32, 16)
self.dec1 = ResStage(16+16, 16)
self.down4 = down_stage(64, 16)
self.down3 = down_stage(32, 16)
self.down2 = down_stage(16, 16)
self.down1 = down_stage(16, 16)
self.mix1 = Attention(16, 16)
self.mix2 = Attention(16, 16)
self.mix3 = Attention(16, 16)
self.mix4 = Attention(16, 16)
self.mix_out1 = nn.Conv3d(16, 1, kernel_size=1)
self.mix_out2 = nn.Conv3d(16, 1, kernel_size=1)
self.mix_out3 = nn.Conv3d(16, 1, kernel_size=1)
self.mix_out4 = nn.Conv3d(16, 1, kernel_size=1)
self.down_out1 = nn.Conv3d(16, 1, kernel_size=1)
self.down_out2 = nn.Conv3d(16, 1, kernel_size=1)
self.down_out3 = nn.Conv3d(16, 1, kernel_size=1)
self.down_out4 = nn.Conv3d(16, 1, kernel_size=1)
self.out = out_stage(16*4, 64)
def forward(self, x):
x = self.init_block(x)
enc1 = self.enc1(x)
enc2 = self.enc2(enc1)
enc3 = self.enc3(enc2)
enc4 = self.enc4(enc3)
enc5 = self.enc5(enc4)
dec4 = self.dec4(
torch.cat((enc4, up_sample3d(enc5, enc4)), dim=1))
dec3 = self.dec3(
torch.cat((enc3, up_sample3d(dec4, enc3)), dim=1))
dec2 = self.dec2(
torch.cat((enc2, up_sample3d(dec3, enc2)), dim=1))
dec1 = self.dec1(
torch.cat((enc1, up_sample3d(dec2, enc1)), dim=1))
down1 = up_sample3d(self.down1(dec1), x)
down4 = up_sample3d(self.down4(dec4), x)
down3 = up_sample3d(self.down3(dec3), x)
down2 = up_sample3d(self.down2(dec2), x)
down_out1 = self.down_out1(down1)
down_out2 = self.down_out2(down2)
down_out3 = self.down_out3(down3)
down_out4 = self.down_out4(down4)
mix1, att1 = self.mix1(down1)
mix2, att2 = self.mix2(down2)
mix3, att3 = self.mix3(down3)
mix4, att4 = self.mix4(down4)
mix_out1 = self.mix_out1(mix1)
mix_out2 = self.mix_out2(mix2)
mix_out3 = self.mix_out3(mix3)
mix_out4 = self.mix_out4(mix4)
out = self.out(torch.cat((mix1, mix2, mix3, mix4), dim=1))
if self.training:
return out, mix_out1, mix_out2, mix_out3, mix_out4, down_out1, down_out2, down_out3, down_out4
else:
return torch.sigmoid(out)
if __name__ == '__main__':
net = MixAttNet().cuda()
torch.save(net.state_dict(), "MixAttNet.pth.gz")
|
[
"noreply@github.com"
] |
pdedumast.noreply@github.com
|
ee4433ee5b0534fc569bbd443da946bf6e41e51e
|
93857d07c3391a1a10843bc67bb0b7ae93edca97
|
/Mysql_CodeGen.py
|
92e33cfa3392dc71a9dcf7d6ed9db329f2df3c01
|
[] |
no_license
|
xumingxsh/Mysql_CodeGen
|
dffd3ff59f9b35c616fd85ab6296134333eb7293
|
4d43ad50cd994df2ba478a0695cda690460381b6
|
refs/heads/master
| 2021-01-10T01:20:43.885279
| 2016-02-28T12:47:52
| 2016-02-28T12:47:52
| 52,719,962
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,731
|
py
|
# 徐敏荣
# 2014-04-18
# Cleaned up and rewritten
# This script reads table structures from a MySQL database and automatically generates repetitive boilerplate code from them
import MySQLdb
from StringIO import StringIO
from string import Template
# Table structure
class TableInfo:
# constructor
def __init__(self):
self.table_Name = "" # table name (English)
self.table_Comment = "" # table display name (Chinese) or related comment
self.nameLower = "" # owning user / schema
# Extended table structure
class TableEx:
def __init__(self):
self.base = None # table structure
self.columns = [] # the table's columns
self.keys = [] # the table's primary key columns
# Column structure
class ColumnInfo:
# constructor
def __init__(self):
self.table_Name = "" # name of the owning table
self.column_Name = "" # column name
self.data_Type = "" # data type
self.column_Default = "" # default value
self.is_Nullable = "" # whether NULL is allowed
self.column_Comment = "" # column display name (Chinese) or comment
self.charLen = None # character length
self.len = None # data length
self.isKey = 0 # whether it is a primary key
self.extra = "" # whether it is an auto-increment column
self.precision=None # precision (for double/decimal)
self.scale=None # scale
# 表结构的SQL辅助类,主要用于生成SQL语句
class TableExForSQL:
def __init__(self, tb):
self.tb = tb # 使用的表
# 打印表结构信息
def show(self):
print '%s\t%s' % (self.tb.base.table_Name, self.tb.base.table_Comment)
print "---------------------------------------"
for i in self.tb.columns:
nullStr = "\t not null "
if i.is_Nullable.lower() != 'no':
nullStr = ""
print '%s\t%s%s\t%s\t%s' % (i.column_Name, i.data_Type,self.getColumnLen(i),nullStr, i.column_Comment)
# 打印列结构信息
# i-列结构
def columnSQL(self, column):
nullStr = "\t not null " # 是否允许为空
if column.is_Nullable.lower() != 'no':
nullStr = " "
defStr = "\t default " # 默认值
if column.column_Default == '' or\
column.column_Default == None or \
column.column_Default == '0000-00-00 00:00:00':
defStr = ""
else:
defStr += " " + column.column_Default + " "
comStr = "" # 注释信息
if column.column_Comment != '' and column.column_Comment != None:
comStr = "\t comment '" + column.column_Comment + "'"
autoCreat = "" # 是否是自增字段
if column.extra == "auto_increment":
autoCreat = "\t auto_increment "
return '\t' + column.column_Name + '\t' + column.data_Type + self.getColumnLen(column) + nullStr + autoCreat + defStr + comStr
# 数据库表创建语句
def createSQL(self):
print "/*%s\t%s*/" % (self.tb.base.table_Name, self.tb.base.table_Comment) # 注释信息
print "create table %s\n(" % self.tb.base.table_Name
# 打印列语句
for column in self.tb.columns:
print self.columnSQL(column) + ','
# 打印主键语句
key = ''
for i in self.tb.keys:
if key != '':
key += ','
key += i.column_Name
key = '\tprimary key (' + key + ')'
print key + "\n);\n"
# 打印注释信息
if self.tb.base.table_Comment != '' and self.tb.base.table_Comment != None:
print "alter table %s comment '%s';" % (self.tb.base.table_Name, self.tb.base.table_Comment)
# 表移除SQL
def dropSQL(self):
print "drop table if exists %s;" % (self.tb.base.table_Name)
# 单条记录查询
def getSQL(self):
cls = ""
for i in self.tb.columns:
if cls != "\t":
cls += "\n\t,"
cls += i.column_Name.lower()
print "\tSelect " + cls + " From " + self.tb.base.table_Name
# add each column to the table
def addColumns(self):
for i in self.tb.columns:
print "alter table %s add %s " % (self.tb.base.table_Name, self.columnSQL(i))
# 获得列长度
def getColumnLen(self, i):
if i.data_Type == "decimal"\
or i.data_Type == "double" \
or i.data_Type == "float":
if i.scale is None:
return "(%d)"%(i.precision)
else:
return "(%d,%d)"%(i.precision, i.scale)
if i.data_Type == "text":
return ''
if i.charLen != None and i.charLen != "":
return '(%d)'%i.charLen
return ''
# 获得列数据类型
def getParamType_Java_MyBatis(self, i):
if i.data_Type == "int"\
or i.data_Type == "decimal"\
or i.data_Type == "double" \
or i.data_Type == "smallint" \
or i.data_Type == "tinyint" \
or i.data_Type == "float":
return "NUMERIC"
if i.data_Type == "timestamp":
return "TIME"
return "VARCHAR"
# 添加记录SQL(适用于mybatis)
def insertSQL_Java_MyBatis(self):
params = ""
values = ""
for i in self.tb.columns:
if i.extra == "auto_increment" or\
i.column_Comment == "CURRENT_TIMESTAMP comment":
continue
if params != "":
params += "\t, "
else:
params += "\t"
params += i.column_Name.lower() + "\n"
if values != "":
values += "\t, "
else:
values += "\t"
t = Template('#{${name},jdbcType=${type}}\n')
values += t.substitute(name=i.column_Name.lower(), type=self.getParamType_Java_MyBatis(i))
print "\tInsert Into %s(\n%s\t) Values (\n%s\t)" % (self.tb.base.table_Name,params,values)
    # UPDATE statement (for MyBatis)
def updateSQL_Java_MyBatis(self):
params = ""
for i in self.tb.columns:
if i.extra == "auto_increment" or i.column_Comment == "CURRENT_TIMESTAMP comment":
continue
if params != "":
params += "\t, "
else:
params += "\t"
t = Template('${name} = #{${name},jdbcType=${type}}\n')
params += t.substitute(name=i.column_Name.lower(), type=self.getParamType_Java_MyBatis(i))
values = ""
for i in self.tb.keys:
if values != "":
values += "\t, "
else:
values += "\t"
t = Template('${name} = #{${name},jdbcType=${type}}\n')
            values += t.substitute(name=i.column_Name.lower(), type=self.getParamType_Java_MyBatis(i))
print "\tUpdate %s SET \n%s\t Where %s " % (self.tb.base.table_Name, params, values)
TableExForSQL.insertSQL = insertSQL_Java_MyBatis
TableExForSQL.updateSQL = updateSQL_Java_MyBatis
TableExForSQL.getParamType_Java_MyBatis = getParamType_Java_MyBatis
class TableExForMapper:
def __init__(self, tb):
self.tb = tb
self.sql = TableExForSQL(tb)
def insert(self):
print '<insert id="add" parameterType="' + self.tb.base.table_Name.capitalize() + 'PO">'
self.sql.insertSQL()
print '</insert>'
def update(self):
print '<update id="update" parameterType="' + self.tb.base.table_Name.capitalize() + 'PO">'
self.sql.updateSQL()
print '</update>'
def selectList(self):
print '<select id="getList" parameterType="' + self.tb.base.table_Name.capitalize() + \
'QO" resultType="' + self.tb.base.table_Name.capitalize() + 'VO">'
self.sql.getSQL()
print "\t LIMIT #{recordStart, jdbcType=NUMERIC},#{rows, jdbcType=NUMERIC}"
print '</select>'
class TableExForJava:
def __init__(self, tb):
self.tb = tb
def createPO(self):
propertys = ""
for i in self.tb.columns:
typ = "String"
if i.data_Type == "int":
typ = "int"
if i.data_Type == "timestamp":
typ = "Date"
if i.column_Comment != '' and i.column_Comment != None:
print "\t/**"
print "\t*" + i.column_Comment
print "\t*/"
print "\tprivate " + typ + " " + i.column_Name.lower() + ";"
            # generate a standard Java getter/setter pair for this property
            t = Template("\tpublic ${type} get${nameU}() {\n"
                         "\t\treturn this.${name};\n"\
                         "\t}\n\n"\
                         "\tpublic void set${nameU}(${type} ${name}) {\n"\
                         "\t\tthis.${name} = ${name};\n"\
                         "\t}\n\n")
#propertys += "\tpublic " + typ + " get" + i.column_Name.lower().capitalize() + "() {\n"
#propertys += "\t\treturn this." + i.column_Name.lower() + ";\n"
#propertys += "\t}\n\n"
#propertys += "\tpublic void set" + i.column_Name.lower().capitalize() + "(" + typ + " " + i.column_Name.lower() + " ) {\n"
#propertys += "\t\tthis." + i.column_Name.lower() + " = " + i.column_Name.lower() + ";\n"
#propertys += "\t}\n\n"
            propertys += t.substitute(type=typ, nameU=i.column_Name.lower().capitalize(), name=i.column_Name.lower())
print ""
if i.data_Type != "timestamp":
continue
print "\tprivate String " + i.column_Name.lower() + "Str;"
propertys += "\tpublic String get" + i.column_Name.lower().capitalize() + "Str() {\n"
propertys += "\t\treturn TypeCommon.ConvertToString(this." + i.column_Name.lower() + ");\n"
propertys += "\t}\n\n"
print propertys
def dataGridColums(self):
for i in self.tb.columns:
comment = i.column_Name
if i.column_Comment != '' and i.column_Comment != None:
comment = i.column_Comment
print '\t\t <th field="' + i.column_Name.lower() + '" width="100px">' + comment + '</th>'
class DBGencode:
def __init__(self, host, port, db, user, pwd):
self.host = host
self.port = port
self.user = user
self.pwd = pwd
self.db = db
self.con = MySQLdb.connect(host=self.host,port=self.port,\
db='information_schema',user=self.user,passwd=self.pwd,\
charset="gbk")
cur = self.con.cursor()
cur.execute("select table_Name, table_Comment from tables where TABLE_SCHEMA='" + self.db + "'")
self.tables=[]
self.tableExs=[]
self.columns=[]
for i in cur.fetchall():
t = TableInfo()
t.table_Name = i[0]
t.nameLower = t.table_Name.lower()
arr = i[1].split(";")
if len(arr) > 1:
t.table_Comment = arr[0]
else:
t.table_Comment = ""
self.tables.append(t)
cur.execute("select Table_Name, Column_Name," \
+ "Data_Type,Column_Default,Is_Nullable,Column_Comment,"\
"CHARACTER_MAXIMUM_LENGTH, COLUMN_KEY, extra,NUMERIC_PRECISION,NUMERIC_SCALE from COLUMNS where TABLE_SCHEMA='"\
+ self.db + "' ")
for i in cur.fetchall():
c = ColumnInfo()
c.table_Name = i[0]
c.column_Name = i[1]
c.data_Type = i[2]
c.column_Default = i[3]
c.is_Nullable = i[4]
c.column_Comment = i[5]
c.charLen = i[6]
if i[7] == 'PRI':
c.isKey = 1
c.extra = i[8]
c.precision = i[9]
c.scale = i[10]
self.columns.append(c)
for i in self.tables:
tb = TableEx()
tb.base = i
for it in self.columns:
if it.table_Name.lower() != i.table_Name.lower():
continue
tb.columns.append(it)
if it.isKey == 1:
tb.keys.append(it)
self.tableExs.append(tb)
def showTables(self):
for i in self.tables:
#print str(i)
print '%s\t%s' % (i.table_Name, i.table_Comment)
#print i.table_Comment
def showColumns(self):
for i in self.columns:
print '%s\t%s\t%s' % (i.column_Name, i.data_Type,i.column_Comment)
def getTable(self, name):
nameLw = name.lower()
for i in self.tableExs:
if i.base.nameLower == nameLw:
return i
return None
def showTable(self, name):
tb = self.getTable(name)
if tb == None:
print u"没有查找到数据库表:" + name
return
sql = TableExForSQL(tb)
sql.show()
def showDataBase(self):
for i in self.tableExs:
sql = TableExForSQL(i)
sql.show()
print ""
print ""
def showCreateSQLs(self):
for i in self.tableExs:
sql = TableExForSQL(i)
sql.createSQL()
print ""
print ""
def dropSQLs(self):
for i in self.tableExs:
sql = TableExForSQL(i)
sql.dropSQL()
def insertSQLs(self):
for i in self.tableExs:
sql = TableExForSQL(i)
print ""
print ""
sql.insertSQL()
def updateSQLs(self):
for i in self.tableExs:
sql = TableExForSQL(i)
print ""
print ""
sql.updateSQL()
def sqls(self):
for i in self.tableExs:
sql = TableExForSQL(i)
print ""
print i.base.table_Name
print "----------------------------"
print u"添加语句"
sql.insertSQL()
print ""
print u"更新语句"
sql.updateSQL()
print ""
print u"查询语句"
sql.getSQL()
print ""
print u"添加列"
sql.addColumns()
def insertXMLs(self):
for i in self.tableExs:
mapper = TableExForMapper(i)
print ""
print ""
mapper.insert()
def updateXMLs(self):
for i in self.tableExs:
mapper = TableExForMapper(i)
print ""
print ""
mapper.update()
def XMLs(self):
for i in self.tableExs:
mapper = TableExForMapper(i)
print ""
print i.base.table_Name
print "----------------------------"
print u"添加语句"
mapper.insert()
print ""
print u"更新语句"
mapper.update()
print ""
print u"查询语句"
mapper.selectList()
def javas(self):
for i in self.tableExs:
jv = TableExForJava(i)
print ""
print i.base.table_Name
print "----------------------------"
print u"PO属性"
jv.createPO()
print ""
print u"列表列"
jv.dataGridColums()
def createSQLs(self):
        for i in self.tableExs:
sql = TableExForSQL(i)
mylookup = TemplateLookup(directories=['docs'],\
module_directory='tmp/mako_modules', collection_size=500, output_encoding='utf-8', encoding_errors='replace')
mytemplate = mylookup.get_template('createSQL.sql')
print mytemplate.render(table=i, tb=i.base, sql=sql)
code=DBGencode("127.0.0.1", 3306, "ivsm", "root", "root")
code.insertSQLs()
code.showCreateSQLs()
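# Illustrative usage sketch (an addition, not part of the original script): besides the two
# calls above, the generator class defined earlier exposes several other entry points, e.g.
#
#   code.sqls()                  # INSERT / UPDATE / SELECT / ADD COLUMN statements per table
#   code.XMLs()                  # MyBatis mapper fragments (insert / update / getList)
#   code.javas()                 # Java PO properties and datagrid columns
#   code.showTable('some_table') # 'some_table' is a placeholder table name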
|
[
"xumingxsh21@126.com"
] |
xumingxsh21@126.com
|
864bc49ad51f2d4f534f07456b571d961588a63d
|
766ada1da22829e7a0e56dfe56139d75c68c0d1d
|
/traffic_simulation.py
|
bd62b43bc09c26954ef9decc567c516c894cdf98
|
[] |
no_license
|
grizax/traffic-simulation
|
5f31341490058eaefe5b5db636f4deeadb220381
|
425ef24fed0164ee61037cd9d3b207cb208d00bf
|
refs/heads/master
| 2021-01-18T03:31:40.834686
| 2015-02-02T13:40:02
| 2015-02-02T13:40:02
| 30,035,613
| 0
| 0
| null | 2015-01-29T18:52:01
| 2015-01-29T18:52:01
| null |
UTF-8
|
Python
| false
| false
| 1,836
|
py
|
"""TO DO: Still needs a lot of work with matplotlib, stats, reporting, and ipython notebook"""
import numpy as np
import matplotlib.pyplot as plt
from traffic.simulation import Simulation
from traffic.road import Road
from traffic.car import Car
def multiple_simulations(num_simulations=100):
output_car_speeds = np.array([]).reshape(0, 30)
output_tracks = np.array(np.zeros((1, 1000)))
for _ in range(num_simulations):
track_results, car_speeds = one_simulation()
output_car_speeds = np.vstack([output_car_speeds, [car_speeds]])
output_tracks = np.append(output_tracks, track_results, axis=0)
output_tracks = np.delete(output_tracks, 0, 0)
return output_tracks, output_car_speeds
def one_simulation(time=60):
car_list = car_factory()
sim = Simulation()
for _ in range(time):
output = loop(car_list, sim)
car_speeds = [car.speed for car in car_list]
return output, car_speeds
def loop(cars, simulation):
the_road = Road()
simulation.driving_rules(cars)
simulation.car_collision(cars)
simulation.drive_cars(the_road, cars)
simulation.accelerate_cars(cars)
return the_road.track
def car_factory(car_fleet=30):
car_list = []
counter = 33
for car in range(car_fleet):
car = Car()
car.location += counter
car_list.append(car)
counter += 33
return car_list
def reporting():
track_results, speed = multiple_simulations()
speed_mean = Simulation.metric_conversion(np.mean(speed))
speed_std = Simulation.metric_conversion(np.std(speed))
rec_speed = speed_mean + speed_std
plotting(track_results)
return rec_speed
def plotting(track_results):
x = track_results
plt.imshow(x, cmap="binary_r", interpolation="gaussian")
plt.show()
reporting()
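# Illustrative sketch (an addition, not part of the original script): the helpers above can
# also be used without the final plot, e.g. to inspect the recommended speed directly.
# Kept commented out because reporting() already runs a full batch at import time.
#
#   tracks, speeds = multiple_simulations(num_simulations=10)
#   print(Simulation.metric_conversion(np.mean(speeds) + np.std(speeds)))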
|
[
"ndbfoster@gmail.com"
] |
ndbfoster@gmail.com
|
7a3484ca24eee71aa63e4e1eb0f4a392f1f4784a
|
41b4702e359e3352116eeecf2bdf59cb13c71cf2
|
/contextual_bcq/rand_param_envs/mujoco_py/mjlib.py
|
8f2cf8a780c82d64a893cfd22c85aaf7d6219ce8
|
[] |
no_license
|
CaralHsi/Multi-Task-Batch-RL
|
b0aad53291c1713fd2d89fa4fff4a85c98427d4d
|
69d29164ab7d82ec5e06a929ed3b96462db21853
|
refs/heads/master
| 2022-12-22T19:23:45.341092
| 2020-10-01T00:05:36
| 2020-10-01T00:05:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,301
|
py
|
from ctypes import *
import os
from .util import *
from .mjtypes import *
from rand_param_envs.mujoco_py import config
path_prefix = config.mjpro_path
if sys.platform.startswith("darwin"):
libfile = os.path.join(path_prefix, "bin/libmujoco131.dylib")
elif sys.platform.startswith("linux"):
libfile = os.path.join(path_prefix, "bin/libmujoco131.so")
elif sys.platform.startswith("win"):
libfile = os.path.join(path_prefix, "bin/mujoco131.lib")
else:
raise RuntimeError("Unrecognized platform %s" % sys.platform)
if not os.path.exists(libfile):
raise RuntimeError("Missing path: %s. (HINT: you should have unzipped the mjpro131.zip bundle without modification.)" % libfile)
mjlib = cdll.LoadLibrary(os.path.abspath(libfile))
mjlib.mj_loadXML.argtypes = [String, String, c_char_p, c_int]
mjlib.mj_loadXML.restype = POINTER(MJMODEL)
mjlib.mj_saveXML.argtypes = [String, POINTER(MJMODEL), String]
mjlib.mj_saveXML.restype = c_int
#mjlib.mj_printSchema.argtypes = [String, String, c_int, c_int, c_int]
#mjlib.mj_printSchema.restype = c_int
mjlib.mj_activate.argtypes = [String]
mjlib.mj_activate.restype = c_int
mjlib.mj_step.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step.restype = None
mjlib.mj_step1.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step1.restype = None
mjlib.mj_step2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step2.restype = None
mjlib.mj_forward.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_forward.restype = None
#mjlib.mj_inverse.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_inverse.restype = None
#mjlib.mj_forwardSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_forwardSkip.restype = None
#mjlib.mj_inverseSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_inverseSkip.restype = None
#mjlib.mj_sensor.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_sensor.restype = None
#mjlib.mj_energy.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_energy.restype = None
#mjlib.mj_defaultSolRefImp.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mj_defaultSolRefImp.restype = None
#mjlib.mj_defaultOption.argtypes = [POINTER(mjOption)]
#mjlib.mj_defaultOption.restype = None
#mjlib.mj_defaultVisual.argtypes = [POINTER(mjVisual)]
#mjlib.mj_defaultVisual.restype = None
#mjlib.mj_copyModel.argtypes = [POINTER(MJMODEL), POINTER(MJMODEL)]
#mjlib.mj_copyModel.restype = POINTER(MJMODEL)
#mjlib.mj_saveModel.argtypes = [POINTER(MJMODEL), String, c_int, POINTER(None)]
#mjlib.mj_saveModel.restype = None
#mjlib.mj_loadModel.argtypes = [String, c_int, POINTER(None)]
#mjlib.mj_loadModel.restype = POINTER(MJMODEL)
mjlib.mj_deleteModel.argtypes = [POINTER(MJMODEL)]
mjlib.mj_deleteModel.restype = None
#mjlib.mj_sizeModel.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_sizeModel.restype = c_int
mjlib.mj_makeData.argtypes = [POINTER(MJMODEL)]
mjlib.mj_makeData.restype = POINTER(MJDATA)
#mjlib.mj_copyData.argtypes = [POINTER(MJDATA), POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_copyData.restype = POINTER(MJDATA)
mjlib.mj_resetData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_resetData.restype = None
#mjlib.mj_stackAlloc.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_stackAlloc.restype = POINTER(c_double)
mjlib.mj_deleteData.argtypes = [POINTER(MJDATA)]
mjlib.mj_deleteData.restype = None
#mjlib.mj_resetCallbacks.argtypes = []
#mjlib.mj_resetCallbacks.restype = None
#mjlib.mj_setConst.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_setConst.restype = None
#mjlib.mj_printModel.argtypes = [POINTER(MJMODEL), String]
#mjlib.mj_printModel.restype = None
#mjlib.mj_printData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), String]
#mjlib.mj_printData.restype = None
#mjlib.mju_printMat.argtypes = [POINTER(c_double), c_int, c_int]
#mjlib.mju_printMat.restype = None
#mjlib.mj_fwdPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdPosition.restype = None
#mjlib.mj_fwdVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdVelocity.restype = None
#mjlib.mj_fwdActuation.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdActuation.restype = None
#mjlib.mj_fwdAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdAcceleration.restype = None
#mjlib.mj_fwdConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdConstraint.restype = None
#mjlib.mj_Euler.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_Euler.restype = None
#mjlib.mj_RungeKutta.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_RungeKutta.restype = None
#mjlib.mj_invPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invPosition.restype = None
#mjlib.mj_invVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invVelocity.restype = None
#mjlib.mj_invConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invConstraint.restype = None
#mjlib.mj_compareFwdInv.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_compareFwdInv.restype = None
#mjlib.mj_checkPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkPos.restype = None
#mjlib.mj_checkVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkVel.restype = None
#mjlib.mj_checkAcc.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkAcc.restype = None
#mjlib.mj_kinematics.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_kinematics.restype = None
#mjlib.mj_comPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comPos.restype = None
#mjlib.mj_tendon.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_tendon.restype = None
#mjlib.mj_transmission.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_transmission.restype = None
#mjlib.mj_crb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_crb.restype = None
#mjlib.mj_factorM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_factorM.restype = None
#mjlib.mj_backsubM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM.restype = None
#mjlib.mj_backsubM2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM2.restype = None
#mjlib.mj_comVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comVel.restype = None
#mjlib.mj_passive.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_passive.restype = None
#mjlib.mj_rne.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_rne.restype = None
#mjlib.mj_rnePostConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_rnePostConstraint.restype = None
#mjlib.mj_collision.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_collision.restype = None
#mjlib.mj_makeConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_makeConstraint.restype = None
#mjlib.mj_projectConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_projectConstraint.restype = None
#mjlib.mj_referenceConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_referenceConstraint.restype = None
#mjlib.mj_isPyramid.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isPyramid.restype = c_int
#mjlib.mj_isSparse.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isSparse.restype = c_int
#mjlib.mj_mulJacVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacVec.restype = None
#mjlib.mj_mulJacTVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacTVec.restype = None
#mjlib.mj_jac.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jac.restype = None
#mjlib.mj_jacBody.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBody.restype = None
#mjlib.mj_jacBodyCom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBodyCom.restype = None
#mjlib.mj_jacGeom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacGeom.restype = None
#mjlib.mj_jacSite.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacSite.restype = None
#mjlib.mj_jacPointAxis.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacPointAxis.restype = None
mjlib.mj_name2id.argtypes = [POINTER(MJMODEL), c_int, String] # The middle term is a mjtObj (an enum) in C.
mjlib.mj_name2id.restype = c_int
#mjlib.mj_id2name.argtypes = [POINTER(MJMODEL), mjtObj, c_int]
#mjlib. mj_id2name.restype = ReturnString
#mjlib.else:
#mjlib. mj_id2name.restype = String
#mjlib. mj_id2name.errcheck = ReturnString
#mjlib.mj_fullM.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_fullM.restype = None
#mjlib.mj_mulM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulM.restype = None
#mjlib.mj_applyFT.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, POINTER(c_double)]
#mjlib.mj_applyFT.restype = None
#mjlib.mj_objectVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), mjtByte]
#mjlib.mj_objectVelocity.restype = None
#mjlib.mj_objectAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), mjtByte]
#mjlib.mj_objectAcceleration.restype = None
#mjlib.mj_contactForce.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_contactForce.restype = None
#mjlib.mj_integratePos.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mj_integratePos.restype = None
#mjlib.mj_normalizeQuat.argtypes = [POINTER(MJMODEL), POINTER(c_double)]
#mjlib.mj_normalizeQuat.restype = None
#mjlib.mj_local2Global.argtypes = [POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_local2Global.restype = None
#mjlib.mj_getTotalmass.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_getTotalmass.restype = c_double
#mjlib.mj_setTotalmass.argtypes = [POINTER(MJMODEL), c_double]
#mjlib.mj_setTotalmass.restype = None
#mjlib.mj_version.argtypes = []
#mjlib.mj_version.restype = c_double
mjlib.mjv_makeObjects.argtypes = [POINTER(MJVOBJECTS), c_int]
mjlib.mjv_makeObjects.restype = None
mjlib.mjv_freeObjects.argtypes = [POINTER(MJVOBJECTS)]
mjlib.mjv_freeObjects.restype = None
mjlib.mjv_defaultOption.argtypes = [POINTER(MJVOPTION)]
mjlib.mjv_defaultOption.restype = None
#mjlib.mjv_defaultCameraPose.argtypes = [POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_defaultCameraPose.restype = None
mjlib.mjv_defaultCamera.argtypes = [POINTER(MJVCAMERA)]
mjlib.mjv_defaultCamera.restype = None
mjlib.mjv_setCamera.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVCAMERA)]
mjlib.mjv_setCamera.restype = None
mjlib.mjv_updateCameraPose.argtypes = [POINTER(MJVCAMERA), c_double]
mjlib.mjv_updateCameraPose.restype = None
#mjlib.mjv_convert3D.argtypes = [POINTER(c_double), POINTER(c_double), c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert3D.restype = None
#mjlib.mjv_convert2D.argtypes = [POINTER(c_double), mjtMouse, c_double, c_double, c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert2D.restype = None
mjlib.mjv_moveCamera.argtypes = [c_int, c_float, c_float, POINTER(MJVCAMERA), c_float, c_float]
mjlib.mjv_moveCamera.restype = None
#mjlib.mjv_moveObject.argtypes = [mjtMouse, c_float, c_float, POINTER(MJVCAMERAPOSE), c_float, c_float, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_moveObject.restype = None
mjlib.mjv_mousePerturb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
mjlib.mjv_mousePerturb.restype = None
#mjlib.mjv_mouseEdit.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_mouseEdit.restype = None
mjlib.mjv_makeGeoms.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS), POINTER(MJVOPTION), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
mjlib.mjv_makeGeoms.restype = None
mjlib.mjv_makeLights.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS)]
mjlib.mjv_makeLights.restype = None
mjlib.mjr_overlay.argtypes = [MJRRECT, c_int, c_int, String, String, POINTER(MJRCONTEXT)]
mjlib.mjr_overlay.restype = None
#mjlib.mjr_rectangle.argtypes = [c_int, MJRRECT, c_double, c_double, c_double, c_double, c_double, c_double, c_double, c_double]
#mjlib.mjr_rectangle.restype = None
#mjlib.mjr_finish.argtypes = []
#mjlib.mjr_finish.restype = None
#mjlib.mjr_text.argtypes = [String, POINTER(MJRCONTEXT), c_int, c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_text.restype = None
#mjlib.mjr_textback.argtypes = [String, POINTER(MJRCONTEXT), c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_textback.restype = None
#mjlib.mjr_textWidth.argtypes = [String, POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_textWidth.restype = c_int
mjlib.mjr_defaultOption.argtypes = [POINTER(MJROPTION)]
mjlib.mjr_defaultOption.restype = None
mjlib.mjr_defaultContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_defaultContext.restype = None
#mjlib.mjr_uploadTexture.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_uploadTexture.restype = None
mjlib.mjr_makeContext.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
mjlib.mjr_makeContext.restype = None
mjlib.mjr_freeContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_freeContext.restype = None
mjlib.mjr_render.argtypes = [c_int, MJRRECT, POINTER(MJVOBJECTS), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
mjlib.mjr_render.restype = None
#mjlib.mjr_select.argtypes = [MJRRECT, POINTER(MJVOBJECTS), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
#mjlib.mjr_select.restype = c_int
#mjlib.mjr_showOffscreen.argtypes = [c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showOffscreen.restype = None
#mjlib.mjr_showBuffer.argtypes = [POINTER(c_ubyte), c_int, c_int, c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showBuffer.restype = None
#mjlib.mjr_getOffscreen.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getOffscreen.restype = None
#mjlib.mjr_getBackbuffer.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getBackbuffer.restype = None
#mjlib.
#mjlib.
#mjlib.mju_error.argtypes = [String]
#mjlib.mju_error.restype = None
#mjlib.mju_error_i.argtypes = [String, c_int]
#mjlib.mju_error_i.restype = None
#mjlib.mju_error_s.argtypes = [String, String]
#mjlib.mju_error_s.restype = None
#mjlib.mju_warning.argtypes = [String]
#mjlib.mju_warning.restype = None
#mjlib.mju_warning_i.argtypes = [String, c_int]
#mjlib.mju_warning_i.restype = None
#mjlib.mju_warning_s.argtypes = [String, String]
#mjlib.mju_warning_s.restype = None
#mjlib.mju_clearHandlers.argtypes = []
#mjlib.mju_clearHandlers.restype = None
#mjlib.mju_malloc.argtypes = [c_size_t]
#mjlib.mju_malloc.restype = POINTER(None)
#mjlib.mju_free.argtypes = [POINTER(None)]
#mjlib.mju_free.restype = None
#mjlib.mj_warning.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_warning.restype = None
#mjlib.mju_zero3.argtypes = [POINTER(c_double)]
#mjlib.mju_zero3.restype = None
#mjlib.mju_copy3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_copy3.restype = None
#mjlib.mju_scl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_scl3.restype = None
#mjlib.mju_add3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_add3.restype = None
#mjlib.mju_sub3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_sub3.restype = None
#mjlib.mju_addTo3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_addTo3.restype = None
#mjlib.mju_addToScl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addToScl3.restype = None
#mjlib.mju_addScl3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addScl3.restype = None
#mjlib.mju_normalize3.argtypes = [POINTER(c_double)]
#mjlib.mju_normalize3.restype = c_double
#mjlib.mju_norm3.argtypes = [POINTER(c_double)]
#mjlib.mju_norm3.restype = c_double
#mjlib.mju_dot3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dot3.restype = c_double
#mjlib.mju_dist3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dist3.restype = c_double
#mjlib.mju_rotVecMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMat.restype = None
#mjlib.mju_rotVecMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMatT.restype = None
#mjlib.mju_cross.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_cross.restype = None
#mjlib.mju_zero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_zero.restype = None
#mjlib.mju_copy.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_copy.restype = None
#mjlib.mju_scl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_scl.restype = None
#mjlib.mju_add.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_add.restype = None
#mjlib.mju_sub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_sub.restype = None
#mjlib.mju_addTo.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_addTo.restype = None
#mjlib.mju_addToScl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addToScl.restype = None
#mjlib.mju_addScl.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addScl.restype = None
#mjlib.mju_normalize.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_normalize.restype = c_double
#mjlib.mju_norm.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_norm.restype = c_double
#mjlib.mju_dot.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_dot.restype = c_double
#mjlib.mju_mulMatVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatVec.restype = None
#mjlib.mju_mulMatTVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatTVec.restype = None
#mjlib.mju_transpose.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_transpose.restype = None
#mjlib.mju_mulMatMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMat.restype = None
#mjlib.mju_mulMatMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMatT.restype = None
#mjlib.mju_sqrMat.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int, POINTER(c_double), c_int]
#mjlib.mju_sqrMat.restype = None
#mjlib.mju_mulMatTMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatTMat.restype = None
#mjlib.mju_transformSpatial.argtypes = [POINTER(c_double), POINTER(c_double), mjtByte, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_transformSpatial.restype = None
#mjlib.mju_rotVecQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecQuat.restype = None
#mjlib.mju_negQuat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_negQuat.restype = None
#mjlib.mju_mulQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuat.restype = None
#mjlib.mju_mulQuatAxis.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuatAxis.restype = None
#mjlib.mju_axisAngle2Quat.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_axisAngle2Quat.restype = None
#mjlib.mju_quat2Vel.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quat2Vel.restype = None
#mjlib.mju_quat2Mat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quat2Mat.restype = None
#mjlib.mju_mat2Quat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mat2Quat.restype = None
#mjlib.mju_derivQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_derivQuat.restype = None
#mjlib.mju_quatIntegrate.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quatIntegrate.restype = None
#mjlib.mju_quatZ2Vec.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quatZ2Vec.restype = None
#mjlib.mju_cholFactor.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_double, c_double, POINTER(c_double)]
#mjlib.mju_cholFactor.restype = c_int
#mjlib.mju_cholBacksub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_cholBacksub.restype = None
#mjlib.mju_eig3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_eig3.restype = c_int
#mjlib.mju_muscleFVL.argtypes = [c_double, c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_muscleFVL.restype = c_double
#mjlib.mju_musclePassive.argtypes = [c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_musclePassive.restype = c_double
#mjlib.mju_pneumatic.argtypes = [c_double, c_double, c_double, POINTER(c_double), c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_pneumatic.restype = c_double
#mjlib.mju_encodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_encodePyramid.restype = None
#mjlib.mju_decodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_decodePyramid.restype = None
#mjlib.mju_springDamper.argtypes = [c_double, c_double, c_double, c_double, c_double]
#mjlib.mju_springDamper.restype = c_double
#mjlib.mju_min.argtypes = [c_double, c_double]
#mjlib.mju_min.restype = c_double
#mjlib.mju_max.argtypes = [c_double, c_double]
#mjlib.mju_max.restype = c_double
#mjlib.mju_sign.argtypes = [c_double]
#mjlib.mju_sign.restype = c_double
#mjlib.mju_round.argtypes = [c_double]
#mjlib.mju_round.restype = c_int
#mjlib.mju_type2Str.argtypes = [c_int]
#mjlib. mju_type2Str.restype = ReturnString
#mjlib.else:
#mjlib. mju_type2Str.restype = String
#mjlib. mju_type2Str.errcheck = ReturnString
#mjlib.mju_str2Type.argtypes = [String]
#mjlib.mju_str2Type.restype = mjtObj
#mjlib.mju_warningText.argtypes = [c_int]
#mjlib. mju_warningText.restype = ReturnString
#mjlib.else:
#mjlib. mju_warningText.restype = String
#mjlib. mju_warningText.errcheck = ReturnString
#mjlib.mju_isBad.argtypes = [c_double]
#mjlib.mju_isBad.restype = c_int
#mjlib.mju_isZero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_isZero.restype = c_int
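# Illustrative sketch (an addition, not part of the original bindings): with the prototypes
# declared above, a model can be loaded and stepped straight through ctypes. Kept commented
# out; "humanoid.xml" is a placeholder path, not a file shipped with this module.
#
#   err = create_string_buffer(1000)
#   model = mjlib.mj_loadXML("humanoid.xml", None, err, 1000)
#   if not model:
#       raise RuntimeError(err.value)
#   data = mjlib.mj_makeData(model)
#   for _ in range(1000):
#       mjlib.mj_step(model, data)
#   mjlib.mj_deleteData(data)
#   mjlib.mj_deleteModel(model)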
|
[
"jil021@eng.ucsd.edu"
] |
jil021@eng.ucsd.edu
|
2b4c7736ef6d059602290437ae4a47765822bc7a
|
c247035975b8ca12eff9cfe8bac7c57402d1e728
|
/Unit10/T10.7.py
|
f0b0092787214e70119d976390273dd4cd573d7e
|
[] |
no_license
|
xlxwalex/pythonew
|
cf896707a72cbb7fa44c8fa9e504e400628e3ddd
|
ff3601641f695ce01888518b970eccc457b4a238
|
refs/heads/master
| 2021-01-22T21:48:47.038598
| 2017-03-21T15:53:47
| 2017-03-21T15:54:22
| 85,477,387
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
#10.7
import requests
import bs4
import time
def baiduclock():
    tim = 0
    agent = {'User-agent': 'Mozilla/5.0'}
    timer = time.clock()
    while time.clock() - timer <= 30:
        print(time.clock() - timer)
        httpbaidu = requests.get('http://www.baidu.com', headers=agent)
        if httpbaidu.status_code == 200:
            tim += 1
    # report once after the 30-second window; the original printed inside the loop, and only on failed requests
    print("The crawler reached Baidu {} times within 30s".format(tim))
baiduclock()
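# A minimal variant (an addition, not part of the original exercise): time.clock() was removed
# in Python 3.8 and measured CPU time rather than wall time on some platforms, so the same
# 30-second counter can be written with time.monotonic(). Defined for illustration only; never called here.
def baiduclock_monotonic():
    hits = 0
    agent = {'User-agent': 'Mozilla/5.0'}
    start = time.monotonic()
    while time.monotonic() - start <= 30:
        if requests.get('http://www.baidu.com', headers=agent).status_code == 200:
            hits += 1
    print("The crawler reached Baidu {} times within 30s".format(hits))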
|
[
"xlxw@xlxw.org"
] |
xlxw@xlxw.org
|
b7f12b68859daa4b1cac219a0c23a17281619f5b
|
6ebd192d228c04152ea914a3130d1e34226ac412
|
/tests/unit/generate/test_context.py
|
eb565ca18a0832a2a5d8b33495d7e1aba7d2833d
|
[
"MIT"
] |
permissive
|
STAMP-project/camp
|
3acfa6746e30914e159735305328ef3ccc51eabe
|
e8652ddf3e2e84ffbf2b9dff3fb5ee678b209246
|
refs/heads/master
| 2022-12-23T09:35:43.045284
| 2021-11-18T15:56:05
| 2021-11-18T15:56:05
| 114,260,055
| 10
| 11
|
MIT
| 2022-12-16T02:41:51
| 2017-12-14T14:28:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,347
|
py
|
#
# CAMP
#
# Copyright (C) 2017 -- 2019 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from unittest import TestCase
from camp.entities.model import Model, Component, Service, Feature, Variable, Goals
from camp.generate import Context
from ozepy import start_over
class LoadedContextIncludes(TestCase):
def setUp(self):
start_over()
self._context = Context()
components = [
Component("c1",
provided_services=[Service("S1")],
provided_features=[Feature("F1")],
variables=[Variable("memory",
str,
values=["1GB",
"2GB"])])
]
self._model = Model(components,
Goals(services=[Service("S1")]))
self._context.load_metamodel()
self._context.load_model(self._model)
def test_all_metaclasses(self):
for each in self.EXPECTED_CLASSES:
self.assertIn(each, self._context)
EXPECTED_CLASSES = ["Value", "Variable", "Service",
"Feature", "Component", "CInstance"]
def test_all_services(self):
for each in self._model.services:
self.assertIn(each.name, self._context)
def test_all_features(self):
for each in self._model.features:
self.assertIn(each.name, self._context)
def test_all_components(self):
for each in self._model.components:
self.assertIn(each.name, self._context)
def test_all_variables_with_qualified_names(self):
for each_component in self._model.components:
for each_variable in each_component.variables:
qualified_name = "_".join([each_component.name, each_variable.name])
self.assertIn(qualified_name, self._context)
def test_all_values_slots_with_qualified_names(self):
for each_component in self._model.components:
for each_variable in each_component.variables:
qualified_name = "_".join([each_component.name, "0", each_variable.name])
self.assertIn(qualified_name, self._context)
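# Usage note (an addition, assumed rather than taken from the project docs): from the
# repository root this suite runs under standard unittest discovery, e.g.
#
#   python -m unittest tests.unit.generate.test_context
#
# or, if pytest is available:
#
#   pytest tests/unit/generate/test_context.py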
|
[
"franck.chauvel@gmail.com"
] |
franck.chauvel@gmail.com
|
c7e4005fc61db2b565fdd6d8e80b1c661ea470d3
|
94bd08e95ae0c31973f500a7bab3aa5378f7ec7b
|
/snippets/migrations/0003_auto_20190429_0033.py
|
8228906cde11f126db33ecf58304050ad10be0a8
|
[] |
no_license
|
freddiemo/django_rest_framework_3_sample
|
eec2f3315e9c79ca3af6aa39423337606eb3aca8
|
ca299957f8a7e666c31d71be74028e18392f65dc
|
refs/heads/master
| 2020-05-17T03:14:45.963863
| 2019-04-29T17:23:27
| 2019-04-29T17:23:27
| 183,473,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Generated by Django 2.2 on 2019-04-29 00:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snippets', '0002_auto_20190429_0026'),
]
operations = [
migrations.AlterField(
model_name='snippet',
name='highlighted',
field=models.TextField(),
),
]
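# Usage note (an addition): a generated migration like this one is applied with Django's
# management commands, e.g.
#
#   python manage.py migrate snippets
#
# and `python manage.py sqlmigrate snippets 0003_auto_20190429_0033` prints the SQL it will run.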
|
[
"freddiejmo@gmail.com"
] |
freddiejmo@gmail.com
|
3d0f58b74138d3d783dd0a71510afd2354a9ac4e
|
243eddaee6dff4551da9c10f725d8828e13840ac
|
/get_premium/apps.py
|
d24dcc77204cd467ed804330c4f12a0a7f693080
|
[
"MIT"
] |
permissive
|
BarunBlog/Link_People
|
46b0c2c141ae042b481893aee869977755790dc8
|
1ffd07bc5b31a715133c99efbbb478efe18d632b
|
refs/heads/master
| 2023-01-24T04:54:13.545951
| 2020-12-03T05:56:33
| 2020-12-03T05:56:33
| 304,888,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
from django.apps import AppConfig
class GetPremiumConfig(AppConfig):
name = 'get_premium'
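# Usage sketch (an addition, not taken from this project's settings): Django picks this config
# up once the app is listed in INSTALLED_APPS, either by plain name or explicitly:
#
#   INSTALLED_APPS = [
#       # ...
#       'get_premium.apps.GetPremiumConfig',
#   ]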
|
[
"bhattacharjeebarun25@gmail.com"
] |
bhattacharjeebarun25@gmail.com
|
bafe2f617364ca4a66f1e271cba1e72f7d29aa53
|
2e46b786bd17c27f794b56c505b774fadd1ee7d4
|
/vente.py
|
27bcfdf60f1a9ab04c3ac6e59c65e96026426985
|
[] |
no_license
|
aniskchaou/PRODUCTION-ERP-MODULE
|
73c6193be5eade1beddafdc9204109ac654e88a7
|
f50f6f7193c3bd8ae8911dbe4e51579bfe77082f
|
refs/heads/master
| 2023-04-17T01:32:24.665057
| 2021-04-24T22:04:41
| 2021-04-24T22:04:41
| 351,488,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,260
|
py
|
# -*- coding: utf-8 -*-
import sys
import openerp
from openerp import models, fields, api, _
from openerp import tools
from datetime import date
from datetime import datetime
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
import re
import base64
from openerp.exceptions import except_orm, Warning, RedirectWarning
#----------------------------------------------------------
# taux_tva
#----------------------------------------------------------
class taux_tva(models.Model):
_name = 'taux.tva'
_rec_name = 'taux_tva'
client_ids = fields.One2many('vente.client', 'taux_tva_id', 'Clients')
taux_tva = fields.Float('Taux TVA', required=True)
default = fields.Boolean('Défaut')
@api.model
def create(self, values):
if values['default'] == True:
obj_ids = self.search([('default', '=', True)])
if len(obj_ids) > 0:
raise Warning(_('Erreur!'),
_('Il faut un seul valeur par défaut'))
        # taux_tva must be unique
taux_tva_count = self.search_count([('taux_tva', '=', values['taux_tva'])])
if taux_tva_count > 0:
raise Warning(_('Erreur!'),
_('( %s ) : Cette valeur existe déja')% (values['taux_tva']))
obj_id = super(taux_tva, self).create(values)
return obj_id
@api.multi
def write(self, values):
if values.get("default", False) == True:
obj_ids = self.search([('default', '=', True)])
if len(obj_ids) > 0:
raise Warning(_('Erreur!'),
_('Il faut un seul valeur par défaut'))
        # taux_tva must be unique
if values.get("taux_tva", False) != False and values.get("taux_tva", False) != self.taux_tva:
taux_tva_count = self.search_count([('taux_tva', '=', values.get("taux_tva", False))])
if taux_tva_count > 0:
raise Warning(_('Erreur!'),
_('( %s ) : Cette valeur existe déja')% (values.get("taux_tva", False)))
obj_id = super(taux_tva, self).write(values)
return obj_id
#----------------------------------------------------------
# article_commande_rel
#----------------------------------------------------------
class article_commande_rel(models.Model):
@api.one
@api.depends('commande_id', 'article_id')
def _get_quantite_livre(self):
for rec in self:
qte = 0
bl_ids = self.env['bon.livraison'].search([('commande_id', '=', rec.commande_id.id),
('article_id', '=', rec.article_id.id)])
for bl in bl_ids:
qte += bl.quantite
self.quantite_livre = qte
@api.one
@api.depends('commande_id', 'article_id')
def _get_quantite_reserve(self):
for rec in self:
qte = 0
br_ids = self.env['bon.reservation'].search([('commande_id', '=', rec.commande_id.id),
('article_id', '=', rec.article_id.id)])
for br in br_ids:
qte += br.quantite
self.quantite_reserve = qte
@api.one
@api.depends('quantite', 'quantite_livre')
def _get_progress(self):
if self.quantite > 0 and self.quantite_livre > 0:
self.progress = self.quantite_livre / self.quantite * 100
else:
self.progress = 0
_name = "article.commande.rel"
article_id = fields.Many2one('production.article', 'Article', ondelete='cascade', required=True)
commande_id = fields.Many2one('production.commande', 'Commande', ondelete='cascade', required=True)
quantite = fields.Float('Quantité', required=True)
unite = fields.Selection([('u','U'),
('kg','Kg'),
('m2','m²'),
('m','m')], related='article_id.unite', readonly=True, string='Unite')
date_limit = fields.Date('Date limite', required=True)
quantite_livre = fields.Float(compute='_get_quantite_livre', string='Qte_Livré')
quantite_reserve = fields.Float(compute='_get_quantite_reserve', string='Qte_Rés')
progress = fields.Float(compute='_get_progress', string='Progression')
stock_non_reserve = fields.Float(string='Stk_Non_Rés', related='article_id.stock_non_reserve')
@api.multi
def creer_of(self):
        # a manufacturing order can only be created while the sales order is still in the 'nonplanifie' state
if self.commande_id.state != 'nonplanifie':
raise Warning(_('Erreur!'),
_('OF est dejà Planifié'))
return {
'name': _("Ordre fabrication"),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'production.ordre.fabrication',
'view_id': False,
'context': {
'default_commande_id': self.commande_id.id,
'default_article_sortant': self.article_id.id,
'default_quantite': self.quantite,
'default_date_fin': self.date_limit,
'default_line_commande_id': self.id,
'default_famille_id':self.article_id.famille_id.id,
'default_quantite':self.quantite
},
}
@api.multi
def creer_bon_reservation(self):
        # a reservation slip requires the order to have been started (not merely planned)
if self.commande_id.state == 'planifie':
raise Warning(_('Erreur!'),
_('La commande n\'est pas encore démarré'))
return {
'name': _("Bon de réservation"),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'bon.reservation',
'view_id': False,
'context': {
'default_client_id': self.commande_id.client_id.id,
'default_commande_id': self.commande_id.id,
'default_article_id': self.article_id.id,
'default_quantite_commande': self.quantite
},
}
@api.multi
def creer_bon_livraison(self):
        # a delivery slip requires the order to have been started (not merely planned)
if self.commande_id.state == 'planifie':
raise Warning(_('Erreur!'),
_('La commande n\'est pas encore démarré'))
return {
'name': _("Bon de livraison"),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'bon.livraison',
'view_id': False,
'context': {
'default_client_id': self.commande_id.client_id.id,
'default_commande_id': self.commande_id.id,
'default_article_id': self.article_id.id,
'default_quantite_commande': self.quantite
},
}
#----------------------------------------------------------
# production_commande
#----------------------------------------------------------
class production_commande(models.Model):
@api.one
@api.depends('state')
def _check_color(self):
for rec in self:
color = 0
color_value = self.env['color.status'].search([('state', '=', rec.state)], limit=1).color
if color_value:
color = color_value
self.member_color = color
    # workflow button: start (Démarrer)
@api.one
def action_demarrer_commande(self):
if self.article_commande_ids:
self.write({'state': 'demarre'})
else:
raise Warning(_('Erreur!'),
_('Cette commande (%s) ne contient aucun article')% (self.num_commande))
@api.one
def action_confirmer_commande(self):
self.write({'state': 'nonplanifie'})
    # workflow button: finish (Terminer)
@api.one
def action_terminer_commande(self):
self.write({'state': 'termine'})
_name = 'production.commande'
_rec_name = 'num_commande'
member_color = fields.Integer(compute='_check_color', string='Color')
of_ids = fields.One2many('production.ordre.fabrication', 'commande_id', 'Ordres de fabrication')
num_commande = fields.Char('Num commande', required=True)
client_id = fields.Many2one('vente.client', 'Client', required=True, ondelete='cascade')
date_creation = fields.Date('Date création', required=True, default= lambda *a:datetime.now().strftime('%Y-%m-%d'))
date_limit_cmd = fields.Date('Date limite', required=True)
article_commande_ids = fields.One2many('article.commande.rel', 'commande_id', 'Articles')
state = fields.Selection([('nonconfirme','Non Confirmé'),('nonplanifie','Non Planifié'),('planifie','Planifié'),
('demarre','Demarré'),
('termine','Terminé')], 'Etat', readonly=True, default='nonconfirme')
bon_livraison_ids = fields.One2many('bon.livraison', 'commande_id', 'Bons de livraiosn')
bon_reservation_ids = fields.One2many('bon.reservation', 'commande_id', 'Bons de réservation')
@api.model
def create(self, values):
        # num_commande must be unique
if self.env['production.commande'].search_count([('num_commande', '=', values['num_commande'])]) > 0:
raise Warning(_('Erreur!'),
_('Numéro commande existe déjà [ %s ].')% (values['num_commande']))
# test date_creation <= date_limit_cmd
if values['date_creation'] > values['date_limit_cmd']:
raise Warning(_('Erreur!'),
_('Il faut que : Date création <= Date limite'))
obj_id = super(production_commande, self).create(values)
        # the article lines must be distinct
ids = []
for obj in self.browse(obj_id.id):
for line in obj.article_commande_ids:
if line.article_id.id in ids:
raise Warning(_('Erreur!'),
_("Même article ajouté plusieurs fois : %s") % line.article_id.code_article)
ids.append(line.article_id.id)
        # fetch the order lines
article_lines = self.env['article.commande.rel'].search([('commande_id', '=', obj_id.id)])
for l in article_lines:
#test date_creation <= date_limit (article) <= date_limit_cmd
if l.date_limit > values['date_limit_cmd'] or l.date_limit < values['date_creation']:
raise Warning(_('Erreur!'),
_('Les dates des lignes articles doivent êtres dans [ %s , %s].\n %s qui est séléctionnée')% (values['date_creation'], values['date_limit_cmd'], l.date_limit))
            # the quantity must be positive
if float(l.quantite) <= 0:
raise Warning(_('Erreur!'),
_('La quantité doit être supérieur à zero'))
return obj_id
@api.multi
def creer_of(self):
return {
'name': _("Ordre fabrication"),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'production.ordre.fabrication',
'view_id': False,
'context': {
'default_commande_id': self.id,
},
}
@api.multi
def write(self, values):
obj_id=super(production_commande,self).write(values)
for obj in self:
            # num_commande must be unique
self.env.cr.execute('select * from production_commande where num_commande = %s',(obj.num_commande,))
lines = self.env.cr.dictfetchall()
if len(lines) > 1:
raise Warning(_('Erreur!'),
_('Numéro commande existe déjà [ %s ].')% (obj.num_commande))
# test date_creation <= date_limit_cmd
if obj.date_creation > obj.date_limit_cmd:
raise Warning(_('Erreur!'),
_('Il faut que : Date création <= Date limite'))
            # the article lines must be distinct
ids = []
for line in obj.article_commande_ids:
if line.article_id.id in ids:
raise Warning(_('Erreur!'),
_("Même article ajouté plusieurs fois : %s") % line.article_id.code_article)
ids.append(line.article_id.id)
            # fetch the order lines
article_lines = self.env['article.commande.rel'].search([('commande_id', '=', obj.id)])
for l in article_lines:
#test date_creation <= date_limit (article) <= date_limit_cmd
if l.date_limit > obj.date_limit_cmd or l.date_limit < obj.date_creation:
raise Warning(_('Erreur!'),
_('Les dates des lignes articles doivent êtres dans [ %s , %s].\n %s qui est séléctionnée')% (obj.date_creation, obj.date_limit_cmd, l.date_limit))
                # the quantity must be positive
if float(l.quantite) <= 0:
raise Warning(_('Erreur!'),
_('La quantité doit être supérieur à zero'))
return obj_id
#----------------------------------------------------------
# bon_reservation
#----------------------------------------------------------
class bon_reservation(models.Model):
@api.one
@api.depends('commande_id', 'article_id')
def _get_quantite_commande(self):
qte = 0
if self.commande_id and self.article_id:
self.quantite_commande = self.env['article.commande.rel'].search([('article_id', '=', self.article_id.id),
('commande_id', '=', self.commande_id.id)],
limit=1).quantite
@api.one
@api.depends('commande_id', 'article_id')
def _get_quantite_livre(self):
qte = 0
if self.commande_id and self.article_id:
bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', self.commande_id.id),
('article_id', '=', self.article_id.id)])
for bl in bon_livraison_ids:
qte += bl.quantite
self.quantite_livre = qte
@api.one
@api.depends('commande_id', 'article_id')
def _get_quantite_reserve(self):
qte = 0
if self.commande_id and self.article_id:
bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', self.commande_id.id),
('article_id', '=', self.article_id.id)])
for br in bon_reservation_ids:
qte += br.quantite
self.quantite_reserve = qte
@api.one
@api.depends('quantite_commande', 'quantite_reserve')
def _get_progress_reserve_commande(self):
if self.quantite_commande > 0 and self.quantite_reserve > 0:
self.progress_reserve_commande = self.quantite_reserve / self.quantite_commande * 100
else:
self.progress_reserve_commande = 0
_name = 'bon.reservation'
code_bon = fields.Char('Code bon :', readonly=True)
date_bon = fields.Date('Date bon', required=True, default= lambda *a:datetime.now().strftime('%Y-%m-%d'))
client_id = fields.Many2one('vente.client', 'Code client', ondelete='cascade', required=True, domain=[('id', 'in', [])])
commande_id = fields.Many2one('production.commande', 'Code commande', ondelete='cascade', required=True,
domain="[('state', '=', 'demarre')]" )
article_id = fields.Many2one('production.article', 'Code article', ondelete='cascade', required=True)
quantite = fields.Float('Quantité ', required=True)
    # TODO: add a satisfied-quantity field
remarque = fields.Text('Remarque')
quantite_commande = fields.Float(compute='_get_quantite_commande', string='Quantité commandé')
quantite_livre = fields.Float(compute='_get_quantite_livre', string='Quantité livré')
quantite_reserve = fields.Float(compute='_get_quantite_reserve', string='Quantité réservé')
stock_disponible = fields.Float('Stock disponible', related='article_id.stock_disponible')
stock_non_reserve = fields.Float('Stock non réservé', related='article_id.stock_non_reserve')
unite = fields.Selection([('u','U'),
('kg','Kg'),
('m2','m²'),
('m','m')], related='article_id.unite', readonly=True, string='Unite')
progress_reserve_commande = fields.Float(compute='_get_progress_reserve_commande', string='Progression quantité réservé')
@api.model
def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False):
res = super(bon_reservation, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
for field in res['fields']:
if field == 'article_id':
res['fields'][field]['domain'] = [('id','in', [])]
return res
@api.model
def create(self, values):
        # the quantity must be strictly positive, otherwise raise
if values['quantite'] <= 0:
raise Warning(_('Erreur!'),
_('La quantité doit étre supérieur strictement à zero ( %s )')% (values['quantite']))
        # raise if the quantity to reserve exceeds the unreserved stock
article_obj = self.env['production.article'].browse(values['article_id'])
if values['quantite'] > article_obj.stock_non_reserve:
raise Warning(_('Erreur!'),
_('La quantité à réservé est supérieur à la quantité stock disponible'))
        # look up the ordered quantity
values['quantite_commande'] = self.env['article.commande.rel'].search([('article_id', '=', values['article_id']),
('commande_id', '=', values['commande_id'])],
limit=1).quantite
        # compute the quantity already reserved
bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', values['commande_id']),
('article_id', '=', values['article_id'])])
qte_reserve = 0
for b in bon_reservation_ids:
qte_reserve += b.quantite
        # raise if the reserved quantity would exceed the ordered quantity
qte_reserve_total = qte_reserve + values['quantite']
if qte_reserve_total > values['quantite_commande']:
raise Warning(_('Erreur!'),
_('La quantité à réservé est supérieur à la quantité demandé :\n \
(qantite_à_réservé : %s / quantite_demandé : %s)')% (qte_reserve_total, values['quantite_commande']))
        # increase the reserved stock
article_obj.stock_reserve += values['quantite']
        # generate the "code_bon" sequence number
values['code_bon'] = self.env['ir.sequence'].get('bon.reservation')
new_id = super(bon_reservation, self).create(values)
return new_id
@api.multi
def write(self, values):
nouv_article = values.get('article_id', None)
nouv_quantite = values.get('quantite', None)
ancien_article_obj = self.env['production.article'].browse(self.article_id.id)
if nouv_article:
nouv_article_obj = self.env['production.article'].browse(nouv_article)
            # a new quantity was provided
            if nouv_quantite:
                # the quantity must be strictly positive, otherwise raise
if nouv_quantite <= 0:
raise Warning(_('Erreur!'),
_('La quantité doit étre supérieur strictement à zero ( %s )')% (nouv_quantite))
                # raise if the quantity to reserve exceeds the unreserved stock
if nouv_quantite > nouv_article_obj.stock_non_reserve:
raise Warning(_('Erreur!'),
_('La quantité à réservé est supérieur à la quantité stock disponible'))
                # adjust the reserved stock
ancien_article_obj.stock_reserve -= self.quantite
nouv_article_obj.stock_reserve += nouv_quantite
            else:  # same quantity
                # raise if the quantity to reserve exceeds the unreserved stock
if self.quantite > nouv_article_obj.stock_non_reserve:
raise Warning(_('Erreur!'),
_('La quantité à réservé est supérieur à la quantité stock disponible'))
                # adjust the reserved stock
ancien_article_obj.stock_reserve -= self.quantite
nouv_article_obj.stock_reserve += self.quantite
else:
if nouv_quantite:
                # the quantity must be strictly positive, otherwise raise
if nouv_quantite <= 0:
raise Warning(_('Erreur!'),
_('La quantité doit étre supérieur strictement à zero ( %s )')% (nouv_quantite))
                # raise if the additional quantity to reserve exceeds the unreserved stock
if (nouv_quantite - self.quantite) > ancien_article_obj.stock_non_reserve:
raise Warning(_('Erreur!'),
_('La quantité à réservé est supérieur à la quantité stock disponible'))
                # adjust the reserved stock
ancien_article_obj.stock_reserve += nouv_quantite - self.quantite
obj_id=super(bon_reservation, self).write(values)
return obj_id
@api.multi
def unlink(self):
for rec in self:
article_obj = self.env['production.article'].browse(rec.article_id.id)
article_obj.stock_reserve -= rec.quantite
return super(bon_reservation, self).unlink()
@api.onchange('commande_id')
def onchange_commande_id(self):
res = {}
ids = []
default_commande = self._context.get('default_commande_id', False)
default_article = self._context.get('default_article_id', False)
if self.commande_id:
if default_article == False:
self.article_id = []
if default_commande:
if self.commande_id.id != default_commande:
self.article_id = []
            # restrict article_id to the articles of the selected commande_id
for ligne in self.commande_id.article_commande_ids:
ids.append(ligne.article_id.id)
            # set client_id from the selected commande_id
self.client_id = self.commande_id.client_id
        else:  # no commande_id selected
self.article_id = []
res['domain'] = {'article_id': [('id', 'in', ids)]}
return res
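# Summary sketch (an addition, not part of the module): create()/write() above enforce the
# reservation invariants. Expressed as a hypothetical standalone check, they amount to:
#
#   def reservation_valide(qte_a_reserver, qte_deja_reservee, qte_commandee, stock_non_reserve):
#       # positive quantity, within the unreserved stock, never exceeding the ordered total
#       return (qte_a_reserver > 0
#               and qte_a_reserver <= stock_non_reserve
#               and qte_deja_reservee + qte_a_reserver <= qte_commandee)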
#----------------------------------------------------------
# bon_livraison
#----------------------------------------------------------
class bon_livraison(models.Model):
@api.one
@api.depends('commande_id', 'article_id')
def _get_quantite_commande(self):
qte = 0
if self.commande_id and self.article_id:
self.quantite_commande = self.env['article.commande.rel'].search([('article_id', '=', self.article_id.id),
('commande_id', '=', self.commande_id.id)],
limit=1).quantite
@api.one
@api.depends('commande_id', 'article_id')
def _get_quantite_livre(self):
qte = 0
if self.commande_id and self.article_id:
bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', self.commande_id.id),
('article_id', '=', self.article_id.id)])
qte = 0
for bl in bon_livraison_ids:
qte += bl.quantite
self.quantite_livre = qte
@api.one
@api.depends('commande_id', 'article_id')
def _get_quantite_reserve(self):
qte = 0
if self.commande_id and self.article_id:
bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', self.commande_id.id),
('article_id', '=', self.article_id.id)])
qte = 0
for br in bon_reservation_ids:
qte += br.quantite
self.quantite_reserve = qte
@api.one
@api.depends('quantite_commande', 'quantite_livre')
def _get_progress_livre_commande(self):
if self.quantite_commande > 0 and self.quantite_livre > 0:
self.progress_livre_commande = self.quantite_livre / self.quantite_commande * 100
else:
self.progress_livre_commande = 0
@api.one
@api.depends('quantite_commande', 'quantite_reserve')
def _get_progress_reserve_commande(self):
if self.quantite_commande > 0 and self.quantite_reserve > 0:
self.progress_reserve_commande = self.quantite_reserve / self.quantite_commande * 100
else:
self.progress_reserve_commande = 0
_name = 'bon.livraison'
code_bon = fields.Char('Code bon :', readonly=True)
date_bon = fields.Date('Date bon', required=True, default= lambda *a:datetime.now().strftime('%Y-%m-%d'))
client_id = fields.Many2one('vente.client', 'Code client', ondelete='cascade', required=True, domain=[('id', 'in', [])])
commande_id = fields.Many2one('production.commande', 'Code commande', ondelete='cascade', required=True,
domain="[('state', '=', 'demarre')]" )
article_id = fields.Many2one('production.article', 'Code article', ondelete='cascade', required=True)
quantite = fields.Float('Quantité', required=True)
quantite_commande = fields.Float(compute='_get_quantite_commande', string='Quantité commandé')
quantite_commande2 = fields.Float('Quantité commandé', related='quantite_commande')
quantite_livre = fields.Float(compute='_get_quantite_livre', string='Quantité livré')
quantite_reserve = fields.Float(compute='_get_quantite_reserve', string='Quantité réservé')
stock_disponible = fields.Float('Stock disponible', related='article_id.stock_disponible')
stock_non_reserve = fields.Float('Stock non réservé', related='article_id.stock_non_reserve')
unite = fields.Selection([('u','U'),
('kg','Kg'),
('m2','m²'),
('m','m')], related='article_id.unite', readonly=True, string='Unite')
progress_reserve_commande = fields.Float(compute='_get_progress_reserve_commande', string='Progression quantité réservé')
progress_livre_commande = fields.Float(compute='_get_progress_livre_commande', string='Progression quantité livré')
@api.model
def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False):
res = super(bon_livraison, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
for field in res['fields']:
if field == 'article_id':
res['fields'][field]['domain'] = [('id','in', [])]
return res
@api.model
def create(self, values):
#test si quantite <= 0 on genere exception
if values['quantite'] <= 0:
raise Warning(_('Erreur!'),
_('La quantité doit étre supérieur strictement à zero ( %s )')% (values['quantite']))
#Calcul quantite réservé
bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', values['commande_id']),
('article_id', '=', values['article_id'])])
qte_res = 0
for b in bon_reservation_ids:
qte_res += b.quantite
#test si aucun quantite réservé
if qte_res == 0:
raise Warning(_('Erreur!'),
_('Aucun quantité réservé dans le stock'))
#Calcul quantite livré
bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', values['commande_id']),
('article_id', '=', values['article_id'])])
qte_livre = 0
for b in bon_livraison_ids:
qte_livre += b.quantite
#test si quantite livre > quantite reserve ==> exception
qte_livre_total = qte_livre + values['quantite']
if qte_livre_total > qte_res:
raise Warning(_('Erreur!'),
_('La quantité à livrer est supérieur à la quantité réservé:\n \
(quantite_à_livré : %s / quantite_réservé : %s)')% (qte_livre_total, qte_res))
#generer code sequence "code_bon"
values['code_bon'] = self.env['ir.sequence'].get('bon.livraison')
# stock_reel -= qte
# stock_reserve -= qte
article_obj = self.env['production.article'].browse(values['article_id'])
if article_obj:
article_obj.stock_reel -= values['quantite']
article_obj.stock_reserve -= values['quantite']
#test stock minimale
article_obj.verifier_stock()
new_id = super(bon_livraison, self).create(values)
return new_id
@api.multi
def write(self, values):
commande = values.get('commande_id', None)
if commande == None:
commande = self.commande_id.id
nouv_article = values.get('article_id', None)
nouv_quantite = values.get('quantite', None)
ancien_article_obj = self.env['production.article'].browse(self.article_id.id)
if nouv_article:
#Calcul quantite réservé
bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', commande),
('article_id', '=', nouv_article)])
qte_res = 0
for b in bon_reservation_ids:
qte_res += b.quantite
#Calcul quantite livré
bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', commande),
('article_id', '=', nouv_article)])
qte_livre = 0
for b in bon_livraison_ids:
qte_livre += b.quantite
nouv_article_obj = self.env['production.article'].browse(nouv_article)
#si il y a une nouvelle quantité
if nouv_quantite:
#test si quantite <= 0 on genere exception
if nouv_quantite <= 0:
raise Warning(_('Erreur!'),
_('La quantité doit étre supérieur strictement à zero ( %s )')% (nouv_quantite))
#test si quantite livre > quantite reserve ==> exception
qte_livre_total = qte_livre + nouv_quantite
if qte_livre_total > qte_res:
raise Warning(_('Erreur!'),
_('La quantité à livrer est supérieur à la quantité réservé:\n \
(quantite_à_livré : %s / quantite_réservé : %s)')% (qte_livre_total, qte_res))
#modifier le stock
ancien_article_obj.stock_reel += self.quantite
ancien_article_obj.stock_reserve += self.quantite
nouv_article_obj.stock_reel -= nouv_quantite
nouv_article_obj.stock_reserve -= nouv_quantite
else:#meme quantite
#test si quantite livre > quantite reserve ==> exception
qte_livre_total = qte_livre + self.quantite
if qte_livre_total > qte_res:
raise Warning(_('Erreur!'),
_('La quantité à livrer est supérieur à la quantité réservé:\n \
(quantite_à_livré : %s / quantite_réservé : %s)')% (qte_livre_total, qte_res))
#modifier le stock
ancien_article_obj.stock_reel += self.quantite
ancien_article_obj.stock_reserve += self.quantite
nouv_article_obj.stock_reel -= self.quantite
nouv_article_obj.stock_reserve -= self.quantite
else:
if nouv_quantite:
#test si quantite <= 0 on genere exception
if nouv_quantite <= 0:
raise Warning(_('Erreur!'),
_('La quantité doit étre supérieur strictement à zero ( %s )')% (nouv_quantite))
#Calcul quantite réservé
bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', commande),
('article_id', '=', self.article_id.id)])
qte_res = 0
for b in bon_reservation_ids:
qte_res += b.quantite
#Calcul quantite livré
bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', commande),
('article_id', '=', self.article_id.id)])
qte_livre = 0
for b in bon_livraison_ids:
qte_livre += b.quantite
#test si quantite livre > quantite reserve ==> exception
if nouv_quantite > self.quantite:
qte_livre_total = qte_livre + nouv_quantite - self.quantite
if qte_livre_total > qte_res:
raise Warning(_('Erreur!'),
_('La quantité à livrer est supérieur à la quantité réservé:\n \
(quantite_à_livré : %s / quantite_réservé : %s)')% (qte_livre_total, qte_res))
#modifier le stock
ancien_article_obj.stock_reel += self.quantite - nouv_quantite
ancien_article_obj.stock_reserve += self.quantite - nouv_quantite
obj_id=super(bon_livraison,self).write(values)
return obj_id
@api.multi
def unlink(self):
for rec in self:
article_obj = self.env['production.article'].browse(rec.article_id.id)
article_obj.stock_reel += rec.quantite
article_obj.stock_reserve += rec.quantite
return super(bon_livraison, self).unlink()
@api.onchange('commande_id')
def onchange_commande_id(self):
res = {}
ids = []
default_commande = self._context.get('default_commande_id', False)
default_article = self._context.get('default_article_id', False)
if self.commande_id:
if default_article == False:
self.article_id = []
if default_commande:
if self.commande_id.id != default_commande:
self.article_id = []
#filter sur le champ article_id selon commande_id séléctionné
for ligne in self.commande_id.article_commande_ids:
ids.append(ligne.article_id.id)
#select client_id selon commande_id séléctionné
self.client_id = self.commande_id.client_id
else:#si commande_id vide
self.article_id = []
res['domain'] = {'article_id': [('id', 'in', ids)]}
return res
|
[
"kchaouanis26@gmail.com"
] |
kchaouanis26@gmail.com
|
30c051b3af9691fb1d09fd55b0cfda9348103df1
|
c1d7cb2489d19fca3bcb1627c2be62745af9c075
|
/Section-4/Source Code/iterator_example.py
|
0b71e1f52c7984d5d05a5d3c6adc8b71f92e7fb7
|
[] |
no_license
|
Yasser-Abbass/Python-From-Scratch
|
01c13726ff94fba8796b80eca5c4d02c93d4b658
|
4973535fa8252a4de0755290964f418e708b21fd
|
refs/heads/master
| 2022-12-11T10:05:27.826773
| 2020-09-10T21:09:55
| 2020-09-10T21:09:55
| 294,151,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
import sys
class Fib:
def __init__(self, num):
self.num = num
self.a = 0
self.b = 1
self.result = 0
def __iter__(self):
return self
def __next__(self):
if self.a < self.num:
self.result = self.a
self.a, self.b = self.b, self.b + self.a
return self.result
else:
raise StopIteration
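# Illustrative aside (an addition, not part of the original exercise): the same
# sequence can be produced with a generator function, which raises StopIteration
# implicitly when the function returns.
def fib_gen(num):
    a, b = 0, 1
    while a < num:
        yield a
        a, b = b, a + b
# list(fib_gen(20)) == [0, 1, 1, 2, 3, 5, 8, 13], the same values Fib(20) yields.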
if __name__ == "__main__":
x = int(sys.argv[1])
y = Fib(x)
results = []
for result in y:
results.append(result)
print(results)
|
[
"mongo.yasso@gmail.com"
] |
mongo.yasso@gmail.com
|
a1bc32ee0d27ba4faf285733b776292f4fc063d1
|
be3263f52e4c7b76d1d1d2afa81317967f8b4105
|
/coursea_course/mini-project_week-06.py
|
aff116685a5636f34a3c5d4f4513d5dcd15d9cbd
|
[] |
no_license
|
eastmanjoe/python_bucket
|
b1724ba035928ec6dc5364db33f9c23ea85c5fbb
|
0df4e77e415716dec9d059c592b287024b2cdac5
|
refs/heads/master
| 2021-01-17T08:54:15.115953
| 2018-08-09T15:39:13
| 2018-08-09T15:39:13
| 5,634,366
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,147
|
py
|
#!/usr/bin/env python
# URL for assignment template
# http://www.codeskulptor.org/#examples-blackjack_template.py
# URL for completed assignment
# http://www.codeskulptor.org/#user38_vITLjG598O_0.py
# http://www.codeskulptor.org/#user38_vITLjG598O_1.py
# http://www.codeskulptor.org/#user38_vITLjG598O_2.py
# http://www.codeskulptor.org/#user38_vITLjG598O_3.py
# Copy and paste below the line into CodeSkulptor
'''
- Card Class Testing: http://www.codeskulptor.org/#examples-card_template.py
- Hand Class Testing: http://www.codeskulptor.org/#examples-hand_template.py
- Draw Class Testing: http://www.codeskulptor.org/#examples-deck_template.py
'''
#------------------------------------------------------------------------------
'''
Mini-project - Week 06
Blackjack: The Game
'''
# Mini-project #6 - Blackjack
import simplegui
import random
DEBUG = False
# load card sprite - 936x384 - source: jfitz.com
CARD_SIZE = (72, 96)
CARD_CENTER = (36, 48)
card_images = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/cards_jfitz.png")
CARD_BACK_SIZE = (72, 96)
CARD_BACK_CENTER = (36, 48)
card_back = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/card_jfitz_back.png")
# initialize some useful global variables
in_play = False
outcome = ""
score = 0
# define globals for cards
SUITS = ('C', 'S', 'H', 'D')
RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
# define card class
class Card:
def __init__(self, suit, rank):
if (suit in SUITS) and (rank in RANKS):
self.suit = suit
self.rank = rank
else:
self.suit = None
self.rank = None
print "Invalid card: ", suit, rank
def __str__(self):
return self.suit + self.rank
def get_suit(self):
return self.suit
def get_rank(self):
return self.rank
def draw(self, canvas, pos):
card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank),
CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit)
)
canvas.draw_image(
card_images, card_loc, CARD_SIZE,
[pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE
)
# define hand class
class Hand:
def __init__(self):
self.hand = []
self.value = 0
self.ace_in_hand = False
def __str__(self):
card_string = ''
for i in range(len(self.hand)):
card_string += ' ' + str(self.hand[i])
return 'Hand contains' + card_string
def add_card(self, card):
self.hand.append(card)
def get_value(self):
# count aces as 1, if the hand has an ace, then add 10 to hand value if it doesn't bust
self.value = 0
for c in self.hand:
# print self.value, c.get_rank(), c.get_suit(), self.ace_in_hand
self.value += VALUES[c.get_rank()]
if c.get_rank() == 'A': self.ace_in_hand = True
if not self.ace_in_hand:
return self.value
else:
if self.value + 10 <= 21:
return self.value + 10
else:
return self.value
def draw(self, canvas, pos):
for c in self.hand:
c.draw(canvas, pos)
pos = [pos[0] + CARD_SIZE[0] + 5, pos[1]]
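# Illustrative check of the ace rule above (an addition, not part of the original
# CodeSkulptor submission): an ace is counted as 1, and get_value() adds 10 only
# when that does not bust the hand.
def _demo_ace_value():
    hand = Hand()
    hand.add_card(Card('S', 'A'))
    hand.add_card(Card('H', '7'))
    return hand.get_value()   # expected: 18  (1 + 7 + 10)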
# define deck class
class Deck:
def __init__(self):
self.deck = [Card(s, r) for s in SUITS for r in RANKS]
def shuffle(self):
# shuffle the deck
random.shuffle(self.deck)
def deal_card(self):
# pull the card from the end of the deck
return self.deck.pop(-1)
def __str__(self):
deck_string = ''
for i in range(len(self.deck)):
deck_string += ' ' + str(self.deck[i])
return 'Deck contains' + deck_string
#define event handlers for buttons
def deal():
global outcome, in_play, dealer_hand, player_hand, game_deck, score
#if new cards dealt while game in play, player automatically loses
if in_play:
score -= 1
# always start a new game with a new deck
game_deck = Deck()
game_deck.shuffle()
player_hand = Hand()
dealer_hand = Hand()
player_hand.add_card(game_deck.deal_card())
dealer_hand.add_card(game_deck.deal_card())
player_hand.add_card(game_deck.deal_card())
dealer_hand.add_card(game_deck.deal_card())
if DEBUG:
print 'Dealer', dealer_hand
print 'Player', player_hand
outcome = 'Hit or Stand?'
in_play = True
def hit():
global outcome, in_play, score, player_hand
# if the hand is in play, hit the player
if in_play:
player_hand.add_card(game_deck.deal_card())
if player_hand.get_value() > 21:
outcome = "Player Busted"
in_play = False
score -= 1
if DEBUG:
print 'Dealer', dealer_hand
print 'Player', player_hand
print 'Outcome:', outcome
print 'Score:', score
def stand():
global outcome, in_play, score, player_hand
# if hand is in play, repeatedly hit dealer until his hand has value 17 or more
while in_play:
if dealer_hand.get_value() > 21:
outcome = "Dealer Busted"
in_play = False
score += 1
elif dealer_hand.get_value() >= 17:
in_play = False
if dealer_hand.get_value() >= player_hand.get_value():
outcome = "Dealer WINS !"
score -= 1
elif dealer_hand.get_value() < player_hand.get_value():
outcome = "Player WINS !"
score += 1
else:
dealer_hand.add_card(game_deck.deal_card())
if DEBUG:
print 'Dealer', dealer_hand
print 'Player', player_hand
print 'Outcome:', outcome
print 'Score:', score
# draw handler
def draw(canvas):
canvas.draw_text('BlackJack', [170, 75], 72, 'Black')
canvas.draw_text(outcome, [190, 125], 48, 'Red')
canvas.draw_text("Dealer's Hand", [170, 175], 32, 'Black')
dealer_hand.draw(canvas, [50, 200])
if in_play:
canvas.draw_image(
card_back, CARD_BACK_CENTER, CARD_BACK_SIZE,
[50 + CARD_BACK_CENTER[0], 200 + CARD_BACK_CENTER[1]],
CARD_BACK_SIZE
)
canvas.draw_text("Player's Hand", [170, 375], 32, 'Black')
player_hand.draw(canvas, [50, 400])
if not in_play:
canvas.draw_text('New Deal?', [50, 460], 48, 'Green')
canvas.draw_text('Score', [400, 550], 32, 'Black')
canvas.draw_text(str(score), [425, 590], 48, 'Black')
# initialization frame
frame = simplegui.create_frame("Blackjack", 600, 600)
frame.set_canvas_background("Green")
#create buttons and canvas callback
frame.add_button("Deal", deal, 200)
frame.add_button("Hit", hit, 200)
frame.add_button("Stand", stand, 200)
frame.set_draw_handler(draw)
# get things rolling
player_hand = Hand()
dealer_hand = Hand()
deal()
frame.start()
|
[
"eastman.joseph@gmail.com"
] |
eastman.joseph@gmail.com
|
d8756586064d46abf0b01f2f255a4408170c98ca
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j19485-4225/sdB_GALEX_J19485-4225_lc.py
|
ad5e79f01dd4bec1f067eebd2a8c3dee9507a2f5
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[297.137792,-42.429325], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_GALEX_J19485-4225 /sdB_GALEX_J19485-4225_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
2e6274aeecb4abc551445e441ed62ced40c33285
|
6ae058253aeff9ee9d4a166bab1c6898fb2fa042
|
/hackerrank/artificial-intelligence/document-classification/document-classification.py
|
d58f437f054a7f3600a34b2252cff4f05a152261
|
[
"MIT"
] |
permissive
|
gnuaha7/programming-problems
|
054136b840a8323ca6d5c20e579dc63e19543138
|
3ed43b306c19718f00bf77ed191e7a3f2ba8da57
|
refs/heads/master
| 2021-01-11T02:56:34.788550
| 2016-09-04T19:38:53
| 2016-09-04T19:38:53
| 70,878,741
| 0
| 0
| null | 2016-10-14T06:00:42
| 2016-10-14T06:00:42
| null |
UTF-8
|
Python
| false
| false
| 1,457
|
py
|
# https://www.hackerrank.com/challenges/document-classification
import sys
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.cross_validation import cross_val_score
def load_corpus(f, has_classes=True):
num_docs = int(f.readline())
corpus = ([], [])
for i in range(num_docs):
line = f.readline()
if has_classes:
i = line.find(' ')
class_ = int(line[:i])
doc = line[i+1:]
corpus[1].append(class_)
else:
doc = line
corpus[0].append(doc)
return corpus
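# Illustrative sketch (an addition, not part of the original submission):
# load_corpus expects a first line holding the document count, followed by
# "<class> <document text>" lines, so an in-memory buffer can stand in for
# trainingdata.txt when experimenting locally.
def _demo_load_corpus():
    import io
    sample = io.StringIO("2\n1 the cat sat on the mat\n2 stocks rallied today\n")
    return load_corpus(sample, has_classes=True)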
def train_classifier(corpus):
model = Pipeline([
('tfidf', TfidfVectorizer(stop_words='english')),
('classifier', SGDClassifier(loss='log', penalty='none', n_iter=100)),
])
# scores = cross_val_score(model, corpus[0], corpus[1], cv=10, n_jobs=-1)
# print('CV score:', np.mean(scores))
model.fit(corpus[0], corpus[1])
return model
if __name__ == '__main__':
np.random.seed(sum(map(ord, 'document-classification')))
with open('trainingdata.txt') as f:
training_data = load_corpus(f, has_classes=True)
classifier = train_classifier(training_data)
test_data = load_corpus(sys.stdin, has_classes=False)
classes = classifier.predict(test_data[0])
print('\n'.join(str(class_) for class_ in classes))
|
[
"yasserglez@gmail.com"
] |
yasserglez@gmail.com
|
8cafbf132ca8eb8b86bedf45f6b404078bcc3054
|
1f127d9c25b2a3ff842019fffeaad4e8ff861ca7
|
/Articles/models.py
|
83b8b3da0b6e63692281463036ccb144aa4d55c0
|
[
"MIT"
] |
permissive
|
Hady-Eslam/Articles_Analyzing
|
b8caa49b5b21589e8ec5b101e5a52c92f747ff3e
|
885232db89cec88fc39e8260e7fde4241f4d7280
|
refs/heads/master
| 2021-07-09T01:43:18.097163
| 2020-07-21T09:42:26
| 2020-07-21T09:42:26
| 156,908,808
| 0
| 2
| null | 2020-07-21T09:42:28
| 2018-11-09T19:30:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
from django.db import models
from Python import init
class Posts(models.Model):
User_Email = models.CharField(max_length=init.Email_Len)
ArticleTitle = models.CharField(max_length=init.ArticleTitle_Len)
Article = models.CharField(max_length=init.Article_Len)
Tags = models.CharField(max_length=init.ArticleTags_Len, default='')
Deleted = models.BooleanField()
Date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'User Email Who Write The Post : ' + str(self.User_Email)
class LikesDisLikes(models.Model):
User_Email = models.CharField(max_length=110)
Post_id = models.CharField(max_length=11)
Status = models.BooleanField()
Date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'User : ' + str(self.User_Email) + ' Like Or DisLike Post : ' + str(self.Post_id)
class Comments(models.Model):
Post_id = models.IntegerField()
User_Email = models.CharField(max_length=init.Email_Len)
Comment = models.CharField(max_length=init.Comment_Len)
Date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'User : ' + str(self.User_Email) + ' Comment in Post : ' + str(self.Post_id)
|
[
"abdoaslam000@gmail.com"
] |
abdoaslam000@gmail.com
|
afa9a1d0944e4af29df98932dd9113870175e138
|
3ac0a169aa2a123e164f7434281bc9dd6373d341
|
/singleNumber.py
|
4a7b92101b0350685936c92368994f2cf80679bc
|
[] |
no_license
|
sfeng77/myleetcode
|
02a028b5ca5a0354e99b8fb758883902a768f410
|
a2841fdb624548fdc6ef430e23ca46f3300e0558
|
refs/heads/master
| 2021-01-23T02:06:37.569936
| 2017-04-21T20:31:06
| 2017-04-21T20:31:06
| 85,967,955
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
s = 0
for v in nums:
s = s ^ v
return s
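# Quick illustrative check (an addition, not part of the original submission):
# XOR is associative and commutative, and x ^ x == 0, so every paired value
# cancels out and only the unpaired number remains.
if __name__ == "__main__":
    assert Solution().singleNumber([4, 1, 2, 1, 2]) == 4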
|
[
"sfeng77@gmail.com"
] |
sfeng77@gmail.com
|
36ff37b0f61328d72dfe6e5c252ff5a249ce364e
|
4c3d19edd4e7e6325fd131134a28f5e0e78e1221
|
/accounts/user/registration.py
|
510bfe1fe853619048311613240ddecd78d0acf9
|
[] |
no_license
|
carpancan/producthunt
|
1e8e6e0793e24d049a5a95f84341fe0d977bbc79
|
ee51d0d6bf26f34dd4849c26603e9d0c43e45c54
|
refs/heads/master
| 2023-05-03T23:52:16.889611
| 2022-02-10T10:49:13
| 2022-02-10T10:49:13
| 234,498,693
| 0
| 0
| null | 2023-04-21T20:44:59
| 2020-01-17T07:51:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
from django.contrib.auth.models import User
from .exceptions import CustomExceptions
class Register:
__new_user_dto = None
def create_user(self, new_user_dto):
self.__initialize(new_user_dto)
self.__check_if_user_exists()
return self.__persist_user()
def __initialize(self, new_user_dto):
self.__new_user_dto = new_user_dto
def __check_if_user_exists(self):
try:
User.objects.get(username=self.__new_user_dto.get_username())
raise CustomExceptions.UserExistsException('User already in use')
except User.DoesNotExist as exception:
return False
def __persist_user(self):
return User.objects.create_user(
username=self.__new_user_dto.get_username(),
password=self.__new_user_dto.get_password()
)
class NewUserDto:
__register_data = None
def __init__(self, request):
self.__initialize(request)
def __initialize(self, request):
self.__register_data = self.__prepare_register_data(request)
def __prepare_register_data(self, request):
data = {}
for item, value in request.items():
if 'csrfmiddlewaretoken' in item:
continue
data[item] = value
return data
def get_username(self):
return self.__register_data.get('username')
def get_password(self):
return self.__register_data.get('password')
def get_password_confirm(self):
return self.__register_data.get('password_confirm')
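# Illustrative usage sketch (an addition, not part of the original module; it
# assumes the object handed to NewUserDto behaves like request.POST, i.e. a
# mapping of submitted form fields):
def register_from_form(form_data):
    """Create a user from a mapping such as {'username': ..., 'password': ...}."""
    dto = NewUserDto(form_data)
    return Register().create_user(dto)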
|
[
"carlos.paniagua@sngular.team"
] |
carlos.paniagua@sngular.team
|
f0350d9b871c404aac008aac028f9754e35939df
|
cdf14be1c33ad4dad24dfc4823443caf629e8787
|
/reversi.py
|
fc9053bf0d31a6c6d2fd296fabaac28f20d53bb8
|
[] |
no_license
|
fhalambek/reversi
|
541f63048d57e44bdf6510ca65184c889f3fcea4
|
47dad459b8bcd8fec62614865196a11a7e2d7e00
|
refs/heads/master
| 2021-09-07T15:06:00.695585
| 2018-02-24T15:49:01
| 2018-02-24T15:49:01
| 114,654,912
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,116
|
py
|
from tkinter import *
from time import sleep
from threading import Thread
from bot import easy, greedy, weighted
from os import replace, name
from _tkinter import TclError
pilImported = False
try:
from PIL import Image, ImageTk
pilImported = True
except ImportError:
try:
try:
import pip
except ImportError:
print("pip and Pillow libraries are not installed. Install one of them to view transitions.\n" +
"Linux: sudo apt-get install python3-pip\n pip install Pillow\n" +
"Windows: pip is installed, find it among installation files, e.g.: Python/Python36-32/Scripts/")
raise ImportError
def install(package):
pip.main(['install', package])
install("Pillow")
from PIL import Image, ImageTk
pilImported = True
except PermissionError:
print("PIL not installed. Try running as administrator to view transitions.")
pilImported = False
except ImportError:
print()
pilImported = False
MARGIN_X = 10
MARGIN_Y = 10
WINDOW_DIMS = "1000x500"
WINDOW_BG = ["#00BB00", "#44FF44"]
CELL_BG = "#228822"
CELL_BG_HIGHLIGHT = "#44CC44"
CELL_SIZE = 56
DIRECTIONS = [((i//3)-1, (i%3)-1) for i in range(9)]
PLAYERS = [None, "black", "white"]
BOTS = [easy, greedy, weighted]
LANGUAGES = ("hrvatski", "English")
OPTION_BUTTON_WIDTH = 250
OPTION_BUTTON_HEIGHT = 50
blockInOut = (False, False) #program input i output
co = False #cells input
ci = False #cells output
botSpeed = .5
animationSpeed = .01
animationsEnabled = True
stopBot = True
pause = False
class Game(Frame): #glavni frame unutar kojega se sve nalazi. self.master je Tk()
def __init__(self):
Frame.__init__(self)
global IMAGES, backImage, DISKS
IMAGES = (PhotoImage(), PhotoImage(file = "res/drawables/disks/black74.png"), PhotoImage(file = "res/drawables/disks/white74.png"))
DISKS = []
for i in range(9):
DISKS.append([PhotoImage(file = "res/drawables/disks/black" + str(j) + str(i) + ".png") for j in range(7)])
DISKS.append([PhotoImage(file = "res/drawables/disks/white" + str(i) + "4.png") for i in range(7)])
backImage = PhotoImage(file = "res/drawables/back.png")
self.master.title("Reversi")
self.master.resizable(False, False)
self.master.geometry(WINDOW_DIMS)
SettingsView.loadSettings()
Game.loadStrings()
self.halves = [None, ImageView(self, position = 1, color = WINDOW_BG[0], hierarchy = (0, 0)), MenuView(master = self, position = 2, color = WINDOW_BG[1], hierarchy = (1, 0)), None]
self.pack(expand = YES, fill = BOTH)
self.master.protocol("WM_DELETE_WINDOW", lambda: closeWindow(self.master))
def switch(self, target, reverse, position): #poziva se kada trebaju mijenjati polovice ekrana
global stopBot, pause
pause = False
stopBot = True
cBlock()
if(blockInOut[0] or blockInOut[1]): return
block(o = False)
self.halves[3 - reverse * 3] = HIERARCHY[target[0]][target[1]][0](master = self, position = 3 - reverse * 3, color = WINDOW_BG[target[0]%2], hierarchy = target)
runnables = [self.frameSwapAnimationRight, self.frameSwapAnimation]
runnables[position + reverse > 1](reverse)
def frameSwapAnimation(self, reverse): #ako se mijenjaju obije polovice
def postProcessing(self, reverse):
flag = None
for i, j in enumerate(self.halves):
if(j):
again = j.replace(i + (reverse*2 - 1))
if(again[0]): flag = again[1]
del self.halves[self.halves.index(None)]
self.halves[-reverse].destroy()
self.halves[-reverse] = None
self.halves.insert(3 - 3*reverse, None)
block(0, 0)
if(flag and reverse):
self.switch(HIERARCHY[HIERARCHY[flag[0]][flag[1]][2][0]][HIERARCHY[flag[0]][flag[1]][2][1]][2], reverse, 2)
elif(flag):
self.switch((flag[0] + 1, flag[1]), 0, 2)
if(pilImported):
transition = TransitionImage(master = self,
position = 2 - reverse,
transparent = False,
hierarchy = self.halves[3*(1-reverse)].myHierarchy)
self.after(10, self.halves[2-reverse].move, 0, transition, postProcessing, self, reverse)
else:
postProcessing(self, reverse)
def frameSwapAnimationRight(self, reverse): #ako se mijenja desna polovica
def postProcessing(self, reverse):
again = self.halves[3].replace(2)
print(again)
self.halves[2].destroy()
del self.halves[2]
self.halves.append(None)
block(0, 0)
if(again[0]): self.switch((again[1][0] + 1, again[1][1]), 0, 2)
if(pilImported):
transition = TransitionImage(master = self,
position = 2,
transparent = True,
hierarchy = self.halves[3].myHierarchy,
hierarchy2 = self.halves[2].myHierarchy)
self.after(10, transition.setAlpha, 0, postProcessing, self, reverse)
else:
postProcessing(self, reverse)
def loadStrings():
if(language == "eng"):
Game.initializeStrings()
return
file = open("res/strings/" + language + ".txt", "r", encoding = "cp1250")
lines = file.readlines()
global stringsDict
for line in lines:
key, value = tuple(line.split(":")) #maknuti ovaj repr ako nam ne treba
stringsDict[key] = value[:-1].upper()
file.close()
def initializeStrings():
global stringsDict, selectBotText, mainMenuButtonText, modeMenuButtonText, botMenuButtonText
stringsDict = {"Stats":"STATS",
"Wins by player":"WINS BY PLAYER",
"Wins by color":"WINS BY COLOR",
"Disks":"DISKS",
"Main Menu":"MAIN MENU",
"Play":"PLAY",
"Mode":"MODE",
"Rules":"RULES",
"Settings":"SETTINGS",
"About":"ABOUT",
"Bot VS Bot":"BOT VS BOT",
"Human VS Bot":"HUMAN VS BOT",
"Human VS Human":"HUMAN VS HUMAN",
"Easy":"EASY",
"Medium":"MEDIUM",
"Hard":"HARD",
"Select bot 1":"SELECT BOT 1",
"Select bot 2":"SELECT BOT 2",
"Language":"LANGUAGE",
"Bot speed":"BOT SPEED",
"Animations":"ANIMATIONS",
"On":"ON",
"Off":"OFF",
"Pause":"PAUSE",
"Resume":"RESUME"}
selectBotText = ("Select bot 1", "Select bot 2")
mainMenuButtonText = ("Play", "Rules", "Settings", "About")
modeMenuButtonText = ("Bot VS Bot", "Human VS Bot", "Human VS Human")
botMenuButtonText = ("Easy", "Medium", "Hard")
class TransitionImage(Label): #overlay sa screenshotom za fade in efekat
def __init__(self, master, position, transparent, hierarchy, hierarchy2 = None):
Label.__init__(self, master, image = IMAGES[0], width = 500, height = 500, bd = 0, highlightthickness = 0, bg = WINDOW_BG[hierarchy[0]%2])
self.transparent = transparent
if(transparent):
self.lastImage = Image.open("res/drawables/ss/pic"+str(hierarchy2[0])+str(hierarchy2[1])+".png")
self.rawImage = Image.open("res/drawables/ss/pic"+str(hierarchy[0])+str(hierarchy[1])+".png")
self.sourceImage = self.rawImage.copy()
self.rawImage.close()
self.place(x = 500*(position - 1), y = 0)
def setAlpha(self, frameNumber, postProcess, master, reverse):
if(frameNumber > 50):
self.destroy()
postProcess(master, reverse)
return
if(self.transparent):
self.frameImage = ImageTk.PhotoImage(Image.blend(self.lastImage, self.sourceImage, frameNumber*1/50))
else:
self.sourceImage.putalpha(frameNumber * 5)
self.frameImage = ImageTk.PhotoImage(self.sourceImage)
self.config(image = self.frameImage)
master.after(10, self.setAlpha, frameNumber + 1, postProcess, master, reverse)
class Half(Frame): #ono sto je zajednicko svim tim frameovima/polovicama prozora
def __init__(self, master, position, color, hierarchy):
Frame.__init__(self, master, bg = color, padx = MARGIN_X, pady = MARGIN_Y)
self.place(x = (position-1) * 500, y = 0, width = 500, height = 500)
self.myHierarchy = hierarchy
self.myColor = color
self.myPosition = position
def replace(self, newPosition):
self.place(x = (newPosition - 1) * 500)
self.myPosition = newPosition
return (0,)
def move(self, frameNumber, t, pp, master, reverse):
if(frameNumber > 50):
t.setAlpha(0, pp, master, reverse)
return
self.place(x = 500*(1 - reverse) + (2*reverse - 1)*frameNumber*10, y = 0)
self.lift(t)
master.after(10, self.move, frameNumber + 1, t, pp, master, reverse)
class ImageView(Half): #na pocetnom zaslonu s lijeve strane.. tu bi mogla doci neka zgodna slika
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.picture = PhotoImage(file = "front.png")
self.label = Label(self, image = self.picture)
self.label.place(x = 0, y = 0, width = 480, height = 480)
class TextView(Half): #pravila i o igri
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.actionBar = ActionBar(self, HIERARCHY[hierarchy[0]][hierarchy[1]][2], color)
file = open("res/strings/"+HIERARCHY[hierarchy[0]][hierarchy[1]][1] + "_" + language + ".txt", "r", encoding = "cp1250")
text = ""
for line in file.readlines():
text += line
self.textMsg = Message(self, text = text, bg = color, justify = CENTER, anchor = CENTER)
self.textMsg.pack()
file.close()
class SettingsView(Half): #onaj frame s postavkama
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.actionBar = ActionBar(self, HIERARCHY[hierarchy[0]][hierarchy[1]][2], color)
self.languageLabel = Label(self, text = stringsDict["Language"], bg = color, highlightthickness = 0)
self.languageLabel.pack(pady = (50, 0))
var = StringVar()
var.set(language)
self.languageOM = OptionMenu(self, var, *LANGUAGES, command = self.omCommand)
self.languageOM.config(bg = color, highlightthickness = 0, width = 15)
self.languageOM.pack()
self.botSpeedLabel = Label(self, text = stringsDict["Bot speed"], bg = color, highlightthickness = 0)
self.botSpeedLabel.pack(pady = (20, 0))
self.botSpeedScale = Scale(self, from_ = 1,
to = 100,
orient = HORIZONTAL,
command = SettingsView.sCommand,
length = OPTION_BUTTON_WIDTH,
bg = color,
troughcolor = WINDOW_BG[1],
highlightthickness = 0,
cursor = "hand2")
self.botSpeedScale.set((1-botSpeed)*100)
self.botSpeedScale.pack()
self.animationsLabel = Label(self, text = stringsDict["Animations"], bg = color, highlightthickness = 0)
self.animationsLabel.pack(pady = (20, 0))
v = IntVar()
v.set(animationsEnabled)
self.rb = []
for i, j in enumerate(("Off", "On")):
self.rb.append(Radiobutton(self,
text = stringsDict[j],
variable = v,
value = i,
command = lambda: SettingsView.rbCommand(v.get()),
indicatoron = 0,
bg = color,
width = 20,
selectcolor = WINDOW_BG[1]))
self.rb[i].pack()
def rbCommand(var): #kad se stisne na radiobutton
global animationsEnabled
animationsEnabled = var
SettingsView.saveSettings()
def sCommand(var): #kad se pomakne onaj slider
global botSpeed
botSpeed = 1-(int(var)/100)
SettingsView.saveSettings()
def refreshLanguage(self):
self.actionBar.refreshLanguage()
for i, j in zip(("Language", "Bot speed", "Animations", "Off", "On"), (self.languageLabel, self.botSpeedLabel, self.animationsLabel, self.rb[0], self.rb[1])):
j.config(text = stringsDict[i])
def omCommand(self, var): #kad se u optionMenuu promijeni jezik
global language
language = var[:3].lower()
Game.loadStrings()
for i in self.master.halves:
if i: i.refreshLanguage()
SettingsView.saveSettings()
def loadSettings():
file = open("Preferences/Settings.txt", "r")
global language, botSpeed, animationsEnabled
for i in file.readlines():
key, value = tuple(i.split(":"))
if(key == "language"):
language = value[:-1]
elif(key == "botSpeed"):
botSpeed = float(value[:-1])
elif(key == "animations"):
animationsEnabled = ("On" in value)
file.close()
return
def saveSettings():
option = "Off\n"
if(animationsEnabled):
option = "On\n"
text = "language:" + language + "\nbotSpeed:" + str(botSpeed) + "\nanimations:" + option
file = open("Preferences/SettingsTemp.txt","w")
file.write(text)
file.close()
replace("Preferences/SettingsTemp.txt", "Preferences/Settings.txt")
return
class MenuView(Half): #oni frameovi s nekoliko gumba za odabir
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.actionBar = ActionBar(self, HIERARCHY[hierarchy[0]][hierarchy[1]][2], color)
self.optionButtons = []
for i in range(len(HIERARCHY[hierarchy[0]][hierarchy[1]][-1])):
self.optionButtons.append(Button(self, text = stringsDict[HIERARCHY[hierarchy[0]][hierarchy[1]][-1][i]], highlightthickness = 0, font = ("Verdana", 14)))
self.optionButtons[i].targetFrame = HIERARCHY[hierarchy[0]][hierarchy[1]][3][i]
self.optionButtons[i].bind("<Button-1>", self.buttonClick)
self.optionButtons[i].place(x = (500 - 2*MARGIN_X) // 2, y = int((i+1)*(500 - 2*MARGIN_Y)/(len(HIERARCHY[hierarchy[0]][hierarchy[1]][-1])+1)), width = OPTION_BUTTON_WIDTH, height = OPTION_BUTTON_HEIGHT, anchor = CENTER)
def replace(self, newPosition):
super(MenuView, self).replace(newPosition)
self.actionBar.enableButton(not(newPosition - 1), 0)
return (0,)
def buttonClick(self, event):
if(blockInOut[0] or blockInOut[1]):
return
if(self.myHierarchy[0] < 3):
PM.bots = [None, 8, 8]
for i, j in enumerate(botMenuButtonText):
if(event.widget.cget("text") == stringsDict[j]):
PM.bots[-1] = 8
PM.bots[selectBotText.index(HIERARCHY[self.myHierarchy[0]][self.myHierarchy[1]][1]) + 1] = i
self.master.switch(event.widget.targetFrame, 0, self.myPosition)
def refreshLanguage(self):
self.actionBar.refreshLanguage()
for i in range(len(self.optionButtons)):
self.optionButtons[i].config(text = stringsDict[HIERARCHY[self.myHierarchy[0]][self.myHierarchy[1]][-1][i]])
class GameView(Half): #prikazuje plocu
stats = None
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
global table, winCount
table = []
winCount = [[0, 0, 0], [0, 0, 0]]
for i in range(8):
table.append([])
for j in range(8):
table[i].append(Cell(self, (i, j), 0))
def replace(self, newPosition):
super(GameView, self).replace(newPosition)
if(newPosition == 2):
global stopBot
resetBoard()
stopBot = False
PM.startGame()
return (0,)
def setStats(stats):
GameView.stats = stats
class StatsView(Half): #ono s lijeve strane ploce sto prikazuje info o igri
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.actionBar = ActionBar(self, HIERARCHY[hierarchy[0]][hierarchy[1]][2], color)
self.turnFrame = Frame(self, bg = color)
self.labels = [None, TurnLabel(self.turnFrame, 1), TurnLabel(self.turnFrame, -1)]
self.turnFrame.pack(side = TOP, expand = YES, fill = X)
self.charts = [ChartFrame(self, i, color) for i in (0, 1, 2)]
self.pauseButton = Button(self, text = stringsDict["Pause"].upper(), command = self.pause, highlightthickness = 0, height = 1, width = 10, font = ("Verdana", 15))
self.pauseButton.place(x = 240, y = 120, anchor = CENTER)
GameView.setStats(self)
def pause(self):
global pause
pause = not(pause)
text = ("Pause", "Resume")
self.pauseButton.config(text = stringsDict[text[pause]])
def upDate(self):
if(blockInOut[1]):
return
try:
for i in (1, -1): self.labels[i].upDate()
for i in self.charts: i.upDate()
except(TclError, RuntimeError):
return
def replace(self, newPosition): #sto se dogodi kad mijenja poziciju na ekranu
super(StatsView, self).replace(newPosition)
self.actionBar.enableButton(not(newPosition - 1), 0)
return (newPosition == 2, self.myHierarchy)
Game.initializeStrings()
HIERARCHY = (((ImageView, "", (0, 0), (1, 0)),),
((MenuView, "Main Menu", (0, 0), ((2, 0), (2, 1), (2, 2), (2, 3)), mainMenuButtonText),),
((MenuView, "Mode", (1, 0), ((3, 0), (3, 1), (3, 2)), modeMenuButtonText), (TextView, "Rules", (1, 0)), (SettingsView, "Settings", (1, 0)), (TextView, "About", (1, 0))),
((MenuView, "Select bot 1", (2, 0), ((4, 0),)*4, botMenuButtonText), (MenuView, "Select bot 1", (2, 0), ((4, 1),)*4, botMenuButtonText), (StatsView, "Stats", (2, 0), (4, 2))),
((MenuView, "Select bot 2", (3, 0), ((5, 0),)*4, botMenuButtonText), (StatsView, "Stats", (3, 1), (5, 1)), (GameView, "Human VS Human", (3, 2))),
((StatsView, "Stats", (4, 0), (6, 0)), (GameView, "Human VS Bot", (4, 1))),
((GameView, "Bot VS Bot", (5, 0)),))#struktura programa (klasa, naslov, kamoNazad[, kamoNaprijed, stringoviZaGumbePremaNaprijed])
class PM(object): #brine o tijeku igre, koji su botovi, tko je na redu itd.
player = 1
bots = [None, 8, 8]
bot = 8
seatChanged = 1
def switchPlayer(newPlayer):
PM.player = newPlayer
PM.bot = PM.bots[newPlayer*PM.seatChanged]
def startGame():
if(PM.bots != [None, 8, 8]):
myThread = Thread(target = PM.runnable)
myThread.start()
def runnable():
while(not(blockInOut[1] or stopBot)):
if(botSpeed or animationsEnabled):
sleep(botSpeed + animationsEnabled/200)
if(blockInOut[0] or ci or pause):
continue
if(PM.bot != 8 and not(blockInOut[1]) and len(Cell.availableCoordinates[PM.player])): #kompleksnost len je O(1), pa se mogu razbacivati ovako njome
x, y = BOTS[PM.bot](table, Cell.availableCoordinates[PM.player], PM.player, -PM.player)
cellPress(x, y)
def changeSeats():
PM.seatChanged *= -1
class Cell(Button):
availableCoordinates = [[],[],[]]
def __init__(self, master, coordinates, fill):
Button.__init__(self,
master = master,
image = IMAGES[fill],
width = CELL_SIZE,
height = CELL_SIZE,
bg = CELL_BG, bd = 1,
highlightthickness = name == "nt")
self.bind("<Button>", cellPress)
self.coordinates = coordinates
self.reset()
self.grid(row = coordinates[0], column = coordinates[1])
def switch(self, fill): #mijenja boju polja
self.fill = fill
try:
self.config(image = IMAGES[fill])
except(TclError, RuntimeError):
return
def reset(self):
self.availableCoordinates = [[],[],[]] #ovo mozda i ne treba, dosta memorije uzima - tu je za svaki slucaj
self.lenAC = [[0] * 9, [0] * 9, [0] * 9]
if(self.coordinates == (3, 3) or self.coordinates == (4, 4)):
self.switch(-1)
elif(self.coordinates == (3, 4) or self.coordinates == (4, 3)):
self.switch(1)
else:
self.switch(0)
def p(x, y): #pazi na rub ploce
if (x >= 0 and y >= 0 and x < 8 and y < 8):
return True
return False
def block(i = True, o = True): #blokira sav IO programa
global blockInOut
blockInOut = (i, o)
def cBlock(i = True, o = True): #blokira klikanje i bojanje celija
global ci, co
ci = i
co = o
def closeWindow(window): #poziva se kad netko stisne X gumb gore desno, gasi program
cBlock()
block()
for i in range(25):
window.attributes("-alpha", 1-i/25)
sleep(.03)
window.destroy()
def getAvailableCoordinates(): #totalno neoptimizirana funkcija, ali zanemarivo - slobodno optimiziraj ako ti se da xD
Cell.availableCoordinates = [[],[],[]] #resetiraj matricu u kojoj su spremljena ona highlightana polja
for i in range(8):
for j in range(8): #za svako polje u tablici
table[i][j].availableCoordinates = [[],[],[]] #resetiraj polja koja pritisak na to polje mijenja
table[i][j].lenAC = [[0] * 9, [0] * 9, [0] * 9] #resetiraj duljine po smjerovima
if(table[i][j].fill == 0): #ako je polje prazno
for r, s in DIRECTIONS: #za svaki smjer
if p(i + r, j + s) and table[i + r][j + s].fill: #ako nije preko ruba to polje i nije prazno
for k in (-1, 1): #za obije boje
temp = len(table[i][j].availableCoordinates[k]) #duljina trenutne liste s poljima koja ce se obojati pritiskom na trenutno polje
table[i][j].availableCoordinates[k] += getCellsToColor((r, s), (i, j), k) #ubaci u tu listu polja koja ce se obojati za ovo polje, ovaj smjer i ovu boju
table[i][j].lenAC[k][DIRECTIONS.index((r, s))] = len(table[i][j].availableCoordinates[k]) - temp #broj polja u ovom smjeru
if(len(table[i][j].availableCoordinates[k]) > 0 and not((i, j) in Cell.availableCoordinates[k])): #ako postoji nesto sto ce se obojati pritiskom na ovo polje
Cell.availableCoordinates[k].append((i, j)) #dodaj polje u listu polja za highlightanje
def markAvailableCoordinates(mark = True): #oznacava polja na koja se moze stati
if mark: #mark znaci oznacujemo li nove ili oDznacujemo stare
bgd = CELL_BG_HIGHLIGHT
else:
bgd = CELL_BG
if(botSpeed < 0.125 and PM.bot != 8): return
try:
for i in Cell.availableCoordinates[PM.player]:
table[i[0]][i[1]].config(bg = bgd)
if not(mark):
for i in Cell.availableCoordinates[-PM.player]:
table[i[0]][i[1]].config(bg = bgd)
except(TclError, RuntimeError):
return
def resetBoard(): #prije pocetka svake partije resetira/postavlja plocu
def createDisksAnimation():
cBlock(o = False)
try:
createDisks(((3, 3, -1), (4, 4, -1), (3, 4, 1), (4, 3, 1)))
getAvailableCoordinates()
markAvailableCoordinates()
GameView.stats.upDate()
except(TclError, RuntimeError):
return
cBlock(False, False)
PM.changeSeats()
PM.switchPlayer(1)
for i in range(8):
for j in range(8):
table[i][j].reset()
if(animationsEnabled and name != "posix"):
cdaThread = Thread(target = createDisksAnimation)
cdaThread.start()
else:
createDisksAnimation()
def getScore(): #ovo broji diskove na ploci
l = [0, 0, 0]
for i in range(8):
for j in range(8):
l[table[i][j].fill] += 1
return l
def getFrame(fn, p, d): #ovo se koristi u cellAnimation - daje sliku za neki frame u animaciji
if(fn == 7):
return IMAGES[p]
else:
if(d == 4 and (p == -1 or p == 2)):
return DISKS[-1][fn]
return DISKS[(d-4)*p+4][(fn-3)*p+3]
def gameOver(): #kad je jedna partija gotova, ovo biljezi tko je pobjedio i poziva funkciju za resetirati plocu
global winCount
score = getScore()
if(score[-1] > score[1]):
winColor = -1
elif(score[1] > score[-1]):
winColor = 1
else:
winColor = 0
winCount[0][winColor] += 1
winCount[1][winColor * PM.seatChanged] += 1
resetBoard()
def cellPress(event, y = 8): #kad se klikne na neko polje
if(y == 8):
coordinates = event.widget.coordinates
else: coordinates = event, y
if(coordinates in Cell.availableCoordinates[PM.player] and not(blockInOut[0] or blockInOut[1] or (y == 8 and PM.bot != 8) or ci or co or pause)):
cBlock(o = False)
markAvailableCoordinates(False)
if(animationsEnabled):
cellAnimationThread = Thread(target = cellAnimation, args = (coordinates,
table[coordinates[0]][coordinates[1]].availableCoordinates[PM.player],
table[coordinates[0]][coordinates[1]].lenAC[PM.player]))
cellAnimationThread.start()
else:
table[coordinates[0]][coordinates[1]].switch(PM.player)
for i in range(max(table[coordinates[0]][coordinates[1]].lenAC[PM.player])):
for j in range(9):
if(i < table[coordinates[0]][coordinates[1]].lenAC[PM.player][j] and not(blockInOut[1] or co)):
temp = [coordinates[k] + DIRECTIONS[j][k]*(i+1) for k in (0, 1)]
table[temp[0]][temp[1]].switch(PM.player)
PM.switchPlayer(-PM.player)
getAvailableCoordinates()
markAvailableCoordinates()
GameView.stats.upDate()
if(not(len(Cell.availableCoordinates[PM.player]) or len(Cell.availableCoordinates[-PM.player]))):
gameOver()
return
elif(not(len(Cell.availableCoordinates[PM.player]))):
PM.switchPlayer(-PM.player)
markAvailableCoordinates()
cBlock(False, False)
def createDisks(diskInfo): #postavlja novi disk na plocu
if(animationsEnabled):
for i in range(len(DISKS[0])):
for j in diskInfo:
if(not(blockInOut[1] or co)):
table[j[0]][j[1]].config(image = getFrame(fn = i, p = j[2], d = 4))
sleep(.03)
if(blockInOut[1] or co): return
for i in diskInfo:
table[i[0]][i[1]].switch(i[2])
def cellAnimation(coordinates, cellsToColor, directionLengths): #animacija zamjene diskova
createDisks(((coordinates[0], coordinates[1], PM.player),))
for i in range(max(directionLengths)):
for frameNumber in range(len(DISKS[0])+1):
for j in range(9):
if(i < directionLengths[j] and not(blockInOut[1] or co)):
temp = [coordinates[k] + DIRECTIONS[j][k]*(i+1) for k in (0, 1)]
try:
table[temp[0]][temp[1]].config(image = getFrame(fn = frameNumber, p = PM.player, d = j))
except(TclError, RuntimeError):
return
if(frameNumber == 7):
table[temp[0]][temp[1]].fill = PM.player
sleep(animationSpeed)
PM.switchPlayer(-PM.player)
getAvailableCoordinates()
markAvailableCoordinates()
GameView.stats.upDate()
if(not(len(Cell.availableCoordinates[PM.player]) or len(Cell.availableCoordinates[-PM.player]))):
gameOver()
return
elif(not(len(Cell.availableCoordinates[PM.player]))):
PM.switchPlayer(-PM.player)
markAvailableCoordinates()
cBlock(False, False)
def getCellsToColor(direction, coordinates, fill): #vraca listu polja koja ce se obojati pritiskom na odredeno polje
if(direction == (0, 0)):
return 0
cR, cC = coordinates[0] + direction[0], coordinates[1] + direction[1]
c = []
while (p(cR, cC) and table[cR][cC].fill == fill * -1):
c.append((cR, cC,))
cR += direction[0]
cC += direction[1]
if(p(cR, cC) and table[cR][cC].fill):
return c
return []
class ActionBar(Frame): #onaj dio na vrhu MenuViewa i StatsViewa gdje pise naslov i gdje je gumb za nazad
def __init__(self, master, targetFrame, color):
Frame.__init__(self, master)
self.config(bg = color)
self.backButton = Label(self, bg = color, image = backImage, highlightthickness = 0, relief = FLAT, bd = 0)
self.backButton.bind("<Button-1>", lambda f: self.master.master.switch(targetFrame, 1, self.master.myPosition))
self.nameLabel = Label(self, text = stringsDict[HIERARCHY[self.master.myHierarchy[0]][self.master.myHierarchy[1]][1]], bg = color)
self.nameLabel.pack(side = TOP, expand = NO, fill = Y)
self.place(x = 0, y = 0, width = 480, height = 480)
self.myColor = color
self.backEnabled = False
if(pilImported):
self.rawImage = Image.open("res/drawables/back" + str(WINDOW_BG.index(color)) + ".png")
self.sourceImage = self.rawImage.copy()
self.rawImage.close()
def enableButton(self, enabled, frameNumber):
if(pilImported and (enabled != self.backEnabled)):
if(frameNumber > 25):
self.backEnabled = enabled
if(not(enabled)):
self.backButton.place_forget()
return
self.sourceImage.putalpha((25*(1-enabled) + (2*enabled-1)*frameNumber)*10)
self.im = ImageTk.PhotoImage(self.sourceImage)
self.backButton.config(image = self.im)
if(frameNumber == 0):
self.backButton.place(x = 0, y = 0, height = 48, width = 48)
self.after(10, self.enableButton, enabled, frameNumber+1)
else:
if(enabled):
self.backButton.place(x = 0, y = 0)
else:
self.backButton.place_forget()
def refreshLanguage(self):
self.nameLabel.config(text = stringsDict[HIERARCHY[self.master.myHierarchy[0]][self.master.myHierarchy[1]][1]])
class TurnLabel(Label): #oni kvadrati crni i bijeli koji oznacuju tko je na redu
def __init__(self, master, color):
Label.__init__(self, master, bg = PLAYERS[color], image = IMAGES[0],
width = 50, height = 50, highlightcolor = PLAYERS[color], highlightbackground = PLAYERS[color])
if (color > 0):
self.pack(side = LEFT)
else:
self.pack(side = RIGHT)
self.myColor = color
def upDate(self):
if(co or blockInOut[1]):
return
if(self.myColor == PM.player):
self.config(highlightthickness = 5)
self.pack(padx = 5, pady = 5)
else:
self.config(highlightthickness = 0)
self.pack(padx = 10, pady = 10)
class ChartFrame(Frame): #dio StatsViewa gdje su chartovi
chartNames = ["Disks", "Wins by color", "Wins by player"]
def __init__(self, master, order, color):
Frame.__init__(self, master, bg = color)
self.chartName = Label(self, text = stringsDict[ChartFrame.chartNames[order]], bg = color, highlightthickness = 0)
self.chart = Frame(self, height = 50, bg = "white")
self.blackLabel = Label(self.chart, bg = "black", bd = 0, highlightthickness = 0, height = 50, width = 0, image = IMAGES[0])
self.blackLabel.pack(side = LEFT, fill = Y)
self.chartName.pack(side = TOP, fill = X, anchor = W)
self.chart.pack(side = TOP, fill = X)
self.pack(side = TOP, fill = X)
self.order = order
def upDate(self):
if (not(self.order)):
score = getScore()
else:
score = winCount[self.order - 1]
try:
self.blackLabel.config(width = int((500 - 2*MARGIN_X)*score[1]/(score[-1]+score[1])), bg = "black")
except(ZeroDivisionError):
self.blackLabel.config(width = 0, bg = "white")
Game().mainloop()
'''
print()
print(table[event.widget.grid_info()["row"]][event.widget.grid_info()["column"]].availableCoordinates[player])
print(table[event.widget.grid_info()["row"]][event.widget.grid_info()["column"]].lenAC[player])
if __name__ == "__main__":
root =
Game()
root.mainloop()
dok drzis polje ono se mrvicu smanji
kad pustis pretvori se u tvoju boju, a onda se ostala polja mijenjaju sirenje boje
'''
#koristim None, 1 i -1 radi jednostavnosti - da manje bugova ima
|
[
"noreply@github.com"
] |
fhalambek.noreply@github.com
|
22aa0e448f5799c401421eb60b5b4237dab843a8
|
e655fafdf3a675d917135f333d6fa3d7fb9c59f6
|
/Exercícios/ex047.py
|
d29906501bb3ed21ba27de51cd130cdf17b1a1f1
|
[] |
no_license
|
Sabrina-AP/python-curso_em_video
|
7b595242a174f10e7e903cb73c8ea395d01ba609
|
43fef2a0aa67d2a67181bc5b73fb3b5de163b1d9
|
refs/heads/main
| 2023-03-25T22:52:42.811127
| 2021-03-23T17:49:44
| 2021-03-23T17:49:44
| 350,805,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
# counting the even numbers from 1 to 50
for i in range(1,51):
if i%2==0:
print(i, end=' ')
print('Acabou')
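# Illustrative alternative (an addition, not part of the original exercise):
# range's step argument yields the even numbers directly, so no modulo test is
# needed.
def even_numbers_with_step():
    for i in range(2, 51, 2):
        print(i, end=' ')
    print('Acabou')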
|
[
"noreply@github.com"
] |
Sabrina-AP.noreply@github.com
|
ecc65f736adcdf7ef2646a8f02c86e70de9ca226
|
1528b9eff25b03adf3c24229d08555aeaea5ac2b
|
/Client/Libraries/__init__.py
|
eb5d1888bc5f5b22e95fdccb22debe6f4fd1fcf0
|
[] |
no_license
|
Erez-Atir/Kahoot
|
b0cb40c0fefa86af6668a00be48807314955aa05
|
7fe5ba5df7e1ce0a7d10a6e84049b94043e2d42b
|
refs/heads/master
| 2021-08-17T02:13:32.473433
| 2020-05-24T19:39:58
| 2020-05-24T19:39:58
| 184,925,432
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,221
|
py
|
IP = None
my_socket = None
import os
import sys
sys.path.insert(0, os.getcwd()+'/files')
sys.dont_write_bytecode = True
import ServerDitection
import socket
import pygame
import textbox
import subprocess
RED = (204, 0, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (0, 153, 0)
BLUE = (53, 119, 252)
PURPLE = (176, 71, 246)
GREY = (85, 77, 77)
ORANGE = (255, 181, 30)
def resfix(x=None, y=None):
"""
:param x: the x coordinate or a Pygame image. If sent None means only y need a conversion
:param y: the y coordinate. can be not sent for only x conversion.
:return: The new coordinates on the new screen with the same proportions. Tuple for (x,y). int for only one number.
"""
global WIDTH, HEIGHT
if x is not None:
if type(x) == type(42):
if y is not None:
return int(x/1500.*WIDTH), int(y/800.*HEIGHT)
return int(x/1500.*WIDTH)
else:
sizee = x.get_rect().size
return
if y is not None:
return int(y/800.*HEIGHT)
return None
if True:
IP = ServerDitection.server_scout()
if IP:
my_socket = socket.socket()
my_socket.connect((IP, 23))
else:
screen = pygame.display.set_mode((1500, 800))
WIDTH, HEIGHT = pygame.display.Info().current_w, pygame.display.Info().current_h
pygame.display.set_caption("Kaboot")
screen.fill(PURPLE)
a = textbox.OutputBox(screen=screen, text="No Game\nRunning!", size=resfix(650, 750), place=resfix(825, 0), color=None,
border_width=0, border_color=None, text_color=RED, font="files\\montserrat\\Montserrat-Black.otf")
b = textbox.OutputBox(screen=screen, text="Ask your teacher to run a game and then try again", size=resfix(650, 750), place=resfix(825, 0), color=None,
border_width=0, border_color=None, text_color=BLACK, font="files\\montserrat\\Montserrat-Black.otf")
c = textbox.OutputBox(screen=screen, text=" EXIT ", size=resfix(310, 100), place=resfix((825+(825+650))/2-310/2, 600), color=WHITE,
border_width=0, border_color=BLACK, text_color=BLACK, font="files\\montserrat\\Montserrat-Black.otf")
img = pygame.transform.scale(pygame.image.load("files\\sadog.jpg"), (int(WIDTH*1.066), HEIGHT))
finish = False
while not finish:
mouse = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:#user presses the X
exit()
if event.type == pygame.KEYDOWN:
# If pressed key is ESC quit program
if event.key == pygame.K_ESCAPE:
exit()
if resfix((825+(825+650))/2-310/2+310) > mouse[0] > resfix((825+(825+650))/2-310/2) and resfix(None, 600+100) > mouse[1] > resfix(None, 600):
c.border_width = 5
if pygame.mouse.get_pressed()[0]:
sys.exit()
else:
c.border_width = 0
screen.blit(img, (0, 0))
a.draw()
b.draw()
c.draw()
pygame.display.flip()
|
[
"50252440+Erez-Atir@users.noreply.github.com"
] |
50252440+Erez-Atir@users.noreply.github.com
|
d8b576d9d8573f3e6f58e5e34ae445177ff8f207
|
62c9d736470c3f535de4fe5be56fea3334a081c4
|
/scripts/download-data.py
|
9ab361bf04e11aa1c721bea228c4e375dc378f4c
|
[] |
no_license
|
abachman/interactive-spaces-1
|
646f002b8136d054224c555cfb96cfa64b61babf
|
494e8dd82dc3848ad01a583453bd4f94aaff4508
|
refs/heads/master
| 2022-03-17T06:31:46.245542
| 2019-11-20T16:19:08
| 2019-11-20T16:19:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
# download all data from the 4 MICAVIBE feeds
import sys
import os
import re
import time
import json
from io import StringIO
if sys.version_info < (3, 0):
print("make sure you're using python3 or python version 3.0 or higher")
    sys.exit(1)
import urllib.parse
import http.client
def parse_next_value(instr):
if not instr:
return None
for link in [h.strip() for h in instr.split(';')]:
if re.match('rel="next"', link):
try:
nurl = re.search("<(.+)>", link)[1]
return nurl
except:
print('no URL found in link header', link)
return None
def download(url, out_file, label):
source = urllib.parse.urlparse(url)
conn = http.client.HTTPSConnection(source.hostname, source.port)
conn.request("GET", url)
response = conn.getresponse()
body = response.read()
body_json = json.loads(body)
if len(body_json) > 0:
for record in body_json:
ts = record['created_epoch']
line = "{},{}\n".format(record['created_epoch'], record['value'])
out_file.write(line)
print(
"< {} {} ending on {} {} ({} total)".format(
len(body_json),
label,
record['id'], record['created_at'],
response.getheader('X-Pagination-Total')
)
)
return parse_next_value(response.getheader('Link'))
return None
def get_all_data(url, file_path, label):
data = StringIO()
next_page = download(url, data, label)
while next_page:
time.sleep(1)
next_page = download(next_page, data, label)
with open(file_path, 'w') as out_file:
out_file.write(data.getvalue())
data.close()
if __name__ == "__main__":
#
# https://io.adafruit.com/api/v2/mica_ia/feeds/mood/data
# https://io.adafruit.com/api/v2/mica_ia/feeds/split-motion/data
# https://io.adafruit.com/api/v2/mica_ia/feeds/sound/data
# https://io.adafruit.com/api/v2/mica_ia/feeds/sound-2/data
#
destination = "/var/www/app/shared/data/"
collections = (
("Mood", "https://io.adafruit.com/api/v2/mica_ia/feeds/mood/data", destination + 'mood.csv'),
("Motion", "https://io.adafruit.com/api/v2/mica_ia/feeds/split-motion/data", destination + 'motion.csv'),
("Sound 1", "https://io.adafruit.com/api/v2/mica_ia/feeds/sound/data", destination + 'sound-1.csv'),
("Sound 2", "https://io.adafruit.com/api/v2/mica_ia/feeds/sound-2/data", destination + 'sound-2.csv'),
)
for label, url, filepath in collections:
print("---------------------------------------------------------")
print(time.time(), "getting", url, "into", filepath)
print("---------------------------------------------------------")
get_all_data(url, filepath, label)
|
[
"adam.bachman@gmail.com"
] |
adam.bachman@gmail.com
|
856b7d69a10ecf05d2a77cc576da385d6056ddd8
|
1ab2c3dbe3b8323c9167236160af263daca0ec5d
|
/maxmara_image_hashes.py
|
66371f9a6bec550085bf0e756cc82256f430e9eb
|
[
"MIT"
] |
permissive
|
Ziyu-Chen/image_hashing
|
d2a79ff610bf5bdfb35a451a05d99bdf95bb64ec
|
d48443b59959f2f785b864908e9b0979de59fe7a
|
refs/heads/master
| 2022-11-30T01:04:41.453363
| 2020-07-30T01:55:01
| 2020-07-30T01:55:01
| 274,356,173
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
import imagehash
import pandas as pd
import os
from PIL import Image
from collections import defaultdict
from square_crop import square_crop
directory_path = '/Users/Ziyu/OneDrive - Clarivate Analytics/Desktop/weekendmaxmara_images/'
data_path = '/Users/Ziyu/OneDrive - Clarivate Analytics/Desktop/weekendmaxmara_images/hashes.csv'
data_dict = defaultdict(list)
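# For each image in the directory: crop it to a square, then compute four
# perceptual hashes (average, difference, perceptual and wavelet) per image.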
for image_name in os.listdir(directory_path):
image_path = directory_path + image_name
try:
with Image.open(image_path) as image:
image = square_crop(image)
ahash = imagehash.average_hash(image)
dhash = imagehash.dhash(image)
phash = imagehash.phash(image)
whash = imagehash.whash(image)
data_dict['image_name'].append(image_name)
data_dict['ahash'].append(ahash)
data_dict['dhash'].append(dhash)
data_dict['phash'].append(phash)
data_dict['whash'].append(whash)
print('Finished No. %s' % image_name)
except Exception:
continue
data = pd.DataFrame(data_dict)
data.to_csv(data_path)
|
[
"zc839@nyu.edu"
] |
zc839@nyu.edu
|
246673571092c9cb746b6d104d3951ebd7995526
|
d7aee9bee25dc3c1665fa5f0eb735d0ad3eb78f1
|
/tests/test_parser.py
|
cfa1428f879ce72c29e67b6aa61d0e75564c3354
|
[] |
no_license
|
hariton27sy/exeparser
|
bd1572c45b2ea0a6c663c12a6bab843d3bd6064d
|
b5eb7e5bd13c43645db62be45f202a30dbb11ced
|
refs/heads/master
| 2022-12-23T11:56:58.437613
| 2022-12-14T19:34:01
| 2022-12-14T19:34:01
| 200,896,027
| 0
| 0
| null | 2020-02-07T18:21:07
| 2019-08-06T17:29:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,769
|
py
|
import os
import sys
import unittest
PARENT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir)
sys.path.append(PARENT_DIR)
import core.exefile as x
def full_path(path):
return os.path.join(PARENT_DIR, path)
class TestOnFileQoobExe(unittest.TestCase):
path = 'examples/qoob.exe'
def setUp(self):
self.file = x.ExeFile(full_path(self.path))
def test_rva_to_raw(self):
expected = 0x6e00
res = self.file.rva_to_raw(0x13000)
self.assertEqual(expected, res[1])
def test_rva_to_raw2(self):
expected = 0x6e02
res = self.file.rva_to_raw(0x13002)
self.assertEqual(expected, res[1])
def test_resources(self):
expected = ('+ root\n| + ICON\n| | + 1\n| + RCDATA\n| | + 2\n| | + 8\n'
'| | + 10\n| | + 17\n| | + 18\n| | + 20\n| | + 21\n'
'| | + 30\n| | + 101\n| | + 102\n| | + 103\n| | + 104\n'
'| + GROUP_ICON\n| | + 1\n| + VERSION\n| | + 1')
actual = str(self.file.resources())
self.assertEqual(expected, actual)
def test_no_export_table(self):
actual = self.file.export_table()
self.assertEqual('', str(actual))
def test_relocations(self):
actual = self.file.relocations()
self.assertIsNone(actual)
def test_raw_section_header(self):
expected = (b'<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00|p@\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00')
actual = b"".join(self.file.raw_section_data(2))
self.assertEqual(expected, actual)
class TestOnFileApp2Exe(unittest.TestCase):
path = 'examples/App2.exe'
def setUp(self):
self.file = x.ExeFile(full_path(self.path))
def test_relocations(self):
expected = ('BASE RELOCATIONS:\n 0x2000 RVA 12 SizeOfBlock 2 Count '
'of relocations\n 0x0006F4 HIGHLOW\n 0x000000 '
'ABSOULUTE')
actual = self.file.relocations()
self.assertEqual(expected, str(actual))
def test_import_table(self):
expected = {
'originalFirstThunk': 9927,
'timeDateStamp': 0,
'forwarderChain': 0,
'name': "mscoree.dll",
'firstThunk': 8192
}
actual = self.file.import_table()
self.assertEqual(1, len(actual.table))
for field in expected:
self.assertEqual(expected[field], actual.table[0][field])
self.assertEqual(1, len(actual.table[0]['functions']))
def test_resources(self):
expected = """+ root\n| + VERSION\n| | + 1\n| + MANIFEST\n| | + 1"""
actual = self.file.resources()
self.assertEqual(expected, str(actual))
class TestOnFileFirefox2Exe(unittest.TestCase):
path = 'examples/firefox2.exe'
def setUp(self):
self.file = x.ExeFile(full_path(self.path))
def test_data_directory(self):
expected = [(b'\x05\x04\x04\x00', b'\xcf\x07\x00\x00'),
(b'\xd4\x0b\x04\x00', b'h\x01\x00\x00'),
(b'\x00\x80\x04\x00', b'\xb0%\x03\x00'),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
(b'\x00\x80\x07\x00', b' \x1e\x00\x00'),
(b'\x00\xb0\x07\x00', b"p'\x00\x00"),
(b'j\xfb\x03\x00', b'\x1c\x00\x00\x00'),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
(b'\xd4\xcc\x03\x00', b'\x18\x00\x00\x00'),
(b'\xb8\xb0\x03\x00', b'\xa0\x00\x00\x00'),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
(b'\x8c\x12\x04\x00', b'P\x05\x00\x00'),
(b'\xa4\x00\x04\x00', b'\xe0\x00\x00\x00'),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')]
actual = self.file.optional_header['dataDirectory']
self.assertEqual(16, len(actual))
self.assertEqual(expected, actual)
def test_export_table(self):
with open(full_path('tests/firefox2_expected/exportTable.txt')) as f:
expected = f.read()
actual = self.file.export_table()
self.assertEqual(expected, str(actual))
def test_dependents(self):
expected = {
"ADVAPI32.dll",
"KERNEL32.dll",
"MSVCP140.dll",
"VCRUNTIME140.dll",
"api-ms-win-crt-convert-l1-1-0.dll",
"api-ms-win-crt-environment-l1-1-0.dll",
"api-ms-win-crt-filesystem-l1-1-0.dll",
"api-ms-win-crt-heap-l1-1-0.dll",
"api-ms-win-crt-locale-l1-1-0.dll",
"api-ms-win-crt-math-l1-1-0.dll",
"api-ms-win-crt-runtime-l1-1-0.dll",
"api-ms-win-crt-stdio-l1-1-0.dll",
"api-ms-win-crt-string-l1-1-0.dll",
"api-ms-win-crt-time-l1-1-0.dll",
"api-ms-win-crt-utility-l1-1-0.dll",
"mozglue.dll",
"ntdll.dll"
}
actual = self.file.import_table().get_dependencies()
self.assertEqual(expected, set(actual))
# That no repeats
self.assertEqual(len(set(actual)), len(actual))
def test_get_when_resource_is_png(self):
resources = self.file.resources()
resource = resources.table.elements[0].elements[11].elements[0]
actual = self.file.get_resource(resource)
self.assertEqual(b"\x89PNG", actual[:4])
class TestCommonRaises(unittest.TestCase):
def test_file_not_found(self):
self.assertRaises(FileNotFoundError, lambda: x.ExeFile(
full_path('WrongPath/nofile.exe')))
def test_wrong_file_format(self):
with self.assertRaises(x.BrokenFileError) as excInfo:
x.ExeFile(full_path('index.py'))
self.assertIn('Broken file. No "MZ" in begin', str(excInfo.exception))
def test_no_mz_signature(self):
with self.assertRaises(x.BrokenFileError) as exc:
x.ExeFile(full_path('examples/NoMZSignature.exe'))
self.assertIn('Broken file. No "MZ" in begin', str(exc.exception))
def test_no_pe_signature(self):
with self.assertRaises(x.BrokenFileError) as exc:
x.ExeFile(full_path('examples/NoPESignature.exe'))
self.assertIn('Broken File. No "PE\\0\\0" in begin of PEHeader',
str(exc.exception))
if __name__ == "__main__":
unittest.main()
|
[
"hariton27sy@gmail.com"
] |
hariton27sy@gmail.com
|
fca6a52051658560f6a522ad47c53141010c8e4c
|
1396656a60be72e0dbe42a70750a7b775bad40bc
|
/CodeWars/GreatestWarrior.py
|
9b9eb5a08f15cd46f085a42b4158a3a2d56bf188
|
[] |
no_license
|
dianayuying/Python
|
f3b4bf9d2f9866869f811f9f327f1ccdc0de40a9
|
37d2e5b87261b4cc2d05d4e8aeacee2cea87cda2
|
refs/heads/master
| 2023-04-27T07:23:20.339945
| 2018-12-23T20:26:49
| 2018-12-23T20:26:49
| 162,921,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,446
|
py
|
"""
Create a class called Warrior which calculates and keeps track of their level and skills, and ranks them as the warrior they've proven to be.
Business Rules:
A warrior starts at level 1 and can progress all the way to 100.
A warrior starts at rank "Pushover" and can progress all the way to "Greatest".
The only acceptable range of rank values is "Pushover", "Novice", "Fighter", "Warrior", "Veteran", "Sage", "Elite", "Conqueror", "Champion", "Master", "Greatest".
Warriors will compete in battles. Battles will always accept an enemy level to match against your own.
With each battle successfully finished, your warrior's experience is updated based on the enemy's level.
The experience earned from the battle is relative to what the warrior's current level is compared to the level of the enemy.
A warrior's experience starts from 100. Each time the warrior's experience increases by another 100, the warrior's level rises to the next level.
A warrior's experience is cumulative, and does not reset with each rise of level. The only exception is when the warrior reaches level 100, with which the experience stops at 10000
At every 10 levels, your warrior will reach a new rank tier. (ex. levels 1-9 falls within "Pushover" tier, levels 80-89 fall within "Champion" tier, etc.)
A warrior cannot progress beyond level 100 and rank "Greatest".
Battle Progress Rules & Calculations:
If an enemy level does not fall in the range of 1 to 100, the battle cannot happen and should return "Invalid level".
Completing a battle against an enemy with the same level as your warrior will be worth 10 experience points.
Completing a battle against an enemy who is one level lower than your warrior will be worth 5 experience points.
Completing a battle against an enemy who is two levels lower or more than your warrior will give 0 experience points.
Completing a battle against an enemy who is one level higher or more than your warrior will accelerate your experience gaining. The greater the difference between levels, the more experience your warrior will gain. The formula is 20 * diff * diff where diff equals the difference in levels between the enemy and your warrior.
However, if your warrior is at least one rank lower than your enemy, and at least 5 levels lower, your warrior cannot fight against an enemy that strong and must instead return "You've been defeated".
Every successful battle will also return one of three responses: "Easy fight", "A good fight", "An intense fight". Return "Easy fight" if your warrior is 2 or more levels higher than your enemy's level. Return "A good fight" if your warrior is either 1 level higher or equal to your enemy's level. Return "An intense fight" if your warrior's level is lower than the enemy's level.
"""
class Warrior():
ranking = {0:"Pushover",1:"Novice",2:"Fighter",3:"Warrior",4:"Veteran",
5:"Sage",6:"Elite",7:"Conqueror", 8:"Champion", 9:"Master", 10:"Greatest"}
def __init__(self):
self.level = 1
self.rank = "Pushover"
self.experience = 100
self.achievements=[]
def training(self, desc_list):
if desc_list[2]>self.level:
return "Not strong enough"
else:
self.achievements.append(desc_list[0])
if self.experience+desc_list[1]>10000:
self.experience=10000
else:
self.experience +=desc_list[1]
self.level=int(self.experience/100)
self.rank=Warrior.ranking[int(self.level/10)]
return desc_list[0]
def battle(self, n):
if n<1 or n>100:
return "Invalid level"
else:
diff = n-self.level
if n==self.level:
self.experience +=10
elif n==self.level-1:
self.experience +=5
elif n>self.level:
if n>=self.level+5 and int(n/10)>=int(self.level/10)+1:
return "You've been defeated"
else:
self.experience += 20*((n-self.level)**2)
if self.experience>10000:
self.experience=10000
self.level=int(self.experience/100)
self.rank=Warrior.ranking[int(self.level/10)]
if diff<=-2: return "Easy fight"
elif diff==-1 or diff==0: return "A good fight"
else: return "An intense fight"
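# A quick usage sketch (not part of the original kata solution; the warrior and
# the enemy levels below are just illustrative values).
if __name__ == "__main__":
    bruce = Warrior()
    print(bruce.battle(3))              # "An intense fight": gains 20 * 2 * 2 = 80 XP (total 180)
    print(bruce.level, bruce.rank)      # 1 Pushover
    print(bruce.training(["Defeated Chuck Norris", 9000, 1]))
    print(bruce.level, bruce.rank)      # 91 Master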
|
[
"diana.yuying@gmail.com"
] |
diana.yuying@gmail.com
|
be016283897b8b97fcd923c3c66271b85639e383
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/rl_metrics_aaai2021/utils.py
|
fdb1f66a5371b5960ba1746220fe5dec986ad621
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434
| 2021-09-30T20:59:01
| 2021-09-30T21:07:02
| 156,725,548
| 1
| 0
|
Apache-2.0
| 2018-11-08T15:13:53
| 2018-11-08T15:13:52
| null |
UTF-8
|
Python
| false
| false
| 7,577
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import collections
from absl import logging
import numpy as np
from rl_metrics_aaai2021 import bisimulation
from rl_metrics_aaai2021 import d_delta
from rl_metrics_aaai2021 import d_delta_star
from rl_metrics_aaai2021 import discrete_bisimulation
from rl_metrics_aaai2021 import discrete_lax_bisimulation
from rl_metrics_aaai2021 import lax_bisimulation
MetricData = collections.namedtuple('metric_data', ['constructor', 'label'])
MDPStats = collections.namedtuple(
'MDPStats', ['time', 'num_iterations', 'min_gap', 'avg_gap', 'max_gap'])
# Dictionary mapping metric name to constructor and LaTeX label.
METRICS = {
'bisimulation':
MetricData(bisimulation.Bisimulation, r'$d^{\sim}$'),
'discrete_bisimulation':
MetricData(discrete_bisimulation.DiscreteBisimulation, r'$e^{\sim}$'),
'lax_bisimulation':
MetricData(lax_bisimulation.LaxBisimulation, r'$d^{\sim_{lax}}$'),
'discrete_lax_bisimulation':
MetricData(discrete_lax_bisimulation.DiscreteLaxBisimulation,
r'$e^{\sim_{lax}}$'),
'd_delta_1':
MetricData(d_delta.DDelta1, r'$d_{\Delta1}$'),
'd_delta_5':
MetricData(d_delta.DDelta5, r'$d_{\Delta5}$'),
'd_delta_10':
MetricData(d_delta.DDelta10, r'$d_{\Delta10}$'),
'd_delta_15':
MetricData(d_delta.DDelta15, r'$d_{\Delta15}$'),
'd_delta_20':
MetricData(d_delta.DDelta20, r'$d_{\Delta20}$'),
'd_delta_50':
MetricData(d_delta.DDelta50, r'$d_{\Delta50}$'),
'd_delta_100':
MetricData(d_delta.DDelta100, r'$d_{\Delta100}$'),
'd_delta_500':
MetricData(d_delta.DDelta500, r'$d_{\Delta500}$'),
'd_delta_1000':
MetricData(d_delta.DDelta1000, r'$d_{\Delta1000}$'),
'd_delta_5000':
MetricData(d_delta.DDelta5000, r'$d_{\Delta5000}$'),
'd_Delta_star':
MetricData(d_delta_star.DDeltaStar, r'$d_{\Delta^*}$'),
}
def value_iteration(env, tolerance, verbose=False):
"""Run value iteration on env.
Args:
env: a MiniGrid environment, including the MDPWrapper.
tolerance: float, error tolerance used to exit loop.
verbose: bool, whether to print verbose messages.
Returns:
Numpy array with V* and Q*.
"""
values = np.zeros(env.num_states)
q_values = np.zeros((env.num_states, env.num_actions))
error = tolerance * 2
i = 0
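  # Repeatedly apply the Bellman optimality backup
  #   Q(s, a) = R(s, a) + gamma * sum_s' P(s' | s, a) * V(s')
  # and set V(s) = max_a Q(s, a), until the largest change in V drops below tolerance.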
while error > tolerance:
new_values = np.copy(values)
for s in range(env.num_states):
for a in range(env.num_actions):
q_values[s, a] = (
env.rewards[s, a] +
env.gamma * np.matmul(env.transition_probs[s, a, :], values))
new_values[s] = np.max(q_values[s, :])
error = np.max(abs(new_values - values))
values = new_values
i += 1
if i % 1000 == 0 and verbose:
logging.info('Error after %d iterations: %f', i, error)
if verbose:
logging.info('Found V* in %d iterations', i)
logging.info(values)
return values, q_values
def q_value_iteration(env, tolerance):
"""Run q value iteration on env.
Args:
env: a MiniGrid environment, including the MDPWrapper.
tolerance: float, error tolerance used to exit loop.
Returns:
    Numpy array with Q*.
"""
q_values = np.zeros((env.num_states, env.num_actions))
error = tolerance * 2
i = 0
  while error > tolerance:
    error = 0.
    for s in range(env.num_states):
      for a in range(env.num_actions):
        old_q_value = q_values[s, a]
        q_values[s, a] = (
            env.rewards[s, a] + env.gamma *
            np.matmul(env.transition_probs[s, a, :], np.max(q_values, axis=1)))
        # Track the largest change over all state-action pairs so the loop only
        # stops once every Q-value has converged.
        error = max(error, abs(old_q_value - q_values[s, a]))
    i += 1
return q_values
def policy_iteration(env, tolerance, verbose=False):
"""Run policy iteration on env.
Args:
env: a MiniGrid environment, including the MDPWrapper.
tolerance: float, evaluation stops when the value function change is less
than the tolerance.
verbose: bool, whether to print verbose messages.
Returns:
Numpy array with V*
"""
values = np.zeros(env.num_states)
# Random policy
policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
policy_stable = False
i = 0
while not policy_stable:
# Policy evaluation
while True:
delta = 0.
for s in range(env.num_states):
v = np.sum(env.rewards[s, :] * policy[s, :] + env.gamma * policy[s, :] *
np.matmul(env.transition_probs[s, :, :], values))
delta = max(delta, abs(v - values[s]))
values[s] = v
if delta < tolerance:
break
# Policy improvement
policy_stable = True
for s in range(env.num_states):
old = policy[s].copy()
g = np.zeros(env.num_actions, dtype=float)
for a in range(env.num_actions):
g[a] = (
env.rewards[s, a] +
env.gamma * np.matmul(env.transition_probs[s, a, :], values))
greed_actions = np.argwhere(g == np.amax(g))
for a in range(env.num_actions):
if a in greed_actions:
policy[s, a] = 1 / len(greed_actions)
else:
policy[s, a] = 0
if not np.array_equal(policy[s], old):
policy_stable = False
i += 1
if i % 1000 == 0 and verbose:
logging.info('Error after %d iterations: %f', i, delta)
if verbose:
logging.info('Found V* in %d iterations', i)
logging.info(values)
return values
def q_policy_iteration(env, tolerance, verbose=False):
"""Run policy iteration on env.
Args:
env: a MiniGrid environment, including the MDPWrapper.
tolerance: float, evaluation stops when the value function change is less
than the tolerance.
verbose: bool, whether to print verbose messages.
Returns:
    Numpy array with Q*
"""
q_values = np.zeros((env.num_states, env.num_actions))
# Random policy
policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
policy_stable = False
i = 0
while not policy_stable:
# Policy evaluation
while True:
delta = 0.
for s in range(env.num_states):
v = env.rewards[s, :] + env.gamma * np.matmul(
env.transition_probs[s, :, :], np.sum(q_values * policy, axis=1))
delta = max(delta, np.max(abs(v- q_values[s])))
q_values[s] = v
if delta < tolerance:
break
# Policy improvement
policy_stable = True
for s in range(env.num_states):
old = policy[s].copy()
greedy_actions = np.argwhere(q_values[s] == np.amax(q_values[s]))
for a in range(env.num_actions):
if a in greedy_actions:
policy[s, a] = 1 / len(greedy_actions)
else:
policy[s, a] = 0
if not np.array_equal(policy[s], old):
policy_stable = False
i += 1
if i % 1000 == 0 and verbose:
logging.info('Error after %d iterations: %f', i, delta)
if verbose:
logging.info('Found V* in %d iterations', i)
logging.info(q_values)
return q_values
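# A minimal usage sketch (assumes `env` is a MiniGrid environment wrapped by this
# repository's MDPWrapper, exposing num_states, num_actions, rewards,
# transition_probs and gamma):
#
#     values, q_values = value_iteration(env, tolerance=1e-4)
#     greedy_policy = np.argmax(q_values, axis=1)  # greedy policy w.r.t. Q*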
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
dc2cf902fa9faa242b7a3024eb996183b816db91
|
e48bc8299aa342a74edf09945fac10f812130604
|
/www/transwarp/web.py
|
4e20b8e6d16fb71c05e55ca8baeab9c140d4ab96
|
[] |
no_license
|
zhongsihang/blog-python-app
|
af4be1221baccf501e91e3dc7e39e3a0abdb2b21
|
bc5eb1bef4298ff4ceff7d3aafc4d235651a27ab
|
refs/heads/master
| 2021-01-21T01:34:58.196036
| 2015-09-18T06:22:36
| 2015-09-18T06:22:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48,867
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a simple, lightweight, WSGI-compatible (Web Server Gateway Interface) web framework.
WSGI in a nutshell:
    How it works: WSGI server -----> WSGI handler function
    What it does: the WSGI server takes care of the raw HTTP request, its parsing
          and the response, so that we can concentrate on writing the web business
          logic in Python -- that is, the WSGI handler function.
          WSGI is therefore a higher-level wrapper around HTTP.
    Example:
        def application(environ, start_response):
            method = environ['REQUEST_METHOD']
            path = environ['PATH_INFO']
            if method=='GET' and path=='/':
                return handle_home(environ, start_response)
            if method=='POST' and path=='/signin':
                return handle_signin(environ, start_response)
Why design a web framework:
    1. Although the interface WSGI offers is much higher-level than raw HTTP, it is
       still fairly low-level compared with the processing logic of a web app. We need
       a further abstraction on top of WSGI so that we can focus on handling one URL
       with one function, and leave the URL-to-function mapping to the web framework.
Interfaces this framework provides:
    1. URL routing: maps URLs to handler functions
    2. URL interception: permission checks based on the URL
    3. Views: HTML page generation
    4. Data models: data extraction (see the models module)
    5. Transaction data: wrappers for the request and response data (thread local)
"""
import types, os, re, cgi, sys, time, datetime, functools, mimetypes, threading, logging, traceback, urllib
from db import Dict
import utils
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
#################################################################
# Transaction data interface: stores the request data and response data
# in a global thread-local object.
#################################################################
ctx = threading.local()
_RE_RESPONSE_STATUS = re.compile(r'^\d\d\d(\ [\w\ ]+)?$')
_HEADER_X_POWERED_BY = ('X-Powered-By', 'transwarp/1.0')
# Used for timezone conversion
_TIMEDELTA_ZERO = datetime.timedelta(0)
_RE_TZ = re.compile('^([\+\-])([0-9]{1,2})\:([0-9]{1,2})$')
# response status
_RESPONSE_STATUSES = {
# Informational
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
# Successful
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used',
# Redirection
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
# Client Error
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: "I'm a teapot",
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
# Server Error
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended',
}
_RESPONSE_HEADERS = (
'Accept-Ranges',
'Age',
'Allow',
'Cache-Control',
'Connection',
'Content-Encoding',
'Content-Language',
'Content-Length',
'Content-Location',
'Content-MD5',
'Content-Disposition',
'Content-Range',
'Content-Type',
'Date',
'ETag',
'Expires',
'Last-Modified',
'Link',
'Location',
'P3P',
'Pragma',
'Proxy-Authenticate',
'Refresh',
'Retry-After',
'Server',
'Set-Cookie',
'Strict-Transport-Security',
'Trailer',
'Transfer-Encoding',
'Vary',
'Via',
'Warning',
'WWW-Authenticate',
'X-Frame-Options',
'X-XSS-Protection',
'X-Content-Type-Options',
'X-Forwarded-Proto',
'X-Powered-By',
'X-UA-Compatible',
)
class UTC(datetime.tzinfo):
"""
    tzinfo is a base class used to attach a timezone to a datetime object.
    To use it, pass an instance of this subclass as the datetime's tzinfo.
    There are two ways to do that:
    1. pass it at construction time:
        datetime(2009, 2, 17, 19, 10, 2, tzinfo=tz0)
    2. pass it via the datetime's replace() method, which creates a new datetime:
        datetime.replace(tzinfo=tz0)
>>> tz0 = UTC('+00:00')
>>> tz0.tzname(None)
'UTC+00:00'
>>> tz8 = UTC('+8:00')
>>> tz8.tzname(None)
'UTC+8:00'
>>> tz7 = UTC('+7:30')
>>> tz7.tzname(None)
'UTC+7:30'
>>> tz5 = UTC('-05:30')
>>> tz5.tzname(None)
'UTC-05:30'
>>> from datetime import datetime
>>> u = datetime.utcnow().replace(tzinfo=tz0)
>>> l1 = u.astimezone(tz8)
>>> l2 = u.replace(tzinfo=tz8)
>>> d1 = u - l1
>>> d2 = u - l2
>>> d1.seconds
0
>>> d2.seconds
28800
"""
def __init__(self, utc):
utc = str(utc.strip().upper())
mt = _RE_TZ.match(utc)
if mt:
minus = mt.group(1) == '-'
h = int(mt.group(2))
m = int(mt.group(3))
if minus:
h, m = (-h), (-m)
self._utcoffset = datetime.timedelta(hours=h, minutes=m)
self._tzname = 'UTC%s' % utc
else:
raise ValueError('bad utc time zone')
def utcoffset(self, dt):
"""
        Offset of this timezone from standard (UTC) time.
"""
return self._utcoffset
def dst(self, dt):
"""
        Daylight Saving Time; always zero for this timezone.
"""
return _TIMEDELTA_ZERO
def tzname(self, dt):
"""
        Name of this timezone.
"""
return self._tzname
def __str__(self):
return 'UTC timezone info object (%s)' % self._tzname
__repr__ = __str__
UTC_0 = UTC('+00:00')
# Used for exception handling
class _HttpError(Exception):
"""
HttpError that defines http error code.
>>> e = _HttpError(404)
>>> e.status
'404 Not Found'
"""
def __init__(self, code):
"""
Init an HttpError with response code.
"""
super(_HttpError, self).__init__()
self.status = '%d %s' % (code, _RESPONSE_STATUSES[code])
self._headers = None
def header(self, name, value):
"""
        Add a header; if no headers exist yet, add the X-Powered-By header first.
"""
if not self._headers:
self._headers = [_HEADER_X_POWERED_BY]
self._headers.append((name, value))
@property
def headers(self):
"""
        The headers, exposed as a property.
"""
if hasattr(self, '_headers'):
return self._headers
return []
def __str__(self):
return self.status
__repr__ = __str__
class _RedirectError(_HttpError):
"""
RedirectError that defines http redirect code.
>>> e = _RedirectError(302, 'http://www.apple.com/')
>>> e.status
'302 Found'
>>> e.location
'http://www.apple.com/'
"""
def __init__(self, code, location):
"""
Init an HttpError with response code.
"""
super(_RedirectError, self).__init__(code)
self.location = location
def __str__(self):
return '%s, %s' % (self.status, self.location)
__repr__ = __str__
class HttpError(object):
"""
HTTP Exceptions
"""
@staticmethod
def badrequest():
"""
Send a bad request response.
>>> raise HttpError.badrequest()
Traceback (most recent call last):
...
_HttpError: 400 Bad Request
"""
return _HttpError(400)
@staticmethod
def unauthorized():
"""
Send an unauthorized response.
>>> raise HttpError.unauthorized()
Traceback (most recent call last):
...
_HttpError: 401 Unauthorized
"""
return _HttpError(401)
@staticmethod
def forbidden():
"""
Send a forbidden response.
>>> raise HttpError.forbidden()
Traceback (most recent call last):
...
_HttpError: 403 Forbidden
"""
return _HttpError(403)
@staticmethod
def notfound():
"""
Send a not found response.
>>> raise HttpError.notfound()
Traceback (most recent call last):
...
_HttpError: 404 Not Found
"""
return _HttpError(404)
@staticmethod
def conflict():
"""
Send a conflict response.
>>> raise HttpError.conflict()
Traceback (most recent call last):
...
_HttpError: 409 Conflict
"""
return _HttpError(409)
@staticmethod
def internalerror():
"""
Send an internal error response.
>>> raise HttpError.internalerror()
Traceback (most recent call last):
...
_HttpError: 500 Internal Server Error
"""
return _HttpError(500)
@staticmethod
def redirect(location):
"""
Do permanent redirect.
>>> raise HttpError.redirect('http://www.itranswarp.com/')
Traceback (most recent call last):
...
_RedirectError: 301 Moved Permanently, http://www.itranswarp.com/
"""
return _RedirectError(301, location)
@staticmethod
def found(location):
"""
Do temporary redirect.
>>> raise HttpError.found('http://www.itranswarp.com/')
Traceback (most recent call last):
...
_RedirectError: 302 Found, http://www.itranswarp.com/
"""
return _RedirectError(302, location)
@staticmethod
def seeother(location):
"""
Do temporary redirect.
>>> raise HttpError.seeother('http://www.itranswarp.com/')
Traceback (most recent call last):
...
_RedirectError: 303 See Other, http://www.itranswarp.com/
>>> e = HttpError.seeother('http://www.itranswarp.com/seeother?r=123')
>>> e.location
'http://www.itranswarp.com/seeother?r=123'
"""
return _RedirectError(303, location)
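# Maps upper-cased header names to their canonical casing, e.g. 'CONTENT-TYPE' -> 'Content-Type'.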
_RESPONSE_HEADER_DICT = dict(zip(map(lambda x: x.upper(), _RESPONSE_HEADERS), _RESPONSE_HEADERS))
class Request(object):
"""
    Request object, used to access all the information of the HTTP request.
"""
def __init__(self, environ):
"""
        environ is the same environ dict that the WSGI server passes to the
        WSGI handler function when calling it; it contains all the data of the
        user's request.
"""
self._environ = environ
def _parse_input(self):
"""
        Parse the parameters passed in through WSGI into a dict and return it.
        For example: Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
        What is parsed here is the byte stream in the wsgi.input object.
"""
def _convert(item):
if isinstance(item, list):
return [utils.to_unicode(i.value) for i in item]
if item.filename:
return MultipartFile(item)
return utils.to_unicode(item.value)
fs = cgi.FieldStorage(fp=self._environ['wsgi.input'], environ=self._environ, keep_blank_values=True)
inputs = dict()
for key in fs:
inputs[key] = _convert(fs[key])
return inputs
def _get_raw_input(self):
"""
        Cache the data dict parsed from the WSGI input as an attribute of the
        Request object, then return that dict.
"""
if not hasattr(self, '_raw_input'):
self._raw_input = self._parse_input()
return self._raw_input
def __getitem__(self, key):
"""
        Access the data of the Request object by key. If the key has multiple
        values, the first one is returned; if the key does not exist, a
        KeyError is raised.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r['a']
u'1'
>>> r['c']
u'ABC'
>>> r['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> b = '----WebKitFormBoundaryQQ3J8kPsjFpTmqNz'
>>> pl = ['--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Scofield', '--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Lincoln', '--%s' % b, 'Content-Disposition: form-data; name=\\"file\\"; filename=\\"test.txt\\"', 'Content-Type: text/plain\\n', 'just a test', '--%s' % b, 'Content-Disposition: form-data; name=\\"id\\"\\n', '4008009001', '--%s--' % b, '']
>>> payload = '\\n'.join(pl)
>>> r = Request({'REQUEST_METHOD':'POST', 'CONTENT_LENGTH':str(len(payload)), 'CONTENT_TYPE':'multipart/form-data; boundary=%s' % b, 'wsgi.input':StringIO(payload)})
>>> r.get('name')
u'Scofield'
>>> r.gets('name')
[u'Scofield', u'Lincoln']
>>> f = r.get('file')
>>> f.filename
u'test.txt'
>>> f.file.read()
'just a test'
"""
r = self._get_raw_input()[key]
if isinstance(r, list):
return r[0]
return r
def get(self, key, default=None):
"""
        Dict-style get(): same as __getitem__ above (request[key]), but returns
        the default value if the key is not found.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r.get('a')
u'1'
>>> r.get('empty')
>>> r.get('empty', 'DEFAULT')
'DEFAULT'
"""
r = self._get_raw_input().get(key, default)
if isinstance(r, list):
return r[0]
return r
def gets(self, key):
'''
Get multiple values for specified key.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r.gets('a')
[u'1']
>>> r.gets('c')
[u'ABC', u'XYZ']
>>> r.gets('empty')
Traceback (most recent call last):
...
KeyError: 'empty'
'''
r = self._get_raw_input()[key]
if isinstance(r, list):
return r[:]
return [r]
def input(self, **kw):
"""
        Return a Dict object composed of the passed-in defaults and the data
        parsed from environ (see the db module for the Dict class).
Get input as dict from request, fill dict using provided default value if key not exist.
i = ctx.request.input(role='guest')
i.role ==> 'guest'
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> i = r.input(x=2008)
>>> i.a
u'1'
>>> i.b
u'M M'
>>> i.c
u'ABC'
>>> i.x
2008
>>> i.get('d', u'100')
u'100'
>>> i.x
2008
"""
copy = Dict(**kw)
raw = self._get_raw_input()
for k, v in raw.iteritems():
copy[k] = v[0] if isinstance(v, list) else v
return copy
def get_body(self):
"""
        Get the body data of an HTTP POST request and return it as a str.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('<xml><raw/>')})
>>> r.get_body()
'<xml><raw/>'
"""
fp = self._environ['wsgi.input']
return fp.read()
@property
def remote_addr(self):
"""
Get remote addr. Return '0.0.0.0' if cannot get remote_addr.
>>> r = Request({'REMOTE_ADDR': '192.168.0.100'})
>>> r.remote_addr
'192.168.0.100'
"""
return self._environ.get('REMOTE_ADDR', '0.0.0.0')
@property
def document_root(self):
"""
Get raw document_root as str. Return '' if no document_root.
>>> r = Request({'DOCUMENT_ROOT': '/srv/path/to/doc'})
>>> r.document_root
'/srv/path/to/doc'
"""
return self._environ.get('DOCUMENT_ROOT', '')
@property
def query_string(self):
"""
Get raw query string as str. Return '' if no query string.
>>> r = Request({'QUERY_STRING': 'a=1&c=2'})
>>> r.query_string
'a=1&c=2'
>>> r = Request({})
>>> r.query_string
''
"""
return self._environ.get('QUERY_STRING', '')
@property
def environ(self):
"""
Get raw environ as dict, both key, value are str.
>>> r = Request({'REQUEST_METHOD': 'GET', 'wsgi.url_scheme':'http'})
>>> r.environ.get('REQUEST_METHOD')
'GET'
>>> r.environ.get('wsgi.url_scheme')
'http'
>>> r.environ.get('SERVER_NAME')
>>> r.environ.get('SERVER_NAME', 'unamed')
'unamed'
"""
return self._environ
@property
def request_method(self):
"""
Get request method. The valid returned values are 'GET', 'POST', 'HEAD'.
>>> r = Request({'REQUEST_METHOD': 'GET'})
>>> r.request_method
'GET'
>>> r = Request({'REQUEST_METHOD': 'POST'})
>>> r.request_method
'POST'
"""
return self._environ['REQUEST_METHOD']
@property
def path_info(self):
"""
Get request path as str.
>>> r = Request({'PATH_INFO': '/test/a%20b.html'})
>>> r.path_info
'/test/a b.html'
"""
return urllib.unquote(self._environ.get('PATH_INFO', ''))
@property
def host(self):
"""
Get request host as str. Default to '' if cannot get host..
>>> r = Request({'HTTP_HOST': 'localhost:8080'})
>>> r.host
'localhost:8080'
"""
return self._environ.get('HTTP_HOST', '')
def _get_headers(self):
"""
        Collect the HTTP_-prefixed headers from environ.
"""
if not hasattr(self, '_headers'):
hdrs = {}
for k, v in self._environ.iteritems():
if k.startswith('HTTP_'):
# convert 'HTTP_ACCEPT_ENCODING' to 'ACCEPT-ENCODING'
hdrs[k[5:].replace('_', '-').upper()] = v.decode('utf-8')
self._headers = hdrs
return self._headers
@property
def headers(self):
"""
        Get all the headers; exposed as a property.
Get all HTTP headers with key as str and value as unicode. The header names are 'XXX-XXX' uppercase.
>>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
>>> H = r.headers
>>> H['ACCEPT']
u'text/html'
>>> H['USER-AGENT']
u'Mozilla/5.0'
>>> L = H.items()
>>> L.sort()
>>> L
[('ACCEPT', u'text/html'), ('USER-AGENT', u'Mozilla/5.0')]
"""
return dict(**self._get_headers())
def header(self, header, default=None):
"""
        Get the value of the specified header.
Get header from request as unicode, return None if not exist, or default if specified.
The header name is case-insensitive such as 'USER-AGENT' or u'content-Type'.
>>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
>>> r.header('User-Agent')
u'Mozilla/5.0'
>>> r.header('USER-AGENT')
u'Mozilla/5.0'
>>> r.header('Accept')
u'text/html'
>>> r.header('Test')
>>> r.header('Test', u'DEFAULT')
u'DEFAULT'
"""
return self._get_headers().get(header.upper(), default)
def _get_cookies(self):
"""
        Extract the cookie string from environ and parse it into a dict of key/value pairs.
"""
if not hasattr(self, '_cookies'):
cookies = {}
cookie_str = self._environ.get('HTTP_COOKIE')
if cookie_str:
for c in cookie_str.split(';'):
pos = c.find('=')
if pos > 0:
cookies[c[:pos].strip()] = utils.unquote(c[pos+1:])
self._cookies = cookies
return self._cookies
@property
def cookies(self):
"""
        Return the cookies as a Dict object; exposed as a property.
Return all cookies as dict. The cookie name is str and values is unicode.
>>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
>>> r.cookies['A']
u'123'
>>> r.cookies['url']
u'http://www.example.com/'
"""
return Dict(**self._get_cookies())
def cookie(self, name, default=None):
"""
        Get the specified cookie.
Return specified cookie value as unicode. Default to None if cookie not exists.
>>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
>>> r.cookie('A')
u'123'
>>> r.cookie('url')
u'http://www.example.com/'
>>> r.cookie('test')
>>> r.cookie('test', u'DEFAULT')
u'DEFAULT'
"""
return self._get_cookies().get(name, default)
class Response(object):
def __init__(self):
self._status = '200 OK'
self._headers = {'CONTENT-TYPE': 'text/html; charset=utf-8'}
def unset_header(self, name):
"""
        Remove the specified header.
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.unset_header('CONTENT-type')
>>> r.header('content-type')
"""
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
if key in self._headers:
del self._headers[key]
def set_header(self, name, value):
"""
        Set the value of the specified header.
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.set_header('CONTENT-type', 'image/png')
>>> r.header('content-TYPE')
'image/png'
"""
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
self._headers[key] = utils.to_str(value)
def header(self, name):
"""
        Get the value of a single response header; the name is case-insensitive.
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.header('CONTENT-type')
'text/html; charset=utf-8'
>>> r.header('X-Powered-By')
"""
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
return self._headers.get(key)
@property
def headers(self):
"""
        All header values, including the cookies, exposed as a property in the
        form [(key1, value1), (key2, value2), ...].
>>> r = Response()
>>> r.headers
[('Content-Type', 'text/html; charset=utf-8'), ('X-Powered-By', 'transwarp/1.0')]
>>> r.set_cookie('s1', 'ok', 3600)
>>> r.headers
[('Content-Type', 'text/html; charset=utf-8'), ('Set-Cookie', 's1=ok; Max-Age=3600; Path=/; HttpOnly'), ('X-Powered-By', 'transwarp/1.0')]
"""
L = [(_RESPONSE_HEADER_DICT.get(k, k), v) for k, v in self._headers.iteritems()]
if hasattr(self, '_cookies'):
for v in self._cookies.itervalues():
L.append(('Set-Cookie', v))
L.append(_HEADER_X_POWERED_BY)
return L
@property
def content_type(self):
"""
        Property holding the value of the Content-Type header.
>>> r = Response()
>>> r.content_type
'text/html; charset=utf-8'
>>> r.content_type = 'application/json'
>>> r.content_type
'application/json'
"""
return self.header('CONTENT-TYPE')
@content_type.setter
def content_type(self, value):
"""
        Make the content_type property writable, i.e. set the Content-Type header.
"""
if value:
self.set_header('CONTENT-TYPE', value)
else:
self.unset_header('CONTENT-TYPE')
@property
def content_length(self):
"""
        Get the value of the Content-Length header.
>>> r = Response()
>>> r.content_length
>>> r.content_length = 100
>>> r.content_length
'100'
"""
return self.header('CONTENT-LENGTH')
@content_length.setter
def content_length(self, value):
"""
        Set the value of the Content-Length header.
>>> r = Response()
>>> r.content_length = '1024'
>>> r.content_length
'1024'
>>> r.content_length = 1024 * 8
>>> r.content_length
'8192'
"""
self.set_header('CONTENT-LENGTH', str(value))
def delete_cookie(self, name):
"""
Delete a cookie immediately.
Args:
name: the cookie name.
"""
self.set_cookie(name, '__deleted__', expires=0)
def set_cookie(self, name, value, max_age=None, expires=None, path='/', domain=None, secure=False, http_only=True):
"""
Set a cookie.
Args:
name: the cookie name.
value: the cookie value.
max_age: optional, seconds of cookie's max age.
expires: optional, unix timestamp, datetime or date object that indicate an absolute time of the
expiration time of cookie. Note that if expires specified, the max_age will be ignored.
path: the cookie path, default to '/'.
domain: the cookie domain, default to None.
secure: if the cookie secure, default to False.
http_only: if the cookie is for http only, default to True for better safty
(client-side script cannot access cookies with HttpOnly flag).
>>> r = Response()
>>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
>>> r._cookies
{'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
>>> r.set_cookie('company', r'Example="Limited"', expires=1342274794.123, path='/sub/')
>>> r._cookies
{'company': 'company=Example%3D%22Limited%22; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/sub/; HttpOnly'}
>>> dt = datetime.datetime(2012, 7, 14, 22, 6, 34, tzinfo=UTC('+8:00'))
>>> r.set_cookie('company', 'Expires', expires=dt)
>>> r._cookies
{'company': 'company=Expires; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/; HttpOnly'}
"""
if not hasattr(self, '_cookies'):
self._cookies = {}
L = ['%s=%s' % (utils.quote(name), utils.quote(value))]
if expires is not None:
if isinstance(expires, (float, int, long)):
L.append('Expires=%s' % datetime.datetime.fromtimestamp(expires, UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
if isinstance(expires, (datetime.date, datetime.datetime)):
L.append('Expires=%s' % expires.astimezone(UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
elif isinstance(max_age, (int, long)):
L.append('Max-Age=%d' % max_age)
L.append('Path=%s' % path)
if domain:
L.append('Domain=%s' % domain)
if secure:
L.append('Secure')
if http_only:
L.append('HttpOnly')
self._cookies[name] = '; '.join(L)
def unset_cookie(self, name):
"""
Unset a cookie.
>>> r = Response()
>>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
>>> r._cookies
{'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
>>> r.unset_cookie('company')
>>> r._cookies
{}
"""
if hasattr(self, '_cookies'):
if name in self._cookies:
del self._cookies[name]
@property
def status_code(self):
"""
Get response status code as int.
>>> r = Response()
>>> r.status_code
200
>>> r.status = 404
>>> r.status_code
404
>>> r.status = '500 Internal Error'
>>> r.status_code
500
"""
return int(self._status[:3])
@property
def status(self):
"""
Get response status. Default to '200 OK'.
>>> r = Response()
>>> r.status
'200 OK'
>>> r.status = 404
>>> r.status
'404 Not Found'
>>> r.status = '500 Oh My God'
>>> r.status
'500 Oh My God'
"""
return self._status
@status.setter
def status(self, value):
"""
Set response status as int or str.
>>> r = Response()
>>> r.status = 404
>>> r.status
'404 Not Found'
>>> r.status = '500 ERR'
>>> r.status
'500 ERR'
>>> r.status = u'403 Denied'
>>> r.status
'403 Denied'
>>> r.status = 99
Traceback (most recent call last):
...
ValueError: Bad response code: 99
>>> r.status = 'ok'
Traceback (most recent call last):
...
ValueError: Bad response code: ok
>>> r.status = [1, 2, 3]
Traceback (most recent call last):
...
TypeError: Bad type of response code.
"""
if isinstance(value, (int, long)):
if 100 <= value <= 999:
st = _RESPONSE_STATUSES.get(value, '')
if st:
self._status = '%d %s' % (value, st)
else:
self._status = str(value)
else:
raise ValueError('Bad response code: %d' % value)
elif isinstance(value, basestring):
if isinstance(value, unicode):
value = value.encode('utf-8')
if _RE_RESPONSE_STATUS.match(value):
self._status = value
else:
raise ValueError('Bad response code: %s' % value)
else:
raise TypeError('Bad type of response code.')
#################################################################
# URL routing
# Maps URLs to handler functions
#################################################################
# Regex used to capture variables in a URL
_re_route = re.compile(r'(:[a-zA-Z_]\w*)')
# Method decorators used to capture the URL
def get(path):
"""
A @get decorator.
@get('/:id')
def index(id):
pass
>>> @get('/test/:id')
... def test():
... return 'ok'
...
>>> test.__web_route__
'/test/:id'
>>> test.__web_method__
'GET'
>>> test()
'ok'
"""
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'GET'
return func
return _decorator
def post(path):
"""
A @post decorator.
>>> @post('/post/:id')
... def testpost():
... return '200'
...
>>> testpost.__web_route__
'/post/:id'
>>> testpost.__web_method__
'POST'
>>> testpost()
'200'
"""
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'POST'
return func
return _decorator
def _build_regex(path):
r"""
    Convert a path into a regular expression, capturing the parameters in it.
>>> _build_regex('/path/to/:file')
'^\\/path\\/to\\/(?P<file>[^\\/]+)$'
>>> _build_regex('/:user/:comments/list')
'^\\/(?P<user>[^\\/]+)\\/(?P<comments>[^\\/]+)\\/list$'
>>> _build_regex(':id-:pid/:w')
'^(?P<id>[^\\/]+)\\-(?P<pid>[^\\/]+)\\/(?P<w>[^\\/]+)$'
"""
re_list = ['^']
var_list = []
is_var = False
for v in _re_route.split(path):
if is_var:
var_name = v[1:]
var_list.append(var_name)
re_list.append(r'(?P<%s>[^\/]+)' % var_name)
else:
s = ''
for ch in v:
if '0' <= ch <= '9':
s += ch
elif 'A' <= ch <= 'Z':
s += ch
elif 'a' <= ch <= 'z':
s += ch
else:
s = s + '\\' + ch
re_list.append(s)
is_var = not is_var
re_list.append('$')
return ''.join(re_list)
def _static_file_generator(fpath, block_size=8192):
"""
    A generator that reads a static file in blocks.
"""
with open(fpath, 'rb') as f:
block = f.read(block_size)
while block:
yield block
block = f.read(block_size)
class Route(object):
"""
    Route object: pairs the URL captured by a decorator with its handler function.
    For example:
    @get('/:id')
    def index(id):
        pass
    In the constructor, path, method, is_static and route describe the URL,
    while func is the decorated function itself, e.g. the index function above.
"""
def __init__(self, func):
"""
        path: the path captured by the method decorator
        method: the HTTP method captured by the method decorator
        is_static: True when the path contains no variables
        route: for dynamic paths (with variables), the compiled regex capturing them
        func: the function defined under the method decorator
"""
self.path = func.__web_route__
self.method = func.__web_method__
self.is_static = _re_route.search(self.path) is None
if not self.is_static:
self.route = re.compile(_build_regex(self.path))
self.func = func
def match(self, url):
"""
        Match the given URL and return the captured variables.
"""
m = self.route.match(url)
if m:
return m.groups()
return None
def __call__(self, *args):
"""
        Calling the instance directly executes the wrapped function.
"""
return self.func(*args)
def __str__(self):
if self.is_static:
return 'Route(static,%s,path=%s)' % (self.method, self.path)
return 'Route(dynamic,%s,path=%s)' % (self.method, self.path)
__repr__ = __str__
class StaticFileRoute(object):
"""
    Route object for static files, the counterpart of Route.
"""
def __init__(self):
self.method = 'GET'
self.is_static = False
self.route = re.compile('^/static/(.+)$')
def match(self, url):
if url.startswith('/static/'):
return (url[1:], )
return None
def __call__(self, *args):
fpath = os.path.join(ctx.application.document_root, args[0])
if not os.path.isfile(fpath):
raise HttpError.notfound()
fext = os.path.splitext(fpath)[1]
ctx.response.content_type = mimetypes.types_map.get(fext.lower(), 'application/octet-stream')
return _static_file_generator(fpath)
class MultipartFile(object):
"""
Multipart file storage get from request input.
f = ctx.request['file']
f.filename # 'test.png'
f.file # file-like object
"""
def __init__(self, storage):
self.filename = utils.to_unicode(storage.filename)
self.file = storage.file
#################################################################
# Views
# Mainly the template engine and the @view decorator
#################################################################
class Template(object):
def __init__(self, template_name, **kw):
"""
Init a template object with template name, model as dict, and additional kw that will append to model.
>>> t = Template('hello.html', title='Hello', copyright='@2012')
>>> t.model['title']
'Hello'
>>> t.model['copyright']
'@2012'
>>> t = Template('test.html', abc=u'ABC', xyz=u'XYZ')
>>> t.model['abc']
u'ABC'
"""
self.template_name = template_name
self.model = dict(**kw)
class TemplateEngine(object):
"""
Base template engine.
"""""
def __call__(self, path, model):
return '<!-- override this method to render template -->'
class Jinja2TemplateEngine(TemplateEngine):
"""
Render using jinja2 template engine.
>>> templ_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'test')
>>> engine = Jinja2TemplateEngine(templ_path)
>>> engine.add_filter('datetime', lambda dt: dt.strftime('%Y-%m-%d %H:%M:%S'))
>>> engine('jinja2-test.html', dict(name='Michael', posted_at=datetime.datetime(2014, 6, 1, 10, 11, 12)))
'<p>Hello, Michael.</p><span>2014-06-01 10:11:12</span>'
"""
def __init__(self, templ_dir, **kw):
from jinja2 import Environment, FileSystemLoader
if 'autoescape' not in kw:
kw['autoescape'] = True
self._env = Environment(loader=FileSystemLoader(templ_dir), **kw)
def add_filter(self, name, fn_filter):
self._env.filters[name] = fn_filter
def __call__(self, path, model):
return self._env.get_template(path).render(**model).encode('utf-8')
def _debug():
"""
:return:
"""
pass
def _default_error_handler(e, start_response, is_debug):
"""
    Handle an exception, mainly by responding with an error page.
    :param e:
    :param start_response: the start_response function from WSGI
    :param is_debug:
:return:
"""
if isinstance(e, HttpError):
logging.info('HttpError: %s' % e.status)
headers = e.headers[:]
headers.append(('Content-Type', 'text/html'))
start_response(e.status, headers)
return ('<html><body><h1>%s</h1></body></html>' % e.status)
logging.exception('Exception:')
start_response('500 Internal Server Error', [('Content-Type', 'text/html'), _HEADER_X_POWERED_BY])
if is_debug:
return _debug()
return ('<html><body><h1>500 Internal Server Error</h1><h3>%s</h3></body></html>' % str(e))
def view(path):
"""
    The decorated function must return a dict, which is used for rendering.
    Via the Template class, the decorator binds the path and the dict together
    in a Template object.
    A view decorator that renders a view from a dict.
>>> @view('test/view.html')
... def hello():
... return dict(name='Bob')
>>> t = hello()
>>> isinstance(t, Template)
True
>>> t.template_name
'test/view.html'
>>> @view('test/view.html')
... def hello2():
... return ['a list']
>>> t = hello2()
Traceback (most recent call last):
...
ValueError: Expect return a dict when using @view() decorator.
"""
def _decorator(func):
@functools.wraps(func)
def _wrapper(*args, **kw):
r = func(*args, **kw)
if isinstance(r, dict):
logging.info('return Template')
return Template(path, **r)
raise ValueError('Expect return a dict when using @view() decorator.')
return _wrapper
return _decorator
#################################################################
# URL interceptors
# Mainly the implementation of @interceptor
#################################################################
_RE_INTERCEPTOR_STARTS_WITH = re.compile(r'^([^\*\?]+)\*?$')
_RE_INTERCEPTOR_ENDS_WITH = re.compile(r'^\*([^\*\?]+)$')
def _build_pattern_fn(pattern):
"""
    Given the string pattern to match against (a URL pattern), return a function
    that takes a string argument and checks whether that string fits the pattern.
"""
m = _RE_INTERCEPTOR_STARTS_WITH.match(pattern)
if m:
return lambda p: p.startswith(m.group(1))
m = _RE_INTERCEPTOR_ENDS_WITH.match(pattern)
if m:
return lambda p: p.endswith(m.group(1))
raise ValueError('Invalid pattern definition in interceptor.')
def interceptor(pattern='/'):
"""
An @interceptor decorator.
@interceptor('/admin/')
def check_admin(req, resp):
pass
"""
def _decorator(func):
func.__interceptor__ = _build_pattern_fn(pattern)
return func
return _decorator
def _build_interceptor_fn(func, next):
"""
    An interceptor receives a next function, so it can decide whether to call
    next() to continue handling the request or to return directly.
"""
def _wrapper():
if func.__interceptor__(ctx.request.path_info):
return func(next)
else:
return next()
return _wrapper
def _build_interceptor_chain(last_fn, *interceptors):
"""
Build interceptor chain.
>>> def target():
... print 'target'
... return 123
>>> @interceptor('/')
... def f1(next):
... print 'before f1()'
... return next()
>>> @interceptor('/test/')
... def f2(next):
... print 'before f2()'
... try:
... return next()
... finally:
... print 'after f2()'
>>> @interceptor('/')
... def f3(next):
... print 'before f3()'
... try:
... return next()
... finally:
... print 'after f3()'
>>> chain = _build_interceptor_chain(target, f1, f2, f3)
>>> ctx.request = Dict(path_info='/test/abc')
>>> chain()
before f1()
before f2()
before f3()
target
after f3()
after f2()
123
>>> ctx.request = Dict(path_info='/api/')
>>> chain()
before f1()
before f3()
target
after f3()
123
"""
L = list(interceptors)
L.reverse()
fn = last_fn
for f in L:
fn = _build_interceptor_fn(f, fn)
return fn
def _load_module(module_name):
"""
Load module from name as str.
>>> m = _load_module('xml')
>>> m.__name__
'xml'
>>> m = _load_module('xml.sax')
>>> m.__name__
'xml.sax'
>>> m = _load_module('xml.sax.handler')
>>> m.__name__
'xml.sax.handler'
"""
last_dot = module_name.rfind('.')
if last_dot == (-1):
return __import__(module_name, globals(), locals())
from_module = module_name[:last_dot]
import_module = module_name[last_dot+1:]
m = __import__(from_module, globals(), locals(), [import_module])
return getattr(m, import_module)
#################################################################
# The global WSGIApplication class, which implements the WSGI interface.
# WSGIApplication wraps the WSGI server (the run method) and the WSGI handler
# function (the inner wsgi function); everything above is decoration of that handler.
#################################################################
class WSGIApplication(object):
def __init__(self, document_root=None, **kw):
"""
Init a WSGIApplication.
Args:
document_root: document root path.
"""
self._running = False
self._document_root = document_root
self._interceptors = []
self._template_engine = None
self._get_static = {}
self._post_static = {}
self._get_dynamic = []
self._post_dynamic = []
def _check_not_running(self):
"""
        Ensure the application is not already running.
"""
if self._running:
raise RuntimeError('Cannot modify WSGIApplication when running.')
@property
def template_engine(self):
return self._template_engine
@template_engine.setter
def template_engine(self, engine):
"""
        Set the template engine used by the application.
"""
self._check_not_running()
self._template_engine = engine
def add_module(self, mod):
self._check_not_running()
m = mod if type(mod) == types.ModuleType else _load_module(mod)
logging.info('Add module: %s' % m.__name__)
for name in dir(m):
fn = getattr(m, name)
if callable(fn) and hasattr(fn, '__web_route__') and hasattr(fn, '__web_method__'):
self.add_url(fn)
def add_url(self, func):
"""
        Add a URL, i.e. register its route.
"""
self._check_not_running()
route = Route(func)
if route.is_static:
if route.method == 'GET':
self._get_static[route.path] = route
if route.method == 'POST':
self._post_static[route.path] = route
else:
if route.method == 'GET':
self._get_dynamic.append(route)
if route.method == 'POST':
self._post_dynamic.append(route)
logging.info('Add route: %s' % str(route))
def add_interceptor(self, func):
"""
        Add an interceptor.
"""
self._check_not_running()
self._interceptors.append(func)
logging.info('Add interceptor: %s' % str(func))
def run(self, port=9000, host='127.0.0.1'):
"""
        Start the WSGI server bundled with Python (wsgiref).
"""
from wsgiref.simple_server import make_server
logging.info('application (%s) will start at %s:%s...' % (self._document_root, host, port))
server = make_server(host, port, self.get_wsgi_application(debug=True))
server.serve_forever()
def get_wsgi_application(self, debug=False):
self._check_not_running()
if debug:
self._get_dynamic.append(StaticFileRoute())
self._running = True
_application = Dict(document_root=self._document_root)
def fn_route():
request_method = ctx.request.request_method
path_info = ctx.request.path_info
if request_method == 'GET':
fn = self._get_static.get(path_info, None)
if fn:
return fn()
for fn in self._get_dynamic:
args = fn.match(path_info)
if args:
return fn(*args)
raise HttpError.notfound()
if request_method == 'POST':
fn = self._post_static.get(path_info, None)
if fn:
return fn()
for fn in self._post_dynamic:
args = fn.match(path_info)
if args:
return fn(*args)
raise HttpError.notfound()
raise HttpError.badrequest()
fn_exec = _build_interceptor_chain(fn_route, *self._interceptors)
def wsgi(env, start_response):
"""
            The WSGI handler function.
"""
ctx.application = _application
ctx.request = Request(env)
response = ctx.response = Response()
try:
r = fn_exec()
if isinstance(r, Template):
r = self._template_engine(r.template_name, r.model)
if isinstance(r, unicode):
r = r.encode('utf-8')
if r is None:
r = []
start_response(response.status, response.headers)
return r
except _RedirectError, e:
response.set_header('Location', e.location)
start_response(e.status, response.headers)
return []
except HttpError, e:
start_response(e.status, response.headers)
return ['<html><body><h1>', e.status, '</h1></body></html>']
except Exception, e:
logging.exception(e)
if not debug:
start_response('500 Internal Server Error', [])
return ['<html><body><h1>500 Internal Server Error</h1></body></html>']
exc_type, exc_value, exc_traceback = sys.exc_info()
fp = StringIO()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=fp)
stacks = fp.getvalue()
fp.close()
start_response('500 Internal Server Error', [])
return [
r'''<html><body><h1>500 Internal Server Error</h1><div style="font-family:Monaco, Menlo, Consolas, 'Courier New', monospace;"><pre>''',
                    stacks.replace('<', '&lt;').replace('>', '&gt;'),
'</pre></div></body></html>']
finally:
del ctx.application
del ctx.request
del ctx.response
return wsgi
if __name__ == '__main__':
sys.path.append('.')
import doctest
doctest.testmod()
|
[
"719118794@qq.com"
] |
719118794@qq.com
|
da4534207fbc24e56d3c2408862c3063b04a07fc
|
c2d018005ea56960a23faf173e4999bf3802eff6
|
/todo_app/settings.py
|
32db07f5ce7985623784e775068493a9dc80417f
|
[] |
no_license
|
AhmedMoustafaa/to-do-app
|
fb3a8c5188715559440b17649c0dba063f5cebd3
|
d9486f230dd93564b73a41e10f880718792aabd3
|
refs/heads/master
| 2023-08-12T02:31:40.391955
| 2020-06-07T16:13:31
| 2020-06-07T16:13:31
| 270,361,568
| 0
| 0
| null | 2021-09-22T19:14:31
| 2020-06-07T16:16:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
"""
Django settings for todo_app project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j!h1e6_v68(_xfc19mtq49c!-c62g1w^^3un6wqhmc6qv6da96'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# local apps
'todo.apps.TodoConfig',
'users.apps.UsersConfig',
# third parties
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
|
[
"brian.laggy@gmail.com"
] |
brian.laggy@gmail.com
|
6a6aa3aae9981033e00458a70b5c856684bf33a6
|
8276a999272873c655b3a7c2bed0f3fb50a9c029
|
/Google_Cloud_Vision_Cv2.py
|
99045e6e7dfef858380f5bf4a50e24a51556ea74
|
[] |
no_license
|
varul29/Google_Cloud_Vision
|
589cce47e34df6eaffdd63607a4a46d57b815f28
|
1c0801fdc936d85caa197b6823aaba39dfcd57b8
|
refs/heads/master
| 2020-05-25T06:19:34.665203
| 2019-05-20T15:27:36
| 2019-05-20T15:27:36
| 187,666,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
import io
import os
import cv2
from google.cloud import vision
def non_ascii(text):
return ''.join(i if ord(i) < 128 and i.isalnum() else '' for i in text).strip()
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'apikey.json'
# The name of the image file to annotate (change the filename in the line below to your own image)
path = os.path.join(os.path.dirname(__file__), '3.jpg') # Your image path from current directory
client = vision.ImageAnnotatorClient()
image = cv2.imread(path)
# Encode the frame using CV2 functions
success, encoded_image = cv2.imencode('.jpg', image)
content2 = encoded_image.tobytes()
# OCR Image to text process
image_cv2 = vision.types.Image(content=content2)
response = client.text_detection(image=image_cv2)
texts = response.text_annotations
full_text = non_ascii(response.full_text_annotation.text)  # use the helper defined above on the full OCR text
print('Full Text:', full_text)
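# Illustrative addition (not in the original script): `texts` above holds one EntityAnnotation
# per detected block -- texts[0] is the whole text, the remaining entries are individual words,
# each with a `description` and a `bounding_poly` of vertices.
for annotation in texts[1:]:
    vertices = [(v.x, v.y) for v in annotation.bounding_poly.vertices]
    print('Word:', non_ascii(annotation.description), 'at', vertices)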
|
[
"noreply@github.com"
] |
varul29.noreply@github.com
|
2acbc2e004d4d067218de078794ec2dd281455fd
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_sql_container_create_update.py
|
4eb9b7c581d3ad5045f9f14afe3e0ab5a7f5f6c1
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,434
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cosmosdb
# USAGE
python cosmos_db_sql_container_create_update.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = CosmosDBManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.sql_resources.begin_create_update_sql_container(
resource_group_name="rg1",
account_name="ddb1",
database_name="databaseName",
container_name="containerName",
create_update_sql_container_parameters={
"location": "West US",
"properties": {
"options": {},
"resource": {
"clientEncryptionPolicy": {
"includedPaths": [
{
"clientEncryptionKeyId": "keyId",
"encryptionAlgorithm": "AEAD_AES_256_CBC_HMAC_SHA256",
"encryptionType": "Deterministic",
"path": "/path",
}
],
"policyFormatVersion": 2,
},
"conflictResolutionPolicy": {"conflictResolutionPath": "/path", "mode": "LastWriterWins"},
"defaultTtl": 100,
"id": "containerName",
"indexingPolicy": {
"automatic": True,
"excludedPaths": [],
"includedPaths": [
{
"indexes": [
{"dataType": "String", "kind": "Range", "precision": -1},
{"dataType": "Number", "kind": "Range", "precision": -1},
],
"path": "/*",
}
],
"indexingMode": "consistent",
},
"partitionKey": {"kind": "Hash", "paths": ["/AccountNumber"]},
"uniqueKeyPolicy": {"uniqueKeys": [{"paths": ["/testPath"]}]},
},
},
"tags": {},
},
).result()
print(response)
# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/stable/2023-04-15/examples/CosmosDBSqlContainerCreateUpdate.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
b6a99fdefc842c281a110c3f4728fbd2907c0806
|
eb011bbc2e7f572d10b29c78f417645fc9eef247
|
/deepchem/models/torch_models/pagtn.py
|
340ac011ec73de75afa60a169fa6ce822d899dd0
|
[
"MIT"
] |
permissive
|
tianqin91/deepchem
|
1cc81310101e5ac4e9886db6ad97820a54bba61f
|
9c36987f735af3ebf602247ddf06c575ede85d44
|
refs/heads/master
| 2021-08-02T07:55:41.828374
| 2021-07-29T19:02:47
| 2021-07-29T19:02:47
| 191,236,748
| 0
| 0
|
MIT
| 2019-06-10T19:57:40
| 2019-06-10T19:57:40
| null |
UTF-8
|
Python
| false
| false
| 10,834
|
py
|
"""
DGL-based PAGTN for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class Pagtn(nn.Module):
"""Model for Graph Property Prediction
This model proceeds as follows:
* Update node representations in graphs with a variant of GAT, where a
linear additive form of attention is applied. Attention weights are derived
by concatenating the node and edge features for each bond.
* Update node representations with multiple rounds of message passing.
* Each layer has residual connections with its previous layer.
* The final molecular representation is computed by combining the representations
of all nodes in the molecule.
* Perform the final prediction using a linear layer
Examples
--------
>>> import deepchem as dc
>>> import dgl
>>> from deepchem.models import Pagtn
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
>>> graphs = featurizer.featurize(smiles)
>>> print(type(graphs[0]))
<class 'deepchem.feat.graph_data.GraphData'>
>>> dgl_graphs = [graphs[i].to_dgl_graph() for i in range(len(graphs))]
>>> batch_dgl_graph = dgl.batch(dgl_graphs)
>>> model = Pagtn(n_tasks=1, mode='regression')
>>> preds = model(batch_dgl_graph)
>>> print(type(preds))
<class 'torch.Tensor'>
>>> preds.shape == (2, 1)
True
References
----------
.. [1] Benson Chen, Regina Barzilay, Tommi Jaakkola. "Path-Augmented
Graph Transformer Network." arXiv:1905.12712
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
number_atom_features: int = 94,
number_bond_features: int = 42,
mode: str = 'regression',
n_classes: int = 2,
output_node_features: int = 256,
hidden_features: int = 32,
num_layers: int = 5,
num_heads: int = 1,
dropout: float = 0.1,
nfeat_name: str = 'x',
efeat_name: str = 'edge_attr',
pool_mode: str = 'sum'):
"""
Parameters
----------
n_tasks: int
Number of tasks.
number_atom_features : int
Size for the input node features. Default to 94.
number_bond_features : int
Size for the input edge features. Default to 42.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
output_node_features : int
Size for the output node features in PAGTN layers. Default to 256.
hidden_features : int
Size for the hidden node features in PAGTN layers. Default to 32.
num_layers : int
Number of PAGTN layers to be applied. Default to 5.
num_heads : int
Number of attention heads. Default to 1.
dropout : float
The probability for performing dropout. Default to 0.1
nfeat_name: str
For an input graph ``g``, the model assumes that it stores node features in
``g.ndata[nfeat_name]`` and will retrieve input node features from that.
Default to 'x'.
efeat_name: str
For an input graph ``g``, the model assumes that it stores edge features in
``g.edata[efeat_name]`` and will retrieve input edge features from that.
Default to 'edge_attr'.
pool_mode : 'max' or 'mean' or 'sum'
Whether to compute elementwise maximum, mean or sum of the node representations.
"""
try:
import dgl
except:
raise ImportError('This class requires dgl.')
try:
import dgllife
except:
raise ImportError('This class requires dgllife.')
if mode not in ['classification', 'regression']:
raise ValueError("mode must be either 'classification' or 'regression'")
super(Pagtn, self).__init__()
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.nfeat_name = nfeat_name
self.efeat_name = efeat_name
if mode == 'classification':
out_size = n_tasks * n_classes
else:
out_size = n_tasks
from dgllife.model import PAGTNPredictor as DGLPAGTNPredictor
self.model = DGLPAGTNPredictor(
node_in_feats=number_atom_features,
node_out_feats=output_node_features,
node_hid_feats=hidden_features,
edge_feats=number_bond_features,
depth=num_layers,
nheads=num_heads,
dropout=dropout,
n_tasks=out_size,
mode=pool_mode)
def forward(self, g):
"""Predict graph labels
Parameters
----------
g: DGLGraph
A DGLGraph for a batch of graphs. It stores the node features in
``dgl_graph.ndata[self.nfeat_name]`` and edge features in
``dgl_graph.edata[self.efeat_name]``.
Returns
-------
torch.Tensor
The model output.
* When self.mode = 'regression',
its shape will be ``(dgl_graph.batch_size, self.n_tasks)``.
* When self.mode = 'classification', the output consists of probabilities
for classes. Its shape will be
``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1;
its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1.
torch.Tensor, optional
This is only returned when self.mode = 'classification', the output consists of the
logits for classes before softmax.
"""
node_feats = g.ndata[self.nfeat_name]
edge_feats = g.edata[self.efeat_name]
out = self.model(g, node_feats, edge_feats)
if self.mode == 'classification':
if self.n_tasks == 1:
logits = out.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
else:
return out
class PagtnModel(TorchModel):
"""Model for Graph Property Prediction.
This model proceeds as follows:
* Update node representations in graphs with a variant of GAT, where a
linear additive form of attention is applied. Attention weights are derived
by concatenating the node and edge features for each bond.
* Update node representations with multiple rounds of message passing.
* Each layer has residual connections with its previous layer.
* The final molecular representation is computed by combining the representations
of all nodes in the molecule.
* Perform the final prediction using a linear layer
Examples
--------
>>>
>> import deepchem as dc
>> from deepchem.models import PagtnModel
>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
>> tasks, datasets, transformers = dc.molnet.load_tox21(
.. reload=False, featurizer=featurizer, transformers=[])
>> train, valid, test = datasets
>> model = PagtnModel(mode='classification', n_tasks=len(tasks),
.. batch_size=16, learning_rate=0.001)
>> model.fit(train, nb_epoch=50)
References
----------
.. [1] Benson Chen, Regina Barzilay, Tommi Jaakkola. "Path-Augmented
Graph Transformer Network." arXiv:1905.12712
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
number_atom_features: int = 94,
number_bond_features: int = 42,
mode: str = 'regression',
n_classes: int = 2,
output_node_features: int = 256,
hidden_features: int = 32,
num_layers: int = 5,
num_heads: int = 1,
dropout: float = 0.1,
pool_mode: str = 'sum',
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
number_atom_features : int
Size for the input node features. Default to 94.
number_bond_features : int
Size for the input edge features. Default to 42.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
output_node_features : int
Size for the output node features in PAGTN layers. Default to 256.
hidden_features : int
Size for the hidden node features in PAGTN layers. Default to 32.
num_layers: int
Number of graph neural network layers, i.e. number of rounds of message passing.
Default to 5.
num_heads : int
Number of attention heads. Default to 1.
dropout: float
Dropout probability. Default to 0.1
pool_mode : 'max' or 'mean' or 'sum'
Whether to compute elementwise maximum, mean or sum of the node representations.
kwargs
This can include any keyword argument of TorchModel.
"""
model = Pagtn(
n_tasks=n_tasks,
number_atom_features=number_atom_features,
number_bond_features=number_bond_features,
mode=mode,
n_classes=n_classes,
output_node_features=output_node_features,
hidden_features=hidden_features,
num_layers=num_layers,
num_heads=num_heads,
dropout=dropout,
pool_mode=pool_mode)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(PagtnModel, self).__init__(
model, loss=loss, output_types=output_types, **kwargs)
def _prepare_batch(self, batch):
"""Create batch data for Pagtn.
Parameters
----------
batch: tuple
The tuple is ``(inputs, labels, weights)``.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: list of torch.Tensor or None
The graph labels.
weights: list of torch.Tensor or None
The weights for each sample or sample/task pair converted to torch.Tensor.
"""
try:
import dgl
except:
raise ImportError('This class requires dgl.')
inputs, labels, weights = batch
dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(PagtnModel, self)._prepare_batch(([], labels,
weights))
return inputs, labels, weights
|
[
"mvenkataraman@ph.iitr.ac.in"
] |
mvenkataraman@ph.iitr.ac.in
|
2db64ec71071efedc4af263b7ea7732384d88f4b
|
25795fef5bc22080645b8e549da924cb7025526f
|
/app/forms.py
|
4d2f3b6ae9d04ec86f7da110268f4c6cf9b3152d
|
[] |
no_license
|
ryanermita/best-route
|
a0a68c4c9572ce73161109f198e301aaa307aab1
|
3480fd356e20d27cdd85397fea5960f4e69b4c44
|
refs/heads/master
| 2021-01-18T14:59:01.940788
| 2013-12-21T03:43:59
| 2013-12-21T03:43:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
from flask_wtf import Form
from wtforms import TextField, TextAreaField
from wtforms.validators import DataRequired
class SearchForm(Form):
departure_place = TextField('departure_place', validators = [DataRequired()])
destination_place = TextField('destination_place', validators = [DataRequired()])
class SignUpForm(Form):
username = TextField('username', validators = [DataRequired()])
email = TextField('email', validators = [DataRequired()])
pwd = TextField('pwd', validators = [DataRequired()])
class LoginForm(Form):
username = TextField('username', validators = [DataRequired()])
pwd = TextField('pwd', validators = [DataRequired()])
class AddRouteForm(Form):
suggested_route = TextAreaField('suggested_route', validators = [DataRequired()])
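# Illustrative sketch (not part of the original app): a typical Flask view consuming
# SearchForm; the route, endpoint and template names below are hypothetical.
#
# @app.route('/search', methods=['GET', 'POST'])
# def search():
#     form = SearchForm()
#     if form.validate_on_submit():
#         return redirect(url_for('results',
#                                 departure=form.departure_place.data,
#                                 destination=form.destination_place.data))
#     return render_template('search.html', form=form)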
|
[
"ryanermita@gmail.com"
] |
ryanermita@gmail.com
|
cccd15128c4434b0606787b763c34be908546eb4
|
fe34bc1f4177753b26cfe48d38f93739dc2439c6
|
/unpickler/_nbdev.py
|
3f0f417dd44f1cc6e01032b3226c18edc27f02ef
|
[
"Apache-2.0"
] |
permissive
|
muellerzr/unpickler
|
0d9a4cc17cd8f4cf11d40775efa4624e866158cb
|
94c464abe8463f25f4b89d3770cfdfd347d87d83
|
refs/heads/master
| 2023-02-02T20:05:42.990577
| 2020-12-16T00:34:23
| 2020-12-16T00:34:23
| 321,793,188
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"get_files": "00_core.ipynb",
"UnpicklerModule": "00_core.ipynb"}
modules = ["core.py"]
doc_url = "https://muellerzr.github.io/unpickler/"
git_url = "https://github.com/muellerzr/unpickler/tree/master/"
def custom_doc_links(name): return None
|
[
"muellerzr@gmail.com"
] |
muellerzr@gmail.com
|
1247de660c728a5f32d9fabdfa9b10b2947e596d
|
3a9cee71d23cfa7176e29beb9a9e509674c0bfd9
|
/6.2.0_201312041840_apitraderapi_linux64/test3/test4/scanConf.py
|
ecb31285adb61571a54465ea5e8bb06730d53a71
|
[] |
no_license
|
fuckfuckfuckfuck/download
|
4041fba8de20a267aa001e363a005098bb93fb0e
|
e67c22dab648f9bc1ebca444785401f63f0cc2dc
|
refs/heads/master
| 2021-01-10T11:11:10.708805
| 2015-12-05T13:18:52
| 2015-12-05T13:18:52
| 47,456,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
import sys
import re
#~ config = {
#~ 'user' : 'botel',
#~ 'password': '12345678',
#~ 'database' : 'mktinfo',
#~ 'host' : 'rdsryfjb2vqrni2.mysql.rds.aliyuncs.com',
#~ 'charset' : 'utf8',
#~ 'raise_on_warnings' : True
#~ }
Handler = {
'user' : str,
'password' : str,
'database' : str,
'host' : str,
'charset' : str,
'port' : str,
'raise_on_warnings' : bool
}
dir = '/home/dell/Downloads/6.2.0_201312041840_apitraderapi_linux64/test3/test4/'
def scanParam(fileStr):
#~ reader = open(sys.argv[1], 'r')
# fileStr = dir + fileStr
reader = open(fileStr,'r')
param = {}
for line in reader:
tmp = re.search('[%\[\]]',line) #
if tmp:
print tmp.group()
continue
line = line.split('#')[0].strip()
if not line:
continue
name, value = line.split()
if name not in Handler:
print >> sys.stderr, 'Bad parameter name "%s"' % name
sys.exit(1)
if name in param:
print >> sys.stderr, 'Duplicate parameter name "%s"' % name
sys.exit(1)
conversion_func = Handler[name]
param[name] = conversion_func(value)
return param
# file = 'conf'
# scanedParams = scanParam(dir + file)
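# Illustrative conf-file format expected by scanParam() (values taken from the commented-out
# config above): blank lines and '#' comments are ignored, lines containing %, [ or ] are
# skipped, and every other line must be "<name> <value>" with <name> a key of Handler.
#
# user botel
# password 12345678
# database mktinfo
# host rdsryfjb2vqrni2.mysql.rds.aliyuncs.com
# charset utf8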
|
[
"wchongyang@foxmail.com"
] |
wchongyang@foxmail.com
|
b10a7d8f06eea9e1ba7d3bd0fad062389e44d262
|
096ecb1ae95b3bcfd002480415a04c5191e01419
|
/ttbc.py
|
3973b4f37e4719b025aea65d6b2b2d4d96188280
|
[
"Apache-2.0"
] |
permissive
|
iamaris/pystock
|
7f3c955977c662e384f23f3113c0c5ac8fc3f4ff
|
864f8beba0cf50a7a4f52bf7c67e83fdfd774a9c
|
refs/heads/master
| 2021-01-25T07:08:14.604437
| 2015-09-07T17:17:48
| 2015-09-07T17:17:48
| 16,152,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
import urllib
import pandas as pd
import pandas.io.data as web
#from datetime import datetime
import matplotlib.pyplot as plt
import pickle as pk
from pandas.tseries.offsets import BDay
# pd.datetime is an alias for datetime.datetime
#today = pd.datetime.today()
import time
#time.sleep(5) # delays for 5 seconds
#today = pd.datetime.today()
today = pd.datetime.today()
yesterday = today - BDay(5000)
p = web.DataReader("SPY", "yahoo",yesterday,today)
#p = web.DataReader("YELP", "yahoo",yesterday,today)
#print p.head()
#print p.tail()
#print len(p)
up = 0
down = 0
N = 0
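# Count, over the downloaded SPY history, how often a third session also closes below its
# open after two consecutive "down" sessions (Open > Close), then report the up/down split.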
for i in range(len(p)-3):
if p.at[p.index[i],'Open'] > p.at[p.index[i],'Close']:
if p.at[p.index[i+1],'Open'] > p.at[p.index[i+1],'Close']:
N = N + 1
if p.at[p.index[i+2],'Open'] >= p.at[p.index[i+2],'Close']:
down = down + 1
else:
up = up + 1
print "total = ",N
print "up = ",up,"(",float(up)/N,")"
print "down = ",down,"(",float(down)/N,")"
|
[
"aris@cmu.edu"
] |
aris@cmu.edu
|
6f87b92696de2420ba9b14956ac1d08db4e16a86
|
bc6c0cda914c23e80921793eb0ce71c45202ada4
|
/src/endoexport/export.py
|
66f3970d48311c18dc3f984c553dd2e423f77298
|
[
"MIT"
] |
permissive
|
karlicoss/endoexport
|
a2221799113a12b400e298dea8d95559926de138
|
98c8805cbcc00187822737ef32c2e0434c4f450e
|
refs/heads/master
| 2023-04-04T09:56:57.716411
| 2023-03-15T02:19:15
| 2023-03-15T02:22:45
| 230,617,833
| 3
| 0
|
MIT
| 2023-03-15T02:22:46
| 2019-12-28T14:05:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
#!/usr/bin/env python3
import argparse
import json
from .exporthelpers.export_helper import Json
import endoapi
def get_json(**params) -> Json:
endomondo = endoapi.endomondo.Endomondo(**params)
maximum_workouts = None # None means all
workouts = endomondo.get_workouts_raw(maximum_workouts)
return workouts
Token = str
def login(email: str) -> Token:
print(f"Logging in as {email}")
password = input('Your password: ')
endomondo = endoapi.endomondo.Endomondo(email=email, password=password)
token = endomondo.token
print('Your token:')
print(token)
return token
def make_parser():
from .exporthelpers.export_helper import setup_parser, Parser
parser = Parser("Tool to export your personal Endomondo data")
setup_parser(parser=parser, params=['email', 'token']) # TODO exports -- need help for each param?
parser.add_argument('--login', action='store_true', help='''
This will log you in and give you the token (you'll need your password).
You only need to do it once, after that just store the token and use it.
''')
return parser
def main() -> None:
# TODO add logger configuration to export_helper?
# TODO autodetect logzero?
args = make_parser().parse_args()
params = args.params
dumper = args.dumper
if args.login:
login(email=params['email'])
return
j = get_json(**params)
js = json.dumps(j, indent=1, ensure_ascii=False)
dumper(js)
if __name__ == '__main__':
main()
|
[
"karlicoss@gmail.com"
] |
karlicoss@gmail.com
|
98c8776b814a794b9aa792a3f3fc1eb2fc895e1c
|
ce6c84c99fc6efa26faececb6aa637b15d417271
|
/SUB/lib/automl.py
|
a5095e2c0eba303d43d381f6a5cf4224d72f1715
|
[] |
no_license
|
freekode1ko/DataScienceJourney
|
879568b084177e2518875b03c4bcec09178ecf3b
|
9eb9c63135b8682ee3cddf2500c388af989627ed
|
refs/heads/master
| 2020-04-20T22:27:24.910331
| 2019-02-04T20:03:20
| 2019-02-04T20:03:20
| 169,140,055
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
import os
import pandas as pd
import numpy as np
from lib.util import timeit, Config
from lib.read import read_df
from lib.preprocess import preprocess
from lib.model import train, predict, validate
from typing import Optional
class AutoML:
def __init__(self, model_dir: str):
os.makedirs(model_dir, exist_ok=True)
self.config = Config(model_dir)
def train(self, train_csv: str, mode: str):
self.config["task"] = "train"
self.config["mode"] = mode
self.config.tmp_dir = self.config.model_dir + "/tmp"
os.makedirs(self.config.tmp_dir, exist_ok=True)
df = read_df(train_csv, self.config)
preprocess(df, self.config)
y = df["target"]
X = df.drop("target", axis=1)
train(X, y, self.config)
def predict(self, test_csv: str, prediction_csv: str) -> (pd.DataFrame, Optional[np.float64]):
self.config["task"] = "predict"
self.config.tmp_dir = os.path.dirname(prediction_csv) + "/tmp"
os.makedirs(self.config.tmp_dir, exist_ok=True)
result = {
"line_id": [],
"prediction": [],
}
for X in pd.read_csv(
test_csv,
encoding="utf-8",
low_memory=False,
dtype=self.config["dtype"],
parse_dates=self.config["parse_dates"],
chunksize=self.config["nrows"]
):
result["line_id"] += list(X["line_id"])
preprocess(X, self.config)
result["prediction"] += list(predict(X, self.config))
result = pd.DataFrame(result)
result.to_csv(prediction_csv, index=False)
target_csv = test_csv.replace("test", "test-target")
if os.path.exists(target_csv):
score = validate(result, target_csv, self.config["mode"])
else:
score = None
return result, score
@timeit
def save(self):
self.config.save()
@timeit
def load(self):
self.config.load()
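# Illustrative usage sketch (not part of the original module; paths and the mode string are
# examples only -- valid mode values are whatever train()/validate() expect):
#
#     automl = AutoML("models/example")
#     automl.train("data/train.csv", mode="regression")
#     automl.save()
#
#     automl.load()
#     prediction, score = automl.predict("data/test.csv", "data/prediction.csv")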
|
[
"noreply@github.com"
] |
freekode1ko.noreply@github.com
|