blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dfdfbaa0434a4a1fbf78dda2a901886803734fe3
|
a1b6744ce474a3f611f9c80580f8846968a8e6fd
|
/utils/data.py
|
97ec7f07acb3328091cb74e9707be1dec3cacab7
|
[] |
no_license
|
connectomicslab/connectome_spectral_analysis
|
35c2a451374b33d56d393aad872c6310bdbc2ed4
|
9ce1ed29456a85d7fcfd59a04fb73caccd0513db
|
refs/heads/main
| 2023-08-13T11:52:04.139735
| 2021-09-24T06:25:27
| 2021-09-24T06:25:27
| 430,620,725
| 1
| 0
| null | 2021-11-22T08:22:04
| 2021-11-22T08:22:04
| null |
UTF-8
|
Python
| false
| false
| 8,113
|
py
|
import os
import numpy as np
import pandas as pd
import pygsp
import scipy
import scipy.io
import os
class Connectome(pygsp.graphs.Graph):
    """Structural-connectome graph with a normalized Laplacian.

    Thin wrapper around ``pygsp.graphs.Graph`` that eigendecomposes the
    Laplacian at construction time so the Fourier basis is immediately
    available to callers.
    """

    def __init__(self, sc_file):
        # Build the graph from the adjacency matrix, then precompute U/e.
        super().__init__(sc_file, lap_type='normalized')
        self.compute_fourier_basis()
def loader_sc(scale, datadir, sc_type='num'):
    """Load the structural connectivity (SC) graph for one atlas scale.

    Parameters
    ----------
    scale : int
        Lausanne2008 atlas scale; selects the ``SCALE {n}`` sheet of the
        ROI spreadsheet.
    datadir : str
        Root data directory holding the ROI spreadsheet and ``SC_betzel``.
    sc_type : {'num', 'len'}
        'num' -> log-transformed streamline counts;
        'len' -> inverse max-normalized fiber lengths.

    Returns
    -------
    Connectome
        Graph restricted to cortical ROIs, Fourier basis precomputed.

    Raises
    ------
    ValueError
        For an unrecognized ``sc_type`` (previously this fell through and
        surfaced as a confusing NameError on ``sc_file``).
    """
    # Cortical ROI indices from the atlas description sheet.
    roifname = os.path.join(datadir, 'Lausanne2008_Yeo7RSNs.xlsx')
    roidata = pd.read_excel(roifname, sheet_name='SCALE {}'.format(scale))
    cort = np.where(roidata['Structure'] == 'cort')[0]

    if sc_type == 'len':
        sc_fname = os.path.join(datadir, 'SC_betzel',
                                'SC_len_betzel_scale_{}.mat'.format(scale))
        sc_file = scipy.io.loadmat(sc_fname)['dist'][cort][:, cort]
        # Invert max-normalized lengths so shorter fibers get larger weights.
        sc_file[sc_file != 0] = 1 / ((sc_file[sc_file != 0]) / np.max(sc_file))
    elif sc_type == 'num':
        sc_fname = os.path.join(datadir, 'SC_betzel',
                                'SC_num_betzel_scale_{}.mat'.format(scale))
        sc_file = scipy.io.loadmat(sc_fname)['num'][cort][:, cort]
        # Log-compress streamline counts, then zero out NaN/inf artifacts.
        sc_file[sc_file != 0] = np.log(sc_file[sc_file != 0])
        sc_file[np.isnan(sc_file)] = 0
        sc_file[np.isinf(sc_file)] = 0
    else:
        raise ValueError("sc_type must be 'num' or 'len', got {!r}".format(sc_type))
    return Connectome(sc_file)
def loader_sc_surrogates(sc_rand_dir):
    """Load every surrogate SC matrix in *sc_rand_dir* as a Connectome.

    The .npy files are expected to be already log-normalized with zeroed
    diagonals, so no preprocessing is applied here.
    """
    return [Connectome(np.load(os.path.join(sc_rand_dir, fname)))
            for fname in os.listdir(sc_rand_dir)]
def loader_sc_surrogates_geometry_preserving(sc_rand_dir, scale, datadir):
    """Load geometry-preserving SC surrogates (Wwp/Wsp/Wssp variants).

    Each ``rand_SC_*`` .mat file holds one surrogate adjacency matrix,
    restricted here to cortical ROIs and log-normalized before being
    wrapped in a pygsp graph with a precomputed Fourier basis.

    Returns
    -------
    tuple of list
        (wwp_graphs, wsp_graphs, wssp_graphs), one list per variant.
    """
    # Cortical ROI indices from the atlas description sheet.
    roifname = os.path.join(datadir, 'Lausanne2008_Yeo7RSNs.xlsx')
    roidata = pd.read_excel(roifname, sheet_name='SCALE {}'.format(scale))
    cort = np.where(roidata['Structure'] == 'cort')[0]
    sc_fname_list = [elem for elem in os.listdir(sc_rand_dir) if elem.startswith('rand_SC_')]

    def _load_variant(suffix, key):
        # One graph per surrogate file ending in *suffix*; `key` is the
        # matching variable name inside the .mat file.
        graphs = []
        for sc_fname in (e for e in sc_fname_list if e.endswith(suffix)):
            sc_file = scipy.io.loadmat(os.path.join(sc_rand_dir, sc_fname))[key]
            sc_file = sc_file[cort][:, cort]
            # Log-compress weights, then zero out NaN/inf artifacts.
            sc_file[sc_file != 0] = np.log(sc_file[sc_file != 0])
            sc_file[np.isnan(sc_file)] = 0
            sc_file[np.isinf(sc_file)] = 0
            sc = pygsp.graphs.Graph(sc_file, lap_type='normalized')
            sc.compute_fourier_basis()
            graphs.append(sc)
        return graphs

    # Previously this body was copy-pasted three times; the helper keeps
    # the three variants guaranteed-identical in behavior.
    return (_load_variant('Wwp.mat', 'Wwp'),
            _load_variant('Wsp.mat', 'Wsp'),
            _load_variant('Wssp.mat', 'Wssp'))
def loader_sc_non_norm(scale, datadir, sc_type='num'):
    """Load the raw (non-normalized) SC graph for one atlas scale.

    Same file layout as ``loader_sc`` but the matrix weights are left
    untransformed (no log / inverse normalization).

    Raises
    ------
    ValueError
        For an unrecognized ``sc_type`` (previously ``sc_file`` stayed
        undefined and a NameError surfaced instead).
    """
    # Cortical ROI indices from the atlas description sheet.
    roifname = os.path.join(datadir, 'Lausanne2008_Yeo7RSNs.xlsx')
    roidata = pd.read_excel(roifname, sheet_name='SCALE {}'.format(scale))
    cort = np.where(roidata['Structure'] == 'cort')[0]

    if sc_type == 'len':
        sc_fname = os.path.join(datadir, 'SC_betzel',
                                'SC_len_betzel_scale_{}.mat'.format(scale))
        sc_file = scipy.io.loadmat(sc_fname)['dist'][cort][:, cort]
    elif sc_type == 'num':
        sc_fname = os.path.join(datadir, 'SC_betzel',
                                'SC_num_betzel_scale_{}.mat'.format(scale))
        sc_file = scipy.io.loadmat(sc_fname)['num'][cort][:, cort]
    else:
        raise ValueError("sc_type must be 'num' or 'len', got {!r}".format(sc_type))
    return Connectome(sc_file)
def loader_sc_surrogates_non_norm(sc_rand_dir):
    """Load all surrogate SC matrices in *sc_rand_dir* as Connectome graphs.

    The .npy files are already log-normalized with zeroed diagonals, so
    they are wrapped as-is.
    """
    graphs = []
    for entry in os.listdir(sc_rand_dir):
        matrix = np.load(os.path.join(sc_rand_dir, entry))
        graphs.append(Connectome(matrix))
    return graphs
def loader_sc_surrogates_geometry_preserving_non_norm(sc_rand_dir, scale, datadir):
    """Load geometry-preserving SC surrogates without weight normalization.

    Same as ``loader_sc_surrogates_geometry_preserving`` but the raw matrix
    weights are kept (no log transform, no NaN/inf cleanup).

    Returns
    -------
    tuple of list
        (wwp_graphs, wsp_graphs, wssp_graphs), one list per variant.
    """
    # Cortical ROI indices from the atlas description sheet.
    roifname = os.path.join(datadir, 'Lausanne2008_Yeo7RSNs.xlsx')
    roidata = pd.read_excel(roifname, sheet_name='SCALE {}'.format(scale))
    cort = np.where(roidata['Structure'] == 'cort')[0]
    sc_fname_list = [elem for elem in os.listdir(sc_rand_dir) if elem.startswith('rand_SC_')]

    def _load_variant(suffix, key):
        # One graph per surrogate file ending in *suffix*; `key` is the
        # matching variable name inside the .mat file. Raw weights kept.
        graphs = []
        for sc_fname in (e for e in sc_fname_list if e.endswith(suffix)):
            sc_file = scipy.io.loadmat(os.path.join(sc_rand_dir, sc_fname))[key]
            sc_file = sc_file[cort][:, cort]
            sc = pygsp.graphs.Graph(sc_file, lap_type='normalized')
            sc.compute_fourier_basis()
            graphs.append(sc)
        return graphs

    # Previously this body was copy-pasted three times; the helper keeps
    # the three variants guaranteed-identical in behavior.
    return (_load_variant('Wwp.mat', 'Wwp'),
            _load_variant('Wsp.mat', 'Wsp'),
            _load_variant('Wssp.mat', 'Wssp'))
class TsGenerator:
    """Loads per-subject EEG time courses and trial condition labels.

    Parameters
    ----------
    subject_list : sequence
        Subject numbers; zero-padded to two digits in file names.
    scale : int
        Atlas scale used in the time-course directory name.
    dataset : str
        Either 'Faces' or 'Motion'; selects the behavioral file scheme.
    datadir : str
        Root data directory.
    tvec_analysis, tvec_pre : sequence of int
        Sample indices for the analysis window and pre-stimulus window.
    """

    def __init__(self, subject_list, scale, dataset, datadir, tvec_analysis,
                 tvec_pre):
        self.subject_list = subject_list
        self.scale = scale
        self.datadir = datadir
        self.dataset = dataset
        self.tvec_analysis = tvec_analysis
        self.tvec_pre = tvec_pre
        self.nsub = len(subject_list)

    def loader_ts(self, subject):
        """Return (analysis-window time courses, condition labels) for one subject.

        The time courses are divided by the std of the region-mean signal
        (global amplitude normalization) and restricted to tvec_analysis.

        Raises
        ------
        ValueError
            For an unknown dataset (previously ``cond`` stayed unbound and
            the return statement raised a NameError).
        """
        sub = str(self.subject_list[subject]).zfill(2)
        tcs_1d = np.load(os.path.join(self.datadir, 'sourcedata', self.dataset,
                                      'scale{}/sub-{}.npy'.format(self.scale, sub)))
        if self.dataset == 'Faces':
            behav_fname = os.path.join(self.datadir, 'derivatives', 'eeglab',
                                       'sub-' + sub,
                                       'sub-' + sub + '_FACES_250HZ_behav.npy')
            cond = np.load(behav_fname)
            # Binary labels: 1 for FACES trials, 0 otherwise.
            cond = np.array(list(map(int, (cond == 'FACES'))))
        elif self.dataset == 'Motion':
            behav_fname = os.path.join(self.datadir, 'derivatives', 'eeglab',
                                       'sub-' + sub,
                                       'sub-' + sub + '_MOTION_250HZ_behav.npy')
            cond = np.load(behav_fname)
        else:
            raise ValueError("Unknown dataset: {!r}".format(self.dataset))
        return tcs_1d[:, self.tvec_analysis] / np.std(tcs_1d.mean(2)), cond
|
[
"joan.rue.q@gmail.com"
] |
joan.rue.q@gmail.com
|
2370ae58987abaa1d8a816bd2660abef243c5e37
|
0a839bf5256e9ed3d407d4c95cdd891d9d1b9dcb
|
/main.py
|
770a44882daa18de757950d5b05a5fd6e25ab704
|
[] |
no_license
|
cordyrichardson3/hello-flask
|
7c788401efb9ac4e9ac3d839dbbefc89b2e3bede
|
f6601e3d12ee08b54d2040713c57b8a12c48492d
|
refs/heads/master
| 2020-03-29T12:24:59.336911
| 2018-09-22T17:41:21
| 2018-09-22T17:41:21
| 149,898,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from flask import Flask

app = Flask(__name__)
# Debug mode enables the auto-reloader and interactive traceback page.
app.config['DEBUG'] = True


@app.route("/")
def index():
    # Root route: fixed greeting.
    return "Hello World"


@app.route("/cordy")
def cordy():
    # Personalized greeting route.
    return "Hello Cordy"


# Start the development server when the module is executed.
app.run()
|
[
"cordyrichardson3@gmail.com"
] |
cordyrichardson3@gmail.com
|
774bd4d78ca27980370d7a59191d4404aba8a9a8
|
d971d2b9648bd656c1a65eebd162e44bdcb14aa2
|
/segment_tree.py
|
6cedda244f50d72d8e9d177bd522371710701355
|
[] |
no_license
|
AlexandraFr/sf
|
fafa2e2510942932a6909bf2622743a7b0c9f49d
|
d822996db232ee233cdf06612a7e1e834084f623
|
refs/heads/master
| 2021-05-04T07:58:23.528216
| 2016-12-26T13:12:46
| 2016-12-26T13:12:46
| 70,736,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
import math
class SegmentTree:
    """Segment tree over a bracket string answering balanced-pair queries.

    Each node stores a Brackets summary; merging two children matches
    left-over '(' from the left child with ')' from the right child.

    NOTE(review): the driver pads the input to a power-of-two length before
    construction — the implicit-tree layout appears to assume that; confirm
    before using with other lengths.
    """

    def __init__(self, string):
        self.string = string
        self.length = len(string)
        self.segment_tree = []  # 1-based implicit tree; index 0 is unused
        self.__build(string)

    def __build(self, string):
        # Leaves occupy indices [length, 2*length); one Brackets per char.
        for i in range(2 * self.length):
            if i < self.length:
                self.segment_tree.append(Brackets(0, 0, 0))
            else:
                if string[i - len(string)] == '(':
                    self.segment_tree.append(Brackets(1, 0, 0))
                else:
                    self.segment_tree.append(Brackets(0, 1, 0))
        # Fill internal nodes bottom-up by merging the two children.
        for i in range(self.length - 1, 0, -1):
            self.segment_tree[i] = self.segment_tree[2 * i] + self.segment_tree[2 * i + 1]

    def __query(self, current, left, right, l, r):
        # Recursive range query; node `current` covers [left, right],
        # the requested range is [l, r] (0-based, inclusive).
        if l == left and r == right:
            return self.segment_tree[current]
        elif r <= (left + right) // 2:
            # Entirely inside the left child.
            return self.__query(current * 2, left, (left + right) // 2, l, r)
        elif l > (left + right) // 2:
            # Entirely inside the right child.
            return self.__query(current * 2 + 1, (left + right) // 2 + 1, right, l, r)
        else:
            # Straddles the midpoint: merge partial answers from both halves.
            return self.__query(current * 2, left, (left + right) // 2, l, (left + right) // 2) + \
                self.__query(current * 2 + 1, (left + right) // 2 + 1, right, (left + right) // 2 + 1, r)

    def query(self, l, r):
        """Return the matched-pair count for the 1-based inclusive range [l, r]."""
        q = self.__query(1, 0, len(self.string) - 1, l - 1, r - 1)
        return q.bal

    def print_tree(self):
        # Debug dump: node index followed by its Brackets summary.
        count = 0
        for e in self.segment_tree:
            print(count, 'узел', e)
            count += 1
class Brackets:
    """Summary of a bracket segment used as the segment-tree node value.

    Tracks unmatched '(' (open), unmatched ')' (close), and the number of
    matched pairs (bal) inside the segment.
    """

    def __init__(self, open, close, bal):
        self.open = open      # unmatched '(' remaining in this segment
        self.close = close    # unmatched ')' remaining in this segment
        self.bal = bal        # matched pairs fully inside this segment

    def __str__(self):
        return 'макс сбалансир: ' + str(self.bal)

    def __add__(self, other):
        # Merging two adjacent segments: left-over '(' on the left side can
        # pair with spare ')' on the right side, forming new matches.
        matched = min(self.open, other.close)
        merged_open = self.open + other.open - matched
        merged_close = self.close + other.close - matched
        return Brackets(merged_open, merged_close, self.bal + other.bal + matched)
# Demo: pad the bracket string with '(' up to the next power-of-two length
# so the implicit tree is complete, then run a few sample queries.
brackets = ')(()())'
length = 2 ** math.ceil(math.log2(len(brackets)))
brackets += (length - len(brackets)) * '('
print(brackets)
segment_tree = SegmentTree(brackets)
segment_tree.print_tree()
print(segment_tree.query(1, 5))
print(segment_tree.query(2, 6))
print(segment_tree.query(2, 7))
|
[
"noreply@github.com"
] |
AlexandraFr.noreply@github.com
|
9634736a12f454ca206ee32d5cf5ef109fc498be
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/datadog_checks_base/tests/base/utils/http/test_auth.py
|
9aaf1589eca5894c98461fbc44384200ce637cec
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 5,759
|
py
|
# (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import mock
import pytest
import requests_ntlm
from aws_requests_auth import boto_utils as requests_aws
from requests import auth as requests_auth
from datadog_checks.base import ConfigurationError
from datadog_checks.base.utils.http import RequestsWrapper
pytestmark = [pytest.mark.unit]
def test_config_default():
    """With no auth-related settings, the wrapper leaves auth unset."""
    http = RequestsWrapper({}, {})
    assert http.options['auth'] is None
def test_config_basic():
    """username + password alone produce a basic-auth tuple."""
    http = RequestsWrapper({'username': 'user', 'password': 'pass'}, {})
    assert http.options['auth'] == ('user', 'pass')
def test_config_basic_authtype():
    """Explicit auth_type=basic behaves the same as the implicit default."""
    creds = {'username': 'user', 'password': 'pass', 'auth_type': 'basic'}
    http = RequestsWrapper(creds, {})
    assert http.options['auth'] == ('user', 'pass')
def test_config_basic_no_legacy_encoding():
    """Disabling legacy encoding yields bytes credentials."""
    creds = {'username': 'user', 'password': 'pass', 'use_legacy_auth_encoding': False}
    http = RequestsWrapper(creds, {})
    assert http.options['auth'] == (b'user', b'pass')
def test_config_digest_authtype():
    """auth_type=digest builds an HTTPDigestAuth from the credentials."""
    instance = {'username': 'user', 'password': 'pass', 'auth_type': 'digest'}
    http = RequestsWrapper(instance, {})
    assert isinstance(http.options['auth'], requests_auth.HTTPDigestAuth)

    with mock.patch('datadog_checks.base.utils.http.requests_auth.HTTPDigestAuth') as m:
        RequestsWrapper(instance, {})
        m.assert_called_once_with('user', 'pass')
def test_config_basic_only_username():
    """A username without a password does not enable basic auth."""
    http = RequestsWrapper({'username': 'user'}, {})
    assert http.options['auth'] is None
def test_config_basic_only_password():
    """A password without a username does not enable basic auth."""
    http = RequestsWrapper({'password': 'pass'}, {})
    assert http.options['auth'] is None
@pytest.mark.parametrize('username,password', [('user', ''), ('', 'pass'), ('', '')])
def test_config_basic_allows_empty_strings(username, password):
    """Empty-string credentials are accepted verbatim as the auth tuple."""
    http = RequestsWrapper({'username': username, 'password': password}, {})
    assert http.options['auth'] == (username, password)
def test_config_ntlm():
    """auth_type=ntlm builds an HttpNtlmAuth from ntlm_domain/password."""
    instance = {'auth_type': 'ntlm', 'ntlm_domain': 'domain\\user', 'password': 'pass'}
    http = RequestsWrapper(instance, {})  # triggers the lazy requests_ntlm import
    assert isinstance(http.options['auth'], requests_ntlm.HttpNtlmAuth)

    with mock.patch('datadog_checks.base.utils.http.requests_ntlm.HttpNtlmAuth') as m:
        RequestsWrapper(instance, {})
        m.assert_called_once_with('domain\\user', 'pass')
def test_config_ntlm_legacy(caplog):
    """ntlm_domain without auth_type still works but logs a deprecation warning."""
    instance = {'ntlm_domain': 'domain\\user', 'password': 'pass'}
    http = RequestsWrapper(instance, {})  # triggers the lazy requests_ntlm import
    assert isinstance(http.options['auth'], requests_ntlm.HttpNtlmAuth)

    with mock.patch('datadog_checks.base.utils.http.requests_ntlm.HttpNtlmAuth') as m:
        RequestsWrapper(instance, {})
        m.assert_called_once_with('domain\\user', 'pass')

    assert (
        'The ability to use NTLM auth without explicitly setting auth_type to '
        '`ntlm` is deprecated and will be removed in Agent 8'
    ) in caplog.text
def test_config_aws():
    """auth_type=aws builds a BotoAWSRequestsAuth from host/region/service."""
    instance = {'auth_type': 'aws', 'aws_host': 'uri', 'aws_region': 'earth', 'aws_service': 'saas'}
    http = RequestsWrapper(instance, {})  # triggers the lazy aws import
    assert isinstance(http.options['auth'], requests_aws.BotoAWSRequestsAuth)

    with mock.patch('datadog_checks.base.utils.http.requests_aws.BotoAWSRequestsAuth') as m:
        RequestsWrapper(instance, {})
        m.assert_called_once_with(aws_host='uri', aws_region='earth', aws_service='saas')
def test_config_aws_service_remapper():
    """Integration-supplied remappers can default aws_service/aws_host."""
    instance = {'auth_type': 'aws', 'aws_region': 'us-east-1'}
    remapper = {
        'aws_service': {'name': 'aws_service', 'default': 'es'},
        'aws_host': {'name': 'aws_host', 'default': 'uri'},
    }

    with mock.patch('datadog_checks.base.utils.http.requests_aws.BotoAWSRequestsAuth') as m:
        RequestsWrapper(instance, {}, remapper)
        m.assert_called_once_with(aws_host='uri', aws_region='us-east-1', aws_service='es')
@pytest.mark.parametrize(
    'case, instance, match',
    [
        ('no host', {'auth_type': 'aws'}, '^AWS auth requires the setting `aws_host`$'),
        ('no region', {'auth_type': 'aws', 'aws_host': 'uri'}, '^AWS auth requires the setting `aws_region`$'),
        (
            'no service',
            {'auth_type': 'aws', 'aws_host': 'uri', 'aws_region': 'us-east-1'},
            '^AWS auth requires the setting `aws_service`$',
        ),
        ('empty host', {'auth_type': 'aws', 'aws_host': ''}, '^AWS auth requires the setting `aws_host`$'),
        (
            'empty region',
            {'auth_type': 'aws', 'aws_host': 'uri', 'aws_region': ''},
            '^AWS auth requires the setting `aws_region`$',
        ),
        (
            'empty service',
            {'auth_type': 'aws', 'aws_host': 'uri', 'aws_region': 'us-east-1', 'aws_service': ''},
            '^AWS auth requires the setting `aws_service`$',
        ),
    ],
)
def test_config_aws_invalid_cases(case, instance, match):
    """Missing or empty AWS settings raise ConfigurationError with a precise message."""
    with pytest.raises(ConfigurationError, match=match):
        RequestsWrapper(instance, {})
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
0d512c4dd24d4ff00806ba46ca226e9e7799731f
|
26c4254234d12acef48c51974fed2ddce52cf886
|
/medline_scripts/count_abstracts.py
|
6c3e73ed3d2a4686603e922820b422b28fc3135e
|
[] |
no_license
|
zelliott/maple
|
bb1d05f73ddf0e8cfa3b5e010abe10dab237f587
|
11cfe6889e22e89715f91270bd854762574a8cd6
|
refs/heads/master
| 2021-06-14T11:51:17.973248
| 2017-04-20T05:51:14
| 2017-04-20T05:51:14
| 67,707,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
#!/usr/bin/python
#$ -S /usr/bin/python
'''
This script examines either a single file or a dir of files
and counts (1) the number of medline citations (i.e. abstracts)
and (2) the number of citations with mesh headings specified.
'''
import xml.etree.ElementTree as et
import os
import operator
import sys
def processFile(filePath):
    """Count MedlineCitation elements in one Medline XML file.

    Returns a pair: (total citations, citations containing at least one
    MeshHeadingList descendant).
    """
    root = et.parse(filePath).getroot()
    citations = list(root.iter('MedlineCitation'))
    # A citation counts as "with mesh" when any MeshHeadingList appears
    # anywhere beneath it.
    withMesh = sum(1 for c in citations if c.findall('.//MeshHeadingList'))
    return len(citations), withMesh
def main():
    """Sum abstract counts over the file paths supplied on stdin (one per line).

    Prints two CSV-style lines: total abstracts and abstracts that carry
    MeSH headings.
    """
    count = 0
    countWithMesh = 0
    for f in sys.stdin:
        # Strip the trailing newline from each path.
        fileCount, fileCountWithMesh = processFile(f[:-1])
        count += fileCount
        countWithMesh += fileCountWithMesh
    # Parenthesized single-argument print is valid and identical under both
    # Python 2 and 3; the original `print x` statements were Python-2-only.
    print('abstracts, ' + str(count))
    print('abstracts with mesh headings, ' + str(countWithMesh))


if __name__ == "__main__":
    main()
|
[
"hubridnoxx@gmail.com"
] |
hubridnoxx@gmail.com
|
0684c1fa6c7c99b250db6e0daa95f8773397a7a3
|
a0230684a0781c09592032de153da91f7f002234
|
/Assignment 1/Ramp climbing karel/RampClimbingKarel.py
|
7d3fa9897f694d7b7e10f8e3bd552c3ed82607a7
|
[] |
no_license
|
pallavimahajan11/Code_in_place
|
290bb8e09eb4650fe4ff937eaabbb16e0f8b675d
|
a15c356f09f965195f7214d7a0b63ba80a6a0cc2
|
refs/heads/main
| 2023-06-04T08:01:48.381730
| 2021-06-12T19:05:57
| 2021-06-12T19:05:57
| 376,361,655
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
from karel.stanfordkarel import *
"""
File: RampClimbingKarel.py
--------------------
When you finish writing this file, RampClimbingKarel should be
able to draw a line with slope 1/2 in any odd sized world
"""
# Helper: Karel has no turn_right primitive, so turn left three times.
def turn_right():
    turn_left()
    turn_left()
    turn_left()
# One ramp step: advance, go up one row, advance again, and drop a beeper —
# producing a beeper line of slope 1/2.
def climb():
    move()
    turn_left()
    move()
    turn_right()
    move()
    put_beeper()
# Entry point: drop a beeper at the start, then climb until hitting the wall.
def main():
    put_beeper()
    while front_is_clear():
        climb()


if __name__ == '__main__':
    run_karel_program('RampKarel1.w')
|
[
"noreply@github.com"
] |
pallavimahajan11.noreply@github.com
|
bc83d60b45697492a232d29dfc684ec71ea59f98
|
f11600b9a256bf6a2b584d127faddc27a0f0b474
|
/easy/884.py
|
178f12a5ad59e80269bc9e90bd5d1d258fed01bc
|
[] |
no_license
|
longhao54/leetcode
|
9c1f0ce4ca505ec33640dd9b334bae906acd2db5
|
d156c6a13c89727f80ed6244cae40574395ecf34
|
refs/heads/master
| 2022-10-24T07:40:47.242861
| 2022-10-20T08:50:52
| 2022-10-20T08:50:52
| 196,952,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
class Solution:
    """LeetCode 884: words appearing exactly once across two sentences."""

    def uncommonFromSentences(self, A: str, B: str) -> list:
        """Return words that occur exactly once in total across A and B.

        Original two-pass logic preserved. The ``List[str]`` annotation was
        never imported from ``typing`` (a NameError at class definition),
        so the builtin ``list`` is used instead.
        """
        ans = []
        noans = []  # words already seen once -> disqualified on repeat
        B = B.split()
        A = A.split()
        for string in A:
            if string not in B and string not in noans:
                ans.append(string)
            elif string in noans and string in ans:
                # Second occurrence within A: retract the earlier candidate.
                ans.remove(string)
            noans.append(string)
        for string in B:
            if string not in A and string not in noans:
                ans.append(string)
            elif string in noans and string in ans:
                ans.remove(string)
            noans.append(string)
        return ans

    def faster(self, A: str, B: str) -> list:
        """Counter-based variant; fixes the missing `collections` import."""
        from collections import Counter
        c = Counter((A + " " + B).split(" "))
        return [v for v in c if c[v] == 1]
|
[
"4903319173@qq.com"
] |
4903319173@qq.com
|
4d051764f9b430f58be62576f6a7271125ec61f3
|
ceb39d672f6ae3bf4f0b92ee2fc5f9efef1c27b6
|
/cards/migrations/0002_auto_20190218_1905.py
|
3774e7663972ce5d21fac3569b61f41fd13c52fb
|
[] |
no_license
|
marekrewers/webshop-backend
|
0558064893845b886a733be7f96aa6116d30e1bb
|
7e20902f0bbacb425948712cf2df07d5a2ed8179
|
refs/heads/master
| 2020-04-23T03:57:16.259959
| 2019-02-19T10:35:25
| 2019-02-19T10:35:25
| 170,893,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
# Generated by Django 2.1.7 on 2019-02-18 19:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces the Card.category foreign key with a many-to-many
    # 'publications' field pointing at categories.Category.

    dependencies = [
        ('categories', '0001_initial'),
        ('cards', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='card',
            name='category',
        ),
        migrations.AddField(
            model_name='card',
            name='publications',
            field=models.ManyToManyField(to='categories.Category'),
        ),
    ]
|
[
"rewers.marek@gmail.com"
] |
rewers.marek@gmail.com
|
fc3f5e2f58cb367874453d0f5e45d5bf3d6cef5a
|
7c705153a18bab1140754af18f58643cc53ea1aa
|
/python/knapsack/knapsack.py
|
abad44dd721c84918e960174719c3051ca37c2f1
|
[] |
no_license
|
RishitAghera/internship
|
501b02f40f80c6e09b338ae517d6adb96729e941
|
5f82ef636eb214e86de370717e09f924e893186c
|
refs/heads/master
| 2020-12-02T17:23:42.711678
| 2020-04-15T13:41:29
| 2020-04-15T13:41:29
| 231,071,238
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
from itertools import chain, combinations
def powerset(items):
    """Yield every subset of *items* as a tuple, ordered by increasing size."""
    sizes = range(len(items) + 1)
    return chain.from_iterable(combinations(items, size) for size in sizes)
def maximum_value(maximum_weight, items):
    """Best total value of any subset of *items* within the weight budget.

    Brute force: every subset is examined (the powerset enumeration is
    inlined below), so runtime is exponential in len(items). Each item is
    a dict with 'weight' and 'value' keys.
    """
    best = 0
    for size in range(len(items) + 1):
        for subset in combinations(items, size):
            if sum(entry['weight'] for entry in subset) > maximum_weight:
                continue
            best = max(best, sum(entry['value'] for entry in subset))
    return best
|
[
"rishit4879@gmail.com"
] |
rishit4879@gmail.com
|
1bcffda49ed9845a46821b01dd50f3df43360d22
|
aca47fee5e90191f7861fe4e048e2cf6b1bdd2cf
|
/week2/skol'kosovpalo.py
|
c6b43996fce7b44d2dec67d3fcaa79290f63108c
|
[] |
no_license
|
arinalarina/python-for-beginners
|
fc8afd622ae5fe322dfba7a8f48339882add5cad
|
3a1813acfebcf9cdef19ae2364a009224320f5df
|
refs/heads/master
| 2021-01-17T05:24:20.748182
| 2017-07-07T10:20:39
| 2017-07-07T10:20:39
| 95,668,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
# Read three integers and report how many coincide:
# 3 if all equal, 2 if exactly one pair matches, 0 if all differ.
a = int(input())
b = int(input())
c = int(input())
if a == b == c:
    print(3)
elif a == b or b == c or a == c:
    # The all-equal case was handled above, so any match here is one pair.
    print(2)
else:
    print(0)
|
[
"arisha0147@mail.ru"
] |
arisha0147@mail.ru
|
4805a03abae11837e9ec3122e09a0d4a8d57141c
|
8f6c37ae954a1aeba9036c5a5fc025b51b247869
|
/main.py
|
9128afffc0c2353ac62cf0afe03b4be41ae1bfd1
|
[] |
no_license
|
dunkfordyce/sick-68-kmk
|
1960ba9c9655f126aa6d0db558c0914f826b760d
|
9eff4d8593458f9a268bb1f664d0946eacf9f63c
|
refs/heads/main
| 2023-06-05T03:24:06.715574
| 2021-06-27T09:01:35
| 2021-06-27T09:01:35
| 380,694,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
import board
from kmk.kmk_keyboard import KMKKeyboard as _KMKKeyboard
from kmk.matrix import DiodeOrientation
from kmk.keys import KC
class KMKKeyboard(_KMKKeyboard):
    # Pin wiring for a 15-column x 5-row matrix on GP1..GP20.
    # NOTE(review): pin choice suggests an RP2040-based board — confirm
    # against the actual hardware build.
    col_pins = (board.GP2, board.GP3, board.GP4, board.GP6, board.GP7, board.GP8, board.GP10, board.GP11, board.GP12, board.GP14, board.GP15, board.GP16, board.GP18, board.GP19, board.GP20)
    row_pins = (board.GP1, board.GP5, board.GP9, board.GP13, board.GP17)
    diode_orientation = DiodeOrientation.COLUMNS
keyboard = KMKKeyboard()
#keyboard.debug_enabled = True

# Readability aliases for the keymap grid below.
_______ = KC.TRNS   # transparent: fall through to the lower layer
XXXXXXX = KC.NO     # no-op key position

# Layer indices.
BASE = 0
FN1 = 1
LAYER1 = KC.MO(FN1)  # momentary switch to the function layer

keyboard.keymap = [
    # Layer 0: base QWERTY layout (15 columns x 5 rows).
    [
        KC.GESC, KC.N1, KC.N2, KC.N3, KC.N4, KC.N5, KC.N6, KC.N7, KC.N8, KC.N9, KC.N0, KC.MINS, KC.EQL, KC.BSPC, KC.GRAVE,
        KC.TAB, KC.Q, KC.W, KC.E, KC.R, KC.T, KC.Y, KC.U, KC.I, KC.O, KC.P, KC.LBRC, KC.RBRC, KC.BSLS, KC.DEL,
        KC.CAPS, KC.A, KC.S, KC.D, KC.F, KC.G, KC.H, KC.J, KC.K, KC.L, KC.SCLN, KC.QUOT, XXXXXXX, KC.ENT, KC.PGUP,
        KC.LSFT, XXXXXXX, KC.Z, KC.X, KC.C, KC.V, KC.B, KC.N, KC.M, KC.COMM, KC.DOT, KC.SLSH, KC.RSFT, KC.UP, KC.PGDOWN,
        KC.LCTL, KC.LGUI, KC.LALT, XXXXXXX, XXXXXXX, XXXXXXX, KC.SPC, XXXXXXX, XXXXXXX, KC.RALT, LAYER1, KC.RCTL, KC.LEFT, KC.DOWN, KC.RIGHT,
    ],
    # Layer 1 (FN): F-keys on the number row, arrows on the home row,
    # volume on the bottom-right cluster; everything else transparent.
    [
        _______, KC.F1 , KC.F2 , KC.F3 , KC.F4 , KC.F5 , KC.F6 , KC.F7 , KC.F8 , KC.F9 , KC.F10 , KC.F11 , KC.F12 , _______, _______,
        _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,
        _______, _______, _______, _______, _______, _______, KC.LEFT, KC.RIGHT, KC.DOWN, KC.UP, _______, _______, XXXXXXX, _______, _______,
        _______, XXXXXXX, _______, _______, _______, _______, _______, _______, _______, KC.VOLD, KC.VOLU, _______, _______, _______, _______,
        _______, _______, _______, XXXXXXX, XXXXXXX, XXXXXXX, _______, XXXXXXX, XXXXXXX, _______, _______, _______, _______, _______, _______,
    ]
]

if __name__ == '__main__':
    keyboard.go()
|
[
"noreply@github.com"
] |
dunkfordyce.noreply@github.com
|
53f6591907d165fd71f7bb22b09a6b495b873a0e
|
9b483242cfa6302b32d74729d7854508e05b0cf3
|
/YangSun/Data+Exploration.py
|
c603f29bc0fa998762a35e034bbef0b48dfbbf16
|
[] |
no_license
|
Jay-zheng/Web_Econ
|
3e6934d38a4f691d93e46e23e8b7dd71cff3ce3c
|
a3bd123cff67a7bad0ec8fb208e4b849e5a19ac8
|
refs/heads/master
| 2020-05-24T21:32:04.702895
| 2017-06-02T11:41:29
| 2017-06-02T11:41:29
| 84,882,691
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
# coding: utf-8

# Exported notebook: exploratory click-through statistics on a bidding
# dataset (per-advertiser and per-feature summaries).

# In[1]:
import random
import numpy as np
import pandas as pd

# In[2]:
# NOTE(review): paths are relative to the user's home Desktop — adjust per
# machine before running.
train = pd.read_csv("Desktop/Webeco/dataset/train.csv")
validation = pd.read_csv("Desktop/Webeco/dataset/validation.csv")

# In[3]:
train.columns

# In[4]:
# Per-advertiser summary: impressions, clicks, spend, then derived
# CTR (%), CPM, and eCPC rendered as strings for display.
df = pd.DataFrame()
df['advertiser'] = np.sort(train.advertiser.unique())
df['impressions'] = train.groupby('advertiser').size().values
df['click'] = train.groupby('advertiser').click.aggregate(np.sum).values
df['cost'] = train.groupby('advertiser').payprice.aggregate(np.sum).values
df['CTR'] = (((df.click / df.impressions) * 100).round(2)).astype(str) + '%'
df['CPM'] = (((df.cost / df.impressions) * 1000).round(2)).astype(str)
df['eCPC'] = ((df.cost / df.click).round(2)).astype(str)

# In[5]:
df
# In[6]:
def CTR(variable):
    """Per-category click-through summary for one column of the global
    `train` frame, sorted by the CTR string in descending order.

    Note: sorting happens on the formatted percentage *string*, matching
    the notebook's original display behavior.
    """
    grouped = train.groupby(variable)
    summary = pd.DataFrame()
    summary[variable] = np.sort(train[variable].unique())
    summary['impressions'] = grouped.size().values
    summary['click'] = grouped.click.aggregate(np.sum).values
    summary['CTR'] = (((summary.click / summary.impressions) * 100).round(2)).astype(str) + '%'
    return summary.sort_values(["CTR"], ascending=False)
# Run the CTR summary over each categorical feature of interest,
# keeping only the top 10 categories for high-cardinality columns.

# In[7]:
CTR("weekday")

# In[8]:
CTR("hour")

# In[9]:
CTR("adexchange")

# In[10]:
CTR("useragent").head(10)

# In[11]:
CTR("region").head(10)

# In[12]:
CTR("city").head(10)

# In[ ]:
|
[
"noreply@github.com"
] |
Jay-zheng.noreply@github.com
|
dac2b61b2da27d83c3a8d255e0909e2e53866b01
|
6c4bc4100636a469cda76395f9a7a83ebc5f5c7c
|
/storage_node_app.py
|
0fad133bf5a64b09f57fb02a84675a54446b1d1e
|
[] |
no_license
|
DevipriyaSarkar/distributed_file_system
|
cdd3a0385ad682be7e37c133d6d653dc4455a7f5
|
11d569d3b3dc26c33cf13461c55754d5c68d2839
|
refs/heads/master
| 2023-05-25T00:50:06.915168
| 2020-04-14T14:16:27
| 2020-04-14T14:51:51
| 255,191,117
| 3
| 0
| null | 2023-05-22T23:23:01
| 2020-04-12T23:47:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,293
|
py
|
import os
import random

import flask_utilities
from celery import Celery
from flask import Flask, jsonify, request, send_from_directory

# Node identity comes from the environment; each node writes to its own
# storage directory derived from that identity.
MY_NODE = os.environ['NODE']
MY_PORT = os.environ['PORT']
STORAGE_DIR = f"storage_{MY_NODE}_{MY_PORT}"

# Redis backs both the Celery broker and the result store.
CELERY_BROKER_URL = 'redis://redis:6379/0'
CELERY_RESULT_BACKEND = 'redis://redis:6379/0'

app = Flask(__name__)
celery = Celery('tasks',
                broker=CELERY_BROKER_URL,
                backend=CELERY_RESULT_BACKEND)
@app.route('/health', methods=['GET'])
def health():
    """Liveness probe: always responds 200 with a fixed message."""
    resp = jsonify({'message': 'Server is running'})
    resp.status_code = 200
    return resp
@app.route('/upload', methods=['POST'])
def upload():
    """Persist an uploaded file, verify its integrity, and queue replication.

    Expects form fields: input_file (the file), file_hash (expected hash),
    filename (target name). Returns 200 on success, 400 when the integrity
    check fails (previously that path fell through to `return resp` with
    `resp` unbound, raising a NameError), and 500 on any other error.
    """
    storage_filepath = None  # bound early so the error handler can reference it
    try:
        flask_utilities.create_storage_dir(STORAGE_DIR)
        fp = request.files['input_file']
        file_hash = request.form['file_hash']
        filename = request.form['filename']
        storage_filepath = os.path.join(STORAGE_DIR, filename)
        fp.save(storage_filepath)
        is_file_valid = flask_utilities.is_file_integrity_matched(
            filepath=storage_filepath,
            recvd_hash=file_hash
        )
        if is_file_valid:
            app.logger.debug(f"File {storage_filepath} saved and integrity verified.")
            resp = jsonify({'message': f"File {storage_filepath} saved successfully."})
            resp.status_code = 200
            # Only replicate content that passed the integrity check.
            add_replication_to_queue(filename)
        else:
            resp = jsonify({'message': f"Integrity check failed for {storage_filepath}."})
            resp.status_code = 400
    except Exception as e:
        resp = jsonify({
            'message': f"Error while saving {storage_filepath}: {str(e)}"
        })
        resp.status_code = 500
    return resp
@app.route('/download', methods=['GET'])
def download():
    """Serve a stored file with its MD5 in the `file_hash` response header.

    Query arg: filename. Returns the file on success, or a 500 JSON error.
    Fixes two defects: `storage_filepath` was unbound in the error message
    when the `filename` arg was missing (NameError inside the handler), and
    the error message said "saving" in a download handler.
    """
    storage_filepath = None  # bound early so the error handler can reference it
    try:
        flask_utilities.create_storage_dir(STORAGE_DIR)
        filename = request.args['filename']
        storage_filepath = os.path.join(STORAGE_DIR, filename)
        resp = send_from_directory(
            directory=STORAGE_DIR,
            filename=filename
        )
        # Attach the hash so the client can verify integrity on receipt.
        file_hash = flask_utilities.calc_file_md5(filepath=storage_filepath)
        resp.headers['file_hash'] = file_hash
    except Exception as e:
        resp = jsonify({
            'message': f"Error while downloading {storage_filepath}: {str(e)}"
        })
        resp.status_code = 500
    return resp
def add_replication_to_queue(filename):
    """Queue asynchronous replication of *filename* to randomly chosen peers.

    Picks `replication_factor` storage nodes other than this one and
    enqueues one Celery `dfs_tasks.replicate` task per destination.

    NOTE(review): random.sample raises ValueError when fewer peers are
    available than the replication factor — confirm cluster size always
    suffices before deploying.
    """
    replication_factor = flask_utilities.get_replication_factor()
    all_storage_nodes = flask_utilities.get_all_storage_nodes()
    cur_storage_node = f"{MY_NODE}:{MY_PORT}"
    # Never pick ourselves as a replication target.
    available_sns = list(set(all_storage_nodes) - {cur_storage_node})
    selected_sns = random.sample(available_sns, k=replication_factor)
    app.logger.debug(f"Asynchronously replicate (unknown) from {cur_storage_node} to {selected_sns}")
    for count, dest_sn in enumerate(selected_sns):
        task = celery.send_task(
            'dfs_tasks.replicate',
            args=[filename, cur_storage_node, dest_sn]
        )
        app.logger.debug(f"#{count+1}: Added task {task.id} for replication of (unknown) to {dest_sn}")
if __name__ == '__main__':
    # Ensure the storage directory exists before serving requests.
    flask_utilities.create_storage_dir(dir_path=STORAGE_DIR)
    app.config['NODE'] = MY_NODE
    app.config['PORT'] = MY_PORT
    app.config['STORAGE_DIR'] = f"storage_{app.config['NODE']}_{app.config['PORT']}"
    # Bind on all interfaces so other containers can reach this node.
    app.run(host="0.0.0.0", port=MY_PORT)
|
[
"dsarkar@linkedin.com"
] |
dsarkar@linkedin.com
|
3d3b3273a19c26d9e8b69231ddbe241d337363be
|
77b6e36045f25206766564c1f52d7ceec9a0c595
|
/Day-1/A17.py
|
91a2b63348e9672ddba56d09ec3bb0e7b0f93fbb
|
[] |
no_license
|
VladPuzhanivskyi/practice-python
|
9db8a548d7a38e5e8bce2ef762cedb3e51a0cd92
|
adf8b17268320d80a7e9c97f33c8a7b7a840801c
|
refs/heads/master
| 2020-03-20T19:41:04.060127
| 2018-06-18T14:05:43
| 2018-06-18T14:05:43
| 137,648,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import datetime

# Deposit calculator: reads a sum and a term (1-3 years) from stdin and
# prints the final amount with 14% simple interest per year, formatted to
# two decimal places. Prompts are user-facing runtime strings (Ukrainian)
# and are kept as-is.
c = int(float(input('Введите суму:')))
f = int(input('введіть кількість років:'))
r = 0.14 * c  # one year of interest
d = c + r
template = '{:.' + str(2) + 'f}'
if f == 1:
    print(template.format(d))
if f == 2:
    print(template.format(c + r * 2))
if f == 3:
    print(template.format(c + r * 3))
def printTimeStamp(name):
print('Автор програми: ' + name)
print('Час компіляції: ' + str(datetime.datetime.now()))
name = 'Puzhanivskyi Vladislav'
printTimeStamp(name)
|
[
"noreply@github.com"
] |
VladPuzhanivskyi.noreply@github.com
|
6d746b73e3161fe669213564cda177a20ba02557
|
0ea556c2dc08fcca3c9204daa54a0956522e33ea
|
/eventify/core/tests/test_api_course.py
|
e963acc9dcdc7e022e6d1c5739ed2c1951aea96e
|
[] |
no_license
|
mjr/eventify
|
4da5f9e96529a37996641a96347581111801642a
|
e3c2cc93050d48ee60fca40e350120c7d999c24f
|
refs/heads/main
| 2023-06-01T06:07:01.348091
| 2021-06-23T04:23:27
| 2021-06-23T04:23:27
| 345,531,453
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,854
|
py
|
from django.core import mail
from rest_framework import status
from rest_framework.test import APITestCase
from ..models import Course
class CoursesGet(APITestCase):
    """Listing courses returns HTTP 200 and the expected serialized payload."""
    def setUp(self):
        # Persist one course so the list endpoint has content to return.
        Course.objects.create(
            title="Rust",
            description="Uma linguagem capacitando todos a construir softwares confiáveis e eficientes.",
            start="08:00:00",
            slots=10,
        )
        self.resp = self.client.get("/api/v1/courses/")
    def test_get(self):
        """Get /api/v1/courses/ must return status code 200"""
        self.assertEqual(status.HTTP_200_OK, self.resp.status_code)
    def test_data(self):
        # The serializer is expected to expose exactly these four fields.
        self.assertEqual(
            self.resp.data,
            [
                {
                    "title": "Rust",
                    "description": "Uma linguagem capacitando todos a construir softwares confiáveis e eficientes.",
                    "start": "08:00:00",
                    "slots": 10,
                }
            ],
        )
class CoursesPostValid(APITestCase):
    """Posting a complete payload creates a course and returns HTTP 201."""
    def setUp(self):
        data = {
            "title": "Go",
            "description": "Go is an open source programming language that makes it easy to build simple, reliable, and efficient software.",
            "start": "10:00:00",
            "slots": 20,
        }
        self.resp = self.client.post("/api/v1/courses/", data)
    def test_post(self):
        """Valid POST to /api/v1/courses/ must return status code 201"""
        self.assertEqual(status.HTTP_201_CREATED, self.resp.status_code)
    def test_save_course(self):
        # The create view must actually persist the record.
        self.assertTrue(Course.objects.exists())
class CoursesPostInvalid(APITestCase):
    """Posting an empty payload is rejected with HTTP 400 and saves nothing."""
    def setUp(self):
        self.resp = self.client.post("/api/v1/courses/", {})
    def test_post(self):
        """Invalid POST must return status code 400"""
        self.assertEqual(status.HTTP_400_BAD_REQUEST, self.resp.status_code)
    def test_dont_save_course(self):
        # A failed validation must leave the table untouched.
        self.assertFalse(Course.objects.exists())
class CoursesPatchValid(APITestCase):
    """Partially updating an existing course returns HTTP 200."""
    def setUp(self):
        Course.objects.create(
            title="Rust",
            description="Uma linguagem capacitando todos a construir softwares confiáveis e eficientes.",
            start="08:00:00",
            slots=10,
        )
        # NOTE(review): assumes the first created course gets pk=1 — true for a
        # fresh test database; confirm if fixtures change.
        self.resp = self.client.patch("/api/v1/courses/1/", {"slots": 20})
    def test_patch(self):
        """Patch /api/v1/courses/1/ must return status code 200"""
        self.assertEqual(status.HTTP_200_OK, self.resp.status_code)
class CoursesPutValid(APITestCase):
    """Fully replacing an existing course returns HTTP 200."""
    def setUp(self):
        Course.objects.create(
            title="Rust",
            description="Uma linguagem capacitando todos a construir softwares confiáveis e eficientes.",
            start="08:00:00",
            slots=10,
        )
        # PUT requires the full resource representation, unlike PATCH above.
        self.resp = self.client.put(
            "/api/v1/courses/1/",
            {
                "title": "Go",
                "description": "Go is an open source programming language that makes it easy to build simple, reliable, and efficient software.",
                "start": "10:00:00",
                "slots": 20,
            },
        )
    def test_put(self):
        """Put /api/v1/courses/1/ must return status code 200"""
        self.assertEqual(status.HTTP_200_OK, self.resp.status_code)
class CoursesDeleteValid(APITestCase):
    """Deleting an existing course returns HTTP 204 (no content)."""
    def setUp(self):
        Course.objects.create(
            title="Rust",
            description="Uma linguagem capacitando todos a construir softwares confiáveis e eficientes.",
            start="08:00:00",
            slots=10,
        )
        self.resp = self.client.delete("/api/v1/courses/1/")
    def test_delete(self):
        """Delete /api/v1/courses/1/ must return status code 204"""
        self.assertEqual(status.HTTP_204_NO_CONTENT, self.resp.status_code)
|
[
"manaiajr.23@gmail.com"
] |
manaiajr.23@gmail.com
|
d0dc1a2afe0895dca61ae299988b55c12421afcf
|
a8547f73463eef517b98d1085430732f442c856e
|
/luvina/backend/backend.py
|
485bd1595d99a78a6756bc76566aa7d2cf851708
|
[] |
no_license
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
63aece1b692225ee2fbb865200279d7ef88a1eca
|
5668b5785296b314ea1321057420bcd077dba9ea
|
refs/heads/master
| 2021-01-23T19:13:04.707152
| 2017-12-25T17:41:30
| 2017-12-25T17:41:30
| 102,808,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
from .enchant_backend import *
from .nltk_backend import *
from .spacy_backend import *
from .common import *
|
[
"raliclo@gmail.com"
] |
raliclo@gmail.com
|
20a738287bd19593383ab45f940a695ba6539015
|
17d2869fba51a6c4c296986d5c1a193c4c72c364
|
/Methodology/C_CreateDatabase/core_selection_degiro_isin.py
|
c46a5141d8064c0b892f57c060f5bff880b7d350
|
[
"MIT"
] |
permissive
|
fishwithfros/FinanceDatabase
|
b7aac10f02e106ce0dbeefce557ff7efedf6a3b3
|
f4ec9fea2333b2f9f574897b93accb100f087aed
|
refs/heads/master
| 2023-08-17T18:52:28.140668
| 2021-10-27T07:45:08
| 2021-10-27T07:45:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,996
|
py
|
# Mapping from ISIN to the exchange suffix used by the degiro/core-selection
# pipeline.
# NOTE(review): suffix meanings appear to follow Yahoo-style exchange codes
# ('PA' Paris, 'DE' Xetra, 'AS' Amsterdam, 'MI' Milan, 'L' London,
# 'MC' Madrid, 'BR' Brussels) — confirm against the consuming code.
# Fix: 'DE000A0H0728' was listed twice ('AS' first, 'DE' later). Python keeps
# the last occurrence, so the earlier 'AS' entry was dead code and has been
# removed; the resulting mapping is unchanged.
isin = {'LU1681046931': 'PA',
        'LU1681044480': 'PA',
        'LU1681047236': 'PA',
        'LU1681045370': 'PA',
        'LU1681043599': 'PA',
        'LU1681048804': 'PA',
        'LU1681040223': 'PA',
        'LU1681042609': 'PA',
        'LU1681047079': 'PA',
        'ES0105336038': 'MC',
        'LU0488317701': 'DE',
        'LU0779800910': 'DE',
        'LU0839027447': 'MI',
        'IE00BM67HM91': 'MI',
        'IE00BM67HK77': 'MI',
        'FR0012739431': 'PA',
        'IE00BFYN8Y92': 'MI',
        'IE00BYPLS672': 'AS',
        'IE00BMW3QX54': 'AS',
        'DE000ETFL037': 'DE',
        'IE00BG5J1M21': 'DE',
        'IE00BDDRF924': 'MI',
        'IE00BDDRF700': 'MI',
        'IE00B44T3H88': 'PA',
        'IE00B4X9L533': 'PA',
        'IE00B5KQNG97': 'PA',
        'IE00BF11F565': 'L',
        'IE00BRKWGL70': 'DE',
        'IE00BWTN6Y99': 'DE',
        'IE00BSKRJZ44': 'L',
        'IE00B0M62Y33': 'AS',
        'IE00B14X4T88': 'AS',
        'IE00BYZK4552': 'DE',
        'IE00BYWZ0333': 'DE',
        'IE00B3DKXQ41': 'AS',
        'IE00B3F81R35': 'AS',
        'IE00B0M63516': 'AS',
        'IE00B1W57M07': 'AS',
        'IE00B02KXK85': 'AS',
        'IE00BYPC1H27': 'AS',
        'IE00BJ5JPG56': 'AS',
        'IE00BDBRDM35': 'DE',
        'IE00B5BMR087': 'DE',
        'IE00BG0J4841': 'DE',
        'IE00BG0J4C88': 'DE',
        'IE00BYZK4883': 'DE',
        'IE00BDFL4P12': 'DE',
        'DE000A0H0728': 'DE',
        'IE00B57X3V84': 'AS',
        'IE00B1FZS806': 'AS',
        'IE00B0M62X26': 'AS',
        'IE00BP3QZ825': 'DE',
        'IE00BGL86Z12': 'DE',
        'IE00B0M63177': 'AS',
        'IE00BKM4GZ66': 'AS',
        'IE00B0M62S72': 'AS',
        'IE0008471009': 'AS',
        'IE00B1YZSC51': 'AS',
        'IE0005042456': 'AS',
        'IE00B1XNHC34': 'DE',
        'IE00B1TXK627': 'DE',
        'IE00B6R52036': 'DE',
        'IE00BYZK4776': 'DE',
        'IE00B66F4759': 'DE',
        'IE00B1FZS467': 'AS',
        'IE00B02KXH56': 'AS',
        'IE00B0M63391': 'AS',
        'IE00B27YCK28': 'AS',
        'IE00BQT3WG13': 'DE',
        'IE00B4L5YC18': 'AS',
        'IE00BFNM3P36': 'DE',
        'IE00BFNM3N12': 'DE',
        'IE00BFNM3B99': 'DE',
        'IE00B4K48X80': 'AS',
        'IE00BFNM3D14': 'DE',
        'IE00BZCQB185': 'DE',
        'IE00BFNM3G45': 'DE',
        'IE00B4L5Y983': 'AS',
        'IE00BHZPJ569': 'DE',
        'IE00BFNM3J75': 'DE',
        'IE00BF4RFH31': 'DE',
        'IE00BYX2JD69': 'DE',
        'IE00BYVQ9F29': 'DE',
        'IE00BYXG2H39': 'DE',
        'DE000A0F5UF5': 'DE',
        'IE00B1TXHL60': 'AS',
        'IE00B1FZS244': 'AS',
        'IE00B0M63284': 'AS',
        'IE00B1FZS350': 'AS',
        'IE00B1FZSF77': 'AS',
        'IE0031442068': 'AS',
        'IE00B3WJKG14': 'DE',
        'IE0008470928': 'AS',
        'DE000A0H08M3': 'DE',
        'DE000A0H08S0': 'DE',
        'DE0002635307': 'DE',
        'DE0005933998': 'DE',
        'DE0002635299': 'DE',
        'DE000A0F5UH1': 'DE',
        'DE0005933972': 'DE',
        'IE00B1FZS574': 'AS',
        'IE00B0M62Q58': 'AS',
        'IE00B6R51Z18': 'DE',
        'IE00B53SZB19': 'AS',
        'IE00BFXR7892': 'AS',
        'IE00BK5BCD43': 'DE',
        'IE00BF0M2Z96': 'MI',
        'IE00BK5BC891': 'DE',
        'LU1781541179': 'MI',
        'FR0010405431': 'PA',
        'FR0000021842': 'BR',
        'FR0007052782': 'PA',
        'LU0252633754': 'PA',
        'FR0007054358': 'PA',
        'FR0010251744': 'MC',
        'FR0010361683': 'PA',
        'LU1923627092': 'PA',
        'LU0496786574': 'PA',
        'LU1287023185': 'PA',
        'LU1691909508': 'DE',
        'LU2197908721': 'DE',
        'LU1900066207': 'PA',
        'LU2023678282': 'MI',
        'LU2023679090': 'MI',
        'LU1900067601': 'PA',
        'LU1792117779': 'DE',
        'FR0010524777': 'DE',
        'LU1838002480': 'DE',
        'LU1834988278': 'PA',
        'LU0908500753': 'MI',
        'LU1834983477': 'PA',
        'LU1834986900': 'PA',
        'FR0010527275': 'DE',
        'IE00B441G979': 'AS',
        'LU1829218749': 'PA',
        'LU1829221024': 'PA',
        'IE0032077012': 'PA',
        'IE00BJXRZJ40': 'MI',
        'IE00BJXRZ273': 'DE',
        'IE00BLRPQH31': 'MI',
        'IE00B3ZW0K18': 'AS',
        'IE00BYSZ5V04': 'DE',
        'IE00B466KX20': 'PA',
        'IE00BYTRR863': 'AS',
        'IE00BYTRRB94': 'AS',
        'IE00BYTRRD19': 'AS',
        'IE00BJ38QD84': 'DE',
        'IE00B6YX5C33': 'L',
        'IE00B5M1WJ87': 'DE',
        'IE00B9CQXS71': 'DE',
        'IE00B6YX5D40': 'DE',
        'IE00BWBXM492': 'MI',
        'IE00BH4GPZ28': 'AS',
        'IE00BK5H8015': 'AS',
        'IE00BWBXM948': 'AS',
        'FR0011550193': 'PA',
        'LU1324516308': 'MI',
        'LU1459801780': 'MI',
        'LU0629460675': 'AS',
        'LU0629459743': 'DE',
        'LU0950674332': 'DE',
        'LU0629460089': 'DE',
        'LU1048313891': 'MI',
        'NL0009272749': 'AS',
        'IE00BQQP9G91': 'MI',
        'NL0011683594': 'AS',
        'NL0010408704': 'AS',
        'IE00BMC38736': 'MI',
        'NL0009690221': 'AS',
        'NL0009690239': 'AS',
        'IE00BQQP9F84': 'MI',
        'IE00BYWQWR46': 'DE',
        'IE00BZ163H91': 'AS',
        'IE00BK5BQT80': 'DE',
        'IE00B9F5YL18': 'AS',
        'IE00B3RBWM25': 'AS',
        'IE00B3VVMM84': 'AS',
        'IE00B945VV12': 'AS',
        'IE00B8GKDB10': 'AS',
        'IE00B3XXRP09': 'AS',
        'IE00BZ163M45': 'AS',
        'IE00B810Q511': 'L',
        'IE00BDVPNG13': 'MI',
        'IE00BKLF1R75': 'MI',
        'IE00BJGWQN72': 'MI',
        'IE00BZ56SW52': 'MI',
        'IE00BGV5VN51': 'DE',
        'IE00BG370F43': 'DE',
        'IE00BZ02LR44': 'DE',
        'IE00BGV5VR99': 'DE',
        'LU0484968812': 'DE',
        'LU1109942653': 'DE',
        'LU0942970798': 'DE',
        'LU0378818131': 'DE',
        'LU0514695690': 'DE',
        'IE00BTJRMP35': 'DE',
        'IE00BM67HT60': 'DE',
        'IE00BL25JP72': 'DE',
        'IE00BZ036J45': 'DE'}
|
[
"jer.bouma@gmail.com"
] |
jer.bouma@gmail.com
|
cea3f841d76d3edd9d8c548399f926ff946909f6
|
67a4e6565d0f5bad565fa8ea2d076684d082691a
|
/01_Arrays_and_Strings/04_Palindrome_Permutation.py
|
a5621e7a52f3d5d5f77ef2e519228284fb892d5e
|
[] |
no_license
|
DStheG/ctci
|
5b94664cf78c3a34f3e6095016bbda0ab03bb58b
|
43ca1d8933e601597b14539b0e729e83fcd65ebf
|
refs/heads/master
| 2022-02-24T22:43:13.712936
| 2019-08-12T11:26:06
| 2019-08-12T11:26:06
| 164,867,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
#!/usr/bin/python
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
1.4 Palindrome Permutation
Given a string, write a function to check if it is a permutation of a pali-
ndrome. A palindrome is a word or phrase that is the same forwards and bac-
kwards. A permutation is a rearrangement to letters. The palindrome does n-
ot need to be limited to just dictionary words.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import string
import random
import sys
sys.path.append("../")
from module.ctci import Exercise
from module.ctci import Solution
from collections import Counter
CHARS_SET = string.ascii_letters + ' '
class Ex_01_04(Exercise):
    # Exercise harness wrapper: supplies the book's sample input ("Tact Coa",
    # a permutation of "taco cat").
    def setup(self):
        self.param.append("Tact Coa")
class Ex0104(Solution):
    """Solutions for 'Palindrome Permutation' (CtCI 1.4).

    A string is a permutation of a palindrome iff at most one character
    (ignoring spaces) occurs an odd number of times. All solutions
    lowercase the input and ignore spaces.
    """
    # O(N)
    def Naive(self, param):
        """Single pass with a frequency bucket and incremental odd/even counts."""
        Str = param[0].lower()
        # Bug fix: len(...) / 2 yields a float under Python 3, which cannot
        # size a list; integer division gives the same value under Python 2.
        len_of_bucket = [0] * (len(CHARS_SET) // 2)
        # When the length of the string (excluding spaces) is even, every
        # character must occur an even number of times; otherwise exactly one
        # character may occur an odd number of times.
        Total = odd = even = 0
        for s in Str:
            if s != ' ':
                idx = ord(s) - ord('a')
                len_of_bucket[idx] += 1
                if(len_of_bucket[idx] % 2):
                    odd += 1
                    if(len_of_bucket[idx] != 1):
                        even -= 1
                else:
                    even += 1
                    odd -= 1
                Total += 1
        if(Total % 2):
            if(odd == 1):
                return True
        elif(odd == 0):
            return True
        return False
    def PyStyle(self, param):
        """Idiomatic version: count characters with an odd frequency."""
        Str = ''.join([c.replace(' ', '') for c in param[0].lower()])
        return sum(v % 2 for v in Counter(Str).values()) <= 1
    def ImprovedNaive(self, param):
        """Two-phase version: fill the bucket, then count odd entries."""
        Str = param[0].lower()
        # Same // fix as in Naive (Python 3 compatibility).
        len_of_bucket = [0] * (len(CHARS_SET) // 2)
        for s in Str:
            if s != ' ':
                idx = ord(s) - ord('a')
                len_of_bucket[idx] += 1
        return sum(l % 2 for l in len_of_bucket) <= 1
def solve():
    # Register all three implementations with the exercise harness and run them
    # against the sample input provided by Ex_01_04.setup().
    ex = Ex_01_04()
    ex.add_solution(Ex0104("Naive"))
    ex.add_solution(Ex0104("PyStyle"))
    ex.add_solution(Ex0104("ImprovedNaive"))
    ex.solve()
if __name__ == "__main__":
    solve()
|
[
"xcrossx@gmail.com"
] |
xcrossx@gmail.com
|
13d06638f45753fde8cb3a3d58db63efa8d9b8b2
|
f8f0100d0bc7305ed90629ff56f7659e634c2122
|
/app/example/find_path.py
|
729fcc19eca62b53d136f54063047109cff08218
|
[] |
no_license
|
g10guang/offerSword
|
a28b89cedf2c3d617a9616c37153a8303f89c28d
|
a01a6856c7679971cf4ba0d733bcc5cf418235f7
|
refs/heads/master
| 2023-08-16T13:16:38.110034
| 2023-08-15T05:48:07
| 2023-08-15T05:48:07
| 130,460,748
| 13
| 5
| null | 2023-05-14T11:59:11
| 2018-04-21T09:50:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,405
|
py
|
# coding=utf-8
# author: Xiguang Liu<g10guang@foxmail.com>
# 2018-04-29 13:18
# 题目描述:https://www.nowcoder.com/practice/b736e784e3e34731af99065031301bca?tpId=13&tqId=11177&tPage=1&rp=1&ru=%2Fta%2Fcoding-interviews&qru=%2Fta%2Fcoding-interviews%2Fquestion-ranking
class TreeNode:
    """A plain binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        # Children start unset; callers wire the tree together explicitly.
        self.left = None
        self.right = None
        self.val = x
class Solution:
    """Collect every root-to-leaf path whose node values sum to a target.

    The traversal state lives on the instance, so each query should use a
    fresh Solution object.
    """

    def __init__(self):
        # Accumulated answers, the path being explored, and its running sum.
        self.result = []
        self.curPath = []
        self.curSum = 0

    def FindPath(self, root, expectNumber):
        """Return a list of value-lists, one per matching root-to-leaf path.

        Node values may be negative, so every leaf must be visited — partial
        sums cannot be used to prune the search early.
        """
        if root is None:
            return self.result
        # Push the current node onto the running path.
        self.curPath.append(root.val)
        self.curSum += root.val
        is_leaf = root.left is None and root.right is None
        if is_leaf:
            if self.curSum == expectNumber:
                # Copy the path: curPath keeps mutating while backtracking.
                self.result.append(self.curPath[:])
        else:
            self.FindPath(root.left, expectNumber)
            self.FindPath(root.right, expectNumber)
        # Backtrack before returning to the parent.
        self.curSum -= root.val
        self.curPath.pop()
        return self.result
|
[
"g10guang@gmail.com"
] |
g10guang@gmail.com
|
9fa5defb89eb8064339a7db03c024637c19d6a51
|
fb7058d157458b3c7274754d4a6d4bd2d70cae28
|
/src/nets/vgg16.py
|
dfab9e19a4063fff30ebb2e7f83edb3df53377d2
|
[] |
no_license
|
manfreddiaz/cnn-traffic-light-evaluation
|
2c209015ae6f20750b4fbe4302c8a7a1e9957592
|
55f66e1ab4c681597b8d959d977b6a5e8e0f122f
|
refs/heads/master
| 2021-06-19T12:50:34.703146
| 2017-07-22T16:26:42
| 2017-07-22T16:26:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,848
|
py
|
import tensorflow as tf
import nets.base_layers as pr
FIRST_DEPTH = 16
FILTER_SIZE = 3
def stack(network_input, network_input_depth, output_size):
    """VGG16-style classifier built from the pr.* layer primitives.

    Five conv-conv-ReLU-maxpool stages with channel widths 1x, 2x, 4x, 8x
    and 8x of FIRST_DEPTH, followed by three fully connected layers.
    Returns the final fully connected output of size `output_size`.
    NOTE(review): the reshape to [-1, 8*FIRST_DEPTH] assumes the five 2x
    poolings reduce the spatial dimensions to 1x1 — confirm for the intended
    input resolution.
    """
    # Stage 1: FIRST_DEPTH channels.
    conv3_64 = pr.conv2d(network_input, filters_size=FILTER_SIZE, num_filters=FIRST_DEPTH, input_depth=network_input_depth, strides=[1,1,1,1], padding='SAME')
    conv3_64 = pr.conv2d(conv3_64, filters_size=FILTER_SIZE, num_filters=FIRST_DEPTH, input_depth=FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_64 = pr.relu(conv3_64)
    max1 = pr.max_pool(conv3_64)
    # Stage 2: 2x channels.
    conv3_128 = pr.conv2d(max1, filters_size=FILTER_SIZE, num_filters=2*FIRST_DEPTH, input_depth=FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_128 = pr.conv2d(conv3_128, filters_size=FILTER_SIZE, num_filters=2*FIRST_DEPTH, input_depth=2*FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_128 = pr.relu(conv3_128)
    max2 = pr.max_pool(conv3_128)
    # Stage 3: 4x channels.
    conv3_256 = pr.conv2d(max2, filters_size=FILTER_SIZE, num_filters=4*FIRST_DEPTH, input_depth=2*FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_256 = pr.conv2d(conv3_256, filters_size=FILTER_SIZE, num_filters=4*FIRST_DEPTH, input_depth=4*FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_256 = pr.relu(conv3_256)
    max3 = pr.max_pool(conv3_256)
    # Stage 4: 8x channels.
    conv3_512 = pr.conv2d(max3, filters_size=FILTER_SIZE, num_filters=8*FIRST_DEPTH, input_depth=4*FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_512 = pr.conv2d(conv3_512, filters_size=FILTER_SIZE, num_filters=8*FIRST_DEPTH, input_depth=8*FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_512 = pr.relu(conv3_512)
    max4 = pr.max_pool(conv3_512)
    # Stage 5: 8x channels again.
    conv3_512 = pr.conv2d(max4, filters_size=FILTER_SIZE, num_filters=8*FIRST_DEPTH, input_depth=8*FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_512 = pr.conv2d(conv3_512, filters_size=FILTER_SIZE, num_filters=8*FIRST_DEPTH, input_depth=8*FIRST_DEPTH, strides=[1,1,1,1], padding='SAME')
    conv3_512 = pr.relu(conv3_512)
    max5 = pr.max_pool(conv3_512)
    # Debug leftover: prints the shape feeding the flatten below.
    print(max5.get_shape())
    flatten_layer = tf.reshape(max5, [-1, 8*FIRST_DEPTH])
    # Classifier head: two hidden FC layers, then the output layer.
    fully_connected = pr.fully_connected(flatten_layer, 8*FIRST_DEPTH, 16*FIRST_DEPTH)
    fully_connected = pr.fully_connected(fully_connected, 16*FIRST_DEPTH, 16*FIRST_DEPTH)
    fully_connected = pr.fully_connected(fully_connected, 16*FIRST_DEPTH, output_size)
    return fully_connected
def vgg_stack(input, input_depth, volume_depth):
    """One reusable VGG block: conv3-conv3-ReLU-maxpool at `volume_depth` channels.

    NOTE(review): `input` shadows the Python builtin; renaming it would change
    the keyword-argument interface, so it is left as-is.
    """
    vgg_convolutional = pr.conv2d(input, filters_size=3, num_filters=volume_depth, input_depth=input_depth,
                                  strides=[1, 1, 1, 1], padding='SAME')
    vgg_convolutional = pr.conv2d(vgg_convolutional, filters_size=3, num_filters=volume_depth, input_depth=volume_depth, strides=[1, 1, 1, 1], padding='SAME')
    vgg_convolutional = pr.relu(vgg_convolutional)
    return pr.max_pool(vgg_convolutional)
|
[
"takeitallsource@gmail.com"
] |
takeitallsource@gmail.com
|
6bc50acbbf717bfa1c8c8b6d93a003141bace4b9
|
e043acf03569edb59d7bf71322384375931e4180
|
/proxies/xici.py
|
190e29fbd425e661c99122632639e40a3205b203
|
[] |
no_license
|
ZZINDS/Free
|
f1dedd4d7c2e3a5733ca384c7fda41e43086bfda
|
7b963d2de0a28d6043fc36c82c3de12b66ab6661
|
refs/heads/master
| 2020-03-30T00:27:38.659951
| 2018-09-28T07:14:36
| 2018-09-28T07:14:36
| 150,524,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,969
|
py
|
# -*- coding: utf-8 -*-
"""
@Time : 2018/9/27 10:23
@Author : ZZINDS
@Site :
@File : xici.py
@Software: N M $ L ®
"""
import re
import requests
'''
Function:
西刺代理类
Input:
-page: 抓取的页码
-proxy_type: 代理类型, 可选项为'all', 'http', 'https'
-quality: 高(普)匿代理/普通代理, 可选项为'all', 'anonymous', 'common'
Return:
-proxyList: 该页所有满足条件的代理列表[(ip, port), (ip, port), ...]
'''
class xici():
    """Scraper for the xicidaili.com free-proxy listings.

    get() returns a list of (ip, port) tuples for the requested page,
    filtered by proxy type ('all', 'http', 'https') and listing quality
    ('all', 'anonymous', 'common').
    """
    def __init__(self):
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
        }
        # Listing URLs: high-anonymity proxies first, then transparent ones.
        self.urls = [
            'http://www.xicidaili.com/nn/{}',
            'http://www.xicidaili.com/nt/{}'
        ]
    # Public entry point.
    def get(self, page=1, proxy_type='all', quality='all'):
        """Fetch one listing page per selected URL and return (ip, port) tuples."""
        ip_list = []
        urls = self.__get_urls_by_quality(quality)
        for url in urls:
            res = requests.get(url.format(page), headers=self.headers)
            if (proxy_type == 'http') or (proxy_type == 'https'):
                # Capture (ip, port, type) and keep only the requested scheme.
                ip_port_type = re.findall(r'\<td\>(\d+\.\d+.\d+.\d+)\</td\>.*?\<td\>(\d+)\</td\>.*?\<td\>(\w+)\</td\>', res.text.replace(' ', '').replace('\n', ''))
                for ipt in ip_port_type:
                    if ipt[2].lower() == proxy_type:
                        ip_list.append(ipt[:-1])  # drop the type, keep (ip, port)
            elif proxy_type == 'all':
                ip_port = re.findall(r'\<td\>(\d+\.\d+.\d+.\d+)\</td\>.*?\<td\>(\d+)\</td\>', res.text.replace(' ', '').replace('\n', ''))
                ip_list += ip_port
            else:
                # Unknown proxy_type: skip this listing entirely.
                continue
        return ip_list
    # Select the listing URLs matching the requested quality option.
    def __get_urls_by_quality(self, quality):
        if quality == 'all':
            urls = self.urls
        elif quality == 'anonymous':
            urls = [self.urls[0]]
        elif quality == 'common':
            urls = [self.urls[1]]
        else:
            urls = []
        return urls
    # Human-readable representations (kept in the original Chinese:
    # "Xici proxy class").
    def __repr__(self):
        return '西刺代理类'
    def __str__(self):
        return '西刺代理类'
# for test
if __name__ == '__main__':
    # Smoke test: print the default first page of proxies.
    print(xici().get())
|
[
"zzinds@outlook.com"
] |
zzinds@outlook.com
|
580b3aded81770b622bdae8ded5dd41658137bad
|
d6118c5e14e46dbb3d2a59483fcd024d031b9657
|
/poll.py
|
4669467264843e49b7693d9059cf58a6ade45c6f
|
[] |
no_license
|
pczb/someUtil
|
a77c6f8894e10a08cd4530399f5b3421533a3939
|
34ced16f56ffd6dfd00cb19da5557cf6adc8accb
|
refs/heads/master
| 2021-01-17T21:35:11.928471
| 2015-05-05T15:41:08
| 2015-05-05T15:41:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,700
|
py
|
#coding=utf8
from util import tryExec
import urllib2
import urllib
import re
import random
import time
class Poll:
    """Automated voting helper for miui.com forum polls (Python 2 code)."""
    def __init__(self, opener):
        # `opener` is an already-logged-in urllib2-style opener.
        self.opener = opener
        self.failedTimes = 0
        self.succTimes = 0
    def parsePollPage(self, htmlpage):
        """Extract [post_url, formhash, answer_value] from a poll thread page.

        NOTE(review): both branches of the answer loop return on the first
        iteration, so only one answer value is ever collected — presumably
        intentional for single-choice polls, but confirm.
        """
        pattern_poll_section = re.compile(r'name="poll" method="post"(.+?)<button', re.DOTALL)
        pattern_post_url = re.compile(r'action="([^"]+)"')
        pattern_form_hash = re.compile(r'"formhash" value="([^"]+)"')
        pattern_poll_type = re.compile(r'<strong[^>]*>(.+?)<.+?>')
        pattern_poll_ans = re.compile(r'name="pollanswers\[\]" value="(\d+)".+?<label[^>]+>(.+?)<', re.DOTALL)
        ret = []
        for section_match in pattern_poll_section.finditer(htmlpage):
            section_text = section_match.groups()[0]
            post_url = pattern_post_url.search(section_text).groups()[0]
            ret.append(post_url)
            form_hash = pattern_form_hash.search(section_text).groups()[0]
            ret.append(form_hash)
            pool_type = pattern_poll_type.search(section_text).groups()[0]
            # Prefer answers whose label contains certain keywords; otherwise
            # fall back to the first listed answer.
            for x in pattern_poll_ans.findall(section_text):
                if "国际" in x[1] or "酱油" in x[1]:
                    ret.append(x[0])
                    return ret
                ret.append(x[0])
                return ret
    def miui_poll(self, thread_url):
        """Fetch a poll thread and submit a vote for the selected answer."""
        htmlpage = self.opener.open(thread_url).read()
        ret = self.parsePollPage(htmlpage)
        data = {'formhash': ret[1], 'pollanswers[]': ret[2],
                'pollsubmit':'true'}
        # Undo the HTML entity-escaping ("&amp;") in the action URL.
        ret[0] = "".join(ret[0].split('amp;'))
        # NOTE(review): the response is assigned but never inspected.
        request = self.opener.open('http://www.miui.com/' + ret[0], data = urllib.urlencode(data))
    @tryExec(5)
    def getPhonesForumPage(self):
        """Return the forum ids scraped from the phone-boards index page."""
        pattern_forum_id = re.compile(r'p_pic">[^<]*<a href="forum-(\d+)-')
        htmlpage = self.opener.open('http://www.miui.com/gid-1.html#tabs_0').read()
        ret = []
        for x in pattern_forum_id.findall(htmlpage):
            ret.append(x)
        return ret
    def getPollUrl(self, url):
        """Return the relative thread URLs of polls on a forum listing page."""
        htmlpage = urllib2.urlopen(url).read()
        pattern_poll_url = re.compile(r'(thread-\d+-\d+-\d+.html)" onclick')
        ret = []
        for x in pattern_poll_url.findall(htmlpage):
            ret.append(x)
        return ret
    def autoPoll(self):
        """Vote in up to 10 randomly chosen polls, with at most 20 attempts."""
        phonesForum = self.getPhonesForumPage()
        if phonesForum == None:
            return
        pollUrls = []
        visited = set()
        succCount = 0
        tryCount = 0
        poll_list_url = 'http://www.miui.com/forum.php?mod=forumdisplay&fid=%s&filter=specialtype&specialtype=poll'
        while succCount < 10 and tryCount < 20:
            try:
                # Walk the forum list (clamped to the last forum once exhausted).
                pollUrls += self.getPollUrl(poll_list_url%(phonesForum[tryCount if tryCount < len(phonesForum) else len(phonesForum) - 1]))
                random.shuffle(pollUrls)
                if pollUrls[0] not in visited:
                    thread_url = 'http://www.miui.com/' + pollUrls[0]
                    self.miui_poll(thread_url)
                    succCount += 1
            except Exception, e:
                print e.message
            finally:
                if len(pollUrls) != 0:
                    visited.add(pollUrls[0])
                tryCount += 1
                # Throttle requests between attempts.
                time.sleep(10)
if __name__ == '__main__':
    import socket
    socket.setdefaulttimeout(10)
    from miui_util import login
    miui_url = 'http://www.miui.com/member.php?mod=logging&action=miuilogin'
    # NOTE(review): `opener` is never defined here, so this raises NameError at
    # runtime. Presumably it should be produced by login(...) — confirm against
    # miui_util before using this script.
    poller = Poll(opener)
    # poller.miui_poll('http://www.miui.com/thread-2486706-1-1.html')
    poller.autoPoll()
|
[
"pczb@labServer.(none)"
] |
pczb@labServer.(none)
|
d47cb33df078e322c58fc4b4b7229051d0683daa
|
22f2a7107a66ee93fdd6b53bd93337360c44a9b5
|
/leetcode/leetcode math problems/roman to integer.py
|
bb0e99ca935554a3389761dd6dd6cf81303408fd
|
[] |
no_license
|
pradeepmaddipatla16/my_problem_solving_approach_leetcode_medium_hard_problems
|
2ea0c3f0f7c5b81afb82d59ef467e3fbd4d4f469
|
bff7ba03441bba2179acb285f62206fafcb031c5
|
refs/heads/master
| 2020-04-30T07:57:15.497033
| 2019-03-20T09:37:06
| 2019-03-20T09:37:06
| 176,700,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
def function1(roman):
    """Convert a Roman numeral string to its integer value.

    Each symbol is added to the total, except when it precedes a larger
    symbol, in which case it is subtracted (e.g. the 'C' in 'CM').
    Raises IndexError for an empty string (as the original did) and
    KeyError for characters outside I/V/X/L/C/D/M.
    """
    # Fixes: the original shadowed the builtin `sum` and re-indexed the
    # mapping repeatedly; pairing adjacent symbols with zip is clearer.
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    for cur, nxt in zip(roman, roman[1:]):
        if values[cur] >= values[nxt]:
            total += values[cur]
        else:
            total -= values[cur]
    # The last symbol is always added.
    return total + values[roman[-1]]
print(function1('MCMXCIV'))
|
[
"maddhipatlanani@gmail.com"
] |
maddhipatlanani@gmail.com
|
6f18b934f9a950dc7454b36377b37ff2c700c11e
|
74b74e817e925c3eeb45b2755d345a9ebbe9557c
|
/1991_tree.py
|
2bc457f5263b250bb1bb85970918f0e791ac4d97
|
[] |
no_license
|
xdfc1745/baekjoon
|
2b81bd2cc86a5359ff956ad524d6d42892136432
|
9dbb84223b26ae049d144699d13b6a605c0d4230
|
refs/heads/master
| 2022-12-03T00:47:17.026439
| 2020-08-26T14:49:17
| 2020-08-26T14:49:17
| 283,480,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
# Number of nodes in the binary tree (read from stdin).
n = int(input())
# Maps a node label to its [left, right] child labels; '.' marks no child.
node_list = {}
def preorder(root, node_list):
    """Print the pre-order traversal (node, left, right) without newlines."""
    if root == '.':
        return  # '.' is the sentinel for a missing child
    left, right = node_list[root]
    print(root, end="")
    preorder(left, node_list)
    preorder(right, node_list)
def inorder(root, node_list):
    """Print the in-order traversal (left, node, right) without newlines.

    Bug fix: the original recursed via preorder(), producing a pre-order
    walk of each subtree instead of a true in-order traversal.
    """
    if root != '.':
        left = node_list[root][0]
        right = node_list[root][1]
        inorder(left, node_list)
        print(root, end="")
        inorder(right, node_list)
def postorder(root, node_list):
    """Print the post-order traversal (left, right, node) without newlines.

    Bug fixes: recurse via postorder() (the original called preorder())
    and emit the root only after both subtrees, as post-order requires.
    """
    if root != '.':
        left = node_list[root][0]
        right = node_list[root][1]
        postorder(left, node_list)
        postorder(right, node_list)
        print(root, end="")
for _ in range(n):
    # Each input line is "<root> <left> <right>" with '.' for a missing child.
    root, left, right = input().split()
    node_list[root] = [left, right]
# Presumably 'A' is always the overall root (Baekjoon 1991 convention) —
# confirm against the problem statement.
preorder("A", node_list)
inorder("A", node_list)
postorder("A", node_list)
|
[
"xdfc1745@gmail.com"
] |
xdfc1745@gmail.com
|
3d3575cf4d7f5f440476a08a88e0cc707f956442
|
5d0ba1091f3aa1154f6eb965bea689c13111090e
|
/List-1/sum3.py
|
15f241a9a93648fb200c9320c50e508122881934
|
[] |
no_license
|
aryamane/codingbat-python
|
e355d47fd3b0888b5edad6de3cf501180385cc56
|
7b89ada70248b0c4031ee5452803e73e7935da10
|
refs/heads/master
| 2022-11-23T11:25:59.564443
| 2020-07-21T02:33:45
| 2020-07-21T02:33:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
import functools
def sum3(nums):
    """Return the sum of the numbers in *nums*.

    Generalized from the original exercise: works for any length, and
    returns 0 for an empty sequence (functools.reduce without an
    initializer raised TypeError on empty input).
    """
    return sum(nums)
print(sum3([1, 2, 3]) )
print(sum3([5, 11, 2]) )
print(sum3([7, 0, 0]) )
|
[
"arya.mane03@gmail.com"
] |
arya.mane03@gmail.com
|
175cd500daf4a0fd308f061cc6c7add4121be01c
|
164a6664d2dceea1e20c158d7d5f0a025a4d1974
|
/graph_theory.py
|
8c14e6a1f9cf456db17b74f6cf3ef6c99d5d6e76
|
[] |
no_license
|
ryan-partlan/Graph_Theory
|
ae76e09c8081edc00bc221c5455c9f6f832a5000
|
4c9434a5c92ab2655dfe1a127a70d3e5a3c50dc3
|
refs/heads/master
| 2022-09-21T07:31:07.301682
| 2020-06-05T17:52:26
| 2020-06-05T17:52:26
| 269,725,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,325
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 1 11:34:32 2020
@author: Ryan
"""
import copy
import itertools
from scipy.linalg import null_space
#import tkinter as tk
#I intend to make a GUI that makes this program easy to use, but I haven't gotten to it yet
import numpy as np
import networkx as nx
#It must seem a bit strange that I am importing networkx when I am making a program that is so similar to it! I use networkx here only to draw the graph based on an adjacency matrix generated by my homemade function..
class Graph:
    """A simple undirected graph of Node and Edge objects.

    Provides basic mutation (add/delete vertices and edges) plus spectral
    utilities: degree/adjacency/Laplacian matrices, spanning-tree counting
    via Kirchhoff's theorem, component counting, path counting by matrix
    powers, and drawing through networkx.
    """
    def __init__(self,name):
        self.name = name
        self.vertices = []
        self.edges = []
        self.connected_to = []
        #"connected_to" tells you what other graphs your graph has an edge connecting it to.
    def add_vertex(self,node):
        '''Adds a vertex to your graph'''
        self.vertices.append(node)
    def add_vertices(self,list_of_nodes):
        '''adds a bunch of vertices all at once'''
        # Node's constructor registers each node with this graph itself.
        for node in list_of_nodes:
            Node(str(node), self)
    def add_edge(self,edge):
        '''Adds an edge to your graph'''
        # Self-loops and parallel edges are silently ignored. The neighbor
        # check also makes this idempotent, which matters because Edge's
        # constructor already calls add_edge (see add_edges below).
        if edge.node1 != edge.node2 and edge.node1 not in edge.node2.neighbors:
            self.edges.append(edge)
            edge.node1.degree += 1
            edge.node2.degree += 1
            edge.node2.neighbors.append(edge.node1)
            edge.node1.neighbors.append(edge.node2)
            edge.node2.cnctd.append(edge.node1)
            edge.node1.cnctd.append(edge.node2)
            # Cross-graph edges record the inter-graph connection on both sides.
            if edge.node1.graph != edge.node2.graph:
                edge.node1.graph.connected_to.append(edge.node2.graph)
                edge.node2.graph.connected_to.append(edge.node1.graph)
    def add_edges(self, list_of_tuples):
        '''adds a bunch of edges at once, takes tuples of endpoints'''
        # Edge(...) already registers itself; the explicit add_edge call is a
        # no-op thanks to the idempotence guard above.
        for tup in list_of_tuples:
            Graph.add_edge(self, Edge(tup[0],tup[1]))
    def delete_edge(self,edge):
        '''deletes the indicated edge'''
        edge.node1.degree += -1
        edge.node1.neighbors.remove(edge.node2)
        edge.node2.degree += -1
        edge.node2.neighbors.remove(edge.node1)
        self.edges.remove(edge)
        # NOTE(review): for a cross-graph edge this removes from self.edges and
        # from node1's graph — if self IS node1's graph the second remove raises
        # ValueError, and node2's graph is never cleaned up; confirm intent.
        if edge.node1.graph != edge.node2.graph:
            edge.node1.graph.edges.remove(edge)
    def clear_edges(self):
        '''deletes all edges in a graph'''
        for edge in self.edges:
            edge.node1.degree += -1
            edge.node1.neighbors.remove(edge.node2)
            edge.node2.degree += -1
            edge.node2.neighbors.remove(edge.node1)
        self.edges = []
    def print_neighbors(self):
        '''prints all of the vertices in a graph, along with each vertex's neighbors'''
        print('Original neighbors:')
        print('------------')
        for vtx in self.vertices:
            print(vtx.name+"'s neighbors are:")
            print('------')
            for neighbor in vtx.neighbors:
                print(neighbor.name)
            print("_______________")
        print('------------')
    def print_edges(self):
        '''Prints all of the end nodes of each edge of your graph'''
        print('ORIGINAL')
        for edge in self.edges:
            print(edge.node1.name)
            print(edge.node2.name)
            print('-------------')
        print('******************************************************')
    def give_info(self):
        '''tells you about your graph'''
        print("There are",len(self.vertices),"vertices")
        print("There are",len(self.edges),"edges")
        print("This graph is connected to",self.connected_to)
        Graph.is_this_a_tree(self)
    def find_complement(self):
        '''Finds the complement graph to yours'''
        # Deep-copy so the complement gets its own Node/Edge objects.
        self.complement = copy.deepcopy(self)
        pairs_of_vertices = list(itertools.combinations(self.complement.vertices, 2))
        # NOTE(review): removing items from a list while iterating over it
        # skips every other element, so connected_to is only fully cleared when
        # it holds a single entry — confirm whether a plain reset was intended.
        for g in self.complement.connected_to:
            self.complement.connected_to.remove(g)
        Graph.clear_edges(self.complement)
        # Connect exactly the vertex pairs that were NOT adjacent originally.
        for pair in pairs_of_vertices:
            if pair[0] not in pair[1].cnctd:
                Graph.add_edge(self.complement, Edge(pair[0],pair[1]))
        return self.complement
    def degree_matrix(self):
        '''Finds the degree matrix of the input graph'''
        array = []
        for vertex in self.vertices:
            array.append(vertex.degree)
        array = np.array(array)
        degree_mat = np.diag(array)
        return degree_mat
    def adjacency_matrix(self):
        '''Finds the adjacency matrix of the graph'''
        pairs_of_vertices = list(itertools.product(self.vertices, repeat=2))
        edge_ends = [(edge.node1, edge.node2) for edge in self.edges]
        rows = []
        # Build a directed 0/1 matrix from the stored edge orientations, then
        # symmetrize it (the graph is undirected).
        for node in self.vertices:
            row = []
            for pair in pairs_of_vertices:
                if pair[0] == node:
                    if pair in edge_ends:
                        row.append(1)
                    else:
                        row.append(0)
            rows.append(row)
        adjacency_mat = np.array(rows)
        adjacency_mat = adjacency_mat + adjacency_mat.transpose()
        return adjacency_mat
    def laplacian(self):
        '''Finds the laplacian matrix of the input graph'''
        lap = Graph.degree_matrix(self) - Graph.adjacency_matrix(self)
        return lap
    def spanning_trees(self):
        '''Uses Kirchhoff's theorem to calculate the # of spanning trees'''
        # Any cofactor of the Laplacian works; row/column 1 is deleted here.
        l = Graph.laplacian(self)
        l_prime = np.delete(l,1,0)
        l_prime = np.delete(l_prime,1,1)
        trees = np.linalg.det(l_prime)
        print( 'The number of spanning trees is', int(round(trees,0)))
        return(int(round(trees,0)))
    def number_of_components(self):
        '''Uses the laplacian to find how many separate components there are in your graph'''
        # The multiplicity of eigenvalue 0 (nullity) equals the component count.
        null = null_space(Graph.laplacian(self))
        dim_of_null = null.shape[1]
        return dim_of_null
    def is_this_a_tree(self):
        '''tells you whether your graph is a true (outputs Boolean)'''
        # NOTE(review): returns None (falsy) rather than False when the graph
        # is not a tree; callers should treat the result as truthy/falsy.
        if Graph.spanning_trees(self) == 1:
            return True
    def paths_of_length_k(self,node1,node2,k):
        '''Finds the number of paths of length k between two nodes'''
        # Entry (i, j) of A^k counts walks of length k between vertices i, j.
        counter = 1
        adj_mat_k = Graph.adjacency_matrix(self)
        n1_row = self.vertices.index(node1)
        n2_column = self.vertices.index(node2)
        if k == 1:
            return Graph.adjacency_matrix(self)[n1_row][n2_column]
        elif k > 1:
            while counter < k:
                adj_mat_k = np.dot(adj_mat_k, Graph.adjacency_matrix(self))
                counter += 1
            return adj_mat_k[n1_row][n2_column]
    def draw(self):
        '''draws the graph using nx'''
        # NOTE(review): nx.from_numpy_matrix was removed in networkx 3.x in
        # favor of from_numpy_array — confirm the pinned networkx version.
        g = nx.from_numpy_matrix(np.array(Graph.adjacency_matrix(self)))
        nx.draw(g)
class Node:
    """A graph vertex; constructing one registers it with its graph."""

    def __init__(self, name, graph):
        self.name = name
        self.graph = graph
        # Fresh vertices start isolated: no incident edges yet.
        self.degree = 0
        self.neighbors = []
        self.cnctd = []
        # Creating a node automatically adds it to the chosen graph.
        Graph.add_vertex(self.graph, self)
class Edge:
    """An undirected edge between two Node objects, with an optional weight.

    Constructing an Edge registers it with node1's graph automatically.
    """
    def __init__(self, node1, node2, weight = 1):
        #initiating an edge adds it automatically to the graphs of your endpoints.
        self.node1 = node1
        self.node2 = node2
        # NOTE(review): weight is stored but not used by any Graph computation
        # shown in this file.
        self.weight = weight
        Graph.add_edge(self.node1.graph, self)
    def delete_edge(self):
        '''deletes the indicated edge'''
        # Mirrors Graph.delete_edge (kept in sync manually) but removes from
        # both endpoint graphs when they differ.
        self.node1.graph.edges.remove(self)
        if self.node1.graph != self.node2.graph:
            self.node2.graph.edges.remove(self)
        self.node1.degree += -1
        self.node1.neighbors.remove(self.node2)
        self.node2.degree += -1
        self.node2.neighbors.remove(self.node1)
# --- Demo: build a small example graph (7 nodes, 6 edges) ------------------
G = Graph('G')
A = Node('A',G)
B = Node('B',G)
C = Node('C',G)
D = Node('D',G)
E = Node('E',G)
F = Node('F',G)
h = Node('h', G)
H = Edge(A,B)
O = Edge(B,C)
Y = Edge(A,F)
R = Edge(D,E)
L = Edge(A,C)
o = Edge(A,D)
# Disabled tkinter UI experiment, kept as an inert string literal.
'''user_graph = Graph('user_graph')
root = tk.Tk()
label = tk.Label(root, text='Input names of vertices:')
label.grid(row=0, column=0)
e = tk.Entry(root)
e.grid(row=1, column = 0)
e.get()
new_user_node = Node(e.get, user_graph)
button1 = tk.Button(root,text = 'Find Complement', command=Graph.find_complement(user_graph))
button1.grid(row=20, column = 0)
root.mainloop()'''
|
[
"noreply@github.com"
] |
ryan-partlan.noreply@github.com
|
b81a4f4351454e527a2f197d3429831c0a28dece
|
789e653a9f3c679e02f8f459fb72508aed9537a0
|
/scripts/count_annotation_conflicts.py
|
e2af16d361e96bc6deb06a683e25478e25bf905d
|
[
"MIT"
] |
permissive
|
gtonkinhill/panaroo
|
3df49a4ba546a83dce9d394ee5b27f89e42566f3
|
0d96fc77caa4c87f37bf16e13ad0e09b6e371f96
|
refs/heads/master
| 2023-07-08T20:49:36.096596
| 2023-05-10T16:22:13
| 2023-05-10T16:22:13
| 162,318,186
| 184
| 28
|
MIT
| 2023-06-29T16:43:12
| 2018-12-18T16:43:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,424
|
py
|
import argparse
import sys, os
import re
# import Levenshtein as lev
def read_gene_data(inputfile):
    """Parse Panaroo's gene_data CSV into a lookup table.

    Returns a dict mapping gene id (column 3) to a tuple of
    (length of column 4, annotation text in the second-to-last column).
    The header row is skipped.
    """
    lookup = {}
    with open(inputfile, 'r') as handle:
        rows = iter(handle)
        next(rows)  # drop the CSV header
        for row in rows:
            fields = row.strip().split(",")
            lookup[fields[3]] = (len(fields[4]), fields[-2])
    return lookup
def count_differences(gene_data,
                      pa_file,
                      lendiff=0.8,
                      col_skip=0,
                      sep=",",
                      method="panaroo"):
    """Count conflicting annotations within each cluster of a p/a matrix.

    For every row (cluster) of the presence/absence file, the member gene
    ids are looked up in gene_data and their normalised annotation strings
    compared; each extra distinct non-hypothetical annotation in a cluster
    counts as one conflict.

    Args:
        gene_data: dict from read_gene_data, gene id -> (length, annotation).
        pa_file: path to the presence/absence matrix.
        lendiff: unused here — the length filtering is commented out below.
        col_skip: number of leading metadata columns dropped per row.
        sep: field separator of the p/a file.
        method: which tool produced the file; controls cell parsing.

    Returns:
        int: total number of annotation conflicts across all clusters.
    """
    anno_conflicts = 0
    record_anno_conflicts = []
    with open(pa_file, 'r') as infile:
        next(infile)  # skip header row
        for line in infile:
            realline = line  # kept intact for debug output below
            if method == "roary":
                # Roary quotes every cell, so split on the quoted separator.
                line = re.split('","', line.strip())[col_skip:]
            else:
                line = line.strip().split(sep)[col_skip:]
            cluster = []
            for l in line:
                # Cells holding multiple genes are skipped for these tools.
                if method == "pirate":
                    if ":" in l: continue
                elif method == "panaroo":
                    if ";" in l: continue
                elif method == "ppanggolin":
                    if ";" in l: continue
                l = re.split("[;:\t]", l)
                for g in l:
                    g = g.strip("(").strip(")").strip('"')
                    if "refound" in g: continue
                    if g == "": continue
                    if g not in gene_data:
                        # Debug aid: show rows referencing unknown gene ids.
                        print(line)
                        print(realline)
                    cluster.append(g)
            max_len = -1
            for g in cluster:
                max_len = max(max_len, gene_data[g][0])
            annotations = set()
            for g in cluster:
                # if gene_data[g][0] <= (lendiff*max_len):
                #     continue
                if gene_data[g][1] in ["", "hypothetical protein"]:
                    continue
                annotations.add(gene_data[g][1])
            # Normalise annotations (lower-case, strip digits/underscores)
            # before comparing for conflicts.
            annotations = set(
                [re.sub('[0-9|_]', '', i.lower()) for i in annotations])
            if len(annotations) > 1:
                record_anno_conflicts.append(list(annotations))
                anno_conflicts += len(annotations) - 1
            # annotations = sorted(list(annotations))
            # if len(annotations)>1:
            #     for a in annotations[1:]:
            #         if lev.ratio(annotations[0].lower(), a.lower()) < lev_thresh:
            #             anno_conflicts += 1
            #             record_anno_conflicts.append([annotations[0].lower(), a.lower()])
    for a in record_anno_conflicts:
        print(a)
    return anno_conflicts
def main():
    """Parse CLI arguments and report annotation conflicts in a p/a file."""
    parser = argparse.ArgumentParser(
        description="""Counts annotation conflicts.""")

    parser.add_argument("-g",
                        "--gene_data",
                        dest="gene_data",
                        required=True,
                        help="gene data file output by Panaroo",
                        type=str)

    # Bug fix: the original passed "-p" "--pa" (adjacent string literals),
    # which Python concatenates into the single bogus option "-p--pa".
    parser.add_argument("-p",
                        "--pa",
                        dest="pa_file",
                        help="Presence absence file",
                        required=True)

    parser.add_argument(
        "--method",
        dest="method",
        help="Algorithm used to produce p/a file for formatting",
        type=str,
        choices=['panaroo', 'roary', 'pirate', 'panx', 'cogsoft', 'ppanggolin'],
        default="panaroo")

    args = parser.parse_args()

    gene_data = read_gene_data(args.gene_data)

    # Per-tool parsing parameters: (columns to skip, field separator).
    # Replaces the original if/elif chain; covers every --method choice.
    format_params = {
        'pirate': (22, '\t'),
        'panaroo': (14, ','),
        'roary': (14, ','),
        'panx': (0, ','),
        'cogsoft': (0, ','),
        'ppanggolin': (14, ','),
    }
    col_skip, sep = format_params[args.method]

    anno_conflict_count = count_differences(gene_data,
                                            args.pa_file,
                                            col_skip=col_skip,
                                            sep=sep,
                                            method=args.method)
    print("Conflicting annotations: ", anno_conflict_count)
    return


if __name__ == '__main__':
    main()
|
[
"g.tonkinhill@gmail.com"
] |
g.tonkinhill@gmail.com
|
c21f8f3a3d3d1c3c2952ded3ed79dfdba4129493
|
be7c73d8a7a16ee796aa5a5e1777cd50b1420c7a
|
/2398 Restore Full.py
|
838b8786cecc2a772a2fe8684fb640810a0f9eb0
|
[] |
no_license
|
ColbyBurkett/IFR2398-Backup-Restore
|
3c223f3d468eee50a03d0191b261a0978cee3673
|
99b65bf4bf43c3d58d6f6f7bcf79af00405d4f96
|
refs/heads/master
| 2022-05-29T16:57:52.271333
| 2020-04-21T22:10:29
| 2020-04-21T22:10:29
| 257,725,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,170
|
py
|
# IFR 2398 Calibration Dump Script
# April 2019 Colby Burkett
#
#
# This script restores all of the backed up calibration information from
# an IFR 2398 or LG SA-7270 Spectrum Analyzer
#
# Don't judge! It's quick and dirty!
#
# Last: Use at your own risk!
# NOTE: this is Python 2 code (print statements throughout).
from __future__ import division
import datetime
import os
import sys
import time
import visa

# Timestamp marking the start of the restore run.
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d_%H-%M-%S')
print st

# Create VISA object
rm = visa.ResourceManager()

# Open connection to UUT on GPIB ADDR 19
# NOTE(review): the resource string below addresses GPIB 7, not 19 as the
# comment above says — confirm which is correct.
inst = rm.open_resource('GPIB0::7::INSTR')
inst.timeout = 10000

# Serial Number and Manufacturer Set
# MFC of 1 is IFR, MFC of 2 is LG
#inst.write("SN MFC[1]SN[12345678]")

# Retrieve & print the ID of the UUT
uutId = inst.query("*IDN?")
print uutId

# Replace with your backup file name created by the Dump script
backupFileName='backupFileName.txt'
if (os.path.isfile(backupFileName)):
    backupFile = open(backupFileName, 'r')
else:
    # NOTE(review): execution continues after this message and will crash
    # below when backupFile is referenced — consider exiting here instead.
    print 'Backup filename invalid'

# Parse the backup: one Python literal per line.  eval() fully trusts the
# backup file's contents.
caldata = {}
d = {}
a = 0
for rowofdata in backupFile:
    caldata[a] = eval(rowofdata)
    a += 1
# Layout of caldata rows:
# RXFLAT 0-23
# IFATT 24
# RXATT 25
# SPANATT 26
# LOGTBL 27-37

# Need to set atten level to manual first
inst.write("AT MAN;")

# Restore the 24 RXFLAT rows: tables 1-4 for each of 6 attenuator settings
# (0-50 dB in 10 dB steps).
a = 0
atten = 0
tbl=1
print "RXFLAT Restore: "+str(atten*10)+"dB",
inst.write("AT "+str(atten*10)+";")
while a < 24:
    #print a
    if tbl == 4:
        # Last table for this attenuation: write it, then advance to the
        # next attenuator setting.
        data = '<'+','.join(str(e) for e in caldata[a])+'>'
        #print tbl, atten, data
        sys.stdout.write('.')
        inst.write("RXFLAT #"+str(tbl)+" #"+str(atten)+","+data+";")
        #print("RXFLAT #"+str(tbl)+" #"+str(atten)+","+data+";")
        tbl = 1
        atten += 1
        if atten < 6:
            sys.stdout.write(str(atten*10)+"dB:")
            inst.write("AT "+str(atten*10)+";")
    else:
        data = '<'+','.join(str(e) for e in caldata[a])+'>'
        #print tbl, atten, data
        sys.stdout.write('.')
        inst.write("RXFLAT #"+str(tbl)+" #"+str(atten)+","+data+";")
        #print("RXFLAT #"+str(tbl)+" #"+str(atten)+","+data+";")
        tbl += 1
    # Small pause between writes so the instrument can keep up.
    time.sleep(0.1)
    a += 1
# IFATT: one entry per element of caldata[24].
print "\nIFATT:",
Position = 24
entry = 0
for data in caldata[Position]:
    #print data
    sys.stdout.write('.')
    inst.write("IFATT #"+str(entry)+","+str(data)+";")
    entry += 1

# RXATT: caldata[25].
print "\nRXATT:",
Position = 25
entry = 0
for data in caldata[Position]:
    #print data
    sys.stdout.write('.')
    inst.write("RXATT #"+str(entry)+","+str(data)+";")
    entry += 1

# SPANATT: caldata[26].
print "\nSPANATT:",
Position = 26
entry = 0
for data in caldata[Position]:
    #print data
    sys.stdout.write('.')
    inst.write("SPANATT #"+str(entry)+","+str(data)+";")
    entry += 1
# LOGTBL: rows 27-37 of the backup, one per resolution bandwidth (rbw).
print "\nLOGTBL:",
Position = 27
rbw = 0
log = 0
while Position < 38:
    sys.stdout.write("\n"+str(rbw)+":")
    for data in caldata[Position]:
        sys.stdout.write('.')
        #print "LOGTBL #"+str(rbw)+" #"+str(log)+","+str(data)+";"
        inst.write("LOGTBL #"+str(rbw)+" #"+str(log)+","+str(data)+";")
        log += 1
    # NOTE(review): `log` is never reset between rbw tables — confirm the
    # instrument expects a running index rather than a per-table one.
    rbw += 1
    Position += 1
|
[
"noreply@github.com"
] |
ColbyBurkett.noreply@github.com
|
5abc9ae6ac9dfdcec6f92f87a7773067e705ccc9
|
873f7d0c7878d26bf89af6f1efd6d9478d92019d
|
/indels-bottleneck/get_conserved_windows.py
|
75b2444f19f895a500d2f0ee6736a4ef2503eb8a
|
[
"MIT"
] |
permissive
|
glenwilson/variant_analysis
|
c5d18a95f63cb72f4cd9f6a9371cdb245f65f54c
|
0c26e9e16352817af20bbce0c759fff30b4054e5
|
refs/heads/master
| 2021-01-23T23:56:04.710563
| 2019-01-14T12:50:57
| 2019-01-14T12:50:57
| 122,743,761
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,298
|
py
|
#!/usr/bin/python
import csv
import sys

# Load the variant VCF file paths passed on the command line.
inputlist = sys.argv[1:]

# This opens the tab-separated csv of candidate windows.
win = open('nblrr-noscaffold-windows.csv', 'r')
wincsv = csv.reader(win, delimiter='\t', quotechar=None, doublequote=False)
winlist = [row for row in wincsv]

# File to write conserved regions to.
out = open('conserved_regions-nblrr.csv', 'w')
outcsv = csv.writer(out, delimiter='\t', quotechar=None, doublequote=False)

# Number of windows to extract.
# Look to left and right of each window by `offset` base pairs for
# conserved regions.
offset = 50
def make_list(a, start, stop, n=20):
    """Return the gaps in [start, stop] not covered by the intervals in a.

    `a` is a list of (x, length) pairs describing closed intervals
    [x, x+length-1], in increasing order of x.  The union of those
    intervals is removed from [start, stop], and every remaining gap of
    length at least `n` is reported, again as an (x, length) pair with x
    the start of the gap.
    """
    gaps = []
    cursor = start
    for left, length in a:
        # A gap of at least n bases precedes this interval.
        if left - cursor >= n:
            gaps.append((cursor, left - cursor))
        # Advance past the interval (max() guards against overlaps).
        cursor = max(cursor, left + length)
    # Trailing gap up to and including `stop`.
    if stop + 1 - cursor >= n:
        gaps.append((cursor, stop - cursor + 1))
    return gaps
#################################################################
# For each candidate window, collect variant positions (with indel
# lengths) from every input VCF, then append the conserved sub-intervals
# to the output row.
for row in winlist[1:]:
    # Chromosome number is encoded in the last two characters of column 0.
    chrom = int(row[0][-2:])
    start = int(row[2])
    stop = int(row[3])
    var_pos = []
    for name in inputlist:
        vcf = open(name, 'r')
        vcfcsv = csv.reader(vcf, delimiter='\t', quotechar=None, doublequote=False)
        # Keep data rows on this chromosome within +/- 50 bp of the window.
        # NOTE(review): the literal 50 here and below duplicates `offset`
        # defined above — they should probably reference the same constant.
        for en in [x for x in vcfcsv if len(x)>2 and x[0][0] != "#" and int(x[0][-2:]) == chrom and start-50 <= int(x[1]) <= stop+50]:
            a = en[4].split(",")
            # Variant length: ref length minus alt length + 1, at least 1
            # (covers deletions; SNPs count as length 1).
            length = max(1, len(en[3]) - len(a[0]) + 1)
            if len(a) == 2 :
                length = max(length, len(en[3]) - len(a[1]) + 1)
            var_pos.append((int(en[1]), length))
        vcf.close()
    var_pos = sorted(var_pos, key=lambda pair: pair[0])
    newrow = row[:4]
    newrow.extend(make_list(var_pos, start-50, stop+50))
    outcsv.writerow(newrow)
|
[
"glenmatthewwilson@gmail.com"
] |
glenmatthewwilson@gmail.com
|
4ec9f8b7e11a6ce71a9ebc639139caceadea7601
|
2e0f419b50d86889616770b8ac8cb3367484f015
|
/deeppy/autoencoder/autoencoder.py
|
886ffb276a4ec5de6caf3692f3fd87dd357999e6
|
[
"MIT"
] |
permissive
|
ilo10/deeppy
|
e833d4d44706b779089ba780411532f849191d63
|
aa424a4bc36242bb50b51099a7db765ec8221c41
|
refs/heads/master
| 2021-01-16T00:23:39.400995
| 2015-07-24T09:49:01
| 2015-07-24T09:49:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,017
|
py
|
import numpy as np
import cudarray as ca
from ..feedforward.layers import Activation, FullyConnected
from ..loss import Loss
from ..base import Model, PickleMixin
from ..input import Input
from ..parameter import Parameter
class Autoencoder(Model, PickleMixin):
    """Single-layer autoencoder with tied encoder/decoder weights.

    The decoder reuses the transposed encoder weight matrix, so only one
    weight parameter plus two bias vectors (`bias` for the hidden layer,
    `bias_prime` for the reconstruction) are learned.
    """

    def __init__(self, n_out, weights, bias=0.0, bias_prime=0.0,
                 activation='sigmoid', loss='bce'):
        self.name = 'autoenc'
        self.n_out = n_out
        self.activation = Activation(activation)
        self.activation_decode = Activation(activation)
        self.loss = Loss.from_any(loss)
        self.weights = Parameter.from_any(weights)
        self.bias = Parameter.from_any(bias)
        self.bias_prime = Parameter.from_any(bias_prime)
        self._initialized = False
        # Cached fprop inputs, reused by the matching bprop steps below.
        self._tmp_x = None
        self._tmp_y = None

    def _setup(self, x_shape):
        """Allocate parameters lazily, once the input shape is known."""
        if self._initialized:
            return
        n_in = x_shape[1]
        self.weights._setup((n_in, self.n_out))
        if not self.weights.name:
            self.weights.name = self.name + '_w'
        self.bias._setup(self.n_out)
        if not self.bias.name:
            self.bias.name = self.name + '_b'
        self.bias_prime._setup(n_in)
        if not self.bias_prime.name:
            self.bias_prime.name = self.name + '_b_prime'
        self.loss._setup((x_shape[0], self.n_out))
        self._initialized = True

    @property
    def _params(self):
        return self.weights, self.bias, self.bias_prime

    @_params.setter
    def _params(self, params):
        self.weights, self.bias, self.bias_prime = params

    def output_shape(self, input_shape):
        return (input_shape[0], self.n_out)

    def encode(self, x):
        """Encoder forward pass: activation(x W + b)."""
        self._tmp_x = x
        y = ca.dot(x, self.weights.array) + self.bias.array
        return self.activation.fprop(y)

    def decode(self, y):
        """Decoder forward pass with tied weights: activation(y W^T + b')."""
        self._tmp_y = y
        x = ca.dot(y, self.weights.array.T) + self.bias_prime.array
        return self.activation_decode.fprop(x)

    def decode_bprop(self, x_grad):
        """Backprop through the decoder; must run before encode_bprop,
        because it (over)writes the shared weight gradient."""
        x_grad = self.activation_decode.bprop(x_grad)
        ca.dot(x_grad.T, self._tmp_y, out=self.weights.grad_array)
        ca.sum(x_grad, axis=0, out=self.bias_prime.grad_array)
        return ca.dot(x_grad, self.weights.array)

    def encode_bprop(self, y_grad):
        """Backprop through the encoder (after decode_bprop)."""
        y_grad = self.activation.bprop(y_grad)
        # Because the weight gradient has already been updated by
        # decode_bprop() we must add the contribution.
        w_grad = self.weights.grad_array
        w_grad += ca.dot(self._tmp_x.T, y_grad)
        ca.sum(y_grad, axis=0, out=self.bias.grad_array)
        return ca.dot(y_grad, self.weights.array.T)

    def _update(self, x):
        """One training step on batch x; returns the reconstruction loss."""
        y_prime = self.encode(x)
        x_prime = self.decode(y_prime)
        x_prime_grad = self.loss.grad(x_prime, x)
        y_grad = self.decode_bprop(x_prime_grad)
        self.encode_bprop(y_grad)
        return self.loss.loss(x_prime, x)

    def _reconstruct_batch(self, x):
        y = self.encode(x)
        return self.decode(y)

    def reconstruct(self, input):
        """ Returns the reconstructed input. """
        input = Input.from_any(input)
        x_prime = np.empty(input.x.shape)
        offset = 0
        # Process batch by batch, writing each result into the output array.
        for x_batch in input.batches():
            x_prime_batch = np.array(self._reconstruct_batch(x_batch))
            batch_size = x_prime_batch.shape[0]
            x_prime[offset:offset+batch_size, ...] = x_prime_batch
            offset += batch_size
        return x_prime

    def _embed_batch(self, x):
        return self.encode(x)

    def embed(self, input):
        """ Returns the embedding of the input. """
        input = Input.from_any(input)
        y = np.empty(self.output_shape(input.x.shape))
        offset = 0
        for x_batch in input.batches():
            y_batch = np.array(self._embed_batch(x_batch))
            batch_size = y_batch.shape[0]
            y[offset:offset+batch_size, ...] = y_batch
            offset += batch_size
        return y

    def feedforward_layers(self):
        """Expose the trained encoder as plain feedforward layers."""
        return [FullyConnected(self.n_out, self.weights.array,
                               self.bias.array),
                self.activation]
class DenoisingAutoencoder(Autoencoder):
    """Autoencoder trained to reconstruct clean inputs from corrupted copies."""

    def __init__(self, n_out, weights, bias=0.0, bias_prime=0.0,
                 corruption=0.25, activation='sigmoid', loss='bce'):
        super(DenoisingAutoencoder, self).__init__(
            n_out=n_out, weights=weights, bias=bias, bias_prime=bias_prime,
            activation=activation, loss=loss
        )
        # Fraction of input entries randomly zeroed during training.
        self.corruption = corruption

    def corrupt(self, x):
        """Zero out a random `corruption` fraction of the entries of x."""
        mask = ca.random.uniform(size=x.shape) < (1-self.corruption)
        return x * mask

    def _update(self, x):
        """One training step: encode the corrupted input, but compute the
        loss (and gradients) against the clean input."""
        x_tilde = self.corrupt(x)
        y_prime = self.encode(x_tilde)
        x_prime = self.decode(y_prime)
        x_prime_grad = self.loss.grad(x_prime, x)
        y_grad = self.decode_bprop(x_prime_grad)
        self.encode_bprop(y_grad)
        return self.loss.loss(x_prime, x)
|
[
"anders.bll@gmail.com"
] |
anders.bll@gmail.com
|
a22d2c2829181744bcc627e35b88a1c238b9d4b0
|
d69e5435acfcfd3c42c22e158e838645c55e02e0
|
/cop.py
|
9b6bce4c13f0409d7463d5975fdf0b2a522cf5a5
|
[
"MIT"
] |
permissive
|
AnJ95/CoP-Bot
|
4fa1329b31a63d0aa2d8ce355ceb3a3665d98799
|
39f2eeeaf335310b7134a20ad54097488b893d03
|
refs/heads/main
| 2023-04-26T05:22:24.317263
| 2021-06-04T14:06:55
| 2021-06-04T14:06:55
| 373,849,836
| 0
| 0
|
MIT
| 2021-06-04T13:26:08
| 2021-06-04T13:26:07
| null |
UTF-8
|
Python
| false
| false
| 4,111
|
py
|
from typing import Optional, List
from telegram import Update, Bot, Message, User, MessageEntity, ParseMode
from telegram.ext import Updater, CommandHandler, CallbackContext, Handler, MessageHandler, Filters
from os import environ
# import logging
# logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# level=logging.DEBUG)
from state import state
from helpers import current_user, private, admin
def start(update: Update, context: CallbackContext):
    """Handle /start and /help: send the command list from commands.md."""
    msg: Message = update.message
    bot: Bot = context.bot
    with open("commands.md") as readme:
        bot.send_message(msg.chat_id, readme.read(), parse_mode=ParseMode.MARKDOWN)
def highscore(update: Update, context: CallbackContext):
    """Handle /highscore: post the current score table."""
    msg: Message = update.message
    bot: Bot = context.bot
    bot.send_message(msg.chat_id, state.get_hs())
@admin
def add_new_admin(update: Update, context: CallbackContext):
    """Handle /add_admin (admins only): grant admin to every @mention."""
    msg: Message = update.message
    mentions = msg.parse_entities([MessageEntity.MENTION])
    if len(mentions) == 0:
        context.bot.send_message(msg.chat_id, "You haven't mentioned anyone!")
        return
    for m in mentions.values():
        state.add_admin(m)
        context.bot.send_message(msg.chat_id, f"Added {m}")
@admin
def remove_admin(update: Update, context: CallbackContext):
    """Handle /remove_admin (admins only): revoke admin for every @mention."""
    msg: Message = update.message
    mentions = msg.parse_entities([MessageEntity.MENTION])
    if len(mentions) == 0:
        context.bot.send_message(msg.chat_id, "You haven't mentioned anyone!")
        return
    for m in mentions.values():
        state.del_admin(m)
        context.bot.send_message(msg.chat_id, f"Removed {m}")
@admin
def list_admins(update: Update, context: CallbackContext):
    """Handle /admins (admins only): show the current admin list."""
    msg: Message = update.message
    context.bot.send_message(msg.chat_id, state.get_admin_state())
@admin
def show_state(update: Update, context: CallbackContext):
    """Handle /state (admins only): dump the bot state, private chats only."""
    msg: Message = update.message
    bot: Bot = context.bot
    msg_type: str = msg.chat.type  # 'private', 'group', 'supergroup' or 'channel'
    if msg_type != "private":
        bot.send_message(msg.chat_id, f"State command only allowed in private chat. This is a {msg_type} chat.")
        return
    bot.send_message(msg.chat_id, state.__repr__())
@admin
def listen_here(update: Update, context: CallbackContext):
    """Handle /listen (admins only): toggle whether the bot is active in
    this chat."""
    msg: Message = update.message
    bot: Bot = context.bot
    # update_listen_to toggles this chat id and reports the new membership.
    contains = state.update_listen_to(msg.chat_id)
    bot.send_message(msg.chat_id, f"Now I'm listening here .." if contains else f"I'm not listening here anymore ..")
class NewHandler(Handler):
    """Telegram handler matching photo messages whose caption starts /new."""

    def __init__(self, callback):
        super().__init__(callback)

    def check_update(self, update):
        """Return True only for messages carrying a photo and a /new caption."""
        caption = update.message.caption
        if caption is None or not caption.startswith("/new"):
            return False
        photos = update.message.photo
        return photos is not None and len(photos) > 0
@current_user
@private
def image_missing(update: Update, context: CallbackContext):
    """Handle /new sent without an attached photo: ask for the image."""
    msg: Message = update.message
    bot: Bot = context.bot
    bot.send_message(msg.chat_id, "The image is missing.")
def main():
    """Wire up all command/message handlers and run the bot until stopped."""
    updater = Updater(environ["Token"], use_context=True)
    dp = updater.dispatcher
    # Plain text (non-command) messages are treated as answer attempts.
    dp.add_handler(MessageHandler(Filters.text & (~Filters.command), state.check_answer))
    dp.add_handler(CommandHandler('start', start))
    dp.add_handler(CommandHandler('help', start))
    dp.add_handler(CommandHandler('highscore', highscore))
    dp.add_handler(CommandHandler('state', show_state))
    dp.add_handler(CommandHandler('add_admin', add_new_admin))
    dp.add_handler(CommandHandler('remove_admin', remove_admin))
    dp.add_handler(CommandHandler('admins', list_admins))
    dp.add_handler(CommandHandler('listen', listen_here))
    # Photo captioned /new starts a challenge; bare /new gets a reminder.
    dp.add_handler(NewHandler(state.new_challenge))
    dp.add_handler(CommandHandler('new', image_missing))
    dp.add_handler(CommandHandler('refine', state.refine))
    dp.add_handler(CommandHandler('skip', state.skip))
    updater.start_polling()
    updater.idle()


if __name__ == '__main__':
    main()
|
[
"develop@fuchss.org"
] |
develop@fuchss.org
|
43a9eb428dd8198486a10048e0b3883a2e919bc6
|
6f88246ef5640c9bbdc87168cf97cb30e258ea46
|
/action.py
|
762700f88199979fd0dd3910e8a8114b2df8d384
|
[] |
no_license
|
HackerspaceBlumenau/event_validator
|
5f20478fe8303985e69166859cd00c95353a28cb
|
02b560e1736ffb7a248dee5abd1a58309c6336da
|
refs/heads/master
| 2020-09-05T14:18:34.953870
| 2019-11-17T02:01:36
| 2019-11-17T02:46:10
| 220,130,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,781
|
py
|
import json
import os
import io

from github import Github

# Bot reply (Portuguese) posted when required fields are missing.
ERROR_COMMENT = (
    "Olá! Ficamos MUITO felizes em ver seu interesse em "
    "participar da nossa comunidade. Mas eu encontrei alguns "
    "probleminhas. Estão faltando alguns dados. Você pode dar uma "
    "olhadinha novamente? Isso foi o que encontrei:"
)
# Bot reply posted when the event proposal is complete.
SUCCESS_COMMENT = (
    "Que demais! Obrigado por ter enviado sua proposta de "
    "evento. Me parece que tudo está certo! Logo logo, alguém "
    "vai entrar em contato para definir o local e data definitivos. :)"
)

# One error message per missing required section.
MISSING_DESCRIPTION_ERROR = "Está faltando a descrição"
MISSING_DATE_ERROR = "Está faltando a data ou o formato está errado"
MISSING_NAME_ERROR = "Está faltando o nome do palestrante"
MISSING_EMAIL_ERROR = "Está faltando o email de contato"

# Section prefixes expected in the issue body ("Label: value").
DESCRIPTION_LABEL = "Descrição"
DATE_LABEL = "Data sugerida"
NAME_LABEL = "Nome"
EMAIL_LABEL = "Email"
def get_access_token():
    """Returns the value of the environment variable GITHUB_TOKEN"""
    return os.environ.get("GITHUB_TOKEN")
def get_repository_name():
    """Returns the value of the environment variable GITHUB_REPOSITORY"""
    return os.environ.get("GITHUB_REPOSITORY")
def get_event_file():
    """Returns the value of the environment variable GITHUB_EVENT_PATH"""
    return os.environ.get("GITHUB_EVENT_PATH")
def has_event_label(event):
    """
    Checks if the issue has the label "evento"
    Returns True if the issue has the label "evento". Otherwise, returns False
    """
    labels = event["issue"].get("labels", [])
    return any(label.get("name", "") == "evento" for label in labels)
def is_first_section_line(line):
    """
    Check if the line opens one of the required sections.
    Returns True if the stripped line starts with a required section prefix.
    """
    prefixes = (DESCRIPTION_LABEL, NAME_LABEL, DATE_LABEL, EMAIL_LABEL)
    return line.strip().startswith(prefixes)
def get_sections(body):
    """
    Get a dictionary with the issue info

    Returns a dictionary with the issue data. Each key in the dictionary is the
    section prefix and its value is the section content.
    """
    sections = {}
    section = ""
    section_name = ""
    buff = io.StringIO(body)
    line = buff.readline().strip()
    # NOTE(review): only the first line is stripped before the length test;
    # later lines keep their trailing newline, so the loop effectively runs
    # until EOF (readline() returning "") — confirm this is intended.
    while len(line) > 0:
        if is_first_section_line(line):
            # Starting a new section: flush the one collected so far.
            if len(section_name) > 0:
                sections[section_name] = section.rstrip()
            section_name = line[: line.find(":")].strip()
            section = line[line.find(":") + 1 :].lstrip()
        else:
            # Continuation line: append to the current section verbatim.
            section += line
        line = buff.readline()
    # Flush the final section, if any.
    if len(section_name) > 0:
        sections[section_name] = section.rstrip()
    return sections
def validate_data(event):
    """
    Check if the issue has all the required fields.

    Returns a list of human-readable error messages, one per missing or
    empty required section (description, date, name, email).  An empty
    list means the issue body is complete.
    """
    sections = get_sections(event["issue"].get("body", ""))
    # Each required section paired with its error message.  The falsy test
    # `not sections.get(label)` covers both "missing" and "empty", replacing
    # the original's redundant double-membership checks.  The original's
    # leftover debug print(sections) has also been removed.
    required = [
        (DESCRIPTION_LABEL, MISSING_DESCRIPTION_ERROR),
        (DATE_LABEL, MISSING_DATE_ERROR),
        (NAME_LABEL, MISSING_NAME_ERROR),
        (EMAIL_LABEL, MISSING_EMAIL_ERROR),
    ]
    return [error for label, error in required if not sections.get(label)]
def get_issue_number(event):
    """
    Returns the issue number from the event
    """
    issue = event["issue"]
    return issue["number"]
def comment(g, event, msg):
    """
    Write a comment in the issue from the event.

    g is an authenticated Github client; the repository and issue number
    are resolved from the environment and the event payload.
    """
    repo = g.get_repo(get_repository_name())
    issue = repo.get_issue(get_issue_number(event))
    return issue.create_comment(msg)
def main():
    """Validate the GitHub issue described by the workflow event payload."""
    with open(get_event_file()) as f:
        g = Github(get_access_token())
        event = json.load(f)
    # Only issues carrying the "evento" label are validated.
    if not has_event_label(event):
        print("It's not a event issue")
        return
    # With the label present, check the minimum required data is there.
    errors = validate_data(event)
    if len(errors) > 0:
        # Missing data. Write a comment notifying the user
        errmsg = "\n".join(errors)
        comment(g, event, f"{ERROR_COMMENT}\n\n{errmsg}")
    else:
        # All required data is there.
        comment(g, event, SUCCESS_COMMENT)


if __name__ == "__main__":
    main()
|
[
"jvanz@jvanz.com"
] |
jvanz@jvanz.com
|
77c635ae905cd6cb2dcab4ad1a34cfedc505dfc3
|
2b6288e63e2e3e959feafce5b3f96b0b440e4fc7
|
/data_structure/stack/match_bracket.py
|
0b2c4c7a9043bdc4956c2b8aa455a0aaa8607e96
|
[
"MIT"
] |
permissive
|
excited-tiger/python-warm-up-exercises
|
a08139ab3dd08c947fdef1794174563d1fd29477
|
141474675f05803f92b914d4d16d19c8f7ab3b4b
|
refs/heads/master
| 2023-04-09T16:29:34.536107
| 2021-04-14T14:49:06
| 2021-04-14T14:49:06
| 296,372,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
# -*- coding:utf-8 -*-
# @atime : 2021/2/2 9:09 下午
"""
brackets balance
https://www.luogu.com.cn/problem/UVA673
"""
def check_balance(brackets_str):
    """Return True when every bracket in the string is properly matched.

    A closing character must match the most recently opened bracket; any
    other character is treated as an opener and pushed onto the stack.
    """
    closer_to_opener = {']': '[', ')': '('}
    stack = []
    for ch in brackets_str:
        if ch not in closer_to_opener:
            stack.append(ch)
        elif not stack or stack.pop() != closer_to_opener[ch]:
            return False
    return not stack
# @local_file_test
def solution():
    """Read the number of test cases, then judge one bracket string per line."""
    pair_num = int(input())
    for i in range(pair_num):
        if check_balance(input()):
            print('Yes')
        else:
            print('No')


if __name__ == '__main__':
    solution()
    pass
|
[
"sunzhenping@starsee.cn"
] |
sunzhenping@starsee.cn
|
fad0b70ce3176d2e59880fd778c2decc3fd09852
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/commons/testcase/firstcases/testcase4_020.py
|
413bc8c234b5a44ac4b9a708dcde8ced1aa82167
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,656
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException

# Appium capabilities for the Wikimedia Commons app under test, including
# JaCoCo instrumentation for coverage collection.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'fr.free.nrw.commons',
    'appActivity' : 'fr.free.nrw.commons.auth.LoginActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'fr.free.nrw.commons/fr.free.nrw.commons.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    """Run a shell command, give it `timeout` seconds, then terminate it."""
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
    """Find an element by UiAutomator selector, retrying for ~5 seconds.

    As a last resort, taps (50, 50) to dismiss any overlay and tries once
    more, letting NoSuchElementException propagate on final failure.
    """
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    # Dismiss a possible popup, then make one final attempt.
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2) :
    """Find an element by selector str1 (2 tries), falling back to str2
    (5 tries), with the same tap-to-dismiss last resort as getElememt."""
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe between two points given as fractions of the screen size,
    retrying once if the first attempt raises WebDriverException."""
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    return
# testcase020
# NOTE: Python 2 syntax below (`except Exception, e` / print statements).
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    # Scripted UI walk: tap the contribution image twice, long-press the
    # overflow menu, open the image, then tap the "Email" entry.
    element = getElememt(driver, "new UiSelector().resourceId(\"fr.free.nrw.commons:id/contributionImage\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"fr.free.nrw.commons:id/contributionImage\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"fr.free.nrw.commons:id/image\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Email\")", "new UiSelector().className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always collect timing + coverage and stop the app, even on failure.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"4_020\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'fr.free.nrw.commons'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
0b98e7bdc94e9240c99f75211bb753b687f6e240
|
752e73f2d44d50c0aab4e0a99fca1567d824e713
|
/docs/conf.py
|
f57ce0ee0c74f508140476ac325ce9abb0dec057
|
[
"MIT"
] |
permissive
|
iandennismiller/diamond-patterns
|
b9f10c683bfd69fc3d03b3ab45f21ef7f2ef25d7
|
497fc2f3fcd15098aed4c2888d2f6fcd64072400
|
refs/heads/master
| 2021-06-27T07:40:45.488777
| 2020-10-16T13:30:09
| 2020-10-16T13:30:09
| 65,153,869
| 1
| 0
|
NOASSERTION
| 2019-11-16T21:04:55
| 2016-08-07T21:00:52
|
CSS
|
UTF-8
|
Python
| false
| false
| 9,066
|
py
|
# -*- coding: utf-8 -*-
#
# diamond documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 20 21:19:09 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
import datetime
from git import Repo
# from https://github.com/flask-admin/flask-admin/blob/master/setup.py
def fpath(name):
    """Return *name* resolved relative to this file's directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, name)
def read(fname):
    """Return the text content of *fname* (resolved via fpath).

    Bug fix: the original leaked the file handle; a `with` block closes it
    deterministically.
    """
    with open(fpath(fname)) as handle:
        return handle.read()
# Read the package metadata module once; grep() below scans this text for
# attribute assignments like __version__ = '...'.
file_text = read(fpath('../diamond_patterns/__meta__.py'))
def grep(attrname):
    """Extract the single-quoted value assigned to *attrname* in file_text.

    Raises ValueError if the metadata file does not contain exactly one
    matching ``attrname = '...'`` assignment.
    """
    pattern = r"{0}\W*=\W*'([^']+)'".format(attrname)
    # Tuple-unpacking the findall result enforces exactly one match.
    strval, = re.findall(pattern, file_text)
    return strval
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_extensions'))
# custom configuration -----------------------------------------------------
this_path = os.path.dirname(os.path.abspath(__file__))
git_path = os.path.join(this_path, "..")

# Record the short git checksum for display in page templates; fall back to
# a placeholder when the docs are built outside a usable git checkout.
try:
    repo = Repo(git_path)
    hc = repo.head.commit
    git_checksum = str(hc)[:8]
except Exception:
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; Exception is the broadest sensible catch here.
    git_checksum = "0000000"

html_context = {
    "git_checksum": git_checksum,
    "today": datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d"),
}
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = grep("__project__")
copyright = grep("__copyright__")
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = grep("__version__")
# The full version, including alpha/beta/rc tags.
release = grep("__version__")
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
import alabaster
# # The theme to use for HTML and HTML Help pages. See the documentation for
# # a list of builtin themes.
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_theme_options = {
'github_button': False,
'show_powered_by': False,
# 'analytics_id': "",
# 'logo': '',
# 'extra_nav_links': {
# "pplapi Home": "http://pplapi.com",
# }
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/tavernier_blue-300.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
# 'about.html',
"sidebarintro.html",
'navigation.html',
# 'relations.html',
# 'searchbox.html',
# 'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'diamonddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'diamond.tex', u'Diamond Documentation',
u'Ian Dennis Miller', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'diamond', u'Diamond Documentation',
[u'Ian Dennis Miller'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'diamond', u'Diamond Documentation',
u'Ian Dennis Miller', 'diamond', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
[
"ian@iandennismiller.com"
] |
ian@iandennismiller.com
|
226089badcaa2c7ea385e85956fac83d58627096
|
42c525b135dd5e7621ec7a52157789a4907f5c66
|
/Week02/389.找不同.py
|
c9d5500788cb1db0bba1cafab8ad4aa6e696b513
|
[] |
no_license
|
hebe3456/algorithm010
|
6088164d2a2d328171d26bdbd9df717e34c516f4
|
2cb67db2547b7699df5960ec6f2547c6d874010e
|
refs/heads/master
| 2022-12-02T06:35:52.046859
| 2020-08-23T13:17:06
| 2020-08-23T13:17:06
| 272,187,227
| 0
| 0
| null | 2020-06-14T11:20:26
| 2020-06-14T11:20:26
| null |
UTF-8
|
Python
| false
| false
| 1,403
|
py
|
#
# @lc app=leetcode.cn id=389 lang=python3
#
# [389] 找不同
#
# 审题:多了一个字母,只含小写字母
# 思路:先排序,然后循环遍历比较,找出多的那个字母
# 两个都sort,然后遍历短字符串所有字母,和长字符串比较,return少的字母
# O(N)
# 生成字典,key为字母,value为个数,个数不一样,就返回对应的key,太复杂
# @lc code=start
class Solution:
    """LeetCode 389: t is s shuffled plus exactly one extra character."""

    def findTheDifference(self, s: str, t: str) -> str:
        """Return the added character by XOR-folding the char codes.

        Every character common to s and t cancels out under XOR, leaving
        only the code of the extra character.
        """
        code = 0
        for ch in s:
            code ^= ord(ch)
        for ch in t:
            code ^= ord(ch)
        return chr(code)
class Solution2:
    """Alternative O(n log n) approach: sort both strings and scan for the
    first position where they differ."""

    def findTheDifference(self, s: str, t: str) -> str:
        """Return the character present in the longer string but not the
        shorter one (first sorted mismatch, else the last character)."""
        longer, shorter = (s, t) if len(s) > len(t) else (t, s)
        big, small = sorted(longer), sorted(shorter)
        # Walk the shorter sorted sequence; the first mismatch pinpoints
        # the extra character in the longer one.
        for ch_big, ch_small in zip(big, small):
            if ch_big != ch_small:
                return ch_big
        # All positions matched, so the extra character sorted last.
        return big[-1]
# Ad-hoc smoke test for the submission above; expected output: "e".
s = Solution()
print(s.findTheDifference("abcd", "abcde"))
# @lc code=end
|
[
"test121209@163.com"
] |
test121209@163.com
|
3eed32390288a8236c4e22d1a99782c8d46e333c
|
e80be2e2907a1256eb3b885d513a8999d23aca1e
|
/Hash_Table/Single_Number.py
|
788624c273aaf34f264331f7a1f1dea26bc6d37f
|
[] |
no_license
|
ncturoger/LeetCodePractice
|
eb38fe1c75f0c460b3f7b5410a1b3e82ca3da09e
|
f0fa1f0af9613914c12f45a218500a75f9ba3c1a
|
refs/heads/master
| 2020-04-08T06:27:18.708645
| 2018-12-21T06:58:47
| 2018-12-21T06:58:47
| 159,097,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
class Solution(object):
    def singleNumber(self, nums):
        """Return the element of nums that appears exactly once (all other
        elements appear exactly twice).

        :type nums: List[int]
        :rtype: int

        BUG FIX: the original built a set and called nums.remove() for each
        distinct value -- O(n^2) and, worse, it mutated the caller's list.
        XOR-folding is O(n), O(1) extra space, and side-effect free: the
        paired values cancel, leaving the singleton.
        """
        acc = 0
        for value in nums:
            acc ^= value
        return acc


print(Solution().singleNumber([2,2,1]))
|
[
"Roger_Zeng@asus.com"
] |
Roger_Zeng@asus.com
|
c0030b784efe68cf06956e206fb8a3bf1c869a12
|
052b1b301fc3f139edf1d6e4cbe59f9d8dc598aa
|
/cms/urls.py
|
e3d93116c30a31e49b50073ac80a0dc9d0802198
|
[] |
no_license
|
azeemkhanreal/PaperManagement
|
4c32da5d072bd5667711d8ddb9797702d56c1719
|
a1066760678b60a9598da79a472ea7ffb651ffe9
|
refs/heads/master
| 2022-12-18T18:16:32.801051
| 2020-09-29T10:46:50
| 2020-09-29T10:46:50
| 194,628,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
# URL configuration for the cms app: customer CRUD + billing, hawker
# management, and outbound communication endpoints.
from django.contrib import admin  # NOTE(review): imported but unused in this module
from django.urls import path
from cms import views, hawker, communication

urlpatterns = [
    path("", views.cms_home, name="cms_home"),
    # Customer management and billing
    path('add_customer/', views.add_customer, name="add_customer"),
    path('customer_update/<int:pk>', views.customer_update, name="customer_update"),
    path('delete_customer/<int:pk>', views.delete_customer, name="delete_customer"),
    path('customer_report/', views.customer_report, name="customer_report"),
    path('collect_amount/<int:pk>', views.collect_amount, name="collect_amount"),
    path('invoice_update/<int:pk>', views.invoice_update, name="invoice_update"),
    path('delete_invoice/<int:pk>', views.delete_invoice, name="delete_invoice"),
    path('search_customer/', views.search_customer, name="search_customer"),
    path('manage_month_cost/', views.manage_month_cost, name="manage_month_cost"),
    # Hawker Routing
    path('add_hawker/', hawker.add_hawker, name="add_hawker"),
    path('delete_hawker/<int:pk>', hawker.delete_hawker, name="delete_hawker"),
    path('hawker_report/', hawker.hawker_report, name="hawker_report"),
    path('hawker_update/<int:pk>', hawker.hawker_update, name="hawker_update"),
    # Communication
    path('send_email/', communication.send_email, name="send_email"),
]
|
[
"azeemkhan@Azeems-MacBook-Air.local"
] |
azeemkhan@Azeems-MacBook-Air.local
|
7dbb2fa42a018c691ae7855243ce3e747025dfab
|
bc8d9bb39ed5a03e54705e8dae94dfecdc7fb6dd
|
/tests/test_keys.py
|
ab7ba6b358c531b5dd411f491de9a94a9e1d1ace
|
[] |
no_license
|
fabioz/PyUpdater
|
344d24704c348ecc5b1cf5ab4db2c86dbe20b991
|
4673948d28962c1b174b98e67ea1dc6053e37da3
|
refs/heads/master
| 2023-09-04T05:36:00.765143
| 2017-06-04T05:04:28
| 2017-06-04T05:04:28
| 95,917,055
| 2
| 0
| null | 2017-06-30T19:14:38
| 2017-06-30T19:14:38
| null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
# --------------------------------------------------------------------------
# Copyright (c) 2016 Digital Sapphire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
# --------------------------------------------------------------------------
from __future__ import unicode_literals
import os
import pytest
from pyupdater.key_handler.keys import Keys
@pytest.mark.usefixtures("cleandir")
class TestKeyPack(object):
    """Keypack creation smoke test; runs inside a clean temporary
    directory provided by the `cleandir` fixture."""

    def test_create_keypack(self):
        keys = Keys(test=True)
        for keypack_name in ("one", "two", "three"):
            assert keys.make_keypack(keypack_name) is True
        # make_keypack should have created the key data directory.
        assert os.path.exists(keys.data_dir) is True
|
[
"johnymoswag@gmail.com"
] |
johnymoswag@gmail.com
|
e39c9571a275945d74910ba4e6e1543e7103d66d
|
854d21e98f999765b88e7eb6eefe46917728d6d1
|
/Mangeoire/opencv_contrib/samples/python2/video.py
|
66b936150e7d2eedbd500a140c6e140f09188763
|
[
"BSD-3-Clause"
] |
permissive
|
MBaccount/Mangeoire_BiodiGuard
|
b405b9dbbfb1af3e92adfa1650314577d254a496
|
5afd507d8ec7ad67fbd62ea19a1ccf214fe8110a
|
refs/heads/main
| 2023-08-28T22:59:54.575989
| 2021-10-13T15:34:43
| 2021-10-13T15:34:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,822
|
py
|
#!/usr/bin/env python
'''
Video capture sample.
Sample shows how VideoCapture class can be used to acquire video
frames from a camera of a movie file. Also the sample provides
an example of procedural video generation by an object, mimicking
the VideoCapture interface (see Chess class).
'create_capture' is a convinience function for capture creation,
falling back to procedural video in case of error.
Usage:
video.py [--shotdir <shot path>] [source0] [source1] ...'
sourceN is an
- integer number for camera capture
- name of video file
- synth:<params> for procedural video
Synth examples:
synth:bg=../cpp/lena.jpg:noise=0.1
synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480
Keys:
ESC - exit
SPACE - save current frame to <shot path> directory
'''
import numpy as np
from numpy import pi, sin, cos
import cv2 as cv
# built-in modules
from time import clock
# local modules
import common
class VideoSynthBase(object):
    """Procedural video source mimicking the cv.VideoCapture interface.

    Subclasses override render() to draw onto each frame; read() returns
    (True, frame) just like VideoCapture.read().
    """

    def __init__(self, size=None, noise=0.0, bg = None, **params):
        # bg: optional path to a background image; size: 'WxH' string;
        # noise: per-channel gaussian noise amplitude in [0, 1].
        self.bg = None
        self.frame_size = (640, 480)
        if bg is not None:
            self.bg = cv.imread(bg, 1)
            h, w = self.bg.shape[:2]
            self.frame_size = (w, h)

        if size is not None:
            w, h = map(int, size.split('x'))
            self.frame_size = (w, h)
            # NOTE(review): if `size` is given while `bg` is None, cv.resize(None)
            # raises -- looks like `size` is only meant to accompany `bg`; confirm.
            self.bg = cv.resize(self.bg, self.frame_size)

        self.noise = float(noise)

    def render(self, dst):
        # Hook for subclasses: draw the synthetic content onto dst in place.
        pass

    def read(self, dst=None):
        """Return (True, frame): background copy + render() + optional noise."""
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            # cv.add with an explicit dtype saturates to uint8.
            buf = cv.add(buf, noise, dtype=cv.CV_8UC3)
        return True, buf

    def isOpened(self):
        # Always "open": frames are generated on demand.
        return True
class Chess(VideoSynthBase):
    """Synthetic video of an animated chessboard viewed through a virtual
    pinhole camera (intrinsics K plus radial distortion coefficients)."""

    def __init__(self, **kw):
        super(Chess, self).__init__(**kw)

        w, h = self.frame_size

        # 10x7 board of unit quads in the z=0 plane; colors alternate
        # checkerboard-style via the (i + j) parity.
        self.grid_size = sx, sy = 10, 7
        white_quads = []
        black_quads = []
        for i, j in np.ndindex(sy, sx):
            q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]]
            [white_quads, black_quads][(i + j) % 2].append(q)
        self.white_quads = np.float32(white_quads)
        self.black_quads = np.float32(black_quads)

        # Intrinsic matrix: focal length 0.9*w px, principal point at center.
        fx = 0.9
        self.K = np.float64([[fx*w, 0, 0.5*(w-1)],
                             [0, fx*w, 0.5*(h-1)],
                             [0.0,0.0, 1.0]])

        self.dist_coef = np.float64([-0.2, 0.1, 0, 0])
        self.t = 0  # animation clock, advanced 1/30 s per rendered frame

    def draw_quads(self, img, quads, color = (0, 255, 0)):
        # Project the 3D quad corners to the image plane, then fill with
        # sub-pixel precision (shift=2 -> coordinates pre-scaled by 4).
        img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
        img_quads.shape = quads.shape[:2] + (2,)
        for q in img_quads:
            cv.fillConvexPoly(img, np.int32(q*4), color, cv.LINE_AA, shift=2)

    def render(self, dst):
        t = self.t
        self.t += 1.0/30.0

        sx, sy = self.grid_size
        center = np.array([0.5*sx, 0.5*sy, 0.0])
        # Camera orbits the board center with an oscillating elevation and a
        # drifting offset so the view keeps changing.
        phi = pi/3 + sin(t*3)*pi/8
        c, s = cos(phi), sin(phi)
        ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2
        eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs
        target_pos = center + ofs

        # common.lookat/mtx2rvec produce the extrinsics consumed by draw_quads.
        R, self.tvec = common.lookat(eye_pos, target_pos)
        self.rvec = common.mtx2rvec(R)

        self.draw_quads(dst, self.white_quads, (245, 245, 245))
        self.draw_quads(dst, self.black_quads, (10, 10, 10))
# Registry of synthetic-video classes selectable via 'synth:class=<name>'.
classes = dict(chess=Chess)

# Ready-made source specifications, used e.g. as create_capture()'s fallback.
presets = dict(
    empty = 'synth:',
    lena = 'synth:bg=../cpp/lena.jpg:noise=0.1',
    chess = 'synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480'
)
def create_capture(source = 0, fallback = presets['chess']):
    '''Open a video source and return a capture-like object.

    source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
        - integer: hardware camera index
        - filename: video file
        - 'synth:...': procedural video (see VideoSynthBase / Chess)
    fallback: source spec to try when the requested one cannot be opened;
        pass None to disable the fallback.
    '''
    source = str(source).strip()
    chunks = source.split(':')
    # handle drive letter ('c:', ...) so Windows paths are not split on ':'
    if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
        chunks[1] = chunks[0] + ':' + chunks[1]
        del chunks[0]

    source = chunks[0]
    try:
        source = int(source)
    except ValueError:
        pass  # not a camera index; keep as filename / 'synth'
    params = dict( s.split('=') for s in chunks[1:] )

    cap = None
    if source == 'synth':
        Class = classes.get(params.get('class', None), VideoSynthBase)
        try:
            cap = Class(**params)
        except Exception:
            # FIX: was a bare `except:` -- keep the best-effort fallback
            # behaviour but stop swallowing SystemExit/KeyboardInterrupt.
            pass
    else:
        cap = cv.VideoCapture(source)
        if 'size' in params:
            w, h = map(int, params['size'].split('x'))
            cap.set(cv.CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv.CAP_PROP_FRAME_HEIGHT, h)
    if cap is None or not cap.isOpened():
        # Requested source failed; try the fallback once (fallback=None on
        # the recursive call prevents infinite recursion).
        if fallback is not None:
            return create_capture(fallback, None)
    return cap
if __name__ == '__main__':
    import sys
    import getopt

    args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
    args = dict(args)
    shotdir = args.get('--shotdir', '.')
    if len(sources) == 0:
        sources = [ 0 ]

    # BUG FIX: in Python 3, map() returns a one-shot iterator, so the
    # original `caps = map(create_capture, sources)` was exhausted after the
    # first pass of the while-loop and no frames were read from the second
    # iteration onward.  Materialize the captures into a list.
    caps = list(map(create_capture, sources))
    shot_idx = 0
    while True:
        imgs = []
        for i, cap in enumerate(caps):
            ret, img = cap.read()
            imgs.append(img)
            cv.imshow('capture %d' % i, img)
        ch = 0xFF & cv.waitKey(1)
        if ch == 27:           # ESC: quit
            break
        if ch == ord(' '):     # SPACE: save current frames to shotdir
            for i, img in enumerate(imgs):
                fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
                cv.imwrite(fn, img)
            shot_idx += 1
    cv.destroyAllWindows()
|
[
"manoahberal@live.fr"
] |
manoahberal@live.fr
|
b809d5d850e4c70569f8e3af88fe82dc1a687bb3
|
b09c450a8947194853b6412352dd5658c47727bb
|
/vagrant/Tournament/tournament.py
|
3ff7fa0c146464efcbc65bafe0bedb202096e8cf
|
[] |
no_license
|
apexa11/Tournament-Results-DataBase
|
7cb458f3ff9913a869e6d9399f7c900ee3449bd3
|
a920a98d44bf7d7e4fa06f70bb034d069323bc76
|
refs/heads/master
| 2021-01-25T09:08:33.306946
| 2017-10-09T01:51:34
| 2017-10-09T01:51:34
| 93,779,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,293
|
py
|
#!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
DBNAME = 'tournament'
def connect():
    """Connect to the PostgreSQL database. Returns a database connection."""
    connection = psycopg2.connect(database=DBNAME)
    return connection
def deleteMatches():
    """Remove all the match records from the database."""
    DB = connect()
    try:
        c = DB.cursor()
        c.execute("DELETE from matches")
        DB.commit()
        c.close()
    finally:
        # FIX: the connection leaked if execute()/commit() raised.
        DB.close()
def deletePlayers():
    """Remove all the player records from the database."""
    DB = connect()
    try:
        c = DB.cursor()
        c.execute("DELETE from players")
        DB.commit()
        c.close()
    finally:
        # FIX: the connection leaked if execute()/commit() raised.
        DB.close()
def countPlayers():
    """Returns the number of players currently registered."""
    DB = connect()
    try:
        c = DB.cursor()
        c.execute("SELECT COUNT(*) FROM players;")
        players_count = c.fetchone()
        c.close()  # FIX: the cursor was never closed
    finally:
        # FIX: close the connection even if the query raises.
        DB.close()
    return players_count[0]
def registerPlayer(name):
    """Adds a player to the tournament database.

    The database assigns a unique serial id number for the player. (This
    should be handled by your SQL database schema, not in your Python code.)

    Args:
      name: the player's full name (need not be unique).
    """
    DB = connect()
    try:
        c = DB.cursor()
        # Parameterized query: psycopg2 escapes `name`, no SQL injection.
        c.execute("INSERT INTO players (Player_Name) Values(%s)",(name,))
        DB.commit()
        c.close()  # FIX: the cursor was never closed
    finally:
        # FIX: the connection leaked if execute()/commit() raised.
        DB.close()
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a player
    tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    DB = connect()
    try:
        c = DB.cursor()
        c.execute("select * from standings")
        standings = c.fetchall()
        c.close()
        # FIX: dropped the DB.commit() -- a plain SELECT needs no commit.
    finally:
        # FIX: close the connection even if the query raises.
        DB.close()
    return standings
def reportMatch(winner, loser):
    """Records the outcome of a single match between two players.

    Args:
      winner: the id number of the player who won
      loser: the id number of the player who lost
    """
    DB = connect()
    try:
        c = DB.cursor()
        c.execute(" INSERT INTO matches (win_id,lose_id) Values (%s,%s)", (winner,loser))
        DB.commit()
        c.close()
    finally:
        # FIX: the connection leaked if execute()/commit() raised.
        DB.close()
def swissPairings():
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings.  Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player
    adjacent to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name
    """
    standings = playerStandings()
    pairings = []
    # Standings are ordered by wins, so adjacent entries make fair pairs.
    for idx in range(0, len(standings) - 1, 2):
        first = standings[idx]
        second = standings[idx + 1]
        pairings.append((first[0], first[1], second[0], second[1]))
    return pairings
|
[
"apexachoxi7@gmail.com"
] |
apexachoxi7@gmail.com
|
8a79268966c2dc177f92e3644f742a52bf4ea93a
|
a9a335c589bb8ef32a55f7cf5cc0afd868435aae
|
/main.py
|
0634e1c1f517c382965a206c45927cb3d91cd23c
|
[] |
no_license
|
Noah6544/RPG-GAME---UNDER-CONSTRUCTION-
|
01674ca36e8a9515aeb9488cd262ff7735802838
|
68ef3837ffa1fdcf23dcf01bb3cb1822a08dce40
|
refs/heads/master
| 2023-01-21T15:08:28.836912
| 2020-12-03T23:50:08
| 2020-12-03T23:50:08
| 283,945,871
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,948
|
py
|
class Warrior:
    """Offense-oriented class: high attack, average defense, low speed."""

    def __init__(self, attack, defense, speed, health, name):
        # NOTE(review): the numeric parameters are accepted but ignored --
        # every Warrior gets the same hard-coded stats (matches the original).
        self.name = name
        self.attack, self.defense = 150, 75
        self.speed, self.health = 25, 105
class Knight:
    """Defense-oriented class: high defense and health, low speed."""

    def __init__(self, attack, defense, speed, health, name):
        # BUG FIX: the last parameter was misspelled `ame`, so
        # `self.name = name` raised NameError (or silently picked up a
        # module-level `name` if one happened to exist).
        self.name = name
        # The numeric parameters are intentionally ignored: every Knight
        # gets the same fixed stats, consistent with Warrior and Scout.
        self.attack = 75
        self.defense = 150
        self.speed = 25
        self.health = 115
class Scout:
    """Speed-oriented class: balanced attack/defense, high speed."""

    def __init__(self, attack, defense, speed, health, name):
        # NOTE(review): the numeric parameters are accepted but ignored --
        # every Scout gets the same hard-coded stats (matches the original).
        self.name = name
        self.attack, self.defense = 75, 75
        self.speed, self.health = 100, 110
###IMPORTS
import time
import random
from termcolor import colored,cprint
###VARIABLES
twenty = (colored("~~~~~~~~~~~~~~~~~~~~", 'yellow'))
twenty2 = (colored(twenty + "\n" + twenty, 'yellow'))
twentyalot = (colored(twenty +"\n" + twenty + "\n" + twenty + "\n" + twenty + "\n" + twenty ))
yes = ["yes", "YES", "Yes", "yEs", "yeS","YEs", "YeS", "YEs", "YeS", "YeS", "yES", "Y", "y"]
no = ["no", "No", "NO", "nO", "N", "n"]
twenty_long = (twenty + "~~~~~~~~~~")
###FUNCTIONS
def encount(encount, enstat, stat):
    """Compare one enemy stat against the player's and update the running
    difficulty counter.

    encount: the counter accumulated so far
    enstat:  the enemy's value for this stat
    stat:    the player's value for this stat

    Returns the updated counter (+1 if the enemy is stronger, -1 if weaker,
    unchanged on a tie) and mirrors it into the global `factsencount`.

    BUG FIX: the original wrote bare expressions (`encount + 1`, ...) whose
    results were discarded, so the counter never changed and the function
    always set factsencount to the incoming value and returned None.
    """
    global factsencount
    if enstat > stat:
        encount += 1
    elif enstat < stat:
        encount -= 1
    # enstat == stat leaves the counter unchanged
    factsencount = encount
    return factsencount
def comparing_2(attack, enattack, ATTACK):
    """Print which side has the higher value for a single stat.

    attack:   the player's value for this stat
    enattack: the enemy's value for the same stat
    ATTACK:   label of the stat being compared (e.g. 'ATTACK', 'SPEED')
    """
    ATTACK = str(ATTACK)
    if enattack > attack:
        print(twenty)
        print(colored("THE ENEMY'S " + ATTACK + " IS GREATER THAN YOURS BY\n" + twenty + str(enattack - attack), 'blue'))
        print(twenty)
        time.sleep(2)
    elif enattack < attack:
        print(twenty)
        print(colored("YOUR " + ATTACK + " IS GREATER THAN THE ENEMY'S BY\n" + twenty + str(attack - enattack), 'blue'))
        print(twenty)
        time.sleep(2)
    elif enattack == attack:
        print(twenty)
        time.sleep(2)
        # NOTE(review): "YOUR + " below renders a literal plus sign in the
        # message -- likely a typo, but it is runtime text so left unchanged.
        print(colored("YOUR + " + ATTACK + " STATS ARE EQUAL",'blue'))
def enemy(enattack, endefense, enspeed, enhealth, enname, attack, defense, speed, health, name):
    """Introduce the upcoming enemy and compare its stats with the player's.

    The first five arguments are the enemy's stats and name; the last five
    are the player's.  Prompts the user for a comparison mode and stores
    the choice in the global `would_you`.
    """
    global would_you
    print(twenty_long)
    print(colored("THE ENEMY THAT YOU WILL BE FACING IS:", 'blue'))
    print(twenty)
    print(enname)
    print(twenty)
    print(colored("LET'S COMPARE YOUR STATS WITH THEIRS.",'blue'))
    print(twenty2)
    print(colored("WOULD YOU LIKE TO GET A SYNOPSIS OF THE ENEMY'S STATS - [1]", 'red'))
    print(twenty2)
    print("BROKEN - CHOOSE 2")
    print(twenty2)
    print(twenty)
    time.sleep(3)
    print(colored("OR - WOULD YOU LIKE TO HAVE A MORE IN DEPTH VIEW OF THE ENEMY'S STATS? - [2]", 'red'))
    print(twenty)
    would_you = int(input(colored("[1] OR [2]",'red')))
    print(twenty)
    if would_you == 1:
        # BUG FIX: the original assigned `encount = 0`, shadowing the
        # module-level encount() function, and then *called the integer*,
        # which raised TypeError.  Compute the difficulty score inline:
        # +1 per stat where the enemy is stronger, -1 where weaker.
        difficulty = 0
        for enemy_stat, my_stat in ((enattack, attack), (endefense, defense),
                                    (enspeed, speed), (enhealth, health)):
            if enemy_stat > my_stat:
                difficulty += 1
            elif enemy_stat < my_stat:
                difficulty -= 1
        if difficulty == 1:
            print(colored("THIS WILL BE AN EASY FIGHT",'blue'))
            print(twenty)
            time.sleep(2)
        elif difficulty == 2:
            print(colored("THIS WILL BE A TOUGH FIGHT",'blue'))
            print(twenty)
            time.sleep(2)
        elif difficulty == 3:
            print(colored("THIS WILL BE A HARD FIGHT",'blue'))
            print(twenty)
            time.sleep(2)
        elif difficulty == 4:
            print(colored("THIS WILL BE A NEAR IMPOSSIBLE FIGHT",'blue'))
            print(twenty)
            time.sleep(2)
        else:
            print(twenty)
            print(colored("THIS FIGHT WILL BE UNFATHOMABLY EASY", 'blue'))
            print(twenty)
            time.sleep(2)
    elif would_you == 2:
        print(twenty)
        print("WE WILL BE COMPARING ATTACKS.")
        time.sleep(1)
        print(twenty)
        print("THE ENEMY HAS AN ATTACK OF:\n")
        print(twenty)
        print(str(enattack))
        print(twenty)
        time.sleep(1)
        print("YOU HAVE AN ATTACK OF:\n")
        print(twenty)
        print(str(attack))
        time.sleep(1)
        print(comparing_2(attack, enattack, 'ATTACK'))
        print(comparing_2(defense, endefense, 'DEFENSE'))
        print(comparing_2(speed, enspeed, 'SPEED'))
        print(comparing_2(health, enhealth, 'HEALTH'))
        arguably = int(random.randint(0,100))
        if arguably > 49:
            print(twenty)
            print("THE ENEMY'S NAME IS ALSO ARGUABLY BETTER THAN YOURS...")
        elif arguably <= 49:
            print(twenty)
            print("YOUR NAME IS ALSO ARGUABLY BETTER THAN YOUR ENEMY'S NAME...")
        print(twenty)
        time.sleep(2)
def get_name():
    """Prompt the user for their character name and store it in the
    module-level global `name` used by the rest of the game."""
    global name
    print(twenty)
    print(colored("I'VE FORGOTTEN TO GET YOUR NAME, PLEASE TELL ME.", 'blue'))
    print(twenty)
    time.sleep(2)
    name = str(input(colored("ENTER YOUR NAME: ", 'red')))
    print(twenty)
    time.sleep(1)
def stat_choosing():
    """Roll random ATTACK/DEFENSE/SPEED/HEALTH stats and store them in the
    module-level globals used by the rest of the game."""
    global attack
    global defense
    global speed
    global health
    print(colored("PICKING RANDOM (ATTACK) STAT NOW...", 'blue'))
    time.sleep(1.5)
    for i in range(6):
        print("Picking....", + i)
        time.sleep(.3)
    attack = random.randint(0,100)
    print(twenty)
    print(colored("YOUR ATTACK STAT IS: " + str(attack), 'blue'))
    print(twenty)
    time.sleep(2)
    print(colored("PICKING RANDOM (DEFENSE) STAT NOW...", 'blue'))
    time.sleep(1.5)
    for i in range(6):
        print("Picking...", i)
        time.sleep(.3)
    defense = random.randint(0, 100)
    print(twenty)
    print(colored("YOUR DEFENSE STAT IS: " + str(defense), 'blue'))
    print(twenty)
    time.sleep(2)
    print(colored("PICKING RANDOM (SPEED) STAT NOW...", 'blue'))
    time.sleep(1.5)
    for i in range(6):
        print("Picking...", i)
        time.sleep(.3)
    speed = random.randint(0, 100)
    print(twenty)
    print(colored("YOUR SPEED STAT IS: " + str(speed), 'blue'))
    print(twenty)
    time.sleep(2)
    print(colored("PICKING RANDOM (HEALTH) STAT NOW...", 'blue'))
    time.sleep(1.5)
    for i in range(6):
        # BUG FIX: the original called colored("Picking...", i, 'blue'),
        # passing the loop index as termcolor's `color` argument, which
        # raises.  Append the counter to the text instead.
        print(colored("Picking... " + str(i), 'blue'))
        time.sleep(.3)
    # Health is a 100..125 roll (base 100 plus up to 25 bonus points).
    health = random.randint(0, 25) + 100
    print(twenty)
    print(colored("YOUR HEALTH STAT IS: " + str(health), 'blue'))
    print(twenty2)
def class_choosing():
    """Let the player pick a class (Warrior/Knight/Scout) or roll random
    stats; sets the globals `player`, `choose_class` and `do_you`."""
    global player
    global choose_class
    global do_you
    # FIX: the loop flag was named `bool`, shadowing the built-in type.
    choosing = True
    while choosing == True:
        print(twenty)
        do_you = int(input(colored("DO YOU WANT TO PICK YOU CLASS - [1]\nOR RECIEVE A RANDOM CLASS [2]?\n" + twenty,'red')))
        time.sleep(.5)
        if do_you == 1:
            print(twenty)
            print(colored("YOU WILL BE PICKING A CLASS\nPICK ONE OF THE FOLLOWING:",'blue'))
            print(twenty)
            time.sleep(1)
            print(colored("1.WARRIOR\n-ATTACK: 150\n-DEFENSE: 75\n-SPEED: 25\n-HEALTH: 105", 'blue'))
            print(twenty)
            time.sleep(1)
            print(colored("2.KNIGHT\n-ATTACK: 75\n-DEFENSE: 150\n-SPEED: 25\n-HEALTH: 115", 'blue'))
            print(twenty)
            time.sleep(1)
            print(colored("3.SCOUT\n-ATTACK: 75\n-DEFENSE: 100\n-SPEED: 75\n-HEALTH: 110", 'blue'))
            print(twenty)
            time.sleep(2)
            choose_class = int(input(colored("WHICH CLASS WOULD YOU LIKE TO CHOOSE?", "red")))
            if choose_class == 1:
                time.sleep(2)
                print(twenty)
                print(colored("YOU CHOSE THE WARRIOR CLASS.",'blue'))
                print(twenty)
                time.sleep(1)
                player = Warrior(150,75,25,105,name)
                choosing = False
            elif choose_class == 2:
                time.sleep(2)
                print(twenty)
                print(colored("YOU CHOSE THE KNIGHT CLASS.",'blue'))
                print(twenty)
                time.sleep(1)
                player = Knight(75,150,25,115,name)
                choosing = False
            elif choose_class == 3:
                time.sleep(2)
                print(twenty)
                print(colored("YOU CHOSE THE SCOUT CLASS.",'blue'))
                print(twenty)
                time.sleep(1)
                player = Scout(75,100,75,110,name)
                choosing = False
            # any other choose_class value falls through and re-prompts
        elif do_you == 2:
            print(twenty)
            print(colored("YOU WILL BE RECIVING A RANDOM CLASS", 'blue'))
            print(stat_choosing())
            choosing = False
        else:
            print(twenty)
            print(colored("INVALID INPUT, TRY AGAIN.", 'green'))
            print(twenty2)
###MAIN GAME
print(colored("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 'yellow'))
intro_have_you = str(input(colored("Have you played this before?\n[YES][NO]", 'red')))
if intro_have_you in yes:
print(get_name())
print(class_choosing())
elif intro_have_you not in yes:
print(get_name())
print(twenty)
print(colored("THIS IS AN RPG GAME - THE GOAL IS TO GET AS FAR AS POSSIBLE.\nYOU WILL BE PROMPTED TO EITHER CHOOSE OR MAKE YOUR CLASS, IF YOU CHOOSE MAKE CLASS[BROKEN], THEN YOU'LL HAVE A MAX OF 250 POINTS TO DISTRIBUTE TO ALL YOUR STATS - YOUR HEALTH STAT SIMPLY ADDS THAT MUCH HEALTH TO YOUR BASE HEALTH OF 100", 'blue'))
time.sleep(1.5)
print(twenty)
print(class_choosing())
if do_you == 1:
print(colored("THIS MEANS YOUR STATS ARE: \n1.ATTACK: " + str(player.attack) + "\n2.DEFENSE: " + str(player.defense) + "\n3.SPEED: " + str(player.speed) + "\n4.HEALTH:" + str(player.health), 'blue'))
time.sleep(3)
elif do_you == 2:
print(colored("THIS MEANS YOUR STATS ARE: \n1.ATTACK: " + str(attack) + "\n2.DEFENSE: " + str(defense) + "\n3.SPEED: " + str(speed) + "\n4.HEALTH:" + str(health), 'blue'))
time.sleep(3)
print(twenty)
print(colored("NOW THAT YOU HAVE YOUR STATS, YOU CAN START PLAYING.", 'blue'))
time.sleep(2)
print(twenty2)
print(colored("PART 1 - TRAPPED ", 'green'))
print(twenty2)
time.sleep(1)
print(colored("YOU WILL FACE YOUR FIRST ENEMY, AND, I SHOULD WARN YOU. HE'S QUITE STONG.", 'blue'))
print(twenty)
time.sleep(3)
if do_you == 1:
print(enemy(400,400,12,12,'SIMA',player.attack,player.defense,player.speed,player.health,player.name))
elif do_you == 2:
print(enemy(400,400,12,12,'SIMA',attack,defense,speed,health,name))
print("1")
###PAST BROKEN STUFF - MOVE ON MAYBE
"""
(line 95)
###OPTION 1 BROKEN - FIX THIS
if do_you == 1:
print(twenty2)
print(colored("YOU WILL BE PICKING YOUR STATS", 'blue'))
print(twenty)
time.sleep(1.5)
remaining_points = 250
while remaining_points - attack < 0:
attack = int(input(colored("WHAT WOULD YOU LIKE YOUR ATTACK STAT TO BE?(MAX 250 FOR ALL STATS(attack, speed, and defense)", 'red')))
if remaining_points - attack >= 0:
break
elif remaining_points - attack < 0:
time.sleep(2)
print(twenty)
print(colored("INVALID INPUT", 'green'))
time.sleep(2)
while remaining_points - defense > 0:
remaining_points =remaining_points - attack
print(twenty)
print(colored("YOU HAVE " + str(remaining_points) + " POINTS REMAINING", 'blue'))
time.sleep(2)
print(twenty)
defense = int(input(colored("WHAT WOULD YOU LIKE YOUR DEFENSE STAT TO BE? YOU HAVE " + str(remaining_points) + " POINTS REMAINING.",'red')))
remaining_points = remaining_points - defense
time.sleep(2)
print(twenty)
print(colored("YOU HAVE " + str(remaining_points) + " POINTS REMAINING", 'blue'))
print(twenty)
time.sleep(2)
speed = int(input(colored("WHAT WOULD YOU LIKE YOUR SPEED STAT TO BE? YOU HAVE " + str(remaining_points) + " POINTS REMAINING.",'red')))
print(twenty)
time.sleep(2)
"""
|
[
"noah6544@gmail.com"
] |
noah6544@gmail.com
|
8b90e5bd1e48f2ec01ee33ff31f32d8ab418cc08
|
69d61652f1c463c8eb8507ee5dfcabe5483997ae
|
/part1 new/hill_climbing.py
|
c5308c7da8c35f1976a991ad3ebe9af4ed575c8a
|
[] |
no_license
|
yinhao0424/AI_Project
|
6225f6b1b3f5c5ef71fcf9ed6d5e90724e39b513
|
f51f6372db96bc9485930bb0ecd9bead479c8128
|
refs/heads/master
| 2022-04-17T11:17:53.791018
| 2020-04-15T03:35:12
| 2020-04-15T03:35:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 831
|
py
|
# ALGORITMO:
# HILL CLIMBING GENERICO AND HC WITH RANDOM RECOMMENDATION
from random import shuffle
def hill_climbing(problem):
    """Greedy local search: repeatedly move to the best-scoring neighbour.

    Stops and returns the current state when it has no neighbours at all,
    or when no neighbour improves on its heuristic value (local optimum).
    """
    current = problem.initial()
    while True:
        candidates = problem.nearStates(current)
        if not candidates:
            return current
        best = max(candidates, key=problem.heuristic)
        # No strictly better neighbour -> local optimum reached.
        if problem.heuristic(best) <= problem.heuristic(current):
            return current
        current = best
# HC com random restart
def random_restart(problem, limit=10):
    """Restarted hill climbing.

    Re-runs hill_climbing up to `limit` times or until
    problem.goal_test accepts the current state.

    Args:
        problem: object exposing initial(), goal_test(state) and the
            hill_climbing interface (nearStates, heuristic).
        limit: maximum number of hill-climbing runs (default 10).

    Returns:
        The last state reached — not necessarily a goal if the limit
        was exhausted first.
    """
    state = problem.initial()
    count = 0
    # Idiom fix: `not problem.goal_test(state)` instead of `== False`.
    while not problem.goal_test(state) and count < limit:
        state = hill_climbing(problem)
        count += 1
    return state
|
[
"yinhao0424@gmail.com"
] |
yinhao0424@gmail.com
|
8f120842eb30d596f06bff004ca3c6913a5a7112
|
051588d7ff688feddc1ebd483e9112f858c3ad2a
|
/rotowire/neural_nets/baseline_model.py
|
25e06c758c55a6baa7a1250c5f18928d01dd447a
|
[] |
no_license
|
gortibaldik/TTTGen
|
cedfa0fac19ae72ad22dbc00caa0591a18c82c59
|
bba791d9337a30fdadef28645525dc75bf926132
|
refs/heads/master
| 2023-06-07T12:32:36.469506
| 2021-07-01T20:44:07
| 2021-07-01T20:44:07
| 343,827,230
| 4
| 1
| null | 2021-04-13T11:48:21
| 2021-03-02T15:50:04
|
Python
|
UTF-8
|
Python
| false
| false
| 14,617
|
py
|
import tensorflow as tf
import numpy as np
import sys
from neural_nets.layers import DecoderRNNCellJointCopy
class EncoderDecoderBasic(tf.keras.Model):
    """ EncoderDecoder model which allows usage of different Encoders (Encoder, EncoderCS, EncoderCSBi) and Decoders(DecoderRNNCell, DecoderRNNCellJointCopy)

    Encoder should encode the input records to a representation out of which the decoder would generate (decode) the output text
    """
    def __init__(self, encoder, decoder_cell):
        """ Initialize EncoderDecoderBasic

        Args:
            encoder: one of Encoder, EncoderCS, EncoderCSBi
            decoder_cell: one of DecoderRNNCell, DecoderRNNCellJointCopy
        """
        super(EncoderDecoderBasic, self).__init__()
        self._encoder = encoder
        self._decoder_cell = decoder_cell

    def compile(self, optimizer, loss_fn, scheduled_sampling_rate,
                truncation_size, truncation_skip_step):
        """ Prepare the model for training, evaluation and prediction

        Assigns optimizers, losses, initiates training hyperparameters, sets up eager execution,
        which enables us to use different settings for training (we use graph execution during training)
        and evaluation and prediction (where we use eager execution)

        Args:
            optimizer (optimizer): optimizer used to minimize the txt loss
            loss_fn (loss): loss function
            scheduled_sampling_rate (float): frequency at which the gold outputs from the previous time-steps are fed into the network
                (number between 0 and 1, 1 means regular training)
            truncation_size (int): t_2 argument of TBPTT (explained in section 4.1 of the thesis)
            truncation_skip_step (int): t_1 argument of TBPTT (should be lower than or equal to t_2)
        """
        super(EncoderDecoderBasic, self).compile(run_eagerly=True)
        self._optimizer = optimizer
        self._loss_fn = loss_fn
        self._train_metrics = [tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
                               tf.keras.metrics.SparseCategoricalCrossentropy(name='loss')]
        self._val_metrics = {"val_accuracy": tf.keras.metrics.Accuracy(name='accuracy'),
                             "val_loss": tf.keras.metrics.SparseCategoricalCrossentropy(name='loss')}
        self._scheduled_sampling_rate = scheduled_sampling_rate
        self._truncation_skip_step = truncation_skip_step
        self._truncation_size = truncation_size
        self._generator = tf.random.Generator.from_non_deterministic_state()

    def _calc_loss(self, x, y):
        """ use only the non-pad values to calculate the loss"""
        # Padding is token id 0; mask it out of both loss and metrics.
        mask = tf.math.logical_not(tf.math.equal(y, 0))
        loss_ = self._loss_fn(y, x)
        mask = tf.cast(mask, loss_.dtype)
        loss_ *= mask
        for metric in self._train_metrics:
            metric.update_state(y, x, sample_weight=mask)
        return tf.reduce_mean(loss_)

    @tf.function
    def bppt_step(self, batch_data, last_out, initial_state=None):
        """ performs one forward and backward pass

        the first pass with given arguments creates a computational graph which
        is used in the other passes

        Args:
            batch_data: inputs and targets for the text decoder, input tables, and
                generated probabilities for scheduled sampling
                (dec_in, dec_targets, gen_or_teach, *tables)
            last_out: last output of the decoder
            initial_state: initial state for the decoder

        Returns:
            self._truncation_skip_step-th hidden state
            argmax of self._truncation_skip_step-th prediction of the network
        """
        # debugging outputs - tf.function calls python functions only
        # during tracing when a computation graph is created
        # we report how many times the function is traced and with which arguments
        print("tracing tf.function with args:", file=sys.stderr)
        print(f"len(batch_data) : {len(batch_data)}", file=sys.stderr)
        for ix, d in enumerate(batch_data):
            print(f"batch_data[{ix}].shape : {d.shape}", file=sys.stderr)
        print(f"last_out.shape : {last_out.shape}", file=sys.stderr)
        print(f"last_out.dtype : {last_out.dtype}", file=sys.stderr)
        if initial_state is None:
            print(f"initial_state is None", file=sys.stderr)
        else:
            for ix, d in enumerate(initial_state):
                print(f"initial_state[{ix}].shape : {d.shape}", file=sys.stderr)
        loss = 0
        dec_in, targets, gen_or_teach, *tables = batch_data
        final_state = None
        final_last_out = None
        with tf.GradientTape() as tape:
            enc_outs, *last_hidden_rnn = self._encoder(tables, training=True)
            if initial_state is None:
                initial_state = [last_hidden_rnn[-1], *last_hidden_rnn]
            # prepare states and inputs for the decoder
            if isinstance(self._decoder_cell, DecoderRNNCellJointCopy):
                print("using joint copy mechanism !")
                enc_ins = tf.one_hot(tf.cast(tables[2], tf.int32), self._decoder_cell._word_vocab_size)  # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
                aux_inputs = (enc_outs, enc_ins)  # value portion of the record needs to be copied
            else:
                print("using vanilla attention")
                aux_inputs = (enc_outs,)
            print(f"---\n", file=sys.stderr)
            states = initial_state
            for t in range(dec_in.shape[1]):
                if (self._truncation_skip_step is not None) and (t == self._truncation_skip_step):
                    # Remember the state/output to hand to the next truncated window.
                    final_state = states
                    final_last_out = last_out
                if gen_or_teach[t] > self._scheduled_sampling_rate:
                    # Scheduled sampling: feed the model its own last prediction.
                    _input = last_out
                else:
                    _input = dec_in[:, t, :]
                last_out, states = self._decoder_cell((_input, *aux_inputs),
                                                      states=states,
                                                      training=True)
                # calculate loss and collect metrics
                loss += self._calc_loss(last_out, targets[:, t])
                # prepare new input for the decoder (used with (1 - scheduled_sampling_rate) probability)
                last_out = tf.expand_dims(tf.cast(tf.argmax(last_out, axis=1), tf.int16), -1)
        variables = []
        # linear transformation layer is trained only when the initial_state is None
        # when it prepares the initial states for the decoder
        # NOTE(review): initial_state is reassigned above whenever it arrives as
        # None, so this condition excludes the linear_transform kernel on every
        # call — confirm against the training intent described in the comment.
        for var in self._encoder.trainable_variables + self._decoder_cell.trainable_variables:
            if (initial_state is None) or (var.name != 'encoder/linear_transform/kernel:0'):
                variables.append(var)
        gradients = tape.gradient(loss, variables)
        self._optimizer.apply_gradients(zip(gradients, variables))
        if (self._truncation_skip_step is None) or (self._truncation_skip_step == dec_in.shape[1]):
            final_state = states
            final_last_out = last_out
        return final_state, final_last_out

    @property
    def metrics(self):
        """returns the list of metrics that should be reset at the start and end of training epoch and evaluation"""
        return self._train_metrics + list(self._val_metrics.values())

    def train_step(self, batch_data):
        """ perform one train_step during model.fit

        Args:
            batch_data: data to train on in format (summaries, *tables)
        """
        summaries, *tables = batch_data
        # prepare inputs for the decoder
        sums = tf.expand_dims(summaries, axis=-1)
        # scheduled sampling may force the text decoder to generate
        # from its last prediction even at the first step
        # by setting last_out = sums[:, 0] we erase differences between
        # scheduled sampling and teacher forcing at the first timestep
        last_out = sums[:, 0]
        start = 0
        length = summaries.shape[1]
        state = None
        for end in range(self._truncation_size, length - 1, self._truncation_skip_step):
            # gen_or_teach is the [0,1] vector which contains value for each time-step
            # if the value for the timestep is higher than self.scheduled_sampling_rate
            # the text decoder is forced to generate from its last prediction
            gen_or_teach = np.zeros(shape=(end - start))
            for i in range(len(gen_or_teach)):
                gen_or_teach[i] = self._generator.uniform(shape=(), maxval=1.0)
            # prepare data for teacher forcing, scheduled sampling etc.
            truncated_data = (sums[:, start:end, :],
                              summaries[:, start + 1:end + 1],
                              tf.convert_to_tensor(gen_or_teach),
                              *tables)
            # run the backpropagation on truncated sequence
            state, last_out = self.bppt_step(truncated_data,
                                             last_out,
                                             initial_state=state)
            start += self._truncation_skip_step
        # finish the truncated bppt if the truncation_size cannot divide properly
        # the length of sequence
        if (length - self._truncation_size) % self._truncation_skip_step != 0:
            # gen_or_teach is the [0,1] vector which contains value for each time-step
            # if the value for the timestep is higher than self.scheduled_sampling_rate
            # the text decoder is forced to generate from its last prediction
            gen_or_teach = np.zeros(shape=(length - 1 - start))
            for i in range(len(gen_or_teach)):
                gen_or_teach[i] = self._generator.uniform(shape=(), maxval=1.0)
            # prepare data for teacher forcing, scheduled sampling etc.
            truncated_data = (sums[:, start:length - 1, :],
                              summaries[:, start + 1:length],
                              tf.convert_to_tensor(gen_or_teach),
                              *tables)
            # run the backpropagation on truncated sequence
            state, last_out = self.bppt_step(truncated_data,
                                             last_out,
                                             initial_state=state)
        return dict([(metric.name, metric.result()) for metric in self._train_metrics])

    def test_step(self, batch_data):
        """ perform one test_step during model.evaluate

        Args:
            batch_data: data to train on in format (summaries, *tables)
        """
        summaries, *tables = batch_data
        # prepare summaries
        max_sum_size = summaries.shape[1] - 1
        dec_inputs = tf.expand_dims(summaries, axis=-1)[:, :max_sum_size, :]
        targets = summaries[:, 1:max_sum_size + 1]
        enc_outs, *last_hidden_rnn = self._encoder(tables, training=False)
        # prepare states and inputs for the decoder
        if isinstance(self._decoder_cell, DecoderRNNCellJointCopy):
            enc_ins = tf.one_hot(tf.cast(tables[2], tf.int32), self._decoder_cell._word_vocab_size)  # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
            aux_inputs = (enc_outs, enc_ins)  # value portion of the record needs to be copied
        else:
            aux_inputs = (enc_outs,)
        initial_state = [last_hidden_rnn[-1], *last_hidden_rnn]
        dec_in = dec_inputs[:, 0, :]  # start tokens
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the exact equivalent dtype specifier.
        result_preds = np.zeros(targets.shape, dtype=int)
        # decode text
        for t in range(targets.shape[1]):
            pred, initial_state = self._decoder_cell((dec_in, *aux_inputs),
                                                     initial_state,
                                                     training=False)
            mask = tf.math.logical_not(tf.math.equal(targets[:, t], 0))
            self._val_metrics['val_loss'].update_state(targets[:, t],
                                                       pred,
                                                       sample_weight=mask)
            predicted_ids = tf.argmax(pred, axis=1).numpy()
            result_preds[:, t] = predicted_ids
            # teacher forcing during evaluation: feed the gold token
            dec_in = tf.expand_dims(targets[:, t], axis=1)
        mask = tf.math.logical_not(tf.math.equal(targets, 0))
        self._val_metrics['val_accuracy'].update_state(targets,
                                                       result_preds,
                                                       sample_weight=mask)
        return dict([(metric.name, metric.result()) for metric in self._val_metrics.values()])

    def predict_step(self, data):
        """ perform one predict_step during model.predict

        Args:
            batch_data: data to train on in format (summaries, *tables)
        """
        summaries, *tables = data
        # retrieve start tokens
        dec_inputs = tf.expand_dims(summaries, axis=-1)
        dec_in = dec_inputs[:, 0, :]  # start tokens
        enc_outs, *last_hidden_rnn = self._encoder(tables, training=False)
        if isinstance(self._decoder_cell, DecoderRNNCellJointCopy):
            enc_ins = tf.one_hot(tf.cast(tables[2], tf.int32), self._decoder_cell._word_vocab_size)  # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
            aux_inputs = (enc_outs, enc_ins)  # value portion of the record needs to be copied
        else:
            aux_inputs = (enc_outs,)
        initial_state = [last_hidden_rnn[-1], *last_hidden_rnn]
        # BUG FIX: np.int removed in NumPy 1.24 — use builtin int.
        result_preds = np.zeros(summaries.shape, dtype=int)
        # greedy decoding
        for t in range(summaries.shape[1]):
            pred, initial_state = self._decoder_cell((dec_in, *aux_inputs),
                                                     initial_state,
                                                     training=False)
            predicted_ids = tf.argmax(pred, axis=1).numpy()
            result_preds[:, t] = predicted_ids
            dec_in = tf.expand_dims(predicted_ids, axis=1)
        return result_preds
|
[
"ferotre@gmail.com"
] |
ferotre@gmail.com
|
2cb29af9f9b4f370b39a8c2aa082b1ae81108164
|
ecd20066d585f1acb75a2f5407eda3df9f10822a
|
/node_modules/mongodb/node_modules/bson/build/config.gypi
|
624ae684c409d99035f4b7267abc0ad1073f2fe3
|
[
"Apache-2.0"
] |
permissive
|
dehmirandac2/contatooh
|
e3a70cc7e61d24e844da217f3c9be036366982eb
|
f6ef6eb1c0c1b42f8712d38d0f62ee1daf6c914f
|
refs/heads/master
| 2021-01-20T19:29:51.083570
| 2016-06-07T20:04:44
| 2016-06-07T20:04:44
| 60,641,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,100
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 48,
"host_arch": "x64",
"node_byteorder": "little",
"node_install_npm": "false",
"node_prefix": "/usr",
"node_shared_cares": "true",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "true",
"node_shared_v8": "true",
"node_shared_zlib": "true",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/usr/include/nodejs",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"pre": "",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.25 linux x64",
"bin_links": "true",
"description": "true",
"fetch_retries": "2",
"init_version": "0.0.0",
"user": "",
"force": "",
"ignore": "",
"cache_min": "10",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/deborah/.npmrc",
"coverage": "",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/deborah/tmp",
"userignorefile": "/home/deborah/.npmignore",
"yes": "",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/usr/bin/zsh",
"prefix": "/usr/local",
"registry": "https://registry.npmjs.org/",
"__DO_NOT_MODIFY_THIS_FILE___use__etc_npmrc_instead_": "true",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/deborah/.npm",
"npaturl": "http://npat.npmjs.org/",
"searchsort": "name",
"version": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"global": "",
"link": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "v0.10.25",
"tag": "latest",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"globalconfig": "/etc/npmrc",
"dev": "",
"init_module": "/home/deborah/.npm-init.js",
"parseable": "",
"globalignorefile": "/etc/npmignore",
"cache_lock_retries": "10",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
|
[
"dehmirandac2@gmail.com"
] |
dehmirandac2@gmail.com
|
7b80923ec47deafff37194386c720242e04c6b5e
|
5ca4a0d91f5bd119e80478b5bd3d43ed30133a42
|
/film20/regional_info/urls.py
|
d5bd968ae3e4843f38f0a9bd1c0ce218d21c9733
|
[] |
no_license
|
thuvh/filmmaster
|
1fc81377feef5a9e13f792b329ef90f840404ec5
|
dd6a2ee5a4951b2397170d5086c000169bf91350
|
refs/heads/master
| 2021-01-17T16:10:54.682908
| 2012-04-29T18:19:52
| 2012-04-29T18:19:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
#-------------------------------------------------------------------------------
# Filmaster - a social web network and recommendation engine
# Copyright (c) 2009 Filmaster (Borys Musielak, Adam Zielinski).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
from django.conf.urls.defaults import *
from film20.config.urls import *
from film20.regional_info.views import *
# URLconf for the regional_info app: JSON endpoints serving regional data.
regionalinfopatterns = patterns('',
    # regional info (json)
    (r'^'+urls["REGIONAL_INFO"]+'/json$', regional_info),
    # BUG FIX: the pattern below used \w and \- inside a non-raw string —
    # invalid escape sequences (DeprecationWarning, SyntaxWarning in newer
    # Pythons). The r prefix yields a byte-identical regex without warnings.
    (r'^'+urls["REGIONAL_INFO"]+r'/(?P<town>[\w\-_]+)/(?P<region>[\w\-_]+)/json$', regional_info_args),
)
|
[
"email@ibrahimcesar.com"
] |
email@ibrahimcesar.com
|
0000f0a78a066bbd4561a3596ef00c2b48d6fb8c
|
6830b37bfc6219123798b296bab9b432a72c1f09
|
/students/Net_Michael/lesson3/mailroom.py
|
54bf35579cdbbaa58935c798075ba2a0c286e0c8
|
[] |
no_license
|
AlyssaHong/Self_Paced-Online
|
3e9c94a1147a887ec8851c12da1ef0558d406ea2
|
7ca939fd379623a8ff593ac72156160d20bc4496
|
refs/heads/master
| 2020-03-29T17:24:29.386168
| 2019-01-13T07:57:07
| 2019-01-13T07:57:07
| 150,160,997
| 1
| 0
| null | 2018-09-24T20:00:31
| 2018-09-24T20:00:31
| null |
UTF-8
|
Python
| false
| false
| 3,578
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Part I
import sys
def sort_key(donor_db2):
    """Sort key for a (name, donations) record: the name's second token."""
    full_name = donor_db2[0]
    tokens = full_name.split(" ")
    return tokens[1]
# update the list of donors and donation
def add_donor():
    """Prompt for a donor name and a donation amount and record them.

    Mutates the module-level donor_name list and donation structure:
    a new donor is appended to donor_name and the amount goes into the
    matching sublist of donation.

    Returns:
        (name, amount) for the recorded donation.
    """
    new_donor = input("Name of donor?").title()
    if new_donor not in donor_name:
        donor_name.append(new_donor)
        # New donors append into the spare empty sublist at the end of donation.
        new_donation1 = float(input("Amount of donation by {}?".format(new_donor)))
        idx = len(donor_name) - 1
        donation[idx].append(new_donation1)
    else:
        # BUG FIX: the original prompt here was
        # "Amount of donation?".format(new_donor) — a .format() call on a
        # string without a placeholder, so the donor name was never shown.
        idx = donor_name.index(new_donor)
        new_donation1 = float(input("Amount of donation by {}?".format(new_donor)))
        donation[idx].append(new_donation1)
    print(f"{new_donor} has made a new donation of {new_donation1}")
    return (new_donor, new_donation1)
# compile donors and donation information
def donors_info():
    """Pair each donor with their donation history, sorted by surname.

    Reads the module-level donor_name and donation structures and returns
    a list of (name, donations) tuples ordered by sort_key.
    """
    records = list(zip(donor_name, donation))
    records.sort(key=sort_key)
    return records
# create tabular report
def create_report():
    """Print a table: each donor's total, gift count and average gift.

    Reads the module-level donor_name and donation structures; output
    formatting is unchanged from the original report.
    """
    mm = len(donor_name)
    # (removed: the original created `data_db2 = []*mm`, which is always []
    # and was never used)
    print("Name\t\t\tTotal Donation\t\tNum Gifts\tAverage Gift")
    for j in range(mm):
        total_donation = sum(donation[j])
        num_gifts = len(donation[j])
        avg_donation = total_donation / num_gifts
        print('{:23}'.format(donor_name[j]),
              '${:>10,.2f}'.format(total_donation),
              '{:>15}'.format(num_gifts),
              '{:10}'.format(""),
              '${:>10,.2f}'.format(avg_donation))
def exit_program():
    """Signal that the program has finished."""
    return "Done"
# In[2]:
# Part I
# list of donors and history of the amounts they have donated.
donor_name = ["William Gates, III",
              "Jeff Bezos",
              "Paul Allen",
              "Mark Zuckerberg"]
# NOTE: donation is a tuple of lists — the tuple itself is immutable but
# each per-donor list of gift amounts can still be appended to in place.
donation = ( [653772.32, 10020.17, 58796.00],
             [877.33],
             [663.23, 43.87, 1.32],
             [1663.23, 4300.87, 10432.0])
#part I
# This structure should be populated at first with at least five donors,
#with between 1 and 3 donations each.
#You can store that data structure in the global namespace.
donor_list1 = dict(zip(donor_name, donation))
# Bare expression below is a Jupyter-notebook cell-output artifact; it has
# no effect when run as a script.
donor_list1
# In[3]:
# part I (alternatively it can also be stored)
#You can store that data structure in the global namespace.
donor_list2 = donors_info()
# Notebook cell-output artifact (no effect as a script).
donor_list2
# In[4]:
# Part I
# The script should prompt the user (you) to choose from a menu of 3 actions:
# "Send a Thank You", "Create a Report" or "quit".
# Menu text shown on every iteration of the main loop.
prompt = (
    "Welcome\n"
    "Please choose from below options:\n"
    "1 - Add new donation\n"
    "2 - Create a Report \n"
    "3 - quit\n"
    ">>> "
)
# In[6]:
# part I execute the code
# Keep a spare empty sublist at the end of `donation` so the next new donor
# recorded by add_donor() has a list to append into.
if donation[len(donation) - 1] != []:
    donation += ([],)

if __name__ == '__main__':
    while True:
        response = input(prompt)
        if response == "1":
            addnew_donor = add_donor()
            donor_list2 = donors_info()
            if len(addnew_donor) > 0:
                # BUG FIX: the original passed three comma-separated strings
                # to print() and the .format() call bound only to the final
                # concatenated literal ("\nBay James"), so {name} and
                # {amount} were printed verbatim.  Format one whole string.
                print("Our institution thank you {name} for your generous gift of ${amount}\n"
                      "Thank you,\n"
                      "Bay James".format(name=addnew_donor[0], amount=addnew_donor[1]))
            else:
                print("No update on the donation name list")
        elif response == "2":
            create_report()
        elif response == "3":
            print("Done")
            break
|
[
"nettaimam@gmail.com"
] |
nettaimam@gmail.com
|
e7635eb2a070079ff7f9b12da8599a7f7df9228b
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/area/_hoverlabel.py
|
805d8440f422a88d6b6999761e5eb14f66cfc4bc
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Generated validator for the `area.hoverlabel` compound property.
    # Delegates validation/coercion to the `Hoverlabel` data class named in
    # data_class_str; data_docs is the user-facing help text for its fields.

    def __init__(self, plotly_name='hoverlabel', parent_name='area', **kwargs):
        super(HoverlabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str='Hoverlabel',
            data_docs="""
            bgcolor
                Sets the background color of the hover labels
                for this trace
            bgcolorsrc
                Sets the source reference on plot.ly for
                bgcolor .
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on plot.ly for
                bordercolor .
            font
                Sets the font used in hover labels.
            namelength
                Sets the length (in number of characters) of
                the trace name in the hover labels for this
                trace. -1 shows the whole name regardless of
                length. 0-3 shows the first 0-3 characters, and
                an integer >3 will show the whole name if it is
                less than that many characters, but if it is
                longer, will truncate to `namelength - 3`
                characters and add an ellipsis.
            namelengthsrc
                Sets the source reference on plot.ly for
                namelength .""",
            **kwargs
        )
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
eba10c184a75bd03c1f6cb226f430eecc5e1cf86
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_160/ch24_2019_06_06_19_38_40_160521.py
|
ca41a2bc244984c963d834bb43798083e1c4a97e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
def classifica_triangulo(x, y, z):
    """Classify a triangle by its side lengths.

    Returns "equilátero" when all three sides are equal, "isósceles"
    when exactly two are equal, and "escaleno" when all differ.
    """
    distinct_sides = len({x, y, z})
    if distinct_sides == 1:
        return "equilátero"
    if distinct_sides == 2:
        return "isósceles"
    return "escaleno"
|
[
"you@example.com"
] |
you@example.com
|
6fd8c445fb1a3641af36bae26538cf9de60da2b4
|
295f34f4411d984f0ff6026be6e96fe134dc1550
|
/usr/lib/pypy/lib-python/2.7/pydoc.py
|
4f76a1136e279e7c10274cfe7e4e18eba1bd6611
|
[] |
no_license
|
mcashjavier/disco-linux-raspy
|
1e3fed914b6040fa9972e7cfc7357ecb72070e8c
|
8c23103cf089059fbdadfad8cfb7059c1580da83
|
refs/heads/master
| 2022-12-20T17:06:39.967203
| 2019-03-12T12:09:22
| 2019-03-12T20:01:10
| 175,072,541
| 0
| 3
| null | 2022-12-18T06:59:27
| 2019-03-11T19:44:12
| null |
UTF-8
|
Python
| false
| false
| 95,769
|
py
|
#!/usr/bin/pypy
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages. Port number 0 can be
used to get an arbitrary unused port.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
    from collections import deque
except ImportError:
    # Python 2.3 compatibility
    # Emulate the only deque operation pydoc needs (popleft) on a list;
    # pop(0) is O(n) but acceptable for this fallback.
    class deque(list):
        def popleft(self):
            return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    result = []
    seen = set()
    for entry in sys.path:
        absdir = os.path.abspath(entry or '.')
        # Deduplicate case-insensitively where the platform demands it.
        key = os.path.normcase(absdir)
        if key not in seen and os.path.isdir(absdir):
            result.append(absdir)
            seen.add(key)
    return result
def getdoc(object):
    """Get the doc string or comments for an object."""
    # Fall back to preceding source comments when there is no docstring;
    # _encode converts a unicode result to a byte string for output.
    result = inspect.getdoc(object) or inspect.getcomments(object)
    result = _encode(result)
    # Strip leading blank lines and trailing whitespace; '' when undocumented.
    # (rstrip here is the Python 2 string-module function imported above.)
    return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest."""
    lines = doc.strip().split('\n')
    if len(lines) == 1:
        # Single line: it is the synopsis, there is no body.
        return lines[0], ''
    if len(lines) >= 2 and not lines[1].rstrip():
        # Blank second line: first line is the synopsis, rest is the body.
        return lines[0], '\n'.join(lines[2:])
    # No blank separator: treat the whole docstring as body only.
    return '', '\n'.join(lines)
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    name = object.__name__
    # Only qualify when the class lives in a different module.
    return name if object.__module__ == modname else object.__module__ + '.' + name
def isdata(object):
    """Check if an object is of a type that probably means it's data."""
    # Anything that is not a module, class, routine, frame, traceback or
    # code object is treated as plain data.
    non_data_checks = (inspect.ismodule, inspect.isclass, inspect.isroutine,
                       inspect.isframe, inspect.istraceback, inspect.iscode)
    return not any(check(object) for check in non_data_checks)
def replace(text, *pairs):
    """Do a series of global replacements on a string."""
    # pairs is a flat (old1, new1, old2, new2, ...) sequence; consume it
    # two items at a time.
    remaining = pairs
    while remaining:
        text = text.replace(remaining[0], remaining[1])
        remaining = remaining[2:]
    return text
def cram(text, maxlen):
    """Omit part of a string if needed to make it fit in a maximum length."""
    if len(text) <= maxlen:
        return text
    # Budget three characters for the ellipsis, split the rest around it.
    pre = max(0, (maxlen - 3) // 2)
    post = max(0, maxlen - 3 - pre)
    return text[:pre] + '...' + text[len(text) - post:]
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)

def stripid(text):
    """Remove the hexadecimal id from a Python object representation."""
    # The case of %p output is implementation-dependent, hence IGNORECASE.
    match = _re_stripid.search(text)
    if match is None:
        return text
    # Keep everything before " at 0x..." plus the trailing '>' run.
    return text[:match.start()] + match.group(1)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
    """Decide whether to show documentation on a variable."""
    # These names exist on every module/class and carry no user-relevant
    # documentation of their own.
    redundant = ('__builtins__', '__doc__', '__file__', '__path__',
                 '__module__', '__name__', '__slots__', '__package__',
                 '__cached__')
    if name in redundant:
        return 0
    if name.startswith('__') and name.endswith('__'):
        # Dunder names are displayed despite the underscores.
        return 1
    if name.startswith('_') and hasattr(obj, '_fields'):
        # Namedtuples expose public helpers with one leading underscore.
        return 1
    if all is not None:
        # The module declared __all__: it alone defines the public API.
        return name in all
    return not name.startswith('_')
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
    def fixup(entry):
        name, kind, cls, value = entry
        if inspect.isdatadescriptor(value):
            # Report data descriptors under a dedicated kind.
            return name, 'data descriptor', cls, value
        return name, kind, cls, value
    # map (not a list comprehension) to preserve the original return type.
    return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- Unicode support helpers

try:
    _unicode = unicode
except NameError:
    # If Python is built without Unicode support, the unicode type
    # will not exist. Fake one that nothing will match, and make
    # the _encode function that do nothing.
    class _unicode(object):
        pass
    _encoding = 'ascii'
    def _encode(text, encoding='ascii'):
        # No-op: nothing to encode without Unicode support.
        return text
else:
    import locale
    _encoding = locale.getpreferredencoding()
    def _encode(text, encoding=None):
        # Encode unicode text to the requested (or locale-preferred)
        # charset, HTML-escaping characters the charset cannot represent;
        # byte strings pass through unchanged.
        if isinstance(text, unicode):
            return text.encode(encoding or _encoding, 'xmlcharrefreplace')
        else:
            return text

def _binstr(obj):
    # Ensure that we have an encoded (binary) string representation of obj,
    # even if it is a unicode string.
    if isinstance(obj, _unicode):
        return obj.encode(_encoding, 'xmlcharrefreplace')
    return str(obj)
# ----------------------------------------------------- module manipulation
def ispackage(path):
    """Guess whether a path refers to a package directory."""
    if not os.path.isdir(path):
        return False
    # A directory is a package if it contains an __init__ module in any
    # of the source/compiled forms.
    return any(os.path.isfile(os.path.join(path, '__init__' + ext))
               for ext in ('.py', '.pyc', '.pyo'))
def source_synopsis(file):
    # Extract the one-line summary from a module's source: skip leading
    # comment and blank lines, then, if the first real line opens a
    # docstring, return the text before the closing triple quote.
    # Returns None when the module has no leading docstring.
    line = file.readline()
    while line[:1] == '#' or not strip(line):
        line = file.readline()
        if not line: break
    line = strip(line)
    if line[:4] == 'r"""': line = line[1:]   # treat raw docstrings like plain ones
    if line[:3] == '"""':
        line = line[3:]
        if line[-1:] == '\\': line = line[:-1]   # drop a line-continuation backslash
        # Skip blank lines between the opening quotes and the summary text.
        while not strip(line):
            line = file.readline()
            if not line: break
        result = strip(split(line, '"""')[0])
    else: result = None
    return result
def synopsis(filename, cache={}):
    """Get the one-line summary out of a module file."""
    # Results are memoized in `cache` — a deliberately shared mutable
    # default — keyed by filename and invalidated via the file's mtime.
    mtime = os.stat(filename).st_mtime
    lastupdate, result = cache.get(filename, (None, None))
    if lastupdate is None or lastupdate < mtime:
        info = inspect.getmoduleinfo(filename)
        try:
            file = open(filename)
        except IOError:
            # module can't be opened, so skip it
            return None
        if info and 'b' in info[2]: # binary modules have to be imported
            # Importing under a throwaway name; see the known-bug note at
            # the top of this file about imp.load_module clobbering.
            try: module = imp.load_module('__temp__', file, filename, info[1:])
            except: return None
            result = module.__doc__.splitlines()[0] if module.__doc__ else None
            del sys.modules['__temp__']
        else: # text modules can be directly examined
            result = source_synopsis(file)
        file.close()
        cache[filename] = (mtime, result)
    return result
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""
    def __init__(self, filename, exc_info):
        # exc_info is a (type, value, traceback) triple as returned by
        # sys.exc_info() at the point of failure.
        exc, value, tb = exc_info
        self.filename = filename
        self.exc = exc
        self.value = value
        self.tb = tb
    def __str__(self):
        exc = self.exc
        if type(exc) is types.ClassType:
            # Old-style (Python 2) exception classes: report the class name.
            exc = exc.__name__
        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
    """Import a Python source file or compiled file given its path.

    The file is first sniffed for the byte-compiled magic number to
    decide between PY_COMPILED and PY_SOURCE, then loaded with
    imp.load_module.  Any exception raised while loading is wrapped in
    ErrorDuringImport.
    """
    magic = imp.get_magic()
    # BUG FIX: both file objects were previously leaked when an
    # exception escaped before the close() call; close them in
    # try/finally instead.
    file = open(path, 'r')
    try:
        if file.read(len(magic)) == magic:
            kind = imp.PY_COMPILED
        else:
            kind = imp.PY_SOURCE
    finally:
        file.close()
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    file = open(path, 'r')
    try:
        module = imp.load_module(name, file, path, (ext, 'r', kind))
    except:
        raise ErrorDuringImport(path, sys.exc_info())
    finally:
        file.close()
    return module
def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.

    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised.  Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning.  If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension)."""
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module.  Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Avoid simply calling reload() because it leaves names in
                # the currently loaded module lying around if they're not
                # defined in the new source file.  Instead, remove the
                # module from sys.modules and re-import.  Also remove any
                # submodules because they won't appear in the newly loaded
                # module's namespace if they're already in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
            # The import error occurred directly in this function,
            # which means there is no such module in the path.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    # __import__ returns the top-level package; walk down attribute by
    # attribute to the requested submodule.
    for part in split(path, '.')[1:]:
        try: module = getattr(module, part)
        except AttributeError: return None
    return module
# ---------------------------------------------------- formatter base class
class Doc:
    """Abstract base class for documentation formatters.

    Subclasses override the doc* methods (docmodule, docclass,
    docroutine, ...); here they all default to 'fail', which raises
    TypeError.
    """

    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)

    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name), type(object).__name__)
        raise TypeError, message

    # Formatters a subclass does not implement fail loudly.
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail

    def getdocloc(self, object):
        """Return the location of module docs or None"""
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        docloc = os.environ.get("PYTHONDOCS",
                                "http://docs.python.org/library")
        basedir = os.path.join(sys.exec_prefix, "lib",
                               "python"+sys.version[0:3])
        # Only link to the online docs for modules shipped with Python
        # itself: selected builtin names, or files under the stdlib
        # directory but not under dist-packages/site-packages.
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
                                 'marshal', 'posix', 'signal', 'sys',
                                 'thread', 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'dist-packages')) and
              not file.startswith(os.path.join(basedir, 'site-packages')))) and
            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
            if docloc.startswith("http://"):
                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
            else:
                docloc = os.path.join(docloc, object.__name__ + ".html")
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""

    def __init__(self):
        Repr.__init__(self)
        # Truncation limits for container and string reprs.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def escape(self, text):
        """HTML-escape text: '&', '<' and '>' become character entities."""
        # BUG FIX: the replacement strings had been de-entitized (the
        # source read replace(text, '&', '&', '<', '<', '>', '>'), a
        # no-op that emits invalid HTML); restore the proper entities.
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')

    def repr(self, object):
        return Repr.repr(self, object)

    def repr1(self, x, level):
        # Dispatch to a type-specific repr_<typename> method if one exists.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))

    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
        # Highlight escape sequences within the repr.
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(testrepr))

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            # repr() itself may raise; fall back to a generic placeholder.
            return self.escape('<%s instance>' % x.__class__.__name__)

    repr_unicode = repr_string
class HTMLDoc(Doc):
    """Formatter class for HTML documentation.

    NOTE(review): several string literals in this class had their HTML
    character entities decoded to literal characters ('&nbsp;' shown as
    a raw non-breaking space, '&quot;' as '"'), which corrupts the
    generated markup; the entities are restored below.
    """

    # ------------------------------------------- HTML formatting utilities

    _repr_instance = HTMLRepr()
    repr = _repr_instance.repr
    escape = _repr_instance.escape

    def page(self, title, contents):
        """Format an HTML page."""
        return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents), 'ascii')

    def heading(self, title, fgcol, bgcol, extras=''):
        """Format a page heading."""
        # '&nbsp;' entities restored (had been decoded to raw characters).
        return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')

    def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
        """Format a section with a heading."""
        if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
        result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
        if prelude:
            result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
        else:
            result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
        return result + '\n<td width="100%%">%s</td></tr></table>' % contents

    def bigsection(self, title, *args):
        """Format a section with a big heading."""
        title = '<big><strong>%s</strong></big>' % title
        return self.section(title, *args)

    def preformat(self, text):
        """Format literal preformatted text."""
        text = self.escape(expandtabs(text))
        # Keep blank lines visible and protect runs of spaces; the
        # '&nbsp;' replacement entity is restored here too.
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')

    def multicolumn(self, list, format, cols=4):
        """Format a list of items into a multi-column list."""
        result = ''
        rows = (len(list)+cols-1)//cols
        for col in range(cols):
            result = result + '<td width="%d%%" valign=top>' % (100//cols)
            for i in range(rows*col, rows*col+rows):
                if i < len(list):
                    result = result + format(list[i]) + '<br>\n'
            result = result + '</td>'
        return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result

    def grey(self, text): return '<font color="#909090">%s</font>' % text

    def namelink(self, name, *dicts):
        """Make a link for an identifier, given name-to-URL mappings."""
        for dict in dicts:
            if name in dict:
                return '<a href="%s">%s</a>' % (dict[name], name)
        return name

    def classlink(self, object, modname):
        """Make a link for a class."""
        name, module = object.__name__, sys.modules.get(object.__module__)
        if hasattr(module, name) and getattr(module, name) is object:
            return '<a href="%s.html#%s">%s</a>' % (
                module.__name__, name, classname(object, modname))
        return classname(object, modname)

    def modulelink(self, object):
        """Make a link for a module."""
        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)

    def modpkglink(self, data):
        """Make a link for a module or package to display in an index."""
        name, path, ispackage, shadowed = data
        if shadowed:
            # A shadowed module is shown greyed-out and unlinked.
            return self.grey(name)
        if path:
            url = '%s.%s.html' % (path, name)
        else:
            url = '%s.html' % name
        if ispackage:
            # '&nbsp;' entity restored.
            text = '<strong>%s</strong>&nbsp;(package)' % name
        else:
            text = name
        return '<a href="%s">%s</a>' % (url, text)

    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0
        # Recognize URLs, RFC/PEP references, and identifiers (optionally
        # prefixed with 'self.').
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?(\w+))')
        while True:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))

            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # '&quot;' entity restored (double quote would end the
                # href attribute otherwise).
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif selfdot:
                # Create a link for methods like 'self.method(...)'
                # and use <strong> for attributes like 'self.attr'
                if text[end:end+1] == '(':
                    results.append('self.' + self.namelink(name, methods))
                else:
                    results.append('self.<strong>%s</strong>' % name)
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return join(results, '')

    # ---------------------------------------------- type-specific routines

    def formattree(self, tree, modname, parent=None):
        """Produce HTML for a class tree as given by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: one <dt> entry for the class.
                c, bases = entry
                result = result + '<dt><font face="helvetica, arial">'
                result = result + self.classlink(c, modname)
                if bases and bases != (parent,):
                    parents = []
                    for base in bases:
                        parents.append(self.classlink(base, modname))
                    result = result + '(' + join(parents, ', ') + ')'
                result = result + '\n</font></dt>'
            elif type(entry) is type([]):
                # Nested list: subclasses of the preceding class.
                result = result + '<dd>\n%s</dd>\n' % self.formattree(
                    entry, modname, c)
        return '<dl>\n%s</dl>\n' % result

    def docmodule(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a module object."""
        name = object.__name__ # ignore the passed-in name
        try:
            all = object.__all__
        except AttributeError:
            all = None
        # Build a breadcrumb of links for each package level in the name.
        parts = split(name, '.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
                (join(parts[:i+1], '.'), parts[i]))
        linkedname = join(links + parts[-1:], '.')
        head = '<big><big><strong>%s</strong></big></big>' % linkedname
        try:
            path = inspect.getabsfile(object)
            url = path
            if sys.platform == 'win32':
                import nturl2path
                url = nturl2path.pathname2url(path)
            filelink = '<a href="file:%s">%s</a>' % (url, path)
        except TypeError:
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = _binstr(object.__version__)
            # Strip CVS '$Revision: ... $' decoration from version strings.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(_binstr(object.__date__)))
        if info:
            head = head + ' (%s)' % join(info, ', ')
        docloc = self.getdocloc(object)
        if docloc is not None:
            docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
        else:
            docloc = ''
        result = self.heading(
            head, '#ffffff', '#7799ee',
            '<a href=".">index</a><br>' + filelink + docloc)

        def isnonbuiltinmodule(obj):
            return inspect.ismodule(obj) and obj is not __builtin__
        modules = inspect.getmembers(object, isnonbuiltinmodule)

        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
                    cdict[key] = cdict[value] = '#' + key
        for key, value in classes:
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if not key in cdict:
                            cdict[key] = cdict[base] = modname + '.html#' + key
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
                    fdict[key] = '#-' + key
                    if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))

        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc

        if hasattr(object, '__path__'):
            modpkgs = []
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs.append((modname, name, ispkg, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
            result = result + self.bigsection(
                'Package Contents', '#ffffff', '#aa55cc', contents)
        elif modules:
            contents = self.multicolumn(
                modules, lambda key_value, s=self: s.modulelink(key_value[1]))
            result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)

        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Classes', '#ffffff', '#ee77aa', join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Functions', '#ffffff', '#eeaa77', join(contents))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.document(value, key))
            result = result + self.bigsection(
                'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
        if hasattr(object, '__author__'):
            contents = self.markup(_binstr(object.__author__), self.preformat)
            result = result + self.bigsection(
                'Author', '#ffffff', '#7799ee', contents)
        if hasattr(object, '__credits__'):
            contents = self.markup(_binstr(object.__credits__), self.preformat)
            result = result + self.bigsection(
                'Credits', '#ffffff', '#7799ee', contents)

        return result

    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
                 *ignored):
        """Produce HTML documentation for a class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__

        contents = []
        push = contents.append

        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('<hr>\n')
                self.needone = 1
        hr = HorizontalRule()

        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            hr.maybe()
            push('<dl><dt>Method resolution order:</dt>\n')
            for base in mro:
                push('<dd>%s</dd>\n' % self.classlink(base,
                                                      object.__module__))
            push('</dl>\n')

        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value, name, mod,
                                           funcs, classes, mdict, object))
                    push('\n')
            return attrs

        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs

        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    base = self.docother(getattr(object, name), name, mod)
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getattr(value, "__doc__", None)
                    else:
                        doc = None
                    if doc is None:
                        push('<dl><dt>%s</dl>\n' % base)
                    else:
                        doc = self.markup(getdoc(value), self.preformat,
                                          funcs, classes, mdict)
                        doc = '<dd><tt>%s</tt>' % doc
                        push('<dl><dt>%s%s</dl>\n' % (base, doc))
                    push('\n')
            return attrs

        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))
        # Map attribute names (and, where hashable, values) to anchors so
        # markup() can cross-link mentions in docstrings.
        mdict = {}
        for key, kind, homecls, value in attrs:
            mdict[key] = anchor = '#' + name + '-' + key
            try:
                value = getattr(object, name)
            except Exception:
                # Some descriptors may meet a failure in their __get__.
                # (bug #1785)
                pass
            try:
                # The value may not be hashable (e.g., a data attr with
                # a dict or list value).
                mdict[value] = anchor
            except TypeError:
                pass

        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

            if thisclass is __builtin__.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = 'defined here'
            else:
                tag = 'inherited from %s' % self.classlink(thisclass,
                                                           object.__module__)
            tag += ':<br>\n'

            # Sort attrs by name.
            try:
                attrs.sort(key=lambda t: t[0])
            except TypeError:
                attrs.sort(lambda t1, t2: cmp(t1[0], t2[0]))    # 2.3 compat

            # Pump out the attrs, segregated by kind.
            attrs = spill('Methods %s' % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill('Class methods %s' % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill('Static methods %s' % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata('Data and other attributes %s' % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited

        contents = ''.join(contents)

        if name == realname:
            title = '<a name="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
                name, name, realname)
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % join(parents, ', ')
        doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        # '&nbsp;' entity restored in the trailing spacer.
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc

        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)

    def formatvalue(self, object):
        """Format an argument default value as text."""
        return self.grey('=' + self.repr(object))

    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + self.classlink(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % self.classlink(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % self.classlink(imclass,mod)
            object = object.im_func

        if name == realname:
            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
        else:
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                reallink = '<a href="#%s">%s</a>' % (
                    cl.__name__ + '-' + realname, realname)
                skipdocs = 1
            else:
                reallink = realname
            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
                anchor, name, reallink)
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = '<strong>%s</strong> <em>lambda</em> ' % name
                argspec = argspec[1:-1] # remove parentheses
        else:
            argspec = '(...)'

        decl = title + argspec + (note and self.grey(
               '<font face="helvetica, arial">%s</font>' % note))

        if skipdocs:
            return '<dl><dt>%s</dt></dl>\n' % decl
        else:
            doc = self.markup(
                getdoc(object), self.preformat, funcs, classes, methods)
            doc = doc and '<dd><tt>%s</tt></dd>' % doc
            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)

    def _docdescriptor(self, name, value, mod):
        # Shared formatter for descriptors (properties, data descriptors).
        results = []
        push = results.append

        if name:
            push('<dl><dt><strong>%s</strong></dt>\n' % name)
        if value.__doc__ is not None:
            doc = self.markup(getdoc(value), self.preformat)
            push('<dd><tt>%s</tt></dd>\n' % doc)
        push('</dl>\n')

        return ''.join(results)

    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a property."""
        return self._docdescriptor(name, object, mod)

    def docother(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a data object."""
        lhs = name and '<strong>%s</strong> = ' % name or ''
        return lhs + self.repr(object)

    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)

    def index(self, dir, shadowed=None):
        """Generate an HTML index for a directory of modules."""
        modpkgs = []
        if shadowed is None: shadowed = {}
        for importer, name, ispkg in pkgutil.iter_modules([dir]):
            modpkgs.append((name, '', ispkg, name in shadowed))
            shadowed[name] = 1

        modpkgs.sort()
        contents = self.multicolumn(modpkgs, self.modpkglink)
        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""

    def __init__(self):
        Repr.__init__(self)
        # Truncation limits for container and string reprs.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def repr1(self, x, level):
        # Dispatch to a type-specific repr_<typename> method if defined.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return cram(stripid(repr(x)), self.maxother)

    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + test + testrepr[0]
        return testrepr

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        except:
            # repr() itself may raise; fall back to a generic placeholder.
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
    """Formatter class for text documentation."""

    # ------------------------------------------- text formatting utilities

    _repr_instance = TextRepr()
    repr = _repr_instance.repr

    def bold(self, text):
        """Format a string in bold by overstriking."""
        # "X\bX" (char, backspace, char) renders as bold on traditional
        # terminals and pagers such as less/more.
        return join(map(lambda ch: ch + '\b' + ch, text), '')

    def indent(self, text, prefix='    '):
        """Indent text by prepending a given prefix to each line."""
        if not text: return ''
        lines = split(text, '\n')
        lines = map(lambda line, prefix=prefix: prefix + line, lines)
        # Trailing whitespace on the last line is dropped.
        if lines: lines[-1] = rstrip(lines[-1])
        return join(lines, '\n')

    def section(self, title, contents):
        """Format a section with a given heading."""
        return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None, prefix=''):
        """Render in text a class tree as returned by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: one line for the class itself.
                c, bases = entry
                result = result + prefix + classname(c, modname)
                if bases and bases != (parent,):
                    parents = map(lambda c, m=modname: classname(c, m), bases)
                    result = result + '(%s)' % join(parents, ', ')
                result = result + '\n'
            elif type(entry) is type([]):
                # Nested list: subclasses of the preceding class, indented.
                result = result + self.formattree(
                    entry, modname, c, prefix + '    ')
        return result
    def docmodule(self, object, name=None, mod=None):
        """Produce text documentation for a given module object."""
        name = object.__name__ # ignore the passed-in name
        synop, desc = splitdoc(getdoc(object))
        result = self.section('NAME', name + (synop and ' - ' + synop))

        try:
            all = object.__all__
        except AttributeError:
            all = None

        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        result = result + self.section('FILE', file)

        docloc = self.getdocloc(object)
        if docloc is not None:
            result = result + self.section('MODULE DOCS', docloc)

        if desc:
            result = result + self.section('DESCRIPTION', desc)

        classes = []
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None
                or (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
        funcs = []
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))

        modpkgs = []
        modpkgs_names = set()
        if hasattr(object, '__path__'):
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs_names.add(modname)
                if ispkg:
                    modpkgs.append(modname + ' (package)')
                else:
                    modpkgs.append(modname)

            modpkgs.sort()
            result = result + self.section(
                'PACKAGE CONTENTS', join(modpkgs, '\n'))

        # Detect submodules as sometimes created by C extensions
        submodules = []
        for key, value in inspect.getmembers(object, inspect.ismodule):
            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
                submodules.append(key)
        if submodules:
            submodules.sort()
            result = result + self.section(
                'SUBMODULES', join(submodules, '\n'))

        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [self.formattree(
                inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name))
            result = result + self.section('CLASSES', join(contents, '\n'))

        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name))
            result = result + self.section('FUNCTIONS', join(contents, '\n'))

        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key, name, maxlen=70))
            result = result + self.section('DATA', join(contents, '\n'))

        if hasattr(object, '__version__'):
            version = _binstr(object.__version__)
            # Strip CVS '$Revision: ... $' decoration from version strings.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            result = result + self.section('VERSION', version)
        if hasattr(object, '__date__'):
            result = result + self.section('DATE', _binstr(object.__date__))
        if hasattr(object, '__author__'):
            result = result + self.section('AUTHOR', _binstr(object.__author__))
        if hasattr(object, '__credits__'):
            result = result + self.section('CREDITS', _binstr(object.__credits__))
        return result
    def docclass(self, object, name=None, mod=None, *ignored):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__

        def makename(c, m=object.__module__):
            # Qualify the class name relative to this class's module.
            return classname(c, m)

        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % join(parents, ', ')

        doc = getdoc(object)
        contents = doc and [doc + '\n'] or []
        push = contents.append

        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push('    ' + makename(base))
            push('')

        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('-' * 70)
                self.needone = 1
        hr = HorizontalRule()

        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value,
                                           name, mod, object))
            return attrs

        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs

        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getdoc(value)
                    else:
                        doc = None
                    push(self.docother(getattr(object, name),
                                       name, mod, maxlen=70, doc=doc) + '\n')
            return attrs

        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))
        # Walk the mro, emitting each class's own attributes in turn.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

            if thisclass is __builtin__.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)

            # Sort attrs by name.
            attrs.sort()

            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited

        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        return title + '\n' + self.indent(rstrip(contents), ' |  ') + '\n'
    def formatvalue(self, object):
        """Format an argument default value as text."""
        return '=' + self.repr(object)
    def docroutine(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a function or method object.

        name: name to display (defaults to the object's own __name__);
        mod:  module name used to shorten qualified class names;
        cl:   class the routine is being documented under, if any.
        """
        realname = object.__name__
        name = name or realname
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            # Python 2 bound/unbound method: annotate where it comes from,
            # then document the underlying function object.
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + classname(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % classname(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % classname(imclass,mod)
            object = object.im_func
        if name == realname:
            title = self.bold(realname)
        else:
            # Displayed under an alias; skip the docstring if the class
            # dict holds the very same function under its real name
            # (it will be documented there instead).
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                skipdocs = 1
            title = self.bold(name) + ' = ' + realname
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = self.bold(name) + ' lambda '
                argspec = argspec[1:-1] # remove parentheses
        else:
            # Builtins and other non-Python callables expose no argspec.
            argspec = '(...)'
        decl = title + argspec + note
        if skipdocs:
            return decl + '\n'
        else:
            doc = getdoc(object) or ''
            return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a property.

        Delegates to _docdescriptor; the cl argument is accepted for
        interface symmetry with the other doc* methods but unused.
        """
        return self._docdescriptor(name, object, mod)
    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a data descriptor.

        Delegates to _docdescriptor; the cl argument is accepted for
        interface symmetry with the other doc* methods but unused.
        """
        return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    # Memoize by self-replacement: rebind the module-level name 'pager'
    # to the concrete pager chosen by getpager(), so every later call
    # goes straight to it without re-probing the environment.
    global pager
    pager = getpager()
    pager(text)
def getpager():
    """Decide what method to use for paging through text.

    Probes, in order: whether stdout/stdin are real interactive ttys,
    the PAGER environment variable, the TERM type, the platform, and
    finally which pager executables appear to work on this system.
    Returns a callable taking the text to display.
    """
    # Python 2 only: a redirected/replaced stdout is not a real file.
    if type(sys.stdout) is not types.FileType:
        return plainpager
    if not hasattr(sys.stdin, "isatty"):
        return plainpager
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            # Dumb terminals can't render the backspace-overstrike bold.
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32' or sys.platform.startswith('os2'):
        return lambda text: tempfilepager(plain(text), 'more <')
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')
    # Last resort: check whether 'more' works by running it on an empty
    # temporary file; otherwise fall back to the built-in tty pager.
    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Remove boldface formatting from text.

    Boldface here is the classic overstrike convention: each bold
    character is written as 'ch<backspace>ch'; this strips the leading
    character-plus-backspace pair, leaving plain text.
    """
    overstruck = re.compile('.\b')
    return overstruck.sub('', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    pipe = os.popen(cmd, 'w')
    try:
        # _encode converts to the output encoding before writing.
        pipe.write(_encode(text))
        pipe.close()
    except IOError:
        pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file.

    The file is written, handed to cmd via os.system, and always
    removed afterwards.
    """
    import tempfile
    # mkstemp creates the file atomically, avoiding the filename race
    # of the deprecated mktemp (and matching getpager() above, which
    # already uses mkstemp).
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    file = open(filename, 'w')
    file.write(_encode(text))
    file.close()
    try:
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def ttypager(text):
    """Page through text on a text terminal.

    Reads single keypresses in cbreak mode when the tty module is
    available: Enter advances one line, space/other keys advance one
    screen, 'b'/'B'/ESC go back a screen, 'q'/'Q' quits.
    """
    lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
    try:
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError):
        # No tty support (e.g. Windows): fall back to line-based input,
        # taking the first character of each entered line.
        tty = None
        getchar = lambda: sys.stdin.readline()[:-1][:1]
    try:
        try:
            h = int(os.environ.get('LINES', 0))
        except ValueError:
            h = 0
        if h <= 1:
            h = 25
        # r = index of the next unshown line; inc = lines per screenful.
        r = inc = h - 1
        sys.stdout.write(join(lines[:inc], '\n') + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()
            if c in ('q', 'Q'):
                sys.stdout.write('\r          \r')
                break
            elif c in ('\r', '\n'):
                # Advance a single line.
                sys.stdout.write('\r          \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                # Back up two screens; the write below re-shows one.
                r = r - inc - inc
                if r < 0: r = 0
            sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
            r = r + inc
    finally:
        # Restore the terminal mode even if paging is interrupted.
        if tty:
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text.  This is the ultimate fallback."""
    # Strip overstrike bold and encode for the current stdout encoding.
    sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
    """Produce a short description of the given thing.

    Returns a phrase such as 'built-in module os' or 'class Foo';
    falls back to the type name for anything unrecognized.
    """
    if inspect.ismodule(thing):
        if thing.__name__ in sys.builtin_module_names:
            return 'built-in module ' + thing.__name__
        if hasattr(thing, '__path__'):
            # Only packages carry a __path__ attribute.
            return 'package ' + thing.__name__
        else:
            return 'module ' + thing.__name__
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    if inspect.isgetsetdescriptor(thing):
        return 'getset descriptor %s.%s.%s' % (
            thing.__objclass__.__module__, thing.__objclass__.__name__,
            thing.__name__)
    if inspect.ismemberdescriptor(thing):
        return 'member descriptor %s.%s.%s' % (
            thing.__objclass__.__module__, thing.__objclass__.__name__,
            thing.__name__)
    if inspect.isclass(thing):
        return 'class ' + thing.__name__
    if inspect.isfunction(thing):
        return 'function ' + thing.__name__
    if inspect.ismethod(thing):
        return 'method ' + thing.__name__
    # Python 2 old-style class instances all share one type.
    if type(thing) is types.InstanceType:
        return 'instance of ' + thing.__class__.__name__
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary.

    First imports the longest importable prefix of the dotted path,
    then walks the remaining components with getattr.  Returns None
    when an attribute lookup fails.
    """
    parts = [part for part in split(path, '.') if part]
    module, n = None, 0
    while n < len(parts):
        # Try successively longer prefixes as a module path.
        nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
        if nextmodule: module, n = nextmodule, n + 1
        else: break
    if module:
        object = module
    else:
        # No importable prefix: resolve against the builtins module.
        object = __builtin__
    for part in parts[n:]:
        try:
            object = getattr(object, part)
        except AttributeError:
            return None
    return object
# --------------------------------------- interactive interpreter interface
# Shared formatter singletons used by the rest of the module.
text = TextDoc()
html = HTMLDoc()
# In Python 2, all old-style class instances share a single type;
# capture it so render_doc can recognize such instances.
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
    """Given an object or a path to an object, get the object and its name.

    Raises ImportError when a string path cannot be located.  For a
    non-string, the name is taken from __name__ when that is a string.
    """
    if isinstance(thing, str):
        object = locate(thing, forceload)
        if object is None:
            raise ImportError, 'no Python documentation found for %r' % thing
        return object, thing
    else:
        name = getattr(thing, '__name__', None)
        return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Render text documentation, given an object or a path to an object.

    title is a %-format string receiving the object's description.
    """
    object, name = resolve(thing, forceload)
    desc = describe(object)
    module = inspect.getmodule(object)
    if name and '.' in name:
        # Qualify the description with the dotted container path.
        desc += ' in ' + name[:name.rfind('.')]
    elif module and module is not object:
        desc += ' in module ' + module.__name__
    if type(object) is _OLD_INSTANCE_TYPE:
        # If the passed object is an instance of an old-style class,
        # document its available methods instead of its value.
        object = object.__class__
    elif not (inspect.ismodule(object) or
              inspect.isclass(object) or
              inspect.isroutine(object) or
              inspect.isgetsetdescriptor(object) or
              inspect.ismemberdescriptor(object) or
              isinstance(object, property)):
        # If the passed object is a piece of data or an instance,
        # document its available methods instead of its value.
        object = type(object)
        desc += ' object'
    return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Display text documentation, given an object or a path to an object."""
    try:
        pager(render_doc(thing, title, forceload))
    except (ImportError, ErrorDuringImport), value:
        # Resolution failures are reported, not raised, in this
        # user-facing entry point.
        print value
def writedoc(thing, forceload=0):
    """Write HTML documentation to a file in the current directory.

    The output file is named '<object name>.html'.
    """
    try:
        object, name = resolve(thing, forceload)
        page = html.page(describe(object), html.document(object, name))
        file = open(name + '.html', 'w')
        file.write(page)
        file.close()
        print 'wrote', name + '.html'
    except (ImportError, ErrorDuringImport), value:
        print value
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree.

    The 'done' parameter is kept for backward compatibility; it is
    initialized but no longer consulted here.
    """
    if done is None: done = {}
    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
    return
class Helper:
    """Interactive help session: dispatches requests for keywords,
    symbols, topics and modules, and runs the 'help>' prompt loop."""

    # These dictionaries map a topic name to either an alias, or a tuple
    # (label, seealso-items).  The "label" is the label of the corresponding
    # section in the .rst file under Doc/ and an index into the dictionary
    # in pydoc_data/topics.py.
    #
    # CAUTION: if you change one of these dictionaries, be sure to adapt the
    #          list of needed labels in Doc/tools/pyspecific.py and
    #          regenerate the pydoc_data/topics.py file by running
    #              make pydoc-topics
    #          in Doc/ and copying the output file into the Lib/ directory.

    keywords = {
        'and': 'BOOLEAN',
        'as': 'with',
        'assert': ('assert', ''),
        'break': ('break', 'while for'),
        'class': ('class', 'CLASSES SPECIALMETHODS'),
        'continue': ('continue', 'while for'),
        'def': ('function', ''),
        'del': ('del', 'BASICMETHODS'),
        'elif': 'if',
        'else': ('else', 'while for'),
        'except': 'try',
        'exec': ('exec', ''),
        'finally': 'try',
        'for': ('for', 'break continue while'),
        'from': 'import',
        'global': ('global', 'NAMESPACES'),
        'if': ('if', 'TRUTHVALUE'),
        'import': ('import', 'MODULES'),
        'in': ('in', 'SEQUENCEMETHODS2'),
        'is': 'COMPARISON',
        'lambda': ('lambda', 'FUNCTIONS'),
        'not': 'BOOLEAN',
        'or': 'BOOLEAN',
        'pass': ('pass', ''),
        'print': ('print', ''),
        'raise': ('raise', 'EXCEPTIONS'),
        'return': ('return', 'FUNCTIONS'),
        'try': ('try', 'EXCEPTIONS'),
        'while': ('while', 'break continue if TRUTHVALUE'),
        'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
        'yield': ('yield', ''),
    }
    # Either add symbols to this dictionary or to the symbols dictionary
    # directly: Whichever is easier. They are merged later.
    _symbols_inverse = {
        'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
        'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
                       '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
        'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
        'UNARY' : ('-', '~'),
        'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
                                '^=', '<<=', '>>=', '**=', '//='),
        'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
        'COMPLEX' : ('j', 'J')
    }
    symbols = {
        '%': 'OPERATORS FORMATTING',
        '**': 'POWER',
        ',': 'TUPLES LISTS FUNCTIONS',
        '.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
        '...': 'ELLIPSIS',
        ':': 'SLICINGS DICTIONARYLITERALS',
        '@': 'def class',
        '\\': 'STRINGS',
        '_': 'PRIVATENAMES',
        '__': 'PRIVATENAMES SPECIALMETHODS',
        '`': 'BACKQUOTES',
        '(': 'TUPLES FUNCTIONS CALLS',
        ')': 'TUPLES FUNCTIONS CALLS',
        '[': 'LISTS SUBSCRIPTS SLICINGS',
        ']': 'LISTS SUBSCRIPTS SLICINGS'
    }
    # Merge _symbols_inverse into symbols, appending topic names to any
    # existing entry for a symbol.
    for topic, symbols_ in _symbols_inverse.iteritems():
        for symbol in symbols_:
            topics = symbols.get(symbol, topic)
            if topic not in topics:
                topics = topics + ' ' + topic
            symbols[symbol] = topics

    topics = {
        'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
                  'FUNCTIONS CLASSES MODULES FILES inspect'),
        'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
                    'TYPES'),
        'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
        'FORMATTING': ('formatstrings', 'OPERATORS'),
        'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
                    'FORMATTING TYPES'),
        'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
        'INTEGER': ('integers', 'int range'),
        'FLOAT': ('floating', 'float math'),
        'COMPLEX': ('imaginary', 'complex cmath'),
        'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
        'MAPPINGS': 'DICTIONARIES',
        'FUNCTIONS': ('typesfunctions', 'def TYPES'),
        'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
        'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
        'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
        'FRAMEOBJECTS': 'TYPES',
        'TRACEBACKS': 'TYPES',
        'NONE': ('bltin-null-object', ''),
        'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
        'FILES': ('bltin-file-objects', ''),
        'SPECIALATTRIBUTES': ('specialattrs', ''),
        'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
        'MODULES': ('typesmodules', 'import'),
        'PACKAGES': 'import',
        'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
                        'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
                        'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
                        'LISTS DICTIONARIES BACKQUOTES'),
        'OPERATORS': 'EXPRESSIONS',
        'PRECEDENCE': 'EXPRESSIONS',
        'OBJECTS': ('objects', 'TYPES'),
        'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
                           'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
                           'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
        'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
        'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
        'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
        'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
                             'SPECIALMETHODS'),
        'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
                             'SPECIALMETHODS'),
        'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
        'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
                          'SPECIALMETHODS'),
        'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
        'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
        'DYNAMICFEATURES': ('dynamic-features', ''),
        'SCOPING': 'NAMESPACES',
        'FRAMES': 'NAMESPACES',
        'EXCEPTIONS': ('exceptions', 'try except finally raise'),
        'COERCIONS': ('coercion-rules','CONVERSIONS'),
        'CONVERSIONS': ('conversions', 'COERCIONS'),
        'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
        'SPECIALIDENTIFIERS': ('id-classes', ''),
        'PRIVATENAMES': ('atom-identifiers', ''),
        'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
                     'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
        'TUPLES': 'SEQUENCES',
        'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
        'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
        'LISTLITERALS': ('lists', 'LISTS LITERALS'),
        'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
        'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
        'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
        'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
                       'ATTRIBUTEMETHODS'),
        'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
        'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
        'CALLS': ('calls', 'EXPRESSIONS'),
        'POWER': ('power', 'EXPRESSIONS'),
        'UNARY': ('unary', 'EXPRESSIONS'),
        'BINARY': ('binary', 'EXPRESSIONS'),
        'SHIFTING': ('shifting', 'EXPRESSIONS'),
        'BITWISE': ('bitwise', 'EXPRESSIONS'),
        'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
        'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
        'ASSERTION': 'assert',
        'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
        'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
        'DELETION': 'del',
        'PRINTING': 'print',
        'RETURNING': 'return',
        'IMPORTING': 'import',
        'CONDITIONAL': 'if',
        'LOOPING': ('compound', 'for while break continue'),
        'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
        'DEBUGGING': ('debugger', 'pdb'),
        'CONTEXTMANAGERS': ('context-managers', 'with'),
    }

    def __init__(self, input=None, output=None):
        # Streams default lazily (via the properties below) to the
        # current sys.stdin/sys.stdout rather than being captured now.
        self._input = input
        self._output = output

    input  = property(lambda self: self._input or sys.stdin)
    output = property(lambda self: self._output or sys.stdout)

    def __repr__(self):
        # Typing 'help' (no parens) at the top-level prompt reaches
        # here; start an interactive session instead of printing a repr.
        if inspect.stack()[1][3] == '?':
            self()
            return ''
        return '<pydoc.Helper instance>'

    # Sentinel default: distinguishes help() (go interactive) from
    # help(None) or any other explicit argument.
    _GoInteractive = object()
    def __call__(self, request=_GoInteractive):
        if request is not self._GoInteractive:
            self.help(request)
        else:
            self.intro()
            self.interact()
            self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')

    def interact(self):
        self.output.write('\n')
        while True:
            try:
                request = self.getline('help> ')
                if not request: break
            except (KeyboardInterrupt, EOFError):
                break
            # Strip surrounding quotes so help> '"for"' works too.
            request = strip(replace(request, '"', '', "'", ''))
            if lower(request) in ('q', 'quit'): break
            self.help(request)

    def getline(self, prompt):
        """Read one line, using raw_input when available."""
        if self.input is sys.stdin:
            return raw_input(prompt)
        else:
            self.output.write(prompt)
            self.output.flush()
            return self.input.readline()

    def help(self, request):
        # Dispatch on the request: special strings, then symbols,
        # keywords and topics, finally arbitrary dotted names/objects.
        if type(request) is type(''):
            request = request.strip()
            if request == 'help': self.intro()
            elif request == 'keywords': self.listkeywords()
            elif request == 'symbols': self.listsymbols()
            elif request == 'topics': self.listtopics()
            elif request == 'modules': self.listmodules()
            elif request[:8] == 'modules ':
                self.listmodules(split(request)[1])
            elif request in self.symbols: self.showsymbol(request)
            elif request in self.keywords: self.showtopic(request)
            elif request in self.topics: self.showtopic(request)
            elif request: doc(request, 'Help on %s:')
        elif isinstance(request, Helper): self()
        else: doc(request, 'Help on %s:')
        self.output.write('\n')

    def intro(self):
        self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))

    def list(self, items, columns=4, width=80):
        # Column-major layout: item i of a row/col grid is at
        # col * rows + row.
        items = items[:]
        items.sort()
        colw = width / columns
        rows = (len(items) + columns - 1) / columns
        for row in range(rows):
            for col in range(columns):
                i = col * rows + row
                if i < len(items):
                    self.output.write(items[i])
                    if col < columns - 1:
                        self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
            self.output.write('\n')

    def listkeywords(self):
        self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
        self.list(self.keywords.keys())

    def listsymbols(self):
        self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
        self.list(self.symbols.keys())

    def listtopics(self):
        self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
        self.list(self.topics.keys())

    def showtopic(self, topic, more_xrefs=''):
        # Resolve aliases (string targets) recursively, then display the
        # pre-generated text from pydoc_data.topics plus cross references.
        try:
            import pydoc_data.topics
        except ImportError:
            self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
            return
        target = self.topics.get(topic, self.keywords.get(topic))
        if not target:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        if type(target) is type(''):
            return self.showtopic(target, more_xrefs)
        label, xrefs = target
        try:
            doc = pydoc_data.topics.topics[label]
        except KeyError:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        pager(strip(doc) + '\n')
        if more_xrefs:
            xrefs = (xrefs or '') + ' ' + more_xrefs
        if xrefs:
            import StringIO, formatter
            buffer = StringIO.StringIO()
            formatter.DumbWriter(buffer).send_flowing_data(
                'Related help topics: ' + join(split(xrefs), ', ') + '\n')
            self.output.write('\n%s\n' % buffer.getvalue())

    def showsymbol(self, symbol):
        # A symbol entry is 'TOPIC xref xref ...'; show the topic with
        # the remainder as extra cross references.
        target = self.symbols[symbol]
        topic, _, xrefs = target.partition(' ')
        self.showtopic(topic, xrefs)

    def listmodules(self, key=''):
        if key:
            self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
            apropos(key)
        else:
            self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
            modules = {}
            def callback(path, modname, desc, modules=modules):
                if modname and modname[-9:] == '.__init__':
                    modname = modname[:-9] + ' (package)'
                if find(modname, '.') < 0:
                    modules[modname] = 1
            def onerror(modname):
                callback(None, modname, None)
            ModuleScanner().run(callback, onerror=onerror)
            self.list(modules.keys())
            self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
# Module-level singleton backing the interpreter's built-in help().
help = Helper()
class Scanner:
    """A generic tree iterator.

    roots:    initial nodes to expand (the roots themselves are never
              yielded, only their descendants);
    children: callable returning a node's children as a list;
    descendp: callable deciding whether to expand a yielded node.
    """
    def __init__(self, roots, children, descendp):
        self.roots = roots[:]
        self.state = []
        self.children = children
        self.descendp = descendp

    def next(self):
        """Return the next node in depth-first order, or None at the end."""
        # Iterative formulation of the traversal: self.state is a stack
        # of (node, remaining-children) pairs.
        while True:
            if not self.state:
                if not self.roots:
                    return None
                root = self.roots.pop(0)
                self.state = [(root, self.children(root))]
            node, pending = self.state[-1]
            if not pending:
                self.state.pop()
                continue
            child = pending.pop(0)
            if self.descendp(child):
                self.state.append((child, self.children(child)))
            return child
class ModuleScanner:
    """An interruptible scanner that searches module synopses."""
    def run(self, callback, key=None, completer=None, onerror=None):
        """Scan built-in and importable modules, reporting via callback.

        callback(path, modname, desc) is invoked for every module, or
        only for modules whose 'name - synopsis' contains key (case-
        insensitive) when key is given.  Setting self.quit = True from
        another thread stops the walk; completer() runs at the end.
        """
        if key: key = lower(key)
        self.quit = False
        seen = {}
        for modname in sys.builtin_module_names:
            if modname != '__main__':
                seen[modname] = 1
                if key is None:
                    callback(None, modname, '')
                else:
                    try:
                        module_doc = __import__(modname).__doc__
                    except ImportError:
                        module_doc = None
                    # First docstring line serves as the synopsis.
                    desc = split(module_doc or '', '\n')[0]
                    if find(lower(modname + ' - ' + desc), key) >= 0:
                        callback(None, modname, desc)
        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
            if self.quit:
                break
            if key is None:
                callback(None, modname, '')
            else:
                loader = importer.find_module(modname)
                if hasattr(loader,'get_source'):
                    # Read the synopsis from source without importing.
                    import StringIO
                    desc = source_synopsis(
                        StringIO.StringIO(loader.get_source(modname))
                    ) or ''
                    if hasattr(loader,'get_filename'):
                        path = loader.get_filename(modname)
                    else:
                        path = None
                else:
                    # No source available: fall back to importing.
                    module = loader.load_module(modname)
                    desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
                    path = getattr(module,'__file__',None)
                if find(lower(modname + ' - ' + desc), key) >= 0:
                    callback(path, modname, desc)
        if completer:
            completer()
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print modname, desc and '- ' + desc
    def onerror(modname):
        # Keep scanning past modules that fail to import.
        pass
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore') # ignore problems during import
        ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
    """Serve HTML documentation over HTTP on localhost:port until quit.

    callback(server) runs once the server is activated; completer()
    runs when serving stops.  Setting server.quit = True (e.g. from
    another thread) ends the serve loop.
    """
    import BaseHTTPServer, mimetools, select

    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
    class Message(mimetools.Message):
        def __init__(self, fp, seekable=1):
            Message = self.__class__
            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
            self.encodingheader = self.getheader('content-transfer-encoding')
            self.typeheader = self.getheader('content-type')
            self.parsetype()
            self.parseplist()

    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def send_document(self, title, contents):
            try:
                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                self.wfile.write(html.page(title, contents))
            except IOError: pass

        def do_GET(self):
            # '/name.html' documents 'name'; the root path serves an
            # index of all modules.
            path = self.path
            if path[-5:] == '.html': path = path[:-5]
            if path[:1] == '/': path = path[1:]
            if path and path != '.':
                try:
                    obj = locate(path, forceload=1)
                except ErrorDuringImport, value:
                    self.send_document(path, html.escape(str(value)))
                    return
                if obj:
                    self.send_document(describe(obj), html.document(obj, path))
                else:
                    self.send_document(path,
                        'no Python documentation found for %s' % repr(path))
            else:
                heading = html.heading(
                    '<big><big><strong>Python: Index of Modules</strong></big></big>',
                    '#ffffff', '#7799ee')
                def bltinlink(name):
                    return '<a href="%s.html">%s</a>' % (name, name)
                names = filter(lambda x: x != '__main__',
                               sys.builtin_module_names)
                contents = html.multicolumn(names, bltinlink)
                indices = ['<p>' + html.bigsection(
                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]
                seen = {}
                for dir in sys.path:
                    indices.append(html.index(dir, seen))
                contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <ping@lfw.org></font>'''
                self.send_document('Index of Modules', contents)

        def log_message(self, *args): pass

    class DocServer(BaseHTTPServer.HTTPServer):
        def __init__(self, port, callback):
            host = 'localhost'
            self.address = (host, port)
            self.callback = callback
            self.base.__init__(self, self.address, self.handler)

        def serve_until_quit(self):
            # Poll with a 1-second timeout so self.quit is noticed
            # even while no requests arrive.
            import select
            self.quit = False
            while not self.quit:
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd: self.handle_request()

        def server_activate(self):
            self.base.server_activate(self)
            self.url = 'http://%s:%d/' % (self.address[0], self.server_port)
            if self.callback: self.callback(self)

    DocServer.base = BaseHTTPServer.HTTPServer
    DocServer.handler = DocHandler
    DocHandler.MessageClass = Message
    try:
        try:
            DocServer(port, callback).serve_until_quit()
        except (KeyboardInterrupt, select.error):
            pass
    finally:
        if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
    """Graphical interface (starts web server and pops up a control window)."""
    class GUI:
        # Tkinter control panel: start/stop the doc server, open a
        # browser on it, and search module synopses in a background
        # thread via ModuleScanner.
        def __init__(self, window, port=7464):
            self.window = window
            self.server = None
            self.scanner = None
            import Tkinter
            self.server_frm = Tkinter.Frame(window)
            self.title_lbl = Tkinter.Label(self.server_frm,
                text='Starting server...\n ')
            self.open_btn = Tkinter.Button(self.server_frm,
                text='open browser', command=self.open, state='disabled')
            self.quit_btn = Tkinter.Button(self.server_frm,
                text='quit serving', command=self.quit, state='disabled')
            self.search_frm = Tkinter.Frame(window)
            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
            self.search_ent = Tkinter.Entry(self.search_frm)
            self.search_ent.bind('<Return>', self.search)
            self.stop_btn = Tkinter.Button(self.search_frm,
                text='stop', pady=0, command=self.stop, state='disabled')
            if sys.platform == 'win32':
                # Trying to hide and show this button crashes under Windows.
                self.stop_btn.pack(side='right')
            self.window.title('pydoc')
            self.window.protocol('WM_DELETE_WINDOW', self.quit)
            self.title_lbl.pack(side='top', fill='x')
            self.open_btn.pack(side='left', fill='x', expand=1)
            self.quit_btn.pack(side='right', fill='x', expand=1)
            self.server_frm.pack(side='top', fill='x')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            self.search_frm.pack(side='top', fill='x')
            self.search_ent.focus_set()
            font = ('helvetica', sys.platform == 'win32' and 8 or 10)
            self.result_lst = Tkinter.Listbox(window, font=font, height=6)
            self.result_lst.bind('<Button-1>', self.select)
            self.result_lst.bind('<Double-Button-1>', self.goto)
            self.result_scr = Tkinter.Scrollbar(window,
                orient='vertical', command=self.result_lst.yview)
            self.result_lst.config(yscrollcommand=self.result_scr.set)
            self.result_frm = Tkinter.Frame(window)
            self.goto_btn = Tkinter.Button(self.result_frm,
                text='go to selected', command=self.goto)
            self.hide_btn = Tkinter.Button(self.result_frm,
                text='hide results', command=self.hide)
            self.goto_btn.pack(side='left', fill='x', expand=1)
            self.hide_btn.pack(side='right', fill='x', expand=1)
            self.window.update()
            self.minwidth = self.window.winfo_width()
            self.minheight = self.window.winfo_height()
            self.bigminheight = (self.server_frm.winfo_reqheight() +
                                 self.search_frm.winfo_reqheight() +
                                 self.result_lst.winfo_reqheight() +
                                 self.result_frm.winfo_reqheight())
            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
            self.expanded = 0
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.window.tk.willdispatch()
            # Run the documentation server in a daemon-style worker so
            # the Tk mainloop stays responsive.
            import threading
            threading.Thread(
                target=serve, args=(port, self.ready, self.quit)).start()

        def ready(self, server):
            # Called from the server thread once serving has started.
            self.server = server
            self.title_lbl.config(
                text='Python documentation server at\n' + server.url)
            self.open_btn.config(state='normal')
            self.quit_btn.config(state='normal')

        def open(self, event=None, url=None):
            url = url or self.server.url
            try:
                import webbrowser
                webbrowser.open(url)
            except ImportError: # pre-webbrowser.py compatibility
                if sys.platform == 'win32':
                    os.system('start "%s"' % url)
                else:
                    rc = os.system('netscape -remote "openURL(%s)" &' % url)
                    if rc: os.system('netscape "%s" &' % url)

        def quit(self, event=None):
            if self.server:
                self.server.quit = 1
            self.window.quit()

        def search(self, event=None):
            key = self.search_ent.get()
            self.stop_btn.pack(side='right')
            self.stop_btn.config(state='normal')
            self.search_lbl.config(text='Searching for "%s"...' % key)
            self.search_ent.forget()
            self.search_lbl.pack(side='left')
            self.result_lst.delete(0, 'end')
            self.goto_btn.config(state='disabled')
            self.expand()
            import threading
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = ModuleScanner()
            threading.Thread(target=self.scanner.run,
                             args=(self.update, key, self.done)).start()

        def update(self, path, modname, desc):
            # Scanner callback: append one result line to the listbox.
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            self.result_lst.insert('end',
                modname + ' - ' + (desc or '(no description)'))

        def stop(self, event=None):
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = None

        def done(self):
            # Scanner completer: restore the search controls.
            self.scanner = None
            self.search_lbl.config(text='Search for')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            if sys.platform != 'win32': self.stop_btn.forget()
            self.stop_btn.config(state='disabled')

        def select(self, event=None):
            self.goto_btn.config(state='normal')

        def goto(self, event=None):
            selection = self.result_lst.curselection()
            if selection:
                modname = split(self.result_lst.get(selection[0]))[0]
                self.open(url=self.server.url + modname + '.html')

        def collapse(self):
            if not self.expanded: return
            self.result_frm.forget()
            self.result_scr.forget()
            self.result_lst.forget()
            self.bigwidth = self.window.winfo_width()
            self.bigheight = self.window.winfo_height()
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.expanded = 0

        def expand(self):
            if self.expanded: return
            self.result_frm.pack(side='bottom', fill='x')
            self.result_scr.pack(side='right', fill='y')
            self.result_lst.pack(side='top', fill='both', expand=1)
            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
            self.window.wm_minsize(self.minwidth, self.bigminheight)
            self.expanded = 1

        def hide(self, event=None):
            self.stop()
            self.collapse()

    import Tkinter
    try:
        root = Tkinter.Tk()
        # Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
        # If the problem is ever fixed in Tk, the explicit
        # destroy can go.
        try:
            gui = GUI(root)
            root.mainloop()
        finally:
            root.destroy()
    except KeyboardInterrupt:
        pass
# -------------------------------------------------- command-line interface
def ispath(x):
    """Return True when x is a string that contains a path separator."""
    return isinstance(x, str) and os.sep in x
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
# Sentinel exception: any usage error jumps to the help text below.
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
# -g: launch the Tkinter GUI browser.
if opt == '-g':
gui()
return
# -k: keyword search through module synopsis lines.
if opt == '-k':
apropos(val)
return
# -p: serve HTML documentation over HTTP on the given port.
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
# -w: write HTML documentation files instead of showing text.
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
# A path argument names a source file to import and document.
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
# Run the command-line interface when executed as a script (Python 2 code).
if __name__ == '__main__': cli()
|
[
"prog2@magneticash.com"
] |
prog2@magneticash.com
|
9697e06ec23f5f44526854900b3c097a2d3210cf
|
8b649d873b353265a4cc26ba4426507236546a0e
|
/rest_traversal/__init__.py
|
c87ba6fb9967acd92234e3731dc3abf8a7ebfa27
|
[] |
no_license
|
slykar/rest-traversal-example
|
53bd5c80158f0ddc223d626aeddc1bc48f5e00f9
|
e3f20ed4b6e40f91cb4ed7d3bd6a7bf36bc9668f
|
refs/heads/master
| 2020-04-01T22:25:26.292420
| 2016-08-25T11:07:42
| 2016-08-25T11:07:42
| 68,810,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
from pyramid.config import Configurator
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.authentication import BasicAuthAuthenticationPolicy
from rest_traversal import rest_api, db
import cornice
# In-memory account table used by the BasicAuth check below.  Keys are
# usernames; each record carries the password, an optional customer id,
# and the principal group the user belongs to.
CUSTOMERS = {
    'super': {
        'customer_id': None,
        'password': 'duper',
        'group': 'admin'
    },
    'sly': {
        'customer_id': None,
        'password': 'zx8',
        'group': 'staff'
    },
    'daniel': {
        'customer_id': 1,
        'password': 'spree',
        'group': 'customers'
    }
}


def auth(username, password, request):
    """BasicAuth ``check`` callback.

    Returns the list of principals for a valid username/password pair,
    or None when the credentials do not match any known customer.
    """
    record = CUSTOMERS.get(username)
    if record is None or record['password'] != password:
        return None
    # Principals: the customer id and the group, each with a short prefix.
    return [
        'c:{customer_id}'.format(**record),
        'g:{group}'.format(**record)
    ]
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
# Configure the database layer first so views can rely on it at scan time.
db.configure(settings)
# The traversal tree is rooted at rest_api.bootstrap.
config = Configurator(settings=settings, root_factory=rest_api.bootstrap)
# HTTP Basic auth validated by the auth() callback defined in this module.
config.set_authentication_policy(BasicAuthAuthenticationPolicy(check=auth))
config.set_authorization_policy(ACLAuthorizationPolicy())
config.add_route('login', '/login')
config.scan('rest_traversal')
# Include REST Traversal views
config.include('spree.rest')
config.include('spree.rest.traversal.views')
return config.make_wsgi_app()
|
[
"sylwester.kardziejonek@gmail.com"
] |
sylwester.kardziejonek@gmail.com
|
c47b52394b7ca87f8a0783855dd592f0131895d0
|
582676830daade26aef570f8a15f71cda4f21d2a
|
/probe.py
|
a67ba8a1f95b1cf58e3a5877834ffeec9bc552ef
|
[
"MIT"
] |
permissive
|
ec500-software-engineering/exercise-2-ffmpeg-Wwwzff
|
31d5c9d3633812d63b77dc4a262e5d7d357496d2
|
cf34a65a69ffdbd5d4d7bdad3d462b1adbcc40ec
|
refs/heads/master
| 2020-04-23T12:27:34.117591
| 2019-03-03T17:45:10
| 2019-03-03T17:45:10
| 171,169,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
import subprocess
import json
from pathlib import Path
import shutil
# Fail fast at import time if the ffprobe binary is not on PATH.
if not shutil.which('ffprobe'):
raise FileNotFoundError('ffprobe not found')
def ffprobe_sync(filein: Path) -> dict:
    """Return media metadata for *filein* as a dict parsed from ffprobe JSON.

    Raises FileNotFoundError when *filein* is not an existing file, and
    subprocess.CalledProcessError when ffprobe exits non-zero.
    """
    if not Path(filein).is_file():
        raise FileNotFoundError(f'{filein} not found')
    # Ask ffprobe for stream and container info in JSON form.
    cmd = ['ffprobe', '-v', 'warning',
           '-print_format', 'json',
           '-show_streams',
           '-show_format',
           str(filein)]
    raw = subprocess.check_output(cmd, text=True)
    return json.loads(raw)
|
[
"noreply@github.com"
] |
ec500-software-engineering.noreply@github.com
|
60aa38adf37a03e45e87e0f8002e8f86045cb1c7
|
bc988dbc15eb9b78fdfe2d414a76128f7c3e377a
|
/coast_to_coast/coast_to_coast/items.py
|
bf4883de421cc4ac4eb3662847a62ef509df68e0
|
[] |
no_license
|
ourcanadian/ocse-core
|
7226aea63c45bac8dedc22a860416492a3b2a4f0
|
ef4c13d2686cd5f9e36065fb950e06727df13933
|
refs/heads/master
| 2022-11-18T00:53:09.008647
| 2020-07-16T22:30:10
| 2020-07-16T22:30:10
| 273,785,486
| 0
| 1
| null | 2020-07-20T02:43:59
| 2020-06-20T21:16:48
|
Python
|
UTF-8
|
Python
| false
| false
| 293
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class CoastToCoastItem(scrapy.Item):
"""Placeholder scrapy Item for the coast_to_coast spider; no fields defined yet."""
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
[
"rylancole@Rylans-Air.hitronhub.home"
] |
rylancole@Rylans-Air.hitronhub.home
|
e69d0de340d586d8d11fa237a01c82f7d08b656e
|
480b6201e7be081b7535508461165822d77be4f9
|
/firenado/uimodules.py
|
fedca4cfb5adbaf913ff41e9b3a3a06a6c9a7a11
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
piraz/firenado
|
191c366a2e8abe732cd532790fa182f0e1d9abdc
|
358084c72f8c62d6eae547deb5a7e034d25bad33
|
refs/heads/develop
| 2022-08-27T20:03:32.012351
| 2022-07-23T22:13:08
| 2022-07-23T22:13:08
| 36,203,159
| 1
| 0
|
Apache-2.0
| 2020-11-24T16:05:23
| 2015-05-25T01:35:50
|
Python
|
UTF-8
|
Python
| false
| false
| 847
|
py
|
# -*- coding: UTF-8 -*-
#
# Copyright 2015-2021 Flavio Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import firenado.conf
from .util.url_util import rooted_path
import tornado.web
class RootedPath(tornado.web.UIModule):
    """UI module that resolves a path against the application's URL root."""

    def render(self, path):
        # Prefix *path* with the configured url_root_path of the app.
        return rooted_path(firenado.conf.app['url_root_path'], path)
|
[
"piraz@candango.org"
] |
piraz@candango.org
|
e80eb3c5b5b53d881e2fdeefb093eaa0a333eb6c
|
ab3559744255ce55a081daeff6490abae805ce62
|
/tools/options.py
|
0715fba19bcf76c68c2f32e5913b77c7fbe150f6
|
[
"Apache-2.0"
] |
permissive
|
haroonshakeel/SemanticPalette
|
9d5e89c3bc7b33699ae4db2e0d601d0233d284dc
|
a1b02a384c09881d6f1ca1a0c0ebfd87278c3d7d
|
refs/heads/main
| 2023-06-26T22:07:28.045226
| 2021-07-28T14:17:10
| 2021-07-28T14:17:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49,870
|
py
|
import os
import sys
import torch
import pickle
import datetime
import argparse
from argparse import Namespace
from tools import utils
# Semantic class names, in index order, for the Cityscapes label set.
SEM_CITYSCAPES = ['unlabeled', 'ego vehicle', 'rectification border', 'out of roi', 'static', 'dynamic', 'ground',
'road', 'sidewalk', 'parking', 'rail track', 'building', 'wall', 'fence', 'guard rail', 'bridge',
'tunnel', 'pole', 'polegroup', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
'person', 'rider', 'car', 'truck', 'bus', 'caravan', 'trailer', 'train', 'motorcycle', 'bicycle',
'license plate']
# Semantic class names for the IDD label set (name suggests India Driving Dataset).
SEM_IDD = ['road', 'parking', 'drivable fallback', 'sidewalk', 'rail track', 'non-drivable fallback', 'person',
'animal', 'rider', 'motorcycle', 'bicycle', 'autorickshaw', 'car', 'truck', 'bus', 'caravan', 'trailer',
'train', 'vehicle fallback', 'curb', 'wall', 'fence', 'guard rail', 'billboard', 'traffic sign',
'traffic light', 'pole', 'polegroup', 'obs-str-bar-fallback', 'building', 'bridge' , 'tunnel', 'vegetation',
'sky', 'fallback background','unlabeled', 'ego vehicle', 'rectification border', 'out of roi',
'license plate']
# Face-part labels for the CelebA segmentation label set.
SEM_CELEBA = ['null', 'skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip',
'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth']
# ADE label set: 95 classes referred to only by their string index.
SEM_ADE = [str(i) for i in range(95)]
class Options():
def __init__(self):
# Set to True by initialize() once all argument groups are registered.
self.initialized = False
def initialize(self, parser):
    """Register every option group on *parser* and mark initialization done.

    Each group method receives the parser and returns it augmented with
    its own arguments; the parser is threaded through all of them in order.
    """
    for add_group in (self.initialize_base,
                      self.initialize_seg_generator,
                      self.initialize_img_generator,
                      self.initialize_segmentor,
                      self.initialize_extra_dataset):
        parser = add_group(parser)
    self.initialized = True
    return parser
def initialize_base(self, parser):
"""Register the experiment-wide options shared by all models and return *parser*."""
# experiment specifics
parser.add_argument('--name', type=str, default='my_experiment', help='name of the experiment, it indicates where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
# for mixed precision
parser.add_argument('--use_amp', action='store_true', help='if specified, use apex mixed precision')
parser.add_argument('--amp_level', type=str, default='O1', help='O1, O2...')
# for input / output sizes
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--true_dim', type=int, default=1024, help='resolution of saved images')
parser.add_argument('--max_dim', type=int, default=512, help='resolution up to which we wish to train our models')
parser.add_argument('--dim', type=int, default=-1, help='resolution at which to initialize training (has no effect for the seg generator)')
parser.add_argument('--seg_dim', type=int, default=-1, help='resolution at which to generate segmentation (they are then resized to dim)')
parser.add_argument('--force_seg_dim', action='store_true', help='if True, load seg at seg_dim')
parser.add_argument('--bilimax', action='store_true', help='if True, apply bilinear upsampling to seg then max discretizer')
parser.add_argument('--true_ratio', type=float, default=1.0, help='ratio width/height of saved images, final width will be max_dim * aspect_ratio')
parser.add_argument('--aspect_ratio', type=float, default=2.0, help='target width/height ratio')
parser.add_argument('--num_semantics', type=int, default=3, help='number of semantic classes including eventual unknown class')
parser.add_argument('--semantic_labels', type=str, default=[], nargs="+", help='name of the semantic class for each index')
parser.add_argument('--label_nc', type=int, default=None, help='new label for unknown class if there is any')
parser.add_argument('--not_sort', action='store_true', help='if specified, do *not* sort the input paths')
parser.add_argument('--soft_sem_seg', action='store_true', help='apply gaussian blur to semantic segmentation')
parser.add_argument('--soft_sem_prop', type=float, default=0.5, help='amount of final sem map with blur')
parser.add_argument('--transpose', action='store_true', help='transpose the input seg/img')
parser.add_argument('--imagenet_norm', action='store_true', help='normalize images the same way as it is done for imagenet')
parser.add_argument('--colorjitter', action='store_true', help='randomly change the brightness, contrast and saturation of images')
# for setting inputs
parser.add_argument('--dataroot', type=str, default='./datasets/cityscapes/')
parser.add_argument('--dataset', type=str, default='cityscapes')
parser.add_argument('--load_extra', action='store_true', help='if true, load extended version of dataset if available')
parser.add_argument('--load_minimal_info', action='store_true', help='if true, load extended version of dataset if available')
parser.add_argument('--data_idx_type', type=str, default='both', help='(even | odd | both)')
parser.add_argument('--data_city_type', type=str, default='both', help='(a | no_a | both)')
parser.add_argument('--has_tgt', action='store_true', help='if false, tgt cond overrides true cond')
parser.add_argument('--estimated_cond', action='store_true', help='if true, teach a model to generate cond and sample from it')
parser.add_argument('--nearest_cond_index', action='store_true', help='if true, sample data points which corresponds to the nearest cond')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--no_h_flip', action='store_true', help='if specified, do not horizontally flip the images for data argumentation')
parser.add_argument('--no_v_flip', action='store_true', help='if specified, do not vertically flip the images for data argumentation')
parser.add_argument('--resize_img', type=int, nargs="+", default=None, help='if specified, resize images once they are loaded')
parser.add_argument('--resize_seg', type=int, nargs="+", default=None, help='if specified, resize segmentations once they are loaded')
parser.add_argument('--min_zoom', type=float, default=1., help='parameter for augmentation method consisting in zooming and cropping')
parser.add_argument('--max_zoom', type=float, default=1., help='parameter for augmentation method consisting in zooming and cropping')
parser.add_argument('--fixed_crop', type=int, nargs="+", default=None, help='if specified, apply a random crop of the given size')
parser.add_argument('--fixed_top_centered_zoom', type=float, default=None, help='if specified, crop the image to the upper center part')
parser.add_argument('--num_workers', default=8, type=int, help='# threads for loading data')
parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='maximum # of samples allowed per dataset, if the dataset directory contains more than max_dataset_size, only a subset is loaded')
parser.add_argument('--load_from_opt_file', action='store_true', help='loads the options_spade from checkpoints and use that as default')
parser.add_argument('--no_pairing_check', action='store_true', help='if specified, skip sanity check of correct label-image file pairing')
# for panoptic mode
parser.add_argument('--load_panoptic', action='store_true', help='if true, loads both instance and semantic information from segmentation maps, otherwise only semantic information')
parser.add_argument('--instance_type', type=str, default='center_offset', help='combination of (center_offset | (soft_)edge | density)')
parser.add_argument('--things_idx', type=int, nargs="+", default=[], help='indexes corresponding to things (by opposition to stuff)')
parser.add_argument('--max_sigma', type=float, default=8., help='sigma of 2d gaussian representing instance centers for max dim')
# NOTE(review): help text typo "sigmaiiii" (presumably "sigma") — left as-is here.
parser.add_argument('--min_sigma', type=float, default=2., help='sigmaiiii of 2d gaussian representing instance centers for min dim')
parser.add_argument('--center_thresh', type=float, default=0.5, help='threshold to filter instance centers')
# for display and checkpointing
parser.add_argument('--log_freq', type=int, default=100, help='frequency at which logger is updated with images')
parser.add_argument('--save_freq', type=int, default=-1, help='frequency of saving models, if -1 don\'t save')
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest model')
parser.add_argument('--save_path', type=str, default='./')
parser.add_argument('--colormat', type=str, default='', help='name of colormat to display semantic maps')
# for training
parser.add_argument('--niter', type=int, default=1000, help='number of training iterations')
parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero')
parser.add_argument('--iter_function', type=str, default=None, help='(iter | cycle)')
# for testing
parser.add_argument('--nums_fid', type=int, default=100, help='number of samples to generate to compute fid score')
parser.add_argument('--slide_eval', action='store_true', help='if true, eval on sliding window')
parser.add_argument('--multi_scale_eval', action='store_true', help='if true, eval on two scales')
parser.add_argument('--eval_batchsize', type=int, default=16, help='batch size to compute fid')
parser.add_argument('--eval_freq', type=int, default=10, help='frequency for evaluting fid')
parser.add_argument('--no_eval', action='store_true', help='if true, dont do eval')
parser.add_argument('--eval_idx', type=int, nargs="+", default=[], help="selected classes for evaluation")
parser.add_argument('--force_eval_batch_size', type=int, default=None, help='if true, force eval batch size for segmentor')
# for engine
parser.add_argument('--local_rank', type=int, default=0, help='process rank on node')
# for sampler
parser.add_argument('--sampler_weights_method', type=str, default=None, help='(highlight-)(linear | exponential)')
parser.add_argument('--sampler_bias_method', type=str, default=None, help='(highlight-)linear')
parser.add_argument('--sampler_weights_scale', type=float, default=2., help='rescale sampling weights to range [0, sampler_scale]')
parser.add_argument('--sampler_bias_mul', type=float, default=1., help='amplify std for classes that we wish to bias')
parser.add_argument('--sampler_method', type=str, default="", help='(weights-bias | weights | bias)')
# for estimator
parser.add_argument('--estimator_load_path', type=str, default=None, help='load an estimator model from specified folder')
parser.add_argument('--estimator_min_components', type=int, default=1, help='min number of components for gmm model')
parser.add_argument('--estimator_max_components', type=int, default=5, help='max number of components for gmm model')
parser.add_argument('--estimator_force_components', type=int, default=None, help='if not None, fix number of components for gmm model (overrides min and max)')
parser.add_argument('--estimator_n_init', type=int, default=1, help='number of initializations for gmm model')
parser.add_argument('--estimator_iter_data', type=int, default=1, help='number of time to iter through data to extract cond codes')
parser.add_argument('--estimator_projection_mode', type=str, default="approx", help='(approx | iter)')
parser.add_argument('--estimator_force_bias', type=int, nargs="+", default=[], help="force bias to 1 for specified classes")
parser.add_argument('--estimator_filter_idx', type=int, nargs="+", default=[], help="prevent sem classes at given idx from being sampled")
parser.add_argument('--estimator_force_min_class_p', type=float, nargs="+", default=[], help="pair of (class, p) values where surface proportion should be at least p for class")
# for nearest cond indexor
parser.add_argument('--indexor_load_path', type=str, default=None, help='load an indexor model from specified folder')
parser.add_argument('--indexor_normalize', action='store_true', help='if true, in indexor input classes are normalized individually')
# for end-to-end
parser.add_argument('--fake_from_fake_dis', type=str, default="both", help='(d | d2 | both)')
parser.add_argument('--fake_from_real_dis', type=str, default="both", help='(d | d2 | both)')
parser.add_argument('--img_for_d_real', type=str, default="source", help='(source | target | both)')
parser.add_argument('--img_for_d_fake', type=str, default="target", help='(source | target | both)')
parser.add_argument('--img_for_d2_real', type=str, default="target", help='(source | target | both)')
parser.add_argument('--img_for_d2_fake', type=str, default="target", help='(source | target | both)')
parser.add_argument('--sem_only_real', action='store_true', help='if true, compute only semantic alignement for real data')
parser.add_argument('--lambda_d2_from_real', type=float, default=1, help='parameter for second discriminator and fake data')
parser.add_argument('--no_update_seg_model', action='store_true', help='if true, dont update seg model in end-to-end configuration')
parser.add_argument('--eval_dataset', type=str, default="base", help='(base | extra)')
# for offline generation
parser.add_argument('--save_data_path', type=str, default="datasets/cityscapes_synthetic", help='folder in which to store synthetic data')
parser.add_argument('--data_num', type=int, default=2975, help="number of synthetic pairs to generate")
parser.add_argument('--save8bit', action='store_true', help='if true, save semantic segmentation in 8 bit format')
# for visualizer
parser.add_argument('--vis_method', type=str, default="", help='method for visualization')
parser.add_argument('--vis_steps', type=int, default=32, help='method for visualization')
parser.add_argument('--vis_dataloader_bs', type=int, default=1, help='batch size for dataloader')
parser.add_argument('--extraction_path', type=str, default=None, help="folder containing mean style codes")
parser.add_argument('--mean_style_only', action='store_true', help='if true, do not recompute style from image')
parser.add_argument('--addition_mode', action='store_true', help='if true, shape target for partial edition rather than full')
parser.add_argument('--save_full_res', action='store_true', help='if true, save as individual images at full resolution')
parser.add_argument('--vis_ins', action='store_true', help='if true, visualize instance related masks')
parser.add_argument('--vis_random_style', action='store_true', help='if true, load random style instead of mean style for new elements')
# for offline generator
return parser
def initialize_seg_generator(self, parser):
"""Register the segmentation-generator options (all prefixed with --s_) and return *parser*."""
# for model
parser.add_argument('--s_model', type=str, default='progressive', help='(progressive | style)')
parser.add_argument('--s_seg_type', type=str, default='generator', help='(generator | completor)')
parser.add_argument('--s_panoptic', action='store_true', help='if true, panoptic segmentation generation, otherwise semantic segmentation generation')
parser.add_argument('--s_latent_dim', type=int, default=512, help='dimension of the latent vector')
parser.add_argument('--s_max_hidden_dim', type=int, default=512, help='maximum number of hidden feature maps')
parser.add_argument('--s_discretization', type=str, default='gumbel', help='(gumbel | max)')
# for conditional generation
parser.add_argument('--s_cond_seg', type=str, default=None, help='(semantic | instance | panoptic | None)')
parser.add_argument('--s_joints_mul', type=int, default=0, help='number of assisted joints between generator blocks to refine intermediate outputs')
parser.add_argument('--s_joint_type', type=str, default="bias", help='(linear | bias | affine)')
parser.add_argument('--s_cond_mode', default='sem_recover', help='(entropy &| sem_recover &| (weakly_)assisted &| spread &| ins_recover | original_cgan)')
parser.add_argument('--s_filter_cond', action='store_true', help='if specified, sem should represent at least one pixel to be taken into account in assisted activation')
parser.add_argument('--s_pseudo_supervision', action='store_true', help='self supervision for instance related output')
parser.add_argument('--s_lambda_things', type=float, default=1., help='parameter for things related loss')
parser.add_argument('--s_lambda_stuff', type=float, default=1., help='parameter for stuff related loss')
parser.add_argument('--s_lambda_adv_things', type=float, default=1., help='parameter for things gen/dis loss')
parser.add_argument('--s_things_dis', action='store_true', help='if specified, do an extra forward pass in discriminator with things alone')
parser.add_argument('--s_ova_idx', type=int, nargs="+", default=[], help='indices for which we wish to apply the one-versus-all loss')
parser.add_argument('--s_lambda_ova', type=float, default=1., help='parameter for ova loss')
parser.add_argument('--s_lambda_spread', type=float, default=1., help='parameter for spread loss')
# for for input / output sizes
parser.add_argument('--s_things_stuff', action='store_true', help='if specified, treats things and stuff separately')
parser.add_argument('--s_override_num_semantics', type=int, default=None, help='if not None, overrides num semantics')
parser.add_argument('--s_sem_conv', type=int, nargs="+", default=None, help='convert seg classes for img generator')
# for training
parser.add_argument('--s_optimizer', type=str, default='adam')
parser.add_argument('--s_beta1', type=float, default=0.0, help='momentum term of adam')
parser.add_argument('--s_beta2', type=float, default=0.99, help='momentum term of adam')
parser.add_argument('--s_lr', type=float, default=0.001, help='initial learning rate for adam')
parser.add_argument('--s_batch_size_per_res', type=int, nargs="+", default=None, help='overrides batch_size to have a different batch size for every res')
parser.add_argument('--s_iter_function_per_res', type=str, nargs="+", default=None, help='overrides iter_function to have a different iter function for every res')
parser.add_argument('--s_step_mul_per_res', type=float, nargs="+", default=None, help='step multiplier for every res (more epochs for specified res)')
# for display and checkpointing
parser.add_argument('--s_log_per_phase', type=int, default=50, help='number of times logger is updated with images during each phase, overrides log_freq')
parser.add_argument('--s_save_at_every_res', action='store_true', help='save checkpoint when done training at a given res and moving to the next one')
# for loading
parser.add_argument('--s_load_path', type=str, default=None, help='load model from which_iter at specified folder')
parser.add_argument('--s_cont_train', action='store_true', help='continue training with model from which_iter')
parser.add_argument('--s_which_iter', type=int, default=0, help='load the model from specified iteration')
parser.add_argument('--s_force_res', type=int, default=None, help='train model from given res (instead of estimating res from iter)')
parser.add_argument('--s_force_phase', type=str, default=None, help='train model from given phase (instead of estimating phase from iter)')
parser.add_argument('--s_not_strict', action='store_true', help='whether checkpoint exactly matches network architecture')
# for output
parser.add_argument('--s_t', type=float, default=1, help='temperature in softmax')
parser.add_argument('--s_store_masks', action='store_true', help='to keep the masks information in the output')
# for completor
parser.add_argument('--s_vertical_sem_crop', action='store_true', help='if true, crop a random vertical band from sem')
parser.add_argument('--s_min_sem_crop', type=float, default=0.5, help='min prop of image to crop for vertical sem crop')
parser.add_argument('--s_sem_label_crop', type=int, nargs="+", default=[], help='class idx to be cropped')
parser.add_argument('--s_sem_label_ban', type=int, nargs="+", default=[], help='class idx to be banned from the generation process')
parser.add_argument('--s_switch_cond', action='store_true', help='if true, switch from input image cond to target cond')
parser.add_argument('--s_fill_crop_only', action='store_true', help='if true, keep original sem and only replace cropped areas with new sem')
parser.add_argument('--s_norm_G', type=str, default='spectralspadebatch3x3', help='instance normalization or batch normalization')
parser.add_argument('--s_lambda_novelty', type=float, default=1., help='parameter for novelty loss')
parser.add_argument('--s_edge_cond', action='store_true', help='if true, compute target cond by looking at edge of crop')
parser.add_argument('--s_weight_cond_crop', action='store_true', help='if true, weight the sem cond so that it fills the crop')
parser.add_argument('--s_bias_sem', type=int, nargs="+", default=[], help='bias some classes when filling crop')
parser.add_argument('--s_bias_mul', type=float, default=1., help='bias mul to bias some classes when filling crop')
parser.add_argument('--s_merged_activation', action='store_true', help='if true, merge input sem and generated sem in activation')
parser.add_argument('--s_random_soft_mix', action='store_true', help='if true, some tgt code will be close to src')
parser.add_argument('--s_random_linear', action='store_true', help='if true, some tgt code will be close to src')
parser.add_argument('--s_scalnovelty', action='store_true', help='if true, novelty loss based on bhattacharyya distance')
# for style gan 2
parser.add_argument('--s_style_dim', type=int, default=512, help='latent dimension')
parser.add_argument('--s_n_mlp', type=int, default=8, help='number of mlp layers')
parser.add_argument('--s_mixing', type=float, default=0.9, help='number of mlp layers')
return parser
def initialize_img_generator(self, parser):
    """Register all image-generator options (prefix ``i_``) on *parser*.

    Covers generator/discriminator architecture, training hyper-parameters,
    checkpoint loading and loss weights. Returns the same *parser*.
    """
    # experiment specifics
    parser.add_argument('--i_model', type=str, default='pix2pix', help='which model to use')
    parser.add_argument('--i_img_type', type=str, default='generator', help='(generator | style_generator)')
    parser.add_argument('--i_norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization')
    parser.add_argument('--i_norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
    parser.add_argument('--i_norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization')
    # for generator
    # NOTE: i_norm_G's default is overridden per i_netG in update_defaults()
    parser.add_argument('--i_netG', type=str, default='spade', help='selects model to use for netG (condconv | pix2pixhd | spade)')
    parser.add_argument('--i_ngf', type=int, default=64, help='# of gen filters in first conv layer')
    parser.add_argument('--i_init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
    parser.add_argument('--i_init_variance', type=float, default=0.02, help='variance of the initialization distribution')
    parser.add_argument('--i_latent_dim', type=int, default=256, help="dimension of the latent z vector")
    parser.add_argument('--i_num_upsampling_layers', choices=('normal', 'more', 'most'), default='normal', help="if 'more', adds upsampling layer between the two middle resnet blocks, if 'most', also add one more upsampling + resnet layer at the end of the generator")
    parser.add_argument('--i_resnet_n_downsample', type=int, default=4, help='number of downsampling layers in netG')
    parser.add_argument('--i_resnet_n_blocks', type=int, default=9, help='number of residual blocks in the global generator network')
    parser.add_argument('--i_resnet_kernel_size', type=int, default=3, help='kernel size of the resnet block')
    parser.add_argument('--i_resnet_initial_kernel_size', type=int, default=7, help='kernel size of the first convolution')
    # for discriminator
    parser.add_argument('--i_netD_subarch', type=str, default='n_layer', help='architecture of each discriminator')
    parser.add_argument('--i_num_D', type=int, default=2, help='number of discriminators to be used in multiscale')
    parser.add_argument('--i_n_layers_D', type=int, default=3, help='# layers in each discriminator')
    # for instance-wise features
    parser.add_argument('--i_panoptic', action='store_true', help='if true, conditioned on panoptic segmentation, semantic segmentation otherwise')
    parser.add_argument('--i_instance_type_for_img', type=str, default=None, help='combination of (center_offset | (soft_)edge | density), if None same as instance_type')
    parser.add_argument('--i_nef', type=int, default=16, help='# of encoder filters in the first conv layer')
    parser.add_argument('--i_use_vae', action='store_true', help='enable training with an image encoder.')
    # for training
    parser.add_argument('--i_optimizer', type=str, default='adam')
    parser.add_argument('--i_beta1', type=float, default=0.5, help='momentum term of adam')
    parser.add_argument('--i_beta2', type=float, default=0.999, help='momentum term of adam')
    parser.add_argument('--i_lr', type=float, default=0.0002, help='initial learning rate for adam')
    parser.add_argument('--i_D_steps_per_G', type=int, default=1, help='number of discriminator iterations per generator iterations.')
    # for loading
    parser.add_argument('--i_load_path', type=str, default=None, help='load a model from specified folder')
    parser.add_argument('--i_load_path_d2', type=str, default=None, help='load a model from specified folder')
    parser.add_argument('--i_cont_train', action='store_true', help='continue training with model from which_iter')
    parser.add_argument('--i_which_iter', type=int, default=0, help='load the model from specified iteration')
    parser.add_argument('--i_which_iter_d2', type=int, default=0, help='load the model from specified iteration')
    parser.add_argument('--i_not_strict', action='store_true', help='whether checkpoint exactly matches network architecture')
    # for discriminators
    parser.add_argument('--i_ndf', type=int, default=64, help='# of discriminator filters in first conv layer')
    parser.add_argument('--i_lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
    parser.add_argument('--i_lambda_vgg', type=float, default=10.0, help='weight for vgg loss')
    parser.add_argument('--i_no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
    parser.add_argument('--i_no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
    parser.add_argument('--i_gan_mode', type=str, default='hinge', help='(ls|original|hinge)')
    parser.add_argument('--i_netD', type=str, default='multiscale', help='(fpse|n_layers|multiscale|image)')
    parser.add_argument('--i_no_TTUR', action='store_true', help='use TTUR training scheme')
    parser.add_argument('--i_lambda_kld', type=float, default=0.05)
    parser.add_argument('--i_use_d2', action='store_true', help='if true, use an additional discriminator to distinguish real and fake')
    parser.add_argument('--i_lambda_d2', type=float, default=1.0, help='weight for d2 loss')
    # for style generator
    parser.add_argument('--i_status', type=str, default='train', help='status for ACE layer')
    return parser
def initialize_segmentor(self, parser):
    """Register all segmentor options (prefix ``x_``) on *parser*.

    Covers the segmentation network, its SGD training schedule, checkpoint
    and ensemble loading, UDA (adversarial entropy) and synthetic-data
    options. Returns the same *parser*.
    """
    # experiment specifics
    parser.add_argument('--x_model', type=str, default='pspnet', help='(pspnet | deeplabv3)')
    parser.add_argument('--x_segment_eval_classes_only', action="store_true", help="reduce the classes for the segmentor to the eval classes")
    # training
    parser.add_argument('--x_optimizer', type=str, default='sgd')
    parser.add_argument('--x_lr', type=float, default=0.01, help='initial learning rate for adam')
    parser.add_argument('--x_momentum', type=float, default=0.9, help='momentum component of the optimiser')
    parser.add_argument("--x_not_restore_last", action="store_true", help="if specified, do not restore last (FC) layers")
    parser.add_argument("--x_power", type=float, default=0.9, help="decay parameter to compute the learning rate")
    parser.add_argument("--x_weight_decay", type=float, default=0.0005, help="regularisation parameter for L2-loss")
    parser.add_argument("--x_ohem", action="store_true", help="use hard negative mining")
    parser.add_argument("--x_ohem_thres", type=float, default=0.6, help="choose the samples with correct probability under the threshold")
    parser.add_argument("--x_ohem_keep", type=int, default=200000, help="choose the samples with correct probability under the threshold")
    # for loading
    parser.add_argument('--x_load_path', type=str, default=None, help='load a model from specified folder')
    parser.add_argument('--x_cont_train', action='store_true', help='continue training with model from which_iter')
    parser.add_argument('--x_which_iter', type=int, default=0, help='load the model from specified iteration')
    parser.add_argument('--x_pretrained_path', type=str, default=None, help='load a pretrained model from specified path')
    parser.add_argument('--x_not_strict', action='store_true', help='whether checkpoint exactly matches network architecture')
    # for loading ensemble
    parser.add_argument('--x_is_ensemble', action='store_true', help='if true, merge predictions from ensemble of two models')
    parser.add_argument('--x_load_path_2', type=str, default=None, help='load an extra model from specified folder')
    parser.add_argument('--x_which_iter_2', type=int, default=0, help='load the model from specified iteration')
    # for setting inputs
    parser.add_argument('--x_synthetic_dataset', action='store_true', help='training dataset is streaming seg/img pairs from trained generators')
    parser.add_argument('--x_semi', action='store_true', help='only img are generated')
    parser.add_argument('--x_duo', action='store_true', help='train from synthetic and real data')
    parser.add_argument('--x_duo_cond', action='store_true', help='use base and extra datasets to get cond codes')
    parser.add_argument('--x_cond_real_tgt', action='store_true', help='start from conditioning codes from real and tgt datasets')
    # for uda
    parser.add_argument('--x_advent', action='store_true', help='to train with adversarial-entropy uda')
    parser.add_argument('--x_advent_multi', action='store_true', help='if specified, discriminate at two stages')
    parser.add_argument('--x_advent_lr', type=float, default=0.0001, help='initial learning rate for adam')
    parser.add_argument('--x_advent_lambda_adv_final', type=float, default=0.01, help='param for adversarial loss on final seg')
    parser.add_argument('--x_advent_lambda_adv_inter', type=float, default=0.0002, help='param for adversarial loss on intermediate seg')
    # for synthetic data pre processing
    parser.add_argument('--x_sample_fixed_crop', type=int, nargs="+", default=None, help='if specified, apply a random crop of the given size')
    parser.add_argument('--x_sample_random_crop', action='store_true', help='if specified, zoom and apply a random crop while keeping original size')
    # for segmentor plus
    parser.add_argument('--x_plus', action='store_true', help='to use segmentor plus')
    parser.add_argument('--x_separable_conv', action='store_true', help='to use separable conv in segmentor plus')
    parser.add_argument('--x_output_stride', type=int, default=16, help='output stride for segmentor plus')
    return parser
def initialize_extra_dataset(self, parser):
    """Register all extra-dataset options (prefix ``d_``) on *parser*.

    These mirror the base dataset options so a second (e.g. target-domain)
    dataset can be configured independently. Returns the same *parser*.
    """
    # for input / output sizes
    parser.add_argument('--d_true_dim', type=int, default=1024, help='resolution of saved images')
    parser.add_argument('--d_true_ratio', type=float, default=1.0, help='ratio width/height of saved images, final width will be max_dim * aspect_ratio')
    parser.add_argument('--d_num_semantics', type=int, default=3, help='number of semantic classes including eventual unknown class')
    parser.add_argument('--d_semantic_labels', type=str, default=[], nargs="+", help='name of the semantic class for each index')
    parser.add_argument('--d_label_nc', type=int, default=None, help='new label for unknown class if there is any')
    # for setting inputs
    parser.add_argument('--d_dataroot', type=str, default='./datasets/cityscapes/')
    parser.add_argument('--d_dataset', type=str, default=None)
    parser.add_argument('--d_data_idx_type', type=str, default='both', help='(even | odd | both)')
    parser.add_argument('--d_has_tgt', action='store_true', help='if false, tgt cond overrides true cond')
    parser.add_argument('--d_estimated_cond', action='store_true', help='if true, teach a model to generate cond and sample from it')
    parser.add_argument('--d_no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
    parser.add_argument('--d_resize_img', type=int, nargs="+", default=None, help='if specified, resize images once they are loaded')
    parser.add_argument('--d_resize_seg', type=int, nargs="+", default=None, help='if specified, resize segmentations once they are loaded')
    parser.add_argument('--d_max_zoom', type=float, default=1., help='parameter for augmentation method consisting in zooming and cropping')
    parser.add_argument('--d_fixed_top_centered_zoom', type=float, default=None, help='if specified, crop the image to the upper center part')
    parser.add_argument('--d_max_dataset_size', type=int, default=sys.maxsize, help='maximum # of samples allowed per dataset, if the dataset directory contains more than max_dataset_size, only a subset is loaded')
    # for panoptic mode
    parser.add_argument('--d_load_panoptic', action='store_true', help='if true, loads both instance and semantic information from segmentation maps, otherwise only semantic information')
    parser.add_argument('--d_instance_type', type=str, default='center_offset', help='combination of (center_offset | (soft_)edge | density)')
    parser.add_argument('--d_things_idx', type=int, nargs="+", default=[], help='indexes corresponding to things (by opposition to stuff)')
    # for display and checkpointing
    parser.add_argument('--d_colormat', type=str, default='', help='name of colormat to display semantic maps')
    # for estimator
    parser.add_argument('--d_estimator_load_path', type=str, default=None, help='load an estimator model from specified folder')
    # for evaluation
    parser.add_argument('--d_eval_idx', type=int, nargs="+", default=[], help="selected classes for evaluation")
    return parser
def update_defaults(self, opt, parser):
    """Adjust parser defaults based on a first pass of parsed options *opt*.

    Injects per-dataset defaults (cityscapes / idd / celeba) for both the
    base dataset and the extra dataset (``d_`` prefix), resolves the -1
    sentinel for ``dim``/``seg_dim``, and picks the image generator's norm
    layer from ``i_netG``. Returns the same *parser* (caller re-parses).
    """
    # for base options_spade
    if opt.dim == -1:
        parser.set_defaults(dim=opt.max_dim)
    if opt.seg_dim == -1:
        seg_dim_default = opt.dim if opt.dim != -1 else opt.max_dim
        parser.set_defaults(seg_dim=seg_dim_default)
    if opt.dataset == "cityscapes":
        parser.set_defaults(dataroot="datasets/cityscapes")
        parser.set_defaults(num_semantics=35)
        parser.set_defaults(label_nc=34)
        parser.set_defaults(true_ratio=2.0)
        parser.set_defaults(i_num_upsampling_layers='more')
        parser.set_defaults(things_idx=[24, 25, 26, 27, 28, 29, 30, 31, 32, 33])
        parser.set_defaults(semantic_labels=SEM_CITYSCAPES)
        parser.set_defaults(colormat="cityscapes_color35")
        parser.set_defaults(true_dim=1024)
        parser.set_defaults(no_h_flip=True)
        parser.set_defaults(eval_idx=[7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33])
    if opt.dataset == "idd":
        parser.set_defaults(dataroot="datasets/idd")
        parser.set_defaults(resize_img=[720, 1280])
        parser.set_defaults(num_semantics=40)
        parser.set_defaults(label_nc=35)
        parser.set_defaults(true_ratio=1.77777777777)
        parser.set_defaults(i_num_upsampling_layers='more')
        parser.set_defaults(things_idx=[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
        parser.set_defaults(semantic_labels=SEM_IDD)
        parser.set_defaults(colormat="idd_color40")
        parser.set_defaults(true_dim=720)
        parser.set_defaults(no_h_flip=True)
        parser.set_defaults(eval_idx=[0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
    if opt.dataset == "celeba":
        parser.set_defaults(dataroot="datasets/celeba")
        # parser.set_defaults(dataroot="/datasets_local/CelebAMask-HQ/CelebAMask-HQ")
        parser.set_defaults(num_semantics=19)
        parser.set_defaults(label_nc=0)
        parser.set_defaults(true_ratio=1.0)
        parser.set_defaults(i_num_upsampling_layers='normal')
        parser.set_defaults(semantic_labels=SEM_CELEBA)
        parser.set_defaults(colormat="celeba_color19")
        parser.set_defaults(true_dim=512)
        parser.set_defaults(no_h_flip=True)
        parser.set_defaults(aspect_ratio=1)
        parser.set_defaults(resize_img=[512, 512])
    # for extra dataset
    if opt.d_dataset == "cityscapes":
        parser.set_defaults(d_dataroot="datasets/cityscapes")
        parser.set_defaults(d_num_semantics=35)
        parser.set_defaults(d_label_nc=34)
        parser.set_defaults(d_true_ratio=2.0)
        parser.set_defaults(d_things_idx=[24, 25, 26, 27, 28, 29, 30, 31, 32, 33])
        parser.set_defaults(d_semantic_labels=SEM_CITYSCAPES)
        parser.set_defaults(d_colormat="cityscapes_color35")
        parser.set_defaults(d_true_dim=1024)
        parser.set_defaults(d_no_h_flip=True)
        parser.set_defaults(d_eval_idx=[7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33])
    if opt.d_dataset == "idd":
        parser.set_defaults(d_dataroot="datasets/idd")
        parser.set_defaults(d_resize_img=[720, 1280])
        parser.set_defaults(d_num_semantics=40)
        parser.set_defaults(d_label_nc=35)
        parser.set_defaults(d_true_ratio=1.77777777777)
        parser.set_defaults(d_things_idx=[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
        parser.set_defaults(d_semantic_labels=SEM_IDD)
        parser.set_defaults(d_colormat="idd_color40")
        parser.set_defaults(d_true_dim=720)
        parser.set_defaults(d_no_h_flip=True)
        parser.set_defaults(d_eval_idx=[0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
    # for img generator options_spade
    if opt.i_instance_type_for_img is None:
        parser.set_defaults(i_instance_type_for_img=opt.instance_type)
    if opt.i_netG == "spade":
        parser.set_defaults(i_norm_G='spectralspadesyncbatch3x3')
    if opt.i_netG == "condconv":
        parser.set_defaults(i_norm_G='spectralbatch')
    if opt.i_netG == "pix2pixhd":
        parser.set_defaults(i_norm_G='instance')
    return parser
def gather_options(self):
    """Build the parser, parse CLI args, and return the option namespace.

    Parses twice so that dataset-dependent defaults injected by
    ``update_defaults`` take effect, and a third time when options are
    overridden from a saved opt file. Stores the final parser on ``self``.
    """
    # initialize parser with basic options_spade
    if not self.initialized:
        # NOTE(review): if self.initialized were ever True here, `parser`
        # below would be unbound (NameError) — presumably this is only
        # called once per instance; confirm against callers.
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)
    # get options_spade
    opt = parser.parse_args()
    # modify some defaults based on parser options_spade
    parser = self.update_defaults(opt, parser)
    opt = parser.parse_args()
    # if there is opt_file, load it.
    # The previous default options_spade will be overwritten
    if opt.load_from_opt_file:
        parser = self.update_options_from_file(parser, opt)
        opt = parser.parse_args()
    self.parser = parser
    return opt
def print_options(self, opt, opt_type, opt_prefix=""):
    """Pretty-print all options in *opt* with banners around them.

    Values that differ from the parser default are annotated with the
    default; *opt_prefix* is prepended to each key when looking up the
    default (e.g. "s_" for seg-generator options).
    """
    def banner(text):
        # 50-dash padding, shrunk by half the banner text's length
        pad = '-' * max(50 - len(text) // 2, 0)
        return pad + ' ' + text + ' ' + pad + '\n'

    lines = [banner(opt_type + " Options")]
    for key, value in sorted(vars(opt).items()):
        default = self.parser.get_default(opt_prefix + key)
        note = '' if value == default else '\t[default: %s]' % str(default)
        lines.append('{:>25}: {:<30}{}\n'.format(str(key), str(value), note))
    lines.append(banner(opt_type + " End"))
    print(''.join(lines))
def option_file_path(self, opt, signature, makedir=False):
    """Return the option-file path (no extension) for the run *signature*.

    The file lives under ``<save_path>/checkpoints/<signature>/opt``;
    with ``makedir=True`` the directory tree is created first.
    """
    checkpoint_dir = os.path.join(opt.save_path, "checkpoints", signature)
    if makedir:
        utils.mkdirs(checkpoint_dir)  # project helper — creates the tree
    return os.path.join(checkpoint_dir, 'opt')
def save_options(self, opt, signature):
    """Persist *opt* twice under the checkpoint dir for *signature*:
    a human-readable ``opt.txt`` (non-default values annotated) and a
    pickled ``opt.pkl`` for exact reloading.
    """
    base = self.option_file_path(opt, signature, makedir=True)
    with open(base + '.txt', 'wt') as txt_file:
        for key, value in sorted(vars(opt).items()):
            default = self.parser.get_default(key)
            note = '\t[default: %s]' % str(default) if value != default else ''
            txt_file.write('{:>25}: {:<30}{}\n'.format(str(key), str(value), note))
    with open(base + '.pkl', 'wb') as pkl_file:
        pickle.dump(opt, pkl_file)
def update_options_from_file(self, parser, opt):
    """Override parser defaults with values from a previously saved opt file.

    Only keys present on both the saved namespace and *opt*, and whose
    values differ, are pushed back into *parser* as new defaults.
    """
    saved = self.load_options(opt)
    for key, current in sorted(vars(opt).items()):
        if not hasattr(saved, key):
            continue
        loaded = getattr(saved, key)
        if current != loaded:
            parser.set_defaults(**{key: loaded})
    return parser
def load_options(self, opt):
    """Load and return the pickled options namespace for this run.

    Bug fix: ``option_file_path`` takes a required ``signature`` positional
    that the original call omitted, so this method always raised TypeError.
    We use ``opt.signature`` when available, falling back to ``opt.name``
    (the components the signature is built from) — TODO confirm which
    signature callers intend when loading from an opt file.
    Also closes the pickle file via a context manager (the original leaked
    the handle).
    """
    signature = getattr(opt, 'signature', None) or getattr(opt, 'name', '')
    file_name = self.option_file_path(opt, signature, makedir=False)
    with open(file_name + '.pkl', 'rb') as pkl_file:
        return pickle.load(pkl_file)
def split_options(self, opt):
    """Split the flat option namespace into per-component namespaces.

    Prefix routing: ``s_`` -> seg generator, ``i_`` -> img generator,
    ``x_`` -> segmentor, ``d_`` -> extra dataset; everything else goes to
    the base namespace. Prefixes are stripped from the routed keys.
    """
    buckets = {
        "s_": Namespace(),  # seg generator
        "i_": Namespace(),  # img generator
        "x_": Namespace(),  # segmentor
        "d_": Namespace(),  # extra dataset
    }
    base_opt = Namespace()
    for key, value in sorted(vars(opt).items()):
        bucket = buckets.get(key[:2])
        if bucket is not None:
            setattr(bucket, key[2:], value)
        else:
            setattr(base_opt, key, value)
    return base_opt, buckets["s_"], buckets["i_"], buckets["x_"], buckets["d_"]
def copy_options(self, target_options, source_options, new_only=False):
    """Copy every attribute of *source_options* onto *target_options* in place.

    With ``new_only=True``, attributes already present on the target are
    left untouched (target wins); otherwise the source overwrites them.
    """
    for key, value in sorted(vars(source_options).items()):
        if new_only and key in target_options:
            continue  # keep the target's existing value
        setattr(target_options, key, value)
def override_num_semantics(self, opt):
    """Replace ``opt.num_semantics`` with ``opt.override_num_semantics`` when set."""
    new_count = opt.override_num_semantics
    if new_count is None:
        return
    print(f"Overriding num_semantics from {opt.num_semantics} to {new_count}")
    opt.num_semantics = new_count
def set_cond_dim(self, opt):
    """Derive ``opt.cond_dim`` from the conditioning mode ``opt.cond_seg``.

    semantic -> num_semantics, instance -> num_things,
    panoptic -> num_semantics + num_things, anything else -> 0.
    """
    mode = opt.cond_seg
    if mode == "semantic":
        opt.cond_dim = opt.num_semantics
    elif mode == "instance":
        opt.cond_dim = opt.num_things
    elif mode == "panoptic":
        opt.cond_dim = opt.num_semantics + opt.num_things
    else:
        opt.cond_dim = 0
def set_seg_size(self, opt):
    """Compute ``opt.seg_size``: semantic channels plus panoptic extras.

    In panoptic mode, extra channels are added depending on which encodings
    ``opt.instance_type`` mentions: "density" adds one channel per thing
    class, "center_offset" adds 3, "edge" adds 1 (substring match, so
    "soft_edge" also counts).
    """
    total = opt.num_semantics
    if opt.panoptic:
        encoding = opt.instance_type
        extras = 0
        if "density" in encoding:
            extras += opt.num_things
        if "center_offset" in encoding:
            extras += 3
        if "edge" in encoding:
            extras += 1
        total += extras
    opt.seg_size = total
def parse(self, load_seg_generator=False, load_img_generator=False, load_segmentor=False,
          load_extra_dataset=False, save=False):
    """Parse CLI options, split them per component, derive dependent fields,
    and return a dict of namespaces keyed by component name.

    The ``load_*`` flags control which component namespaces are kept
    (others are set to None); ``save`` persists the options to disk.
    Base options are copied onto every component namespace at the end.
    """
    opt = self.gather_options()
    # unique run signature: timestamp + experiment name
    signature = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + "-" + opt.name
    base_opt, seg_generator_opt, img_generator_opt, segmentor_opt, extra_dataset_opt = self.split_options(opt)
    # only rank 0 saves/prints (distributed training)
    if base_opt.local_rank == 0:
        if save:
            self.save_options(opt, signature)
        self.print_options(base_opt, "Base")
        if load_seg_generator:
            self.print_options(seg_generator_opt, "Segmentation Generator", "s_")
        if load_img_generator:
            self.print_options(img_generator_opt, "Image Generator", "i_")
        if load_segmentor:
            self.print_options(segmentor_opt, "Segmentor", "x_")
        if load_extra_dataset:
            self.print_options(extra_dataset_opt, "Extra dataset", "d_")
    # set gpu ids
    str_ids = base_opt.gpu_ids.split(',')
    base_opt.gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)  # NOTE(review): shadows builtin `id` within this loop
        if id >= 0:
            base_opt.gpu_ids.append(id)
    # set num of things
    base_opt.num_things = len(base_opt.things_idx)
    extra_dataset_opt.num_things = len(extra_dataset_opt.things_idx)
    # set additional paths
    base_opt.checkpoint_path = os.path.join(base_opt.save_path, "checkpoints", signature)
    base_opt.log_path = os.path.join(base_opt.save_path, "logs", signature)
    assert (base_opt.max_dim & (base_opt.max_dim - 1)) == 0, f"Max dim {base_opt.max_dim} must be power of two."
    # set width size
    if base_opt.fixed_crop is None:
        base_opt.width_size = int(base_opt.dim * base_opt.aspect_ratio)
        base_opt.height_size = int(base_opt.width_size / base_opt.aspect_ratio)
    else:
        base_opt.height_size, base_opt.width_size = base_opt.fixed_crop
    # set semantic labels
    if len(base_opt.semantic_labels) == 0:
        base_opt.semantic_labels = ["noname"] * base_opt.num_semantics
    # set sem_conv: flat [k1, v1, k2, v2, ...] list -> {k1: v1, k2: v2, ...}
    if seg_generator_opt.sem_conv is not None:
        def pairwise(iterable):
            "s -> (s0, s1), (s2, s3), (s4, s5), ..."
            a = iter(iterable)
            return zip(a, a)
        seg_generator_opt.sem_conv = {i: j for i, j in pairwise(seg_generator_opt.sem_conv)}
    # set stuff idx: every semantic index that is not a thing
    base_opt.stuff_idx = [i for i in range(base_opt.num_semantics) if i not in base_opt.things_idx]
    # set signature
    base_opt.signature = signature
    # propagate base options onto each component namespace
    self.copy_options(seg_generator_opt, base_opt)
    self.copy_options(img_generator_opt, base_opt)
    self.copy_options(segmentor_opt, base_opt)
    self.copy_options(extra_dataset_opt, base_opt, new_only=True)
    # set num semantics
    self.override_num_semantics(seg_generator_opt)
    # set cond dim
    self.set_cond_dim(seg_generator_opt)
    # set seg size
    self.set_seg_size(seg_generator_opt)
    self.base_opt = base_opt
    self.seg_generator_opt = seg_generator_opt if load_seg_generator else None
    self.img_generator_opt = img_generator_opt if load_img_generator else None
    self.segmentor_opt = segmentor_opt if load_segmentor else None
    self.extra_dataset_opt = extra_dataset_opt if load_extra_dataset else None
    self.opt = {"base": self.base_opt,
                "seg_generator": self.seg_generator_opt,
                "img_generator": self.img_generator_opt,
                "segmentor": self.segmentor_opt,
                "extra_dataset": self.extra_dataset_opt}
    return self.opt
|
[
"16lemoing@gmail.com"
] |
16lemoing@gmail.com
|
9fda4a063ff5883911845494534b63b5d074e989
|
434ba39a150d53a08ac22b03b02a3772149c87d7
|
/cognitiveToken.py
|
292ed4f879a5c91cffad1012abdd56ead0e3f391
|
[] |
no_license
|
chris-geelhoed/TranslateComparison
|
1d0de3fff4177df6a91ed0eb5001c078fcb167cb
|
c8d0033b8b82859c5ab3f9ca3b2fed80e3a1a62d
|
refs/heads/master
| 2021-06-14T12:16:41.255722
| 2017-02-27T21:28:56
| 2017-02-27T21:28:56
| 83,352,696
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
import requests
import pickle
import time
def forceToken():
    """Request a fresh auth token from the Cognitive Services endpoint,
    cache it to ``tokenObj.pickle`` with its issue time, and return it.

    Fix: the cache file is now written inside a ``with`` block so the
    handle is closed even if pickling fails (the original leaked it on
    error paths).
    """
    Subscription_Key = "YOUR SUBSCRIPTION KEY GOES HERE!"
    url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken"  ## endpoint
    # set the sub key in the post header
    headers = {
        "Ocp-Apim-Subscription-Key": Subscription_Key
    }
    token = requests.post(url, headers=headers).text  # get token from API
    # cache the token together with its issue time so getToken() can
    # judge freshness later
    tokenObj = {
        "time": time.time(),
        "token": token
    }
    with open("tokenObj.pickle", "wb") as tokenFile:
        pickle.dump(tokenObj, tokenFile)
    return token
def getToken():
    """Return the cached auth token if it is less than 8 minutes old,
    otherwise fetch (and cache) a fresh one via ``forceToken``.

    Fixes: the cache file is read with a context manager (original never
    closed it), and the bare ``except:`` — which also swallowed
    KeyboardInterrupt/SystemExit — is narrowed to the cache-read failures
    we actually want to recover from.
    """
    try:
        with open("tokenObj.pickle", "rb") as tokenFile:
            tokenObj = pickle.load(tokenFile)
        timePassed = abs(time.time() - tokenObj["time"])
        ## as long as token is less than 8 minutes old, go ahead and use it
        if timePassed < 8 * 60:
            return tokenObj["token"]
    except (OSError, pickle.PickleError, EOFError, KeyError, TypeError):
        pass  # any cache problem: fall through and request a new token
    return forceToken()  # otherwise make call to API for token
|
[
"chris.geelhoed@gmail.com"
] |
chris.geelhoed@gmail.com
|
0e673f86614d4c80ec7051408adc47ce97e50aa5
|
75e8e2cb626c51b33ef8fe9afdbdb2e657b5cbb8
|
/PREDEFINIDAS 2.py
|
6358e2bf4dfcd3febb5260afd6840e96fcb7b156
|
[] |
no_license
|
lgauing/Guias-dispositivas-y-ejercicios-en-clase_Lady
|
23f6fdab9955e3cc6b719af538e1f2b4c80b29d8
|
d2f8a66d762c4028ab9817aeb098e71e42281d1a
|
refs/heads/main
| 2023-06-06T05:26:49.369585
| 2021-06-27T17:17:42
| 2021-06-27T17:17:42
| 380,863,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
# Lady Mishell Gauin Gusñay
# 3rd semester of software, A1
# string (cadena) built-in functions demo
# Fix: the closing quote on 'Python' was a typographic apostrophe (’),
# which is a SyntaxError in Python source; replaced with a plain quote.
mensaje = 'Hola ' + 'mundo ' + 'Python'
men1 = mensaje.split()   # -> ['Hola', 'mundo', 'Python']
men2 = ' '.join(men1)    # rejoin the words with single spaces
print(mensaje[0], mensaje[0:4], men1, men2)
print(mensaje.find("mundo"), mensaje.lower())
|
[
"85961525+lgauing@users.noreply.github.com"
] |
85961525+lgauing@users.noreply.github.com
|
0e7d07db7b2b932cebb3c06cd3a86a09652bb50a
|
8ae894191b3e411e9fd0155c7ac3b025664d5f17
|
/chapter02/knock16.py
|
1bbb382abbd19c38e122df598df233cc30baa89f
|
[] |
no_license
|
shirai-ryo/100knock2017
|
2589dd652c474e9c1e1cfc440c0b9d3156fbca71
|
01489f589f80167fe47a492d5296b394f543f821
|
refs/heads/master
| 2020-03-07T06:11:44.502714
| 2018-03-29T16:27:27
| 2018-03-29T16:27:27
| 127,315,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# Print the contents of hightemp.txt, inserting a blank separator line
# after every N lines (N read from stdin).
# Fixes: the original also printed a blank line BEFORE the first line
# (count 0 satisfies count % N == 0), and never closed the file on error;
# the file is now managed with a context manager.
N = int(input())
with open('hightemp.txt', 'r') as f:
    for count, line in enumerate(f):
        if count % N == 0 and count > 0:
            print()  # separator after each group of N lines
        # NOTE(review): `line` keeps its trailing '\n', so print() here
        # double-spaces the output — kept as in the original; confirm intent.
        print(line)
|
[
"noreply@github.com"
] |
shirai-ryo.noreply@github.com
|
2f523bf95ffba1f04fc4fa165b6176d6fc5f4a81
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_5/mbyken001/question2.py
|
1d7d0e32e1a97140ba0c5c5b551cb774e63aec54
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
#Assignment 5-Question 2
#Vending machine
#Enter cost
# NOTE(review): eval() on raw user input executes arbitrary code — prefer
# int(input(...)) for all three prompts below (kept as-is in this pass).
cost = eval( input ( "Enter the cost (in cents):\n" ) )
if cost <=0:
    # non-positive cost: nothing to vend, print an empty line
    print("")
else:
    deposit= eval (input ( "Deposit a coin or note (in cents):\n" ) )
    i = cost-deposit
    # keep asking for coins until the deposit covers the cost;
    # `i` is never updated — the loop exits via the break below
    while i>0:
        extra= eval (input ( "Deposit a coin or note (in cents):\n" ) )
        deposit = deposit + extra
        if deposit >= cost:
            break
    change = deposit-cost
    #Calculating change
    # greedy breakdown: dollars, then quarters, dimes, nickels, pennies
    dollar = change//100
    quarter = ( change%100 ) // 25
    dimes = ( ( change % 100 ) % 25) // 10
    nickels = ( ( (change % 100 ) % 25) %10 ) //5
    pennies = ( ( ( (change % 100) % 25) % 10) % 5)
    #Displaying change:
    if change == 0:
        print("")
    else:
        print( "Your change is:" )
        # only print the denominations actually owed
        if dollar > 0:
            print( dollar, "x $1" )
        if quarter > 0:
            print( quarter, "x 25c" )
        if dimes > 0:
            print( dimes, "x 10c" )
        if nickels > 0:
            print( nickels, "x 5c" )
        if pennies > 0:
            print( pennies, "x 1c")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
cb568bf4ed3f69faaacfeeaba7b3796ae3a90ab1
|
3b7abb4b1f7da65dc401200b183b648200325026
|
/admin_service.py
|
fd9027e79061941133d862945dd0761938a5d066
|
[] |
no_license
|
votesappteam/rest-service-python
|
b44a2f4e3434de1c3841ace32fa359aa65df895c
|
bdb5a90a2a94fc92ec4386e448045a7f044a3e90
|
refs/heads/master
| 2023-04-11T08:53:29.348654
| 2021-04-26T13:46:48
| 2021-04-26T13:46:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,928
|
py
|
# Module setup for the admin REST service: Flask app, MySQL (SQLAlchemy),
# MongoDB (PyMongo) and upload configuration, driven by config.ini.
import configparser
import datetime
import math
import random
# import json
import re
import string
import flask
from flask import Flask, render_template, request, redirect, url_for, session, flash
# import pymysql
from flask_pymongo import PyMongo
from flask_sqlalchemy import SQLAlchemy
from source.mail_service import sendemail
app = Flask(__name__)
# Firebase related
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Read the config file
configFile = '/var/www/votesapp-rest/config.ini'
firebase_key = '/var/www/votesapp-rest/testvotes-d4cd7-firebase-adminsdk-oqlux-66b40b5463.json'
config = configparser.ConfigParser()
config.read(configFile)
# PySQL configurations
userpass = config['MYSQLDB']['USERPASS']
basedir = '127.0.0.1'
dbname = '/votesapp_db'
socket = config['MYSQLDB']['SOCKET']
dbname = dbname + socket
app.config['SECRET_KEY'] = config['SECURITY']['SECRET_KEY']
app.config['SQLALCHEMY_DATABASE_URI'] = userpass + basedir + dbname
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Upload file
UPLOAD_FOLDER = '/var/www/votesapp-rest/nsfw-images'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16 MB upload cap
mongo_connect_str = 'mongodb://localhost:27017/'
mongo_db = 'votesapp_db'
# Mongo configuration
app.config['MONGO_DBNAME'] = 'votesapp_db'
app.config['MONGO_URI'] = mongo_connect_str + mongo_db
mongo = PyMongo(app)
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# NOTE(review): this rebinds the `session` imported from flask to a plain
# module-level dict — state here is shared across ALL clients/requests,
# not per-user as Flask's session would be. Confirm this is intentional.
session = {}
session['valid_email'] = False
session['abuseListToHTML'] =[]
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
# Use a service account
# Initialize Firebase with the service-account key read from config above,
# and create the Firestore + SQLAlchemy clients used by the routes below.
cred = credentials.Certificate(firebase_key)
firebase_admin.initialize_app(cred)
fire_db = firestore.client()
# https://firebase.google.com/docs/firestore/quickstart#python
db = SQLAlchemy(app)
class web_user(db.Model):
    """SQLAlchemy model for an admin-console user account."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(150))   # login identifier; OTPs are mailed here
    created_by = db.Column(db.String(50))
    role = db.Column(db.String(15))
    empid = db.Column(db.String(45))
    active = db.Column(db.Boolean)      # inactive accounts are refused at login
    # Fix: the original passed default=datetime.datetime.utcnow() — the call
    # is evaluated ONCE at import time, so every row got the server start-up
    # timestamp. Passing the callable makes SQLAlchemy call it per INSERT.
    created_dt = db.Column(db.DateTime, default=datetime.datetime.utcnow)
class web_user_otp(db.Model):
    """SQLAlchemy model for a one-time password issued during web login."""
    id = db.Column(db.Integer, primary_key=True)
    request_id = db.Column(db.Integer)
    otp = db.Column(db.Integer)
    # Fix: default=datetime.datetime.utcnow() was evaluated once at import
    # time, stamping every row with the server start-up time; pass the
    # callable so SQLAlchemy evaluates it per INSERT.
    requested_dt = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    requested_by = db.Column(db.String(45))   # email the OTP was sent to
    otp_used = db.Column(db.Boolean)
    otp_expired = db.Column(db.Boolean)
class new_brand_requests(db.Model):
    """SQLAlchemy model for a user-submitted brand creation request."""
    id = db.Column(db.Integer, primary_key=True)
    brandtype = db.Column(db.String(45))
    branddescription = db.Column(db.String(300))
    brandname = db.Column(db.String(70))
    brandcategory = db.Column(db.String(45))
    brandemail = db.Column(db.String(150))
    brandwebpage = db.Column(db.String(250))
    active = db.Column(db.Boolean)
    brand_id = db.Column(db.String(45))
    claimed = db.Column(db.Boolean)
    # Fix (both timestamp columns): default=datetime.datetime.utcnow() was
    # evaluated once at import time, stamping every row with the server
    # start-up time; pass the callable so it is evaluated per INSERT.
    posted_by_dt = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    status_change_dt = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    posted_by_user = db.Column(db.String(300))
    decision = db.Column(db.String(15))       # moderation outcome
    decision_reason = db.Column(db.String(100))
    modified_by = db.Column(db.String(155))
def generate_otp():
    """Return a random 8-digit one-time password as a string.

    Security fix: OTPs are authentication secrets, so this now draws from
    the cryptographically secure ``secrets`` module instead of ``random``
    (which is predictable). Leading zeros are possible; callers that need
    an int can still do ``int(generate_otp())`` as before.
    """
    import secrets  # local import so the module's import block is untouched
    digits = "0123456789"
    return ''.join(secrets.choice(digits) for _ in range(8))
def generate_request_id():
    """Return a random 16-character alphanumeric request id.

    Security fix: request ids key the OTP flow, so they must be
    unpredictable — switched from ``random.choices`` to ``secrets.choice``.
    """
    import secrets  # local import so the module's import block is untouched
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(16))
# Reference --> https://codeshack.io/login-system-python-flask-mysql/
@app.route('/', methods=['GET', 'POST'])
def login():
    """Email-based login: on POST, look up the account, and if it is
    active, issue an OTP (stored in MySQL and emailed) and redirect to the
    OTP verification page; otherwise re-render the login form with a
    message.

    NOTE(review): `session` here is the module-level dict defined above,
    shared across all clients — not Flask's per-user session. Confirm.
    """
    ip_address = flask.request.remote_addr
    print(ip_address)
    error = None  # NOTE(review): never used — msg carries the error text
    # Output message if something goes wrong...
    msg = ''
    # Check if "username" and "password" POST requests exist (user submitted form)
    if request.method == 'POST' and 'email' in request.form:
        # Create variables for easy access
        email = request.form['email']
        print(email)
        # Check if account exists using MySQL
        # email = username+"@votesapp.co.in"
        if email:
            active = web_user.query.filter(web_user.email == email).first()
            if active:
                if active.active == False:
                    return 'Account is not active'
                # Create session data, we can access this data in other routes
                session['id'] = active.id
                session['email'] = active.email
                session['valid_email'] = True
                # issue and persist a fresh OTP tied to a random request id
                otp = int(generate_otp())
                request_id = generate_request_id()
                requested_by = session['email']
                otp_request = web_user_otp(otp=otp, request_id=request_id, requested_by=requested_by, otp_used=False,
                                           otp_expired=False)
                db.session.add(otp_request)
                db.session.commit()
                sendemail(requested_by, "Your OTP for login", str(otp))
                # Redirect to home page
                flash('Moving to OTP auth')
                return redirect(url_for('otp_verify'))
            else:
                session['valid_email'] = False
                print("Email not found or not active")
                msg = 'Email not found or not active'
        else:
            # Account doesnt exist or username/password incorrect
            session['valid_email'] = False
            print("Invalid email")
            msg = 'Invalid email'
    # Show the login form with message (if any)
    return render_template('index.html', msg=msg)
@app.route('/otp', methods=['GET', 'POST'])
def otp_verify():
    """Login step 2: accept the emailed OTP and open the session.

    Requires login() to have set session['valid_email'] first. On a correct,
    unused OTP the record is marked used/expired and the user is logged in.
    """
    # Refuse direct access before the email step has succeeded.
    print(session['valid_email'])
    if session['valid_email'] == False:
        print("Email not validated")
        return redirect(url_for('login'))
    # Message shown on the form if anything goes wrong.
    msg = ''
    if request.method == 'POST' and 'otp_input' in request.form:
        print("Inside OTP Form")
        otp_web = request.form['otp_input']
        if 'cancel' in request.form:
            return redirect(url_for('login'))
        if 'verify' in request.form:
            print("Verify clicked")
            print(otp_web)
            # NOTE(review): the lookup matches on the OTP value alone — it is
            # not scoped to this session's request_id/requested_by, so any
            # outstanding OTP for any user would pass; confirm intent. OTPs
            # also never expire by time, only on use.
            otp = web_user_otp.query.filter(web_user_otp.otp == otp_web).first()
            print(otp)
            if otp:
                if otp.otp_used == True or otp.otp_expired == True:
                    msg = 'OTP is expired'
                    return render_template('otp_verify.html', msg=msg)
                # Mark the session authenticated; other routes key off this.
                session['loggedin'] = True
                # Single-use: burn the OTP record.
                otp.otp_used = True
                otp.otp_expired = True
                db.session.commit()
                # Redirect to home page
                return redirect(url_for('home'))
            else:
                # No matching OTP record found.
                msg = 'Invalid OTP'
    # Show the OTP form with message (if any)
    return render_template('otp_verify.html', msg=msg)
# http://localhost:5000/python/logout - this will be the logout page
@app.route('/admin/logout')
def logout():
# Remove session data, this will log the user out
session.clear()
#response.headers["Cache-Control"] = "no-store, no-cache, must-revalidate, post-check=0, pre-check=0"
session['valid_email'] = False
session['loggedin'] = False
session.pop('loggedin', None)
session['abuseListToHTML'] = []
session.pop('id', None)
session.pop('email', None)
# Redirect to login page
return redirect(url_for('login'))
# http://localhost:5000/pythinlogin/register - this will be the registration page, we need to use both GET and POST requests
@app.route('/register', methods=['GET', 'POST'])
def register():
# Output message if something goes wrong...
msg = ''
print("Inside register")
# Check if "username", "password" and "email" POST requests exist (user submitted form)
# if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form:
if request.method == 'POST':
# Create variables for easy access
username = request.form['username']
password = request.form['password']
role = request.form['role']
print(username, password, role)
email = username + "@votesapp.co.in"
account = web_user.query.filter(web_user.email == email).first()
# If account exists show error and validation checks
if account:
msg = 'Account already exists!'
elif not re.match(r'[^@]+@[^@]+\.[^@]+', email):
msg = 'Invalid email address!'
elif not re.match(r'[A-Za-z0-9]+', username):
msg = 'Username must contain only characters and numbers!'
elif not username or not password or not role:
msg = 'Please fill out the form and submit!'
else:
# Account doesnt exists and the form data is valid, now insert new account into accounts table
register_user = web_user(email=email, password=password, role=role, active=False)
db.session.add(register_user)
db.session.commit()
msg = 'You have successfully registered!'
# elif request.method == 'POST':
# Form is empty... (no POST data)
# msg = 'Please fill out the form and submit!'
# Show registration form with message (if any)
return render_template('register.html', msg=msg)
# http://localhost:5000/pythinlogin/home - this will be the home page, only accessible for loggedin users
@app.route('/branactivities')
def home():
# Check if user is loggedin
if 'loggedin' in session:
# User is loggedin show them the home page
brands = new_brand_requests.query.filter(new_brand_requests.decision != 'approved',
new_brand_requests.active == False)
return render_template('Brand_approval_activities.html', brands=brands, username=session['email'])
# User is not loggedin redirect to login page
return redirect(url_for('login'))
@app.route('/abuseactivities')
def abuse():
    """Show the moderation queue of the top abuse-reported questions.

    The first visit pulls up to 10 flagged questions from Firestore (plus a
    short-lived signed image URL for each from Cloud Storage) and caches
    them in session['abuseListToHTML']; later visits serve the cache until
    every cached item has been actioned.
    """
    # Check if user is loggedin
    if 'loggedin' in session:
        #At first time we fetch top 10 abuse records, for every back and forth, we should not go to firebase and fetch again and again.. So we action on all items in the list then on ly go to firebase
        if len(session['abuseListToHTML']) > 0 :
            return render_template('Abuse_questions_activities.html', questions=session['abuseListToHTML'], username=session['email'])
        from google.cloud import storage
        from google.cloud.storage import Blob
        import datetime
        storage_client = storage.Client.from_service_account_json(firebase_key)
        bucket = storage_client.get_bucket("testvotes-d4cd7.appspot.com")
        # Top 10 active questions with more than one abuse report, not yet
        # reviewed, most-reported first (then by upvotes).
        abuse_ref = fire_db.collection(u'questions')
        #query = abuse_ref.where(u'active', u'==', True)
        query = abuse_ref.limit(10).where(u'active', u'==', True).where(u'reportabuse', u'>', 1).where(u'abuse_verified', u'==', False).order_by(u'reportabuse', direction=firestore.Query.DESCENDING).order_by(u'upvote').stream()
        #results = query.stream()
        for q in query:
            qdict = q.to_dict()
            qdict["qid"] = q.id #add the document id along with other data
            #print(qdict['reportabuse'])
            # Image path convention: questions/<category>/<doc id>.jpg
            fname="questions/" + qdict['category']+"/" + q.id + ".jpg"
            stats = storage.Blob(bucket=bucket, name=fname).exists(storage_client)
            if stats:
                blob = bucket.blob(fname)
                # Signed URL valid for 300 seconds.
                image_signed_url = blob.generate_signed_url(datetime.timedelta(seconds=300), method='GET')
            else:
                # Placeholder when the question has no uploaded image.
                image_signed_url="static/img/temp-image/no-image.jpeg"
            qdict["image_signed_url"] = image_signed_url
            print(image_signed_url)
            session['abuseListToHTML'].append(qdict.copy())
        return render_template('Abuse_questions_activities.html', questions=session['abuseListToHTML'], username=session['email'])
    # User is not loggedin redirect to login page
    return redirect(url_for('login'))
@app.route('/branactivities-view', methods=['GET', 'POST'])
def expand_brand():
    """Detail view for one brand request; lets a moderator approve/reject it.

    GET renders the detail page from query-string parameters; POST applies
    the decision to both the Firestore 'brands' document and the local
    new_brand_requests row.
    """
    if 'loggedin' in session:
        # All display fields arrive via the query string from the list page.
        selected_row = request.args.get('row_id')
        brand_id = request.args.get('brand_id')
        brandname = request.args.get('brandname')
        branddescription = request.args.get('branddescription')
        brandcategory = request.args.get('brandcategory')
        brandtype = request.args.get('brandtype')
        posted_by_user = request.args.get('posted_by_user')
        brandemail = request.args.get('brandemail')
        brandweb = request.args.get('brandweb')
        print(brandname)
        if request.method == 'POST' and 'decision' in request.form:
            # Free-text reason entered by the moderator.
            decision = request.form['decision']
            brand_ref = fire_db.collection(u'brands').document(brand_id)
            if 'reject' in request.form:
                # return redirect(url_for('login'))
                print("Reject clicked")
                # Mirror the rejection into Firestore first...
                update_data = {
                    u'active': False,
                    u'status_change_dt': firestore.SERVER_TIMESTAMP,
                    u'decision': "rejected",
                    u'decision_reason': decision
                }
                brand_ref.update(update_data)
                # ...then into the local SQL row.
                rbrand = new_brand_requests.query.filter_by(brand_id=brand_id).first()
                # if not rbrand:
                # return jsonify({'message': 'No pulse found to update!'}), 204
                rbrand.active = False
                rbrand.decision = "rejected"
                rbrand.decision_reason = decision
                rbrand.status_change_dt = datetime.datetime.utcnow()
                rbrand.modified_by = session['email']
                db.session.commit()
                return redirect(url_for('home'))
            if 'approve' in request.form:
                print("Approve clicked")
                # Same two-store update as reject, but activating the brand.
                update_data = {
                    u'active': True,
                    u'status_change_dt': firestore.SERVER_TIMESTAMP,
                    u'decision': "approved",
                    u'decision_reason': decision
                }
                brand_ref.update(update_data)
                rbrand = new_brand_requests.query.filter_by(brand_id=brand_id).first()
                # if not rbrand:
                # return jsonify({'message': 'No pulse found to update!'}), 204
                rbrand.active = True
                rbrand.decision = "approved"
                rbrand.decision_reason = decision
                rbrand.status_change_dt = datetime.datetime.utcnow()
                rbrand.modified_by = session['email']
                db.session.commit()
                return redirect(url_for('home'))
        # NOTE(review): `decision` is only bound inside the POST branch, so a
        # plain GET reaching this print raises NameError — confirm intended
        # indentation. Also no return/redirect exists for the not-logged-in
        # case, so that path returns None.
        print(decision)
        return render_template('expand_brand.html', selected_row=selected_row, brand_id=brand_id, brandname=brandname,
                               branddescription=branddescription, brandcategory=brandcategory, brandtype=brandtype,
                               posted_by_user=posted_by_user, brandemail=brandemail, brandweb=brandweb)
@app.route('/abuseactivities-view', methods=['GET', 'POST'])
def expand_abuse():
    """Detail view for one abuse-reported question.

    A moderator can approve the question, deactivate it, or deactivate it
    and also revoke the author's right to post. GET renders the detail page
    from query-string parameters; POST applies the decision to Firestore and
    removes the question from the cached abuse list in the session.
    """
    if 'loggedin' in session:
        # All display fields arrive via the query string from the list page.
        qid = request.args.get('qid')
        user_id = request.args.get('user_id')
        question = request.args.get('question')
        category = request.args.get('category')
        totalvote = request.args.get('totalvote')
        upvote = request.args.get('upvote')
        reportabuse = request.args.get('reportabuse')
        status = request.args.get('active')
        question_type = request.args.get('question_type')
        image_signed_url = request.args.get('image_signed_url')
        if request.method == 'POST' and 'decision' in request.form:
            decision = request.form['decision']
            # Bug fix: drop the actioned item from the cached list. The
            # original compared with `== qid`, which KEPT only the actioned
            # item and discarded the rest — the opposite of the stated
            # intent ("Remove the item which is actioned from the screen").
            session['abuseListToHTML'] = [item for item in session['abuseListToHTML'] if item['qid'] != qid]
            abuse_ref = fire_db.collection(u'questions').document(qid)
            if 'reject' in request.form:
                print("Inactive clicked")
                # Deactivate the question and record who/why.
                update_data = {
                    u'active': False,
                    u'active_change_dt': firestore.SERVER_TIMESTAMP,
                    u'abuse_verified':True,
                    u'active_change_madeby': session['email'],
                    u'inactive_reason': decision
                }
                abuse_ref.update(update_data)
                return redirect(url_for('home'))
            if 'reject-user' in request.form:
                print("Inactive user clicked")
                # Disable the question...
                update_data = {
                    u'active': False,
                    u'active_change_dt': firestore.SERVER_TIMESTAMP,
                    u'abuse_verified': True,
                    u'active_change_madeby': session['email'],
                    u'inactive_reason': decision
                }
                abuse_ref.update(update_data)
                # ...and stop the author from posting further questions.
                user_ref = fire_db.collection(u'users').document(user_id)
                update_user_data = {
                    u'canCreatePolls': False
                }
                user_ref.update(update_user_data)
                return redirect(url_for('home'))
            if 'approve' in request.form:
                print("Approve clicked")
                # Keep the question active and mark the report as reviewed.
                update_data = {
                    u'active': True,
                    u'active_change_dt': firestore.SERVER_TIMESTAMP,
                    u'abuse_verified': True,
                    u'active_change_madeby': session['email'],
                    u'inactive_reason': decision
                }
                abuse_ref.update(update_data)
                #return redirect(url_for('home'))
                return render_template('Abuse_questions_activities.html', questions=session['abuseListToHTML'], username=session['email'])
        # Bug fix: removed the stray `print(decision)` here — `decision` is
        # only bound inside the POST branch, so it raised NameError on GET.
        return render_template('expand_abuse.html', qid = qid, user_id = user_id,question = question,category = category, totalvote = totalvote,upvote = upvote,reportabuse = reportabuse,status = status, question_type = question_type, image_signed_url=image_signed_url)
# http://localhost:5000/pythinlogin/profile - this will be the profile page, only accessible for loggedin users
@app.route('/admin/profile')
def profile():
    """Show the logged-in user's account details.

    Only accessible with an authenticated session; otherwise redirects to
    the login page.
    """
    # Check if user is loggedin
    if 'loggedin' in session:
        # Bug fix: login() stores the address under session['email'];
        # session['username'] is never set anywhere in this app, so the
        # original lookup always raised KeyError.
        account = web_user.query.filter(web_user.email == session['email']).first()
        # Show the profile page with account info
        return render_template('profile.html', account=account)
    # User is not loggedin redirect to login page
    return redirect(url_for('login'))
if __name__ == '__main__':
    # NOTE(review): bool() of a non-empty string is always True, so if
    # `config` (defined outside this excerpt) is a ConfigParser this can
    # never evaluate to False — presumably
    # config.getboolean('DEBUG', 'DEBUG') was intended; confirm config's type.
    debug = bool(config['DEBUG']['DEBUG'])
    session_id = -1
    session_email = ''
    app.run(debug=debug)
|
[
"61419742+votesappteam@users.noreply.github.com"
] |
61419742+votesappteam@users.noreply.github.com
|
12d808094d51053e5934fb28f0dcbbaef96b7e43
|
76f17df8dfeaff39bb23cdfcf55cb5b8018c8814
|
/ProxySource/proxyNova.py
|
b5da2ca9bd28faf3d84d3c602b499b346f7dadf8
|
[] |
no_license
|
samsonllam/proxy-utility
|
8c3d079f7f2f6cb21f62e59923a2069602cd3cfe
|
2356a0a83dcc26f8f7bf77d5db3c52a1684140ce
|
refs/heads/master
| 2020-12-01T06:33:41.250414
| 2019-12-28T09:55:29
| 2019-12-28T09:55:29
| 230,576,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,203
|
py
|
import time
import js2py
import loguru
import pyquery
import requests
def getProxiesFromProxyNova():
    """Scrape proxynova.com per-country listings and return proxies.

    Returns a list of "ip:port" strings gathered from each country page.
    Countries that fail to download (non-200 response) are skipped.
    """
    proxies = []
    # Per the site's URL scheme, plug each country code into the URL to
    # fetch that country's proxy listing page.
    countries = [
        'tw',
        'jp',
        'kr',
        'id',
        'my',
        'th',
        'vn',
        'ph',
        'hk',
        'uk',
        'us'
    ]
    for country in countries:
        url = f'https://www.proxynova.com/proxy-server-list/country-{country}/'
        loguru.logger.debug(f'getProxiesFromProxyNova: {url}')
        loguru.logger.warning(f'getProxiesFromProxyNova: downloading...')
        response = requests.get(url)
        if response.status_code != 200:
            loguru.logger.debug(f'getProxiesFromProxyNova: status code is not 200')
            continue
        loguru.logger.success(f'getProxiesFromProxyNova: downloaded.')
        d = pyquery.PyQuery(response.text)
        table = d('table#tbl_proxy_list')
        rows = list(table('tbody:first > tr').items())
        loguru.logger.warning(f'getProxiesFromProxyNova: scanning...')
        for row in rows:
            tds = list(row('td').items())
            # Separator rows contain only a single cell; skip them.
            if len(tds) == 1:
                continue
            # Extract the JavaScript snippet embedded in the IP cell.
            js = row('td:nth-child(1) > abbr').text()
            # Strip the leading "document.write(" and trailing ");" from the
            # snippet, then wrap it so js2py can evaluate and return it.
            js = 'let x = %s; x' % (js[15:-2])
            # Evaluate with js2py to recover the de-obfuscated IP address.
            ip = js2py.eval_js(js).strip()
            # Port is plain text in the second cell.
            port = row('td:nth-child(2)').text().strip()
            # Assemble the "ip:port" proxy entry.
            proxy = f'{ip}:{port}'
            proxies.append(proxy)
        loguru.logger.success(f'getProxiesFromProxyNova: scanned.')
        loguru.logger.debug(f'getProxiesFromProxyNova: {len(proxies)} proxies is found.')
        # Sleep one second between country pages to avoid being blocked for
        # hammering the listing site.
        time.sleep(1)
    return proxies
|
[
"fongpuilam@gmail.com"
] |
fongpuilam@gmail.com
|
3ae17f389f2cbbb7ab8a27b1621e1718c1ae8e09
|
6319ca627fdb1c89f19b08053d418db9825a539b
|
/Load_semester.py
|
0b12535718c2a5b8c85db8160a8229a728c9df5f
|
[] |
no_license
|
williamrtam/PennCourseReview2.0
|
fb5b07aafd908d589f80f9f2957766c5c565458c
|
7a16b61c4a324654e80bd00fbeb1b0c7e304bed6
|
refs/heads/master
| 2022-12-11T08:20:27.474123
| 2020-10-22T00:25:03
| 2020-10-22T00:25:03
| 124,487,091
| 0
| 0
| null | 2022-12-07T03:58:48
| 2018-03-09T04:20:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 692
|
py
|
import requests
import json
import re
import sys
import os
import db_functions as db
from APIRequest import get_request_pcr, print_components, set_http_pcr
# Open a database connection via the project-local helper module.
conn = db.get_connection()
print("Currently in db:")
# Dump the current Semester table contents for reference.
db.doQuery(conn, "select * from Semester;")
# Fetch the list of semesters from the Penn Course Review API.
http_str = set_http_pcr("semesters")
json_obj = get_request_pcr(http_str)
sem_list = json_obj["result"]["values"]
for item in sem_list:
    # Triple quotes are needed because some names contain ' characters.
    # `school` is left null because we don't know if the API provides it.
    # NOTE(review): building SQL via str.format is injection-prone if the
    # API ever returns a double quote in id/name — prefer a parameterized
    # query if db.doQuery supports placeholders.
    query_str = """INSERT INTO Semester (semid,name) VALUES ("{0}","{1}");""".format(item["id"],item["name"])
    print(query_str)
    db.doQuery(conn,query_str)
db.close(conn)
|
[
"willtam@seas.upenn.edu"
] |
willtam@seas.upenn.edu
|
cf95ea73cfd5113b7ece32fa64c811f159a1b5fe
|
e6c0683afc2a3d48ada10ffa9f7d257e7c64589e
|
/purity_fb/purity_fb_1dot6/models/directory_service.py
|
203e2febaf5c82ca52a01c9e8de1d4fb3f665632
|
[
"Apache-2.0"
] |
permissive
|
unixtreme/purity_fb_python_client
|
9a5a0375f4505421974aadc674ed04982c2bf84f
|
e836afe9804ffa99f74bf4b5202f181c3c04d9df
|
refs/heads/master
| 2020-04-24T14:53:56.977344
| 2019-02-22T12:37:45
| 2019-02-22T12:37:45
| 172,042,713
| 0
| 0
|
NOASSERTION
| 2019-02-22T10:05:44
| 2019-02-22T10:05:44
| null |
UTF-8
|
Python
| false
| false
| 8,415
|
py
|
# coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API (1.0 - 1.6), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.6
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DirectoryService(object):
    """Swagger model for a Purity//FB directory-service configuration.

    NOTE: This class was generated by the swagger code generator program.
    Modernization: the ``to_dict`` loop now uses the native ``dict.items()``
    instead of ``six.iteritems`` — identical behavior on Python 3, and the
    class no longer depends on ``six``.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'base_dn': 'str',
        'bind_password': 'str',
        'bind_user': 'str',
        'enabled': 'bool',
        'services': 'list[str]',
        'uris': 'list[str]',
        'smb': 'DirectoryserviceSmb'
    }

    attribute_map = {
        'name': 'name',
        'base_dn': 'base_dn',
        'bind_password': 'bind_password',
        'bind_user': 'bind_user',
        'enabled': 'enabled',
        'services': 'services',
        'uris': 'uris',
        'smb': 'smb'
    }

    def __init__(self, name=None, base_dn=None, bind_password=None, bind_user=None, enabled=None, services=None, uris=None, smb=None):
        """
        DirectoryService - a model defined in Swagger.

        All parameters are optional; unset attributes remain None and are
        still emitted (as None) by to_dict().
        """
        self._name = None
        self._base_dn = None
        self._bind_password = None
        self._bind_user = None
        self._enabled = None
        self._services = None
        self._uris = None
        self._smb = None

        # Route every supplied value through its property setter.
        if name is not None:
            self.name = name
        if base_dn is not None:
            self.base_dn = base_dn
        if bind_password is not None:
            self.bind_password = bind_password
        if bind_user is not None:
            self.bind_user = bind_user
        if enabled is not None:
            self.enabled = enabled
        if services is not None:
            self.services = services
        if uris is not None:
            self.uris = uris
        if smb is not None:
            self.smb = smb

    @property
    def name(self):
        """
        Gets the name of this DirectoryService.
        name of the object (e.g., a file system or snapshot)

        :return: The name of this DirectoryService.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this DirectoryService.
        name of the object (e.g., a file system or snapshot)

        :param name: The name of this DirectoryService.
        :type: str
        """
        self._name = name

    @property
    def base_dn(self):
        """
        Gets the base_dn of this DirectoryService.
        Base of the Distinguished Name (DN) of the directory service groups.

        :return: The base_dn of this DirectoryService.
        :rtype: str
        """
        return self._base_dn

    @base_dn.setter
    def base_dn(self, base_dn):
        """
        Sets the base_dn of this DirectoryService.
        Base of the Distinguished Name (DN) of the directory service groups.

        :param base_dn: The base_dn of this DirectoryService.
        :type: str
        """
        self._base_dn = base_dn

    @property
    def bind_password(self):
        """
        Gets the bind_password of this DirectoryService.
        Obfuscated password used to query the directory.

        :return: The bind_password of this DirectoryService.
        :rtype: str
        """
        return self._bind_password

    @bind_password.setter
    def bind_password(self, bind_password):
        """
        Sets the bind_password of this DirectoryService.
        Obfuscated password used to query the directory.

        :param bind_password: The bind_password of this DirectoryService.
        :type: str
        """
        self._bind_password = bind_password

    @property
    def bind_user(self):
        """
        Gets the bind_user of this DirectoryService.
        Username used to query the directory.

        :return: The bind_user of this DirectoryService.
        :rtype: str
        """
        return self._bind_user

    @bind_user.setter
    def bind_user(self, bind_user):
        """
        Sets the bind_user of this DirectoryService.
        Username used to query the directory.

        :param bind_user: The bind_user of this DirectoryService.
        :type: str
        """
        self._bind_user = bind_user

    @property
    def enabled(self):
        """
        Gets the enabled of this DirectoryService.
        Is the directory service enabled or not?

        :return: The enabled of this DirectoryService.
        :rtype: bool
        """
        return self._enabled

    @enabled.setter
    def enabled(self, enabled):
        """
        Sets the enabled of this DirectoryService.
        Is the directory service enabled or not?

        :param enabled: The enabled of this DirectoryService.
        :type: bool
        """
        self._enabled = enabled

    @property
    def services(self):
        """
        Gets the services of this DirectoryService.
        Services that the directory service configuration is used for.

        :return: The services of this DirectoryService.
        :rtype: list[str]
        """
        return self._services

    @services.setter
    def services(self, services):
        """
        Sets the services of this DirectoryService.
        Services that the directory service configuration is used for.

        :param services: The services of this DirectoryService.
        :type: list[str]
        """
        self._services = services

    @property
    def uris(self):
        """
        Gets the uris of this DirectoryService.
        List of URIs for the configured directory servers.

        :return: The uris of this DirectoryService.
        :rtype: list[str]
        """
        return self._uris

    @uris.setter
    def uris(self, uris):
        """
        Sets the uris of this DirectoryService.
        List of URIs for the configured directory servers.

        :param uris: The uris of this DirectoryService.
        :type: list[str]
        """
        self._uris = uris

    @property
    def smb(self):
        """
        Gets the smb of this DirectoryService.

        :return: The smb of this DirectoryService.
        :rtype: DirectoryserviceSmb
        """
        return self._smb

    @smb.setter
    def smb(self, smb):
        """
        Sets the smb of this DirectoryService.

        :param smb: The smb of this DirectoryService.
        :type: DirectoryserviceSmb
        """
        self._smb = smb

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively serializing any
        nested model objects that expose their own to_dict().
        """
        result = {}

        # Modernized: dict.items() replaces six.iteritems (same iteration).
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, DirectoryService):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"azaria.zornberg@purestorage.com"
] |
azaria.zornberg@purestorage.com
|
2d20b51302c61b50e4c36ad33f8fa93167553059
|
c1fe1f9b3b790370f35df4b2be46435e4e4c0686
|
/newrelic/api/cat_header_mixin.py
|
0db5d0fc18106bc68e07fcffcb4a7324c007954c
|
[
"Apache-2.0"
] |
permissive
|
BillSchumacher/newrelic-python-agent
|
0358365ba547e7b2ec383e9e8118a8f353005ab7
|
79f3da46fe3fdbe68beb3cb9511d6b291f0ebc8e
|
refs/heads/main
| 2022-11-16T02:53:33.877322
| 2020-07-05T03:20:17
| 2020-07-05T03:20:17
| 277,208,609
| 0
| 0
|
Apache-2.0
| 2020-07-05T01:00:17
| 2020-07-05T01:00:13
| null |
UTF-8
|
Python
| false
| false
| 4,266
|
py
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from newrelic.common.encoding_utils import (obfuscate, deobfuscate,
json_encode, json_decode, base64_encode, base64_decode)
# CatHeaderMixin assumes the mixin class also inherits from TimeTrace
class CatHeaderMixin(object):
    """Mixin adding cross-application-tracing (CAT) header encode/decode.

    Assumes the mixin class also inherits from TimeTrace: it relies on
    ``self.transaction`` and ``self.params`` provided by that hierarchy.
    """
    # Header names used by the New Relic CAT / distributed-tracing protocol.
    cat_id_key = 'X-NewRelic-ID'
    cat_transaction_key = 'X-NewRelic-Transaction'
    cat_appdata_key = 'X-NewRelic-App-Data'
    cat_synthetics_key = 'X-NewRelic-Synthetics'
    cat_metadata_key = 'x-newrelic-trace'
    cat_distributed_trace_key = 'newrelic'
    settings = None

    def __enter__(self):
        # Capture the transaction settings at trace entry so header
        # processing later uses a consistent snapshot.
        result = super(CatHeaderMixin, self).__enter__()
        if result is self and self.transaction:
            self.settings = self.transaction.settings or None
        return result

    def process_response_headers(self, response_headers):
        """
        Decode the response headers and create appropriate metrics based on the
        header values. The response_headers are passed in as a list of tuples.

        [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)]
        """

        settings = self.settings
        if not settings:
            return

        if not settings.cross_application_tracer.enabled:
            return

        appdata = None

        try:
            # Case-insensitive search for the X-NewRelic-App-Data header,
            # then deobfuscate + JSON-decode its value.
            for k, v in response_headers:
                if k.upper() == self.cat_appdata_key.upper():
                    appdata = json_decode(deobfuscate(v,
                            settings.encoding_key))
                    break

            if appdata:
                # Positions 0/1/5 of the decoded payload carry the caller's
                # cross-process id, transaction name and guid.
                self.params['cross_process_id'] = appdata[0]
                self.params['external_txn_name'] = appdata[1]
                self.params['transaction_guid'] = appdata[5]

        except Exception:
            # Malformed CAT headers are ignored rather than failing the trace.
            pass

    def process_response_metadata(self, cat_linking_value):
        # The metadata form is base64(json(headers dict)); unwrap and reuse
        # the header-based processing above.
        payload = base64_decode(cat_linking_value)
        nr_headers = json_decode(payload)
        self.process_response_headers(nr_headers.items())

    @classmethod
    def generate_request_headers(cls, transaction):
        """
        Return a list of NewRelic specific headers as tuples
        [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)]
        """

        if transaction is None:
            return []

        settings = transaction.settings

        nr_headers = []

        # Distributed tracing takes precedence over legacy CAT.
        if settings.distributed_tracing.enabled:
            transaction.insert_distributed_trace_headers(nr_headers)
        elif settings.cross_application_tracer.enabled:
            transaction.is_part_of_cat = True
            encoded_cross_process_id = obfuscate(settings.cross_process_id,
                    settings.encoding_key)
            nr_headers.append((cls.cat_id_key, encoded_cross_process_id))

            transaction_data = [transaction.guid, transaction.record_tt,
                    transaction.trip_id, transaction.path_hash]
            encoded_transaction = obfuscate(json_encode(transaction_data),
                    settings.encoding_key)
            nr_headers.append(
                    (cls.cat_transaction_key, encoded_transaction))

        # Synthetics header is forwarded regardless of tracing mode.
        if transaction.synthetics_header:
            nr_headers.append(
                    (cls.cat_synthetics_key, transaction.synthetics_header))

        return nr_headers

    @staticmethod
    def _convert_to_cat_metadata_value(nr_headers):
        # Inverse of process_response_metadata: base64(json(headers)).
        payload = json_encode(nr_headers)
        cat_linking_value = base64_encode(payload)
        return cat_linking_value

    @classmethod
    def get_request_metadata(cls, transaction):
        # Single-value form of generate_request_headers, or None if there is
        # nothing to propagate.
        nr_headers = dict(cls.generate_request_headers(transaction))
        if not nr_headers:
            return None

        return cls._convert_to_cat_metadata_value(nr_headers)
|
[
"opensource@newrelic.com"
] |
opensource@newrelic.com
|
542a2dfc44395e85c1f87848f139eaca5f2bcba8
|
f9fcde08ff87b2d740c9b8e0ec61e66f08791427
|
/apps/neplat/walker.py
|
2f4231392b202bfc4c63e753b32a7890f5dc4e14
|
[
"BSD-2-Clause"
] |
permissive
|
amirgeva/py2d
|
c0187cb44d8371a247b87a17c30560a4aa1dfb6a
|
0557941989a4eb6045bc00e3b3ac5192eb464936
|
refs/heads/master
| 2022-12-17T20:49:23.043521
| 2022-11-26T07:48:43
| 2022-11-26T07:48:43
| 63,190,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
import sys
from engine import *
from engine.utils import vector2
class Walker(Entity):
    """A sprite entity under constant downward acceleration that can land on
    and walk along surfaces, switching animation sequence by velocity."""

    def __init__(self,spr_filename):
        super().__init__(load_file(spr_filename))
        # Constant downward acceleration (gravity) — units presumably
        # pixels/s^2, per the engine; confirm against Entity.
        self.set_acceleration(0, 200)
        # Frame countdown since last ground contact; >0 means "on ground".
        self.onground=0

    def collision(self,other,col_pt):
        # React only to contacts near the sprite's bottom edge (within 8px)
        # while moving downward: snap to the surface and stop falling.
        y=col_pt.y
        h=self.anim.get_current_height()
        v=self.get_velocity()
        if y>(h-8) and v.y>0:
            self.set_position(self._position.x, self._previous_position.y - (h - 1 - y))
            self.set_velocity(self._velocity.x, 0.0)
            # Stay "grounded" for 3 frames; advance() decrements this.
            self.onground=3

    def get_external_force(self):
        # Horizontal drag/friction opposing motion: much stronger on the
        # ground (-2.5*v) than in the air (-0.1*v).
        v=self.get_velocity().x
        if self.onground>0:
            return vector2(-2.5*v,0)
        return vector2(-0.1*v,0)

    def advance(self,dt):
        # Ground-contact countdown; refreshed by collision() on each landing.
        self.onground-=1
        acc=self.get_acceleration()
        v=self.get_velocity()
#        if self.onground>0:
#            acc.x-=v.x**2
        self.set_acceleration(acc.x, acc.y)
        # Pick the animation: walking left/right above a 10 px/s threshold,
        # otherwise a standing sequence facing the last movement direction
        # (standing sequence names start with 's').
        if v.x>10:
            self.anim.set_active_sequence('right')
        elif v.x<-10:
            self.anim.set_active_sequence('left')
        elif self.anim.get_active_sequence_name()[0]!='s':
            s='sleft' if v.x<0 else 'sright'
            self.anim.set_active_sequence(s)
        # Kill residual horizontal drift when applied force is negligible.
        if abs(acc.x)<20:
            self.set_velocity(0.0,v.y)
        return super().advance(dt)
|
[
"amirgeva@gmail.com"
] |
amirgeva@gmail.com
|
d9a26c20ab0c90ff0de2ec461a88699266fde70a
|
706d5ff4707793a225f41c469f19a4f1891078da
|
/ui/web.py
|
09e66ea9aaf1e3b9ff5472bb548cf4649962bb4e
|
[] |
no_license
|
dcc668/PyDemo1.2
|
eb5f13a19343e4d9d82fdd7c54f6f45622c5c00e
|
f883ca1d9bc04673beb9b40d889da74d2aaa5095
|
refs/heads/master
| 2020-04-15T02:23:16.000157
| 2019-01-30T23:57:41
| 2019-01-30T23:57:41
| 164,312,703
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,073
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'web.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) layout class for the web-page offliner window.

    NOTE: generated from web.ui — regenerate with pyuic5 rather than editing
    widget geometry here by hand; manual changes will be lost.
    """

    def setupUi(self, MainWindow):
        # Build the widget tree and fixed geometry for the 497x279 window.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(497, 279)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Title area: a vertical layout holding the bold heading label.
        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(180, 10, 151, 61))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.label.setBaseSize(QtCore.QSize(20, 28))
        font = QtGui.QFont()
        font.setFamily("Adobe 宋体 Std L")
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setAcceptDrops(False)
        self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label.setStyleSheet("")
        self.label.setTextFormat(QtCore.Qt.PlainText)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        # Input group: URL text box, output-path box, and action buttons.
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setGeometry(QtCore.QRect(30, 70, 461, 171))
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.textEdit = QtWidgets.QTextEdit(self.groupBox)
        self.textEdit.setGeometry(QtCore.QRect(130, 30, 301, 31))
        self.textEdit.setObjectName("textEdit")
        self.label_2 = QtWidgets.QLabel(self.groupBox)
        self.label_2.setGeometry(QtCore.QRect(40, 40, 91, 16))
        font = QtGui.QFont()
        font.setPointSize(15)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        # Read-only box presumably shows the chosen destination path.
        self.textEdit_2 = QtWidgets.QTextEdit(self.groupBox)
        self.textEdit_2.setGeometry(QtCore.QRect(130, 80, 191, 31))
        self.textEdit_2.setAutoFillBackground(False)
        self.textEdit_2.setReadOnly(True)
        self.textEdit_2.setObjectName("textEdit_2")
        self.btn_select = QtWidgets.QPushButton(self.groupBox)
        self.btn_select.setGeometry(QtCore.QRect(340, 80, 91, 31))
        self.btn_select.setObjectName("btn_select")
        self.start = QtWidgets.QPushButton(self.groupBox)
        self.start.setGeometry(QtCore.QRect(180, 130, 91, 31))
        self.start.setObjectName("start")
        # Status label, empty until a run finishes.
        self.label_finish = QtWidgets.QLabel(self.centralwidget)
        self.label_finish.setGeometry(QtCore.QRect(370, 240, 91, 16))
        font = QtGui.QFont()
        font.setPointSize(15)
        self.label_finish.setFont(font)
        self.label_finish.setText("")
        self.label_finish.setObjectName("label_finish")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 497, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Apply user-visible (translatable) strings to the widgets.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "网页离线器"))
        self.label_2.setText(_translate("MainWindow", "输入网址:"))
        self.btn_select.setText(_translate("MainWindow", "选择"))
        self.start.setText(_translate("MainWindow", "开始"))
|
[
"1187053696@qq.com"
] |
1187053696@qq.com
|
a33859d98febcffb4f19ebb946c04f04181cf303
|
1af209a307230f9f5470f687d23cfe87741b9023
|
/nblog1/migrations/0001_initial.py
|
540fe00ce541555c422561b47b99da02b03b336e
|
[
"MIT"
] |
permissive
|
naritotakizawa/django-narito-blog1
|
67290a4961a9b7b0ba1b72d1163316a4d2a2bd05
|
ea133bbc043cd72215847088323a895fe0b13758
|
refs/heads/master
| 2021-07-10T09:52:15.330729
| 2020-12-12T06:11:41
| 2020-12-12T06:11:41
| 221,838,129
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,260
|
py
|
# Generated by Django 2.2.7 on 2019-11-15 03:05
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='名無し', max_length=255, verbose_name='名前')),
('text', models.TextField(verbose_name='本文')),
('email', models.EmailField(blank=True, help_text='入力しておくと、返信があった際に通知します。コメント欄には表示されません。', max_length=254, verbose_name='メールアドレス')),
('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='作成日')),
],
),
migrations.CreateModel(
name='EmailPush',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mail', models.EmailField(max_length=254, unique=True, verbose_name='メールアドレス')),
('is_active', models.BooleanField(default=False, verbose_name='有効フラグ')),
],
),
migrations.CreateModel(
name='LinePush',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.CharField(max_length=100, unique=True, verbose_name='ユーザーID')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True, verbose_name='タグ名')),
],
),
migrations.CreateModel(
name='Reply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='名無し', max_length=255, verbose_name='名前')),
('text', models.TextField(verbose_name='本文')),
('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='作成日')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nblog1.Comment', verbose_name='対象コメント')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=32, verbose_name='タイトル')),
('text', models.TextField(verbose_name='本文')),
('is_public', models.BooleanField(default=True, verbose_name='公開可能か?')),
('description', models.TextField(max_length=130, verbose_name='記事の説明')),
('keywords', models.CharField(default='記事のキーワード', max_length=255, verbose_name='記事のキーワード')),
('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='作成日')),
('updated_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='更新日')),
('relation_posts', models.ManyToManyField(blank=True, related_name='_post_relation_posts_+', to='nblog1.Post', verbose_name='関連記事')),
('tags', models.ManyToManyField(blank=True, to='nblog1.Tag', verbose_name='タグ')),
],
),
migrations.AddField(
model_name='comment',
name='target',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nblog1.Post', verbose_name='対象記事'),
),
]
|
[
"toritoritorina@gmail.com"
] |
toritoritorina@gmail.com
|
3fa8421db6c67838cb4fd0331fdf9481e8482882
|
c4ea8fdf03c03b7c9a6361bd04e1409016b35354
|
/script.py
|
ae9f4e9ea148294a9679aae2f419e68553b8fcb2
|
[] |
no_license
|
SoloLeveling94/Project_2
|
3ea56ae7c22b810a59825fb79418fc26b8b6eaa2
|
b10b7c5bc02576bc0af9f604ed21b8519f019259
|
refs/heads/main
| 2023-06-04T07:50:00.910465
| 2021-06-16T09:10:58
| 2021-06-16T09:10:58
| 374,657,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
import requests
from bs4 import BeautifulSoup
#URL = 'http://books.toscrape.com/'
url = 'http://books.toscrape.com/catalogue/its-only-the-himalayas_981/index.html'
response = requests.get(url)
data = response.content
soup = BeautifulSoup(data, 'html.parser')
results = soup.find(id='default')
info_book = []
dict_info_book = {}
# Affichage lisible html
# print(results.prettify())
product_page_url = url
info_book.append(product_page_url)
universal_product_code = results.find(
'th', text='UPC').find_next_sibling('td').text
info_book.append(universal_product_code)
title = results.find('h1').text
info_book.append(title)
price_including_tax = results.find(
'th', text='Price (incl. tax)').find_next_sibling('td').text
info_book.append(price_including_tax)
price_excluding_tax = results.find(
'th', text='Price (excl. tax)').find_next_sibling('td').text
info_book.append(price_excluding_tax)
number_search = results.find(
'th', text='Availability').find_next_sibling('td').text
number_treatment = number_search.rsplit("(")
#print("1 " , number_treatment)
number_treatment = number_treatment[1].split()
#print("2 " , number_treatment)
for number in number_treatment:
if number.isdigit():
number_available = number
info_book.append(number)
product_description = (results.find(
'div', id='product_description').find_next_sibling('p').text)
info_book.append(product_description)
category = results.find('ul', class_='breadcrumb')
category_split = list(category.stripped_strings)
# print(category_split)
category_book = category_split[2]
info_book.append(category_book)
# Recupere la valeur classe star-rating sans les childs
review_rating = results.find('p', class_='star-rating')['class']
review_rating = review_rating[1]
# print(review_rating)
info_book.append(review_rating)
img_url = results.find('div', {'class': 'item active'}).find_next("img")
img_url = img_url.get('src')
info_book.append(img_url)
dict_info_book = {
'product_page_url': product_page_url,
'universal_product_code': universal_product_code,
'title': title,
'price_including_tax': price_including_tax,
'price_excluding_tax': price_excluding_tax,
'number_available': number_available,
'product_description': product_description,
'category': category_book,
'review_rating': review_rating,
'image_url': 'http://books.toscrape.com/' + img_url[6:]
}
for key in dict_info_book:
print(key, '->', dict_info_book[key])
|
[
"sbokby@gmail.com"
] |
sbokby@gmail.com
|
54452476970b2431f414ca9ddab22e7a3c54cb7e
|
5c729a2e8b1c9126a2430c17b23df0d7de5e3b17
|
/easy/websec_level-20/websec_level-20.py
|
4a526f4bbb8ee941d703330e4f8392e1d67927da
|
[] |
no_license
|
NPC-GO/entroy0421_websec_writeup
|
ea0e94b109596f1d55aa30784010996d9fcaca1e
|
bd874dee5c38c4bf1de2d40bfdd43152e5199dea
|
refs/heads/master
| 2020-12-01T13:42:38.072740
| 2020-09-25T07:52:12
| 2020-09-25T07:52:12
| 230,645,280
| 0
| 2
| null | 2020-09-25T07:52:13
| 2019-12-28T18:08:04
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
from requests import *
url = 'http://websec.fr/level20/index.php'
data = {
'value' : '',
'submit' : 'Add'
}
cookies = {
'data' : 'Qzo0OiJGbGFnIjowOnt9Ow=='
}
r = post(url , data=data , cookies=cookies)
pos = r.text.find('WEBSEC{')
while (True):
txt = r.text[pos]
print(txt, end='')
if(txt == '}'):
print()
break
pos += 1
|
[
"39701965a@gmail.com"
] |
39701965a@gmail.com
|
86cf1286c1f1d116e942ca38d501f4115bb7dc9a
|
d905b94566b8b6dbe46ff0a939390b6e69a9dd2f
|
/forkids/countingcombatants.py
|
130f14118ab95c1cdb6beefaa089cdfbaad191ab
|
[] |
no_license
|
cwtopel/python
|
3b0981f64beb2ba48a2023a6d94f0b1f617421f4
|
a1ecf82d5b411033c62dc096510f5fb6aa53bb26
|
refs/heads/master
| 2021-01-21T07:35:15.114130
| 2015-01-19T21:59:19
| 2015-01-19T21:59:19
| 26,261,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
buildings = 3
ninjas = 25
tunnels = 2
samurai = 40
#so much for these variables I didn't need
#thanks obama
print((3 * 25) + (2 * 40))
|
[
"ctopel@protonmail.ch"
] |
ctopel@protonmail.ch
|
0b2fd76f030c0698c98ebf2062dede17f62b356a
|
d22903da4e8be134dff562d744f0c5781b2b2c9b
|
/game/DataCastle/Drug_screening/combine.py
|
a6eb88f7eac7ebf4db1d190b5c00ad9c0df019df
|
[] |
no_license
|
xiaoqian19940510/python-
|
2eb32e06b864c835fc27d3a68e2aa0974dae2352
|
276a384888c65bed27481db3757ea1834309d070
|
refs/heads/master
| 2018-09-10T08:52:37.689830
| 2018-08-07T11:37:13
| 2018-08-07T11:37:13
| 113,555,426
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
import pandas as pd
#data process
train1=pd.read_csv('taijing/df_affinity_train.csv')
train2=pd.read_csv('taijing/df_molecule.csv')
test=pd.read_csv('taijing/df_affinity_test_toBePredicted.csv')
train1=pd.DataFrame(train1)
train2=pd.DataFrame(train2)
test=pd.DataFrame(test)
test.columns = ['Protein_ID','Molecule_ID','Ki']
test['Ki']=0
del test['Ki']
# del test['Ki']
train1.columns = ['Protein_ID','Molecule_ID','Ki']
# train1.dropna(inplace=True)
train1.fillna(0.0,inplace=True)
train2.columns = ['Molecule_ID','Fingerprint','cyp_3a4','cyp_2c9','cyp_2d6','ames_toxicity','fathead_minnow_toxicity','tetrahymena_pyriformis_toxicity','honey_bee','cell_permeability','logP','renal_organic_cation_transporter','CLtotal','hia','biodegradation','Vdd','p_glycoprotein_inhibition','NOAEL','solubility','bbb']
# train2.dropna(inplace=True)
del train2['Fingerprint']
train2.fillna(0.0,inplace=True)
test.fillna(0.0,inplace=True)
train1.fillna(0.0,inplace=True)
# train2.to_csv('taijing/df_molecule_drop.csv')
# test_fianll=test.concat(train2, keys=['Molecule_ID'])
test_fianll=pd.merge(test,train2)
train_finall=pd.merge(train1,train2)
test_fianll.fillna(0.0,inplace=True)
train_finall.fillna(0.0,inplace=True)
# print(train.head(6))
train_finall.to_csv('taijing/df_affinity_train_combine.csv')
test_fianll.to_csv('taijing/df_affinity_test_combine.csv')
|
[
"noreply@github.com"
] |
xiaoqian19940510.noreply@github.com
|
98b140f696433695145daef016d731b86c66831a
|
beda1bec86d9e7b06b625ab939e0b4175b8e0535
|
/benchmark/benchmark.py
|
7b3a9fba45c98af2d188a0a23f2c7dab685d4dfc
|
[
"Apache-2.0"
] |
permissive
|
doyouqa/python-sdk
|
1bee86371595c27c9546e844debdcb21ed8c7bda
|
1a0bf2c296600967197da9db5c94f3cff13aa7e2
|
refs/heads/master
| 2020-03-16T23:40:19.275657
| 2017-10-17T20:20:35
| 2017-10-17T20:20:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,762
|
py
|
#!/usr/bin/env python3
import os
import time
import argparse
import platform
import base64
from contextlib import contextmanager
import logging
import oneid
logger = logging.getLogger('__undefined__')
def main():
parser = argparse.ArgumentParser(description='Run specific benchmark for oneID-connect library')
parser.add_argument('-d', '--debug',
choices=['NONE', 'INFO', 'DEBUG', 'WARNING', 'ERROR'],
default='NONE',
help='Specify level of debug output (default: %(default)s)'
)
parser.add_argument('-i', '--environment',
action='store_true',
help='Display runtime environment description'
)
parser.add_argument('-a', '--aes-keys',
action='store_true',
help='Generate AES keys'
)
parser.add_argument('-S', '--symmetric',
action='store_true',
help='Encrypt and decrypt random bytes'
)
parser.add_argument('-E', '--ecdsa-key',
action='store_true',
help='Generate ECDSA keys'
)
parser.add_argument('-A', '--asymmetric',
action='store_true',
help='Sign and verify signatures'
)
parser.add_argument('-J', '--jwt',
action='store_true',
help='Create and verify JWTs'
)
parser.add_argument('-s', '--data-size',
type=int,
default=256,
help='Number of bytes for operations on random data (default: %(default)s)'
)
parser.add_argument('-n', '--count',
type=int,
default=1000,
help='Number of operations to perform (default: %(default)s)'
)
args = parser.parse_args()
set_logging_level(args.debug)
logger = logging.getLogger('oneID-connect/benchmark')
logger.debug('args=%s', args)
if (args.environment):
show_environment()
if args.aes_keys:
run_aes_keys_tasks(args.count)
if args.symmetric:
run_symmetric_tasks(args.data_size, args.count)
if args.ecdsa_key:
run_ecdsa_key_tasks(args.count)
if args.asymmetric:
run_asymmetric_tasks(args.data_size, args.count)
if args.jwt:
run_jwt_tasks(args.data_size, args.count)
@contextmanager
def operations_timer(numops, oplabel='operations'):
start = time.process_time()
yield
end = time.process_time()
delta = end - start
rate = numops/delta
print('Completed {numops:,d} {oplabel} in {delta:,.3f} seconds, or {rate:,.2f} {oplabel}/second'
.format(numops=numops, delta=delta, rate=rate, oplabel=oplabel)
)
def show_environment():
print('Environment:')
print(' {}'.format(platform.platform()))
print(' {} {}'.format(platform.python_implementation(), platform.python_version()))
def run_aes_keys_tasks(count):
print('Creating {:,d} AES key(s)'.format(count))
with operations_timer(count, 'AES keys'):
for _ in range(count):
oneid.service.create_aes_key()
def run_symmetric_tasks(data_size, count):
print('Encrypting/Decrypting {} {}-byte random message(s)'.format(count, data_size))
key = oneid.service.create_aes_key()
data = os.urandom(data_size)
edata = oneid.service.encrypt_attr_value(data, key)
with operations_timer(count, 'encryptions'):
for _ in range(count):
oneid.service.encrypt_attr_value(data, key)
with operations_timer(count, 'decryptions'):
for _ in range(count):
oneid.service.decrypt_attr_value(edata, key)
def run_ecdsa_key_tasks(count):
print('Creating {:,d} ECDSA key(s)'.format(count))
with operations_timer(count, 'ECDSA keys'):
for _ in range(count):
oneid.service.create_secret_key()
def run_asymmetric_tasks(data_size, count):
print('Signing/Verifying {:,d} {:,d}-byte random messages'.format(count, data_size))
keypair = oneid.service.create_secret_key()
data = os.urandom(data_size)
sig = keypair.sign(data)
with operations_timer(count, 'signatures'):
for _ in range(count):
keypair.sign(data)
with operations_timer(count, 'verifies'):
for _ in range(count):
if not keypair.verify(data, sig):
raise RuntimeError('error verifying signature')
def run_jwt_tasks(data_size, count):
print('Creating/Verifying {:,d} JWTs with {:,d}-byte random payloads'.format(count, data_size))
keypair = oneid.service.create_secret_key()
data = {'d': base64.b64encode(os.urandom(data_size)).decode('utf-8')[:data_size]}
jwt = oneid.jwts.make_jwt(data, keypair)
with operations_timer(count, 'creates'):
for _ in range(count):
oneid.jwts.make_jwt(data, keypair)
with operations_timer(count, 'verifies'):
for _ in range(count):
if not oneid.jwts.verify_jwt(jwt, keypair):
raise RuntimeError('error verifying jwt')
def set_logging_level(debug_level):
level = getattr(logging, debug_level.upper(), 100)
if not isinstance(level, int):
raise ValueError('Invalid log level: %s' % debug_level)
logging.basicConfig(level=level,
format='%(asctime)-15s %(levelname)-8s [%(name)s:%(lineno)s] %(message)s'
)
if __name__ == '__main__':
main()
|
[
"cdunham@gmail.com"
] |
cdunham@gmail.com
|
84b0ad08bbee0f065c172c9e5d961c13c6f2e508
|
630da111c53b6ffe987f99b6aa7ac0f3ad4c9110
|
/test/random_string.py
|
81ac86a613d71063f7c50bcd84defca3d707d6f0
|
[] |
no_license
|
durenk/python-socket-engine
|
ad1c1f87abe32815abb4a4fce5fb77473226785d
|
8292ab752e419beea089085934b431b36c3138f1
|
refs/heads/master
| 2021-05-27T06:30:12.558953
| 2013-12-28T06:17:46
| 2013-12-28T06:17:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,230
|
py
|
#!/usr/bin/env python
# coding:utf-8
import _env
import conf
from lib.socket_engine import TCPSocketEngine, Connection
from lib.net_io import send_all, recv_all, NetHead
import socket
import threading
import random
import time
from lib.log import Log, getLogger
import lib.io_poll as iopoll
#from lib.conn_pool import *
import os
import traceback
import string
#from lib.timecache import TimeCache
global_lock = threading.Lock()
server_addr = ("0.0.0.0", 20300)
g_round = 50
g_send_count = 0
g_client_num = 20
g_done_client = 0
MAX_LEN = 8 * 1024
def random_string(n):
s = string.ascii_letters + string.digits
result = ""
for i in xrange(n):
result += random.choice(s)
return result
def start_echo_server():
global server_addr
poll = None
if 'EPoll' in dir(iopoll):
poll = iopoll.EPoll(True)
print "using epoll et mode"
else:
poll = iopoll.Poll()
server = TCPSocketEngine(poll, is_blocking=False, debug=False)
server.set_logger(getLogger("server"))
# server.get_time = tc.time
def _on_readable(conn):
def __on_send(conn):
#print "write_ok"
#_on_readable(conn)
server.watch_conn(conn)
return
buf, eof = server.read_avail(conn, 4096)
if buf:
#print "write", len(buf)
server.write_unblock(conn, buf, __on_send, None)
elif eof:
server.close_conn(conn)
else:
server.watch_conn(conn)
return
server.listen_addr(server_addr, readable_cb=_on_readable)
def _run(_server):
while True:
try:
_server.poll()
except Exception, e:
traceback.print_exc()
os._exit(1)
return
th = threading.Thread(target=_run, args=(server,))
th.setDaemon(1)
th.start()
return server
def client():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
global g_send_count, g_client_num, g_done_client, server_addr, global_lock
global g_round
sock.connect(server_addr)
# times = random.randint(1, 5000)
# time.sleep(times/ 2000.0)
for i in xrange(0, g_round):
l = random.randint(1, MAX_LEN)
data = random_string(l)
send_all(sock, data)
# print l, len(data)
_data = recv_all(sock, l)
if _data == data:
#print "client", i
global_lock.acquire()
g_send_count += 1
global_lock.release()
else:
global_lock.acquire()
print "client recv invalid data", i, len(_data), len(data), l
global_lock.release()
os._exit(0)
# time.sleep(0.01)
print "client done", g_done_client
sock.close()
global_lock.acquire()
g_done_client += 1
global_lock.release()
def test_client():
global g_send_count, g_done_client, g_client_num
## pool = ConnPool(10, -1)
i = 0
ths = list()
start_time = time.time()
while True:
if i < g_client_num:
# ths.append(threading.Thread(target=client_pool, args=(pool, )))
ths.append(threading.Thread(target=client, args=()))
ths[i].setDaemon(1)
ths[i].start()
i += 1
else:
for j in xrange(0, i):
ths[j].join()
print "time:", time.time() - start_time
print g_done_client, g_send_count
# pool.clear_conn(server_addr)
if g_client_num == g_done_client:
print "test OK"
os._exit(0)
else:
print "test fail"
return
def test_client_line():
poll = None
if 'EPoll' in dir(iopoll):
poll = iopoll.EPoll(True)
print "client using epoll et mode"
else:
poll = iopoll.Poll()
engine = TCPSocketEngine(poll, debug=False)
# engine.get_time = tc.time
engine.set_logger(getLogger("client"))
start_time = time.time()
def __on_conn_err(e, client_id):
print client_id, "connect error", str(e)
os._exit(1)
return
def __on_err(conn, client_id, count, *args):
print client_id, "error", str(conn.error), count
return
def __on_recv(conn, client_id, count, data):
global g_done_client
if count >= 0 and data:
buf = conn.get_readbuf()
if buf != data:
print "data recv invalid, client:%s, count:%s, data:[%s]" % (client_id, count, buf)
os._exit(0)
if count < g_round:
#print client_id, count
l = random.randint(1, MAX_LEN -1)
newdata = random_string(l)
engine.write_unblock(conn, newdata, __on_send, __on_err, (client_id, count + 1, newdata))
else:
engine.close_conn(conn)
g_done_client += 1
print "client", client_id, "done"
if g_done_client == g_client_num:
print "test client done time: ", time.time() - start_time
os._exit(0)
return
def __on_send( conn, client_id, count, data):
# print "send", client_id, count, "len", len(data)
engine.read_unblock(conn, len(data), __on_recv, __on_err, (client_id, count, data))
return
def __on_conn(sock, client_id):
# print "conn", client_id, time.time()
__on_recv(Connection(sock), client_id, -1, None)
return
def _run(engine):
global g_done_client
while g_done_client < g_client_num:
try:
engine.poll()
except Exception, e:
traceback.print_exc()
os._exit(1)
print g_done_client
return
print "client_unblock started"
for i in xrange(0, g_client_num):
# print "conning", i
engine.connect_unblock(server_addr, __on_conn, __on_conn_err, (i,))
_run(engine)
def main():
Log("client", config=conf)
Log("server", config=conf)
server = start_echo_server()
time.sleep(1)
test_client()
#test_client_line()
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 :
|
[
"frostyplanet@gmail.com"
] |
frostyplanet@gmail.com
|
c541376904f49c99666438a9116b7aa518c05664
|
1fab03a8aace79cfb0095e6ec9a618ff0bd72a35
|
/unique.py
|
04086fda7ee996ee6e256757b6fa02fda35d4447
|
[] |
no_license
|
alaeddingurel/justext_folia_conll
|
cc1baa55e34cd260626b13a5d0a993cdfe6e8571
|
374c2536ac001155ba125cdfeea40d6ba8e79c89
|
refs/heads/master
| 2020-05-19T19:33:46.546734
| 2019-05-06T11:28:23
| 2019-05-06T11:28:23
| 185,182,910
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 10:23:22 2019
@author: user
"""
import pandas as pd
df = pd.read_fwf('/home/user/Desktop/all_info.csv')
df = df['gandhi bhavan']
from pynlpl.formats import folia
for doc_name in glob.glob("*.xml"):
df = pd.read_fwf('/home/user/Desktop/new_partial_folia_file/' + doc_name , header=None)
series = []
for elem in df[0]:
new = re.split(r'\t+', elem)
if len(new) > 1:
series.append(elem)
# print(len(series))
df_ne = pd.DataFrame(series, columns=['text'])
print(doc_name)
print("Unique number" + str(len(df_ne.text.unique())))
duplicate = df_ne[df_ne.text.duplicated()]
print("Duplication number : " + str(len(duplicate)))
doc = folia.Document(file=filename)
doc.text()
|
[
"noreply@github.com"
] |
alaeddingurel.noreply@github.com
|
ee4804a60baa6b44b65ba8efe00264d804db19e1
|
840917addef4e36de1539d34aee811f487f2abb5
|
/bin/pasteurize
|
ff8a7b6b7877befa455a612dd76b9d0f1399011c
|
[] |
no_license
|
goks/DjangoExcel-Project
|
c26cbbbd8a208521a38dea0d8a11e438590584fd
|
050d35f944902c4c76f682d8684a460590709d71
|
refs/heads/master
| 2020-12-30T22:31:23.380287
| 2016-05-30T16:23:31
| 2016-05-30T16:23:31
| 59,377,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
#!/home/goks/bin/virtualEnv/djangoexcel/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.15.2','console_scripts','pasteurize'
__requires__ = 'future==0.15.2'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('future==0.15.2', 'console_scripts', 'pasteurize')()
)
|
[
"gokulav2@gmail.com"
] |
gokulav2@gmail.com
|
|
1eead32becd32c239779d7f2ab4347a0e71989df
|
1a3a9ecb618987d8a4ce2a546cf7546c78f318d8
|
/dask/bytes/compression.py
|
37cebe5379f599d3544f1d566290c4b40b62229d
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
DalavanCloud/dask
|
2a433158c3423446584e735f0c27193ce72d5cea
|
b5e27c254660d6744033b0b7c9297e3eb71afbc3
|
refs/heads/master
| 2020-04-19T21:46:30.921701
| 2017-09-25T11:43:19
| 2017-09-25T11:43:19
| 168,451,252
| 1
| 0
|
NOASSERTION
| 2019-01-31T02:44:12
| 2019-01-31T02:44:12
| null |
UTF-8
|
Python
| false
| false
| 2,763
|
py
|
from __future__ import print_function, division, absolute_import
import bz2
import sys
import zlib
from toolz import identity
from ..compatibility import gzip_compress, gzip_decompress, GzipFile
from ..utils import ignoring
def noop_file(file, **kwargs):
return file
compress = {'gzip': gzip_compress,
'zlib': zlib.compress,
'bz2': bz2.compress,
None: identity}
decompress = {'gzip': gzip_decompress,
'zlib': zlib.decompress,
'bz2': bz2.decompress,
None: identity}
files = {'gzip': lambda f, **kwargs: GzipFile(fileobj=f, **kwargs),
None: noop_file}
seekable_files = {None: noop_file}
with ignoring(ImportError):
import snappy
compress['snappy'] = snappy.compress
decompress['snappy'] = snappy.decompress
with ignoring(ImportError):
import lz4
compress['lz4'] = lz4.LZ4_compress
decompress['lz4'] = lz4.LZ4_uncompress
with ignoring(ImportError):
from ..compatibility import LZMAFile, lzma_compress, lzma_decompress
compress['xz'] = lzma_compress
decompress['xz'] = lzma_decompress
files['xz'] = LZMAFile
# Seekable xz files actually tend to scan whole file - see `get_xz_blocks`
# with ignoring(ImportError):
# import lzma
# seekable_files['xz'] = lzma.LZMAFile
#
# with ignoring(ImportError):
# import lzmaffi
# seekable_files['xz'] = lzmaffi.LZMAFile
if sys.version_info[0] >= 3:
import bz2
files['bz2'] = bz2.BZ2File
def get_xz_blocks(fp):
from lzmaffi import (STREAM_HEADER_SIZE, decode_stream_footer,
decode_index, LZMAError)
fp.seek(0, 2)
def _peek(f, size):
data = f.read(size)
f.seek(-size, 1)
return data
if fp.tell() < 2 * STREAM_HEADER_SIZE:
raise LZMAError("file too small")
# read stream paddings (4 bytes each)
fp.seek(-4, 1)
padding = 0
while _peek(fp, 4) == b'\x00\x00\x00\x00':
fp.seek(-4, 1)
padding += 4
fp.seek(-STREAM_HEADER_SIZE + 4, 1)
stream_flags = decode_stream_footer(_peek(fp, STREAM_HEADER_SIZE))
fp.seek(-stream_flags.backward_size, 1)
index = decode_index(_peek(fp, stream_flags.backward_size), padding)
return {'offsets': [b.compressed_file_offset for i, b in index],
'lengths': [b.unpadded_size for i, b in index],
'check': stream_flags.check}
def xz_decompress(data, check):
from lzmaffi import decode_block_header_size, LZMADecompressor, FORMAT_BLOCK
hsize = decode_block_header_size(data[:1])
header = data[:hsize]
dc = LZMADecompressor(format=FORMAT_BLOCK, header=header,
unpadded_size=len(data), check=check)
return dc.decompress(data[len(header):])
|
[
"mrocklin@gmail.com"
] |
mrocklin@gmail.com
|
1ef4178efb1d01db1887e39443f41b387d96ee8c
|
59d1347d6bb37134e7b2f3e1a55dac0df3d4a8a1
|
/mybooksite/mybooksite/wsgi.py
|
b9bdf2bd63c675e389630f1577b612347791aaf5
|
[] |
no_license
|
Lukisn/DjangoTutorial
|
95ebc5cc21d9ddd6ea0f8a748c20c96f58395f13
|
d1d409f50ab7840ddc8820a311d36b143fc3430a
|
refs/heads/master
| 2021-04-26T23:13:45.403057
| 2018-04-17T14:46:42
| 2018-04-17T14:46:42
| 123,951,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for mybooksite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mybooksite.settings")
application = get_wsgi_application()
|
[
"lukas.reimer@icloud.com"
] |
lukas.reimer@icloud.com
|
ee22d863f9f616a2b9d29cfee64b9252a79b65c1
|
c00a2490947ad10582b5d675f070ccb62b70901d
|
/chromium/chrome/app/version_assembly/version_assembly_manifest_action.gypi
|
de47191e55ed4bebae16e3c854bc6fbb8eb62e1f
|
[
"BSD-3-Clause"
] |
permissive
|
teotikalki/vivaldi-source
|
543d0ab336fb5784eaae1904457598f95f426186
|
22a46f2c969f6a0b7ca239a05575d1ea2738768c
|
refs/heads/master
| 2021-01-23T01:17:34.305328
| 2016-04-29T20:28:18
| 2016-04-29T20:28:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,320
|
gypi
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file contains an action which can be used to construct a manifest file
# with the same name as the version directory so that chrome.exe identifies the
# version directory as an assembly. This will be copied over to the version
# directory by the installer script.
# To use this the following variables need to be defined:
# version_path: string: path to file containing version data (e.g.
# chrome/VERSION).
# version_py_path: string: path to file containing version script (e.g.
# build/util/version.py).
# version_full: string: version string in W.X.Y.Z form.
{
'variables': {
'template_input_path':
'<(DEPTH)/chrome/app/version_assembly/version_assembly_manifest.template',
},
'inputs': [
'<(template_input_path)',
'<(version_path)',
],
'outputs': [
'<(PRODUCT_DIR)/<(version_full).manifest',
],
'action': [
'python', '<(version_py_path)',
'-f', '<(version_path)',
'-f', '<(vivaldi_version_path)',
'-e', 'VIVALDI_BUILD=<(vivaldi_global_build_number)',
'<(template_input_path)',
'<@(_outputs)',
],
'message': 'Generating <@(_outputs)',
}
|
[
"jason@theograys.com"
] |
jason@theograys.com
|
7c856f16ff140fd40cdc6fa09daa1ff4a5e60ff0
|
9f2445e9a00cc34eebcf3d3f60124d0388dcb613
|
/2019-12-24-Parametersearch_justNaKDR_fixed kinetics/Modelparameters_temp/outputModels_dict_3474228170.py
|
ae7e4e7517d1ae469fb53980d9e13a3d1cb1b8ad
|
[] |
no_license
|
analkumar2/Thesis-work
|
7ee916d71f04a60afbd117325df588908518b7d2
|
75905427c2a78a101b4eed2c27a955867c04465c
|
refs/heads/master
| 2022-01-02T02:33:35.864896
| 2021-12-18T03:34:04
| 2021-12-18T03:34:04
| 201,130,673
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,586
|
py
|
# exec(open('Modelparameters_temp/outputModels_dict_3474228170.py').read())
Models = {}
Models['Model1'] = {'Error': 1.419837827938917, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.9204758566959047e-10, 'Rm': 337742349.7398348, 'Em': -0.0749537142056817}, 'Channels': {'Na_Chan': {'Gbar': 1.1307469614765005e-05, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.052877798990324965}, 'K_DR_Chan': {'Gbar': 1.9983285091874207e-07, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09076415709263243}}}}
Models['Model2'] = {'Error': 1.6193118549981724, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.0316669641127651e-10, 'Rm': 788034653.3574164, 'Em': -0.08132009562736825}, 'Channels': {'Na_Chan': {'Gbar': 1.3308443635200184e-05, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.09478050137911642}, 'K_DR_Chan': {'Gbar': 9.747822230913774e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08118127732913177}}}}
Models['Model3'] = {'Error': 0.9827506131142281, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.3282396957895702e-10, 'Rm': 753802728.2159829, 'Em': -0.07559526085967141}, 'Channels': {'Na_Chan': {'Gbar': 5.151174870080978e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.08160544653740835}, 'K_DR_Chan': {'Gbar': 1.9544800050207157e-07, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09306726894317391}}}}
Models['Model4'] = {'Error': 0.5871125242600386, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.986089017007014e-10, 'Rm': 452571113.5707114, 'Em': -0.07297721176984498}, 'Channels': {'Na_Chan': {'Gbar': 2.352261593949365e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.08785130395786099}, 'K_DR_Chan': {'Gbar': 3.702666798101107e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09467746228010339}}}}
Models['Model5'] = {'Error': 1.4233434710567927, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.9464965022515397e-10, 'Rm': 208611379.4928552, 'Em': -0.0777647783337698}, 'Channels': {'Na_Chan': {'Gbar': 1.115780880695695e-05, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07773711578253037}, 'K_DR_Chan': {'Gbar': 1.4283587466582212e-07, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08269240569672898}}}}
Models['Model6'] = {'Error': 1.7985200968562203, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.0765928525021928e-10, 'Rm': 858795616.3021265, 'Em': -0.08103348138524567}, 'Channels': {'Na_Chan': {'Gbar': 1.2487867599387614e-05, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.0656467643819845}, 'K_DR_Chan': {'Gbar': 9.218025344383141e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08000490714597971}}}}
Models['Model7'] = {'Error': 1.303035090724178, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.8806373534123978e-10, 'Rm': 697772632.401678, 'Em': -0.041031437136541046}, 'Channels': {'Na_Chan': {'Gbar': 2.2605850044167385e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.09595766356513609}, 'K_DR_Chan': {'Gbar': 2.5952657263292243e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09157685771814203}}}}
Models['Model8'] = {'Error': 1.945837897765706, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.5126377195006043e-10, 'Rm': 729193107.9823704, 'Em': -0.08209081970790558}, 'Channels': {'Na_Chan': {'Gbar': 1.3898664225709352e-05, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.09796441667468059}, 'K_DR_Chan': {'Gbar': 8.370071948777725e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08186132640915719}}}}
Models['Model9'] = {'Error': 1.0978676147871804, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.9648130192430767e-10, 'Rm': 850295124.5641676, 'Em': -0.0800220493392245}, 'Channels': {'Na_Chan': {'Gbar': 4.187256604883301e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.08440153408853598}, 'K_DR_Chan': {'Gbar': 2.5721391354182192e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09147143398772167}}}}
Models['Model10'] = {'Error': 0.039659067014619666, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 8.451008659418782e-11, 'Rm': 954791879.399444, 'Em': -0.0541593889726902}, 'Channels': {'Na_Chan': {'Gbar': 1.3949118056942082e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.09559929336410818}, 'K_DR_Chan': {'Gbar': 6.700418224517404e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08888329315854383}}}}
Models['Model11'] = {'Error': 0.10897732842995046, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.6588245094461933e-10, 'Rm': 966353286.2459922, 'Em': -0.05785840974385293}, 'Channels': {'Na_Chan': {'Gbar': 2.2611090757117447e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.08387201547078699}, 'K_DR_Chan': {'Gbar': 2.401040488731951e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09229797393466789}}}}
Models['Model12'] = {'Error': 1.5820921609836105, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.1639557662906249e-10, 'Rm': 912223286.2777464, 'Em': -0.08310729146489661}, 'Channels': {'Na_Chan': {'Gbar': 5.423245732940308e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.05511071320850202}, 'K_DR_Chan': {'Gbar': 6.593697472180598e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08200504955418032}}}}
Models['Model13'] = {'Error': 0.6226492715183406, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.7523760615206106e-10, 'Rm': 376538671.2773571, 'Em': -0.07472693045504353}, 'Channels': {'Na_Chan': {'Gbar': 7.406807402859963e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07067470047445029}, 'K_DR_Chan': {'Gbar': 2.387194212895543e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08234995395961686}}}}
Models['Model14'] = {'Error': 0.9527227757398194, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.2247104592434724e-10, 'Rm': 713068508.3480333, 'Em': -0.07361673584105075}, 'Channels': {'Na_Chan': {'Gbar': 7.901703691347777e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.05456077646744808}, 'K_DR_Chan': {'Gbar': 3.920314888029509e-07, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.0808444466000882}}}}
Models['Model15'] = {'Error': 1.3154357137403614, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 8.640549389926698e-11, 'Rm': 993655555.3563042, 'Em': -0.08091968468322452}, 'Channels': {'Na_Chan': {'Gbar': 1.2017353290157207e-05, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.06337520921083938}, 'K_DR_Chan': {'Gbar': 1.24271375923462e-05, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08089678796435706}}}}
Models['Model16'] = {'Error': 1.6884083177405307, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.758720791205937e-10, 'Rm': 643715907.3068905, 'Em': -0.08022540566867073}, 'Channels': {'Na_Chan': {'Gbar': 4.833640093735622e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.05942914122471426}, 'K_DR_Chan': {'Gbar': 5.731896382363442e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09529690806238304}}}}
Models['Model17'] = {'Error': 0.6108283280432321, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.7616406247553514e-10, 'Rm': 995599470.0611521, 'Em': -0.07316587297557442}, 'Channels': {'Na_Chan': {'Gbar': 4.1830387738716374e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.09445976897541816}, 'K_DR_Chan': {'Gbar': 5.8077788103179394e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08878414563121813}}}}
Models['Model18'] = {'Error': 1.5559527456734155, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.2124068459865816e-10, 'Rm': 864006239.3831329, 'Em': -0.07918828779654388}, 'Channels': {'Na_Chan': {'Gbar': 1.5566181904360733e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.06561931932355977}, 'K_DR_Chan': {'Gbar': 1.1092225350220656e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08276840502165883}}}}
Models['Model19'] = {'Error': 1.798987530121205, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.294839755989419e-10, 'Rm': 760774003.6426685, 'Em': -0.08343354792304333}, 'Channels': {'Na_Chan': {'Gbar': 1.0860631749702075e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.05339953867896728}, 'K_DR_Chan': {'Gbar': 3.786114383447181e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08525164196650657}}}}
Models['Model20'] = {'Error': 0.01601204756253395, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.6185750092611338e-10, 'Rm': 337706398.8613654, 'Em': -0.0642501871513868}, 'Channels': {'Na_Chan': {'Gbar': 3.1432386723602914e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.06839414878682136}, 'K_DR_Chan': {'Gbar': 3.611175556688616e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08710507634411792}}}}
Models['Model21'] = {'Error': 1.443789056030713, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.800790734236058e-10, 'Rm': 705070064.2134507, 'Em': -0.08034174877960773}, 'Channels': {'Na_Chan': {'Gbar': 3.8616635973946434e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.052913513508096754}, 'K_DR_Chan': {'Gbar': 1.3647471366483926e-05, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.0801205657278521}}}}
Models['Model22'] = {'Error': 0.5442802302954188, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.4406339103577736e-10, 'Rm': 392134400.9085014, 'Em': -0.07161480432268683}, 'Channels': {'Na_Chan': {'Gbar': 2.7716559199080253e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07820929027263789}, 'K_DR_Chan': {'Gbar': 5.0361357546124555e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09346476774010809}}}}
Models['Model23'] = {'Error': 1.4700903138305046, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.24895495097443e-10, 'Rm': 542526619.7358536, 'Em': -0.0799579390135484}, 'Channels': {'Na_Chan': {'Gbar': 7.135694844952374e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.05690040010906017}, 'K_DR_Chan': {'Gbar': 9.305122832942497e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08043793874893794}}}}
Models['Model24'] = {'Error': 0.6970424470345439, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.9005956885337377e-10, 'Rm': 198003597.70230156, 'Em': -0.07361352521687117}, 'Channels': {'Na_Chan': {'Gbar': 6.840541262227081e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07619613498885514}, 'K_DR_Chan': {'Gbar': 2.3933198244645175e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08218471328740176}}}}
Models['Model25'] = {'Error': 1.9494589377435074, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 9.18361409320687e-11, 'Rm': 852768274.9683672, 'Em': -0.08487360306635214}, 'Channels': {'Na_Chan': {'Gbar': 1.9738858164602058e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07267917209634422}, 'K_DR_Chan': {'Gbar': 4.620877685562946e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08127852000548637}}}}
Models['Model26'] = {'Error': 1.9696493557829982, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.9587897874995014e-10, 'Rm': 595629541.2258523, 'Em': -0.0826512304587218}, 'Channels': {'Na_Chan': {'Gbar': 5.826870970607293e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.08455816515785271}, 'K_DR_Chan': {'Gbar': 1.2019161796887787e-05, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08120211993142709}}}}
Models['Model27'] = {'Error': 0.6525128412926475, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.6664576879856996e-10, 'Rm': 280863839.41191304, 'Em': -0.07489250839895878}, 'Channels': {'Na_Chan': {'Gbar': 3.7201057434091317e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07507390602097719}, 'K_DR_Chan': {'Gbar': 7.236798895323412e-08, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08887476690080148}}}}
Models['Model28'] = {'Error': 1.0129162831506802, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.4925862972523648e-10, 'Rm': 503229799.43109375, 'Em': -0.0773323382797576}, 'Channels': {'Na_Chan': {'Gbar': 3.3764653144003634e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.08510428339940457}, 'K_DR_Chan': {'Gbar': 2.1313271091187957e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09807718149457285}}}}
Models['Model29'] = {'Error': 0.3303256371413619, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.778239566179671e-10, 'Rm': 925327745.3140541, 'Em': -0.07212775062503496}, 'Channels': {'Na_Chan': {'Gbar': 2.7044317223965357e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.09874158404250172}, 'K_DR_Chan': {'Gbar': 4.078098255436322e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08443086829954276}}}}
Models['Model30'] = {'Error': 1.719986742097222, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.0334107699982469e-10, 'Rm': 605980748.2658067, 'Em': -0.08249686170933956}, 'Channels': {'Na_Chan': {'Gbar': 4.267320331756272e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.06521616444426756}, 'K_DR_Chan': {'Gbar': 4.348725902338714e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08014270274962826}}}}
Models['Model31'] = {'Error': 1.7157379887600683, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.8808937832974398e-10, 'Rm': 402668492.76130944, 'Em': -0.08100210567168653}, 'Channels': {'Na_Chan': {'Gbar': 4.005789719540257e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07529698859847032}, 'K_DR_Chan': {'Gbar': 9.789272091933482e-07, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08226867575024621}}}}
Models['Model32'] = {'Error': 1.389375392882973, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.3907792628056447e-10, 'Rm': 783751070.9190366, 'Em': -0.08063353783088704}, 'Channels': {'Na_Chan': {'Gbar': 1.088003891017425e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.09797910954366124}, 'K_DR_Chan': {'Gbar': 9.091809003459845e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08128670878199482}}}}
Models['Model33'] = {'Error': 1.8523150990820836, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.6126448275500684e-10, 'Rm': 840822686.0770975, 'Em': -0.07939095868468321}, 'Channels': {'Na_Chan': {'Gbar': 2.3654982967323394e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07693238530405394}, 'K_DR_Chan': {'Gbar': 3.0968788241141153e-07, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.0847050600897313}}}}
Models['Model34'] = {'Error': 0.7687370023086375, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.227304999415173e-10, 'Rm': 774522978.7250155, 'Em': -0.07677536925839226}, 'Channels': {'Na_Chan': {'Gbar': 9.048037580143577e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.07324886721128882}, 'K_DR_Chan': {'Gbar': 1.2623492628431014e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08193313040424938}}}}
Models['Model35'] = {'Error': 0.07008067359551517, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.805787624516205e-10, 'Rm': 710798553.9121771, 'Em': -0.06408475130895788}, 'Channels': {'Na_Chan': {'Gbar': 3.62670784833539e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.06143101423853191}, 'K_DR_Chan': {'Gbar': 3.049060490244417e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.09747268862916617}}}}
Models['Model36'] = {'Error': 1.2680726221407648, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.4633100823731958e-10, 'Rm': 557438407.578701, 'Em': -0.07892933390485037}, 'Channels': {'Na_Chan': {'Gbar': 6.010962620312548e-07, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.06430120716269569}, 'K_DR_Chan': {'Gbar': 5.547237831019964e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08451219011531966}}}}
Models['Model37'] = {'Error': 1.659491970989038, 'parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.9415508839074088e-10, 'Rm': 971644114.1663289, 'Em': -0.0840459560591935}, 'Channels': {'Na_Chan': {'Gbar': 2.091850707648537e-06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_(Migliore2018)', 'Erev': 0.08467794091302303}, 'K_DR_Chan': {'Gbar': 5.046198511312119e-06, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_(Migliore2018)', 'Erev': -0.08370921729684225}}}}
|
[
"analkumar2@gmail.com"
] |
analkumar2@gmail.com
|
9180653fb1d4e9ff99440afcb90b5fd33e993765
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_365/run_cfg.py
|
a517d025b507290027fe35f47979dd948e53c8ae
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_766.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_767.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_768.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_769.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_77.root')
)
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
b1377b4cdd513c2e3e41267c071ab687b5173900
|
9d7501001c8f446625a84775aaebc16b01cdd6ad
|
/Scripts/test_17.py
|
dbddfd5d3cf995f3b1e44611f1e91fc904d8f252
|
[] |
no_license
|
Wang5247/pytest017
|
e95e3cea7bd02b29039d7d7b71beee1f0f42780f
|
edc6cedb1ec6efe80793b8c40bcfef59a77dd004
|
refs/heads/master
| 2022-12-15T12:18:15.629661
| 2020-09-14T08:44:24
| 2020-09-14T08:44:24
| 295,356,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
class Test017:
def test_017(self):
print("test_017")
|
[
"1721029461@qq.com"
] |
1721029461@qq.com
|
131828766b7f4a327f7e1366069d07203613f579
|
b5262cea6ce3fdea0f0caa9c4157d62418799bf9
|
/sitio_web/restaurantes/models.py
|
4a00d14500834383dd67769917156976bbe1f900
|
[] |
no_license
|
rbnuria/DAI
|
25a44a0ae412fc7256564f5e7cccbca0c086af5d
|
a1631a82630a1de9d3ee50762633fdd957dd8943
|
refs/heads/master
| 2021-03-27T10:25:38.453189
| 2018-04-06T11:07:28
| 2018-04-06T11:07:28
| 116,402,304
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from django.db import models
# Create your models here.
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.test #base de dato
restaurantes = db.restaurants #colección
|
[
"rbnuria6@gmail.com"
] |
rbnuria6@gmail.com
|
105ecdf7a691354731b96148abcd5e7f9a770273
|
d364123a0655bff7e9d725382934fe2c15b5bfc4
|
/venv/Lib/site-packages/borax/patterns/lazy.py
|
7ad0a3447e458468bc14f2c43704d2f208df7b18
|
[] |
no_license
|
yuan1093040152/SeleniumTest
|
88d75361c8419354f56856c326f843a0a89d7ca6
|
d155b98702bc46c174499042b43257696b861b5e
|
refs/heads/master
| 2023-08-31T15:00:25.415642
| 2023-08-30T09:26:42
| 2023-08-30T09:26:42
| 227,269,300
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,488
|
py
|
# coding=utf8
"""
A Lazy Creator for a Object.
"""
import operator
__all__ = ['LazyObject']
EMPTY = object()
def proxy_method(func):
def inner(self, *args):
if self._wrapped is EMPTY:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject(object):
_wrapped = None
def __init__(self, func, args=None, kwargs=None):
self.__dict__['_setupfunc'] = func
self.__dict__['_args'] = args or []
self.__dict__['_kwargs'] = kwargs or {}
self._wrapped = EMPTY
def _setup(self):
self._wrapped = self._setupfunc(*self._args, **self._kwargs)
__getattr__ = proxy_method(getattr)
def __setattr__(self, key, value):
if key == '_wrapped':
self.__dict__['_wrapped'] = value
else:
if self._wrapped is EMPTY:
self._setup()
setattr(self._wrapped, key, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is EMPTY:
self._setup()
delattr(self._wrapped, name)
__getitem__ = proxy_method(operator.getitem)
__class__ = property(proxy_method(operator.attrgetter("__class__")))
__eq__ = proxy_method(operator.eq)
__ne__ = proxy_method(operator.ne)
__hash__ = proxy_method(hash)
__bytes__ = proxy_method(bytes)
__str__ = proxy_method(str)
__bool__ = proxy_method(bool)
|
[
"1093040152@qq.com"
] |
1093040152@qq.com
|
c24b5cf5c6bed8dfe29babd7153800bcf4d21d26
|
03f0a82e829a5711a9165d8f7d3762ca0c1ceaea
|
/ahgl/apps/tournaments/migrations/0017_auto__add_field_tournamentround_wins__add_field_tournamentround_losses.py
|
a85bf8daf00cadaa573af08f7778f93216006bbc
|
[
"BSD-2-Clause"
] |
permissive
|
day9tv/ahgl
|
4d273a39e06334cc15eb12031de0a806366396b9
|
5e06cfecb28c153c1b83ef89112fc217897131cb
|
refs/heads/master
| 2021-01-22T08:32:53.663312
| 2012-10-06T21:32:52
| 2012-10-06T21:32:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,453
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the tournaments app.

    Adds ``wins``, ``losses`` and ``tiebreaker`` integer columns to the
    ``tournaments_tournamentround_teams`` through-table (the
    ``TeamRoundMembership`` model) and alters ``Match.creation_date``
    (the backwards step restores its ``auto_now_add=True`` form).
    """

    def forwards(self, orm):
        """Apply the migration: add the three columns, relax creation_date."""
        # Adding field 'TeamRoundMembership.wins'
        db.add_column('tournaments_tournamentround_teams', 'wins', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)

        # Adding field 'TeamRoundMembership.losses'
        db.add_column('tournaments_tournamentround_teams', 'losses', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)

        # Adding field 'TeamRoundMembership.tiebreaker'
        db.add_column('tournaments_tournamentround_teams', 'tiebreaker', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)

        # Changing field 'Match.creation_date'
        db.alter_column('tournaments_match', 'creation_date', self.gf('django.db.models.fields.DateField')())

    def backwards(self, orm):
        """Revert the migration: drop the columns, restore auto_now_add."""
        # Deleting field 'TeamRoundMembership.wins'
        db.delete_column('tournaments_tournamentround_teams', 'wins')

        # Deleting field 'TeamRoundMembership.losses'
        db.delete_column('tournaments_tournamentround_teams', 'losses')

        # Deleting field 'TeamRoundMembership.tiebreaker'
        db.delete_column('tournaments_tournamentround_teams', 'tiebreaker')

        # Changing field 'Match.creation_date'
        db.alter_column('tournaments_match', 'creation_date', self.gf('django.db.models.fields.DateField')(auto_now_add=True))

    # Frozen ORM state auto-generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.charity': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Charity'},
            'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'logo': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'profiles.profile': {
            'Meta': {'object_name': 'Profile'},
            'autosubscribe': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'avatar': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'bnet_profile': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'char_code': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'char_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'custom_thumb': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '10', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'photo': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'questions_answers': ('profiles.fields.HTMLField', [], {'attributes': '[]', 'blank': 'True', 'tags': "['ol', 'ul', 'li', 'strong', 'em', 'p']"}),
            'race': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'show_signatures': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
            'signature_html': ('django.db.models.fields.TextField', [], {'max_length': '1054', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'time_zone': ('django.db.models.fields.FloatField', [], {'default': '3.0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'profiles.team': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('name', 'tournament'), ('slug', 'tournament'))", 'object_name': 'Team'},
            'captain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'captain_of'", 'null': 'True', 'to': "orm['profiles.Profile']"}),
            'charity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['profiles.Charity']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'losses': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['profiles.Profile']"}),
            'motto': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'photo': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'rank': ('django.db.models.fields.IntegerField', [], {}),
            'seed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'tiebreaker': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'tournament': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'teams'", 'to': "orm['tournaments.Tournament']"}),
            'wins': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'tournaments.game': {
            'Meta': {'ordering': "('order',)", 'unique_together': "(('order', 'match'),)", 'object_name': 'Game'},
            'away_player': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'away_games'", 'null': 'True', 'to': "orm['profiles.Profile']"}),
            'away_race': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'forfeit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'home_player': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'home_games'", 'null': 'True', 'to': "orm['profiles.Profile']"}),
            'home_race': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_ace': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'loser': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'game_losses'", 'null': 'True', 'to': "orm['profiles.Profile']"}),
            'loser_team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'game_losses'", 'null': 'True', 'to': "orm['profiles.Team']"}),
            'map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tournaments.Map']"}),
            'match': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'games'", 'to': "orm['tournaments.Match']"}),
            'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'replay': ('django.db.models.fields.files.FileField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'vod': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'winner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'game_wins'", 'null': 'True', 'to': "orm['profiles.Profile']"}),
            'winner_team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'game_wins'", 'null': 'True', 'to': "orm['profiles.Team']"})
        },
        'tournaments.map': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Map'},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
            'photo': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        'tournaments.match': {
            'Meta': {'object_name': 'Match'},
            'away_submitted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'away_team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'away_matches'", 'to': "orm['profiles.Team']"}),
            'creation_date': ('django.db.models.fields.DateField', [], {}),
            'home_submitted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'home_team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'home_matches'", 'to': "orm['profiles.Team']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'loser': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'match_losses'", 'null': 'True', 'to': "orm['profiles.Team']"}),
            'publish_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'referee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Profile']", 'null': 'True', 'blank': 'True'}),
            'tournament': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'matches'", 'to': "orm['tournaments.Tournament']"}),
            'winner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'match_wins'", 'null': 'True', 'to': "orm['profiles.Team']"})
        },
        'tournaments.tournament': {
            'Meta': {'object_name': 'Tournament'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'featured_game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tournaments.Game']", 'null': 'True', 'blank': 'True'}),
            'games_per_match': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
            'map_pool': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['tournaments.Map']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True', 'db_index': 'True'})
        },
        'tournaments.tournamentround': {
            'Meta': {'ordering': "('stage', 'name')", 'object_name': 'TournamentRound'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'stage': ('django.db.models.fields.IntegerField', [], {}),
            'structure': ('django.db.models.fields.CharField', [], {'default': "'G'", 'max_length': '1'}),
            'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'rounds'", 'symmetrical': 'False', 'to': "orm['profiles.Team']"}),
            'tournament': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rounds'", 'to': "orm['tournaments.Tournament']"}),
        }
    }

    complete_apps = ['tournaments']
|
[
"me@ntucker.me"
] |
me@ntucker.me
|
b901ea41ff6540c9e2f1d67b3a622b92a666448f
|
788fc345bb297ee717ba192e00e8af951bc5d300
|
/senstech/senstech/doctype/senstech_klassen/senstech_klassen.py
|
74301a10003a9aa360cbc3926846644686484356
|
[
"MIT"
] |
permissive
|
alkuhlani/senstech
|
41aa3ad8b5e25bd66ac472875021f72e3a1259ad
|
84082b0dcbb2d5d2618f7172db3bb6e416977e07
|
refs/heads/master
| 2023-05-13T11:03:45.135113
| 2021-06-08T18:11:21
| 2021-06-08T18:11:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class SenstechKlassen(Document):
    """Frappe DocType controller for "Senstech Klassen".

    No custom server-side behaviour; the framework's Document base class
    provides all CRUD handling.
    """
    pass
|
[
"joel.mesmer@libracore.com"
] |
joel.mesmer@libracore.com
|
86d60d58012df0a71099b30eedc68ae160d577cf
|
824230143f53003ec4b6f12411829b4208679f39
|
/coding-practice/Tree/lowest_common_ancestor.py
|
87aa93a09abd567b60993765b9415b4e120d1d5f
|
[
"MIT"
] |
permissive
|
sayak1711/coding_solutions
|
af29cdaee2134e3b960d65059a0ed57154895478
|
1001e80f9a528f2bd0dcaf30df2cd441280781ba
|
refs/heads/master
| 2023-05-25T17:03:31.390821
| 2023-05-02T05:32:17
| 2023-05-02T05:32:17
| 80,296,568
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
class Node:
    """A binary-tree node: a value plus left/right child links (None = leaf)."""

    def __init__(self, val):
        self.val = val
        self.left = self.right = None
# since it is BST they are in sorted order
# they either lie on same side or on opposite side
# if they are on opposite side of root then root is the LCA
# since it is BST they are in sorted order
# they either lie on same side or on opposite side
# if they are on opposite side of root then root is the LCA
def lca(root, node1, node2):
    """Return the lowest common ancestor of node1 and node2 in a BST.

    Walks down from ``root``: while both target values lie strictly on the
    same side of the current node, descend toward them.  The first node where
    the values split — or where one value equals the current node's value —
    is the LCA.  Comparison uses ``.val`` only, so detached nodes with
    matching values work as queries.

    Returns None for an empty tree (previously this raised AttributeError).
    """
    # Product > 0  <=>  both values are strictly on the same side of root.
    while root is not None and (root.val - node1.val) * (root.val - node2.val) > 0:
        root = root.left if node1.val < root.val else root.right
    return root  # values straddle root (or one equals it): root is the LCA
'''
5
4 6
3 7
8
'''
tree = Node(5)
tree.left = Node(4)
tree.left.left = Node(3)
tree.right = Node(6)
tree.right.right = Node(7)
tree.right.right.right = Node(8)
ans = lca(tree, Node(3), Node(8))
print(f'LCA is {ans.val}')
'''
6
2 8
0 4 7 9
3 5
'''
tree = Node(6)
tree.left = Node(2)
tree.right = Node(8)
tree.left.left = Node(0)
tree.left.right = Node(4)
tree.right.left = Node(7)
tree.right.right = Node(9)
tree.left.right.left = Node(3)
tree.left.right.right = Node(5)
ans = lca(tree, Node(8), Node(9))
print(f'LCA is {ans.val}')
|
[
"96sayak@gmail.com"
] |
96sayak@gmail.com
|
c5323ef71a9301c277ba11cb312e25abebec563e
|
e77c5920fc95cc3c2ed418b596f778a9d0fe9567
|
/Computer Security 2019/final/crypto/Train/server.py
|
41a179d8b70ad6487c0568b2d078e6abb563fcd0
|
[] |
no_license
|
ktpss95112/Practices
|
af55849524da35efbe82c93da7c180dd01a954c3
|
45e95a134ac706c4652f93b04c63eab3dfbe13ad
|
refs/heads/master
| 2022-12-22T23:34:59.796247
| 2022-07-10T13:25:28
| 2022-07-10T13:25:28
| 166,650,609
| 2
| 0
| null | 2022-12-11T20:29:09
| 2019-01-20T10:51:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
#!/usr/bin/env python3
import os
from Crypto.Util.number import *
with open('flag', 'r') as f:
flag = f.read()
def genkeys():
    """Create a 1024-bit RSA key pair with public exponent 3.

    Draws fresh 512-bit primes until phi(n) is coprime to e, then returns
    ``((n, e), (n, d))`` — public and private halves sharing the modulus.
    """
    e = 3
    while True:
        p = getPrime(512)
        q = getPrime(512)
        n = p * q
        phi = (p - 1) * (q - 1)
        if GCD(e, phi) != 1:
            continue  # e must be invertible mod phi; try new primes
        return (n, e), (n, inverse(e, phi))
class RSA:
    """Toy RSA with a home-made CBC-style chaining mode.

    Encryption adds the previous ciphertext block (initially the IV) to each
    16-byte plaintext chunk before exponentiation mod n; decryption inverts
    the chain with the private exponent.  Block layout: 128-byte IV followed
    by 128-byte ciphertext blocks.
    """
    MODE_CBC = 1  # the only implemented mode

    def __init__(self, key, mode):
        # ``key`` is a ((n, e), (n, d)) pair as produced by genkeys().
        self.pub = key[0]
        self.pri = key[1]
        self.mode = mode

    @classmethod
    def new(cls, key, mode):
        """PyCrypto-style alternate constructor."""
        return cls(key, mode)

    def encrypt(self, plain):
        """Encrypt ``plain`` (bytes); return IV ++ ciphertext blocks."""
        if self.mode == self.MODE_CBC:
            n, e = self.pub
            iv = os.urandom(128)
            prev, cipher = bytes_to_long(iv), b''
            for i in range(0, len(plain), 16):
                # Chain: previous ciphertext block + next 16-byte chunk, mod n.
                x = (prev + bytes_to_long(plain[i:i+16])) % n
                prev = pow(x, e, n)
                cipher += long_to_bytes(prev)
            return iv + cipher
        else:
            raise NotImplementedError

    def decrypt(self, cipher):
        """Invert encrypt().  NOTE: silently returns None for other modes."""
        if self.mode == self.MODE_CBC:
            n, d = self.pri
            iv, cipher = cipher[:128], cipher[128:]
            prev, plain = bytes_to_long(iv), b''
            for i in range(0, len(cipher), 128):
                x = pow(bytes_to_long(cipher[i:i+128]), d, n)
                # Subtract the previous chain value to recover the chunk.
                plain += long_to_bytes((x - prev) % n)
                prev = bytes_to_long(cipher[i:i+128])
            return plain
def menu():
    """Print the ticket-shop menu banner and the three command options."""
    banner = f'{"Want to buy some train tickets? ":=^20}'
    options = ('1) your ticket', '2) use ticket', '3) exit')
    print(banner)
    for option in options:
        print(option)
def show(rsa):
    """Issue a ticket: print the public key and a freshly encrypted ticket.

    The plaintext embeds the fixed date, a random 10-hex-char session id and
    the module-level ``flag``, so every ticket is an encryption of the flag.
    """
    session = os.urandom(5).hex()
    (n, e), c = rsa.pub, rsa.encrypt(f'date:2019/1/11|session:{session}|secret:{flag}'.encode())
    print(f'n = {n}')
    print(f'e = {e}')
    print(f'ticket = {c.hex()}')
def use(rsa):
    """Validate a hex-encoded ticket supplied on stdin.

    Decrypts the ticket, splits it on ``|`` into date/session/secret and
    accepts it only when the date field matches 2019/1/11.  Any decryption
    or parsing failure is reported as a generic technical issue.
    """
    cipher = bytes.fromhex(input('ticket = '))
    try:
        plain = rsa.decrypt(cipher)
        date, session, secret = plain.split(b'|')
        if date.partition(b':')[2] == b'2019/1/11':
            print('Pass')
        else:
            print('Wrong ticket')
    # Bug fix: was a bare ``except:``, which also swallowed
    # KeyboardInterrupt/SystemExit; only genuine errors should be masked.
    except Exception:
        print('Oops, our train has some technical issue')
def main():
    """Run the interactive loop: 1 = issue ticket, 2 = verify, else quit."""
    pub, pri = genkeys()
    rsa = RSA.new((pub, pri), RSA.MODE_CBC)
    while True:
        menu()
        cmd = input('> ')
        if cmd == '1':
            show(rsa)
        elif cmd == '2':
            use(rsa)
        else:
            # Any other input terminates the server.
            print('I have spoken')
            return
# Guard the entry point so importing this module does not start the
# interactive server loop (previously main() ran unconditionally on import).
if __name__ == '__main__':
    main()
|
[
"ktpss95112@yahoo.com.tw"
] |
ktpss95112@yahoo.com.tw
|
1fba2badf65b79d216de3dfc1cc5bdbce4ead09a
|
8f34497cbb3313d8da2f751f65fa16f658ce0ded
|
/task4.py
|
96beac5f035fe5664bc85bc1ab288f22bbe64d39
|
[] |
no_license
|
jc345932/programming
|
8682d1c5fb316b0573b7005f888210567a66bb68
|
5bb812fc2b0519a147c9d047ecee745778198ccb
|
refs/heads/master
| 2020-07-03T19:23:59.544735
| 2016-08-31T06:47:36
| 2016-08-31T06:47:36
| 67,005,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
def calcItem():
    """Prompt for stock figures and return the end-of-day total.

    Bug fix: the function previously declared a parameter named ``input``
    (shadowing the builtin) yet its only caller invoked ``calcItem()`` with
    no arguments, which raised TypeError; the parameter is removed so the
    builtin ``input`` is used and the call site works.
    """
    initial = int(input("Enter the initial stock:"))
    bought = int(input("Enter how many we bought:"))
    sold = int(input("Enter how many we sold:"))
    total = initial + bought - sold
    return total
def main():
    """Entry point: compute the day's total via calcItem and report it."""
    totalCost = calcItem()
    print("The total cost of the product at the end of the day is ", totalCost)


# Idiom fix: guard the entry point so importing this script does not
# immediately prompt for input.
if __name__ == "__main__":
    main()
|
[
"junnao.luo@my.jcu.edu.au"
] |
junnao.luo@my.jcu.edu.au
|
467a37a4c8b531cc5736533e1bfa9642cc7415e2
|
5600178b3efa23159b05fe0621c8d8aa91dcf16c
|
/samples/cupy_test.py
|
2e9f2d940b30bfcb5d6dd4086c6eb4dcc138170d
|
[] |
no_license
|
ZWJ-here/maskrcnn-keypoint3d
|
409cdcbe0d53a10a2121ce5906fb0cc01f3062af
|
48ca3789be6210dfae361d6f6a3323f518a2445d
|
refs/heads/master
| 2022-02-19T01:19:35.310696
| 2019-09-12T06:58:38
| 2019-09-12T06:58:38
| 220,231,299
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
import numpy as np
import cupy as cp
import time
# Micro-benchmark: broadcast-multiply a (1024, 512, 4, 4) array by a
# (1024, 512, 4, 1) array, 20 times, first on the CPU with NumPy.
x=np.ones((1024,512,4,4))*1024.
y=np.ones((1024,512,4,1))*512.3254
time1=time.time()
for i in range(20):
    z=x*y
print('average time for 20 times cpu:',(time.time()-time1)/20.)

# Same computation on the GPU with CuPy.
# NOTE(review): CuPy kernels launch asynchronously; without a device
# synchronize before reading the clock the GPU timing may be undercounted.
x=cp.ones((1024,512,4,4))*1024.
y=cp.ones((1024,512,4,1))*512.3254
time1=time.time()
for i in range(20):
    z=x*y
print('average time for 20 times gpu:',(time.time()-time1)/20.)
|
[
"xixiaoyan@focuschina.com"
] |
xixiaoyan@focuschina.com
|
97b86ce976f5a392e07c9c4f063f6e33694e068a
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/hardware-testing/hardware_testing/production_qc/gripper_assembly_qc_ot3/test_force.py
|
7da282fc77af1524104d1f53a043b4aa362af884
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 8,022
|
py
|
"""Test Force."""
from asyncio import sleep
from typing import List, Union, Tuple, Optional
from opentrons.hardware_control.ot3api import OT3API
from opentrons.hardware_control.types import GripperJawState
from hardware_testing.drivers import find_port, list_ports_and_select
from hardware_testing.drivers.mark10 import Mark10, SimMark10
from hardware_testing.data.csv_report import (
CSVReport,
CSVLine,
CSVResult,
CSVLineRepeating,
)
from hardware_testing.data import ui
from hardware_testing.opentrons_api import helpers_ot3
from hardware_testing.opentrons_api.types import Axis, OT3Mount, Point
SLOT_FORCE_GAUGE = 4
GRIP_DUTY_CYCLES: List[int] = [40, 30, 25, 20, 15, 10, 6]
NUM_DUTY_CYCLE_TRIALS = 20
GRIP_FORCES_NEWTON: List[int] = [20, 15, 10, 5]
NUM_NEWTONS_TRIALS = 1
FAILURE_THRESHOLD_PERCENTAGES = [10, 10, 10, 20]
WARMUP_SECONDS = 10
FORCE_GAUGE_TRIAL_SAMPLE_INTERVAL = 0.25 # seconds
FORCE_GAUGE_TRIAL_SAMPLE_COUNT = 20 # 20 samples = 5 seconds @ 4Hz
GAUGE_OFFSET = Point(x=2, y=-42, z=75)
def _get_test_tag(
trial: int,
newtons: Optional[int] = None,
duty_cycle: Optional[int] = None,
) -> str:
if newtons and duty_cycle:
raise ValueError("must measure either force or duty-cycle, not both")
if newtons is None and duty_cycle is None:
raise ValueError("both newtons and duty-cycle are None")
if newtons is not None:
return f"newtons-{newtons}-trial-{trial + 1}"
else:
return f"duty-cycle-{duty_cycle}-trial-{trial + 1}"
def _get_gauge(is_simulating: bool) -> Union[Mark10, SimMark10]:
if is_simulating:
return SimMark10()
else:
try:
port = find_port(*Mark10.vid_pid())
except RuntimeError:
port = list_ports_and_select("Mark10 Force Gauge")
print(f"Setting up force gauge at port: {port}")
return Mark10.create(port=port)
def _get_force_gauge_hover_and_grip_positions(api: OT3API) -> Tuple[Point, Point]:
grip_pos = helpers_ot3.get_slot_calibration_square_position_ot3(SLOT_FORCE_GAUGE)
grip_pos += GAUGE_OFFSET
hover_pos = grip_pos._replace(z=api.get_instrument_max_height(OT3Mount.GRIPPER))
return hover_pos, grip_pos
def build_csv_lines() -> List[Union[CSVLine, CSVLineRepeating]]:
"""Build CSV Lines."""
lines: List[Union[CSVLine, CSVLineRepeating]] = list()
for force in GRIP_FORCES_NEWTON:
for trial in range(NUM_NEWTONS_TRIALS):
tag = _get_test_tag(trial, newtons=force)
force_data_types = [float] * FORCE_GAUGE_TRIAL_SAMPLE_COUNT
lines.append(CSVLine(f"{tag}-data", force_data_types))
lines.append(CSVLine(f"{tag}-average", [float]))
lines.append(CSVLine(f"{tag}-target", [float]))
lines.append(CSVLine(f"{tag}-pass-%", [float]))
lines.append(CSVLine(f"{tag}-result", [CSVResult]))
for duty_cycle in GRIP_DUTY_CYCLES:
for trial in range(NUM_DUTY_CYCLE_TRIALS):
tag = _get_test_tag(trial, duty_cycle=duty_cycle)
force_data_types = [float] * FORCE_GAUGE_TRIAL_SAMPLE_COUNT
lines.append(CSVLine(f"{tag}-data", force_data_types))
lines.append(CSVLine(f"{tag}-average", [float]))
lines.append(CSVLine(f"{tag}-duty-cycle", [float]))
return lines
async def _read_forces(gauge: Union[Mark10, SimMark10]) -> List[float]:
n = list()
for _ in range(FORCE_GAUGE_TRIAL_SAMPLE_COUNT):
force = gauge.read_force()
n.append(force)
if not gauge.is_simulator():
await sleep(FORCE_GAUGE_TRIAL_SAMPLE_INTERVAL)
return n
async def _grip_and_read_forces(
api: OT3API,
gauge: Union[Mark10, SimMark10],
force: Optional[int] = None,
duty: Optional[int] = None,
) -> List[float]:
if not api.is_simulator:
await sleep(2) # let sensor settle
if duty is not None:
await api._grip(duty_cycle=float(duty))
api._gripper_handler.set_jaw_state(GripperJawState.GRIPPING)
else:
assert force is not None
await api.grip(float(force))
if gauge.is_simulator():
if duty is not None:
gauge.set_simulation_force(float(duty) * 0.5) # type: ignore[union-attr]
elif force is not None:
gauge.set_simulation_force(float(force)) # type: ignore[union-attr]
ret_list = await _read_forces(gauge)
await api.ungrip()
return ret_list
async def _setup(api: OT3API) -> Union[Mark10, SimMark10]:
z_ax = Axis.Z_G
g_ax = Axis.G
mount = OT3Mount.GRIPPER
# OPERATOR SETS UP GAUGE
ui.print_header("SETUP FORCE GAUGE")
if not api.is_simulator:
ui.get_user_ready(f"add gauge to slot {SLOT_FORCE_GAUGE}")
ui.get_user_ready("plug gauge into USB port on OT3")
gauge = _get_gauge(api.is_simulator)
gauge.connect()
print("test readings")
ret_list = await _read_forces(gauge)
print(ret_list)
# HOME
print("homing Z and G...")
await api.home([z_ax, g_ax])
# MOVE TO GAUGE
await api.ungrip()
_, target_pos = _get_force_gauge_hover_and_grip_positions(api)
await helpers_ot3.move_to_arched_ot3(api, mount, target_pos + Point(z=15))
if not api.is_simulator:
ui.get_user_ready("please make sure the gauge in the middle of the gripper")
await helpers_ot3.jog_mount_ot3(api, OT3Mount.GRIPPER)
if not api.is_simulator:
ui.get_user_ready("about to grip")
await api.grip(20)
for sec in range(WARMUP_SECONDS):
print(f"warmup ({sec + 1}/{WARMUP_SECONDS})")
if not api.is_simulator:
await sleep(1)
await api.ungrip()
return gauge
async def run_increment(api: OT3API, report: CSVReport, section: str) -> None:
"""Run Increment."""
gauge = await _setup(api)
# LOOP THROUGH DUTY-CYCLES
ui.print_header("MEASURE DUTY-CYCLES")
for duty_cycle in GRIP_DUTY_CYCLES:
# GRIP AND MEASURE FORCE
for trial in range(NUM_DUTY_CYCLE_TRIALS):
print(
f"{duty_cycle}% duty cycle - trial {trial + 1}/{NUM_DUTY_CYCLE_TRIALS}"
)
actual_forces = await _grip_and_read_forces(api, gauge, duty=duty_cycle)
print(actual_forces)
avg_force = sum(actual_forces) / len(actual_forces)
print(f"average = {round(avg_force, 2)} N")
tag = _get_test_tag(trial, duty_cycle=duty_cycle)
report(section, f"{tag}-data", actual_forces)
report(section, f"{tag}-average", [avg_force])
report(section, f"{tag}-duty-cycle", [duty_cycle])
print("done")
await api.retract(OT3Mount.GRIPPER)
async def run(api: OT3API, report: CSVReport, section: str) -> None:
"""Run."""
gauge = await _setup(api)
# LOOP THROUGH FORCES
ui.print_header("MEASURE NEWTONS")
for expected_force, allowed_percent_error in zip(
GRIP_FORCES_NEWTON, FAILURE_THRESHOLD_PERCENTAGES
):
for trial in range(NUM_NEWTONS_TRIALS):
print(f"{expected_force}N - trial {trial + 1}/{NUM_NEWTONS_TRIALS}")
actual_forces = await _grip_and_read_forces(
api, gauge, force=expected_force
)
print(actual_forces)
# base PASS/FAIL on average
avg_force = sum(actual_forces) / len(actual_forces)
print(f"average = {round(avg_force, 2)} N")
error = (avg_force - expected_force) / expected_force
result = CSVResult.from_bool(abs(error) * 100 < allowed_percent_error)
# store all data in CSV
tag = _get_test_tag(trial, newtons=expected_force)
report(section, f"{tag}-data", actual_forces)
report(section, f"{tag}-average", [avg_force])
report(section, f"{tag}-target", [expected_force])
report(section, f"{tag}-pass-%", [allowed_percent_error])
report(section, f"{tag}-result", [result])
print("done")
await api.retract(OT3Mount.GRIPPER)
|
[
"noreply@github.com"
] |
Opentrons.noreply@github.com
|
09a9277d2a0674799865d294850b177d744cea52
|
4ad317f50bd6109584d661bc0fbca5886e637c06
|
/convert_poeditor.py
|
f5b23106c916f939729ff5692920ac26f6eca30a
|
[
"MIT"
] |
permissive
|
CyberSys/POEditor-UE4
|
5119ef458f5e933607efc39a816ae5186099eeda
|
e6f254e92274313baf4e29a24bf4b87155501550
|
refs/heads/master
| 2023-08-14T21:26:54.527166
| 2019-01-13T15:08:42
| 2019-01-13T15:08:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,716
|
py
|
# Takes a POEditor .po file and an Unreal Engine .po file and overwrites the
# latter with translated strings from the former, matching msgctxt with
# msgid.
import os
import pycountry
localization_path = 'Localization/Game/'
poeditor_path = 'POEditor/'

poeditor_files = os.listdir(poeditor_path)

# POEditor export names carry a fixed 17-char prefix and a ".po" suffix;
# the remainder is "<Language>[_<Variant>]".
for poe_file in poeditor_files:
    file_name = poe_file[17:-3]
    file_split = file_name.split("_")
    language_name = file_split[0]
    if len(file_split) > 1:
        variant_name = file_split[1]
        # No need to translate English US
        if 'English' in poe_file and 'US' in variant_name:
            continue
        # Map POEditor variant labels onto Unreal's locale suffixes.
        if 'UK' in variant_name:
            variant_name = 'GB'
        elif 'simplified' in variant_name:
            variant_name = 'Hans'
        elif 'traditional' in variant_name:
            variant_name = 'Hant'
        elif 'Moldavian' in language_name:
            language_name = 'Romanian'
            variant_name = 'MD'
        print(language_name + " ({0})".format(variant_name))
        variant_name = "-" + variant_name
    else:
        variant_name = ""
        print(language_name)

    if 'Greek' in language_name:
        # Hard-coded — presumably pycountry's name lookup does not resolve
        # plain "Greek" to a 2-letter code; TODO confirm.
        language_code = 'el'
    else:
        pycountry_language = pycountry.languages.get(name=language_name)
        try:
            language_code = pycountry_language.alpha_2
        except AttributeError:
            print("Skipping {language_name} because pycountry couldn't "
                  "get a language code.".format(language_name=language_name))
            continue

    ue_file = localization_path + language_code + variant_name + "/Game.po"
    if not os.path.exists(ue_file):
        print("Skipping {language_name} because a UE .po file "
              "couldn't be found.".format(language_name=language_name))
        continue

    # We could probably do something clever like load this file into
    # a msgid:msgstr dictionary instead of indexing a list
    with open(poeditor_path + poe_file, 'r', encoding='utf-8') as poeditor_file:
        poeditor_lines = poeditor_file.read().split('\n')

    with open(ue_file, 'r', encoding='utf-8') as game_file:
        output_text = []
        last_msgctxt = None
        for line in game_file:
            # Save the latest msgctxt (msgid) for use in a valid msgstr
            if line.startswith("msgctxt"):
                last_msgctxt = line.split("\"")[1]
            # Make sure we've encountered a last_msgctxt yet.
            if line.startswith("msgstr") and last_msgctxt:
                # Ignore empty msgctxt
                if last_msgctxt:
                    # UE uses , whereas POEditor msgids use . sometimes
                    msgid = "msgid \"{msgctxt}\""
                    this_msgid = msgid.format(msgctxt=last_msgctxt)
                    # Is this string in the POEditor file?
                    try:
                        poeditor_id_location = poeditor_lines.index(this_msgid)
                    except ValueError:
                        try:
                            this_msgid = msgid.format(msgctxt=last_msgctxt.replace(",", ".", 1))
                            poeditor_id_location = poeditor_lines.index(this_msgid)
                        # Bug fix: was a bare ``except:``, which also swallowed
                        # KeyboardInterrupt/SystemExit; list.index only raises
                        # ValueError on a miss.
                        except ValueError:
                            output_text.append(line)
                            continue
                    # msgstr comes right after msgid
                    msgstr = poeditor_lines[poeditor_id_location+1]
                    output_text.append(msgstr + "\n")
            else:
                output_text.append(line)

    # Write out to file
    with open(ue_file, 'w', encoding='utf-8') as output_file:
        for output_line in output_text:
            output_file.write(output_line)
|
[
"noreply@github.com"
] |
CyberSys.noreply@github.com
|
70b207e0b366b8b3abb7be390826e6240b2c173d
|
0958cceb81de1c7ee74b0c436b800a1dc54dd48a
|
/wincewebkit/WebKitTools/QueueStatusServer/handlers/nextpatch.py
|
eb88b4ab1dabea8a3f1569a52291250fb9d294ef
|
[] |
no_license
|
datadiode/WinCEWebKit
|
3586fac69ba7ce9efbde42250266ddbc5c920c5e
|
d331d103dbc58406ed610410736b59899d688632
|
refs/heads/master
| 2023-03-15T23:47:30.374484
| 2014-08-14T14:41:13
| 2014-08-14T14:41:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,664
|
py
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from google.appengine.ext import db
from google.appengine.ext import webapp
from model.queues import Queue
class NextPatch(webapp.RequestHandler):
# FIXME: This should probably be a post, or an explict lock_patch
# since GET requests shouldn't really modify the datastore.
def get(self, queue_name):
queue = Queue.queue_with_name(queue_name)
if not queue:
self.error(404)
return
# FIXME: Patch assignment should probably move into Queue.
patch_id = db.run_in_transaction(self._assign_patch, queue.active_work_items().key(), queue.work_items().item_ids)
if not patch_id:
self.error(404)
return
self.response.out.write(patch_id)
@staticmethod
def _assign_patch(key, work_item_ids):
now = datetime.now()
active_work_items = db.get(key)
active_work_items.deactivate_expired(now)
next_item = active_work_items.next_item(work_item_ids, now)
active_work_items.put()
return next_item
|
[
"achellies@163.com"
] |
achellies@163.com
|
79c4b5823456f6421c9da6d241e0d6e58fa762d4
|
92c67bd1547dacee271f8dacc469dd01ab5f68c4
|
/Robot/Server/server.py
|
69e921407b81c5132bf100011d21b56ec22abc86
|
[] |
no_license
|
yashkp/raspi
|
f31dc225888db9f70afdcc05a5b58a43694daada
|
3fd47dc186574aa4928c15a0b5546ddad8c5e687
|
refs/heads/master
| 2020-03-29T06:55:18.007666
| 2018-09-20T18:10:15
| 2018-09-20T18:10:15
| 149,646,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
import cv2
from flask import Flask, render_template, Response,request, jsonify
#
#cam.set(3, 480)
#cam.set(4, 640)
class VideoCamera(object):
def __init__(self):
global cam
self.cam = self.camera_obj()
#global cam
def __del__(self):
self.cam.release()
#cam.release()
def camera_obj(self):
global cam, flag
if flag is False:
cam = cv2.VideoCapture(0)
return cam
else:
return cam
def get_frame(self):
ret, image = self.cam.read()
ret, jpeg = cv2.imencode('.jpg', image)
return jpeg.tostring()
cam = None
flag = False
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(VideoCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, threaded=True)
|
[
"yaswanthkp14@gmail.com"
] |
yaswanthkp14@gmail.com
|
066619f24d8a7d6ed974495b5c15395e14e6a26d
|
8e98a1d47277f3a5560f059546a4d133b2b75b61
|
/src/main.py
|
206b1ddeeeaf051d672e7be79e4664edcca6f99f
|
[] |
no_license
|
BenPalmer1983/activity_v2_archived
|
1a24759dfee4b34b31b75a21d2e3873779cea96a
|
8b0a6d0e1c931e0eab410064511b262c7e878b98
|
refs/heads/master
| 2023-06-17T17:16:08.385492
| 2021-07-08T14:45:19
| 2021-07-08T14:45:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
################################################################
# Main Program
#
#
#
#
################################################################
import os
import time
import datetime
import re
import sys
import shutil
import numpy
import json
import zlib
from globals import g
from activity import activity
class main():
def start():
# RECORD START TIME
g.times['start'] = time.time()
now = datetime.datetime.now()
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
print(date_time)
activity.run()
main.end()
def end():
# CLOSE LOG
g.times['end'] = time.time()
exit()
# Run
main.start()
|
[
"benpalmer1983@gmail.com"
] |
benpalmer1983@gmail.com
|
76dd8c450aa549e0fa28ca41ddc2529e4f4bd206
|
202626cbfdb71c1153f2f6f50a41ce3a6e2fbc4c
|
/functions/requestWeb.py
|
518fc69bcbdaa5f8dfd3014dfef2b6d2a85e65f4
|
[] |
no_license
|
chhroger/YTDownload
|
6927bcba11840f57b941f88e3812a1cd5b2178cb
|
b1a26db2de3fad9ea53b4c0bc67ea4f7f7e02569
|
refs/heads/master
| 2021-01-01T18:16:37.840816
| 2017-07-25T11:36:28
| 2017-07-25T11:36:28
| 98,294,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
import re
import requests
from bs4 import BeautifulSoup
request = requests.get("https://www.youtube.com/results?search_query=Bloodborne")
content = request.content
soup = BeautifulSoup(content, "html.parser")
youtube_domain = "https://www.youtube.com{}"
for element in soup.find_all('a', {"rel": "spf-prefetch"}):
video_link = element.get("href")
video_id = video_link.split("=")[1]
all_images = soup.find_all('img', {"height": True, "alt": True, "data-ytimg": True, "onload": True})
image = re.findall("https://i.ytimg.com/vi/{}/[\S]+".format(video_id), str(all_images))
image_url = str(image).strip("[\"\']")
image_url = image_url.replace("&","&")
print(image_url)
for element in soup.find_all('span', {"class": "video-time-overlay"}):
print(element)
|
[
"chhroger@gmail.com"
] |
chhroger@gmail.com
|
acef582b2caf8dbea4fddacdf03460be937bff76
|
9e9c08e0b5cee7dbea8579c52a2020a94e953f78
|
/accounts/signals.py
|
f4b55c3e64ca3211135b95c6574d0965bc3abfb3
|
[] |
no_license
|
vedanti06/Customer_Management
|
e8393e825baeb31b4f67e84e4dfb40640944d263
|
15f67006da1b659bc3e4591cdeb635269533adc6
|
refs/heads/master
| 2023-08-24T05:53:13.392577
| 2021-11-05T17:10:10
| 2021-11-05T17:10:10
| 424,994,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User, Group
from .models import *
def customer_profile(sender,instance,created,**kwargs):
if created:
group = Group.objects.get ( name='customer' )
instance.groups.add ( group )
# Added username after video because of error returning customer name if not added
Customer.objects.create (
user=instance,
name=instance.username,
)
post_save.connect(customer_profile,sender=User)
|
[
"vedantidantwala@gmail.com"
] |
vedantidantwala@gmail.com
|
d73d5bc205def0ba8182269cd5a7039b8fe84637
|
0a33cc0ebb67c51cc38750f0f04c3e6c088e3b1a
|
/homeassistant/components/input_button/__init__.py
|
3182e36d5fc7c38e4c85efaf5f2fc99cfed0bb94
|
[
"Apache-2.0"
] |
permissive
|
robert-alfaro/home-assistant
|
e9bb08ad22a167ed226fb3de8f5b36acfc393548
|
4a53121b58b77a318f08c64ad2c5372a16b800e0
|
refs/heads/dev
| 2023-02-28T06:46:23.217246
| 2022-04-26T17:30:08
| 2022-04-26T17:30:08
| 115,894,662
| 4
| 0
|
Apache-2.0
| 2023-02-22T06:21:08
| 2018-01-01T02:00:35
|
Python
|
UTF-8
|
Python
| false
| false
| 5,618
|
py
|
"""Support to keep track of user controlled buttons which can be used in automations."""
from __future__ import annotations
import logging
from typing import cast
import voluptuous as vol
from homeassistant.components.button import SERVICE_PRESS, ButtonEntity
from homeassistant.const import (
ATTR_EDITABLE,
CONF_ICON,
CONF_ID,
CONF_NAME,
SERVICE_RELOAD,
)
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.helpers import collection
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.integration_platform import (
async_process_integration_platform_for_component,
)
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType
DOMAIN = "input_button"
_LOGGER = logging.getLogger(__name__)
CREATE_FIELDS = {
vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Optional(CONF_ICON): cv.icon,
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ICON): cv.icon,
}
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(vol.Any(UPDATE_FIELDS, None))},
extra=vol.ALLOW_EXTRA,
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
class InputButtonStorageCollection(collection.StorageCollection):
"""Input button collection stored in storage."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
async def _process_create_data(self, data: dict) -> vol.Schema:
"""Validate the config is valid."""
return self.CREATE_SCHEMA(data)
@callback
def _get_suggested_id(self, info: dict) -> str:
"""Suggest an ID based on the config."""
return cast(str, info[CONF_NAME])
async def _update_data(self, data: dict, update_data: dict) -> dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
return {**data, **update_data}
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up an input button."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
# Process integration platforms right away since
# we will create entities before firing EVENT_COMPONENT_LOADED
await async_process_integration_platform_for_component(hass, DOMAIN)
id_manager = collection.IDManager()
yaml_collection = collection.YamlCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
collection.sync_entity_lifecycle(
hass, DOMAIN, DOMAIN, component, yaml_collection, InputButton.from_yaml
)
storage_collection = InputButtonStorageCollection(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
)
collection.sync_entity_lifecycle(
hass, DOMAIN, DOMAIN, component, storage_collection, InputButton
)
await yaml_collection.async_load(
[{CONF_ID: id_, **(conf or {})} for id_, conf in config.get(DOMAIN, {}).items()]
)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
async def reload_service_handler(service_call: ServiceCall) -> None:
"""Remove all input buttons and load new ones from config."""
conf = await component.async_prepare_reload(skip_reset=True)
if conf is None:
return
await yaml_collection.async_load(
[
{CONF_ID: id_, **(conf or {})}
for id_, conf in conf.get(DOMAIN, {}).items()
]
)
homeassistant.helpers.service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
component.async_register_entity_service(SERVICE_PRESS, {}, "_async_press_action")
return True
class InputButton(ButtonEntity, RestoreEntity):
"""Representation of a button."""
_attr_should_poll = False
def __init__(self, config: ConfigType) -> None:
"""Initialize a button."""
self._config = config
self.editable = True
self._attr_unique_id = config[CONF_ID]
@classmethod
def from_yaml(cls, config: ConfigType) -> ButtonEntity:
"""Return entity instance initialized from yaml storage."""
button = cls(config)
button.entity_id = f"{DOMAIN}.{config[CONF_ID]}"
button.editable = False
return button
@property
def name(self) -> str | None:
"""Return name of the button."""
return self._config.get(CONF_NAME)
@property
def icon(self) -> str | None:
"""Return the icon to be used for this entity."""
return self._config.get(CONF_ICON)
@property
def extra_state_attributes(self) -> dict[str, bool]:
"""Return the state attributes of the entity."""
return {ATTR_EDITABLE: self.editable}
async def async_press(self) -> None:
"""Press the button.
Left emtpty intentionally.
The input button itself doesn't trigger anything.
"""
return None
async def async_update_config(self, config: ConfigType) -> None:
"""Handle when the config is updated."""
self._config = config
self.async_write_ha_state()
|
[
"noreply@github.com"
] |
robert-alfaro.noreply@github.com
|
58cdd4b09be2ddfece7ebee800aa0251df29bcf7
|
94d9c7c2bdeda401dbda05d8d368db2c23d19e45
|
/dev/workAutomation/excel/taxfree.py
|
7a8ddb6526a1a32a1ba151d4c33f69b695a0f23c
|
[] |
no_license
|
gusek93/Django
|
2ed6ac3f25fcc43f11484ed599c8482a6cb5f7ab
|
21d9ac00b7c2458bc61bc201ac13fb96d7e395d6
|
refs/heads/main
| 2023-08-30T14:50:33.863928
| 2021-10-24T19:37:32
| 2021-10-24T19:37:32
| 410,279,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
import os
import sys
import glob
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)) + '/app')))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'workAutomation.settings')
import django
django.setup()
import pandas as pd
import warnings
# 엑셀 읽어오기
file_path = glob.glob('./media/taxfreeSubtraction/*')
list_excel = [file for file in file_path if file.endswith(".xlsx")]
def taxfree():
excelpath = list_excel[0]
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
df = pd.read_excel(excelpath, engine="openpyxl",usecols="A,B,E,F,I")
list_from_df = df.values.tolist()
data = df.dropna(how='all')
num_list = []
name_list = []
sal_list = []
car_list = []
run_list = []
salcaculater = []
salcaculater2 = []
i = 0
while i < len(data):
num_list.append(list_from_df[i][0])
name_list.append(list_from_df[i][1])
car_list.append(list_from_df[i][2])
run_list.append(list_from_df[i][3])
salcaculater.append(list_from_df[i][2] - list_from_df[i][3])
i += 3
j = 2
x = 0
while j< len(data):
sal_list.append(list_from_df[int(j)][4])
salcaculater2.append(list_from_df[int(j)][4] - salcaculater[x])
j += 3
x +=1
num_list_data = pd.DataFrame(num_list)
name_list_data = pd.DataFrame(name_list)
car_list_data = pd.DataFrame(car_list)
run_list_data = pd.DataFrame(run_list)
sal_list_data = pd.DataFrame(sal_list)
salcaculater2 = pd.DataFrame(salcaculater2)
result = pd.concat([num_list_data,name_list_data,car_list_data,run_list_data,sal_list_data,salcaculater2], axis=1)
#df.to_excel('../media/result/계산성공.xlsx')
result.to_excel('./media/result/taxfree/빼기성공.xlsx')
#print(num_list,name_list,car_list,run_list)
#taxfree()
|
[
"gusek93@naver.com"
] |
gusek93@naver.com
|
0b9cf60b77ba7abf732228b31c930de8ddfbff86
|
8ebc932406c45a1c9d7eb46ff9d3b0826ad6ca19
|
/weekone/day2/mrmrs.py
|
054107cab9b2352444120a1aa6893ce62e6c0f8b
|
[] |
no_license
|
zupercat/python-projects
|
00c6080e8a1a49066bd8a6de1477711178e8b7f6
|
ba43557df770d63645d944b540b0b914071b151e
|
refs/heads/master
| 2020-07-03T10:17:23.221376
| 2019-08-12T07:01:11
| 2019-08-12T07:01:11
| 201,876,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
name_one = input("Please state your name ")
name = name_one[4:10000]
print("hello",name)
|
[
"alicia.y.wen@icloud.com"
] |
alicia.y.wen@icloud.com
|
94cc4fcea23e4fa5784cb9e54032e743b57064b5
|
094531188a783de87db45d68a3fcc4e332f2e967
|
/importFromFlam/9_16.py
|
48a9660d0d8f2e6e249a5b3517d79df792d8d667
|
[] |
no_license
|
nortongeo/wui-fire
|
641adf89ac78719e0094502b8a6e57ce809c59b2
|
b0ce2c9da505466dfe90b88197192dc971dd7cba
|
refs/heads/master
| 2022-05-01T07:40:05.681341
| 2017-10-22T18:34:54
| 2017-10-22T18:34:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,948
|
py
|
#-------------------------------------------------------------------------------
# Name: generateTraining Tool
# Purpose: Takes raw naip and lidar data, uses thresholds to classify data
# and generates training samples by identifying tight thresholds.
#
# Steps:
# - Segment heights into unique objects using SMS
# - Calculate and join mean height to objects with Zonal Stastics
# - Separate ground and nonground features and mask naip
# Author: Peter Norton
#
# Created: 05/25/2017
# Updated: 06/03/2017
# Copyright: (c) Peter Norton and Matt Ashenfarb 2017
#-------------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Import modules
import arcpy
import os
import sys
from arcpy import env
from arcpy.sa import *
arcpy.env.overwriteOutput = True
from tableJoin import one_to_one_join
#-----------------------------------------------
# Set scratch workspace and environment settings
scriptpath = sys.path[0]
toolpath = os.path.dirname(scriptpath)
scratchws = os.path.join(toolpath, "Scratch")
scratchgdb = os.path.join(scratchws, "Scratch.gdb")
#-----------------------------------------------
# Set I/O Paths
outputs = os.path.join(toolpath, "Outputs")
inputs = os.path.join(toolpath, "Inputs")
#-----------------------------------------------
# Inputs
#-----------------------------------------------
risk_fc = os.path.join(inputs, "classified.shp")
naip = os.path.join(inputs, "naip.tif")
projection = "UTMZ10"
#-----------------------------------------------
# Outputs
#-----------------------------------------------
# Alert
count = 1
def generateMessage(text):
global count
arcpy.AddMessage("Step " + str(count) + ": " +text),
count += 1
#-----------------------------------------------
#-----------------------------------------------
# Processing
#-----------------------------------------------
#-----------------------------------------------
fields = ["fire_line_intensity"]#, "flame_len", "rate_of_spread"]
if projection == "UTMZ10":
scale_height = 0.3048
projection = "PROJCS['NAD_1983_UTM_Zone_10N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-123.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
elif projection == "UTMZ11":
scale_height = 0.3048
projection = "PROJCS['NAD_1983_UTM_Zone_11N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-117.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
elif projection == "SPIII":
scale_height = 1
projection = "PROJCS['NAD_1983_StatePlane_California_III_FIPS_0403_Feet',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',6561666.666666666],PARAMETER['False_Northing',1640416.666666667],PARAMETER['Central_Meridian',-120.5],PARAMETER['Standard_Parallel_1',37.06666666666667],PARAMETER['Standard_Parallel_2',38.43333333333333],PARAMETER['Latitude_Of_Origin',36.5],UNIT['Foot_US',0.3048006096012192]]"
elif projection == "SPIV":
scale_height = 1
projection = "PROJCS['NAD_1983_StatePlane_California_VI_FIPS_0406_Feet',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',6561666.666666666],PARAMETER['False_Northing',1640416.666666667],PARAMETER['Central_Meridian',-116.25],PARAMETER['Standard_Parallel_1',32.78333333333333],PARAMETER['Standard_Parallel_2',33.88333333333333],PARAMETER['Latitude_Of_Origin',32.16666666666666],UNIT['Foot_US',0.3048006096012192]]"
arcpy.env.snapRaster = naip
cell_size = str(arcpy.GetRasterProperties_management(naip, "CELLSIZEX", ""))
naip_cell_size = cell_size +" " +cell_size
for field in fields:
#------------------------------
#-----------------------------------------------
text = "Calculating and joining max " + field + " to each object."
generateMessage(text)
#-----------------------------------------------
in_ascii_file = os.path.join(inputs, field + ".asc")
data_type = "INTEGER"
inValueRaster = os.path.join(scratchgdb, field)
raw_raster = os.path.join(outputs, field + "_raw.tif")
shift = os.path.join(outputs, field+".tif")
arcpy.ASCIIToRaster_conversion(in_ascii_file, raw_raster, data_type)
arcpy.DefineProjection_management(raw_raster, projection)
arcpy.Resample_management(raw_raster, inValueRaster, naip_cell_size, "")
arcpy.Shift_management(inValueRaster, shift, -(int(cell_size)), 0, naip)
zoneField = "JOIN"
outTable = os.path.join(scratchgdb, "zonal_"+field)
arcpy.CalculateField_management(risk_fc, zoneField, "[FID]")
z_table = ZonalStatisticsAsTable(risk_fc, zoneField, shift, outTable, "NODATA", "MAXIMUM")
if field == "fire_line_intensity":
field = "fli"
elif field == "rate_of_spread":
field = "ros"
elif field == "flame_len":
field = "fl"
#-----------------
arcpy.AddField_management(outTable, field, "INTEGER")
arcpy.CalculateField_management(outTable, field, "int([MAX])")
one_to_one_join(risk_fc, outTable, field, "INTEGER")
#-----------------------------------------------
#-----------------------------------------------
text = "All burn severity joins are complete."
generateMessage(text)
#-----------------------------------------------
# # Fire Simulation
# output_file = os.path.join(toolpath, "output")
# toagdb = os.path.join(FlamMap_output, "toa.gdb")
# toa = os.path.join(scratchgdb, "toa")
# temp = os.path.join(scratchgdb, "temp")
# gifpath = os.path.join(toolpath, "fire_gif")
# mxd_path = os.path.join(toolpath,"temp_mxd.mxd")
# bndpath = os.path.join(scratchgdb, "bnd")
# basemap = os.path.join(scratchgdb, "house_buff_landscape_dis")
# symbology_bnd = os.path.join(toolpath, "bnd_template.lyr")
# symbology_layer = os.path.join(toolpath, "fire_template.lyr")
# symbology_baselayer = os.path.join(toolpath, "house_buff_landscape_dis.lyr")
# spatial_ref = os.path.join(toolpath, "bnd.prj")
# # convert ascii to polygon and add dissolve field
# toa_ascii = os.path.join(outputs, "hb_toa.asc")
# toa_raster = os.path.join(output, "hb_toa_raster.tif")
# arcpy.ASCIIToRaster_conversion(toa_ascii, toa_raster, "INTEGER")
# arcpy.AddMessage("...making raster...")
# arcpy.DefineProjection_management(toa_raster, "PROJCS['NAD_1983_StatePlane_California_III_FIPS_0403_Feet',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',6561666.666666666],PARAMETER['False_Northing',1640416.666666667],PARAMETER['Central_Meridian',-120.5],PARAMETER['Standard_Parallel_1',37.06666666666667],PARAMETER['Standard_Parallel_2',38.43333333333333],PARAMETER['Latitude_Of_Origin',36.5],UNIT['Foot_US',0.3048006096012192]]")
# arcpy.AddMessage("...making polygon...")
# arcpy.RasterToPolygon_conversion(toa_raster, toa)
# arcpy.AddField_management(toa, "toa", "INTEGER")
# arcpy.CalculateField_management(toa, "toa", 1)
# cursor = arcpy.da.SearchCursor(toa, "gridcode", "")
# maximum = 0
# for row in cursor:
# val = row[0]
# if maximum == 0:
# maximum = val
# elif val > maximum:
# maximum = val
# arcpy.AddMessage("...separating toa...")
# #creating backgdrop
# mxd = arcpy.mapping.MapDocument(mxd_path)
# df = arcpy.mapping.ListDataFrames(mxd)[0]
# bnd = arcpy.mapping.Layer(bndpath)
# base_layer = arcpy.mapping.Layer(basemap)
# arcpy.ApplySymbologyFromLayer_management(base_layer,symbology_baselayer)
# range_lst = []
# for i in range(maximum):
# if i % 100 == 0:
# range_lst.append(i)
# lwr_bnd = range_lst.pop(0)
# range_len = len(range_lst)
# range_len = len(range_lst)
# output_burn = ""
# count = 1
# gif_list = []
# merge = ""
# for i in range_lst:
# upr_bnd = i
# out_name = str(upr_bnd)
# burn = os.path.join(toagdb, "hb_toa_" + out_name)
# where_clause = "GRIDCODE > " + str(lwr_bnd) + " AND GRIDCODE <=" + str(upr_bnd)
# arcpy.Select_analysis(toa, temp, where_clause)
# arcpy.Dissolve_management(temp, burn, "toa", "", "", "")
# # lwr_bnd = upr_bnd
# # if merge == "":
# output_burn = burn
# # else:
# # output_burn = os.path.join(scratchgdb, "b_" + str(count))
# # arcpy.Merge_management([burn, merge], output_burn)
# # merge = output_burn
# arcpy.AddMessage("...making .PNG...")
# # Make png
# # png_name = "hb_"+out_name + ".png"
# # output_png = os.path.join(gifpath, png_name)
# # arcpy.mapping.AddLayer(df, base_layer)
# # layer = arcpy.mapping.Layer(output_burn)
# # arcpy.ApplySymbologyFromLayer_management(layer,symbology_layer)
# # arcpy.mapping.AddLayer(df, layer)
# # arcpy.ApplySymbologyFromLayer_management(bnd,symbology_bnd)
# # arcpy.mapping.AddLayer(df, bnd)
# # df.spatialReference = arcpy.SpatialReference(spatial_ref)
# # df.extent = bnd.getExtent()
# # arcpy.RefreshActiveView()
# # arcpy.RefreshTOC()
# # arcpy.mapping.ExportToPNG(mxd, output_png)
# # if count > 2:
# # scrap = os.path.join(scratchgdb, "b_" + str(i-1))
# # arcpy.Delete_management(scrap)
# # arcpy.AddMessage("{0} of {1} developed.".format(count, range_len))
# # count += 1
|
[
"p.norton5@berkeley.edu"
] |
p.norton5@berkeley.edu
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.