blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34b6d2825eff4be4f5a6eeea5fb44f5a8c0c54d4
|
69bd4cbf6660bb75131b89b1faa2668dfdd55da5
|
/7. Imagenes/src/imagenes_TEST.py
|
0e416026a0228381d7080134de6b67f5be8b09b6
|
[] |
no_license
|
anatitanic/proyectos-laboratorio
|
8ed50c524ecb9a228cbee37555d2f9897f56c676
|
22296bb9ef593f3641ec33858e5a73fb27f4257a
|
refs/heads/master
| 2021-10-09T13:09:38.614674
| 2018-12-28T16:20:06
| 2018-12-28T16:20:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
# -*- coding: utf-8 -*-
from imagenes import *
################################################################
# Funciones de test
################################################################
def test_muestra_imagen(imagen):
    """Smoke test: display *imagen* on screen via muestra_imagen."""
    muestra_imagen(imagen)
def test_guarda_imagen(imagen):
    """Smoke test: save *imagen* to ../img/salida.jpeg via guarda_imagen."""
    guarda_imagen('../img/salida.jpeg', imagen)
def test_calcula_dimensiones(imagen):
    """Print the row/column dimensions reported by calcula_dimensiones."""
    filas, columnas = calcula_dimensiones(imagen)
    print("Las dimensiones de la imagen son:")
    print(" - Filas:", filas)
    print(" - Columnas:", columnas)
def test_calcula_intensidades_medias(imagen):
    """Print the mean R/G/B intensities reported by calcula_intensidades_medias."""
    rojo, verde, azul = calcula_intensidades_medias(imagen)
    print("Las intensidades medias de la imagen son:")
    print(" - Rojo:", rojo)
    print(" - Verde:", verde)
    print(" - Azul:", azul)
def test_reflejo_vertical(imagen):
    """Smoke test: compute the vertical mirror of *imagen* and display it."""
    reflejo = reflejo_vertical(imagen)
    muestra_imagen(reflejo)
def test_reflejo_horizontal(imagen):
    """Smoke test: compute the horizontal mirror of *imagen* and display it."""
    reflejo = reflejo_horizontal(imagen)
    muestra_imagen(reflejo)
def test_rotacion(imagen):
    """Smoke test: rotate *imagen* and display the result."""
    rotada = rotacion(imagen)
    muestra_imagen(rotada)
def test_filtro_color(imagen):
    """Smoke test: keep only the blue and red channels and display the result."""
    solo_azul_rojo = filtro_color(imagen, ['B', 'R'])
    muestra_imagen(solo_azul_rojo)
def test_escala_grises(imagen):
    """Smoke test: convert *imagen* to greyscale, display it and save it."""
    grises = escala_grises(imagen)
    muestra_imagen(grises, cmap='gray')
    guarda_imagen('../img/grises.jpeg', grises, cmap='gray')
def test_blanco_negro(imagen):
    """Smoke test: binarize *imagen* to black & white and display it."""
    imagen_bn = blanco_negro(imagen)
    muestra_imagen(imagen_bn, cmap='gray')
################################################################
# Programa principal
################################################################
imagen = lee_imagen('../img/gibraltar.jpeg')
#test_muestra_imagen(imagen)
#test_guarda_imagen(imagen)
#test_calcula_dimensiones(imagen)
#test_calcula_intensidades_medias(imagen)
#test_reflejo_vertical(imagen)
#test_reflejo_horizontal(imagen)
#test_rotacion(imagen)
#test_filtro_color(imagen)
#test_escala_grises(imagen)
#test_blanco_negro(imagen)
|
[
"noreply@github.com"
] |
noreply@github.com
|
f1b17665c3b46dd3f018e50c818700cf0e574a2f
|
73d361e2cd52cc647d39660e1f7cf31f0fea6b0f
|
/Regression/SimpleLinearRegression/SalaryPrediction.py
|
06e09ef8db3315144bf9de01a8ac9e5ad50460f3
|
[] |
no_license
|
Aniruddha10/UdemyPythonML
|
0d1ac434bcb2c8cd5bb6124c4648b2b16126449c
|
3d33de775d72e51ee6e47483882c10d9c3f2d43c
|
refs/heads/master
| 2022-11-24T17:05:42.777851
| 2020-07-25T16:06:13
| 2020-07-25T16:06:13
| 282,477,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 19:19:14 2020
@author: chakanc
"""
from sklearn import linear_model
import matplotlib.pyplot as mp
import numpy as np
#regressor = ""
#train SLR model on Training set
def trainModel(X_train, y_train):
    """Fit a simple linear regression on the training set.

    Args:
        X_train: training features.
        y_train: training targets.

    Returns:
        The fitted sklearn LinearRegression estimator.
    """
    model = linear_model.LinearRegression()
    model.fit(X_train, y_train)
    return model
def showTheta(regressor):
    """Print the fitted coefficient(s) of *regressor*.

    Bug fix: sklearn's LinearRegression stores the fitted slope(s) in
    ``coef_`` (and the bias in ``intercept_``); there is no ``Coefficient``
    attribute, so the original line always raised AttributeError.
    """
    print('Coefficient {} '.format(regressor.coef_))
#Predict test Set
def predictTest(regressor, X_test):
    """Return the regressor's predictions for X_test.

    Also configures numpy to print floats with two decimal places,
    matching the rest of this module's console output.
    """
    predictions = regressor.predict(X_test)
    np.set_printoptions(precision=2)
    return predictions
def predictMultiTest(regressor, X_test, y_test):
    """Predict X_test and print predictions next to the true values.

    The two series are printed as a two-column array (predicted, actual);
    *y_test* is returned unchanged.
    """
    predicted = regressor.predict(X_test)
    np.set_printoptions(precision=2)
    pred_col = np.reshape(predicted, (len(predicted), 1))
    actual = np.array(y_test)
    actual_col = np.reshape(actual, (len(actual), 1))
    print(np.concatenate((pred_col, actual_col), 1))
    return y_test
#Visualize training set results
def drawTrainSet(X_train, y_train):
    """Plot the training points (black dots) joined by a blue line."""
    mp.scatter(X_train, y_train, color='black')
    mp.plot(X_train, y_train, color = 'blue', linewidth=3)
#Visualize Test set results
def drawTestSet(X_test, y_test):
    """Plot the test points (black dots) and render the figure."""
    mp.scatter(X_test, y_test, color='black')
    #mp.plot(X_test, y_test, color = 'blue', linewidth=1)
    mp.show()
|
[
"chakanc@MFCGD.COM"
] |
chakanc@MFCGD.COM
|
03d32acc40385f42ae5c0d2d04765bfd79f49c6e
|
c71eefb18081ffa79ed63aa1a0d762ec49948cb8
|
/blog/migrations/0004_auto_20201110_1314.py
|
53bf3195b4127687b9ede056d84c5fd21aa614fd
|
[] |
no_license
|
gitjeet/Blog_Site_Using_Django
|
6532cdf25f0f7db941deb3d905c2bd83d2fcb19f
|
bf69c81d4cc1c5004827905d42b7192128d92b56
|
refs/heads/main
| 2023-01-29T22:46:58.957503
| 2020-12-03T17:55:54
| 2020-12-03T17:55:54
| 318,273,824
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
# Generated by Django 3.1.2 on 2020-11-10 07:44
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``create_date`` to ``created_date`` on the comment and post models."""

    dependencies = [
        ('blog', '0003_auto_20201110_1019'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='create_date',
            new_name='created_date',
        ),
        migrations.RenameField(
            model_name='post',
            old_name='create_date',
            new_name='created_date',
        ),
    ]
|
[
"vayuabhijeet@gmail.com"
] |
vayuabhijeet@gmail.com
|
a1175788b89b96596e5c65524f893947f9cd6f68
|
59a27d1244c372d72e2124e634330a62a40d504b
|
/aoc_21/01.py
|
f6ccdcdc71177bdf6c719dba89689ecaf96ef9d1
|
[] |
no_license
|
rliffredo/advent-of-code
|
2037813d6765a2c650c240190267c944e4d5148d
|
3dccb51e252d0e0cc2f627476db69a38be7686de
|
refs/heads/master
| 2022-12-22T05:02:14.708180
| 2022-12-15T08:44:58
| 2022-12-15T08:44:58
| 160,335,685
| 0
| 0
| null | 2021-05-13T21:07:08
| 2018-12-04T09:53:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
from itertools import tee
from common import read_data, pairwise
def parse_data():
    """Read the day-01 puzzle input and return it as a list of ints."""
    raw_numbers = read_data("01", True)
    return list(map(int, raw_numbers))
def part_1(print_result: bool = True) -> int:
    """Count how many depth measurements increase over the previous one."""
    depths = parse_data()
    increased = sum(1 for prev, cur in pairwise(depths) if cur > prev)
    return increased
def thricewise(iterable):
    """
    Sliding window of three: s -> (s0,s1,s2), (s1,s2,s3), (s2,s3,s4), ...
    """
    iters = tee(iterable, 3)
    # Advance the k-th copy k steps so the three copies are staggered.
    for offset, it in enumerate(iters):
        for _ in range(offset):
            next(it, None)
    return zip(*iters)
def part_2(print_result: bool = True) -> int:
    """Count increases between sums of consecutive three-measurement windows."""
    depths = parse_data()
    windows = [sum(window) for window in thricewise(depths)]
    increased = sum(1 for prev, cur in pairwise(windows) if cur > prev)
    return increased
SOLUTION_1 = 1400
SOLUTION_2 = 1429
if __name__ == "__main__":
print(part_1())
print(part_2())
|
[
"rliffredo@outlook.com"
] |
rliffredo@outlook.com
|
8d8ddb865c6a12401cc24112051255881181248e
|
f4f5d98101db7baf9703be077615383b831c35d8
|
/setup.py
|
f00a4b6116f81b93954694c531ecc2ff819e8e74
|
[
"MIT"
] |
permissive
|
TrendingTechnology/PyYouTube-1
|
23099fd1b825f226cabf2e0f50112e1b3f53346b
|
774213412210ab03adf11eb8b38906b0f3de5ee6
|
refs/heads/main
| 2023-08-17T13:50:03.035784
| 2021-09-15T09:11:31
| 2021-09-15T09:11:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
import pathlib
import setuptools
def read(file: str) -> list:
    """Return the whitespace-stripped lines of *file* (UTF-8) as a list."""
    with open(file, encoding="utf-8") as handle:
        return list(map(str.strip, handle))
file = pathlib.Path(__file__).parent
README = (file / "README.md").read_text()
setuptools.setup(
name='PyYouTube',
version="1.0.7",
author="mrlokaman",
author_email="ln0technical@gmail.com",
long_description = README,
long_description_content_type = "text/markdown",
description="Python library Get YouTube Video Data",
license="MIT",
url="https://github.com/lntechnical2/PyYouTube",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
install_requires = read("requirements.txt"),
python_requires=">=3.6"
)
|
[
"noreply@github.com"
] |
noreply@github.com
|
439bf310097453fbbe84be8f94fdecf17a33b4a2
|
701dc6b5c2ea7af5f94729dcc4ca56a8f8c8477e
|
/settings/wsgi.py
|
67d3dd056ee37db4b3874cb1f6f0e8630246dcd0
|
[] |
no_license
|
GuiMend/delivery-backend
|
f00521920b720967b0bdc9cb9dd9e574424d74eb
|
56fba4f8fe400d18b0f2813b530cb916488b42ea
|
refs/heads/master
| 2023-04-13T16:36:35.636676
| 2021-04-27T14:46:34
| 2021-04-27T14:46:34
| 361,578,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
"""
topfood-backend WSGI Configuration
"""
###
# Libraries
###
import os
from django.core.wsgi import get_wsgi_application
###
# Main Configuration
###
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.settings")
application = get_wsgi_application()
|
[
"guigmen@gmail.com"
] |
guigmen@gmail.com
|
8b41ac7a5a4163138fe8b9a9962e83c1799151aa
|
dcd616927e4d0a6748a623e916e1c5ba6baaf143
|
/h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_irf.py
|
890b9f27a2be891431827653afefdaaeb49fcf2e
|
[
"Apache-2.0"
] |
permissive
|
kaistha23/h2o-3
|
526340a58d04d440f14e4e0e2847b6a1a95bec57
|
1f31e84c68c0cda3b3dde0d3112c0ff3cac2270c
|
refs/heads/master
| 2020-05-09T10:07:00.356419
| 2019-04-12T04:14:18
| 2019-04-12T04:14:18
| 181,020,702
| 1
| 0
|
Apache-2.0
| 2019-04-12T14:08:44
| 2019-04-12T14:08:44
| null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
import h2o
import tempfile
import os
from h2o.estimators import H2OIsolationForestEstimator, H2OGenericEstimator
from tests import pyunit_utils
def mojo_model_irf_test():
    """Round-trip an Isolation Forest through MOJO export/import.

    Trains an Isolation Forest, exports it as a MOJO, re-imports it as a
    generic model, and checks that the generic model predicts and
    re-exports consistently with the original.
    """
    # Isolation Forest (the original "# GLM" label was a copy-paste leftover)
    airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/testng/airlines_train.csv"))
    irf = H2OIsolationForestEstimator(ntrees=1)
    irf.train(x = ["Origin", "Dest"], y = "Distance", training_frame=airlines)
    # Export the trained model as a MOJO into a fresh temp directory.
    original_model_filename = tempfile.mkdtemp()
    original_model_filename = irf.download_mojo(original_model_filename)
    # Re-import the MOJO as a generic model and verify it predicts.
    model = H2OGenericEstimator.from_file(original_model_filename)
    assert model is not None
    predictions = model.predict(airlines)
    assert predictions is not None
    assert predictions.nrows == 24421  # presumably the airlines frame row count — confirm against the dataset
    # Isolation forests report no variable importances, but a model summary must exist.
    assert model._model_json["output"]["variable_importances"] is None
    assert model._model_json["output"]["model_summary"] is not None
    assert len(model._model_json["output"]["model_summary"]._cell_values) > 0
    # Re-export the generic model; the MOJO should be the same size as the original.
    generic_mojo_filename = tempfile.mkdtemp("zip", "genericMojo");
    generic_mojo_filename = model.download_mojo(path=generic_mojo_filename)
    assert os.path.getsize(generic_mojo_filename) == os.path.getsize(original_model_filename)
if __name__ == "__main__":
pyunit_utils.standalone_test(mojo_model_irf_test)
else:
mojo_model_irf_test()
|
[
"noreply@github.com"
] |
noreply@github.com
|
c1ae9e6fb753e4652eee299f5a2816c6ddc0a51f
|
514e85ae034b7782ff29dcff082a888d80a3dc8f
|
/urlshorter/migrations/0001_initial.py
|
6a3763f321e156bc1ee0cb592f33697ff01fd0bf
|
[] |
no_license
|
CossackDex/UrlShorterDjango
|
48785adda7565104d20ce4ad184a7f42618244d6
|
fd778acb0f030f3458aea84dfdd112c2b4b15ce7
|
refs/heads/main
| 2023-05-09T14:53:49.140333
| 2021-06-03T14:42:13
| 2021-06-03T14:42:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
# Generated by Django 3.2.4 on 2021-06-03 01:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the UrlShortener table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UrlShortener',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('click_counter', models.PositiveIntegerField(default=0)),
                ('long_url', models.URLField()),
                ('short_url', models.CharField(max_length=15, unique=True)),
            ],
            options={
                'ordering': ['-create_date'],
            },
        ),
    ]
|
[
"242589@student.pwr.edu.pl"
] |
242589@student.pwr.edu.pl
|
bd2c830fec21a04fa77baa6c330e3c3708b289db
|
932a75866784589d427afadb11f41abfbbc2dcd4
|
/yelp-crawler/YelpAdaption.py
|
45fdee987c09094020666b32b4f016e431c5b310
|
[] |
no_license
|
wjffxy/dining-bot
|
a69d5ac5567b149379a86530e8acdb504653a318
|
047255aabb9816162598066cd30310ab8e794de5
|
refs/heads/master
| 2021-01-09T19:00:41.691686
| 2020-02-22T22:37:28
| 2020-02-22T22:37:28
| 242,420,645
| 0
| 0
| null | 2020-02-22T22:30:47
| 2020-02-22T22:28:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,883
|
py
|
# -*- coding: utf-8 -*-
"""
Yelp Fusion API code sample.
This program demonstrates the capability of the Yelp Fusion API
by using the Search API to query for businesses by a search term and location,
and the Business API to query additional information about the top result
from the search query.
Please refer to http://www.yelp.com/developers/v3/documentation for the API
documentation.
This program requires the Python requests library, which you can install via:
`pip install -r requirements.txt`.
Sample usage of the program:
`python sample.py --term="bars" --location="San Francisco, CA"`
"""
from __future__ import print_function
import argparse
import json
import pprint
import requests
import sys
import urllib
# This client code can run on Python 2.x or 3.x. Your imports can be
# simpler if you only need one of those.
try:
# For Python 3.0 and later
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode
except ImportError:
# Fall back to Python 2's urllib2 and urllib
from urllib2 import HTTPError
from urllib import quote
from urllib import urlencode
# Yelp Fusion no longer uses OAuth as of December 7, 2017.
# You no longer need to provide Client ID to fetch Data
# It now uses private keys to authenticate requests (API Key)
# You can find it on
# https://www.yelp.com/developers/v3/manage_app
API_KEY= "ueSaG54dzEo5zQeM8aI2LT5C4krMvCYm5HJiNWuh13viiwEgh-Zl3qk3Te1ZOfYK6l4kWDIQzaL4O0sezTPUejlxXv_4-v0DDcguHQjazqPClbvOhTclNpJXOe6YXXYx"
# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
BUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash.
# Defaults for our simple example.
DEFAULT_TERM = 'french restaurant'
DEFAULT_LOCATION = 'manhatten'
SEARCH_LIMIT = 50
OFFSET = 0
def request(host, path, api_key, url_params=None):
    """Issue an authenticated GET to the Yelp API and return the parsed JSON.

    Args:
        host (str): API host, e.g. https://api.yelp.com
        path (str): endpoint path; URL-quoted before use.
        api_key (str): bearer token for the Authorization header.
        url_params (dict | None): optional query-string parameters.
    """
    url_params = url_params or {}
    url = '{0}{1}'.format(host, quote(path.encode('utf8')))
    headers = {
        'Authorization': 'Bearer %s' % api_key,
    }
    print(u'Querying {0} ...'.format(url))
    response = requests.request('GET', url, headers=headers, params=url_params)
    return response.json()
def search(api_key, term, location, offset):
    """Query the Yelp search endpoint for *term* near *location*.

    Spaces in the term/location are encoded as '+'; page size comes from
    SEARCH_LIMIT and *offset* selects the page.
    """
    query = {
        'term': term.replace(' ', '+'),
        'location': location.replace(' ', '+'),
        'limit': SEARCH_LIMIT,
        'offset': offset,
    }
    return request(API_HOST, SEARCH_PATH, api_key, url_params=query)
def get_business(api_key, business_id):
    """Fetch the detail record for a single business by its Yelp ID."""
    return request(API_HOST, BUSINESS_PATH + business_id, api_key)
def query_api(term, location):
    """Page through 20 search result pages and dump them all to data.json.

    Each page holds SEARCH_LIMIT (50) results; the 1-based page number keys
    the JSON object and the offset advances by 50 per page.

    Cleanup: the original kept an unused ``response`` list and hand-rolled
    ``k``/``offset`` counters alongside the loop index; both are folded
    into the single loop variable.
    """
    json_map = {}
    for page in range(20):
        json_map[page + 1] = search(API_KEY, term, location, page * 50)
    with open('data.json', 'w') as openfile:
        json.dump(json_map, openfile,sort_keys=True, indent=4)
def main():
    """Parse --term/--location CLI options and run the paged search.

    Exits with a readable message if the Yelp API returns an HTTP error.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-q', '--term', dest='term', default=DEFAULT_TERM,type=str, help='Search term (default: %(default)s)')
    parser.add_argument('-l', '--location', dest='location',default=DEFAULT_LOCATION, type=str,help='Search location (default: %(default)s)')
    input_values = parser.parse_args()
    try:
        query_api(input_values.term, input_values.location)
    except HTTPError as error:
        sys.exit('Encountered HTTP error {0} on {1}:\n {2}\nAbort program.'.format(error.code,error.url,error.read(),))
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
e988fb44ebf77ef6fb46be8468feeba747b3f022
|
7afc43157ab4338614ee1ad9e6824c75694293fb
|
/inventario/bin/chardetect
|
eadc1c8ccb6bbc406cf6577397dc505a686811fc
|
[] |
no_license
|
eze-fayu/inventario
|
9d8c2c83a123f0a643028e262cc83c1ef612862f
|
83b63a2f5e7bd14d12e5f6b34682fbacca10fd28
|
refs/heads/main
| 2023-05-13T22:20:12.382242
| 2021-05-29T20:41:19
| 2021-05-29T20:41:19
| 354,267,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
#!/home/ezequiel/Documentos/Proyectos/inventario/inventario/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ezequielfaiella@gmail.com"
] |
ezequielfaiella@gmail.com
|
|
a9d23248a6c436c6c49ed34c13b6d1fb71b7abf5
|
bf388b74d823551121f45dfba3333637f845e051
|
/hm_web/apps.py
|
2f43c6503ad735c3c85578dadb4c034beb9e681b
|
[
"MIT"
] |
permissive
|
dantswain/heavy_meta
|
d5178a61f63474315a62d2c9597f2ddd7ab9187e
|
c3154130c2c951f1a5b46174193157625bb1be17
|
refs/heads/master
| 2021-09-06T13:26:50.018504
| 2018-02-07T02:55:31
| 2018-02-07T02:55:31
| 117,156,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
'''
Django boilerplate
'''
from django.apps import AppConfig
class HmWebConfig(AppConfig):
'''
Django config boilerplate
'''
name = 'hm_web'
|
[
"dan.t.swain@gmail.com"
] |
dan.t.swain@gmail.com
|
2bec7b1c41956ec1783c8369ebfd7c8ba03d97fa
|
5604a50361637f8ed1327a5618f96e63fdc60c24
|
/first_bot/bin/flask
|
c409f1b8ca260f1d58f0c76ea8703c531a53ca9d
|
[] |
no_license
|
razmanov666/first_bot_telegram
|
f82daded45341f92d9b531e6e74cee595fce7779
|
6d33aae297e3369e3c036f8f50b6ac555cd7c3de
|
refs/heads/master
| 2023-07-25T22:20:23.074419
| 2021-06-30T06:57:46
| 2021-09-01T17:35:03
| 376,825,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
#!/home/kurama/Документы/first_bot_telegram/first_bot/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"razmanov666@gmail.com"
] |
razmanov666@gmail.com
|
|
bbb4d451e6ea33efbfab2bbfba0e4b1b5efcb954
|
bc40f1bcf6d779b39a684e79816e51da7ad828d9
|
/venv/Scripts/pip3.6-script.py
|
3dd31b464e10e9516671a4ffb64d17b5d90d2e3e
|
[] |
no_license
|
dovgin2/data_hackerspace_homework
|
c3b71b4f988688ce3d133a002fc8419e3dff8322
|
2396fe026b81c634f30c67694f27ec1dc3c83a7c
|
refs/heads/master
| 2020-03-30T11:26:17.094261
| 2018-10-16T00:06:29
| 2018-10-16T00:06:29
| 151,173,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
#!C:\Users\s3dov\PycharmProjects\data_hackerspace_homework\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
|
[
"s3dovgin@gmail.com"
] |
s3dovgin@gmail.com
|
faf4719b940c4e5811346205c59cd9ad7daa89ec
|
2813f969fc9833023f543fa14c1c22a87325ca8f
|
/logging_tree/tests/test_format.py
|
787b959fd92bbf2bdc6650b8ba7e639e870cd017
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
ralphbean/logging_tree
|
5761afc380719f4069fd00b1f21f5765927ce593
|
1db0ad6f485b5333fee637813faf827990924421
|
refs/heads/master
| 2021-01-16T22:05:05.459626
| 2012-12-04T02:16:14
| 2012-12-04T02:16:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,425
|
py
|
"""Tests for the `logging_tree.format` module."""
import logging
import logging.handlers
import unittest
import sys
from logging_tree.format import build_description, printout
from logging_tree.tests.case import LoggingTestCase
if sys.version_info >= (3,):
from io import StringIO
else:
from StringIO import StringIO
class FakeFile(StringIO):
    """In-memory stand-in that logging can open instead of a real file."""

    def __init__(self, filename, mode):
        self.filename = filename
        super(FakeFile, self).__init__()

    def __repr__(self):
        return '<file {!r}>'.format(self.filename)
class FormatTests(LoggingTestCase):
def setUp(self):
# Prevent logging file handlers from trying to open real files.
# (The keyword delay=1, which defers any actual attempt to open
# a file, did not appear until Python 2.6.)
logging.open = FakeFile
super(FormatTests, self).setUp()
def tearDown(self):
del logging.open
super(FormatTests, self).tearDown()
def test_printout(self):
stdout, sys.stdout = sys.stdout, StringIO()
printout()
self.assertEqual(sys.stdout.getvalue(), '<--""\n Level WARNING\n')
sys.stdout = stdout
def test_simple_tree(self):
logging.getLogger('a')
logging.getLogger('a.b').setLevel(logging.DEBUG)
logging.getLogger('x.c')
self.assertEqual(build_description(), '''\
<--""
Level WARNING
|
o<--"a"
| |
| o<--"a.b"
| Level DEBUG
|
o<--[x]
|
o<--"x.c"
''')
def test_fancy_tree(self):
logging.getLogger('').setLevel(logging.DEBUG)
log = logging.getLogger('db')
log.setLevel(logging.INFO)
log.propagate = False
log.addFilter(MyFilter())
handler = logging.StreamHandler()
log.addHandler(handler)
handler.addFilter(logging.Filter('db.errors'))
logging.getLogger('db.errors')
logging.getLogger('db.stats')
log = logging.getLogger('www.status')
log.setLevel(logging.DEBUG)
log.addHandler(logging.FileHandler('/foo/log.txt'))
log.addHandler(MyHandler())
self.assertEqual(build_description(), '''\
<--""
Level DEBUG
|
o "db"
| Level INFO
| Propagate OFF
| Filter <MyFilter>
| Handler Stream %r
| Filter name='db.errors'
| |
| o<--"db.errors"
| |
| o<--"db.stats"
|
o<--[www]
|
o<--"www.status"
Level DEBUG
Handler File '/foo/log.txt'
Handler <MyHandler>
''' % (sys.stderr,))
def test_most_handlers(self):
ah = logging.getLogger('').addHandler
ah(logging.handlers.RotatingFileHandler(
'/bar/one.txt', maxBytes=10000, backupCount=3))
ah(logging.handlers.SocketHandler('server.example.com', 514))
ah(logging.handlers.DatagramHandler('server.example.com', 1958))
ah(logging.handlers.SysLogHandler())
ah(logging.handlers.SMTPHandler(
'mail.example.com', 'Server', 'Sysadmin', 'Logs!'))
# ah(logging.handlers.NTEventLogHandler())
ah(logging.handlers.HTTPHandler('api.example.com', '/logs', 'POST'))
ah(logging.handlers.BufferingHandler(20000))
sh = logging.StreamHandler()
ah(logging.handlers.MemoryHandler(30000, target=sh))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler RotatingFile '/bar/one.txt' maxBytes=10000 backupCount=3
Handler Socket server.example.com 514
Handler Datagram server.example.com 1958
Handler SysLog ('localhost', 514) facility=1
Handler SMTP via mail.example.com to ['Sysadmin']
Handler HTTP POST to http://api.example.com//logs
Handler Buffering capacity=20000
Handler Memory capacity=30000 dumping to:
Handler Stream %r
''' % (sh.stream,))
logging.getLogger('').handlers[3].socket.close() # or Python 3 warning
def test_2_dot_5_handlers(self):
if sys.version_info < (2, 5):
return
ah = logging.getLogger('').addHandler
ah(logging.handlers.TimedRotatingFileHandler('/bar/two.txt'))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler TimedRotatingFile '/bar/two.txt' when='H' interval=3600 backupCount=0
''')
def test_2_dot_6_handlers(self):
if sys.version_info < (2, 6):
return
ah = logging.getLogger('').addHandler
ah(logging.handlers.WatchedFileHandler('/bar/three.txt'))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler WatchedFile '/bar/three.txt'
''')
def test_nested_handlers(self):
h1 = logging.StreamHandler()
h2 = logging.handlers.MemoryHandler(30000, target=h1)
h2.addFilter(logging.Filter('worse'))
h3 = logging.handlers.MemoryHandler(30000, target=h2)
h3.addFilter(logging.Filter('bad'))
logging.getLogger('').addHandler(h3)
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler Memory capacity=30000 dumping to:
Filter name='bad'
Handler Memory capacity=30000 dumping to:
Filter name='worse'
Handler Stream %r
''' % (h1.stream,))
class MyFilter(object):
    """Minimal filter stand-in; its fixed repr is asserted in test output."""
    def __repr__(self):
        return '<MyFilter>'
class MyHandler(object):
    """Minimal handler stand-in; its fixed repr is asserted in test output."""
    def __repr__(self):
        return '<MyHandler>'
if __name__ == '__main__': # for Python <= 2.4
unittest.main()
|
[
"brandon@rhodesmill.org"
] |
brandon@rhodesmill.org
|
cb0b2c679a02d35a32e443a9412c0292555d4f6b
|
cff588a68be44913be884ba5c4ebf36a0a96cb75
|
/python/007study_namespace.py
|
3377539352b3e241261c717dfa8c5240c876539d
|
[] |
no_license
|
KOOKDONGHUN/trading
|
e6a8d023f4bdbb0f1cf32e3e5b6b26b6265fc3a6
|
2d4337978a5849098ed890e9e2c3f059e4706536
|
refs/heads/master
| 2022-11-15T00:38:32.705125
| 2020-07-12T10:25:46
| 2020-07-12T10:25:46
| 275,761,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
class Stock:
    # Class attribute, shared by every instance via the class namespace.
    market = 'kospi'

print(dir())# // ['Stock', '__annotations__', '__builtins__', '__cached__', '__doc__',
# // '__file__', '__loader__', '__name__', '__package__', '__spec__']
# 'Stock' has been added to the module namespace
print(Stock) # <class '__main__.Stock'>
# Defining a class creates an independent namespace; variables and methods
# defined inside the class are stored there as a Python dict.
print(Stock.market)
# How to inspect a namespace:
print(Stock.__dict__) # // {'__module__': '__main__', 'market': 'kospi', '__dict__': <attribute '__dict__' of 'Stock' objects>,
#// '__weakref__': <attribute '__weakref__' of 'Stock' objects>, '__doc__': None}
s1 = Stock()
s2 = Stock()
print(id(s1)) # e.g. 2120139199496 — distinct objects
print(id(s2)) # e.g. 2120139199560
print(s1.__dict__) # empty: instances start with no attributes of their own
print(s2.__dict__) # empty
s1.market = 'kosdaq'
print(s1.__dict__) # {'market': 'kosdaq'} — assignment creates an instance attribute
print(s2.__dict__) # still empty
print(s1.market) # kosdaq — instance attribute shadows the class attribute
# If the name is missing from the instance namespace, lookup falls back to the class
print(s2.market) # kospi
|
[
"dh3978@naver.com"
] |
dh3978@naver.com
|
e9a17f8c7e0790a3636e026ef43c3c2087b22714
|
49ce91d1058f98088350a262700aeb36673800f2
|
/aula39/aula39.py
|
a1a880a7a21687070682de71c6f65175b4c94fdc
|
[] |
no_license
|
adrxA/python
|
fc838d96518e5e4dd5e6bbdc5420cadf2f0882e2
|
31488e812af54c30117e7027b2b387bf9b4b78cc
|
refs/heads/master
| 2023-06-22T14:34:41.267845
| 2021-07-19T03:58:11
| 2021-07-19T03:58:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
import pandas as pd

# Student records: name, grade, approved flag.
alunos = {
    "Nome": ['Ricardo', 'Pedro', 'Roberto', 'Carlos'],
    "Nota": [4, 7, 9, 9.7],
    "Aprovado":['Não', 'Sim', 'Sim', 'Sim']
}
dataframe = pd.DataFrame(alunos)
print(dataframe.head())
# The dictionary was converted into a DataFrame

objeto1 = pd.Series([1,2,4,6,7,9])
print(objeto1)

matriz = [
    ["1" , "A" , "X"],
    ["2" , "B" , "Y"],
    ["3" , "C" , "Z"]
]
objeto2 = pd.Series(matriz)
print(objeto2)
# The matrix was converted into a Series (each row becomes one element)
|
[
"leovasc5@hotmail.com"
] |
leovasc5@hotmail.com
|
187e1c46f87f4c4220aeebea29f79220ed2388ec
|
1d3ee56d9a95ffc35e08bbc211f2a6b8e83be7c2
|
/resources/chart_line.py
|
95ee7407a9e507a1fe2c0397207e5748f31395b8
|
[] |
no_license
|
Lucas0Braz/charts_generete
|
6ea69cf3526ec713877d9214173d40a65456a184
|
748e110971839f4b1c7463c23c297fcebcdd70e1
|
refs/heads/master
| 2021-07-10T09:53:50.906534
| 2020-01-28T03:11:18
| 2020-01-28T03:11:18
| 236,640,364
| 0
| 0
| null | 2021-03-20T02:39:52
| 2020-01-28T02:16:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
from flask_restful import Resource,reqparse
from models.chart_line import Chart_LineModel
class Chart_LineResource(Resource):
    """REST resource that generates a line chart from posted x/y data."""

    # Request parser: 'x' and 'y' are required; action='append' collects
    # repeated form keys into a list.
    parser = reqparse.RequestParser()
    parser.add_argument('x',
        #type=list,
        action='append',
        required=True,
        help="This field X cannot be left blank, and must be a list of integers"
    )
    parser.add_argument('y',
        #type=list,
        action='append',
        required=True,
        help="This field Y cannot be left blank, and must be a list of integers"
    )

    def post(self):
        """Build a chart from the request payload; return its path, or 404 on failure."""
        data = self.parser.parse_args()
        import sys
        print(type(data), file=sys.stderr)  # NOTE(review): leftover debug output
        chart = Chart_LineModel(**data)
        path_chart = chart.gerar_chart()
        if path_chart is not None:
            return {"path_chart":f"{path_chart}"},200
        return {"message":"Something went bad"}, 404
|
[
"lucasbraz430@gmail.com"
] |
lucasbraz430@gmail.com
|
7f079cf4970ef56999d64bea7b1fcc1351cba791
|
cee5070499cf59dd5019f86e785e41ad5c55837b
|
/PageLocators/setting_tab_locator.py
|
daabcd188f53a965b2ab4371cdec37c91c884a4d
|
[] |
no_license
|
Yttyou/AppUi
|
b76cae345039d6c3a39f601960c085696b63b60a
|
d5303d33c4fb67d3b50e02918f15e5849a956b30
|
refs/heads/master
| 2023-02-25T07:59:12.759684
| 2021-02-01T01:47:45
| 2021-02-01T01:47:45
| 334,800,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
__author__ = 'developer'
# 【设定】tab页面元素定位
from appium.webdriver.common.mobileby import MobileBy as Mb
class SettingTabLocator:
    """Element locators for the [Settings] tab page."""

    setting_button = (Mb.ID, "com.suncity.sunpeople.qa:id/tv_setting") # nav bar [Settings] tab
    signout_button = (Mb.ID, "com.suncity.sunpeople.qa:id/tv_signout") # [Sign out] button
    # Confirmation dialog's sign-out button, matched by its visible text.
    confirm_signout_button = (
        Mb.ANDROID_UIAUTOMATOR, "new UiSelector().className(\"android.widget.TextView\").textContains(\"登出\")")
    signout_button_load = (Mb.ID,"com.suncity.sunpeople.qa:id/lcv_circleload") # sign-out loading spinner
    set_button = (Mb.ID, "com.suncity.sunpeople.qa:id/rl_setting") # settings button
    user_image = (Mb.ID, "com.suncity.sunpeople.qa:id/sdv_avatar") # user avatar
    taking_pictures = (Mb.CLASS_NAME, "android.widget.TextView") # take photo
    the_shutter = (Mb.ID, "NONE") # shutter — NOTE(review): placeholder id "NONE", confirm real resource id
    is_ok = (Mb.ID, "com.sec.android.app.camera:id/okay") # OK / confirm
|
[
"t-youtongtong@MEGVII-INC.com"
] |
t-youtongtong@MEGVII-INC.com
|
e793c7028c56c76c140624f40a268ba44ce1ccc3
|
90def3915f597b26182087a6c6b6d54eded6d940
|
/catalogExport/public_api/httpClient/callapi.py
|
4878c72c6096c9a0e2b3fcaf30d55721d642cf97
|
[
"MIT"
] |
permissive
|
goodbarber/shop_custom_dev_examples
|
4f8122b54149c2a6a0d618bcc7bfaa651b823fa2
|
b8aa99386fb2a1ef83ed86f519ef707c533d7470
|
refs/heads/main
| 2023-07-06T01:45:59.532726
| 2021-08-03T06:36:56
| 2021-08-03T06:36:56
| 342,181,521
| 1
| 1
| null | 2021-08-03T06:36:58
| 2021-02-25T08:55:20
|
Python
|
UTF-8
|
Python
| false
| false
| 689
|
py
|
import requests
class CallApi:
    '''HTTP helper wrapping the requests library for GET/PATCH/POST calls.

    Fix: the original methods all used a mutable default argument
    (``headers={}``); the default is now ``None`` and normalized inside
    each method, which is behaviorally identical for callers.
    '''

    def get(self, url, headers=None):
        """GET *url*; return the response, or raise HTTPError on non-200."""
        print(url)
        res = requests.get(url, headers=headers or {})
        if res.status_code == 200:
            return res
        raise requests.models.HTTPError(
            f"HTTP {res.status_code}, aborting.\nBody: {res.text}")

    def patch(self, url, data, headers=None):
        """PATCH *data* as JSON to *url*; return the raw response (not decoded)."""
        return requests.patch(url, headers=headers or {}, json=data)

    def post(self, url, data, headers=None):
        """POST *data* as JSON to *url*; return the raw response (not decoded)."""
        return requests.post(url, headers=headers or {}, json=data)
|
[
"christophelucchini@MacBook-Pro-de-Christophe.local"
] |
christophelucchini@MacBook-Pro-de-Christophe.local
|
fc0d64f7a2b0c2bf8d96c8331a25546b633e2b03
|
9f926ba20c9d72004f30944646eae2f9cc305afd
|
/timeline_logger/compat.py
|
b61099b1a6832c44e448678b0e81a5580c5352fb
|
[
"MIT"
] |
permissive
|
tsiaGeorge/django-timeline-logger
|
625a15c27fc6b5b4caf73b300120e0c3c29540e7
|
07bee5038dd84256e3863e6a7634e29521345ce1
|
refs/heads/master
| 2021-04-06T00:01:11.095501
| 2018-03-07T11:34:45
| 2018-03-07T11:34:45
| 125,027,461
| 0
| 0
| null | 2018-03-13T09:41:11
| 2018-03-13T09:41:11
| null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
import sys

# Safely importing the HTML parser utility, highly dependent on Python versions
if sys.version_info >= (3, 4):
    # From v3.4.0 in advance the stdlib provides the html module directly.
    import html
else:
    # Python 2.x versions: expose an HTMLParser instance under the name
    # ``html`` — presumably so callers can use the same html.unescape()
    # style API on both interpreters; confirm at call sites.
    from HTMLParser import HTMLParser
    html = HTMLParser()
|
[
"jose.lpa@gmail.com"
] |
jose.lpa@gmail.com
|
62ba6288d1f83cb12eabbd9f1ca1f699279434d8
|
96a6d2f6544d8d4aa43ce73c0abe851697fa0602
|
/birthplan/scheduler.py
|
722c5423ed7b18e606c1abf6f1d353fac063ce22
|
[] |
no_license
|
basman/atc_bot
|
7f12d65dbac21fc586646c1773e7144f8da2f985
|
4106bed0b635dbfabfb1cb4499f27154ed380c08
|
refs/heads/master
| 2021-01-20T04:37:25.582247
| 2014-10-31T11:22:15
| 2014-10-31T11:22:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,323
|
py
|
import string
import time
from position import Airport, Exit, Position
class Scheduler:
    """Plans collision-free flight paths for every airplane in the arena and
    sends the matching steering commands through the connector.

    Schedules live in a nested dict: ``_schedules[time][airplane]`` is the
    Position the airplane must occupy at that tick.
    NOTE: this module is Python 2 code (print statements, ``string.join``).
    """

    DEBUG_STEPS = False       # when True, print every recursion step of the path search
    DEBUG_NO_TIMEOUT = False  # when True, effectively disable the per-plane search time limit

    def __init__(self, arena, connector):
        """Remember the arena (world state) and the command connector."""
        self._arena = arena
        self._connector = connector
        self._schedules = {} # nested dict; _schedules[time][airplane_id] = position

    def _compute_commands(self, path, airplane):
        """Annotate the positions of *path* with altitude/direction commands
        so that *airplane* follows the planned trajectory."""
        # compute commands along the path
        delta_z = 0 # ascent rate
        delta_z_idx = -1 # start of ascent rate
        for i in range(1, len(path)):
            speed = airplane.speed
            if i == 1:
                # slow plane might be born at odd time, so we might need a command on the first position (i.e. starting from airports)
                speed = 1
            # slow planes move every second time step
            if path[i].time % speed != 0:
                continue
            if path[i].z - path[i - speed].z != delta_z:
                if delta_z_idx >= 0:
                    path[delta_z_idx].add_cmd_altitude(path[i - speed].z) # new delta_z
                delta_z = path[i].z - path[i - speed].z
                if delta_z != 0:
                    delta_z_idx = i - speed
                else:
                    delta_z_idx = -1
            # issue a turn command when the heading changes; planes that start
            # at z == 7 are special-cased (NOTE(review): z == 7 appears to mark
            # planes entering through an exit -- confirm against the arena code)
            if (i > 1 or path[0].z != 7) and path[i].dir != path[i - speed].dir or i == 1 and path[0].z == 7 and path[i].dir != path[0].reverseDirection():
                path[i - speed].add_cmd_direction(path[i].dir)
        else:
            # for-else: this always runs after the loop (there is no break above)
            # don't forget command for last section
            if delta_z != 0:
                path[delta_z_idx].add_cmd_altitude(path[-1].z)

    def _complex_path(self, airplane):
        """Last resort: dump every scheduled flight path for debugging, then
        raise -- called when no path could be computed for *airplane*."""
        # used for airplanes were brute force path computation took to long
        print "all scheduled flight paths at moment of despair:"
        for aid in sorted(self._arena.airplanes):
            a = self._arena.airplanes[aid]
            print "Airplane " + aid + ": ",
            i=self._arena.clock
            while i in self._schedules and a in self._schedules[i]:
                print "%15s " % str(self._schedules[i][a]),
                i += 1
            print ""
        raise Exception("emergency procedures necessary for airplane " + str(airplane))

    def _compute_path(self, airplane, timelimit):
        """Search a collision-free path for *airplane* to its destination and
        record it in self._schedules.

        Returns True on success; False when the search timed out or a
        collision-free take-off is currently impossible.
        """
        #print "looking for a path from " + str(start) + " to " + str(dest) + ", starting at " + str(p)
        begin_computation = time.time()
        start = Position(airplane)
        start.time = self._arena.clock
        plan = [ start ]
        # aim for approach position, one step in front of airport
        if isinstance(airplane.dest, Airport):
            tmp = Position(airplane.dest)
            tmp.dir = tmp.reverseDirection()
            approach = tmp.step(0, 1)
            approach.dir = airplane.dest.dir # turn around
        else:
            approach = Position(airplane.dest)
            approach.dir_tolerance = 90 # allow max. 90 degree derivation from target direction
        # enter recursion
        if not self._step_recursive(airplane, plan, start, approach, timelimit):
            if time.time() > timelimit:
                print "Path of " + str(airplane) + " from " + str(start) + " to " + str(airplane.dest) + ": COMPUTATION TIMEOUT (comp.time=" + \
                    str(int((time.time()-begin_computation) * 1000000)/1000.0) + "ms)"
            else:
                print "Airplane " + str(airplane) + " will delay its take-off due to ongoing traffic"
            return False
        # append destination itself
        d = Position(airplane.dest)
        d.time = plan[-1].time + 1
        plan.append( d )
        self._compute_commands(plan, airplane)
        print "Path of " + str(airplane) + " from " + str(start) + " to " + str(airplane.dest) + " (" + str(len(plan)) + " steps, comp.time=" + \
            str(int((time.time()-begin_computation) * 100000)/100.0) + "ms): ",
        print string.join(map(str, plan), '; ')
        # add schedule to database
        for s in plan:
            if s.time < self._arena.clock:
                raise Exception("can't schedule for past time " + str(s.time) + ". current time: " + str(self._arena.clock))
            if not s.time in self._schedules:
                self._schedules[s.time] = {}
            self._schedules[s.time][airplane] = s
        return True

    def _scheduled_is_collision(self, airplane, p):
        """Return True when position *p* collides with any position already
        scheduled for the same tick."""
        if p.time in self._schedules:
            for a in self._schedules[p.time]:
                if self._schedules[p.time][a].is_collision(p):
                    return True
        return False

    def _step_recursive(self, airplane, path, p, dest, timeout):
        """Depth-first search for the next step towards *dest*, trying the
        candidates closest to the destination first.

        Extends *path* in place on success; returns True/False.
        """
        # slow planes move every second time step
        if (p.time+1) % airplane.speed != 0:
            # the plane stays put this tick; only advance its clock
            p = Position(p)
            p.time += 1
            if self._scheduled_is_collision(airplane, p):
                return False
            path.append(p)
            if not self._step_recursive(airplane, path, p, dest, timeout):
                del(path[-1])
                return False
            else:
                return True
        if p.equals(dest):
            return True
        if len(path) >= airplane.fuel * airplane.speed:
            # safe one fuel unit for the last step from approach to destination
            return False
        if time.time() > timeout:
            #print "Airplane " + str(airplane) + " can't find a path before next update"
            return False
        #self.log += "\n _step_recursive: try " + str(p)
        steps = self._gen_possible_steps(p)
        possible_steps = {}
        # try to walk in any direction (preferrably towards dest)
        for s in steps:
            s.time = p.time+1
            if self._scheduled_is_collision(airplane, s):
                continue
            skip = False
            if s.equals(dest):
                # present arrival as only solution
                possible_steps[0] = [s]
                break
            # exclude illegal steps (out of area or invalid altitude)
            if ( s.x <= 0 or s.y <= 0 or s.y >= self._arena.height-1 or s.x >= self._arena.width-1 or s.z < 1 or s.z > 9):
                continue
            # must start straight from airport
            if path[0].z == 0 and len(path) < 2 and s.dir != path[0].dir:
                continue
            if skip:
                continue
            distance = dest.distance(s)
            if not distance in possible_steps:
                possible_steps[distance] = []
            possible_steps[distance].append(s)
        if len(possible_steps) == 0:
            #print "  step_rec.: fail"
            return False
        # flatten candidates ordered by increasing distance to the destination
        ordered_steps = []
        for d in sorted(possible_steps):
            ordered_steps.extend(possible_steps[d])
        for st in ordered_steps:
            path.append(st)
            if Scheduler.DEBUG_STEPS:
                print '-STEPPING(' + str(len(path)) + '): ' + ','.join(map(str, path))
            if self._step_recursive(airplane, path, st, dest, timeout):
                return True
            else:
                del(path[-1])
        return False

    def _gen_possible_steps(self, pos):
        """Enumerate every position reachable from *pos* within one tick."""
        steps = []
        if pos.z == 0:
            steps.append(Position(pos)) # stay at airport or ...
            steps.append(pos.step(0, 1)) # ...take off
        else:
            # turns of up to 90 degrees combined with climb/hold/descend
            for delta_dir in ( 0, -45, 45, -90, 90 ):
                for delta_z in (-1, 0, 1):
                    npos = pos.step(delta_dir, delta_z)
                    # skip invalid steps
                    if not npos is None:
                        steps.append(npos)
        return steps

    def update(self):
        """Called once per arena tick: verify guided planes follow their
        schedules, send pending commands, and route unguided planes."""
        # cleanup past schedule
        if self._arena.clock-1 in self._schedules:
            del(self._schedules[self._arena.clock-1])
        # Prio 0: guide old planes
        commands = [] # collect commands of all guided airplanes
        unguided = [] # list unguided airplanes for path computation
        gonner = [] # list airplanes that jumped (glitch!) and therefor were reborn
        for aid in sorted(self._arena.airplanes.keys()):
            a = self._arena.airplanes[aid]
            if self._arena.clock in self._schedules and a in self._schedules[self._arena.clock]:
                # only airplanes still on the ground can avoid this loop
                # (i.e. no collision free launch is possible at the moment)
                for c in self._schedules[self._arena.clock][a].cmd:
                    commands.append(a.id + c + "\n")
                    print "cmd: " + a.id + c
                # check flight path position for each plane
                if not a.equals(self._schedules[self._arena.clock][a]):
                    # In rare cases an airplane can reach its destination and the ID is reused by a new plane during the same update cycle.
                    # The bot will think it is still the old plane that jumped to a different location.
                    # We analyse the jump distance. If it's more than 3, we delete the airplane and let it reappear by the next update, which will
                    # trigger a path calculation.
                    if a.distance(self._schedules[self._arena.clock][a]) >= 4:
                        print "REBORN airplane " + str(a)
                        gonner.append(a)
                    else:
                        print "Path: " + self._sched2str(a)
                        raise Exception("airplane left flight path: " + str(a) + ", expected " + str(self._schedules[self._arena.clock][a]) + ', t=' + str(self._arena.clock))
            else:
                unguided.append(a)
        if len(commands) > 0:
            self._connector.send(string.join(commands))
            commands = []
        waiting = {}
        # allow searching for a solution for almost one update interval of atc
        timelimit = time.time() + (float(self._arena.update_time - 0.02) / max(len(unguided), 1))
        if Scheduler.DEBUG_NO_TIMEOUT:
            timelimit = time.time() + 3600*24*7
        # cleanup airplanes and commands that were reborn under the same name. They will be routed upon the next update cycle.
        for a in gonner:
            del(self._arena.airplanes[a.id])
            for i in range(2):
                if self._arena.clock+i in self._schedules and a in self._schedules[self._arena.clock+i]:
                    del(self._schedules[self._arena.clock+i][a])
        for a in unguided:
            # Prio 1: guide new planes in the air
            if a.z > 0:
                # new airplane already in flight
                if not self._compute_path(a, timelimit):
                    self._complex_path(a)
            else:
                # Prio 2: guide new planes waiting on the ground
                ap = a.start
                # pull up one single airplane per airport
                if not ap in waiting and not ap.must_wait(self._arena.clock):
                    waiting[ap] = a
                    if not self._arena.clock in self._schedules or not a in self._schedules[self._arena.clock]:
                        self._compute_path(a, timelimit)
                    # send commands for freshly routed planes
                    if self._arena.clock in self._schedules and a in self._schedules[self._arena.clock]:
                        for c in self._schedules[self._arena.clock][a].cmd:
                            commands.append(a.id + c + "\n")
                            print "cmd: " + a.id + c
        if len(commands) > 0:
            self._connector.send(string.join(commands))

    def _sched2str(self, airplane):
        """Return the airplane's remaining scheduled positions as one string."""
        clock = self._arena.clock
        result = ''
        while clock in self._schedules and airplane in self._schedules[clock]:
            if clock != self._arena.clock:
                result += ', '
            result += str(self._schedules[clock][airplane])
            clock += 1
        return result
|
[
"rha_github@disconnect.ch"
] |
rha_github@disconnect.ch
|
6dc14984a85ef7d3fbfbaa874a0267f1787fbeba
|
8723d2cb798f6198a84f5746cb9a2ae732a49dd6
|
/vafnet/util/Optimizer.py
|
1cb4956e0d5975f00f0c6a23b9fda71bfe48a11d
|
[
"Apache-2.0"
] |
permissive
|
MaxInGaussian/VAFnet
|
5d3984ae97be576ac61df5995f58dad96aa6ad05
|
618a16abae08a193b94072d5d5ff176f02bb1288
|
refs/heads/master
| 2021-07-11T06:31:49.834370
| 2017-10-15T15:54:29
| 2017-10-15T15:54:29
| 104,314,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,637
|
py
|
# Copyright 2017 Max W. Y. Lam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import numpy.random as npr
import theano
import theano.tensor as TT
from collections import OrderedDict
# Explicit public API: star-imports from this module expose only Optimizer.
__all__ = [
    "Optimizer",
]
class Optimizer(object):
    """Namespace of Theano-based gradient-update rules.

    Each static method receives an ``updates`` OrderedDict whose single
    entry maps a shared parameter to its gradient (or, for the momentum
    variants, its update expression) and returns an OrderedDict of update
    expressions suitable for ``theano.function(..., updates=...)``.
    """

    # Names of the update rules implemented below.
    algos = [
        "momentum",
        "nesterov",
        "sgd",
        "adagrad",
        "rmsprop",
        "adadelta",
        "adam",
        "adamax",
    ]

    @staticmethod
    def momentum(updates,
                 momentum=0.9):
        """Returns a modified update dictionary including momentum
        Generates update expressions of the form:
        *``velocity := momentum*velocity+updates[param]-param``
        *``param := param+velocity``
        Parameters
        ----------
        updates : OrderedDict
            A dictionary mapping parameters to update expressions
        momentum : float or symbolic scalar, optional
            The amount of momentum to apply. Higher momentum results in
            smoothing over more update steps. Defaults to 0.9.
        Returns
        -------
        OrderedDict
            A copy of `updates` with momentum updates for all `params`.
        Notes
        -----
        Higher momentum also results in larger update steps. To counter that,
        you can optionally scale your learning rate by `1-momentum`.
        """
        # NOTE(review): only the first (assumed only) entry of *updates* is used.
        params = list(updates.keys())[0]
        updates = OrderedDict(updates)
        value = params.get_value(borrow=True)
        velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                                 broadcastable=params.broadcastable)
        x = momentum*velocity+updates[params]
        updates[velocity] = x-params
        updates[params] = x
        return updates

    @staticmethod
    def nesterov(updates,
                 momentum=0.9):
        """Returns a modified update dictionary including Nesterov momentum
        Generates update expressions of the form:
        *``velocity := momentum*velocity+updates[params]-params``
        *``params := params+momentum*velocity+updates[params]-params``
        Parameters
        ----------
        updates : OrderedDict
            A dictionary mapping parameters to update expressions
        momentum : float or symbolic scalar, optional
            The amount of momentum to apply. Higher momentum results in
            smoothing over more update steps. Defaults to 0.9.
        Returns
        -------
        OrderedDict
            A copy of `updates` with momentum updates for all `params`.
        Notes
        -----
        Higher momentum also results in larger update steps. To counter that,
        you can optionally scale your learning rate by `1-momentum`.
        The classic formulation of Nesterov momentum (or Nesterov accelerated
        gradient) requires the gradient to be evaluated at the predicted next
        position in parameter space. Here, we use the formulation described at
        https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,
        which allows the gradient to be evaluated at the current parameters.
        """
        params = list(updates.keys())[0]
        updates = OrderedDict(updates)
        value = params.get_value(borrow=True)
        velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                                 broadcastable=params.broadcastable)
        x = momentum*velocity+updates[params]-params
        updates[velocity] = x
        updates[params] = momentum*x+updates[params]
        return updates

    @staticmethod
    def sgd(updates,
            learning_rate=0.01,
            **args):
        """Stochastic Gradient Descent (SGD) updates
        Generates update expressions of the form:
        *``params := params-learning_rate*gradient``
        Parameters
        ----------
        updates : OrderedDict
            A dictionary mapping parameters to update expressions
        learning_rate : float or symbolic scalar
            The learning rate controlling the size of update steps
        Returns
        -------
        OrderedDict
            A dictionary mapping each parameter to its update expression
        """
        params, grads = list(updates.items())[0]
        updates = OrderedDict(updates)
        updates[params] = params-learning_rate*grads
        return updates

    @staticmethod
    def adagrad(updates,
                learning_rate=0.01,
                epsilon=1e-6,
                **args):
        """Adagrad updates
        Scale learning rates by dividing with the square root of accumulated
        squared gradients. See [1]_ for further description.
        Parameters
        ----------
        updates : OrderedDict
            A dictionary mapping parameters to update expressions
        learning_rate : float or symbolic scalar
            The learning rate controlling the size of update steps
        epsilon : float or symbolic scalar
            Small value added for numerical stability
        Returns
        -------
        OrderedDict
            A dictionary mapping each parameter to its update expression
        Notes
        -----
        Using step size eta Adagrad calculates the learning rate for feature i at
        time step t as:
        .. math:: \\eta_{t,i} = \\frac{\\eta}
           {\\sqrt{\\sum^t_{t^\\prime} g^2_{t^\\prime,i}+\\epsilon}} g_{t,i}
        as such the learning rate is monotonically decreasing.
        Epsilon is not included in the typical formula, see [2]_.
        References
        ----------
        .. [1] Duchi, J., Hazan, E., & Singer, Y. (2011):
               Adaptive subgradient methods for online learning and stochastic
               optimization. JMLR, 12:2121-2159.
        .. [2] Chris Dyer:
               Notes on AdaGrad. http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
        """
        params, grads = list(updates.items())[0]
        updates = OrderedDict(updates)
        value = params.get_value(borrow=True)
        accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                             broadcastable=params.broadcastable)
        accu_new = accu+grads**2
        updates[accu] = accu_new
        updates[params] = params-(learning_rate*grads/TT.sqrt(accu_new+epsilon))
        return updates

    @staticmethod
    def rmsprop(updates,
                learning_rate=0.01,
                rho=0.9,
                epsilon=1e-6,
                **args):
        """RMSProp updates
        Scale learning rates by dividing with the moving average of the root mean
        squared (RMS) gradients. See [1]_ for further description.
        Parameters
        ----------
        updates : OrderedDict
            A dictionary mapping parameters to update expressions
        learning_rate : float or symbolic scalar
            The learning rate controlling the size of update steps
        rho : float or symbolic scalar
            Gradient moving average decay factor
        epsilon : float or symbolic scalar
            Small value added for numerical stability
        Returns
        -------
        OrderedDict
            A dictionary mapping each parameter to its update expression
        Notes
        -----
        `rho` should be between 0 and 1. A value of `rho` close to 1 will decay the
        moving average slowly and a value close to 0 will decay the moving average
        fast.
        Using the step size :math:`\\eta` and a decay factor :math:`\\rho` the
        learning rate :math:`\\eta_t` is calculated as:
        .. math::
           r_t &= \\rho r_{t-1}+(1-\\rho)*g^2\\\\
           \\eta_t &= \\frac{\\eta}{\\sqrt{r_t+\\epsilon}}
        References
        ----------
        .. [1] Tieleman, TT. and Hinton, G. (2012):
               Neural Networks for Machine Learning, Lecture 6.5-rmsprop.
               Coursera. http://www.youtube.com/watch?v=O3sxAc4hxZU (formula @5:20)
        """
        params, grads = list(updates.items())[0]
        updates = OrderedDict(updates)
        one = TT.constant(1)
        value = params.get_value(borrow=True)
        accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                             broadcastable=params.broadcastable)
        # BUG FIX: was ``grad**2`` -- an undefined name that raised NameError
        # when this rule was used; the gradient variable here is ``grads``.
        accu_new = rho*accu+(one-rho)*grads**2
        updates[accu] = accu_new
        updates[params] = params-(learning_rate*grads/TT.sqrt(accu_new+epsilon))
        return updates

    @staticmethod
    def adadelta(updates,
                 learning_rate=1.,
                 rho=0.95,
                 epsilon=1e-6,
                 **args):
        """ Adadelta updates
        Scale learning rates by the ratio of accumulated gradients to accumulated
        updates, see [1]_ and notes for further description.
        Parameters
        ----------
        updates : OrderedDict
            A dictionary mapping parameters to update expressions
        learning_rate : float or symbolic scalar
            The learning rate controlling the size of update steps
        rho : float or symbolic scalar
            Squared gradient moving average decay factor
        epsilon : float or symbolic scalar
            Small value added for numerical stability
        Returns
        -------
        OrderedDict
            A dictionary mapping each parameter to its update expression
        Notes
        -----
        rho should be between 0 and 1. A value of rho close to 1 will decay the
        moving average slowly and a value close to 0 will decay the moving average
        fast.
        rho = 0.95 and epsilon=1e-6 are suggested in the paper and reported to
        work for multiple datasets (MNIST, speech).
        In the paper, no learning rate is considered (so learning_rate=1.0).
        Probably best to keep it at this value.
        epsilon is important for the very first update (so the numerator does
        not become 0).
        Using the step size eta and a decay factor rho the learning rate is
        calculated as:
        .. math::
           r_t &= \\rho r_{t-1}+(1-\\rho)*g^2\\\\
           \\eta_t &= \\eta \\frac{\\sqrt{s_{t-1}+\\epsilon}}
                             {\\sqrt{r_t+\\epsilon}}\\\\
           s_t &= \\rho s_{t-1}+(1-\\rho)*(\\eta_t*g)^2
        References
        ----------
        .. [1] Zeiler, M. D. (2012):
               ADADELTA: An Adaptive Learning Rate Method.
               arXiv Preprint arXiv:1212.5701.
        """
        params, grads = list(updates.items())[0]
        updates = OrderedDict(updates)
        one = TT.constant(1)
        value = params.get_value(borrow=True)
        accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                             broadcastable=params.broadcastable)
        delta_accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                                   broadcastable=params.broadcastable)
        accu_new = rho*accu+(one-rho)*grads**2
        updates[accu] = accu_new
        update = (grads*TT.sqrt(delta_accu+epsilon)/
                  TT.sqrt(accu_new+epsilon))
        updates[params] = params-learning_rate*update
        delta_accu_new = rho*delta_accu+(one-rho)*update**2
        updates[delta_accu] = delta_accu_new
        return updates

    @staticmethod
    def adam(updates,
             learning_rate=0.01,
             beta1=0.9,
             beta2=0.99,
             epsilon=1e-8,
             **args):
        """Adam updates
        Adam updates implemented as in [1]_.
        Parameters
        ----------
        updates : OrderedDict
            A dictionary mapping parameters to update expressions
        learning_rate : float
            Learning rate
        beta1 : float
            Exponential decay rate for the first moment estimates.
        beta2 : float
            Exponential decay rate for the second moment estimates.
        epsilon : float
            Constant for numerical stability.
        Returns
        -------
        OrderedDict
            A dictionary mapping each parameter to its update expression
        Notes
        -----
        The paper [1]_ includes an additional hyperparameter lambda. This is only
        needed to prove convergence of the algorithm and has no practical use
        (personal communication with the authors), it is therefore omitted here.
        NOTE(review): the default beta2=0.99 differs from the paper's 0.999;
        it is kept unchanged for backward compatibility with existing callers.
        References
        ----------
        .. [1] Kingma, Diederik, and Jimmy Ba (2014):
               Adam: A Method for Stochastic Optimization.
               arXiv preprint arXiv:1412.6980.
        """
        params, grads = list(updates.items())[0]
        updates = OrderedDict(updates)
        t_prev = theano.shared(np.asarray(0., dtype=theano.config.floatX))
        one = TT.constant(1)
        t = t_prev+1
        a_t = learning_rate*TT.sqrt(one-beta2**t)/(one-beta1**t)
        value = params.get_value(borrow=True)
        m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                               broadcastable=params.broadcastable)
        v_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                               broadcastable=params.broadcastable)
        m_t = beta1*m_prev+(one-beta1)*grads
        v_t = beta2*v_prev+(one-beta2)*grads**2
        step = a_t*m_t/(TT.sqrt(v_t)+epsilon)
        updates[m_prev] = m_t
        updates[v_prev] = v_t
        updates[params] = params-step
        updates[t_prev] = t
        return updates

    @staticmethod
    def adamax(updates,
               learning_rate=0.01,
               beta1=0.9,
               beta2=0.999,
               epsilon=1e-8,
               **args):
        """Adamax updates
        Adamax updates implemented as in [1]_. This is a variant of of the Adam
        algorithm based on the infinity norm.
        Parameters
        ----------
        updates : OrderedDict
            A dictionary mapping parameters to update expressions
        learning_rate : float
            Learning rate
        beta1 : float
            Exponential decay rate for the first moment estimates.
        beta2 : float
            Exponential decay rate for the weighted infinity norm estimates.
        epsilon : float
            Constant for numerical stability.
        Returns
        -------
        OrderedDict
            A dictionary mapping each parameter to its update expression
        References
        ----------
        .. [1] Kingma, Diederik, and Jimmy Ba (2014):
               Adam: A Method for Stochastic Optimization.
               arXiv preprint arXiv:1412.6980.
        """
        params, grads = list(updates.items())[0]
        updates = OrderedDict(updates)
        t_prev = theano.shared(np.asarray(0., dtype=theano.config.floatX))
        one = TT.constant(1)
        t = t_prev+1
        a_t = learning_rate/(one-beta1**t)
        value = params.get_value(borrow=True)
        m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                               broadcastable=params.broadcastable)
        u_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                               broadcastable=params.broadcastable)
        m_t = beta1*m_prev+(one-beta1)*grads
        u_t = TT.maximum(beta2*u_prev, abs(grads))
        step = a_t*m_t/(u_t+epsilon)
        updates[m_prev] = m_t
        updates[u_prev] = u_t
        updates[params] = params-step
        updates[t_prev] = t
        return updates
|
[
"maxingaussian@gmail.com"
] |
maxingaussian@gmail.com
|
405b180fbc5d7387565172702112313d01dff282
|
6efb6f69426bbd821f2e748decb9d9c0652e0743
|
/lab4/venv/Scripts/UTscapy-script.py
|
0630774b1472e4df9b492ed23be125597842f719
|
[] |
no_license
|
Notheryne/Sieci
|
cd3616577c40f0b546f1bfa77aca0d7f054997b2
|
fcdb1b7af298ca4eaea8110b0a7dc171400ce37b
|
refs/heads/master
| 2020-09-04T16:26:13.821539
| 2020-01-28T22:43:35
| 2020-01-28T22:43:35
| 219,801,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
#!D:\Programowanie\SieciLab\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'scapy==2.4.3','console_scripts','UTscapy'
# Auto-generated setuptools console-script shim that launches scapy's
# UTscapy unit-test tool from this virtualenv.
__requires__ = 'scapy==2.4.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py(w)'/'.exe' suffix so sys.argv[0] matches the
    # plain tool name that UTscapy expects.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Resolve the 'UTscapy' console_scripts entry point declared by
        # scapy 2.4.3 and run it; its return value becomes the exit code.
        load_entry_point('scapy==2.4.3', 'console_scripts', 'UTscapy')()
    )
|
[
"rydzinski.bartosz.1998@gmail.com"
] |
rydzinski.bartosz.1998@gmail.com
|
d661b6d2d860ce3bc6425535f573ad65b0bb1d5a
|
c23692a8b77916124ac3fd840ff8466405b5e2ff
|
/samples/clap_driving.py
|
dbecdc37ba53a76df07a43866d8354938ef3d11e
|
[] |
no_license
|
toanh/edpysim
|
575586cda1c8f6364841e162af801cfeaa509f6f
|
e9d3244c4f2414de9a370adc9f831e71f9296fe2
|
refs/heads/master
| 2021-05-17T21:05:10.180301
| 2020-04-08T06:48:26
| 2020-04-08T06:48:26
| 250,952,059
| 2
| 1
| null | 2020-04-08T06:49:46
| 2020-03-29T04:18:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
#-------------Setup----------------
Ed.EdisonVersion = Ed.V2
Ed.DistanceUnits = Ed.CM
Ed.Tempo = Ed.TEMPO_MEDIUM
def waitClap():
    """Block until the clap sensor reports a detection."""
    #loop around, waiting for a clap to be detected
    while Ed.ReadClapSensor() != Ed.CLAP_DETECTED:
        pass
#--------Your code below-----------
# Main behaviour: one clap -> spin right 45 degrees; two claps in quick
# succession -> drive forward 15 cm. LEDs indicate what was detected.
while True:
    #wait for a clap to be detected
    waitClap()
    #turn on LED to indicate a detection
    Ed.RightLed(Ed.ON)
    #wait a short amount of time so that the same clap is not detected twice
    Ed.TimeWait(100, Ed.TIME_MILLISECONDS)
    #clear the clap detection, so that the same clap is not detected twice
    Ed.ReadClapSensor()
    #wait a short amount of time to ensure the second clap has time to be detected
    Ed.TimeWait(250, Ed.TIME_MILLISECONDS)
    #test to see if a second clap has occured
    if Ed.ReadClapSensor() == Ed.CLAP_DETECTED:
        #A second clap has been found! turn on the other LED and drive forwards
        Ed.LeftLed(Ed.ON)
        Ed.Drive(Ed.FORWARD, Ed.SPEED_10, 15)
    else:
        #only one clap detected. spin to the right
        Ed.Drive(Ed.SPIN_RIGHT, Ed.SPEED_10, 45)
    # wait a short time and clears the clap detection before looping
    Ed.TimeWait(250, Ed.TIME_MILLISECONDS)
    Ed.RightLed(Ed.OFF)
    Ed.LeftLed(Ed.OFF)
    Ed.ReadClapSensor()
#To use this code with Edison Version 1:
#change the version in the setup to Ed.EdisonVersion = Ed.V1
#(the original comment wrongly said Ed.V2, which the setup already uses)
#change Ed.DistanceUnits = Ed.CM to Ed.DistanceUnits = Ed.TIME
|
[
"toan.kien@gmail.com"
] |
toan.kien@gmail.com
|
d53862d05b52b8276f07a6c6e02b29cd0b79c931
|
2f95a207bb948ad1ac09e9a8d50c5ec0b72c9201
|
/untils.py
|
6e6ccaf3bb46994152037295c93527d33dca7938
|
[] |
no_license
|
wangyuanhao/springD2A
|
037e79117cf493481e35da694eca0857348189a5
|
89f1f6d1f6a3e4a942d28a83232c643578888854
|
refs/heads/main
| 2023-05-01T13:52:00.406182
| 2021-05-24T02:45:09
| 2021-05-24T02:45:09
| 348,259,419
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,083
|
py
|
import os
from torch.utils.data import TensorDataset, DataLoader
import torch
import matplotlib.pyplot as plt
import numpy as np
from torch import nn
import logging
def get_logger(filename, verbosity=1, name=None):
    """Create a logger that mirrors every record to *filename* and to stderr.

    verbosity selects the level: 0 -> DEBUG, 1 -> INFO (default), 2 -> WARNING.
    The log file is opened in "w" mode, so a previous run's log is truncated.
    """
    levels = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    record_format = logging.Formatter(
        "[%(asctime)s][%(levelname)s] %(message)s"
    )
    log = logging.getLogger(name)
    log.setLevel(levels[verbosity])

    file_handler = logging.FileHandler(filename, "w")
    file_handler.setFormatter(record_format)
    log.addHandler(file_handler)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(record_format)
    log.addHandler(stream_handler)
    return log
def chck_dir(tdir):
    """Create directory *tdir* (including parents) when it does not exist."""
    if os.path.exists(tdir):
        return
    os.makedirs(tdir)
def compute_adj(X, topk):
    """Build a symmetric mutual-kNN adjacency matrix from torch tensor *X*.

    An edge (i, j) survives only when i is among j's *topk* nearest rows AND
    vice versa (each row's nearest set includes itself). The diagonal is
    zeroed before returning.
    """
    feats = X.numpy()
    n = feats.shape[0]
    directed = np.zeros((n, n))
    for row in range(n):
        diff = feats[row, :] - feats
        sq_dist = np.diag(np.dot(diff, diff.T))
        nearest = np.argsort(sq_dist)[:topk]
        directed[row, nearest] = 1
    # (A + A.T)/2 > 0.5 keeps only edges present in BOTH directions.
    mutual = ((directed + directed.T) / 2) > 0.5
    sym = np.where(mutual, 1.0, 0.0)
    return sym - np.diag(np.diag(sym))
def ind2sub(array_shape, ind):
    """Convert flat indices *ind* into (rows, cols) for *array_shape*.

    Out-of-range entries are clamped to -1 IN PLACE in *ind* (numpy array
    expected), so their row comes out as -1 and their column as ncols - 1.
    """
    invalid = (ind < 0) | (ind >= array_shape[0] * array_shape[1])
    ind[invalid] = -1
    rows = ind // array_shape[1]
    cols = ind % array_shape[1]
    return rows, cols
def get_k_fold(k, i, X, y):
    """Split (X, y) into k equal folds; fold *i* is validation, rest training.

    Returns (X_train, y_train, X_valid, y_valid). Trailing samples that do
    not fill a whole fold (X.shape[0] % k) are excluded from every fold.
    """
    assert k > 1
    fold_size = X.shape[0] // k
    train_X_parts, train_y_parts = [], []
    X_valid, y_valid = None, None
    for fold in range(k):
        sel = slice(fold * fold_size, (fold + 1) * fold_size)
        X_part, y_part = X[sel, :], y[sel]
        if fold == i:
            X_valid, y_valid = X_part, y_part
        else:
            train_X_parts.append(X_part)
            train_y_parts.append(y_part)
    X_train = torch.cat(train_X_parts, dim=0)
    y_train = torch.cat(train_y_parts, dim=0)
    return X_train, y_train, X_valid, y_valid
def flatten_data(disease_data, drug_data, get_idx=False):
    """Pair every disease row with every drug row (Cartesian product).

    With get_idx=True, return only the (disease_idx, drug_idx) index tensors
    enumerating all combinations in row-major order; otherwise return the
    concatenated feature rows.
    """
    n_disease = disease_data.shape[0]
    n_drug = drug_data.shape[0]
    # torch.where over an all-zero matrix enumerates every (i, j) cell.
    pair_idx = torch.where(torch.zeros((n_disease, n_drug)) == 0)
    if get_idx:
        return pair_idx
    return torch.cat((disease_data[pair_idx[0]], drug_data[pair_idx[1]]), dim=1)
def data_iter_obsolte(pos_X_train_idx, unkwn_pairs_idx, batch_size, neg_pos_ratio):
    """Deprecated sampler: mix all positives with randomly drawn negatives.

    Shuffles *unkwn_pairs_idx* IN PLACE, keeps at most neg_pos_ratio negatives
    per positive, labels positives 1 / negatives 0, and wraps the (index,
    label) pairs in a shuffled DataLoader.
    """
    np.random.shuffle(unkwn_pairs_idx)  # in-place, same as the original
    n_pos = len(pos_X_train_idx)
    n_neg = np.minimum(neg_pos_ratio * n_pos, len(unkwn_pairs_idx))
    chosen_neg = unkwn_pairs_idx[:n_neg]
    labels = torch.cat((torch.ones(n_pos, ), torch.zeros(n_neg, )), dim=0)
    samples = torch.cat(
        (torch.tensor(pos_X_train_idx), torch.tensor(chosen_neg)), dim=0)
    return DataLoader(TensorDataset(samples, labels), batch_size, shuffle=True)
def data_loader(pos_X_train_idx, unkwn_pairs_idx, batch_size, neg_pos_ratio, neg_sample_weight):
    """Build paired positive/negative DataLoaders for one training pass.

    Negatives are drawn (without replacement, weighted by *neg_sample_weight*)
    from the unknown pairs, at most neg_pos_ratio per positive. Returns a zip
    of the two loaders plus the chosen negative indices.
    """
    pos_num = len(pos_X_train_idx)
    neg_num = np.minimum(neg_pos_ratio * pos_num, len(unkwn_pairs_idx))
    select_unkwn_pairs_idx = np.random.choice(unkwn_pairs_idx, neg_num, replace=False, p=neg_sample_weight)
    pos_train_iter = data_iter(pos_X_train_idx, batch_size, pos=True)
    # the negative loader uses a proportionally larger batch so both loaders
    # yield the same number of batches
    neg_train_iter = data_iter(select_unkwn_pairs_idx, batch_size*neg_pos_ratio, pos=False)
    # BUG FIX: the original `assert "pos-neg missmathced!"` asserted a truthy
    # string literal, so it could never fire; assert the real condition.
    assert len(pos_train_iter) == len(neg_train_iter), "pos-neg missmathced!"
    return zip(pos_train_iter, neg_train_iter), select_unkwn_pairs_idx
def data_iter(train_idx, batch_size, pos=True):
    """Wrap index sequence *train_idx* in a shuffled DataLoader.

    Every sample is labelled 1.0 when pos=True, otherwise 0.0.
    """
    n = len(train_idx)
    labels = torch.ones((n,)) if pos else torch.zeros((n,))
    paired = TensorDataset(torch.tensor(train_idx), labels)
    return DataLoader(paired, batch_size, shuffle=True)
def loss_in_epoch(train_ce_ls, train_roc_ls, train_pr_ls, test_ce_ls, test_roc_ls, test_pr_ls,
                  title_, fout1, fout2, num_epochs, interval):
    """Save two figures: loss curves to *fout1*, ROC/PR metric curves to *fout2*.

    The x axis runs from *interval* to *num_epochs* in steps of *interval*,
    matching one recorded value per evaluation interval.
    """
    epochs = range(interval, num_epochs + interval, interval)

    loss_fig, loss_ax = plt.subplots()
    loss_ax.plot(epochs, train_ce_ls, "r--", label="Train Loss")
    loss_ax.plot(epochs, test_ce_ls, "b", label="Test Loss")
    loss_ax.set(xlabel="Epoch", ylabel="Loss", title=title_)
    loss_ax.legend(loc='best')
    loss_fig.savefig(fout1)

    metric_fig, metric_ax = plt.subplots()
    metric_ax.plot(epochs, train_roc_ls, "r", label="Train ROC")
    metric_ax.plot(epochs, test_roc_ls, "b", label="Test ROC")
    metric_ax.plot(epochs, train_pr_ls, "r--", label="Train PR")
    metric_ax.plot(epochs, test_pr_ls, "b--", label="Test PR")
    metric_ax.set(xlabel="Epoch", ylabel="Metric", title=title_)
    metric_ax.legend(loc='best')
    metric_fig.savefig(fout2)
def adjust_learning_rate(optimizer, epoch, init_lr):
    """Step-wise LR schedule: init_lr below epoch 100, 0.01 below 200, then 0.001.

    Writes the chosen rate into every param group of *optimizer* in place.
    """
    new_lr = init_lr if epoch < 100 else (0.01 if epoch < 200 else 0.001)
    for group in optimizer.param_groups:
        group["lr"] = new_lr
def cyclial_learning_rate(optimizer, epoch, min_lr, init_max_lr, step, lr_decay):
    """Triangular cyclical learning rate whose ceiling decays each cycle.

    One full cycle spans 2*step epochs; the LR ramps linearly from min_lr up
    to the (decayed) ceiling and back. Updates all param groups in place and
    returns the optimizer.
    """
    half_period = 2 * step
    completed_cycles = np.floor(epoch / half_period)
    ceiling = init_max_lr * lr_decay ** completed_cycles
    cycle = np.ceil(epoch / half_period)
    offset = np.abs(epoch / step - 2 * cycle + 1)
    lr = min_lr + (ceiling - min_lr) * np.maximum(0, 1 - offset)
    for group in optimizer.param_groups:
        group["lr"] = lr
    return optimizer
def step_decay_learning_rate(optimizer, epoch, init_lr, step, lr_decay):
    """Decay init_lr by a factor of lr_decay once every *step* epochs.

    Updates all param groups in place and returns the optimizer.
    """
    decayed = init_lr * lr_decay ** np.floor(epoch / step)
    for group in optimizer.param_groups:
        group["lr"] = decayed
    return optimizer
def write_train_record(f_name, train_ls, valid_ls):
write_lines = ["epoch %d, train loss: %f, test loss: %f\n"
% (i+1, train_ls[i], valid_ls[-1]) for i in range(len(train_ls))]
write_lines = ["="*60+"\n"] + write_lines
with open(f_name, "a") as fout:
fout.writelines(write_lines)
|
[
"wang_yuanhao@live.com"
] |
wang_yuanhao@live.com
|
6fff1d2dfde9c50515de49dc1e667c6e3472c144
|
663d48b2a2bda714b97341ce9aaefba92602f194
|
/model_utils.py
|
87ae801ef079ef49395de421fc1d45eae0a38f7c
|
[
"MIT"
] |
permissive
|
SushantKafle/speechtext-wimp-labeler
|
15d9bbac20e96c894d91a5e7ae50c50640f91d7a
|
32b71e72f86ab7f864e75e8517bb32f4400352d4
|
refs/heads/master
| 2023-04-02T00:02:09.914758
| 2020-04-12T18:11:08
| 2020-04-12T18:11:08
| 119,284,336
| 11
| 2
|
MIT
| 2023-03-24T23:22:54
| 2018-01-28T18:09:07
|
Python
|
UTF-8
|
Python
| false
| false
| 753
|
py
|
import tensorflow as tf
def get_rnn_cell(size, type_, state_is_tuple = True):
if type_ == "LSTM":
return tf.contrib.rnn.LSTMCell(size, state_is_tuple=state_is_tuple)
elif type_ == "GRU":
return tf.contrib.rnn.GRUCell(size)
def create_feedforward(input_tensor, input_size, output_size, fn_initializer, activation, scope):
with tf.variable_scope(scope):
weights = tf.get_variable("W_", dtype = tf.float32, initializer = fn_initializer((input_size, output_size)))
bias = tf.get_variable("b_", dtype = tf.float32, initializer = fn_initializer((output_size,)))
output = tf.matmul(input_tensor, weights) + bias
if activation == "tanh":
output = tf.tanh(output)
elif activation == "sigmoid":
output = tf.sigmoid(output)
return output
|
[
"sxk5664@lac2050-05.main.ad.rit.edu"
] |
sxk5664@lac2050-05.main.ad.rit.edu
|
58dea61f01c79d510c5abba801845512de484d1f
|
31ed3085759ed1e8dc8ffdbeb52d0c6605a009f3
|
/mylinebot/urls.py
|
4d6b2fff64af08db4795cda8762dfba9c9e60732
|
[] |
no_license
|
poytoday/lineconnect
|
af28ef27c57b4c7c7693b2c0595495366dfbe112
|
45a4ac0406326c036bdbd0c01050f1f772f56b40
|
refs/heads/master
| 2022-11-18T01:00:10.172245
| 2020-07-14T10:19:33
| 2020-07-14T10:19:33
| 279,545,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
from django.urls import path, include
from .views import callback
urlpatterns=[
path('callback/', callback, name='callback')
]
|
[
"poytoday@gmail.com"
] |
poytoday@gmail.com
|
dbc6d0472c2132b6af6592c1e1e8e960c32f3f6d
|
e8d3b04a19ba1b6373877068c3200e91f5142932
|
/lastfour_main/helper.py
|
9522ad7c13cd6f25f7de19af268f57c0b4856c83
|
[] |
no_license
|
kpavankumar623/cricket-project
|
46005ebc7586979deef4d9071441c13b726b095a
|
d83ed4400080d1bf8f40f086cd88d5e71957e3ba
|
refs/heads/master
| 2020-07-16T14:15:03.475153
| 2019-09-20T13:08:06
| 2019-09-20T13:08:06
| 205,804,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
def cal_winners(inn,match):
"""below conditions declare the winner"""
actual_score = match.WIN_SCORE-1
if inn.score >= actual_score:
print("Lengaburu WON by {} wickets".format(match.WICKETS_MAX - inn.wickets))
elif inn.score == actual_score:
print("Match DRAWN")
else:
print("Enchai WON the Match by {} runs".format(actual_score - inn.score)) #Enchai score is 39.
|
[
"kpavankumar623@hotmail.com"
] |
kpavankumar623@hotmail.com
|
5f96b2f9df61b2997848aed9767153a92a516338
|
762de1c66746267e05d53184d7854934616416ee
|
/tools/MolSurfGenService/MolSurfaceGen32/chimera/share/VolumeProcessing/apply.py
|
e3698c7a49fcc4c0b7f6619db155e7b141e47eb8
|
[] |
no_license
|
project-renard-survey/semanticscience
|
6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677
|
024890dba56c3e82ea2cf8c773965117f8cda339
|
refs/heads/master
| 2021-07-07T21:47:17.767414
| 2017-10-04T12:13:50
| 2017-10-04T12:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Apply a function to a grid pointwise.
# The resulting volume is written in netcdf format.
#
# Syntax: apply.py sqrt|square|abs|exp|log <infile> <outfile>
#
# The file type must be one of the types handled by VolumeData.
#
import sys
from VolumeData import Grid_Data
# -----------------------------------------------------------------------------
#
def apply_function(array_func, inpath, outpath):
from VolumeData import fileformats
try:
grids = fileformats.open_file(inpath)
except fileformats.Unknown_File_Type, e:
sys.stderr.write(str(e))
sys.exit(1)
fvalues = [Mapped_Grid(g, array_func) for g in grids]
from VolumeData.netcdf import write_grid_as_netcdf
write_grid_as_netcdf(fvalues, outpath)
# -----------------------------------------------------------------------------
#
class Mapped_Grid(Grid_Data):
def __init__(self, grid_data, array_func):
self.array_func = array_func
Grid_Data.__init__(self, grid_data.size, grid_data.value_type,
grid_data.origin, grid_data.step,
name = grid_data.name, default_color = grid_data.rgba)
# ---------------------------------------------------------------------------
#
def read_matrix(self, ijk_origin, ijk_size, ijk_step, progress):
data = self.component.matrix(ijk_origin, ijk_size, ijk_step, progress)
fvalues = self.array_func(data)
return fvalues
# -----------------------------------------------------------------------------
#
def syntax():
msg = ('Apply a function to a grid pointwise.\n' +
'The resulting volume is written in netcdf format.\n'
'Syntax: apply.py sqrt|square|abs|exp|log <infile> <outfile>\n')
sys.stderr.write(msg)
sys.exit(1)
# -----------------------------------------------------------------------------
#
if len(sys.argv) != 4:
syntax()
fname = sys.argv[1]
from numpy import sqrt, power, absolute, exp, log
if fname == 'sqrt':
array_func = sqrt
elif fname == 'square':
array_func = lambda a: power(a, 2)
elif fname == 'abs':
array_func = absolute
elif fname == 'exp':
array_func = exp
elif fname == 'log':
array_func = log
else:
syntax()
inpath = sys.argv[2]
outpath = sys.argv[3]
apply_function(array_func, inpath, outpath)
|
[
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] |
alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5
|
83bee1c913ad98cd00f75327075dbef6727ae53a
|
3784495ba55d26e22302a803861c4ba197fd82c7
|
/venv/lib/python3.6/site-packages/torchx/legacy/nn/VolumetricReplicationPadding.py
|
16cc7a1c097d7c351bcc12cb145425dff9ac1bf3
|
[
"MIT"
] |
permissive
|
databill86/HyperFoods
|
cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789
|
9267937c8c70fd84017c0f153c241d2686a356dd
|
refs/heads/master
| 2021-01-06T17:08:48.736498
| 2020-02-11T05:02:18
| 2020-02-11T05:02:18
| 241,407,659
| 3
| 0
|
MIT
| 2020-02-18T16:15:48
| 2020-02-18T16:15:47
| null |
UTF-8
|
Python
| false
| false
| 1,969
|
py
|
import torch
from .Module import Module
class VolumetricReplicationPadding(Module):
def __init__(self, pleft, pright=None, ptop=None, pbottom=None, pfront=None, pback=None):
super(VolumetricReplicationPadding, self).__init__()
self.pleft = pleft
self.pright = pright or pleft
self.ptop = ptop or pleft
self.pbottom = pbottom or pleft
self.pfront = pfront or pleft
self.pback = pback or pleft
def updateOutput(self, input):
assert input.dim() == 5
self._backend.VolumetricReplicationPadding_updateOutput(
self._backend.library_state,
input,
self.output,
self.pleft, self.pright,
self.ptop, self.pbottom,
self.pfront, self.pback
)
return self.output
def updateGradInput(self, input, gradOutput):
assert input.dim() == 5 and gradOutput.dim() == 5
assert input.size(0) == gradOutput.size(0)
assert input.size(1) == gradOutput.size(1)
assert input.size(2) + self.pfront + self.pback == gradOutput.size(2)
assert input.size(3) + self.ptop + self.pbottom == gradOutput.size(3)
assert input.size(4) + self.pleft + self.pright == gradOutput.size(4)
self._backend.VolumetricReplicationPadding_updateGradInput(
self._backend.library_state,
input,
gradOutput,
self.gradInput,
self.pleft, self.pright,
self.ptop, self.pbottom,
self.pfront, self.pback
)
return self.gradInput
def __repr__(self):
s = super(VolumetricReplicationPadding, self).__repr__()
s += '({}, {}, {}, {}, {}, {})'.format(self.pleft, self.pright,
self.ptop, self.pbottom,
self.pfront, self.pback
)
return s
|
[
"luis20dr@gmail.com"
] |
luis20dr@gmail.com
|
5e57e42cf81e3523dfaa874a315995fbc33cfcb9
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D11B/PAYDUCD11BUN.py
|
3dccdf3361385387dedef9f876212a5ce94c56a8
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD11BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'PAI', MIN: 1, MAX: 1},
{ID: 'FII', MIN: 1, MAX: 2},
{ID: 'DTM', MIN: 1, MAX: 4},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'PYT', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'NAD', MIN: 0, MAX: 6, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 1},
]},
]},
{ID: 'GEI', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 1, MAX: 9},
{ID: 'BUS', MIN: 0, MAX: 1},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 99, LEVEL: [
{ID: 'UGH', MIN: 0, MAX: 1, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 999999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'MOA', MIN: 1, MAX: 9},
{ID: 'AJT', MIN: 0, MAX: 9},
{ID: 'PYT', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 3},
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'UGT', MIN: 1, MAX: 1},
]},
]},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 1, MAX: 1},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'AUT', MIN: 0, MAX: 1},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
669826572171af8678d7799f11c25be1be9d1480
|
b092806631c5284a71996c7f16ba3aec6d845cdb
|
/Analysis/HSCPStudy/python/config.py
|
63a9fdad617cd84fbca9131658dde97f0026f240
|
[] |
no_license
|
tvami/HSCPAnalysis
|
f4700c40cf96cc42133befe4c313b0536f69ffaf
|
936725605c9ad5f64dd3b874064f9d4d58806d66
|
refs/heads/master
| 2021-12-25T18:56:04.191661
| 2021-12-20T00:38:21
| 2021-12-20T00:38:21
| 218,607,653
| 0
| 0
| null | 2021-03-30T20:12:26
| 2019-10-30T19:35:57
|
C++
|
UTF-8
|
Python
| false
| false
| 643
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:/afs/cern.ch/cms/Tutorials/TWIKI_DATA/TTJets_8TeV_53X.root'
)
)
process.demo = cms.EDAnalyzer('HSCPStudy'
)
process.p = cms.Path(process.demo)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0308fbd80076e9763f9daf836a65b64f1a5decc9
|
2558bfcc4781f220ecbeb664785c643cf45720f2
|
/blog_tutorial/views.py
|
bb172057aaab3f642de1a5dc6aeecd00af9977f3
|
[] |
no_license
|
bornie21/blog_tutorial
|
066d5c7722bfae82c3b928f67f07a36ba38940c1
|
6595337809d9a8ffdf448cc08e6990b97dfaba81
|
refs/heads/master
| 2021-01-17T20:13:11.589245
| 2016-09-18T17:13:34
| 2016-09-18T17:13:34
| 68,509,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
from django.views.generic import ListView
from blog.models import Entry
class HomeView(ListView):
template_name = 'index.html'
queryset = Entry.objects.order_by('-created_at')
|
[
"matembudzeb@gmail.com"
] |
matembudzeb@gmail.com
|
33707edb80b081ec1ed745507088f9c26ebd20fd
|
b182ff74d1107c00d77d3bb241dfca589ccc9404
|
/config.py
|
2bba1aadff966f60605fa7fdf900d990f46442d1
|
[] |
no_license
|
aexleader/Tornado-OA-System
|
7846a13a90c6da512a7f7620b003bd77b331a63d
|
6ffc51d2f42fcbd5b0abe7082dae4505bf687894
|
refs/heads/master
| 2020-08-01T14:00:28.966198
| 2019-09-10T10:57:23
| 2019-09-10T10:57:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
#coding=utf-8
from libs.flash.flash_lib import get_flashed_messages # 引入一个变量
from libs.permission.permission_auth.permission_interface_libs import menu_permission
settings = dict(
template_path = 'templates',
static_path = 'static',
debug = True,
cookie_secret = 'aaaa',
login_url = '/auth/user_login',
xsrf_cookies = True,
# ui_mrthods 是可以作为全局模板变量,在所有的html文件中都可以获取这个参数
ui_methods= {
"menu_permission": menu_permission,
"get_flashed_messages": get_flashed_messages
},
# pycket的配置信息
pycket = {
'engine': 'redis', # 设置存储器类型
'storage': {
'host': 'localhost',
'port': 6379,
'db_sessions': 5,
'db_notifications': 11,
'max_connections': 2 ** 31,
},
'cookies': {
'expires_days': 30, # 设置过期时间
#'max_age': 5000,
},
},
)
|
[
"htxz_jiang@163.com"
] |
htxz_jiang@163.com
|
9b25fca4182e31a9a72666772e8b52b3eebcb24f
|
33b40df749eecb1195fc2312135c5a1f4b38355a
|
/django_newsapp/core/migrations/0007_auto_20160111_0141.py
|
a2e955df261020773a28948b709602ea666e3367
|
[] |
no_license
|
ksj1993/django_newsapp
|
01d8b47b91ad599a605e34310fd31950ecdc214e
|
4e7224c74ce6dcc76c443a38cb7e2b57e7267dad
|
refs/heads/master
| 2021-01-10T07:59:52.210334
| 2016-02-06T06:29:53
| 2016-02-06T06:29:53
| 49,241,435
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-11 01:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_article_title'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='description',
field=models.CharField(default='', max_length=300),
preserve_default=False,
),
migrations.AddField(
model_name='userprofile',
name='occupation',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
]
|
[
"kunaljasty@gmail.com"
] |
kunaljasty@gmail.com
|
e01310498475202a0897ae8f1cbd77d2d62f8aea
|
6b163125b7d2f3ea5c2b107e6451e423ac7f1f3a
|
/app/forms/signup_form.py
|
4b55991e511d372190937258dd788f9ba156783f
|
[] |
no_license
|
guny12/Capstone-Mise-En
|
a1d6e689230ad2e49cce7a09bad52d6243808d15
|
b45d510adc04a69c73cf738a97c3a68d7166eebd
|
refs/heads/main
| 2023-06-14T02:13:24.280617
| 2021-07-15T06:30:39
| 2021-07-15T06:30:39
| 363,795,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired, Email, ValidationError, Length
from app.models import User
def email_exists(form, field):
email = field.data
user = User.query.filter(User.email == email).first()
if user:
raise ValidationError("User is already registered.")
def user_exists(form, field):
username = field.data
user = User.query.filter(User.username == username).first()
if user:
raise ValidationError("User is already registered.")
class SignUpForm(FlaskForm):
firstName = StringField("First name", validators=[DataRequired()])
lastName = StringField("Last name", validators=[DataRequired()])
email = StringField(
"Email",
validators=[
DataRequired(),
email_exists,
Email(),
],
)
username = StringField("Username", validators=[DataRequired(), user_exists])
password = StringField(
"Password", validators=[DataRequired(), Length(min=8, message="password must be at least 8 characters")]
)
|
[
"Jimjnguy@gmail.com"
] |
Jimjnguy@gmail.com
|
14e20c46fd479497e766de6b6114b7dae024aad5
|
295cdb8828639d84c9bdd71f80587669773c174e
|
/mysite/time_traveler/migrations/0001_initial.py
|
b23615ea8a4d74e6f332fc1e677222fb65950d85
|
[] |
no_license
|
VeyronRomeo/mysite
|
a6cdddc9ffa34be865492cbfb7c346c4167ae2c4
|
5c0180678d25c453091e4612e4868e9bbf748b4e
|
refs/heads/master
| 2021-08-16T23:42:35.887457
| 2017-11-20T13:32:54
| 2017-11-20T13:32:54
| 27,227,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-17 03:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='userName',
fields=[
('id', models.IntegerField(primary_key=True)),
('uname', models.CharField(max_length=12)),
('upassword', models.CharField(max_length=32)),
('ulast_time_password', models.CharField(max_length=32)),
('uregistration_time', models.DateTimeField()),
('ulast_time_login_time', models.DateTimeField()),
('ulast_time_login_addr', models.CharField(max_length=32)),
],
),
]
|
[
"killni.ma@163.com"
] |
killni.ma@163.com
|
e8ff2ecd2620b1f6bb2ef13e7babba0a737a2405
|
fbb552bd9ef5378c915b73f35ae002e0719bfdbd
|
/dumbPlayer.py
|
8e03986b887f23176e658fcb138390e89eb1ff8c
|
[] |
no_license
|
PROgram52bc/COS280_TicTacToe
|
1c346efd83fc07a9a7344e272c46d2275a13dc47
|
7e5b382fa29a98fb8921db5d5e3337b7a899b10c
|
refs/heads/master
| 2020-04-29T06:17:27.783495
| 2019-03-24T21:38:33
| 2019-03-24T21:38:38
| 175,911,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from player import Player
class DumbPlayer(Player):
def __init__(self, mySymbol, opponentSymbol):
super().__init__(mySymbol, opponentSymbol)
def makeMove(self, board):
for row in range(len(board)):
for col in range(len(board[0])):
if not board[row][col]:
return row,col
def __str__(self):
return "Dumb Player"
|
[
"daviddenghaotian@163.com"
] |
daviddenghaotian@163.com
|
355a94e3e219a007c388d1c3247bbc2eb62f6ec2
|
30e06035a4fd3cdfc6bae72c4cd66c0154802d52
|
/TpFriend/TpFriend.py
|
3d17e88db6adbfccca5b39f4e8c8cc372b2374ef
|
[] |
no_license
|
h0wHigh/Python-Plugins
|
3563e5e8ad42e9a270146d57056daa74af005209
|
e8e0eaea4077ca2a19cfc235c4441e729a608af9
|
refs/heads/master
| 2021-01-18T05:16:40.663474
| 2015-03-14T00:42:22
| 2015-03-14T00:42:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,132
|
py
|
__author__ = 'DreTaX'
__version__ = '3.7.1'
import clr
clr.AddReferenceByPartialName("Fougerite")
import Fougerite
import math
import System
from System import *
import re
import sys
path = Util.GetRootFolder()
sys.path.append(path + "\\Save\\Lib\\")
try:
import random
except ImportError:
pass
red = "[color #FF0000]"
green = "[color #009900]"
white = "[color #FFFFFF]"
"""
Class
"""
Pending = []
class TpFriend:
"""
Methods
"""
sys = None
DizzyDist = None
def On_PluginInit(self):
DataStore.Flush("TpTimer")
DataStore.Flush("tpfriendautoban")
DataStore.Flush("tpfriendpending")
DataStore.Flush("tpfriendpending2")
DataStore.Flush("tpfriendcooldown")
DataStore.Flush("tpfriendy")
config = self.TpFriendConfig()
self.sys = config.GetSetting("Settings", "sysname")
self.DizzyDist = float(config.GetSetting("Settings", "DizzyDist"))
Util.ConsoleLog("TpFriend v" + __version__ + " by " + __author__ + " loaded.", True)
def TpFriendConfig(self):
if not Plugin.IniExists("TpFriendConfig"):
loc = Plugin.CreateIni("TpFriendConfig")
loc.Save()
return Plugin.GetIni("TpFriendConfig")
def DefaultLoc(self):
if not Plugin.IniExists("DefaultLoc"):
loc = Plugin.CreateIni("DefaultLoc")
loc.Save()
return Plugin.GetIni("DefaultLoc")
def KillJob(self, Player):
if Player in Pending:
Pending.remove(Player)
"""
CheckV method based on Spock's method.
Upgraded by DreTaX
Can Handle Single argument and Array args.
V4.1
"""
def GetPlayerName(self, namee):
try:
name = namee.lower()
for pl in Server.Players:
if pl.Name.lower() == name:
return pl
return None
except:
return None
def CheckV(self, Player, args):
count = 0
if hasattr(args, '__len__') and (not isinstance(args, str)):
p = self.GetPlayerName(str.join(" ", args))
if p is not None:
return p
for pl in Server.Players:
for namePart in args:
if namePart.lower() in pl.Name.lower():
p = pl
count += 1
continue
else:
nargs = str(args).lower()
p = self.GetPlayerName(nargs)
if p is not None:
return p
for pl in Server.Players:
if nargs in pl.Name.lower():
p = pl
count += 1
continue
if count == 0:
Player.MessageFrom(self.sys, "Couldn't find [color#00FF00]" + str.join(" ", args) + "[/color]!")
return None
elif count == 1 and p is not None:
return p
else:
Player.MessageFrom(self.sys, "Found [color#FF0000]" + str(count) + "[/color] player with similar name. [color#FF0000] Use more correct name!")
return None
def getPlayer(self, d):
try:
pl = Server.FindPlayer(d)
return pl
except:
return None
def Replace(self, String):
str = re.sub('[(\)]', '', String)
return str.split(',')
def TrytoGrabID(self, Player):
try:
id = Player.SteamID
return id
except:
return None
def isMod(self, id):
if DataStore.ContainsKey("Moderators", id):
return True
return False
"""
Timer Functions
"""
def addJob(self, xtime, PlayerFrom, PlayerTo, callback, id=None, tid=None):
List = Plugin.CreateDict()
List["PlayerF"] = PlayerFrom
List["PlayerT"] = PlayerTo
# Let's make sure we have the steamid all the time.
if id is None:
List["PlayerFID"] = PlayerFrom.SteamID
List["PlayerTID"] = PlayerTo.SteamID
else:
List["PlayerFID"] = id
List["PlayerTID"] = tid
List["Call"] = callback
Plugin.CreateParallelTimer("TpJobTimer", xtime * 1000, List).Start()
def clearTimers(self):
Plugin.KillParallelTimer("TpJobTimer")
def TpJobTimerCallback(self, timer):
timer.Kill()
List = timer.Args
PlayerFrom = List["PlayerF"]
PlayerTo = List["PlayerT"]
callback = List["Call"]
id = List["PlayerFID"]
tid = List["PlayerTID"]
if self.TrytoGrabID(PlayerFrom) is None or self.TrytoGrabID(PlayerTo) is None:
DataStore.Add("tpfriendautoban", id, "none")
self.KillJob(PlayerFrom)
self.KillJob(PlayerTo)
return
DataStore.Add("tpfriendautoban", id, "using")
# Normal Teleport Callback
if callback == 1:
PlayerFrom.TeleportTo(PlayerTo.Location)
PlayerFrom.MessageFrom(self.sys, "You have been teleported to your friend")
self.addJob(2, PlayerFrom, PlayerTo, 3, id, tid)
# AutoKill
elif callback == 2:
if PlayerFrom not in Pending or PlayerTo not in Pending:
return
self.KillJob(PlayerFrom)
self.KillJob(PlayerTo)
ispend = DataStore.Get("tpfriendpending", id)
ispend2 = DataStore.Get("tpfriendpending2", tid)
if ispend is not None and ispend2 is not None:
DataStore.Remove("tpfriendpending", id)
DataStore.Remove("tpfriendpending2", tid)
DataStore.Add("tpfriendcooldown", id, 7)
DataStore.Add("tpfriendautoban", id, "none")
if PlayerFrom is not None:
PlayerFrom.MessageFrom(self.sys, "Teleport request timed out")
if PlayerTo is not None:
PlayerTo.MessageFrom(self.sys, "Teleport request timed out")
elif callback == 3:
PlayerFrom.TeleportTo(PlayerTo.Location)
PlayerFrom.MessageFrom(self.sys, "You have been teleported to your friend again.")
DataStore.Add("tpfriendy", id, str(PlayerTo.Y))
self.addJob(2, PlayerFrom, PlayerTo, 5, id, tid)
elif callback == 4:
DataStore.Add("tpfriendautoban", id, "none")
elif callback == 5:
y = float(PlayerFrom.Y)
oy = float(DataStore.Get("tpfriendy", id))
if oy - y > self.DizzyDist:
Server.BroadcastFrom(self.sys, PlayerFrom.Name + red + " tried to fall through a house via tpa. Kicked.")
Plugin.Log("DizzyHackBypass", PlayerFrom.Name + " - " + PlayerFrom.SteamID + " - " + PlayerFrom.IP + " - " + str(PlayerFrom.Location))
rand = self.DefaultLoc()
num = random.randrange(1, 8155)
loc = rand.GetSetting("DefaultLoc", str(num))
loc = self.Replace(loc)
loc = Util.CreateVector(float(loc[0]), float(loc[1]), float(loc[2]))
PlayerFrom.TeleportTo(loc)
DataStore.Remove("tpfriendy", id)
self.addJob(2, PlayerFrom, PlayerTo, 6, id, tid)
return
self.addJob(2, PlayerFrom, PlayerTo, 4, id, tid)
elif callback == 6:
try:
PlayerFrom.Disconnect()
except:
pass
def On_PlayerDisconnected(self, Player):
id = self.TrytoGrabID(Player)
if id is None:
return
self.KillJob(Player)
DataStore.Add("tpfriendautoban", id, "none")
def On_Command(self, Player, cmd, args):
id = Player.SteamID
if cmd == "cleartpatimers":
if Player.Admin or self.isMod(id):
self.clearTimers()
Player.MessageFrom(self.sys, "Cleared!")
elif cmd == "tpa":
if len(args) == 0:
Player.MessageFrom(self.sys, "Teleport Usage:")
Player.MessageFrom(self.sys, "TpFriend V" + __version__ + " by DreTaX")
Player.MessageFrom(self.sys, "\"/tpa [PlayerName]\" to request a teleport.")
Player.MessageFrom(self.sys, "\"/tpaccept\" to accept a requested teleport.")
Player.MessageFrom(self.sys, "\"/tpdeny\" to deny a request.")
Player.MessageFrom(self.sys, "\"/tpcount\" to see how many requests you have remaining.")
Player.MessageFrom(self.sys, "\"/tpcancel\" to cancel your own request.")
else:
config = self.TpFriendConfig()
playertor = self.CheckV(Player, args)
if playertor is None:
return
if playertor == Player:
Player.MessageFrom(self.sys, "Cannot teleport to yourself!")
return
name = Player.Name
id = Player.SteamID
idt = playertor.SteamID
namet = playertor.Name
maxuses = int(config.GetSetting("Settings", "Maxuses"))
cooldown = int(config.GetSetting("Settings", "cooldown"))
stuff = int(config.GetSetting("Settings", "timeoutr"))
time = DataStore.Get("tpfriendcooldown", id)
usedtp = DataStore.Get("tpfriendusedtp", id)
if time is None:
DataStore.Add("tpfriendcooldown", id, 7)
time = 7
calc = System.Environment.TickCount - time
if calc < 0 or math.isnan(calc):
DataStore.Add("tpfriendcooldown", id, 7)
time = 7
if calc >= cooldown or time == 7:
if usedtp is None:
DataStore.Add("tpfriendusedtp", id, 0)
usedtp = 0
if maxuses > 0:
if maxuses >= int(usedtp):
Player.MessageFrom(self.sys, "Reached max number of teleport requests!")
return
if DataStore.Get("tpfriendpending2", idt) is not None:
Player.MessageFrom(self.sys, "This player is pending a request. Wait a bit.")
return
if DataStore.Get("tpfriendpending", id):
Player.MessageFrom(self.sys, "You are pending a request. Wait a bit or cancel It")
return
DataStore.Add("tpfriendcooldown", id, System.Environment.TickCount)
playertor.MessageFrom(self.sys, "Teleport request from " + name + " to accept write /tpaccept")
Player.MessageFrom(self.sys, "Teleport request sent to " + namet)
DataStore.Add("tpfriendpending", id, idt)
DataStore.Add("tpfriendpending2", idt, id)
self.KillJob(Player)
self.KillJob(playertor)
self.addJob(stuff, Player, playertor, 2, id, idt)
else:
Player.MessageFrom(self.sys, "You have to wait before teleporting again!")
done = round((calc / 1000) / 60, 2)
done2 = round((cooldown / 1000) / 60, 2)
Player.MessageFrom(self.sys, "Time Remaining: " + str(done) + "/" + str(done2) + " mins")
elif cmd == "tpaccept":
pending = DataStore.Get("tpfriendpending2", id)
config = self.TpFriendConfig()
if pending is not None:
playerfromm = self.getPlayer(pending)
if playerfromm is not None:
self.KillJob(Player)
self.KillJob(playerfromm)
maxtpnumber = int(config.GetSetting("Settings", "Maxuses"))
playertpuse = int(DataStore.Get("tpfriendusedtp", pending))
tpdelayy = int(config.GetSetting("Settings", "tpdelay"))
if maxtpnumber > 0:
playertpuse = int(playertpuse) + 1
DataStore.Add("tpfriendusedtp", pending, playertpuse)
playerfromm.MessageFrom(self.sys, "Teleport requests used " + str(playertpuse) + " / " + str(maxtpnumber))
else:
playerfromm.MessageFrom(self.sys, "You have unlimited requests remaining!")
check = int(config.GetSetting("Settings", "safetpcheck"))
idt = playerfromm.SteamID
if tpdelayy > 0:
playerfromm.MessageFrom(self.sys, "Teleporting you in: " + str(tpdelayy) + " second(s)")
self.addJob(tpdelayy, playerfromm, Player, 1, idt, id)
else:
DataStore.Add("tpfriendautoban", idt, "using")
DataStore.Add("tpfriendy", idt, str(Player.Y))
playerfromm.TeleportTo(Player.Location)
playerfromm.MessageFrom(self.sys, "Teleported!")
DataStore.Add("tpfriendautoban", idt, "none")
self.addJob(check, playerfromm, Player, 3, idt, id)
DataStore.Remove("tpfriendpending", idt)
DataStore.Remove("tpfriendpending2", id)
Player.MessageFrom(self.sys, "Teleport Request Accepted!")
else:
self.KillJob(Player)
Player.MessageFrom(self.sys, "Player isn't online!")
else:
Player.MessageFrom(self.sys, "Your request was timed out, or you don't have any.")
elif cmd == "tpdeny":
pending = DataStore.Get("tpfriendpending2", id)
if pending is not None:
playerfromm = self.getPlayer(pending)
if playerfromm is not None:
playerfromm.MessageFrom(self.sys, "Your request was denied!")
self.KillJob(playerfromm)
self.KillJob(Player)
DataStore.Remove("tpfriendpending", pending)
DataStore.Add("tpfriendcooldown", pending, 7)
DataStore.Remove("tpfriendpending2", id)
Player.MessageFrom(self.sys, "Request denied!")
else:
Player.MessageFrom(self.sys, "No request to deny.")
elif cmd == "tpcancel":
pending = DataStore.Get("tpfriendpending", id)
if pending is not None:
playerto = self.getPlayer(pending)
if playerto is not None:
playerto.MessageFrom(self.sys, Player.Name + " Cancelled the request!")
self.KillJob(playerto)
self.KillJob(Player)
DataStore.Remove("tpfriendpending", id)
DataStore.Add("tpfriendcooldown", id, 7)
DataStore.Remove("tpfriendpending2", pending)
Player.MessageFrom(self.sys, "Request Cancelled!")
else:
Player.MessageFrom(self.sys, "There is nothing to cancel.")
elif cmd == "tpcount":
config = self.TpFriendConfig()
maxuses = int(config.GetSetting("Settings", "Maxuses"))
if maxuses > 0:
uses = int(DataStore.Get("tpfriendusedtp", id))
if uses is None:
uses = 0
Player.MessageFrom(self.sys, "Teleport requests used " + str(uses) + " / " + str(maxuses))
else:
Player.MessageFrom(self.sys, "You have unlimited requests remaining!")
elif cmd == "tpresettime":
if Player.Admin or self.isMod(id):
DataStore.Add("tpfriendcooldown", id, 7)
Player.Message("Reset!")
elif cmd == "clearuses":
id = Player.SteamID
if Player.Admin or self.isMod(id):
DataStore.Flush("tpfriendusedtp")
Player.MessageFrom(self.sys, "Flushed!")
|
[
"dretax14@gmail.com"
] |
dretax14@gmail.com
|
335fe03ecf60ad60f91b937781b87ec328478859
|
d7f366993efd8dce8ee88836ccd02db4fb6c31a2
|
/attack.py
|
6061a3a8217792d89a700f5c2e33413ede522625
|
[
"Apache-2.0"
] |
permissive
|
hebo1221/Inconspicuous-Adversarial-perturbation-post-processing-method-with-texture-analysis
|
b92721b88f4691bbaa041ee4c74ca031a0eaa7bc
|
770a2c8898d56293845d7fd63f01649793e0d65f
|
refs/heads/main
| 2023-02-16T01:45:08.868788
| 2021-01-16T00:36:23
| 2021-01-16T00:36:23
| 330,053,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,405
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchattacks.attack import Attack
import cv2
from skimage.color import rgb2gray
from skimage.filters import sobel
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage.metrics import mean_squared_error, structural_similarity
import numpy as np
import json
import os
import sys
import time
from torch import Tensor
import torch.nn as nn
import torch.optim as optim
import torchvision.utils
from torchvision import models
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torchattacks
from utils import imshow, image_folder_custom_label
import matplotlib.pyplot as plt
# True False
show = False
original_attack = False
use_cuda = True
device = torch.device("cuda" if use_cuda else "cpu")
class_idx = json.load(open("./data/imagenet_class_index.json"))
idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
transform = transforms.Compose([
transforms.Resize((299, 299)),
transforms.ToTensor(), # ToTensor : [0, 255] -> [0, 1]
# Using normalization for Inception v3.
# https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# However, DO NOT USE normalization transforms in this section.
# torchattacks only supports images with a range between 0 and 1.
# Thus, please refer to the model construction section.
])
print("dataset: imagenet-mini_val")
# print("dataset: imagenet-mini_train")
# print("dataset: custom mini dataset")
normal_data = image_folder_custom_label(root='./data/imagenet2', transform=transform, idx2label=idx2label)
normal_loader = torch.utils.data.DataLoader(normal_data, batch_size=1, shuffle=False)
test_set = torchvision.datasets.ImageNet( root='./data/imagenet', split= 'val', download=False, transform=transform )
normal_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)
class Normalize(nn.Module):
    """Per-channel (x - mean) / std normalization packaged as a layer.

    Registered as buffers so the statistics follow the module across
    devices without being treated as trainable parameters.
    """

    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        self.register_buffer('mean', torch.Tensor(mean))
        self.register_buffer('std', torch.Tensor(std))

    def forward(self, input):
        # Reshape stats to (1, C, 1, 1) so they broadcast over NCHW batches.
        shape = (1, 3, 1, 1)
        return input.sub(self.mean.reshape(shape)).div(self.std.reshape(shape))
# Adding a normalization layer for Inception v3.
# We can't use torch.transforms because it supports only non-batch images.
norm_layer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print("original_attack",end=": ")
print(original_attack)
print("network model",end=": ")
# Fold normalization into the model itself so the attacks can operate on
# raw [0, 1] pixel tensors.
model = nn.Sequential(
    norm_layer,
    # models.inception_v3(pretrained=True)
    # models.alexnet(pretrained=True)
    models.resnet50(pretrained=True)
)
print(model)
model = model.to(device).eval()
class FGSM(Attack):
    r"""
    FGSM in the paper 'Explaining and harnessing adversarial examples'
    [https://arxiv.org/abs/1412.6572]

    One-step attack: adv = clamp(images + eps * sign(grad), 0, 1).
    When the module-level flag ``original_attack`` is False, the sign map
    is additionally weighted element-wise by ``filterd`` (the edge-density
    map produced by ``filter`` below) before eps is applied.
    """
    def __init__(self, model, eps=0.007):
        super(FGSM, self).__init__("FGSM", model)
        self.eps = eps  # maximum per-pixel step size
    def forward(self, images, labels, filterd):
        r"""
        Overridden.  Returns detached adversarial images clamped into [0, 1].
        """
        images = images.to(self.device)
        labels = labels.to(self.device)
        labels = self._transform_label(images, labels)
        loss = nn.CrossEntropyLoss()
        images.requires_grad = True
        outputs = self.model(images)
        # _targeted flips the sign of the loss for targeted mode (set by the
        # torchattacks Attack base class).
        cost = self._targeted*loss(outputs, labels).to(self.device)
        grad = torch.autograd.grad(cost, images,
                                   retain_graph=False, create_graph=False)[0]
        #########
        purturb = grad.sign()
        # filterd = torch.clamp(filterd, min=0.02, max=0.3)
        if original_attack == False:
            # adv_images = images_ + purturb_*self.eps
            # eps_filterd_2d = (filterd/filterd.mean())
            # eps_filterd_2d = filterd
            # purturb_ = np.uint8(purturb.cpu().data.squeeze(0).permute(1, 2, 0).numpy()*255)*10
            # purturb_[:,:,0] = purturb_[:,:,0] * (eps_filterd_2d)*200
            # purturb_ = torch.clamp(transforms.ToTensor()(purturb_), min=0, max=1).detach()
            adv_images = images + filterd*purturb*self.eps
        else:
            adv_images = images + purturb*self.eps
        # adv_images = images + self.eps*grad.sign()
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        return adv_images
class RFGSM(Attack):
    r"""
    R+FGSM in the paper 'Ensemble Adversarial Training : Attacks and Defences'
    [https://arxiv.org/abs/1705.07204]

    FGSM preceded by a random sign start of size alpha; each of ``steps``
    gradient steps has size (eps - alpha).  With ``original_attack`` False
    each step is weighted element-wise by ``filterd``.
    """
    def __init__(self, model, eps=16/255, alpha=8/255, steps=1):
        super(RFGSM, self).__init__("RFGSM", model)
        self.eps = eps      # total perturbation budget
        self.alpha = alpha  # size of the random initialization
        self.steps = steps  # number of gradient steps
    def forward(self, images, labels, filterd):
        r"""
        Overridden.
        """
        images = images.to(self.device)
        labels = labels.to(self.device)
        labels = self._transform_label(images, labels)
        loss = nn.CrossEntropyLoss()
        # Random start: alpha * sign of Gaussian noise, clipped to [0, 1].
        adv_images = images.clone().detach() + self.alpha*torch.randn_like(images).sign()
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        for i in range(self.steps):
            adv_images.requires_grad = True
            outputs = self.model(adv_images)
            cost = self._targeted*loss(outputs, labels).to(self.device)
            grad = torch.autograd.grad(cost, adv_images,
                                       retain_graph=False, create_graph=False)[0]
            purturb = (self.eps-self.alpha)*grad.sign()
            if original_attack == False:
                adv_images = adv_images.detach() + filterd * purturb
            else:
                adv_images = adv_images.detach() + purturb
            adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        return adv_images
class FFGSM(Attack):
    r"""
    New FGSM proposed in 'Fast is better than free: Revisiting adversarial training'
    [https://arxiv.org/abs/2001.03994]

    Uniform random start inside the eps-ball, one gradient step of size
    alpha, then projection back into the eps-ball around the clean input.
    """
    def __init__(self, model, eps=8/255, alpha=10/255):
        super(FFGSM, self).__init__("FFGSM", model)
        self.eps = eps      # L-inf projection radius
        self.alpha = alpha  # gradient step size (may exceed eps)
    def forward(self, images, labels, filterd):
        r"""
        Overridden.
        """
        images = images.to(self.device)
        labels = labels.to(self.device)
        labels = self._transform_label(images, labels)
        loss = nn.CrossEntropyLoss()
        adv_images = images.clone().detach()
        # randn_like(...).uniform_(...) overwrites the Gaussian draw with
        # uniform noise in [-eps, eps]; only the uniform values are used.
        adv_images = adv_images + torch.randn_like(images).uniform_(-self.eps, self.eps)
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        adv_images.requires_grad = True
        outputs = self.model(adv_images)
        cost = self._targeted*loss(outputs, labels).to(self.device)
        grad = torch.autograd.grad(cost, adv_images,
                                   retain_graph=False, create_graph=False)[0]
        purturb = self.alpha*grad.sign()
        if original_attack == False:
            adv_images = adv_images.detach() + filterd * purturb
        else:
            adv_images = adv_images.detach() + purturb
        # Project back into the eps-ball around the clean images, then [0, 1].
        delta = torch.clamp(adv_images - images, min=-self.eps, max=self.eps)
        adv_images = torch.clamp(images + delta, min=0, max=1).detach()
        return adv_images
class PGD(Attack):
    r"""
    PGD in the paper 'Towards Deep Learning Models Resistant to Adversarial Attacks'
    [https://arxiv.org/abs/1706.06083]

    Iterative FGSM: ``steps`` gradient steps of size alpha, each followed by
    projection into the eps-ball around the clean input and into [0, 1].
    With ``original_attack`` False each step is weighted by ``filterd``.
    """
    def __init__(self, model, eps=0.3, alpha=2/255, steps=40, random_start=False):
        super(PGD, self).__init__("PGD", model)
        self.eps = eps                    # L-inf projection radius
        self.alpha = alpha                # per-step size
        self.steps = steps                # number of iterations
        self.random_start = random_start  # start from a random point in the ball
    def forward(self, images, labels, filterd):
        r"""
        Overridden.
        """
        images = images.to(self.device)
        labels = labels.to(self.device)
        labels = self._transform_label(images, labels)
        loss = nn.CrossEntropyLoss()
        adv_images = images.clone().detach()
        # eps_filterd_2d = (filterd/filterd.mean())*self.eps  # (then the sum equals epsilon)
        if self.random_start:
            # Starting at a uniformly random point
            adv_images = adv_images + torch.empty_like(adv_images).uniform_(-self.eps, self.eps)
            adv_images = torch.clamp(adv_images, min=0, max=1)
        for i in range(self.steps):
            adv_images.requires_grad = True
            outputs = self.model(adv_images)
            cost = self._targeted*loss(outputs, labels).to(self.device)
            grad = torch.autograd.grad(cost, adv_images,
                                       retain_graph=False, create_graph=False)[0]
            purturb = self.alpha*grad.sign()
            if original_attack == False:
                adv_images = adv_images.detach() + filterd * purturb
            else:
                adv_images = adv_images.detach() + purturb
            # Project into the eps-ball, then back into valid pixel range.
            delta = torch.clamp(adv_images.to(self.device) - images, min=-self.eps, max=self.eps)
            adv_images = torch.clamp(images + delta, min=0, max=1).detach()
        return adv_images
class TPGD(Attack):
    r"""
    PGD based on KL-Divergence loss in the paper 'Theoretically Principled Trade-off between Robustness and Accuracy'
    [https://arxiv.org/abs/1901.08573]

    Maximizes the KL divergence between clean and adversarial logits, so no
    ground-truth labels are needed ('only_original' attack mode).
    """
    def __init__(self, model, eps=8/255, alpha=2/255, steps=7):
        super(TPGD, self).__init__("TPGD", model)
        self.eps = eps      # L-inf projection radius
        self.alpha = alpha  # per-step size
        self.steps = steps  # number of iterations
        self._attack_mode = 'only_original'
    def forward(self, images, labels, filterd):
        r"""
        Overridden.  ``labels`` is unused: the attack is label-free.
        """
        images = images.to(self.device)
        # Tiny Gaussian start so the KL term is non-zero at step 0.
        adv_images = images.clone().detach() + 0.001*torch.randn_like(images).to(self.device).detach()
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        loss = nn.KLDivLoss(reduction='sum')
        for i in range(self.steps):
            adv_images.requires_grad = True
            # NOTE(review): clean logits are recomputed every iteration even
            # though `images` never changes; could be hoisted out of the loop.
            logit_ori = self.model(images)
            logit_adv = self.model(adv_images)
            cost = loss(F.log_softmax(logit_adv, dim=1),
                        F.softmax(logit_ori, dim=1)).to(self.device)
            grad = torch.autograd.grad(cost, adv_images,
                                       retain_graph=False, create_graph=False)[0]
            adv_images = adv_images.detach() + self.alpha*grad.sign()
            delta = torch.clamp(adv_images.to(self.device) - images, min=-self.eps, max=self.eps)
            if original_attack == False:
                adv_images = torch.clamp(images + filterd * delta, min=0, max=1).detach()
            else:
                adv_images = torch.clamp(images + delta, min=0, max=1).detach()
        return adv_images
class APGD(Attack):
    r"""
    Comment on "Adv-BNN: Improved Adversarial Defense through Robust Bayesian Neural Network"
    [https://arxiv.org/abs/1907.00895]
    Distance Measure : Linf

    PGD variant whose per-step gradient is accumulated over ``sampling``
    forward/backward passes (intended for stochastic models).
    """
    def __init__(self, model, eps=0.3, alpha=2/255, steps=40, sampling=10):
        super(APGD, self).__init__("APGD", model)
        self.eps = eps            # L-inf projection radius
        self.alpha = alpha        # per-step size
        self.steps = steps        # number of iterations
        self.sampling = sampling  # gradient samples accumulated per step
    def forward(self, images, labels, filterd):
        r"""
        Overridden.
        """
        images = images.to(self.device)
        labels = labels.to(self.device)
        labels = self._transform_label(images, labels)
        loss = nn.CrossEntropyLoss()
        ori_images = images.clone().detach()
        for i in range(self.steps):
            grad = torch.zeros_like(images)
            images.requires_grad = True
            for j in range(self.sampling):
                outputs = self.model(images)
                cost = self._targeted*loss(outputs, labels).to(self.device)
                grad += torch.autograd.grad(cost, images,
                                            retain_graph=False,
                                            create_graph=False)[0]
            # grad.sign() is used instead of (grad/sampling).sign()
            adv_images = images + self.alpha*grad.sign()
            # Project the accumulated perturbation into the eps-ball.
            eta = torch.clamp(adv_images - ori_images, min=-self.eps, max=self.eps)
            if original_attack == False:
                images = torch.clamp(ori_images + filterd *eta, min=0, max=1).detach()
            else:
                images = torch.clamp(ori_images + eta, min=0, max=1).detach()
        adv_images = images
        return adv_images
class DeepFool(Attack):
    r"""
    'DeepFool: A Simple and Accurate Method to Fool Deep Neural Networks'
    [https://arxiv.org/abs/1511.04599]
    Distance Measure : L2

    Iteratively moves each image toward its closest (linearized) decision
    boundary, stopping as soon as the prediction flips.  Label-free, hence
    'only_original' attack mode.
    """
    def __init__(self, model, steps=3):
        super(DeepFool, self).__init__("DeepFool", model)
        self.steps = steps  # maximum boundary-stepping iterations per image
        self._attack_mode = 'only_original'
    def forward(self, images, labels, filterd):
        r"""
        Overridden.  Processes the batch one image at a time.
        """
        images = images.to(self.device)
        for b in range(images.shape[0]):
            image = images[b:b+1, :, :, :]
            image.requires_grad = True
            output = self.model(image)[0]
            # f_0 / grad_f_0: score and input-gradient of the originally
            # predicted class pre_0.
            # NOTE(review): both are computed once on the clean image and
            # reused after `image` is perturbed in later iterations.
            _, pre_0 = torch.max(output, 0)
            f_0 = output[pre_0]
            grad_f_0 = torch.autograd.grad(f_0, image,
                                           retain_graph=False,
                                           create_graph=False)[0]
            num_classes = len(output)
            for i in range(self.steps):
                image.requires_grad = True
                output = self.model(image)[0]
                _, pre = torch.max(output, 0)
                if pre != pre_0:
                    # Prediction already flipped: attack succeeded for this image.
                    image = torch.clamp(image, min=0, max=1).detach()
                    break
                r = None           # minimal boundary-crossing perturbation found so far
                min_value = None   # its (linearized) distance
                for k in range(num_classes):
                    if k == pre_0:
                        continue
                    f_k = output[k]
                    grad_f_k = torch.autograd.grad(f_k, image,
                                                   retain_graph=True,
                                                   create_graph=True)[0]
                    f_prime = f_k - f_0
                    grad_f_prime = grad_f_k - grad_f_0
                    # Distance to the linearized boundary between pre_0 and k.
                    value = torch.abs(f_prime)/torch.norm(grad_f_prime)
                    if r is None:
                        r = (torch.abs(f_prime)/(torch.norm(grad_f_prime)**2))*grad_f_prime
                        min_value = value
                    else:
                        if min_value > value:
                            r = (torch.abs(f_prime)/(torch.norm(grad_f_prime)**2))*grad_f_prime
                            min_value = value
                if original_attack == False:
                    image = torch.clamp(image + filterd * r, min=0, max=1).detach()
                else:
                    image = torch.clamp(image + r, min=0, max=1).detach()
            images[b:b+1, :, :, :] = image
        adv_images = images
        return adv_images
# Attack instances evaluated in the loop below; all share the wrapped
# (normalization + resnet50) model defined above.
attacks = [
    FGSM(model, eps=4/255),
    FFGSM(model, eps=4/255, alpha=12/255),
    RFGSM(model, eps=8/255, alpha=4/255, steps=1),
    PGD(model, eps=4/255, alpha=2/255, steps=7),
    APGD(model, eps=4/255, alpha=2/255, steps=7),
    TPGD(model, eps=8/255, alpha=2/255, steps=7),
    DeepFool(model, steps=3),
    #torchattacks.RFGSM(model, eps=8/255, alpha=4/255, steps=1),
    #torchattacks.FFGSM(model, eps=8/255, alpha=12/255),
    #torchattacks.APGD(model, eps=8/255, alpha=2/255, steps=7),
    #torchattacks.TPGD(model, eps=8/255, alpha=2/255, steps=7),
]
def filter(im):
    """Build a per-pixel perturbation weight map from local edge density.

    Canny edges are extracted from *im* (RGB uint8 image), then each pixel's
    weight is the mean edge intensity of the surrounding window (half-size N,
    clipped at the borders), offset by a small floor so flat regions still
    get perturbed, and finally normalized to mean 1.

    FIXES vs. the original: removed the dead ``imsum = im.sum(axis=2)``
    computation, replaced the per-pixel ``np.max([...])``/``np.min([...])``
    list temporaries with the builtin ``max``/``min`` (identical results),
    and hoisted the row bounds out of the inner loop.

    NOTE: the function name shadows the builtin ``filter``; kept unchanged
    because the evaluation loop below calls it by this name.
    """
    img_gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    img_canny = cv2.Canny(img_gray, 50, 150)
    img_canny_f = img_as_float(img_canny)
    N = 5  # half window size for local edge-density averaging
    rows, cols = img_canny.shape
    E = np.array(img_canny_f)
    for row in range(rows):
        # Row bounds depend only on `row`; compute them once per row.
        Ly = max(0, row - N)
        Uy = min(rows, row + N)
        for col in range(cols):
            Lx = max(0, col - N)
            Ux = min(cols, col + N)
            E[row, col] = img_canny_f[Ly:Uy, Lx:Ux].mean()
    E = E + 0.02  # small floor so edge-free areas keep a non-zero weight
    if show == True:
        plt.imshow(E, cmap=plt.cm.jet)
        plt.colorbar()
        plt.show()
    # print(img_canny_f.mean())
    E = E / E.mean()  # normalize the map to mean weight 1
    return E
print("Adversarial Image & Predicted Label")
for attack in attacks :
print("-"*70)
print(attack)
correct = 0
total = 0
stacked_img = np.array([[0]*3])
for images_, labels in normal_loader:
original = np.uint8(images_.squeeze(0).permute(1, 2, 0).numpy()*255)
start = time.time()
if original_attack == False:
filterd = filter(original)
stacked_img = np.stack((filterd,)*3,-1)
adv_images = attack(images_, labels, transforms.ToTensor()(stacked_img).to(device, dtype=torch.float))
# print(structural_similarity(original,np.uint8(adv_images.clone().cpu().squeeze(0).permute(1, 2, 0).numpy()*255), full=True,multichannel=True))
if show == True:
# imshow(torchvision.utils.make_grid(transforms.ToTensor()(adv_images).permute(0, 1, 2).cpu().data), [normal_data.classes[i] for i in pre])
imshow(torchvision.utils.make_grid(adv_images.cpu().data),'filterd')
#plt.imshow(original)
#plt.show()
# img = np.array([originaㅋl])
labels = labels.to(device)
outputs = model(adv_images.to(device))
_, pre = torch.max(outputs.data, 1)
total += 1
correct += (pre == labels).sum()
"""
if (pre == labels):
print('O',end=" ")
else:
print('X',end=" ")
"""
# imshow(torchvision.utils.make_grid(transforms.ToTensor()(original).permute(0, 1, 2).cpu().data), [normal_data.classes[i] for i in pre])
# imshow(torchvision.utils.make_grid(images.cpu().data, normalize=True), [normal_data.classes[i] for i in pre])
# imshow(torchvision.utils.make_grid(noise_.cpu().data, normalize=True), [normal_data.classes[i] for i in pre])
print('Total elapsed time (sec) : %.2f' % (time.time() - start))
print('Robust accuracy: %.2f %%' % (100 * float(correct) / total))
|
[
"noreply@github.com"
] |
noreply@github.com
|
e7b07e9da69275211369027ccc4b4e3df2428c9a
|
98d328e4e00ac7cf8930d2ff9bd68af1d9d9cc3b
|
/utils/lib_clustering.py
|
3e1b9079f84417c6585bb40e6d8bcf926bf03a2b
|
[] |
no_license
|
jtpils/Lane-Detection-from-Point-Cloud
|
4d7e98cafada569097e16e7bcb5fdabc048e0644
|
238cb8cedc823a84c32b60ce13e7de8c81f19232
|
refs/heads/master
| 2020-06-05T08:42:46.397450
| 2019-06-17T15:41:58
| 2019-06-17T15:41:58
| 192,380,398
| 14
| 4
| null | 2019-06-17T16:16:58
| 2019-06-17T16:16:58
| null |
UTF-8
|
Python
| false
| false
| 3,810
|
py
|
'''
Clustering by DBSCAN using sklearn library
This code is copied and modified from:
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
'''
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
class Clusterer(object):
    """Thin wrapper around sklearn's DBSCAN that caches the fit results and
    offers plotting / reporting helpers."""

    def __init__(self):
        # No clustering has been run yet; guards the helper methods.
        self.fit_success = False

    def fit(self, X, eps=0.3, min_samples=10):
        """Compute DBSCAN on X (n_samples, n_features) and cache the result."""
        db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        # samples that are close to the center of some cluster
        core_samples_mask[db.core_sample_indices_] = True
        self.X = X
        self.db = db
        self.core_samples_mask = core_samples_mask
        self.fit_success = True
        self.labels = db.labels_  # label of each sample; -1 means noise
        self.unique_labels = set(self.labels)
        # Noise (-1) does not count as a cluster.
        self.n_clusters = len(set(self.labels)) - \
            (1 if -1 in self.labels else 0)

    def plot_clusters(self):
        """Scatter-plot the 2-D clustering result (noise drawn in black,
        core samples large, border samples small)."""
        if not self.fit_success:
            return
        assert self.X.shape[1] == 2, "To visualize result, X must be 2 dimenstions."
        # member vars used in this function
        labels, n_clusters, unique_labels = self.labels, self.n_clusters, self.unique_labels
        core_samples_mask = self.core_samples_mask
        X = self.X
        # One color per label; black is reserved for noise below.
        colors = [plt.cm.Spectral(each)
                  for each in np.linspace(0, 1, len(unique_labels))]
        for k, col in zip(unique_labels, colors):
            if k == -1:
                # Black used for noise.
                col = [0, 0, 0, 1]
            class_member_mask = (labels == k)
            xy = X[class_member_mask & core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
                     markeredgecolor='k', markersize=14)
            xy = X[class_member_mask & ~core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
                     markeredgecolor='k', markersize=6)
        plt.title('Clustering result: {} clusters'.format(n_clusters))

    def print_clustering_result(self, labels_true=None):
        """Print cluster/noise counts plus clustering-quality metrics.

        BUG FIX: the original referenced undefined names ``labels_true`` and
        ``X`` (NameError unless the caller happened to define them at module
        scope).  Ground-truth labels are now an optional argument, and the
        data for the silhouette score comes from ``self.X``.
        """
        if not self.fit_success:
            return
        labels, n_clusters = self.labels, self.n_clusters
        # Number of clusters in labels, ignoring noise if present.
        n_noise_ = list(labels).count(-1)
        print('Estimated number of clusters: %d' % n_clusters)
        print('Estimated number of noise points: %d' % n_noise_)
        if labels_true is not None:
            # Supervised metrics require the ground-truth labels.
            print("Homogeneity: %0.3f" %
                  metrics.homogeneity_score(labels_true, labels))
            print("Completeness: %0.3f" %
                  metrics.completeness_score(labels_true, labels))
            print("V-measure: %0.3f" %
                  metrics.v_measure_score(labels_true, labels))
            print("Adjusted Rand Index: %0.3f"
                  % metrics.adjusted_rand_score(labels_true, labels))
            print("Adjusted Mutual Information: %0.3f"
                  % metrics.adjusted_mutual_info_score(labels_true, labels,
                                                       average_method='arithmetic'))
        if len(self.unique_labels - {-1}) > 1:
            # Silhouette is only defined for >= 2 real clusters.
            print("Silhouette Coefficient: %0.3f"
                  % metrics.silhouette_score(self.X, labels))
if __name__ == "__main__":
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
# Fit
cluster = Clusterer()
cluster.fit(X)
# Plot
cluster.plot_clusters()
plt.show()
|
[
"felixchenfy@gmail.com"
] |
felixchenfy@gmail.com
|
0acc75cf50775793b9ea0595bc40eede48ac8136
|
5358e2a0c829490f1384b6969a42b1ce3ee2c830
|
/docker_container/enderecos/endereco.py
|
1850d08271052659eb38f8f718d709884a6014cd
|
[] |
no_license
|
douglasrocha06/tech_talent_desafio_III
|
ffb23deade7a4698b15bb01a0028540b83f75da9
|
0c6bcd21b76e958f9d45141c804aa71a8c3248b0
|
refs/heads/master
| 2023-08-16T20:56:25.953386
| 2021-10-15T17:37:53
| 2021-10-15T17:37:53
| 417,583,818
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,832
|
py
|
import pymysql
from app import app
from config import mysql
from flask import jsonify
from flask import flash, request
from flask_httpauth import HTTPBasicAuth
auth = HTTPBasicAuth()
@app.route('/')
def welcome():
    """Landing route: returns a plain greeting string."""
    greeting = 'Sejam Bem-Vindos!'
    return greeting
#Vizualizar ENDEREÇOS DE TODOS CLIENTES
@app.route('/enderecos/clientes', methods=['GET'])
@auth.login_required
def enderecos_clientes():
    """GET: list every client joined with all of their registered addresses."""
    try:
        conn = mysql.connect()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        cursor.execute("select clientes.id as id, clientes.nome as Nome, enderecos.rua as Rua, enderecos.numero as Numero, enderecos.complemento as Complemento, enderecos.bairro as Bairro, enderecos.cidade as Cidade, enderecos.estado as Estado, enderecos.cep as Cep from clientes join enderecos on clientes.id = enderecos.idCliente order by Nome, Rua")
        linha = cursor.fetchall()  # every row of the join
        resposta = jsonify(linha)  # serialize rows as JSON
        resposta.status_code = 200
        return resposta
    except Exception as e:
        print(e)
    # NOTE(review): if mysql.connect() itself raises, `cursor`/`conn` were
    # never bound and this finally raises NameError, masking the real error.
    finally:
        cursor.close()
        conn.close()
#Vizualiza os endereços de UM CLIENTE ESPECÍFICO
@app.route('/enderecos/clientes/<int:id>', methods=['GET'])
@auth.login_required
def vizu_end_clientes(id):
    """GET: list every address registered for the client with the given id."""
    try:
        conn = mysql.connect()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        cursor.execute("select clientes.id as id,clientes.nome as Nome, enderecos.rua as Rua, enderecos.numero as Numero, enderecos.complemento as Complemento, enderecos.bairro as Bairro, enderecos.cidade as Cidade, enderecos.estado as Estado, enderecos.cep as Cep from clientes join enderecos on clientes.id = enderecos.idCliente where id = %s", id)
        linhas = cursor.fetchall()  # every address row of this client
        if not linhas:
            return jsonify({'status':'Cliente não possui endereço cadastrado!'}), 404
        resposta = jsonify(linhas)  # serialize rows as JSON
        resposta.status_code = 200
        return resposta
    except Exception as e:
        print(e)
    # NOTE(review): if mysql.connect() raises, `cursor`/`conn` are undefined
    # here and this finally raises NameError.
    finally:
        cursor.close()
        conn.close()
#Vizualizar todos os endereços
@app.route('/enderecos', methods=['GET'])
@auth.login_required
def enderecos():
    """GET: list every address row in the enderecos table."""
    try:
        conn = mysql.connect()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        cursor.execute("SELECT idEndereco, rua, numero, complemento, bairro, cidade, estado, cep, idCliente FROM enderecos")
        linha = cursor.fetchall()  # every row of the table
        resposta = jsonify(linha)  # serialize rows as JSON
        resposta.status_code = 200
        return resposta
    except Exception as e:
        print(e)
    # NOTE(review): `cursor`/`conn` are undefined here if connect() failed.
    finally:
        cursor.close()
        conn.close()
#Vizualizar um endereço específico
@app.route('/enderecos/<int:id>', methods=['GET'])
@auth.login_required
def vizualizar(id):
    """GET: fetch a single address by its idEndereco, or 404 if absent."""
    try:
        conn = mysql.connect()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        cursor.execute("SELECT idEndereco, rua, numero, complemento, bairro, cidade, estado, cep, idCliente FROM enderecos WHERE idEndereco =%s", id)
        linhas = cursor.fetchone()  # single matching row (or None)
        if not linhas:
            return jsonify({'status':'Endereço não cadastrado!'}), 404
        resposta = jsonify(linhas)  # serialize the row as JSON
        resposta.status_code = 200
        return resposta
    except Exception as e:
        print(e)
    # NOTE(review): `cursor`/`conn` are undefined here if connect() failed.
    finally:
        cursor.close()
        conn.close()
#Adicionar um endereço
@app.route('/enderecos', methods=['POST'])
@auth.login_required
def adicionar():
    """POST: insert a new address; expects a JSON body with every field.

    BUG FIX: the original opened `conn`/`cursor` only inside the success
    branch, so the `finally` block raised NameError whenever validation
    failed or the JSON was malformed.  Both handles are pre-initialized to
    None and closed only if they were actually opened.
    """
    conn = None
    cursor = None
    try:
        json = request.json  # request payload with the address fields
        rua = json['rua']
        numero = json['numero']
        complemento = json['complemento']
        bairro = json['bairro']
        cidade = json['cidade']
        estado = json['estado']
        cep = json['cep']
        idCliente = json['idCliente']
        if rua and numero and complemento and bairro and cidade and estado and cep and idCliente and request.method == 'POST':
            # Parameterized query: values are bound, never interpolated.
            sqlQuery = "INSERT INTO enderecos(rua, numero, complemento, bairro, cidade, estado, cep, idCliente) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
            dados = (rua, numero, complemento, bairro, cidade, estado, cep, idCliente)
            conn = mysql.connect()  # database connection
            cursor = conn.cursor(pymysql.cursors.DictCursor)
            cursor.execute(sqlQuery, dados)
            conn.commit()
            resposta = jsonify({'status':'Endereço adicionado com sucesso!'})
            resposta.status_code = 200
            return resposta
        else:
            return not_found()
    except Exception as e:
        print(e)
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
#Atualizar um endereço
@app.route('/enderecos', methods=['PUT'])
@auth.login_required
def atualizar():
    """PUT: update an existing address identified by idEndereco.

    BUG FIX: as in `adicionar`, the original only bound `conn`/`cursor` in
    the success branch, so the `finally` block raised NameError on the
    validation-failure path.  Handles are now pre-initialized and closed
    only if opened.
    """
    conn = None
    cursor = None
    try:
        json = request.json
        idEndereco = json['idEndereco']
        rua = json['rua']
        numero = json['numero']
        complemento = json['complemento']
        bairro = json['bairro']
        cidade = json['cidade']
        estado = json['estado']
        cep = json['cep']
        idCliente = json['idCliente']
        if rua and numero and complemento and bairro and cidade and estado and cep and idCliente and idEndereco and request.method == 'PUT':
            # Parameterized update keyed on idEndereco.
            sqlQuery = "UPDATE enderecos SET rua=%s, numero=%s, complemento=%s, bairro=%s, cidade=%s, estado=%s, cep=%s, idCliente=%s WHERE idEndereco=%s"
            dados = (rua, numero, complemento, bairro, cidade, estado, cep, idCliente, idEndereco,)
            conn = mysql.connect()  # database connection
            cursor = conn.cursor()
            cursor.execute(sqlQuery, dados)
            conn.commit()
            resposta = jsonify({'status':'Endereço atualizado com sucesso!'})
            resposta.status_code = 200
            return resposta
        else:
            return not_found()
    except Exception as e:
        print(e)
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
#Deletar um endereço
@app.route('/enderecos/<int:id>', methods=['DELETE'])
@auth.login_required
def deletar(id):
    """DELETE: remove the address with the given idEndereco; 404 if absent."""
    try:
        conn = mysql.connect()
        cursor = conn.cursor()
        # Check existence first so a missing id yields a clean 404.
        sqlQuery = "SELECT * FROM enderecos where idEndereco=%s"
        cursor.execute(sqlQuery, id)
        linha = cursor.fetchone()
        if not linha:
            return jsonify({'error':'Endereço inexistente!'}), 404
        cursor.execute("DELETE FROM enderecos WHERE idEndereco =%s", (id,))
        conn.commit()
        resposta = jsonify({'status':'Endereço deletado com sucesso!'})
        resposta.status_code = 200
        return resposta
    except Exception as e:
        print(e)
    # NOTE(review): `cursor`/`conn` are undefined here if connect() failed.
    finally:
        cursor.close()
        conn.close()
#Método de verificação de senha
@auth.verify_password
def verificacao(login, senha):
    """Basic-auth verifier: True only when the login exists and the
    password matches its stored value."""
    usuarios= {
        'douglas':'123',
        'cristhian':'321'
    }
    # BUG FIX: the original tested `if not (login, senha)`, which is always
    # False because a non-empty tuple is truthy — missing credentials were
    # never rejected.  Check each field individually instead.
    if not login or not senha:
        return False
    return usuarios.get(login) == senha  # True iff password matches
#Caso não encontre o caminho
@app.errorhandler(404)
def not_found(error=None):
    """Build the standard 404 JSON payload for unknown routes/records."""
    payload = {
        'status': 404,
        'mensagem': 'Registro nao encontrado: ' + request.url,
    }
    response = jsonify(payload)
    response.status_code = 404
    return response
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=5200)
|
[
"douglas.rocha@inmetrics.com.br"
] |
douglas.rocha@inmetrics.com.br
|
d10cffd433e1382aa8c9269811b4ec706c2b5af9
|
28aca7a21dbd066c30e0385937adc683ae707401
|
/flag_bot.py
|
954844ba9373d425539a352150b25658caf78f8d
|
[] |
no_license
|
obscuritysystems/DC801_CTF
|
219ed509e96e29712e2c018f4bd97602e7a19568
|
5b9ae581d1e1e2b914cd413735c5fe057a916dea
|
refs/heads/master
| 2016-09-06T17:52:50.623862
| 2013-05-07T05:26:54
| 2013-05-07T05:26:54
| 7,049,834
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
#!/usr/bin/env python
import sys, time, socket
from daemon import Daemon
from flag_bot_ai import FlagBotAI
class FlagBot(Daemon):
    # Daemon subclass: run() is invoked inside the daemonized process and
    # simply delegates the main loop to the bot AI implementation.
    def run(self):
        bot = FlagBotAI()
        bot.run()
if __name__ == "__main__":
daemon = NemusBot('/tmp/FlagBot.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
[
"nemus@obscuritysytems.com"
] |
nemus@obscuritysytems.com
|
fe17d4d7bf8095d15031ae2cf93d0c18c28d6751
|
10409d39ca4db722a084ee024d4088492917d8c1
|
/date.py
|
aab77b2861504e3aa5a0f35972ad30a53b7b2a6e
|
[] |
no_license
|
polaroidz/tkdata_fraud_detetction
|
d0ca643f4cbf1e7f44a0971f298a82ba726c4cba
|
dfaeb1c075fc8bbf6e5fcddf6b31e112d993383b
|
refs/heads/master
| 2022-04-20T09:11:18.875931
| 2020-04-18T22:48:51
| 2020-04-18T22:48:51
| 256,614,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
import math
import pyspark
from pyspark import keyword_only
from pyspark.sql import functions as F
from pyspark.ml import Transformer
from pyspark.ml.param.shared import HasInputCol
from pyspark.ml.param.shared import HasInputCols
from pyspark.ml.param.shared import HasOutputCol
from pyspark.ml.param.shared import Param
from pyspark.ml.param.shared import Params
from pyspark.ml.param.shared import TypeConverters
class DateColumns(Transformer, HasInputCol):
    """Spark ML Transformer that explodes a timestamp column into
    day / hour / minute / second / day-of-year / day-of-week / week-of-year
    integer feature columns."""

    @keyword_only
    def __init__(self, inputCol=None):
        # BUG FIX: the original called super(Transformer, self).__init__(),
        # which starts the MRO lookup *after* Transformer and therefore
        # skips any initializer Transformer itself defines; starting from
        # this class runs the full cooperative __init__ chain.
        super(DateColumns, self).__init__()
        self.setInputCol(inputCol)

    def _transform(self, df):
        """Return *df* with the seven dt_* columns appended."""
        src = self.getInputCol()  # renamed from `input`, which shadowed a builtin
        df = df.withColumn("dt_day", F.dayofmonth(src))
        df = df.withColumn("dt_hour", F.hour(src))
        df = df.withColumn("dt_minute", F.minute(src))
        df = df.withColumn("dt_second", F.second(src))
        df = df.withColumn("dt_dayofyear", F.dayofyear(src))
        df = df.withColumn("dt_dayofweek", F.dayofweek(src))
        df = df.withColumn("dt_weekofyear", F.weekofyear(src))
        return df

    def getOutputColumns(self):
        """Names of the columns _transform adds, in the order they are added."""
        return [
            "dt_day",
            "dt_hour",
            "dt_minute",
            "dt_second",
            "dt_dayofyear",
            "dt_dayofweek",
            "dt_weekofyear"
        ]
|
[
"diego.mrodrigues11@gmail.com"
] |
diego.mrodrigues11@gmail.com
|
ca02764f5cda7953b2d8979696c2f5fce8cc6ebc
|
25318e17552bce267ab4bc7436f0f630800d9aac
|
/OCR_MLP_M.py
|
70c25fb97264371eba6521022a963489a0216e76
|
[] |
no_license
|
Stx666Michael/digit_recognition
|
fbed7413955c919ef86805dd09582df7e5659e2e
|
1bf12d4c1e673d5cb2fd459c1bd56b7c7d3c71aa
|
refs/heads/main
| 2023-03-07T20:01:03.600883
| 2021-02-22T01:50:21
| 2021-02-22T01:50:21
| 340,858,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,568
|
py
|
import numpy as np #导入numpy工具包
from os import listdir #使用listdir模块,用于访问本地文件
from sklearn.neural_network import MLPClassifier
import PIL.Image as image
import random
import time
def img2vector(fileName):
    """Read a single-channel image file and flatten it into a 1 x (m*n)
    matrix of binarized {0, 1} pixel values.

    Pixels are emitted column-major (x outer, y inner) to match the layout
    the training code below was built around.

    FIX: the original paired a manual open() with a close() that was
    skipped on any exception, leaking the file handle; `with` guarantees
    the close.
    """
    data = []
    with open(fileName, 'rb') as f:
        img = image.open(f)
        m, n = img.size  # (width, height)
        for i in range(m):
            for j in range(n):
                x = img.getpixel((i, j))
                data.append(round(x / 255))  # binarize: 0..255 -> {0, 1}
    return np.mat(data)
def readDataSet_R(path,num):
    """Load `num` randomly sampled labeled digit images from folder `path`.

    File names are expected to encode the label as '<digit>_...'.  Returns
    (dataSet, hwLabels): dataSet is (num, 784) binarized pixels, hwLabels
    is (num, 10) one-hot labels.
    """
    fileList = listdir(path)  # all files in the folder
    numFiles = len(fileList)  # number of candidate files
    print("Ramdom:",num)
    dataSet = np.zeros([num,784],int)  # holds every sampled digit image
    hwLabels = np.zeros([num,10])  # corresponding one-hot labels
    Sample = random.sample(range(numFiles),num)  # sample indices without replacement
    for i in range(num):  # load every sampled file
        filePath = fileList[Sample[i]]  # file name / path
        digit = int(filePath.split('_')[0])  # label encoded in the file name
        hwLabels[i][digit] = 1.0  # set the matching one-hot entry
        dataSet[i] = img2vector(path +'/'+filePath)  # read pixel contents
        if (i%(num/100) == 0):
            # NOTE(review): `100*i/num+1` binds as (100*i/num)+1, so the
            # printed percentage is offset by 1 — presumably cosmetic; verify.
            print("\rLoading:",'█'*int(20*i/num),100*i/num+1,"%",end="")
    return dataSet,hwLabels
#read dataSet
print("Training...")
#train_dataSet, train_hwLabels = readDataSet('trainingDigits_M')
# Sample 10k random training images and fit a 50x50-unit MLP with SGD.
train_dataSet, train_hwLabels = readDataSet_R('trainingDigits_M',10000)
clf = MLPClassifier(solver='sgd',activation='relu',alpha=1e-4,hidden_layer_sizes=(50,50),random_state=1,max_iter=10,verbose=10,learning_rate_init=.1)
print()
print(clf)
clf.fit(train_dataSet,train_hwLabels)
print("Training complete.")
#read testing dataSet
print("\nTesting...")
#dataSet,hwLabels = readDataSet('testDigits_M')
dataSet,hwLabels = readDataSet_R('testDigits_M',1000)
res = clf.predict(dataSet)  # predict one-hot vectors for the test set
error_num = 0  # number of wrong predictions
num = len(dataSet)  # size of the test set
for i in range(num):  # walk every prediction
    # Compare the two length-10 one-hot vectors element-wise; a perfect
    # match sums to 10, anything less counts as a misclassification.
    if np.sum(res[i] == hwLabels[i]) < 10:
        error_num += 1
print("\nTotal num:",num," Wrong num:", \
      error_num," Accuracy:",1 - error_num / float(num))
# Keep the console window open for a while before exiting.
time.sleep(100)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0963782c89cb406827ba1acfa143e7fdd445951a
|
601f1eb021241c13d7b84aab0b51d30efa259b09
|
/homework 8.1 mood checker.py
|
46348b98396405be5f06098886d5f07db8878bea
|
[] |
no_license
|
johnbook666/PythonersteProgramme
|
f1d690332219e8807dce8e001065f2fa1086d454
|
95242ce9ffefc7c2c13c2b5e14b8b1d97128a424
|
refs/heads/master
| 2020-11-24T21:55:48.874857
| 2019-12-16T10:03:27
| 2019-12-16T10:03:27
| 228,356,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
# Mood checker: read a mood keyword from stdin and print a matching message.
print('--- mood checker ---')
print('enter mood: happy, nervous, sad, excited, relaxed')
mood = input('your mood ? ')
if mood == "happy":
    print('It is great to see you happy!')
elif mood == "nervous":
    print('Take a deep breath 3 times.')
elif mood == "sad":
    print('take a walk and enjoy the sun !')
elif mood == "excited":
    print('get the party started !')
elif mood == "relaxed":
    print('feel free to relax more')
else:
    # BUG FIX: the original wrote 'I don't recognize...' — the unescaped
    # apostrophe inside single quotes is a SyntaxError; double quotes fix it
    # while keeping the message text identical.
    print("I don't recognize this mood. Next time :-)")
|
[
"markusarendt@hotmail.com"
] |
markusarendt@hotmail.com
|
ba0cd3b1c0948476c5e95310ac7aa974653dfd23
|
29e444e9cd38e9d54f7af12db13be325946c8608
|
/events_spider/master_server.py
|
93e7e7c01260190296bc6d9b8d669f8a51fefa8a
|
[] |
no_license
|
HughLK/Distributed-News-Monitoring-System
|
0cf3da8f4a3de3afb9b24e6185408231d9ec7bdd
|
180472c4f5bc642eddc247d18b203d5da92fe40d
|
refs/heads/master
| 2020-09-24T12:53:23.538981
| 2019-12-14T05:25:16
| 2019-12-14T05:25:16
| 225,763,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
# -*- coding: utf-8 -*-
import datetime
from events_spider.utils.tools import LOGGER, APP_CONF, SCHEDULER
from rpc_client import RpcClient
from SimpleXMLRPCServer import SimpleXMLRPCServer
CLIENT = RpcClient()
def callback():
    # Invoked over XML-RPC once this node has been elected as master.
    LOGGER.info("New Master Confirmed.")
    CLIENT.call()
    # SCHEDULER.add_job(CLIENT.call, 'interval', id='call', minutes=APP_CONF['config']['crawl_frequency'], next_run_time=datetime.datetime.now())
    LOGGER.info(SCHEDULER.get_jobs())
# Expose `callback` under the RPC method name "call" and serve forever.
server = SimpleXMLRPCServer((APP_CONF['config']['localhost'], 8888))
server.register_function(callback, "call")
LOGGER.info("Awaiting Being Eelcted.")
server.serve_forever()
|
[
"scdylk@aliyun.com"
] |
scdylk@aliyun.com
|
46335ec9e4accf5fef2e9fe3fc4542bb092994bf
|
6bff080a2bfba280244fc956c40acfd294e9c095
|
/Inner Graph Generator/graph_tester.py
|
66d35ed8f0e3f38c8b5e92af551439672e8c6bd7
|
[] |
no_license
|
thunderbolt06/Enumeration-of-RFPs
|
7ca0745e79fa094be3aeef5580be926f4999c34d
|
324366ba8f690347eda5ac97a5285fb8a139a3e6
|
refs/heads/master
| 2020-08-07T13:17:22.366289
| 2020-03-26T11:48:00
| 2020-03-26T11:48:00
| 213,460,625
| 1
| 2
| null | 2020-02-16T10:30:07
| 2019-10-07T18:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
import networkx as nx
import matplotlib.pyplot as plt
gaf = nx.Graph()
# First stdin line: the edge count; then one "u v" integer pair per line.
for i in range(int(input())):
    gaf.add_edge(*map(int, input().strip().split(' ')))
print(gaf.edges())
# Draw with a planar embedding to visually inspect the constructed graph.
nx.draw_planar(gaf, labels=None, font_size=12, font_color='k', font_family='sans-serif', font_weight='normal', alpha=1.0, bbox=None, ax=None)
plt.show()
|
[
"4chinmai@gmail.com"
] |
4chinmai@gmail.com
|
aa8aabf65ecb49d7092f518affba7b4f4200745b
|
609582ee37a01ac6a67fb9c957825dcd3c9a5b3a
|
/LeetCode_Math/67_Add_Binaray.py
|
77bf2de64eddd1dca19c9a8f56aeabd0235107f3
|
[] |
no_license
|
captainjack331089/captainjack33.LeetCode
|
a9ad7b3591675c76814eda22e683745068e0abed
|
4c03f28371e003e8e6a7c30b7b0c46beb5e2a8e7
|
refs/heads/master
| 2022-03-07T19:53:40.454945
| 2019-11-06T19:32:00
| 2019-11-06T19:32:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
"""
67. Add Binary
Category: Math
Difficulty: Easy
"""
"""
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
class Solution():
def addBinary(self,a,b):
return (bin( int(a,2) + int(b,2) )[2:])
a = "100"
b = "100"
if __name__ == "__main__":
print(Solution().addBinary(a,b))
|
[
"qfhjack@gmail.com"
] |
qfhjack@gmail.com
|
483082bb132d04dca5ed5513a195893b7756e4ed
|
e86c934b98fd78352eda6fa5ee23ab89a9814c4b
|
/aliyun/log/etl_core/restrict_config_parser.py
|
5b9db1008be7bd7755136820711f15e427bae620
|
[
"MIT"
] |
permissive
|
aliyun/aliyun-log-python-sdk
|
fbe8212da62f0ae30aa4fcb2c10c4e2ef7b7aaee
|
0ccf358adecf01f953011f21dfcf259114bab2aa
|
refs/heads/master
| 2023-08-31T03:25:26.183336
| 2023-08-18T02:12:26
| 2023-08-18T02:12:26
| 78,168,645
| 162
| 149
|
MIT
| 2023-09-05T01:37:51
| 2017-01-06T03:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
import ast
import logging
import six
import sys
TRUST_AST_TYPES = (ast.Call, ast.Module, ast.List, ast.Tuple, ast.Dict, ast.Name, ast.Num, ast.Str,
ast.Assign, ast.Load)
if sys.version_info[:2] == (3, 3):
TRUST_AST_TYPES = TRUST_AST_TYPES + (ast.Bytes,)
elif six.PY3:
TRUST_AST_TYPES = TRUST_AST_TYPES + (ast.Bytes, ast.NameConstant)
class InvalidETLConfig(Exception):
pass
builtin_macros = [
'KEEP_EVENT_',
'DROP_EVENT_',
'KEEP_FIELDS_',
'DROP_FIELDS_',
'RENAME_FIELDS_',
'ALIAS_',
'DISPATCH_EVENT_',
'TRANSFORM_EVENT_',
'KV_FIELDS_'
]
built_in_fns = ['V', 'JSON', 'CSV', 'REGEX', 'EMPTY', 'NO_EMPTY', 'DROP_F', 'KV', 'TSV', 'PSV', 'LOOKUP', 'SPLIT', 'ZIP']
built_in_ids = ['KV', 'ANY', 'ALL', 'F_TIME', 'F_META', 'F_TAGS', 'SPLIT', 'JSON', 'True', 'False', 'None']
logger = logging.getLogger(__name__)
class RestrictConfigParser(ast.NodeVisitor):
def visit_ImportFrom(self, node):
if node.module == 'aliyun.log.etl_core' and len(node.names) == 1 and node.names[0].name == '*':
logger.info("[Passed] import detected: from aliyun.log.etl_core import *")
else:
raise InvalidETLConfig("unknown import: {0}".format(node.module))
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
if isinstance(node.func.ctx, ast.Load) and node.func.id in built_in_fns:
logger.info("[Passed] known call detected")
else:
raise InvalidETLConfig("unknown call id detected: {0}".format(node.func.id))
else:
raise InvalidETLConfig("unknown call type detected: {0}".format(node.func))
def visit_Name(self, node):
if isinstance(node.ctx, ast.Store):
for p in builtin_macros:
if node.id.startswith(p):
logger.info('[Passed] assign detected: ', node.id)
break
else:
raise InvalidETLConfig('unknown assign detected: ', node.id)
elif isinstance(node.ctx, ast.Load):
if node.id in built_in_ids:
logger.info(' [Passed] assigned name:', node.id)
else:
raise InvalidETLConfig('unknown load detected: ', node.id)
else:
raise InvalidETLConfig("unknown Name: {0}".format(node.id))
def generic_visit(self, node):
if isinstance(node, TRUST_AST_TYPES):
logger.info("... known type detected: ", type(node))
else:
raise InvalidETLConfig("unknown type detected: {0}".format(type(node)))
ast.NodeVisitor.generic_visit(self, node)
def parse(self, code):
self.visit(ast.parse(code))
|
[
"wjo1212@163.com"
] |
wjo1212@163.com
|
db5478f9a0cb0cf030d084d4aa9c480907c197a7
|
0dc3e9b70da8ccd056e0a0fab2b1d8f850c3d470
|
/lantern/django/django_celery/src/apps/cars/serializers.py
|
3b2841adafff0d4d82de945686eeba93f6718cd8
|
[] |
no_license
|
ArturYefriemov/green_lantern
|
28e7150af7b9d2281a107ad80026828ad77af62a
|
2841b647e1bfae4a7505e91e8a8695d03f35a3a2
|
refs/heads/master
| 2021-03-01T16:54:58.881835
| 2020-11-17T19:42:23
| 2020-11-17T19:42:23
| 245,799,969
| 0
| 0
| null | 2020-07-14T18:51:13
| 2020-03-08T11:13:32
|
Python
|
UTF-8
|
Python
| false
| false
| 190
|
py
|
from rest_framework import serializers
from apps.cars.models import Car
class CarSerializer(serializers.ModelSerializer):
class Meta:
model = Car
fields = '__all__'
|
[
"odarchenko@ex.ua"
] |
odarchenko@ex.ua
|
80d65ef8dd7fb82cd527bbcf4cec83c78d67c536
|
e12b840d9ac3eb1cc0e3a08e91b5812fc9509798
|
/invoke_lambda/demo/demo.py
|
b31cc9a44d7cdb8eba5db9b55f0c63b9627f5326
|
[] |
no_license
|
kfunamizu/python_commonlib
|
7122272f3c1c75e62d9585ebdf5cbc6fa5c1ab77
|
6ecdbc904840763880928c1014c6898f82fb6b46
|
refs/heads/master
| 2020-04-17T03:57:09.252029
| 2019-11-22T02:08:25
| 2019-11-22T02:08:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from invoke_lambda import invoke_lambda
lambda_function_name = 'kfunamizu_sample'
event_dict = {
'1' : '1',
'2' : '2',
'3' : '3'
}
invoke_lambda(lambda_function_name, event_dict)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e0b9e75b23d4f7865588f65066034254d752d3c3
|
33e0d2d343b276b96236890823e8446482c5b9d8
|
/myapi/settings.py
|
11a2ffab604010b5288b8d2f545d6d1d8ec2f759
|
[] |
no_license
|
ketan9712735468/myapi
|
1b3e96ba76f4b37dc28a4954b0a4d1f306d9ae7c
|
0c3c4054f55759c816a589246aac439f85544b7a
|
refs/heads/main
| 2023-04-17T02:53:00.794462
| 2021-05-11T05:51:14
| 2021-05-11T05:51:14
| 366,246,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,762
|
py
|
"""
Django settings for myapi project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path,os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
'rest_framework.permissions.AllowAny'
]
}
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'eucbpib&@3hm3zauyx0)10#do-45c!ey^+$dom-w-6e-jy@j4p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'firstapp',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'rest_auth.registration',
'cars',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myapi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myapi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"ketanmangukiya001@gmail.com"
] |
ketanmangukiya001@gmail.com
|
3a4176029eb9b62a827d71c46ebb1792fdf37b91
|
6d703f97d1326023cbfe9818474d446a32e9e7a0
|
/siteProject/wsgi.py
|
252143bc4db532c80ff3654dbd30ecdecfab7e7b
|
[] |
no_license
|
LiFFTB/LearnPython
|
748d1b89d2197ba1da3026dd488c08aaa15bf258
|
2132c055f8ce3219366a0d023a3e3cb4a0398979
|
refs/heads/master
| 2022-10-05T11:46:48.552301
| 2020-06-01T15:15:46
| 2020-06-01T15:15:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
WSGI config for siteProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'siteProject.settings')
application = get_wsgi_application()
|
[
"jinyu929@qq.com"
] |
jinyu929@qq.com
|
1f6d7e07edb5c93c86a793d1ec1b2f4d6b7155c1
|
479aefa19c46bd31d567035fed0227b4282fc662
|
/MovieWebsite/group_func/migrations/0005_remove_group_description.py
|
ebb5650eb875ada7722ba22810ddda024100be8d
|
[] |
no_license
|
KarryBanana/SE_Project
|
cc60371bf5194bf003ecafe1d21ce7f3878f644d
|
f9ede4caae56de97dfe6abb84f0e4811ff6d65b5
|
refs/heads/master
| 2021-05-23T01:36:27.939979
| 2020-08-10T08:39:25
| 2020-08-10T08:39:25
| 253,175,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
# Generated by Django 3.0.7 on 2020-06-29 06:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('group_func', '0004_group_description'),
]
operations = [
migrations.RemoveField(
model_name='group',
name='description',
),
]
|
[
"bill881@126.com"
] |
bill881@126.com
|
8e1d635e43cf0d4a577b35facf856bf52864130c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04005/s700047032.py
|
6b014ba065ec8998d3dab92a228e7bca1810778d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
a,b,c=map(int,input().split())
print(0 if (a*b*c)%2==0 else min(a*b,b*c,c*a))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
635b2ea8ba272c29ef49790aae721a40124e6873
|
98f3deef793bee63b029f5bcc2335524e8c2e5c2
|
/爬虫/爬虫入门/封装函数后爬取某网站.py
|
50c1c24b6182b08bf25e0ac5785cdc9135f43a84
|
[] |
no_license
|
BBBBchan/python
|
f6a069821b8c4848fd25d70c6f36d6217023beb5
|
ca106ea549f68b557829da51588c5ceb416d5435
|
refs/heads/master
| 2021-06-04T13:19:13.485475
| 2019-11-29T03:38:12
| 2019-11-29T03:38:12
| 112,024,051
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
# -*- coding: utf-8 -*-
import requests
def gethtml(url):
try:
r = requests.get(url, timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return "异常"
if __name__ == "__main__":
url = "www.baidu.com"
print(gethtml(url))
|
[
"sbysbysby123@gmail.com"
] |
sbysbysby123@gmail.com
|
76b5e2452098e49235282783ad7eb1263db83e08
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/yubinbai/pcuva-problems/UVa 10539 - Almost Prime Numbers/main.py
|
30bb7c3cab4b9a2a5ac9a024702a2f2bdb6ddbf0
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664
| 2016-09-12T12:38:32
| 2016-09-12T12:38:32
| 65,951,766
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
from bisect import *
from bitstring import BitArray
import sys
MAXN = 1000005
def prime_sieve(top=MAXN):
b = BitArray(top) # bitstring of ’0’ bits
for i in range(2, top):
if not b[i]:
yield i
# i is prime, so set all its multiples to ’1’.
b.set(True, range(i * i, top, i))
if __name__ == '__main__':
primes = list(prime_sieve())
almostPrimes = []
for p in primes:
p1 = p ** 2
while p1 < MAXN:
almostPrimes.append(p1)
p1 *= p
almostPrimes.sort()
sys.stdin = open('input.txt')
numTest = int(input())
for x in range(numTest):
left, right = map(int, raw_input().split())
i1 = bisect_right(almostPrimes, left)
i2 = bisect_right(almostPrimes, right)
print(i2 - i1)
|
[
"xenron@outlook.com"
] |
xenron@outlook.com
|
d485c36b325898c6a6574811444544a7d9f9a257
|
6001b2460904142720818b961669f8306dd330ed
|
/posts/tests/test_urls.py
|
04870058ad052e1973f51af4e76d181c18aa72a7
|
[] |
no_license
|
aryanlilian/Social-Posts
|
aad06d3d7527a73db296d107845b38ffee6daeae
|
610b59474aa4f65e9ccd8504c547d9ec31d725c1
|
refs/heads/main
| 2023-04-18T15:30:08.081117
| 2021-04-29T16:50:41
| 2021-04-29T16:50:41
| 362,097,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from posts.views import postsList
class TestUrls(SimpleTestCase):
# The setUp method is for defining all the fields that should be used for testing purposes in the testing methods
def setUp(self):
self.posts_list_url = reverse('posts-list')
# testing the URLs by checking if it's using the right view function for handling the request and responses
def test_posts_list_url_resolves(self):
self.assertEquals(resolve(self.posts_list_url).func, postsList)
|
[
"aryanlilian@gmail.com"
] |
aryanlilian@gmail.com
|
7326d2d7689ae724544c3a6135e5fed3824e819f
|
b27f5d5691d1aaae3f3b334f73c4f91f835bcabc
|
/Code/ops.py
|
eb9e8390bbc44d2e79660faaa3eacc3112d47c71
|
[] |
no_license
|
VTLI/comic-gen
|
a27220193524ec0833a3896343ffc2613c2ebedd
|
4ee2a6e22b29b47b2aeaaef18ba972c9d5f5d151
|
refs/heads/master
| 2020-05-24T03:46:26.256772
| 2019-07-11T08:39:53
| 2019-07-11T08:39:53
| 187,078,397
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,013
|
py
|
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from utils import *
try:
image_summary = tf.image_summary
scalar_summary = tf.scalar_summary
histogram_summary = tf.histogram_summary
merge_summary = tf.merge_summary
SummaryWriter = tf.train.SummaryWriter
except:
image_summary = tf.summary.image
scalar_summary = tf.summary.scalar
histogram_summary = tf.summary.histogram
merge_summary = tf.summary.merge
SummaryWriter = tf.summary.FileWriter
if "concat_v2" in dir(tf):
def concat(tensors, axis, *args, **kwargs):
return tf.concat_v2(tensors, axis, *args, **kwargs)
else:
def concat(tensors, axis, *args, **kwargs):
return tf.concat(tensors, axis, *args, **kwargs)
class batch_norm(object):
def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x, train=True):
return tf.contrib.layers.batch_norm(x,
decay=self.momentum,
updates_collections=None,
epsilon=self.epsilon,
scale=True,
is_training=train,
scope=self.name)
def conv_cond_concat(x, y):
"""Concatenate conditioning vector on feature map axis."""
x_shapes = x.get_shape()
y_shapes = y.get_shape()
return concat([
x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", with_w=False):
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
try:
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
# Support for verisons of TensorFlow before 0.7.0
except AttributeError:
deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
if with_w:
return deconv, w, biases
else:
return deconv
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
try:
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
except ValueError as err:
msg = "NOTE: Usually, this is due to an issue with the image dimensions. Did you correctly set '--crop' or '--input_height' or '--output_height'?"
err.args = err.args + (msg,)
raise
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
|
[
"noreply@github.com"
] |
noreply@github.com
|
9d9a28c406e812fde853a9ab4577cc16b649995d
|
9b77f1e31d5901924431a2a3164312cc346bde4f
|
/ADI4/manage.py
|
e9ec1a96a7b096f6b2698c979c0b121ed89eb43f
|
[] |
no_license
|
Adi19471/Djnago_Code-Daily
|
c2184bf21db5c8d4b3c4098fbd593e4949375ae8
|
03b1b70d3e187fe85eb24e88b7ef3391b14aa98c
|
refs/heads/master
| 2023-08-14T14:36:36.144243
| 2021-09-20T12:52:46
| 2021-09-20T12:52:46
| 375,690,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ADI4.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"akumatha@gmail.com"
] |
akumatha@gmail.com
|
44ccbd118db55872abc28425b29a43c1682f0194
|
a14ab84ccb471b52e32fe1bf0ed70a1d1813396e
|
/python/firstproject/firstproject/settings.py
|
9fa477b41686ba8d46a37c5f60de4b86c4660205
|
[] |
no_license
|
Aashirya1995/COMP705
|
051c7be353e8ffa21262feacf3e2af0016153ad5
|
bc382beb66979462de24ff427f397de4f9b8892d
|
refs/heads/master
| 2021-05-09T20:33:14.511019
| 2018-03-06T17:05:18
| 2018-03-06T17:05:18
| 118,692,267
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,354
|
py
|
"""
Django settings for firstproject project.
Generated by 'django-admin startproject' using Django 2.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# equivalent to where manage.py lives
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r8y_2gctiffm(jd47wb76zu6o3kpeyxog+@04a+l0b^76y228z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firstproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firstproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIR = [os.path.join(BASE_DIR, 'static')]
|
[
"ak1107@wildcats.unh.edu"
] |
ak1107@wildcats.unh.edu
|
627e5d9f212753a73b9f9c9e81a215e81c4bc4b6
|
14b768ac8d2ea19fa8f3c99f9802fb9096a2e7f0
|
/core/world_patch_block.py
|
368aaefd742a463d793003cbd5c261621fc6e847
|
[] |
no_license
|
RogodaThallus/PyLogo
|
f1c397b0cd262b5cf39f3720c30348204525e0de
|
b41e47dfa287636675f38535ae2d3995aa4cd027
|
refs/heads/master
| 2021-01-08T08:13:50.671951
| 2020-04-11T00:02:17
| 2020-04-11T00:02:17
| 241,967,208
| 0
| 0
| null | 2020-02-20T19:07:50
| 2020-02-20T19:07:49
| null |
UTF-8
|
Python
| false
| false
| 9,780
|
py
|
from __future__ import annotations
from math import sqrt
from typing import Tuple
import numpy as np
from pygame.color import Color
from pygame.rect import Rect
from pygame.sprite import Sprite
from pygame.surface import Surface
import core.gui as gui
# Importing this file eliminates the need for a globals declaration
# noinspection PyUnresolvedReferences
import core.world_patch_block as world
from core.gui import SHAPES
from core.pairs import center_pixel, Pixel_xy, RowCol
from core.utils import get_class_name
class Block(Sprite):
"""
A generic patch/agent. Has a Pixel_xy but not necessarily a RowCol. Has a Color.
"""
agent_text_offset = int(1.5*gui.PATCH_SIZE)
patch_text_offset = -int(1.0*gui.PATCH_SIZE)
def __init__(self, center_pixel: Pixel_xy, color=Color('black')):
super().__init__()
self.center_pixel: Pixel_xy = center_pixel
self.rect = Rect((0, 0), (gui.PATCH_SIZE, gui.PATCH_SIZE))
# noinspection PyTypeChecker
sum_pixel: Pixel_xy = center_pixel + Pixel_xy((1, 1))
self.rect.center = sum_pixel
self.image = Surface((self.rect.w, self.rect.h))
self.color = self.base_color = color
self._label = None
self.highlight = None
def distance_to_xy(self, xy: Pixel_xy):
x_dist = self.center_pixel.x - xy.x
y_dist = self.center_pixel.y - xy.y
dist = sqrt(x_dist * x_dist + y_dist*y_dist)
return dist
# Note that the actual drawing (blit and draw_line) takes place in core.gui.
def draw(self, shape_name=None):
if self.label:
self.draw_label()
if isinstance(self, Patch) or shape_name in SHAPES:
self.rect.center = self.center_pixel
# self.rect = Rect(center=self.rect.center)
gui.blit(self.image, self.rect)
else:
gui.draw(self, shape_name=shape_name)
def draw_label(self):
offset = Block.patch_text_offset if isinstance(self, Patch) else Block.agent_text_offset
text_center = Pixel_xy((self.rect.x + offset, self.rect.y + offset))
line_color = Color('white') if isinstance(self, Patch) and self.color == Color('black') else self.color
obj_center = self.rect.center
label = self.label
gui.draw_label(label, text_center, obj_center, line_color)
# def draw_label(self):
# text = gui.FONT.render(self.label, True, Color('black'), Color('white'))
# offset = Block.patch_text_offset if isinstance(self, Patch) else Block.agent_text_offset
# text_center = Pixel_xy((self.rect.x + offset, self.rect.y + offset))
# gui.blit(text, text_center)
# line_color = Color('white') if isinstance(self, Patch) and self.color == Color('black') else self.color
# gui.draw_line(start_pixel=self.rect.center, end_pixel=text_center, line_color=line_color)
@property
def label(self):
return self._label if self._label else None
@label.setter
def label(self, value):
self._label = value
def set_color(self, color):
self.color = color
self.image.fill(color)
class Patch(Block):
def __init__(self, row_col: RowCol, color=Color('black')):
super().__init__(row_col.patch_to_center_pixel(), color)
self.row_col = row_col
self.agents = None
self._neighbors_4 = None
self._neighbors_8 = None
self._neighbors_24 = None
def __str__(self):
class_name = get_class_name(self)
return f'{class_name}{(self.row_col.row, self.row_col.col)}'
def add_agent(self, agent):
self.agents.add(agent)
@property
def col(self):
return self.row_col.col
@property
def row(self):
return self.row_col.row
def clear(self):
self.agents = set()
self.label = None
self.set_color(self.base_color)
def neighbors_4(self):
if self._neighbors_4 is None:
cardinal_deltas = ((-1, 0), (1, 0), (0, -1), (0, 1))
self._neighbors_4 = self.neighbors(cardinal_deltas)
return self._neighbors_4
def neighbors_8(self):
if self._neighbors_8 is None:
eight_deltas = ((-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1))
self._neighbors_8 = self.neighbors(eight_deltas)
return self._neighbors_8
def neighbors_24(self):
if self._neighbors_24 is None:
twenty_four_deltas = ((-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1),
(-2, -2), (-1, -2), (0, -2), (1, -2), (2, -2),
(-2, -1), (2, -1),
(-2, 0), (2, 0),
(-2, 1), (2, 1),
(-2, 2), (-1, 2), (0, 2), (1, 2), (2, 2),
)
self._neighbors_24 = self.neighbors(twenty_four_deltas)
return self._neighbors_24
def neighbors(self, deltas):
"""
The neighbors of this patch determined by the deltas.
Note the addition of two RowCol objects to produce a new RowCol object: self.row_col + utils.RowCol(r, c).
Wrap around is handled by RowCol. We then use the RowCol object as a tuple to access the np.ndarray
"""
# noinspection PyUnresolvedReferences
neighbors = [World.patches_array[(self.row_col + RowCol((r, c))).wrap().as_int()]
for (r, c) in deltas]
return neighbors
def remove_agent(self, agent):
self.agents.remove(agent)
class World:
agents = None
links = None
patches = None
patches_array: np.ndarray = None
ticks = None
def __init__(self, patch_class, agent_class):
World.ticks = 0
self.patch_class = patch_class
self.create_patches_array()
self.agent_class = agent_class
self.done = False
self.reset_all()
@staticmethod
def clear_all():
World.agents = set()
World.links = set()
for patch in World.patches:
patch.clear()
def create_agents(self, nbr_agents):
for _ in range(nbr_agents):
self.agent_class()
def create_ordered_agents(self, n, shape_name='netlogo_figure', scale=1.4, color=None, radius=140):
"""
Create n Agents with headings evenly spaced from 0 to 360
Return a list of the Agents in the order created.
"""
agent_list = [self.agent_class(shape_name=shape_name, scale=scale, color=color) for _ in range(n)]
for (i, agent) in enumerate(agent_list):
heading = i * 360 / n
agent.set_heading(heading)
if radius:
agent.forward(radius)
return agent_list
def create_patches_array(self):
patch_pseudo_array = [[self.patch_class(RowCol((r, c))) for c in range(gui.PATCH_COLS)]
for r in range(gui.PATCH_ROWS)]
World.patches_array = np.array(patch_pseudo_array)
# .flat is an iterator. Can't use it more than once.
World.patches = list(World.patches_array.flat)
def create_random_agents(self, n, shape_name='netlogo_figure', color=None, scale=1.4):
"""
Create n Agents placed randomly on the screen. They are all facing the screen's center pixel.
"""
for _ in range(n):
agent = self.agent_class(color=color, shape_name=shape_name, scale=scale)
agent.move_to_xy(Pixel_xy.random_pixel())
agent.face_xy(center_pixel())
def draw(self):
"""
Draw the world by drawing the patches and agents.
Should check to see which really need to be re-drawn.
"""
for patch in World.patches:
patch.draw()
for link in World.links:
link.draw()
for agent in World.agents:
agent.draw()
def final_thoughts(self):
""" Add any final tests, data gathering, summarization, etc. here. """
pass
# Uncomment this code to see how well the (@lru) caches work.
# print()
# for fn in [utils._heading_to_dxdy_int, utils._dx_int, utils._dy_int,
# utils.atan2_normalized, utils._cos_int, utils._sin_int]:
# if fn == utils.atan2:
# print()
# print(f'{str(fn.__wrapped__).split(" ")[1]}: {fn.cache_info()}')
def handle_event(self, _event):
pass
@staticmethod
def increment_ticks():
World.ticks += 1
def mouse_click(self, xy):
pass
def pixel_tuple_to_patch(self, xy: Tuple[int, int]):
"""
Get the patch RowCol for this pixel
"""
return self.pixel_xy_to_patch(Pixel_xy(xy))
@staticmethod
def pixel_xy_to_patch(pixel_xy: Pixel_xy) -> Patch:
"""
Get the patch RowCol for this pixel
"""
row_col: RowCol = pixel_xy.pixel_to_row_col()
patch = World.patches_array[row_col.row, row_col.col]
return patch
def reset_all(self):
self.done = False
self.clear_all()
self.reset_ticks()
@staticmethod
def reset_ticks():
World.ticks = 0
def setup(self):
"""
Set up the world. Override for each world
"""
pass
def step(self):
"""
Update the world. Override for each world
"""
pass
|
[
"noreply@github.com"
] |
noreply@github.com
|
a9df993bc04715662c0fc174ac283338714e2f13
|
94f07753b134531c06dc2e789f594c25bfa6993e
|
/aiida_kkr/tests/workflows/test_kkrimp_dos_wc.py
|
1a7b6fa1abb1b81319983a2e8bed278e2d44c444
|
[
"MIT"
] |
permissive
|
IngoMeyer441/aiida-kkr
|
23316ca7d74b40580d149ebcf7ae9da79373f8a5
|
5ce20a61a667787aeb68fa0c8b81e73aa3cf94a5
|
refs/heads/master
| 2023-07-15T01:02:11.685631
| 2021-05-05T06:54:56
| 2021-05-05T06:54:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,222
|
py
|
#!/usr/bin/env python
from __future__ import absolute_import
import pytest
from aiida_kkr.tests.dbsetup import *
# tests
class Test_kkrimp_dos_workflow():
"""
Tests for the kkrimp_scf workflow
"""
@pytest.mark.timeout(300, method='thread')
@pytest.mark.usefixtures("fresh_aiida_env")
def test_dos_startpot_wc(self):
"""
simple Cu noSOC, FP, lmax2 full example using scf workflow for impurity host-in-host
"""
from aiida.orm import Code, load_node
from aiida.plugins import DataFactory
from aiida.orm.querybuilder import QueryBuilder
from masci_tools.io.kkr_params import kkrparams
from aiida_kkr.workflows.kkr_imp_dos import kkr_imp_dos_wc
from numpy import array
Dict = DataFactory('dict')
StructureData = DataFactory('structure')
# prepare computer and code (needed so that
prepare_code(kkrimp_codename, codelocation, computername, workdir)
prepare_code(kkr_codename, codelocation, computername, workdir)
wfd =kkr_imp_dos_wc.get_wf_defaults()
options = {'queue_name' : queuename, 'resources': {"num_machines": 1}, 'max_wallclock_seconds' : 5*60, 'use_mpi' : False, 'custom_scheduler_commands' : ''}
options = Dict(dict=options)
# The scf-workflow needs also the voronoi and KKR codes to be able to run the calulations
KKRimpCode = Code.get_from_string(kkrimp_codename+'@'+computername)
KKRCode = Code.get_from_string(kkr_codename+'@'+computername)
# import previous GF writeout
from aiida.tools.importexport import import_data
import_data('files/db_dump_kkrflex_create.tar.gz')
GF_host_calc = load_node('baabef05-f418-4475-bba5-ef0ee3fd5ca6')
# now create a SingleFileData node containing the impurity starting potential
from aiida_kkr.tools.common_workfunctions import neworder_potential_wf
from numpy import loadtxt
neworder_pot1 = [int(i) for i in loadtxt(GF_host_calc.outputs.retrieved.open('scoef'), skiprows=1)[:,3]-1]
settings_dict = {'pot1': 'out_potential', 'out_pot': 'potential_imp', 'neworder': neworder_pot1}
settings = Dict(dict=settings_dict)
startpot_imp_sfd = neworder_potential_wf(settings_node=settings, parent_calc_folder=GF_host_calc.outputs.remote_folder)
label = 'kkrimp_dos Cu host_in_host'
descr = 'kkrimp_dos workflow for Cu bulk'
imp_info = GF_host_calc.inputs.impurity_info.get_dict()
imp_info ['Rcut'] = 2.5533
print(imp_info)
# create process builder to set parameters
builder = kkr_imp_dos_wc.get_builder()
builder.metadata.description = descr
builder.metadata.label = label
builder.options = options
builder.kkr = KKRCode
builder.kkrimp = KKRimpCode
builder.imp_pot_sfd = startpot_imp_sfd
builder.wf_parameters = Dict(dict=wfd)
builder.impurity_info = Dict(dict=imp_info)
builder.host_remote = GF_host_calc.outputs.remote_folder
# now run calculation
from aiida.engine import run
print(builder)
out = run(builder)
print(out)
assert 'last_calc_info' in out.keys()
assert 'last_calc_output_parameters' in out.keys()
assert 'workflow_info' in out.keys()
assert 'dos_data' in out.keys()
assert 'dos_data_interpol' in out.keys()
assert len(out['dos_data_interpol'].get_y()) == 5
assert len(out['dos_data_interpol'].get_y()[0]) == 3
assert len(out['dos_data_interpol'].get_y()[0][0]) == 20
@pytest.mark.timeout(300, method='thread')
def test_dos_reuse_gf_writeout(self):
pass
@pytest.mark.timeout(300, method='thread')
def test_dos_from_kkrimp_sub(self):
pass
@pytest.mark.timeout(300, method='thread')
def test_dos_from_kkrimp_full(self):
pass
#run test manually
if __name__=='__main__':
from aiida import load_profile
load_profile()
Test = Test_kkrimp_dos_workflow()
Test.test_dos_startpot_wc()
Test.test_dos_reuse_gf_writeout()
Test.test_dos_from_kkrimp_sub()
Test.test_dos_from_kkrimp_full()
|
[
"p.ruessmann@fz-juelich.de"
] |
p.ruessmann@fz-juelich.de
|
85350cd3891916912a4ea7f31d7e3e9d72b3c3e5
|
ad32805a821fb06bde87a6d05c3d80ae477dc00b
|
/parts/migrations/0008_auto_20200204_0224.py
|
216c76ae8458539d3633edecdce7f9023faeff6c
|
[] |
no_license
|
phrac/maintdx
|
a89a15e4d92f77b91016737a7b410a7579d07c6c
|
583b308568e25da6d99fa6d41150602baa253377
|
refs/heads/master
| 2021-07-01T05:11:51.994071
| 2021-05-14T13:41:20
| 2021-05-14T13:41:20
| 131,013,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
# Generated by Django 3.0.2 on 2020-02-04 02:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parts', '0007_part_on_hand'),
]
operations = [
migrations.AlterField(
model_name='partinventoryitem',
name='current_on_hand',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='partinventoryitem',
name='purchase_quantity',
field=models.PositiveIntegerField(default=0),
),
]
|
[
"158748+phrac@users.noreply.github.com"
] |
158748+phrac@users.noreply.github.com
|
374aa8f21043fa4ad07e5c55dc32e3b41b325b47
|
592be52fb1623947515a8c9a6ab9f17da58366a6
|
/shop_account/views.py
|
d1b7437a13ba203acbc309837291b05e653e441a
|
[] |
no_license
|
mahnazfallah067/them
|
c224921ad27903b2aae79d9b186fe400288899d1
|
b51209a5b524298a53ab6888a61b5e1d4bfd20e2
|
refs/heads/master
| 2023-08-13T11:53:05.800278
| 2021-10-02T18:45:59
| 2021-10-02T18:45:59
| 412,881,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from .form import LoginForms, RegisterForm
from django.contrib.auth import authenticate, login, logout
# Create your views here.
def login_user(request):
if request.user.is_authenticated:
return redirect('/')
login_form = LoginForms(request.POST or None)
if login_form.is_valid():
username = login_form.cleaned_data.get('username')
password = login_form.cleaned_data.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('/')
else:
login_form.add_error('username', 'کاربری با مشخصات وارد شده یافت نشد')
context = {
'login_form': login_form
}
return render(request, 'account/login.html', context)
def register_user(request):
if request.user.is_authenticated:
return redirect('/')
register_form = RegisterForm(request.POST or None)
if register_form.is_valid():
username = register_form.cleaned_data.get('username')
email = register_form.cleaned_data.get('email')
password = register_form.cleaned_data.get('password')
User.objects.create_user(username=username, email=email, password=password)
return redirect('/login')
context = {
'register_form': register_form
}
return render(request, 'account/register.html', context)
def log_out(request):
logout(request)
return redirect('/login')
|
[
"mahnaz.fallah1213@gmail.com"
] |
mahnaz.fallah1213@gmail.com
|
a9fbf6d770010914427a3c36f2748a7edf47a25c
|
4236063269a9989e97d56cea6651ac371e310ed0
|
/gscholar/migrations/0002_auto_20150526_1846.py
|
0daab647349f8d31ab37dc4760f66708be8f448a
|
[
"MIT"
] |
permissive
|
afonari/scholar-scrapy
|
8cddef03b9657adf0148e5a87cb846cd409c92e1
|
2d363789a376e5428971ceaa6b830de326e2b1a3
|
refs/heads/master
| 2021-01-18T04:44:40.307013
| 2015-05-27T14:02:21
| 2015-05-27T14:02:21
| 36,240,290
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gscholar', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='logo',
field=models.CharField(max_length=255, blank=True),
),
migrations.AlterField(
model_name='organization',
name='title',
field=models.CharField(max_length=255, blank=True),
),
]
|
[
"firstname.lastname@gaetch.edu"
] |
firstname.lastname@gaetch.edu
|
73ca58f9efc45c92c45588633643d3a142934c43
|
4ad8443f46a93eb3d4dfe0855ed0860143818a2b
|
/wiki_processor/exobraindata_preprocessor.py
|
e8bcca59479cfaf7aa7af25ef69510b0fd8f247c
|
[] |
no_license
|
delosyCho/Sentio-Web
|
97186c659ca268288b66e8f222bd5ca1427fc304
|
c39493998dd9aa217102e5b9578c98268dd4fa7e
|
refs/heads/master
| 2020-03-30T08:20:57.578484
| 2018-11-08T16:54:30
| 2018-11-08T16:54:30
| 151,008,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,391
|
py
|
import numpy as np
import codecs
import pandas
from functions import *
exo_paragraph = open('exo_paragraph', 'w', encoding='utf-8')
exo_question = open('exo_question', 'w', encoding='utf-8')
exo_label = open('exo_label', 'w', encoding='utf-8')
exo_answers = open('exo_answers', 'w', encoding='utf-8')
paragraph_file = open('wiki_corpus', 'r', encoding='utf-8')
rule_file = open('wiki_info', 'r', encoding='utf-8')
# 정보가 들어있는 텍스트파일
exo_Questions = []
exo_Titles = []
exo_answer_info = []
exo_answer = []
exobrain_data1 = pandas.read_excel('exo1.xlsx')
temp = exobrain_data1['질문']
temp2 = exobrain_data1['위키피디아 제목']
temp3 = exobrain_data1['정답 근거1(문장)']
temp4 = exobrain_data1['정답']
for i in range(len(temp)):
exo_Questions.append(str(temp[i]).replace('?', ''))
exo_Titles.append(str(temp2[i]))
info_str = str(temp3[i])
exo_answer_info.append(preprocess(info_str))
exo_answer.append(preprocess(str(temp4[i])))
exobrain_data1 = pandas.read_excel('exo3.xlsx')
temp = exobrain_data1['질문']
temp2 = exobrain_data1['위키피디아 제목']
temp3 = exobrain_data1['정답 근거1(문장)']
temp4 = exobrain_data1['정답']
for i in range(len(temp)):
exo_Questions.append(str(temp[i]).replace('?', ''))
exo_Titles.append(str(temp2[i]))
info_str = str(temp3[i]).replace('.', '\n')
exo_answer_info.append(preprocess(info_str))
exo_answer.append(preprocess(str(temp4[i])))
print(len(exo_answer))
print(len(exo_answer_info))
print(len(exo_Questions))
print(len(exo_Titles))
exo_data_dictionary = np.array(exo_Titles, dtype='<U20')
exo_dictionary_index = exo_data_dictionary.argsort()
exo_data_dictionary.sort()
paragraphs = paragraph_file.read().split('\a')
count = 0
for i in range(len(exo_answer)):
exo_answers.write(exo_answer[i])
exo_answers.write('\n')
exo_answers.close()
for i, paragraph in enumerate(exo_answer_info):
temp_TK = str(exo_answer_info[i]).split()
TK = str(exo_answer[i]).split()
if len(TK) > 0:
start_word = TK[0]
stop_word = TK[len(TK) - 1]
TK = str(exo_answer_info[i]).split()
start_index = -1
stop_index = -1
for j in range(len(TK)):
a = TK[j].find(start_word)
b = TK[j].find(stop_word)
if a != -1 and start_index == -1:
start_index = j
if b != -1 and stop_index == -1:
stop_index = j
if start_index != -1 and stop_word != -1:
if start_index <= stop_index:
para = str(exo_answer_info[i]).replace(str(exo_answer[i]), '#' + str(i) + '@')
TK = para.split('\n')
for k in range(len(TK)):
exo_paragraph.write(TK[k].strip())
exo_paragraph.write('\n')
exo_paragraph.write('@#!\n')
exo_question.write(exo_Questions[i])
exo_question.write('\a')
exo_label.write(exo_answer[i])
exo_label.write('\a')
count += 1
else:
print('check!!!!!!!!!!')
print(exo_answer_info[i])
print(exo_answer[i])
print(start_index, stop_index)
print('---------------')
print(count)
exo_paragraph.close()
exo_question.close()
exo_label.close()
|
[
"delosycho@gmail.com"
] |
delosycho@gmail.com
|
11d8813586dc9942197cdf6aa7084833e35848d1
|
a12d2d99d6bbb00bf4dabcab8c3efe5ea4714648
|
/DistractedDriverMaskFaces.py
|
6dfccfe249c75a8c993f7a9c25cc269dfce6661e
|
[] |
no_license
|
devyhia/slim-backup
|
01a702f65d4d5a2994cd2fac1ee225646e819fd9
|
26471cf66cf8e8ceff136a692ae6ba36fea90952
|
refs/heads/master
| 2021-01-12T00:14:25.456094
| 2017-10-17T09:54:35
| 2017-10-17T09:54:35
| 78,696,206
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
import pickle
from glob import glob
import numpy as np
from PIL import Image
import Shared
from tqdm import tqdm
import os
with open('/home/devyhia/FaceDetection_CNN/result.pickle') as f:
results = pickle.load(f)
def mask_image(img_path, boxes):
img = Image.open(img_path)
if len(boxes) == 0:
return img
box = boxes[0] # there is only one box per image! (by construction)
x0 = int(np.floor(box[0]))
y0 = int(np.floor(box[1]))
x1 = int(np.ceil(box[2]))
y1 = int(np.ceil(box[3]))
if x0 > 0.5 * 1920: # Face can not be in the right corner of the image (by construction)
return img
mask = np.zeros((1080, 1920, 3))
mask[y0:y1, x0:x1, :] = 1
return Image.fromarray(mask.astype(np.uint8) * img) # <-- Masked Image
img_count = 0
for k in tqdm(results.keys(), total=len(results.keys()), desc="Masking Faces"):
img_path = k.replace('\n', '').replace('.jpg', '.original.jpg') # avoid the \n at the end of each file!
boxes = results[k]
save_path = img_path.replace('.original.jpg', '.face.jpg')
# if os.path.isfile(save_path): continue
masked_img = mask_image(img_path, boxes)
masked_img.save(save_path)
|
[
"devyhia@aucegypt.edu"
] |
devyhia@aucegypt.edu
|
3821e4b7f0ea0366e7b7f21df53b33cf7690def7
|
9a942f1e58f296b68868d777c5351d7dfaa43103
|
/mysite/polls/admin.py
|
1c12987515f57b99d7e9a6f0812fdf0720def0ff
|
[] |
no_license
|
priscilamoreno/proyectofinal
|
570f16afd53df7d9ac24a877e2b4c9f0e1e6e470
|
c40fdb47754eacbe6c7aba606c1706f281b5a16c
|
refs/heads/master
| 2020-08-23T19:21:22.109761
| 2019-10-29T00:53:04
| 2019-10-29T00:53:04
| 216,691,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Choice, Question
class QuestionAdmin(admin.ModelAdmin):
fields = ['pub_date', 'question_text']
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date']}),
]
list_display = ('question_text', 'pub_date','was_published_recently')
admin.site.register(Choice)
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
admin.site.register(Question,QuestionAdmin)
|
[
"prismoreno12@gmail.com"
] |
prismoreno12@gmail.com
|
d58bb0a7506b482ebe8691fb9e5ca33753fc225f
|
51c5f9b4dfeb9b17f451215556c1c80f77373731
|
/LeetCode/firstUniqueCharacterInAString.py
|
a56e50cfae12139b6a45450b8edc331cc15d2f85
|
[] |
no_license
|
rupampatil/InterviewPractice
|
56c4b75b88f9c99092d4351e98e002eadddc19e4
|
e1b2acd592a9255b865c3a7973241e65e78881c4
|
refs/heads/master
| 2021-09-13T06:34:14.188208
| 2018-04-26T01:47:41
| 2018-04-26T01:47:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
index = -1
occuranceMap = {}
counter = 0
for char in s:
if char not in occuranceMap:
occuranceMap[char] = [1, counter]
else:
occuranceMap[char][0] +=1
counter+=1
maxIndex = len(s)
for key, value in occuranceMap.iteritems():
if value[0] == 1 and value[1] < maxIndex:
maxIndex = value[1]
index = value[1]
return index
|
[
"vignesh.palani96@gmail.com"
] |
vignesh.palani96@gmail.com
|
9f9f44f274098ac6cff49f994acfc329089a67fc
|
6169edb3d95a01ccc03a6e8fe9c571267c6f5773
|
/setup.py
|
5865ed91c4583b6e56b7d480f9c59ff6a4a12850
|
[
"MIT"
] |
permissive
|
thatscotdatasci/flask-simple-api
|
95d823e46b7ee0cda485567128823060bf0c42e9
|
e4c23a3d9c0817856e3736da4805a9d727a37058
|
refs/heads/master
| 2020-04-02T04:01:24.551686
| 2018-12-02T17:49:59
| 2018-12-02T17:49:59
| 153,995,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('packages.dat') as f:
packages = f.readlines()
packages = [s.strip() for s in packages]
setup(
name='flaskapi',
version=open('version.txt').read().strip(),
packages=packages,
description='Simple API written in Flask to test various AWS features',
author='Alan Clark',
author_email='alan@thatscotdatasci.com',
url='thatscotdatasci.com',
platforms='linux',
license=open('LICENSE').read(),
long_description=open('README.md').read(),
install_requires=open('requirements.txt').read()
)
|
[
"alan@thatscottishdatascientist.com"
] |
alan@thatscottishdatascientist.com
|
e0f9165cff9cd3483a106ee3bf7fe2a57c111548
|
6632d2c21ad089ef6952422211fd01bf33a9408e
|
/backend/todo_api/todos/migrations/0001_initial.py
|
d5de5a7a539d4425737b2308f0d479ae147c178d
|
[] |
no_license
|
tylarpierson/front-end-to-back-end
|
bc5a46d662abd369e05a60864d1c2b9b94c075fc
|
e2d8da87418544abcf3f9ee9746a96e098d01ddb
|
refs/heads/master
| 2020-03-25T16:09:22.172683
| 2018-08-09T17:02:10
| 2018-08-09T17:02:10
| 143,917,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
# Generated by Django 2.1 on 2018-08-07 20:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
],
),
]
|
[
"tylarpiersonbusiness@gmail.com"
] |
tylarpiersonbusiness@gmail.com
|
c0574a8a5fa7e73892144516652ea7a402e4d49e
|
62f0ed8bbe8f10bd646cc9f8eaaf5a2da20f9241
|
/Persistence/settings.py
|
bdeccb02e414f2142a1c14a2dd63ec91e24b008a
|
[] |
no_license
|
leonardoGarciaOlmos/Persistence
|
3e329366ded3ac4cac41564e48c9048027674529
|
266ff798f70656b580355d689a327807604a9ba9
|
refs/heads/master
| 2022-12-26T15:10:27.612123
| 2020-09-09T02:20:55
| 2020-09-09T02:20:55
| 293,978,223
| 0
| 0
| null | 2020-10-06T01:58:21
| 2020-09-09T02:14:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,077
|
py
|
"""
Django settings for Persistence project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd)&8@ve@x_t7whim9mru42)%8qi4#601r(l_$9jxktej(+8pk='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Persistence.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Persistence.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"leonardogarciaolmos.12@gmail.com"
] |
leonardogarciaolmos.12@gmail.com
|
b5722af8ed32f8e2da48f5c2d6fcd13c8de9701f
|
52d324c6c0d0eb43ca4f3edc425a86cdc1e27d78
|
/scripts/asos/archive_quantity.py
|
9c22be17d7528b94acd44e3f1e30933859ee8315
|
[
"MIT"
] |
permissive
|
deenacse/iem
|
992befd6d95accfdadc34fb7928d6b69d661d399
|
150512e857ca6dca1d47363a29cc67775b731760
|
refs/heads/master
| 2021-02-04T04:20:14.330527
| 2020-02-26T21:11:32
| 2020-02-26T21:11:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
""" Create a simple prinout of observation quanity in the database """
from __future__ import print_function
import sys
import datetime
import numpy as np
from pyiem.util import get_dbconn
class bcolors:
"""Kind of hacky"""
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
def d(hits, total):
"""another hack"""
if total == 0:
return " N/A"
val = hits / float(total)
c1 = bcolors.ENDC
if val > 0.5:
c1 = bcolors.FAIL
return "%s%.2f%s" % (c1, val, bcolors.ENDC)
def main(argv):
"""Go Main Go"""
now = datetime.datetime.utcnow()
counts = np.zeros((120, 12))
mslp = np.zeros((120, 12))
metar = np.zeros((120, 12))
pgconn = get_dbconn("asos", user="nobody")
acursor = pgconn.cursor()
stid = argv[1]
acursor.execute(
"""
SELECT extract(year from valid) as yr,
extract(month from valid) as mo, count(*),
sum(case when mslp is null or mslp < 1 then 1 else 0 end),
sum(case when metar is null or metar = '' then 1 else 0 end)
from alldata WHERE
station = %s GROUP by yr, mo ORDER by yr ASC, mo ASC
""",
(stid,),
)
for row in acursor:
counts[int(row[0] - 1900), int(row[1] - 1)] = row[2]
mslp[int(row[0] - 1900), int(row[1] - 1)] = row[3]
metar[int(row[0] - 1900), int(row[1] - 1)] = row[4]
print("Observation Count For %s" % (stid,))
print("YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC")
output = False
for i in range(120):
year = 1900 + i
if year > now.year:
continue
if not output and np.max(counts[i, :]) == 0:
continue
output = True
if len(argv) < 3:
print(
("%s %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i")
% (
year,
counts[i, 0],
counts[i, 1],
counts[i, 2],
counts[i, 3],
counts[i, 4],
counts[i, 5],
counts[i, 6],
counts[i, 7],
counts[i, 8],
counts[i, 9],
counts[i, 10],
counts[i, 11],
)
)
else:
if argv[2] == "metar":
data = metar
else:
data = mslp
print(
("%s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s")
% (
year,
d(data[i, 0], counts[i, 0]),
d(data[i, 1], counts[i, 1]),
d(data[i, 2], counts[i, 2]),
d(data[i, 3], counts[i, 3]),
d(data[i, 4], counts[i, 4]),
d(data[i, 5], counts[i, 5]),
d(data[i, 6], counts[i, 6]),
d(data[i, 7], counts[i, 7]),
d(data[i, 8], counts[i, 8]),
d(data[i, 9], counts[i, 9]),
d(data[i, 10], counts[i, 10]),
d(data[i, 11], counts[i, 11]),
)
)
if __name__ == "__main__":
main(sys.argv)
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
64be5a82e097dc255667f247d99645295516de35
|
b194a672bfd619a82fb6f46755ab277a249e8f8c
|
/pcdViewer.py
|
ba8fed3078e23da69f1d6910320118e5e2f85ee8
|
[] |
no_license
|
nakawang/3D_VIEWER
|
af968d7ec1de681951fc22d5ea6f36b0dac65c5c
|
eed7187c76e25d3235fe5882ef5f0f8a7990b75d
|
refs/heads/master
| 2020-08-14T19:45:37.307273
| 2019-10-24T00:25:30
| 2019-10-24T00:25:30
| 215,224,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,802
|
py
|
# -*- coding: utf-8 -*-
import vtk,sys,numpy,os
from numpy import random,genfromtxt,size
from PyQt5 import QtCore, QtGui, QtWidgets
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from PyQt5.QtCore import pyqtSlot, QThread
from vtkPointCloud import VtkPointCloud
class PCDviewer(QtWidgets.QFrame):
def __init__(self, parent, dataPath=None):
super(PCDviewer,self).__init__(parent)
self.interactor = QVTKRenderWindowInteractor(self)
self.layout = QtWidgets.QHBoxLayout()
self.layout.addWidget(self.interactor)
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
self.pointCloud = VtkPointCloud()
self.actors = []
if dataPath != None:
self.add_newData(dataPath)
# Renderer
renderer = vtk.vtkRenderer()
renderer.AddActor(self.pointCloud.vtkActor)
#cubeActor = self.addCubeAxesActor(renderer)
#renderer.AddActor(cubeActor)
# Scalar Bar
#renderer.SetBackground(.2, .3, .4)
#colors=vtk.vtkNamedColors()
#colors.SetColor("BkgColor",[179,204,255,255])
#renderer.SetBackground(colors.GetColor3d("BkgColor"))
renderer.ResetCamera()
#renderer.SetLayer(1)
# Render Window
renderWindow = self.interactor.GetRenderWindow()
#renderWindow = vtk.vtkRenderWindow()
print(renderWindow)
#renderWindow.SetNumberOfLayers(2)
renderWindow.AddRenderer(renderer)
#renderWindow.AddRenderer(self.addLogo())
# Interactor
#renderWindowInteractor = vtk.vtkRenderWindowInteractor()
self.interactor.SetRenderWindow(renderWindow)
self.interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
# Scalar Bar
#self.addScalarBar(self.pointCloud.getLUT())
#renderer.AddActor(self.addScalarBar(self.pointCloud.getLUT()))
#renderWindow.SetInteractor(self.interactor)
# Logo
#self.addLogo()
# Begin Interaction
renderWindow.Render()
renderWindow.SetWindowName("XYZ Data Viewer:"+ "xyz")
self.interactor.Start()
#renderWindowInteractor.Start()
# Pack to class
self.renderer=renderer
#self.interactor=interactor
#self.xyzLoader.signalOut.connect(self.addActor)
def start(self):
self.interactor.Start()
def addScalarBar(self,lut):
self.scalarBar = vtk.vtkScalarBarActor()
self.scalarBar.SetOrientationToVertical()
self.scalarBar.SetLookupTable(lut)
self.scalarBar.SetBarRatio(0.12)
self.scalarBar.SetTitleRatio(0.12)
self.scalarBar.SetMaximumWidthInPixels(60)
self.scalarBar.SetMaximumHeightInPixels(300)
print(self.scalarBar.GetProperty().SetDisplayLocationToBackground())
#self.scalarBar.SetDisplayPosition(750,250)
self.scalarBar.SetDisplayPosition(60,200)
textP = vtk.vtkTextProperty()
textP.SetFontSize(10)
self.scalarBar.SetLabelTextProperty(textP)
self.scalarBar.SetTitleTextProperty(textP)
self.scalarBar.SetNumberOfLabels(8)
self.scalarBar.SetLabelFormat("%-#6.3f")#輸出格式
#self.scalarBarWidget = vtk.vtkScalarBarWidget()
#self.scalarBarWidget.SetInteractor(self.interactor)
#self.scalarBarWidget.SetScalarBarActor(self.scalarBar)
#self.scalarBarWidget.On()
self.interactor.Initialize()
return self.scalarBar
def addCubeAxesActor(self,renderer):
cubeAxesActor = vtk.vtkCubeAxesActor()
#設定軸上下限
cubeAxesActor.SetBounds(self.pointCloud.getBounds())
#將RENDER CAMERA指定給軸
cubeAxesActor.SetCamera(renderer.GetActiveCamera())
#設定標題與標籤文字顏色
cubeAxesActor.GetTitleTextProperty(0).SetColor(0.5,0.5,0.5)
cubeAxesActor.GetLabelTextProperty(0).SetColor(0.5,0.5,0.5)
cubeAxesActor.GetTitleTextProperty(1).SetColor(0.5,0.5,0.5)
cubeAxesActor.GetLabelTextProperty(1).SetColor(0.5,0.5,0.5)
cubeAxesActor.GetTitleTextProperty(2).SetColor(0.5,0.5,0.5)
cubeAxesActor.GetLabelTextProperty(2).SetColor(0.5,0.5,0.5)
#設定坐標軸線寬
cubeAxesActor.GetXAxesLinesProperty().SetLineWidth(0.5)
cubeAxesActor.GetYAxesLinesProperty().SetLineWidth(0.5)
cubeAxesActor.GetZAxesLinesProperty().SetLineWidth(0.5)
#開啟網格線
cubeAxesActor.DrawXGridlinesOn()
cubeAxesActor.DrawYGridlinesOn()
cubeAxesActor.DrawZGridlinesOn()
#內部網格線不畫
cubeAxesActor.SetDrawXInnerGridlines(False)
cubeAxesActor.SetDrawYInnerGridlines(False)
cubeAxesActor.SetDrawZInnerGridlines(False)
#網格線顏色
cubeAxesActor.GetXAxesGridlinesProperty().SetColor(0.5,0.5,0.5)
cubeAxesActor.GetYAxesGridlinesProperty().SetColor(0.5,0.5,0.5)
cubeAxesActor.GetZAxesGridlinesProperty().SetColor(0.5,0.5,0.5)
#控制軸的繪製方式(外,最近,最遠,靜態最近,靜態外)
cubeAxesActor.SetFlyMode(0)
#設定刻度線的位置(內,外,兩側)
cubeAxesActor.SetTickLocation(1)
#網格線樣式(所有,最近,最遠)
cubeAxesActor.SetGridLineLocation(2)
cubeAxesActor.XAxisMinorTickVisibilityOff()
cubeAxesActor.YAxisMinorTickVisibilityOff()
cubeAxesActor.ZAxisMinorTickVisibilityOff()
return cubeAxesActor
def add_newData(self,path):
xyz = genfromtxt(path,dtype=float,usecols=[0,1,2])
minH=xyz[:,2].min()
maxH=xyz[:,2].max()
count = len(xyz)
pcd=VtkPointCloud(minH,maxH,count)
pcd.clearPoints()
for k in range(size(xyz,0)):
point = xyz[k]
pcd.addPoint(point)
self.pointCloud = pcd
self.__addActor()
def __addActor(self):
lastActor=self.renderer.GetActors().GetLastActor()
if lastActor:
self.renderer.RemoveActor(lastActor)
actor=self.pointCloud.vtkActor
#set uniform color
#actor.GetMapper().ScalarVisibilityOff()
#actor.GetProperty().SetColor(1.0,0.0,0.0)
#actor.GetProperty().SetPointSize(4)
print("set actor color")
self.renderer.AddActor(actor)
self.refresh_renderer()
def __removeAll(self):
actors = self.renderer.GetActors()
print(actors)
if len(actors)>0:
for i in actors:
self.renderer.RemoveActor(i)
def refresh_renderer(self):
render_window=self.interactor.GetRenderWindow()
self.renderer.ResetCamera()
render_window.Render()
if __name__=="__main__":
print("PCD viewer")
|
[
"nakawang@benanos.com"
] |
nakawang@benanos.com
|
0f0ac7131d34ccf128488c79b8bcfe76a6563d3b
|
d9eee05d8b4b7c7eadc3f1a4b5f7c4c2ceb34640
|
/Blog_app/Blog_app/wsgi.py
|
b2990117ca40734ad4f7f1b3ba0d3dbabd1788c0
|
[] |
no_license
|
Balarubinan/Y_Blog_Django
|
42713bd4c1e49db2d428a5cf11ba49f010a0abcf
|
6ff0d1e7b22edb47446ad4c0f64b01de5ecefaa3
|
refs/heads/master
| 2023-05-29T20:52:33.299987
| 2021-06-18T01:27:05
| 2021-06-18T01:27:05
| 371,956,802
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for Blog_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Blog_app.settings')
application = get_wsgi_application()
|
[
"sudhagouthirubi@gmail.com"
] |
sudhagouthirubi@gmail.com
|
21930607b7f52c05a07d0c7707febfa51400b0ab
|
553ff32aab110184fc4ed8eb709a99e03e37b9f7
|
/wxglade/egy/egy.py~
|
319c86196187e4c4400f55d90951044eaa8e1a7e
|
[] |
no_license
|
janos01/esti2020Python
|
21201e07e7c0f8a93fd7c5bc420d6b10021b5d51
|
6617370c715e309614e8b4808e12ffe3b0130562
|
refs/heads/main
| 2023-04-15T10:01:56.332714
| 2021-04-28T21:48:03
| 2021-04-28T21:48:03
| 302,071,533
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,458
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 1.0.0a9 on Wed Dec 16 18:20:18 2020
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MainFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MainFrame.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((400, 300))
self.SetTitle("frame")
self.panel_1 = wx.Panel(self, wx.ID_ANY)
sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.button_1 = wx.Button(self.panel_1, wx.ID_ANY, "button_1")
sizer_1.Add(self.button_1, 0, wx.ALL, 5)
self.button_2 = wx.Button(self.panel_1, wx.ID_ANY, "button_2")
sizer_1.Add(self.button_2, 0, wx.ALL, 5)
self.text_ctrl_1 = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
sizer_1.Add(self.text_ctrl_1, 0, wx.ALL, 5)
label_1 = wx.StaticText(self.panel_1, wx.ID_ANY, u"Első wxGlade")
sizer_1.Add(label_1, 0, wx.ALL, 5)
self.panel_1.SetSizer(sizer_1)
self.Layout()
# end wxGlade
# end of class MainFrame
class EgyApp(wx.App):
def OnInit(self):
self.frame = MainFrame(None, wx.ID_ANY, "")
self.SetTopWindow(self.frame)
self.frame.Show()
return True
# end of class EgyApp
if __name__ == "__main__":
app = EgyApp(0)
app.MainLoop()
|
[
"termih@gmail.com"
] |
termih@gmail.com
|
|
668367219c3778b58c7a11e35d4dd7933fc53e37
|
6240aaa218bacab8099e13f95c37faff0ff9f074
|
/scripts/inject-document-props-as-overload.py
|
1b7d302fbf1e8c24229df496e932c0589c583689
|
[
"Apache-2.0"
] |
permissive
|
florian-hoenicke/jina
|
5f5a7b38641a1cbe1018bfeab17b22a00c0f16f8
|
52cd3074b65caec7a370386ec5a5f87ad7b0133d
|
refs/heads/master
| 2023-07-21T03:17:14.676658
| 2021-08-29T15:42:32
| 2021-08-29T15:42:32
| 309,146,742
| 1
| 0
|
Apache-2.0
| 2021-08-29T18:26:30
| 2020-11-01T17:11:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,667
|
py
|
import inspect
import re
import warnings
from operator import itemgetter
from typing import Optional, Tuple, List
from jina import Document
def get_properties(cls) -> List[Tuple[str, Optional[str], Optional[str]]]:
src = inspect.getsource(cls)
members = dict(inspect.getmembers(cls))
setters = re.findall(
r'@[a-zA-Z0-9_]+\.setter\s+def\s+([a-zA-Z0-9_]+)\s*\(self,\s*[a-zA-Z0-9_]+\s*:\s*(.*?)\)',
src,
flags=re.DOTALL,
)
property_docs = []
for setter, _ in setters:
if setter not in members:
warnings.warn(
f'{setter} is found as a setter but there is no corresponding getter'
)
property_docs.append(None)
else:
doc = inspect.getdoc(members[setter])
description = next(iter(re.findall(':return:(.*)', doc)), None)
if description:
description = description.strip()
property_docs.append(description)
return sorted(
list(
zip(map(itemgetter(0), setters), map(itemgetter(1), setters), property_docs)
),
key=lambda x: x[0],
)
def get_overload_signature(
    properties,
    indent=' ' * 4,
):
    """Render an ``@overload``-decorated ``__init__`` stub for *properties*.

    :param properties: iterable of ``(name, type_hint, description)`` tuples
    :param indent: one indentation level (four spaces by default)
    :return: decorator + signature + docstring text, indented for a class body
    """
    pad = indent * 2
    arg_lines = []
    param_lines = []
    for prop_name, hint, desc in properties:
        arg_lines.append(f'{pad}{prop_name}: Optional[{hint}] = None')
        param_lines.append(f'{pad}:param {prop_name}: {desc}')
    # Trailing **kwargs catch-all, mirrored by a :param kwargs: doc line.
    arg_lines.append(f'{pad}**kwargs')
    param_lines.append(
        f'{pad}:param kwargs: other parameters to be set _after_ the document is constructed'
    )
    args_str = ', \n'.join(arg_lines)
    doc_str = '\n'.join(param_lines)
    signature = f'def __init__(\n{pad}self,\n{args_str}\n{indent}):'
    return f'@overload\n{indent}{signature}\n{pad}"""\n{doc_str}\n{pad}"""'
def write_signature(
    cls,
    overload_signature,
    tag,
    indent=' ' * 4,
):
    """Splice *overload_signature* into the source file of *cls*.

    Replaces everything between the ``# overload_inject_start_<tag>`` and
    ``# overload_inject_end_<tag>`` markers in the file that defines *cls*.

    :param cls: class whose defining file is rewritten in place
    :param overload_signature: text produced by ``get_overload_signature``
    :param tag: marker suffix identifying the injection site
    :param indent: indentation applied to the injected block
    """
    filepath = inspect.getfile(cls)
    # Read inside a context manager so the handle is closed deterministically
    # (the original `open(filepath).read()` relied on GC to close it).
    with open(filepath) as fp:
        content = fp.read()
    final_code = re.sub(
        rf'(# overload_inject_start_{tag}).*(# overload_inject_end_{tag})',
        f'\\1\n{indent}{overload_signature}\n{indent}\\2',
        content,
        0,
        re.DOTALL,
    )
    with open(filepath, 'w') as fp:
        fp.write(final_code)
def inject_properties_as_overload(cls):
    """Regenerate the overload stub for *cls* and write it into its source file."""
    properties = get_properties(cls)
    overload_signature = get_overload_signature(properties)
    write_signature(cls, overload_signature, 'document')
    # Echo the modified file path so the caller / CI log shows what was touched.
    print(inspect.getfile(cls))


if __name__ == '__main__':
    inject_properties_as_overload(Document)
|
[
"noreply@github.com"
] |
noreply@github.com
|
900089348be19cdb331a0375bc31ff95224dd942
|
d8c006e858be14eb595cdfbe702d1a29f1262e35
|
/qa_community/urls.py
|
d159479338e3daf1c5c1dea4e5808477e8cd04ae
|
[] |
no_license
|
xautxuqiang/qa_community
|
96dc1211b3a254dd2faac44571c474d48bb75059
|
385244735e6deb3e36c9340ea5b2d08f74ea25db
|
refs/heads/master
| 2021-01-20T00:21:40.329485
| 2017-04-23T08:54:15
| 2017-04-23T08:54:15
| 89,118,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
"""qa_community URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from question.views import index,profile,settings
urlpatterns = [
    url(r'^$', index, name='index'),
    # Profile page keyed by the player's numeric id (unnamed capture group).
    url(r'^people/(\d+)/$', profile, name='profile'),
    url(r'^settings/$', settings, name='settings'),
    url(r'^admin/', admin.site.urls),
    # Delegate all /accounts/ URLs to the users app.
    url(r'^accounts/', include('users.urls')),
]
|
[
"xautxuqiang@126.com"
] |
xautxuqiang@126.com
|
d7c5e0e63a5da66fb40e2613241a31fe1a99a12e
|
2418e4c4f640c48708c12c3058f8e16144856a0c
|
/problem0016.py
|
d0c86a69bd091b9f305da0b8e4390291e4127b61
|
[] |
no_license
|
sergii-yatsuk/projecteuler
|
482eb32d0cc71ea5dbcaaf3bb1bca7ef575f799b
|
94918d0d77c0b1b4f2c28af8d241e1c2fd8424f4
|
refs/heads/master
| 2021-05-26T18:32:11.616693
| 2013-01-10T14:59:00
| 2013-01-10T14:59:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
# 2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
#
# What is the sum of the digits of the number 2^1000?

# int(digit) replaces eval(digit): eval is unnecessary and unsafe here,
# and the accumulator no longer shadows the builtin `sum`.
digit_sum = sum(int(digit) for digit in str(2 ** 1000))
print(digit_sum)
|
[
"tifon@bigmir.net"
] |
tifon@bigmir.net
|
c4ec45f547704f0827cbcb4543ad433d679ff606
|
da50dccf17b01c370a3e31a27f2a813be1dd74ba
|
/2.RL_Codes/6.Oct16th-WeightedInterpolation/RL_SISO_Linea2.py
|
faf0b67afd08b5daee6c439f100c5110e081553c
|
[] |
no_license
|
Tang-08080103/Research
|
13d297749657bf572ad3258fb507305afb4e2311
|
814e1054ed43384d99e3be7085cbffbed93f4dd1
|
refs/heads/master
| 2022-04-12T04:37:32.171533
| 2019-10-29T23:36:52
| 2019-10-29T23:36:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,482
|
py
|
import numpy as np
import sys
sys.path.insert(0, '/home/rui/Documents/RL_vs_MPC/Models')
sys.path.insert(0, '/home/rui/Documents/RL_vs_MPC/Modules')
from RL_Module import ReinforceLearning
from Linear_System import LinearSystem
"""
Define reward function for RL. User defines the reward function structure. The below is an example.
"""
def simulation():
    """Run one Q-learning episode against the SISO linear plant.

    Loads pre-trained Q/T/NT matrices from the working directory, steps the
    plant for ``model.Nsim`` samples using UCB action selection with weighted
    interpolation, and performs the delayed feedback update.

    :return: ``(model, rl, rlist)`` — the plant model, the RL agent, and the
        per-episode cumulative-reward list
    """
    # Model Initiation
    model = LinearSystem(nsim=100, model_type='SISO', x0=np.array([0.5]), u0=np.array([1]), xs=np.array([5]),
                         us=np.array([10]), step_size=0.2)
    # model = LinearSystem(nsim=100, model_type='MIMO', x0=np.array())

    # Reinforcement Learning Initiation
    rl = ReinforceLearning(discount_factor=0.95, states_start=300, states_stop=340, states_interval=0.5,
                           actions_start=-15, actions_stop=15, actions_interval=2.5, learning_rate=0.5,
                           epsilon=0.2, doe=1.2, eval_period=1)

    """
    Example of user defined states and actions. Users do not need to do this. This is only if users want to define
    their own states and actions. RL will automatically populate states and actions if user does not input their own.
    """
    # Non-uniform state grid: finer resolution below the set point.
    states = np.zeros([27])
    states[0:12] = np.linspace(0, 2.5, 12)
    states[12:27] = np.linspace(3, 8, 15)
    rl.user_states(list(states))

    # actions = np.zeros([20])
    # actions[0:5] = np.linspace(290, 298, 5)
    actions = np.linspace(5, 15, 16)
    # actions[30:35] = np.linspace(302, 310, 5)
    rl.user_actions(list(actions))

    """
    Load pre-trained Q, T and NT matrices
    """
    q = np.loadtxt("Q_Matrix.txt")
    t = np.loadtxt("T_Matrix.txt")
    nt = np.loadtxt("NT_Matrix.txt")
    rl.user_matrices(q, t, nt)

    """
    Simulation portion
    """
    rlist = []  # cumulative reward per episode

    for episode in range(1):
        # Reset the model after each episode
        model.reset(random_init=False)
        tot_reward = 0
        state = 0
        action_index = 0

        # NOTE(review): this loop variable `t` shadows the T matrix loaded above.
        for t in range(1, model.Nsim + 1):
            """
            Disturbance
            """
            # if t % 30 == 0:
            #     model.x[t - 1] += np.random.uniform(-5, 3)

            """
            RL Evaluate
            """
            if t % rl.eval_period == 0:
                state, action = rl.ucb_action_selection(model.x[t - 1, 0])
                action, action_index = rl.action_selection(state, action, model.u[t - 1, 0], no_decay=25,
                                                           ep_greedy=False, time=t,
                                                           min_eps_rate=0.5)
                # Use interpolation to perform action
                action = rl.interpolation(model.x[t - 1, 0])
            else:
                action = model.u[t - 1, :][0]

            next_state, reward, done, info = model.step([action], t, obj_function="MPC")

            """
            Feedback evaluation
            """
            if t == rl.eval_feedback:
                rl.matrix_update(action_index, reward, state, model.x[t, 0], 5)

            tot_reward = tot_reward + reward

        rlist.append(tot_reward)
        rl.autosave(episode, 250)

        if episode % 100 == 0:
            print(model.cost_function(transient_period=120))

    return model, rl, rlist


if __name__ == "__main__":
    env, RL, rList = simulation()
|
[
"Rnian@ualberta.ca"
] |
Rnian@ualberta.ca
|
fa29d9535df03bd0c2da27a460344a114a8b1395
|
58fc6705ce0422a1cc1d597aac41501600d5328c
|
/Python/113_PathSum2.py
|
e14f8818e68b55e0ef4632200285b9ed16b2df47
|
[] |
no_license
|
CollinErickson/LeetCode
|
d8963e6e6f34863148aa0a724adc783413ad1297
|
d9159ba7ebd14daec994380f3d4361777053ea67
|
refs/heads/master
| 2022-03-03T19:55:55.003619
| 2022-02-22T00:29:13
| 2022-02-22T00:29:13
| 90,581,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
class TreeNode(object):
    """Binary-tree node; repr shows the left subtree in () and the right in []."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

    def __repr__(self):
        parts = [str(self.val)]
        if self.left is not None:
            parts.append("(%s)" % (self.left,))
        if self.right is not None:
            parts.append("[%s]" % (self.right,))
        return "".join(parts)
class Solution(object):
    def pathSum(self, root, sum):
        """Return every root-to-leaf path whose values add up to `sum`.

        :type root: TreeNode
        :type sum: int
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        # A leaf contributes a single-node path iff its value matches.
        if root.left is None and root.right is None:
            return [[root.val]] if sum == root.val else []
        remaining = sum - root.val
        paths = []
        # Left subtree first, then right — preserves the original ordering.
        for child in (root.left, root.right):
            if child is not None:
                for tail in self.pathSum(child, remaining):
                    paths.append([root.val] + tail)
        return paths
# Ad-hoc smoke test: builds the LeetCode 113 example tree
#         5
#        / \
#       4   8
#      /   / \
#     11  13  4
#    / \     / \
#   7   2   5   1
sol = Solution()
s = []
n1 = TreeNode(5)
n2 = TreeNode(4)
n3 = TreeNode(8)
n4 = TreeNode(11)
#n5 = TreeNode(5)
n6 = TreeNode(13)
n7 = TreeNode(4)
n8 = TreeNode(7)
n9 = TreeNode(2)
n10 = TreeNode(1)
n11 = TreeNode(5)
n1.left = n2
n1.right = n3
n2.left = n4
n3.left = n6
n3.right = n7
n4.left = n8
n4.right = n9
n7.right = n10
n7.left = n11
# The trailing True/False literals are the expected "a path exists" answers,
# printed alongside the actual result for manual eyeballing.
print(n1, sol.pathSum(n1, 22), True)
print(n1, sol.pathSum(n1, 23), False)
print(n10, sol.pathSum(n10, 1), True)
print(n10, sol.pathSum(n10, 2), False)
|
[
"collinberickson@gmail.com"
] |
collinberickson@gmail.com
|
8ddd254592f7edae681a46dc57c7889ff7b0ec7a
|
65d5b8c6c928fd26015e1af4b9b5c644f28f04e2
|
/Comp307v3/TeraChess/views.py
|
e4eb6e54125a42961a13560a7dec6dcfabe6a7ec
|
[] |
no_license
|
307Project2018/The-Project
|
3fd66f1776ea0ea6026c7f880e678c9f829f6ce7
|
ba850f7d6f82bca034e53778e6ecea2d20c246e4
|
refs/heads/master
| 2020-04-04T11:16:37.294196
| 2018-12-11T16:59:22
| 2018-12-11T16:59:22
| 155,884,992
| 0
| 0
| null | 2018-12-10T21:39:53
| 2018-11-02T15:24:45
|
Python
|
UTF-8
|
Python
| false
| false
| 25,631
|
py
|
from django.shortcuts import render, redirect
from .models import PieceSet, Player, PieceInstance, BoardInstance, Cell
from django.http import Http404
from django.views.generic.edit import CreateView
from django.views.generic import View
from .forms import UserForm, PieceSetForm, PieceInstanceForm, BoardForm, SecondPlayerForm, MoveForm
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
import random
def account(request):
return render(request, 'TeraChess/html/account.html')
def build(request):
return render(request, 'TeraChess/html/build.html')
def collection(request):
    """Render the signed-in player's piece-set collection.

    Anonymous users get an empty context (the template shows no sets).
    """
    context = {}
    if request.user.is_authenticated:
        profile = request.user.profile
        all_pieces = PieceSet.objects.filter(player=profile)
        context = {
            'profile': profile,
            'all_pieces': all_pieces
        }
    return render(request, 'TeraChess/html/collection.html', context)
def gameUI(request):
return render(request, 'TeraChess/html/gameUI.html')
def index(request):
return render(request, 'TeraChess/html/index.html')
def learn(request):
return render(request, 'TeraChess/html/learn.html')
def loginSignUp(request):
context = {}
if request.user.is_authenticated:
profile = request.user.profile
all_pieces = PieceSet.objects.filter(player=profile)
context = {
'profile': profile,
'all_pieces': all_pieces
}
return render(request, 'TeraChess/html/loginSignUp.html', context)
def pieces(request, piece_set_id):
try:
current_set = PieceSet.objects.get(pk=piece_set_id)
my_pieces = current_set.pieceinstance_set.all()
except PieceSet.DoesNotExist:
raise Http404("PieceSet does not exist")
return render(request, 'TeraChess/html/pieces.html', {'pieces': my_pieces})
def piecesetupdate(request, piece_set_id):
current_set = PieceSet.objects.get(pk=piece_set_id)
all_sets = PieceSet.objects.filter(player=request.user.profile)
for set in all_sets:
set.main = False
set.save()
current_set.main = True
current_set.save()
return render(request, 'TeraChess/html/index.html')
def piece_details(request, piece_id):
current_piece = PieceInstance.objects.get(pk=piece_id)
return render(request, 'TeraChess/html/piece_details.html', {'piece_instance': current_piece})
def play(request):
return render(request, 'TeraChess/html/play.html')
def template(request):
return render(request, 'TeraChess/html/template.html')
class PieceSetCreate(CreateView):
model = PieceSet
fields = ['name', 'player']
def PieceSetDelete(request, piece_set_id):
piece_set = PieceSet.objects.get(pk=piece_set_id)
piece_set.delete()
return render(request, 'TeraChess/html/pieceset_confirm_delete.html', {'piece_set':piece_set})
def deletePieceSet(request):
context = {}
if request.user.is_authenticated:
profile = request.user.profile
all_pieces = PieceSet.objects.filter(player=profile)
context = {
'profile': profile,
'all_pieces': all_pieces
}
return render(request, 'TeraChess/html/delete_pieceset.html', context)
class PieceInstanceCreate(CreateView):
model = PieceInstance
fields = ['name', 'order', 'piece', 'piece_set']
class PieceInstanceFormView(View):
form_class = PieceInstanceForm
template_name = 'TeraChess/pieceinstance_form.html'
def get(self, request):
form = self.form_class(user=request.user)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST, user=request.user)
if form.is_valid():
name = form.cleaned_data['name']
order = form.cleaned_data['order']
piece = form.cleaned_data['piece']
piece_set = form.cleaned_data['piece_set']
front = piece.front
PieceInstance.objects.create(name=name, order=order, piece=piece, piece_set=piece_set, front=front)
if request.user.is_authenticated:
return redirect('TeraChess/index')
return render(request, self.template_name, {'form': form})
def get_form_kwargs(self):
kwargs = super(PieceInstanceFormView, self).get_form_kwargs()
kwargs.update({'user': self.request.user})
return kwargs
class PieceSetFormView(View):
form_class = PieceSetForm
template_name = 'TeraChess/pieceset_form.html'
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
PieceSet.objects.create(player=request.user.profile, name=name)
if request.user.is_authenticated:
return redirect('TeraChess/index')
return render(request, self.template_name, {'form': form})
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
class BoardFormView(View):
template_name = 'TeraChess/boardinstance_form.html'
form_class = BoardForm
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
my_rand = random.randint(0, 11)
if form.is_valid():
game_id = form.cleaned_data['game_id']
current_player = request.user.profile
if my_rand <= 4:
board = BoardInstance.objects.create(player1=current_player, game_id=game_id, white_player=current_player)
else:
board = BoardInstance.objects.create(player1=current_player, game_id=game_id, black_player=current_player)
for i in range(0, 8):
for j in range(0, 8):
board.cell_set.add(Cell.objects.create(x_coord=j, y_coord=i))
if request.user.is_authenticated:
return redirect('TeraChess/index')
return render(request, self.template_name, {'form': form})
class SecondPlayer(View):
form_class = SecondPlayerForm
template_name = 'TeraChess/secondplayer_form.html'
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
game_id = form.cleaned_data['game_id']
current_player = request.user.profile
board = BoardInstance.objects.get(game_id=game_id)
board.player2 = current_player.username
if board.white_player:
board.black_player = current_player.username
else:
board.white_player = current_player.username
board.save()
if request.user.is_authenticated:
return redirect('TeraChess/index')
return render(request, self.template_name, {'form': form})
class UserFormView(View):
form_class = UserForm
template_name = 'TeraChess/registration_form.html'
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.username = username
user.set_password(password)
user.save()
Player.objects.create(user=user, username=username)
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('TeraChess/index')
return render(request, self.template_name, {'form': form})
def logoutview(request):
if request.user.is_authenticated:
logout(request)
return redirect('TeraChess/index')
def loginview(request):
if request.user.is_authenticated:
login(request)
return redirect('TeraChess/collection')
def displayGames(request):
context = {}
if request.user.is_authenticated:
player = request.user.profile
games_player1 = BoardInstance.objects.filter(player1=player.username)
games_player2 = BoardInstance.objects.filter(player2=player.username)
context = {
'games_player1': games_player1,
'games_player2': games_player2
}
return render(request, 'TeraChess/html/my_games.html', context)
# Refactored: the 64 hand-written per-cell lookups (which the original author
# apologized for) are replaced by loops that build the same 'cell_XY' keys.
def gametime(request, game_id):
    """Place both players' main piece sets on the board and render the game UI.

    Rows 0-1 receive the white set and rows 6-7 the black set (the back rank
    uses each piece's non-front variant); rows 2-5 are cleared. The context
    exposes one ``cell_XY`` entry per square for the template.
    """
    context = {}
    if request.user.is_authenticated:
        game = BoardInstance.objects.get(game_id=game_id)
        white_player = Player.objects.get(username=game.white_player)
        black_player = Player.objects.get(username=game.black_player)
        white_player_set = PieceSet.objects.get(player=white_player, main=True).pieceinstance_set
        black_player_set = PieceSet.objects.get(player=black_player, main=True).pieceinstance_set

        # White pieces: row 0 = back rank (non-front), row 1 = front pieces.
        for i in range(0, 8):
            for j in range(0, 2):
                is_front = (j == 1)
                cell_piece = white_player_set.get(order=i, front=is_front)
                cell_piece.picture = cell_piece.piece.picture_white
                cell_piece.save()
                cell = Cell.objects.get(x_coord=i, y_coord=j, board=game)
                cell.piece = cell_piece
                cell.is_null = False
                cell.save()

        # Middle of the board starts empty.
        for i in range(0, 8):
            for j in range(2, 6):
                cell = Cell.objects.get(x_coord=i, y_coord=j, board=game)
                cell.is_null = True
                cell.save()

        # Black pieces: row 6 = front pieces, row 7 = back rank (non-front).
        for i in range(0, 8):
            for j in range(6, 8):
                is_front = (j == 6)
                cell_piece = black_player_set.get(order=i, front=is_front)
                cell_piece.picture = cell_piece.piece.picture_black
                cell_piece.save()
                cell = Cell.objects.get(x_coord=i, y_coord=j, board=game)
                cell.piece = cell_piece
                cell.is_null = False
                cell.save()

        context = {
            'game_id': game_id,
            'white_player_set': white_player_set,
            'black_player_set': black_player_set,
            'player1': game.player1,
            'player2': game.player2,
            'cells': game.cell_set,
        }
        # Same 'cell_XY' keys the template expects, built in one pass.
        for x in range(8):
            for y in range(8):
                context['cell_{}{}'.format(x, y)] = Cell.objects.get(x_coord=x, y_coord=y, board=game)
    return render(request, 'TeraChess/html/gameUI.html', context)
class NextMoveFormView(View):
    """Handle a move submission: validate, apply it to the board, redirect to the game."""
    form_class = MoveForm
    template_name = 'TeraChess/gameUI_form.html'

    def get(self, request):
        """Show an empty move form."""
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        """Apply the submitted move to the board, then redirect to the game view.

        Removed the unused ``user = form.save(commit=False)`` local from the
        original — its result was never read.
        """
        form = self.form_class(request.POST)
        if form.is_valid():
            x_old = form.cleaned_data['x_coord_old']
            y_old = form.cleaned_data['y_coord_old']
            x_new = form.cleaned_data['x_coord_new']
            y_new = form.cleaned_data['y_coord_new']
            game_id = form.cleaned_data['game_id']
            game = BoardInstance.objects.get(game_id=game_id)
            old_cell = game.cell_set.get(x_coord=x_old, y_coord=y_old)
            new_cell = game.cell_set.get(x_coord=x_new, y_coord=y_new)

            # TODO: real chess-move validation; currently every move is legal.
            def is_valid_move(prev_cell, next_cell):
                return True

            if is_valid_move(old_cell, new_cell):
                # Move the piece: occupy the destination, vacate the origin.
                new_cell.piece = old_cell.piece
                new_cell.is_null = False
                new_cell.save()
                old_cell.is_null = True
                old_cell.save()

            if request.user.is_authenticated:
                return redirect('ViewGame/' + str(game_id))
        return render(request, self.template_name, {'form': form})
def viewgame(request, game_id):
    """Render the current state of game *game_id* in the game UI.

    Builds the same 64 ``cell_XY`` context keys as ``gametime`` — one per
    board square — via a loop instead of 64 hand-written lookups; also drops
    the dead ``next_page`` local from the original.
    """
    context = {}
    game = BoardInstance.objects.get(game_id=game_id)
    if request.user.is_authenticated:
        context = {'game_id': game_id}
        for x in range(8):
            for y in range(8):
                context['cell_{}{}'.format(x, y)] = Cell.objects.get(x_coord=x, y_coord=y, board=game)
    return render(request, 'TeraChess/html/gameUI.html', context)
|
[
"44643868+oblivionmasta@users.noreply.github.com"
] |
44643868+oblivionmasta@users.noreply.github.com
|
8390f315350a7ef693bf2bcfdc6e4036a4cc0c15
|
c968e2d6e6e6ce33ed3a32f7f458117495627856
|
/chessBoard.py
|
e7f21d88521d0d33f730a83ba064b9aae25f1611
|
[] |
no_license
|
cjeongmin/AD_Project
|
e86c000cc50c04ee5d31b1d8a73f96fb733a0859
|
9293e4870b2e2eeb7f4e8e8ca82a93d8eda806c9
|
refs/heads/master
| 2020-09-09T23:25:59.257906
| 2019-12-19T07:45:49
| 2019-12-19T07:45:49
| 221,595,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,302
|
py
|
import os, sys, copy
sys.path.insert(0, os.path.dirname(os.path.abspath(__package__))+"/chess")
from chess.pieces.pawn import Pawn
from chess.pieces.bishop import Bishop
from chess.pieces.knight import Knight
from chess.pieces.rook import Rook
from chess.pieces.queen import Queen
from chess.pieces.king import King
from chess.position import Position
from chess.team import Team
from chess.check import *
# Standard chess starting position.  The board is an 8x8 list-of-lists
# indexed [row][column]; row 0 is BLACK's back rank, row 7 is WHITE's.
# Each piece is constructed with Position(x, y) matching its cell.
chessBoard_init = [
    [
        Rook(Position(0, 0), Team.BLACK), Knight(Position(1, 0), Team.BLACK), Bishop(Position(2, 0), Team.BLACK), Queen(Position(3, 0), Team.BLACK), King(Position(4, 0), Team.BLACK), Bishop(Position(5, 0), Team.BLACK), Knight(Position(6, 0), Team.BLACK), Rook(Position(7, 0), Team.BLACK)
    ],
    [
        Pawn(Position(x, 1), Team.BLACK) for x in range(8)
    ],
    # Rows 2-5 are empty at the start of a game.
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(8)],
    [
        Pawn(Position(x, 6), Team.WHITE) for x in range(8)
    ],
    [
        Rook(Position(0, 7), Team.WHITE), Knight(Position(1, 7), Team.WHITE), Bishop(Position(2, 7), Team.WHITE), Queen(Position(3, 7), Team.WHITE), King(Position(4, 7), Team.WHITE), Bishop(Position(5, 7), Team.WHITE), Knight(Position(6, 7), Team.WHITE), Rook(Position(7, 7), Team.WHITE)
    ],
]
# Companion check/attack board for the initial position, computed from
# WHITE's perspective.  fillCheckBoard returns a tuple; element 0 is the
# board itself.  (Semantics of the remaining elements are defined in
# chess.check — not visible here.)
checkBoard_init = fillCheckBoard(chessBoard_init, Team.WHITE)[0]
# Test fixture: a position intended to exercise checkmate detection.
# Relative to the initial setup, BLACK's queen has advanced to (7, 4) and
# several pawns have moved/been removed.  NOTE(review): whether this is a
# true mate depends on the engine's rules — verify against the check module.
chessBoard_checkMate = [
    [
        Rook(Position(0, 0), Team.BLACK), Knight(Position(1, 0), Team.BLACK), Bishop(Position(2, 0), Team.BLACK), None, King(Position(4, 0), Team.BLACK), Bishop(Position(5, 0), Team.BLACK), Knight(Position(6, 0), Team.BLACK), Rook(Position(7, 0), Team.BLACK)
    ],
    [
        Pawn(Position(x, 1), Team.BLACK) if x != 4 else None for x in range(8)
    ],
    [None if x != 4 else Pawn(Position(x, 2), Team.BLACK) for x in range(8)],
    [None for _ in range(8)],
    # BLACK queen on the h-file (x == 7) delivering the attack.
    [None if x != 6 else Pawn(Position(x, 4), Team.WHITE) for x in range(7)] + [Queen(Position(7, 4), Team.BLACK)],
    [None if x != 5 else Pawn(Position(x, 5), Team.WHITE) for x in range(8)],
    [
        Pawn(Position(x, 6), Team.WHITE) if x != 5 and x != 6 else None for x in range(8)
    ],
    [
        Rook(Position(0, 7), Team.WHITE), Knight(Position(1, 7), Team.WHITE), Bishop(Position(2, 7), Team.WHITE), Queen(Position(3, 7), Team.WHITE), King(Position(4, 7), Team.WHITE), Bishop(Position(5, 7), Team.WHITE), Knight(Position(6, 7), Team.WHITE), Rook(Position(7, 7), Team.WHITE)
    ],
]
# Check/attack board for the checkmate fixture, from WHITE's perspective.
checkBoard_checkMate = fillCheckBoard(chessBoard_checkMate, Team.WHITE)[0]
# Test fixture: a sparse endgame position intended to exercise stalemate
# detection.  WHITE has only a king and bishop in the corner; BLACK has a
# king and a rook pinning the corner file.
chessBoard_staleMate = [
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None if x != 0 else King(Position(x, 5), Team.BLACK) for x in range(8)],
    [None for _ in range(8)],
    [King(Position(0, 7), Team.WHITE), Bishop(Position(1, 7), Team.WHITE)] + [None for _ in range(5)] + [Rook(Position(7, 7), Team.BLACK)],
]
# Check/attack board for the stalemate fixture, from WHITE's perspective.
checkBoard_staleMate = fillCheckBoard(chessBoard_staleMate, Team.WHITE)[0]
# Test fixture: a position intended to exercise (non-mate) check detection.
# BLACK's queen stands next to a WHITE pawn in the middle of the board.
chessBoard_check = [
    [
        Rook(Position(0, 0), Team.BLACK), Knight(Position(1, 0), Team.BLACK), Bishop(Position(2, 0), Team.BLACK), None, King(Position(4, 0), Team.BLACK), Bishop(Position(5, 0), Team.BLACK), Knight(Position(6, 0), Team.BLACK), Rook(Position(7, 0), Team.BLACK)
    ],
    [
        Pawn(Position(x, 1), Team.BLACK) if x != 3 else None for x in range(8)
    ],
    [None for _ in range(8)],
    [None for _ in range(8)],
    # WHITE pawn at (4, 4), BLACK queen at (5, 4).
    [None for _ in range(3)] + [Pawn(Position(4, 4), Team.WHITE), Queen(Position(5, 4), Team.BLACK)] + [None for _ in range(3)],
    [None for _ in range(8)],
    [
        Pawn(Position(x, 6), Team.WHITE) if x != 3 and x != 4 else None for x in range(8)
    ],
    [
        Rook(Position(0, 7), Team.WHITE), Knight(Position(1, 7), Team.WHITE), Bishop(Position(2, 7), Team.WHITE), Queen(Position(3, 7), Team.WHITE), King(Position(4, 7), Team.WHITE), Bishop(Position(5, 7), Team.WHITE), Knight(Position(6, 7), Team.WHITE), Rook(Position(7, 7), Team.WHITE)
    ],
]
# Check/attack board for the check fixture, from WHITE's perspective.
checkBoard_check = fillCheckBoard(chessBoard_check, Team.WHITE)[0]
# Test fixture: a lone WHITE pawn at (7, 2), i.e. close to BLACK's back
# rank, intended to exercise pawn promotion.  No companion check board is
# computed for this fixture.
chessBoard_promotion = [
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(7)] + [Pawn(Position(7, 2), Team.WHITE)],
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(8)],
    [None for _ in range(8)],
]
def _single_piece_board(piece_cls):
    """Build an 8x8 board whose only occupant is a WHITE piece_cls at (4, 4).

    These boards are movement-rule fixtures: one per piece type, with the
    piece alone in the middle of an otherwise empty board.  Factored out of
    five previously copy-pasted comprehensions.
    """
    return [[piece_cls(Position(4, 4), Team.WHITE) if i == 4 and j == 4 else None
             for j in range(8)] for i in range(8)]

# Backward-compatible module-level fixture boards, one per piece type.
pawn_board = _single_piece_board(Pawn)
rook_board = _single_piece_board(Rook)
bishop_board = _single_piece_board(Bishop)
knight_board = _single_piece_board(Knight)
queen_board = _single_piece_board(Queen)
|
[
"hsj9830@kookmin.ac.kr"
] |
hsj9830@kookmin.ac.kr
|
c9cae191ed82b398a32e7bbb77325f3110605fe2
|
1c099518cfa5843928763854e6c231d435fc25f4
|
/deepfake/feature_trainer.py
|
792d8d588a90208ba85107a71b5c7f82eb5ecf4d
|
[
"CC-BY-2.0"
] |
permissive
|
poke53280/ml_mercari
|
b3cdda6d53fc4e0f2fca93d9a0ea0231f205ad69
|
f01ff6c1ca3f341e57c769e06abb136a044c9f74
|
refs/heads/master
| 2021-06-02T06:21:57.211262
| 2020-10-11T12:08:58
| 2020-10-11T12:08:58
| 114,643,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,039
|
py
|
import numpy as np
from mp4_frames import get_output_dir
from mp4_frames import get_ready_data_dir
from featureline import get_feature_converter
from featureline import is_error_line
import pandas as pd
from random import shuffle
####################################################################################
#
# create_test_merge
#
def create_test_merge(iPartMin, iPartMax):
    """Merge per-video 'Test' feature files for parts [iPartMin, iPartMax).

    Scans the output directory for files named like
    '<x>_Test_<x>_<iPart>_<video>_<y>.npy', groups their rows by feature id
    (first column), concatenates per feature, and saves one array per
    feature plus a pickled metadata frame (part, video, label) whose row
    order matches each feature array.
    """
    assert iPartMax > iPartMin
    l_test_parts = list (range(iPartMin, iPartMax))
    # Each stored row is a line of 32 (x, y, z) samples.
    num_length = 32
    input_dir = get_output_dir()
    assert input_dir.is_dir()
    output_dir = get_ready_data_dir()
    assert output_dir.is_dir()
    # Mapping: feature name -> integer feature id used in column 0.
    d_f = get_feature_converter()
    l_files = list (input_dir.iterdir())
    l_files = [x for x in l_files if x.suffix == '.npy']
    # Per-feature accumulators of row blocks to concatenate at the end.
    l_data_test = {}
    for zFeature in list (d_f.keys()):
        l_data_test[zFeature] = []
    # Metadata, one entry per data row of each feature array.
    l_iPart = []
    l_zVideo = []
    l_y = []
    for x in l_files:
        # Filename schema: tokens split on '_'; index 1 marks 'Test' files,
        # 3 the part number, 4 the video id, 5 the real/fake label.
        l_x = str(x.stem).split("_")
        isTestFile = (len (l_x) == 6) and (l_x[1] == 'Test')
        if isTestFile:
            pass
        else:
            continue
        iPart = int (l_x[3])
        video = l_x[4]
        y = l_x[5]
        isCollect = (iPart in l_test_parts)
        if isCollect:
            pass
        else:
            continue
        data = np.load(x)
        # Skip files flagged as corrupt/error sentinels.
        if is_error_line(data):
            continue
        # Column 0 carries the feature id; the rest is the sample payload.
        anFeature = data[:, 0]
        data = data[:, 1:]
        data = data.reshape(-1, num_length, 3)
        num_rows = data.shape[0]
        # Every file is expected to contain the same row count per feature.
        assert num_rows % len (d_f.keys()) == 0
        num_rows_per_feature = num_rows // len (d_f.keys())
        l_iPart.extend([iPart] * num_rows_per_feature)
        l_zVideo.extend([video] * num_rows_per_feature)
        l_y.extend([y] * num_rows_per_feature)
        for zFeature in list (d_f.keys()):
            iF = d_f[zFeature]
            m_correct_feature = (anFeature == iF)
            l_data_test[zFeature].append(data[m_correct_feature])
            assert data[m_correct_feature].shape[0] == num_rows_per_feature
    num_meta = len (l_iPart)
    for zFeature in list (d_f.keys()):
        if len (l_data_test[zFeature]) > 0:
            anDataTest = np.concatenate(l_data_test[zFeature])
            # Each feature array must align 1:1 with the metadata rows.
            assert anDataTest.shape[0] == num_meta
            np.save(output_dir / f"test_{zFeature}_p_{iPartMin}_p_{iPartMax}.npy", anDataTest)
        else:
            print(f"No data: test_{zFeature}_p_{iPartMin}_p_{iPartMax}")
    df_meta = pd.DataFrame({'iPart' : l_iPart, 'video': l_zVideo, 'y': l_y})
    df_meta.to_pickle(output_dir / f"test_meta_p_{iPartMin}_p_{iPartMax}.pkl")
####################################################################################
#
# create_train_merge
#
def create_train_merge(iPartMin, iPartMax):
    """Merge per-pair 'Pair' training files for parts [iPartMin, iPartMax).

    Like create_test_merge, but for original/fake training pairs: rows are
    twice as long (original + fake halves) and no metadata frame is written.
    """
    assert iPartMax > iPartMin
    l_train_parts = list (range(iPartMin, iPartMax))
    num_length = 32
    input_dir = get_output_dir()
    assert input_dir.is_dir()
    output_dir = get_ready_data_dir()
    assert output_dir.is_dir()
    # Mapping: feature name -> integer feature id used in column 0.
    d_f = get_feature_converter()
    l_files = list (input_dir.iterdir())
    l_files = [x for x in l_files if x.suffix == '.npy']
    l_data_train = {}
    for zFeature in list (d_f.keys()):
        l_data_train[zFeature] = []
    for x in l_files:
        # Filename schema: '<x>_Pair_<x>_<iPart>_<original>_<fake>'.
        l_x = str(x.stem).split("_")
        isTrainFile = (len (l_x) == 6) and (l_x[1] == 'Pair')
        if isTrainFile:
            pass
        else:
            continue
        iPart = int (l_x[3])
        original = l_x[4]
        fake = l_x[5]
        isCollect = (iPart in l_train_parts)
        if isCollect:
            pass
        else:
            continue
        data = np.load(x)
        if is_error_line(data):
            continue
        anFeature = data[:, 0]
        data = data[:, 1:]
        # Training rows hold an original+fake pair, hence double length.
        data = data.reshape(-1, num_length * 2, 3)
        for zFeature in list (d_f.keys()):
            iF = d_f[zFeature]
            m_correct_feature = (anFeature == iF)
            l_data_train[zFeature].append(data[m_correct_feature])
    for zFeature in list (d_f.keys()):
        if len (l_data_train[zFeature]) > 0:
            anDataTrain = np.concatenate(l_data_train[zFeature])
            np.save(output_dir / f"train_{zFeature}_p_{iPartMin}_p_{iPartMax}.npy", anDataTrain)
####################################################################################
#
# create_train_merge_chunks
#
def create_train_merge_chunks(iPartMin, iPartMax):
    """Run create_train_merge once per single part in [iPartMin, iPartMax)."""
    assert iPartMax > iPartMin
    for part in range(iPartMin, iPartMax):
        create_train_merge(part, part + 1)
####################################################################################
#
# create_test_merge_chunks
#
def create_test_merge_chunks(iPartMin, iPartMax):
    """Run create_test_merge once per single part in [iPartMin, iPartMax)."""
    assert iPartMax > iPartMin
    for part in range(iPartMin, iPartMax):
        create_test_merge(part, part + 1)
####################################################################################
#
# create_train_chunks
#
def create_train_chunks(iPartMin, iPartMax, nGBInternal):
    """Shuffle merged training files into fixed-size output chunks.

    Loads 'train_*' merged arrays whose part range falls within
    [iPartMin, iPartMax), buffers up to nGBInternal GB of rows in memory,
    shuffles each buffer, and writes it back out as 'tr_*' chunks of at
    most ~1M rows each.
    """
    assert iPartMax > iPartMin
    assert nGBInternal > 5
    data_dir = get_ready_data_dir()
    l_files = list (data_dir.iterdir())
    l_files_out = []
    for x in l_files:
        # Filename schema: 'train_<feature>_p_<iMin>_p_<iMax>' => 7 tokens.
        l_x = str(x.stem).split("_")
        if len(l_x) != 7:
            continue
        if l_x[0] != 'train':
            continue
        iMin = int (l_x[4])
        iMax = int (l_x[6])
        assert iMax > iMin
        if (iMin >= iPartMin) and (iMax <= iPartMax):
            pass
        else:
            continue
        l_files_out.append(x)
    # Shuffle file order so chunks mix parts/features.
    shuffle(l_files_out)
    # One row = 64 samples x 3 coords x 4 bytes (float32).
    size_row_bytes = 64 * 3 * 4
    size_internal_bytes = nGBInternal * 1024 * 1024 * 1024
    max_internal_rows = int (size_internal_bytes / size_row_bytes)
    max_out_rows = 1000000
    l_data = []
    num_rows_internal = 0
    iFile = 0
    for idx, x in enumerate(l_files_out):
        isLastFile = (idx == (len(l_files_out) -1))
        print(f"loading {x}...")
        anData = np.load(x)
        assert anData.shape[0] <= max_internal_rows, "single file exceeds internal buffer size"
        num_rows_internal = num_rows_internal + anData.shape[0]
        l_data.append(anData.copy())
        # Flush the buffer when it overflows or we are at the last file.
        if isLastFile or (num_rows_internal > max_internal_rows):
            print(f"Writing out. {num_rows_internal} > {max_internal_rows} or last file")
            anData = np.concatenate(l_data)
            # Row-level shuffle inside the buffer before chunking.
            np.random.shuffle(anData)
            num_rows_out = anData.shape[0]
            num_chunks = int (1 + num_rows_out / max_out_rows)
            print(f"   Writing out. {num_rows_out} lines in {num_chunks} chunks")
            l_data = np.array_split(anData, num_chunks)
            for data_chunk in l_data:
                file_out = data_dir / f"tr_{iPartMin}_{iPartMax}_{iFile:04}.npy"
                np.save(file_out, data_chunk)
                print(f"   saved chunk with {data_chunk.shape[0]} lines")
                iFile = iFile + 1
            l_data = []
            num_rows_internal = 0
####################################################################################
#
# _get_meta_file
#
def _get_meta_file(iMin, iMax):
    """Path of the pickled test metadata frame for parts [iMin, iMax)."""
    return get_ready_data_dir() / f"test_meta_p_{iMin}_p_{iMax}.pkl"
####################################################################################
#
# create_test_video_chunks
#
def create_test_video_chunks(iPartMin, iPartMax):
    """Regroup merged test data into one file per video.

    Pairs each merged 'test_*' feature array with its metadata pickle,
    concatenates everything, then writes one 'te_*' array per unique
    (part, video) combination, tagging the filename with the video's
    real/fake label.
    """
    assert iPartMax > iPartMin
    data_dir = get_ready_data_dir()
    l_files = list (data_dir.iterdir())
    l_files_out = []
    for x in l_files:
        # Filename schema: 'test_<feature>_p_<iMin>_p_<iMax>' => 7 tokens.
        l_x = str(x.stem).split("_")
        if len(l_x) != 7:
            continue
        if l_x[0] != 'test':
            continue
        # Exclude the metadata pickles themselves.
        if l_x[1] == 'meta':
            continue
        iMin = int (l_x[4])
        iMax = int (l_x[6])
        assert iMax > iMin
        if (iMin >= iPartMin) and (iMax <= iPartMax):
            pass
        else:
            continue
        # Only keep data files whose metadata companion exists.
        metafile = _get_meta_file(iMin, iMax)
        if metafile.is_file():
            pass
        else:
            continue
        l_files_out.append((x, metafile))
    """c"""
    l_test = []
    l_meta = []
    for x in l_files_out:
        anTest = np.load(x[0])
        df_meta = pd.read_pickle(x[1])
        # Data rows and metadata rows must align 1:1.
        assert anTest.shape[0] == df_meta.shape[0]
        l_test.append(anTest)
        l_meta.append(df_meta)
    anTest = np.concatenate(l_test)
    df_meta = pd.concat(l_meta, ignore_index = True)
    # Composite key '<part>_<video>' identifies a video uniquely.
    z_video = df_meta.iPart.astype('str') + "_" + df_meta.video
    azVideo = np.unique(z_video)
    for ix, x in enumerate(azVideo):
        m = z_video == x
        anVideoData = anTest[m]
        # All rows of a video share one label; take it from the first row.
        zRealFake = df_meta[m].y.iloc[0]
        zOut = data_dir / f"te_{iPartMin}_{iPartMax}_{ix:04}_{zRealFake}"
        np.save(zOut, anVideoData)
|
[
"anders.topper@gmail.com"
] |
anders.topper@gmail.com
|
d55e1cacb5ece1533562af42c297ae0b015d4026
|
f49e474bfc8a605d6b1afe3fbc020e976fadbca4
|
/KWS/TestPrograms/test_vad_test.py
|
ffae8969597b5e6e975a5569b98232ab9fee26c2
|
[
"MIT"
] |
permissive
|
xrick/gotek_smic
|
1b3100f38253cb06eae9eb4435c176168cc71cc6
|
7655b6d7415b23c35810b8db48af7424f7dcdb06
|
refs/heads/master
| 2021-01-14T16:51:22.331707
| 2020-09-15T08:11:08
| 2020-09-15T08:11:08
| 242,686,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
import numpy as np
from numpy.linalg import norm
import scipy.io.wavfile as wavio
def vad_test(s, fs):
    """Energy-based voice activity detection (VAD).

    The signal is shifted/scaled into [0, 1], then split into 25 ms frames
    hopped every 10 ms.  A frame is kept ("voiced") when the log of the
    ratio of its L2 norm to the whole signal's L2 norm exceeds a fixed
    threshold; the samples of kept frames are returned concatenated.

    Args:
        s: 1-D array-like audio signal.
        fs: sampling rate in Hz.

    Returns:
        1-D float numpy array with only the voiced samples (possibly empty).

    Fixes vs. the original: the frame count is an int (was a float from
    np.floor), the per-shift mask is preallocated instead of grown with
    np.hstack in a loop (was O(n^2)), an all-constant input no longer
    divides by zero, and an all-silent result is an empty ndarray instead
    of a plain list.
    """
    s = np.asarray(s, dtype=np.float64)
    # Normalize to [0, 1]; guard the degenerate all-constant signal.
    s = s - np.amin(s)
    peak = np.amax(s)
    if peak > 0:
        s = s / peak
    FrameSize = int(fs * 0.025)   # 25 ms analysis window
    ShiftSize = int(fs * 0.010)   # 10 ms hop
    threshold = -1.9              # empirical log energy-ratio cutoff
    # Number of complete frames that fit in the signal.
    n = int((len(s) - FrameSize) // ShiftSize)
    loop_size = ShiftSize * n + FrameSize
    norm_t = norm(s, 2)
    # One voiced/unvoiced decision per hop, expanded to ShiftSize samples.
    new = np.zeros(ShiftSize * n)
    for k, i in enumerate(range(FrameSize, loop_size, ShiftSize)):
        # Log of frame-to-signal energy ratio; epsilon avoids log(0).
        log_ratio = np.log(norm(s[i - FrameSize:i], 2) / norm_t + 0.00001)
        if log_ratio > threshold:
            new[k * ShiftSize:(k + 1) * ShiftSize] = 1.0
    # Mask the signal and keep only the voiced samples.
    s_temp = s[:len(new)]
    new_s = new * s_temp
    return new_s[new == 1]
if __name__ == "__main__":
    # read test file
    # NOTE(review): save_wav is defined but never used — presumably the
    # processed signal was meant to be written back out; confirm intent.
    test_wav = "../../Speech_DataSets/whole_keyword_clean_second_run_1429/0b40aa8e_nohash_0y4s6_1.wav"
    save_wav = "../../Speech_DataSets/whole_keyword_clean_second_run_1429/silence_removed/reduced_0b40aa8e_nohash_0y4s6_1.wav"
    # wavio is scipy.io.wavfile; read returns (sample_rate, samples).
    fs, sig = wavio.read(test_wav)
    processed_sig = vad_test(sig, fs)
    print("original signal length is {}".format(sig.shape))
    print("processed signal length is {}".format(processed_sig.shape))
|
[
"noreply@github.com"
] |
noreply@github.com
|
f3b2d326186a171ad037c7d0921726cd3589a4da
|
d51ae5ad901442f117f68cbac61551fed4e6b89d
|
/article/migrations/0013_auto_20200402_1207.py
|
7bcd04b865bff3c0252dce15107f397ba349af4e
|
[] |
no_license
|
wmc-wmc/wmc-ex_2
|
933717da29ef830d81eda1540ac3551d7f131e32
|
eea9ead0f66bd68167f46f796d154733b6b28dbc
|
refs/heads/master
| 2021-02-13T00:41:14.541497
| 2020-12-24T00:35:27
| 2020-12-24T00:35:27
| 244,646,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# Generated by Django 2.2 on 2020-04-02 12:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter article.Text.tag: optional FK to Tag, set to NULL on Tag delete."""

    dependencies = [
        ('article', '0012_auto_20200402_1205'),
    ]

    operations = [
        migrations.AlterField(
            model_name='text',
            name='tag',
            # blank/null allow texts without a tag; SET_NULL detaches texts
            # instead of deleting them when their tag is removed.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='article.Tag'),
        ),
    ]
|
[
"ting@TingdeMacBook-Pro.local"
] |
ting@TingdeMacBook-Pro.local
|
d61e4fd8b851cc4502bc8ecd7ef7e7bfa986eba9
|
408ac5f3ca1bd31f3f0aad22a7a96f800b8bd197
|
/main.py
|
615df94c4c5beacdd6fc0eabd2a923dc5e089ddc
|
[
"MIT"
] |
permissive
|
HandsomeBrotherShuaiLi/ICM
|
8bedc9df2320374f82f06cf6acd2d1e00a4e9b6d
|
81218c52e83760f7a0f0b98677be138ccf5f99f1
|
refs/heads/master
| 2022-06-02T05:57:34.428872
| 2020-05-02T04:51:04
| 2020-05-02T04:51:04
| 240,472,354
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,117
|
py
|
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import tqdm,json,math
from collections import defaultdict
match_path='2020_Problem_D_DATA/matches.csv'
passings='2020_Problem_D_DATA/passingevents.csv'
fullevents='2020_Problem_D_DATA/fullevents.csv'
def draw_graph():
    """Draw and save one directed passing graph per (match, team).

    For every match in the passing-event CSV and every team in that match,
    builds a DiGraph whose nodes are players (with coordinates/time stored
    as node attributes) and whose edges are passes, then renders it with a
    spring layout and saves a PNG under passing_graphs/.
    """
    fp=pd.read_csv(passings)
    matchID=list(set(fp['MatchID']))
    for id in tqdm.tqdm(matchID,total=len(matchID)):
        match_passing=fp[fp.MatchID==id]
        all_team_id=set(match_passing['TeamID'])
        for team_id in all_team_id:
            graph=nx.DiGraph()
            sub_matching=match_passing[match_passing.TeamID==team_id]
            edges_label={}
            for index in sub_matching.index:
                # NOTE(review): this reassignment shadows the outer loop
                # variable team_id; harmless here since sub_matching is
                # already filtered to one team, but fragile.
                team_id=match_passing.loc[index,'TeamID']
                original_coord=(match_passing.loc[index,'EventOrigin_x'],match_passing.loc[index,'EventOrigin_y'])
                dst_coord=(match_passing.loc[index,'EventDestination_x'],match_passing.loc[index,'EventDestination_y'])
                original_id=match_passing.loc[index,'OriginPlayerID']
                dst_id=match_passing.loc[index,'DestinationPlayerID']
                match_period=match_passing.loc[index,'MatchPeriod']
                match_time=match_passing.loc[index,'EventTime']
                # NOTE(review): the second node uses the key 'match_time:'
                # (trailing colon) — looks like a typo for 'match_time'.
                graph.add_nodes_from([
                    (original_id,{'id':original_id,'coord':original_coord,'team_id':team_id,'match_time':match_time,'match_period':match_period}),
                    (dst_id,{'id':dst_id,'coord':dst_coord,'team_id':team_id,'match_time:':match_time,'match_period':match_period})
                ])
                edges_label[(original_id,dst_id)]="{}".format(match_time)
                graph.add_edge(original_id,dst_id)
            nx.draw(graph,pos=nx.spring_layout(graph),with_labels=True,font_color='black',node_color='pink',
                    font_size=7)
            # nx.draw_networkx_edge_labels(graph,pos=nx.spring_layout(graph),edge_labels=edges_label,font_size=3,font_weight='bold',
            #                              font_color='green')
            plt.title('macthID_{}_teamID_{} passing graphs'.format(id,team_id))
            plt.savefig('passing_graphs/macthid_{}_teamid_{}_passing_graphs.png'.format(id,team_id),dpi=200)
            plt.close()
            graph.clear()
def draw_full_events_ball_count(draw_pic=False,analysis=True):
    """Reconstruct ball-possession sequences per match, plot and analyze them.

    For every match in the full-event CSV, builds three parallel lists:
    coordinates of consecutive ball movements, the players involved in each
    movement, and the event time — stitching gaps by reusing the previous
    destination when a new origin does not connect.  Optionally plots the
    trajectory (draw_pic) and writes a per-match possession-pattern report
    plus an aggregate count file (analysis).  All reconstructed sequences
    are dumped to 'all_match_mapping.json'.

    NOTE(review): the final per-match f.write/count loop sits outside the
    'if analysis:' block, so calling with analysis=False raises NameError
    on 'count' — confirm whether that path is ever used.
    """
    fp=pd.read_csv(fullevents)
    match_ID=set(fp['MatchID'])
    mapping={}
    plt.rc('font',family='Times New Roman')
    ax=plt.gca()
    ax.spines['right'].set_color("none")
    ax.spines['top'].set_color("none")
    f=open('pattern_report/all_match_count.txt','w',encoding='utf-8')
    for id in tqdm.tqdm(match_ID,total=len(match_ID)):
        match_ball_graph=fp[fp.MatchID==id]
        all_team_id=set(match_ball_graph['TeamID'])
        if len(list(all_team_id))!=2:
            raise ValueError('team error!')
        ball_passing_coord=[]
        ball_control_player=[]
        ball_control_time=[]
        for c,idx in enumerate(match_ball_graph.index):
            teamID=match_ball_graph.loc[idx,'TeamID']
            time=match_ball_graph.loc[idx,'EventTime']
            original_player_id=match_ball_graph.loc[idx,'OriginPlayerID']
            dstplayer_id=match_ball_graph.loc[idx,'DestinationPlayerID']
            original_x=match_ball_graph.loc[idx,'EventOrigin_x']
            original_y=match_ball_graph.loc[idx,'EventOrigin_y']
            dst_x=match_ball_graph.loc[idx,'EventDestination_x']
            dst_y=match_ball_graph.loc[idx,'EventDestination_y']
            # Events without complete coordinates cannot be placed.
            if pd.isna(dst_x) or pd.isna(dst_y) or pd.isna(original_x) or pd.isna(original_y):
                continue
            # Mirror the pitch for the opponent so both teams share a frame.
            if teamID.startswith('Opponent'):
                original_x=100-original_x
                original_y=100-original_y
                dst_x=100-dst_x
                dst_y=100-dst_y
            original_x,original_y,dst_x,dst_y=int(original_x),int(original_y),int(dst_x),int(dst_y)
            if c==0:
                ball_passing_coord.append((original_x,original_y,dst_x,dst_y))
                ball_control_time.append(time)
                ball_control_player.append([original_player_id])
            else:
                # Same movement as before: just add the player to that segment.
                if (original_x,original_y,dst_x,dst_y)==ball_passing_coord[-1]:
                    ball_control_player[-1].append(original_player_id)
                else:
                    # Continuous: this event starts where the last ended.
                    if (original_x,original_y)==(ball_passing_coord[-1][2],ball_passing_coord[-1][3]):
                        ball_passing_coord.append((original_x,original_y,dst_x,dst_y))
                        ball_control_time.append(time)
                        ball_control_player.append([original_player_id])
                    else:
                        # Gap: force continuity by reusing the previous
                        # destination as this event's origin.
                        original_x,original_y=ball_passing_coord[-1][2],ball_passing_coord[-1][3]
                        if (original_x,original_y,dst_x,dst_y)==ball_passing_coord[-1]:
                            ball_control_player[-1].append(original_player_id)
                        else:
                            ball_passing_coord.append((original_x,original_y,dst_x,dst_y))
                            ball_control_time.append(time)
                            ball_control_player.append([original_player_id])
        assert len(ball_passing_coord)==len(ball_control_player)
        assert len(ball_control_player)==len(ball_control_time)
        mapping[id]=[ball_control_player,ball_passing_coord,ball_control_time]
        if draw_pic:
            # Scatter each segment colored by owning team (red=Huskies,
            # blue=Opponent, black=mixed) with arrows along the movement.
            last_dst=None
            linesw=0.05
            for i in range(len(ball_control_player)):
                players=ball_control_player[i]
                coord=[j*10000 for j in ball_passing_coord[i]]
                teams=list(set([j.split('_')[0] for j in players]))
                if len(teams)==1 and teams[0]=='Huskies':
                    if last_dst==None:
                        plt.scatter(x=[coord[0],coord[2]],y=[coord[1],coord[3]],
                                    c='red',marker='^',linewidths=linesw)
                        plt.quiver(coord[0],coord[1],coord[2],coord[3],color='g', width=0.0005)
                    else:
                        plt.scatter(x=[last_dst[0],coord[2]],y=[last_dst[1],coord[3]],c='red',
                                    marker='^',linewidths=linesw)
                        plt.quiver(last_dst[0],last_dst[1],coord[2],coord[3],color='g', width=0.0005)
                elif len(teams)==1 and teams[0].startswith('Opponent'):
                    if last_dst==None:
                        plt.scatter(x=[coord[0],coord[2]],y=[coord[1],coord[3]],
                                    c='blue',marker='o',linewidths=linesw)
                        plt.quiver(coord[0],coord[1],coord[2],coord[3],color='g', width=0.0005)
                    else:
                        plt.scatter(x=[last_dst[0],coord[2]],y=[last_dst[1],coord[3]],c='blue',
                                    marker='o',linewidths=linesw)
                        plt.quiver(last_dst[0],last_dst[1],coord[2],coord[3],color='g', width=0.0005)
                else:
                    if last_dst==None:
                        plt.scatter(x=[coord[0],coord[2]],y=[coord[1],coord[3]],
                                    c='black',marker='o',linewidths=linesw)
                        plt.quiver(coord[0],coord[1],coord[2],coord[3],color='g', width=0.0005)
                    else:
                        plt.scatter(x=[last_dst[0],coord[2]],y=[last_dst[1],coord[3]],c='black',
                                    marker='o',linewidths=linesw)
                        plt.quiver(last_dst[0],last_dst[1],coord[2],coord[3],color='g', width=0.0005)
                last_dst=(coord[2],coord[3])
            plt.savefig('ball_graphs/{}_full_events.png'.format(id),dpi=300)
            plt.close()
        if analysis:
            # Two-pointer sweep: j marks the start of a possession run,
            # i advances until the controlling team-set changes.
            report=open('pattern_report/matchid_{}.txt'.format(id),'w',encoding='utf-8')
            i=0#quick point
            j=0#slow point
            count=defaultdict(int)
            while i<len(ball_control_time):
                teams_i=set([p.split('_')[0] for p in ball_control_player[i]])
                teams_j=set([p.split('_')[0] for p in ball_control_player[j]])
                if teams_i==teams_j:
                    i+=1
                else:
                    teams_j=list(teams_j)
                    teams_i=list(teams_i)
                    time_cut=(ball_control_time[j],ball_control_time[i])
                    temp_p=[]
                    for play_t in ball_control_player[j:i]:
                        temp_p+=play_t
                    temp_p=list(set(temp_p))
                    # Classify the run by how many distinct players touched
                    # the ball: 3 = triadic, <=2 = dyadic, >3 = team formation.
                    if len(temp_p)==3:
                        patt='triadic configuration'
                    elif len(temp_p)<=2:
                        patt='dyadic'
                    else:
                        patt='team formations'
                    if len(teams_i)==1 and len(teams_j)==1:
                        report.write('From time: {} to time: {}, {} team takes {}, with players:{}\n'.format(
                            time_cut[0],time_cut[1],teams_j[0],patt,temp_p
                        ))
                        count[teams_j[0]+'_'+patt]+=1
                        j=i
                        i+=1
                    elif len(teams_i)>1 and len(teams_j)==1:
                        report.write('From time: {} to time: {}, {} team took {}, with players:{}, and teams:{} started to dual \n'.format(
                            time_cut[0],time_cut[1],teams_j[0],patt,temp_p,teams_i
                        ))
                        count[teams_j[0]+'_'+patt]+=1
                        j=i
                        i+=1
                    elif len(teams_i)==1 and len(teams_j)>1:
                        ts=[z for z in teams_j if z not in teams_i]
                        report.write('From time: {} to time: {}, {} team took {}, with players:{}, and '
                                     'teams:{} started to take control of the ball by player:{} \n'.format(
                            time_cut[0],time_cut[1],teams_i[0],patt,temp_p,ts[0],ball_control_player[i]
                        ))
                        count[teams_j[0]+'_'+patt]+=1
                        j=i
                        i+=1
                    else:
                        report.write('From time: {} to time: {}, teams:{} were always dualing\n'.format(
                            time_cut[0],time_cut[1],teams_j
                        ))
                        count[teams_j[0]+'_'+patt]+=1
                        j=i
                        i+=1
            report.close()
        f.write('*'*20+'Match:{}'.format(id)+'*'*20+'\n')
        for k in count:
            f.write("{}:{}\n".format(k,count[k]))
    json.dump(mapping,open('all_match_mapping.json','w',encoding='utf-8'))
    f.close()
def conduct_new_passing_tables():
    """Export one Gephi-style node/edge CSV pair per match.

    Reads the passing-event CSV and, for every match, writes
    'passingevents_<id>_node.csv' (row id + origin-player label) and
    'passingevents_<id>_edge.csv' (pass attributes incl. mirrored
    coordinates and Euclidean pass distance).

    Fix vs. the original: rows are accumulated in plain lists and turned
    into DataFrames once per match, replacing the quadratic
    DataFrame.append-in-a-loop pattern (append was removed in pandas 2.0).
    """
    fp=pd.read_csv(passings)
    match_id=list(set(fp['MatchID']))
    for id in tqdm.tqdm(match_id,total=len(match_id)):
        match_passing=fp[fp.MatchID==id]
        node_rows=[]
        edge_rows=[]
        for c,idx in enumerate(match_passing.index):
            original_x=match_passing.loc[idx,'EventOrigin_x']
            original_y=match_passing.loc[idx,'EventOrigin_y']
            dst_x=match_passing.loc[idx,'EventDestination_x']
            dst_y=match_passing.loc[idx,'EventDestination_y']
            # Mirror the pitch for the opponent team so both teams attack
            # in the same direction.
            if match_passing.loc[idx,'TeamID'].startswith('Opponent'):
                original_x=100-original_x
                original_y=100-original_y
                dst_x=100-dst_x
                dst_y=100-dst_y
            original_x,original_y,dst_x,dst_y=int(original_x),int(original_y),int(dst_x),int(dst_y)
            node_rows.append({'Id':int(c),'Label':match_passing.loc[idx,'OriginPlayerID']})
            edge_rows.append({
                'Source':match_passing.loc[idx,'OriginPlayerID'],
                'Target':match_passing.loc[idx,'DestinationPlayerID'],
                'EventTime':round(float(match_passing.loc[idx,'EventTime']),2),
                'EventSubType':match_passing.loc[idx,'EventSubType'],
                'MatchPeriod':match_passing.loc[idx,'MatchPeriod'].strip('H'),
                'EventOrigin_x':original_x,
                'EventOrigin_y':original_y,
                'EventDestination_x':dst_x,
                'EventDestination_y':dst_y,
                # Euclidean length of the pass, rounded to 2 decimals.
                'Distance':round(math.sqrt((original_x-dst_x)**2+(original_y-dst_y)**2),2)
            })
        node=pd.DataFrame(node_rows)
        edge=pd.DataFrame(edge_rows)
        node.to_csv('new_passing_tables/passingevents_{}_node.csv'.format(id),index=False)
        edge.to_csv('new_passing_tables/passingevents_{}_edge.csv'.format(id),index=False)
def conduct_degree():
    """Compute per-match and aggregate pass degrees for every player.

    For each match: out-degree = passes originated, in-degree = passes
    received, total = in + out, split between the Huskies and the opponent.
    Sorted rankings are written (in the original Chinese section headers)
    to 'all_match_in_out_degree.txt', followed by the Huskies' aggregate
    degrees over all matches.

    Fixes vs. the original: the opponent total-degree branch indexed with
    the stale loop variable h_p instead of o_p (corrupting results for
    players seen only as receivers); the Huskies merge tested
    'in Ha_degree_out' twice; and the output file handle was never closed.
    Since the counters are defaultdict(int), total degree is now a plain
    in+out sum, which also covers players seen on only one side.
    """
    fp=pd.read_csv(passings)
    match_ID=set(fp['MatchID'])
    # Aggregates over all matches (Huskies players only).
    all_match_degree_in=defaultdict(int)
    all_match_degree_out=defaultdict(int)
    all_match_degree={}
    result=open('all_match_in_out_degree.txt','w',encoding='utf-8')
    for id in tqdm.tqdm(match_ID,total=len(match_ID)):
        Ha_degree_in=defaultdict(int)
        Ha_degree_out=defaultdict(int)
        Ha_degree_all={}
        Oppo_degree_in=defaultdict(int)
        Oppo_degree_out=defaultdict(int)
        Oppo_degree_all={}
        match_single=fp[fp.MatchID==id]
        for idx in match_single.index:
            # Origin player => out-degree; destination player => in-degree.
            if not pd.isna(match_single.loc[idx,'OriginPlayerID']) and match_single.loc[idx,'OriginPlayerID'].startswith('Huskies'):
                Ha_degree_out[match_single.loc[idx,'OriginPlayerID']]+=1
                all_match_degree_out[match_single.loc[idx,'OriginPlayerID']]+=1
            if not pd.isna(match_single.loc[idx,'DestinationPlayerID']) and match_single.loc[idx,'DestinationPlayerID'].startswith('Huskies'):
                Ha_degree_in[match_single.loc[idx,'DestinationPlayerID']]+=1
                all_match_degree_in[match_single.loc[idx,'DestinationPlayerID']]+=1
            if not pd.isna(match_single.loc[idx,'OriginPlayerID']) and match_single.loc[idx,'OriginPlayerID'].startswith('Opponent'):
                Oppo_degree_out[match_single.loc[idx,'OriginPlayerID']]+=1
            if not pd.isna(match_single.loc[idx,'DestinationPlayerID']) and match_single.loc[idx,'DestinationPlayerID'].startswith('Opponent'):
                Oppo_degree_in[match_single.loc[idx,'DestinationPlayerID']]+=1
        ha_player=set(list(Ha_degree_in.keys())+list(Ha_degree_out.keys()))
        oppo_player=set(list(Oppo_degree_in.keys())+list(Oppo_degree_out.keys()))
        # defaultdict returns 0 for missing keys, so a plain sum is correct
        # for players that only appear on one side of a pass.
        for h_p in ha_player:
            Ha_degree_all[h_p]=Ha_degree_in[h_p]+Ha_degree_out[h_p]
        for o_p in oppo_player:
            Oppo_degree_all[o_p]=Oppo_degree_in[o_p]+Oppo_degree_out[o_p]
        ha_out=sorted(Ha_degree_out.items(),key=lambda item:item[1],reverse=True)
        ha_in=sorted(Ha_degree_in.items(),key=lambda item:item[1],reverse=True)
        oppo_out=sorted(Oppo_degree_out.items(),key=lambda item:item[1],reverse=True)
        oppo_in=sorted(Oppo_degree_in.items(),key=lambda item:item[1],reverse=True)
        oppo_all=sorted(Oppo_degree_all.items(),key=lambda item:item[1],reverse=True)
        ha_all=sorted(Ha_degree_all.items(),key=lambda item:item[1],reverse=True)
        # Section headers below are intentionally kept verbatim (Chinese):
        # "match N Huskies out-/in-/total degree" and the opponent's.
        result.write('*'*20+'第{}场比赛哈士奇队出度:'.format(id)+'*'*20+'\n')
        for i in ha_out:
            result.write("{}:{}\n".format(i[0],i[1]))
        result.write('*'*20+'第{}场比赛哈士奇队入度:'.format(id)+'*'*20+'\n')
        for i in ha_in:
            result.write('{}:{}\n'.format(i[0],i[1]))
        result.write('*'*20+'第{}场比赛哈士奇队总度:'.format(id)+'*'*20+'\n')
        for i in ha_all:
            result.write('{}:{}\n'.format(i[0],i[1]))
        result.write('*'*20+'第{}场比赛反方出度:'.format(id)+'*'*20+'\n')
        for i in oppo_out:
            result.write('{}:{}\n'.format(i[0],i[1]))
        result.write('*'*20+'第{}场比赛反方入度:'.format(id)+'*'*20+'\n')
        for i in oppo_in:
            result.write('{}:{}\n'.format(i[0],i[1]))
        result.write('*'*20+'第{}场比赛反方总度:'.format(id)+'*'*20+'\n')
        for i in oppo_all:
            result.write('{}:{}\n'.format(i[0],i[1]))
    # Aggregate totals across all matches (Huskies only).
    ha_player=set(list(all_match_degree_in.keys())+list(all_match_degree_out.keys()))
    for p in ha_player:
        all_match_degree[p]=all_match_degree_in[p]+all_match_degree_out[p]
    all_match_degree_in=sorted(all_match_degree_in.items(),key=lambda item:item[1],reverse=True)
    all_match_degree_out=sorted(all_match_degree_out.items(),key=lambda item:item[1],reverse=True)
    all_match_degree=sorted(all_match_degree.items(),key=lambda item:item[1],reverse=True)
    result.write('*'*20+'哈士奇队总的出度:'+'*'*20+'\n')
    for i in all_match_degree_out:
        result.write('{}:{}\n'.format(i[0],i[1]))
    result.write('*'*20+'哈士奇队总的入度:'+'*'*20+'\n')
    for i in all_match_degree_in:
        result.write('{}:{}\n'.format(i[0],i[1]))
    result.write('*'*20+'哈士奇队总度数:'+'*'*20+'\n')
    for i in all_match_degree:
        result.write('{}:{}\n'.format(i[0],i[1]))
    # The original leaked this handle; close it so the file is flushed.
    result.close()
if __name__=='__main__':
    # Script entry point: currently only the degree analysis is run.
    conduct_degree()
|
[
"shuai.li@shopee.com"
] |
shuai.li@shopee.com
|
1f329583d0700cf40723e268122106387cae7255
|
31a60b44e078fa75033c4deb8528c2da9726a370
|
/run_files/191012_regularizer_comp_toy.py
|
2fa0dbccf291c278969dfc082ea0e8ba0d5509a6
|
[] |
no_license
|
janmaltel/stew
|
85f8d4592ab1c06ced21c99a8ea480c679562f8b
|
a1ed53499c6ace8a92a83b959680f89cf95cf5b7
|
refs/heads/master
| 2021-12-29T16:39:38.138520
| 2021-12-15T13:35:04
| 2021-12-15T13:35:04
| 157,102,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,544
|
py
|
import numpy as np
import stew.example_data as create
import stew.mlogit as mlogit
import stew.utils as utils
import matplotlib.pyplot as plt
from stew.utils import create_diff_matrix
from sklearn.linear_model import LinearRegression
from stew.regression import *
import os
from datetime import datetime
# from stew.regression import LinearRegressionTorch

# Tag this run with a timestamp so repeated runs never overwrite old figures.
time_id = datetime.now().strftime('%Y_%m_%d_%H_%M')
name_id = "_reg_path_comp"
run_id = time_id + name_id
# NOTE(review): hard-coded absolute output path — only valid on the author's machine.
run_id_path = os.path.join("/Users/malte/Dropbox/projects/ozgur/shrinkage toward noncompensatory weights/figures/", run_id)
# exist_ok makes both calls unconditional; the original only created
# "positive_weights" when run_id_path itself was missing, so a partially
# created run directory would make the second savefig below fail.
os.makedirs(run_id_path, exist_ok=True)
os.makedirs(os.path.join(run_id_path, "positive_weights"), exist_ok=True)
# Synthetic-data and training hyperparameters.
num_samples = 500
noise_scale = 1
# Ground-truth coefficients; mixed signs so sign-sensitive regularizers differ.
beta = np.array([1, -1.2, 1.5, -0.3, 0.5])
num_features = len(beta)
epochs = 2000
# Torch params
learning_rate = 0.002
# Fix both RNGs for reproducibility. NOTE(review): `torch` is presumably made
# available by the star import from stew.regression — confirm.
np.random.seed(1)
torch.manual_seed(1)
X, y = create.regression_example_data(num_samples=num_samples,
                                      num_features=num_features,
                                      noise_scale=noise_scale,
                                      beta=beta)
# Short codes for the shrinkage penalties implemented in stew, with
# human-readable titles used on the plots (kept index-aligned).
regularizers = np.array(["stew2", "stem2", "stow", "stnw", "sted"])
regularizer_names = np.array(["Shrinkage toward equal weights",
                              "Shrinkage toward equal weight magnitudes",
                              "Shrinkage toward ordered weights",
                              "Shrinkage toward noncompensatory weights",
                              "Shrinkage toward exponentially decaying weights"])
# regularizers = np.array(["sted"])
# regularizer_names = np.array(["Shrinkage toward exponentially decaying weights"])
num_regularizers = len(regularizers)
# Regularization-strength grid, log-spaced over 10^-3 .. 10^1.8.
lambda_min = -3
lambda_max = 1.8
num_lambdas = 40
# lams = np.insert(np.logspace(lambda_min, lambda_max, num=num_lambdas-1), 0, 0.0)
lams = np.logspace(lambda_min, lambda_max, num=num_lambdas)
# Fitted coefficients per (regularizer, lambda): the regularization paths.
weight_storage = np.zeros((num_regularizers, num_lambdas, num_features))
# Fit one model per (regularizer, lambda) pair and record the learned weights.
for reg_ix, regularizer in enumerate(regularizers):  # regularizer = "stew"
    print(reg_ix, regularizer)
    for lam_ix, lam in enumerate(lams):  # lam = 0.1
        lin_reg_torch = LinearRegressionTorch(num_features=num_features,
                                              learning_rate=learning_rate,
                                              regularization=regularizer,
                                              lam=lam)
        betas = lin_reg_torch.fit(X, y, epochs=epochs).detach().numpy()
        weight_storage[reg_ix, lam_ix] = betas
# Plot
# One figure per regularizer: each coefficient's path as a function of lambda.
for reg_ix, regularizer in enumerate(regularizers):
    fig1, ax1 = plt.subplots(figsize=(8, 6))
    plt.title(regularizer_names[reg_ix])
    for weight_ix in range(num_features):
        ax1.plot(lams, weight_storage[reg_ix, :, weight_ix], label="beta_" + str(weight_ix+1))
    # Log x-axis matches the log-spaced lambda grid.
    plt.xscale("log")
    plt.axhline(y=0, color="grey")
    plt.legend()
    fig1.show()
    fig1.savefig(os.path.join(run_id_path, regularizer + ".pdf"), )
    plt.close()
## On positive weights
# Repeat the whole experiment with an all-positive ground truth and the
# positivity constraint enabled in the estimator.
beta = np.array([1, 0.8, 1.5, 0.3, 0.5])
X, y = create.regression_example_data(num_samples=num_samples,
                                      num_features=num_features,
                                      noise_scale=noise_scale,
                                      beta=beta)
# Reset the path storage for the second experiment.
weight_storage = np.zeros((num_regularizers, num_lambdas, num_features))
for reg_ix, regularizer in enumerate(regularizers):  # regularizer = "stew"
    print(reg_ix, regularizer)
    for lam_ix, lam in enumerate(lams):  # lam = 0.1
        lin_reg_torch = LinearRegressionTorch(num_features=num_features,
                                              learning_rate=learning_rate,
                                              regularization=regularizer,
                                              positivity_constraint=True,
                                              lam=lam)
        betas = lin_reg_torch.fit(X, y, epochs=epochs).detach().numpy()
        weight_storage[reg_ix, lam_ix] = betas
# Plot
# Same path plots as above, saved into the "positive_weights" subfolder.
for reg_ix, regularizer in enumerate(regularizers):
    fig1, ax1 = plt.subplots(figsize=(5, 3.5))
    plt.title(regularizer_names[reg_ix])
    for weight_ix in range(num_features):
        ax1.plot(lams, weight_storage[reg_ix, :, weight_ix], label="beta_" + str(weight_ix+1))
    plt.xscale("log")
    plt.axhline(y=0, color="grey")
    plt.legend()
    fig1.show()
    fig1.savefig(os.path.join(run_id_path, "positive_weights", regularizer + ".pdf"))
    plt.close()
|
[
"j.m.lichtenberg@bath.ac.uk"
] |
j.m.lichtenberg@bath.ac.uk
|
6dc7df348f7ef3111d18950993f5efc39de88ea9
|
41d95796e81289c87ef839ab78e74decb862d556
|
/Website_Scanner/general.py
|
eb69beec20a259e92191c11a442cab9ffbc7de01
|
[] |
no_license
|
Sliking/Redes
|
770446b5be0118f39af9860e8d6f78957a279478
|
32183dfe0abcb005b2c622c3878b0db8f2208cb7
|
refs/heads/master
| 2021-01-10T15:47:32.462242
| 2015-11-26T23:38:27
| 2015-11-26T23:38:27
| 45,213,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import os
def create_dir(directory):
    """Ensure *directory* exists, creating intermediate folders as needed."""
    print("[DEBUG] Checking folder")
    if not os.path.exists(directory):
        # exist_ok closes the check-then-create race: if another process
        # creates the folder between the exists() check and this call,
        # the original os.makedirs() would raise FileExistsError.
        os.makedirs(directory, exist_ok=True)
        print("[DEBUG] Created folder -> DONE!")
def write_file(path, data):
    """Write *data* to *path*, truncating any existing file."""
    print("[DEBUG] Writing in file")
    # `with` guarantees the handle is closed even if write() raises; the
    # original leaked the descriptor on error.
    with open(path, 'w') as f:
        f.write(data)
    print("[DEBUG] Write -> DONE!")
|
[
"miguelpinto25@hotmail.com"
] |
miguelpinto25@hotmail.com
|
8a52bc396fcafcd7f2ed6b20d0b110a3e5a59648
|
1d60c5a7b8ce6277bff514e376f79848f706344c
|
/Data Scientist with Python - Career Track /22. Machine Learning with the Experts: School Budgets/02. Creating a simple first model/01. Setting up a train-test split in scikit-learn.py
|
09e603e05172de82530517858d1031747721ca01
|
[] |
no_license
|
DidiMilikina/DataCamp
|
338c6e6d3b4f5b6c541c1aba155a36e9ee24949d
|
3bf2cf3c1430190a7f8e54efda7d50a5fd66f244
|
refs/heads/master
| 2020-12-15T13:16:54.178967
| 2020-05-06T17:30:54
| 2020-05-06T17:30:54
| 235,113,616
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
'''
Setting up a train-test split in scikit-learn
Alright, you've been patient and awesome. It's finally time to start training models!
The first step is to split the data into a training set and a test set. Some labels don't occur very often, but we want to make sure that they appear in both the training and the test sets. We provide a function that will make sure at least min_count examples of each label appear in each split: multilabel_train_test_split.
Feel free to check out the full code for multilabel_train_test_split here.
You'll start with a simple model that uses just the numeric columns of your DataFrame when calling multilabel_train_test_split. The data has been read into a DataFrame df and a list consisting of just the numeric columns is available as NUMERIC_COLUMNS.
Instructions
100 XP
Create a new DataFrame named numeric_data_only by applying the .fillna(-1000) method to the numeric columns (available in the list NUMERIC_COLUMNS) of df.
Convert the labels (available in the list LABELS) to dummy variables. Save the result as label_dummies.
In the call to multilabel_train_test_split(), set the size of your test set to be 0.2. Use a seed of 123.
Fill in the .info() method calls for X_train, X_test, y_train, and y_test.
'''
# SOLUTION
# (The bare word `SOLUTION` above was originally code — a NameError at
# runtime; it is clearly a section marker that lost its comment prefix.)

# Create the new DataFrame: numeric_data_only
numeric_data_only = df[NUMERIC_COLUMNS].fillna(-1000)
# Get labels and convert to dummy variables: label_dummies
label_dummies = pd.get_dummies(df[LABELS])
# Create training and test sets
X_train, X_test, y_train, y_test = multilabel_train_test_split(numeric_data_only,
                                                               label_dummies,
                                                               size=0.2,
                                                               seed=123)
# Print the info
# NOTE(review): DataFrame.info() prints directly and returns None, so each
# print() below also emits a trailing "None" — this mirrors the exercise text.
print("X_train info:")
print(X_train.info())
print("\nX_test info:")
print(X_test.info())
print("\ny_train info:")
print(y_train.info())
print("\ny_test info:")
print(y_test.info())
|
[
"didimilikina8@gmail.com"
] |
didimilikina8@gmail.com
|
9b930250c80b39f856585160a5b1f150a3d9355a
|
6053cef7fc0b063a6105cd38659ba082ee706335
|
/tweettools/blockmute.py
|
945725ca153e6f977a12db922ae170e6fb90aabe
|
[
"MIT"
] |
permissive
|
jdidion/blockmute
|
18dd24535d75d6c8998a432a1a5b657a3e91b93f
|
05984da637206d2bc5c69d2f68b10a1df4f9985f
|
refs/heads/main
| 2021-01-19T19:52:16.657531
| 2018-04-29T01:20:39
| 2018-04-29T01:20:39
| 101,212,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
#!/usr/bin/env python
# Block everyone you've muted, and vice-versa.
from argparse import ArgumentParser
import time
from tqdm import tqdm
from tweettools import get_client
def blockmute(api, sleep_secs=300):
    """Mirror the account's mute and block lists onto each other.

    Every muted-but-not-blocked user gets blocked, and every
    blocked-but-not-muted user gets muted, so both sets end up identical.
    Each API call is retried after ``sleep_secs`` seconds on failure
    (typically a rate-limit error from the Twitter API).
    """
    mutes = set(api.GetMutesIDs())
    blocks = set(api.GetBlocksIDs())

    def _retry(action, user_id):
        # Keep trying a single call until it succeeds, backing off between
        # attempts. The original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the script un-interruptible.
        while True:
            try:
                action(user_id)
                return
            except Exception:
                print("Exceeded rate limit; sleeping for {} seconds".format(sleep_secs))
                time.sleep(sleep_secs)

    new_blocks = mutes - blocks
    for user_id in tqdm(new_blocks):
        _retry(api.CreateBlock, user_id)
    new_mutes = blocks - mutes
    for user_id in tqdm(new_mutes):
        _retry(api.CreateMute, user_id)
def main():
    """Parse API credentials from the command line and run the sync."""
    cli = ArgumentParser()
    cli.add_argument('-ck', '--consumer-key')
    cli.add_argument('-cs', '--consumer-secret')
    cli.add_argument('-tk', '--token-key', default=None)
    cli.add_argument('-ts', '--token-secret', default=None)
    # Default back-off between retries: 15 minutes (Twitter's rate window).
    cli.add_argument('-s', '--sleep-secs', type=int, default=15 * 60)
    opts = cli.parse_args()
    client = get_client(
        opts.token_key, opts.token_secret, opts.consumer_key, opts.consumer_secret)
    blockmute(client, sleep_secs=opts.sleep_secs)
if __name__ == '__main__':
main()
|
[
"github@didion.net"
] |
github@didion.net
|
e0d35b7ac5b882a39a6e7533e9b46ac4bddf2677
|
27752fee55422acb5264f3e19fa45c6ab421338d
|
/convetions/dd_game.py
|
85d73a3f18d03f0dfcf8422d46f269be85dcc7f5
|
[] |
no_license
|
Tuzosdaniel12/learningPython
|
0be4ae6c2aa4e45744afc1bac5bf38b932433023
|
966fcc6a05e2b203f8d134b8ac0d3f0f179ad19a
|
refs/heads/main
| 2023-06-03T07:55:18.228505
| 2021-06-17T09:34:54
| 2021-06-17T09:34:54
| 359,373,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,756
|
py
|
import logging
import random
# Log to a file so debug output does not interfere with the on-screen map.
logging.basicConfig(filename="game.log", level=logging.DEBUG)
# NOTE(review): with level=DEBUG this INFO line *is* written to the log,
# despite what the message says (it is just not shown on screen).
logging.info("You wont see this")
logging.warning("OH NO")  # logging.warn() is a deprecated alias of warning()
# Shared mutable game state: the player's current cell plus the trail of
# previously visited cells (drawn as '.' on the map).
player = {'location': None, 'path': []}
# The 3x3 grid, addressed as (row, column) tuples.
cells = [(0, 0), (0, 1), (0, 2),
         (1, 0), (1, 1), (1, 2),
         (2, 0), (2, 1), (2, 2)]
def get_locations():
    """Draw three mutually distinct cells: monster, exit door, player start."""
    while True:
        monster = random.choice(cells)
        door = random.choice(cells)
        start = random.choice(cells)
        # Redraw all three whenever any pair collides — the same retry the
        # original expressed with recursion, so the random stream is identical.
        if monster != door and monster != start and door != start:
            return monster, door, start
def get_moves(player):
    """Return the legal moves from *player*, a (row, column) cell tuple.

    A direction is removed when it would step off the 3x3 grid. The result
    preserves the LEFT, RIGHT, UP, DOWN ordering of the full move list.
    """
    row, col = player
    moves = ['LEFT', 'RIGHT', 'UP', 'DOWN']
    # Coordinate checks replace the original hard-coded edge-cell lists —
    # identical on the 3x3 grid, and no longer tied to its exact cells.
    if col == 0:
        moves.remove('LEFT')
    if row == 0:
        moves.remove('UP')
    if col == 2:
        moves.remove('RIGHT')
    if row == 2:
        moves.remove('DOWN')
    return moves
def move_player(player, move):
    """Apply *move* to the player dict and return it.

    The old location is appended to player['path'] before moving; an
    unrecognized move leaves the location unchanged (but still records it).
    """
    row, col = player['location']
    player['path'].append((row, col))
    # (row delta, column delta) for each direction.
    steps = {'LEFT': (0, -1), 'UP': (-1, 0), 'RIGHT': (0, 1), 'DOWN': (1, 0)}
    if move in steps:
        d_row, d_col = steps[move]
        player['location'] = (row + d_row, col + d_col)
    return player
def draw_map():
    """Print the 3x3 dungeon: X = player, . = visited cell, _ = unexplored."""
    print(' _ _ _')
    tile = '|{}'
    for idx, cell in enumerate(cells):
        # Pick the glyph for this cell first...
        if cell == player['location']:
            glyph = 'X'
        elif cell in player['path']:
            glyph = '.'
        else:
            glyph = '_'
        # ...then close the row (with the right-hand wall and a newline) on
        # the last column, otherwise keep printing on the same line.
        if idx % 3 == 2:
            print(tile.format(glyph + '|'))
        else:
            print(tile.format(glyph), end='')
# Place the monster, the exit and the player on distinct cells, then run the
# input loop until the player escapes, is eaten, or quits.
monster, door, player['location'] = get_locations()
logging.info(f"monster: {monster}, door: {door}, player:{player['location']}")
while True:
    moves = get_moves(player['location'])
    print("Welcome to the dungeon!")
    print("You're currently in room {}".format(player['location']))
    draw_map()
    print("\nYou can move {}".format(', '.join(moves)))
    print("Enter QUIT to quit")
    move = input("> ")
    move = move.upper()
    if move == 'QUIT':
        break
    # `moves` already excludes directions that would leave the grid.
    if not move in moves:
        print("\n** Walls are hard! Stop running into them! **\n")
        continue
    player = move_player(player, move)
    # Check the new location against the two special cells.
    if player['location'] == door:
        print("\n** You escaped! **\n")
        break
    elif player['location'] == monster:
        print("\n** You got eaten! **\n")
        break
    else:
        continue
|
[
"danielsoledad@gmail.com"
] |
danielsoledad@gmail.com
|
e27620ac43423c4e604d8e08a4bc43ff1c01c49e
|
c9c97b5f002577f97fe14fb12951cec7dae5c3e1
|
/data/jpg_to_npy.py
|
eb0019236a7071b28919ec4a1afaec2cc1c26ed9
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
trandaitai327/ml-lab-02-classification
|
b44eed12ff4568031a6c78a5c36f5c5a819977c8
|
0130e89eef80e9f69d2a6c578313841c770bc7f7
|
refs/heads/main
| 2023-05-30T19:25:38.418310
| 2021-06-07T15:18:42
| 2021-06-07T15:18:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,829
|
py
|
import os
import numpy as np
import cv2 as cv
import pandas as pd
IMG_ORIGINAL_SHAPE = (1365, 2048, 3)
def center_crop(image, out_height, out_width):
    """Return the centered out_height x out_width window of a 3-D image array.

    Offsets are floor((in - out) / 2), so for odd differences the crop sits
    one pixel closer to the top-left.
    """
    in_height, in_width = image.shape[:2]
    top = (in_height - out_height) // 2
    left = (in_width - out_width) // 2
    return image[top:top + out_height, left:left + out_width, :]
def resize_maintain_aspect(image, target_h, target_w):
    """Resize so the *smaller* side hits its target, preserving aspect ratio.

    The longer side ends up >= its target, which suits a subsequent
    center_crop down to exactly (target_h, target_w).
    """
    in_h, in_w = image.shape[:2]
    if in_h > in_w:
        # Portrait: pin the width, scale the height by the same factor.
        new_w = target_w
        new_h = int(in_h * (target_w / in_w))
    else:
        # Landscape or square: pin the height instead.
        new_h = target_h
        new_w = int(in_w * (target_h / in_h))
    return cv.resize(image, (new_w, new_h), interpolation=cv.INTER_CUBIC)
def npy_converter(image_path, image_height, image_width, output_path):
    """Load a JPEG, fit it to (image_height, image_width), save as .npy RGB."""
    # OpenCV loads in BGR channel order.
    picture = cv.imread(image_path)
    # Scale so the smaller side matches its target, then crop the center to
    # the exact output size.
    picture = resize_maintain_aspect(picture, image_height, image_width)
    picture = center_crop(picture, image_height, image_width)
    # Convert to the conventional RGB order before persisting.
    picture = cv.cvtColor(picture, cv.COLOR_BGR2RGB)
    np.save(output_path, picture, allow_pickle=True)
IMAGES_PATH = 'images/'
# Train/test manifests; each row carries an `image_id` column.
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
def get_image_path(filename):
    # Map an image id to its JPEG path under IMAGES_PATH.
    return (IMAGES_PATH + filename + '.jpg')
df_train['image_path'] = df_train['image_id'].apply(get_image_path)
df_test['image_path'] = df_test['image_id'].apply(get_image_path)
# One-hot label columns span 'healthy' .. 'scab' in the train CSV —
# presumably the plant-pathology class columns; confirm against the data.
train_labels = df_train.loc[:, 'healthy':'scab']
train_paths = df_train.image_path
test_paths = df_test.image_path
# Convert every *test* image to a 512x512 RGB .npy file under images_npy/.
for filename in df_test.image_id:
    npy_converter(IMAGES_PATH + filename + '.jpg', 512, 512, 'images_npy/' + filename + '.npy')
|
[
"lenam.fithcmus@gmail.com"
] |
lenam.fithcmus@gmail.com
|
9ac298fb3e0f4e14401c2b75d4e859bc33eb3bd0
|
6b99473f9ba16700d0e86aaa8e5f109a28d1e976
|
/Cryptography/Cryptography-1/Materials/caesar_encrypt.py
|
6a56c9ec31a0c331b0cb43f26a2dce2a68ae1db1
|
[] |
no_license
|
kcwong395/CyberSecMaterial
|
190988ca26f04d365c24d49e2a014b00a6086410
|
cb974f7a837a6103b4344c6c5e50d26ddf1c1752
|
refs/heads/master
| 2020-07-26T07:09:16.877651
| 2020-01-03T14:52:53
| 2020-01-03T14:52:53
| 208,573,409
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
# This function intends to encrypt plaintext using Caesar Cipher
def CaesarEncrypt():
    """Prompt for a message and a shift key, return the Caesar ciphertext.

    Letters are rotated within their own case; all other characters pass
    through unchanged.
    """
    plainText = input("Plaintext: ")
    # Convert the key once up front: fails fast on non-numeric input instead
    # of failing on the first letter (the original called int(key) per char).
    key = int(input("Key: "))
    encrypted = []
    for letter in plainText:
        if 'a' <= letter <= 'z':
            # ord(letter) - ord('a') gives the index in alphabetical sequence
            # + key gives the encrypted index (mod 26 wraps around)
            letter = chr((ord(letter) - ord('a') + key) % 26 + ord('a'))
        elif 'A' <= letter <= 'Z':
            letter = chr((ord(letter) - ord('A') + key) % 26 + ord('A'))
        encrypted.append(letter)
    # join() avoids the quadratic string `+=` accumulation of the original.
    return ''.join(encrypted)
# Run forever: encrypt one message per prompt until interrupted (Ctrl-C).
while True:
    print("Ciphertext: " + CaesarEncrypt() + '\n')
|
[
"wl01377870@gmail.com"
] |
wl01377870@gmail.com
|
12e4828e3b5e912252a0998ff78bb73a30201e47
|
8d902f52d27bc433534c27ae2c83fa73d5148cf4
|
/blog_api/articles/migrations/0002_auto_20201005_1255.py
|
492f0a90eff41602e26bd1d40ddb86e6a3a3ccf3
|
[] |
no_license
|
AleksanderRadziszewski/zadanko_api_mrx
|
f39d3f8c3169b683809153fa331aeddcac232ebd
|
1197c6533904509467429c3fdf764ae5dadc1f2b
|
refs/heads/master
| 2023-01-22T13:53:15.384122
| 2020-11-20T09:48:24
| 2020-11-20T09:48:24
| 301,105,359
| 0
| 0
| null | 2020-11-16T15:55:34
| 2020-10-04T10:53:44
|
Python
|
UTF-8
|
Python
| false
| false
| 929
|
py
|
# Generated by Django 2.2.6 on 2020-10-05 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Recreates the article table under the conventional name `Article` and
    # drops the old `articles` model in the same migration.
    # NOTE(review): CreateModel + DeleteModel does NOT carry rows over from
    # the old table — confirm `migrations.RenameModel` wasn't intended.

    dependencies = [
        ('articles', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('body', models.TextField()),
                # NOTE(review): `created` uses auto_now (refreshed on every
                # save), identical to `modified` — auto_now_add was probably
                # intended for a creation timestamp; confirm before changing.
                ('created', models.DateTimeField(auto_now=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('pub_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('comments_count', models.IntegerField(null=True)),
            ],
        ),
        migrations.DeleteModel(
            name='articles',
        ),
    ]
|
[
"radziszewski.aleksander@gmail.com"
] |
radziszewski.aleksander@gmail.com
|
668a774ae59db7287f12b14d726b4dfd98b97063
|
6b757245104fc83aec31d87a07a40b1fa75648ad
|
/spec/lab_results.spec
|
940b4ed432dc49e9e50f479d22626a95923a7a54
|
[] |
no_license
|
BaobabHealthTrust/module_chronic_care
|
d8a74edb3aa30822986217300aca747f884b98e0
|
b4652c9dd1ed9f8ebdba25783f3e99883dfd2b55
|
refs/heads/master
| 2021-01-04T14:18:56.341563
| 2015-06-10T11:32:21
| 2015-06-10T11:32:21
| 9,372,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
spec
|
P.1. LAB RESULTS [program: CHRONIC CARE MODULE, label: Lab Results]
C.1.1. For all patients capture the following lab results
Q.1.1.1. Test Type [pos: 1, concept: Test Type, field_type: number]
O.1.1.1.1. FASTING BLOOD SUGAR
O.1.1.1.2. NON-FASTING BLOOD SUGAR
O.1.1.1.3. RANDOM BLOOD SUGAR
O.1.1.1.4. CHOLESTEROL FASTING
O.1.1.1.5. CHOLESTEROL NOT FASTING
Q.1.1.2. Height [pos: 2, concept: Height, field_type: number]
Q.1.1.3. Waist circumference (in cm) [pos: 3, concept: Waist circumference (in cm), field_type: number]
Q.1.1.4. Systolic blood pressure [pos: 4, concept: Systolic blood pressure, field_type: number]
Q.1.1.5. Diastolic blood pressure [pos: 5, concept: Diastolic blood pressure, field_type: number]
Q.1.1.6. Respiratory rate [pos: 6, concept: Respiratory rate, field_type: number]
Q.1.1.7. Pulse rate [pos: 7, concept: Pulse rate, field_type: number]
Q.1.1.8. Oxygen saturation [pos: 8, concept: Oxygen saturation, field_type: number]
Q.1.1.9. Temperature [pos: 9, concept: Temperature, field_type: number]
|
[
"F88kavutausiwa@gmail.com"
] |
F88kavutausiwa@gmail.com
|
c59860e2c00c47a8509ad358429bc68d5820ac99
|
555e43e55bf51273cf413fe828a5913a44ab1878
|
/Aws boto/Boto_scripts/s3file.py
|
a0045d276a795608dc77d29aded02b19effb9445
|
[] |
no_license
|
sagarkites/Devops
|
24ea85d184a4c51bebb490575e9d10163a149471
|
2b58206cabb8f55a0ff69325fa424ff9c480565f
|
refs/heads/master
| 2020-04-12T07:48:30.406785
| 2019-04-09T11:53:05
| 2019-04-09T11:53:05
| 130,677,714
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
import boto3
# Downloading a file from S3: download_file(bucket, key, local_path).
s3 = boto3.client('s3')
# NOTE(review): boto3's download_file returns None, so the print below always
# shows "None" — confirm whether logging the destination path was intended.
s3_get = s3.download_file('jiraya','Bucket.py', '/Users/pavanscott/Downloads/script.py')
print(s3_get)
|
[
"vidyasagarchintaluri@gmail.com"
] |
vidyasagarchintaluri@gmail.com
|
0dcac4aca6617a9ef7da23c13d41c070f15d4041
|
3d9689945a2b40f4ffc975e19eb21a68aa1e12e3
|
/homepage/migrations/0001_initial.py
|
86c909114bf4830e62b7d346987f2bf770c291bf
|
[] |
no_license
|
mjdemory/RoastBoast_assessment
|
45adcb8715435dd2e8220e51c5e377a443faefa2
|
2d3d8b595c3b9ef52328ee6a4a762e0de2d42ff1
|
refs/heads/master
| 2022-12-01T18:11:52.871404
| 2020-08-21T20:39:18
| 2020-08-21T20:39:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
# Generated by Django 3.1 on 2020-08-21 15:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the homepage app: creates the table backing
    # RoastBoastModel — a tweet-length post marked as a Boast (True) or a
    # Roast (False), with simple up/down vote counters.

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='RoastBoastModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 280 characters mirrors a tweet-length post.
                ('body', models.CharField(max_length=280)),
                ('choices', models.BooleanField(choices=[(True, 'Boast'), (False, 'Roast')], default=True)),
                ('upvote', models.IntegerField(default=0)),
                ('downvote', models.IntegerField(default=0)),
            ],
        ),
    ]
|
[
"mjdemory2891@gmail.com"
] |
mjdemory2891@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.