blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
334d33f29abda1bd5ee1f296500d953a5ea85a6a
|
9b5717d1c602c07a4c16c7fd653834c15dffee2c
|
/setup.py
|
e25540f06d637ccf4ba7a0e5ba09ac2440460ca3
|
[
"MIT"
] |
permissive
|
CoAxLab/infomercial
|
5ee5d4093ec0d40eaafd2c81078664f6e0998f22
|
fa5d1c1e5c1351735dda2961a2a94f71cd17e270
|
refs/heads/master
| 2021-12-17T16:05:35.659765
| 2020-11-30T20:27:38
| 2020-11-30T20:27:38
| 153,649,059
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
from setuptools import setup
# All installable scripts follow the same "<name>_bandit.py" pattern.
_BANDIT_SCRIPTS = [
    'infomercial/exp/%s_bandit.py' % name
    for name in ('meta', 'softmeta', 'epsilon', 'beta', 'softbeta',
                 'random', 'count', 'entropy', 'tune', 'curiosity')
]
setup(
    name='infomercial',
    version='0.0.1',
    description="Agents who seek information.",
    url='',
    author='Erik J. Peterson',
    author_email='erik.exists@gmail.com',
    license='MIT',
    packages=['infomercial'],
    scripts=_BANDIT_SCRIPTS,
    zip_safe=False,
)
|
[
"Erik.Exists@gmail.com"
] |
Erik.Exists@gmail.com
|
cf082048b760e33c9e786bc8f9932ce082143d19
|
02c75658bcc69106b8d52472819bba4a860ea7c7
|
/src/common/__init__.py
|
d4f709d659e989789c7a059820df111000039332
|
[] |
no_license
|
neilrussell6/tezos-boilerplate-ligo-pytezos
|
38055271dfb6239b483a76dbcb8569b1f05fd475
|
30fefb8df3f89f8c8364c4fb4f8446aac7ba69af
|
refs/heads/master
| 2022-12-11T12:07:03.289420
| 2020-01-12T15:16:42
| 2020-01-12T15:16:42
| 233,399,217
| 1
| 0
| null | 2022-12-08T03:27:31
| 2020-01-12T13:45:37
|
Python
|
UTF-8
|
Python
| false
| false
| 21
|
py
|
"""Common modules"""
|
[
"neilrussell6@gmail.com"
] |
neilrussell6@gmail.com
|
5c31dc517afe0642d9a7dc60e5f0f96ac2a0f7e4
|
9b3eba85673e909b751458e0bc00d8181f373d0d
|
/Utilities/levelEditor/CommonUtilities.py
|
b37d2e370c38ae3ed7b7cd3afb28ede87a8d1e34
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
alexiob/Be2
|
b9b1f6bd8b463f8d2ba0054a68d02c60ab2c652f
|
ec048549b4ca2a94b35d8e6274ddda70d2149f3d
|
refs/heads/master
| 2021-01-19T17:59:23.190723
| 2019-10-07T11:31:10
| 2019-10-07T11:31:10
| 8,890,075
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
from PyQt4 import QtCore, QtGui, Qt
def strToQColor (s):
    """Parse an "r,g,b" string into a QtGui.QColor; empty input yields white."""
    text = str (s)
    if text:
        channels = [int (chunk.strip ()) for chunk in text.strip ().split (',')]
    else:
        # Empty string: default to white.
        channels = [255, 255, 255]
    return QtGui.QColor (channels[0], channels[1], channels[2])
def qColorToStr (c):
    """Serialize a color object to the "r,g,b" string form used by strToQColor."""
    rgb = (c.red (), c.green (), c.blue ())
    return "%d,%d,%d" % rgb
def dict2lua (d):
    """Serialize a flat dict to a Lua table literal.

    Strings are quoted; every other value is rendered as a float with one
    decimal place. NOTE: Python 2 only (iteritems/basestring).
    """
    entries = ''
    for key, value in d.iteritems ():
        if isinstance (value, basestring):
            entries += '%s="%s";' % (key, value)
        else:
            entries += '%s=%.1f;' % (key, value)
    return '{%s}' % entries
|
[
"alessandro.iob@gmail.com"
] |
alessandro.iob@gmail.com
|
d2e5450b759afb2a0f146834c9e6ea9539c4931b
|
e9a651c8928c48c3d8fa17f356204e14bda0e1a0
|
/src/pythermalcomfort/utilities.py
|
49465f35ab820be4b860ce7c9a85a19a40570e5d
|
[
"MIT"
] |
permissive
|
ThreePointSquare/pythermalcomfort
|
234aa46ba5eac6f6e07ed40e90dcbf129bc448ab
|
cfb6daf3203deb67d39a1b55cd036f677533aa1c
|
refs/heads/master
| 2023-08-09T15:24:41.316497
| 2021-09-08T04:00:25
| 2021-09-08T04:00:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,604
|
py
|
import warnings
import math
from pythermalcomfort.psychrometrics import p_sat
warnings.simplefilter("always")
def transpose_sharp_altitude(sharp, altitude):
    """Transpose the solar horizontal angle (SHARP) and the solar altitude.

    Maps the pair (sharp, altitude), both in degrees, into the rotated frame
    used by the solar-gain model and returns the two transposed angles,
    each rounded to three decimals.
    """
    # New altitude: project the |sharp - 90| offset onto the altitude circle.
    new_altitude = math.degrees(
        math.asin(
            math.sin(math.radians(abs(sharp - 90))) * math.cos(math.radians(altitude))
        )
    )
    # New SHARP: rotate the horizontal angle by the co-altitude (90 - altitude).
    new_sharp = math.degrees(
        math.atan(math.sin(math.radians(sharp)) * math.tan(math.radians(90 - altitude)))
    )
    return round(new_sharp, 3), round(new_altitude, 3)
def check_standard_compliance(standard, **kwargs):
    """Emit a UserWarning for every input outside the applicability limits of
    the selected thermal-comfort standard.

    Parameters
    ----------
    standard : str
        one of "utci", "ankle_draft", "ashrae", "fan_heatwaves", "iso",
        "ISO7933"; selects which set of applicability limits is checked
    **kwargs
        model inputs to validate, e.g. tdb, tr, v, vr, met, clo, rh, v_limited

    Raises
    ------
    ValueError
        for standard "ashrae" when v_limited exceeds 0.2 m/s (the limited-air-
        speed equation is not applicable above that)
    """
    params = dict()
    params["standard"] = standard
    for key, value in kwargs.items():
        params[key] = value
    if params["standard"] == "utci":
        for key, value in params.items():
            if key == "v" and (value > 17 or value < 0.5):
                warnings.warn(
                    "UTCI wind speed applicability limits between 0.5 and 17 m/s",
                    UserWarning,
                )
    if params["standard"] == "ankle_draft":
        for key, value in params.items():
            if key == "met" and value > 1.3:
                warnings.warn(
                    "The ankle draft model is only valid for met <= 1.3",
                    UserWarning,
                )
            if key == "clo" and value > 0.7:
                warnings.warn(
                    "The ankle draft model is only valid for clo <= 0.7",
                    UserWarning,
                )
    elif params["standard"] == "ashrae":  # based on table 7.3.4 ashrae 55 2017
        for key, value in params.items():
            if key in ["tdb", "tr"]:
                if key == "tdb":
                    parameter = "dry-bulb"
                else:
                    parameter = "mean radiant"
                if value > 40 or value < 10:
                    warnings.warn(
                        f"ASHRAE {parameter} temperature applicability limits between 10 and 40 °C",
                        UserWarning,
                    )
            if key in ["v", "vr"] and (value > 2 or value < 0):
                warnings.warn(
                    "ASHRAE air speed applicability limits between 0 and 2 m/s",
                    UserWarning,
                )
            if key == "met" and (value > 2 or value < 1):
                warnings.warn(
                    "ASHRAE met applicability limits between 1.0 and 2.0 met",
                    UserWarning,
                )
            if key == "clo" and (value > 1.5 or value < 0):
                warnings.warn(
                    "ASHRAE clo applicability limits between 0.0 and 1.5 clo",
                    UserWarning,
                )
            if key == "v_limited" and value > 0.2:
                raise ValueError(
                    "This equation is only applicable for air speed lower than 0.2 m/s"
                )
    elif params["standard"] == "fan_heatwaves":  # based on table 7.3.4 ashrae 55 2017
        for key, value in params.items():
            if key in ["tdb", "tr"]:
                if key == "tdb":
                    parameter = "dry-bulb"
                else:
                    parameter = "mean radiant"
                if value > 50 or value < 30:
                    warnings.warn(
                        f"{parameter} temperature applicability limits between 30 and 50 °C",
                        UserWarning,
                    )
            if key in ["v", "vr"] and (value > 4.5 or value < 0.1):
                # FIX: the message previously said "between 0.4 and 4.5 m/s"
                # while the check uses a lower bound of 0.1 m/s; the message
                # now matches the condition. TODO confirm the intended bound.
                warnings.warn(
                    "Air speed applicability limits between 0.1 and 4.5 m/s",
                    UserWarning,
                )
            if key == "met" and (value > 2 or value < 0.7):
                warnings.warn(
                    "Met applicability limits between 0.7 and 2.0 met",
                    UserWarning,
                )
            if key == "clo" and (value > 1.0 or value < 0):
                warnings.warn(
                    "Clo applicability limits between 0.0 and 1.0 clo",
                    UserWarning,
                )
    elif params["standard"] == "iso":  # based on ISO 7730:2005 page 3
        for key, value in params.items():
            if key == "tdb" and (value > 30 or value < 10):
                warnings.warn(
                    "ISO air temperature applicability limits between 10 and 30 °C",
                    UserWarning,
                )
            if key == "tr" and (value > 40 or value < 10):
                warnings.warn(
                    "ISO mean radiant temperature applicability limits between 10 and 40 °C",
                    UserWarning,
                )
            if key in ["v", "vr"] and (value > 1 or value < 0):
                warnings.warn(
                    "ISO air speed applicability limits between 0 and 1 m/s",
                    UserWarning,
                )
            if key == "met" and (value > 4 or value < 0.8):
                warnings.warn(
                    "ISO met applicability limits between 0.8 and 4.0 met",
                    UserWarning,
                )
            if key == "clo" and (value > 2 or value < 0):
                warnings.warn(
                    "ISO clo applicability limits between 0.0 and 2 clo",
                    UserWarning,
                )
    elif params["standard"] == "ISO7933":  # based on ISO 7933:2004 Annex A
        if params["tdb"] > 50 or params["tdb"] < 15:
            warnings.warn(
                "ISO 7933:2004 air temperature applicability limits between 15 and 50 °C",
                UserWarning,
            )
        # Partial vapor pressure in kPa: p_sat [Pa] / 1000 * rh [%] / 100.
        p_a = p_sat(params["tdb"]) / 1000 * params["rh"] / 100
        # rh at which p_a reaches the 4.5 kPa limit (inverse of the formula above).
        rh_max = 4.5 * 100 * 1000 / p_sat(params["tdb"])
        if p_a > 4.5 or p_a < 0:
            # FIX: this warning previously repeated the "t_r - t_db" text of the
            # next check (copy-paste); it actually validates relative humidity.
            warnings.warn(
                f"ISO 7933:2004 rh applicability limits between 0 and {rh_max} %",
                UserWarning,
            )
        # NOTE(review): condition uses 50 °C but the message says 60 °C —
        # inconsistent; left unchanged pending confirmation against the standard.
        if params["tr"] - params["tdb"] > 50 or params["tr"] - params["tdb"] < 0:
            warnings.warn(
                "ISO 7933:2004 t_r - t_db applicability limits between 0 and 60 °C",
                UserWarning,
            )
        if params["v"] > 3 or params["v"] < 0:
            warnings.warn(
                "ISO 7933:2004 air speed applicability limits between 0 and 3 m/s",
                UserWarning,
            )
        if params["met"] > 450 or params["met"] < 100:
            warnings.warn(
                "ISO 7933:2004 met applicability limits between 100 and 450 met",
                UserWarning,
            )
        if params["clo"] > 1 or params["clo"] < 0.1:
            warnings.warn(
                "ISO 7933:2004 clo applicability limits between 0.1 and 1 clo",
                UserWarning,
            )
def body_surface_area(weight, height, formula="dubois"):
    """
    Returns the body surface area in square meters.
    Parameters
    ----------
    weight : float
        body weight, [kg]
    height : float
        height, [m]
    formula : {"dubois"}, default="dubois"
        formula used to calculate the body surface area
    Returns
    -------
    body_surface_area : float
        body surface area, [m2]
    Raises
    ------
    ValueError
        if formula is not one of the supported options (the previous
        implementation silently returned None in that case)
    """
    if formula == "dubois":
        return 0.202 * (weight ** 0.425) * (height ** 0.725)
    raise ValueError(f"formula '{formula}' is not supported")
def f_svv(w, h, d):
    """Calculates the sky-vault view fraction

    Parameters
    ----------
    w : float
        width of the window, [m]
    h : float
        height of the window, [m]
    d : float
        distance between the occupant and the window, [m]

    Returns
    -------
    f_svv : float
        sky-vault view fraction ranges between 0 and 1
    """
    # Half-angles (in degrees) subtended by the window at distance d.
    height_angle = math.degrees(math.atan(h / (2 * d)))
    width_angle = math.degrees(math.atan(w / (2 * d)))
    return height_angle * width_angle / 16200
def v_relative(v, met):
    """Estimates the relative air speed which combines the average air speed of
    the space plus the relative air speed caused by the body movement. Vag is
    assumed to be 0 for metabolic rates equal and lower than 1 met and
    otherwise equal to Vag = 0.3 (M - 1) (m/s)

    Parameters
    ----------
    v : float
        air speed measured by the sensor, [m/s]
    met : float
        metabolic rate, [met]

    Returns
    -------
    vr : float
        relative air speed, [m/s]
    """
    # At or below 1 met the body-movement contribution is zero.
    if met <= 1:
        return v
    return round(v + 0.3 * (met - 1), 3)
def clo_dynamic(clo, met, standard="ASHRAE"):
    """Estimates the dynamic clothing insulation of a moving occupant.

    Activity and air speed modify the insulation characteristics of the
    clothing and the adjacent air layer, so ISO 7730 states the clothing
    insulation shall be corrected [2]_. ASHRAE 55 corrects for body movement
    for met >= 1.2 using clo = Icl x (0.6 + 0.4/met).

    Parameters
    ----------
    clo : float
        clothing insulation, [clo]
    met : float
        metabolic rate, [met]
    standard: str (default="ASHRAE")
        - If "ASHRAE", uses Equation provided in Section 5.2.2.2 of ASHRAE 55 2017

    Returns
    -------
    clo : float
        dynamic clothing insulation, [clo]

    Raises
    ------
    ValueError
        if standard is anything other than "ASHRAE" (case-insensitive)
    """
    if standard.lower() not in ["ashrae"]:
        raise ValueError(
            "clo dynamic calculation can only be performed in compliance ASHRAE Standard"
        )
    # Below 1.2 met no correction is applied.
    return round(clo * (0.6 + 0.4 / met), 3) if met > 1.2 else clo
def running_mean_outdoor_temperature(temp_array, alpha=0.8, units="SI"):
    r"""Estimates the running mean temperature also known as prevailing mean
    outdoor temperature.

    Parameters
    ----------
    temp_array: list
        array containing the mean daily temperature in descending order (i.e. from
        newest/yesterday to oldest) :math:`[\Theta_{day-1}, \Theta_{day-2}, \dots ,
        \Theta_{day-n}]`.
        Where :math:`\Theta_{day-1}` is yesterday's daily mean temperature. The EN
        16798-1 2019 [3]_ states that n should be equal to 7
    alpha : float
        constant between 0 and 1. The EN 16798-1 2019 [3]_ recommends a value of 0.8,
        while the ASHRAE 55 2017 recommends to choose values between 0.9 and 0.6,
        corresponding to a slow- and fast- response running mean, respectively.
    units: str default="SI"
        select the SI (International System of Units) or the IP (Imperial Units) system.

    Returns
    -------
    t_rm : float
        running mean outdoor temperature

    Notes
    -----
    The docstring is a raw string because of the ``\Theta`` LaTeX escapes,
    which were previously invalid escape sequences.
    """
    # Work on a copy: the previous implementation converted IP inputs
    # in place, mutating the caller's list.
    temps = list(temp_array)
    if units.lower() == "ip":
        temps = [units_converter(tdb=t)[0] for t in temps]
    # Exponentially decaying weights: newest day gets weight 1, then alpha, ...
    coeff = [alpha ** ix for ix in range(len(temps))]
    t_rm = sum(c * t for c, t in zip(coeff, temps)) / sum(coeff)
    if units.lower() == "ip":
        t_rm = units_converter(tmp=t_rm, from_units="si")[0]
    return round(t_rm, 1)
def units_converter(from_units="ip", **kwargs):
    """Converts IP values to SI units (or SI to IP when from_units="si").

    Parameters
    ----------
    from_units: str
        specify system to convert from ("ip" or "si")
    **kwargs : [t, v]
        values to convert; recognized keys are temperatures (any key
        containing "tmp", plus "tr"/"tdb"), speeds ("v"/"vr"/"vel"),
        "area" and "pressure"

    Returns
    -------
    converted values, in kwargs order
    """
    results = list()
    if from_units not in ("ip", "si"):
        # Unknown unit system: nothing is converted (matches original behavior).
        return results
    ip_to_si = from_units == "ip"
    for key, value in kwargs.items():
        if "tmp" in key or key in ("tr", "tdb"):
            results.append((value - 32) * 5 / 9 if ip_to_si else (value * 9 / 5) + 32)
        if key in ["v", "vr", "vel"]:
            results.append(value / 3.281 if ip_to_si else value * 3.281)
        if key == "area":
            results.append(value / 10.764 if ip_to_si else value * 10.764)
        if key == "pressure":
            results.append(value * 101325 if ip_to_si else value / 101325)
    return results
#: This dictionary contains the met values (metabolic rates) of typical tasks, [met].
met_typical_tasks = {
    "Sleeping": 0.7,
    "Reclining": 0.8,
    "Seated, quiet": 1.0,
    "Reading, seated": 1.0,
    "Writing": 1.0,
    "Typing": 1.1,
    "Standing, relaxed": 1.2,
    "Filing, seated": 1.2,
    "Flying aircraft, routine": 1.2,
    "Filing, standing": 1.4,
    "Driving a car": 1.5,
    "Walking about": 1.7,
    "Cooking": 1.8,
    "Table sawing": 1.8,
    "Walking 2mph (3.2kmh)": 2.0,
    "Lifting/packing": 2.1,
    "Seated, heavy limb movement": 2.2,
    "Light machine work": 2.2,
    "Flying aircraft, combat": 2.4,
    "Walking 3mph (4.8kmh)": 2.6,
    "House cleaning": 2.7,
    "Driving, heavy vehicle": 3.2,
    "Dancing": 3.4,
    "Calisthenics": 3.5,
    "Walking 4mph (6.4kmh)": 3.8,
    "Tennis": 3.8,
    "Heavy machine work": 4.0,
    "Handling 100lb (45 kg) bags": 4.0,
    "Pick and shovel work": 4.4,
    "Basketball": 6.3,
    "Wrestling": 7.8,
}
#: This dictionary contains the total clothing insulation of typical ensembles, [clo].
clo_typical_ensembles = {
    "Walking shorts, short-sleeve shirt": 0.36,
    "Typical summer indoor clothing": 0.5,
    "Knee-length skirt, short-sleeve shirt, sandals, underwear": 0.54,
    "Trousers, short-sleeve shirt, socks, shoes, underwear": 0.57,
    "Trousers, long-sleeve shirt": 0.61,
    "Knee-length skirt, long-sleeve shirt, full slip": 0.67,
    "Sweat pants, long-sleeve sweatshirt": 0.74,
    "Jacket, Trousers, long-sleeve shirt": 0.96,
    "Typical winter indoor clothing": 1.0,
}
# This dictionary contains the clo values of individual clothing elements, [clo].
# To calculate the total clothing insulation you need to add these values together.
clo_individual_garments = {
    "Metal chair": 0.00,
    "Bra": 0.01,
    "Wooden stool": 0.01,
    "Ankle socks": 0.02,
    "Shoes or sandals": 0.02,
    "Slippers": 0.03,
    "Panty hose": 0.02,
    "Calf length socks": 0.03,
    "Women's underwear": 0.03,
    "Men's underwear": 0.04,
    "Knee socks (thick)": 0.06,
    "Short shorts": 0.06,
    "Walking shorts": 0.08,
    "T-shirt": 0.08,
    "Standard office chair": 0.10,
    "Executive chair": 0.15,
    "Boots": 0.1,
    "Sleeveless scoop-neck blouse": 0.12,
    "Half slip": 0.14,
    "Long underwear bottoms": 0.15,
    "Full slip": 0.16,
    "Short-sleeve knit shirt": 0.17,
    "Sleeveless vest (thin)": 0.1,
    "Sleeveless vest (thick)": 0.17,
    "Sleeveless short gown (thin)": 0.18,
    "Short-sleeve dress shirt": 0.19,
    "Sleeveless long gown (thin)": 0.2,
    "Long underwear top": 0.2,
    "Thick skirt": 0.23,
    "Long-sleeve dress shirt": 0.25,
    "Long-sleeve flannel shirt": 0.34,
    "Long-sleeve sweat shirt": 0.34,
    "Short-sleeve hospital gown": 0.31,
    "Short-sleeve short robe (thin)": 0.34,
    "Short-sleeve pajamas": 0.42,
    "Long-sleeve long gown": 0.46,
    "Long-sleeve short wrap robe (thick)": 0.48,
    "Long-sleeve pajamas (thick)": 0.57,
    "Long-sleeve long wrap robe (thick)": 0.69,
    "Thin trousers": 0.15,
    "Thick trousers": 0.24,
    "Sweatpants": 0.28,
    "Overalls": 0.30,
    "Coveralls": 0.49,
    "Thin skirt": 0.14,
    "Long-sleeve shirt dress (thin)": 0.33,
    "Long-sleeve shirt dress (thick)": 0.47,
    "Short-sleeve shirt dress": 0.29,
    "Sleeveless, scoop-neck shirt (thin)": 0.23,
    "Sleeveless, scoop-neck shirt (thick)": 0.27,
    "Long sleeve shirt (thin)": 0.25,
    "Long sleeve shirt (thick)": 0.36,
    "Single-breasted coat (thin)": 0.36,
    "Single-breasted coat (thick)": 0.44,
    "Double-breasted coat (thin)": 0.42,
    "Double-breasted coat (thick)": 0.48,
}
# This dictionary contains the reflection coefficients, Fr, for different special materials
f_r_garments = {
    "Cotton with aluminium paint": 0.42,
    "Viscose with glossy aluminium foil": 0.19,
    "Aramid (Kevlar) with glossy aluminium foil": 0.14,
    "Wool with glossy aluminium foil": 0.12,
    "Cotton with glossy aluminium foil": 0.04,
    "Viscose vacuum metallized with aluminium": 0.06,
    "Aramid vacuum metallized with aluminium": 0.04,
    "Wool vacuum metallized with aluminium": 0.05,
    "Cotton vacuum metallized with aluminium": 0.05,
    "Glass fiber vacuum metallized with aluminium": 0.07,
}
|
[
"federicotartarini@gmail.com"
] |
federicotartarini@gmail.com
|
37b6124b451232c7f1d6b4aa9e3649b286ff3509
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_wormed.py
|
4e52f28f4bef52a941d111e3e9c95013812986cf
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
from xai.brain.wordbase.nouns._worm import _WORM
# class header (auto-generated wordbase entry)
class _WORMED(_WORM, ):
    """Noun wordbase entry for "wormed"; derives its data from _WORM."""
    def __init__(self,):
        _WORM.__init__(self)
        # Identify this entry and link it back to its base word.
        self.specie = 'nouns'
        self.name = "WORMED"
        self.basic = "worm"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
11b26922faf03f351820b5636ba14c73dcbb73bd
|
22370508f11e4284b9eddc48cdccbdb751f0e813
|
/biuandfriends/biuandfriends/settings.py
|
a00393522920785acd174e3870e16993261924c4
|
[] |
no_license
|
another-godel/biuandfriends
|
4c6136a4ead878a9663b2e638cfae9dd13818e90
|
6128796cbbafe7ea48c13ba4560c25d5b7e7b120
|
refs/heads/main
| 2023-01-23T14:35:39.078294
| 2020-12-03T15:41:38
| 2020-12-03T15:41:38
| 318,116,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,141
|
py
|
"""
Django settings for biuandfriends project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=@dilqlz$t$@!q3tewr^*ril(u86g_kz4%-v-iu-bddy99_*ne'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'conchobiu.apps.ConchobiuConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'biuandfriends.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR,'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'biuandfriends.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"75416397+Hinnnnn@users.noreply.github.com"
] |
75416397+Hinnnnn@users.noreply.github.com
|
275844e203a0f295ceb5d63ea59ea2fe191a39a0
|
6760a7aaf7148c48902b2705a1c9158481166237
|
/test_cv/canny_detection_test.py
|
611fe17ee4d5c165b94daf728d6241a5f820e9ce
|
[] |
no_license
|
LockVer/opencv-singleEyeDetection-graduationDesign
|
42a38cb866bf67ce7ad94ed2c91da09c38e820f7
|
0e3c3d7db57eefd25a27ec8d0645b506497c28e3
|
refs/heads/master
| 2023-04-17T05:19:21.270623
| 2021-05-05T12:14:15
| 2021-05-05T12:14:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
"""
其中mask的掩膜操作需要np.uint8八位无符号整型
掩膜操作只分是不是0
1和255对掩膜操作没有区别
要形成彩色的canny
就只是通过掩膜把原图的颜色保存下来了
"""
import cv2 as cv
import numpy as np
def edge_demo(image):
    """Show Canny edges of *image*, plus a colored version obtained by
    masking the original image with those edges."""
    # Light Gaussian blur to suppress noise before gradient computation.
    smoothed = cv.GaussianBlur(image, (3, 3), 0)
    gray = cv.cvtColor(smoothed, cv.COLOR_BGR2GRAY)
    # 16-bit signed Sobel gradients along x and y.
    grad_x = cv.Sobel(gray, cv.CV_16SC1, 1, 0)
    grad_y = cv.Sobel(gray, cv.CV_16SC1, 0, 1)
    # Canny overload that takes precomputed gradients.
    edge = cv.Canny(grad_x, grad_y, 50, 150)
    print(edge.shape)
    cv.imshow("Canny_edge", edge)
    # bitwise_and here is really a mask operation: it keeps the original
    # colors wherever the edge mask is non-zero.
    dst = cv.bitwise_and(image, image, mask=edge)
    cv.imshow("Canny_edge_colored", dst)
# Hard-coded test image path; adjust to a local image before running.
src = cv.imread("D:/studyfuckinghard/graduationcv/imgtest/brz1.jpg")
cv.imshow("input", src)
edge_demo(src)
# Block until a key is pressed, then tear down all OpenCV windows.
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"noreply@github.com"
] |
LockVer.noreply@github.com
|
0b8f8cc4368270c7c91f001a5ef3587a49f97805
|
d3562041939882f51bbff2505ad1792456638d28
|
/personal/Nwoo/Side_Project/Gesture_Recognition/main_work/Final Work.py
|
802c82a6ad744ec8e171d8c8b3dd086a6aa06a8f
|
[] |
no_license
|
h0han/2021_UGRP_
|
f4e69b51f3a8f4f7e0099ba3a91957b956e94a1d
|
ec7cbd2556869fd0f3f3c645311e107f37b2afe7
|
refs/heads/master
| 2023-03-01T23:20:07.708637
| 2021-02-04T02:08:48
| 2021-02-04T02:08:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,949
|
py
|
import easydict
import cv2
import numpy as np
from tf_pose.networks import get_graph_path, model_wh
import os
import time
import logging
import math
import slidingwindow as sw
import tensorflow as tf
from tf_pose import common
from tf_pose.common import CocoPart
from tf_pose.tensblur.smoother import Smoother
from PIL import ImageFont, ImageDraw, Image
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import load_model
# Load the pre-trained Keras CNN gesture-classification model (TF1 format).
model = load_model("CNN_best_model_91_tf1.h5")
# pafprocess is a compiled C++ extension; fail fast with build instructions
# when it is missing.
try:
    from tf_pose.pafprocess import pafprocess
except ModuleNotFoundError as e:
    print(e)
    print(
        'you need to build c++ library for pafprocess. See : https://github.com/ildoonet/tf-pose-estimation/tree/master/tf_pose/pafprocess')
    exit(-1)
# Module-level logger with a timestamped console handler.
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def _round(v):
return int(round(v))
def _include_part(part_list, part_idx):
for part in part_list:
if part_idx == part.part_idx:
return True, part
return False, None
class Human:
    """A single detected person: the set of connected body-part keypoints.

    body_parts: dict mapping part_idx -> BodyPart (the original note said
    "list of BodyPart", but the code stores a dict keyed by part index)
    """
    __slots__ = ('body_parts', 'pairs', 'uidx_list', 'score')
    def __init__(self, pairs):
        self.pairs = []
        self.uidx_list = set()
        self.body_parts = {}
        for pair in pairs:
            self.add_pair(pair)
        self.score = 0.0
    @staticmethod
    def _get_uidx(part_idx, idx):
        # Unique id string for a (part, peak-index) combination, e.g. "3-0".
        return '%d-%d' % (part_idx, idx)
    def add_pair(self, pair):
        """Register a limb pair and both of its endpoint BodyParts."""
        self.pairs.append(pair)
        self.body_parts[pair.part_idx1] = BodyPart(Human._get_uidx(pair.part_idx1, pair.idx1),
                                                   pair.part_idx1,
                                                   pair.coord1[0], pair.coord1[1], pair.score)
        self.body_parts[pair.part_idx2] = BodyPart(Human._get_uidx(pair.part_idx2, pair.idx2),
                                                   pair.part_idx2,
                                                   pair.coord2[0], pair.coord2[1], pair.score)
        self.uidx_list.add(Human._get_uidx(pair.part_idx1, pair.idx1))
        self.uidx_list.add(Human._get_uidx(pair.part_idx2, pair.idx2))
    def is_connected(self, other):
        # Two candidate humans are the same person if they share any keypoint uid.
        return len(self.uidx_list & other.uidx_list) > 0
    def merge(self, other):
        """Absorb all pairs (and thus body parts) of *other* into this human."""
        for pair in other.pairs:
            self.add_pair(pair)
    def part_count(self):
        """Number of distinct body parts detected for this human."""
        return len(self.body_parts.keys())
    def get_max_score(self):
        """Highest confidence score among this human's body parts."""
        return max([x.score for _, x in self.body_parts.items()])
    def get_face_box(self, img_w, img_h, mode=0):
        """
        Get Face box compared to img size (w, h)
        :param img_w: image width, pixels
        :param img_h: image height, pixels
        :param mode: 0 -> returned x/y are the box center; 1 -> x/y are the top-left corner
        :return: dict {"x", "y", "w", "h"} or None when no face can be located
        """
        # SEE : https://github.com/ildoonet/tf-pose-estimation/blob/master/tf_pose/common.py#L13
        _NOSE = CocoPart.Nose.value
        _NECK = CocoPart.Neck.value
        _REye = CocoPart.REye.value
        _LEye = CocoPart.LEye.value
        _REar = CocoPart.REar.value
        _LEar = CocoPart.LEar.value
        _THRESHOLD_PART_CONFIDENCE = 0.2
        # Only use sufficiently confident keypoints.
        parts = [part for idx, part in self.body_parts.items() if part.score > _THRESHOLD_PART_CONFIDENCE]
        is_nose, part_nose = _include_part(parts, _NOSE)
        if not is_nose:
            return None
        # Estimate the face-box size from whichever landmarks are available
        # (nose-neck distance, eye spacing, ear spacing), keeping the largest.
        size = 0
        is_neck, part_neck = _include_part(parts, _NECK)
        if is_neck:
            size = max(size, img_h * (part_neck.y - part_nose.y) * 0.8)
        is_reye, part_reye = _include_part(parts, _REye)
        is_leye, part_leye = _include_part(parts, _LEye)
        if is_reye and is_leye:
            size = max(size, img_w * (part_reye.x - part_leye.x) * 2.0)
            size = max(size,
                       img_w * math.sqrt((part_reye.x - part_leye.x) ** 2 + (part_reye.y - part_leye.y) ** 2) * 2.0)
        if mode == 1:
            # Corner mode requires both eyes to be visible.
            if not is_reye and not is_leye:
                return None
        is_rear, part_rear = _include_part(parts, _REar)
        is_lear, part_lear = _include_part(parts, _LEar)
        if is_rear and is_lear:
            size = max(size, img_w * (part_rear.x - part_lear.x) * 1.6)
        if size <= 0:
            return None
        # Offset the box horizontally depending on which eyes are visible.
        if not is_reye and is_leye:
            x = part_nose.x * img_w - (size // 3 * 2)
        elif is_reye and not is_leye:
            x = part_nose.x * img_w - (size // 3)
        else:  # is_reye and is_leye:
            x = part_nose.x * img_w - size // 2
        x2 = x + size
        if mode == 0:
            y = part_nose.y * img_h - size // 3
        else:
            y = part_nose.y * img_h - _round(size / 2 * 1.2)
        y2 = y + size
        # fit into the image frame
        x = max(0, x)
        y = max(0, y)
        x2 = min(img_w - x, x2 - x) + x
        y2 = min(img_h - y, y2 - y) + y
        if _round(x2 - x) == 0.0 or _round(y2 - y) == 0.0:
            return None
        if mode == 0:
            return {"x": _round((x + x2) / 2),
                    "y": _round((y + y2) / 2),
                    "w": _round(x2 - x),
                    "h": _round(y2 - y)}
        else:
            return {"x": _round(x),
                    "y": _round(y),
                    "w": _round(x2 - x),
                    "h": _round(y2 - y)}
    def get_upper_body_box(self, img_w, img_h):
        """
        Get Upper body box compared to img size (w, h)
        :param img_w: image width, pixels
        :param img_h: image height, pixels
        :return: dict {"x", "y", "w", "h"} (x/y are the box center) or None
        """
        if not (img_w > 0 and img_h > 0):
            raise Exception("img size should be positive")
        _NOSE = CocoPart.Nose.value
        _NECK = CocoPart.Neck.value
        _RSHOULDER = CocoPart.RShoulder.value
        _LSHOULDER = CocoPart.LShoulder.value
        _THRESHOLD_PART_CONFIDENCE = 0.3
        parts = [part for idx, part in self.body_parts.items() if part.score > _THRESHOLD_PART_CONFIDENCE]
        # Pixel coordinates of head/torso keypoints (part indices 0-2, 5, 8,
        # 11, 14-17); at least 5 are needed for a usable box.
        part_coords = [(img_w * part.x, img_h * part.y) for part in parts if
                       part.part_idx in [0, 1, 2, 5, 8, 11, 14, 15, 16, 17]]
        if len(part_coords) < 5:
            return None
        # Initial Bounding Box
        x = min([part[0] for part in part_coords])
        y = min([part[1] for part in part_coords])
        x2 = max([part[0] for part in part_coords])
        y2 = max([part[1] for part in part_coords])
        # # ------ Adjust heuristically +
        # if face points are detcted, adjust y value
        is_nose, part_nose = _include_part(parts, _NOSE)
        is_neck, part_neck = _include_part(parts, _NECK)
        torso_height = 0
        if is_nose and is_neck:
            y -= (part_neck.y * img_h - y) * 0.8
            torso_height = max(0, (part_neck.y - part_nose.y) * img_h * 2.5)
        #
        # # by using shoulder position, adjust width
        is_rshoulder, part_rshoulder = _include_part(parts, _RSHOULDER)
        is_lshoulder, part_lshoulder = _include_part(parts, _LSHOULDER)
        if is_rshoulder and is_lshoulder:
            half_w = x2 - x
            dx = half_w * 0.15
            x -= dx
            x2 += dx
        elif is_neck:
            if is_lshoulder and not is_rshoulder:
                half_w = abs(part_lshoulder.x - part_neck.x) * img_w * 1.15
                x = min(part_neck.x * img_w - half_w, x)
                x2 = max(part_neck.x * img_w + half_w, x2)
            elif not is_lshoulder and is_rshoulder:
                half_w = abs(part_rshoulder.x - part_neck.x) * img_w * 1.15
                x = min(part_neck.x * img_w - half_w, x)
                x2 = max(part_neck.x * img_w + half_w, x2)
        # ------ Adjust heuristically -
        # fit into the image frame
        x = max(0, x)
        y = max(0, y)
        x2 = min(img_w - x, x2 - x) + x
        y2 = min(img_h - y, y2 - y) + y
        if _round(x2 - x) == 0.0 or _round(y2 - y) == 0.0:
            return None
        return {"x": _round((x + x2) / 2),
                "y": _round((y + y2) / 2),
                "w": _round(x2 - x),
                "h": _round(y2 - y)}
    def __str__(self):
        return ' '.join([str(x) for x in self.body_parts.values()])
    def __repr__(self):
        return self.__str__()
class BodyPart:
    """A single detected keypoint.

    uidx     : unique id string (e.g. "humanid-partidx")
    part_idx : part index (eg. 0 for nose)
    x, y     : coordinate of body part
    score    : confidence score
    """
    __slots__ = ('uidx', 'part_idx', 'x', 'y', 'score')
    def __init__(self, uidx, part_idx, x, y, score):
        self.uidx = uidx
        self.part_idx = part_idx
        self.x = x
        self.y = y
        self.score = score
    def get_part_name(self):
        # Map the numeric index back to its CocoPart enum member.
        return CocoPart(self.part_idx)
    def __str__(self):
        return 'BodyPart:%d-(%.2f, %.2f) score=%.2f' % (self.part_idx, self.x, self.y, self.score)
    def __repr__(self):
        return str(self)
class PoseEstimator:
    """Namespace for the PAF (part-affinity-field) decoding step."""
    def __init__(self):
        pass
    @staticmethod
    def estimate_paf(peaks, heat_mat, paf_mat):
        """Decode peak/heat/PAF tensors into a list of Human objects.

        Coordinates are normalized by the heat-map dimensions; humans with
        no valid parts are discarded.
        """
        # The C++ extension accumulates its result internally; the getters
        # below read it out, so process_paf must run first.
        pafprocess.process_paf(peaks, heat_mat, paf_mat)
        humans = []
        for human_id in range(pafprocess.get_num_humans()):
            human = Human([])
            has_any_part = False
            for part_idx in range(18):
                c_idx = int(pafprocess.get_part_cid(human_id, part_idx))
                if c_idx < 0:
                    continue
                has_any_part = True
                human.body_parts[part_idx] = BodyPart(
                    '%d-%d' % (human_id, part_idx), part_idx,
                    float(pafprocess.get_part_x(c_idx)) / heat_mat.shape[1],
                    float(pafprocess.get_part_y(c_idx)) / heat_mat.shape[0],
                    pafprocess.get_part_score(c_idx)
                )
            if has_any_part:
                human.score = pafprocess.get_score(human_id)
                humans.append(human)
        return humans
class TfPoseEstimator:
    """OpenPose inference wrapper around a frozen TensorFlow graph.

    Builds upsampling / gaussian-smoothing / peak-finding ops on top of the
    network output and exposes :meth:`inference`, which turns an image into a
    list of Human objects.
    """
    # TODO : multi-scale

    def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
        """Load the frozen graph at `graph_path` and warm the session up."""
        self.target_size = target_size

        # load graph
        logger.info('loading graph from %s(default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
        with tf.gfile.GFile(graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        self.graph = tf.get_default_graph()
        tf.import_graph_def(graph_def, name='TfPoseEstimator')
        self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

        self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')
        self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')
        # Channels 0-18 of the network output are part heatmaps, the rest PAFs.
        self.tensor_heatMat = self.tensor_output[:, :, :, :19]
        self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
        self.upsample_size = tf.placeholder(dtype=tf.int32, shape=(2,), name='upsample_size')
        self.tensor_heatMat_up = tf.image.resize_area(self.tensor_output[:, :, :, :19], self.upsample_size,
                                                      align_corners=False, name='upsample_heatmat')
        self.tensor_pafMat_up = tf.image.resize_area(self.tensor_output[:, :, :, 19:], self.upsample_size,
                                                     align_corners=False, name='upsample_pafmat')
        # Gaussian-smooth the heatmaps, then keep only local maxima (peaks)
        # via an equality test against a 3x3 max-pool.
        smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
        gaussian_heatMat = smoother.get_output()

        max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat, window_shape=(3, 3), pooling_type='MAX', padding='SAME')
        self.tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
                                     tf.zeros_like(gaussian_heatMat))

        self.heatMat = self.pafMat = None

        # Warm-up: initialize whatever variables the imported graph left
        # uninitialized, then run dummy inferences at three upsample sizes so
        # the first real call does not pay one-off setup costs.
        self.persistent_sess.run(tf.variables_initializer(
            [v for v in tf.global_variables() if
             v.name.split(':')[0] in [x.decode('utf-8') for x in
                                      self.persistent_sess.run(tf.report_uninitialized_variables())]
             ])
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1], target_size[0]]
            }
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
            }
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
            }
        )

    def __del__(self):
        # self.persistent_sess.close()
        pass

    @staticmethod
    def _quantize_img(npimg):
        # Map float input in [-1, 1) onto uint8 [0, 255] for quantized graphs.
        npimg_q = npimg + 1.0
        npimg_q /= (2.0 / 2 ** 8)
        # npimg_q += 0.5
        npimg_q = npimg_q.astype(np.uint8)
        return npimg_q

    # NOTE: this drawing routine was modified from the upstream implementation.
    @staticmethod
    def draw_humans(npimg, humans, imgcopy=False):
        """Draw keypoints/limbs onto `npimg` and collect padded person boxes.

        Returns (image, [[x_min, y_min, x_max, y_max], ...]) in pixel coords.
        """
        if imgcopy:
            npimg = np.copy(npimg)
        image_h, image_w = npimg.shape[:2]
        detected_humans = []
        for human in humans:
            centers = {}
            x_poses = []
            y_poses = []
            # draw point
            for i in range(common.CocoPart.Background.value):
                if i not in human.body_parts.keys():
                    continue

                body_part = human.body_parts[i]
                center = (int(body_part.x * image_w + 0.5), int(body_part.y * image_h + 0.5))
                centers[i] = center
                x_poses.append(center[0])
                y_poses.append(center[1])
                cv2.circle(npimg, center, 3, common.CocoColors[i], thickness=3, lineType=8, shift=0)

            # draw line
            for pair_order, pair in enumerate(common.CocoPairsRender):
                if pair[0] not in human.body_parts.keys() or pair[1] not in human.body_parts.keys():
                    continue
                cv2.line(npimg, centers[pair[0]], centers[pair[1]], common.CocoColors[pair_order], 3)

            # Expand the keypoint bounding box (30% horizontally, 20%
            # vertically) so it covers the whole body, clamped at 0.
            x_min = min(x_poses)
            x_max = max(x_poses)
            y_min = min(y_poses)
            y_max = max(y_poses)
            width = x_max - x_min
            height = y_max - y_min
            x_min = round(x_min - (0.3 * width))
            x_max = round(x_max + (0.3 * width))
            y_min = round(y_min - (0.2 * height))
            y_max = round(y_max + (0.2 * height))
            x_min = 0 if x_min < 0 else x_min
            y_min = 0 if y_min < 0 else y_min
            detected_humans.append([x_min, y_min, x_max, y_max])
        return npimg, detected_humans

    def _get_scaled_img(self, npimg, scale):
        """Resize/crop `npimg` according to `scale`.

        Returns ([roi, ...], [(ratio_x, ratio_y, ratio_w, ratio_h), ...]).
        NOTE(review): an unsupported `scale` type falls through and returns
        None — callers only pass None / float / 2-tuple / 3-tuple.
        """
        get_base_scale = lambda s, w, h: max(self.target_size[0] / float(h), self.target_size[1] / float(w)) * s
        img_h, img_w = npimg.shape[:2]

        if scale is None:
            if npimg.shape[:2] != (self.target_size[1], self.target_size[0]):
                # resize
                npimg = cv2.resize(npimg, self.target_size, interpolation=cv2.INTER_CUBIC)
            return [npimg], [(0.0, 0.0, 1.0, 1.0)]
        elif isinstance(scale, float):
            # scaling with center crop
            base_scale = get_base_scale(scale, img_w, img_h)
            npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale, interpolation=cv2.INTER_CUBIC)
            o_size_h, o_size_w = npimg.shape[:2]
            if npimg.shape[0] < self.target_size[1] or npimg.shape[1] < self.target_size[0]:
                # pad with black so the sliding window always fits
                newimg = np.zeros(
                    (max(self.target_size[1], npimg.shape[0]), max(self.target_size[0], npimg.shape[1]), 3),
                    dtype=np.uint8)
                newimg[:npimg.shape[0], :npimg.shape[1], :] = npimg
                npimg = newimg
            windows = sw.generate(npimg, sw.DimOrder.HeightWidthChannel, self.target_size[0], self.target_size[1], 0.2)
            rois = []
            ratios = []
            for window in windows:
                indices = window.indices()
                roi = npimg[indices]
                rois.append(roi)
                ratio_x, ratio_y = float(indices[1].start) / o_size_w, float(indices[0].start) / o_size_h
                ratio_w, ratio_h = float(indices[1].stop - indices[1].start) / o_size_w, float(
                    indices[0].stop - indices[0].start) / o_size_h
                ratios.append((ratio_x, ratio_y, ratio_w, ratio_h))
            return rois, ratios
        elif isinstance(scale, tuple) and len(scale) == 2:
            # scaling with sliding window : (scale, step)
            base_scale = get_base_scale(scale[0], img_w, img_h)
            npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale, interpolation=cv2.INTER_CUBIC)
            o_size_h, o_size_w = npimg.shape[:2]
            if npimg.shape[0] < self.target_size[1] or npimg.shape[1] < self.target_size[0]:
                newimg = np.zeros(
                    (max(self.target_size[1], npimg.shape[0]), max(self.target_size[0], npimg.shape[1]), 3),
                    dtype=np.uint8)
                newimg[:npimg.shape[0], :npimg.shape[1], :] = npimg
                npimg = newimg
            window_step = scale[1]
            windows = sw.generate(npimg, sw.DimOrder.HeightWidthChannel, self.target_size[0], self.target_size[1],
                                  window_step)
            rois = []
            ratios = []
            for window in windows:
                indices = window.indices()
                roi = npimg[indices]
                rois.append(roi)
                ratio_x, ratio_y = float(indices[1].start) / o_size_w, float(indices[0].start) / o_size_h
                ratio_w, ratio_h = float(indices[1].stop - indices[1].start) / o_size_w, float(
                    indices[0].stop - indices[0].start) / o_size_h
                ratios.append((ratio_x, ratio_y, ratio_w, ratio_h))
            return rois, ratios
        elif isinstance(scale, tuple) and len(scale) == 3:
            # scaling with ROI : (want_x, want_y, scale_ratio)
            base_scale = get_base_scale(scale[2], img_w, img_h)
            npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale, interpolation=cv2.INTER_CUBIC)
            ratio_w = self.target_size[0] / float(npimg.shape[1])
            ratio_h = self.target_size[1] / float(npimg.shape[0])
            want_x, want_y = scale[:2]
            # Clamp the requested crop so it stays inside the image frame.
            ratio_x = want_x - ratio_w / 2.
            ratio_y = want_y - ratio_h / 2.
            ratio_x = max(ratio_x, 0.0)
            ratio_y = max(ratio_y, 0.0)
            if ratio_x + ratio_w > 1.0:
                ratio_x = 1. - ratio_w
            if ratio_y + ratio_h > 1.0:
                ratio_y = 1. - ratio_h

            roi = self._crop_roi(npimg, ratio_x, ratio_y)
            return [roi], [(ratio_x, ratio_y, ratio_w, ratio_h)]

    def _crop_roi(self, npimg, ratio_x, ratio_y):
        """Crop a target_size window at the given fractional offset, padding
        with black (centered) when the crop is smaller than target_size."""
        target_w, target_h = self.target_size
        h, w = npimg.shape[:2]
        x = max(int(w * ratio_x - .5), 0)
        y = max(int(h * ratio_y - .5), 0)
        cropped = npimg[y:y + target_h, x:x + target_w]

        cropped_h, cropped_w = cropped.shape[:2]
        if cropped_w < target_w or cropped_h < target_h:
            npblank = np.zeros((self.target_size[1], self.target_size[0], 3), dtype=np.uint8)

            copy_x, copy_y = (target_w - cropped_w) // 2, (target_h - cropped_h) // 2
            npblank[copy_y:copy_y + cropped_h, copy_x:copy_x + cropped_w] = cropped
            return npblank  # BUG FIX: this return was missing, so the padding path yielded None
        else:
            return cropped

    def inference(self, npimg, resize_to_default=True, upsample_size=1.0):
        """Run the network on `npimg` and return the detected Human list.

        `upsample_size` multiplies the 1/8-resolution network output size.
        Raises Exception when `npimg` is None.
        """
        if npimg is None:
            raise Exception('The image is not valid. Please check your image exists.')

        if resize_to_default:
            upsample_size = [int(self.target_size[1] / 8 * upsample_size), int(self.target_size[0] / 8 * upsample_size)]
        else:
            upsample_size = [int(npimg.shape[0] / 8 * upsample_size), int(npimg.shape[1] / 8 * upsample_size)]

        if self.tensor_image.dtype == tf.quint8:
            # quantize input image
            npimg = TfPoseEstimator._quantize_img(npimg)

        logger.debug('inference+ original shape=%dx%d' % (npimg.shape[1], npimg.shape[0]))
        img = npimg
        if resize_to_default:
            img = self._get_scaled_img(npimg, None)[0][0]
        peaks, heatMat_up, pafMat_up = self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up], feed_dict={
                self.tensor_image: [img], self.upsample_size: upsample_size
            })
        peaks = peaks[0]
        self.heatMat = heatMat_up[0]
        self.pafMat = pafMat_up[0]
        logger.debug('inference- heatMat=%dx%d pafMat=%dx%d' % (
            self.heatMat.shape[1], self.heatMat.shape[0], self.pafMat.shape[1], self.pafMat.shape[0]))

        t = time.time()
        humans = PoseEstimator.estimate_paf(peaks, self.heatMat, self.pafMat)
        logger.debug('estimate time=%.5f' % (time.time() - t))
        return humans
# Create the TF-Pose estimator object
e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(432, 368))
count = 0
cap = cv2.VideoCapture(0)
# Labels the classifier predicts for each cropped person.
categories = ['pedestrian', 'sitter', 'taxier']
while True:
    ret, image = cap.read()
    people_bbox = dict()  # maps (x1, y1, x2, y2) -> predicted category
    if not ret:
        break
    # Draw skeletons on top of the camera frame (background kept)
    humans = e.inference(image, upsample_size=4.0)
    image, detected_humans = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    for coors in detected_humans:
        # exclude boxes under 200px wide / 300px tall (currently disabled)
        # if w < 100 and h < 200: continue
        cv2.rectangle(image, (coors[0], coors[1]), (coors[2], coors[3]), (0, 0, 255), 3)
        # image resizing and reshape (Preprocessing)
        input_image = image[int(coors[1]):int(coors[3]), int(coors[0]):int(coors[2])]
        img = Image.fromarray(input_image, "RGB")
        # img = Image.open(input_image)
        img = img.convert("RGB")
        img = img.resize((256, 256))
        data = np.asarray(img)
        X = np.array(data)
        X = X.astype("float") / 256
        X = X.reshape(-1, 256, 256, 3)
        # model prediction
        result = [np.argmax(value) for value in model.predict(X)]
        result = categories[result[0]]
        people_bbox[(coors[0], coors[1], coors[2], coors[3])] = result
    # quit when ESC is pressed
    if cv2.waitKey(10) == 27:
        break
    print('%d.jpg done' % count)
    count += 1
    # Print the class name: label each detected person with its category
    for coors in people_bbox:
        gesture = people_bbox[(coors[0], coors[1], coors[2], coors[3])]
        cv2.putText(image, gesture, (coors[0], coors[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
    cv2.imshow("Gesture_Recognition", image)
# Release the capture and close the window
cap.release()
cv2.destroyWindow('Gesture_Recognition')
|
[
"ktkwak5218@naver.com"
] |
ktkwak5218@naver.com
|
3c3a6e486dac9e822f6acc546b647a8fb43c2498
|
324b597f233f873138cff1aa51eaae17a8fa87c7
|
/drift_plot.py
|
b14ebdf06636093a4ec42013980e16b6e3cf6cea
|
[] |
no_license
|
mengxin513/fibre_stage_characterisation
|
b69679b7455dce4451874df3a5d00b9a4adba898
|
db39f977e12ecd4a418524952902c9098fe8a841
|
refs/heads/master
| 2021-05-07T13:50:58.840309
| 2018-07-04T09:55:53
| 2018-07-04T09:55:53
| 109,703,898
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
# Plot stage-drift measurements stored in an HDF5 file.
import h5py
import numpy as np
import matplotlib.pyplot as plt
from plot_tools import plot_tools

print("Loading data...")  # let the user know the programme is running
drift_file = h5py.File("drift_41217.hdf5", "r")  # open the measurement file read-only
measurements = drift_file["test_data000"]  # load one data group
num_sets = len(measurements)  # number of datasets in the group
means = np.zeros([3, num_sets])  # pre-allocate one column per dataset
for idx in range(num_sets):
    dset = measurements["data%05d" % idx]  # datasets are named data00000, data00001, ...
    means[:, idx] = np.mean(dset, axis=1)  # column-wise mean of each dataset
plot_tools(means)  # hand the summary over to the plotting helper
plt.show()  # display the figure on screen
|
[
"qm237@bath.ac.uk"
] |
qm237@bath.ac.uk
|
fb8a38e1a141d682a285b65b537da8abe6aa5241
|
957373475ea85d56c10176e27ae4833f3e29e663
|
/warehouse_app/report/__init__.py
|
c2d4b2fec92a90be4b79727172ad7ba77fb028e5
|
[] |
no_license
|
Comunitea/app
|
907ce5af16d2a4d0b52ee963e59131e1c8b18d14
|
ec86428282b68990e0205dabbd556ca0fc74e702
|
refs/heads/master
| 2021-04-03T10:22:26.770464
| 2018-03-15T23:14:51
| 2018-03-15T23:14:51
| 124,746,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2014 Pexego Sistemas Informáticos All Rights Reserved
# $Carlos Lombardía Rodríguez$ <carlos@comunitea.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_tag_parser
import location_tag_parser
|
[
"javierjcf@gmail.com"
] |
javierjcf@gmail.com
|
f8705e39f3508909bbcf4ebc7145cbc5ebfaa55f
|
89474b15817bb144542dfa866770fa98cc7ead90
|
/tests/bdd/conftest.py
|
3894fd13e5f9a6557859d40ea465840de0df3513
|
[] |
no_license
|
nasioDSW/webui
|
dc6bd1a9188d9e7aeeca0f3c844df83950ce5298
|
497d7ce536ba3ead99c51fa0c6a04f636fabcaf6
|
refs/heads/master
| 2023-03-28T16:53:36.912571
| 2021-04-02T19:26:44
| 2021-04-02T19:26:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,398
|
py
|
# !/usr/bin/env python3
import pytest
import os
import time
from configparser import ConfigParser
from platform import system
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
def browser():
    """Create a Firefox WebDriver configured for unattended test runs."""
    ff_profile = webdriver.FirefoxProfile()
    # Save downloads straight to /tmp without prompting, and open links
    # in the current tab instead of new windows.
    ff_profile.set_preference("browser.download.folderList", 2)
    ff_profile.set_preference("browser.download.dir", "/tmp")
    ff_profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/json")
    ff_profile.set_preference("browser.download.manager.showWhenStarting", False)
    ff_profile.set_preference("browser.link.open_newwindow", 3)
    if system() == "Linux":
        ff_binary = '/usr/bin/firefox'
    else:
        ff_binary = '/usr/local/bin/firefox'
    caps = DesiredCapabilities.FIREFOX
    caps['marionette'] = True
    caps['firefox_profile'] = ff_profile.encoded
    caps['binary'] = ff_binary
    drv = webdriver.Firefox(capabilities=caps)
    drv.implicitly_wait(2)
    return drv
# One module-wide browser session shared by every test in the run.
web_driver = browser()


@pytest.fixture
def driver():
    # Hand the shared WebDriver to tests that request the `driver` fixture.
    return web_driver
# Read the target NAS address and credentials from config.cfg when present.
# NOTE(review): if config.cfg is missing, `ip`/`password` are never defined
# and the fixtures below raise NameError — confirm the file is mandatory.
if os.path.exists('config.cfg'):
    configs = ConfigParser()
    configs.read('config.cfg')
    ip = configs['NAS_CONFIG']['ip']
    password = configs['NAS_CONFIG']['password']


@pytest.fixture
def ui_url():
    # Base URL of the web UI under test; also kept in the module global `url`.
    global url
    url = f"http://{ip}"
    return url


@pytest.fixture
def root_password():
    # Root password for logging into the NAS web UI.
    return password
# NOTE(review): `pytest.mark.hookwrapper` is the legacy spelling; modern
# pytest expects `@pytest.hookimpl(hookwrapper=True)` — confirm version.
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item):
    """
    Extends the PyTest Plugin to take and embed screenshot whenever test fails.
    """
    outcome = yield  # let pytest build the report first
    report = outcome.get_result()

    if report.when == 'call' or report.when == "setup":
        xfail = hasattr(report, 'wasxfail')
        if (report.skipped and xfail) or (report.failed and not xfail):
            screenshot_name = f'screenshot/{report.nodeid.replace("::", "_")}.png'
            # look if there is a Error window
            if element_exist('//h1[contains(.,"Error")]'):
                # Expand the backtrace panel and save its text next to the PNG.
                web_driver.find_element_by_xpath('//div[@ix-auto="button__backtrace-toggle"]').click()
                time.sleep(2)
                traceback_name = f'screenshot/{report.nodeid.replace("::", "_")}.txt'
                save_traceback(traceback_name)
            save_screenshot(screenshot_name)
            # Press CLOSE if exist, otherwise dismiss an "I AGREE" dialog.
            if element_exist('//button[@ix-auto="button__CLOSE"]'):
                web_driver.find_element_by_xpath('//button[@ix-auto="button__CLOSE"]').click()
            else:
                if element_exist('//button[@ix-auto="button__I AGREE"]'):
                    web_driver.find_element_by_xpath('//button[@ix-auto="button__I AGREE"]').click()
            # if test that use disable failover make sure to enable failover back.
            if 'T0905' in screenshot_name or 'T0919' in screenshot_name or 'T0920' in screenshot_name or 'T0922' in screenshot_name:
                if element_exist('//mat-icon[@svgicon="ha_disabled"]'):
                    enable_failover()
def save_screenshot(name):
    # Dump the current browser viewport to `name` (PNG path).
    web_driver.save_screenshot(name)
def save_traceback(name):
    """Save the UI error dialog's traceback text to the file `name`.

    Uses a context manager so the handle is closed even when the element
    lookup raises (the original left the file open on failure).
    """
    with open(name, 'w') as traceback_file:
        traceback_file.write(web_driver.find_element_by_xpath('//textarea[@id="err-bt-text"]').text)
def element_exist(xpath):
    """Return True when `xpath` matches an element on the current page."""
    try:
        web_driver.find_element_by_xpath(xpath)
    except NoSuchElementException:
        return False
    return True
def wait_on_element(wait, loop, xpath):
    """Poll for `xpath` up to `loop` times, sleeping `wait` seconds between polls.

    Returns True as soon as the element appears, False once every attempt
    has failed.

    BUG FIX: the original returned False from inside the loop on the very
    first miss, so only a single poll ever ran regardless of `loop`.
    """
    for _ in range(loop):
        time.sleep(wait)
        if element_exist(xpath):
            return True
    return False
def enable_failover():
    """Re-enable HA failover through the web UI after a test disabled it.

    BUG FIX: the original called `driver.find_element_by_xpath(...)` where
    `driver` is the pytest fixture *function* (not a WebDriver), and passed
    it as an extra first argument to wait_on_element(), whose signature is
    (wait, loop, xpath). Both would raise at runtime; use the module-level
    `web_driver` and the correct argument list instead.
    """
    web_driver.find_element_by_xpath('//mat-list-item[@ix-auto="option__Dashboard"]').click()
    wait_on_element(0.5, 7, '//mat-list-item[@ix-auto="option__System Settings"]')
    web_driver.find_element_by_xpath('//mat-list-item[@ix-auto="option__System Settings"]').click()
    wait_on_element(0.5, 7, '//mat-list-item[@ix-auto="option__Misc"]')
    web_driver.find_element_by_xpath('//mat-list-item[@ix-auto="option__Misc"]').click()
    assert wait_on_element(0.5, 7, '//h1[contains(.,"Miscellaneous")]')
    assert wait_on_element(0.5, 7, '//li[contains(.,"Failover")]')
    web_driver.find_element_by_xpath('//li[contains(.,"Failover")]').click()
    assert wait_on_element(0.5, 7, '//h1[contains(.,"Failover")]')
    element = web_driver.find_element_by_xpath('//mat-checkbox[@ix-auto="checkbox__Disable Failover"]')
    class_attribute = element.get_attribute('class')
    if 'mat-checkbox-checked' in class_attribute:
        # Failover is currently disabled: untick the box and save.
        web_driver.find_element_by_xpath('//mat-checkbox[@ix-auto="checkbox__Disable Failover"]').click()
        wait_on_element(0.5, 5, '//button[@ix-auto="button__SAVE"]')
        web_driver.find_element_by_xpath('//button[@ix-auto="button__SAVE"]').click()
        wait_on_element(0.5, 4, '//h1[contains(.,"Settings saved")]')
        if element_exist('//button[@ix-auto="button__CLOSE"]'):
            web_driver.find_element_by_xpath('//button[@ix-auto="button__CLOSE"]').click()
    time.sleep(1)
    web_driver.find_element_by_xpath('//mat-list-item[@ix-auto="option__Dashboard"]').click()
    # wait_on_element(1, 90, '//mat-icon[@svgicon="ha_enabled"]')
|
[
"ericturgeon.bsd@gmail.com"
] |
ericturgeon.bsd@gmail.com
|
21da28b70b562a80106595216ab69a3a2debacc8
|
fefa4f10774a82f7960f8e3e750e98cae434c853
|
/functional_tests/tests.py
|
9ec27cadaf9f1780b47bc5cd996e840befb9fd40
|
[] |
no_license
|
youjeng/superlists
|
4b222d11864efc2e453fd63068ad5c7a370de193
|
f970de4823e2d739ff0eab74368e9aea01d877a5
|
refs/heads/main
| 2023-08-21T11:21:05.340083
| 2021-09-29T11:03:04
| 2021-09-29T11:03:04
| 372,086,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,502
|
py
|
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.webdriver import WebDriver
import unittest
import time
MAX_WAIT = 10  # seconds to keep retrying a flaky DOM lookup


class NewVisitorTest(LiveServerTestCase):
    """Functional (browser-driven) tests for the to-do list site."""

    def setUp(self):
        # Fresh browser per test so cookies/state don't leak between tests.
        self.browser = webdriver.Firefox()

    def wait_for_row_in_list_table(self, row_text):
        """Assert `row_text` shows up in the list table, retrying up to MAX_WAIT s."""
        start_time = time.time()
        while True:
            try:
                table = self.browser.find_element_by_id('id_list_table')
                rows = table.find_elements_by_tag_name('tr')
                self.assertIn(row_text, [row.text for row in rows])
                return
            except(AssertionError, WebDriverException) as e:
                if time.time() - start_time > MAX_WAIT:
                    raise e
                time.sleep(0.5)

    def tearDown(self):
        self.browser.quit()

    def test_can_start_a_list_and_retrieve_it_later(self):
        # Edith heard about a sweet new online to-do app.
        # She goes to check out it's home page
        self.browser.get(self.live_server_url)

        # She notices the page title and header mention To-Do lists
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)

        # She is invited to enter a todo item straight away
        input_box = self.browser.find_element_by_id('id_new_item')
        self.assertEqual(
            input_box.get_attribute('placeholder'),
            'Enter a to-do item'
        )

        # She types "Fill out paper work for ODNR"
        input_box.send_keys('Fill out paper work for ODNR')

        # When she hits enter, the page updates, and not the page lists
        # "1: Fill out paper work for ODNR" as an item in a to-do list
        input_box.send_keys(Keys.ENTER)
        time.sleep(1)
        self.wait_for_row_in_list_table('1: Fill out paper work for ODNR')

        # There is still a text box inviting her to add another item.
        # She enters "order parts"
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('order parts')
        inputbox.send_keys(Keys.ENTER)

        # The page updates again, and shows both items on her list
        self.wait_for_row_in_list_table('1: Fill out paper work for ODNR')
        self.wait_for_row_in_list_table('2: order parts')

        # Satisifed, she goes back to sleep

    def test_multiple_users_can_start_lists_at_different_urls(self):
        # Edith starts a new to-do list
        self.browser.get(self.live_server_url)
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Buy peacock feathers')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy peacock feathers')

        # She notices that her list has a unique URL
        edith_list_url = self.browser.current_url
        self.assertRegex(edith_list_url, '/lists/.+')

        # Now a new user, Francis, comes along to the site
        ## We use a new browser session to make sure that no information
        ## of Edith's is coming through from cookies
        self.browser.quit()
        # BUG FIX: was `self.browser.webdriver.Firefox()`, which neither
        # created nor assigned a new driver, so later steps ran on the quit
        # browser.
        self.browser = webdriver.Firefox()

        # Francis visits the home page, there is no sign of Ediths lists
        self.browser.get(self.live_server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertNotIn('order parts', page_text)

        # Francis starts a new list by entering a new item. He is less interesting than Edith
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Buy milk')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy milk')

        # Francis gets his own unique URL
        # BUG FIX: variable was misspelled (`fancis_list_url`), assertRegex
        # was called without `self.`, and '/lists/+' only matched a run of
        # slashes; use the same pattern as Edith's check above.
        francis_list_url = self.browser.current_url
        self.assertRegex(francis_list_url, '/lists/.+')

        # Edith wonders if the site will remember her list
        # She see's that the site has generated a unique URL for her -- there is
        # some explanatory text to that effect
        self.fail('Finish the test!')

        # She visits that URL - her to-do list is still there
        # Satisfied, she goe s back to sleep
|
[
"mcheich@gmail.com"
] |
mcheich@gmail.com
|
979e69700c1e63600045be2addfb894eb2fcccfc
|
35ae63b26cd3f9b80aa8b3edeb598fbd143c1e17
|
/utils/telegram_bot/__init__.py
|
902b066386f1085f64fd6175553c07ad6276130f
|
[] |
no_license
|
trevenue44/whatsapp-telegram
|
e4f2fa7adfa1bfb5c9ce56cb16b87eeb7e2e9cd2
|
742d96af955145da0fe62988a59c3da64e95fb91
|
refs/heads/master
| 2023-06-17T17:27:11.358814
| 2021-07-17T14:36:52
| 2021-07-17T14:36:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23
|
py
|
from .main_bot import *
|
[
"trevepaul2@gmail.com"
] |
trevepaul2@gmail.com
|
13a7ea0b99fb7e6c47e3d62208c6fe6c62cca099
|
5387aba1e74f7c177ad76e50dffea1c76d545660
|
/cah.py
|
a101b6ce65b4ec785d817719adfecef0c7eb7bb0
|
[] |
no_license
|
wluberti/cardsAgainstHumanity
|
1aafbb8449663e24c28e23b28782bea40a363974
|
900aab63ce01ab3d24d162075e3421a67f3b1df3
|
refs/heads/master
| 2021-04-13T20:37:44.367802
| 2020-03-22T13:36:45
| 2020-03-22T13:36:45
| 249,186,499
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,783
|
py
|
#! /usr/bin/env python3
"""
See the README.md for details!
"""
from os import listdir
from os.path import join, isdir
from random import sample, choice
DATA_DIRECTORY = './data/'  # root folder holding one sub-directory per language
DEFAULT_LANGUAGE = 'NL'  # set to '' to prompt the player for a language
NUMBER_OF_ANSWERS = 8  # answer cards dealt per question blank


class CardsAgainstHumanity(object):
    """Deals questions and answer cards for a Cards Against Humanity round."""

    def __init__(self):
        """Print a nice welcome message"""
        print("Welcome to Card Against Humanity")

    def getLanguages(self):
        """Scan DATA_DIRECTORY for upper-case language sub-directories."""
        try:
            found = []
            for entry in listdir(DATA_DIRECTORY):
                if entry.isupper() and isdir(join(DATA_DIRECTORY, entry)):
                    found.append(entry)
            self.availableLanguages = found
            return found
        except OSError:
            exit("Could not read from 'data' directory!")

    def setLanguage(self, language):
        """Select the language used for the questions and answers."""
        if language == '':
            language = 'EN'  # empty input falls back to English
        chosen = language.upper()
        if chosen in self.availableLanguages:
            self.language = chosen
        else:
            exit('This language is not available')

    def loadFiles(self):
        """Read the question and answer card files for the chosen language."""
        lang_dir = DATA_DIRECTORY + self.language
        try:
            with open(join(lang_dir, "questions.txt")) as question_file:
                self.questions = question_file.readlines()
            with open(join(lang_dir, "answers.txt")) as answers_file:
                self.answers = answers_file.readlines()
        except OSError:
            exit("Could not read the question and answer files for this language:")

    def getQuestion(self):
        """Pick a random question and remember how many blanks it has."""
        picked = choice(self.questions)
        self.numberOfAnswersNeeded = picked.count('_')
        return picked.replace('_', '<.....>')

    def getAnswers(self, amount=NUMBER_OF_ANSWERS):
        """Draw random answers; the amount scales up when the current
        question contains multiple blanks."""
        needed = self.numberOfAnswersNeeded
        total = amount * needed if needed > 1 else amount
        return sample(self.answers, total)
if __name__ == "__main__":
    # Interactive round: pick a language, show one question and its answers.
    session = CardsAgainstHumanity()
    session.getLanguages()
    if DEFAULT_LANGUAGE == '':
        # No default configured: ask the player; empty input means English.
        reply = input("Default language not set. In what language would you like to play? (Available: {},\nNo input will default to: EN) ".format(session.availableLanguages))
        session.setLanguage(reply)
    else:
        session.setLanguage(DEFAULT_LANGUAGE)
    session.loadFiles()
    print("=" * 80, end='\n\n')
    print(session.getQuestion())
    for num, answer in enumerate(session.getAnswers(), start=1):
        print(num, ") ", answer.strip(), sep='')
|
[
"wouter@zenith.freedom.nl"
] |
wouter@zenith.freedom.nl
|
f43b7d1a8e62d5594e3f4f28fc6255696831d704
|
69e18da87bba00ce9cbb482c84442c4e36799a1e
|
/gen-smtp.py
|
92b40559966bc7bd9082009ef1b5127e5e964639
|
[] |
no_license
|
hosting90/lets-encrypt
|
3878ac2325247287da00e509d5fffc2098516beb
|
eabe7363fc56114d9abeaed167750affe8dd767e
|
refs/heads/master
| 2020-05-04T20:40:13.041738
| 2019-09-05T11:21:38
| 2019-09-05T11:21:38
| 179,446,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,632
|
py
|
#!/usr/bin/python
import MySQLdb, os, sys
try:
    # NOTE(review): database credentials are hard-coded in the repository —
    # they should live in a config file outside version control.
    conn = MySQLdb.connect(host="galera-hosting-db.hosting90.cz",
                           user="hosting_antispam",
                           passwd="TmnJUrd5bT9WzCSN",
                           db="hosting_smtp")
except MySQLdb.Error:
    print('MySQL Error')
    sys.exit(1)


def _export(path, sql, error_msg, skip_none=False, footer=None):
    """Run *sql* and write one line per first-column value into *path*.

    skip_none: silently drop rows whose first column is NULL.
    footer:    optional literal line appended after all rows.
    On any failure, print *error_msg* and exit the whole script with 1
    (preserving the original per-block error handling).
    """
    try:
        out = open(path, 'w')
        cursor = conn.cursor()
        cursor.execute(sql)
        row = cursor.fetchone()
        while row:
            if not (skip_none and row[0] is None):
                out.write(row[0] + '\n')
            row = cursor.fetchone()
        cursor.close()
        if footer is not None:
            out.write(footer)
        out.close()
    except Exception:
        print(error_msg)
        sys.exit(1)


# Per-FTP-account hourly/daily PHP mail limits and account mappings.
_export('smtp/php_limits',
        'SELECT DISTINCT CONCAT(mainftp, ":", hourly_limit) FROM `php_users` ORDER BY mainftp',
        'Php limits error')
_export('smtp/php_daily_limits',
        'SELECT DISTINCT CONCAT(mainftp, ":", daily_limit) FROM `php_users` ORDER BY mainftp',
        'Php daily limits error')
_export('smtp/php_alternatives',
        'SELECT DISTINCT CONCAT(ftp, ":", mainftp) AS item FROM `php_users` WHERE ftp IS NOT NULL AND mainftp IS NOT NULL UNION SELECT DISTINCT CONCAT(jmeno_domeny, ":", mainftp) AS item FROM `php_users` WHERE jmeno_domeny IS NOT NULL AND mainftp IS NOT NULL ORDER BY item',
        'PHP alternatives error')
_export('smtp/php_domains',
        'SELECT DISTINCT CONCAT(mainftp, ":", jmeno_domeny) FROM `php_users` ORDER BY mainftp',
        'Php domains error')

# Per-mailbox SMTP limits; the trailing '*:0' line blocks everyone not listed.
_export('smtp/smtp_limits',
        'SELECT CONCAT(jmeno, "@", domena, ":", max_email_daily) FROM `v_smtp` WHERE block_outgoing_mail = 0 ORDER BY domena, jmeno',
        'smtp limits error', skip_none=True, footer='*:0\n')
_export('smtp/smtp_password_sha1',
        'SELECT concat(jmeno, "@", domena, ":",hex(password_ssha)) as mystring FROM `v_smtp` WHERE block_outgoing_mail = 0 AND jmeno IS NOT NULL AND domena IS NOT NULL AND password_ssha is not null ORDER BY domena, jmeno;',
        'smtp password sha1', skip_none=True)
_export('smtp/smtp_blacklist_domain',
        'SELECT `domena` FROM `v_smtp` WHERE `block_outgoing_mail_domain` = 1 GROUP BY `domena`',
        'smtp blacklist_domain', skip_none=True)
_export('smtp/smtp_top_domains',
        'SELECT CONCAT(jmeno, "@", domena, ":", domena_top) FROM `v_smtp` ORDER BY domena, jmeno',
        'smtp top_domains error', skip_none=True)
_export('smtp/smtp_top_domains_limits',
        'SELECT DISTINCT CONCAT(domena_top,":",domain_limit) FROM `v_smtp` ORDER BY domena_top',
        'smtp top domains limits error', skip_none=True)

# DKIM keys: one file per domain; stale key files are deleted afterwards.
try:
    domain_keys_dir = 'smtp/domain_keys'
    cursor = conn.cursor()
    cursor.execute('SELECT jmeno_domeny, dkim_key FROM `hosting`.`domeny` WHERE `dkim_key` IS NOT NULL')
    dkim_domainlist = []
    while True:
        row = cursor.fetchone()
        if row is None:
            break  # end of the SQL result set
        dkim_domain, dkim_key = row
        if dkim_key:
            dkim_domainlist.append(dkim_domain + '.key')
            # Write (or overwrite) this domain's key file.
            try:
                key_file = open(domain_keys_dir + '/' + dkim_domain + '.key', 'w')
                key_file.write(dkim_key)
            finally:
                key_file.close()
    cursor.close()
    # Delete key files for domains that are no longer in the database.
    local_key_list = os.listdir(domain_keys_dir)
    for stale in set(local_key_list) - set(dkim_domainlist):
        os.remove(domain_keys_dir + '/' + stale)
except Exception:
    print('DKIM keys error')
    sys.exit(1)

os.chdir('smtp')
# Stage new/removed key files, then commit the regenerated exports.
os.system('git add domain_keys/*')  # add new keys to the git repo
os.system('git add -u domain_keys/*')  # remove deleted keys from the git repo
os.system('git commit php_domains php_limits php_daily_limits php_alternatives smtp_limits smtp_password_sha1 smtp_blacklist_domain smtp_top_domains smtp_top_domains_limits domain_keys -m Autocommit >/dev/null')
|
[
"matej@divecky.com"
] |
matej@divecky.com
|
31d7a56d98ac554be81d6c85a7d9a9d0b0e1a2fd
|
9ceb332cff5a6caa8c1e3fc24b865737d07c05a5
|
/fpn/retina_fpn.py
|
f2566fd4816a237310d78f3b561844e573607c58
|
[
"MIT"
] |
permissive
|
blyucs/MaskRCNN
|
95a7a8817fe08155c076ff5227dc009589ae4d1a
|
493730f1987886913ce059e934e9a720ee85bb20
|
refs/heads/master
| 2020-11-30T14:26:13.543724
| 2019-12-30T11:48:45
| 2019-12-30T11:48:45
| 230,416,578
| 0
| 0
|
NOASSERTION
| 2019-12-27T09:45:40
| 2019-12-27T09:45:39
| null |
UTF-8
|
Python
| false
| false
| 4,663
|
py
|
'''RetinaFPN in PyTorch.
See the paper "Focal Loss for Dense Object Detection" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 -> 3x3 -> 1x1 convolutions plus a
    residual shortcut.  Output channel count is ``expansion * planes``."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        # 1x1 reduce, 3x3 spatial conv (carries the stride), 1x1 expand.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Projection shortcut only when spatial size or channels change;
        # otherwise the identity (empty Sequential) is used.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return F.relu(y + residual)
class RetinaFPN(nn.Module):
    """Feature Pyramid Network backbone for RetinaNet ("Focal Loss for
    Dense Object Detection").  Builds pyramid levels p3..p7 from a
    ResNet-style bottom-up pathway."""

    def __init__(self, block, num_blocks):
        # block: residual block class (e.g. Bottleneck);
        # num_blocks: blocks per stage for the four bottom-up stages.
        super(RetinaFPN, self).__init__()
        self.in_planes = 64
        # Stem: 7x7/2 conv + BN (the 3x3 max-pool is applied in forward()).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Bottom-up layers
        self.layer2 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer3 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer4 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer5 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # Extra coarse levels p6/p7 derived from c5.
        # NOTE(review): the hard-coded 2048/1024/512 input channels assume
        # block.expansion == 4; a block with a different expansion breaks here.
        self.conv6 = nn.Conv2d(2048, 256, kernel_size=3, stride=2, padding=1)
        self.conv7 = nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1)
        # Top layer
        self.toplayer = nn.Conv2d(
            2048, 256, kernel_size=1, stride=1, padding=0)  # Reduce channels
        # Smooth layers: 3x3 convs applied after top-down merging.
        self.smooth1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        # Lateral layers: 1x1 convs on c4 and c3.
        self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Stack num_blocks residual blocks; only the first carries the stride.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def _upsample_add(self, x, y):
        '''Upsample and add two feature maps.
        Args:
          x: (Variable) top feature map to be upsampled.
          y: (Variable) lateral feature map.
        Returns:
          (Variable) added feature map.
        Note in PyTorch, when input size is odd, the upsampled feature map
        with `F.upsample(..., scale_factor=2, mode='nearest')`
        maybe not equal to the lateral feature map size.
        e.g.
        original input size: [N,_,15,15] ->
        conv2d feature map size: [N,_,8,8] ->
        upsampled feature map size: [N,_,16,16]
        So we choose bilinear upsample which supports arbitrary output sizes.
        '''
        _, _, H, W = y.size()
        # NOTE(review): F.upsample is deprecated in newer PyTorch; the
        # modern equivalent is F.interpolate(x, size=(H, W), mode='bilinear').
        return F.upsample(x, size=(H, W), mode='bilinear') + y

    def forward(self, x):
        """Return pyramid feature maps (p3, p4, p5, p6, p7) for images x."""
        # Bottom-up
        c1 = F.relu(self.bn1(self.conv1(x)))
        c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
        c2 = self.layer2(c1)
        c3 = self.layer3(c2)
        c4 = self.layer4(c3)
        c5 = self.layer5(c4)
        p6 = self.conv6(c5)
        p7 = self.conv7(F.relu(p6))
        # Top-down: reduce c5, then merge upsampled maps with laterals.
        p5 = self.toplayer(c5)
        p4 = self._upsample_add(p5, self.latlayer1(c4))
        p3 = self._upsample_add(p4, self.latlayer2(c3))
        # Smooth
        p4 = self.smooth1(p4)
        p3 = self.smooth2(p3)
        return p3, p4, p5, p6, p7
def RetinaFPN101():
    """Build a RetinaFPN backbone.

    NOTE(review): despite the name, this uses a shallow [2, 2, 2, 2]
    stage layout (ResNet-18-like); the commented-out [2, 4, 23, 3] is
    close to a genuine ResNet-101 configuration.
    """
    # return RetinaFPN(Bottleneck, [2,4,23,3])
    return RetinaFPN(Bottleneck, [2, 2, 2, 2])
def test():
    """Smoke test: push one random 600x900 image through the network and
    print the spatial size of each returned pyramid level."""
    net = RetinaFPN101()
    fms = net(Variable(torch.randn(1, 3, 600, 900)))
    for fm in fms:
        print(fm.size())

# NOTE(review): runs at import time -- there is no __main__ guard.
test()
|
[
"18588220928@163.com"
] |
18588220928@163.com
|
7aaa1de0f8b242659f974aeedebdb47987b29d49
|
87257cdeac8f62b00c35ef142bcede4b884805a2
|
/app0.2_remote-objects/stockmarket/stockmarket.py
|
7ddadec74a2e59dbf97d504d1e3355d9f89bdebb
|
[] |
no_license
|
mglucas/SD
|
45f41d108fac2c2e0e3628610cf6a63a938cf846
|
1b5ec23209545f1eda4116b1e6435af675f1c4b5
|
refs/heads/main
| 2023-07-17T23:54:19.003156
| 2021-09-01T19:07:11
| 2021-09-01T19:07:11
| 380,574,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
# stockmarket.py
from __future__ import print_function
import random
import time
import Pyro4
@Pyro4.expose
class StockMarket(object):
    """A named stock market that streams random quotes for its symbols."""

    def __init__(self, marketname, symbols):
        self._name = marketname
        self._symbols = symbols

    def quotes(self):
        """Endlessly yield (symbol, price) pairs at short random intervals."""
        while True:
            pick = random.choice(self.symbols)
            price = round(random.uniform(5, 150), 2)
            yield pick, price
            time.sleep(random.random() / 2.0)

    @property
    def name(self):
        """Market name (read-only)."""
        return self._name

    @property
    def symbols(self):
        """Ticker symbols traded on this market (read-only)."""
        return self._symbols
if __name__ == "__main__":
    # Two example markets with a few ticker symbols each.
    nasdaq = StockMarket("NASDAQ", ["AAPL", "CSCO", "MSFT", "GOOG"])
    newyork = StockMarket("NYSE", ["IBM", "HPQ", "BP"])
    # for example purposes we will access the daemon and name server ourselves and not use serveSimple
    with Pyro4.Daemon() as daemon:
        # Register both objects with the daemon, then advertise the
        # resulting URIs in the Pyro name server under well-known names.
        nasdaq_uri = daemon.register(nasdaq)
        newyork_uri = daemon.register(newyork)
        with Pyro4.locateNS() as ns:
            ns.register("example.stockmarket.nasdaq", nasdaq_uri)
            ns.register("example.stockmarket.newyork", newyork_uri)
        print("Stockmarkets available.")
        daemon.requestLoop()  # serve remote calls until interrupted
|
[
"luc0797@gmail.com"
] |
luc0797@gmail.com
|
719850b932da4953da84ee2edd197cb74e461187
|
9eb497bf08553121bb96860485d230d17eb47af0
|
/Seaborn.docset/Contents/Resources/Documents/generated/seaborn-distplot-1.py
|
dea7cb9c41139ac7396e4b02fc850fcbf669c633
|
[] |
no_license
|
peixian/seaborn-dash
|
34eba1c332cbfd3624b7e0e51b196c5c3d15b59a
|
100abc4dd503348640285fdaaaffcf7099a40d25
|
refs/heads/master
| 2020-09-22T12:48:10.649151
| 2016-08-30T16:28:03
| 2016-08-30T16:28:03
| 66,892,301
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# Example from the seaborn distplot docs: histogram + KDE of 100 samples
# drawn from a standard normal distribution (seed fixed for reproducibility).
import seaborn as sns, numpy as np
sns.set(rc={"figure.figsize": (8, 4)}); np.random.seed(0)
x = np.random.randn(100)
ax = sns.distplot(x)
|
[
"peixian@users.noreply.github.com"
] |
peixian@users.noreply.github.com
|
d77ed54b64dd54d79d02e8c5825fe260bc99827b
|
d867d08f51b959abc48aa53c47087b78263377c0
|
/predict.py
|
8b86bc112b0ee7c6f60c503639543dc16e296f24
|
[] |
no_license
|
imclab/boardeye
|
7f143bb30da2961fef2c8f44dee87dee56c6130b
|
c40f40c7e4d0ccea74e77c03ebe36574177c3f9b
|
refs/heads/master
| 2021-01-15T21:48:42.339659
| 2016-04-28T19:25:25
| 2016-04-28T19:25:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
#!/usr/bin/env python
# Python 2 script: predict the class of one sample given as a
# comma-separated feature list on the command line, using a Keras model
# saved as JSON architecture + HDF5 weights.
import sys
import numpy as np
from keras.models import model_from_json
MODEL_FILE = './data/model.json'   # serialized model architecture
WEIGHT_FILE = './data/weight.hdfs' # trained weights
#
# Preparing data
#
# argv[1] is e.g. "0.1,0.2,0.3"; the array shape becomes (1, n_features).
x = np.array([sys.argv[1].split(',')], dtype=np.float32)
#
# Load model
#
model = model_from_json(open(MODEL_FILE).read())
model.load_weights(WEIGHT_FILE)
# Compile is required before predict_classes; the loss/optimizer choice
# does not affect inference.
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
#
# Prediction
#
print model.predict_classes(x, batch_size=32, verbose=1)
|
[
"ray.sakai@logical-fabrics.com"
] |
ray.sakai@logical-fabrics.com
|
cb6d594fb6a6e9b54d5b57bc4bb708f689fb6082
|
ad5e071f3182e36c2683a57c5d6c13a75c281c2c
|
/conversion.py
|
fac90699857ac218017e4f9f1fd55cd3dfe37d22
|
[] |
no_license
|
JTam3/johnny-conversions
|
3c4b5f6401b26e8285ff7f762ea9dc191d9fc2a0
|
e0357e376665d0c795daa10e2da63895ca5f1774
|
refs/heads/master
| 2021-01-11T02:55:03.085578
| 2016-10-14T15:00:10
| 2016-10-14T15:00:10
| 70,908,363
| 0
| 1
| null | 2016-10-14T15:00:11
| 2016-10-14T12:30:14
|
Python
|
UTF-8
|
Python
| false
| false
| 79
|
py
|
# My Conversion Tools
def dollars2cents(dollars):
    """Placeholder converter: greets and echoes *dollars*.

    NOTE(review): despite the name, no dollars-to-cents conversion happens
    yet -- the function only prints and implicitly returns None.
    """
    print("Hello!", dollars)
|
[
"johnny@nih.gov"
] |
johnny@nih.gov
|
ab40f36a86bd19bcdcc25767006ce554c82c95cc
|
aa4e049abfafd0eaa71a8d1286b025b978083f96
|
/0x05-python-exceptions/0-safe_print_list.py
|
f904959fe26a74b2f4957d484fee91dbf90795ba
|
[] |
no_license
|
PabloYepes27/holbertonschool-higher_level_programming
|
fdc517c2535b6affab2a8eee4e98252c78314be5
|
23be0d96c5cc11c8801e6a1fd758a87af57be42f
|
refs/heads/master
| 2022-12-16T13:39:15.268636
| 2020-09-25T00:03:38
| 2020-09-25T00:03:38
| 259,388,690
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
#!/usr/bin/python3
def safe_print_list(my_list=[], x=0):
    """Print the first x elements of my_list on one line, no separator.

    Indices past the end of the list are skipped silently.  A trailing
    newline is always printed.  Returns how many elements were printed.
    """
    printed = 0
    for idx in range(x):
        try:
            print(my_list[idx], end="")
        except IndexError:
            continue
        printed += 1
    print()
    return printed
|
[
"juanpabloyepest@gmail.com"
] |
juanpabloyepest@gmail.com
|
b22bce9bc39515a6d5737f34cb246c8f7773fd4c
|
d1c15641ece2cdbfe73e4e287d199a8edcb60647
|
/pytest/exercises.py
|
600589b8c35acad3067fe26ec255292cc1274306
|
[] |
no_license
|
caichinger/pyconsk18_testing
|
24c0eb438290aad53e2ad1fc048ec8990243e075
|
da8da6ec75e0f8fd77acbafc016d554c88266d7f
|
refs/heads/master
| 2021-04-03T09:02:12.448826
| 2018-03-12T19:52:48
| 2018-03-12T19:52:48
| 124,597,060
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
"""
Complete/fix these tests.
"""
import pytest
import numpy as np
import pandas as pd
def test_comparing_list():
assert [1, 2, 3] == [1, 3]
def test_comparing_dict():
assert {'a': 1, 'b': 2} == {'c': 2, 'a': 1}
def test_comparing_numbers():
assert (0.1 + 0.1 + 0.1) / 3 == 0.1
def test_comparing_numpy_arrays():
assert np.array([1, 2, 3.1]) == np.array([1, 2, 3])
def test_comparing_dataframes():
assert pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) == pd.DataFrame({'b': [3., 4.], 'a': [1, 2]})
def some_exception():
raise ZeroDivisionError('O.o')
def test_some_exception():
# check that ZeroDivisionError is raised with 'O.o'
pass
def multiply(a, b):
"""
>>> multiply(1, 2)
2
>>> multiply([3], 3)
[3, 3, 3]
"""
return a * b
def test_multiply():
# run doctest using pytest
# make parametrized test case based on doctests
pass
if __name__ == '__main__':
pytest.main([__file__])
|
[
"claus.aichinger@mailbox.org"
] |
claus.aichinger@mailbox.org
|
25e7c21509763f2ac001db2554122f4e02d77f3d
|
16cbf67734dba7f91be76e00d038f5f0c07823a9
|
/lesospider/pipelines.py
|
af181889bd6e1c11dae11a4f3432237791d90e56
|
[] |
no_license
|
huichen90/lesospider
|
0381b0837b362d28099616cce808b0da70a9e141
|
9363dcf4259f2eddb367a9a9f35221d36ca04cee
|
refs/heads/master
| 2020-03-17T17:08:43.470383
| 2018-10-08T08:47:17
| 2018-10-08T08:47:17
| 133,776,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,840
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import datetime
import logging
import pymysql
from langdetect import detect
from scrapy.utils.project import get_project_settings
from lesospider.translate import Translate
from lesospider.videodownload import VdieoDownload
class Mysql(object):
    """Base pipeline: opens a MySQL connection using the Scrapy settings.

    Subclasses use ``self.conn`` / ``self.cursor`` in their
    ``process_item`` implementations.
    """

    def __init__(self):
        # Pull connection parameters from the Scrapy project settings.
        settings = get_project_settings()
        self.host = settings["DB_HOST"]
        self.port = settings["DB_PORT"]
        self.user = settings["DB_USER"]
        self.pwd = settings["DB_PWD"]
        self.name = settings["DB_NAME"]
        self.charset = settings["DB_CHARSET"]
        self.connect()

    def connect(self):
        """Open the connection and create a default cursor."""
        self.conn = pymysql.connect(host=self.host,
                                    port=self.port,
                                    user=self.user,
                                    password=self.pwd,
                                    db=self.name,
                                    charset=self.charset)
        self.cursor = self.conn.cursor()

    def close_spider(self, spider):
        """Scrapy ``close_spider`` hook: release the cursor, then the connection.

        BUG FIX: the method was misspelled ``colose_spider`` so Scrapy never
        invoked it and the connection leaked; it also closed the connection
        before the cursor.
        """
        self.cursor.close()
        self.conn.close()

    # Backward-compatible alias for any code that called the old misspelled name.
    colose_spider = close_spider
class LesospiderPipeline(Mysql):
    """Pipeline that triggers the automatic video download step.

    NOTE(review): the original (Chinese) docstring said "store into the
    database", but this class only runs VdieoDownload -- presumably it
    downloads videos recorded by MysqlPipeline; confirm in videodownload.py.
    """
    def process_item(self, item, spider):
        try:
            # Hand the open DB connection/cursor to the downloader.
            d = VdieoDownload(db=self.conn, cursor=self.cursor)
            d.Automatic_download()
        except Exception as e:
            # Best-effort: log the failure (message reads "download failed")
            # but keep the item flowing through the pipeline.
            print(e)
            logging.error('下载失败 %s' % e)
        return item
class MysqlPipeline(Mysql):
    """Store scraped video items into the ``videoitems`` MySQL table.

    Filters out duplicates (by URL), unsupported sites, videos outside the
    requested duration range, and videos outside the requested date range.
    """

    def process_item(self, item, spider):
        # Duplicate check: skip URLs that were already stored.
        self.cursor.execute(
            """select * from videoitems where url = %s""",
            (item['url'],))
        repetition = self.cursor.fetchone()
        if repetition or (item['site_name'] != 'letv' and item['site_name'] != 'iqiyi'):
            # Duplicate or unsupported site -- not stored.
            print("此条重复抓取,没有存入数据库")
        elif int(item['video_time']) > int(item['video_time_long']) or int(item['video_time']) < int(
                item['video_time_short']):
            # Duration outside the [short, long] window.
            print('视频时间不满足要求')
        elif int(float(item['start_date'])) <= int(float(item['upload_time'])) <= int(float(item['end_date'])):
            item['upload_time'] = self.ts2dts(item['upload_time'])
            dt = datetime.datetime.now().strftime("%Y-%m-%d")
            if detect(item['title']) != 'zh-cn':
                t = Translate(q=item['title'])  # translate non-Chinese titles
                item['title_cn'], item['language'] = t.translate()
            else:
                item['title_cn'] = item['title']
                item['language'] = '中文'
            # BUG FIX: the INSERT was built with Python %-string formatting,
            # which is an SQL injection vector and breaks on quotes in
            # titles/descriptions.  Use driver parameter binding instead.
            sql = ('insert into videoitems(title,keywords,spider_time,url,site_name,video_time,'
                   'play_count,upload_time,info,video_category,tags,task_id,isdownload,lg,title_cn)'
                   ' values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,0,%s,%s)')
            params = (item['title'], item['keywords'], dt, item['url'], item['site_name'],
                      item['video_time'], item["play_count"], item['upload_time'], item['info'],
                      item['video_category'], item['tags'], item['task_id'],
                      item['language'], item['title_cn'])
            self.cursor.execute(sql, params)
            self.conn.commit()
        else:
            # Publish date outside [start_date, end_date] -- not stored.
            print('发布日期不符合要求,没有存入数据库')
        return item

    def ts2dts(self, timeStamp):
        """Convert a Unix timestamp to a 'YYYY-MM-DD' string (local time)."""
        import time
        timeArray = time.localtime(timeStamp)
        return time.strftime("%Y-%m-%d", timeArray)
|
[
"huichen90"
] |
huichen90
|
05e2d627d9d53c64b423fc63e8ffef75cd230cfd
|
c62665c9cd3c8d48fd6841af286c290809ac0b74
|
/features/environment.py
|
eb69c899c65bd99d69394ed62a54d384b37d3858
|
[] |
no_license
|
khandepc/sampleseleniumproject
|
17c220e2a90bec9ff9a32d7155e699bed051edf1
|
8b86230946f7b03b45dcc580041902dd26bf9359
|
refs/heads/master
| 2020-05-18T05:04:24.019964
| 2019-04-30T05:00:34
| 2019-04-30T05:00:34
| 184,193,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
# behave environment hooks: open/verify the app before each scenario and
# capture a screenshot + close the browser afterwards.
import generic.seleniumbase as sb


def before_step(context, step):
    pass


def after_step(context, step):
    pass
#These run before and after every step.


def before_scenario(context, scenario):
    """Launch Chrome on facebook.com and verify the landing-page title."""
    sb.launch_app('chrome','https://www.facebook.com/')
    actual_title=sb.get_page_details('title')
    expected_title='Facebook – log in or sign up'
    # NOTE(review): the failure message lacks spaces between the pieces,
    # so it prints e.g. "Xis not matching withY".
    assert actual_title==expected_title,actual_title+'is not matching with'+expected_title


def after_scenario(context, scenario):
    # Screenshot named after the scenario, then tear down the browser.
    sb.capture_screen_shot(scenario.name)
    sb.close_app()
#These run before and after each scenario is run.


def before_feature(context, feature):
    pass


def after_feature(context, feature):
    pass
#These run before and after each feature file is exercised.


def before_tag(context, tag):
    pass


def after_tag(context, tag):
    pass
#These run before and after a section tagged with the given name.
# They are invoked for each tag encountered in the order they’re found in the feature file.
# See controlling things with tags.


def before_all(context):
    pass


def after_all(context):
    pass
|
[
"khandepc@gmail.com"
] |
khandepc@gmail.com
|
939e94062c6e45b96e81639c3bd0987e702fa899
|
24f2696aab87f1632705a7c8b2d3b866e26aa3ee
|
/container_most_water_11.py
|
72b6c6448e71d275a5e8bde4f53685a914c43692
|
[] |
no_license
|
adiggo/leetcode_py
|
44a77a0b029f4d92bd0d8e24cad21ceea52e7794
|
4aa3a3a0da8b911e140446352debb9b567b6d78b
|
refs/heads/master
| 2020-04-06T07:05:21.770518
| 2016-07-01T16:00:40
| 2016-07-01T16:00:40
| 30,397,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
class Solution(object):
    def maxArea(self, height):
        """Container With Most Water (LeetCode 11).

        Two-pointer scan from both ends: the area is limited by the
        shorter wall, so always move the shorter side inward while
        tracking the best area seen.

        :type height: List[int]
        :rtype: int
        """
        lo, hi = 0, len(height) - 1
        best = 0
        while lo < hi:
            shorter = min(height[lo], height[hi])
            best = max(best, shorter * (hi - lo))
            if height[lo] < height[hi]:
                lo += 1
            else:
                hi -= 1
        return best
|
[
"adiggo@gmail.com"
] |
adiggo@gmail.com
|
9d55a508e3a6001695fd83bf5b5803da3bc9311a
|
27acd9eeb0d2b9b6326cc0477e7dbb84341e265c
|
/test/vraag4/src/yahtzee/126.py
|
e93b5b1f181af0f4fcb6d0417587a05553fef970
|
[] |
no_license
|
VerstraeteBert/algos-ds
|
e0fe35bc3c5b7d8276c07250f56d3719ecc617de
|
d9215f11cdfa1a12a3b19ade3b95fa73848a636c
|
refs/heads/master
| 2021-07-15T13:46:58.790446
| 2021-02-28T23:28:36
| 2021-02-28T23:28:36
| 240,883,220
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
def histogram(stenen):
    """Return {die value: occurrence count} for the rolled dice *stenen*.

    Fixed: no longer sorts the caller's list in place, and counts in a
    single pass with collections.Counter instead of calling list.count
    once per element (which was O(n^2)).
    """
    return dict(Counter(stenen))
def max_gelijk(stenen):
    """Return the highest multiplicity among the dice (e.g. 3 for a triple).

    Returns 0 for an empty roll.  Fixed: no longer mutates the caller's
    list and replaces the quadratic count-per-element pattern with a
    single Counter pass.
    """
    counts = Counter(stenen)
    return max(counts.values()) if counts else 0
def is_FullHouse(stenen):
    """Return True iff the dice form a full house (a triple plus a pair).

    Fixed: the original crashed with IndexError on five-of-a-kind rolls
    (only one distinct count exists, so indexing the second one failed);
    it also mutated the caller's list via in-place sort.
    """
    return sorted(Counter(stenen).values()) == [2, 3]
|
[
"bertverstraete22@gmail.com"
] |
bertverstraete22@gmail.com
|
f128c4eb47e99367d6fba9e5c51bfad341cca121
|
7b4ffa988c12fb4076655722fa04fef98c21c43d
|
/PraatWrapper.py
|
40bf34c019e0d4865788ac20fccaed11e701c4e4
|
[] |
no_license
|
lyndonrey/presidential_pronunciation
|
cbeee725f31bfb2be5aaf508cdf3ba1d50dc4223
|
0fb0c037616297196a46020fdb1bc3d2a6d39ba6
|
refs/heads/master
| 2021-01-17T14:39:23.673154
| 2017-03-06T16:50:11
| 2017-03-06T16:50:11
| 84,096,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,214
|
py
|
#!/usr/bin/env python
from subprocess import call
from multiprocessing import Process as Task, Queue
import time, sys, collections, subprocess, glob
def callPraat(wavIn, dataOut):
    """Run the praat formant-getter script on *wavIn*, appending praat's
    stdout to the file *dataOut*.  Returns praat's exit code.

    Fixed: the output file handle was opened but never closed; a context
    manager now guarantees it is released even if `call` raises.
    """
    with open(dataOut, "a") as out:  # append: many wavs share one data file
        return call(["praat", "--run", "formants-master.praat", wavIn], stdout=out)
def removeUndefs(fileIn):
    """Strip every line containing '--undefined--' from *fileIn*, in place.

    Praat prints '--undefined--' for unmeasurable formants; dropping those
    rows keeps downstream statistics tools happy.

    Fixed: the file is now handled by a context manager so the handle is
    closed even if reading/writing raises.
    """
    with open(fileIn, "r+") as f:
        lines = f.readlines()
        f.seek(0)  # rewind before rewriting the surviving lines
        for line in lines:
            if "--undefined--" not in line:
                f.write(line)
        f.truncate()  # drop any leftover tail of the old contents
    print ("Done removing undefined measurements")
def getTotalDuration(fileIn):
    """Return praat's stdout for *fileIn* as bytes (expected to print a
    total duration, per the docstring's intent).

    NOTE(review): the only call site is commented out in the script body
    below, and it passes a .wav path while `praat --run` expects a script
    file -- verify before re-enabling.
    """
    return subprocess.Popen(["praat", "--run", fileIn], stdout=subprocess.PIPE).communicate()[0]
# --- Script body --------------------------------------------------------
# Usage: PraatWrapper.py "<wav glob>" <output data file>
directory = sys.argv[1] # The input directory must be in quotes (in Bash anyway) or else it wildcards weird
# print (directory)
# Get all the wav files from directory to parse
wavs = glob.glob(directory)
dataFile = sys.argv[2] # Output file for data
f = open(dataFile, "w+") # Create file if it doesn't exist
f.close()
sumOfProcesses = 0 # The summed time total of all processed wavs
# For each wav in the directory specified...
for wav in wavs:
    # print ("total Duration:", getTotalDuration(wav))
    workers = []
    # Specify and start the task
    child = Task(target=callPraat, args=(wav, dataFile))
    child.start()
    workers.append(child)
    # These few lines just give a command line counter so it seems more interactive
    # NOTE(review): only one worker is started per wav and the loop waits
    # for it, so processing is effectively sequential; `progress` is a
    # once-per-second poll counter, i.e. an approximate elapsed-seconds count.
    progress = 0
    while any(i.is_alive() for i in workers):
        progress += 1
        print ("Running time (s):", progress, wav.split("/")[-1], end="\r")
        time.sleep(1)
    sumOfProcesses += progress
    print ("\nCurrent total (s):", sumOfProcesses)
print ("Total analysis time (s):", sumOfProcesses)
print ("Removing undefined measurements")
removeUndefs(dataFile)
|
[
"lrey@uwo.ca"
] |
lrey@uwo.ca
|
a8ea32ee092bd7d2b6681537f10d679a04d2e658
|
d0f8474101f55a181e2cbaa95b9169378b84deb8
|
/app/main/model/oracle/supplier_management.py
|
ccb57a595f6927b5b0a58bc085ac66412868206a
|
[] |
no_license
|
onewoorks/knowage-dataset-api
|
ae60041d297d1f76f89e7a3cc35df361b7dacbef
|
7dbf19a549b2b20b245cd9cea6bd4eaeffcd5e76
|
refs/heads/master
| 2022-07-10T15:38:56.897691
| 2019-12-10T08:45:22
| 2019-12-10T08:45:22
| 198,759,687
| 0
| 0
| null | 2022-06-21T23:06:36
| 2019-07-25T04:52:32
|
Python
|
UTF-8
|
Python
| false
| false
| 5,116
|
py
|
from .. import execute_oracle_query
from datetime import datetime, date
class RETURN_RESPONSE:
    """Mixin that reshapes DB-API row tuples into a list of dictionaries."""

    def response(self, columns, result):
        """Pair each row of *result* with *columns* by position.

        Returns one dict per row, keyed by column name.
        """
        rows = []
        for record in result:
            rows.append({col: record[pos] for pos, col in enumerate(columns)})
        return rows
class ORACLE_SM_QUERY(RETURN_RESPONSE):
    """MOF supplier-registration revenue queries against the Oracle DB.

    Results are reshaped into lists of dicts by RETURN_RESPONSE.response.
    """

    def ytd_mof_registration(self):
        """Year-to-date registration revenue grouped by application type.

        Only counts fully paid (RM400, receipt issued) non-bumi certificates
        effective in the current year.
        """
        year = datetime.now().year
        query = "SELECT "
        query += "b.appl_type as \"application_type\", "
        query += "sum(d.PAYMENT_AMT) as \"total_amount\" "
        query += "FROM SM_MOF_CERT a, sm_appl b, py_bill c, PY_PAYMENT d "
        query += "WHERE "
        query += "a.appl_id = b.appl_id "
        query += "AND c.org_profile_id = b.supplier_id "
        query += "AND b.appl_no = c.bill_no "
        query += "AND c.BILL_ID = d.BILL_ID "
        query += "AND to_char(a.eff_date, 'YYYY') = '{}' ".format(year)
        query += "AND a.is_bumi_cert = 0 "
        query += "AND d.PAYMENT_AMT = '400' "
        query += "AND d.RECEIPT_NO IS NOT NULL "
        query += "GROUP BY b.appl_type "
        query += "ORDER BY 1 "
        resp = execute_oracle_query(query)
        columns = ("APPLICATION_TYPE", "AMOUNT")
        return self.response(columns, resp)

    def actual_mof_registration(self, current_year='now', current_month='now'):
        """Registration revenue for one month, grouped by application type.

        Pass 'now' (the default) for *current_year* / *current_month* to use
        today's year/month; otherwise pass explicit numeric values.
        """
        year = datetime.now().year if current_year == 'now' else current_year
        # BUG FIX: the month fallback previously tested current_year instead
        # of current_month, so any explicitly supplied month was ignored
        # whenever an explicit year was given.
        month = datetime.now().month if current_month == 'now' else current_month
        query = "SELECT "
        query += "to_char(a.eff_date,'YYYY-MM') as \"year_and_month\", "
        query += "b.appl_type as \"application_type\", "
        query += "sum(d.PAYMENT_AMT) as \"total_amount\" "
        query += "FROM SM_MOF_CERT a, sm_appl b, py_bill c, PY_PAYMENT d "
        query += "WHERE "
        query += "a.appl_id = b.appl_id "
        query += "AND c.org_profile_id = b.supplier_id "
        query += "AND b.appl_no = c.bill_no "
        query += "AND c.BILL_ID = d.BILL_ID "
        query += "AND to_char(a.eff_date, 'YYYY-MM') = '{}-{}' ".format(
            year, str(month).zfill(2))
        query += "AND a.is_bumi_cert = 0 "
        query += "AND d.PAYMENT_AMT = '400' "
        query += "AND d.RECEIPT_NO IS NOT NULL "
        query += "GROUP BY b.appl_type, to_char(a.eff_date, 'YYYY-MM') "
        query += "ORDER BY 1 "
        resp = execute_oracle_query(query)
        columns = ("YEAR_AND_MONTH", "APPLICATION_TYPE", "AMOUNT")
        return self.response(columns, resp)

    def ora_actual_supplier_revenue(self, working_days, appl_type):
        """Per-working-day (or day-range "a-b") revenue totals for the
        current month, limited to *appl_type* applications.

        Builds one SELECT per entry in *working_days*, UNION ALL-ed together.
        """
        this_month = date.today().strftime("%Y-%m")
        print("perform actual supplier revenue (oracle) => {}".format(appl_type))
        query = ""
        count = 1
        for i in working_days:
            query += "SELECT '{}' AS working_date , SUM(x.PAYMENT_AMT) AS total FROM ( ".format(i)
            query += "SELECT DISTINCT"
            query += " a.mof_account_id,"
            query += " b.appl_no,"
            query += " b.appl_type,"
            query += " a.eff_date,"
            query += " a.exp_date,"
            query += " d.PAYMENT_DATE,"
            query += " d.PAYMENT_AMT "
            query += "FROM SM_MOF_CERT a, sm_appl b, py_bill c, PY_PAYMENT d "
            query += "WHERE"
            query += " a.appl_id = b.appl_id"
            query += " AND c.org_profile_id = b.supplier_id"
            query += " AND b.appl_no = c.bill_no"
            query += " AND c.BILL_ID = d.BILL_ID"
            if '-' in i:
                # "a-b" means an inclusive day range within the month.
                day = i.split('-')
                day_start = day[0].zfill(2)
                day_end = day[1].zfill(2)
                query += " AND to_char(a.eff_date, 'YYYY-MM-DD') >= '{}-{}'".format(this_month, day_start)
                query += " AND to_char(a.eff_date, 'YYYY-MM-DD') <= '{}-{}'".format(this_month, day_end)
            else:
                query += " AND to_char(a.eff_date, 'YYYY-MM-DD') = '{}-{}'".format(this_month, i.zfill(2))
            query += " AND a.is_bumi_cert = 0"
            query += " AND b.appl_type IN ('{}')".format(appl_type)
            query += " AND d.PAYMENT_AMT = '400'"
            query += " AND d.RECEIPT_NO IS NOT NULL"
            query += " ORDER BY 1"
            query += ") X GROUP BY 1,'{}' ".format(i)
            if count < len(working_days):
                query += "UNION ALL "
            count += 1
        resp = execute_oracle_query(query)
        final = []
        for result in resp:
            content = {
                "working date": result[0],
                "total": result[1]
            }
            final.append(content)
        return final
|
[
"irwanbibrahim@onewoorks.com"
] |
irwanbibrahim@onewoorks.com
|
7d2536b37851f699500ad519ad72a92ef4880edb
|
fc017beff82d8e5d00f5c18fe85dae9aacafbac7
|
/wosis/__init__.py
|
9720f21e40c75ef21bfd8c8d24066f0390450ff2
|
[] |
no_license
|
ConnectedSystems/wosis
|
a29a82b5828842eb409e058e03caceabecf1417f
|
0418389affce4e2c440f3c96b306dbaad2a23156
|
refs/heads/master
| 2020-04-04T00:39:48.731783
| 2019-09-11T06:00:05
| 2019-09-11T06:00:05
| 155,658,135
| 9
| 0
| null | 2019-01-31T03:20:17
| 2018-11-01T03:40:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 131
|
py
|
from .query import *
from .store import *
from .convert import *
from .analysis import *
from wosis.TopicResult import TopicResult
|
[
"takuyai@gmail.com"
] |
takuyai@gmail.com
|
6136481ab72ac9aa1f3ab3abc6824c864535f5cb
|
3505ea8c883a2986c05ecb2807f6bc2ec13aac03
|
/team_as_a_unit/team_graph.py
|
498b9995c05f2f46d8530f627716192347b31d55
|
[] |
no_license
|
wdunicornpro/GithubTeamRecommendation
|
b89c42823251b9baec9534b2a2389cb9d08905eb
|
94299ca1cb408644b592be739cf13eaa5c1bf5a5
|
refs/heads/master
| 2023-03-14T21:17:35.706355
| 2021-03-30T06:45:19
| 2021-03-30T06:45:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,387
|
py
|
# Build a weighted graph between teams: two teams become linked when
# members of both contributed to the same repository; the edge weight
# counts such co-occurrences across all repositories.
import json
import os
import re

teams = []          # raw JSON string (member list) per team, in file order
team_inds = {}      # team JSON string -> index into `teams`
user_team = {}      # username -> list of team indices the user belongs to

with open('team_profiles.json') as tj:
    for l in tj.readlines():
        line = l.strip().split('\t')
        teams.append(line[0])
        for user in json.loads(line[0]):
            if not user in user_team:
                user_team[user] = []
            user_team[user].append(len(teams)-1)
        team_inds[line[0]] = len(teams)-1

repo_team = {}   # NOTE(review): never populated below -- apparently unused
team_graph = {}  # team index -> {other team index: shared-repo pair count}
cnt = 0
with open("contributors.json") as rj:
    for l in rj.readlines():
        print(cnt)  # progress indicator: one line per repository processed
        contributors = [c['login'] for c in json.loads(l)['contributors']]
        # Only contributors that belong to at least one known team matter.
        intersect = list(set(contributors).intersection(user_team))
        for i,u in enumerate(intersect):
            for team in user_team[u]:
                if not team in team_graph:
                    team_graph[team] = {}
                # Pair this user's teams with the teams of every later
                # contributor in the list (avoids double-counting pairs).
                for j in range(i+1,len(intersect)):
                    for tm in user_team[intersect[j]]:
                        if tm == team:
                            continue
                        if not tm in team_graph[team]:
                            team_graph[team][tm] = 0
                        team_graph[team][tm] += 1
        cnt += 1

# Emit one line per team: "<team json>\t<adjacency json>".
with open('team_graph.json','w') as rp:
    for i,team in enumerate(teams):
        g = team_graph[i] if i in team_graph else {}
        rp.write("%s\t%s\n"%(team,json.dumps(g)))
|
[
"18221943430@163.com"
] |
18221943430@163.com
|
0fb30ce665904a7a14c1f692855392fc8cacaf74
|
3cb5758d30868b8ae0cc4e9a2c0c1d03bba95db3
|
/src/alg/simplex/simplex_method.py
|
676899d91fa968750381bebed3bb5e235dc87ff7
|
[] |
no_license
|
Brahialis0209/gui-qt
|
c306611c33ca0b829ae9c5a92a8a6b89323c6948
|
a54c4810afa980129c28989dde1d91c4921181e6
|
refs/heads/master
| 2022-12-02T14:54:13.687320
| 2020-05-30T21:21:47
| 2020-05-30T21:21:47
| 287,610,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,587
|
py
|
import numpy as np
import itertools as it
import copy as cp
from src.alg.exceptions import SimplexAlgorithmException, \
IncompleteTaskRankException
from src.alg.simplex.config import SimplexConfig
def del_null(A, c):
    """Drop every all-zero row of A together with the matching entry of c.

    A: constraint matrix, c: goal-function coefficients.  Returns the
    reduced (A, c) pair; the inputs themselves are not modified.
    """
    zero_rows = [i for i, row in enumerate(A)
                 if all(value == 0 for value in row)]
    # Delete from the highest index down so earlier indices stay valid.
    for i in reversed(zero_rows):
        A = np.delete(A, i, axis=0)
        c = np.delete(c, i)
    return A, c
def all_null(column):
    """Return True when every component of *column* equals zero."""
    return all(x == 0 for x in column)
def plus_list(ref_vector):
    """Return the indices of the strictly positive components of ref_vector."""
    return [i for i, x in enumerate(ref_vector) if x > 0]
def artificial_basis(A, b):
    """Build the phase-one (artificial basis) problem for A x = b.

    Appends an identity block to A, assigns cost 1 to the artificial
    variables (0 to the real ones), and uses (0, ..., 0, b) as the
    starting point.  Raises IncompleteTaskRankException when rank(A) is
    less than the number of rows.
    """
    rows, columns = A.shape
    if np.linalg.matrix_rank(A) != rows:
        raise IncompleteTaskRankException()
    extended_A = np.append(A, np.eye(rows), axis=1)
    zero_costs = np.zeros(columns, float)
    extended_c = np.append(zero_costs, np.ones(rows))
    start_point = np.append(zero_costs, b)  # artificial vars carry b
    return extended_A, extended_c, start_point
def pos_vector(vector):
    """Return True when no component of *vector* is negative."""
    return all(x >= 0 for x in vector)
def find_new_basis(N_k, indices_not_plus_element, L, A):
    """Try to swap a degenerate basis column for a non-basis column so the
    basis matrix stays non-singular.

    N_k: current basis column indices; indices_not_plus_element: basis
    indices whose ref-vector component is not positive; L: non-basis
    column indices; A: constraint matrix.  Mutates N_k and returns it on
    a successful swap.

    NOTE(review): sub_A keeps accumulating rows across iterations of the
    outer loop, and the trailing N_k.append re-adds an index after the
    inner search fails -- both look suspicious; confirm against the
    algorithm's reference before changing anything here.
    """
    sub_A = list()
    for ind_not_plus_element in indices_not_plus_element:
        # Collect the basis columns, excluding the one being replaced.
        for ind_N_k in N_k:
            if ind_N_k != ind_not_plus_element:
                sub_A.append(A.transpose()[ind_N_k])
        for ind_L in L:
            # Candidate basis: kept columns plus one column from L.
            new_A_N = cp.deepcopy(sub_A)
            new_A_N.append(A.transpose()[ind_L])
            new_A_N = np.array(new_A_N)
            if np.linalg.det(new_A_N) != 0:
                # Non-singular candidate found: perform the swap.
                N_k[N_k.index(ind_not_plus_element)] = ind_L
                return N_k
        N_k.append(ind_not_plus_element)
def find_new_A_N(A, A_N, N_k, N_null_index):
    """Extend the rectangular basis candidate A_N to a square non-singular
    matrix by adding columns of A drawn from the zero-valued indices.

    Returns (possibly extended N_k, square basis matrix).

    NOTE(review): if no combination of padding columns yields a
    non-singular matrix, the loop falls through and the function
    implicitly returns None -- callers would then fail on unpacking.
    """
    new_M, new_N = A_N.shape
    delta = new_M - new_N  # number of columns still missing
    if delta == 0:
        return N_k, A_N
    column_A_combinations = list(it.combinations(N_null_index, delta))
    for column_A in column_A_combinations:
        # Assemble existing columns plus the candidate padding columns.
        sub_A = list()
        for column in A_N.transpose():
            sub_A.append(column)
        for index in column_A:
            sub_A.append(A.transpose()[index])
        sub_A = np.array(sub_A).transpose()
        if np.linalg.det(sub_A) != 0:
            # Invertible: accept this padding and record the new indices.
            for index in column_A:
                N_k.append(index)
            return N_k, sub_A
def find_new_B(B, N_k, i_k, sub_u):
    """Update the basis inverse B after column i_k changes in the basis.

    Builds an elementary transformation matrix F (identity with one
    modified column) and returns F @ B -- the product-form inverse update.
    """
    F = np.eye(len(N_k))  # B - inv to A, F - inv to B
    for ind in range(len(N_k)):
        # Pivot row gets 1/u_pivot; every other row gets -u_i/u_pivot.
        column_element_for_F = -sub_u[ind] if F[ind][N_k.index(i_k)] != 1 else 1
        F[ind][N_k.index(i_k)] = column_element_for_F / sub_u[N_k.index(i_k)]
    return np.dot(F, B)
def update_d_L(d_L):
    """Flush numerical noise in d_L to exact zero, in place.

    Any entry whose magnitude is at most SimplexConfig.eps is replaced
    with 0 so later sign tests are not fooled by round-off.
    """
    for idx, value in enumerate(d_L):
        if abs(value) <= SimplexConfig.eps:
            d_L[idx] = 0
def simplex_dates_L_dimension(N_k, A, c, B):
    """Compute the reduced costs d_L = c_L - c_N^T B A_L for the non-basis set.

    N_k: basis column indices; A: constraint matrix; c: cost vector;
    B: inverse of the basis matrix.  Returns (d_L, L) where L lists the
    non-basis column indices in ascending order.
    """
    M, N = A.shape
    c_N = list()
    L = list()
    A_L = list()
    c_L = list()
    # Basis costs, in basis order.
    for index in N_k:
        c_N.append(c[index])
    c_N = np.array(c_N)
    # Collect non-basis columns and their costs.
    for index in range(N):
        if index not in N_k:
            L.append(index)
            A_L.append(A.transpose()[index])
            c_L.append(c[index])
    A_L = np.array(A_L).transpose()
    c_L = np.array(c_L)
    Y = np.dot(c_N.transpose(), B)  # simplex multipliers
    return c_L - np.dot(Y, A_L), L  # d_L and L
def calc_j_k(d_L, L):
    """Return the L-index of the first negative reduced cost, or 0 if none.

    d_L: reduced costs for the non-basis columns; L: the corresponding
    column indices, position-aligned with d_L.
    """
    for pos, reduced_cost in enumerate(d_L):
        if reduced_cost < 0:
            return L[pos]
    return 0
def calc_list_i_k(N_k, sub_u, u):
    """Scatter sub_u into u at the basis positions given by N_k.

    Mutates u in place and returns the basis positions whose direction
    component came out strictly positive (candidates to leave the basis).
    """
    positive_positions = []
    for pos, basis_index in enumerate(N_k):
        u[basis_index] = sub_u[pos]
        if u[basis_index] > 0:
            positive_positions.append(basis_index)
    return positive_positions
def calc_coefficients(i_k_list, ref_vector, u):
    """Ratio test: return (gamma, i_k) where gamma is the smallest
    ref_vector[i]/u[i] over i in i_k_list and i_k is the index achieving it.

    Ties keep the earliest index in i_k_list, matching the original
    strict-less-than scan.
    """
    leaving = min(i_k_list, key=lambda i: ref_vector[i] / u[i])
    return ref_vector[leaving] / u[leaving], leaving
def calc_indices_not_plus_element(N_k, N_plus_index):
    """Return the basis indices (order preserved) whose reference-vector
    component is not strictly positive, i.e. not listed in N_plus_index."""
    return [index for index in N_k if index not in N_plus_index]
def main_algorithm(N_k, A, c, ref_vector, B):
    """Perform one iteration of the revised simplex method.

    Returns (done, ref_vector, B, N_k):
      done       -- True when every reduced cost is non-negative (optimum found),
      ref_vector -- current (possibly updated) feasible vector,
      B          -- inverse of the current basis matrix,
      N_k        -- current basis column indices.

    Relies on helpers defined elsewhere in the file: pos_vector, plus_list,
    find_new_basis.
    """
    M, N = A.shape
    d_L, L = simplex_dates_L_dimension(N_k, A, c, B)  # d_L = c_L - np.dot(Y, A_L)
    update_d_L(d_L)  # clamp near-zero reduced costs to exactly 0
    if pos_vector(d_L):
        # no negative reduced cost -> current vector is optimal
        return True, ref_vector, B, N_k
    j_k = calc_j_k(d_L, L)  # entering column: first index in L with negative reduced cost
    A_j = A.transpose()[j_k]
    u = np.zeros(N)
    sub_u = np.dot(B, A_j.transpose())  # entering direction in basis coordinates
    i_k_list = calc_list_i_k(N_k, sub_u, u)  # candidates for the leaving index
    u[j_k] = -1
    if len(i_k_list) == 0:
        # no positive component -> direction is unbounded, signal with a zero vector
        return False, np.zeros(N), B, N_k
    coefficient, i_k = calc_coefficients(i_k_list, ref_vector, u)  # min-ratio test
    N_plus_index = plus_list(ref_vector)
    B = find_new_B(B, N_k, i_k, sub_u)
    if len(N_plus_index) != len(N_k):
        # degenerate vertex: basis is larger than the support of ref_vector
        indices_not_plus_element = calc_indices_not_plus_element(N_k, N_plus_index)
        for index in indices_not_plus_element:
            if u[index] > 0:
                N_k = find_new_basis(N_k, indices_not_plus_element, L, A)
                return False, ref_vector, B, N_k
    new_ref_vector = ref_vector - coefficient * u
    N_k[N_k.index(i_k)] = j_k  # swap leaving column i_k for entering column j_k
    return False, new_ref_vector, B, N_k
def first_step(A, ref_vector):
    """Build the initial basis for the given feasible reference vector.

    Basis columns are the columns of A where ref_vector is positive, padded
    with zero-component columns by find_new_A_N until the basis matrix is
    square and invertible.  Returns (N_k, B) where B is the inverse of the
    basis matrix.
    """
    N_k = list()
    N_null_index = list()  # need to build N_k
    N_plus_index = list()
    A_N = list()
    for ind, x in enumerate(ref_vector):
        if x > 0:
            N_plus_index.append(ind)
            N_k.append(ind)
            A_N.append(A.transpose()[ind])
        elif x == 0:
            N_null_index.append(ind)
    A_N = np.array(A_N)
    A_N = A_N.transpose()
    N_k, new_A_N = find_new_A_N(A, A_N, N_k, N_null_index)  # N_k - column num in current ref_vector
    B = np.linalg.inv(new_A_N)
    return N_k, B
def N_K_dates_L_dimensions(N, N_k):
    """Return the non-basic column indices: every index in [0, N) absent
    from the basis list N_k, in ascending order."""
    return [index for index in range(N) if index not in N_k]
def transform_ref_vector(ref_vector, B, N_k, A):
    """Convert the phase-1 result to the original problem's dimensions.

    Truncates ref_vector to the first N components (dropping artificial
    variables), reorders the rows of B to match the sorted basis N_k, and
    pivots unit-vector (identity) basis columns out of the basis, replacing
    each with a non-basic real column of A whenever that keeps the basis
    invertible.  Mutates N_k in place; returns (ref_vector, B).
    """
    M, N = A.shape
    ref_vector = ref_vector[:N]  # drop artificial variables
    A_N = list()
    N_k_old = list(N_k)
    N_k.sort()
    new_B = list()
    for ind in N_k:
        A_N.append(A.transpose()[ind])
        new_B.append(B[N_k_old.index(ind), :])  # reorder B's rows to the sorted basis
    B = np.array(new_B)  # B - inv to A
    L = N_K_dates_L_dimensions(N, N_k)
    A_N = np.array(A_N).transpose()
    E = np.eye(M)  # eye matrix M dimension
    for i in range(M):
        for j in range(M):
            if np.array_equal(A_N[:, i], E[:, j]):
                # basis column i is a unit vector (presumably an artificial
                # column): try to replace it with a non-basic real column of A
                for ind in L:
                    A_N[:, i] = A[:, ind]
                    if np.linalg.det(A_N) != 0:
                        i_k = N_k[i]
                        j_k = ind
                        A_j = A.transpose()[j_k]
                        sub_u = np.dot(B, A_j.transpose())
                        B = find_new_B(B, N_k, i_k, sub_u)
                        N_k[i] = j_k
                        L.remove(ind)
                        break
                # NOTE(review): if no replacement keeps the basis invertible,
                # A_N is left holding the last tried column — verify intended.
    return ref_vector, B
def start_simplex_method(A, b, c):
    """Solve the linear program (Ax = b, x >= 0) with the two-phase simplex method.

    Phase 1 builds a feasible start via an artificial basis, phase 2 then
    optimizes the original objective c.  Returns (solution_vector,
    plot_points) where plot_points records every intermediate reference
    vector for plotting.

    Raises:
        IncompleteTaskRankException: propagated from artificial_basis when
            the constraint matrix is rank-deficient.
        SimplexAlgorithmException: from start_alg_iterations when the
            reference vector degenerates to all zeros.
    """
    plot_points = list()
    # Bug fix: the original wrapped this call in
    #   try: ... except IncompleteTaskRankException: raise IncompleteTaskRankException()
    # which replaced the caught exception with a fresh, message-less instance
    # and discarded the traceback.  Letting it propagate preserves both.
    sub_A, sub_c, ref_vector = artificial_basis(A, b)
    N_k, B = first_step(sub_A, ref_vector)
    # Phase 1: drive the artificial variables out of the basis.
    ref_vector, B, N_k = start_alg_iterations(N_k, ref_vector, B, sub_A, sub_c, plot_points)
    ref_vector, B = transform_ref_vector(ref_vector, B, N_k, A)
    # Phase 2: optimize the real objective.
    ref_vector, B, N_k = start_alg_iterations(N_k, ref_vector, B, A, c, plot_points)
    return ref_vector, plot_points
def start_alg_iterations(N_k, ref_vector, B, A, c, plot_points):
    """Run main_algorithm repeatedly until it reports optimality.

    Appends every intermediate reference vector to plot_points and raises
    SimplexAlgorithmException if the vector degenerates to all zeros.
    Returns the final (ref_vector, B, N_k).
    """
    while True:
        finished, ref_vector, B, N_k = main_algorithm(N_k, A,
                                                      c, ref_vector, B)
        if all_null(ref_vector):
            raise SimplexAlgorithmException()
        plot_points.append(ref_vector)
        if finished:
            return ref_vector, B, N_k
|
[
"rafantem@mail.ru"
] |
rafantem@mail.ru
|
4bab4960eafc5aae2b791d8ee9b3692b30babe22
|
1313593a03ccace06a838e43a83d953f746d6b33
|
/items.py
|
806de5a573170de959b1b3a91535ae1055757d2a
|
[] |
no_license
|
reindels/Python-Pokemon
|
f4f71aa2587817ad8d3c83f52c60104c95664cf9
|
1c5e0f0426d7c4b09f43427764c1514dccab37cf
|
refs/heads/master
| 2021-01-20T04:33:27.496385
| 2015-11-12T00:28:17
| 2015-11-12T00:28:17
| 45,713,796
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,281
|
py
|
from constants import constants
class item():
    """A game item (medicine, ball, berry, ...) configured from a dict.

    Defaults describe a plain Potion; any field present in item_dict
    overrides the default.  Known type strings live in `constants`:
    Medicine, Vitamin, Berry, Key Item, Poke Ball, TM HM, Held Items,
    General, Mail.
    """
    def __init__(self, item_dict=None):
        """Create an item.

        Args:
            item_dict: optional dict with keys 'name', 'type', 'price',
                'effect', 'description', 'ball modifier', 'message'.
                Bug fix: this parameter now defaults to None so that
                item() is callable with no arguments (copy() relied on
                that and previously raised TypeError).
        """
        self.name = 'Potion'
        self.type = constants.item_type_medicine
        self.price = 300
        self.message = None
        self.effects = []
        self.description = "Heals a Pokemon's HP by 20"
        self.ball_modifier = None
        self.create_item_from_dict(item_dict=item_dict)
    #
    def get_sell_back_price(self):
        """Items sell back for half their price, rounded to an int."""
        return int(round(self.price / 2.0))
    #
    def create_item_from_dict(self, item_dict=None):
        """Populate attributes from item_dict; no-op when item_dict is None."""
        if item_dict is None:
            return None
        self.name = item_dict.get('name')
        self.type = item_dict.get('type')
        self.price = item_dict.get('price')
        self.effects = item_dict.get('effect')
        self.description = item_dict.get('description')
        self.ball_modifier = item_dict.get('ball modifier')  # capture-rate multiplier, 1-4
        self.message = item_dict.get('message')
    #
    def __str__(self):
        return self.name + " (" + self.type + "): $" + str(self.price)
    #
    def copy(self):
        """Return a new item with the same attribute values.

        NOTE: effects is shared (shallow copy), matching the original intent.
        Also copies message, which the original omitted.
        """
        copy_item = item()
        copy_item.name = self.name
        copy_item.type = self.type
        copy_item.price = self.price
        copy_item.effects = self.effects
        copy_item.description = self.description
        copy_item.ball_modifier = self.ball_modifier
        copy_item.message = self.message
        return copy_item

    def get_recover_amount(self):
        # Hard-coded HP recovery amount; presumably interpreted by battle
        # code per item — TODO confirm why this ignores self.effects.
        return 80
#
# poke balls
custom_ball_dict = {'name': 'Custom Ball',
'type': constants.item_type_poke_ball,
'price': 1200, 'ball modifier': 3,
'description': "A custom-made Poke Ball used to capture Pokemon."
}
poke_ball_dict = {'name': 'Poke Ball',
'type': constants.item_type_poke_ball,
'price': 200, 'ball modifier': 1,
'description': "A device used to capture Pokemon."
}
great_ball_dict = {'name': 'Great Ball',
'type': constants.item_type_poke_ball,
'price': 600, 'ball modifier': 1.5,
'description': "A good Poke Ball used to capture Pokemon."
}
ultra_ball_dict = {'name': 'Ultra Ball',
'type': constants.item_type_poke_ball,
'price': 1200, 'ball modifier': 2,
'description': "A high-quality Poke Ball used to capture Pokemon."
}
master_ball_dict = {'name': 'Master Ball',
'type': constants.item_type_poke_ball,
'price': 1000000, 'ball modifier': constants.empty,
'description': "A custom-made Poke Ball that guaruntees capture of any Pokemon."
}
#Potions
fresh_water_dict = {'name': 'Fresh Water',
'type': constants.item_type_medicine,
'effect': ["Pokemon Recover 80 hp"],
'price': 300, 'description': "A pokemon medicine that heals a Pokemon by 80 HP."
}
revive_dict = {'name': 'Revive',
'type': constants.item_type_medicine,
'effect': ["Revives Pokemon to Half HP"],
'price': 300, 'description': "A medicine that revives a fainted pokemon to half its full HP."
}
potion_dict = {'name': 'Potion',
'type': constants.item_type_medicine,
'effect': ["Pokemon Recover 20 hp"],
'price': 300, 'description': "A pokemon medicine that heals a Pokemon by 20 HP."
}
superpotion_dict = {'name': 'Super Potion',
'type': constants.item_type_medicine,
'effect': ["Pokemon Recover 50 hp"],
'price': 600, 'description': "A pokemon medicine that heals a Pokemon by 50 HP."
}
full_heal_dict = {'name': 'Full Heal',
'type': constants.item_type_medicine,
'effect': ["{} {} {} on Pokemon".format(constants.remove, constants.status_all_negative, constants.status_infliction)],
'price': 1000, 'description': "A spray bottle whose contents remove negative status conditions from Pokemon."
}
full_restore_dict = {'name': 'Full Restore',
'type': constants.item_type_medicine,
'effect': ["{} {} {}".format(constants.remove, constants.status_all_negative, constants.status_infliction),
"Pokemon Recover 9999 hp"],
'price': 3000, 'description': "A spray bottle whose contents remove negative status conditions from Pokemon and fully heals them."
}
#General Items
escape_rope_dict = {'name': 'Escape Rope',
'type': constants.item_type_general,
'effect': "Exit Dungeon",
'price': 450, 'description': "A rope that you can tie off at a dungeon entrance. If you get lost, follow it to find the entrance."
}
#Vitamin Items
protein_dict = {'name': 'Protein',
'type': constants.item_type_vitamin,
'effect': ["Add 10 EV to a Pokemon's ATTACK Stat"],
'price': 9800, 'description': "Vitamins that boost Attack."
}
#held Items
pink_bow_dict = {'name': 'Pink Bow',
'type': constants.item_type_held_item,
'effect': ["Boost Normal Type Attacks by x1.5"],
'price': 1000, 'description': "A cute pink bow that boost Normal type attacks when given to a pokemon to hold."
}
#Berry Items
oran_berry_dict = {'name': 'Oran Bery',
'type': constants.item_type_berry,
'effect': ["Pokemon Recover 20 hp"],
'price': 10, 'description': "A pokemon medicine that heals a Pokemon by 10 HP."
}
#TM & HM Items
HM_01_dict = {'name': 'HM 01 (Cut)',
'type': constants.item_type_tm_hm,
'effect': ["Pokemon Learns Move Cut"],
'price': 1000, 'description': "A Reuseable data disk that can teach pokemon the Hidden Move Cut."
}
#Key Items
acro_bike_dict = {'name': 'Acro Bike',
'type': constants.item_type_key_item,
'effect': ["Allows Movement Through Bike Paths And Trick Bike Paths"],
'price': 10000, 'description': "A ligth weight bike that can perform tricky manuevers."
}
#Mail Items
pika_mail_dict = {'name': 'Pika Mail',
'type': constants.item_type_mail,
'effect': ["Write A Message"],
'message': "Write a message here...",
'price': 30, 'description': "A Pikachu themed paper and envelope."
}
# Evolution Items
water_stone_dict = {'name': 'Water Stone',
'type': constants.item_type_key_item,
'effect': ["Evolves certain Water-Type Pokemon"],
'price': 3000, 'description': "This clear blue stone radiates a cool watery aura and smells like the water were it was found. This stone could evolve certain Pokemon."
}
thunder_stone_dict = {'name': 'Thunder Stone',
'type': constants.item_type_key_item,
'effect': ["Evolves certain Electric-Type Pokemon"],
'price': 3000, 'description': "This green-blue stone with yellow veins radiates an electrical aura and smells like a brewing thunderstorm. This stone could evolve certain Pokemon."
}
# Bug fix: the name was copy-pasted from water_stone_dict as 'Water Stone';
# this entry is the Fire Stone (its effect/description already say so).
fire_stone_dict = {'name': 'Fire Stone',
                   'type': constants.item_type_key_item,
                   'effect': ["Evolves certain Fire-Type Pokemon"],
                   'price': 3000, 'description': "This orange-red stone radiates a warm firey aura and smells like brimstone. This stone could evolve certain Pokemon."
                   }
moon_stone_dict = {'name': 'Moon Stone',
'type': constants.item_type_key_item,
'effect': ["Evolves certain Pokemon"],
'price': 3000, 'description': "This opaque pearly white stone radiates an aura like cold water. This stone could evolve certain Pokemon."
}
sun_stone_dict = {'name': 'Sun Stone',
'type': constants.item_type_key_item,
'effect': ["Evolves certain Pokemon"],
'price': 3000, 'description': "This bright yellow stone radiates a warm bright aura and smells like dried sunflowers. This stone could evolve certain Pokemon."
}
fey_stone_dict = {'name': 'Fey Stone',
'type': constants.item_type_key_item,
'effect': ["Evolves certain Pokemon"],
'price': 3000, 'description': "This sparkling pink stone radiates a warm aura and smells like a pinewoods. This stone could evolve certain Pokemon."
}
#BASIC General INVENTORY
test_inv_dict = {
constants.item_type_general: {
'Escape Rope': {'count': 1, 'item': item(escape_rope_dict)}
},
constants.item_type_vitamin: {
'Protein': {'count': 1, 'item': item(protein_dict)}
},
constants.item_type_medicine:{
'Potion': {'count': 5, 'item': item(potion_dict)},
'Super Potion': {'count': 1, 'item': item(superpotion_dict)},
'Fresh Water': {'count': 1, 'item': item(fresh_water_dict)},
'Full Heal': {'count': 1, 'item': item(full_heal_dict)},
'Revive': {'count': 1, 'item': item(revive_dict)},
},
constants.item_type_poke_ball:{
'Poke Ball': {'count': 50, 'item': item(poke_ball_dict)},
'Great Ball': {'count': 50, 'item': item(great_ball_dict)},
'Ultra Ball': {'count': 50, 'item': item(ultra_ball_dict)},
'Master Ball': {'count': 5, 'item': item(master_ball_dict)},
'Custom Ball': {'count': 5, 'item': item(custom_ball_dict)}
},
constants.item_type_held_item: {
'Pink Bow': {'count': 1, 'item': item(pink_bow_dict)}
},
constants.item_type_berry: {
'Oran berry': {'count': 1, 'item': item(oran_berry_dict)}
},
constants.item_type_tm_hm: {
'HM 01 (Cut)': {'count': 1, 'item': item(HM_01_dict)}
},
constants.item_type_key_item: {
'Acro Bike': {'count': 1, 'item': item(acro_bike_dict)}
},
constants.item_type_mail: {
'Pika Mail': {'count': 1, 'item': item(pika_mail_dict)}
}
}
#
#
#
#
|
[
"reindels@oregonstate.edu"
] |
reindels@oregonstate.edu
|
18fb7eb6c151368495d76ef2ebf1261449f1bfd9
|
936d82330f6cabea8ffea6ed35cfd6784b3a7885
|
/Baselibs/src/cmor/build/lib.linux-ppc64le-2.7/cmor/table_generator.py
|
1df495d9269771cda91b548e7ccfe75369276315
|
[] |
no_license
|
aborle1/GEOS2
|
cc4386e19c76fb0c11a98348ffde5f1b55db9337
|
47d25a0d92a20efd7fd552a934ae0dd7422320ba
|
refs/heads/AJ
| 2021-07-20T22:40:42.121925
| 2017-10-17T18:46:08
| 2017-10-17T18:46:08
| 104,491,177
| 0
| 1
| null | 2017-10-17T18:46:09
| 2017-09-22T15:39:23
|
Fortran
|
UTF-8
|
Python
| false
| false
| 12,365
|
py
|
#amon_2d
#Change time to time1 for dims
#change time: mean to time: point for cell_method
import sys,time,os,genutil
prefix = "CMIP5"
general = """cmor_version: 2.0 ! version of CMOR that can read this table
cf_version: 1.4 ! version of CF that output conforms to
project_id: %s ! project id
table_date: %s ! date this table was constructed
missing_value: 1.e20 ! value used to indicate a missing value
! in arrays output by netCDF as 32-bit IEEE
! floating-point numbers (float or real)
baseURL: http://cmip-pcmdi.llnl.gov/CMIP5/dataLocation
product: output
required_global_attributes: creation_date tracking_id forcing model_id ! space separated required global attribute
expt_id_ok: '10- or 30-year run initialized in year XXXX' 'decadalXXXX'
expt_id_ok: 'volcano-free hindcasts' 'noVolcano'
expt_id_ok: 'prediction with 2010 volcano' 'volcano2010'
expt_id_ok: 'pre-industrial control' 'piControl'
expt_id_ok: 'Historical' 'historical'
expt_id_ok: 'mid-Holocene' 'midHolocene'
expt_id_ok: 'last glacial maximum' 'lgm'
expt_id_ok: 'last millennium' 'past1000'
expt_id_ok: 'RCP4.5' 'rcp45'
expt_id_ok: 'RCP8.5' 'rcp85'
expt_id_ok: 'RCP2.X' 'rcp2x'
expt_id_ok: 'RCP6' 'rcp6'
expt_id_ok: 'ESM pre-industrial control' 'esmControl'
expt_id_ok: 'ESM historical' 'esmHistorical'
expt_id_ok: 'ESM RCP8.5' 'esmRcp85'
expt_id_ok: 'ESM fixed climate 1' 'esmFixClim1'
expt_id_ok: 'ESM fixed climate 2' 'esmFixClim2'
expt_id_ok: 'ESM feedback 1' 'esmFdbk1'
expt_id_ok: 'ESM feedback 2' 'esmFdbk2'
expt_id_ok: '1 percent per year CO2' '1pctCo2'
expt_id_ok: 'abrupt 4XCO2' 'abrupt4xco2'
expt_id_ok: 'natural-only' 'historicalNat'
expt_id_ok: 'GHG-only' 'historicalGhg'
expt_id_ok: 'AMIP' 'amip'
expt_id_ok: '2030 time-slice' 'sst2030'
expt_id_ok: 'control SST climatology' 'sstClim'
expt_id_ok: 'CO2 forcing' 'sstClim4xco2'
expt_id_ok: 'all aerosol forcing' 'sstClimAerosol'
expt_id_ok: 'sulfate aerosol forcing' 'sstClimSulfate'
expt_id_ok: '4xCO2 AMIP' 'amip4xco2'
expt_id_ok: 'AMIP plus patterned anomaly' 'amipFuture'
expt_id_ok: 'aqua planet control' 'aquaControl'
expt_id_ok: '4xCO2 aqua planet' 'aqua4xco2'
expt_id_ok: 'aqua planet plus 4K anomaly' 'aqua4K'
expt_id_ok: 'AMIP plus 4K anomaly' 'amip4K'
""" % (prefix,time.strftime("%d %B %Y"))
#realm: %s
#table_id: Table %s ! table id
axis_tmpl = """
!============
axis_entry: %(CMOR dimension)
!============
!----------------------------------
! Axis attributes:
!----------------------------------
standard_name: %(standard name)
units: %(units)
axis: %(axis) ! X, Y, Z, T (default: undeclared)
positive: %(positive) ! up or down (default: undeclared)
long_name: %(long name)
!----------------------------------
! Additional axis information:
!----------------------------------
out_name: %(output dimension name)
valid_min: %(valid_min)
valid_max: %(valid_max)
stored_direction: %(stored direction)
formula: %(formula)
z_factors: %(z_factors)
z_bounds_factors: %(z_factor_bounds)
tolerance: %(tol_on_requests: variance from requested values that is tolerated)
type: %(type)
requested: %(requested) ! space-separated list of requested coordinates
requested_bounds: %(bounds_ requested) ! space-separated list of requested coordinate bounds
value: %(value) ! of scalar (singleton) dimension
bounds_values: %(bounds _values) ! of scalar (singleton) dimension bounds
must_have_bounds: %(bounds?)
index_only: %(index axis?)
climatology: %(climatology)
coords_attrib: %(coords_attrib)
!----------------------------------
!
"""
var_tmpl = """!============
variable_entry: %(CMOR variable name)
!============
modeling_realm: %(realm)
!----------------------------------
! Variable attributes:
!----------------------------------
standard_name: %(standard name)
units: %(unformatted units)
cell_methods: %(cell_methods)
long_name: %(long name)
comment: %(comment)
!----------------------------------
! Additional variable information:
!----------------------------------
dimensions: %(CMOR dimensions)
out_name: %(output variable name)
type: %(type)
positive: %(positive)
valid_min: %(valid min)
valid_max: %(valid max)
ok_min_mean_abs: %(mean absolute min)
ok_max_mean_abs: %(mean absolute max)
!----------------------------------
!
"""
def process_a_line(line):
    """Split one CSV line into fields, honouring double-quoted fields that
    contain commas.  (Python 2 source.)

    Escaped double-quotes ("") are swapped for a sentinel ($$$) before
    splitting and restored afterwards.  The 'usiiiiually,' marker enables
    noisy debug printing for one known troublesome row.
    """
    if line.find("usiiiiually,")>-1:
        debug = True
        print
        print 'NEW STUFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
        print
    else:
        debug=False
    line=line.replace("\r\n","\n") # dos character
    line = line.replace('""','"$$$')
    sps = line .split(",")
    sp=[]
    st=""
    while (len(sps)>0):
        s = sps.pop(0)
        if debug:
            print 's:',s
        if len(s)>0 and s[0]=='"' and s[-1]!='"':
            # field opens a quote without closing it: keep consuming
            # comma-separated pieces until the piece ending in '"'
            if debug: print 'inthere'
            s=s[1:]
            s2=sps.pop(0)
            if debug: print 's2:',s2
            while len(s2)==0 or s2[-1]!='"':
                s = ','.join([s,s2])
                s2=sps.pop(0)
            else:
                # while-else: runs when the loop ends normally, i.e. s2
                # carries the closing quote — strip it and join
                s = ','.join([s,s2[:-1]])
        sp.append(s.replace("$$$",'"'))
    return sp
def process_template(tmpl,cnames,cols,voids={}):
    """Fill template tmpl with the values of one CSV row.  (Python 2 source.)

    cnames are the CSV column headers, cols the row's values; columns whose
    name matches a template key are substituted.  Keys left unmatched have
    their entire template line removed.  A 'time2' value implies a
    climatology axis.  If any key listed in voids resolves to one of the
    given void values, "" is returned so the entry is skipped.

    NOTE(review): the mutable default voids={} is shared between calls but
    is never mutated here, so it is harmless.
    """
    F = genutil.StringConstructor(tmpl)
    keys = F.keys()
    match = 0
    for c in cnames:
        if c in keys:
            match+=1
            indx = cnames.index(c)
            ## print ' matched at %i' % indx ,
            if indx<len(cols):
                val = cols[indx]
            else:
                val = ""
            if val.strip()=='time2':
                # 'time2' marks a climatological time axis
                setattr(F,"climatology","yes")
                if "climatology" in keys: keys.remove("climatology")
            if val.strip()!="":
                setattr(F,c,val)
                keys.remove(c)
            ## print
            ## else:
            ## print ' but empty'
        ## else:
        ## print
    if "CMOR dimension" in keys:
        # NOTE(review): raising a string is a hard failure (TypeError on
        # Python >= 2.6); this branch signals an entry missing its dimension.
        raise "crap"
    for k in keys:
        # mark unmatched keys so their lines can be stripped below
        setattr(F,k,"!CRAP WE NEED TO REMOVE THAT LINE")
    ## Now generates
    out = F()
    sp = out.split("\n")
    lines = []
    for l in sp:
        if l.find("!CRAP WE NEED TO REMOVE THAT LINE")>-1:
            continue
        lines.append(l)
    out = "\n".join(lines)
    ## print 'We got: %i matches' % match
    # fixes Karl input bug
    out = out.replace("..",".")
    #Ok now check the void thing
    for kw in voids.keys():
        v = getattr(F,kw,"we keep").strip()
        vals = voids[kw]
        if not isinstance(vals,(list,tuple)):
            vals = [vals,]
        for V in vals:
            if V == v:
                out = ""
    return out
def create_table_header(tbnm, table_file, dims_file, fqcy):
    """Create Tables/<prefix>_<tbnm>, write its header and axis entries, and
    return the open file handle.  (Python 2 source.)

    The modeling realm is guessed from the table name's first letter
    (O=ocean, A=atmos, L=land, default atmos), the approximate time interval
    from frequency hints in the name, and axis definitions are copied from
    dims_file.  Tables referencing 'alevel'/'olevel' get extra dimension
    entries appended from auxiliary files.
    """
    #First of All create the header
    fnm = "Tables/" + prefix + '_'+tbnm
    fo = open(fnm,'w')
    print >> fo, "table_id: Table %s" % tbnm
    realm = None
    if tbnm[0]=='O':
        realm = "ocean"
    elif tbnm[0]=='A':
        realm = 'atmos'
    elif tbnm[0]=='L':
        realm = 'land'
    else:
        realm = "atmos"
    print >> fo, "modeling_realm: %s\n" % realm
    print >> fo, "frequency: %s\n" % fqcy
    print >> fo, general
    # looking for approx interval, ASSUMING UNITS ARE IN DAYS SINCE
    if tbnm.find("mon")>-1:
        interval = 30.
    elif tbnm.lower().find('clim')>-1:
        interval = 30.
    elif tbnm.lower().find('aero')>-1:
        interval = 30.
    elif tbnm.lower().find('yr')>-1:
        interval = 365.
    elif tbnm.lower().find('da')>-1:
        interval = 1.
    elif tbnm.find("hr")==1:
        # names like '3hr', '6hrLev': leading digit is the hour count
        interval = float(tbnm[0])/24.
    elif tbnm.find("min")>-1:
        interval = float(tbnm[2:tbnm.find("min")])/1440.
    else:
        interval = 0.
    print >> fo, """approx_interval: %f ! approximate spacing between successive time
! samples (in units of the output time
! coordinate.""" % interval
    D = open(dims_file)
    dlines = D.readlines()[1:]
    i=0
    while dlines[i].strip()=="":
        i+=1
    dlines=dlines[i:]
    cnms = dlines[0].split(',')
    for i in range(len(cnms)):
        cnms[i]=cnms[i].strip()
    addLines = False
    for l in dlines[1:]:
        sp = process_a_line(l)
        if sp[0].find(tbnm)>-1:
            if l.find("alevel")>-1:
                addLines = True
                zlevel_name = 'alevel'
                file_add = 'Tables_csv/add_dims.txt'
            elif l.find("olevel")>-1:
                addLines = True
                zlevel_name = 'olevel'
                file_add = 'Tables_csv/add_dims2.txt'
            else:
                print >> fo, process_template(axis_tmpl,cnms,sp)
    if addLines:
        print 'adding:',file_add,'to',tbnm
        tmpf=open(file_add)
        lns=tmpf.read()
        tmpf.close()
        if file_add == 'Tables_csv/add_dims.txt':
            if not tbnm in ['aero','cfMon']:
                # NOTE(review): trims the last 20 characters for most tables —
                # presumably a trailing entry only aero/cfMon keep; verify.
                lns=lns[:-20]
        lns=lns.replace("zlevel",zlevel_name)
        print >> fo, lns
    return fo
def create_table(table_file, dims_file):
    """Convert one CSV spreadsheet of variables into CMOR table file(s).
    (Python 2 source.)

    Handles carriage-return-only line endings, inlines the shared
    'Amon 2D' / 'Oyr' variable lists where the CSV references them (with
    time -> time1 tweaks for cfsites/cf3hr), creates each table file the
    first time its name is seen (header + axes via create_table_header),
    and appends one variable entry per CSV row.
    """
    tables = {}
    foundnm= False
    D = open(table_file)
    dlines = D.readlines()
    ivar = -2
    if len(dlines)==1:
        # whole file came in as one line: split on bare carriage returns
        dlines = dlines[0].split('\r')
    #This bit insert a table
    dlines2=[]
    for i in range(len(dlines)):
        if dlines[i].find("include Amon 2D")>-1:
            f=open("Tables_csv/amon_2D.csv")
            add_lines = f.readlines()
            if table_file[-11:-4] in ['cfsites','v/cf3hr']:
                # cfsites/cf3hr use instantaneous time: rename the axis and
                # cell method, and drop tasmin/tasmax
                tmplines=[]
                for aline in add_lines:
                    sp = aline.split(",")
                    if len(sp)>4 and 'time' in sp[-4]:
                        sp[-4]=sp[-4].replace('time','time1')
                    myline = ','.join(sp)
                    myline = myline.replace("time: mean","time: point")
                    if len(sp)>7 and sp[6] in ['tasmin','tasmax']:
                        pass
                    else:
                        tmplines.append(myline)
                add_lines = tmplines
            dlines2=dlines2+add_lines
        elif dlines[i].find("include Oyr")>-1:
            f=open("Tables_csv/oyr_tracer.csv")
            dlines2=dlines2+f.readlines()
        else:
            dlines2.append(dlines[i])
    dlines=dlines2
    for l in dlines:
        sp = process_a_line(l)
        if 0<=sp[0].find("CMOR Table")<=1 and foundnm == False: # line that will give us the table name
            i=1
            while sp[i].strip()=="":
                i+=1
            tbnm = sp[i].strip()
            fqcy = sp[i+1].strip()
            foundnm = True
        if sp[0] == 'priority':
            # header row of the variable section: remember column names
            cnms = sp
            for i in range(len(cnms)):
                cnms[i]=cnms[i].strip()
            ivar = cnms.index("CMOR variable name")
            continue
        if ivar!=-2 and len(sp)>ivar and sp[ivar].strip()!="":
            if tbnm in tables.keys():
                fo = tables[tbnm]
            else: # New table
                fo = create_table_header(tbnm,table_file,dims_file,fqcy)
                tables[tbnm]=fo
            print >> fo, process_template(var_tmpl,cnms,sp,{'CMOR variable name':['?','0']})
    print 'Created tables:',tables.keys()
# Entry point: with arguments, process one CSV table (argv[1]) against an
# optional dims file (argv[2], default Tables_csv/dims.csv); with no
# arguments, regenerate every known CMIP5 table.
if __name__== "__main__" :
    if len(sys.argv)>2:
        dims_table = sys.argv[2]
    else:
        dims_table = 'Tables_csv/dims.csv'
    if len(sys.argv)>1:
        print sys.argv
        create_table(sys.argv[1],dims_table)
    else:
        tables_nms = """Tables_csv/3hr.csv Tables_csv/amon.csv Tables_csv/cfMon.csv Tables_csv/oclim.csv
Tables_csv/6hrLev.csv Tables_csv/cfsites.csv Tables_csv/cfOff.csv Tables_csv/fx.csv Tables_csv/olmon.csv
Tables_csv/6hrPlev.csv Tables_csv/cf3hr.csv Tables_csv/cfmip.csv Tables_csv/llmon.csv Tables_csv/omon.csv
Tables_csv/aero.csv Tables_csv/cfDa.csv Tables_csv/da.csv Tables_csv/lmon.csv Tables_csv/oyr.csv
""".split()
        for nm in tables_nms:
            print 'Processing:',nm
            create_table(nm,dims_table)
|
[
"aborle1@umbc.edu"
] |
aborle1@umbc.edu
|
43c33a3ca9aa9e5c96a6ba64fea51456be46b493
|
75513bf64984c0d737c69fbcf0adcf40a8640e47
|
/HT/lib/python3.6/stat.py
|
0ee2f18b0a5477078ffa86399eb0c293c9b0eb67
|
[] |
no_license
|
thekekk/HT_ATC
|
e5da9d2b1d34c562b8236b8938913c5e79da9767
|
5af80b5ab55658f909f03bd6e629b4e7afefe6d4
|
refs/heads/master
| 2020-04-04T15:56:51.538276
| 2018-08-02T01:03:52
| 2018-08-02T01:03:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
/Users/gavinnewcomer/anaconda3/lib/python3.6/stat.py
|
[
"gavin.newcomer@temple.edu"
] |
gavin.newcomer@temple.edu
|
e893ce5201070444e3ae7f78c51204c87503cf6c
|
0e0e78019f860431986162265c6fb20d06c2b6f0
|
/test/test_graphical_layout/test_map.py
|
3dac0959d9a097a737fb6482d96eaa468e1f4b14
|
[] |
no_license
|
ouskah/OpenClassroomsPython
|
a2cfc9f3974a77f83360d671069d0c67023dc7ff
|
316d7891226c6d046448c8c7efb34d4333bf98b7
|
refs/heads/master
| 2020-08-04T11:03:51.183127
| 2019-09-24T22:20:41
| 2019-09-24T22:20:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,637
|
py
|
# -*-coding:Utf-8 -*
"""This module contains tests for the class Map."""
import unittest
import os
import parameters.parameters as parameters
import test.parameters_for_testing as test_parameters
from graphical_layout.map import Map
class TestMap(unittest.TestCase):
    """Unit tests for graphical_layout.map.Map."""

    def setUp(self):
        """Load every .txt fixture map from the test-maps directory."""
        self.test_maps = {}
        here = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        maps_dir = os.path.join(here, parameters.dir_test_maps)
        for file_name in os.listdir(maps_dir):
            if not file_name.endswith(".txt"):
                continue
            with open(os.path.join(maps_dir, file_name), "r") as map_file:
                self.test_maps[file_name[:-4].lower()] = map_file.read()

    def test_is_valid(self):
        """Map.is_valid accepts the correct map and rejects each known-bad
        fixture with the matching error message."""
        expected_errors = {
            "invalid_character_map": Map.INVALID_CHARS_ERROR,
            "no_u_map": Map.U_COUNT_ERROR,
            "several_u_map": Map.U_COUNT_ERROR,
            "non_rectangular_map": Map.NON_RECTANGULAR_ERROR,
            "too_small_map": Map.TOO_SMALL_ERROR,
            "too_large_map": Map.TOO_LARGE_ERROR,
        }
        for map_name, map_text in self.test_maps.items():
            is_valid, message = Map.is_valid(map_text.upper())
            if map_name == "correct_map":
                self.assertTrue(is_valid)
            elif map_name in expected_errors:
                self.assertFalse(is_valid)
                self.assertEqual(message, expected_errors[map_name])

    def test_constructor(self):
        """The constructor stores the name, dimensions and parsed grid."""
        expected_grid = [list(row) for row in test_parameters.correct_grid]
        map_name = "correct_map"
        my_map = Map(map_name, self.test_maps[map_name])
        self.assertEqual(my_map.name, map_name)
        self.assertEqual(my_map.height, 20)
        self.assertEqual(my_map.width, 20)
        self.assertEqual(my_map.grid, expected_grid)
|
[
"lea.fanchon@gmail.com"
] |
lea.fanchon@gmail.com
|
d62bbfbc015db36893e18e1e7c97e64cc7333a4b
|
6d2fa524df3cbb054761e47717ac6e902301ed1d
|
/akash_update.py
|
55916a1f8365ba8b23ea329f32a0c2fa607139f3
|
[] |
no_license
|
dalinxz/RedditFinancial
|
29f9703b8d88adadb72330ccc1991854a2d6fc5e
|
ad98ef284d2f017f078378bb0c916bcb6d98744d
|
refs/heads/main
| 2023-02-27T23:34:44.613764
| 2021-01-31T19:13:18
| 2021-01-31T19:13:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,290
|
py
|
# -*- coding: utf-8 -*-
"""akash_update.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fd_bVVLZjgbtdkiSNysC-O8dfnsQOca1
"""
!pip install praw
import praw
import time
import requests #idk why we need request? but ok
import requests.auth
import pandas as pd
reddit = praw.Reddit(
client_id="wal33qeX5QhkVQ",
client_secret="5Jv5XrScw3sTCuHE18j-YOUzeaKBjw",
user_agent="script:test_app:v0.1 (by u/Comprehensive-Self39)"
)
#^SETUP CODE
#Importing the CSV from GitHub
url = 'https://raw.githubusercontent.com/enanpurrp/Debullshitiser/main/options.csv'
options_df = pd.read_csv(url, index_col=0)
url = 'https://raw.githubusercontent.com/enanpurrp/Debullshitiser/main/financialindependence(1).csv'
finInd_df = pd.read_csv(url, index_col=0)
url = 'https://raw.githubusercontent.com/enanpurrp/Debullshitiser/main/stocks.csv'
stocks_df = pd.read_csv(url, index_col=0)
url = 'https://raw.githubusercontent.com/enanpurrp/Debullshitiser/main/wallstreetbets.csv'
WSB_df = pd.read_csv(url, index_col=0)
#adding data to csv files: data is added from the "top of last month" field, and as long as the top isn't
#lower than the UTC, it'll be added!(create's a new pandas dataframe and adds it)
def update_CSV(pdataframe, subreddit):
    """Append the past week's top posts of `subreddit` that are newer than
    the newest row already stored in `pdataframe`.

    For r/wallstreetbets only posts carrying an accepted flair and not
    marked NSFW are kept; other subreddits are filtered by recency alone.
    Relies on the module-level `reddit` PRAW client.  Returns a new
    DataFrame (the input is not mutated).
    """
    latest = pdataframe['TimeStamp'].max()  # newest post already stored
    columns = ['Title', 'Flair', 'Score of Post', 'ID', 'URL', 'TimeStamp',
               'Message body', '#Comments', 'SubReddit']
    accepted_tags_WSB = ['None', 'Charts', 'Earnings Thread', 'Gain', 'Loss', 'News']

    def _keep(submission):
        # WSB posts need an accepted flair and must not be NSFW; everyone
        # must be newer than what we already have.
        if subreddit == 'wallstreetbets':
            if submission.link_flair_text not in accepted_tags_WSB:
                return False
            if submission.over_18:
                return False
        return submission.created_utc > latest

    # Bug fix: the original duplicated the nine append statements in both
    # branches; collect each kept post as one row instead.
    rows = []
    for submission in reddit.subreddit(subreddit).top("week"):
        if _keep(submission):
            rows.append([submission.title, submission.link_flair_text,
                         submission.score, submission.id, submission.url,
                         submission.created_utc, submission.selftext,
                         submission.num_comments, subreddit])
    update_df = pd.DataFrame(rows, columns=columns)
    # DataFrame.append was removed in pandas 2.0; concat is the documented
    # replacement and matches the old ignore_index=True behavior.
    return pd.concat([pdataframe, update_df], ignore_index=True)
#testruns
options_df = update_CSV(options_df, 'options')
finInd_df = update_CSV(finInd_df, 'financialindependence')
stocks_df = update_CSV(stocks_df, 'stocks')
WSB_df = update_CSV(WSB_df, 'wallstreetbets')
|
[
"noreply@github.com"
] |
dalinxz.noreply@github.com
|
3ccfa877e6a2a3cc1ce964343c91024df48d7241
|
32bbbd6dbd100bbb9a2282f69ac3b7b34516347f
|
/Study/Coding_test/3-3, 3-4.py
|
1f86239fe8abeffb5c765ed94ff2396cc8d87877
|
[] |
no_license
|
kimjh1753/AIA_Academy_Study
|
2162d4d4f1a6b8ca1870f86d540df45a8742f359
|
6022718ae7f9e5170a19c4786d096c8042894ead
|
refs/heads/master
| 2023-05-07T12:29:12.920693
| 2021-06-05T01:09:33
| 2021-06-05T01:09:33
| 324,136,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
# 3-3.py  (max of per-row minimums, using the built-in min())
# Read N and M, separated by whitespace
n, m = map(int, input().split())

result = 0
# Read and inspect one row at a time
for i in range(n):
    data = list(map(int, input().split()))
    # Find the smallest number in the current row
    min_value = min(data)
    # Keep the largest of those per-row minimums
    result = max(result, min_value)

print(result)  # print the final answer
# 3-4.py  (same problem as 3-3, finding the row minimum manually)
# Read N and M, separated by whitespace
n, m = map(int, input().split())

result = 0
# Read and inspect one row at a time
for i in range(n):
    data = list(map(int, input().split()))
    # Find the smallest number in the current row
    # (10001 is a sentinel above the maximum possible input value)
    min_value = 10001
    for a in data:
        min_value = min(min_value, a)
    # Keep the largest of those per-row minimums
    result = max(result, min_value)

print(result)  # print the final answer
|
[
"kimjh1753@naver.com"
] |
kimjh1753@naver.com
|
d8a679818375ef475c2ef0113b71e29e2d7062d7
|
8a42fe9a7f615087d9987829c5f297020ea72690
|
/app.py
|
6eb8c3ff5cceb33edd792bb487fdaaff53007a79
|
[] |
no_license
|
edwardchow33/androidbot
|
d04b42637d1f2109dcea7bdb4b08b83207967c59
|
504368d6c04de7a352224e2c032bc11959a96a6e
|
refs/heads/master
| 2020-03-26T12:05:24.000795
| 2018-08-15T16:07:07
| 2018-08-15T16:07:07
| 144,874,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
from flask import Flask,request
import json
from bot import Bot
app = Flask(__name__)
bot = Bot()
@app.route('/',methods=['GET'])
def add():
    """Trigger the bot to create a post; respond with a JSON status payload."""
    # print(...) with a single argument behaves identically on Python 2 and 3,
    # unlike the original bare `print "add"` statement (a SyntaxError on py3).
    print("add")
    bot.create_post()
    result = {'result':200}
    return json.dumps(result)
# Bug fix: this view previously reused the '/' GET rule already taken by
# add(), so it was unreachable; it also printed the function object
# (`print select`) instead of a string and returned None, which Flask turns
# into a 500.  Give it its own path and a JSON response.
@app.route('/select',methods=['GET'])
def select():
    """Placeholder select endpoint; responds with a JSON status payload."""
    print("select")
    result = {'result':200}
    return json.dumps(result)

if __name__ == '__main__':
    app.run()
|
[
"edwardchow0303@gmail.com"
] |
edwardchow0303@gmail.com
|
7bb88ed2387b5d755cdb8f97907547bd2b05ced7
|
7bb27fcf43b8701fb22a54df197b6a46910e09a2
|
/home/models.py
|
274c946e711e99085cc53e86feffd80937edea3d
|
[] |
no_license
|
bladas/blog
|
d9c56b05e4d9fa21e0677e8bdb92e5e6c1d6732d
|
00841d4645c3f58177587bf17751ad8c21dc0bdb
|
refs/heads/master
| 2023-04-05T02:36:26.261824
| 2021-04-21T14:25:58
| 2021-04-21T14:25:58
| 360,196,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,368
|
py
|
from django.db import models
from transliterate import translit
# Create your models here.
from django.utils.text import slugify
from base.models import BaseModel
class HomeImages(BaseModel):
    """A single image displayed on the home page."""
    image = models.ImageField(upload_to="media/home/", blank=True, null=True)
class Post(BaseModel):
    """A blog post whose slug is auto-generated from the title on save."""
    title = models.CharField(max_length=255)
    small_description = models.TextField()
    image = models.ImageField(upload_to="media/post/", blank=True, null=True)
    slug = models.SlugField(unique=True, blank=True, null=True)

    def save(self, *args, **kwargs):
        # Regenerate the slug from the Ukrainian title (transliterated to
        # Latin) on every save. Removed a leftover debug print of the slug.
        # NOTE(review): slug is unique=True, so two titles that transliterate
        # to the same slug will raise IntegrityError — confirm acceptable.
        self.slug = slugify(translit(self.title, 'uk', reversed=True))
        super(Post, self).save(*args, **kwargs)

    def __str__(self):
        return self.title
class PostDescription(BaseModel):
    """A body-text section belonging to a Post (a post may have several)."""
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    description = models.TextField()

    def __str__(self):
        return f'{self.post}'
class Comments(BaseModel):
    """A reader comment on a Post; identified by email, name optional."""
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    name = models.CharField(max_length=50, blank=True, null=True)
    email = models.EmailField()
    text = models.TextField()

    def __str__(self):
        return self.email
class AboutMe(BaseModel):
    """Content for the 'about me' page: one portrait image plus a text blurb."""
    image = models.ImageField(upload_to="media/about-me/", blank=True, null=True)
    about_me_text = models.TextField()
|
[
"dashkevich_v@ukr.net"
] |
dashkevich_v@ukr.net
|
a26a75df3d299756462f23745b86c524c4ae9386
|
ab1d4edec94494e9f34069446a76a787ae9769a8
|
/kronos/models/base.py
|
3913ee5d39b2c1db379a9db91d0823a2e1d8c59c
|
[] |
no_license
|
kzka/kronos
|
ae0d710d30511eb11da586bdb2d778a68fe123eb
|
a50a9fe41377ae2e3c9c9b9c1bd7ed7d70c42437
|
refs/heads/master
| 2023-02-26T11:19:39.837644
| 2021-01-29T22:33:32
| 2021-01-29T22:40:12
| 334,278,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,475
|
py
|
import abc
import logging
import torch
import torch.nn as nn
from kronos.utils.torch_utils import freeze_model
class BaseFeaturizer(abc.ABC, nn.Module):
    """Abstract base class for featurizers.

    Subclasses must implement the `_build_featurizer` method.
    """

    def __init__(
        self,
        pretrained=True,
        layers_train="bn_only",
        bn_use_running_stats=False,
    ):
        """Constructor.

        Args:
            pretrained (bool): Whether to use Imagenet pretrained weights.
            layers_train (str): Controls which layers are trained. Can be
                one of `['all', 'frozen', 'bn_only']`.
            bn_use_running_stats (bool): Set to `True` to disable batch
                statistics and use running mean and variance learned during
                training.

        Raises:
            ValueError: If `layers_train` is not a valid strategy.
        """
        super().__init__()

        self._bn_use_running_stats = bn_use_running_stats
        self._layers_train = layers_train
        self._pretrained = pretrained

        # Build the featurizer architecture (supplied by the subclass).
        self.model = self._build_featurizer()

        # Figure out batch-norm-related freezing; 'all' trains everything.
        if layers_train != "all":
            if layers_train == "frozen":
                logging.info("Freezing all featurizer layers.")
                bn_freeze_affine = True
            elif layers_train == "bn_only":
                logging.info(
                    "Freezing all featurizer layers except for batch norm layers."
                )
                bn_freeze_affine = False
            else:
                raise ValueError(
                    "{} is not a valid layer selection strategy.".format(
                        layers_train
                    )
                )
            freeze_model(self.model, bn_freeze_affine, bn_use_running_stats)

        # Map every parameter to the name of the module that owns it
        # (useful for per-module optimizer settings / logging).
        self.param_to_module = {}
        for m in self.modules():
            for p in m.parameters(recurse=False):
                self.param_to_module[p] = type(m).__name__

    @abc.abstractmethod
    def _build_featurizer(self):
        """Build and return the featurizer architecture."""
        pass

    def forward(self, x):
        """Extract features from the video frames.

        Args:
            x (torch.FloatTensor): The video frames of shape
                `(B, T, C, H, W)`. If there are `S` video
                frames and we are using `X` context frames,
                then `T = S * X`.

        Returns:
            A FloatTensor of per-frame feature maps, reshaped back to
            `(B, T, c, h, w)`.
        """
        assert x.ndim == 5
        batch_size, t, c, h, w = x.shape
        # Fold time into the batch so the 2D featurizer sees 4D input.
        x = x.view((batch_size * t, c, h, w))
        feats = self.model(x)
        _, c, h, w = feats.shape
        feats = feats.view((batch_size, t, c, h, w))
        return feats

    def train(self, mode=True):
        """Sets the model in `train` mode.

        Bug fix: the original override took no argument, so standard calls
        following the `nn.Module.train(mode)` convention (e.g. `.train(False)`)
        raised TypeError. `mode` now defaults to True for backward
        compatibility; `train(False)` is equivalent to `eval()`.
        """
        if not mode:
            self.eval()
            return
        self.training = True
        for m in self.model.modules():
            # Everything that is NOT batch norm follows normal train mode.
            if not isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
                m.train()
            else:
                # For batch norm, we only want train mode if we are NOT
                # using the running statistics learned during training.
                if self._bn_use_running_stats:
                    m.eval()
                else:
                    m.train()

    def eval(self):
        """Sets the model in `eval` mode."""
        self.training = False
        for m in self.model.modules():
            m.eval()

    @property
    def pretrained(self):
        """Whether Imagenet-pretrained weights were requested."""
        return self._pretrained
class BaseEncoder(abc.ABC, nn.Module):
    """Abstract base class for encoders.

    Subclasses must implement `_build_encoder` and `_init_weights`.
    """

    def __init__(self):
        super().__init__()

        # Concrete architecture and weight init are supplied by the subclass.
        self.model = self._build_encoder()
        self._init_weights()

        # Map every parameter to the name of the module that owns it
        # (useful for per-module optimizer settings / logging).
        self.param_to_module = {}
        for m in self.modules():
            for p in m.parameters(recurse=False):
                self.param_to_module[p] = type(m).__name__

    @abc.abstractmethod
    def _build_encoder(self):
        """Build and return the encoder architecture."""
        pass

    @abc.abstractmethod
    def _init_weights(self):
        """Initialize the weights of the encoder."""
        pass

    def train(self, mode=True):
        """Sets the model in `train` mode.

        Bug fix: the original override took no argument, so standard calls
        following the `nn.Module.train(mode)` convention (e.g. `.train(False)`)
        raised TypeError. `mode` defaults to True for backward compatibility.
        """
        super().train(mode)

    def eval(self):
        """Sets the model in `eval` mode."""
        super().train(False)

    def _sanity_check_input(self, x):
        """Hook for subclasses to validate inputs before the forward pass."""
        pass

    def forward(self, x):
        """Forward the video frame features through the encoder.

        Args:
            x (tensor): A FloatTensor of shape `(B, C, X, H, W)` where
                `X` is the number of context frames.
        """
        self._sanity_check_input(x)
        return self.model(x)
class BaseTemporalModel(abc.ABC, nn.Module):
    """Abstract base class for temporal video models.

    Composes a featurizer (per-frame CNN features), an encoder (temporal
    embedding over stacks of context frames) and an optional auxiliary
    network. Subclasses supply the concrete sub-networks.
    """
    def __init__(
        self, num_ctx_frames, model_config,
    ):
        """Constructor.

        Args:
            num_ctx_frames (int): The number of context frames stacked
                together for each individual video frame.
            model_config (edict): A dictionary containing model architecture
                hyperparameters.
        """
        super().__init__()

        self.training = True
        self._num_ctx_frames = num_ctx_frames
        self._model_config = model_config

        # initialize models (hooks implemented by subclasses)
        self.featurizer_net = self._init_featurizer()
        self.encoder_net = self._init_encoder()
        self.auxiliary_net = self._init_auxiliary_net()

    @abc.abstractmethod
    def _init_featurizer(self):
        # Must return the featurizer network.
        pass

    @abc.abstractmethod
    def _init_encoder(self):
        # Must return the encoder network.
        pass

    def _init_auxiliary_net(self):
        # Optional extra head; returning None disables it.
        return None

    def param_groups(self):
        """Return a dict of variable optimization parameters.

        This is useful for specifying variable learning
        rates and regularizations strengths for the
        different models.
        """
        param_groups = {
            "featurizer": {"free": self.featurizer_net.parameters()},
            "encoder": {"free": self.encoder_net.parameters()},
        }
        if self.auxiliary_net is not None:
            param_groups["auxiliary"] = {
                "free": self.auxiliary_net.parameters()
            }
        return param_groups

    def _sanity_check_input(self, frames):
        """Hook for subclasses to validate inputs before the forward pass."""
        pass

    def forward(self, frames, num_ctx_frames=None):
        """Forward the video frames through the network.

        Args:
            frames (torch.FloatTensor): The video frames of shape
                `(B, T, C, H, W)`. If there are `S` video
                frames and we are using `X` context frames,
                then `T = S * X`.
            num_ctx_frames (int | None): Overrides the constructor's
                context-frame count for this call; defaults to it.

        Returns:
            A dict with `embs` of shape `(B, S, D)` where `S`
            is the number of video frames and `D` is
            the dimension of the embedding space, plus the
            intermediate `feats` tensor.
        """
        self._sanity_check_input(frames)

        # extract per-frame features
        feats = self.featurizer_net(frames)

        # reshape for the 3D convs: group each frame with its context frames
        if num_ctx_frames is None:
            num_ctx_frames = self._num_ctx_frames
        batch_size, s, c, h, w = feats.shape
        num_cc_frames = s // num_ctx_frames
        feats = feats.view(
            (batch_size * num_cc_frames, c, num_ctx_frames, h, w)
        )

        # embed frame groups using the encoder, then restore the batch dim
        embs = self.encoder_net(feats)
        embs = embs.view((-1, num_cc_frames, embs.shape[-1]))

        return {
            "embs": embs,
            "feats": feats,
        }

    # NOTE(review): like the sibling classes, train/eval shadow
    # nn.Module.train(mode) with a zero-argument signature — callers using
    # `model.train(False)` will get a TypeError; confirm intended.
    def train(self):
        """Set the model in `train` mode."""
        self.training = True
        self.featurizer_net.train()
        self.encoder_net.train()
        if self.auxiliary_net is not None:
            self.auxiliary_net.train()

    def eval(self):
        """Set the model in `eval` mode."""
        if self.training:
            logging.debug("Setting model to EVAL mode.")
        self.featurizer_net.eval()
        self.encoder_net.eval()
        if self.auxiliary_net is not None:
            self.auxiliary_net.eval()
        self.training = False

    @property
    def num_ctx_frames(self):
        # Number of context frames stacked per video frame.
        return self._num_ctx_frames
|
[
"kevinarmandzakka@gmail.com"
] |
kevinarmandzakka@gmail.com
|
adba8dad5970b3b852b6c11aa08187a439ac0fcc
|
f4e8c59222b46a58f770160d70f3b86866b6bbdc
|
/week02/task04.py
|
b5fd60136ec17e30490e6f9194f5f150e3e3f528
|
[] |
no_license
|
GitKurmax/coursera-python
|
08902bf622d4aaed7197f352b7b74a73a398c74a
|
ffb3621c2b921ccdec5370da0108d14aeec220e9
|
refs/heads/master
| 2023-02-08T05:05:01.809386
| 2020-12-31T11:52:18
| 2020-12-31T11:52:18
| 325,789,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
# Decide whether the entered year is a Gregorian leap year:
# divisible by 400, or divisible by 4 but not by 100.
year = int(input())
is_leap = year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)
print("Yes" if is_leap else "No")
|
[
"alexandrmail32@gmail.com"
] |
alexandrmail32@gmail.com
|
57ffb2b83595af10a419343366c1df061a62a01d
|
b1ce14cd8969aa9c919d247e55709889da0d5f86
|
/zhaopin/analysis/city_map.py
|
0ee9184e0a5a745f080c221206d941c932310dc4
|
[] |
no_license
|
TwoIceBing/job51
|
ba164cb18a193c44720d67663902be76a20f1f51
|
6d73893daad40d75653708e37e45d28bdbc62873
|
refs/heads/master
| 2020-04-10T21:35:54.758502
| 2018-12-11T09:02:50
| 2018-12-11T09:02:50
| 161,300,717
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
# coding:utf-8
import json
from urllib import urlopen, quote
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def getlnglat(address):
    """Geocode a Chinese place name via the Baidu Maps Geocoder v2 API.

    Returns the parsed JSON response dict; on success the coordinates are
    under temp['result']['location'] with keys 'lng' and 'lat'.
    (Python 2 code: note the print statements and byte/unicode handling.)
    """
    url = 'http://api.map.baidu.com/geocoder/v2/'
    output = 'json'
    ak = 'FOtHtZ92dCKMjpx0XA05g8VEZn95QWOK'
    add = quote(address.encode('utf-8'))  # city names are Chinese; URL-quote to avoid mojibake
    print add
    uri = url + '?' + 'address=' + add + '&output=' + output + '&ak=' + ak
    print uri
    req = urlopen(uri)
    res = req.read()  # raw response body (decoded to unicode by json.loads below)
    temp = json.loads(res)  # parse the JSON payload
    return temp
# Build ./input/city.json: for each group in city_salary.json, geocode every
# city and pair its coordinates with the average salary.
file = open(r'./input/city.json','w')  # create the output JSON file
with open(r'./input/city_salary.json', 'r') as f:
    js = json.load(f)
    data = []
    # js maps a group name -> {'city': [...], 'avg_salary': [...]} (parallel lists)
    for k,v in js.iteritems():
        c = {}
        c['city'] = k
        c['points'] = []
        for i in range(len(v['city'])):
            # skip the "remote recruitment" placeholder entry
            if v['city'][i] == u'异地招聘':
                continue
            lnglat = getlnglat(v['city'][i])  # geocode the city name to lng/lat
            test = {}
            test['lng'] = lnglat['result']['location']['lng']
            test['lat'] = lnglat['result']['location']['lat']
            test['count'] = v['avg_salary'][i]
            c['points'].append(test)
        data.append(c)
json.dump(data,file,ensure_ascii=False)
|
[
"1390932736@qq.com"
] |
1390932736@qq.com
|
818a3d7f024fe45733376415e120f38f9c5d4a61
|
f8666599b83d34c861651861cc7db5b3c434fc87
|
/plotly/validators/layout/xaxis/__init__.py
|
4af4c7585fe88366a6ad08c6dc33513618796377
|
[
"MIT"
] |
permissive
|
mode/plotly.py
|
8b66806e88c9f1820d478bab726f0bea81884432
|
c5a9ac386a40df2816e6c13264dadf14299401e4
|
refs/heads/master
| 2022-08-26T00:07:35.376636
| 2018-09-26T19:08:54
| 2018-09-26T19:19:31
| 60,372,968
| 1
| 1
|
MIT
| 2019-11-13T23:03:22
| 2016-06-03T19:34:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,943
|
py
|
from ._zerolinewidth import ZerolinewidthValidator
from ._zerolinecolor import ZerolinecolorValidator
from ._zeroline import ZerolineValidator
from ._visible import VisibleValidator
from ._type import TypeValidator
from ._titlefont import TitlefontValidator
from ._title import TitleValidator
from ._tickwidth import TickwidthValidator
from ._tickvalssrc import TickvalssrcValidator
from ._tickvals import TickvalsValidator
from ._ticktextsrc import TicktextsrcValidator
from ._ticktext import TicktextValidator
from ._ticksuffix import TicksuffixValidator
from ._ticks import TicksValidator
from ._tickprefix import TickprefixValidator
from ._tickmode import TickmodeValidator
from ._ticklen import TicklenValidator
from ._tickformatstops import TickformatstopsValidator
from ._tickformat import TickformatValidator
from ._tickfont import TickfontValidator
from ._tickcolor import TickcolorValidator
from ._tickangle import TickangleValidator
from ._tick0 import Tick0Validator
from ._spikethickness import SpikethicknessValidator
from ._spikesnap import SpikesnapValidator
from ._spikemode import SpikemodeValidator
from ._spikedash import SpikedashValidator
from ._spikecolor import SpikecolorValidator
from ._side import SideValidator
from ._showticksuffix import ShowticksuffixValidator
from ._showtickprefix import ShowtickprefixValidator
from ._showticklabels import ShowticklabelsValidator
from ._showspikes import ShowspikesValidator
from ._showline import ShowlineValidator
from ._showgrid import ShowgridValidator
from ._showexponent import ShowexponentValidator
from ._separatethousands import SeparatethousandsValidator
from ._scaleratio import ScaleratioValidator
from ._scaleanchor import ScaleanchorValidator
from ._rangeslider import RangesliderValidator
from ._rangeselector import RangeselectorValidator
from ._rangemode import RangemodeValidator
from ._range import RangeValidator
from ._position import PositionValidator
from ._overlaying import OverlayingValidator
from ._nticks import NticksValidator
from ._mirror import MirrorValidator
from ._linewidth import LinewidthValidator
from ._linecolor import LinecolorValidator
from ._layer import LayerValidator
from ._hoverformat import HoverformatValidator
from ._gridwidth import GridwidthValidator
from ._gridcolor import GridcolorValidator
from ._fixedrange import FixedrangeValidator
from ._exponentformat import ExponentformatValidator
from ._dtick import DtickValidator
from ._domain import DomainValidator
from ._constraintoward import ConstraintowardValidator
from ._constrain import ConstrainValidator
from ._color import ColorValidator
from ._categoryorder import CategoryorderValidator
from ._categoryarraysrc import CategoryarraysrcValidator
from ._categoryarray import CategoryarrayValidator
from ._calendar import CalendarValidator
from ._autorange import AutorangeValidator
from ._automargin import AutomarginValidator
from ._anchor import AnchorValidator
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
9f3bdf6da9344ea265c846222363cff5598dc14a
|
1ee59d0a9a87134efe0d61c2378b37ee2c87d93a
|
/svm.py
|
06755a84b04cc0e530251ba74aba83e8f9c38789
|
[] |
no_license
|
tayaba02/Tactile-recognition-of-textures
|
8a8f368ff5c7273a430a85c3afe5737a61dbd5f5
|
267af231ae27b72e185219d1851412cb47a9c997
|
refs/heads/main
| 2023-07-18T02:02:53.743084
| 2021-09-06T09:52:03
| 2021-09-06T09:52:03
| 403,332,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,859
|
py
|
from __future__ import absolute_import, division, print_function
from tensorflow.keras import Model, layers
import numpy as np
import pandas as pd
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU,Bidirectional
from numpy import genfromtxt
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation,ActivityRegularization
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings('ignore')
from datetime import datetime
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
scaler = StandardScaler()
import keras
from sklearn.metrics import precision_score, recall_score, accuracy_score
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation,ActivityRegularization
from keras.utils.np_utils import to_categorical
from sklearn import metrics
import tensorflow as tf
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation,ActivityRegularization
from keras.layers import Conv2D, MaxPooling2D,Flatten
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation,ActivityRegularization
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
from numpy import load
#plotting f
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import seaborn as sn
import matplotlib.pyplot as plt
from pylab import rcParams
def textplotting(model, x_test, y_test):
    """Print the confusion matrix and accuracy of `model` on the test set.

    Both predictions and labels are assumed one-hot; they are argmax-ed to
    class indices (as column vectors) before scoring.
    """
    predicted = np.argmax(model.predict(x_test), axis=1).reshape(-1, 1)
    actual = np.argmax(y_test, axis=1).reshape(-1, 1)
    print(confusion_matrix(actual, predicted))
    print(accuracy_score(actual, predicted))
def metric(name, best_model, x_train, y_train, x_test, y_test):
    """Save confusion-matrix heatmaps to <name>Test.png and <name>Train.png.

    Labels/predictions are one-hot and argmax-ed to class indices; the
    cross-tabulation is rendered with seaborn.
    """
    plt.clf()
    y_pred = best_model.predict(x_test)
    sn.set(font_scale=1)
    rcParams['figure.figsize'] = 30, 30
    # Test-set confusion matrix.
    test_cm = pd.crosstab(
        y_test.argmax(axis=1),
        y_pred.argmax(axis=1),
        rownames=['Actual'],
        colnames=['Predicted'],
    )
    sn.heatmap(test_cm, annot=True)
    plt.savefig(name + "Test.png")
    plt.clf()
    # Train-set confusion matrix.
    train_cm = pd.crosstab(
        y_train.argmax(axis=1),
        best_model.predict(x_train).argmax(axis=1),
        rownames=['Actual'],
        colnames=['Predicted'],
    )
    sn.heatmap(train_cm, annot=True)
    plt.savefig(name + "Train.png")
    plt.clf()
def plotting(name, history):
    """Plot training/validation accuracy and loss and save to <name>Accuracy.png.

    Args:
        name: filename prefix for the saved figure.
        history: a Keras History object; its `history` dict must contain
            'accuracy', 'val_accuracy', 'loss' and 'val_loss'.
    """
    plt.clf()
    sn.set(font_scale=2)
    rcParams['figure.figsize'] = 10, 10
    fig = plt.figure()
    history_dict = history.history
    print(history_dict.keys())
    # Top panel: accuracy curves.
    plt.subplot(2, 1, 1)
    plt.plot(history_dict['accuracy'])
    plt.plot(history_dict['val_accuracy'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['Training Set', 'Validation Set'], loc='lower right')
    # Bottom panel: loss curves.
    plt.subplot(2, 1, 2)
    plt.plot(history_dict['loss'])
    plt.plot(history_dict['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['Training Set', 'Validation Set'], loc='upper right')
    plt.tight_layout()
    plt.savefig(name + "Accuracy.png")
    plt.clf()
    # Bug fix: the original ended with a bare `plt.show` (attribute access,
    # not a call) — a silent no-op. Removed rather than called, since the
    # figure is already saved and cleared, and show() would block headless runs.
#Bio
# Load the pre-extracted "bio" sensor arrays and their labels from .npy files.
# Shapes are printed for a quick sanity check (presumably labels are one-hot
# and samples are 3-D per example — TODO confirm against the saving script).
ytrain = load('labels_trainAll.npy')
print(ytrain.shape)
xtrain = load('bio_trainAll.npy')
print(xtrain.shape)
xtest = load('bio_test.npy')
print(xtest.shape)
ytest = load('labels_test.npy')
print(ytest.shape)
# The triple-quoted block below is the alternative "icub" dataset, kept
# disabled; swap it in by moving the quotes.
'''
ytrain = load('labels_trainAll.npy')
print(ytrain.shape)
xtrain = load('icub_trainAll.npy')
print(xtrain.shape)
xtest = load('icub_test.npy')
print(xtest.shape)
ytest = load('labels_test.npy')
print(ytest.shape)
'''
#### 2D classifiers
#### 2D classifiers
def svm(xtrain, ytrain, xtest, ytest):
    """Train a linear SVM on flattened samples and print train/test accuracy."""
    from sklearn.svm import SVC
    from sklearn import metrics

    # Flatten each 2-D sample into a single feature vector.
    flat_train = xtrain.reshape(xtrain.shape[0], xtrain.shape[1] * xtrain.shape[2])
    flat_test = xtest.reshape(xtest.shape[0], xtest.shape[1] * xtest.shape[2])

    model = SVC(kernel='linear', probability=True)
    model.fit(flat_train, ytrain)

    print("Test Accuracy:", metrics.accuracy_score(ytest, model.predict(flat_test)))
    print("Training Accuracy:", metrics.accuracy_score(ytrain, model.predict(flat_train)))


svm(xtrain, ytrain, xtest, ytest)
|
[
"noreply@github.com"
] |
tayaba02.noreply@github.com
|
7fa26db7546059397d857662e5c3152ce9b6361f
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4182/codes/1582_2897.py
|
28a7bb152b0197d08b28465ac147822e23411d40
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
# Use this code as a starting point
# Read the input value and convert it to an integer
num = int(input("Digite o numero:"))
# Print double the number
print(num*2)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
4f054e3eb26478e6754fc46e3c9b7d2de5931107
|
4b340f77af77c120d06dab824606088814e7f5c8
|
/python-zumbies/twp295.py
|
55fe0eba5e5b6aedcd2b08071eaaf751b04e522e
|
[] |
no_license
|
jeyziel/python-studies
|
955c39938a8a89b7626a4ac8d37a215753e0fbed
|
f86ff997643d188ad7099047ff68343e4d96b92b
|
refs/heads/master
| 2020-03-10T20:00:33.983638
| 2018-07-30T18:54:01
| 2018-07-30T18:54:01
| 129,560,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
import random

# Build and show a list of 15 pseudo-random integers drawn from [10, 100].
lista = [random.randint(10, 100) for _ in range(15)]
print(lista)
|
[
"jeyzielgato@gmail.com"
] |
jeyzielgato@gmail.com
|
97afa910069619d1bf28d324dc78438e217c94d3
|
22ea049e5ff21a34972b1ac2e20e02eb0d562994
|
/mazegen.py
|
58661a920d25976fbff3342b401e931747a84835
|
[] |
no_license
|
mihai-craciun/maze
|
011edeaa8b5673cf79f4dd096ac2c40177ad4627
|
3b05c44097aa156ffe4f16af1b0fdd9028334e5a
|
refs/heads/master
| 2022-04-27T01:33:01.861623
| 2020-04-26T16:49:47
| 2020-04-26T16:49:47
| 258,873,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,243
|
py
|
from typing import Tuple, List
from disjoint_set import DisjointSet
from collections import deque
import random
class Maze:
    """Rectangular maze generator and solver.

    Each cell of `self.maze` is a bitmask of the directions that are OPEN
    (passable) from that cell. Three generation algorithms are supported:
    randomized DFS, Kruskal's algorithm over a random edge order, and
    recursive division. `self.solution` is a parallel grid of EMPTY /
    VISITED / SOLUTION markers filled by `solve()`.
    """

    # Available directions (bit flags; a set bit means the passage is open)
    LEFT = 1 << 0
    RIGHT = 1 << 1
    UP = 1 << 2
    DOWN = 1 << 3
    # Offsets: (row, col) delta corresponding to each direction
    OFF_LEFT = (0, -1)
    OFF_RIGHT = (0, 1)
    OFF_UP = (-1, 0)
    OFF_DOWN = (1, 0)
    # Special markers (used in the `solution` grid)
    EMPTY = 0
    VISITED = 1
    SOLUTION = 2
    # Algorithms
    DFS = 'dfs'
    KRUSKAL = 'kruskal'
    DIVISION = 'division'

    @staticmethod
    def get_dir(i_offset: int, j_offset: int) -> int:
        # NOTE(review): unimplemented stub (always returns None); no caller
        # in this class — confirm before relying on it (OFF_DIR serves the
        # same purpose).
        pass

    @staticmethod
    def neg_mask(d: int) -> int:
        """Return the full direction mask with the bits of `d` cleared."""
        return (Maze.LEFT | Maze.RIGHT | Maze.UP | Maze.DOWN) - d

    @staticmethod
    def can_go(cell: int, d: int) -> bool:
        """Return True if direction `d` is open from `cell` (EMPTY cells count as open)."""
        return True if cell == Maze.EMPTY else cell & d > 0

    def __init__(self, w: int=10, h: int=10
        ,start: Tuple[int ,int]=(0, 0), end: Tuple[int, int] = None, algorithm='kruskal'):
        """Create a w-by-h maze description (call `populate()` to generate it).

        `end` defaults to the bottom-right cell; `algorithm` is one of
        DFS / KRUSKAL / DIVISION.
        """
        # Inits
        self.w = w
        self.h = h
        self.start = start
        self.maze = None
        self.solution = None
        self.algorithm = algorithm
        if end is None:
            end = (h-1, w-1)
        self.end = end
        self.compute_aux_maps()

    def get_avail_neighbour_offset(self, y: int, x: int) -> List[Tuple[int, int]]:
        """Return offsets of in-bounds neighbours of (y, x) that are still EMPTY."""
        nbs = []
        for (i, j) in self.OFF_DIR.keys():
            if y + i < 0 or y + i >= self.h or x + j < 0 or x + j >= self.w:
                continue
            if self.maze[y + i][x + j] == self.EMPTY:
                nbs.append((i, j))
        return nbs

    def set_dir(self, y: int, x: int, d: int):
        """Open direction `d` from cell (y, x) (max() clears any negative marker)."""
        self.maze[y][x] = max(self.maze[y][x], 0) | d

    def populate_dfs(self):
        """Carve the maze with a randomized depth-first search from `start`."""
        stack = []
        # Run DFS, create a stack of nodes
        stack.append((self.start))
        while stack:
            i, j = stack.pop()
            # Select a new neighbor
            nbs = self.get_avail_neighbour_offset(i, j)
            if not nbs:
                continue
            random.shuffle(nbs)
            # pair the cells: open the passage in both directions
            for nb in nbs:
                nd = self.OFF_DIR[nb]
                self.set_dir(i, j, nd)
                self.set_dir(i+nb[0], j+nb[1], self.OPOSITE[nd])
                stack.append((i+nb[0], j+nb[1]))

    def populate_kruskal(self):
        """Carve the maze with Kruskal's algorithm over a shuffled edge list.

        An edge joins two adjacent cells; it is opened only when its cells
        are in different disjoint-set components, yielding a spanning tree.
        """
        cells = []
        edges = []
        for i in range(self.h):
            for j in range(self.w):
                cells.append((i, j))
                if i + 1 < self.h:
                    edges.append(((i, j), (i + 1, j)))
                if j + 1 < self.w:
                    edges.append(((i, j), (i, j + 1)))
        ds = DisjointSet(cells)
        random.shuffle(edges)
        for edge in edges:
            (a, b), (c, d) = edge
            if ds.union((a, b), (c, d)):
                nb = (c-a, d-b)
                nd = self.OFF_DIR[nb]
                self.set_dir(a, b, nd)
                self.set_dir(c, d, self.OPOSITE[nd])

    def populate_recursive(self, start: Tuple[int, int], end: Tuple[int, int]):
        """Recursive-division carving on the half-open cell range [start, end).

        Assumes `erase()` initialized every cell fully open; this method
        adds a cross of walls, reopens 3 of its 4 arms at random spots, and
        recurses into the four quadrants.
        """
        i1, j1 = start
        i2, j2 = end
        # if grid is less than 2x2 return
        if i2 - i1 < 2 or j2 - j1 < 2:
            return
        # choose x, y in the middle
        y = (i1 + i2) // 2
        x = (j1 + j2) // 2
        # set walls: close the passages crossing row y and column x
        for j in range(j1, j2):
            self.maze[y-1][j] &= self.neg_mask(self.DOWN)
            self.maze[y][j] &= self.neg_mask(self.UP)
        for i in range(i1, i2):
            self.maze[i][x-1] &= self.neg_mask(self.RIGHT)
            self.maze[i][x] &= self.neg_mask(self.LEFT)
        # select a random index from each one of the 4 smaller walls formed by the intersection
        walls = [
            (y, random.randint(j1, x-1), -1, 0, self.UP, self.DOWN),
            (y, random.randint(x, j2-1), -1, 0, self.UP, self.DOWN),
            (random.randint(i1, y-1), x, 0, -1, self.LEFT, self.RIGHT),
            (random.randint(y, i2-1), x, 0, -1, self.LEFT, self.RIGHT),
        ]
        # keep only 3 of them (one arm stays fully walled)
        toremove = random.choice(walls)
        walls.remove(toremove)
        # remove selected walls (reopen the passage in both cells)
        for wall in walls:
            i, j, o1, o2, d1, d2 = wall
            self.maze[i][j] |= d1
            self.maze[i+o1][j+o2] |= d2
        # populate the maze in all 4 sub mazes generated
        self.populate_recursive((i1, j1), (y, x))
        self.populate_recursive((i1, x), (y, j2))
        self.populate_recursive((y, j1), (i2, x))
        self.populate_recursive((y, x), (i2, j2))

    def populate(self):
        """(Re)generate the maze using the configured algorithm."""
        self.erase()
        if self.algorithm == self.DFS:
            self.populate_dfs()
        elif self.algorithm == self.KRUSKAL:
            self.populate_kruskal()
        elif self.algorithm == self.DIVISION:
            self.populate_recursive((0, 0), (self.h, self.w))

    def solve(self):
        """BFS from `start` to `end`; mark the shortest path in `self.solution`."""
        self.solution = [[self.EMPTY for _ in range(self.w)] for _ in range(self.h)]
        prevs = {self.start: None}
        queue = deque()
        queue.append(self.start)
        while queue:
            x = queue.popleft()
            if x == self.end:
                break
            i, j = x
            self.solution[i][j] = self.VISITED
            # get available neighbors
            nbs = []
            for k in self.OFF_DIR:
                (oi, oj), d = k, self.OFF_DIR[k]
                # if valid neighbor (in offsets, dir not blocked and not visited)
                if ((0 <= i + oi < self.h and 0 <= j + oj < self.w )
                    and self.maze[i+oi][j+oj] & self.OPOSITE[d] > 0 and self.solution[i+oi][j+oj] == self.EMPTY and (i+oi, j+oj) not in prevs):
                    nbs.append((i+oi, j+oj))
                    prevs[(i+oi, j+oj)] = x
            # add neighbors in queue
            queue.extend(nbs)
        # calculate paths: walk the predecessor chain back from `end`
        p = self.end
        while p is not None:
            i, j = p
            self.solution[i][j] = self.SOLUTION
            p = prevs[p]

    def erase(self):
        """Reset `self.maze` to the blank state the chosen algorithm expects.

        DIVISION starts from an all-open grid (with the outer border closed)
        and adds walls; the carving algorithms start from all-EMPTY cells.
        """
        if self.algorithm == self.DIVISION:
            self.maze = [[self.LEFT | self.RIGHT | self.UP | self.DOWN for _ in range(self.w)] for _ in range(self.h)]
            self.maze[0] = [x & self.neg_mask(self.UP) for x in self.maze[0]]
            self.maze[-1] = [x & self.neg_mask(self.DOWN) for x in self.maze[-1]]
            for i in range(self.h):
                self.maze[i][0] &= self.neg_mask(self.LEFT)
                self.maze[i][-1] &= self.neg_mask(self.RIGHT)
        else:
            self.maze = [[self.EMPTY for _ in range(self.w)] for _ in range(self.h)]

    def print(self):
        """Render the maze with box-drawing characters; solution cells (if
        solved) use the double-line character set."""
        if self.maze is None:
            return
        for i, line in enumerate(self.maze):
            char_func = lambda j_v: self.CHARS_EMPTY[j_v[1]] if (self.solution is not None
                and self.solution[i][j_v[0]] == self.SOLUTION) else self.CHARS[j_v[1]]
            line = list(map(char_func, enumerate(line)))
            print("".join(line))

    def compute_aux_maps(self):
        """Build the offset→direction, opposite-direction and glyph tables."""
        self.OFF_DIR = {
            self.OFF_LEFT: self.LEFT,
            self.OFF_RIGHT: self.RIGHT,
            self.OFF_UP: self.UP,
            self.OFF_DOWN: self.DOWN
        }
        # opposite direction of each direction flag
        self.OPOSITE = {
            self.LEFT: self.RIGHT,
            self.RIGHT: self.LEFT,
            self.UP: self.DOWN,
            self.DOWN: self.UP
        }
        # drawing chars (single-line box drawing, keyed by open-direction mask)
        self.CHARS = {
            self.EMPTY: " ",
            self.LEFT: "\u2578",
            self.RIGHT: "\u257a",
            self.UP: "\u2579",
            self.DOWN: "\u257b",
            self.LEFT | self.RIGHT: "\u2501",
            self.LEFT | self.UP: "\u251b",
            self.LEFT | self.DOWN: "\u2513",
            self.RIGHT | self.UP: "\u2517",
            self.RIGHT | self.DOWN: "\u250f",
            self.UP | self.DOWN: "\u2503",
            self.LEFT | self.RIGHT | self.UP: "\u253b",
            self.LEFT | self.RIGHT | self.DOWN: "\u2533",
            self.LEFT | self.UP | self.DOWN: "\u252b",
            self.RIGHT | self.UP | self.DOWN: "\u2523",
            self.LEFT | self.RIGHT | self.UP | self.DOWN: "\u254b"
        }
        # double-line variants used to highlight the solution path
        self.CHARS_EMPTY = {
            self.EMPTY: " ",
            self.LEFT: "\u2555",
            self.RIGHT: "\u2558",
            self.UP: "\u2559",
            self.DOWN: "\u2556",
            self.LEFT | self.RIGHT: "\u2550",
            self.LEFT | self.UP: "\u255d",
            self.LEFT | self.DOWN: "\u2557",
            self.RIGHT | self.UP: "\u255a",
            self.RIGHT | self.DOWN: "\u2554",
            self.UP | self.DOWN: "\u2551",
            self.LEFT | self.RIGHT | self.UP: "\u2569",
            self.LEFT | self.RIGHT | self.DOWN: "\u2566",
            self.LEFT | self.UP | self.DOWN: "\u2563",
            self.RIGHT | self.UP | self.DOWN: "\u2560",
            self.LEFT | self.RIGHT | self.UP | self.DOWN: "\u256c"
        }
if __name__ == "__main__":
maze = Maze(20, 20, algorithm=Maze.DIVISION)
maze.populate()
maze.solve()
maze.print()
|
[
"mihaicraciun96@gmail.com"
] |
mihaicraciun96@gmail.com
|
4c85fd307ea4f4d698efbbbc3b78460b6a68dd65
|
d2645593a16a882b407bcac8805e7444fa6f1ec9
|
/SlidingWindowMaximum.py
|
4431b09e5f3c91e38277e3420b3ba873c072e4d9
|
[] |
no_license
|
Steve-Jadav/LeetCode
|
040843533c4ab228a15752c388f953e9b2ecbf3f
|
7bcdd966c8a560bc0c71b9f357f600c50db97a53
|
refs/heads/master
| 2023-01-24T03:34:44.227573
| 2020-12-11T09:43:42
| 2020-12-11T09:43:42
| 265,413,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
# Problem 239
# Approach: deque
from collections import deque
class Solution:
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        """Return the maximum of every contiguous window of size k in nums.

        Monotonic-deque approach: the deque holds indices whose values are
        decreasing, so the current window's maximum is always nums[deck[0]].
        Each index is appended and popped at most once — O(n) time.
        Returns [] for empty input or k larger than len(nums).
        """
        # Idiom fixes: `nums == None` -> truthiness test, explicit len == 0
        # check folded in; behavior is unchanged for all inputs.
        if not nums or k > len(nums):
            return []
        result = []
        deck = deque()
        for i in range(len(nums)):
            # Drop the index that just slid out of the window.
            if deck and deck[0] == i - k:
                deck.popleft()
            # Maintain decreasing values: smaller tail entries can never win.
            while deck and nums[deck[-1]] <= nums[i]:
                deck.pop()
            deck.append(i)
            # Emit once the first full window is formed.
            if i >= k - 1:
                result.append(nums[deck[0]])
        return result
|
[
"ssjadav@asu.edu"
] |
ssjadav@asu.edu
|
efed81c1348d5dcc108b1148af5fce89e4e89030
|
c53d91b106d127f4c0181631513bb6244fdb591c
|
/bmmiici/migrations/0002_doctor_specializations.py
|
129dbc7372fc4dd6babb76075bd7790e0d40b2dc
|
[] |
no_license
|
rezaul3025/bmmiici
|
adee847f8c9fd49c34b04dfbc569ed9e3625dc6f
|
d72ffab09cfd1a64af80b5edaf8e9b2f42031aa3
|
refs/heads/master
| 2020-12-03T00:18:33.256610
| 2017-07-08T13:00:07
| 2017-07-08T13:00:07
| 96,013,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-05 12:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required `specializations` text field to the Doctor model.
    # default='NULL' (the literal string, not SQL NULL) backfills existing
    # rows; preserve_default=False drops that default once the migration runs.

    dependencies = [
        ('bmmiici', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='doctor',
            name='specializations',
            field=models.TextField(default='NULL'),
            preserve_default=False,
        ),
    ]
|
[
"rezaul.km105@gmail.com"
] |
rezaul.km105@gmail.com
|
19406253672ec27fdc20e4d53f4be3f3efaa50b3
|
d9c176985d24b82b21d5dd319b5ff82ae5d783f6
|
/hw4.py
|
677aff7aace43bb5955021afb87666f7bdb2174a
|
[
"MIT"
] |
permissive
|
nv29arh/hw4
|
913248f57a9e021f7147b31fa88b713a1b2d5d97
|
f2e1f38bf4fa0d6bc11cb07df63754a0fb7e3595
|
refs/heads/master
| 2022-04-24T10:40:55.254816
| 2020-04-18T15:35:18
| 2020-04-18T15:35:18
| 256,785,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,274
|
py
|
#1. Напишите функцию (F): на вход список имен и целое число N; на выходе список длины N случайных имен из первого списка
# (могут повторяться, можно взять значения: количество имен 20, N = 100, рекомендуется использовать функцию random);
import random
def random_list_of_the_names(a, N):
    """Return a list of N names drawn uniformly, with replacement, from `a`.

    a: the source list of names
    N: how many names to draw
    """
    return [random.choice(a) for _ in range(N)]
# Demo: 20 Russian first names used as the sampling pool for all exercises.
a = ['Вася','Петя','Аня', 'Катя', 'Саша', 'Оля', 'Дима', 'Костя', 'Алла', 'Лена', 'Ксюша', 'Паша', 'Артем', 'Андрей',
     'Сергей', 'Инна', 'Ашот', 'Миша', 'Стас', 'Лев']
print(random_list_of_the_names(a,20))  # exercise 1: 20 random draws
#2. Напишите функцию вывода самого частого имени из списка на выходе функции F;
from collections import Counter
def the_most_common_name(a, N):
    """Draw N random names from `a`; return (the draws, the most common one).

    The second element is Counter.most_common(1): a one-element list holding
    a (name, count) pair.
    """
    drawn = [random.choice(a) for _ in range(N)]
    return drawn, Counter(drawn).most_common(1)
print(the_most_common_name(a,20))  # exercise 2: the draws plus the most frequent name
# 3 Напишите функцию вывода самой редкой буквы, с которого начинаются имена в списке на выходе функции F
def the_rarest_letter(a, N):
    """Draw N random names from `a`; return (the draws, the rarest first letter).

    The second element is a (letter, count) pair for the least common
    initial among the drawn names.
    """
    drawn = [random.choice(a) for _ in range(N)]
    initials = [name[0] for name in drawn]
    return drawn, Counter(initials).most_common()[-1]
print(the_rarest_letter(a, 15))  # exercise 3: the draws plus the rarest initial letter
|
[
"nv29arh@gmail.com"
] |
nv29arh@gmail.com
|
0703fd507726e31b497d16301aa2cc35d4f91a4b
|
20bbbc7d104ec39af1a15b1567f7a29f84829b45
|
/djangosite/booksite/booksite/settings.py
|
df0ee4247a8af5ea5b2207b43d5465a95002ccf3
|
[] |
no_license
|
liuxinqiqi/Pythonclassbasic
|
07eeb01e6d579a995600f31408f68b22d36e573b
|
fbd85cd9c4341c611eba170083ed944f5b8f16da
|
refs/heads/master
| 2021-01-16T18:27:00.640306
| 2017-08-12T02:41:00
| 2017-08-12T02:41:00
| 100,084,047
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,303
|
py
|
"""
Django settings for booksite project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0n&(%lny-o13b&-&cgk4bo-!2#z9%v*s@yk&o6x^fhu)8=d$j4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; restrict this before deploying.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'usr',
    'costomers',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'booksite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'booksite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        # NOTE(review): database name 'USER' looks like a placeholder — confirm.
        'NAME': 'USER',
        'USER': 'root',
        'PASSWORD': '123',
        'HOST':'',
        'PORT':'',
    },
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
AUTH_USER_MODEL = 'usr.User'
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = '/static/'
STATIC_URL = '/static/'
# BUG FIX: the original value was (BASE_DIR, 'static') — a two-element tuple
# that told Django to serve the entire project directory (settings.py included)
# as static files, plus a cwd-relative 'static'. The intended single entry is
# <BASE_DIR>/static.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
|
[
"XinQi_Liu@outlook.com"
] |
XinQi_Liu@outlook.com
|
e1a3b9f62dfa9070f5a3fae6496477ee023461b4
|
07560f9448a55d14380d5cf022323af09e2d1e6b
|
/1.DataScience/2.BigO/Green18/lec10_sum.py
|
3dfc22aaaea7b383c4afbb59abc65f72be92844e
|
[] |
no_license
|
namth2015/python
|
58d6cb5d93db6ccaec0a436150d1c4458d4f6a03
|
a32e691e2d5fc1ae1d80d0c7d99185c456e48212
|
refs/heads/master
| 2023-06-01T23:42:59.585763
| 2021-06-19T10:34:39
| 2021-06-19T10:34:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
class cell:
    """A single grid cell: coordinates (x, y) plus an optional payload value."""

    def __init__(self, x, y, val=None):
        # val stays None when the cell carries no value.
        self.x, self.y, self.val = x, y, val
m, n = map(int,input().split())
k = int(input())
a = []
for i in range(k):
x, y, val = map(int,input().split())
a.append(cell(x,y,val))
lst = [x for x in a if x.val > 0]
print(len(lst))
for i in range(len(lst)):
print(lst[i].x, lst[i].y)
|
[
"cungbac.tran@gmail.com"
] |
cungbac.tran@gmail.com
|
d76fef10ced97c1c78acbadd55454645b2ab6c26
|
427c02cd6cb7719159a4d130ab501723c8f8d717
|
/wsi_qa/applicants/apps.py
|
c8c171f6b3fc452c49883809adef5845bebfbf13
|
[] |
no_license
|
rhedenjohn/wsiqa
|
64016f8a1babf2efe2726d1b353065446011d6fe
|
51f2e8ece347cbbb4a8994ec53d49d3379479ef3
|
refs/heads/master
| 2022-11-09T21:04:25.062978
| 2020-06-16T12:58:59
| 2020-06-16T12:58:59
| 271,193,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
from django.apps import AppConfig
class ApplicantsConfig(AppConfig):
    """Django application configuration for the ``applicants`` app."""

    # App label used by Django's app registry.
    name = 'applicants'
|
[
"rhedenjohn@hotmail.com"
] |
rhedenjohn@hotmail.com
|
b0ed056ba3090317cb01c1f931b47c350a3d5312
|
ce2e068a1bf0111b85ebccd4009370e02d3e268a
|
/hahaha/util.py
|
025e4f78455eee3a3d5608c551fa622e79fb93b7
|
[] |
no_license
|
wzk1015/hahaha
|
6565dbca5f215b3e0b0009f2cbc17237fcae1a5b
|
e676e6289554f2b64f30864f77c66dd52e2e4dc3
|
refs/heads/master
| 2023-02-12T15:32:12.815595
| 2021-01-03T18:01:47
| 2021-01-03T18:01:47
| 324,585,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
import numpy as np
import cv2
import os
import pkg_resources
# Package name used for pkg_resources lookups, and the package-relative
# directory holding bundled template images.
PACKAGE_NAME = "hahaha"
template_path = "template-imgs/"
def get_path(path):
    """Return the absolute filesystem path of *path* inside this package."""
    return pkg_resources.resource_filename(PACKAGE_NAME, path)
def check_file(path):
    """Raise AssertionError if *path* does not exist on disk.

    Uses an explicit raise instead of ``assert`` so the check still runs when
    Python is started with -O (assert statements are stripped in optimized
    mode). The exception type is unchanged for existing callers.
    """
    if not os.path.exists(path):
        raise AssertionError("file '" + path + "' not exists")
def read(path) -> np.ndarray:
    """Load the packaged image at *path* and return it as a cv2 array.

    Raises AssertionError (via check_file) if the resolved path is missing.
    """
    p = get_path(path)
    check_file(p)
    return cv2.imread(p)
def read_template(path) -> np.ndarray:
    """Load an image from the bundled template-imgs directory."""
    return read(os.path.join(template_path, path))
def save(path, img):
    """Write *img* to *path* on disk and report the destination."""
    cv2.imwrite(path, img)
    print("saved to", path)
def show(name, img):
    """Display *img* in a window titled *name*; blocks until a key is pressed."""
    print("showing")
    cv2.imshow(name, img)
    cv2.waitKey(0)
def wrapper(f, *args, **kwargs):
    """Invoke *f* and either persist or display its image result.

    If an ``out_path`` keyword is present it is removed from the call and the
    result is saved there; otherwise the result is shown in a window.
    """
    call_kwargs = dict(kwargs)
    if "out_path" in call_kwargs:
        destination = call_kwargs.pop("out_path")
        save(destination, f(*args, **call_kwargs))
    else:
        show("result", f(*args, **call_kwargs))
|
[
"18231047@buaa.edu.cn"
] |
18231047@buaa.edu.cn
|
fb4dfff30de3a38bb92711cbb6dc49b298649908
|
111a0e4c5950a81b752c8ac941d3e296dadf48b8
|
/guessing_game.py
|
eddda92bb6eabcf63d6b69f5705b1e160578781a
|
[] |
no_license
|
Mr-JackWilson2019/Treehouse-project-1
|
de9fcb1ba75f2868ae0a1743da02860cadae5708
|
af11f25a5a856e10f6b2841600a1eef3bb44a255
|
refs/heads/master
| 2020-05-26T09:26:06.159021
| 2019-05-23T07:45:14
| 2019-05-23T07:45:14
| 188,186,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,655
|
py
|
"""
Python Web Development Techdegree
Project 1 - Number Guessing Game
--------------------------------
For this first project we will be using Workspaces.
NOTE: If you strongly prefer to work locally on your own computer, you can totally do that by clicking: File -> Download Workspace in the file menu after you fork the snapshot of this workspace.
"""
#random is slower than numpy.random
import random
def start_game(high_score):
    """Play one round of guess-the-number between 1 and 10 (inclusive).

    :param high_score: lowest guess count achieved so far, or None when no
        round has been won yet; carried forward through recursive replays.

    Reads guesses from stdin, prints higher/lower hints, and on success
    offers a replay that keeps the better (lower) score.
    """
    print("""Hello, welcome to the number guessing game. This program will
randomly select a number between 1 and 10, including 1 and 10. To play,
guess a number between 1 and 10. If the program tells you the number is
higher, guess another number that is higher than the last number you
guessed. If the program tells you that the number is lower, guess a number
lower than the one you guessed. Try to beat the high score if one exists.
Enjoy!""")
    if high_score is not None:
        print(f"The current high score is {high_score}")
    # BUG FIX: randrange(1, 10) never returns 10, contradicting the intro text
    # above; randint(1, 10) is inclusive on both ends.
    number_to_guess = random.randint(1, 10)
    number_of_guesses = 0
    try:
        guess = int(input("Guess the number. It's between 1 and 10: "))
    except ValueError:
        print("Looks like you didn't enter an integer. Please try again")
        exit()
    while True:
        try:
            number_of_guesses += 1
            if guess == number_to_guess:
                print("congratulations! You won!")
                print(f"It took {number_of_guesses} tries.")
                play_again = input("Do you want to play again? Y/N ")
                if play_again.upper() == "Y":
                    # Carry the better (lower) score into the next round.
                    if (high_score is None) or\
                            (number_of_guesses < high_score):
                        start_game(high_score = number_of_guesses)
                    else:
                        start_game(high_score = high_score)
                elif play_again.upper() == "N":
                    exit()
                else:
                    # Anything other than Y/N is treated as invalid input.
                    raise ValueError()
            elif guess < number_to_guess:
                guess = int(input("It's higher! "))
                continue
            elif guess > number_to_guess:
                guess = int(input("It's lower! "))
                continue
        except ValueError as e:
            # FIX: added the missing space between "N" and "when".
            print("Please enter integers for guessing. Enter either a Y or an N "
                  "when prompted with Y/N")
            # Don't count invalid input as a guess.
            number_of_guesses -= 1
            continue
if __name__ == '__main__':
# Kick off the program by calling the start_game function.
start_game(None)
|
[
"Mr.JackWilson2019@gmail.com"
] |
Mr.JackWilson2019@gmail.com
|
59fc876954e1f9decf3028bb8dd67468bddceff3
|
c107e850b0cc4b925f2d424702dbcf4dcbb52ec3
|
/text_to_phoneme.py
|
bd882ebf89f99409c4569d911ae5f534926361fe
|
[] |
no_license
|
rohithkodali/g2p
|
74deaa29909ebf7825678e9cfe63bc898e45cd29
|
7168b46b2fc9186bd193ae7c25c881fbf697ff77
|
refs/heads/master
| 2021-01-21T07:34:21.254448
| 2015-08-13T12:49:16
| 2015-08-13T12:49:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,653
|
py
|
import sys
import os
""" This file maps the manual transliterations into phonemes using
automatic transliterations and mono-phoneset file. First step is to
find the one-to-one mapping between mannual and automatic
tranliterations. There could be mismatches between them, i.e.
sometimes people may mix two words into single or they forget to
transliterate some words. This tries to correct one word mistakes.
"""
###############################
# After careful observation, we find this table which shows the
# possible phonemes for each grapheme. We can see that, at least for
# telugu, people didn't use the letters 'q', and 'w' for transliteration
# Grapheme -> plausible phoneme alternatives observed in the data; used later
# to validate that an automatic transliteration can correspond to a manual one.
ph_dic = {}
ph_dic['2'] = ['r']
ph_dic['3'] = ['t', 'm']
ph_dic['a'] = ['aa', 'ai', 'e', 'ei']
ph_dic['b'] = ['bh']
ph_dic['c'] = ['ch', 'k', 's']
ph_dic['d'] = ['dh', 'dx', 'dxh']
ph_dic['e'] = ['ai', 'ei', 'i', 'ii']
ph_dic['f'] = ['ph']
ph_dic['g'] = ['gh', 'j']
ph_dic['h'] = ['h']
ph_dic['i'] = ['a', 'ai', 'ii']
ph_dic['j'] = ['j']
ph_dic['k'] = ['c', 'kh']
ph_dic['l'] = ['e']
ph_dic['m'] = ['e']
ph_dic['n'] = ['e']
ph_dic['o'] = ['aa', 'a', 'au', 'oo', 'uu', 'w']
ph_dic['p'] = ['ph']
ph_dic['r'] = ['aa', 'rq', 'w']
ph_dic['s'] = ['e', 'sh', 'sx']
ph_dic['t'] = ['dh', 'th', 'tx', 'txh']
ph_dic['u'] = ['a','uu', 'w', 'y']
ph_dic['v'] = ['ei','u','uu', 'w']
ph_dic['x'] = ['e']
ph_dic['y'] = ['e', 'ei', 'w']
ph_dic['z'] = ['j']
####################################
# convert each word into corresponding phoneme representation
def word2phone(phoneset, word):
    """Greedily segment *word* into phones drawn from *phoneset*.

    Tries the longest match first (3 letters, then 2, then 1) at each
    position.

    BUG FIX: the original only advanced j inside ``if word[j] in phoneset``,
    so a character matching no phone at all caused an infinite loop. The
    index now advances unconditionally, silently skipping unknown characters.

    :param phoneset: list of valid mono-phone strings
    :param word: transliterated word to segment
    :return: list of phones covering the recognisable parts of *word*
    """
    j = 0
    phones = []
    while j < len(word):
        # check if three letters of a word are equal to a phone or not
        if word[j:j+3] in phoneset:
            phones.append(word[j:j+3])
            j = j + 3
        # check if two letters of a word are equal to a phone or not
        elif word[j:j+2] in phoneset:
            phones.append(word[j:j+2])
            j = j + 2
        else:
            # single letter: append when known, but always move on so an
            # unknown character cannot hang the loop
            if word[j] in phoneset:
                phones.append(word[j])
            j = j + 1
    return phones
# this function modifys the single word mistakes by joining the two words in the
# longer list
def adjust_oneword_mistakes(a, t):
    """Align two token lists that differ in length by exactly one word.

    Walks the longer of *a* (manual) and *t* (automatic) and merges the first
    adjacent pair whose initial letters cannot correspond according to the
    module-level ph_dic table (or the trailing pair when no mismatch is found
    before the shorter list ends). Both lists are mutated in place.

    NOTE(review): uses xrange, so this file is Python 2 code; ``flag`` is
    assigned but never used.

    :param a: manual transliteration tokens
    :param t: automatic transliteration tokens
    :return: the (possibly modified) pair (a, t)
    """
    flag = 0
    if len(t) > len(a):
        for j in xrange(len(t)):
            if j == len(a):
                # ran past the shorter list: merge the last two tokens
                t[j-1] = t[j - 1] + t[j]
                t.pop(j)
                break
            else:
                if (a[j][0] != t[j][0]) and (t[j][0] not in ph_dic[a[j][0]]):
                    t[j-1] = t[j-1] + t[j]
                    t.pop(j)
                    break
        return a, t
    else:
        for j in xrange(len(a)):
            if j == len(t):
                a[j-1] = a[j - 1] + a[j]
                a.pop(j)
                break
            else:
                if (a[j][0] != t[j][0]) and (t[j][0] not in ph_dic[a[j][0]]):
                    a[j-1] = a[j-1] + a[j]
                    a.pop(j)
                    break
        return a, t
if __name__ == "__main__":
    # NOTE(review): xrange below means this script targets Python 2.
    if len(sys.argv) != 4:
        sys.exit("Usage: python text_to_phone.py <manual transliteration file> \
<automatic tranliteration file> <mono-phoneset file>")
    # get mannual transliteration/annotation file
    annont_file = sys.argv[1]
    # get automatic transliteration file
    trans_file = sys.argv[2]
    # get mono-phones file
    lang_file = sys.argv[3]
    ### Read mono-phones into a list ###
    phoneset = [] # initialize the phoneset
    # open the phoneset file which contains utf8 and their correspond phone
    with open(lang_file, 'r') as f:
        for line in f.readlines():
            ph_list = line.strip().split()
            if ph_list[1] not in phoneset:
                phoneset.append(ph_list[1])
    ### Read annotation file ####
    with open(annont_file, 'r') as f:
        annont_lines = f.readlines()
    ### Read transliteration file ###
    with open(trans_file, 'r') as f:
        trans_lines = f.readlines()
    ### sanity check ###
    if len(annont_lines) != len(trans_lines):
        sys.exit('The number of lines in automatic and mannual transliteration \
files are different. Check agian!')
    ### Convert each sentence into its phonemic representation ###
    w2p_train = []
    train_dic = {}
    test_words = []
    for i in xrange(len(annont_lines)):
        # the lines in these ranges are for test set, thus just add them to test set
        # NOTE(review): the 1850-1900 / 2200+ split boundaries are hard-coded
        # for one specific corpus.
        if (i >= 1850 and i <= 1900) or (i >= 2200):
            a_list = annont_lines[i].strip().split()
            for w in a_list:
                if w not in test_words:
                    test_words.append(w)
            continue
        a_list = annont_lines[i].strip().split()
        t_list = trans_lines[i].strip().split()
        a_len = len(a_list)
        t_len = len(t_list)
        # As of now we care about only one word mistakes!
        if abs(a_len - t_len) <= 1:
            # if the lengths difference is one then adjust the bigger list by merging
            # two words
            if abs(a_len - t_len) == 1:
                #print "before", a_list, t_list
                a_list, t_list = adjust_oneword_mistakes(a_list, t_list)
                #print "after", a_list, t_list
            for j in xrange(len(a_list)):
                # check that current word is unique or not
                if a_list[j] not in train_dic:
                    # map the word to phone
                    phones = word2phone(phoneset, t_list[j])
                    # add the word into dictionary
                    train_dic[a_list[j]] = ' '.join(phones)
                    # this is double check to make sure that phones are correspond
                    # to the annotation word by comparing the first phone in both
                    # word and phones
                    if a_list[j][0] == phones[0] or phones[0] in ph_dic[a_list[j][0]]:
                        w2p_train.append(a_list[j] + ' ' + ' '.join(phones))
    with open('lexicon_train.txt', 'w') as f:
        f.write('\n'.join(w2p_train))
    with open('test.words', 'w') as f:
        f.write('\n'.join(test_words))
    # Deduplicate the lexicon via shell tools and rename it to train.lex.
    os.system('cat lexicon_train.txt | sort | uniq > temp.txt')
    os.system('rm lexicon_train.txt')
    os.system('mv temp.txt train.lex')
|
[
"bajibau7@gmail.com"
] |
bajibau7@gmail.com
|
57cc05f3174ad501f76c32351312e66932f0f089
|
31a17ccc7d77eac2673b2c1e2c489eaf75f7af58
|
/omtp_factory_behaviors/omtp_factory_flexbe_states/src/omtp_factory_flexbe_states/detect_part_camera_state.py
|
d77d023165092c9991f208ac04016fc9637026cc
|
[] |
no_license
|
aaurm867/omtp_assignments
|
148abecae4043049600270d4854ef25c1e8c2e80
|
9ba73a20ce9ff90ef59a3209eaaca8a98572b090
|
refs/heads/master
| 2021-10-15T21:28:14.980785
| 2021-09-29T10:26:28
| 2021-09-29T10:26:28
| 245,206,695
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,172
|
py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, Delft University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Delft University of Technology nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Authors: the HRWROS mooc instructors
# Authors: Simon Bogh, updated Mar 31 2020
import rospy
import rostopic
import inspect
import tf2_ros
import tf2_geometry_msgs
from flexbe_core import EventState, Logger
from geometry_msgs.msg import Pose, PoseStamped
from omtp_gazebo.msg import LogicalCameraImage, Model
from flexbe_core.proxy import ProxySubscriberCached
'''
Created on Sep 5 2018
@author: HRWROS mooc instructors
Updated on Mar 31 2020
@author: Simon Bogh
'''
class DetectPartCameraState(EventState):
    '''
    State to detect the pose of the part with any of the cameras in the factory simulation.

    -- ref_frame     string      reference frame for the part pose output key
    -- camera_topic  string      the topic name for the camera to detect the part
    -- camera_frame  string      frame of the camera

    #> pose          PoseStamped Pose of the detected part

    <= continue                  if the pose of the part has been succesfully obtained
    <= failed                    otherwise
    '''
    def __init__(self, ref_frame, camera_topic, camera_frame):
        # Declare outcomes, input_keys, and output_keys by calling the super constructor with the corresponding arguments.
        super(DetectPartCameraState, self).__init__(outcomes = ['continue', 'failed'], output_keys = ['pose'])
        self.ref_frame = ref_frame
        self._topic = camera_topic
        self._camera_frame = camera_frame
        self._connected = False
        self._failed = False
        # tf to transfor the object pose
        self._tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0)) #tf buffer length
        self._tf_listener = tf2_ros.TransformListener(self._tf_buffer)
        # Subscribe to the topic for the logical camera
        (msg_path, msg_topic, fn) = rostopic.get_topic_type(self._topic)
        if msg_topic == self._topic:
            msg_type = self._get_msg_from_path(msg_path)
            self._sub = ProxySubscriberCached({self._topic: msg_type})
            self._connected = True
        else:
            # Topic not yet advertised; a retry happens in on_enter via _connected.
            Logger.logwarn('Topic %s for state %s not yet available.\nFound: %s\nWill try again when entering the state...' % (self._topic, self.name, str(msg_topic)))
    def execute(self, userdata):
        # This method is called periodically while the state is active.
        # Main purpose is to check state conditions and trigger a corresponding outcome.
        # If no outcome is returned, the state will stay active.
        if not self._connected:
            userdata.pose = None
            return 'failed'
        if self._failed:
            userdata.pose = None
            return 'failed'
        # Falling through without a return keeps the state active until a
        # camera message containing an 'object' model arrives.
        if self._sub.has_msg(self._topic):
            message = self._sub.get_last_msg(self._topic)
            for model in message.models:
                if model.type == 'object':
                    pose = PoseStamped()
                    pose.pose = model.pose
                    pose.header.frame_id = self._camera_frame
                    pose.header.stamp = rospy.Time.now()
                    # Transform the pose to desired output frame
                    # (self._transform is looked up in on_enter).
                    pose = tf2_geometry_msgs.do_transform_pose(pose, self._transform)
                    userdata.pose = pose
                    return 'continue'
    def on_enter(self, userdata):
        # This method is called when the state becomes active, i.e. a transition from another state to this one is taken.
        # It is primarily used to start actions which are associated with this state.
        # Get transform between camera and robot1_base
        try:
            self._transform = self._tf_buffer.lookup_transform(self.ref_frame, self._camera_frame, rospy.Time(0), rospy.Duration(1.0))
        except Exception as e:
            # NOTE(review): the '!!!' line below looks like leftover debug
            # output and duplicates the following warning.
            Logger.logwarn('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Could not transform pose: ' + str(e))
            Logger.logwarn('Could not transform pose: ' + str(e))
            self._failed = True
    def on_exit(self, userdata):
        # This method is called when an outcome is returned and another state gets active.
        # It can be used to stop possibly running processes started by on_enter.
        pass # Nothing to do
    def on_start(self):
        # This method is called when the behavior is started.
        # If possible, it is generally better to initialize used resources in the constructor
        # because if anything failed, the behavior would not even be started.
        pass
    def on_stop(self):
        # This method is called whenever the behavior stops execution, also if it is cancelled.
        # Use this event to clean up things like claimed resources.
        pass # Nothing to do
    def _get_msg_from_path(self, msg_path):
        '''
        Resolve a "package/MessageName" topic type string to its Python class.

        Created on 11.06.2013
        @author: Philipp Schillinger
        '''
        msg_import = msg_path.split('/')
        msg_module = '%s.msg' % (msg_import[0])
        package = __import__(msg_module, fromlist=[msg_module])
        clsmembers = inspect.getmembers(package, lambda member: inspect.isclass(member) and member.__module__.endswith(msg_import[1]))
        return clsmembers[0][1]
|
[
"ignigomoreno@gmail.com"
] |
ignigomoreno@gmail.com
|
6543caa054b01d2c7701af472b4883fa2b53e6be
|
d91e60de0880565c339521129f64fe5cd207bf88
|
/total_creation4.py
|
5d28d44b2e04597008b9807dbe5b379825b8edd8
|
[] |
no_license
|
laur34/automatize_sample_sheet
|
e96dfc1872309e6888ec8154b805c26334c355f5
|
409292a2fd62730f06d545a4c7b487a44c39b789
|
refs/heads/master
| 2021-06-20T23:34:15.502433
| 2021-06-10T13:50:39
| 2021-06-10T13:50:39
| 222,692,501
| 0
| 0
| null | 2021-01-21T12:08:57
| 2019-11-19T12:36:42
|
Python
|
UTF-8
|
Python
| false
| false
| 8,920
|
py
|
# Script to automate creation samplesheet.txt file for use with vsearch script for fusion primers (and paired-ends.)
# Version 3.0 - 10.6.2021 - four columns of primers instead of two
# Needs to be run in Python3.
# Before using this script, the sample sheet from the lab must be made into the correct input.
# That is, 1st col is Sample_name, 2nd is Fusion_COI_i5_TAG_Primer, 3rd is Fusion_COI_i7_TAG_Primer.
# Everything else (other columns, and rows not in project) should be deleted from it.
# Give the name of the corrected sheet (as csv) as the arg value.
# e.g. python total_creation4.py splsht_MiSeq_Run2019_25.csv Sample_name Fusion_COI_i5_TAG_Primer Fusion_COI_i7_TAG_Primer
import argparse, sys
# Command-line interface: positional sheet name plus optional column names and
# merge mode; defaults match the lab's standard sample-sheet headers.
parser = argparse.ArgumentParser(description='Transform 3-column sample sheet into first input for samplesheet.txt.')
parser.add_argument('ModifiedSampleSheetCsv_name', nargs=1, help="Give the name of the modified sheet from lab (as csv, not tab-separated).")
parser.add_argument('Sample_name_colname', nargs='?', default="Sample_name", help="Name of the first column (sample names)")
parser.add_argument('i5_TAG_Primer_colname', nargs='?', default="Fusion_COI_i5_TAG_Primer", help="Name of the second column (fwd primers)")
parser.add_argument('i7_TAG_Primer_colname', nargs='?', default="Fusion_COI_i7_TAG_Primer", help="Name of the third column (reverse primers)")
parser.add_argument('merge_status', nargs='?', default="merged", help="merged or forward")
args = parser.parse_args()
import os, warnings, csv, glob
# Define a function for reverse-complementing.
def revcomp(seq):
    """Return the reverse complement of *seq*, honouring IUPAC ambiguity codes."""
    complement = str.maketrans('ACGTacgtRYMKrymkVBHDvbhd', 'TGCAtgcaYRKMyrkmBVDHbvdh')
    complemented = seq.translate(complement)
    return complemented[::-1]
# Implement check for intermediate files, with option to delete them.
import subprocess
# BUG FIX: os.path.isfile() does not expand wildcards, so the original
# "input_1*.tsv" / "input_2*.tsv" checks could never match anything; glob
# handles the patterns, and the file names now match the ones actually
# produced (input1*.tsv / input2*.tsv).
prev_run = (os.path.isfile("samplesheet.txt") or os.path.isfile("samplesheet.tsv")
            or os.path.isfile("joined.tsv")
            or bool(glob.glob("input1*.tsv")) or bool(glob.glob("input2*.tsv")))
if prev_run:
    i = input('Intermediate files or output already exists. Delete them and continue (y/n)?\n')
    if i.lower() == 'yes' or i.lower() == 'y':
        # joined.tsv is included here: it was checked above but never removed
        # in the original, which would trip up the next run.
        for leftover in ('samplesheet.txt', 'samplesheet.tsv', 'input1.tsv',
                         'input2.tsv', 'input1sorted.tsv', 'input2sorted.tsv',
                         'joined.tsv'):
            if os.path.isfile(leftover):
                process = subprocess.Popen(['rm', leftover])
    else:
        print("Please delete files from previous attempt, and try again.")
        sys.exit()
#TODO: remove below checks
# Define function to read in mod sample sheet and create new file (input1) from it, checking first if already exists in dir.
def createInput1file(splsht3col):
    """Build input1.tsv from the lab's 3-column sample sheet.

    Each appended row is tab-separated: corename (underscores replaced by
    dashes), Sample_name, i5 primer, revcomp(i7), i7 primer, revcomp(i5).
    Exits with a hint when an expected column header is missing.

    :param splsht3col: path to the comma-separated sample sheet
    """
    with open(splsht3col, 'r') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=',')
        for row in reader:
            try:
                r_rc = revcomp(row[args.i7_TAG_Primer_colname])
            except KeyError as err:
                print(err, "\n" + "Third column is not named Fusion_COI_i7_TAG_Primer. Please rename it, or specify current name as ARGV4.")
                sys.exit()
            try:
                samplename = row[args.Sample_name_colname]
                corename = samplename.replace("_", "-")
            except KeyError as err:
                print(err, "\n" + "First column is not named Sample_name. Please rename it, or specify current name as ARGV2.")
                sys.exit()
            try:
                f_rc = revcomp(row[args.i5_TAG_Primer_colname])
            except KeyError as err:
                print("\n" + "Second column is not named Fusion_COI_i5_TAG_Primer. Please rename it, or specify current name as ARGV3")
                sys.exit()
            try:
                # NOTE(review): the output handle is reopened per row in
                # append mode and never explicitly closed.
                print(corename + "\t" + row[args.Sample_name_colname] + "\t" + row[args.i5_TAG_Primer_colname] + "\t" + r_rc + "\t" + row[args.i7_TAG_Primer_colname] + "\t" + f_rc, file=open("input1.tsv","a"))
            except KeyError as err:
                print(err, "\n" + "Second column is not named Fusion_COI_i5_TAG_Primer. Please rename it, or specify current name as ARGV3.")
                sys.exit()
    print("File input1.tsv successfully created.")
# Define fcn to check if underscores are present in any fastq file names in the directory:
def checkFASTQnamesForUsc():
    """Abort if any forward/reverse FASTQ file name contains an extra underscore.

    Illumina names have the form <sample>_S<n>_L00<n>_R<1|2>_001.fastq, i.e.
    five underscore-separated fields; more fields means the sample name
    itself contains an underscore, which would break later splitting.
    """
    checks = (
        ("*_R1_001.fastq", "Forward FASTQ file names look ok."),
        ("*_R2_001.fastq", "Reverse FASTQ file names look ok."),
    )
    for pattern, ok_message in checks:
        for fastqname in glob.glob(pattern):
            parts = fastqname.split("_")
            if len(parts) > 5:
                print("Underscore in fastq file name detected!")
                print(parts)
                print("Exiting.")
                sys.exit()
        print(ok_message)
        print("")
# Define fcn to check if fastq files are in directory and there is the same number of them as sample sheet says.
def CheckForFwdFastqs(sheet):
    """Exit unless the count of *_R1_001.fastq files equals the line count of *sheet*.

    :param sheet: path of the sheet whose lines should match the FASTQ files
    """
    fastqfiles = glob.glob("*_R1_001.fastq")
    # NOTE(review): this handle is never explicitly closed.
    count = len(open(sheet, 'r').readlines())
    if count == len(fastqfiles):
        print("Number of forward fastq files in directory matches number of lines to be written.")
        print("")
    else:
        print("Number of forward fastq files in directory differs from number listed on sample sheet!")
        print("Make sure all files are present, and names match (and no special characters in names).")
        print("Exiting")
        sys.exit()
# Define fcn to create input2 file.
def createInput2file():
    """Write input2.tsv mapping each sample core name to its forward FASTQ file."""
    with open('input2.tsv', 'w') as handle:
        for fastq in glob.glob("*_R1_001.fastq"):
            core = fastq.split('_')[0]
            handle.write("{}\t{}\n".format(core, fastq))
    print("File input2.tsv successfully created.")
    print("")
# Incorporating bash commands from joining.sh script.
# Using the shell way for now--this script is for local use only.
# Define a function to sort the two input files, and then join them into a new file.
def sortAndJoin():
    """Sort input1.tsv and input2.tsv by corename, then join them into joined.tsv.

    Shells out to coreutils sort/join; check=True raises CalledProcessError
    on any non-zero exit.
    """
    # Sorting
    process = subprocess.run('sort -k1,1 input1.tsv > input1sorted.tsv', shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    process = subprocess.run('sort -k1,1 input2.tsv > input2sorted.tsv', shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Joining
    process = subprocess.run('join -j1 input2sorted.tsv input1sorted.tsv > joined.tsv', shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Define a function to cut the joined file, keeping only the wanted columns for samplesheet.
def cutColumns():
    """Project joined.tsv onto columns 2, 4, 5, 6, 7 (1-based) and append
    them, tab-separated, to samplesheet.tsv.

    Improvements over the original: each line is split once instead of five
    times, and both files are opened exactly once inside a ``with`` block —
    the original reopened samplesheet.tsv in append mode for every line and
    never closed the handles.
    """
    with open('joined.tsv') as joined, open('samplesheet.tsv', 'a') as out:
        for line in joined:
            fields = line.rstrip('\n').split()
            wanted = (fields[1], fields[3], fields[4], fields[5], fields[6])
            print("\t".join(wanted), file=out)
# Define a fcn to replace raw fastq file endings with merged file endings, and write the final output file.
def replaceEndingsAndWriteFinal():
    """Write samplesheet.txt from samplesheet.tsv, swapping raw-FASTQ endings.

    When args.merge_status is 'merged', each L001_R1_001.fastq suffix becomes
    L001_merged.fq; otherwise lines are copied verbatim (forward-only sheet).
    """
    with open('samplesheet.tsv', 'rt') as spltsv:
        with open('samplesheet.txt', 'wt') as spltxt:
            for line in spltsv:
                if args.merge_status == 'merged':
                    spltxt.write(line.replace('L001_R1_001.fastq','L001_merged.fq'))
                else:
                    spltxt.write(line)
                    # NOTE(review): this warning fires once per line, not
                    # once per run.
                    warnings.warn("Creating forward-only version of sheet.\n")
    print("File samplesheet.txt successfully created.")
    print("")
def removeIntFiles():
    """Delete the intermediate TSV files the pipeline leaves behind."""
    print("Removing intermediate files.")
    print("")
    intermediates = ('input1.tsv', 'input2.tsv', 'input1sorted.tsv',
                     'input2sorted.tsv', 'joined.tsv', 'samplesheet.tsv')
    for leftover in intermediates:
        process = subprocess.Popen(['rm', leftover])
def main():
    """Run the full pipeline: validate names, build inputs, join, cut, and emit samplesheet.txt."""
    checkFASTQnamesForUsc()
    # NOTE(review): passes raw sys.argv[1] rather than
    # args.ModifiedSampleSheetCsv_name[0] from the argparse block above.
    createInput1file(sys.argv[1])
    createInput2file()
    sortAndJoin()
    cutColumns()
    CheckForFwdFastqs('samplesheet.tsv')
    replaceEndingsAndWriteFinal()
    removeIntFiles()
    print("Finished.")
if __name__ == '__main__':
    main()
|
[
"lv70xo@gmail.com"
] |
lv70xo@gmail.com
|
5f622590e4e53e09f5768da616268d8c3746c578
|
f6bc15034ee1809473279c87e13cc3131bc3675c
|
/groups/sitemaps.py
|
7b9b9770de392bf4b099281e0f54729bd242d6d5
|
[
"MIT"
] |
permissive
|
mangadventure/MangAdventure
|
d92e4c184d1ad91983cf650aa7fa584ba9b977ce
|
e9da91d0309eacca9fbac8ef72356fe35407b795
|
refs/heads/master
| 2023-07-20T04:54:49.215457
| 2023-07-14T15:34:20
| 2023-07-14T15:34:20
| 144,012,269
| 70
| 16
|
MIT
| 2022-08-13T12:22:39
| 2018-08-08T12:43:19
|
Python
|
UTF-8
|
Python
| false
| false
| 513
|
py
|
"""Sitemaps for the groups app."""
from typing import Iterable
from django.contrib.sitemaps import Sitemap
from .models import Group
class GroupSitemap(Sitemap):
    """Sitemap for groups."""
    #: The priority of the sitemap.
    priority = 0.4
    def items(self) -> Iterable[Group]:
        """
        Get an iterable of the sitemap's items.

        :return: An iterable of ``Group`` objects.
        """
        # Fetch only the columns needed and order by name for a stable,
        # deterministic sitemap.
        return Group.objects.only('name', 'logo').order_by('name')
__all__ = ['GroupSitemap']
|
[
"chronobserver@disroot.org"
] |
chronobserver@disroot.org
|
6214043f87f5759242026a504acb8c2d6b0353a1
|
7d1e05eb93bc1caaf79dc4063e24a9e7de41ed33
|
/hollow triangle.py
|
cdbfb90355bb7404dd824eaa98897a38a3b1a417
|
[] |
no_license
|
katyaryabova/HOMETASK5
|
a9d36ff4d71b4ea7e8ec1478dfd7d846a80cfeb2
|
7fc7d4297aff65a89ed37a3b33ed7a5f7fbdcefc
|
refs/heads/master
| 2022-11-25T17:58:31.751869
| 2020-07-31T15:13:06
| 2020-07-31T15:13:06
| 279,795,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
# Print a hollow star triangle of size n read from stdin: slanted side rows
# (one star on the apex row where i == 1, two stars per row after that),
# closed by a solid base of 2*n-3 space-separated stars.
n = int(input("enter: "))
for i in range(1, n - 1):
    # Left star at the slant position; the second star is suppressed on the
    # apex row via the boolean factor (i != 1).
    print(' ' * (2 * n - 2 * i - 2) + "*" + ' ' * (4 * i - 5) + "*" * (i != 1))
print('* ' * (2 * n - 3))
|
[
"ts-kateryna.riabova@rakuten.com"
] |
ts-kateryna.riabova@rakuten.com
|
89bcacedd592efcd1eb8e5a4543bafd28d850d84
|
e78635b2fd4bc2df1671214ea24c03a2be91a503
|
/LeetcodeNew/python/LC_416.py
|
7be06eabf28020389ccc22eceb0065e450083b99
|
[] |
no_license
|
arnabs542/OptimizedLeetcode
|
56ec60232ddb7b4e911aeb137a365c12092da16d
|
ab013103b9b28327e3b5a8af0c408606d1e04a24
|
refs/heads/master
| 2023-03-14T05:57:06.070185
| 2021-02-28T03:01:14
| 2021-02-28T03:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,540
|
py
|
"""
416. Partition Equal Subset Sum
473. Matchsticks to Square
698. Partition to K Equal Sum Subsets
996. Number of Squareful Arrays
"""
"""
Given a non-empty array containing only positive integers, find if the array can be partitioned into two subsets such that the sum of elements in both subsets is equal.
Note:
Each of the array element will not exceed 100.
The array size will not exceed 200.
Example 1:
Input: [1, 5, 11, 5]
Output: true
Explanation: The array can be partitioned as [1, 5, 5] and [11].
Example 2:
Input: [1, 2, 3, 5]
Output: false
Explanation: The array cannot be partitioned into equal sum subsets.
https://github.com/wisdompeak/LeetCode/tree/master/DFS/698.Partition-to-K-Equal-Sum-Subsets
"""
import copy
class SolutionTony:
    """Top-down DFS with memoization keyed on (partial sum, start index)."""

    def canPartition(self, nums) -> bool:
        """Return True iff nums splits into two subsets of equal sum."""
        total = sum(nums)
        # An odd total can never split into two equal halves.
        if total % 2 != 0:
            return False
        nums.sort()
        return self.dfs(nums, 0, 0, total // 2, {})

    def dfs(self, nums, idx, summ, target, memo):
        """True iff some subset of nums[idx:] extends *summ* to *target*."""
        key = (summ, idx)
        if key in memo:
            return memo[key]
        if summ == target:
            return True
        if summ > target:
            return False
        found = False
        for i in range(idx, len(nums)):
            if self.dfs(nums, i + 1, summ + nums[i], target, memo):
                found = True
                break
        memo[key] = found
        return found
class Solution:
    """Backtracking solution for Partition Equal Subset Sum.

    BUG FIX: the original marked ``visited[i] = True`` *before* the
    duplicate-skip check and then ``continue``d without resetting it, so the
    flag leaked and the element became permanently unusable in sibling and
    ancestor branches.  It also recursed with ``index + 1`` instead of
    ``i + 1``, turning the combination search into a permutation search.
    The rewrite uses the canonical sorted-combination DFS; the public
    signatures of canPartition and dfs are unchanged.
    """

    def canPartition(self, nums) -> bool:
        """Return True iff nums can be split into two equal-sum subsets."""
        target = sum(nums)
        if target % 2:
            return False
        target //= 2
        # Sorting makes equal values adjacent so the duplicate prune below
        # is valid.
        nums.sort()
        visited = [False] * (len(nums))  # kept for signature compatibility
        return self.dfs(nums, visited, 0, 0, target)

    def dfs(self, nums, visited, index, summ, target):
        """True iff some subset of nums[index:] grows *summ* to *target*.

        *visited* is retained for backward compatibility but unused: choosing
        combinations via ``i + 1`` already prevents element reuse.
        """
        if summ == target:
            return True
        if summ > target:
            return False
        for i in range(index, len(nums)):
            # Skip a duplicate value at the same tree depth: the branch that
            # picked nums[i - 1] here already covered identical subsets.
            if i > index and nums[i] == nums[i - 1]:
                continue
            if self.dfs(nums, visited, i + 1, summ + nums[i], target):
                return True
        return False
"""
(100110) subset sum == summ // 2
s = subset sum
dp[s] : whether we can find a subset whose sum equals to s
0 ~ 2e4
dp[s_small] -> dp[s_large]
for num in nums:
for s in range(summ//2+1):
if dp[s-num] == True:
dp[s] = True
"""
class SolutionWisdom:
    """0/1-knapsack DP that snapshots the previous row before each update."""

    def canPartition(self, nums) -> bool:
        """Return True iff nums splits into two subsets of equal sum."""
        total = sum(nums)
        if total % 2 == 1:
            return False
        half = total // 2
        # reachable[s] is True iff some subset of the numbers processed so
        # far sums to exactly s.
        reachable = [False] * (half + 1)
        reachable[0] = True
        for num in nums:
            # Snapshot so each number is used at most once per subset.
            previous = list(reachable)
            for s in range(num, half + 1):
                reachable[s] = reachable[s] or previous[s - num]
        return reachable[-1]
class SolutionReversed:
    """0/1-knapsack DP; iterating sums downward removes the row copy."""

    def canPartition(self, nums) -> bool:
        """Return True iff nums splits into two subsets of equal sum."""
        total = sum(nums)
        if total % 2 == 1:
            return False
        half = total // 2
        reachable = [False] * (half + 1)
        reachable[0] = True
        for num in nums:
            # Descending order: reachable[s - num] still holds the value
            # from before this number, so one array suffices.
            for s in range(half, num - 1, -1):
                if reachable[s - num]:
                    reachable[s] = True
        return reachable[-1]
# Quick smoke test: [1, 5, 11, 5] splits into [1, 5, 5] and [11] -> True.
nums = [1, 5, 11, 5]
a = Solution()
print(a.canPartition(nums))
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
1b73f94b2be025d3496a8ab70620a8857819ffc4
|
2c2a805d43af773b7e31c7a54d1c2e8c8108801d
|
/test/test_db.py
|
e59b96200a87f7f7ac3be43912e2baf04d7a91bb
|
[] |
no_license
|
atiaxi/chromabot2
|
2dbb2beefcc67f2b324ae98886f4815205830653
|
f8f9e92090017bbbaf87042f4cdc5f5f10e54c25
|
refs/heads/master
| 2020-05-21T13:33:18.565653
| 2017-03-17T17:05:14
| 2017-03-17T17:05:14
| 62,596,154
| 0
| 0
| null | 2017-03-17T17:02:48
| 2016-07-05T01:13:29
|
Python
|
UTF-8
|
Python
| false
| false
| 9,520
|
py
|
from chromabot2.models import User
from chromabot2.battle import (
Battle,
Troop,
OccupiedException,
OutOfBoundsException,
BattleEndedException,
BattleNotStartedException,
)
from chromabot2.utils import now
from test.common import ChromaTest
# These tests are for the raw functionality of db.py - unit tests, mostly.
# Integration tests will be elsewhere (test_battle.py for battle related
# items, and test_play.py for most everything else)
class TestUser(ChromaTest):
    """Unit tests for User creation and the User <-> Troop relationship."""

    def test_create_user(self):
        someone = User.create(self.db, name="someone", team=0, leader=True)
        some_id = someone.id
        with self.db.new_session() as s:
            # Re-query in a fresh session to prove the row was persisted.
            fetched = s.query(User).filter_by(name="someone").first()
            self.assertTrue(fetched)
            self.assertEqual(fetched.id, some_id)
            # TODO: Check to see if the user is in that team's capital
            # Make sure it assigned troops
            self.assertTrue(fetched.troops)
            self.assertEqual(3, len(fetched.troops))
            # A new user must own exactly one troop of each of the three types.
            counts = {'infantry': 0, 'cavalry': 0, 'ranged': 0}
            for troop in fetched.troops:
                self.assertIn(troop.type, counts)
                counts[troop.type] += 1
            for key in counts.keys():
                self.assertEqual(1, counts[key])

    # def test_create_user_other_team(self):
    #     # someone = User.create(self.db, name="someone", team=1, leader=True)
    #     self.skipTest("No regions yet")
    #     # TODO: Check to see if the user is in the other team's capital

    def test_troops(self):
        # A troop added with owner=alice must appear in alice.troops.
        num_troops = len(self.alice.troops)
        troop = Troop(owner=self.alice, hp=1, type="test")
        with self.db.session() as s:
            s.add(troop)
        self.assertEqual(num_troops + 1, len(self.alice.troops))
        self.assertIn(troop, self.alice.troops)
class TestTroop(ChromaTest):
    """Unit tests for Troop: persistence, convenience constructors,
    battle linkage, and the type-vs-type combat results."""

    def test_owner(self):
        troop = Troop(owner=self.alice, hp=1, type="test")
        with self.db.session() as s:
            s.add(troop)
        with self.db.new_session() as s:
            # Fetch in a fresh session so we read what was actually stored.
            fetched = s.query(Troop).\
                filter_by(type="test").first()
            self.assertTrue(fetched)
            self.assertEqual(fetched.type, troop.type)
            self.assertEqual(fetched.id, troop.id)
            self.assertEqual(fetched.owner.id, self.alice.id)

    def test_boilerplate(self):
        # alice already owns one infantry from user creation (see
        # TestUser.test_create_user), so the count becomes 2.
        Troop.infantry(self.alice)
        with self.db.new_session() as s:
            fetched = s.query(Troop). \
                filter_by(type="infantry"). \
                filter_by(owner_id=self.alice.id).count()
            self.assertEqual(2, fetched)

    def test_boilerplate_cavalry(self):
        Troop.cavalry(self.alice)
        with self.db.new_session() as s:
            fetched = s.query(Troop). \
                filter_by(type="cavalry"). \
                filter_by(owner_id=self.alice.id).count()
            self.assertEqual(2, fetched)

    def test_boilerplate_ranged(self):
        Troop.ranged(self.alice)
        with self.db.new_session() as s:
            fetched = s.query(Troop). \
                filter_by(type="ranged"). \
                filter_by(owner_id=self.alice.id).count()
            self.assertEqual(2, fetched)

    def test_no_battle(self):
        troop = Troop(owner=self.alice, hp=1, type="test")
        with self.db.session() as s:
            s.add(troop)
        # By default, this troop shouldn't have a battle assigned to it
        self.assertFalse(troop.battle)

    def test_circ(self):
        # fights() returns 1 for a win, -1 for a loss, 0 for a mirror match:
        # cavalry beats infantry, infantry beats ranged, ranged beats cavalry.
        # This is going to get exponential real quick but OH WELL
        cavalry = Troop.cavalry(self.alice)
        infantry = Troop.infantry(self.alice)
        ranged = Troop.ranged(self.alice)
        with self.subTest("Cavalry"):
            with self.subTest("CI"):
                self.assertEqual(cavalry.fights(infantry), 1)
            with self.subTest("RC"):
                self.assertEqual(cavalry.fights(ranged), -1)
            with self.subTest("CC"):
                self.assertEqual(cavalry.fights(cavalry), 0)
        with self.subTest("Infantry"):
            with self.subTest("IR"):
                self.assertEqual(infantry.fights(ranged), 1)
            with self.subTest("CI"):
                self.assertEqual(infantry.fights(cavalry), -1)
            with self.subTest("II"):
                self.assertEqual(infantry.fights(infantry), 0)
        with self.subTest("Ranged"):
            with self.subTest("RC"):
                self.assertEqual(ranged.fights(cavalry), 1)
            with self.subTest("IR"):
                self.assertEqual(ranged.fights(infantry), -1)
            with self.subTest("RR"):
                self.assertEqual(ranged.fights(ranged), 0)
class TestBattle(ChromaTest):
    """Unit tests for Battle: creation, troop-placement rules and lifecycle.

    Board layout implied by the cases below: side zero (alice) places in
    columns 0-4, side one (bob) in columns 6-10, and column 5 is a DMZ
    forbidden to both.  Fixtures (self.battle, self.outside, alice, bob,
    end_battle) come from ChromaTest.
    """

    def test_create(self):
        with self.db.session() as s:
            num_battles = s.query(Battle).count()
        battle = Battle.create(self.outside)
        self.assertTrue(battle)
        with self.db.new_session() as s:
            now_battles = s.query(Battle).count()
        self.assertEqual(num_battles+1, now_battles)

    def test_place_troop(self):
        troop = self.alice.troops[0]
        self.assertFalse(troop.battle)
        self.assertEqual(len(self.battle.troops), 0)
        self.battle.place_troop(troop, col=1, row=2, outside=self.outside)
        # Placement must link troop and battle and record the coordinates.
        self.assertTrue(troop.battle)
        self.assertEqual(len(self.battle.troops), 1)
        self.assertEqual(self.battle, troop.battle)
        self.assertEqual(troop.row, 2)
        self.assertEqual(troop.col, 1)
        # Hopefully this won't take 10 minutes to run
        self.assertAlmostEqual(now(), troop.last_move, delta=600)
        board = self.battle.realize_board()
        self.assertEqual(board[troop.row][troop.col], troop)

    def test_no_place_troop_in_occupied_space(self):
        troop = self.alice.troops[0]
        self.assertEqual(len(self.battle.troops), 0)
        self.battle.place_troop(troop, col=1, row=2, outside=self.outside)
        self.assertEqual(len(self.battle.troops), 1)
        with self.assertRaises(OccupiedException):
            troop = self.alice.troops[1]
            self.battle.place_troop(troop, col=1, row=2, outside=self.outside)
        # The failed placement must not have joined the battle.
        self.assertEqual(len(self.battle.troops), 1)
        self.assertFalse(self.alice.troops[1].battle)

    def test_place_troop_on_right_side_zero(self):
        # Side zero may occupy every column in 0-4.
        for col in range(0, 5):
            troop = self.alice.troops[0]
            self.battle.place_troop(troop, col=col, row=2, outside=self.outside)
            self.assertEqual(troop.col, col)

    def test_place_troop_on_right_side_one(self):
        # Side one may occupy every column in 6-10.
        for col in range(6, 11):
            troop = self.bob.troops[0]
            self.battle.place_troop(troop, col=col, row=2, outside=self.outside)
            self.assertEqual(troop.col, col)

    def test_no_place_troop_on_wrong_side_zero(self):
        troop = self.alice.troops[0]
        self.assertEqual(len(self.battle.troops), 0)
        with self.assertRaises(OutOfBoundsException):
            self.battle.place_troop(troop, col=9, row=2, outside=self.outside)
        self.assertEqual(len(self.battle.troops), 0)
        self.assertFalse(self.alice.troops[0].battle)

    def test_no_place_troop_on_wrong_side_one(self):
        troop = self.bob.troops[0]
        self.assertEqual(len(self.battle.troops), 0)
        with self.assertRaises(OutOfBoundsException):
            self.battle.place_troop(troop, col=1, row=2, outside=self.outside)
        self.assertEqual(len(self.battle.troops), 0)
        self.assertFalse(self.bob.troops[0].battle)

    def test_no_place_troop_in_dmz(self):
        # Column 5 is forbidden to both sides.
        with self.assertRaises(OutOfBoundsException):
            troop = self.alice.troops[0]
            self.battle.place_troop(troop, col=5, row=2, outside=self.outside)
        with self.assertRaises(OutOfBoundsException):
            troop = self.bob.troops[0]
            self.battle.place_troop(troop, col=5, row=2, outside=self.outside)

    def test_no_place_troop_off_board_row(self):
        troop = self.alice.troops[0]
        self.assertEqual(len(self.battle.troops), 0)
        with self.assertRaises(OutOfBoundsException):
            self.battle.place_troop(troop, col=1, row=100000,
                                    outside=self.outside)
        self.assertEqual(len(self.battle.troops), 0)
        self.assertFalse(self.alice.troops[0].battle)

    def test_no_place_troop_off_board_col(self):
        troop = self.alice.troops[0]
        self.assertEqual(len(self.battle.troops), 0)
        with self.assertRaises(OutOfBoundsException):
            self.battle.place_troop(troop, col=-5000, row=1,
                                    outside=self.outside)
        self.assertEqual(len(self.battle.troops), 0)
        self.assertFalse(self.alice.troops[0].battle)

    def test_no_fight_ended_battle(self):
        """Can't fight in a battle that's over"""
        self.end_battle()
        troop = self.alice.troops[0]
        with self.assertRaises(BattleEndedException):
            self.battle.place_troop(troop, col=1, row=2, outside=self.outside)

    def test_no_fight_early_battle(self):
        # A freshly created battle is inactive until it starts.
        with self.outside.db.session():
            battle2 = Battle.create(self.outside)
        troop = self.alice.troops[0]
        self.assertFalse(battle2.active)
        with self.assertRaises(BattleNotStartedException):
            battle2.place_troop(troop, col=1, row=2, outside=self.outside)
|
[
"atiaxi@gmail.com"
] |
atiaxi@gmail.com
|
85a63dd438a1f43ed8098ae413e642c923205ea1
|
cb1f34abdbf0ba8dfb87489e8e97bf5d6104e641
|
/previous_work/src/common/ashttpcommon.py
|
24176da250ea5d3ba8b64a9ca487bd690d328e78
|
[] |
no_license
|
yuliang-leon/exp
|
7b8522fc464f1ac9c42a672f72f3e85451912cca
|
889b48f374783921f6b8ea1013dad13e6118d587
|
refs/heads/master
| 2021-01-21T12:36:20.799056
| 2014-04-18T19:14:13
| 2014-04-18T19:14:13
| 18,921,241
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,120
|
py
|
__copyright__ = 'Copyright 2011-2014, Yuliang Wang(yuliang.leon@gmail.com).'
from ascommon import FileStore, MemStore, StoreException, rand_string, shortReadableName
from asaes import AsAES
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urlparse import urlparse, urljoin
import urllib
from datetime import datetime, timedelta
from Cookie import SimpleCookie
import time
import random
import unittest
import cgitb
import sys
def GetCacheExpires(days_from_now):
    """Return a cookie-style expiry timestamp *days_from_now* days ahead.

    NOTE(review): the value is local time and carries no "GMT" suffix,
    while HTTP cookie Expires dates are conventionally GMT -- confirm
    this is intended.
    """
    when = datetime.now() + timedelta(days=days_from_now)
    return when.strftime('%a, %d %b %Y %H:%M:%S')
def AddToParams(query, params):
    # Parse an application/x-www-form-urlencoded string ("a=1&b=2") into the
    # *params* dict in place.  Python 2 only: urllib.unquote_plus and the
    # print statement below.
    for param in query.strip().split('&'):
        items = param.split('=')
        if len(items) == 2:
            # Both key and value are plus/percent-decoded before storing.
            params[urllib.unquote_plus(items[0])] = urllib.unquote_plus(items[1])
        else:
            # Malformed pair (zero or several '='): logged and skipped.
            print items
# Token
import base64
class InvalidToken(Exception):
    """Raised when a token fails decryption, layout, salt or expiry checks."""

    def __init__(self, value='invalid token'):
        # Keep the reason on the instance so callers can inspect it.
        self.value = value

    def __str__(self):
        return self.value
def CreateToken(userid, passwd, root_passwd, timeout = 60*60*24):
    """Build an encrypted, urlsafe-base64 token carrying userid and passwd.

    The payload is five newline-joined fields: userid, passwd, expiry epoch,
    a fixed salt, and 16 random characters.  Applications that do not need a
    password may pass a fixed placeholder.  Raises InvalidToken when a field
    itself contains a newline, since that would corrupt the layout.
    """
    fields = [
        userid,
        passwd,
        str(time.time() + timeout),
        '123456789',       # fixed salt, re-checked by ParseToken
        rand_string(16),   # nonce: equal inputs still yield distinct tokens
    ]
    message = '\n'.join(fields)
    # Round-trip check: any embedded newline breaks the five-field layout.
    if message.split('\n') != fields:
        raise InvalidToken('input include \\n')
    ciphered = AsAES(root_passwd).encrypt(message)
    return base64.urlsafe_b64encode(ciphered)
def ParseToken(token, root_passwd):
    """Decrypt *token* and return (userid, passwd).

    Raises InvalidToken when the token cannot be decrypted, does not have
    exactly five fields, carries the wrong salt, or has already expired.
    """
    raw = base64.urlsafe_b64decode(token)
    try:
        message = AsAES(root_passwd).decrypt(raw)
    except Exception:
        raise InvalidToken('Can not decrypt token')
    fields = message.split('\n')
    if len(fields) != 5:
        raise InvalidToken('lines is %d'%len(fields))
    userid = fields[0]
    passwd = fields[1]
    expiration = fields[2]
    # Field 4 is the random nonce; only the salt needs validating.
    if fields[3] != '123456789':
        raise InvalidToken('salt invalid')
    if time.time() >= float(expiration):
        raise InvalidToken('token expired')
    return (userid, passwd)
class CheckToken(unittest.TestCase):
    """Tests for CreateToken/ParseToken round-trips and expiry handling."""

    def testTokenParse(self):
        token = CreateToken('test', 'test-passwd', 'root-passwd', 60 * 30)
        # total invalid token
        self.assertRaises(InvalidToken, ParseToken, 'af89\n\t0128$%!',
                          'root-passwd')
        # wrong rootpasswd can not parse
        self.assertRaises(InvalidToken, ParseToken, token, 'root-passwd-2')
        # valid parse
        self.assertEquals(('test', 'test-passwd'), ParseToken(token, 'root-passwd'))

    def testTokenExpire(self):
        # A zero-timeout token must already be expired a moment later.
        token = CreateToken('test', 'test-passwd', 'root-passwd', 0)
        time.sleep(0.1)
        self.assertRaises(InvalidToken, ParseToken, token, 'root-passwd')
def userPassValidInFile(userid, userpass, store = FileStore(),
        dir = '/home/ec2-user/src/superpass/data'):
    """ check user input (userid, userpass) match we stored in file
    if file not exist, create one."""
    # NOTE(review): `store = FileStore()` is evaluated once at import time,
    # so every call without an explicit store shares a single FileStore
    # instance (and importing the module constructs it) -- confirm intended.
    storekey = '%s/%s.key'%(dir, shortReadableName(userid))
    # The user id encrypted with the user's own password is the stored
    # verifier: only the same password reproduces the same ciphertext.
    user_encoded = AsAES(userpass).encrypt(userid)
    try:
        saved_text = store.read(storekey)
    except StoreException:
        # First sight of this user: persist the verifier and accept.
        store.write(storekey, user_encoded)
        return True
    else:
        return user_encoded == saved_text
class CheckUserPassValid(unittest.TestCase):
    """Tests for userPassValidInFile using an in-memory store."""

    def testReadWrite(self):
        store = MemStore()
        # create 3 entry
        self.assertTrue(userPassValidInFile('test', 'test-pass', store))
        self.assertTrue(userPassValidInFile('test2', 'test-pass2', store))
        self.assertTrue(userPassValidInFile('test3', 'test-pass', store))
        # same user, different passwd
        self.assertFalse(userPassValidInFile('test', 'test-pass2', store))
        self.assertFalse(userPassValidInFile('test2', 'test-pass', store))
        self.assertFalse(userPassValidInFile('test3', 'test-pass2', store))
        # same user, same passwd
        self.assertTrue(userPassValidInFile('test', 'test-pass', store))
        self.assertTrue(userPassValidInFile('test2', 'test-pass2', store))
        self.assertTrue(userPassValidInFile('test3', 'test-pass', store))
        # different user, same or different passwd
        self.assertTrue(userPassValidInFile('test4', 'test-pass', store))
        self.assertTrue(userPassValidInFile('test5', 'test-pass5', store))
# HTTP handlers
# TODO. instead of inherit, consider composite.
class DispatchHTTPRequestHandler(BaseHTTPRequestHandler):
    """ dispatch according to root path.
    call handle_login('/login', {user:test}) for '/login?user=test'
    call handle_verify('/verify/other/path', {user:x}) for '/verify/other/path?user=x'
    call handle_('/', {}) for '/'
    call handle_('', {}) for ''
    call handleErrorMatch for any unmatched path
    call handleException for unknown exceptions

    Python 2 code (print statements, headers.getheader, itervalues).
    Cookies written via writeCookie/clearCookie are cached per request and
    flushed as Set-Cookie headers by sendResponse.
    """

    def do_GET(self):
        # Fresh cookie cache per request; dispatch with no POST body.
        self.__cached_cookie = {}
        self.__handleRequest()

    def do_POST(self):
        self.__cached_cookie = {}
        clen = self.headers.getheader('content-length')
        if clen:
            clen = int(clen)
        else:
            print 'Invalid content length'
            return
        print 'Got post request for %d length'%clen
        params = {}
        if clen > 0:
            # Body is treated as a urlencoded form and merged into params.
            AddToParams(self.rfile.read(clen), params)
        self.__handleRequest(params)

    def __handleRequest(self, post_params = {}):
        # Route /root/rest?query to self.handle_<root>; fall back to
        # handleErrorMatch (404) and wrap unexpected errors in
        # handleException (500).
        path = ''
        params = {}
        try:
            parsed_url = urlparse(self.path)
            AddToParams(parsed_url.query, params)
            path = parsed_url.path
            items = path.strip('/').split('/')
            root_path = ''
            if len(items) > 0:
                root_path = items[0]
            func = getattr(self, 'handle_%s'%root_path, self.handleErrorMatch)
            if func != self.handleErrorMatch:
                # Hook point for subclasses (auth etc.); False aborts.
                if not self.handleValidRequestBegin(path, params, post_params):
                    return
            func(path, params, post_params)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            print 'Exception happend ', e
            self.handleException(path, params, post_params)

    def handleValidRequestBegin(self, path, params, post_params):
        # Default: accept every matched request; subclasses may override.
        return True

    def handleErrorMatch(self, path, params, post_params):
        # 404 for paths with no handle_<root> method.
        self.sendResponse(404)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        msg = 'unknow path %s'%path
        self.wfile.write(msg)

    def handleException(self, path, params, post_params):
        # 500 with a cgitb-rendered traceback page.
        self.sendResponse(500)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        msg = cgitb.html(sys.exc_info(), context=10)
        print msg
        # TODO. Not display the error message.
        self.wfile.write(msg)

    def redirect(self, redirect_url):
        print 'Redirect to', redirect_url
        self.sendResponse(302)
        self.send_header('Location', redirect_url)
        #self.writeUserHeader()
        self.end_headers()

    def redirectWithTimeout(self, redirect_url, message, timeout = 1):
        # 200 page showing *message*, then a meta-refresh redirect after
        # *timeout* seconds.  Uses send_response directly, so cached cookies
        # are NOT flushed here.
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(
            """<html><head><meta HTTP-EQUIV="REFRESH" content="%i; url=%s"/>
            </head><body>%s</body></html>"""%(timeout, redirect_url, message))

    def readCookie(self, name):
        # Return the named cookie's value from the request, or None.
        cookie_str = self.headers.get('Cookie')
        print 'cookie:', cookie_str
        if cookie_str:
            c = SimpleCookie(cookie_str)
            cookie_morsel = c.get(name, None)
            if cookie_morsel is not None:
                return cookie_morsel.value
        return None

    def writeCookie(self, name, value, path = '/', days_from_now = 30):
        """ cache the cookie set until response sent.
        """
        c = SimpleCookie()
        c[name] = value
        c[name]['path'] = path
        c[name]['expires'] = GetCacheExpires(days_from_now)
        self.__cached_cookie[name] = c.output(header='')

    def clearCookie(self, name):
        """ cache the cookie set until response sent.
        """
        # Empty value + expires=0 makes the browser drop the cookie.
        c = SimpleCookie()
        c[name] = ''
        c[name]['path'] = '/'
        c[name]['expires'] = 0
        self.__cached_cookie[name] = c.output(header='')

    def sendResponse(self, response_code):
        """ Flush cached Set-Cookie headers right after the status line;
        handlers should call sendResponse instead of send_response.
        """
        # TODO. do we need to overwrite send_response?
        self.send_response(response_code)
        print 'send response %d'%response_code
        for v in self.__cached_cookie.itervalues():
            self.send_header('Set-Cookie', v)
            print 'set cookie %s'%v
class SessionRequestHandler(DispatchHTTPRequestHandler):
    """ Force a session exist when handling http request.

    The session is a plain dict cached on the handler instance and keyed by
    a random 'sid' cookie; it is created lazily on first access.
    """

    def getSession(self, name):
        """Return *name* from the session, or None when absent."""
        session = self.getRawSession()
        if name in session:
            return session[name]
        else:
            return None

    def setSession(self, name, value):
        """Store *value* under *name* in the (lazily created) session."""
        session = self.getRawSession()
        session[name] = value

    def getRawSession(self):
        """Return the session dict, creating it and its sid cookie on demand."""
        try:
            session = self.session
        except AttributeError:
            # First access: no self.session attribute yet.  (Previously a
            # broad `except Exception`, which would also swallow unrelated
            # errors raised while reading/writing cookies below.)
            # Get sid from cookie.
            sid = self.readCookie('sid')
            if sid is None:
                sid = rand_string(16)
                self.writeCookie('sid', sid)
            # TODO. create a SessionServer and try read session back from server.
            session = {}
            session['id'] = sid
            self.session = session
        return session
if __name__ == '__main__':
    # Run the CheckToken / CheckUserPassValid unit tests when executed directly.
    unittest.main()
|
[
"zzw.neo@gmail.com"
] |
zzw.neo@gmail.com
|
a2453feb6f860718515b6a6f5efd92d78ea28887
|
649c0fe730956d971a146d098f29a37c7d573b4a
|
/src/factory_map.py
|
e88c52219ea441a6b315b2e358d7999d7150e917
|
[] |
no_license
|
sergiooncode/robotic-mowers
|
afb89950a8cb1fd25b185c184212598bc9a25932
|
9074a841ef3e97fefe45598187d352e7f9bbbecf
|
refs/heads/main
| 2023-08-24T20:09:30.648925
| 2021-10-05T07:50:06
| 2021-10-05T07:50:06
| 412,827,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
from src.command.factory_command import FactoryCommand
from src.mower_situation import MowerSituation
class FactoryMap:
    """Holds the current MowerSituation and applies commands to evolve it."""

    def __init__(self, mower_situation: MowerSituation):
        self.__current = mower_situation

    def execute(self, command: FactoryCommand):
        # Each command maps the current situation to the next one.
        self.__current = command.execute(self.__current)

    @property
    def mower_situation(self):
        """The most recent situation produced by execute()."""
        return self.__current
|
[
"sperez4mba@gmail.com.com"
] |
sperez4mba@gmail.com.com
|
9f416dcedf41c76ae4b39f70d354f501bd9d1ee1
|
424ff46affab21d96fd6e10b5dab2344c882a9ef
|
/tests/test_accommodation.py
|
23f32dfb4e90dfea52c8956093956944320de224
|
[] |
no_license
|
simon446/apaato
|
56caea18ff1e6674ce82876a763171d6424d11f4
|
8226b9a49d247121a911bd1744bd329e47966648
|
refs/heads/master
| 2022-11-16T00:10:20.377366
| 2020-07-12T05:32:49
| 2020-07-12T05:32:49
| 278,954,644
| 0
| 0
| null | 2020-07-11T22:57:05
| 2020-07-11T22:57:05
| null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
# test_accommodation.py
import unittest
from apaato.accommodation import Accommodation
class TestAccommodation(unittest.TestCase):
    """ Tests for the accommodation module """

    def setUp(self):
        """ Creates accommodation """
        # Queue points are stored in descending order; position is 1-based.
        self.accommodation = Accommodation(queue_points_list=[2000, 1000, 200, 20, 2])

    def test_position_in_queue(self):
        """ Tests the position_in_queue() function of accommodation"""
        # First in queue
        self.assertEqual(self.accommodation.position_in_queue(2001), 1)
        # Last in queue
        self.assertEqual(self.accommodation.position_in_queue(3), 5)
        # Further than last in queue
        self.assertEqual(self.accommodation.position_in_queue(1), 6)

    def test_insert_into_queue(self):
        """ Tests the insert_into_queue() function of accommodation"""
        # First in queue
        self.accommodation.insert_into_queue(2001)
        # Last in queue
        self.accommodation.insert_into_queue(21)
        # Further than last in queue
        self.accommodation.insert_into_queue(20)
        # The queue keeps five slots, so each insert pushes the tail out.
        self.assertEqual(self.accommodation.queue_points_list,
                         [2001, 2000, 1000, 200, 21])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
[
"viclo211@student.liu.se"
] |
viclo211@student.liu.se
|
1a7de1c1380a08b391a0c4b1d15c2e226a0ba98d
|
64d8618788c6239e546ad99ccff5ac2adb48e928
|
/adaFamilyTree/wsgi.py
|
48d6a94c74425aa2630c35b1f3f884a193269139
|
[] |
no_license
|
JNEdrozo/adaFamilyTree
|
fdeb8e56a6aba2097e0837a135e8c09322e869fb
|
313ac83a427c6a369909eb442040af7abba90882
|
refs/heads/master
| 2021-05-14T15:28:08.834881
| 2018-03-05T02:45:21
| 2018-03-05T02:45:21
| 115,992,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
"""
WSGI config for adaFamilyTree project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os

from dotenv import load_dotenv

# BUG FIX: the original used str.join(os.path.dirname(__file__), '.env'),
# i.e. dirname.join('.env'), which interleaves the directory name between
# the characters '.', 'e', 'n', 'v' instead of building a path.  The
# intended call is os.path.join.
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
load_dotenv(dotenv_path)

# Imported after the environment is loaded so Django sees the .env values.
from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "adaFamilyTree.settings")
application = get_wsgi_application()
|
[
"jedrozo@gmail.com"
] |
jedrozo@gmail.com
|
979965982b7efa1234131e8ca65ac9234f559ed4
|
3d2cdc459b582f855a7c37246648b63858d89e01
|
/Events/urls.py
|
d0c84f66c12d880dfad82f9999422bb4ca4735c0
|
[] |
no_license
|
burggrafdev88/VendorManagementPortal
|
d0bf77ce6f7a08e6d3181a024e49546b3f50175b
|
9db16ab1a4b7f0f3f06eb32eec4ac975f90b82db
|
refs/heads/master
| 2023-03-31T16:39:59.862268
| 2021-03-22T14:21:11
| 2021-03-22T14:21:11
| 307,521,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
"""VendorManagementPortal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
# URL path calls the views
urlpatterns = [
    # Scheduling page for one vendor; the vendor's id is embedded in the URL.
    path('schedule_screening/<int:vendorID>', views.schedule_screening, name="schedule_screening"),
    # AJAX endpoints (class-based views) backing the scheduling page.
    path('schedule_screening_ajax', views.schedule_screening_ajax.as_view(), name="schedule_screening_ajax"),
    path('retrieve_screening_info_ajax', views.retrieveScreeningInfoAJAX.as_view(), name='retrieve_screening_info_ajax'),
]
|
[
"burggrafdev88@gmail.com"
] |
burggrafdev88@gmail.com
|
ffc6e5981af29a5bfa77a0890c1b5811e52b11ff
|
af35f890c0c6a2fa531f47a4c2ed132e8920190d
|
/python/leetcode/745_prefix_suffix_search.py
|
37a78875c54f311925a20c6c60bf17f617d85477
|
[] |
no_license
|
Levintsky/topcoder
|
b1b17cd3fddef5a23297bcbe4e165508d09a655d
|
a5cb862f0c5a3cfd21468141800568c2dedded0a
|
refs/heads/master
| 2021-06-23T10:15:27.839199
| 2021-02-01T07:49:48
| 2021-02-01T07:49:48
| 188,175,357
| 0
| 1
| null | 2020-05-19T09:25:12
| 2019-05-23T06:33:38
|
C
|
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
"""
745. Prefix and Suffix Search (Hard)
Given many words, words[i] has weight i.
Design a class WordFilter that supports one function, WordFilter.f(String prefix, String suffix). It will return the word with given prefix and suffix with maximum weight. If no word exists, return -1.
Examples:
Input:
WordFilter(["apple"])
WordFilter.f("a", "e") // returns 0
WordFilter.f("b", "") // returns -1
Note:
words has length in range [1, 15000].
For each test case, up to words.length queries WordFilter.f may be made.
words[i] has length in range [1, 10].
prefix, suffix have lengths in range [0, 10].
words[i] and prefix, suffix queries consist of lowercase letters only.
"""
class TrieNode(object):
def __init__(self):
self.is_word = False
self.memo = [None] * 26
self.index = set()
class WordFilter(object):
def __init__(self, words):
"""
:type words: List[str]
"""
self.words = words
self.root = TrieNode()
self.root_inv = TrieNode()
for i, word in enumerate(words):
n = self.root
for j, c in enumerate(word):
n.index.add(i)
ind = ord(c) - ord('a')
if n.memo[ind] is None:
n.memo[ind] = TrieNode()
n = n.memo[ind]
if j == len(word) - 1:
n.is_word = True
n.index.add(i)
for i, word in enumerate(words):
n = self.root_inv
wordinv = word[::-1]
for j, c in enumerate(wordinv):
n.index.add(i)
ind = ord(c) - ord('a')
if n.memo[ind] is None:
n.memo[ind] = TrieNode()
n = n.memo[ind]
if j == len(word) - 1:
n.is_word = True
n.index.add(i)
def f(self, prefix, suffix):
"""
:type prefix: str
:type suffix: str
:rtype: int
"""
n = self.root
for c in prefix:
n = n.memo[ord(c) - ord('a')]
if n is None: return -1
set_pre = n.index
n = self.root_inv
for c in suffix[::-1]:
n = n.memo[ord(c) - ord('a')]
if n is None: return -1
set_post = n.index
set_ = set_pre & set_post
if set_:
return max(set_)
else:
return -1
# Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(words)
# param_1 = obj.f(prefix,suffix)
if __name__ == "__main__":
a = WordFilter(["pop"])
print(a.f("", "op"))
# print(a.f('a', 'e'))
# print(a.f('b', ''))
|
[
"zhuoyuanchen2014@u.northwestern.edu"
] |
zhuoyuanchen2014@u.northwestern.edu
|
b5a9dd0bd794792cccf4fae7dab47dec79c4d097
|
09dbb201b49bc2657ad171d457e62fa153602f23
|
/algorithm/33-如何用一个随机函数得到另外一个随机函数.py
|
5775eff4556a246ce5e681f21dc790b21334aece
|
[] |
no_license
|
llame/algorithms_exercise
|
4203fdeab8740e9430e6749f078bf8b7aa7be4eb
|
8e8f072f0d086a7b22460ac083687fe49c7d689c
|
refs/heads/master
| 2023-01-01T23:11:37.101504
| 2020-10-23T06:11:40
| 2020-10-23T06:11:40
| 163,731,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
# -*-coding:utf-8-*-
"""
@Author : llame
@Software: PyCharm
@Time : 2020/9/25 3:56 下午
"""
# Problem: given a function func1 that returns 0 or 1, each with
# probability 1/2, build a function func2 that also returns only 0 or 1,
# but returns 0 with probability 1/4 and 1 with probability 3/4.
import random
def get_random():
    """Return 0 or 1, each with probability 1/2 (rounds a uniform float)."""
    return int(round(random.random()))
def get_random_1():
    """Return 0 with probability 1/4 and 1 with probability 3/4.

    Draws two fair bits: both are 1 with probability 1/4, which maps to 0;
    every other combination maps to 1.
    """
    return 0 if get_random() + get_random() == 2 else 1
if __name__ == '__main__':
    # Monte-Carlo sanity check: over many trials the fraction of zeros
    # should approach 1/4.
    times=1000000
    zero_number=0
    i=0
    while(i<times):
        tmp=get_random_1()
        if tmp==0:
            zero_number+=1
        i+=1
    print('zero ratio:',zero_number/times)
|
[
"llameli@163.com"
] |
llameli@163.com
|
809ace0b39fd07934ef546604bba2050a9982669
|
d4ea02450749cb8db5d8d557a4c2616308b06a45
|
/students/Net_Michael/lesson7/test_html_render.py
|
4f024a2102ef72912891371551c1a6af772ad4dd
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/Self_Paced-Online
|
75421a5bdd6233379443fc310da866ebfcd049fe
|
e298b1151dab639659d8dfa56f47bcb43dd3438f
|
refs/heads/master
| 2021-06-16T15:41:07.312247
| 2019-07-17T16:02:47
| 2019-07-17T16:02:47
| 115,212,391
| 13
| 160
| null | 2019-11-13T16:07:35
| 2017-12-23T17:52:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
#!/usr/bin/env python
# coding: utf-8
import pytest
from io import StringIO
import html_render as hr
def test_element():
    """A freshly built Element wraps its text in a one-item content list."""
    elem = hr.Element("content message")
    assert elem.content == ["content message"]
def test_append():
    """append() adds new content after what the constructor stored."""
    elem = hr.Element("append message1")
    elem.append("append message2")
    assert elem.content == ["append message1", "append message2"]
def test_render():
    """render() wraps the indented content in open/close <html> tags."""
    elem = hr.Element("render text")
    elem.tag = 'html'
    out = StringIO()
    elem.render(out)
    expected = "<html>\n" + elem.indent + "render text\n</html>\n"
    assert out.getvalue() == expected
def test_OneLineTag():
    """OneLineTag renders open tag, content and close tag on a single line."""
    elem = hr.OneLineTag("create title")
    elem.tag = 'title'
    out = StringIO()
    elem.render(out)
    assert out.getvalue() == '<title>create title</title>\n'
def test_SelfClosingTag():
    """SelfClosingTag renders as a single self-closed tag with no content."""
    elem = hr.SelfClosingTag()
    elem.tag = 'meta'
    out = StringIO()
    elem.render(out)
    assert out.getvalue() == '<meta />\n'
def test_A():
    """A renders an anchor carrying its href attribute and the link text."""
    anchor = hr.A("http://google.com", "Link")
    out = StringIO()
    anchor.render(out)
    assert out.getvalue() == '<a href="http://google.com">\n    Link\n</a>\n'
def test_H():
    """H renders a header whose tag embeds the requested level."""
    header = hr.H(2, "Header2")
    out = StringIO()
    header.render(out)
    assert out.getvalue() == '<h2>Header2</h2>\n'
|
[
"Nettaimam@gmail.com"
] |
Nettaimam@gmail.com
|
e838e7ed68e61e13fba1cfd7a5ee2f76f4b579e0
|
d16086e410ff57be2c16c0dbb1ebb425ff4d1947
|
/patternSearch/patternSearch.py
|
ee9d12aaf1a9d00e48fea1e864240da1edf6c7bb
|
[] |
no_license
|
FaroukElk/Rosalind
|
6e06dace7d43444aa6e45efea6f5cba71c1b0536
|
6ebc5c1d379340e69095da6fbe30d4cd2abc429c
|
refs/heads/master
| 2021-01-11T20:49:03.907426
| 2017-01-23T18:19:51
| 2017-01-23T18:19:51
| 79,190,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
"""Searches for a pattern within a DNA sequence and
returns a list with the locations where the pattern occurs"""
def patternSearch(DNA, pattern):
    """Return the 1-based start positions of every (possibly overlapping)
    occurrence of *pattern* in *DNA*."""
    width = len(pattern)
    return [start + 1
            for start in range(len(DNA) - width + 1)
            if DNA[start:start + width] == pattern]
print (patternSearch("TCTATCGTTCTATCGTCTCTATCGCACTCTATCGGGGATCTATCGTTCTCGGCTATCTATCGTATCTATCGGACTCTATCGTTCTATCGTTTATCTATCGCATTCTATCGCCTCTATCGTGTATCTATCGAGTTTCTATCGCAATGTAGTCTATCGTCTATCGATCTATCGTCTATCGGTCTATCGCTCTATCGTTATTCTATCGCTCTATCGGTGCGAGTTCTATCGGTCTATCGTCTATCGTCTATCGGCACTTGGCGGCATCTATCGCTCTATCGTCTATCGTGTCTATCGTCTATCGTCTATCGTACTCTATCGGTCTATCGTCTATCGATCTATCGCTCTATCGGTCTATCGACCTCTATCGATAAATCTATCGTCTATCGTGGACTTCTATCGGAGTCTATCGTCTATCGAGCCCTCCAGTCTATCGTGTCTATCGAATTTCGCGTCTATCGACGTCTATCGTCTATCGTCTATCGCTTATCTATCGCATCTATCGATTCTATCGATCTATCGATCTATCGGGTCTATCGGATATCTATCGTCTATCGCCACCGTTCTATCGCAACTCTAGCAACTCTCTATCGGATCTATCGGATCTATCGTCTATCGGTCTATCGGGTGACATTTTGCGAAGAGCAGCACAGGAGTCTATCGATTCTCTATCGGTCTATCGAACTCTATCGTCTATCGTCACGTCTATCGAGATCTATCGTCTATCGGGTTTCTATCGCTCTATCGAATCTATCGGATGACTCTATCGTAAGCTCTATCGAACAATCTATCGTCTATCGTAATCTATCGTCTATCGTCTATCGGATTCTATCGTGATCTATCGTCTATCGTATCTATCGTCTATCGTAAGGTTCTATCGCTCTATCGTCTATCGCACTTCTATCGG", "TCTATCGTC"))
#Solution:
#[9, 150, 165, 230, 237, 272, 288, 295, 320, 373, 403, 462, 469, 541, 602, 683, 690, 712, 784, 801, 808, 835, 851, 879]
|
[
"faroukelkhayat@outlook.com"
] |
faroukelkhayat@outlook.com
|
aa6675fca85df65adea866d06aa069e2bc263fba
|
498dbd2932527d1e7f499e2d7887c4af05e1e425
|
/5hu7_d0wn.py
|
764ad721a05408f832a85fc6006c088a327c96d3
|
[] |
no_license
|
gauriz/py
|
1ddabf24ae805b641793a7f27eb19c5f8ca2bfc7
|
3804951033d8fe144f11bad5a5635c91837e46b5
|
refs/heads/master
| 2022-11-17T10:10:55.196040
| 2020-07-16T09:20:26
| 2020-07-16T09:20:26
| 279,947,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
import os
shutdown = raw_input("Do you wish to shutdown your computer ? (yes / no): ")
if shutdown == 'no':
exit()
else:
os.system("shutdown /s /t 1")
|
[
"gauri@focaloid.com"
] |
gauri@focaloid.com
|
5383e89e7fd17fdf4769c440ba13268284b5621b
|
2caf6885511af24443e22aaa43cd679d694f6f80
|
/note/my_note/first_month/day09/day08_homework.py
|
08f00777fe58712d78bfcc4557bbbbd2ee0f1737
|
[] |
no_license
|
nandadao/Python_note
|
7f9ba54a73af05c935b4f7e24cacb728859a6c69
|
abddfc2e9a1704c88867cff1898c9251f59d4fb5
|
refs/heads/master
| 2020-11-25T18:29:50.607670
| 2019-12-19T01:28:02
| 2019-12-19T01:28:02
| 228,793,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,968
|
py
|
dict_commodity_info = {
101: {"name": "屠龙刀", "price": 10000},
102: {"name": "倚天剑", "price": 10000},
103: {"name": "九阴白骨爪", "price": 8000},
104: {"name": "九阳神功", "price": 9000},
105: {"name": "降龙十八掌", "price": 8000},
106: {"name": "乾坤大挪移", "price": 10000}
}
list_order = []
def print_commodity_info():
"""
打印商品信息
:return:
"""
for key, value in dict_commodity_info.items():
print("编号:%d,名称:%s,单价:%d。" % (key, value["name"], value["price"]))
def buying():
print_commodity_info()
cid, count = create_order()
list_order.append({"cid": cid, "count": count})
print("添加到购物车。")
def create_order():
"""
商品订单
"""
while True:
cid = int(input("请输入商品编号:"))
if cid in dict_commodity_info:
break
else:
print("该商品不存在")
count = int(input("请输入购买数量:"))
return cid, count
def shopping():
"""
购物
"""
while True:
item = input("1键购买,2键结算。")
if item == "1":
buying()
elif item == "2":
settlement()
def settlement():
total_price = 0
print_order_info()
total_price += commodiy["price"] * item["count"]
paying(total_price)
def paying(total_price):
while True:
money = float(input("总价%d元,请输入金额:" % total_price))
if money >= total_price:
print("购买成功,找回:%d元。" % (money - total_price))
list_order.clear()
break
else:
print("金额不足.")
def print_order_info():
for item in list_order:
commodiy = dict_commodity_info[item["cid"]]
print("商品:%s,单价:%d,数量:%d." % (commodiy["name"], commodiy["price"], item["count"]))
shopping()
|
[
"1361335953@qq.com"
] |
1361335953@qq.com
|
29723787e529f7cf19e2c9e4c2a7fad23597715b
|
f7f594f1d6958652cb065c2cd7075c27976c6cc4
|
/PythonSocketIO/sockets.py
|
d72172348fb7ab316afb18149effec18d0e0247a
|
[] |
no_license
|
eric26009/LiDAR-Capstone-2019
|
c4d96acbaf080ee89a4bb65f559ee4e9c4148c3d
|
85d35369bbb832ac977629cc2dfc0ab83fcebfde
|
refs/heads/master
| 2023-03-03T10:44:50.322067
| 2019-08-28T07:17:06
| 2019-08-28T07:17:06
| 204,403,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import socketio # pip install "python-socketio[client]"
import json
# standard Python
sio = socketio.Client()
@sio.event
def connect():
print("I'm connected!")
sio.emit('onLidarStateChanged', "payload")
@sio.event
def onMoveByStepsTriggered(payload):
print("move by steps triggered")
print(payload["steps"])
print(payload["acceleration"])
sio.connect('https://capstone-application-server.herokuapp.com')
|
[
"ericfeldman@outlook.com"
] |
ericfeldman@outlook.com
|
34828399beb915cea5cdedb0525a8b4c0d71b95c
|
a2199be8de33a5890676856eacec6e503a5a3be1
|
/1st+python+assignment+of+skillsanta.py
|
0aadac84a0f4850a053795f6452c9463885c9a01
|
[
"Apache-2.0"
] |
permissive
|
tanmaymishra21/Code
|
dcd77d97783112656443ec698ad390dec20b285c
|
8a1ef44dba6c1527ddc3dfb2246551a003b2adad
|
refs/heads/main
| 2023-01-06T11:21:50.100700
| 2020-10-30T14:34:18
| 2020-10-30T14:34:18
| 302,079,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# coding: utf-8
# In[1]:
#printing the maximum value and largest number in the list
data=[22,60,78,99,2,6,10]
print(max(data))
# In[3]:
#printing the second largest number in the list
data=[22,60,78,99,2,6,10]
data.sort()
print('second largest number in the list', data[-2])
# In[5]:
#first merge two lists and sort it
data=[22,60,78,99,2,6,10]
data1=[1,34,45,6,77,8]
data2=data+data1
data2.sort()
print(data2)
# In[6]:
#Swap the first and last value of a list
data=[22,60,78,99,2,6,10]
data[0],data[-1]=data[-1],data[0]
print(data)
|
[
"noreply@github.com"
] |
tanmaymishra21.noreply@github.com
|
33859cb61e22b4f91fdf55683f1d74a7534f9566
|
a1d1534962df22065f6566636d5ab5b6bb7c6bd4
|
/hellopython/serializers.py
|
fc3a3f6a16aac5351c3de2bbba1b8872768c47a3
|
[] |
no_license
|
Apeopl/hello
|
a8adeae2d77ebd14dc294da01d4d1788f892e628
|
fbb8d2999191307142c5be71ee777826a7496a7c
|
refs/heads/master
| 2020-05-18T05:24:17.133706
| 2019-05-06T07:18:03
| 2019-05-06T07:18:03
| 184,205,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
from rest_framework import serializers
from .models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
class SnippetSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
title = serializers.CharField(required=False, allow_blank=True, max_length=100)
code = serializers.CharField(style={'base_template': 'textarea.html'})
linenos = serializers.BooleanField(required=False)
language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python')
style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')
def create(self, validated_data):
return Snippet.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.title = validated_data.get('title', instance.title)
instance.code = validated_data.get('code', instance.code)
instance.linenos = validated_data.get('linenos', instance.linenos)
instance.language = validated_data.get('language', instance.language)
instance.style = validated_data.get('style', instance.style)
instance.save()
return instance
|
[
"zhengjinlei@edeMacBook-Air.local"
] |
zhengjinlei@edeMacBook-Air.local
|
88dca76b742c8a26697c487bb55bdd74b7c86779
|
25a894aa2177c64ce5c8c7ab77c53bc13bbbc916
|
/Django/profiles_project/profiles_api/APIView/urls.py
|
34b7d5a5ee1eaefd223d720af5f43380a8d0ea8b
|
[] |
no_license
|
shivam199/Projects
|
8b06a289aa0587ba3290f771e1f7215c1f7a8c19
|
4b47afcc6e812b22f95bd835333de0d048feace3
|
refs/heads/master
| 2022-11-12T19:12:24.177554
| 2020-06-26T07:04:54
| 2020-06-26T07:04:54
| 275,093,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
from django.urls import path
from APIView import views
urlpatterns = [
path('hello-view/',views.HelloApiView.as_view()),
]
|
[
"shivamsingh.tcet@gmail.com"
] |
shivamsingh.tcet@gmail.com
|
ad8f14900f1113202f8f7898cf22eeb757b38c04
|
b8af5c6d0a27c8c157917f82bf75540043d9ed39
|
/Chapter_1/1.3/namenum.py
|
edcbf02cb972804619138f2274efc36ee76e3d61
|
[] |
no_license
|
rishiso/USACO-Problems
|
bd650bb2694cfedc6aeca63533db81583946ce1b
|
3252aab02ae10590cc8b67227a7e19e99f4764b0
|
refs/heads/master
| 2021-07-16T11:17:53.053533
| 2020-11-28T01:36:27
| 2020-11-28T01:36:27
| 225,743,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
"""
ID: rishiso1
LANG: PYTHON3
TASK: namenum
"""
f = open("namenum.in", "r")
w = open("namenum.out", "w")
dict = open("dict.txt", "r")
names = dict.read()
names = names.split("\n")
number = f.readline()
number = number.replace("\n", "")
names = [e for e in names if len(e) == len(number)]
num_dict = {
2: ["A", "B", "C"],
3: ["D", "E", "F"],
4: ["G", "H", "I"],
5: ["J", "K", "L"],
6: ["M", "N", "O"],
7: ["P", "R", "S"],
8: ["T", "U", "V"],
9: ["W", "X", "Y"]
}
#Recursive function for finding combinations
def combo(arr, i):
res = []
if i == len(number):
return arr
else:
letters = num_dict[int(number[i])]
for name in arr:
for letter in letters:
new_word = name[:i] + letter + name[i + 1:]
res.append(new_word)
return combo(res, i + 1)
original_word = "".join([num_dict[int(n)][0] for n in number])
count = 0
for word in combo([original_word], 0):
if word in names:
count += 1
w.write(word + "\n")
if count == 0:
w.write("NONE\n")
|
[
"noreply@github.com"
] |
rishiso.noreply@github.com
|
cb00c6e6fb74427b3aa52cdd486487c544077759
|
4006fa146861cb9bf9a367786951df284461c169
|
/windchill.py
|
dcb0db3d88b425a31fda947489807c8cd3782028
|
[] |
no_license
|
ccase18/CS550-Fall-Term
|
c58c94f6b0a54395dff07ad2fadbadab3b6fabce
|
7f5ac7a43e14b5300833ad16e654c3152623d99b
|
refs/heads/master
| 2021-08-23T13:51:14.406172
| 2017-12-05T04:18:53
| 2017-12-05T04:18:53
| 112,652,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
import sys
t = float(input('What is the temperature?? '))
v = float(input('And what is the wind speed??? '))
if -50 < t < 50 and 3 < v < 120:
w = str(35.74 + (0.6215*t) + ((0.4275*t) - 35.75) * (v**0.16))
print('The wind chill is ' + w + ' degrees')
else:
print('Sorry enter a wind speed value betwen 3 and 120 and a temp value between -50 and 50')
|
[
"canaancase@Canaans-MacBook-Pro.local"
] |
canaancase@Canaans-MacBook-Pro.local
|
c0706709e5f266e7ceef233489e62c2b5f5c9834
|
3b88dcf9b7cd28386ad3ea557c38e2096e59153f
|
/ansibleawx/__init__.py
|
7bddcf207bb4cf600f830ed48cf2ce9c97312091
|
[
"MIT"
] |
permissive
|
ChandlerSwift/AnsibleAWX-Client
|
a48c5209dc623ae000cda9725e15a849e987dc8b
|
6030de8a5ee116c1839620e94b745e45c6759004
|
refs/heads/master
| 2023-04-04T10:50:30.565760
| 2020-07-25T03:27:48
| 2020-07-25T03:27:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30
|
py
|
from ansibleawx.api import Api
|
[
"marcos.diniz@4linux.com.br"
] |
marcos.diniz@4linux.com.br
|
9d2da1c148c1eb28261addf1b5e0432da5bd0623
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20190403/example_jupyter/02run-kernelapp.py
|
9e4a677651e7a8fbc429a562bbc18e406e88e5aa
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 138
|
py
|
from jupyter_client.kernelapp import main
# main = KernelApp.launch_instance こうなっているのでmainを使っても良い
main()
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
ee874271e1c806337273b45970bad55bef0a7d1a
|
02537ef86b8aca4b1324557090e3c17805770f09
|
/pink_led.py
|
822cdb28fd584fe0e4027f6f1d6f42a3a8279dbd
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
ifurusato/scripts
|
6e6bb7f3e608d70bae71ea1896488526440f0d0b
|
2afaa88369ae1736b7449136f4a38f63fe90116e
|
refs/heads/master
| 2023-03-21T18:53:47.160515
| 2021-03-17T08:45:06
| 2021-03-17T08:45:06
| 265,172,338
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,615
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 by Murray Altheim. All rights reserved. This file is part of
# the Robot Operating System project and is released under the "Apache Licence,
# Version 2.0". Please see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2020-05-19
# modified: 2020-05-19
# see: https://robots.org.nz/2020/05/19/four-corners/
# This script is used as a possible solution for the Four Corners Competition,
# as a way for a robot to locate the trajectory to a distant target.
#
import math, colorsys, traceback
import picamera
import picamera.array
from picamera.array import PiRGBArray
from colorama import init, Fore, Style
init()
PINK_RGB = [151, 55, 180] # hue = 286
def color_distance(e0, e1):
hsv0 = colorsys.rgb_to_hsv(e0[0], e0[1], e0[2])
hsv1 = colorsys.rgb_to_hsv(e1[0], e1[1], e1[2])
dh = min(abs(hsv1[0]-hsv0[0]), 360-abs(hsv1[0]-hsv0[0])) / 180.0
ds = abs(hsv1[1]-hsv0[1])
dv = abs(hsv1[2]-hsv0[2]) / 255.0
distance = math.sqrt(dh*dh+ds*ds+dv*dv)
return distance
def print_row(image, y):
print(Fore.WHITE + '{:d}\t'.format(y) + Fore.BLACK, end='')
for x in reversed(range(0,image.shape[1])):
_rgb = image[y,x]
dist = color_distance(_rgb, PINK_RGB)
_hilite = get_hilite(dist)
print(_hilite + "▪", end='')
print(Style.RESET_ALL)
def get_hilite(dist):
if dist < 0.025:
return Fore.MAGENTA + Style.BRIGHT
elif dist < 0.05:
return Fore.MAGENTA + Style.NORMAL
elif dist < 0.08:
return Fore.RED + Style.BRIGHT
elif dist < 0.10:
return Fore.RED + Style.NORMAL
elif dist < 0.15:
return Fore.YELLOW + Style.BRIGHT
elif dist < 0.2:
return Fore.YELLOW + Style.NORMAL
elif dist < 0.3:
return Fore.GREEN + Style.BRIGHT
elif dist < 0.4:
return Fore.GREEN + Style.NORMAL
elif dist < 0.5:
return Fore.CYAN + Style.NORMAL
elif dist < 0.6:
return Fore.BLUE + Style.BRIGHT
elif dist < 0.7:
return Fore.BLUE + Style.NORMAL
elif dist < 0.8:
return Fore.BLACK + Style.NORMAL
else:
return Fore.BLACK + Style.DIM
# ..............................................................................
try:
print(Fore.CYAN + 'starting...' + Style.RESET_ALL)
# don't necessarily process the whole image (it takes a long time)
_start_row = 180 # the bottom row of the image to be processed
_end_row = 260 # the top row of the image to be processed
with picamera.PiCamera() as camera:
with PiRGBArray(camera) as output:
camera.resolution = (640, 480)
camera.capture(output, 'rgb')
image = output.array
print(Fore.YELLOW + 'Captured {:d}x{:d} image\n'.format(image.shape[1], image.shape[0]) + Style.RESET_ALL)
_width = image.shape[1]
_height = image.shape[0]
print(Fore.YELLOW + 'Captured size {:d}x{:d} image'.format(_width, _height) + Style.RESET_ALL)
for _row in reversed(range(_start_row + 1,_end_row + 1)):
print_row(image, _row)
except picamera.PiCameraMMALError:
print(Fore.RED + Style.BRIGHT + 'could not get camera: in use by another process.' + Style.RESET_ALL)
except Exception:
print(Fore.RED + Style.BRIGHT + 'error starting ros: {}'.format(traceback.format_exc()) + Style.RESET_ALL)
print(Fore.CYAN + 'complete.' + Style.RESET_ALL)
|
[
"ichiro.furusato@gmail.com"
] |
ichiro.furusato@gmail.com
|
38406c33b15ddc21372f111248d8405b59cef7b8
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/ptp1b_input/Lbq/bq-bs_MD_NVT_rerun/set_1ns_equi_1_m.py
|
b172df93ff15c6aaed13071699d2c538f9479f6d
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/ptp1b/Lbq/MD_NVT_rerun/ti_one-step/bq_bs/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1_m.in'
temp_pbs = filesdir + 'temp_1ns_equi_1_m.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1_m.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1_m.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../bq-bs_merged.prmtop .")
os.system("cp ../0.5_equi_0_3.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
ab3171f459b006cf8312516cf0706e936772f2cc
|
7b0c5ab11a50a1a8443a552e6ba39a060c895912
|
/SIE507/venv/hw4/ApartmentPurchase.py
|
fc7d74bfdc39aa35ae67c4b13940b7764447e171
|
[] |
no_license
|
balmandhunter/oop_class
|
8901c01026b9dbbe6f89d37a6e2d6a0bb9b7d5bf
|
6abf55f9e1ebaf4a2d5f4cbffb84ed27048bf4b8
|
refs/heads/main
| 2023-04-18T03:59:42.022997
| 2021-05-06T13:38:49
| 2021-05-06T13:38:49
| 334,304,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
from Apartment import Apartment
from Purchase import Purchase
class ApartmentPurchase(Apartment, Purchase):
'''This static method calls the user prompts from the parent classes, Apartment and Purchase'''
@staticmethod
def prompt_init():
args = Apartment.prompt_init()
args.update(Purchase.prompt_init())
return args
'''This instance method calls the display methods from the parent classes, apartment and purchase'''
def display(self):
Apartment.display(self)
Purchase.display(self)
|
[
"berkeleyalmandhunter@gmail.com"
] |
berkeleyalmandhunter@gmail.com
|
d59159985a4f45e58b045558cc2cb0a0fc614574
|
76070a758fbd86ed289226860bb83768418cdb57
|
/plugins/my_new_option.py
|
efc5983d5e372f3823836c11ad0b39ce5cfbc465
|
[] |
no_license
|
inkerra/python-tasks
|
c3ae9393ddfbab4bc06e11e16617a5f8d14b3272
|
08009819bf072d25c9231a8dc3bb7aa25a1b3bbd
|
refs/heads/master
| 2021-01-20T00:40:15.843238
| 2012-09-14T14:02:40
| 2012-09-14T14:02:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from opt_plugin.plugin import MetaOpt, Opt
class MyNewOption(Opt):
@classmethod
def action(cls, val):
print "My new option is set to {}".format(val)
opt = '--act2'
value_type = int
default_value = 0
class D123(object):
def __str__(self):
return "D123"
class D1234(object):
def __str__(self):
return "D1234"
|
[
"inkerra@inkerra-desktop"
] |
inkerra@inkerra-desktop
|
c2b5c2cf4c0e71318bb6cf2103ad9ca3738100bf
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2367/60691/272097.py
|
6d63b90625ec0cf2e51f1d19128bfb9aacde9d2b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
n = int(input())
if n % 2 == 0:
print(-1)
else:
for i in range(1, 20):
l = []
for j in range(i):
l.append('1')
s = "".join(l)
testnum = int(s)
if testnum % n == 0:
print(testnum)
break
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
24ff0eaf61ebecc102e3769515330bb14c3f007d
|
63ebf4326a8485c62944e2fbd102c302dc02c0ad
|
/calculadorafiesta.py
|
457ed8c119287e582cc318ca8b896d82d5f3158d
|
[] |
no_license
|
genesysrm/Python_DataScience
|
7aba9c2d32058e1d3cb285910d06ca3836c26f60
|
4fc2ea8bc63982bc9bc11611dc5bf28c27911f02
|
refs/heads/main
| 2023-01-22T15:25:42.667507
| 2020-11-19T14:52:18
| 2020-11-19T14:52:18
| 314,279,094
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
class Festa:
def _init_(self, c,s,d):
self.comida = c
self.suco = s
self.deco = d
def comida(self, qua):
c = 25 * qua
return c
def suco(self, qua, saudavel):
if(saudavel):
s = (5 * qua)
#suco = suco - (suco*0.05)
else:
s = (20 * qua)
return s
def decoracao(self, qua, diferenciada):
if(diferenciada):
valor = 50 + (10*qua)
return valor
else:
valor = 30 + (7.5 * qua)
return
pessoas = int(input("Digite a quantidade de pessoas convidadas para a festa: "))
tipodeco = int(input("Que tipo de decoração deseja 1 Normal ou 2 Diferenciada? Por favor responda com o numero de sua escolha: "))
if tipodeco==2:
dif = True
else:
dif = False
tipodeco = int(input("Que tipo de bebida deseja 1 Saudavel ou 2 Alcolica? Por favor responda com o numero de sua escolha: "))
if tipodeco==21:
saud = True
else:
saud = False
f = Festa()
total = f.total(pessoas,dif,saud)
print("O
|
[
"terceiro-gmerchan@icatuseguros.com.br"
] |
terceiro-gmerchan@icatuseguros.com.br
|
4d522ff9354206da526a83e3537226114e55b7d8
|
658707f12f02c057f3f7976ac020b07396376359
|
/img_recognize/cnn_handwrite.py
|
a43bddf8d9aeaa402028db26dd619e2adf706beb
|
[] |
no_license
|
lwq666666/ar_ocr_nlp_web
|
8c42faf7cf3c66f1b21151be8702ea676130c280
|
b977ae89bb1b360dcc45b4dc67c2c88bb93b7e84
|
refs/heads/master
| 2023-01-10T23:42:57.278631
| 2020-03-25T12:15:15
| 2020-03-25T12:15:15
| 249,963,047
| 0
| 0
| null | 2023-01-04T14:41:38
| 2020-03-25T11:41:43
|
Vue
|
UTF-8
|
Python
| false
| false
| 12,099
|
py
|
import tensorflow as tf
import os
import tensorflow.contrib.slim as slim
import random
import numpy
import cv2
import time
tf.app.flags.DEFINE_string("mode", "test", "train or test")
tf.app.flags.DEFINE_string("checkpoint", "img_recognize/checkpoint", "dir of checkpoint")
tf.app.flags.DEFINE_string("train_dir", "/Users/liweiqiang/PycharmProjects/img_recognition/char_count/new_trn", "dir of training data")
tf.app.flags.DEFINE_string("test_dir", "/Users/liweiqiang/PycharmProjects/img_recognition/char_count/new_tst", "dir of test data")
tf.app.flags.DEFINE_string("test_image", "", "test image for pictest")
tf.app.flags.DEFINE_string("logger_dir", "./logger", "dir of logger")
tf.app.flags.DEFINE_integer("batch_size", 128, "size of batch")
tf.app.flags.DEFINE_integer("img_size", 64, "size of resized images")
tf.app.flags.DEFINE_string("char_dict", "img_recognize/char_dict", "path to character dict")
tf.app.flags.DEFINE_bool("restore", False, "restore from previous checkpoint")
tf.app.flags.DEFINE_integer("max_step", 10000, "maximum steps")
tf.app.flags.DEFINE_string("test_pic", "test_16.jpg", "path to test picture")
FLAGS = tf.app.flags.FLAGS
# Read data from directory
def get_image_path_and_labels(dir):
img_path = []
for root, dir, files in os.walk(dir):
img_path += [os.path.join(root, f) for f in files]
# Shuffle the data to avoid overfit
random.shuffle(img_path)
# Because I created folders corresponding to each character, so the folder name is actual label
labels = [int(name.split(os.sep)[len(name.split(os.sep)) - 2]) for name in img_path]
# labels =[]
# for name in img_path:
# if '0'not in name.split(os.sep)[len(name.split(os.sep)) - 2]:
# print(name)
# print(img_path,labels)
return img_path, labels
def batch(dir, batch_size, prepocess=False):
img_path, labels = get_image_path_and_labels(dir)
# Transfer data into tensor so that it could be feed into network.
img_tensor = tf.convert_to_tensor(img_path, dtype=tf.string)
lb_tensor = tf.convert_to_tensor(labels, dtype=tf.int64)
# Loading the entire dataset comes with extremely high cost on RAM, so slice it and read them batch by batch
input_pipe = tf.train.slice_input_producer([img_tensor, lb_tensor])
# Read actual data and turn them to grey scale
img = tf.read_file(input_pipe[0])
imgs = tf.image.convert_image_dtype(tf.image.decode_png(img, channels=1), tf.float32)
# Randomly modify the images in order to avoid overfit.
if prepocess:
imgs = tf.image.random_contrast(imgs, 0.9, 1.1)
# Resize original images to unified size.
imgs = tf.image.resize_images(imgs, tf.constant([FLAGS.img_size, FLAGS.img_size], dtype=tf.int32))
# Read label
lbs = input_pipe[1]
# Get batch
img_batch, lb_batch = tf.train.shuffle_batch([imgs, lbs], batch_size=batch_size, capacity=50000,
min_after_dequeue=10000)
return img_batch, lb_batch
def cnn():
# (1-keep_prob) equals to dropout rate on fully-connected layers
keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
# Set up places for data and label, so that we can feed data into network later
img = tf.placeholder(tf.float32, shape=[None, 64, 64, 1], name="img_batch")
labels = tf.placeholder(tf.int64, shape=[None], name="label_batch")
# Structure references to : http://yuhao.im/files/Zhang_CNNChar.pdf,
# however I adjust a little bit due to limited computational resource.
# Four convolutional layers with kernel size of [3,3], and ReLu as activation function
conv1 = slim.conv2d(img, 64, [3, 3], 1, padding="SAME", scope="conv1")
pool1 = slim.max_pool2d(conv1, [2, 2], [2, 2], padding="SAME")
conv2 = slim.conv2d(pool1, 128, [3, 3], padding="SAME", scope="conv2")
pool2 = slim.max_pool2d(conv2, [2, 2], [2, 2], padding="SAME")
conv3 = slim.conv2d(pool2, 256, [3, 3], padding="SAME", scope="conv3")
pool3 = slim.max_pool2d(conv3, [2, 2], [2, 2], padding="SAME")
conv4 = slim.conv2d(pool3, 512, [3, 3], [2, 2], scope="conv4", padding="SAME")
pool4 = slim.max_pool2d(conv4, [2, 2], [2, 2], padding="SAME")
# Flat the feature map so that we can connect it to fully-connected layers
flat = slim.flatten(pool4)
# Two fully-connected layers with dropout rate as mentioned at the start
# First layer used tanh() as activation function
fcnet1 = slim.fully_connected(slim.dropout(flat, keep_prob=keep_prob), 1024, activation_fn=tf.nn.tanh,
scope="fcnet1")
fcnet2 = slim.fully_connected(slim.dropout(fcnet1, keep_prob=keep_prob), 142, activation_fn=None, scope="fcnet2")
# loss function is defined as cross entropy on result of softmax function on last layer
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=fcnet2, labels=labels))
# compare result to actual label to get accuracy
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(fcnet2, 1), labels), tf.float32))
step = tf.get_variable("step", shape=[], initializer=tf.constant_initializer(0), trainable=False)
# learning rate with exponential decay
lrate = tf.train.exponential_decay(2e-4, step, decay_rate=0.97, decay_steps=2000, staircase=True)
# Adam optimizer to decrease loss value
optimizer = tf.train.AdamOptimizer(learning_rate=lrate).minimize(loss, global_step=step)
prob_dist = tf.nn.softmax(fcnet2)
val_top3, index_top3 = tf.nn.top_k(prob_dist, 3)
# Write log into TensorBoard
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", accuracy)
summary = tf.summary.merge_all()
return {"img": img,
"label": labels,
"global_step": step,
"optimizer": optimizer,
"loss": loss,
"accuracy": accuracy,
"summary": summary,
'keep_prob': keep_prob,
"val_top3": val_top3,
"index_top3": index_top3
}
def train():
with tf.Session() as sess:
start = time.clock()
print("Start reading data")
# Get batch tensor of data
trn_imgs, trn_labels = batch(FLAGS.train_dir, FLAGS.batch_size, prepocess=True)
tst_imgs, tst_labels = batch(FLAGS.test_dir, FLAGS.batch_size)
graph = cnn()
# Preparation before training
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
saver = tf.train.Saver()
if not os.path.isdir(FLAGS.logger_dir):
os.mkdir(FLAGS.logger_dir)
trn_summary = tf.summary.FileWriter(os.path.join(FLAGS.logger_dir, 'trn'), sess.graph)
tst_summary = tf.summary.FileWriter(os.path.join(FLAGS.logger_dir, 'tst'))
step = 0
# If received restore flag, train from last checkpoint
if FLAGS.restore:
# Get last checkpoint in checkpoint directory
checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint)
if checkpoint:
# Restore data from checkpoint
saver.restore(sess, checkpoint)
step += int(checkpoint.split('-')[-1])
print("Train from checkpoint")
print("Start training")
while not coord.should_stop():
# Get actual data
trn_img_batch, trn_label_batch = sess.run([trn_imgs, trn_labels])
# Prepare parameters for network
graph_dict = {graph['img']: trn_img_batch, graph['label']: trn_label_batch, graph['keep_prob']: 0.8}
# Feed and parameter into network
opt, loss, summary, step = sess.run(
[graph['optimizer'], graph['loss'], graph['summary'], graph['global_step']], feed_dict=graph_dict)
trn_summary.add_summary(summary, step)
print("# " + str(step) + " with loss " + str(loss))
if step > FLAGS.max_step:
break
# Evaluate current network based on test dataset
if (step % 500 == 0) and (step >= 500):
tst_img_batch, tst_label_batch = sess.run([tst_imgs, tst_labels])
graph_dict = {graph['img']: tst_img_batch, graph['label']: tst_label_batch, graph['keep_prob']: 1.0}
accuracy, test_summary = sess.run([graph['accuracy'], graph['summary']], feed_dict=graph_dict)
tst_summary.add_summary(test_summary, step)
print("Accuracy: %.8f" % accuracy)
# Save checkpoint
if step % 1000 == 0:
saver.save(sess, os.path.join(FLAGS.checkpoint, 'hccr'), global_step=graph['global_step'])
elapsed = (time.clock() - start)
# file = open('log.txt', 'w')
#
# # 遍历字典的元素,将每项元素的key和value分拆组成字符串,注意添加分隔符和换行符
# file.write(str(elapsed) +'\n'+str(accuracy))
#
# # 注意关闭文件
# file.close()
# coord.request_stop()
coord.join(threads)
saver.save(sess, os.path.join(FLAGS.checkpoint, 'hccr'), global_step=graph['global_step'])
sess.close()
return
def test(path):
# Read test picture and resize it, turn it to grey scale.
tst_image = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
tst_image = cv2.resize(tst_image,(64, 64))
tst_image = numpy.asarray(tst_image) / 255.0
tst_image = tst_image.reshape([-1, 64, 64, 1])
# feed the test picture into network and estimate probability distribution
with tf.Session() as sess:
graph = cnn()
saver = tf.train.Saver()
saver.restore(sess=sess, save_path=tf.train.latest_checkpoint(FLAGS.checkpoint))
graph_dict = {graph['img']: tst_image, graph['keep_prob']: 1.0}
val, index = sess.run([graph['val_top3'], graph['index_top3']], feed_dict=graph_dict)
for i in range(3):
print("Probability: %.5f"%val[0][i]+ " with label:"+str(fileToNumpy('img_recognize/char_dict.txt')[int(index[0][i])]))
path=FLAGS.train_dir+"/" + '%0.5d' % index[0][0]
# select one of the picture from the label with top 1 probability
# for root,dir,files in os.walk(path):
# for f in files:
# img=cv2.imread(path+"/"+f)
# enlarged=cv2.resize(img,(img.shape[1]*5,img.shape[0]*5))
# cv2.imshow("Top1",enlarged)
# cv2.waitKey()
# break
# break
return val, index
#识别整个图片的接口,传入(-1,64,64)的nparray,以字符串形式输出图片中信息
def recongnize(img_list):
# Read test picture and resize it, turn it to grey scale.
# tst_image = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
# tst_image = cv2.resize(tst_image,(64, 64))
tst_image = numpy.asarray(img_list) / 255.0
tst_image = tst_image.reshape([-1, 64, 64, 1])
# feed the test picture into network and estimate probability distribution
with tf.Session() as sess:
graph = cnn()
saver = tf.train.Saver()
saver.restore(sess=sess, save_path=tf.train.latest_checkpoint(FLAGS.checkpoint))
graph_dict = {graph['img']: tst_image, graph['keep_prob']: 1.0}
val, index = sess.run([graph['val_top3'], graph['index_top3']], feed_dict=graph_dict)
str=''
for i in range(len(index)):
str+=fileToNumpy('img_recognize/char_dict.txt')[int(index[i][0])]
return str
def fileToNumpy(filename):
file = open(filename)
file_lines = file.readlines()
labels = {}
index = 0
for line in file_lines:
line = line.strip() # 参数为空时,默认删除开头、结尾处空白符(包括'\n', '\r', '\t', ' ')
formLine = line.split(' ')
labels[index]=formLine[0]
index += 1
return labels
def main(_):
if FLAGS.mode == "train":
train()
if FLAGS.mode == "test":
test(FLAGS.test_pic)
if __name__ == '__main__':
tf.app.run()
|
[
"1261707134@qq.com"
] |
1261707134@qq.com
|
5c035449207a17df07ce270686655074a7e4a56a
|
9fc768c541145c1996f2bdb8a5d62d523f24215f
|
/code/jPB371/ch07ok/dict_example.py
|
7360aeaf11d44b2e88719a52fc6d5556685ddb27
|
[] |
no_license
|
jumbokh/pyclass
|
3b624101a8e43361458130047b87865852f72734
|
bf2d5bcca4fff87cb695c8cec17fa2b1bbdf2ce5
|
refs/heads/master
| 2022-12-25T12:15:38.262468
| 2020-09-26T09:08:46
| 2020-09-26T09:08:46
| 283,708,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
# -*- coding: utf-8 -*-
dictStr = {'bird':'鳥', 'cat':'貓', 'dog':'狗', 'pig':'豬'}
#新增wolf
dictStr['wolf']="狼"
#刪除pig
dictStr.pop("pig")
#列出dictStr所有的value
print("dictStr目前的元素:")
for v in dictStr.values():
print(v)
#搜尋
print("搜尋dog==>"+dictStr.get("dog","不在dictStr"))
|
[
"jumbokh@gmail.com"
] |
jumbokh@gmail.com
|
61575ab318d216749a261b7ee776b9197c0b61a1
|
49156df32b5be28514a5507c7bc26c2872fa7dfc
|
/second task/task2.py
|
f0f1f3d163ca0ffdf324644c3388247b50d035e5
|
[] |
no_license
|
ahmedshaaban97/MRI-tasks
|
40123656e8bc0e8195f510b9b8a1428b31484407
|
e450951dfc44ef721e22b121fe52baf45aab269a
|
refs/heads/master
| 2020-04-22T07:40:26.446527
| 2019-06-17T23:51:17
| 2019-06-17T23:51:17
| 170,222,564
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,998
|
py
|
import sys
from PyQt5 import QtCore, QtWidgets , QtGui
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget , QApplication,QPushButton,QInputDialog,QSpinBox,QFileDialog,QProgressBar,QMessageBox,QGraphicsView,QSizePolicy,QComboBox
from PyQt5.QtCore import QSize,pyqtSlot,QTimer,QThread,QRect
from PyQt5.QtGui import QIcon, QPixmap
from PIL import Image
import numpy as np
from numpy import array
from PIL.ImageQt import ImageQt
import time
import threading
from scipy.fftpack import ifft
import cv2
import pyqtgraph as pg
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class PlotCanvas(FigureCanvas):
    """Matplotlib canvas widget that draws a 1-D data series as a red line.

    Args:
        parent: parent Qt widget (or None).
        width, height: figure size in inches.
        dpi: figure resolution.
        data: sequence of y-values to plot (defaults to empty).
    """
    def __init__(self, parent=None, width=5, height=4, dpi=50, data=None):
        # BUG FIX: 'data=[]' was a mutable default argument shared across
        # every instance created without an explicit data argument; use the
        # None sentinel instead.
        fig = Figure(figsize=(width, height), dpi=dpi)
        FigureCanvas.__init__(self, fig)
        self.setParent(parent)
        self.data = [] if data is None else data
        FigureCanvas.setSizePolicy(self,
                QSizePolicy.Expanding,
                QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        self.plot()
    def plot(self):
        """Plot self.data as a red line on a fresh subplot and repaint."""
        ax = self.figure.add_subplot(111)
        ax.plot(self.data, 'r-')
        self.draw()
class Window(QMainWindow):
    """Main window: pick an image, derive a per-pixel "phantom" of four
    channels (T1, T2, proton density, raw intensity) and continuously
    display the channel chosen in the comboboxes; clicking a pixel marks
    it red and plots its T1/T2 curves."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Image Converter")
        self.setGeometry(500,500,500,500)
        # Phantom side length in pixels; set from the size combobox later.
        self.imgSize = 0
        # Placeholder until the user selects a file.
        self.path = 'this is empyt pass'
        # HxWx4 array: [T1, T2, PD, intensity] per pixel.
        self.currentPhantom = []
        # RGB array currently rendered in the image label.
        self.currentImgArray = []
        # Number of curve-plot rows drawn so far (capped in plot()).
        self.numOfPlots = 0
        # NOTE(review): this attribute shadows QWidget.show(); it is used as
        # the run flag for the refresh loop in checkImage() (1=run, 0=stop).
        self.show = 1
        self.imglabel = ''
        #self.label1 = QLabel(self)
        self.layout()
    # NOTE(review): this method shadows QWidget.layout(). Builds the UI.
    def layout(self):
        #self.label()
        self.browse()
        #self.spinb = self.spinBox()
        self.sizeBox = self.imageSizeCombobox()
        print(type(self.sizeBox.currentText()))
        self.typeBox = self.imageTypeCombobox()
        print(self.typeBox.currentText())
        self.showMaximized()
    def imageSizeCombobox(self):
        """Combobox selecting the phantom side length (pixels)."""
        sizeBox = QComboBox(self)
        sizeBox.setObjectName(("comboBox"))
        sizeBox.setGeometry(QRect(5, 150, 100, 50))
        sizeBox.addItem("128")
        sizeBox.addItem("256")
        sizeBox.addItem("512")
        return sizeBox
    def imageTypeCombobox(self):
        """Combobox selecting which phantom channel to display."""
        sizeBox = QComboBox(self)
        sizeBox.setObjectName(("comboBox"))
        sizeBox.setGeometry(QRect(5, 250, 100, 50))
        sizeBox.addItem("T1")
        sizeBox.addItem("T2")
        sizeBox.addItem("ProtonDencity")
        sizeBox.addItem("PixelIntencity")
        return sizeBox
    # def label(self):
    #     li = QLabel(self)
    #     li.setText('Conversion Speed')
    #     li.setGeometry(145,100,120,20)
    #     #li.move(145,100)
    def label(self):
        """Create and return an empty QLabel child widget."""
        li1 = QLabel(self)
        return li1
    def label2(self):
        """Create and return a second empty QLabel child widget."""
        li2 = QLabel(self)
        return li2
    def browse(self):
        """Add the button that opens the image-selection dialog."""
        btn = QPushButton("Select image",self)
        btn.setToolTip('This is an example button')
        btn.clicked.connect(self.on_click)
        btn.move(5,10)
    def spinBox(self):
        """(Currently unused) spin box in range 1..64."""
        sbox = QSpinBox(self)
        sbox.setValue(1)
        sbox.move(255,100)
        sbox.setMaximum(64)
        return sbox
    def progressBar(self):
        """(Currently unused) progress bar along the bottom edge."""
        progressBar = QProgressBar(self)
        progressBar.setGeometry(5,380,500,20)
        progressBar.setValue(0)
        return progressBar
    @pyqtSlot()
    def on_click(self):
        """Stop any running refresh loop, ask for an image, and load it."""
        self.show = 0
        name = QFileDialog()
        imgPath = name.getOpenFileName(self,'open file','','Image files (*.jpg *.png *.jpeg)')
        self.checkImage(imgPath[0])
    def checkImage(self,path):
        """Build the phantom for *path*, then loop re-rendering whichever
        channel the comboboxes currently select until self.show is cleared."""
        self.show = 1
        self.numOfPlots = 0
        self.imgSize = self.sizeBox.currentText()
        self.imgType = self.typeBox.currentText()
        self.label1 = self.label()
        self.imglabel = self.label1
        # Grayscale copy drives the phantom generation.
        img = Image.open(path).convert('L')
        arrayImage = self.convertImageToArray(img)
        phantonArray = self.genrate_total_phantom(arrayImage)
        self.currentPhantom = phantonArray
        im = Image.open(path)
        width, height = im.size
        self.path = path
        #img = cv2.imread(path)
        # Busy refresh loop; processEvents() keeps the UI responsive.
        while self.show == 1 :
            self.imgSize = self.sizeBox.currentText()
            self.imgType = self.typeBox.currentText()
            index = self.getPropertyIndex(self.imgType)
            indexedArray = self.conver3dTo2dArray(phantonArray,index)
            #print('this is index array', indexedArray)
            imgArray = Image.fromarray(indexedArray).convert('RGB')
            self.currentImgArray = np.array(imgArray)
            QApplication.processEvents()
            #print(type(img))
            self.showArrayImage(self.label1,self.currentImgArray,200,200)
            QApplication.processEvents()
    def showArrayImage(self,label,img,x,y):
        """Render array *img* into *label*; clicking the label picks a pixel.

        NOTE(review): the x/y parameters are unused -- geometry is fixed.
        """
        #label = QLabel(self)
        img = Image.fromarray(img)
        qimage = ImageQt(img)
        pixmap = QPixmap.fromImage(qimage)
        label.setPixmap(pixmap)
        label.setGeometry(128,10,int(self.imgSize),int(self.imgSize))
        # Monkey-patch the label's click handler to the pixel picker.
        label.mousePressEvent = self.getPixel
        label.show()
        #print('image is displayed')
    def clearLabel(self):
        """Hide the image label."""
        print('clear called')
        self.label1.setVisible(False)
    def getPropertyIndex(self,imgType):
        """Map a channel name to its index in the phantom's last axis."""
        if imgType == 'T1':
            return 0
        elif imgType == 'T2':
            return 1
        elif imgType == 'ProtonDencity':
            return 2
        else:
            return 3
    # def showArrayImage(self,label,img,x,y):
    #     qimage = ImageQt(img)
    #     pixmap = QPixmap.fromImage(qimage)
    #     label.setPixmap(pixmap)
    #     label.setGeometry(x,y,256,256)
    #     label.mousePressEvent = self.getPixel
    #     label.show()
    def convertImageToArray(self,img):
        """Convert a PIL image to a uint8 numpy array."""
        arr = np.asarray(img, np.uint8)
        #print('this is image pixel function')
        return arr
    def getPixel (self, event):
        """Click handler: mark the clicked pixel red and plot its curves."""
        x = event.pos().x()
        y = event.pos().y()
        # Arrays are indexed [row][col], i.e. (y, x).
        self.givePixelColor(y,x)
        self.showArrayImage(self.imglabel ,self.currentImgArray,200,200)
        t1,t2 = self.getPhantomPixelData(y,x)
        print(x,y)
        self.plot(t1,t2)
    def givePixelColor(self,row,colomn):
        """Paint one pixel of the displayed RGB array pure red."""
        self.currentImgArray[row][colomn][0] = 255
        self.currentImgArray[row][colomn][1] = 0
        self.currentImgArray[row][colomn][2] = 0
        print(self.currentImgArray[row][colomn])
    def getPhantomPixelData(self,row,col):
        """Return the (T1, T2) values stored for one phantom pixel."""
        t1 = self.currentPhantom[row][col][0]
        t2 = self.currentPhantom[row][col][1]
        return t1,t2
    def conver3dTo2dArray(self,arr,index):
        """Extract channel *index* of the phantom as a uint8 2-D array."""
        arr2 = np.array(np.zeros([len(arr),len(arr[0])]))
        for i in range(len(arr2)):
            for j in range(len(arr2[0])):
                arr2[i][j] = arr[i][j][index]
        arr2N = np.uint8(arr2)
        return arr2N
    def plot(self,t1,t2):
        """Draw exponential T1/T2 curves for a clicked pixel (max 5 rows)."""
        if(self.numOfPlots >4):
            return
        else:
            # NOTE(review): range(int(-1*t2)) is empty for positive t2, so
            # the T2 curve is usually an empty plot -- confirm intent.
            t1CurveData = np.exp(range(int(t1)))
            t2CurveData = np.exp(range(int(-1*t2)))
            m = PlotCanvas(self, width=4, height=4, data = t1CurveData)
            m.move(400,(10+self.numOfPlots * 200))
            m.show()
            m2 = PlotCanvas(self, width=4, height=4, data = t2CurveData)
            m2.move(700,(10+self.numOfPlots * 200))
            m2.show()
            self.numOfPlots = self.numOfPlots + 1
    def genrate_total_phantom (self , phantom_array ):
        """Derive T1/T2/PD maps from the grayscale image by intensity band
        and pack them with the raw intensities into one HxWx4 phantom.

        NOTE(review): the first two bands overlap for values 81..99; such
        pixels always take the first branch.
        """
        print('this is phantom array type ',phantom_array[0][0])
        t1_array = np.ones((int(self.imgSize),int(self.imgSize)))
        t2_array = np.ones((int(self.imgSize),int(self.imgSize)))
        pd_array = np.ones((int(self.imgSize),int(self.imgSize)))
        for i in range (0 , int(self.imgSize)) :
            for j in range (0,int(self.imgSize)):
                # Band 1: 80 < v < imgSize.
                if phantom_array[i][j]< int(self.imgSize) and phantom_array[i][j] >80 :
                    t1_array[i][j]=int (phantom_array[i][j]*(.2))
                    t2_array[i][j]=256-t1_array[i][j]
                    pd_array[i][j]=0
                # Band 2: 20 < v < 100.
                elif phantom_array[i][j]< 100 and phantom_array[i][j] >20 :
                    t1_array[i][j]=int (phantom_array[i][j]*3)
                    t2_array[i][j]=256-t1_array[i][j]
                    pd_array[i][j]=100
                else :
                    t1_array[i][j]=60+(phantom_array[i][j])
                    t2_array[i][j]=40+(phantom_array[i][j])
                    pd_array[i][j]=200
                QApplication.processEvents()
            QApplication.processEvents()
        #return phantom_array,t1_array,t2_array,pd_array
        phantomArray = self.convert2dArraysTo3d(phantom_array,t1_array,t2_array,pd_array)
        return phantomArray
    def convert2dArraysTo3d(self,pixelsArray,t1Array,t2Array,pdArray):
        """Stack the four 2-D maps into one HxWx4 array
        ([T1, T2, PD, intensity] per pixel)."""
        phantomArray = np.array(np.zeros([int(self.imgSize),int(self.imgSize),4]))
        for i in range(int(self.imgSize)):
            for j in range(int(self.imgSize)):
                phantomArray[i][j][0] = t1Array[i][j]
                phantomArray[i][j][1] = t2Array[i][j]
                phantomArray[i][j][2] = pdArray[i][j]
                phantomArray[i][j][3] = pixelsArray[i][j]
                QApplication.processEvents()
            QApplication.processEvents()
        return phantomArray
def main():
    """Create the Qt application and run the main Window until exit."""
    app = QApplication(sys.argv)
    window = Window()  # keep a reference so the window isn't garbage-collected
    sys.exit(app.exec_())


# BUG FIX: guard the entry point so importing this module (e.g. from tests
# or tooling) no longer launches the GUI as an import side effect.
if __name__ == '__main__':
    main()
|
[
"ahmedshaaban97"
] |
ahmedshaaban97
|
ef43030c0c8f4b27008999a73e61c4b81c8a4763
|
d06b776440822a7e7971ec2616ceb064cc8ff0ac
|
/core/api/routers.py
|
15925c6559bc244a29104dc0844743df8ef23aa8
|
[] |
no_license
|
holdbar/electroq
|
db42533bd7daff97ac109b82a66c58b90d16cc5d
|
42a7e3565966b3315ab9814a171e549c3340a23b
|
refs/heads/master
| 2021-04-09T14:34:47.794392
| 2018-04-01T09:48:59
| 2018-04-01T09:48:59
| 125,653,680
| 0
| 2
| null | 2018-03-25T18:15:48
| 2018-03-17T17:34:12
|
Python
|
UTF-8
|
Python
| false
| false
| 180
|
py
|
from rest_framework.routers import DefaultRouter
from core.api.viewsets import UserViewSet
router = DefaultRouter()
# registering viewsets
router.register(r"user", UserViewSet)
|
[
"holdbarhet@gmail.com"
] |
holdbarhet@gmail.com
|
270f1bcbb22967b643e8806b6c98d02f31a38f9b
|
481a4f156d97166d0f82e85a572137bc7d8ffbd2
|
/AWSLambdaSynchCalendar.py
|
98c528105e5d6195247dbce22f7616eceb02a032
|
[] |
no_license
|
rajeshpednekar/flufcode
|
db28f9f01e0e443c8e3f5f898d978733151d429d
|
d49ae2e7a22b3282fb32b7f2548025787015ce77
|
refs/heads/master
| 2022-11-14T09:01:32.853879
| 2020-07-05T03:05:58
| 2020-07-05T03:05:58
| 262,886,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,939
|
py
|
#####################################################################
#### Lambda function: funcSynchCalendar
#### Author: fluff code
#### A boto3 python handler function
#### This lambda function is triggered whenever there is a
#### message in the SQL Queue. It reads the event messages as input
### from AWS SQS and then submits the messages to the
#### remote api call at thedoctorproctor website.
#### Subsequently the SQS message is deleted
#### Most of the static environment variables are read using import 'os'
#### SQS queue: git-calendar-sqs.fifo
#####################################################################
import boto3
import requests
import json
import os
#####################################################################
## fn_auth_proctor : function to do the auth for AWS api call
#####################################################################
def fn_auth_proctor(login_url, sessiondata):
    """Log in to the remote API and build the Bearer authorization header.

    Args:
        login_url: login endpoint URL.
        sessiondata: form data with the login credentials.
    Returns:
        dict with a single 'Authorization' header.
    """
    response = requests.post(login_url, data=sessiondata)
    print("*" * 30)
    print(response.content)
    print("*" * 30)
    # Extract the bearer token from the JSON login response.
    bearer_token = json.loads(response.content)['token']
    auth_headers = {'Authorization': 'Bearer ' + bearer_token}
    print("*" * 30)
    print(auth_headers)
    print("*" * 30)
    return auth_headers
#####################################################################
### fn_send_msg_api: function to send message through api to website
#####################################################################
def fn_send_msg_api(url, payload, headers):
    """POST *payload* to *url* with the given auth *headers*; returns 0."""
    print("function fn_send_msg_api starts")
    print(headers)
    print(payload)
    api_response = requests.request("POST", url, headers=headers, data=payload)
    print('Response from thedoctorproctor website for api call: ')
    print(api_response.text.encode('utf8'))
    print("function fn_send_msg_api ends")
    return 0
#####################################################################
### lambda_handler : main lambda handler function
#####################################################################
def lambda_handler(event, context):
    """AWS Lambda entry point, triggered by SQS.

    Authenticates against the remote API once, forwards every SQS record's
    body as a session payload, deletes each processed message from the
    queue, and returns 0.
    """
    print("*"*30)
    print('Lambda handler function starts')
    print("*"*30)
    # Production endpoints kept for reference:
    #url = "https://api.thedoctorproctor.com/api/organisations/1/ext-sessions"
    #login_url = "https://api.thedoctorproctor.com/api/auth/login"
    # standby instance
    url = "https://sbapi.thedoctorproctor.com/api/organisations/1/ext-sessions"
    login_url = "https://sbapi.thedoctorproctor.com/api/auth/login"
    #http://sbapp.thedoctorproctor.com/#/login
    sessiondata = { 'email' : 'test@gmail.com', 'password' : '1234566758' }
    # AWS credentials and the queue URL come from Lambda environment
    # variables (never hard-code credentials in source).
    s_aws_access_key_id = os.environ['aws_access_key_id']
    s_aws_secret_access_key = os.environ['aws_secret_access_key']
    # BUG FIX: the secret key variable was misspelled 's_awe_secret_access_key'
    # here, raising NameError on every invocation.
    sqs = boto3.client('sqs',
                       aws_access_key_id=s_aws_access_key_id,
                       aws_secret_access_key=s_aws_secret_access_key)
    queue_url = os.environ['sqs_queue_url']
    # Authenticate once for the whole batch.
    authheaders = fn_auth_proctor(login_url, sessiondata)
    nucount = len(event['Records'])
    print("no of SQS messages is:")
    print(nucount)
    if nucount > 0:
        for message in event['Records']:
            print(message['body'])
            messagebody = message['body']
            receipt_handle = message['receiptHandle']
            print(type(messagebody))
            payload = json.loads(messagebody)
            print("new message payload is:")
            print(payload)
            # Forward the payload to the remote API ...
            fn_send_msg_api( url, payload, authheaders )
            # ... then delete the message so SQS does not redeliver it.
            response = sqs.delete_message(
                QueueUrl=queue_url,
                ReceiptHandle=receipt_handle
            )
            print('Received and deleted message: %s' % payload)
            print(response)
    else:
        print('Queue is empty')
    print("*"*30)
    print('End of the Lambda handler function')
    print("*"*30)
    return 0
|
[
"noreply@github.com"
] |
rajeshpednekar.noreply@github.com
|
0c10b24899012834b1ac521f5c9a3493e63f0a99
|
14ad7023d5440206f116f2c97db74d0b95c59b67
|
/findcountour.py
|
50dcbd94136c415d4b1339dd43eb6f2fc627e409
|
[] |
no_license
|
selmandridi/BlackscreenDetector
|
69ffcea6b6a131ceadaadae340b6f596644a2f56
|
d9525118f3286a208b7a5f72bf7b4dec2878371a
|
refs/heads/master
| 2020-12-04T05:28:54.740490
| 2020-01-03T17:19:00
| 2020-01-03T17:19:00
| 231,632,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
import cv2
import numpy as np
import time
import os
import datetime
import array
def sizeFiltering(contours, widthMin=100, heightMin=100, widthmax=200, heightmax=200):
    """Keep only contours whose min-area rectangle is strictly between
    (widthMin, heightMin) and (widthmax, heightmax).

    Args:
        contours: iterable of OpenCV contours.
        widthMin, heightMin, widthmax, heightmax: size bounds in pixels
            (previously hard-coded; defaults preserve old behavior).
    Returns:
        (filtered_contours, [0, 0, 0, 0]) -- the second value is kept for
        backward compatibility: in the original code the y/x/h/w locals
        were never updated, so it was always a list of zeros.
    """
    filtredContours = []
    for cnt in contours:
        # min-area (rotated) rect gives a tighter size estimate than the
        # axis-aligned bounding box.
        rect = cv2.minAreaRect(cnt)
        width = rect[1][0]
        height = rect[1][1]
        if widthMin < width < widthmax and heightMin < height < heightmax:
            filtredContours.append(cnt)
    return filtredContours, [0, 0, 0, 0]
# Grab a single frame from the default camera, threshold it, and draw the
# contours whose min-area rect passes the size window in sizeFiltering().
vidcap = cv2.VideoCapture(0)
ret,frame = vidcap.read()
if ret:
    imgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Fixed binary threshold at gray level 110.
    ret, thresh = cv2.threshold(imgray, 110, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # NOTE(review): rectCountour is always [0,0,0,0] and is never used.
    contours, rectCountour = sizeFiltering(contours)
    cv2.drawContours(frame, contours, -1, (0,255,0), 3)
    print(contours)
    cv2.imshow('image',frame)
    # Block until any key is pressed.
    cv2.waitKey(0)
vidcap.release()
|
[
"selman.dridi@comelit.it"
] |
selman.dridi@comelit.it
|
1e38c1f391c58cea34bcef59efc19ba38866231c
|
c452b7204d2f0817069874a57f3c8b0a0124b351
|
/main.py
|
54ce8a6880c3cb6d99cdf996945d5afa2cca80b3
|
[] |
no_license
|
SunsetGolfer/ConvertCSVFileDateFormat
|
1710ab49b7ffbc7ecb8debbd688aab289adfab23
|
93bf7eae43cd64cc7b3ae9c6d3af42500fa7de98
|
refs/heads/master
| 2023-02-14T01:00:28.009468
| 2021-01-09T23:37:43
| 2021-01-09T23:37:43
| 328,267,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
    """Print a greeting for *name* (PyCharm sample scaffold function)."""
    greeting = f'Hi, {name}'
    print(greeting)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
import csv
import tkinter as tk
from tkinter import filedialog
# Hide the root Tk window; only the native file-open dialog is wanted.
root = tk.Tk()
root.withdraw()
# Ask the user to choose the CSV file to convert.
file_path = filedialog.askopenfilename()
print('Reading: ',file_path)
def readmyfile(filename):
    """Read a 9-column calendar CSV and split it into per-column lists.

    Expected columns (in order): Subject, Start date, Start time, End date,
    End time, All-day event, Description, Location, Private.

    Args:
        filename: path of the CSV file to read.
    Returns:
        A 9-tuple of lists, one per column, in the order above.
    """
    # BUG FIX: the column lists were never initialized, so the first
    # .append() raised NameError; the unused 'dates'/'scores' locals
    # were removed.
    Subject = []
    Startdate = []
    Starttime = []
    Enddate = []
    Endtime = []
    Alldayevent = []
    Description = []
    Location = []
    Private = []
    with open(filename) as csvDataFile:
        csvReader = csv.reader(csvDataFile)
        for row in csvReader:
            Subject.append(row[0])
            Startdate.append(row[1])
            Starttime.append(row[2])
            Enddate.append(row[3])
            Endtime.append(row[4])
            Alldayevent.append(row[5])
            Description.append(row[6])
            Location.append(row[7])
            Private.append(row[8])
    return Subject, Startdate, Starttime, Enddate, Endtime, Alldayevent, Description, Location, Private
# NOTE(review): readmyfile() is never called, so Subject/Enddate are never
# defined at module level -- these two prints raise NameError as written.
print(Subject)
print(Enddate)
# Subject,Start date,Start time,End Date,End Time,All day Event,Description,Location,Private
# Subject, Startdate, Starttime, Enddate, Endtime, Alldayevent, Description, Location, Private
# Douglas Manfred Winni Udo ,08.06.2020,11:00:00,08.06.2020,13:00:00,FALSE,Douglas Manfred Winni Udo ,Borkstraße 17 - 48163 Münster,FALSE
|
[
"jweichelt@Jorgs-MacBook-Pro.local"
] |
jweichelt@Jorgs-MacBook-Pro.local
|
d7568161d832b4180e4a39bd6746103eeafd6a9b
|
240e245fbed52b54956bcc1f28d5f6522e95a3b2
|
/factorial/loader/work/work_break.py
|
ed01c5f242a7233382c535792260e81a9f6b282d
|
[
"MIT"
] |
permissive
|
hyper750/factorialhr
|
47b9a5741b1b297e4341cfeebf39236a1802467d
|
fe48f73c5dd5b1300347db56b8c1f4709c6c59ec
|
refs/heads/master
| 2022-08-11T16:35:30.355134
| 2021-04-06T07:18:48
| 2021-04-06T07:18:48
| 211,069,416
| 7
| 4
|
MIT
| 2021-04-06T07:18:49
| 2019-09-26T11:06:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
class WorkBreak:
    """A scheduled work break, e.g. "10:30"-"11:00".

    Callers may randomly shift the start/end by up to
    ``minutes_variation`` minutes in either direction.
    """

    def __init__(self, start_hour: str, end_hour: str, minutes_variation: int):
        self.start_hour = start_hour
        self.end_hour = end_hour
        self.minutes_variation = minutes_variation

    def get_start_hour(self) -> str:
        """Start of the break as "HH:MM", e.g. "10:30"."""
        return self.start_hour

    def get_end_hour(self) -> str:
        """End of the break as "HH:MM", e.g. "11:00"."""
        return self.end_hour

    def get_minutes_variation(self) -> int:
        """Maximum random shift in minutes applied to start and end.

        Eg: start "10:30", end "11:00", variation 15 allows anything
        between "10:15"-"10:45" and "10:45"-"11:15".
        """
        return self.minutes_variation

    def __repr__(self) -> str:
        start, end = self.get_start_hour(), self.get_end_hour()
        return f'{start} - {end} ~{self.get_minutes_variation()}m'
|
[
"raulmarquespalmer_150@hotmail.com"
] |
raulmarquespalmer_150@hotmail.com
|
5d65c0cba54693d041f010966fda9b671c8d2e76
|
2d9e1cb8e1877b3fe1725a202649136ce5f7bd1e
|
/helllo/migrations/0001_initial.py
|
610671ef02bf7ed85b828f1a3ac8f31d26b90519
|
[] |
no_license
|
soobinlee1713/apiproject
|
7a62a993beabbc4da63c93a51c341d824a526b79
|
15c9ca1318f5c20865ef94c400fc0daec7c9a0c8
|
refs/heads/master
| 2020-08-31T16:32:31.756580
| 2019-10-31T09:55:16
| 2019-10-31T09:55:16
| 218,733,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
# Generated by Django 2.2.3 on 2019-10-31 06:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Essay model (title, body, author FK)."""
    initial = True
    dependencies = [
        # The user model is needed for the Essay.author foreign key.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Essay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('body', models.TextField()),
                # Defaults to user pk=1; essays are deleted with their author.
                ('author', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"noreply@github.com"
] |
soobinlee1713.noreply@github.com
|
4235979874e5ac83d7a5b5e6c24d6cc982bbab4e
|
8846b8c64aa97a8abd0940e6ed3a527286e3009b
|
/02_RPS/action.py
|
73817f9cc82c5489051ebe340da739ce2ff10e17
|
[] |
no_license
|
stianbm/tdt4113
|
cfc0e514a4dd926975b533c5b41c5678032bf278
|
ce84dc221e1cf938dc931c622dbc1cb5ecfa55f6
|
refs/heads/master
| 2020-07-09T13:48:31.935715
| 2019-09-23T13:45:14
| 2019-09-23T13:45:14
| 203,986,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
"""Contains the class for possible actions"""
class Action:
    """One move in rock-paper-scissors, comparable with '<' (loses-to)."""
    ROCK = "ROCK"
    PAPER = "PAPER"
    SCISSORS = "SCISSORS"
    # The closed set of legal moves.
    action_types = [ROCK, PAPER, SCISSORS]

    def __init__(self, action_type):
        """Store *action_type*, rejecting anything outside action_types."""
        if action_type not in self.action_types:
            raise Exception(
                'action_type {} is not in action_types'.format(action_type))
        self.action_type = action_type

    def __lt__(self, other):
        """Return True when *self* is beaten by *other*."""
        beaten_by = {
            self.ROCK: self.PAPER,
            self.PAPER: self.SCISSORS,
            self.SCISSORS: self.ROCK,
        }
        return other.action_type == beaten_by[self.action_type]
|
[
"stian.malmanger@gmail.com"
] |
stian.malmanger@gmail.com
|
322a61c28286dc7c947a815b8b30a972a7d41373
|
671022d37e94aa53b85094b4084ad7afd31132c9
|
/website/shortcuts.py
|
878442a3100a808b9fdc0253e01357b496376e5f
|
[
"MIT"
] |
permissive
|
gmferise/tree-model-demo
|
b1aa6724571425573cd5884503020c60272fb0d4
|
6305e033c1683651ddbfb45d9987c4f6461efdd0
|
refs/heads/main
| 2023-08-15T08:18:31.271625
| 2021-10-07T14:35:30
| 2021-10-07T14:35:30
| 414,634,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
from django.shortcuts import (
render as original_render,
redirect,
resolve_url,
HttpResponseRedirect,
)
def render(request, template, context=None, content_type=None, status=None, using=None):
    """Wrap django's render(), automatically injecting 'request' (and
    'user' when the requester is authenticated) into the template context."""
    auto_context = {'request': request}
    if request.user.is_authenticated:
        auto_context['user'] = request.user
    merged = {**auto_context, **context} if context else auto_context
    return original_render(request, template, merged, content_type, status, using)
def redirect_back(request):
    """Redirect to the referring page, or to 'home' when no Referer header."""
    referer = request.headers.get('Referer')
    if referer is None:
        return redirect('home')
    return redirect(referer)
def redirect_hash(to, hash, *args, **kwargs):
    """Redirect to *to* (resolved with args/kwargs) plus a '#hash' fragment."""
    target = resolve_url(to, *args, **kwargs)
    return HttpResponseRedirect('{}#{}'.format(target, hash))
|
[
"gmferise@gmail.com"
] |
gmferise@gmail.com
|
865269fec11f88e22a6a9fe77818422add57c727
|
1b85d130cca2334bde6135c27c66491f1225be5c
|
/models/models.py
|
a62bf5e7db1d3357ff8dbb2801274ef71c2948b5
|
[
"Apache-2.0"
] |
permissive
|
ksh/gpitraining
|
a1120d2aecd3089594bf0862dccf2b26fc509aed
|
1873356c3eddb9c082ef67fc5452676b603cfa02
|
refs/heads/master
| 2021-01-19T10:23:00.035891
| 2013-07-03T13:05:35
| 2013-07-03T13:05:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,453
|
py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core data model classes."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import appengine_config
from config import ConfigProperty
from counters import PerfCounter
from entities import BaseEntity
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
# The default amount of time to cache the items for in memcache.
DEFAULT_CACHE_TTL_SECS = 60 * 5
# Global memcache controls.
CAN_USE_MEMCACHE = ConfigProperty(
'gcb_can_use_memcache', bool, (
'Whether or not to cache various objects in memcache. For production '
'this value should be on to enable maximum performance. For '
'development this value should be off so you can see your changes to '
'course content instantaneously.'),
appengine_config.PRODUCTION_MODE)
# performance counters
CACHE_PUT = PerfCounter(
'gcb-models-cache-put',
'A number of times an object was put into memcache.')
CACHE_HIT = PerfCounter(
'gcb-models-cache-hit',
'A number of times an object was found in memcache.')
CACHE_MISS = PerfCounter(
'gcb-models-cache-miss',
'A number of times an object was not found in memcache.')
CACHE_DELETE = PerfCounter(
'gcb-models-cache-delete',
'A number of times an object was deleted from memcache.')
class MemcacheManager(object):
    """Class that consolidates all memcache operations."""
    @classmethod
    def get(cls, key):
        """Gets an item from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return None
        value = memcache.get(key)
        # NOTE(review): a cached falsy value (None, 0, '') is counted as a
        # miss here, although it is still returned below.
        if value:
            CACHE_HIT.inc()
        else:
            CACHE_MISS.inc()
        return value
    @classmethod
    def set(cls, key, value, ttl=DEFAULT_CACHE_TTL_SECS):
        """Sets an item in memcache if memcache is enabled."""
        if CAN_USE_MEMCACHE.value:
            CACHE_PUT.inc()
            memcache.set(key, value, ttl)
    @classmethod
    def delete(cls, key):
        """Deletes an item from memcache if memcache is enabled."""
        if CAN_USE_MEMCACHE.value:
            CACHE_DELETE.inc()
            memcache.delete(key)
class Student(BaseEntity):
    """Student profile, keyed by email, write-through cached in memcache."""
    enrolled_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    user_id = db.StringProperty(indexed=False)
    name = db.StringProperty(indexed=False)
    is_enrolled = db.BooleanProperty(indexed=False)
    # Each of the following is a string representation of a JSON dict.
    scores = db.TextProperty(indexed=False)
    def put(self):
        """Do the normal put() and also add the object to memcache."""
        result = super(Student, self).put()
        # Cache under the entity key name (the student's email).
        MemcacheManager.set(self.key().name(), self)
        return result
    def delete(self):
        """Do the normal delete() and also remove the object from memcache."""
        super(Student, self).delete()
        MemcacheManager.delete(self.key().name())
    @classmethod
    def get_by_email(cls, email):
        """Datastore lookup; entities are keyed by the UTF-8 email."""
        return Student.get_by_key_name(email.encode('utf8'))
    @classmethod
    def get_enrolled_student_by_email(cls, email):
        """Return the student only if enrolled AND whitelisted, else None."""
        student = MemcacheManager.get(email)
        valid = ValidStudent.get_valid(email)
        if not student:
            # Cache miss: load from the datastore and repopulate the cache.
            # NOTE(review): a None datastore result gets cached too.
            student = Student.get_by_email(email)
            MemcacheManager.set(email, student)
        if student and student.is_enrolled and valid:
            return student
        else:
            return None
    @classmethod
    def rename_current(cls, new_name):
        """Gives student a new name."""
        user = users.get_current_user()
        if not user:
            raise Exception('No current user.')
        # Silently ignores an empty/None new_name.
        if new_name:
            student = Student.get_by_email(user.email())
            student.name = new_name
            student.put()
    @classmethod
    def set_enrollment_status_for_current(cls, is_enrolled):
        """Changes student enrollment status."""
        user = users.get_current_user()
        if not user:
            raise Exception('No current user.')
        student = Student.get_by_email(user.email())
        student.is_enrolled = is_enrolled
        student.put()
# list of google users who can attend this course
class ValidStudent(BaseEntity):
    """Whitelist entry for a Google user allowed to attend this course."""
    id = db.IntegerProperty()
    profile = db.StringProperty()

    @classmethod
    def get_valid(cls, email):
        """Look up the whitelist entry keyed by the lower-cased email."""
        normalized = email.lower().encode('utf8')
        return ValidStudent.get_by_key_name(normalized)
class Profile(BaseEntity):
    """A named profile; 'auth' is a JSON dict stored as a string.
    NOTE(review): its schema is defined elsewhere -- confirm before use."""
    name = db.StringProperty()
    # string representation of a JSON dict.
    auth = db.TextProperty(indexed=False)
class EventEntity(BaseEntity):
    """A generic recorded event.

    'source' names the place in the code where the event was recorded,
    'user_id' identifies the actor who triggered it, and 'data' is a JSON
    dict whose format depends on the event type and is defined elsewhere.
    """
    recorded_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    source = db.StringProperty(indexed=False)
    user_id = db.StringProperty(indexed=False)
    # String representation of a JSON dict.
    data = db.TextProperty(indexed=False)

    @classmethod
    def record(cls, source, user, data):
        """Persist a new event to the datastore."""
        evt = EventEntity()
        evt.source = source
        evt.user_id = user.user_id()
        evt.data = data
        evt.put()
class StudentAnswersEntity(BaseEntity):
    """Student answers to the assessments."""
    # NOTE(review): not auto_now -- callers must set updated_on explicitly.
    updated_on = db.DateTimeProperty(indexed=True)
    # Each of the following is a string representation of a JSON dict.
    data = db.TextProperty(indexed=False)
    # @classmethod
    # def get_by_key(cls,key):
    #     return StudentAnswers.get_by_key(key.Name)
|
[
"carlos.c.saraiva@gmail.com"
] |
carlos.c.saraiva@gmail.com
|
53a6dd1de601d8b9357c3613cb0a1c93f4bddf8e
|
daec47a34daebac2c00fe70f1dfa550dad6e0e9a
|
/0x0B-python-input_output/2-read_lines.py
|
e369be8069e6a4d7f1291aea84088fb783b25d0a
|
[] |
no_license
|
dbravo0/holbertonschool-higher_level_programming
|
70816f575279d08720f3c480797f0ce7ca3bdd40
|
99d98f42c1f53991aa49ed262faa2b49f5203151
|
refs/heads/master
| 2020-09-29T04:40:11.829269
| 2020-09-25T00:32:21
| 2020-09-25T00:32:21
| 207,944,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
#!/usr/bin/python3
def read_lines(filename="", nb_lines=0):
    """Print lines of a UTF-8 text file.

    When nb_lines is zero or negative the whole file is printed;
    otherwise only the first nb_lines lines are printed.
    """
    with open(filename, encoding="utf-8") as stream:
        if nb_lines <= 0:
            print(stream.read(), end="")
        else:
            for _ in range(nb_lines):
                print(stream.readline(), end="")
|
[
"stevenbravobel@gmail.com"
] |
stevenbravobel@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.