blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3761f78794812b198b0c941d3b519e3150947df5
|
88dde3533d8283452b780fd120d8da94a2231876
|
/lab1/encoders.py
|
e670db5d8e2da9dfbcba5f7d55128dd8dacf95cf
|
[] |
no_license
|
jando16407/ControlofMobileRobots
|
035e654cd556138321eb8c442a8c8f535edbcfdb
|
10806892b812296bb5fc83124094a802596760b4
|
refs/heads/master
| 2020-03-28T17:45:50.229926
| 2018-09-16T01:49:10
| 2018-09-16T01:49:10
| 148,688,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,469
|
py
|
import time
import RPi.GPIO as GPIO
import signal
import sys
import tty
import termios
# Pins that the encoders are connected to
LENCODER = 17
RENCODER = 18
# Running tick counters, incremented by the GPIO edge-detect callbacks below
# and reset/read by resetCounts()/getCounts().
left = 0
right = 0
# declare tuple
#counts = ("Left count : ", str(left), ", RIght count : ", str(right));
# The det_ch method will determine which key has been pressed
def det_ch():
    """Read one character from stdin in raw mode and return it.

    The terminal attributes are saved up front and restored in all cases,
    so a failed read cannot leave the terminal in raw mode.
    """
    fd = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_attrs)
    return ch
# This function is called when the left encoder detects a rising edge signal.
def onLeftEncode(pin):
    """Edge-detect callback for the left encoder: count one tick, refresh display."""
    global left
    left = left + 1
    display_ticks()
# This function is called when the right encoder detects a rising edge signal.
def onRightEncode(pin):
    """Edge-detect callback for the right encoder: count one tick, refresh display."""
    global right
    right = right + 1
    display_ticks()
# This function displays current number of left and right ticks
def display_ticks():
    """Rewrite the current stdout line with both tick counts (no newline)."""
    status = "\rLeft encoder ticked! " + str(left) + " : Right encoder ticked! " + str(right)
    sys.stdout.write(status)
    sys.stdout.flush()
# This function is called when Ctrl+C is pressed.
# It's intended for properly exiting the program.
def ctrlC(signum, frame):
    """SIGINT handler: print the final tick counts, release GPIO pins, exit."""
    print(str(left))
    print("\n", str(right))
    print("\nExiting")
    # Release the GPIO pins before exiting so the next run can claim them.
    GPIO.cleanup()
    exit()
# This function resets the tick count
def resetCounts():
    """Zero both tick counters (prints a trace line for debugging)."""
    global left, right
    print("RESETCOUNTS CALLED")
    left = 0
    right = 0
# This function return the tuple of tick counts
def getCounts():
    """Return the current tick counts as a (left, right) tuple of strings."""
    print("GETCOUNTS CALLED\n")
    return str(left), str(right)
# Attach the Ctrl+C signal interrupt
signal.signal(signal.SIGINT, ctrlC)
# Set the pin numbering scheme to the numbering shown on the robot itself.
GPIO.setmode(GPIO.BCM)
# Set encoder pins as input
# Also enable pull-up resistors on the encoder pins
# This ensures a clean 0V and 3.3V is always outputted from the encoders.
GPIO.setup(LENCODER, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(RENCODER, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Attach a rising edge interrupt to the encoder pins
GPIO.add_event_detect(LENCODER, GPIO.RISING, onLeftEncode)
GPIO.add_event_detect(RENCODER, GPIO.RISING, onRightEncode)
# Prevent the program from exiting by adding a looping delay.
# Keyboard command loop: g = print counts, r = reset counts, c = clean exit.
# NOTE(review): det_ch() blocks waiting for a key, so the sleep(1) only adds
# latency between keystrokes — confirm the delay is intentional.
while True:
    time.sleep(1)
    key_input = det_ch()
    if key_input == "g":
        print(getCounts())
    elif key_input == "r":
        resetCounts()
    elif key_input == "c":
        GPIO.cleanup()
        print("Exiting")
        exit()
|
[
"jando16407c@gmail.com"
] |
jando16407c@gmail.com
|
b10a7d8f06eea9e1ba7d3bd0fad062389e44d262
|
096ecb1ae95b3bcfd002480415a04c5191e01419
|
/ttbc.py
|
3973b4f37e4719b025aea65d6b2b2d4d96188280
|
[
"Apache-2.0"
] |
permissive
|
iamaris/pystock
|
7f3c955977c662e384f23f3113c0c5ac8fc3f4ff
|
864f8beba0cf50a7a4f52bf7c67e83fdfd774a9c
|
refs/heads/master
| 2021-01-25T07:08:14.604437
| 2015-09-07T17:17:48
| 2015-09-07T17:17:48
| 16,152,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
# Counts, over ~5000 business days of SPY data, how often two consecutive
# down days (Open > Close) are followed by a third down day vs. an up day.
# NOTE(review): Python 2 script; pandas.io.data was removed from pandas
# (replaced by the pandas-datareader package) and the Yahoo endpoint has
# changed — confirm before running on a modern stack.
import urllib
import pandas as pd
import pandas.io.data as web
#from datetime import datetime
import matplotlib.pyplot as plt
import pickle as pk
from pandas.tseries.offsets import BDay
# pd.datetime is an alias for datetime.datetime
#today = pd.datetime.today()
import time
#time.sleep(5) # delays for 5 seconds
#today = pd.datetime.today()
today = pd.datetime.today()
yesterday = today - BDay(5000)
p = web.DataReader("SPY", "yahoo",yesterday,today)
#p = web.DataReader("YELP", "yahoo",yesterday,today)
#print p.head()
#print p.tail()
#print len(p)
up = 0
down = 0
N = 0
# Scan each day i: if day i and day i+1 both closed below their open,
# classify day i+2 as a continuation ("down") or a reversal ("up").
for i in range(len(p)-3):
    if p.at[p.index[i],'Open'] > p.at[p.index[i],'Close']:
        if p.at[p.index[i+1],'Open'] > p.at[p.index[i+1],'Close']:
            N = N + 1
            if p.at[p.index[i+2],'Open'] >= p.at[p.index[i+2],'Close']:
                down = down + 1
            else:
                up = up + 1
# NOTE(review): if no down-down pair is found, N == 0 and the two ratio
# prints below raise ZeroDivisionError — confirm acceptable for this script.
print "total = ",N
print "up = ",up,"(",float(up)/N,")"
print "down = ",down,"(",float(down)/N,")"
|
[
"aris@cmu.edu"
] |
aris@cmu.edu
|
39ce07857213f8a281675528cad52ce7943c5bf1
|
2bcf18252fa9144ece3e824834ac0e117ad0bdf3
|
/zpt/trunk/site-packages/zpt/_pytz/zoneinfo/US/Indiana_minus_Starke.py
|
f06a4f85e29494d5c49f070ed6153788987fe72d
|
[
"MIT",
"ZPL-2.1"
] |
permissive
|
chadwhitacre/public
|
32f65ba8e35d38c69ed4d0edd333283a239c5e1d
|
0c67fd7ec8bce1d8c56c7ff3506f31a99362b502
|
refs/heads/master
| 2021-05-10T14:32:03.016683
| 2010-05-13T18:24:20
| 2010-05-13T18:24:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,743
|
py
|
'''tzinfo timezone information for US/Indiana_minus_Starke.'''
from zpt._pytz.tzinfo import DstTzInfo
from zpt._pytz.tzinfo import memorized_datetime as d
from zpt._pytz.tzinfo import memorized_ttinfo as i
class Indiana_minus_Starke(DstTzInfo):
'''US/Indiana_minus_Starke timezone definition. See datetime.tzinfo for details'''
zone = 'US/Indiana_minus_Starke'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,8,0,0),
d(1918,10,27,7,0,0),
d(1919,3,30,8,0,0),
d(1919,10,26,7,0,0),
d(1942,2,9,8,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,7,0,0),
d(1947,4,27,8,0,0),
d(1947,9,28,7,0,0),
d(1948,4,25,8,0,0),
d(1948,9,26,7,0,0),
d(1949,4,24,8,0,0),
d(1949,9,25,7,0,0),
d(1950,4,30,8,0,0),
d(1950,9,24,7,0,0),
d(1951,4,29,8,0,0),
d(1951,9,30,7,0,0),
d(1952,4,27,8,0,0),
d(1952,9,28,7,0,0),
d(1953,4,26,8,0,0),
d(1953,9,27,7,0,0),
d(1954,4,25,8,0,0),
d(1954,9,26,7,0,0),
d(1955,4,24,8,0,0),
d(1955,10,30,7,0,0),
d(1956,4,29,8,0,0),
d(1956,10,28,7,0,0),
d(1957,4,28,8,0,0),
d(1957,9,29,7,0,0),
d(1958,4,27,8,0,0),
d(1958,9,28,7,0,0),
d(1959,4,26,8,0,0),
d(1959,10,25,7,0,0),
d(1960,4,24,8,0,0),
d(1960,10,30,7,0,0),
d(1961,4,30,8,0,0),
d(1961,10,29,7,0,0),
d(1962,4,29,8,0,0),
d(1963,10,27,7,0,0),
d(1967,4,30,8,0,0),
d(1967,10,29,7,0,0),
d(1968,4,28,8,0,0),
d(1968,10,27,7,0,0),
d(1969,4,27,8,0,0),
d(1969,10,26,7,0,0),
d(1970,4,26,8,0,0),
d(1970,10,25,7,0,0),
d(1971,4,25,8,0,0),
d(1971,10,31,7,0,0),
d(1972,4,30,8,0,0),
d(1972,10,29,7,0,0),
d(1973,4,29,8,0,0),
d(1973,10,28,7,0,0),
d(1974,1,6,8,0,0),
d(1974,10,27,7,0,0),
d(1975,2,23,8,0,0),
d(1975,10,26,7,0,0),
d(1976,4,25,8,0,0),
d(1976,10,31,7,0,0),
d(1977,4,24,8,0,0),
d(1977,10,30,7,0,0),
d(1978,4,30,8,0,0),
d(1978,10,29,7,0,0),
d(1979,4,29,8,0,0),
d(1979,10,28,7,0,0),
d(1980,4,27,8,0,0),
d(1980,10,26,7,0,0),
d(1981,4,26,8,0,0),
d(1981,10,25,7,0,0),
d(1982,4,25,8,0,0),
d(1982,10,31,7,0,0),
d(1983,4,24,8,0,0),
d(1983,10,30,7,0,0),
d(1984,4,29,8,0,0),
d(1984,10,28,7,0,0),
d(1985,4,28,8,0,0),
d(1985,10,27,7,0,0),
d(1986,4,27,8,0,0),
d(1986,10,26,7,0,0),
d(1987,4,5,8,0,0),
d(1987,10,25,7,0,0),
d(1988,4,3,8,0,0),
d(1988,10,30,7,0,0),
d(1989,4,2,8,0,0),
d(1989,10,29,7,0,0),
d(1990,4,1,8,0,0),
d(1990,10,28,7,0,0),
d(1991,4,7,8,0,0),
d(1991,10,27,7,0,0),
d(2006,4,2,7,0,0),
d(2006,10,29,6,0,0),
d(2007,3,11,7,0,0),
d(2007,11,4,6,0,0),
d(2008,3,9,7,0,0),
d(2008,11,2,6,0,0),
d(2009,3,8,7,0,0),
d(2009,11,1,6,0,0),
d(2010,3,14,7,0,0),
d(2010,11,7,6,0,0),
d(2011,3,13,7,0,0),
d(2011,11,6,6,0,0),
d(2012,3,11,7,0,0),
d(2012,11,4,6,0,0),
d(2013,3,10,7,0,0),
d(2013,11,3,6,0,0),
d(2014,3,9,7,0,0),
d(2014,11,2,6,0,0),
d(2015,3,8,7,0,0),
d(2015,11,1,6,0,0),
d(2016,3,13,7,0,0),
d(2016,11,6,6,0,0),
d(2017,3,12,7,0,0),
d(2017,11,5,6,0,0),
d(2018,3,11,7,0,0),
d(2018,11,4,6,0,0),
d(2019,3,10,7,0,0),
d(2019,11,3,6,0,0),
d(2020,3,8,7,0,0),
d(2020,11,1,6,0,0),
d(2021,3,14,7,0,0),
d(2021,11,7,6,0,0),
d(2022,3,13,7,0,0),
d(2022,11,6,6,0,0),
d(2023,3,12,7,0,0),
d(2023,11,5,6,0,0),
d(2024,3,10,7,0,0),
d(2024,11,3,6,0,0),
d(2025,3,9,7,0,0),
d(2025,11,2,6,0,0),
d(2026,3,8,7,0,0),
d(2026,11,1,6,0,0),
d(2027,3,14,7,0,0),
d(2027,11,7,6,0,0),
d(2028,3,12,7,0,0),
d(2028,11,5,6,0,0),
d(2029,3,11,7,0,0),
d(2029,11,4,6,0,0),
d(2030,3,10,7,0,0),
d(2030,11,3,6,0,0),
d(2031,3,9,7,0,0),
d(2031,11,2,6,0,0),
d(2032,3,14,7,0,0),
d(2032,11,7,6,0,0),
d(2033,3,13,7,0,0),
d(2033,11,6,6,0,0),
d(2034,3,12,7,0,0),
d(2034,11,5,6,0,0),
d(2035,3,11,7,0,0),
d(2035,11,4,6,0,0),
d(2036,3,9,7,0,0),
d(2036,11,2,6,0,0),
d(2037,3,8,7,0,0),
d(2037,11,1,6,0,0),
]
_transition_info = [
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CWT'),
i(-18000,3600,'CPT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,0,'EST'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
]
Indiana_minus_Starke = Indiana_minus_Starke()
|
[
"chad@zetaweb.com"
] |
chad@zetaweb.com
|
5916126d6b2b7816ef167915954b5ddf8cb45d8f
|
54b238d50baee4f483c0690d77d106ebc30a4c0a
|
/tests/test_space_time/test_type_helpers.py
|
cb9dab6c31fed82502d58d190cd6d40c0ba43739
|
[
"MIT"
] |
permissive
|
David-Durst/aetherling
|
4a5d663a98428769834e8ebbf7e9b63cb7788319
|
91bcf0579608ccbf7d42a7bddf90ccd4257d6571
|
refs/heads/master
| 2021-08-16T01:48:20.476097
| 2020-06-19T19:25:46
| 2020-06-19T19:25:46
| 114,405,958
| 10
| 1
|
MIT
| 2021-03-29T17:44:39
| 2017-12-15T19:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,731
|
py
|
from aetherling.space_time.space_time_types import *
from aetherling.space_time.type_helpers import *
def test_same_type():
    """Identical flat types: both diffs are Tombstone, shared_inner is the whole type."""
    x = ST_TSeq(3, 0, ST_Int())
    y = ST_TSeq(3, 0, ST_Int())
    shared_diff = get_shared_and_diff_subtypes(x,y)
    assert shared_diff.diff_input == ST_Tombstone()
    assert shared_diff.diff_output == ST_Tombstone()
    assert shared_diff.shared_inner == x
    assert shared_diff.shared_outer == ST_Tombstone()
def test_same_type_nested():
    """Identical nested types: still no diff and the full type is shared.

    NOTE(review): ST_Int is passed without parentheses here (the class, not an
    instance) unlike the other tests — confirm this is intentional.
    """
    x = ST_TSeq(3, 0, ST_SSeq(4, ST_Int))
    y = ST_TSeq(3, 0, ST_SSeq(4, ST_Int))
    shared_diff = get_shared_and_diff_subtypes(x,y)
    assert shared_diff.diff_input == ST_Tombstone()
    assert shared_diff.diff_output == ST_Tombstone()
    assert shared_diff.shared_inner == x
    assert shared_diff.shared_outer == ST_Tombstone()
def test_diff_no_outer_same():
    """Types differing at the outermost layers: diffs are reported, no shared outer."""
    x = ST_SSeq(6, ST_TSeq(3, 0, ST_SSeq(4, ST_Int)))
    y = ST_TSeq(3, 0, ST_SSeq(6, ST_SSeq(4, ST_Int)))
    shared_diff = get_shared_and_diff_subtypes(x,y)
    assert shared_diff.diff_input == ST_SSeq(6, ST_TSeq(3, 0, ST_Tombstone()))
    assert shared_diff.diff_output == ST_TSeq(3, 0, ST_SSeq(6, ST_Tombstone()))
    assert shared_diff.shared_inner == x.t.t
    assert shared_diff.shared_outer == ST_Tombstone()
def test_diff_with_outer_same():
    """A matching outer TSeq wrapper is factored out into shared_outer."""
    x = ST_TSeq(9, 2, ST_SSeq(6, ST_TSeq(3, 0, ST_SSeq(4, ST_Int))))
    y = ST_TSeq(9, 2, ST_TSeq(3, 0, ST_SSeq(6, ST_SSeq(4, ST_Int))))
    shared_diff = get_shared_and_diff_subtypes(x,y)
    assert shared_diff.diff_input == ST_SSeq(6, ST_TSeq(3, 0, ST_Tombstone()))
    assert shared_diff.diff_output == ST_TSeq(3, 0, ST_SSeq(6, ST_Tombstone()))
    assert shared_diff.shared_inner == x.t.t.t
    assert shared_diff.shared_outer == ST_TSeq(9, 2, ST_Tombstone())
def test_diff_with_partially_diff_inner():
    """Diff spans several middle layers; shared outer and inner are both extracted."""
    x = ST_TSeq(9, 2, ST_SSeq(6, ST_SSeq(7, ST_TSeq(3, 0, ST_SSeq(4, ST_Int)))))
    y = ST_TSeq(9, 2, ST_TSeq(3, 0, ST_SSeq(7, ST_SSeq(6, ST_SSeq(4, ST_Int)))))
    shared_diff = get_shared_and_diff_subtypes(x,y)
    assert shared_diff.diff_input == ST_SSeq(6, ST_SSeq(7, ST_TSeq(3, 0, ST_Tombstone())))
    assert shared_diff.diff_output == ST_TSeq(3, 0, ST_SSeq(7, ST_SSeq(6, ST_Tombstone())))
    assert shared_diff.shared_inner == x.t.t.t.t
    assert shared_diff.shared_outer == ST_TSeq(9, 2, ST_Tombstone())
def test_diff_depths():
    """Same element type at different TSeq nesting depths: whole wrappers are diffs."""
    x = ST_TSeq(4, 12, ST_Int())
    y = ST_TSeq(2, 2, ST_TSeq(2, 2, ST_Int()))
    shared_diff = get_shared_and_diff_subtypes(x,y)
    assert shared_diff.diff_input == ST_TSeq(4, 12, ST_Tombstone())
    assert shared_diff.diff_output == ST_TSeq(2, 2, ST_TSeq(2, 2, ST_Tombstone()))
    assert shared_diff.shared_inner == ST_Int()
    assert shared_diff.shared_outer == ST_Tombstone()
|
[
"davidbdurst@gmail.com"
] |
davidbdurst@gmail.com
|
a95c8e9ee9b4167c2ef845c2453d3b7c424026ec
|
4df63456e42591b5858c29986089b84ecac01fea
|
/tracker-visual/read_cluster.py
|
3739ddc9ef84e61e5178f922a6728920b78a1a44
|
[
"MIT"
] |
permissive
|
will-fawcett/trackerSW
|
6f71a8ab9e2013e439e1e24326c1cc59f7be1e7f
|
fc097b97539d0b40a15e1d6e112f4048cb4122b4
|
refs/heads/master
| 2021-04-25T21:39:17.321302
| 2018-06-14T13:31:13
| 2018-06-14T13:31:13
| 109,404,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,055
|
py
|
"""
Quick script to dump FCCSW tracker validation data to plaintext spacepoints.
Requires podio, fcc-edm in pythonpath and ld-library-path.
"""
from EventStore import EventStore
import numpy as np
import sys
filename = sys.argv[1]
basefilename = filename.replace(".root", "")
events = EventStore([filename])
print 'number of events: ', len(events)
pos_b = []
ids_b = []
pos_e = []
ids_e = []
barrel_ids = []
nEventsMax = 100000
#nEventsMax = 1
for i, store in enumerate(events):
# Only run over 100000 events
if i > nEventsMax:
break
# <class 'ROOT.fcc.PositionedTrackHitCollection'>
clusters = store.get('positionedHits')
#print clusters
layerIdmax = 0
for c in clusters:
#c is of type <class 'ROOT.fcc.PositionedTrackHit'>
# <class 'ROOT.fcc.Point'>
cor = c.position()
layerId = (c.cellId() / 32) %32
if layerId > 20:
print c.cellId(), c.cellId() % 32, c.cellId() / 32, (c.cellId() / 32) %32
if layerId > layerIdmax:
layerIdmax = layerId
if (c.cellId() % 32) == 0:
# Select only the triplet layers
#if (c.cellId() / 32) %32 == 20 or (c.cellId() / 32) %32 == 21 (c.cellId() / 32) %32 == 22:
if (c.cellId() / 32) %32 == 1: # or (c.cellId() / 32) %32 == 21 (c.cellId() / 32) %32 == 22:
pass
pos_b.append([cor.x, cor.y, cor.z])
ids_b.append([c.bits(), c.cellId()])
#print c.cellId() % 32, np.sqrt(cor.x**2 + cor.y**2)
#else:
# pos_e.append([cor.x, cor.y, cor.z])
# ids_e.append([c.bits(), c.cellId()])
pos_e = np.array(pos_e)
ids_e = np.array(ids_e)
pos_b = np.array(pos_b)
ids_b = np.array(ids_b)
print "number of endcap hits: ", len(pos_e)
print "number of barrel hits: ", len(pos_b)
np.savetxt(basefilename + 'hit_positions_e.dat', pos_e)
np.savetxt(basefilename + 'hit_ids_e.dat', ids_e, fmt="%i")
np.savetxt(basefilename + 'hit_positions_b.dat', pos_b)
np.savetxt(basefilename + 'hit_ids_b.dat', ids_b, fmt="%i")
|
[
"william.fawcett@cern.ch"
] |
william.fawcett@cern.ch
|
d4efd4910a49742035a254ce235200d20ebfb4ca
|
e77027cb5ffa4754a5ac1cf8d1cd1e2035710cfe
|
/VectorComplexLibrary.py
|
75f3378e79e5fcb97984b6c1edbeea7dd22f7ffb
|
[] |
no_license
|
wilmer-rodriguez-r/LibraryComplexVectors
|
36e5fc8bb19219cee3db327ace7c2406c61aead3
|
9fa83a829aaeb1e869376e4000389cf9b2ca941f
|
refs/heads/master
| 2023-03-02T17:04:03.423077
| 2021-02-09T15:07:34
| 2021-02-09T15:07:34
| 336,027,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,704
|
py
|
# Librería de funciones para vectores y matrices complejos
import numpy as np
def auxiliar(matriz_vector):
    """Return the column count: len(first row) for a matrix, 1 for a flat vector."""
    try:
        return len(matriz_vector[0])
    except TypeError:
        # First element is a scalar, so the argument is a vector.
        return 1
def auxiliar_1(matriz_vector):
    """Return True when the argument is a matrix (first element has a length)."""
    try:
        len(matriz_vector[0])
    except TypeError:
        return False
    return True
def auxiliar_2(num_complex):
    """Return the complex conjugate of num_complex as a complex instance.

    The original implementation rebuilt the number as an "a+bj" string and
    re-parsed it with complex(), which is fragile (special float values,
    formatting edge cases). complex(...).conjugate() is the direct,
    numerically identical equivalent; wrapping in complex() first keeps the
    original behavior of always returning a complex for real inputs.
    """
    return complex(num_complex).conjugate()
def vectorSumaComplex(vector_1, vector_2):
    """Element-wise vector sum; vector_1 is updated in place, a copy is returned."""
    size = len(vector_1)
    for idx in range(size):
        vector_1[idx] += vector_2[idx]
    return vector_1[:]
def vectorInverComplex(vector):
    """Negate every entry in place (additive inverse); return a copy."""
    for idx in range(len(vector)):
        vector[idx] = -vector[idx]
    return vector[:]
def vectorMultEsComplex(vector, escalar):
    """Scale every entry by escalar in place; return a copy of the result."""
    vector[:] = [escalar * entry for entry in vector]
    return vector[:]
def matrizSumaComplex(vector_1, vector_2):
    """Element-wise matrix sum; vector_1 is updated in place, a copy is returned."""
    n_cols = len(vector_1[0])
    for r, row in enumerate(vector_1):
        for c in range(n_cols):
            row[c] += vector_2[r][c]
    return vector_1[:]
def matrizInverComplex(matriz):
    """Return a new matrix with every entry negated (input is not mutated)."""
    n_rows, n_cols = len(matriz), len(matriz[0])
    negada = []
    for r in range(n_rows):
        negada.append([-1 * matriz[r][c] for c in range(n_cols)])
    return negada
def matrizMultEsComplex(matriz, escalar):
    """Return matriz scaled entry-wise by escalar (input is not mutated)."""
    n_rows, n_cols = len(matriz), len(matriz[0])
    escalada = []
    for r in range(n_rows):
        escalada.append([escalar * matriz[r][c] for c in range(n_cols)])
    return escalada
def trasMatrizVector(matriz_vector):
    """Transpose a matrix or a vector.

    * matrix in -> transposed matrix out; a single-row result is flattened
      to a plain vector (matching the original helper's behavior);
    * flat vector in -> n x 1 column matrix out.
    """
    # Inline the column-count probe the module's `auxiliar` helpers perform.
    try:
        n_cols = len(matriz_vector[0])
        es_matriz = True
    except TypeError:
        n_cols = 1
        es_matriz = False
    n_rows = len(matriz_vector)
    if es_matriz:
        volteada = [[matriz_vector[r][c] for r in range(n_rows)] for c in range(n_cols)]
        return volteada[0] if len(volteada) == 1 else volteada
    return [[matriz_vector[r]] for r in range(n_rows)]
def conjMatrizVector(matriz_vector):
    """Conjugate every entry in place; return a shallow copy of the container.

    Entries always come back as complex instances, matching the original
    helper that rebuilt each conjugate as a complex number.
    """
    def _conjugado(z):
        # Numerically identical to the module's string-based auxiliar_2 helper.
        return complex(z).conjugate()

    try:
        ancho = len(matriz_vector[0])
    except TypeError:
        ancho = None
    if ancho is None:
        # Flat vector: conjugate each scalar.
        for idx in range(len(matriz_vector)):
            matriz_vector[idx] = _conjugado(matriz_vector[idx])
    else:
        # Matrix: conjugate entry by entry, using the first row's width.
        for fila in matriz_vector:
            for c in range(ancho):
                fila[c] = _conjugado(fila[c])
    return matriz_vector[:]
def adjuntMatrizVector(matriz_vector):
    """Adjoint (conjugate transpose) of a matrix or vector."""
    conjugada = conjMatrizVector(matriz_vector)
    return trasMatrizVector(conjugada)
def multMatrices(matriz_a, matriz_b):
    """Matrix product of matriz_a and matriz_b.

    Returns the product matrix, or the (Spanish) error string the module
    uses when the inner dimensions do not match.
    """
    def _ancho(m):
        # Column count; 1 when the argument is a flat vector.
        try:
            return len(m[0])
        except TypeError:
            return 1

    filas_a, filas_b = len(matriz_a), len(matriz_b)
    cols_a, cols_b = _ancho(matriz_a), _ancho(matriz_b)
    if filas_b != cols_a:
        return 'Las matrices no se pueden operar'
    producto = [[0] * cols_b for _ in range(filas_a)]
    for r in range(filas_a):
        for c in range(cols_b):
            acumulado = 0
            for k in range(cols_a):
                acumulado += matriz_a[r][k] * matriz_b[k][c]
            producto[r][c] = acumulado
    return producto
def accion(matriz, vector):
    """Apply matriz to vector (matrix-vector product) and return the new vector."""
    n_cols = len(matriz[0])
    resultado = []
    for fila in matriz:
        acumulado = 0
        for k in range(n_cols):
            acumulado += fila[k] * vector[k]
        resultado.append(acumulado)
    return resultado
def dotProduct(vector_a, vector_b):
    """Inner product of two vectors.

    Real vectors are multiplied directly. If any entry of vector_a is not
    int()-able (e.g. complex), the first vector is conjugated first, giving
    the Hermitian inner product.
    """
    size = len(vector_a)
    try:
        # int() raises TypeError on complex entries, routing to the
        # Hermitian branch below.
        for entry in vector_a:
            int(entry)
        total = 0
        for k in range(size):
            total += vector_a[k] * vector_b[k]
        return total
    except TypeError:
        conjugado = conjMatrizVector(vector_a[:])
        total = 0
        for k in range(size):
            total += complex(conjugado[k]) * complex(vector_b[k])
        return total
def normVector(vector):
    """Euclidean norm sqrt(<v, v>) for real or complex vectors.

    The original tried int() on each entry and then returned the same
    expression in both the real and the complex branch, so a single
    return is behaviorally equivalent.
    """
    return (dotProduct(vector[:], vector[:])) ** (1 / 2)
def disVectors(vector_a, vector_b):
    """Distance between two vectors: ||b - a|| (note: vector_a is negated in place)."""
    diferencia = vectorSumaComplex(vectorInverComplex(vector_a), vector_b)
    return normVector(diferencia)
def matrizHermitian(matriz):
    """Return True when matriz equals its own conjugate transpose."""
    adjunta = conjMatrizVector(trasMatrizVector(matriz[:]))
    return adjunta == matriz
def matrizUnitary(matriz):
    """Return True when matriz times its adjoint is the identity matrix."""
    size = len(matriz)
    identidad = [[(1 if r == c else 0) for c in range(size)] for r in range(size)]
    producto = multMatrices(matriz, conjMatrizVector(trasMatrizVector(matriz[:])))
    return producto == identidad
def tensorProduct(matriz_vector_0,matriz_vector_1):
    # Tensor (Kronecker-style) product: every entry of the first operand is
    # replaced in place by that entry times the whole second operand, so the
    # result is a nested structure built inside matriz_vector_0.
    # columns_0 == 1 flags the first operand as a flat vector; `valor` says
    # whether the second operand is a matrix.
    # NOTE(review): auxiliar() also returns 1 for an n x 1 matrix, which would
    # route such a matrix through the vector branches here — confirm callers
    # only pass flat vectors or wider matrices.
    rows_0, columns_0, valor = len(matriz_vector_0), auxiliar(matriz_vector_0), auxiliar_1(matriz_vector_1)
    if columns_0 == 1 and valor:
        # vector (x) matrix: each scalar scales a fresh copy of the matrix.
        for j in range(rows_0):
            matriz_vector_0[j] = matrizMultEsComplex(matriz_vector_1[:], matriz_vector_0[j])
    elif columns_0 == 1:
        # vector (x) vector.
        for j in range(rows_0):
            matriz_vector_0[j] = vectorMultEsComplex(matriz_vector_1[:], matriz_vector_0[j])
    elif columns_0 != 1 and valor:
        # matrix (x) matrix.
        for j in range(rows_0):
            for k in range(columns_0):
                matriz_vector_0[j][k] = matrizMultEsComplex(matriz_vector_1[:], matriz_vector_0[j][k])
    else:
        # matrix (x) vector.
        for j in range(rows_0):
            for k in range(columns_0):
                matriz_vector_0[j][k] = vectorMultEsComplex(matriz_vector_1[:], matriz_vector_0[j][k])
    return matriz_vector_0[:]
|
[
"wilmer.rodriguez-r@mail.escuelaing.edu.co"
] |
wilmer.rodriguez-r@mail.escuelaing.edu.co
|
79f4e2ff02c9db62970001cd7f0a7386496d11e2
|
e8ea8326756378702052f5a785fab02e92abb21f
|
/Bluebook/Data/preprocess.py
|
9f07a132942a8c356a14b0de37f030f0fc828ee7
|
[] |
no_license
|
zyx061212/Kaggle
|
a6111464b3acf9e276a98844f65cd27852619f44
|
6051051882d41ea1bcb6930a9d1a9d0525fc869a
|
refs/heads/master
| 2020-03-26T15:48:59.545030
| 2015-02-23T22:23:35
| 2015-02-23T22:23:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,365
|
py
|
#!/usr/bin/env python
import util
from collections import defaultdict
import numpy as np
import pandas as pd
import csv_io
import math
def get_date_dataframe(date_column):
    """Expand a datetime column into SaleYear/SaleMonth/SaleDay features.

    The returned DataFrame is aligned on date_column's index so it can be
    joined back onto the source frame.
    """
    features = {
        "SaleYear": [d.year for d in date_column],
        "SaleMonth": [d.month for d in date_column],
        "SaleDay": [d.day for d in date_column],
    }
    return pd.DataFrame(features, index=date_column.index)
def preprocess():
    # One-hot encodes a fixed set of categorical columns from the Bluebook
    # train/test frames and streams the rows to CSV files (train1/test1),
    # alongside the target prices and SalesID keys. Python 2 script.
    train, test = util.get_train_test_df()
    columns = set(train.columns)
    #columns.remove("SalesID")
    #columns.remove("SalePrice")
    #columns.remove("saledate")
    #train_fea = get_date_dataframe(train["saledate"])
    #test_fea = get_date_dataframe(test["saledate"])
    #parseColumns = ["UsageBand"]
    # Categorical columns to one-hot encode.
    # NOTE(review): "Tip_ControlCoupler" looks like a missing comma between
    # "Tip_Control" and "Coupler" — confirm against the dataset's columns.
    parseColumns = [ "UsageBand","fiBaseModel","fiModelSeries","fiModelDescriptor","ProductSize","ProductGroup","Drive_System","Enclosure","Forks","Pad_Type","Ride_Control","Stick","Transmission","Turbocharged","Blade_Extension","Blade_Width","Enclosure_Type","Engine_Horsepower","Hydraulics","Pushblock","Ripper","Scarifier","Tip_ControlCoupler","Coupler_System","Grouser_Tracks","Hydraulics_Flow","Track_Type","Thumb","Pattern_Changer","Grouser_Type","Backhoe_Mounting","Blade_Type","Travel_Controls","Differential_Type","Steering_Controls"]
    #"auctioneerID","state","ProductGroupDesc",,"fiSecondaryDesc"
    # this is redundant "fiModelDesc", and has too many options...
    # Q, AC, AL AR AS
    colDict = {}
    for col in parseColumns:
        colDict[col] = []
    colMap = {}
    notInTest = []
    # Pass 1: collect the set of observed values for each categorical column
    # (union of train and test, NaN filled with 0).
    for index, col in enumerate(train.columns):
        print "MAP:", col, index
        colMap[col] = index
        if col in parseColumns:
            #print "start"
            s = set(x for x in train[col].fillna(0)) # 0 if x == "" or not isinstance(x, float) else x
            s.update(x for x in test[col].fillna(0)) # math.isnan(x)
            colDict[col] = s
            print s
            if col == "fiBaseModel":
                a = set(x for x in train[col].fillna(0))
                b = set(x for x in test[col].fillna(0))
                print "fiBaseModel"
                print
                print
                # found 11 type in test not in train
                print [x for x in b if x not in a]
                print
                print
                # found several hundred in train that are not in test, try dropping these...
                print [x for x in a if x not in b]
                notInTest = [x for x in a if x not in b]
    # Pass 2: emit one-hot rows for the training set, appending per row.
    SaleIDArr = []
    trainSalePriceArr = []
    count = 0
    csv_io.delete_file("train1.csv")
    for row in train.iterrows():
        trainSalePrice = []
        rowVals = row[1].fillna(0)
        newSet = []
        newRow = []
        # NOTE(review): this KEEPS only rows whose base model is train-only,
        # which contradicts the "try dropping these" comment above — the
        # condition looks inverted (`in` vs `not in`); confirm intent.
        if rowVals["fiBaseModel"] not in notInTest:
            continue
        trainSalePrice.append(rowVals["SalePrice"])
        trainSalePriceArr.append(trainSalePrice)
        SaleID = []
        SaleID.append(rowVals["SalesID"])
        SaleIDArr.append(SaleID)
        # One-hot: a 1 for the matching category value, 0 otherwise.
        for col in colDict.keys():
            for val in colDict[col]:
                if val == rowVals[col] :
                    newRow.append(1)
                else:
                    newRow.append(0)
        #newRow.append(rowVals["YearMade"]) # need to calculate age, sale date minus year
        newRow.append(rowVals["MachineHoursCurrentMeter"])
        count += 1
        if count % 10000 == 0:
            print "Count", count
        newSet.append(newRow)
        csv_io.write_delimited_file("train1.csv", newSet ,header=None, delimiter=",", filemode="a")
    csv_io.write_delimited_file("target.csv", trainSalePriceArr ,header=None, delimiter=",")
    csv_io.write_delimited_file("train_salesID.csv", SaleIDArr ,header=None, delimiter=",")
    # -------------------------------------------
    # Pass 3: same encoding for the test set (no filter, no target column).
    SaleIDArr = []
    count = 0
    csv_io.delete_file("test1.csv")
    for row in test.iterrows():
        rowVals = row[1].fillna(0)
        newSet = []
        newRow = []
        SaleID = []
        SaleID.append(rowVals["SalesID"])
        SaleIDArr.append(SaleID)
        for col in colDict.keys():
            for val in colDict[col]:
                if val == rowVals[col] :
                    newRow.append(1)
                else:
                    newRow.append(0)
        #newRow.append(rowVals["YearMade"]) # need to calculate age, sale date minus year
        newRow.append(rowVals["MachineHoursCurrentMeter"])
        count += 1
        if count % 10000 == 0:
            print "Count", count
        newSet.append(newRow)
        csv_io.write_delimited_file("test1.csv", newSet ,header=None, delimiter=",", filemode="a")
    csv_io.write_delimited_file("test_salesID.csv", SaleIDArr ,header=None, delimiter=",")
if __name__=="__main__":
preprocess()
|
[
"mb16@hood.edu"
] |
mb16@hood.edu
|
ee27313bde085575df70e1d42550c376748fe931
|
08a9dc04e6defa9dc9378bfbfbe0b6185af6a86a
|
/manager/views.py
|
78b92fee93ead9c43d6d958d58f90642c7277c7f
|
[] |
no_license
|
Felicity-jt/50.008-Project-1
|
8ecc63d2302b2eaa4060f4c900d7fed2e958927c
|
960b5e57a39bfda1c31653798c23ddc051a2ff19
|
refs/heads/master
| 2021-08-24T00:40:27.886634
| 2017-12-07T09:14:12
| 2017-12-07T09:14:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
from json import loads
from django.http import Http404
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.views.decorators.http import require_POST
from common.db import sql, page
from common.utils import pagination
from common.messages import NOT_STAFF
from common.decorators import json_response
@require_POST
@json_response
def new(request):
    """Add a new item to the inventory (staff only).

    Expects a JSON body with a "name" key; returns {} on success, or None
    (serialized by json_response) when the body is missing/invalid.
    """
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    s = """INSERT INTO item (id, name)
    VALUES (DEFAULT, %s)"""
    try:
        rq = loads(request.body)
        # sanitize before inserting
        values = (rq['name'],)
    except (ValueError, KeyError):
        return None
    sql(s, *values)
    return {}
@json_response
def stock(request, item_id):
    """Get or update current stock for one item (staff only).

    GET returns {id, price, quantity}; POST with a JSON {"quantity": n}
    body updates the quantity first, then returns the refreshed row.
    Raises Http404 when the item does not exist.
    """
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    q = 'SELECT id, price, quantity FROM item WHERE id = %s'
    if request.method == 'POST':
        # update price and/or quantity from post data
        # NOTE(review): only quantity is actually updated — the comment above
        # mentions price too; confirm whether price updates were intended.
        s = """UPDATE item SET
        quantity = %s
        WHERE id = %s"""
        try:
            rq = loads(request.body)
            # sanitize before inserting
            values = (int(rq['quantity']),)
        except (ValueError, KeyError):
            return None
        sql(s, *values, item_id)
    try:
        r = sql(q, item_id)[0]
    except IndexError:
        raise Http404
    return {
        'id': r[0],
        'price': r[1],
        'quantity': r[2],
    }
@json_response
def stats(request, entity, year, month):
    """Return purchase totals per item for the given month (staff only).

    `entity` is validated against a whitelist but the query itself always
    aggregates purchase_item rows.
    NOTE(review): the query ignores `entity` (company/creator produce the
    same item-level result) — confirm whether per-entity queries were meant
    to be added later.
    """
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    if entity not in ('item', 'company', 'creator'):
        raise Http404
    q = """SELECT item_id, SUM(quantity) AS total FROM purchase_item
    INNER JOIN purchase p ON p.id = purchase_item.purchase_id
    WHERE YEAR(p.made_on) = %s AND MONTH(p.made_on) = %s
    GROUP BY item_id"""
    # Paginate/sort via the shared helpers; default sort is total descending.
    pg = pagination(request)
    pg['sort'].append('-total')
    return sql(q + page(**pg), year, month)
|
[
"kwokshungit@gmail.com"
] |
kwokshungit@gmail.com
|
2db64ec71071efedc4af263b7ea7732384d88f4b
|
25795fef5bc22080645b8e549da924cb7025526f
|
/app/forms.py
|
4d2f3b6ae9d04ec86f7da110268f4c6cf9b3152d
|
[] |
no_license
|
ryanermita/best-route
|
a0a68c4c9572ce73161109f198e301aaa307aab1
|
3480fd356e20d27cdd85397fea5960f4e69b4c44
|
refs/heads/master
| 2021-01-18T14:59:01.940788
| 2013-12-21T03:43:59
| 2013-12-21T03:43:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
from flask_wtf import Form
from wtforms import TextField, TextAreaField
from wtforms.validators import DataRequired
class SearchForm(Form):
    """Route search: departure and destination, both required."""
    departure_place = TextField('departure_place', validators = [DataRequired()])
    destination_place = TextField('destination_place', validators = [DataRequired()])
class SignUpForm(Form):
    """New-account form: username, email, and password, all required."""
    username = TextField('username', validators = [DataRequired()])
    email = TextField('email', validators = [DataRequired()])
    pwd = TextField('pwd', validators = [DataRequired()])
class LoginForm(Form):
    """Login form: username and password, both required.

    NOTE(review): pwd uses TextField rather than a PasswordField, so the
    browser will not mask input — confirm intended.
    """
    username = TextField('username', validators = [DataRequired()])
    pwd = TextField('pwd', validators = [DataRequired()])
class AddRouteForm(Form):
    """Form for suggesting a new route (free-text description, required)."""
    suggested_route = TextAreaField('suggested_route', validators = [DataRequired()])
|
[
"ryanermita@gmail.com"
] |
ryanermita@gmail.com
|
1247de660c728a5f32d9fabdfa9b10b2947e596d
|
3a9cee71d23cfa7176e29beb9a9e509674c0bfd9
|
/6.2.0_201312041840_apitraderapi_linux64/test3/test4/scanConf.py
|
ecb31285adb61571a54465ea5e8bb06730d53a71
|
[] |
no_license
|
fuckfuckfuckfuck/download
|
4041fba8de20a267aa001e363a005098bb93fb0e
|
e67c22dab648f9bc1ebca444785401f63f0cc2dc
|
refs/heads/master
| 2021-01-10T11:11:10.708805
| 2015-12-05T13:18:52
| 2015-12-05T13:18:52
| 47,456,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
import sys
import re
#~ config = {
#~ 'user' : 'botel',
#~ 'password': '12345678',
#~ 'database' : 'mktinfo',
#~ 'host' : 'rdsryfjb2vqrni2.mysql.rds.aliyuncs.com',
#~ 'charset' : 'utf8',
#~ 'raise_on_warnings' : True
#~ }
Handler = {
'user' : str,
'password' : str,
'database' : str,
'host' : str,
'charset' : str,
'port' : str,
'raise_on_warnings' : bool
}
dir = '/home/dell/Downloads/6.2.0_201312041840_apitraderapi_linux64/test3/test4/'
def scanParam(fileStr):
#~ reader = open(sys.argv[1], 'r')
# fileStr = dir + fileStr
reader = open(fileStr,'r')
param = {}
for line in reader:
tmp = re.search('[%\[\]]',line) #
if tmp:
print tmp.group()
continue
line = line.split('#')[0].strip()
if not line:
continue
name, value = line.split()
if name not in Handler:
print >> sys.stderr, 'Bad parameter name "%s"' % name
sys.exit(1)
if name in param:
print >> sys.stderr, 'Duplicate parameter name "%s"' % name
sys.exit(1)
conversion_func = Handler[name]
param[name] = conversion_func(value)
return param
# file = 'conf'
# scanedParams = scanParam(dir + file)
|
[
"wchongyang@foxmail.com"
] |
wchongyang@foxmail.com
|
dc5c45a0d9c44cc2b7f3bb35d23198d69dbd715b
|
e73430ff79c2d9325037bd07d0dbdf9cc7c93d84
|
/main.py
|
96f07d51325003fb5c336ddcad218beaa913f938
|
[] |
no_license
|
GianBkk/python_les3_syntra
|
8232b6773f6d82ff819c1a6a9823d21cd471a5b0
|
1abb1c57c862fbc29d1a1b7245ede151e24b15f2
|
refs/heads/master
| 2023-07-10T23:32:59.059183
| 2021-08-20T22:38:45
| 2021-08-20T22:38:45
| 398,345,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
# Hello Word
print('Hello World7')
|
[
"gian200308@gmail.com"
] |
gian200308@gmail.com
|
bfa4051b7daa99e35be4c69d94d185b37ba84f1b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_035/ch37_2020_03_25_14_04_04_120072.py
|
a165e2f3c23563f7b30d6684819d8aca366bc2cd
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
Senha = True
resposta = input("Qual é a senha")
while Senha:
if resposta=="desisto":
Senha = False
else:
Senha = True
return resposta
print("Você acertou a senha!")
|
[
"you@example.com"
] |
you@example.com
|
dc2cf902fa9faa242b7a3024eb996183b816db91
|
e48bc8299aa342a74edf09945fac10f812130604
|
/www/transwarp/web.py
|
4e20b8e6d16fb71c05e55ca8baeab9c140d4ab96
|
[] |
no_license
|
zhongsihang/blog-python-app
|
af4be1221baccf501e91e3dc7e39e3a0abdb2b21
|
bc5eb1bef4298ff4ceff7d3aafc4d235651a27ab
|
refs/heads/master
| 2021-01-21T01:34:58.196036
| 2015-09-18T06:22:36
| 2015-09-18T06:22:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48,867
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
这是一个简单的, 轻量级的, WSGI兼容(Web Server Gateway Interface)的web 框架
WSGI概要:
工作方式: WSGI server -----> WSGI 处理函数
作用:将HTTP原始的请求、解析、响应 这些交给WSGI server 完成,
让我们专心用Python编写Web业务,也就是 WSGI 处理函数
所以WSGI 是HTTP的一种高级封装。
例子:
def application(environ, start_response):
method = environ['REQUEST_METHOD']
path = environ['PATH_INFO']
if method=='GET' and path=='/':
return handle_home(environ, start_response)
if method=='POST' and path='/signin':
return handle_signin(environ, start_response)
设计web框架的原因:
1. WSGI提供的接口虽然比HTTP接口高级了不少,但和Web App的处理逻辑比,还是比较低级,
我们需要在WSGI接口之上能进一步抽象,让我们专注于用一个函数处理一个URL,
至于URL到函数的映射,就交给Web框架来做。
设计web框架接口:
1. URL路由: 用于URL 到 处理函数的映射
2. URL拦截: 用于根据URL做权限检测
3. 视图: 用于HTML页面生成
4. 数据模型: 用于抽取数据(见models模块)
5. 事物数据:request数据和response数据的封装(thread local)
"""
import types, os, re, cgi, sys, time, datetime, functools, mimetypes, threading, logging, traceback, urllib
from db import Dict
import utils
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
#################################################################
# 实现事物数据接口, 实现request 数据和response数据的存储,
# 是一个全局ThreadLocal对象
#################################################################
ctx = threading.local()
_RE_RESPONSE_STATUS = re.compile(r'^\d\d\d(\ [\w\ ]+)?$')
_HEADER_X_POWERED_BY = ('X-Powered-By', 'transwarp/1.0')
# 用于时区转换
_TIMEDELTA_ZERO = datetime.timedelta(0)
_RE_TZ = re.compile('^([\+\-])([0-9]{1,2})\:([0-9]{1,2})$')
# response status
_RESPONSE_STATUSES = {
# Informational
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
# Successful
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used',
# Redirection
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
# Client Error
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: "I'm a teapot",
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
# Server Error
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended',
}
_RESPONSE_HEADERS = (
'Accept-Ranges',
'Age',
'Allow',
'Cache-Control',
'Connection',
'Content-Encoding',
'Content-Language',
'Content-Length',
'Content-Location',
'Content-MD5',
'Content-Disposition',
'Content-Range',
'Content-Type',
'Date',
'ETag',
'Expires',
'Last-Modified',
'Link',
'Location',
'P3P',
'Pragma',
'Proxy-Authenticate',
'Refresh',
'Retry-After',
'Server',
'Set-Cookie',
'Strict-Transport-Security',
'Trailer',
'Transfer-Encoding',
'Vary',
'Via',
'Warning',
'WWW-Authenticate',
'X-Frame-Options',
'X-XSS-Protection',
'X-Content-Type-Options',
'X-Forwarded-Proto',
'X-Powered-By',
'X-UA-Compatible',
)
class UTC(datetime.tzinfo):
"""
tzinfo 是一个基类,用于给datetime对象分配一个时区
使用方式是 把这个子类对象传递给datetime.tzinfo属性
传递方法有2种:
1. 初始化的时候传入
datetime(2009,2,17,19,10,2,tzinfo=tz0)
2. 使用datetime对象的 replace方法传入,从新生成一个datetime对象
datetime.replace(tzinfo= tz0)
>>> tz0 = UTC('+00:00')
>>> tz0.tzname(None)
'UTC+00:00'
>>> tz8 = UTC('+8:00')
>>> tz8.tzname(None)
'UTC+8:00'
>>> tz7 = UTC('+7:30')
>>> tz7.tzname(None)
'UTC+7:30'
>>> tz5 = UTC('-05:30')
>>> tz5.tzname(None)
'UTC-05:30'
>>> from datetime import datetime
>>> u = datetime.utcnow().replace(tzinfo=tz0)
>>> l1 = u.astimezone(tz8)
>>> l2 = u.replace(tzinfo=tz8)
>>> d1 = u - l1
>>> d2 = u - l2
>>> d1.seconds
0
>>> d2.seconds
28800
"""
def __init__(self, utc):
utc = str(utc.strip().upper())
mt = _RE_TZ.match(utc)
if mt:
minus = mt.group(1) == '-'
h = int(mt.group(2))
m = int(mt.group(3))
if minus:
h, m = (-h), (-m)
self._utcoffset = datetime.timedelta(hours=h, minutes=m)
self._tzname = 'UTC%s' % utc
else:
raise ValueError('bad utc time zone')
def utcoffset(self, dt):
"""
表示与标准时区的 偏移量
"""
return self._utcoffset
def dst(self, dt):
"""
Daylight Saving Time 夏令时
"""
return _TIMEDELTA_ZERO
def tzname(self, dt):
"""
所在时区的名字
"""
return self._tzname
def __str__(self):
return 'UTC timezone info object (%s)' % self._tzname
__repr__ = __str__
UTC_0 = UTC('+00:00')
# 用于异常处理
class _HttpError(Exception):
"""
HttpError that defines http error code.
>>> e = _HttpError(404)
>>> e.status
'404 Not Found'
"""
def __init__(self, code):
"""
Init an HttpError with response code.
"""
super(_HttpError, self).__init__()
self.status = '%d %s' % (code, _RESPONSE_STATUSES[code])
self._headers = None
def header(self, name, value):
"""
添加header, 如果header为空则 添加powered by header
"""
if not self._headers:
self._headers = [_HEADER_X_POWERED_BY]
self._headers.append((name, value))
@property
def headers(self):
"""
使用setter方法实现的 header属性
"""
if hasattr(self, '_headers'):
return self._headers
return []
def __str__(self):
return self.status
__repr__ = __str__
class _RedirectError(_HttpError):
"""
RedirectError that defines http redirect code.
>>> e = _RedirectError(302, 'http://www.apple.com/')
>>> e.status
'302 Found'
>>> e.location
'http://www.apple.com/'
"""
def __init__(self, code, location):
"""
Init an HttpError with response code.
"""
super(_RedirectError, self).__init__(code)
self.location = location
def __str__(self):
return '%s, %s' % (self.status, self.location)
__repr__ = __str__
class HttpError(object):
"""
HTTP Exceptions
"""
@staticmethod
def badrequest():
"""
Send a bad request response.
>>> raise HttpError.badrequest()
Traceback (most recent call last):
...
_HttpError: 400 Bad Request
"""
return _HttpError(400)
@staticmethod
def unauthorized():
"""
Send an unauthorized response.
>>> raise HttpError.unauthorized()
Traceback (most recent call last):
...
_HttpError: 401 Unauthorized
"""
return _HttpError(401)
@staticmethod
def forbidden():
"""
Send a forbidden response.
>>> raise HttpError.forbidden()
Traceback (most recent call last):
...
_HttpError: 403 Forbidden
"""
return _HttpError(403)
@staticmethod
def notfound():
"""
Send a not found response.
>>> raise HttpError.notfound()
Traceback (most recent call last):
...
_HttpError: 404 Not Found
"""
return _HttpError(404)
@staticmethod
def conflict():
"""
Send a conflict response.
>>> raise HttpError.conflict()
Traceback (most recent call last):
...
_HttpError: 409 Conflict
"""
return _HttpError(409)
@staticmethod
def internalerror():
"""
Send an internal error response.
>>> raise HttpError.internalerror()
Traceback (most recent call last):
...
_HttpError: 500 Internal Server Error
"""
return _HttpError(500)
@staticmethod
def redirect(location):
"""
Do permanent redirect.
>>> raise HttpError.redirect('http://www.itranswarp.com/')
Traceback (most recent call last):
...
_RedirectError: 301 Moved Permanently, http://www.itranswarp.com/
"""
return _RedirectError(301, location)
@staticmethod
def found(location):
"""
Do temporary redirect.
>>> raise HttpError.found('http://www.itranswarp.com/')
Traceback (most recent call last):
...
_RedirectError: 302 Found, http://www.itranswarp.com/
"""
return _RedirectError(302, location)
@staticmethod
def seeother(location):
"""
Do temporary redirect.
>>> raise HttpError.seeother('http://www.itranswarp.com/')
Traceback (most recent call last):
...
_RedirectError: 303 See Other, http://www.itranswarp.com/
>>> e = HttpError.seeother('http://www.itranswarp.com/seeother?r=123')
>>> e.location
'http://www.itranswarp.com/seeother?r=123'
"""
return _RedirectError(303, location)
_RESPONSE_HEADER_DICT = dict(zip(map(lambda x: x.upper(), _RESPONSE_HEADERS), _RESPONSE_HEADERS))
class Request(object):
"""
请求对象, 用于获取所有http请求信息。
"""
def __init__(self, environ):
"""
environ wsgi处理函数里面的那个 environ
wsgi server调用 wsgi 处理函数时传入的
包含了用户请求的所有数据
"""
self._environ = environ
def _parse_input(self):
"""
将通过wsgi 传入过来的参数,解析成一个字典对象 返回
比如: Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
这里解析的就是 wsgi.input 对象里面的字节流
"""
def _convert(item):
if isinstance(item, list):
return [utils.to_unicode(i.value) for i in item]
if item.filename:
return MultipartFile(item)
return utils.to_unicode(item.value)
fs = cgi.FieldStorage(fp=self._environ['wsgi.input'], environ=self._environ, keep_blank_values=True)
inputs = dict()
for key in fs:
inputs[key] = _convert(fs[key])
return inputs
def _get_raw_input(self):
"""
将从wsgi解析出来的 数据字典,添加为Request对象的属性
然后 返回该字典
"""
if not hasattr(self, '_raw_input'):
self._raw_input = self._parse_input()
return self._raw_input
def __getitem__(self, key):
"""
实现通过键值访问Request对象里面的数据,如果该键有多个值,则返回第一个值
如果键不存在,这会 raise KyeError
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r['a']
u'1'
>>> r['c']
u'ABC'
>>> r['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> b = '----WebKitFormBoundaryQQ3J8kPsjFpTmqNz'
>>> pl = ['--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Scofield', '--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Lincoln', '--%s' % b, 'Content-Disposition: form-data; name=\\"file\\"; filename=\\"test.txt\\"', 'Content-Type: text/plain\\n', 'just a test', '--%s' % b, 'Content-Disposition: form-data; name=\\"id\\"\\n', '4008009001', '--%s--' % b, '']
>>> payload = '\\n'.join(pl)
>>> r = Request({'REQUEST_METHOD':'POST', 'CONTENT_LENGTH':str(len(payload)), 'CONTENT_TYPE':'multipart/form-data; boundary=%s' % b, 'wsgi.input':StringIO(payload)})
>>> r.get('name')
u'Scofield'
>>> r.gets('name')
[u'Scofield', u'Lincoln']
>>> f = r.get('file')
>>> f.filename
u'test.txt'
>>> f.file.read()
'just a test'
"""
r = self._get_raw_input()[key]
if isinstance(r, list):
return r[0]
return r
def get(self, key, default=None):
"""
实现了字典里面的get功能
和上面的__getitem__一样(request[key]),但如果没有找到key,则返回默认值。
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r.get('a')
u'1'
>>> r.get('empty')
>>> r.get('empty', 'DEFAULT')
'DEFAULT'
"""
r = self._get_raw_input().get(key, default)
if isinstance(r, list):
return r[0]
return r
def gets(self, key):
'''
Get multiple values for specified key.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r.gets('a')
[u'1']
>>> r.gets('c')
[u'ABC', u'XYZ']
>>> r.gets('empty')
Traceback (most recent call last):
...
KeyError: 'empty'
'''
r = self._get_raw_input()[key]
if isinstance(r, list):
return r[:]
return [r]
def input(self, **kw):
"""
返回一个由传入的数据和从environ里取出的数据 组成的Dict对象,Dict对象的定义 见db模块
Get input as dict from request, fill dict using provided default value if key not exist.
i = ctx.request.input(role='guest')
i.role ==> 'guest'
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> i = r.input(x=2008)
>>> i.a
u'1'
>>> i.b
u'M M'
>>> i.c
u'ABC'
>>> i.x
2008
>>> i.get('d', u'100')
u'100'
>>> i.x
2008
"""
copy = Dict(**kw)
raw = self._get_raw_input()
for k, v in raw.iteritems():
copy[k] = v[0] if isinstance(v, list) else v
return copy
def get_body(self):
"""
从HTTP POST 请求中取得 body里面的数据,返回为一个str对象
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('<xml><raw/>')})
>>> r.get_body()
'<xml><raw/>'
"""
fp = self._environ['wsgi.input']
return fp.read()
@property
def remote_addr(self):
"""
Get remote addr. Return '0.0.0.0' if cannot get remote_addr.
>>> r = Request({'REMOTE_ADDR': '192.168.0.100'})
>>> r.remote_addr
'192.168.0.100'
"""
return self._environ.get('REMOTE_ADDR', '0.0.0.0')
@property
def document_root(self):
"""
Get raw document_root as str. Return '' if no document_root.
>>> r = Request({'DOCUMENT_ROOT': '/srv/path/to/doc'})
>>> r.document_root
'/srv/path/to/doc'
"""
return self._environ.get('DOCUMENT_ROOT', '')
@property
def query_string(self):
"""
Get raw query string as str. Return '' if no query string.
>>> r = Request({'QUERY_STRING': 'a=1&c=2'})
>>> r.query_string
'a=1&c=2'
>>> r = Request({})
>>> r.query_string
''
"""
return self._environ.get('QUERY_STRING', '')
@property
def environ(self):
"""
Get raw environ as dict, both key, value are str.
>>> r = Request({'REQUEST_METHOD': 'GET', 'wsgi.url_scheme':'http'})
>>> r.environ.get('REQUEST_METHOD')
'GET'
>>> r.environ.get('wsgi.url_scheme')
'http'
>>> r.environ.get('SERVER_NAME')
>>> r.environ.get('SERVER_NAME', 'unamed')
'unamed'
"""
return self._environ
@property
def request_method(self):
"""
Get request method. The valid returned values are 'GET', 'POST', 'HEAD'.
>>> r = Request({'REQUEST_METHOD': 'GET'})
>>> r.request_method
'GET'
>>> r = Request({'REQUEST_METHOD': 'POST'})
>>> r.request_method
'POST'
"""
return self._environ['REQUEST_METHOD']
@property
def path_info(self):
"""
Get request path as str.
>>> r = Request({'PATH_INFO': '/test/a%20b.html'})
>>> r.path_info
'/test/a b.html'
"""
return urllib.unquote(self._environ.get('PATH_INFO', ''))
@property
def host(self):
"""
Get request host as str. Default to '' if cannot get host..
>>> r = Request({'HTTP_HOST': 'localhost:8080'})
>>> r.host
'localhost:8080'
"""
return self._environ.get('HTTP_HOST', '')
def _get_headers(self):
"""
从environ里 取得HTTP_开通的 header
"""
if not hasattr(self, '_headers'):
hdrs = {}
for k, v in self._environ.iteritems():
if k.startswith('HTTP_'):
# convert 'HTTP_ACCEPT_ENCODING' to 'ACCEPT-ENCODING'
hdrs[k[5:].replace('_', '-').upper()] = v.decode('utf-8')
self._headers = hdrs
return self._headers
@property
def headers(self):
"""
获取所有的header, setter实现的属性
Get all HTTP headers with key as str and value as unicode. The header names are 'XXX-XXX' uppercase.
>>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
>>> H = r.headers
>>> H['ACCEPT']
u'text/html'
>>> H['USER-AGENT']
u'Mozilla/5.0'
>>> L = H.items()
>>> L.sort()
>>> L
[('ACCEPT', u'text/html'), ('USER-AGENT', u'Mozilla/5.0')]
"""
return dict(**self._get_headers())
def header(self, header, default=None):
"""
获取指定的header的值
Get header from request as unicode, return None if not exist, or default if specified.
The header name is case-insensitive such as 'USER-AGENT' or u'content-Type'.
>>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
>>> r.header('User-Agent')
u'Mozilla/5.0'
>>> r.header('USER-AGENT')
u'Mozilla/5.0'
>>> r.header('Accept')
u'text/html'
>>> r.header('Test')
>>> r.header('Test', u'DEFAULT')
u'DEFAULT'
"""
return self._get_headers().get(header.upper(), default)
def _get_cookies(self):
"""
从environ里取出cookies字符串,并解析成键值对 组成的字典
"""
if not hasattr(self, '_cookies'):
cookies = {}
cookie_str = self._environ.get('HTTP_COOKIE')
if cookie_str:
for c in cookie_str.split(';'):
pos = c.find('=')
if pos > 0:
cookies[c[:pos].strip()] = utils.unquote(c[pos+1:])
self._cookies = cookies
return self._cookies
@property
def cookies(self):
"""
setter 以Dict对象返回cookies
Return all cookies as dict. The cookie name is str and values is unicode.
>>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
>>> r.cookies['A']
u'123'
>>> r.cookies['url']
u'http://www.example.com/'
"""
return Dict(**self._get_cookies())
def cookie(self, name, default=None):
"""
获取指定的cookie
Return specified cookie value as unicode. Default to None if cookie not exists.
>>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
>>> r.cookie('A')
u'123'
>>> r.cookie('url')
u'http://www.example.com/'
>>> r.cookie('test')
>>> r.cookie('test', u'DEFAULT')
u'DEFAULT'
"""
return self._get_cookies().get(name, default)
class Response(object):
def __init__(self):
self._status = '200 OK'
self._headers = {'CONTENT-TYPE': 'text/html; charset=utf-8'}
def unset_header(self, name):
"""
删除指定的header
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.unset_header('CONTENT-type')
>>> r.header('content-type')
"""
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
if key in self._headers:
del self._headers[key]
def set_header(self, name, value):
"""
给指定的header 赋值
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.set_header('CONTENT-type', 'image/png')
>>> r.header('content-TYPE')
'image/png'
"""
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
self._headers[key] = utils.to_str(value)
def header(self, name):
"""
获取Response Header 里单个 Header的值, 非大小写敏感
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.header('CONTENT-type')
'text/html; charset=utf-8'
>>> r.header('X-Powered-By')
"""
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
return self._headers.get(key)
@property
def headers(self):
"""
setter 构造的属性,以[(key1, value1), (key2, value2)...] 形式存储 所有header的值,
包括cookies的值
>>> r = Response()
>>> r.headers
[('Content-Type', 'text/html; charset=utf-8'), ('X-Powered-By', 'transwarp/1.0')]
>>> r.set_cookie('s1', 'ok', 3600)
>>> r.headers
[('Content-Type', 'text/html; charset=utf-8'), ('Set-Cookie', 's1=ok; Max-Age=3600; Path=/; HttpOnly'), ('X-Powered-By', 'transwarp/1.0')]
"""
L = [(_RESPONSE_HEADER_DICT.get(k, k), v) for k, v in self._headers.iteritems()]
if hasattr(self, '_cookies'):
for v in self._cookies.itervalues():
L.append(('Set-Cookie', v))
L.append(_HEADER_X_POWERED_BY)
return L
@property
def content_type(self):
"""
setter 方法实现的属性,用户保存header: Content-Type的值
>>> r = Response()
>>> r.content_type
'text/html; charset=utf-8'
>>> r.content_type = 'application/json'
>>> r.content_type
'application/json'
"""
return self.header('CONTENT-TYPE')
@content_type.setter
def content_type(self, value):
"""
让content_type 属性可写, 及设置Content-Type Header
"""
if value:
self.set_header('CONTENT-TYPE', value)
else:
self.unset_header('CONTENT-TYPE')
@property
def content_length(self):
"""
获取Content-Length Header 的值
>>> r = Response()
>>> r.content_length
>>> r.content_length = 100
>>> r.content_length
'100'
"""
return self.header('CONTENT-LENGTH')
@content_length.setter
def content_length(self, value):
"""
设置Content-Length Header 的值
>>> r = Response()
>>> r.content_length = '1024'
>>> r.content_length
'1024'
>>> r.content_length = 1024 * 8
>>> r.content_length
'8192'
"""
self.set_header('CONTENT-LENGTH', str(value))
def delete_cookie(self, name):
"""
Delete a cookie immediately.
Args:
name: the cookie name.
"""
self.set_cookie(name, '__deleted__', expires=0)
def set_cookie(self, name, value, max_age=None, expires=None, path='/', domain=None, secure=False, http_only=True):
"""
Set a cookie.
Args:
name: the cookie name.
value: the cookie value.
max_age: optional, seconds of cookie's max age.
expires: optional, unix timestamp, datetime or date object that indicate an absolute time of the
expiration time of cookie. Note that if expires specified, the max_age will be ignored.
path: the cookie path, default to '/'.
domain: the cookie domain, default to None.
secure: if the cookie secure, default to False.
http_only: if the cookie is for http only, default to True for better safty
(client-side script cannot access cookies with HttpOnly flag).
>>> r = Response()
>>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
>>> r._cookies
{'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
>>> r.set_cookie('company', r'Example="Limited"', expires=1342274794.123, path='/sub/')
>>> r._cookies
{'company': 'company=Example%3D%22Limited%22; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/sub/; HttpOnly'}
>>> dt = datetime.datetime(2012, 7, 14, 22, 6, 34, tzinfo=UTC('+8:00'))
>>> r.set_cookie('company', 'Expires', expires=dt)
>>> r._cookies
{'company': 'company=Expires; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/; HttpOnly'}
"""
if not hasattr(self, '_cookies'):
self._cookies = {}
L = ['%s=%s' % (utils.quote(name), utils.quote(value))]
if expires is not None:
if isinstance(expires, (float, int, long)):
L.append('Expires=%s' % datetime.datetime.fromtimestamp(expires, UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
if isinstance(expires, (datetime.date, datetime.datetime)):
L.append('Expires=%s' % expires.astimezone(UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
elif isinstance(max_age, (int, long)):
L.append('Max-Age=%d' % max_age)
L.append('Path=%s' % path)
if domain:
L.append('Domain=%s' % domain)
if secure:
L.append('Secure')
if http_only:
L.append('HttpOnly')
self._cookies[name] = '; '.join(L)
def unset_cookie(self, name):
"""
Unset a cookie.
>>> r = Response()
>>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
>>> r._cookies
{'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
>>> r.unset_cookie('company')
>>> r._cookies
{}
"""
if hasattr(self, '_cookies'):
if name in self._cookies:
del self._cookies[name]
@property
def status_code(self):
"""
Get response status code as int.
>>> r = Response()
>>> r.status_code
200
>>> r.status = 404
>>> r.status_code
404
>>> r.status = '500 Internal Error'
>>> r.status_code
500
"""
return int(self._status[:3])
@property
def status(self):
"""
Get response status. Default to '200 OK'.
>>> r = Response()
>>> r.status
'200 OK'
>>> r.status = 404
>>> r.status
'404 Not Found'
>>> r.status = '500 Oh My God'
>>> r.status
'500 Oh My God'
"""
return self._status
@status.setter
def status(self, value):
"""
Set response status as int or str.
>>> r = Response()
>>> r.status = 404
>>> r.status
'404 Not Found'
>>> r.status = '500 ERR'
>>> r.status
'500 ERR'
>>> r.status = u'403 Denied'
>>> r.status
'403 Denied'
>>> r.status = 99
Traceback (most recent call last):
...
ValueError: Bad response code: 99
>>> r.status = 'ok'
Traceback (most recent call last):
...
ValueError: Bad response code: ok
>>> r.status = [1, 2, 3]
Traceback (most recent call last):
...
TypeError: Bad type of response code.
"""
if isinstance(value, (int, long)):
if 100 <= value <= 999:
st = _RESPONSE_STATUSES.get(value, '')
if st:
self._status = '%d %s' % (value, st)
else:
self._status = str(value)
else:
raise ValueError('Bad response code: %d' % value)
elif isinstance(value, basestring):
if isinstance(value, unicode):
value = value.encode('utf-8')
if _RE_RESPONSE_STATUS.match(value):
self._status = value
else:
raise ValueError('Bad response code: %s' % value)
else:
raise TypeError('Bad type of response code.')
#################################################################
# 实现URL路由功能
# 将URL 映射到 函数上
#################################################################
# 用于捕获变量的re
_re_route = re.compile(r'(:[a-zA-Z_]\w*)')
# 方法的装饰器,用于捕获url
def get(path):
"""
A @get decorator.
@get('/:id')
def index(id):
pass
>>> @get('/test/:id')
... def test():
... return 'ok'
...
>>> test.__web_route__
'/test/:id'
>>> test.__web_method__
'GET'
>>> test()
'ok'
"""
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'GET'
return func
return _decorator
def post(path):
"""
A @post decorator.
>>> @post('/post/:id')
... def testpost():
... return '200'
...
>>> testpost.__web_route__
'/post/:id'
>>> testpost.__web_method__
'POST'
>>> testpost()
'200'
"""
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'POST'
return func
return _decorator
def _build_regex(path):
r"""
用于将路径转换成正则表达式,并捕获其中的参数
>>> _build_regex('/path/to/:file')
'^\\/path\\/to\\/(?P<file>[^\\/]+)$'
>>> _build_regex('/:user/:comments/list')
'^\\/(?P<user>[^\\/]+)\\/(?P<comments>[^\\/]+)\\/list$'
>>> _build_regex(':id-:pid/:w')
'^(?P<id>[^\\/]+)\\-(?P<pid>[^\\/]+)\\/(?P<w>[^\\/]+)$'
"""
re_list = ['^']
var_list = []
is_var = False
for v in _re_route.split(path):
if is_var:
var_name = v[1:]
var_list.append(var_name)
re_list.append(r'(?P<%s>[^\/]+)' % var_name)
else:
s = ''
for ch in v:
if '0' <= ch <= '9':
s += ch
elif 'A' <= ch <= 'Z':
s += ch
elif 'a' <= ch <= 'z':
s += ch
else:
s = s + '\\' + ch
re_list.append(s)
is_var = not is_var
re_list.append('$')
return ''.join(re_list)
def _static_file_generator(fpath, block_size=8192):
"""
读取静态文件的一个生成器
"""
with open(fpath, 'rb') as f:
block = f.read(block_size)
while block:
yield block
block = f.read(block_size)
class Route(object):
"""
动态路由对象,处理 装饰器捕获的url 和 函数
比如:
@get('/:id')
def index(id):
pass
在构造器中 path、method、is_static、route 和url相关
而 func 则指的装饰器里的func,比如上面的index函数
"""
def __init__(self, func):
"""
path: 通过method的装饰器捕获的path
method: 通过method装饰器捕获的method
is_static: 路径是否含变量,含变量为True
route:动态url(含变量)则捕获其变量的 re
func: 方法装饰器里定义的函数
"""
self.path = func.__web_route__
self.method = func.__web_method__
self.is_static = _re_route.search(self.path) is None
if not self.is_static:
self.route = re.compile(_build_regex(self.path))
self.func = func
def match(self, url):
"""
传入url,返回捕获的变量
"""
m = self.route.match(url)
if m:
return m.groups()
return None
def __call__(self, *args):
"""
实例对象直接调用时,执行传入的函数对象
"""
return self.func(*args)
def __str__(self):
if self.is_static:
return 'Route(static,%s,path=%s)' % (self.method, self.path)
return 'Route(dynamic,%s,path=%s)' % (self.method, self.path)
__repr__ = __str__
class StaticFileRoute(object):
"""
静态文件路由对象,和Route相对应
"""
def __init__(self):
self.method = 'GET'
self.is_static = False
self.route = re.compile('^/static/(.+)$')
def match(self, url):
if url.startswith('/static/'):
return (url[1:], )
return None
def __call__(self, *args):
fpath = os.path.join(ctx.application.document_root, args[0])
if not os.path.isfile(fpath):
raise HttpError.notfound()
fext = os.path.splitext(fpath)[1]
ctx.response.content_type = mimetypes.types_map.get(fext.lower(), 'application/octet-stream')
return _static_file_generator(fpath)
class MultipartFile(object):
"""
Multipart file storage get from request input.
f = ctx.request['file']
f.filename # 'test.png'
f.file # file-like object
"""
def __init__(self, storage):
self.filename = utils.to_unicode(storage.filename)
self.file = storage.file
#################################################################
# 实现视图功能
# 主要涉及到模板引擎和View装饰器的实现
#################################################################
class Template(object):
def __init__(self, template_name, **kw):
"""
Init a template object with template name, model as dict, and additional kw that will append to model.
>>> t = Template('hello.html', title='Hello', copyright='@2012')
>>> t.model['title']
'Hello'
>>> t.model['copyright']
'@2012'
>>> t = Template('test.html', abc=u'ABC', xyz=u'XYZ')
>>> t.model['abc']
u'ABC'
"""
self.template_name = template_name
self.model = dict(**kw)
class TemplateEngine(object):
"""
Base template engine.
"""""
def __call__(self, path, model):
return '<!-- override this method to render template -->'
class Jinja2TemplateEngine(TemplateEngine):
"""
Render using jinja2 template engine.
>>> templ_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'test')
>>> engine = Jinja2TemplateEngine(templ_path)
>>> engine.add_filter('datetime', lambda dt: dt.strftime('%Y-%m-%d %H:%M:%S'))
>>> engine('jinja2-test.html', dict(name='Michael', posted_at=datetime.datetime(2014, 6, 1, 10, 11, 12)))
'<p>Hello, Michael.</p><span>2014-06-01 10:11:12</span>'
"""
def __init__(self, templ_dir, **kw):
from jinja2 import Environment, FileSystemLoader
if 'autoescape' not in kw:
kw['autoescape'] = True
self._env = Environment(loader=FileSystemLoader(templ_dir), **kw)
def add_filter(self, name, fn_filter):
self._env.filters[name] = fn_filter
def __call__(self, path, model):
return self._env.get_template(path).render(**model).encode('utf-8')
def _debug():
"""
:return:
"""
pass
def _default_error_handler(e, start_response, is_debug):
"""
用于处理异常,主要是响应一个异常页面
:param e:
:param start_response: wsgi里面的 start_response 函数
:param is_debug:
:return:
"""
if isinstance(e, HttpError):
logging.info('HttpError: %s' % e.status)
headers = e.headers[:]
headers.append(('Content-Type', 'text/html'))
start_response(e.status, headers)
return ('<html><body><h1>%s</h1></body></html>' % e.status)
logging.exception('Exception:')
start_response('500 Internal Server Error', [('Content-Type', 'text/html'), _HEADER_X_POWERED_BY])
if is_debug:
return _debug()
return ('<html><body><h1>500 Internal Server Error</h1><h3>%s</h3></body></html>' % str(e))
def view(path):
"""
被装饰的函数 需要返回一个字典对象,用于渲染
装饰器通过Template类将 path 和 dict 关联在一个 Template对象上
A view decorator that render a view by dict.
>>> @view('test/view.html')
... def hello():
... return dict(name='Bob')
>>> t = hello()
>>> isinstance(t, Template)
True
>>> t.template_name
'test/view.html'
>>> @view('test/view.html')
... def hello2():
... return ['a list']
>>> t = hello2()
Traceback (most recent call last):
...
ValueError: Expect return a dict when using @view() decorator.
"""
def _decorator(func):
@functools.wraps(func)
def _wrapper(*args, **kw):
r = func(*args, **kw)
if isinstance(r, dict):
logging.info('return Template')
return Template(path, **r)
raise ValueError('Expect return a dict when using @view() decorator.')
return _wrapper
return _decorator
#################################################################
# 实现URL拦截器
# 主要interceptor的实现
#################################################################
_RE_INTERCEPTOR_STARTS_WITH = re.compile(r'^([^\*\?]+)\*?$')
_RE_INTERCEPTOR_ENDS_WITH = re.compile(r'^\*([^\*\?]+)$')
def _build_pattern_fn(pattern):
"""
传入需要匹配的字符串: URL
返回一个函数,该函数接收一个字符串参数,检测该字符串是否
符合pattern
"""
m = _RE_INTERCEPTOR_STARTS_WITH.match(pattern)
if m:
return lambda p: p.startswith(m.group(1))
m = _RE_INTERCEPTOR_ENDS_WITH.match(pattern)
if m:
return lambda p: p.endswith(m.group(1))
raise ValueError('Invalid pattern definition in interceptor.')
def interceptor(pattern='/'):
    """
    Decorator marking a function as a URL interceptor.

    The predicate compiled from *pattern* is attached to the function as
    __interceptor__; the function itself is returned unmodified.

    @interceptor('/admin/')
    def check_admin(req, resp):
        pass
    """
    def _decorator(func):
        setattr(func, '__interceptor__', _build_pattern_fn(pattern))
        return func
    return _decorator
def _build_interceptor_fn(func, next):
    """
    Wrap one interceptor around a continuation.

    The returned callable invokes *func* (handing it *next* so the
    interceptor can decide whether to continue or answer directly) when the
    interceptor's pattern matches the current request path; otherwise it
    falls straight through to *next*.
    """
    def _wrapper():
        matches = func.__interceptor__(ctx.request.path_info)
        return func(next) if matches else next()
    return _wrapper
def _build_interceptor_chain(last_fn, *interceptors):
"""
Build interceptor chain.
>>> def target():
... print 'target'
... return 123
>>> @interceptor('/')
... def f1(next):
... print 'before f1()'
... return next()
>>> @interceptor('/test/')
... def f2(next):
... print 'before f2()'
... try:
... return next()
... finally:
... print 'after f2()'
>>> @interceptor('/')
... def f3(next):
... print 'before f3()'
... try:
... return next()
... finally:
... print 'after f3()'
>>> chain = _build_interceptor_chain(target, f1, f2, f3)
>>> ctx.request = Dict(path_info='/test/abc')
>>> chain()
before f1()
before f2()
before f3()
target
after f3()
after f2()
123
>>> ctx.request = Dict(path_info='/api/')
>>> chain()
before f1()
before f3()
target
after f3()
123
"""
L = list(interceptors)
L.reverse()
fn = last_fn
for f in L:
fn = _build_interceptor_fn(f, fn)
return fn
def _load_module(module_name):
"""
Load module from name as str.
>>> m = _load_module('xml')
>>> m.__name__
'xml'
>>> m = _load_module('xml.sax')
>>> m.__name__
'xml.sax'
>>> m = _load_module('xml.sax.handler')
>>> m.__name__
'xml.sax.handler'
"""
last_dot = module_name.rfind('.')
if last_dot == (-1):
return __import__(module_name, globals(), locals())
from_module = module_name[:last_dot]
import_module = module_name[last_dot+1:]
m = __import__(from_module, globals(), locals(), [import_module])
return getattr(m, import_module)
#################################################################
# 全局WSGIApplication的类,实现WSGI接口
# WSGIApplication 封装了 wsgi Server(run方法) 和 wsgi 处理函数(wsgi静态方法)
# 上面的所有的功能都是对 wsgi 处理函数的装饰
#################################################################
class WSGIApplication(object):
    """
    The global application object implementing the WSGI interface.

    It bundles a development WSGI server (run) with the WSGI handler
    function itself (built by get_wsgi_application).  Routes, interceptors
    and the template engine registered on this object all decorate that
    handler.
    """
    def __init__(self, document_root=None, **kw):
        """
        Init a WSGIApplication.

        Args:
          document_root: document root path.
        """
        self._running = False
        self._document_root = document_root
        self._interceptors = []
        self._template_engine = None
        # Static routes are looked up by exact path; dynamic ones are
        # pattern-matched in registration order.
        self._get_static = {}
        self._post_static = {}
        self._get_dynamic = []
        self._post_dynamic = []

    def _check_not_running(self):
        """
        Raise if the app has already started: configuration is frozen once
        the WSGI callable has been built.
        """
        if self._running:
            raise RuntimeError('Cannot modify WSGIApplication when running.')

    @property
    def template_engine(self):
        return self._template_engine

    @template_engine.setter
    def template_engine(self, engine):
        """
        Set the template engine used to render Template results.
        """
        self._check_not_running()
        self._template_engine = engine

    def add_module(self, mod):
        # Register every route-decorated callable found in the module,
        # given either as a module object or an importable dotted name.
        self._check_not_running()
        m = mod if type(mod) == types.ModuleType else _load_module(mod)
        logging.info('Add module: %s' % m.__name__)
        for name in dir(m):
            fn = getattr(m, name)
            if callable(fn) and hasattr(fn, '__web_route__') and hasattr(fn, '__web_method__'):
                self.add_url(fn)

    def add_url(self, func):
        """
        Register one route handler, bucketed by HTTP method and by whether
        its path is static (exact) or dynamic (parameterized).
        """
        self._check_not_running()
        route = Route(func)
        if route.is_static:
            if route.method == 'GET':
                self._get_static[route.path] = route
            if route.method == 'POST':
                self._post_static[route.path] = route
        else:
            if route.method == 'GET':
                self._get_dynamic.append(route)
            if route.method == 'POST':
                self._post_dynamic.append(route)
        logging.info('Add route: %s' % str(route))

    def add_interceptor(self, func):
        """
        Register an interceptor; interceptors wrap dispatch in the order
        they are added.
        """
        self._check_not_running()
        self._interceptors.append(func)
        logging.info('Add interceptor: %s' % str(func))

    def run(self, port=9000, host='127.0.0.1'):
        """
        Serve the app with Python's built-in WSGI reference server, in
        debug mode (development use only).
        """
        from wsgiref.simple_server import make_server
        logging.info('application (%s) will start at %s:%s...' % (self._document_root, host, port))
        server = make_server(host, port, self.get_wsgi_application(debug=True))
        server.serve_forever()

    def get_wsgi_application(self, debug=False):
        # Freeze configuration and build the actual WSGI callable.
        self._check_not_running()
        if debug:
            # In debug mode, serve static files via a catch-all route.
            self._get_dynamic.append(StaticFileRoute())
        self._running = True
        _application = Dict(document_root=self._document_root)

        def fn_route():
            # Dispatch the current request: exact static match first, then
            # dynamic routes in order.  404 when nothing matches; 400 for
            # any method other than GET/POST.
            request_method = ctx.request.request_method
            path_info = ctx.request.path_info
            if request_method == 'GET':
                fn = self._get_static.get(path_info, None)
                if fn:
                    return fn()
                for fn in self._get_dynamic:
                    args = fn.match(path_info)
                    if args:
                        return fn(*args)
                raise HttpError.notfound()
            if request_method == 'POST':
                fn = self._post_static.get(path_info, None)
                if fn:
                    return fn()
                for fn in self._post_dynamic:
                    args = fn.match(path_info)
                    if args:
                        return fn(*args)
                raise HttpError.notfound()
            raise HttpError.badrequest()

        fn_exec = _build_interceptor_chain(fn_route, *self._interceptors)

        def wsgi(env, start_response):
            """
            The WSGI handler: set up the per-request context, dispatch
            through the interceptor chain, render Template results, and
            translate redirect/HTTP/unexpected errors into responses.
            """
            ctx.application = _application
            ctx.request = Request(env)
            response = ctx.response = Response()
            try:
                r = fn_exec()
                if isinstance(r, Template):
                    r = self._template_engine(r.template_name, r.model)
                if isinstance(r, unicode):
                    r = r.encode('utf-8')
                if r is None:
                    r = []
                start_response(response.status, response.headers)
                return r
            except _RedirectError, e:
                response.set_header('Location', e.location)
                start_response(e.status, response.headers)
                return []
            except HttpError, e:
                start_response(e.status, response.headers)
                return ['<html><body><h1>', e.status, '</h1></body></html>']
            except Exception, e:
                logging.exception(e)
                if not debug:
                    start_response('500 Internal Server Error', [])
                    return ['<html><body><h1>500 Internal Server Error</h1></body></html>']
                # Debug mode: include the formatted traceback in the page.
                exc_type, exc_value, exc_traceback = sys.exc_info()
                fp = StringIO()
                traceback.print_exception(exc_type, exc_value, exc_traceback, file=fp)
                stacks = fp.getvalue()
                fp.close()
                start_response('500 Internal Server Error', [])
                return [
                    r'''<html><body><h1>500 Internal Server Error</h1><div style="font-family:Monaco, Menlo, Consolas, 'Courier New', monospace;"><pre>''',
                    stacks.replace('<', '<').replace('>', '>'),
                    '</pre></div></body></html>']
            finally:
                # Tear down the per-request context in all cases.
                del ctx.application
                del ctx.request
                del ctx.response
        return wsgi
if __name__ == '__main__':
    # Make the current directory importable, then run the module doctests.
    sys.path.append('.')
    import doctest
    doctest.testmod()
|
[
"719118794@qq.com"
] |
719118794@qq.com
|
e20f6bd63d39fd455755dbb515cb9dc5635e3808
|
a552f40c687d7e11e4c90350339286bd636f14ec
|
/mpproj/mpproj/wsgi.py
|
8c796e81e3d6e6bfa02bc2d7a6fabf4f39a2df7f
|
[
"Apache-2.0"
] |
permissive
|
rankrh/MountainProject
|
26e851bf266ba488f98f798c8f6d0030f439cac1
|
0e89e64ea15b09cbd3845ad9579fe8434a1f02c0
|
refs/heads/master
| 2021-06-24T19:58:51.511835
| 2020-11-20T01:30:51
| 2020-11-20T01:30:51
| 162,328,722
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for mpproj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mpproj.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi) pick up.
application = get_wsgi_application()
|
[
"rankrh@gmail.com"
] |
rankrh@gmail.com
|
d2989e04adcb61408d61d1acff049248324876bf
|
7892e5cad76cb0de81dd4d2962b865d749111f2d
|
/pyFTS/sfts.py
|
656514329a9843578d4d13744a872a2ee9252951
|
[] |
no_license
|
cseveriano/solarenergyforecasting
|
5c70932d5168c12242efe41c85d183a41be71abf
|
b61fe76a4f7fc5863448b35881e01e1ccdb2dedd
|
refs/heads/master
| 2021-01-11T20:31:45.533149
| 2017-02-02T13:13:06
| 2017-02-02T13:13:06
| 79,134,373
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
import numpy as np
from pyFTS.common import FuzzySet,FLR
import fts
class SeasonalFLRG(fts.FTS):
    """First-order seasonal fuzzy logical relationship group: one
    left-hand-side season mapped to the fuzzy sets that followed it."""

    def __init__(self, seasonality):
        self.LHS = seasonality
        self.RHS = []

    def append(self, c):
        self.RHS.append(c)

    def __str__(self):
        # Render as "season -> name1,name2,..." with names sorted.
        ordered = sorted(self.RHS, key=lambda s: s.name)
        names = ",".join(fuzzy_set.name for fuzzy_set in ordered)
        return str(self.LHS) + " -> " + names
class SeasonalFTS(fts.FTS):
    """Seasonal (Chen-style) fuzzy time series model: one FLRG per season."""

    def __init__(self, name):
        # NOTE(review): `name` is accepted but immediately overwritten by
        # the fixed label below -- kept for API symmetry with siblings.
        super(SeasonalFTS, self).__init__(1, "SFTS")
        self.name = "Seasonal FTS"
        self.detail = "Chen"
        self.seasonality = 1
        self.hasSeasonality = True

    def generateFLRG(self, flrs):
        # Build one FLRG per season, appending each FLR's RHS to the group
        # of the season it falls in.
        flrgs = []
        season = 1
        for flr in flrs:
            if len(flrgs) < self.seasonality:
                flrgs.append(SeasonalFLRG(season))
            # NOTE(review): `flrgs` is 0-indexed while `season` runs
            # 1..seasonality, so `flrgs[season]` skips index 0 and will
            # raise IndexError for the last season -- looks like an
            # off-by-one; confirm against pyFTS upstream.
            flrgs[season].append(flr.RHS)
            season = (season + 1) % (self.seasonality + 1)
            if season == 0: season = 1
        return (flrgs)

    def train(self, data, sets, seasonality):
        # Fuzzify the series, extract recurrent FLRs, group them by season.
        self.sets = sets
        self.seasonality = seasonality
        tmpdata = FuzzySet.fuzzySeries(data, sets)
        flrs = FLR.generateRecurrentFLRs(tmpdata)
        self.flrgs = self.generateFLRG(flrs)

    def forecast(self, data):
        ndata = np.array(data)
        l = len(ndata)
        ret = []
        for k in np.arange(1, l):
            # NOTE(review): indexes the FLRG list by the raw data value
            # rather than by the season of step k -- verify intent.
            flrg = self.flrgs[data[k]]
            # getMidpoints is presumably inherited from fts.FTS -- confirm.
            mp = self.getMidpoints(flrg)
            # Forecast is the mean of the group's midpoints.
            ret.append(sum(mp) / len(mp))
        return ret
|
[
"carlossjr@gmail.com"
] |
carlossjr@gmail.com
|
a37479b83e3e6ebdfa280a2339adbc632b8a3439
|
2611d6ab0963ba5da8cec69559a6ad4b7efb92d9
|
/emojisss.py
|
f95a3d4789d4e416f67fe803c0f0d48a4f73054b
|
[] |
no_license
|
myke-oliveira/curso-em-video-python3
|
f4108eebc8dc5fde1574dc2a8ab0f6eff7650e5d
|
97dc244ff50720190a134a1eb2fef9f8b43cdae3
|
refs/heads/master
| 2021-05-12T11:34:28.036631
| 2018-01-16T19:28:44
| 2018-01-16T19:28:44
| 117,392,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
import emoji
# Convert the CLDR-style alias to the actual emoji character and print it.
print(emoji.emojize(':thumbs_up_sign:'))
|
[
"mykeapo@gmail.com"
] |
mykeapo@gmail.com
|
b6a99fdefc842c281a110c3f4728fbd2907c0806
|
eb011bbc2e7f572d10b29c78f417645fc9eef247
|
/deepchem/models/torch_models/pagtn.py
|
340ac011ec73de75afa60a169fa6ce822d899dd0
|
[
"MIT"
] |
permissive
|
tianqin91/deepchem
|
1cc81310101e5ac4e9886db6ad97820a54bba61f
|
9c36987f735af3ebf602247ddf06c575ede85d44
|
refs/heads/master
| 2021-08-02T07:55:41.828374
| 2021-07-29T19:02:47
| 2021-07-29T19:02:47
| 191,236,748
| 0
| 0
|
MIT
| 2019-06-10T19:57:40
| 2019-06-10T19:57:40
| null |
UTF-8
|
Python
| false
| false
| 10,834
|
py
|
"""
DGL-based PAGTN for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class Pagtn(nn.Module):
    """Path-Augmented Graph Transformer Network for graph property prediction.

    This model proceeds as follows:

    * Update node representations with a variant of GAT, where a linear
      additive form of attention is applied.  Attention weights are derived
      by concatenating the node and edge features for each bond.
    * Update node representations with multiple rounds of message passing;
      each layer has residual connections with its previous layer.
    * The final molecular representation combines the representations of
      all nodes in the molecule.
    * Perform the final prediction using a linear layer.

    Examples
    --------
    >>> import deepchem as dc
    >>> import dgl
    >>> from deepchem.models import Pagtn
    >>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
    >>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
    >>> graphs = featurizer.featurize(smiles)
    >>> dgl_graphs = [graphs[i].to_dgl_graph() for i in range(len(graphs))]
    >>> batch_dgl_graph = dgl.batch(dgl_graphs)
    >>> model = Pagtn(n_tasks=1, mode='regression')
    >>> preds = model(batch_dgl_graph)
    >>> preds.shape == (2, 1)
    True

    References
    ----------
    .. [1] Benson Chen, Regina Barzilay, Tommi Jaakkola. "Path-Augmented
       Graph Transformer Network." arXiv:1905.12712

    Notes
    -----
    This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
    (https://github.com/awslabs/dgl-lifesci) to be installed.
    """

    def __init__(self,
                 n_tasks: int,
                 number_atom_features: int = 94,
                 number_bond_features: int = 42,
                 mode: str = 'regression',
                 n_classes: int = 2,
                 output_node_features: int = 256,
                 hidden_features: int = 32,
                 num_layers: int = 5,
                 num_heads: int = 1,
                 dropout: float = 0.1,
                 nfeat_name: str = 'x',
                 efeat_name: str = 'edge_attr',
                 pool_mode: str = 'sum'):
        """
        Parameters
        ----------
        n_tasks: int
          Number of tasks.
        number_atom_features : int
          Size for the input node features. Default to 94.
        number_bond_features : int
          Size for the input edge features. Default to 42.
        mode: str
          The model type, 'classification' or 'regression'. Default to 'regression'.
        n_classes: int
          The number of classes to predict per task
          (only used when ``mode`` is 'classification'). Default to 2.
        output_node_features : int
          Size for the output node features in PAGTN layers. Default to 256.
        hidden_features : int
          Size for the hidden node features in PAGTN layers. Default to 32.
        num_layers : int
          Number of PAGTN layers to be applied. Default to 5.
        num_heads : int
          Number of attention heads. Default to 1.
        dropout : float
          The probability for performing dropout. Default to 0.1.
        nfeat_name: str
          Key under which input node features are stored in ``g.ndata``.
          Default to 'x'.
        efeat_name: str
          Key under which input edge features are stored in ``g.edata``.
          Default to 'edge_attr'.
        pool_mode : 'max' or 'mean' or 'sum'
          Whether to compute elementwise maximum, mean or sum of the node
          representations.

        Raises
        ------
        ImportError
          If dgl or dgllife is not installed.
        ValueError
          If ``mode`` is neither 'classification' nor 'regression'.
        """
        # These were bare `except:` clauses, which would also swallow
        # SystemExit/KeyboardInterrupt; only a failed import should map to
        # the "requires dgl" error.
        try:
            import dgl  # noqa: F401
        except ImportError:
            raise ImportError('This class requires dgl.')
        try:
            import dgllife  # noqa: F401
        except ImportError:
            raise ImportError('This class requires dgllife.')

        if mode not in ['classification', 'regression']:
            raise ValueError("mode must be either 'classification' or 'regression'")

        super(Pagtn, self).__init__()

        self.n_tasks = n_tasks
        self.mode = mode
        self.n_classes = n_classes
        self.nfeat_name = nfeat_name
        self.efeat_name = efeat_name
        # Classification emits one logit per (task, class) pair.
        if mode == 'classification':
            out_size = n_tasks * n_classes
        else:
            out_size = n_tasks

        from dgllife.model import PAGTNPredictor as DGLPAGTNPredictor

        self.model = DGLPAGTNPredictor(
            node_in_feats=number_atom_features,
            node_out_feats=output_node_features,
            node_hid_feats=hidden_features,
            edge_feats=number_bond_features,
            depth=num_layers,
            nheads=num_heads,
            dropout=dropout,
            n_tasks=out_size,
            mode=pool_mode)

    def forward(self, g):
        """Predict graph labels.

        Parameters
        ----------
        g: DGLGraph
          A DGLGraph for a batch of graphs, storing node features in
          ``g.ndata[self.nfeat_name]`` and edge features in
          ``g.edata[self.efeat_name]``.

        Returns
        -------
        torch.Tensor
          The model output.  For 'regression' its shape is
          ``(batch_size, n_tasks)``.  For 'classification' the first value
          holds class probabilities, shaped ``(batch_size, n_tasks,
          n_classes)`` (or ``(batch_size, n_classes)`` when n_tasks is 1).
        torch.Tensor, optional
          Only for 'classification': the pre-softmax logits.
        """
        node_feats = g.ndata[self.nfeat_name]
        edge_feats = g.edata[self.efeat_name]
        out = self.model(g, node_feats, edge_feats)

        if self.mode == 'classification':
            if self.n_tasks == 1:
                logits = out.view(-1, self.n_classes)
                softmax_dim = 1
            else:
                logits = out.view(-1, self.n_tasks, self.n_classes)
                softmax_dim = 2
            proba = F.softmax(logits, dim=softmax_dim)
            return proba, logits
        return out
class PagtnModel(TorchModel):
    """TorchModel wrapper training a :class:`Pagtn` network for graph
    property prediction.

    Uses L2 loss for 'regression' and sparse softmax cross-entropy for
    'classification'.

    Examples
    --------
    >>>
    >> import deepchem as dc
    >> from deepchem.models import PagtnModel
    >> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
    >> tasks, datasets, transformers = dc.molnet.load_tox21(
    ..     reload=False, featurizer=featurizer, transformers=[])
    >> train, valid, test = datasets
    >> model = PagtnModel(mode='classification', n_tasks=len(tasks),
    ..                    batch_size=16, learning_rate=0.001)
    >> model.fit(train, nb_epoch=50)

    References
    ----------
    .. [1] Benson Chen, Regina Barzilay, Tommi Jaakkola. "Path-Augmented
       Graph Transformer Network." arXiv:1905.12712

    Notes
    -----
    This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
    (https://github.com/awslabs/dgl-lifesci) to be installed.
    """

    def __init__(self,
                 n_tasks: int,
                 number_atom_features: int = 94,
                 number_bond_features: int = 42,
                 mode: str = 'regression',
                 n_classes: int = 2,
                 output_node_features: int = 256,
                 hidden_features: int = 32,
                 num_layers: int = 5,
                 num_heads: int = 1,
                 dropout: float = 0.1,
                 pool_mode: str = 'sum',
                 **kwargs):
        """
        Parameters
        ----------
        n_tasks: int
          Number of tasks.
        number_atom_features : int
          Size for the input node features. Default to 94.
        number_bond_features : int
          Size for the input edge features. Default to 42.
        mode: str
          The model type, 'classification' or 'regression'. Default to 'regression'.
        n_classes: int
          The number of classes to predict per task
          (only used when ``mode`` is 'classification'). Default to 2.
        output_node_features : int
          Size for the output node features in PAGTN layers. Default to 256.
        hidden_features : int
          Size for the hidden node features in PAGTN layers. Default to 32.
        num_layers: int
          Number of graph neural network layers, i.e. rounds of message
          passing. Default to 5.
        num_heads : int
          Number of attention heads. Default to 1.
        dropout: float
          Dropout probability. Default to 0.1.
        pool_mode : 'max' or 'mean' or 'sum'
          Whether to compute elementwise maximum, mean or sum of the node
          representations.
        kwargs
          Any keyword argument of TorchModel.
        """
        model = Pagtn(
            n_tasks=n_tasks,
            number_atom_features=number_atom_features,
            number_bond_features=number_bond_features,
            mode=mode,
            n_classes=n_classes,
            output_node_features=output_node_features,
            hidden_features=hidden_features,
            num_layers=num_layers,
            num_heads=num_heads,
            dropout=dropout,
            pool_mode=pool_mode)
        if mode == 'regression':
            loss: Loss = L2Loss()
            output_types = ['prediction']
        else:
            loss = SparseSoftmaxCrossEntropy()
            # 'loss' output feeds the pre-softmax logits to the loss fn.
            output_types = ['prediction', 'loss']
        super(PagtnModel, self).__init__(
            model, loss=loss, output_types=output_types, **kwargs)

    def _prepare_batch(self, batch):
        """Create batch data for Pagtn.

        Parameters
        ----------
        batch: tuple
          The tuple ``(inputs, labels, weights)``.

        Returns
        -------
        inputs: DGLGraph
          DGLGraph for a batch of graphs, moved to the model's device.
        labels: list of torch.Tensor or None
          The graph labels.
        weights: list of torch.Tensor or None
          Per-sample (or sample/task) weights as torch.Tensor.
        """
        # Was a bare `except:`; only a failed import should map to this
        # error, not SystemExit/KeyboardInterrupt.
        try:
            import dgl
        except ImportError:
            raise ImportError('This class requires dgl.')
        inputs, labels, weights = batch
        dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]
        inputs = dgl.batch(dgl_graphs).to(self.device)
        _, labels, weights = super(PagtnModel, self)._prepare_batch(([], labels,
                                                                     weights))
        return inputs, labels, weights
|
[
"mvenkataraman@ph.iitr.ac.in"
] |
mvenkataraman@ph.iitr.ac.in
|
64facd39145200dcd135636db0dd618f2d8a637a
|
b62dfe6d3049ea7fc7488d5dddce5a7fc968cc73
|
/venv/bin/pip
|
768260327e139dbb09ab6fd61c27320a344ee4ce
|
[] |
no_license
|
jiwenyu0531/MyAI
|
9764299498abbe742e3b7112acdb6fc6d4802adf
|
15808c3bcce8e861690039e0a2bc7819c12c6743
|
refs/heads/master
| 2020-03-28T19:44:27.717510
| 2018-09-16T14:46:10
| 2018-09-16T14:46:10
| 149,005,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
#!/Users/jiwenyu/PycharmProjects/MyAI/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
    )
|
[
"jiwenyu0531@qq.com"
] |
jiwenyu0531@qq.com
|
|
cc43cf0ec8d3d75f01f17a65aa8ee501efb3738b
|
6d08b4d926519dbb47b45addc53cd92f5f75e569
|
/app.py
|
799df4c64b398e9bacee9edcd9e21d237bd2a496
|
[] |
no_license
|
elieu17/bellybuttonhmwk
|
f09daf2f381c9a23ce3b686e89d9d639d8a459ca
|
4a8439667aff6ec36a83e50af278c5546b797d77
|
refs/heads/master
| 2020-04-29T08:57:08.234124
| 2019-04-17T22:57:35
| 2019-04-17T22:57:35
| 176,005,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,886
|
py
|
import os
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from flask import Flask, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)

#################################################
# Database Setup
#################################################
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db/bellybutton.sqlite"
db = SQLAlchemy(app)

# Reflect the existing SQLite database into automapped model classes.
Base = automap_base()
Base.prepare(db.engine, reflect=True)

# Save references to each reflected table.
Samples_Metadata = Base.classes.sample_metadata
Samples = Base.classes.samples
@app.route("/")
def index():
    """Serve the homepage template."""
    return render_template("index.html")
@app.route("/names")
def names():
    """Return the list of sample-name columns as JSON."""
    # Pull the whole samples table into a DataFrame via its SELECT statement.
    stmt = db.session.query(Samples).statement
    samples_df = pd.read_sql_query(stmt, db.session.bind)
    # Skip the first two (non-sample) columns; the rest are sample names.
    sample_names = list(samples_df.columns)[2:]
    return jsonify(sample_names)
@app.route("/metadata/<sample>")
def sample_metadata(sample):
    """Return the metadata record for one sample as JSON.

    Queries sample_metadata for the row whose `sample` column matches the
    URL segment and maps each selected column to its value.
    """
    # Column names in the same order as the query selection below.
    fields = ("sample", "ETHNICITY", "GENDER", "AGE", "LOCATION", "BBTYPE",
              "WFREQ")
    sel = [getattr(Samples_Metadata, field) for field in fields]
    results = db.session.query(*sel).filter(
        Samples_Metadata.sample == sample).all()

    # Each row is a tuple in `fields` order.  Later rows overwrite earlier
    # ones, matching the original loop (at most one row is expected).
    # The stray debugging print() has been removed.
    metadata = {}
    for result in results:
        metadata.update(zip(fields, result))
    return jsonify(metadata)
@app.route("/samples/<sample>")
def samples(sample):
    """Return `otu_ids`, `otu_labels` and `sample_values` for one sample."""
    stmt = db.session.query(Samples).statement
    samples_df = pd.read_sql_query(stmt, db.session.bind)

    # Keep only rows where this sample's value exceeds 1, largest first.
    filtered = samples_df.loc[samples_df[sample] > 1,
                              ["otu_id", "otu_label", sample]]
    filtered = filtered.sort_values(by=[sample], ascending=False)

    payload = {
        "otu_ids": filtered.otu_id.values.tolist(),
        "sample_values": filtered[sample].values.tolist(),
        "otu_labels": filtered.otu_label.tolist(),
    }
    return jsonify(payload)
if __name__ == "__main__":
    # Run Flask's built-in development server.
    app.run()
|
[
"noreply@github.com"
] |
noreply@github.com
|
99f477ff8ee5eee19b30adddfcaa704802c97c42
|
9b9a02657812ea0cb47db0ae411196f0e81c5152
|
/repoData/Floobits-flootty/allPythonContent.py
|
233f4eda6ac3b66566c18b3214288161442dcb88
|
[] |
no_license
|
aCoffeeYin/pyreco
|
cb42db94a3a5fc134356c9a2a738a063d0898572
|
0ac6653219c2701c13c508c5c4fc9bc3437eea06
|
refs/heads/master
| 2020-12-14T14:10:05.763693
| 2016-06-27T05:15:15
| 2016-06-27T05:15:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,459
|
py
|
__FILENAME__ = api
import sys
import base64
import json
try:
from urllib.request import Request, urlopen
assert Request and urlopen
except ImportError:
from urllib2 import Request, urlopen
try:
from . import shared as G, utils
assert G and utils
except (ImportError, ValueError):
import shared as G
import utils
def get_basic_auth():
    """Build the value for an HTTP Basic `Authorization` header from the
    configured Floobits username and secret.

    Returns an ASCII str (without the leading 'Basic ' prefix).
    """
    # TODO: use api_key if it exists
    credentials = ('%s:%s' % (G.USERNAME, G.SECRET)).encode('utf-8')
    # base64.encodestring was deprecated and removed in Python 3.9.
    # b64encode never inserts newlines, so it yields exactly what the old
    # encodestring(...).replace('\n', '') produced, on Python 2 and 3.
    return base64.b64encode(credentials).decode('ascii')
def api_request(url, data=None):
    """Issue an authenticated JSON request to a Floobits API URL.

    With *data*, the payload is JSON-encoded and sent as a POST body;
    without it the request is a plain GET.  Returns the urlopen response.
    """
    body = json.dumps(data).encode('utf-8') if data else None
    req = Request(url, data=body)
    req.add_header('Authorization', 'Basic %s' % get_basic_auth())
    req.add_header('Accept', 'application/json')
    req.add_header('Content-type', 'application/json')
    req.add_header('User-Agent', 'Flootty py-%s.%s' % (sys.version_info[0], sys.version_info[1]))
    return urlopen(req, timeout=5)
def create_workspace(post_data):
    # POST the new-workspace payload to the workspace-creation endpoint.
    url = 'https://%s/api/workspace/' % G.DEFAULT_HOST
    return api_request(url, post_data)
def get_workspace_by_url(url):
    # Parse a workspace URL into host/owner/name and GET its API record.
    result = utils.parse_url(url)
    api_url = 'https://%s/api/workspace/%s/%s/' % (result['host'], result['owner'], result['workspace'])
    return api_request(api_url)
def get_workspace(owner, workspace):
    # GET one workspace's API record by owner and workspace name.
    api_url = 'https://%s/api/workspace/%s/%s/' % (G.DEFAULT_HOST, owner, workspace)
    return api_request(api_url)
def get_workspaces():
    # GET all workspaces the current user can view.
    api_url = 'https://%s/api/workspace/can/view/' % (G.DEFAULT_HOST)
    return api_request(api_url)
def get_now_editing_workspaces():
    # GET the workspaces currently being edited.
    api_url = 'https://%s/api/workspaces/now_editing/' % (G.DEFAULT_HOST)
    return api_request(api_url)
def get_orgs():
    # GET the organizations the current user belongs to.
    api_url = 'https://%s/api/orgs/' % (G.DEFAULT_HOST)
    return api_request(api_url)
def get_orgs_can_admin():
    # GET the organizations the current user can administer.
    api_url = 'https://%s/api/orgs/can/admin/' % (G.DEFAULT_HOST)
    return api_request(api_url)
def send_error(data):
    # Best-effort error report to Floobits.  Failures are deliberately
    # printed and swallowed so error reporting can never crash the client.
    try:
        api_url = 'https://%s/api/error/' % (G.DEFAULT_HOST)
        return api_request(api_url, data)
    except Exception as e:
        print(e)
    return None
########NEW FILE########
__FILENAME__ = cert
CA_CERT = '''-----BEGIN CERTIFICATE-----
MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
-----END CERTIFICATE-----'''
########NEW FILE########
__FILENAME__ = flootty
#!/usr/bin/env python
# coding: utf-8
try:
unicode()
except NameError:
unicode = str
# Heavily influenced by the work of Joshua D. Bartlett
# see: http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
# original copyright
# Copyright (c) 2011 Joshua D. Bartlett
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# boilerplate to allow running as script directly
if __name__ == "__main__" and __package__ is None:
import sys
import os
# The following assumes the script is in the top level of the package
# directory. We use dirname() to help get the parent directory to add to
# sys.path, so that we can import the current package. This is necessary
# since when invoked directly, the 'current' package is not automatically
# imported.
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
import flootty
assert flootty
__package__ = str("flootty")
del sys, os
import atexit
import fcntl
import json
import optparse
import array
import os
import pty
import select
import socket
import ssl
import sys
import tempfile
import termios
import tty
import signal
import time
import base64
import collections
import errno
PY2 = sys.version_info < (3, 0)
try:
import __builtin__
input = getattr(__builtin__, 'raw_input')
except (ImportError, AttributeError):
pass
try:
from . import api, cert, shared as G, utils, version
assert api and cert and G and utils
except (ImportError, ValueError):
import api
import cert
import shared as G
import utils
import version
PROTO_VERSION = '0.11'
CLIENT = 'flootty %s' % version.FLOOTTY_VERSION

# Presumably milliseconds before the first reconnect attempt -- confirm.
INITIAL_RECONNECT_DELAY = 1000
# Max bytes pulled per os.read() call on a pty/socket fd.
FD_READ_BYTES = 65536

# Seconds
SELECT_TIMEOUT = 0.1
NET_TIMEOUT = 10

MAX_BYTES_TO_BUFFER = 65536

DEFAULT_HOST = "floobits.com"
DEFAULT_PORT = 3448
def read_floorc():
    """Parse ~/.floorc into a dict of settings.

    Each line is 'key value' (split on the first space); lines without a
    space are ignored.  A missing file yields an empty dict; any other
    I/O error propagates.

    Returns:
        dict mapping setting names to their string values.
    """
    settings = {}
    p = os.path.expanduser('~/.floorc')
    try:
        # Context manager guarantees the file is closed even if read()
        # or decode() raises (the original leaked the fd in that case).
        with open(p, 'rb') as fd:
            data = fd.read().decode('utf-8')
    except IOError as e:
        if e.errno == errno.ENOENT:
            return settings
        raise
    for line in data.split('\n'):
        position = line.find(' ')
        if position < 0:
            continue
        settings[line[:position]] = line[position + 1:]
    return settings
def write(fd, b):
if (not PY2) and isinstance(b, str):
b = b.encode('utf-8')
elif PY2 and isinstance(b, unicode):
b = b.encode('utf-8')
while len(b):
try:
n = os.write(fd, b)
b = b[n:]
except (IOError, OSError):
pass
def read(fd):
buf = b''
while True:
try:
d = os.read(fd, FD_READ_BYTES)
if not d or d == '':
break
buf += d
except (IOError, OSError):
break
return buf
def out(*args):
buf = "%s\r\n" % " ".join(args)
write(pty.STDOUT_FILENO, buf)
def err(*args):
buf = "%s\r\n" % " ".join(args)
write(pty.STDERR_FILENO, buf)
def die(*args):
err(*args)
sys.exit(1)
usage = '''usage: %prog [options] [terminal_name]\n
For more help, see https://github.com/Floobits/flootty'''
def main():
settings = read_floorc()
parser = optparse.OptionParser(usage=usage)
parser.add_option("-u", "--username",
dest="username",
default=settings.get('username'),
help="Your Floobits username")
parser.add_option("-s", "--secret",
dest="secret",
default=settings.get('secret'),
help="Your Floobits secret (api key)")
parser.add_option("-c", "--create",
dest="create",
default=False,
action="store_true",
help="The terminal name to create")
parser.add_option("--host",
dest="host",
default=DEFAULT_HOST,
help="The host to connect to. Deprecated. Use --url instead.")
parser.add_option("-p", "--port",
dest="port",
default=DEFAULT_PORT,
help="The port to connect to. Deprecated. Use --url instead.")
parser.add_option("-w", "--workspace",
dest="workspace",
help="The workspace name. --owner is required with this option. Deprecated. Use --url instead.")
parser.add_option("-o", "--owner",
dest="owner",
help="The workspace owner. --workspace is required with this option. Deprecated. Use --url instead.")
parser.add_option("-l", "--list",
dest="list",
default=False,
action="store_true",
help="List all terminals in the workspace")
parser.add_option("--unsafe",
dest="safe",
default=True,
action="store_false",
help="Less safe terminal. This allows other users to send enter in your terminal.")
parser.add_option("--no-ssl",
dest="use_ssl",
default=True,
action="store_false",
help="Do not use this option unless you know what you are doing!")
parser.add_option("--url",
dest="workspace_url",
default=None,
help="The URL of the workspace to connect to.")
parser.add_option("--resize",
dest="resize",
default=False,
action="store_true",
help="Resize your terminal to the host terminal size.")
parser.add_option("-P", "--preserve-ps1",
dest="set_prompt",
default=True,
action="store_false",
help="Don't change $PS1 (bash/zsh prompt)")
parser.add_option("-v", "--version",
dest="version",
default=False,
action="store_true",
help="Print version")
options, args = parser.parse_args()
if options.version:
print(CLIENT)
return
G.USERNAME = options.username
G.SECRET = options.secret
default_term_name = ""
if options.create:
default_term_name = "ftty"
term_name = args and args[0] or default_term_name
if options.workspace and options.owner and options.workspace_url:
# TODO: confusing
parser.error("You can either specify --workspace and --owner, or --url, but not both.")
if bool(options.workspace) != bool(options.owner):
parser.error("You must specify a workspace and owner or neither.")
for opt in ['owner', 'workspace']:
if getattr(options, opt):
print('%s is deprecated. Please use --url instead.' % opt)
if not options.workspace or not options.owner:
floo = {}
if options.workspace_url:
floo = utils.parse_url(options.workspace_url)
else:
for floo_path in walk_up(os.path.realpath('.')):
try:
floo = json.loads(open(os.path.join(floo_path, '.floo'), 'rb').read().decode('utf-8'))
floo = utils.parse_url(floo['url'])
except Exception:
pass
else:
break
options.host = floo.get('host')
options.workspace = floo.get('workspace')
options.owner = floo.get('owner')
options.use_ssl = floo.get('secure')
if not options.port:
options.port = floo.get('port')
if not options.host:
options.host = floo.get('host')
if not options.workspace or not options.owner:
now_editing = api.get_now_editing_workspaces()
now_editing = json.loads(now_editing.read().decode('utf-8'))
if len(now_editing) == 1:
options.workspace = now_editing[0]['name']
options.owner = now_editing[0]['owner']
# TODO: list possible workspaces to join if > 1 is active
if options.list:
if len(term_name) != 0:
die("I don't understand why you gave me a positional argument.")
for opt in ['workspace', 'owner', 'username', 'secret']:
if not getattr(options, opt):
parser.error('%s not given' % opt)
color_reset = '\033[0m'
if options.safe:
green = '\033[92m'
print('%sTerminal is safe. Other users will not be able to send [enter]%s' % (green, color_reset))
else:
yellorange = '\033[93m'
print('%sTerminal is unsafe. Other users will be able to send [enter]. Be wary!%s' % (yellorange, color_reset))
f = Flootty(options, term_name)
atexit.register(f.cleanup)
f.connect_to_internet()
f.select()
def walk_up(path):
step_up = lambda x: os.path.realpath(os.path.join(x, '..'))
parent = step_up(path)
while parent != path:
yield path
path = parent
parent = step_up(path)
yield path
class FD(object):
def __init__(self, fileno, reader=None, writer=None, errer=None, name=None):
self.fileno = fileno
self.reader = reader
self.writer = writer
self.errer = errer
self.name = name
def __getitem__(self, key):
return getattr(self, key, None)
def __str__(self):
return str(self.name)
class Flootty(object):
'''Mostly OK at sharing a shell'''
def __init__(self, options, term_name):
self.master_fd = None
self.original_wincher = None
self.fds = {}
self.readers = set()
self.writers = set()
self.errers = set()
self.empty_selects = 0
self.reconnect_timeout = None
self.buf_out = collections.deque()
self.buf_in = b''
self.host = options.host
self.port = int(options.port)
self.workspace = options.workspace
self.owner = options.owner
self.options = options
self.term_name = term_name
self.authed = False
self.term_id = None
self.orig_stdin_atts = None
self.orig_stdout_atts = None
self.last_stdin = 0
self.reconnect_delay = INITIAL_RECONNECT_DELAY
def add_fd(self, fileno, **kwargs):
try:
fileno = fileno.fileno()
except:
fileno = fileno
fd = FD(fileno, **kwargs)
self.fds[fileno] = fd
if fd.reader:
self.readers.add(fileno)
if fd.writer:
self.writers.add(fileno)
if fd.errer:
self.errers.add(fileno)
def remove_fd(self, fileno):
self.readers.discard(fileno)
self.writers.discard(fileno)
self.errers.discard(fileno)
try:
del self.fds[fileno]
except KeyError:
pass
def transport(self, name, data):
data['name'] = name
self.buf_out.append(data)
def select(self):
'''
'''
attrs = ('errer', 'reader', 'writer')
while True:
utils.call_timeouts()
if len(self.buf_out) == 0 and self.sock:
self.writers.remove(self.sock.fileno())
try:
# NOTE: you will never have to write anything without reading first from a different one
_in, _out, _except = select.select(self.readers, self.writers, self.errers, SELECT_TIMEOUT)
except (IOError, OSError) as e:
continue
except (select.error, socket.error, Exception) as e:
# Interrupted system call.
if e[0] == 4:
continue
self.reconnect()
continue
finally:
if self.sock:
self.writers.add(self.sock.fileno())
for position, fds in enumerate([_except, _in, _out]):
attr = attrs[position]
for fd in fds:
# the handler can remove itself from self.fds...
handler = self.fds.get(fd)
if handler is None:
continue
handler = handler[attr]
if handler:
handler(fd)
else:
raise Exception('no handler for fd: %s %s' % (fd, attr))
def cloud_read(self, fd):
buf = b''
try:
while True:
d = self.sock.recv(FD_READ_BYTES)
if not d:
break
buf += d
except (socket.error, TypeError):
pass
if buf:
self.empty_selects = 0
self.handle(buf)
else:
self.empty_selects += 1
if (int(self.empty_selects * SELECT_TIMEOUT)) > NET_TIMEOUT:
err('No data from sock.recv() {0} times.'.format(self.empty_selects))
return self.reconnect()
def cloud_write(self, fd):
new_buf_out = collections.deque()
try:
while True:
item = self.buf_out.popleft()
data = json.dumps(item) + '\n'
if self.authed or item['name'] == 'auth':
if not PY2:
data = data.encode('utf-8')
self.sock.sendall(data)
else:
new_buf_out.append(item)
except socket.error:
self.buf_out.appendleft(item)
self.reconnect()
except IndexError:
pass
self.buf_out.extendleft(new_buf_out)
def cloud_err(self, err):
out('reconnecting because of %s' % err)
self.reconnect()
def handle(self, req):
self.buf_in += req
while True:
before, sep, after = self.buf_in.partition(b'\n')
if not sep:
break
data = json.loads(before.decode('utf-8'), encoding='utf-8')
self.handle_event(data)
self.buf_in = after
def handle_event(self, data):
name = data.get('name')
if not name:
return out('no name in data?!?')
func = getattr(self, "on_%s" % (name), None)
if not func:
return
func(data)
def on_room_info(self, ri):
self.authed = True
self.ri = ri
def list_terms(terms):
term_name = ""
for term_id, term in terms.items():
owner = str(term['owner'])
term_name = term['term_name']
out('terminal %s created by %s' % (term['term_name'], ri['users'][owner]['username']))
return term_name
if self.options.create:
buf = self._get_pty_size()
term_name = self.term_name
i = 0
term_names = [term['term_name'] for term_id, term in ri['terms'].items()]
while term_name in term_names:
i += 1
term_name = self.term_name + str(i)
self.term_name = term_name
return self.transport('create_term', {'term_name': self.term_name, 'size': [buf[1], buf[0]]})
elif self.options.list:
out('Terminals in %s::%s' % (self.owner, self.workspace))
list_terms(ri['terms'])
return die()
elif not self.term_name:
if len(ri['terms']) == 0:
out('There is no active terminal in this workspace. Do you want to share your terminal? (y/n)')
choice = input().lower()
self.term_name = "_"
if choice == 'y':
self.options.create = True
buf = self._get_pty_size()
return self.transport('create_term', {'term_name': self.term_name, 'size': [buf[1], buf[0]]})
else:
die('If you ever change your mind, you can share your terminal using the --create [super_awesome_name] flag.')
elif len(ri['terms']) == 1:
term_id, term = list(ri['terms'].items())[0]
self.term_id = int(term_id)
self.term_name = term['term_name']
else:
out('More than one active term exists in this workspace.')
example_name = list_terms(ri['terms'])
die('Please pick a workspace like so: flootty %s' % example_name)
else:
for term_id, term in ri['terms'].items():
if term['term_name'] == self.term_name:
self.term_id = int(term_id)
break
if self.term_id is None:
die('No terminal with name %s' % self.term_name)
return self.join_term()
def on_ping(self, data):
self.transport('pong', {})
def on_disconnect(self, data):
reason = data.get('reason')
out('Disconnected by server!')
if reason:
# TODO: don't kill terminal until current process is done or something
die('Reason: %s' % reason)
self.reconnect()
def on_error(self, data):
if self.term_id is None:
die(data.get('msg'))
else:
out('Error from server: %s' % data.get('msg'))
def on_create_term(self, data):
if data.get('term_name') != self.term_name:
return
self.term_id = data.get('id')
self.create_term()
def on_delete_term(self, data):
if data.get('id') != self.term_id:
return
die('User %s killed the terminal. Exiting.' % (data.get('username')))
def on_update_term(self, data):
if data.get('id') != self.term_id:
return
self._set_pty_size()
def on_term_stdin(self, data):
if data.get('id') != self.term_id:
return
if not self.options.create:
return
self.handle_stdio(base64.b64decode(data['data']), data.get('user_id'))
def on_term_stdout(self, data):
if data.get('id') != self.term_id:
return
self.handle_stdio(data['data'])
def reconnect(self):
if self.reconnect_timeout:
return
new_buf_out = collections.deque()
total_len = 0
while True:
try:
item = self.buf_out.popleft()
except IndexError:
break
if item['name'] == 'term_stdout':
total_len += len(item['data'])
if total_len > MAX_BYTES_TO_BUFFER:
continue
new_buf_out.appendleft(item)
self.buf_out = new_buf_out
if self.sock:
self.remove_fd(self.sock.fileno())
try:
self.sock.shutdown(2)
except Exception:
pass
try:
self.sock.close()
except Exception:
pass
self.sock = None
self.authed = False
self.reconnect_delay *= 1.5
if self.reconnect_delay > 10000:
self.reconnect_delay = 10000
self.reconnect_timeout = utils.set_timeout(self.connect_to_internet, self.reconnect_delay)
def send_auth(self):
self.buf_out.appendleft({
'name': 'auth',
'username': self.options.username,
'secret': self.options.secret,
'room': self.workspace,
'room_owner': self.owner,
'client': CLIENT,
'platform': sys.platform,
'version': PROTO_VERSION
})
def connect_to_internet(self):
self.empty_selects = 0
self.reconnect_timeout = None
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.options.use_ssl:
self.cert_fd = tempfile.NamedTemporaryFile()
self.cert_fd.write(cert.CA_CERT.encode('utf-8'))
self.cert_fd.flush()
self.sock = ssl.wrap_socket(self.sock, ca_certs=self.cert_fd.name, cert_reqs=ssl.CERT_REQUIRED)
elif self.port == 3448:
self.port = 3148
out('Connecting to %s' % self.workspace_url())
try:
self.sock.connect((self.host, self.port))
if self.options.use_ssl:
self.sock.do_handshake()
except socket.error as e:
out('Error connecting: %s.' % e)
return self.reconnect()
self.sock.setblocking(0)
out('Connected!')
self.send_auth()
self.add_fd(self.sock, reader=self.cloud_read, writer=self.cloud_write, errer=self.cloud_err, name='net')
self.reconnect_delay = INITIAL_RECONNECT_DELAY
def workspace_url(self):
proto = {True: "https", False: "http"}
proto_str = proto[self.options.use_ssl]
port_str = ''
if self.options.use_ssl:
if self.port != 3448:
port_str = ':%s' % self.port
else:
if self.port != 3148:
port_str = ':%s' % self.port
return '%s://%s%s/%s/%s' % (proto_str, self.host, port_str, self.owner, self.workspace)
def join_term(self):
out('Successfully joined %s' % (self.workspace_url()))
self.orig_stdout_atts = tty.tcgetattr(sys.stdout)
stdout = sys.stdout.fileno()
tty.setraw(stdout)
fl = fcntl.fcntl(stdout, fcntl.F_GETFL)
fcntl.fcntl(stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK)
self.orig_stdin_atts = tty.tcgetattr(sys.stdin)
stdin = sys.stdin.fileno()
tty.setraw(stdin)
fl = fcntl.fcntl(stdin, fcntl.F_GETFL)
fcntl.fcntl(stdin, fcntl.F_SETFL, fl | os.O_NONBLOCK)
def ship_stdin(fd):
data = read(fd)
if data:
self.transport("term_stdin", {'data': base64.b64encode(data).decode('utf8'), 'id': self.term_id})
if 'term_stdin' in self.ri['perms']:
out('You have permission to write to this terminal. Remember: With great power comes great responsibility.')
self.add_fd(stdin, reader=ship_stdin, name='join_term_stdin')
else:
out('You do not have permission to write to this terminal.')
def stdout_write(buf):
write(stdout, base64.b64decode(buf))
self.handle_stdio = stdout_write
self._set_pty_size(self.ri['terms'][str(self.term_id)]['size'])
def create_term(self):
'''
Create a spawned process.
Based on the code for pty.spawn().
'''
if self.master_fd:
# reconnected. don't spawn a new shell
out('Reconnected to %s' % (self.workspace_url()))
return
shell = os.environ['SHELL']
out('Successfully joined %s' % (self.workspace_url()))
self.child_pid, self.master_fd = pty.fork()
if self.child_pid == pty.CHILD:
os.execlpe(shell, shell, '--login', os.environ)
self.orig_stdin_atts = tty.tcgetattr(sys.stdin.fileno())
tty.setraw(pty.STDIN_FILENO)
self.original_wincher = signal.signal(signal.SIGWINCH, self._signal_winch)
self._set_pty_size()
def slave_death(fd):
die('Exiting flootty because child exited.')
self.extra_data = b''
def stdout_write(fd):
'''
Called when there is data to be sent from the child process back to the user.
'''
try:
data = self.extra_data + os.read(fd, FD_READ_BYTES)
except:
data = None
if not data:
return die("Time to go!")
self.transport("term_stdout", {'data': base64.b64encode(data).decode('utf8'), 'id': self.term_id})
write(pty.STDOUT_FILENO, data)
self.add_fd(self.master_fd, reader=stdout_write, errer=slave_death, name='create_term_stdout_write')
def stdin_write(fd):
data = os.read(fd, FD_READ_BYTES)
if data:
write(self.master_fd, data)
now = time.time()
# Only send stdin event if it's been > 2 seconds. This prevents people from figuring out password lengths
if now - self.last_stdin > 2:
self.transport("term_stdin", {'data': ' ', 'id': self.term_id})
self.last_stdin = now
self.add_fd(pty.STDIN_FILENO, reader=stdin_write, name='create_term_stdin_write')
def net_stdin_write(buf, user_id=None):
if self.options.safe:
if buf.find('\n') != -1 or buf.find('\r') != -1:
to = user_id or []
self.transport('datamsg', {
'to': to,
'data': {
'name': 'safe_term',
'term_id': self.term_id,
'msg': 'Terminal %s is in safe mode. Other users are not allowed to press enter.' % self.term_name,
}})
self.transport('term_stdout', {
'id': self.term_id,
'data': base64.b64encode('\a').decode('utf8'),
})
buf = buf.replace('\n', '')
buf = buf.replace('\r', '')
if not buf:
return
write(self.master_fd, buf)
self.handle_stdio = net_stdin_write
color_green = '\\[\\e[32m\\]'
color_reset = '\\[\\033[0m\\]'
color_yellorange = '\\[\\e[93m\\]'
# TODO: other shells probably use weird color escapes
if 'zsh' in shell:
color_green = "%{%F{green}%}"
color_reset = "%{%f%}"
color_yellorange = "%{%F{yellow}%}"
if self.options.set_prompt:
term_color = color_yellorange
if self.options.safe:
term_color = color_green
# Not confusing at all </sarcasm>
cmd = 'PS1="%s%s::%s::%s%s%s%s $PS1"\n' % (color_green, self.owner, self.workspace, color_reset, term_color, self.term_name, color_reset)
write(self.master_fd, cmd)
def _signal_winch(self, signum, frame):
'''
Signal handler for SIGWINCH - window size has changed.
'''
self._set_pty_size()
def _get_pty_size(self):
buf = array.array('h', [0, 0, 0, 0])
fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCGWINSZ, buf, True)
return buf
def _set_pty_size(self, size=None):
'''
Sets the window size of the child pty based on the window size of our own controlling terminal.
'''
# Get the terminal size of the real terminal, set it on the pseudoterminal.
buf = self._get_pty_size()
if size:
buf[0] = size[1]
buf[1] = size[0]
if self.options.create:
assert self.master_fd is not None
fcntl.ioctl(self.master_fd, termios.TIOCSWINSZ, buf)
if self.term_id:
self.transport('update_term', {'id': self.term_id, 'size': [buf[1], buf[0]]})
else:
# XXXX: this resizes the window :/
if self.options.resize:
os.write(pty.STDOUT_FILENO, "\x1b[8;{rows};{cols}t".format(rows=buf[0], cols=buf[1]))
fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCSWINSZ, buf)
def cleanup(self):
if self.orig_stdout_atts:
self.orig_stdout_atts[3] = self.orig_stdout_atts[3] | termios.ECHO
tty.tcsetattr(sys.stdout, tty.TCSAFLUSH, self.orig_stdout_atts)
if self.orig_stdin_atts:
self.orig_stdin_atts[3] = self.orig_stdin_atts[3] | termios.ECHO
tty.tcsetattr(sys.stdin, tty.TCSAFLUSH, self.orig_stdin_atts)
if self.original_wincher:
signal.signal(signal.SIGWINCH, self.original_wincher)
try:
self.cert_fd.close()
except Exception:
pass
print('ciao.')
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = shared
import os
__VERSION__ = ''
__PLUGIN_VERSION__ = ''
# Config settings
USERNAME = ''
SECRET = ''
API_KEY = ''
DEBUG = False
SOCK_DEBUG = False
ALERT_ON_MSG = True
LOG_TO_CONSOLE = False
BASE_DIR = os.path.expanduser(os.path.join('~', 'floobits'))
# Shared globals
DEFAULT_HOST = 'floobits.com'
DEFAULT_PORT = 3448
SECURE = True
SHARE_DIR = None
COLAB_DIR = ''
PROJECT_PATH = ''
JOINED_WORKSPACE = False
PERMS = []
STALKER_MODE = False
AUTO_GENERATED_ACCOUNT = False
PLUGIN_PATH = None
WORKSPACE_WINDOW = None
CHAT_VIEW = None
CHAT_VIEW_PATH = None
TICK_TIME = 100
AGENT = None
IGNORE_MODIFIED_EVENTS = False
VIEW_TO_HASH = {}
FLOORC_PATH = os.path.expanduser(os.path.join('~', '.floorc'))
########NEW FILE########
__FILENAME__ = utils
import re
import time
from collections import defaultdict
try:
from urllib.parse import urlparse
assert urlparse
except ImportError:
from urlparse import urlparse
try:
from . import shared as G
assert G
except (ImportError, ValueError):
import shared as G
top_timeout_id = 0
cancelled_timeouts = set()
timeout_ids = set()
timeouts = defaultdict(list)
def set_timeout(func, timeout, *args, **kwargs):
global top_timeout_id
timeout_id = top_timeout_id
top_timeout_id += 1
if top_timeout_id > 100000:
top_timeout_id = 0
def timeout_func():
timeout_ids.discard(timeout_id)
if timeout_id in cancelled_timeouts:
cancelled_timeouts.remove(timeout_id)
return
func(*args, **kwargs)
then = time.time() + (timeout / 1000.0)
timeouts[then].append(timeout_func)
timeout_ids.add(timeout_id)
return timeout_id
def cancel_timeout(timeout_id):
if timeout_id in timeout_ids:
cancelled_timeouts.add(timeout_id)
def call_timeouts():
now = time.time()
to_remove = []
for t, tos in timeouts.copy().items():
if now >= t:
for timeout in tos:
timeout()
to_remove.append(t)
for k in to_remove:
del timeouts[k]
def parse_url(workspace_url):
secure = G.SECURE
owner = None
workspace_name = None
parsed_url = urlparse(workspace_url)
port = parsed_url.port
if not port:
port = G.DEFAULT_PORT
if parsed_url.scheme == 'http':
if not port:
port = 3148
secure = False
result = re.match('^/([-\@\+\.\w]+)/([-\@\+\.\w]+)/?$', parsed_url.path)
if not result:
result = re.match('^/r/([-\@\+\.\w]+)/([-\@\+\.\w]+)/?$', parsed_url.path)
if result:
(owner, workspace_name) = result.groups()
else:
raise ValueError('%s is not a valid Floobits URL' % workspace_url)
return {
'host': parsed_url.hostname,
'owner': owner,
'port': port,
'workspace': workspace_name,
'secure': secure,
}
########NEW FILE########
__FILENAME__ = version
FLOOTTY_VERSION = '2.1.4'
########NEW FILE########
|
[
"dyangUCI@github.com"
] |
dyangUCI@github.com
|
75470a97bd504c9d4dc72ff45e08c475eb574228
|
921e109a719351ac053f33fccacb55e9b54bbee1
|
/courses/admin.py
|
5002e52cdf17036a60919ea3e27033b02d16451d
|
[
"MIT"
] |
permissive
|
deboraazevedo/udemy-free-courses
|
a49db24b89ca7a10444fbd8551d2800347ee4c25
|
6365321de0ed1c8521e1db96a63d17a06e056e4b
|
refs/heads/master
| 2021-06-25T23:04:45.749555
| 2018-02-27T23:48:26
| 2018-02-27T23:48:26
| 123,203,654
| 1
| 2
|
MIT
| 2020-10-04T09:44:06
| 2018-02-27T23:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 90
|
py
|
from django.contrib import admin
from .models import Course
admin.site.register(Course)
|
[
"contato.hudsonbrendon@gmail.com"
] |
contato.hudsonbrendon@gmail.com
|
9269612b5c1989fb509e058f4f2198d446452df2
|
63f783b298c90d8c71d49be208172bd201e4cbca
|
/manage.py
|
fb2c235c6d28e1a55533132eed856d3ae6bffdb3
|
[
"MIT"
] |
permissive
|
dbobbgit/recipebox
|
58c41b1d89befc60cf4d5b7834b59d02d2dcf227
|
8e4e5c6f609e2524726954c9382ca37e844721f9
|
refs/heads/main
| 2023-05-26T00:56:38.292455
| 2021-06-09T14:11:46
| 2021-06-09T14:11:46
| 375,378,400
| 0
| 0
|
MIT
| 2021-06-09T14:11:47
| 2021-06-09T14:10:06
|
Python
|
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'recipebox.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"danielle@oakandivymedia.com"
] |
danielle@oakandivymedia.com
|
511fe8b79650e5129209a33e6c7d768af423c6e6
|
2a1f4c4900693c093b2fcf4f84efa60650ef1424
|
/py/dome/backend/apps.py
|
fc8e9e1db58cfc9dbc955eb7df36461f862fe2b5
|
[
"BSD-3-Clause"
] |
permissive
|
bridder/factory
|
b925f494303728fa95017d1ba3ff40ac5cf6a2fd
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
refs/heads/master
| 2023-08-10T18:51:08.988858
| 2021-09-21T03:25:28
| 2021-09-21T03:25:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import unicode_literals
from django.apps import AppConfig
class BackendConfig(AppConfig):
name = 'backend'
|
[
"chrome-bot@chromium.org"
] |
chrome-bot@chromium.org
|
cb5e4cf9e89b4161efe98de831afd47a6290f0db
|
1c8a05b18d1e895a99e92c9d9d48f30296342a76
|
/2. socket通信.py
|
98e28c13d8edce629a2e35f4eaa157199c53bdfa
|
[] |
no_license
|
codingtruman/socket-UDP
|
1190bbd1a5055bf6461d6cb2b499a81e234760f0
|
6b4e9839b14571f6472f6fc7b37514c31f353789
|
refs/heads/master
| 2023-01-04T18:38:37.356220
| 2020-11-04T01:20:21
| 2020-11-04T01:20:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
from socket import *
# 创建TCP socket
tcpSocket = socket(AF_INET, SOCK_STREAM)
print('TCP socket created')
# 设置地址,端口和要发送的数据
sendAddr = ('192.168.0.19', 3306)
# 如果此处没写b,下面使用sendData时要变成sendData.encode('utf-8')
sendData = b'ok, tcp'
# 连接服务器和发送
# tcpSocket.connect(sendAddr)
# tcpSocket.send(sendData)
# 关闭socket
tcpSocket.close()
##############################################
# 创建UDP socket
udpSocket = socket(AF_INET, SOCK_DGRAM)
print('UDP socket created')
# 绑定本地信息,如果一个网络程序不稳定,系统就会随机分配。但一般情况作为接收方时需要绑定
# bind()参数是个元祖 #IP一般不用写,表示本机任何一个IP,有几个网卡就几个IP
udpSocket.bind(('', 3304))
# 设置地址,端口和要发送的数据
sendAddr = ('192.168.0.19', 3306)
sendData = 'ok, 尼玛123'
# 在这里就他妈的提前转换好编码
sendData = sendData.encode('utf-8')
# 连接服务器和发送
udpSocket.sendto(sendData, sendAddr)
# 等待接收方发送数据
# 设置一次只接收1024 bytes
recvdata = udpSocket.recvfrom(1024)
# recvData是个元祖
content, host_info = recvdata
# 如果接收的是中文消息,用utf-8解码会报错的!
content = content.decode('gb2312')
print(content, host_info)
# 关闭socket
udpSocket.close()
|
[
"maplewong04@gmail.com"
] |
maplewong04@gmail.com
|
9d0d9428ad332411b938df93cd900f02fefc493a
|
f921b215086d6556885da0c50b68e4de861216ac
|
/src/helpers/operations.py
|
b533530fbdc6a9316593c2f4f7a1a166640e31d8
|
[] |
no_license
|
tonaflcastelan/prueba-ing-backend
|
afee173d31d78139306a38a55d4e98f219281068
|
7c114be5e4c59c0e2b0fd3775ba237ac94e6f4a4
|
refs/heads/master
| 2022-12-03T23:58:36.924155
| 2020-08-04T00:17:54
| 2020-08-04T00:17:54
| 284,596,408
| 0
| 0
| null | 2020-08-03T03:42:34
| 2020-08-03T03:42:33
| null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
def get_average(value1, value2) -> float:
"""
Get average
"""
return (value1 + value2) / 2
def get_percent(dividend, divider) -> float:
"""
Get percent
"""
return round((float(dividend / divider)), 2) * 100
|
[
"tona.castelan16@gmail.com"
] |
tona.castelan16@gmail.com
|
091a0e98da2ac80625b44ff0b4fd86db82597c27
|
0e50b69d115d90eccec55b7049e5d8f5ee438ba3
|
/brabbl/accounts/migrations/0013_user_image.py
|
6b2bb515e6d0afc8c80631c7a3cc8641dcf7ed27
|
[] |
no_license
|
strader07/brabbl-backend-django
|
bd7a5d3e328e3ff975e450e9fd8de933b2a3992c
|
795113ee7e1c3d7ed3b093f8c9435777a29bfd10
|
refs/heads/master
| 2023-05-15T05:31:51.422840
| 2021-06-04T16:12:23
| 2021-06-04T16:12:23
| 373,679,407
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0012_auto_20160308_1500'),
]
operations = [
migrations.AddField(
model_name='user',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='images/profiles/', verbose_name='Profilbild'),
),
]
|
[
"v7sion@topdogtek.com"
] |
v7sion@topdogtek.com
|
c55382d209d32058d1850a42aceb160d7068f6a1
|
9a65cdc78a860ecc176985860f2d02a841b72ef2
|
/editors/vinegar.py
|
bef3383bb94eb863957f44686bc9bb7a51fb3207
|
[] |
no_license
|
akanevs/dsp
|
4da0354c78b8c0b33bce94af471d3d65ef3e7c0c
|
7e97f2fbfdfd0a6647affcd74303ee8e4a30bde6
|
refs/heads/master
| 2021-01-22T18:15:30.340275
| 2017-09-18T02:26:01
| 2017-09-18T02:26:01
| 100,755,288
| 0
| 0
| null | 2017-08-18T22:42:07
| 2017-08-18T22:42:07
| null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
print('This is not my first or even second file created using vim')
print('This was was created by Alex in Millburn, NJ!')
|
[
"noreply@github.com"
] |
noreply@github.com
|
719fe210dc977586a94742653b7a84094eb6aa0d
|
68ff38f2dd0188909737b395ac227ec0e205d024
|
/Project-2/src/UtilsModule.py
|
d2d03210a398be119b8953bc2db5d6cfbc88b347
|
[] |
no_license
|
kushagraThapar/Data-Mining
|
7c3992c5efe820185e5197c8098168ae92e68cf9
|
658f3fdc8c4ea91d717cb36d5504d5e1fb803960
|
refs/heads/master
| 2021-01-18T15:40:23.412283
| 2017-01-29T08:49:04
| 2017-01-29T08:49:04
| 68,418,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
import sys
def write_file(filename, text):
try:
f = open(filename, "w")
f.write(text)
f.close()
except IOError:
print("IOError occurred in file [" + filename + "]")
exit_program()
return
def read_file(filename):
try:
f = open(filename, "rU")
text = f.read()
f.close()
return text
except FileNotFoundError:
print("File not found with name [" + filename + "]")
except IOError:
print("IOError occurred in file [" + filename + "]")
exit_program()
def process_tweets(tweet_data):
if tweet_data is None or tweet_data.strip() is "":
print("Tweet Data is Empty")
exit_program()
for single_row in tweet_data.split("\n"):
single_row = single_row.strip()
single_row_array = single_row.split(",", 3)
if len(single_row_array) >= 3:
last_index = single_row_array[3].rfind("\"")
tweet = single_row_array[3][0:last_index + 1]
tweet_class = single_row_array[3][last_index + 2:last_index + 3]
def exit_program():
print("Program will exit now... ")
sys.exit(1)
|
[
"kthapa2@uic.edu"
] |
kthapa2@uic.edu
|
52b11a09076f3904dc2f45e1e998edf62a885d87
|
aae0432eede626a0ac39ff6d81234e82f8d678c2
|
/leetcode/algorithm/4.median-of-two-sorted-arrays.py
|
63670a63bf49ee10613895df33ff3b9ae3388fc8
|
[] |
no_license
|
KIDJourney/algorithm
|
81c00186a6dfdc278df513d25fad75c78eb1bf68
|
e1cf8e12050b9f1419a734ff93f9c626fc10bfe0
|
refs/heads/master
| 2022-11-24T09:30:16.692316
| 2022-11-06T09:33:51
| 2022-11-06T09:33:51
| 40,428,125
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
#
# @lc app=leetcode id=4 lang=python3
#
# [4] Median of Two Sorted Arrays
#
# @lc code=start
class Solution:
def findMedianSortedArrays(self, nums1, nums2) -> float:
return self.easy(nums1, nums2)
def easy(self, nums1, nums2):
result = []
idx1, idx2 = 0, 0
while True:
if idx1 == len(nums1) and idx2 == (len(nums2)):
break
if idx1 == len(nums1):
result.append(nums2[idx2])
idx2 += 1
continue
if idx2 == len(nums2):
result.append(nums1[idx1])
idx1 += 1
continue
if nums1[idx1] > nums2[idx2]:
result.append(nums2[idx2])
idx2 += 1
else:
result.append(nums1[idx1])
idx1 += 1
mid = len(result) // 2
if len(result) % 2 == 0:
return (result[mid] + result[mid-1]) / 2.0
else:
return (result[mid])
# @lc code=end
|
[
"kingdeadfish@qq.com"
] |
kingdeadfish@qq.com
|
9532029f8f9affb83b76eb28c4c4b4ce1d9c037f
|
bf3d802cf7b75ab84fc0bfae125d9b0cc1aed1e4
|
/API/manager/salt.py
|
c39acfedaf0ef0d82652fed0385c66ddd4be0276
|
[] |
no_license
|
AlesKas/BAK
|
cf93c6b2a7475139bbfc8833e811264efcf0d8ff
|
56f4ac00a523c4ac02da0b7e33590d0b3dd2b190
|
refs/heads/master
| 2023-04-21T10:46:44.359788
| 2021-05-12T14:17:40
| 2021-05-12T14:17:40
| 197,364,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
import json
from utils.logger import initLogging
from .base import GetRequest, BaseException
from utils.db.model import NtwSalt
from peewee import DoesNotExist
LOGGER = initLogging()
class Salt(GetRequest):
@classmethod
def handle_get(cls, **kwargs):
salt = NtwSalt.get().salt
response = {}
response['salt'] = salt
return response
|
[
"akaspare@redhat.com"
] |
akaspare@redhat.com
|
82f5cd9bfe71e1f9c34be4f1d8c1789d269f969b
|
61267e7bb146e67d7ce5b81ef8c6fb32cdb1088e
|
/apps/forums/migrations/0002_auto_20190408_2007.py
|
b928de461230382f7b0e27ca992d15780a476ae9
|
[] |
no_license
|
xiaoyaochen/ACshare
|
8f7e294724d90925f9fb80799c9fbd3680c01057
|
482985231e0e6d8632c8504a30f994ba246a060a
|
refs/heads/master
| 2020-05-07T11:57:43.663344
| 2019-04-20T14:55:55
| 2019-04-20T14:55:55
| 180,483,088
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
# Generated by Django 2.0 on 2019-04-08 20:07
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename ForumsComment.reviewer to ForumsComment.user."""
    dependencies = [
        ('forums', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='forumscomment',
            old_name='reviewer',
            new_name='user',
        ),
    ]
|
[
"1595029296@qq.com"
] |
1595029296@qq.com
|
e56f0bd33da3d74267fd6ab2971ead15aa9263b8
|
1c488f486d14c19e19af1a46474af224498be193
|
/experimental/serengeti/blankIBCC.py
|
649a35a733279dc7605d90eb8296b4e245101794
|
[
"Apache-2.0"
] |
permissive
|
JiaminXuan/aggregation
|
fc2117494372428adeed85a9a413e2ff47244664
|
9a7ecbc2d4b143a73e48b1826b3727b6976fa770
|
refs/heads/master
| 2020-12-11T01:49:42.977664
| 2015-05-22T16:21:15
| 2015-05-22T16:21:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,087
|
py
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
sys.path.append("/home/greg/github/pyIBCC/python")
import ibcc
# Mongo collections holding Snapshot Serengeti classifications and subjects.
client = pymongo.MongoClient()
db = client['serengeti_2014-07-28']
collection = db["serengeti_classifications"]
collection2 = db["serengeti_subjects"]
# Global accumulators shared by update()/analyze() and the main loop below.
subjects = []
users = []
classifications = []
class_count = {}
blank_count = {}
retiredBlanks = {}
# Write the pyIBCC configuration module (priors, file paths) from scratch.
with open("/home/greg/Databases/serengeti_ibcc.py","wb") as f:
    f.write("import numpy as np\n")
    f.write("scores = np.array([0,1])\n")
    f.write("nScores = len(scores)\n")
    f.write("nClasses = 2\n")
    f.write("inputFile = \"/home/greg/Databases/serengeti_ibcc.csv\"\n")
    f.write("outputFile = \"/home/greg/Databases/serengeti_ibcc.out\"\n")
    f.write("confMatFile = \"/home/greg/Databases/serengeti_ibcc.mat\"\n")
    f.write("nu0 = np.array([30,70])\n")
    f.write("alpha0 = np.array([[3, 1], [1,3]])\n")
# Start the IBCC input CSV with a header row.
with open("/home/greg/Databases/serengeti_ibcc.csv","wb") as f:
    f.write("a,b,c\n")
import datetime
def update(individual_classifications):
    """Append (user, subject, blank) rows to the IBCC CSV and rerun IBCC.

    Stale IBCC output/intermediate files are removed first so the run
    starts clean; the timestamps bracket the IBCC runtime.
    """
    #start by removing all temp files
    try:
        os.remove("/home/greg/Databases/serengeti_ibcc.out")
    except OSError:
        pass
    try:
        os.remove("/home/greg/Databases/serengeti_ibcc.mat")
    except OSError:
        pass
    try:
        os.remove("/home/greg/Databases/serengeti_ibcc.csv.dat")
    except OSError:
        pass
    # Append the new classifications to the input CSV (header written at startup).
    with open("/home/greg/Databases/serengeti_ibcc.csv","a") as f:
        for u, s, b in individual_classifications:
            f.write(str(u)+","+str(s)+","+str(b)+"\n")
    print datetime.datetime.time(datetime.datetime.now())
    ibcc.runIbcc("/home/greg/Databases/serengeti_ibcc.py")
    print datetime.datetime.time(datetime.datetime.now())
def analyze():
    """Scan the IBCC output and retire subjects that are confidently blank.

    A subject is retired when P(blank) >= 0.995 and it has at least two
    classifications; results accumulate in the global retiredBlanks dict.
    """
    with open("/home/greg/Databases/serengeti_ibcc.out","rb") as f:
        reader = csv.reader(f,delimiter=" ")
        # Each row: subject index, P(class 0), P(class 1 == blank).
        for subject_index,p0,p1 in reader:
            subject_index = int(float(subject_index))
            subject_id = subjects[subject_index]
            c = class_count[subject_id]
            if (float(p1) >= 0.995) and (c>= 2):
                if not(subject_id in retiredBlanks):
                    retiredBlanks[subject_id] = c
                    #print str(c) + " :: " + str(p1)
# Main pass: stream classifications from Mongo, rerunning IBCC every
# 10,000 records, then compare retired-blank decisions against the
# retire reasons recorded on the subjects themselves.
i = 0
unknownUsers = []
for r in collection.find({"tutorial": {"$ne": True}}):
    try:
        user_name = r["user_name"]
    except KeyError:
        # Anonymous classification: track the IP instead and skip it.
        unknownUsers.append(r["user_ip"])
        continue
    zooniverse_id = r["subjects"][0]["zooniverse_id"]
    if zooniverse_id in retiredBlanks:
        continue
    # Every 10k records: push the batch through IBCC and retire blanks.
    if ((i%10000) == 0) and (i > 0):
        print i
        update(classifications)
        classifications = []
        analyze()
    if not(user_name in users):
        users.append(user_name)
    if not(zooniverse_id in subjects):
        subjects.append(zooniverse_id)
        class_count[zooniverse_id] = 0
        blank_count[zooniverse_id] = 0
    i += 1
    # NOTE(review): list.index is O(n); fine for this offline script.
    user_index = users.index(user_name)
    subject_index = subjects.index(zooniverse_id)
    class_count[zooniverse_id] += 1
    a = r["annotations"]
    # The final annotation carries "nothing" when the volunteer saw no animal.
    if not("nothing" in a[-1]):
        assert('species' in a[0])
        blank = 0
    else:
        blank = 1
        blank_count[zooniverse_id] += 1
    classifications.append((user_index,subject_index,blank))
    if i >= 300000:
        break
#print len(unknownUsers)
#print len(list(set(unknownUsers)))
# Compare IBCC's retired blanks against the recorded retire reasons.
tBlank = 0
fBlank = 0
speciesList = ['blank','elephant','zebra','warthog','impala','buffalo','wildebeest','gazelleThomsons','dikDik','giraffe','gazelleGrants','lionFemale','baboon','hippopotamus','ostrich','human','otherBird','hartebeest','secretaryBird','hyenaSpotted','mongoose','reedbuck','topi','guineaFowl','eland','aardvark','lionMale','porcupine','koriBustard','bushbuck','hyenaStriped','jackal','cheetah','waterbuck','leopard','reptiles','serval','aardwolf','vervetMonkey','rodents','honeyBadger','batEaredFox','rhinoceros','civet','genet','zorilla','hare','caracal','wildcat']
errors = {s.lower():0 for s in speciesList}
for zooniverse_id in retiredBlanks:
    r = collection2.find_one({"zooniverse_id" : zooniverse_id})
    retire_reason = r["metadata"]["retire_reason"]
    if retire_reason in ["blank", "blank_consensus"]:
        tBlank += 1
    else:
        # False blank: report which species was actually seen most often.
        fBlank += 1
        print zooniverse_id + " :: " + str(r["location"]["standard"][0])
        f = max(r["metadata"]["counters"].items(), key = lambda x:x[1])
        print f
        try:
            errors[f[0].lower()] += 1
            print str(blank_count[zooniverse_id]) + "/" + str(class_count[zooniverse_id])
        except KeyError:
            print "---***"
        #print str(r["metadata"]["counters"].values())
print "==---"
print tBlank
print fBlank
print np.mean(retiredBlanks.values())
print np.median(retiredBlanks.values())
print "===---"
for s in speciesList:
    if errors[s.lower()] != 0:
        print s + " - " + str(errors[s.lower()])
|
[
"greg@zooniverse.org"
] |
greg@zooniverse.org
|
b217f0ad3fe6dbfaa9f4171e6b8a876290441a13
|
b80ab06996845b63d78158e9713e4f1ad7229ee7
|
/main.py
|
e14d58e06f9e5dcfadc2df6bf0025c32e3c3c5c5
|
[] |
no_license
|
Pepega123/tracker
|
1bd96244a1d65f06292e20a3a81e07c86d70ba65
|
ad639d19cd1657d06e04afb32e91979603afd88f
|
refs/heads/master
| 2022-09-10T22:16:36.226686
| 2020-06-05T12:04:30
| 2020-06-05T12:04:30
| 269,622,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,171
|
py
|
import datetime
import hashlib
import logging
import os
import psutil
import psycopg2
import time
import atexit
from db import *
logger = logging.getLogger('tracker')
logger.setLevel(logging.DEBUG)
# Running total of minutes tracked for CURRENT_DATE.
TOTAL_MINUTES = 0
# True when no row exists yet for today's date (first run of a new day).
NEW_DAY = False
START_TIME = 0
CURRENT_DATE = datetime.date.today()
# NOTE(review): module-level connection opens at import time — presumably
# kept so the atexit hook at the end of main() can flush; confirm.
CONN = psycopg2.connect(**config())
CUR = CONN.cursor()
def get_hash(filename):
    """Return the SHA-1 hex digest of the file at *filename*."""
    digest = hashlib.sha1()
    with open(filename, 'rb') as fh:
        # Read 1 KiB at a time; iter()'s sentinel stops the loop at EOF.
        for chunk in iter(lambda: fh.read(1024), b''):
            digest.update(chunk)
    return digest.hexdigest()
def find_proc_by_hash(hash="fdd2fe36c6e859779f8e5e65155acf55d625f7f1"):
    """Return True if any running process's executable SHA-1 matches *hash*."""
    for proc in psutil.process_iter():
        try:
            # Only hash executables under Program Files (x86) to keep the
            # scan cheap; drop this filter for a thorough search.
            if "C:\\Program Files (x86)\\" in proc.exe():
                if get_hash(proc.exe()) == hash:
                    return True
        except (psutil.AccessDenied, FileNotFoundError):
            # Process vanished or is inaccessible; skip it.
            pass
    return False
def calc_time_diff(start_time, end_time):
    """Return minutes elapsed between two datetimes (whole seconds / 60)."""
    start_secs = time.mktime(start_time.timetuple())
    end_secs = time.mktime(end_time.timetuple())
    return int(end_secs - start_secs) / 60
def execute_stmt(date, minutes, cur):
    """Persist the minute tally for *date*: INSERT on a new day, UPDATE otherwise.

    Fix: the SQL was previously built by string concatenation; it now uses
    parameterized queries so the driver quotes values safely.
    """
    global NEW_DAY
    if NEW_DAY:
        # First write for this date: create the row.
        stmnt = "INSERT INTO times(date, minutes) VALUES (%s, %s);"
        params = (str(date), minutes)
        # Subsequent writes on the same day must update, not insert.
        NEW_DAY = False
    else:
        stmnt = "UPDATE times SET minutes = %s WHERE date = %s;"
        params = (minutes, str(date))
    logger.debug(stmnt)
    cur.execute(stmnt, params)
def init():
    """Attach debug and info file handlers to the module logger."""
    fh = logging.FileHandler('log/spam.log')
    fh.setLevel(logging.DEBUG)
    info = logging.FileHandler('log/info.log')
    info.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # NOTE(review): the formatter is only applied to the debug handler; the
    # info handler falls back to the default format — confirm this is intended.
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(info)
    logger.debug("-------------------------------")
    logger.debug("STARTING APPLICATION")
def main():
    """Poll for the WoW process and persist played minutes to Postgres.

    Outer loop waits for the process to appear; inner loop waits for it to
    exit, then the elapsed minutes are added to today's tally and written.
    Never returns (infinite loop).
    """
    logger.info("Initializing tracker")
    init()
    stmnt = ""
    global TOTAL_MINUTES
    global NEW_DAY
    global START_TIME
    global CURRENT_DATE
    # Seconds between process polls (inner and outer loops).
    UPDATE_INTERVAL = 10
    INITIAL_INTERVAL = 10
    CURRENT_DATE = datetime.date.today()
    params = config()
    conn = psycopg2.connect(**params)
    cur = conn.cursor()
    #proc_closed = True
    #queried_after_close = False
    #print(get_hash("C:\\Program Files (x86)\\World of Warcraft\\_classic_\\abc.exe"))
    # get date from previous run
    stmnt = "SELECT date FROM last_run;"
    cur.execute(stmnt)
    last_run_date = cur.fetchall()[0][0]
    logger.debug(stmnt)
    logger.debug(str(last_run_date))
    # A date change since the last run means today's row must be INSERTed.
    if str(last_run_date) != str(CURRENT_DATE):
        NEW_DAY = True
    #same day, start counting from already counted hours
    else:
        stmnt = "SELECT minutes FROM times WHERE date = \'" + str(CURRENT_DATE) + "\';"
        cur.execute("SELECT minutes FROM times WHERE date = \'" + str(CURRENT_DATE) + "\';")
        TOTAL_MINUTES += cur.fetchall()[0][0]
        logger.debug(stmnt)
        logger.debug(str(TOTAL_MINUTES))
    #TODO: maybe close connection to DB here and reopen at end?
    #outer while, wait for application to be opened
    while(True):
        time.sleep(INITIAL_INTERVAL)
        # Detected either by executable name or by SHA-1 of the binary.
        process_running = "WowClassic.exe" in (p.name() for p in psutil.process_iter()) or find_proc_by_hash()
        #process found
        if(process_running):
            logger.info("Process started!")
            START_TIME = datetime.datetime.now()
            #inner while, count time app is open
            while("WowClassic.exe" in (p.name() for p in psutil.process_iter()) or find_proc_by_hash()):
                logger.info("WoW running still...") #TODO: ADD LOGGING STATEMENTS
                time.sleep(UPDATE_INTERVAL) #wait 10 seconds between checks
            end_time = datetime.datetime.now()
            #calculate amount of time application was open
            elapsed_minutes = calc_time_diff(START_TIME, end_time)
            TOTAL_MINUTES = TOTAL_MINUTES + elapsed_minutes
        #wait for process to start
        else:
            continue
        logger.info("Total time active this session: " + str(elapsed_minutes))
        logger.info("Total time active today: " + str(TOTAL_MINUTES))
        #update last_run
        cur.execute("UPDATE last_run SET date = \'" + str(CURRENT_DATE) + "\';")
        #update/insert into times
        execute_stmt(CURRENT_DATE, TOTAL_MINUTES, cur)
        #update times
        #new day, create new entry:
        # if(NEW_DAY):
        #     stmnt = "INSERT INTO times(date, minutes) VALUES (\'" + str(CURRENT_DATE) + "\', " + str(TOTAL_MINUTES) + ");"
        #     logger.debug(stmnt)
        #     cur.execute(stmnt)
        #     #process has been closed, if it is opened again later it is (most likely) on the same day
        #     NEW_DAY = False
        #     #same day, update entry:
        # else:
        #     stmnt = "UPDATE times SET minutes = " + str(TOTAL_MINUTES) + " WHERE date = \'" + str(CURRENT_DATE) + "\';"
        #     logger.debug(stmnt)
        #     cur.execute("UPDATE times SET minutes = " + str(TOTAL_MINUTES) + " WHERE date = \'" + str(CURRENT_DATE) + "\';")
        #     cur.execute(stmnt)
        conn.commit()
        #start counting fresh next time process is opened
        #TOTAL_MINUTES = 0
    #never reached, maybe close/open before each update instead?
    #conn.close()
    #cleanup, log times before exiting
    atexit.register(execute_stmt, CURRENT_DATE, TOTAL_MINUTES, CUR)
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
2baf8dc9a7b2bfb654cb56694bc8420624a2c939
|
7cb5a8a08d21f2e4ae2b5b0de3cd53b1074f772c
|
/aws_django/webproj/homepage/views.py
|
67a0fd3691812ad271e7c1d06af89b09fdcee0ac
|
[] |
no_license
|
KDT-Hot6/Coogle
|
29d8778f8510dd14cc10dd7de8f11aab9cfe1e46
|
9dd742a022f209215df650f016f06bb2cbb0d3b9
|
refs/heads/main
| 2023-05-04T23:51:09.554583
| 2021-05-30T07:15:03
| 2021-05-30T07:15:03
| 353,909,037
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,005
|
py
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from elasticsearch import Elasticsearch
from sentence_transformers import SentenceTransformer, util
import numpy as np
import json
import re
# Create your views here.
######################################## return html page ##########################################
# def main(request):
# return render(request, 'main.html', {})
# Page showing the list of restaurants matching a search.
def getResListpage(request):
    """Render the restaurant-list page for the user's search query.

    'search_key' (submitted search form) takes precedence over the 'q'
    query parameter. Fix: removed a leftover debug print(request.GET).
    """
    search = request.GET.get('q', '')
    if 'search_key' in request.GET:
        search = request.GET.get('search_key')
    info = getInfo(search)
    res_list = groupingBoard(jsonPaser(info))
    content = {'res_list': res_list}
    return render(request, 'res_list_page.html', content)
# Main search page.
def getSearchPage(request):
    """Render the main (empty) search page."""
    return render(request, 'coogle_search.html')
#######################################################################
########################## elastic search #############################
#######################################################################
# Path to the fine-tuned KoSentenceBERT checkpoint used for query embedding.
model_path = '/home/ubuntu/hot6/bh/KoSentenceBERT_SKTBERT/output/training_stsbenchmark_skt_kobert_model_-2021-03-28_05-25-43_best'
embedder = SentenceTransformer(model_path)
# Local Elasticsearch instance holding the review index ('revd').
client = Elasticsearch()
res_size = 18 # number of hits fetched per query (was 120)
def getInfo(search):
    """Run a semantic similarity search against the 'revd' review index.

    Encodes *search* with the sentence embedder, then scores every document
    by cosine similarity between the query vector and its 'comment_vector'.
    Returns the raw Elasticsearch response dict.
    """
    encoded = embedder.encode(search, convert_to_tensor=True)
    query_vector = [vec.tolist() for vec in encoded]
    script_query = {
        "script_score": {
            "query": {"match_all": {}},
            "script": {
                # +1.0 offset — presumably to keep scores non-negative.
                "source": "cosineSimilarity(params.query_vector, doc['comment_vector']) + 1.0",
                "params": {"query_vector": query_vector}
            }
        }
    }
    response = client.search(
        index='revd',
        body={
            "size": res_size,
            "query": script_query
            # "_source": {"includes": ["res_id", "res_name", "comment", "adress", "keywords"]}
        }
    )
    return response
################################## funcs to preprocess restaurant infomation #######################
###### elastic search에서 받아온 json 정보를 html에서 보여줄 수 있도록 파싱
def jsonPaser(info):
    """Parse the raw Elasticsearch response into a dict keyed by res_id.

    Duplicate restaurants (same res_id across hits) are merged: their review
    lists are concatenated and kept sorted in reverse order, and the running
    display number is decremented so numbering stays contiguous.
    """
    # res_info_key = info['hits']['hits'][0]['_source'].keys()
    res_list = {}
    number = 0
    for i in range(len(info['hits']['hits'])):
        # 'res_id', 'res_name', 'adress', 'comment', 'keywords'
        number += 1
        res_comments = []
        res_number = number
        res_id = info['hits']['hits'][i]['_source']['res_id']
        res_name = info['hits']['hits'][i]['_source']['res_name']
        # 'adress' (sic) is the field name as stored in the index.
        res_addr = info['hits']['hits'][i]['_source']['adress']
        res_comment = info['hits']['hits'][i]['_source']['comment']
        res_keywords = info['hits']['hits'][i]['_source']['keywords']
        # Preprocess the address ('\n' -> '<br>').
        res_addr = preprocessAddr(res_addr)
        # When there is only one review the field is a plain string; normalize
        # it to a list so lists can be merged if the same restaurant reappears.
        if type(res_comment) != type([]):
            res_comments.append(res_comment)
        else:
            res_comments = res_comment
        # Check by res_id whether this restaurant was already seen.
        if res_list.get(res_id) == None:
            res_comments.sort(reverse=True)
            res_info = {'res_name':res_name, 'res_addr':res_addr,
                        'res_comment':res_comments, 'res_keywords':res_keywords,
                        'res_number': res_number,
                        }
            res_list[res_id] = res_info
        else:
            comments = res_list[res_id]['res_comment'] + res_comments
            comments.sort(reverse=True)
            res_list[res_id]['res_comment'] = comments
            number -= 1
    return res_list
###### 식당들을 6개씩 그룹핑
def groupingBoard(info):
    """Group the restaurant records in *info* (a dict) into rows of six.

    Fix: the original silently dropped any trailing group of fewer than six
    restaurants — common once duplicate res_ids are merged upstream. The
    final partial row is now kept.
    """
    res_list = []
    group = []
    for v in info.values():
        group.append(v)
        if len(group) == 6:
            res_list.append(group)
            group = []
    if group:
        # Keep the final partial row instead of discarding it.
        res_list.append(group)
    return res_list
def groupingPage(info):
    """Group board rows in *info* (an iterable) into pairs.

    Fix: the original silently dropped a trailing single element when the
    input length was odd; the final partial pair is now kept.
    """
    res_list = []
    group = []
    for res_board in info:
        group.append(res_board)
        if len(group) == 2:
            res_list.append(group)
            group = []
    if group:
        # Keep the final partial pair instead of discarding it.
        res_list.append(group)
    return res_list
def preprocessAddr(addr):
    """Replace newlines in an address string with HTML line breaks ('<br> ')."""
    # str.replace is clearer and cheaper than re.sub for a literal pattern.
    return addr.replace('\n', '<br> ')
|
[
"kbh122369@gmail.com"
] |
kbh122369@gmail.com
|
b68ab3cba51f473017a3cad5f4f2bf14b108ee1f
|
0aa0f63e54368583f0aa9d5df6def2a2abffc029
|
/13-Intro-to-Trees-master/pitchscrape/reviews/settings.py
|
f209def26a7ae16bd9761691042759fd13303b87
|
[] |
no_license
|
abendm/Pandas-Stuff
|
d9c13e3cd2ff5f0a210aed83fed3cc0531b590b9
|
f623d42100e53602ece47f079cb6b80288fbef55
|
refs/heads/master
| 2020-03-24T04:32:05.350498
| 2018-09-21T17:25:07
| 2018-09-21T17:25:07
| 142,456,361
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,083
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for reviews project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'reviews'
SPIDER_MODULES = ['reviews.spiders']
NEWSPIDER_MODULE = 'reviews.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'reviews (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'reviews.middlewares.ReviewsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'reviews.middlewares.ReviewsDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'reviews.pipelines.ReviewsPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"abendm@gmail.com"
] |
abendm@gmail.com
|
de68899e48dd15c7bcd79c0be0f42079daac4b5c
|
05b9797a76bf0ee17f3d6ef812e5dd2a186a17fa
|
/jogo/models/conta.py
|
ab32d99b26083355e15848f3edf07d53d93b7565
|
[] |
no_license
|
renatoaloi/desafio-jogo
|
e9b99d39ad46bfeca006297d492615782e008179
|
c2bb464f17132df2ece18e99d5bb35a767dfaa53
|
refs/heads/main
| 2023-03-06T20:57:29.912528
| 2021-02-22T15:34:53
| 2021-02-22T15:34:53
| 341,075,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from sqlalchemy import Column, Integer, String, Float, ForeignKey
from jogo.dao.database import Base
class Conta(Base):
    """Account row: a balance tied to a game and (optionally) a player."""
    __tablename__ = 'conta'
    id = Column(Integer, primary_key=True)
    # Game this account belongs to.
    jogo_id = Column(Integer, ForeignKey('jogo.id'), nullable=False)
    # Owning player; nullable — presumably NULL marks the bank's own
    # account. TODO confirm against game setup code.
    jogador_id = Column(Integer, ForeignKey('jogador.id'), nullable=True)
    # Current balance.
    saldo = Column(Float, nullable=False)
    def __init__(self, jogo_id, jogador_id, saldo):
        self.jogo_id = jogo_id
        self.jogador_id = jogador_id
        self.saldo = saldo
    def __repr__(self):
        return '<Conta %r>' % self.saldo
|
[
"renato.aloi@gmail.com"
] |
renato.aloi@gmail.com
|
395c2604b50278ae902f02181ae78d04c84bb3d9
|
23acc2a4c8ad278d998cde78eb2340bc138844de
|
/alibaba_scrawl/listen.py
|
ec93c236eb4a877cbf9570a6e3b9a5f763bef6ed
|
[] |
no_license
|
jorliang/pthion_basic_learning
|
f915b137abd9116819cd0692d5d859285d68616c
|
425e01b322324947c93cfc9c345cfc2bafb492fe
|
refs/heads/master
| 2022-12-12T18:29:13.250193
| 2022-12-03T01:42:51
| 2022-12-03T01:42:51
| 175,117,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import winsound
import win32com.client
import time
import speech
# speak_out = win32com.client.Dispatch('SAPI.SPVOICE')
# speak_out = win32com.client.Dispatch('Word.Application')
# def speak(str):
# print(str)
# speak_out.Speak(str)
# winsound.PlaySound(str,winsound.SND_ASYNC)
#
#
# ak='簡単'
# time.sleep(1)
# speak(ak)
# Speak the phrase aloud via the `speech` text-to-speech package.
speech.say('でわ')
|
[
"liangjiao@imagingdynamics.com"
] |
liangjiao@imagingdynamics.com
|
cd2694476723a2181a5d941f4cd30cf0eec0d1b6
|
035df9fcfd3737dbb18c804c7c396b5f94bed435
|
/WatershedFermeture/watershed/Watershed - femeture.py
|
1ae445772cdcc995e3d744876151f5ea11d20d4b
|
[] |
no_license
|
ASTAugustin/IMA_P1_Projet
|
01d93759eaa8c180ec809de7e6359b9e3249061d
|
d48bc5b3d3f12acde0a1f2cee0ff6b1dcb4f197c
|
refs/heads/master
| 2020-07-30T09:11:55.913404
| 2019-11-05T10:08:38
| 2019-11-05T10:08:38
| 210,168,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,475
|
py
|
import numpy as np
from collections import deque
import cv2
# Implementation of:
# Pierre Soille, Luc M. Vincent, "Determining watersheds in digital pictures via
# flooding simulations", Proc. SPIE 1360, Visual Communications and Image Processing
# '90: Fifth in a Series, (1 September 1990); doi: 10.1117/12.24211;
# http://dx.doi.org/10.1117/12.24211
class Watershed(object):
    """Watershed segmentation by flooding simulation (Vincent & Soille).

    apply() returns an int32 label image: positive basin labels, WSHD (0)
    for watershed lines.
    """
    # Sentinel values used in the label image during flooding.
    MASK = -2   # pixel masked at the current grey level, not yet labelled
    WSHD = 0    # pixel on a watershed line
    INIT = -1   # pixel not yet processed
    INQE = -3   # pixel currently in the FIFO queue
    def __init__(self, levels=256):
        # Number of evenly spaced grey levels used for the flooding steps.
        self.levels = levels
    # Neighbour (coordinates of) pixels, including the given pixel.
    def _get_neighbors(self, height, width, pixel):
        return np.mgrid[
            max(0, pixel[0] - 1):min(height, pixel[0] + 2),
            max(0, pixel[1] - 1):min(width, pixel[1] + 2)
        ].reshape(2, -1).T
    '''使用示例:
    Ex.返回包括自己的邻居
    >>> np.mgrid[0:3,0:3].reshape(2,-1).T
    array([[0, 0],
    [0, 1],
    [0, 2],
    [1, 0],
    [1, 1],
    [1, 2],
    [2, 0],
    [2, 1],
    [2, 2]])
    '''
    def apply(self, image):
        """Flood the 2-D grey-level *image* and return the label matrix."""
        current_label = 0
        flag = False
        fifo = deque()  # FIFO of pixels to flood (popleft => queue, not a stack)
        height, width = image.shape
        total = height * width
        labels = np.full((height, width), self.INIT, np.int32)  # all pixels start as INIT (-1)
        reshaped_image = image.reshape(total)  ## grey level of one degree
        # [y, x] pairs of pixel coordinates of the flattened image.
        pixels = np.mgrid[0:height, 0:width].reshape(2, -1).T
        # Coordinates of neighbour pixels for each pixel.
        neighbours = np.array([self._get_neighbors(height, width, p) for p in pixels])
        # Border pixels have fewer neighbours, making the array ragged.
        if len(neighbours.shape) == 3:
            # Case where all pixels have the same number of neighbours.
            neighbours = neighbours.reshape(height, width, -1, 2)
        else:
            # Case where pixels may have a different number of pixels.
            neighbours = neighbours.reshape(height, width)
        indices = np.argsort(reshaped_image)  ## sort of index from small value to big value
        sorted_image = reshaped_image[indices]  ## the lightest pixel values come first
        sorted_pixels = pixels[indices]  ## coordinates in the same sorted order
        # self.levels evenly spaced steps from minimum to maximum.
        levels = np.linspace(sorted_image[0], sorted_image[-1], self.levels)
        level_indices = []
        current_level = 0
        # Get the indices that deleimit pixels with different values.
        for i in range(total):
            if sorted_image[i] > levels[current_level]:
                # Skip levels until the next highest one is reached.
                while sorted_image[i] > levels[current_level]: current_level += 1
                level_indices.append(i)
        level_indices.append(total)
        start_index = 0
        for stop_index in level_indices:
            # Mask all pixels at the current level.
            for p in sorted_pixels[start_index:stop_index]:
                labels[p[0], p[1]] = self.MASK
                # Initialize queue with neighbours of existing basins at the current level.
                for q in neighbours[p[0], p[1]]:
                    # p == q is ignored here because labels[p] < WSHD
                    if labels[q[0], q[1]] >= self.WSHD:
                        labels[p[0], p[1]] = self.INQE
                        fifo.append(p)
                        break
            # Extend basins.
            while fifo:
                p = fifo.popleft()
                # Label p by inspecting neighbours.
                for q in neighbours[p[0], p[1]]:
                    # Don't set lab_p in the outer loop because it may change.
                    lab_p = labels[p[0], p[1]]
                    lab_q = labels[q[0], q[1]]
                    if lab_q > 0:
                        if lab_p == self.INQE or (lab_p == self.WSHD and flag):
                            labels[p[0], p[1]] = lab_q
                        elif lab_p > 0 and lab_p != lab_q:
                            labels[p[0], p[1]] = self.WSHD
                            flag = False
                    elif lab_q == self.WSHD:
                        if lab_p == self.INQE:
                            labels[p[0], p[1]] = self.WSHD
                            flag = True
                    elif lab_q == self.MASK:
                        labels[q[0], q[1]] = self.INQE
                        fifo.append(q)
            # Detect and process new minima at the current level.
            for p in sorted_pixels[start_index:stop_index]:
                # p is inside a new minimum. Create a new label.
                if labels[p[0], p[1]] == self.MASK:
                    current_label += 1
                    fifo.append(p)
                    labels[p[0], p[1]] = current_label
                    while fifo:
                        q = fifo.popleft()
                        for r in neighbours[q[0], q[1]]:
                            if labels[r[0], r[1]] == self.MASK:
                                fifo.append(r)
                                labels[r[0], r[1]] = current_label
            start_index = stop_index
        return labels
# Demo: morphological closing of 'Ex1.PNG' and display of the result.
# NOTE(review): Watershed.apply is never invoked here — the calls below
# are commented out, so only the closing is shown.
if __name__ == "__main__":
    import numpy as np
    np.set_printoptions(threshold=np.inf)
    ##from Watershed import Watershed
    from PIL import Image
    import matplotlib.pyplot as plt
    import cv2
    w = Watershed()
    image = np.array(cv2.imread('Ex1.PNG', 0))
    print(image)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
    ## Here, (2,2) means the two diameters of the ellipse.
    binary = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    print(binary)
    plt.imshow(binary)
    plt.show()
    ##gray = cv2.cvtColor('Ex1.PNG', cv2.COLOR_RGB2GRAY)
    ##ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    ##kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    ##binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    ##print("xxxxxxxx")
    ##/labels = w.apply(binary)
    #label = w.apply(image)
    #print(labels)
    ##plt.imshow(labels, cmap='Paired', interpolation='nearest')
    ##/plt.imshow(labels)
    ##/plt.show()
|
[
"celestine_jinyi@163.com"
] |
celestine_jinyi@163.com
|
dd86851e1b6b44d25f0a1e670ee136136ddee155
|
71f894d99a5e3118bc1d53953b22eb1f3097a679
|
/LkCRM/LkCRM/wsgi.py
|
31588500062e9ed1070e86b2092bad28ab2f7a5d
|
[] |
no_license
|
kevinliu007/CRM
|
081db5c942a97952b82598e7046747d820abe12f
|
84ce7d41dddac428cc41367f4888ada6bc972d55
|
refs/heads/master
| 2020-09-14T07:35:01.883726
| 2019-07-11T15:06:46
| 2019-07-11T15:06:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for LkCRM project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before constructing the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LkCRM.settings')
application = get_wsgi_application()
|
[
"caixiaobai@caixiaobai.com"
] |
caixiaobai@caixiaobai.com
|
4f21bdabf36e65773d6c9289dad471ce6aa16e31
|
178ae62be7de20a50f96361e80bdcff5a5493ae2
|
/koica/templatetags/koica.py
|
36b3a706fcb6f684e4f9896f13b5cc8b25353d75
|
[
"MIT"
] |
permissive
|
synw/django-koica
|
a043800c15fad69f2024557e62fcf0ac4808ffae
|
d8b1c9fa70c428f0aa0db0c523524e9d2ef27377
|
refs/heads/master
| 2021-01-10T03:15:24.570691
| 2015-12-09T14:55:29
| 2015-12-09T14:55:29
| 46,188,691
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
from django import template
from koica.utils import sanitize_html
register = template.Library()
@register.filter(is_safe=True)
def remove_pre(value):
    """Template filter: return *value* sanitized with <pre> content removed."""
    return sanitize_html(value, remove_pre=True)
|
[
"synwe@yahoo.fr"
] |
synwe@yahoo.fr
|
9202483cde896d82ddfe2dbd4a5a205224bd657e
|
8c8eaf2a82d74d9652fbe23e23b0f5856a947e7e
|
/tokens/models.py
|
6fc0c00181648dcf65eb4b1f0060c3ce5fc4f7a9
|
[] |
no_license
|
CallistoNetwork/Galileo-Backend
|
2cb0c1dbe20f43a56cab566ee77338cc68f8fda8
|
beec34cc9e480b49b6efbe0bd1cd19ddcfcb8340
|
refs/heads/master
| 2020-04-03T02:29:00.910607
| 2019-01-27T21:12:07
| 2019-01-27T21:12:07
| 151,890,661
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,584
|
py
|
from django.db import models
from explorer.models import TimeStampModel
class Token(TimeStampModel):
    """
    * name -> Name of token | String | optional
    * symbol -> Trading symbol | String | optional
    * total_supply -> The total supply if the token | Decimal | optional
    * decimals -> Number of decimal place the token can be subdivided to | Int
    | optional
    * type -> type of Token | String | required
    * cataloged -> if token information has been cataloged | Boolean | optional
    * contract_address_hash -> Foreign key to address | ForeignKey | required
    """
    name = models.CharField(
        max_length=255,
        blank=True
    )
    symbol = models.CharField(
        max_length=50,
        blank=True
    )
    # 120 digits / 0 decimal places: stores the raw (unscaled) supply.
    total_supply = models.DecimalField(
        null=True,
        max_digits=120,
        decimal_places=0
    )
    decimals = models.PositiveIntegerField(
        null=True
    )
    token_type = models.CharField(
        max_length=100,
    )
    # NOTE(review): BooleanField(null=True) requires Django >= 2.1
    # (earlier versions needed NullBooleanField) — confirm project version.
    cataloged = models.BooleanField(
        null=True
    )
    # PROTECT: an address cannot be deleted while a token references it.
    contract_address_hash = models.ForeignKey(
        'address.Address',
        on_delete=models.PROTECT
    )
class TokenTransfer(TimeStampModel):
    """
    * amount -> token transfer amount | Decimal | Optional
    * from_address_hash -> Address send token | Foreign Key | Required
    * to_address_hash -> Address received token | Foreign Key | Required
    * token_contract_address_hash -> Address of the token contract
    | Forreign Key | Required
    * token_id -> Id of the token, only ERC-721 tokens | Optional
    * transaction_hash -> Transaction token | Foreign Key | Required
    * log_index -> Index of the corresponding Log in the transaction
    | Positive Int | Required
    """
    # Raw (unscaled) transfer amount; null for e.g. ERC-721 transfers.
    amount = models.DecimalField(
        null=True,
        max_digits=120,
        decimal_places=0
    )
    # Distinct related_names keep the three Address FKs from clashing.
    from_address_hash = models.ForeignKey(
        'address.Address',
        on_delete=models.PROTECT,
        related_name='token_transfer_from_address'
    )
    to_address_hash = models.ForeignKey(
        'address.Address',
        on_delete=models.PROTECT,
        related_name='token_transfer_to_address'
    )
    token_contract_address_hash = models.ForeignKey(
        'address.Address',
        on_delete=models.PROTECT,
        related_name='token_contract_address'
    )
    token_id = models.PositiveIntegerField(
        null=True
    )
    transaction_hash = models.ForeignKey(
        'transactions.Transaction',
        on_delete=models.PROTECT
    )
    log_index = models.PositiveIntegerField()
|
[
"gollum23@gmail.com"
] |
gollum23@gmail.com
|
15e3cb84a94201fa536f06f31f13a17e5a8b6dfb
|
5f809898a9f153d8645205aa601b2b3083beafa1
|
/krrThomas/searchStat/plot_structs_near_best.py
|
e0263c56ba0312ee7915e88edc7e6a07acbb6a67
|
[] |
no_license
|
MaltheBisbo/learningForces
|
ea1b258e115327e1e0876a60345366f349afb1ac
|
7a726a5931454534585563dd607faf75c5d706c6
|
refs/heads/master
| 2021-01-23T22:31:02.654738
| 2018-12-25T21:48:54
| 2018-12-25T21:48:54
| 102,938,949
| 0
| 0
| null | 2018-12-25T21:48:55
| 2017-09-09T08:22:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
import numpy as np
from scipy.spatial.distance import euclidean
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
from gaussComparator import gaussComparator
from featureCalculators_multi.angular_fingerprintFeature_cy import Angular_Fingerprint
from delta_functions_multi.delta import delta as deltaFunc
from krr_errorForce import krr_class
def plot_near_best(traj, MLmodel, n_nearest=5):
    """Return the *n_nearest* structures in *traj* closest (in feature space)
    to the lowest-energy structure.

    Improvements: the hard-coded count of 5 is now the n_nearest parameter
    (default unchanged) and an unused local (Ndata) was removed.
    """
    # Locate the lowest-energy structure.
    E = np.array([a.get_potential_energy() for a in traj])
    a_best = traj[np.argmin(E)]
    # Feature-space distances from the best structure to every structure.
    f_traj = MLmodel.featureCalculator.get_featureMat(traj)
    f_best = MLmodel.featureCalculator.get_feature(a_best)
    d = cdist(f_best.reshape((1, len(f_best))), f_traj, metric='euclidean')
    index_closest = np.argsort(d[0])[:n_nearest]
    print('d:\n', d[0][index_closest])
    print('E:\n', E[index_closest])
    return [traj[i] for i in index_closest]
# Demo: load DFT trajectories, build the fingerprint + KRR model, and view
# the structures nearest (in feature space) to the best one found.
if __name__ == '__main__':
    n = 2
    i = 0
    traj_init = read('/home/mkb/DFT/gpLEA/anatase/step/sanity_check/test_new_calc/runs{}/run{}/global{}_initTrain.traj'.format(n,i,i), index=':')
    traj_sp = read('/home/mkb/DFT/gpLEA/anatase/step/sanity_check/test_new_calc/runs{}/run{}/global{}_spTrain.traj'.format(n,i,i), index=':')
    traj = traj_init + traj_sp
    #ref = read('/home/mkb/DFTB/TiO_2layer/ref/Ti13O26_GM_done.traj', index='0')
    ### Set up feature ###
    # Template structure
    a = traj[0]
    # Radial part
    Rc1 = 6
    binwidth1 = 0.2
    sigma1 = 0.2
    # Angular part
    Rc2 = 4
    Nbins2 = 30
    sigma2 = 0.2
    gamma = 2
    # Radial/angular weighting
    eta = 20
    use_angular = True
    # Initialize feature
    featureCalculator = Angular_Fingerprint(a, Rc1=Rc1, Rc2=Rc2, binwidth1=binwidth1, Nbins2=Nbins2, sigma1=sigma1, sigma2=sigma2, gamma=gamma, eta=eta, use_angular=use_angular)
    ### Set up KRR-model ###
    comparator = gaussComparator(featureCalculator=featureCalculator, max_looks_like_dist=0.2)
    delta_function = deltaFunc(atoms=a, rcut=6)
    krr = krr_class(comparator=comparator,
                    featureCalculator=featureCalculator,
                    delta_function=delta_function,
                    bias_std_add=0)
    traj_nearby = plot_near_best(traj, krr)
    view(traj_nearby)
|
[
"mkb@s81n11.grendel.cscaa.dk"
] |
mkb@s81n11.grendel.cscaa.dk
|
3fd8971af0057cfe6f9120d8654640df8c971099
|
99e76e9e4c8031418c4c50217b48adf1d880cf2f
|
/setup.py
|
6974fdc5b21fd1b544eac798d4363569ad4198d7
|
[
"MIT"
] |
permissive
|
grow/grow-ext-responsive-styles
|
d75a5abb070613641e3da9f3f4cf7dc07e88c51f
|
bb3d8f68edc1f3e1bdf508bb5df8d5b296574e9b
|
refs/heads/master
| 2021-01-03T14:04:15.882718
| 2020-05-20T20:38:09
| 2020-05-20T20:38:09
| 240,096,948
| 0
| 0
|
MIT
| 2020-05-20T20:34:58
| 2020-02-12T19:27:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
from setuptools import setup
setup(
name='grow-ext-responsive-styles',
version='1.0.0',
zip_safe=False,
license='MIT',
author='Grow Authors',
author_email='hello@grow.io',
include_package_data=True,
packages=[
'responsive_styles',
],
package_data={
'responsive_styles': ['*.html'],
},
)
|
[
"jeremydw@gmail.com"
] |
jeremydw@gmail.com
|
f0350d9b871c404aac008aac028f9754e35939df
|
cdf14be1c33ad4dad24dfc4823443caf629e8787
|
/reversi.py
|
fc9053bf0d31a6c6d2fd296fabaac28f20d53bb8
|
[] |
no_license
|
fhalambek/reversi
|
541f63048d57e44bdf6510ca65184c889f3fcea4
|
47dad459b8bcd8fec62614865196a11a7e2d7e00
|
refs/heads/master
| 2021-09-07T15:06:00.695585
| 2018-02-24T15:49:01
| 2018-02-24T15:49:01
| 114,654,912
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,116
|
py
|
from tkinter import *
from time import sleep
from threading import Thread
from bot import easy, greedy, weighted
from os import replace, name
from _tkinter import TclError
pilImported = False
try:
from PIL import Image, ImageTk
pilImported = True
except ImportError:
try:
try:
import pip
except ImportError:
print("pip and Pillow libraries are not installed. Install one of them to view transitions.\n" +
"Linux: sudo apt-get install python3-pip\n pip install Pillow\n" +
"Windows: pip is installed, find it among installation files, e.g.: Python/Python36-32/Scripts/")
raise ImportError
def install(package):
pip.main(['install', package])
install("Pillow")
from PIL import Image, ImageTk
pilImported = True
except PermissionError:
print("PIL not installed. Try running as administrator to view transitions.")
pilImported = False
except ImportError:
print()
pilImported = False
MARGIN_X = 10
MARGIN_Y = 10
WINDOW_DIMS = "1000x500"
WINDOW_BG = ["#00BB00", "#44FF44"]
CELL_BG = "#228822"
CELL_BG_HIGHLIGHT = "#44CC44"
CELL_SIZE = 56
DIRECTIONS = [((i//3)-1, (i%3)-1) for i in range(9)]
PLAYERS = [None, "black", "white"]
BOTS = [easy, greedy, weighted]
LANGUAGES = ("hrvatski", "English")
OPTION_BUTTON_WIDTH = 250
OPTION_BUTTON_HEIGHT = 50
blockInOut = (False, False) #program input i output
co = False #cells input
ci = False #cells output
botSpeed = .5
animationSpeed = .01
animationsEnabled = True
stopBot = True
pause = False
class Game(Frame): #glavni frame unutar kojega se sve nalazi. self.master je Tk()
def __init__(self):
Frame.__init__(self)
global IMAGES, backImage, DISKS
IMAGES = (PhotoImage(), PhotoImage(file = "res/drawables/disks/black74.png"), PhotoImage(file = "res/drawables/disks/white74.png"))
DISKS = []
for i in range(9):
DISKS.append([PhotoImage(file = "res/drawables/disks/black" + str(j) + str(i) + ".png") for j in range(7)])
DISKS.append([PhotoImage(file = "res/drawables/disks/white" + str(i) + "4.png") for i in range(7)])
backImage = PhotoImage(file = "res/drawables/back.png")
self.master.title("Reversi")
self.master.resizable(False, False)
self.master.geometry(WINDOW_DIMS)
SettingsView.loadSettings()
Game.loadStrings()
self.halves = [None, ImageView(self, position = 1, color = WINDOW_BG[0], hierarchy = (0, 0)), MenuView(master = self, position = 2, color = WINDOW_BG[1], hierarchy = (1, 0)), None]
self.pack(expand = YES, fill = BOTH)
self.master.protocol("WM_DELETE_WINDOW", lambda: closeWindow(self.master))
def switch(self, target, reverse, position): #poziva se kada trebaju mijenjati polovice ekrana
global stopBot, pause
pause = False
stopBot = True
cBlock()
if(blockInOut[0] or blockInOut[1]): return
block(o = False)
self.halves[3 - reverse * 3] = HIERARCHY[target[0]][target[1]][0](master = self, position = 3 - reverse * 3, color = WINDOW_BG[target[0]%2], hierarchy = target)
runnables = [self.frameSwapAnimationRight, self.frameSwapAnimation]
runnables[position + reverse > 1](reverse)
def frameSwapAnimation(self, reverse): #ako se mijenjaju obije polovice
def postProcessing(self, reverse):
flag = None
for i, j in enumerate(self.halves):
if(j):
again = j.replace(i + (reverse*2 - 1))
if(again[0]): flag = again[1]
del self.halves[self.halves.index(None)]
self.halves[-reverse].destroy()
self.halves[-reverse] = None
self.halves.insert(3 - 3*reverse, None)
block(0, 0)
if(flag and reverse):
self.switch(HIERARCHY[HIERARCHY[flag[0]][flag[1]][2][0]][HIERARCHY[flag[0]][flag[1]][2][1]][2], reverse, 2)
elif(flag):
self.switch((flag[0] + 1, flag[1]), 0, 2)
if(pilImported):
transition = TransitionImage(master = self,
position = 2 - reverse,
transparent = False,
hierarchy = self.halves[3*(1-reverse)].myHierarchy)
self.after(10, self.halves[2-reverse].move, 0, transition, postProcessing, self, reverse)
else:
postProcessing(self, reverse)
def frameSwapAnimationRight(self, reverse): #ako se mijenja desna polovica
def postProcessing(self, reverse):
again = self.halves[3].replace(2)
print(again)
self.halves[2].destroy()
del self.halves[2]
self.halves.append(None)
block(0, 0)
if(again[0]): self.switch((again[1][0] + 1, again[1][1]), 0, 2)
if(pilImported):
transition = TransitionImage(master = self,
position = 2,
transparent = True,
hierarchy = self.halves[3].myHierarchy,
hierarchy2 = self.halves[2].myHierarchy)
self.after(10, transition.setAlpha, 0, postProcessing, self, reverse)
else:
postProcessing(self, reverse)
def loadStrings():
if(language == "eng"):
Game.initializeStrings()
return
file = open("res/strings/" + language + ".txt", "r", encoding = "cp1250")
lines = file.readlines()
global stringsDict
for line in lines:
key, value = tuple(line.split(":")) #maknuti ovaj repr ako nam ne treba
stringsDict[key] = value[:-1].upper()
file.close()
def initializeStrings():
global stringsDict, selectBotText, mainMenuButtonText, modeMenuButtonText, botMenuButtonText
stringsDict = {"Stats":"STATS",
"Wins by player":"WINS BY PLAYER",
"Wins by color":"WINS BY COLOR",
"Disks":"DISKS",
"Main Menu":"MAIN MENU",
"Play":"PLAY",
"Mode":"MODE",
"Rules":"RULES",
"Settings":"SETTINGS",
"About":"ABOUT",
"Bot VS Bot":"BOT VS BOT",
"Human VS Bot":"HUMAN VS BOT",
"Human VS Human":"HUMAN VS HUMAN",
"Easy":"EASY",
"Medium":"MEDIUM",
"Hard":"HARD",
"Select bot 1":"SELECT BOT 1",
"Select bot 2":"SELECT BOT 2",
"Language":"LANGUAGE",
"Bot speed":"BOT SPEED",
"Animations":"ANIMATIONS",
"On":"ON",
"Off":"OFF",
"Pause":"PAUSE",
"Resume":"RESUME"}
selectBotText = ("Select bot 1", "Select bot 2")
mainMenuButtonText = ("Play", "Rules", "Settings", "About")
modeMenuButtonText = ("Bot VS Bot", "Human VS Bot", "Human VS Human")
botMenuButtonText = ("Easy", "Medium", "Hard")
class TransitionImage(Label): #overlay sa screenshotom za fade in efekat
def __init__(self, master, position, transparent, hierarchy, hierarchy2 = None):
Label.__init__(self, master, image = IMAGES[0], width = 500, height = 500, bd = 0, highlightthickness = 0, bg = WINDOW_BG[hierarchy[0]%2])
self.transparent = transparent
if(transparent):
self.lastImage = Image.open("res/drawables/ss/pic"+str(hierarchy2[0])+str(hierarchy2[1])+".png")
self.rawImage = Image.open("res/drawables/ss/pic"+str(hierarchy[0])+str(hierarchy[1])+".png")
self.sourceImage = self.rawImage.copy()
self.rawImage.close()
self.place(x = 500*(position - 1), y = 0)
def setAlpha(self, frameNumber, postProcess, master, reverse):
if(frameNumber > 50):
self.destroy()
postProcess(master, reverse)
return
if(self.transparent):
self.frameImage = ImageTk.PhotoImage(Image.blend(self.lastImage, self.sourceImage, frameNumber*1/50))
else:
self.sourceImage.putalpha(frameNumber * 5)
self.frameImage = ImageTk.PhotoImage(self.sourceImage)
self.config(image = self.frameImage)
master.after(10, self.setAlpha, frameNumber + 1, postProcess, master, reverse)
class Half(Frame): #ono sto je zajednicko svim tim frameovima/polovicama prozora
def __init__(self, master, position, color, hierarchy):
Frame.__init__(self, master, bg = color, padx = MARGIN_X, pady = MARGIN_Y)
self.place(x = (position-1) * 500, y = 0, width = 500, height = 500)
self.myHierarchy = hierarchy
self.myColor = color
self.myPosition = position
def replace(self, newPosition):
self.place(x = (newPosition - 1) * 500)
self.myPosition = newPosition
return (0,)
def move(self, frameNumber, t, pp, master, reverse):
if(frameNumber > 50):
t.setAlpha(0, pp, master, reverse)
return
self.place(x = 500*(1 - reverse) + (2*reverse - 1)*frameNumber*10, y = 0)
self.lift(t)
master.after(10, self.move, frameNumber + 1, t, pp, master, reverse)
class ImageView(Half): #na pocetnom zaslonu s lijeve strane.. tu bi mogla doci neka zgodna slika
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.picture = PhotoImage(file = "front.png")
self.label = Label(self, image = self.picture)
self.label.place(x = 0, y = 0, width = 480, height = 480)
class TextView(Half): #pravila i o igri
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.actionBar = ActionBar(self, HIERARCHY[hierarchy[0]][hierarchy[1]][2], color)
file = open("res/strings/"+HIERARCHY[hierarchy[0]][hierarchy[1]][1] + "_" + language + ".txt", "r", encoding = "cp1250")
text = ""
for line in file.readlines():
text += line
self.textMsg = Message(self, text = text, bg = color, justify = CENTER, anchor = CENTER)
self.textMsg.pack()
file.close()
class SettingsView(Half): #onaj frame s postavkama
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.actionBar = ActionBar(self, HIERARCHY[hierarchy[0]][hierarchy[1]][2], color)
self.languageLabel = Label(self, text = stringsDict["Language"], bg = color, highlightthickness = 0)
self.languageLabel.pack(pady = (50, 0))
var = StringVar()
var.set(language)
self.languageOM = OptionMenu(self, var, *LANGUAGES, command = self.omCommand)
self.languageOM.config(bg = color, highlightthickness = 0, width = 15)
self.languageOM.pack()
self.botSpeedLabel = Label(self, text = stringsDict["Bot speed"], bg = color, highlightthickness = 0)
self.botSpeedLabel.pack(pady = (20, 0))
self.botSpeedScale = Scale(self, from_ = 1,
to = 100,
orient = HORIZONTAL,
command = SettingsView.sCommand,
length = OPTION_BUTTON_WIDTH,
bg = color,
troughcolor = WINDOW_BG[1],
highlightthickness = 0,
cursor = "hand2")
self.botSpeedScale.set((1-botSpeed)*100)
self.botSpeedScale.pack()
self.animationsLabel = Label(self, text = stringsDict["Animations"], bg = color, highlightthickness = 0)
self.animationsLabel.pack(pady = (20, 0))
v = IntVar()
v.set(animationsEnabled)
self.rb = []
for i, j in enumerate(("Off", "On")):
self.rb.append(Radiobutton(self,
text = stringsDict[j],
variable = v,
value = i,
command = lambda: SettingsView.rbCommand(v.get()),
indicatoron = 0,
bg = color,
width = 20,
selectcolor = WINDOW_BG[1]))
self.rb[i].pack()
def rbCommand(var): #kad se stisne na radiobutton
global animationsEnabled
animationsEnabled = var
SettingsView.saveSettings()
def sCommand(var): #kad se pomakne onaj slider
global botSpeed
botSpeed = 1-(int(var)/100)
SettingsView.saveSettings()
def refreshLanguage(self):
self.actionBar.refreshLanguage()
for i, j in zip(("Language", "Bot speed", "Animations", "Off", "On"), (self.languageLabel, self.botSpeedLabel, self.animationsLabel, self.rb[0], self.rb[1])):
j.config(text = stringsDict[i])
def omCommand(self, var): #kad se u optionMenuu promijeni jezik
global language
language = var[:3].lower()
Game.loadStrings()
for i in self.master.halves:
if i: i.refreshLanguage()
SettingsView.saveSettings()
def loadSettings():
file = open("Preferences/Settings.txt", "r")
global language, botSpeed, animationsEnabled
for i in file.readlines():
key, value = tuple(i.split(":"))
if(key == "language"):
language = value[:-1]
elif(key == "botSpeed"):
botSpeed = float(value[:-1])
elif(key == "animations"):
animationsEnabled = ("On" in value)
file.close()
return
def saveSettings():
option = "Off\n"
if(animationsEnabled):
option = "On\n"
text = "language:" + language + "\nbotSpeed:" + str(botSpeed) + "\nanimations:" + option
file = open("Preferences/SettingsTemp.txt","w")
file.write(text)
file.close()
replace("Preferences/SettingsTemp.txt", "Preferences/Settings.txt")
return
class MenuView(Half): #oni frameovi s nekoliko gumba za odabir
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.actionBar = ActionBar(self, HIERARCHY[hierarchy[0]][hierarchy[1]][2], color)
self.optionButtons = []
for i in range(len(HIERARCHY[hierarchy[0]][hierarchy[1]][-1])):
self.optionButtons.append(Button(self, text = stringsDict[HIERARCHY[hierarchy[0]][hierarchy[1]][-1][i]], highlightthickness = 0, font = ("Verdana", 14)))
self.optionButtons[i].targetFrame = HIERARCHY[hierarchy[0]][hierarchy[1]][3][i]
self.optionButtons[i].bind("<Button-1>", self.buttonClick)
self.optionButtons[i].place(x = (500 - 2*MARGIN_X) // 2, y = int((i+1)*(500 - 2*MARGIN_Y)/(len(HIERARCHY[hierarchy[0]][hierarchy[1]][-1])+1)), width = OPTION_BUTTON_WIDTH, height = OPTION_BUTTON_HEIGHT, anchor = CENTER)
def replace(self, newPosition):
super(MenuView, self).replace(newPosition)
self.actionBar.enableButton(not(newPosition - 1), 0)
return (0,)
def buttonClick(self, event):
if(blockInOut[0] or blockInOut[1]):
return
if(self.myHierarchy[0] < 3):
PM.bots = [None, 8, 8]
for i, j in enumerate(botMenuButtonText):
if(event.widget.cget("text") == stringsDict[j]):
PM.bots[-1] = 8
PM.bots[selectBotText.index(HIERARCHY[self.myHierarchy[0]][self.myHierarchy[1]][1]) + 1] = i
self.master.switch(event.widget.targetFrame, 0, self.myPosition)
def refreshLanguage(self):
self.actionBar.refreshLanguage()
for i in range(len(self.optionButtons)):
self.optionButtons[i].config(text = stringsDict[HIERARCHY[self.myHierarchy[0]][self.myHierarchy[1]][-1][i]])
class GameView(Half): #prikazuje plocu
stats = None
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
global table, winCount
table = []
winCount = [[0, 0, 0], [0, 0, 0]]
for i in range(8):
table.append([])
for j in range(8):
table[i].append(Cell(self, (i, j), 0))
def replace(self, newPosition):
super(GameView, self).replace(newPosition)
if(newPosition == 2):
global stopBot
resetBoard()
stopBot = False
PM.startGame()
return (0,)
def setStats(stats):
GameView.stats = stats
class StatsView(Half): #ono s lijeve strane ploce sto prikazuje info o igri
def __init__(self, master, position, color, hierarchy):
Half.__init__(self, master, position, color, hierarchy)
self.actionBar = ActionBar(self, HIERARCHY[hierarchy[0]][hierarchy[1]][2], color)
self.turnFrame = Frame(self, bg = color)
self.labels = [None, TurnLabel(self.turnFrame, 1), TurnLabel(self.turnFrame, -1)]
self.turnFrame.pack(side = TOP, expand = YES, fill = X)
self.charts = [ChartFrame(self, i, color) for i in (0, 1, 2)]
self.pauseButton = Button(self, text = stringsDict["Pause"].upper(), command = self.pause, highlightthickness = 0, height = 1, width = 10, font = ("Verdana", 15))
self.pauseButton.place(x = 240, y = 120, anchor = CENTER)
GameView.setStats(self)
def pause(self):
global pause
pause = not(pause)
text = ("Pause", "Resume")
self.pauseButton.config(text = stringsDict[text[pause]])
def upDate(self):
if(blockInOut[1]):
return
try:
for i in (1, -1): self.labels[i].upDate()
for i in self.charts: i.upDate()
except(TclError, RuntimeError):
return
def replace(self, newPosition): #sto se dogodi kad mijenja poziciju na ekranu
super(StatsView, self).replace(newPosition)
self.actionBar.enableButton(not(newPosition - 1), 0)
return (newPosition == 2, self.myHierarchy)
Game.initializeStrings()
HIERARCHY = (((ImageView, "", (0, 0), (1, 0)),),
((MenuView, "Main Menu", (0, 0), ((2, 0), (2, 1), (2, 2), (2, 3)), mainMenuButtonText),),
((MenuView, "Mode", (1, 0), ((3, 0), (3, 1), (3, 2)), modeMenuButtonText), (TextView, "Rules", (1, 0)), (SettingsView, "Settings", (1, 0)), (TextView, "About", (1, 0))),
((MenuView, "Select bot 1", (2, 0), ((4, 0),)*4, botMenuButtonText), (MenuView, "Select bot 1", (2, 0), ((4, 1),)*4, botMenuButtonText), (StatsView, "Stats", (2, 0), (4, 2))),
((MenuView, "Select bot 2", (3, 0), ((5, 0),)*4, botMenuButtonText), (StatsView, "Stats", (3, 1), (5, 1)), (GameView, "Human VS Human", (3, 2))),
((StatsView, "Stats", (4, 0), (6, 0)), (GameView, "Human VS Bot", (4, 1))),
((GameView, "Bot VS Bot", (5, 0)),))#struktura programa (klasa, naslov, kamoNazad[, kamoNaprijed, stringoviZaGumbePremaNaprijed])
class PM(object): #brine o tijeku igre, koji su botovi, tko je na redu itd.
player = 1
bots = [None, 8, 8]
bot = 8
seatChanged = 1
def switchPlayer(newPlayer):
PM.player = newPlayer
PM.bot = PM.bots[newPlayer*PM.seatChanged]
def startGame():
if(PM.bots != [None, 8, 8]):
myThread = Thread(target = PM.runnable)
myThread.start()
def runnable():
while(not(blockInOut[1] or stopBot)):
if(botSpeed or animationsEnabled):
sleep(botSpeed + animationsEnabled/200)
if(blockInOut[0] or ci or pause):
continue
if(PM.bot != 8 and not(blockInOut[1]) and len(Cell.availableCoordinates[PM.player])): #kompleksnost len je O(1), pa se mogu razbacivati ovako njome
x, y = BOTS[PM.bot](table, Cell.availableCoordinates[PM.player], PM.player, -PM.player)
cellPress(x, y)
def changeSeats():
PM.seatChanged *= -1
class Cell(Button):
availableCoordinates = [[],[],[]]
def __init__(self, master, coordinates, fill):
Button.__init__(self,
master = master,
image = IMAGES[fill],
width = CELL_SIZE,
height = CELL_SIZE,
bg = CELL_BG, bd = 1,
highlightthickness = name == "nt")
self.bind("<Button>", cellPress)
self.coordinates = coordinates
self.reset()
self.grid(row = coordinates[0], column = coordinates[1])
def switch(self, fill): #mijenja boju polja
self.fill = fill
try:
self.config(image = IMAGES[fill])
except(TclError, RuntimeError):
return
def reset(self):
self.availableCoordinates = [[],[],[]] #ovo mozda i ne treba, dosta memorije uzima - tu je za svaki slucaj
self.lenAC = [[0] * 9, [0] * 9, [0] * 9]
if(self.coordinates == (3, 3) or self.coordinates == (4, 4)):
self.switch(-1)
elif(self.coordinates == (3, 4) or self.coordinates == (4, 3)):
self.switch(1)
else:
self.switch(0)
def p(x, y): #pazi na rub ploce
if (x >= 0 and y >= 0 and x < 8 and y < 8):
return True
return False
def block(i = True, o = True): #blokira sav IO programa
global blockInOut
blockInOut = (i, o)
def cBlock(i = True, o = True): #blokira klikanje i bojanje celija
global ci, co
ci = i
co = o
def closeWindow(window): #poziva se kad netko stisne X gumb gore desno, gasi program
cBlock()
block()
for i in range(25):
window.attributes("-alpha", 1-i/25)
sleep(.03)
window.destroy()
def getAvailableCoordinates(): #totalno neoptimizirana funkcija, ali zanemarivo - slobodno optimiziraj ako ti se da xD
Cell.availableCoordinates = [[],[],[]] #resetiraj matricu u kojoj su spremljena ona highlightana polja
for i in range(8):
for j in range(8): #za svako polje u tablici
table[i][j].availableCoordinates = [[],[],[]] #resetiraj polja koja pritisak na to polje mijenja
table[i][j].lenAC = [[0] * 9, [0] * 9, [0] * 9] #resetiraj duljine po smjerovima
if(table[i][j].fill == 0): #ako je polje prazno
for r, s in DIRECTIONS: #za svaki smjer
if p(i + r, j + s) and table[i + r][j + s].fill: #ako nije preko ruba to polje i nije prazno
for k in (-1, 1): #za obije boje
temp = len(table[i][j].availableCoordinates[k]) #duljina trenutne liste s poljima koja ce se obojati pritiskom na trenutno polje
table[i][j].availableCoordinates[k] += getCellsToColor((r, s), (i, j), k) #ubaci u tu listu polja koja ce se obojati za ovo polje, ovaj smjer i ovu boju
table[i][j].lenAC[k][DIRECTIONS.index((r, s))] = len(table[i][j].availableCoordinates[k]) - temp #broj polja u ovom smjeru
if(len(table[i][j].availableCoordinates[k]) > 0 and not((i, j) in Cell.availableCoordinates[k])): #ako postoji nesto sto ce se obojati pritiskom na ovo polje
Cell.availableCoordinates[k].append((i, j)) #dodaj polje u listu polja za highlightanje
def markAvailableCoordinates(mark = True): #oznacava polja na koja se moze stati
if mark: #mark znaci oznacujemo li nove ili oDznacujemo stare
bgd = CELL_BG_HIGHLIGHT
else:
bgd = CELL_BG
if(botSpeed < 0.125 and PM.bot != 8): return
try:
for i in Cell.availableCoordinates[PM.player]:
table[i[0]][i[1]].config(bg = bgd)
if not(mark):
for i in Cell.availableCoordinates[-PM.player]:
table[i[0]][i[1]].config(bg = bgd)
except(TclError, RuntimeError):
return
def resetBoard(): #prije pocetka svake partije resetira/postavlja plocu
def createDisksAnimation():
cBlock(o = False)
try:
createDisks(((3, 3, -1), (4, 4, -1), (3, 4, 1), (4, 3, 1)))
getAvailableCoordinates()
markAvailableCoordinates()
GameView.stats.upDate()
except(TclError, RuntimeError):
return
cBlock(False, False)
PM.changeSeats()
PM.switchPlayer(1)
for i in range(8):
for j in range(8):
table[i][j].reset()
if(animationsEnabled and name != "posix"):
cdaThread = Thread(target = createDisksAnimation)
cdaThread.start()
else:
createDisksAnimation()
def getScore(): #ovo broji diskove na ploci
l = [0, 0, 0]
for i in range(8):
for j in range(8):
l[table[i][j].fill] += 1
return l
def getFrame(fn, p, d): #ovo se koristi u cellAnimation - daje sliku za neki frame u animaciji
if(fn == 7):
return IMAGES[p]
else:
if(d == 4 and (p == -1 or p == 2)):
return DISKS[-1][fn]
return DISKS[(d-4)*p+4][(fn-3)*p+3]
def gameOver(): #kad je jedna partija gotova, ovo biljezi tko je pobjedio i poziva funkciju za resetirati plocu
global winCount
score = getScore()
if(score[-1] > score[1]):
winColor = -1
elif(score[1] > score[-1]):
winColor = 1
else:
winColor = 0
winCount[0][winColor] += 1
winCount[1][winColor * PM.seatChanged] += 1
resetBoard()
def cellPress(event, y = 8): #kad se klikne na neko polje
if(y == 8):
coordinates = event.widget.coordinates
else: coordinates = event, y
if(coordinates in Cell.availableCoordinates[PM.player] and not(blockInOut[0] or blockInOut[1] or (y == 8 and PM.bot != 8) or ci or co or pause)):
cBlock(o = False)
markAvailableCoordinates(False)
if(animationsEnabled):
cellAnimationThread = Thread(target = cellAnimation, args = (coordinates,
table[coordinates[0]][coordinates[1]].availableCoordinates[PM.player],
table[coordinates[0]][coordinates[1]].lenAC[PM.player]))
cellAnimationThread.start()
else:
table[coordinates[0]][coordinates[1]].switch(PM.player)
for i in range(max(table[coordinates[0]][coordinates[1]].lenAC[PM.player])):
for j in range(9):
if(i < table[coordinates[0]][coordinates[1]].lenAC[PM.player][j] and not(blockInOut[1] or co)):
temp = [coordinates[k] + DIRECTIONS[j][k]*(i+1) for k in (0, 1)]
table[temp[0]][temp[1]].switch(PM.player)
PM.switchPlayer(-PM.player)
getAvailableCoordinates()
markAvailableCoordinates()
GameView.stats.upDate()
if(not(len(Cell.availableCoordinates[PM.player]) or len(Cell.availableCoordinates[-PM.player]))):
gameOver()
return
elif(not(len(Cell.availableCoordinates[PM.player]))):
PM.switchPlayer(-PM.player)
markAvailableCoordinates()
cBlock(False, False)
def createDisks(diskInfo): #postavlja novi disk na plocu
if(animationsEnabled):
for i in range(len(DISKS[0])):
for j in diskInfo:
if(not(blockInOut[1] or co)):
table[j[0]][j[1]].config(image = getFrame(fn = i, p = j[2], d = 4))
sleep(.03)
if(blockInOut[1] or co): return
for i in diskInfo:
table[i[0]][i[1]].switch(i[2])
def cellAnimation(coordinates, cellsToColor, directionLengths): #animacija zamjene diskova
createDisks(((coordinates[0], coordinates[1], PM.player),))
for i in range(max(directionLengths)):
for frameNumber in range(len(DISKS[0])+1):
for j in range(9):
if(i < directionLengths[j] and not(blockInOut[1] or co)):
temp = [coordinates[k] + DIRECTIONS[j][k]*(i+1) for k in (0, 1)]
try:
table[temp[0]][temp[1]].config(image = getFrame(fn = frameNumber, p = PM.player, d = j))
except(TclError, RuntimeError):
return
if(frameNumber == 7):
table[temp[0]][temp[1]].fill = PM.player
sleep(animationSpeed)
PM.switchPlayer(-PM.player)
getAvailableCoordinates()
markAvailableCoordinates()
GameView.stats.upDate()
if(not(len(Cell.availableCoordinates[PM.player]) or len(Cell.availableCoordinates[-PM.player]))):
gameOver()
return
elif(not(len(Cell.availableCoordinates[PM.player]))):
PM.switchPlayer(-PM.player)
markAvailableCoordinates()
cBlock(False, False)
def getCellsToColor(direction, coordinates, fill): #vraca listu polja koja ce se obojati pritiskom na odredeno polje
if(direction == (0, 0)):
return 0
cR, cC = coordinates[0] + direction[0], coordinates[1] + direction[1]
c = []
while (p(cR, cC) and table[cR][cC].fill == fill * -1):
c.append((cR, cC,))
cR += direction[0]
cC += direction[1]
if(p(cR, cC) and table[cR][cC].fill):
return c
return []
class ActionBar(Frame): #onaj dio na vrhu MenuViewa i StatsViewa gdje pise naslov i gdje je gumb za nazad
def __init__(self, master, targetFrame, color):
Frame.__init__(self, master)
self.config(bg = color)
self.backButton = Label(self, bg = color, image = backImage, highlightthickness = 0, relief = FLAT, bd = 0)
self.backButton.bind("<Button-1>", lambda f: self.master.master.switch(targetFrame, 1, self.master.myPosition))
self.nameLabel = Label(self, text = stringsDict[HIERARCHY[self.master.myHierarchy[0]][self.master.myHierarchy[1]][1]], bg = color)
self.nameLabel.pack(side = TOP, expand = NO, fill = Y)
self.place(x = 0, y = 0, width = 480, height = 480)
self.myColor = color
self.backEnabled = False
if(pilImported):
self.rawImage = Image.open("res/drawables/back" + str(WINDOW_BG.index(color)) + ".png")
self.sourceImage = self.rawImage.copy()
self.rawImage.close()
def enableButton(self, enabled, frameNumber):
if(pilImported and (enabled != self.backEnabled)):
if(frameNumber > 25):
self.backEnabled = enabled
if(not(enabled)):
self.backButton.place_forget()
return
self.sourceImage.putalpha((25*(1-enabled) + (2*enabled-1)*frameNumber)*10)
self.im = ImageTk.PhotoImage(self.sourceImage)
self.backButton.config(image = self.im)
if(frameNumber == 0):
self.backButton.place(x = 0, y = 0, height = 48, width = 48)
self.after(10, self.enableButton, enabled, frameNumber+1)
else:
if(enabled):
self.backButton.place(x = 0, y = 0)
else:
self.backButton.place_forget()
def refreshLanguage(self):
self.nameLabel.config(text = stringsDict[HIERARCHY[self.master.myHierarchy[0]][self.master.myHierarchy[1]][1]])
class TurnLabel(Label): #oni kvadrati crni i bijeli koji oznacuju tko je na redu
def __init__(self, master, color):
Label.__init__(self, master, bg = PLAYERS[color], image = IMAGES[0],
width = 50, height = 50, highlightcolor = PLAYERS[color], highlightbackground = PLAYERS[color])
if (color > 0):
self.pack(side = LEFT)
else:
self.pack(side = RIGHT)
self.myColor = color
def upDate(self):
if(co or blockInOut[1]):
return
if(self.myColor == PM.player):
self.config(highlightthickness = 5)
self.pack(padx = 5, pady = 5)
else:
self.config(highlightthickness = 0)
self.pack(padx = 10, pady = 10)
class ChartFrame(Frame): #dio StatsViewa gdje su chartovi
chartNames = ["Disks", "Wins by color", "Wins by player"]
def __init__(self, master, order, color):
Frame.__init__(self, master, bg = color)
self.chartName = Label(self, text = stringsDict[ChartFrame.chartNames[order]], bg = color, highlightthickness = 0)
self.chart = Frame(self, height = 50, bg = "white")
self.blackLabel = Label(self.chart, bg = "black", bd = 0, highlightthickness = 0, height = 50, width = 0, image = IMAGES[0])
self.blackLabel.pack(side = LEFT, fill = Y)
self.chartName.pack(side = TOP, fill = X, anchor = W)
self.chart.pack(side = TOP, fill = X)
self.pack(side = TOP, fill = X)
self.order = order
def upDate(self):
if (not(self.order)):
score = getScore()
else:
score = winCount[self.order - 1]
try:
self.blackLabel.config(width = int((500 - 2*MARGIN_X)*score[1]/(score[-1]+score[1])), bg = "black")
except(ZeroDivisionError):
self.blackLabel.config(width = 0, bg = "white")
Game().mainloop()
'''
print()
print(table[event.widget.grid_info()["row"]][event.widget.grid_info()["column"]].availableCoordinates[player])
print(table[event.widget.grid_info()["row"]][event.widget.grid_info()["column"]].lenAC[player])
if __name__ == "__main__":
root =
Game()
root.mainloop()
dok drzis polje ono se mrvicu smanji
kad pustis pretvori se u tvoju boju, a onda se ostala polja mijenjaju sirenje boje
'''
#koristim None, 1 i -1 radi jednostavnosti - da manje bugova ima
|
[
"noreply@github.com"
] |
noreply@github.com
|
b3b85534011c46c43575a3576f1acb0d4bd933bd
|
dc939ac0e50b9a03ba1b24215415e628279fd17f
|
/mysite/congratulation/migrations/0003_auto_20150724_1413.py
|
5abe0836e4055ba37f9680a5f827444fc4d0b156
|
[] |
no_license
|
RamiliaR/django
|
d3b3e084bb3a860a0d67e1e10cb5a844472b533b
|
6fe2e0455578ea3c53365239d74c4274be0ee859
|
refs/heads/master
| 2021-01-10T18:50:07.143708
| 2015-08-20T18:00:58
| 2015-08-20T18:00:58
| 41,083,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('congratulation', '0002_auto_20150714_1310'),
]
operations = [
migrations.AddField(
model_name='customer',
name='email',
field=models.CharField(null=True, max_length=75),
),
migrations.AddField(
model_name='customer',
name='password',
field=models.CharField(null=True, max_length=30),
),
migrations.AddField(
model_name='customer',
name='username',
field=models.CharField(null=True, max_length=30),
),
]
|
[
"RamiliaNigmatullina@gmail.com"
] |
RamiliaNigmatullina@gmail.com
|
5a46605486c336baa1b97ab5f88da4b51e4a3852
|
4425cd9025e430735ad81cc09d126d0ce9929e07
|
/order/api/serializers.py
|
135e26fec2699d1787648ba38587774330d16a14
|
[] |
no_license
|
omkarrane/ecommerce-api
|
e7c611776977be0c753157fe9d2819f08bf86f78
|
f1d11277571df9cadbba7c8f1d2892cf8895b43c
|
refs/heads/master
| 2020-03-19T21:11:11.065801
| 2018-06-11T13:44:09
| 2018-06-11T13:44:09
| 136,931,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
from accounts.api.utils import generateToken, decodeToken
import random
from rest_framework.serializers import (
CharField,
EmailField,
HyperlinkedIdentityField,
ModelSerializer,
SerializerMethodField,
ValidationError,
PrimaryKeyRelatedField
)
from retail.models import (
Retail_Info,
Retail_Warehouse,
Retail_Product
)
from order.models import (
Order,
)
User = get_user_model()
class OrderSerializer(ModelSerializer):
class Meta:
model = Order
fields = [
'item_list',
'total_cost'
]
|
[
"omkarrane10@gmail.com"
] |
omkarrane10@gmail.com
|
f7675475bf4180ae4b05a6af1aebe4521077a136
|
e131e752d826ae698e12e7bc0583362741f9d942
|
/AWS.py
|
c886890f56cf208b48066e6c151d54611fc0b574
|
[] |
no_license
|
abalberchak/TouchFace
|
ba30565be91b848126524aa47377789253370e04
|
d093ece8890b68c72e0855a024d908105df99b94
|
refs/heads/master
| 2021-01-11T01:43:35.067808
| 2016-09-29T03:41:13
| 2016-09-29T03:41:13
| 69,530,129
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
#----------------------------------------- Intent Schema Below:------------------------------
{
"intents": [
{
"intent": "AMAZON.ResumeIntent"
},
{
"intent": "AMAZON.PauseIntent"
},
{
"intent": "DojoInfoIntent"
},
{
"intent": "AMAZON.HelpIntent"
},
{
"intent": "AMAZON.StopIntent"
},
{
"intent": "TextBrendenIntent"
},
{
"intent": "GetTouchFaceIntent"
},
{
"intent": "DojoBrendenIntent"
},
{
"intent": "AskBrendan"
},
{
"intent": "twilioIntent"
},
{
"intent": "GroupTextIntent",
"slots": [
{
"name": "Name",
"type": "MEMBERS"
}
]
}
]
}
#----------------------------------------- Utterances Below:------------------------------
DojoInfoIntent what is the coding dojo
DojoInfoIntent tell me about the coding dojo
TextBrendenIntent Text Brendan
GetTouchFaceIntent Tell what does Brenden say
DojoBrendenIntent who is brenden
AskBrendan what is touchface
twilioIntent hi annet
GroupTextIntent text {Name}
|
[
"minhpn.org.ec@gmail.com"
] |
minhpn.org.ec@gmail.com
|
8e86bc7463a15ee8ba540cebbdc6dbebe01e0474
|
461d7bf019b9c7a90d15b3de05891291539933c9
|
/bip_utils/bip39/bip39_entropy_generator.py
|
47c75cf8f3c76ff3b2cb1f678605ec4780e1d6e9
|
[
"MIT"
] |
permissive
|
renauddahou/bip_utils
|
5c21503c82644b57ddf56735841a21b6306a95fc
|
b04f9ef493a5b57983412c0ce460a9ca05ee1f50
|
refs/heads/master
| 2023-07-16T05:08:45.042084
| 2021-08-19T09:33:03
| 2021-08-19T09:33:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,419
|
py
|
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
import os
from enum import IntEnum, unique
from typing import List, Union
@unique
class Bip39EntropyBitLen(IntEnum):
""" Enumerative for BIP-0039 entropy bit lengths. """
BIT_LEN_128 = 128,
BIT_LEN_160 = 160,
BIT_LEN_192 = 192,
BIT_LEN_224 = 224,
BIT_LEN_256 = 256,
class Bip39EntropyGeneratorConst:
""" Class container for BIP39 entropy generator constants. """
# Accepted entropy lengths in bit
ENTROPY_BIT_LEN: List[Bip39EntropyBitLen] = [
Bip39EntropyBitLen.BIT_LEN_128,
Bip39EntropyBitLen.BIT_LEN_160,
Bip39EntropyBitLen.BIT_LEN_192,
Bip39EntropyBitLen.BIT_LEN_224,
Bip39EntropyBitLen.BIT_LEN_256,
]
class Bip39EntropyGenerator:
""" Entropy generator class. It generates random entropy bytes with the specified length. """
def __init__(self,
bits_len: Union[int, Bip39EntropyBitLen]) -> None:
""" Construct class by specifying the bits length.
Args:
bits_len (int or Bip39EntropyBitLen): Entropy length in bits
Raises:
ValueError: If the bit length is not valid
"""
if not self.IsValidEntropyBitLen(bits_len):
raise ValueError("Entropy bit length is not valid (%d)" % bits_len)
self.m_bits_len = bits_len
def Generate(self) -> bytes:
""" Generate random entropy bytes with the length specified during construction.
Returns:
bytes: Generated entropy bytes
"""
return os.urandom(self.m_bits_len // 8)
@staticmethod
def IsValidEntropyBitLen(bits_len: Union[int, Bip39EntropyBitLen]) -> bool:
""" Get if the specified entropy bit length is valid.
Args:
bits_len (int or Bip39EntropyBitLen): Entropy length in bits
Returns:
bool: True if valid, false otherwise
"""
return bits_len in Bip39EntropyGeneratorConst.ENTROPY_BIT_LEN
@staticmethod
def IsValidEntropyByteLen(bytes_len: int) -> bool:
""" Get if the specified entropy byte length is valid.
Args:
bytes_len (int): Entropy length in bytes
Returns:
bool: True if valid, false otherwise
"""
return Bip39EntropyGenerator.IsValidEntropyBitLen(bytes_len * 8)
|
[
"54482000+ebellocchia@users.noreply.github.com"
] |
54482000+ebellocchia@users.noreply.github.com
|
0389e54314c5db68c26748f6b8dc17cb73d58775
|
fc2eb6f42183d7ca0142a039400548194130ff47
|
/items.py
|
23c16a8d8a825dcbafff96eb5e47f06777634b2e
|
[] |
no_license
|
vinaygulani1/RealEstatePricePrediction
|
5e3cf7ab5659f1f28a3cf81853fca2a42e4044ac
|
8cb30eea4c10147b4dba69058620e092b06617a1
|
refs/heads/master
| 2021-01-10T01:27:21.865451
| 2015-11-20T08:15:18
| 2015-11-20T08:15:18
| 46,197,917
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class Property(scrapy.Item):
data_code = scrapy.Field()
latitude = scrapy.Field()
longtitude = scrapy.Field()
property_type = scrapy.Field()
address = scrapy.Field()
city = scrapy.Field()
askprice = scrapy.Field()
sellingprice = scrapy.Field()
year_built = scrapy.Field()
living_area = scrapy.Field()
num_parking = scrapy.Field()
num_bath = scrapy.Field()
num_bed = scrapy.Field()
num_room = scrapy.Field()
sold_date = scrapy.Field()
|
[
"vinay.gulani@relishly.com"
] |
vinay.gulani@relishly.com
|
3b9a743d044c3344202f2e666d176447cdb0156d
|
d8fa64b4176c8c434e8812faed51bc246e6794d7
|
/pythoncode/homework_api/test_tag_manage.py
|
a36ea37af271617e587d652c840657a68702532f
|
[] |
no_license
|
tanya931117/TestDev
|
0103ff216af48d80811e81ac91756fdc878ef43d
|
3eba391c1405238e6a7ccc267b1f3722364a0a5c
|
refs/heads/master
| 2022-12-09T18:42:29.940915
| 2020-09-10T11:38:07
| 2020-09-10T11:38:07
| 270,223,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,658
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/8 13:27
# @Author : tanya
# @File : test_tag_manage.py
# @Software: PyCharm
import pytest
import yaml
from pythoncode.PO.contact_tag_api import ContactTagApi
from pythoncode.PO.we_work_api import WeWorkApi
@pytest.mark.usefixtures("start_case")
class TestTagManage():
def setup_class(self):
self.contact_tag = ContactTagApi()
wework =WeWorkApi()
self.token = wework.get_token(self.contact_tag._contact_secret)
#用钩子函数pytest_generate_tests加载参数
func_params = {"test_all": ["tagname", "add_tag_api.yml"],
"test_tag_mem":["tagid,userlist","add_tag_mem_api.yml"]}
def get_params(path):
with open(path, "r",encoding="utf-8") as f:
params = yaml.safe_load(f)
return params
@pytest.mark.skip
def test_all(self,tagname):
response = self.contact_tag.get_tag_list(self.token)
result = response.json()
#{'errcode': 0, 'errmsg': 'ok', 'taglist': [{'tagid': 1, 'tagname': '标签一'}, {'tagid': 2, 'tagname': '标签二'}]}
if len(result["taglist"]) >0:
for tag in result["taglist"]:
if tag["tagname"] == tagname:
self.contact_tag.del_tag({"tagid":tag["tagid"]},self.token)
break
tag = {
"tagname": tagname
}
res = self.contact_tag.add_tag(tag,self.token)
assert res.json()["errmsg"].startswith("created")
def test_tag_mem(self,tagid,userlist):
response = self.contact_tag.get_tag_mems({"tagid":tagid},self.token)
result = response.json()
#{'errcode': 0, 'errmsg': 'ok', 'userlist': [{'userid': 'LiTan', 'name': '李土云'}, {'userid': 'Miao', 'name': '张世锋'}, {'userid': 'eunhyuk.lee', 'name': 'LeeEunHyuk'}, {'userid': 'donghae.lee', 'name': 'LeeDongHae'}], 'partylist': [], 'tagname': '标签一'}
if len(result["userlist"]) > 0:
userlist_del = []
for user in result["userlist"]:
if user["userid"] in userlist:
userlist_del.append(user["userid"])
if len(userlist_del) > 0 :
params = {
"tagid": tagid,
"userlist": userlist_del
}
self.contact_tag.del_tag_mem(params,self.token)
params = {
"tagid": tagid,
"userlist": userlist
}
res = self.contact_tag.add_tag_mem(params,self.token).json()
#{'errcode': 0, 'errmsg': 'ok', 'invalidparty': []}
assert res["errmsg"] == "ok"
|
[
"tanya_li_931117@163.com"
] |
tanya_li_931117@163.com
|
19c119d0ed1fe30a4bd6eede46042eb475aa7159
|
eb58d60b139300e99b4c5b0018e49a1b951d9b49
|
/hw1/temp.py
|
2c31d2677edfd1c2763e5a35b4ea2c2604b60b0f
|
[] |
no_license
|
Vamanan/inf553
|
114e6186f5349da996fc18c00773fc1ecb799edd
|
b2b826d3d0e2939eeba12b4b8df3f6bbd2b673da
|
refs/heads/master
| 2021-01-19T05:01:14.230363
| 2015-11-06T02:17:05
| 2015-11-06T02:17:05
| 42,913,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
import MapReduce
import sys
import re
"""
tf df Example in the Simple Python MapReduce Framework
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
# key: document identifier
# value: document contents
dname = record[0]
content = record[1].lower()
words = content.split()
for w in set(words):
if re.match(r'\w+$', w):
mr.emit_intermediate(w, (dname,words.count(w)))
def reducer(key, list_of_values):
# key: word
# value: df along with individual tf tuples
dftotal = len(list_of_values)
mr.emit((key, dftotal,list_of_values))
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
|
[
"passmaster10@bitbucket.org"
] |
passmaster10@bitbucket.org
|
17ebad59e8fb8cac9e54303768189e0f854b5502
|
e8fa6b783794bbd636d4ba815fd90390aabb4d73
|
/integration/combination/test_state_machine_with_api.py
|
20deaad43f516f96a4e5f315b5d52d5a729aa9ee
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Skarlso/serverless-application-model
|
eb74d7dee11bf0a911e1e0dbb70bd03d4cbbbad7
|
172c832c053b3b5405dd4e85853386cc5a98841e
|
refs/heads/develop
| 2023-08-21T09:24:32.577637
| 2022-10-28T23:14:59
| 2022-10-28T23:14:59
| 325,041,387
| 1
| 1
|
Apache-2.0
| 2022-10-31T17:41:47
| 2020-12-28T15:01:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,250
|
py
|
from unittest.case import skipIf
from integration.helpers.base_test import BaseTest
from integration.helpers.common_api import get_policy_statements
from integration.helpers.resource import current_region_does_not_support
from integration.config.service_names import STATE_MACHINE_WITH_APIS
@skipIf(
current_region_does_not_support([STATE_MACHINE_WITH_APIS]),
"StateMachine with APIs is not supported in this testing region",
)
class TestStateMachineWithApi(BaseTest):
def test_state_machine_with_api(self):
self.create_and_verify_stack("combination/state_machine_with_api")
outputs = self.get_stack_outputs()
region = outputs["Region"]
partition = outputs["Partition"]
state_name_machine_arn = outputs["MyStateMachineArn"]
implicit_api_role_name = outputs["MyImplicitApiRoleName"]
implicit_api_role_arn = outputs["MyImplicitApiRoleArn"]
explicit_api_role_name = outputs["MyExplicitApiRoleName"]
explicit_api_role_arn = outputs["MyExplicitApiRoleArn"]
rest_apis = self.get_stack_resources("AWS::ApiGateway::RestApi")
implicit_rest_api_id = next(
(x["PhysicalResourceId"] for x in rest_apis if x["LogicalResourceId"] == "ServerlessRestApi"), None
)
explicit_rest_api_id = next(
(x["PhysicalResourceId"] for x in rest_apis if x["LogicalResourceId"] == "ExistingRestApi"), None
)
self._test_api_integration_with_state_machine(
implicit_rest_api_id,
"POST",
"/pathpost",
implicit_api_role_name,
implicit_api_role_arn,
"MyStateMachinePostApiRoleStartExecutionPolicy",
state_name_machine_arn,
partition,
region,
)
self._test_api_integration_with_state_machine(
explicit_rest_api_id,
"GET",
"/pathget",
explicit_api_role_name,
explicit_api_role_arn,
"MyStateMachineGetApiRoleStartExecutionPolicy",
state_name_machine_arn,
partition,
region,
)
def _test_api_integration_with_state_machine(
self, api_id, method, path, role_name, role_arn, policy_name, state_machine_arn, partition, region
):
apigw_client = self.client_provider.api_client
resources = apigw_client.get_resources(restApiId=api_id)["items"]
resource = get_resource_by_path(resources, path)
post_method = apigw_client.get_method(restApiId=api_id, resourceId=resource["id"], httpMethod=method)
method_integration = post_method["methodIntegration"]
self.assertEqual(method_integration["credentials"], role_arn)
# checking if the uri in the API integration is set for Step Functions State Machine execution
expected_integration_uri = "arn:" + partition + ":apigateway:" + region + ":states:action/StartExecution"
self.assertEqual(method_integration["uri"], expected_integration_uri)
# checking if the role used by the event rule to trigger the state machine execution is correct
start_execution_policy = get_policy_statements(role_name, policy_name, self.client_provider.iam_client)
self.assertEqual(len(start_execution_policy), 1, "Only one statement must be in Start Execution policy")
start_execution_policy_statement = start_execution_policy[0]
self.assertTrue(type(start_execution_policy_statement["Action"]) != list)
policy_action = start_execution_policy_statement["Action"]
self.assertEqual(
policy_action,
"states:StartExecution",
"Action referenced in event role policy must be 'states:StartExecution'",
)
self.assertTrue(type(start_execution_policy_statement["Resource"]) != list)
referenced_state_machine_arn = start_execution_policy_statement["Resource"]
self.assertEqual(
referenced_state_machine_arn,
state_machine_arn,
"State machine referenced in event role policy is incorrect",
)
def get_resource_by_path(resources, path):
return next((resource for resource in resources if resource["path"] == path), None)
|
[
"noreply@github.com"
] |
noreply@github.com
|
36a0dfc565c95c26b1822a0d3b66a4c426abe740
|
f07a5976c336c738d00984945e62aec7054f6870
|
/kore4.py
|
e71b96a4a91ccfccf82c9a18a7a8ec259a4088b4
|
[] |
no_license
|
snorey/lexical-tools
|
8bca81e8b0231619ba9750c91f425b03ae652a01
|
3ab390885cb1b309a2727fdffe98efa3923e0db5
|
refs/heads/master
| 2021-01-24T00:44:20.052004
| 2018-02-24T22:22:20
| 2018-02-24T22:22:20
| 122,780,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,989
|
py
|
import datetime
import os
import re
import time
import urllib
import urllib2
dirr="C:\\Code\\Naver"
def naverextract(text):
title=text.split("<title>")[1].split(":")[0].strip()
if "'" in title:
title=title.split("'")[1].split("'")[0].strip()
pieces=text.split('<SPAN class="EQUIV">')[1:]
pieces=[x.split("</span>")[0] for x in pieces]
pieces=[re.sub("\<.*?\>","",x) for x in pieces]
pieces=[re.sub("[\<\(\,\[\r\n].*","",x) for x in pieces]
glosses=[x.strip() for x in pieces]
return title,glosses
def naverextract_ko(text):
pass
def naverloop(directory=dirr):
files=os.listdir(directory)
files=[os.path.join(directory,x) for x in files if "G" not in x] # skip Googley files
outlines=[]
for f in files:
stamp=f.split("\\")[-1].split(".")[0]
print stamp
text=open(f).read()
if not text: continue
title,glosses=naverextract(text)
outlines.append(stamp+"\t"+title+"\t"+", ".join(glosses))
print stamp,str(glosses)
return outlines
def googleextract(text):
catchstring1='<meta name="description" content="'
catchstring2="- Google"
if catchstring1 not in text:
return "",""
caught=text.split(catchstring1)[1].split(catchstring2)[0].strip()
if '"' in caught: caught=caught.split('"')[0].strip()
if ":" not in caught:
return "",""
title=caught.split(":")[0].strip()
glosses=caught.split(":")[1].split(";")
glosses=[x.strip() for x in glosses]
return title,glosses
def googloop(directory=dirr):
files=os.listdir(directory)
files=[os.path.join(directory,x) for x in files if "G" in x] # Googles only
outlines=[]
for f in files:
stamp=f.split("\\")[-1].split(".")[0]
print stamp
text=open(f).read()
if not text: continue
title,glosses=googleextract(text)
outlines.append(stamp+"\t"+title+"\t"+", ".join(glosses))
print stamp,str(glosses)
return outlines
def list2voc(path="C:\\Code\\koreanvocab2.txt"):
import pie
vocab=pie.Vocabulary(filter=False,language="Korean")
text=open(path).read()
text=text.decode("utf-8","ignore")
lines=text.split("\n")
lines=[tuple(x.split("\t")) for x in lines if "\t" in x]
for line in lines:
rank=line[0]
print rank.encode('utf-8','ignore')
if rank:
try:
tally=1000000/int(rank)
except:
tally=0
else:
tally=0
word=line[1]
newword=pie.Word(text=word)
newword.tally=tally
vocab.allwords.add(newword)
return vocab
def combine(file1,file2):# TSV of CSV glosses
dixie={}
dixie2={}
for line in file1.split("\n"): #files come in as text, not handles
parts=line.split("\t")
dixie[parts[1]]=[x.strip() for x in parts[2].split(",") if x.strip()]
for line in file2.split("\n"):
parts=line.split("\t")
if parts[1] in dixie.keys():
dixie[parts[1]].extend([x.strip() for x in parts[2].split(",") if x.strip()])
else:
dixie[parts[1]]=[x.strip() for x in parts[2].split(",") if x.strip()]
for d in dixie.keys():
newlist=[]
newlist2=[]
countlist=[]
for i in dixie[d]:
newlist.extend([x.strip() for x in re.split("[^a-zA-Z0-9\-\s]+",i) if x])
for n in newlist:
testers=["a","an","the","to"]
for t in testers:
if (n.startswith(t+" ") or n.startswith(t.title()+" ")) and len(n) > 1+len(t):
n=n[len(t):].strip()
break
newlist2.append(n)
countlist=list(set((newlist2.count(x),x) for x in newlist2))
countlist.sort()
countlist.reverse()
dixie[d]=newlist2
dixie2[d]=countlist
return dixie,dixie2
def get_naver_en(word):
pass
def get_naver_ko(word):
import urllib, urllib2
url="http://krdic.naver.com/search.nhn?dic_where=krdic&query=%s&kind=keyword" % urllib.quote(word)
page=urllib2.urlopen(url,timeout=60).read()
matcher=re.escape('<a class="fnt15" href="') + '([^\"]*)' + re.escape('"><strong>') + '([^\<]*)' + re.escape('</strong><') # trailing "<" excludes partial headword matches
pieces=re.findall(matcher,page)
defs=[]
for p in pieces: # keep this simple for now; don't bother actually chasing to next page
if word not in p:
# print "No words!"
continue
else:
# print "Yay!"
pass
try:
chunk=page.split(p[0])[1].split("<p>")[1].split("<div")[0].split("<p")[0]
except Exception, e:
print "Caught",e
continue
chunk=re.sub("\<[^\>]*\>","",chunk)
chunk=chunk.replace("<","<").replace(">",">")
lines=[x.strip() for x in chunk.split("\n") if x.strip()]
defs.append(" / ".join(lines))
return defs
def naver_ko_loop(inpath,outpath="",directory="C:\\Code"):
if not outpath:
outpath=os.path.join(directory,"testdefs-"+datetime.date.today().isoformat()+".txt")
words=open(inpath).read().split("\n")
output=""
words=[x.strip() for x in words if x.strip()]
print len(words)
for w in words:
done=False
print words.index(w)
while not done:
try:
defs=get_naver_ko(w)
time.sleep(1)
except Exception, e:
print str(e)
time.sleep(5)
continue
done=True
if defs:
defstring=" // ".join(defs)
output+=w+"\t"+defstring+"\n"
else:
output+=w+"\t\n"
if outpath:
try:
open(outpath,"w").write(output)
print outpath
except Exception, e:
print str(e)
outdefs=dict([tuple(x.split("\t")) for x in output.split("\n") if x.strip()])
return outpath,outdefs
def get_examples_naver(word,kill_html=True): #UTF8-encoded hangul string
url="http://krdic.naver.com/search.nhn?kind=all&scBtn=true&query="+urllib.quote(word)
print url
output=[]
done=False
while not done:
try:
page=urllib2.urlopen(url).read()
done=True
except Exception, e:
print e
continue
try:
section=page.split('<span class="head_ex">')[1].split("</ul>")[0]
except IndexError, e:
print str(e)
return output
lines=section.split("<li>")[1:]
lines=[x.split("<p>")[1].split("<span class")[0].strip() for x in lines]
if kill_html:
lines=[re.sub("\<[^\<]*\>","",x) for x in lines]
return lines
|
[
"noreply@github.com"
] |
noreply@github.com
|
c74987a27b063d67e8ce049ee6e834b52db5fe03
|
c09b899e42e867e20993488e5f4e3d79c4eb779f
|
/movies/views.py
|
1aa3c1ecd81aa00297a088047aaa610c2979330b
|
[] |
no_license
|
JaycobDuffel/Vidly
|
33913b50096ac8b1cd54bcc62c503287d1b11c47
|
b0aa76e6e9634a7ab64b50652db941577567932a
|
refs/heads/master
| 2022-12-23T14:26:34.360432
| 2020-10-07T02:37:18
| 2020-10-07T02:37:18
| 300,044,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
from django.http import HttpResponse, Http404
from django.shortcuts import render, get_object_or_404
from .models import Movie
# Create your views here.
def index(request):
movies = Movie.objects.all()
return render(request, "movies/index.html", {"movies": movies})
def detail(request, movie_id):
movie = get_object_or_404(Movie, pk=movie_id)
return render(request, "movies/detail.html", {"movie": movie})
|
[
"jbdcoding@gmail.com"
] |
jbdcoding@gmail.com
|
ed949aaf72486dd9f746cc86f1ee975b0529cb89
|
3bc7c8a96be2cf2b60f8c1757e71a980140bd43b
|
/bus_plotter.py
|
91c2983b5b0cd903810a8fe00e57eba7302aea54
|
[] |
no_license
|
chrisjkuch/busdata
|
98bff373efaacdd4cb6d1e618165d3de63296d69
|
f68ba523191dbcb50d6ffaeb20c73e7fc6225c0e
|
refs/heads/master
| 2020-12-31T06:56:27.346245
| 2017-05-17T03:15:27
| 2017-05-17T03:15:27
| 80,571,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,629
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 9 17:28:01 2016
@author: chris
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
import datetime as dt
def dayTimeToDateTime(fltime):
hours = int(fltime * 24)
minutes = int(fltime * 24 * 60 - hours * 60)
seconds = 0 #int(fltime * 24 * 3600 - hours * 60 * 60 - minutes * 60)
return(dt.datetime(2016,8,1,hours,minutes,seconds))
def setSameDay(dt_obj):
return(dt.datetime(2016,8,1,dt_obj.hour,dt_obj.minute,dt_obj.second))
# Temporary: Pick the stop we are interested in
stop = u'Sherman/Foster'
etoc = pd.read_csv('evanstontochicago.csv')
ctoe = pd.read_csv('chicagotoevanston.csv')
# Get the x-coordinates of vertical lines marking scheduled times
# Read the log data from the spreadsheet
busdata = pd.read_csv('test.csv')
bd = busdata[[u'Stop Name', u'Time']]
bd[u'Time'] = bd[u'Time'].astype('datetime64').apply(lambda x: md.date2num(setSameDay(x)))
groupedstops = bd.groupby(u'Stop Name')
curStop = bd.groupby(u'Stop Name').groups[stop]
times = bd.iloc[curStop, 1]
#times = times.apply(lambda x: md.date2num(setSameDay(x)))
# Set up the histogram
fig = plt.figure()
my_bins = md.date2num(dt.datetime(2016,8,1,0,0,0)) + np.linspace(6,24,(18*60)+1)/24.
hfmt = md.DateFormatter('%H:%M')
thisroute = etoc
thesestops = list(thisroute.columns.values)
nplots = len(thesestops)
i = 1
all_axes = []
for stop in thesestops:
if(i > 1):
fig.add_subplot(nplots, 1, i, sharex=all_axes[0], sharey=all_axes[0])
else:
fig.add_subplot(nplots, 1, 1)
i += 1
curStop = groupedstops.groups[stop]
curTimes = bd.iloc[curStop, 1]
ax = curTimes.plot.hist(bins=my_bins)
ax2 = curTimes[-1:].plot.hist(bins=my_bins)
all_axes.append(ax)
#nboundsched = etoc[stop].apply(lambda x: md.date2num(dayTimeToDateTime(x)))
cursched = thisroute[stop].apply(lambda x: md.date2num(dayTimeToDateTime(x)))
top = 6
#y1n = [0] * len(nboundsched)
#y2n = [top] * len(nboundsched)
y1 = [0] * len(cursched)
y2 = [top] * len(cursched)
plt.vlines(cursched, y1, y2)
ax.xaxis.set_major_locator(md.HourLocator())
ax.xaxis.set_major_formatter(hfmt)
ax.yaxis.set_ticks([])
ax.yaxis.set_label_position('right')
plt.ylabel(stop, rotation=0)
#stopdata = scheduledata[stop].apply(lambda x: md.date2num(dayTimeToDateTime(x)))
#y1 = [0] * len(stopdata)
#y2 = [2] * len(stopdata)
plt.xticks(rotation=45)
plt.xlim([md.date2num(dt.datetime(2016,8,1,6,0,0)), md.date2num(dt.datetime(2016,8,1,23,59,0))])
plt.gcf().subplots_adjust(hspace=0)
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
928fcb4241acb702ec2df763ded6e985a2e6fec8
|
8ff979eb571966fcd3a8a4ac359110945b21a471
|
/ud120/ex01/ClassifyNB.py
|
cc4faeceb6d24854ce4a127805292bcd20862bcb
|
[] |
no_license
|
ziriuz/udacity_learn
|
4a1ad2f4f6ed3670321b431a6a8d35027b1cede3
|
acdf479936368e0f5803fb0c1f004ee8a85fdbe1
|
refs/heads/master
| 2021-08-28T23:16:26.303069
| 2017-12-13T08:11:45
| 2017-12-13T08:11:45
| 113,209,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
# import the sklearn module for GaussianNB
from sklearn.naive_bayes import GaussianNB
def classify(features_train, labels_train):
# create classifier
gnb = GaussianNB()
# fit the classifier on the training features and labels
cls = gnb.fit(features_train, labels_train)
# return the fit classifier
return cls
|
[
"noreply@github.com"
] |
noreply@github.com
|
394178ecf0b8ba5aa5f8ffac26bfc54459935fb5
|
812f7c3982f2525bc7c3013938f70ffdda4abe1e
|
/vehicles/admin.py
|
875e37a21f750efb0d84da238affc5a7c39c522c
|
[] |
no_license
|
roditashakya/ebooking
|
7393aa651d2ddc979016000d62d6f44a3caddf57
|
3cdfe3a9b911835b4fcc32ae63e1e28983a4d6d9
|
refs/heads/master
| 2020-05-01T09:42:29.961770
| 2019-03-24T19:54:56
| 2019-03-24T19:54:56
| 177,406,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
from django.contrib import admin
# Register your models here.
from .models import SeatTemplate, TravelDetail, Vehicle, Booking, BookingUser, Seat, Schedule, Search
admin.site.register(SeatTemplate)
admin.site.register(TravelDetail)
admin.site.register(Vehicle)
admin.site.register(Booking)
admin.site.register(BookingUser)
admin.site.register(Seat)
admin.site.register(Schedule)
admin.site.register(Search)
|
[
"rodishakya@gmail.com"
] |
rodishakya@gmail.com
|
af935ba661ffbdb6c3921e41c3c65c2ba9235ccd
|
843d9f17acea5cfdcc5882cf8b46da82160c251c
|
/adafruit_stepper.py
|
8e9319c17ea13b32312acbe50d018791ab2ea40a
|
[] |
no_license
|
gunny26/raspberry
|
7c1da63785c86412af9fa467ea231b19a97f4384
|
e4eb0d2f537b319d41b6c50b59e69fb297c62d25
|
refs/heads/master
| 2016-09-06T14:02:30.122102
| 2014-01-29T16:31:08
| 2014-01-29T16:31:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
import RPi.GPIO as GPIO
import time
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
enable_pin = 18
coil_A_1_pin = 4
coil_A_2_pin = 17
coil_B_1_pin = 23
coil_B_2_pin = 24
GPIO.setup(enable_pin, GPIO.OUT)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
GPIO.output(enable_pin, 1)
def forward(delay, steps):
for i in range(0, steps):
setStep(1, 0, 1, 0)
time.sleep(delay)
setStep(0, 1, 1, 0)
time.sleep(delay)
setStep(0, 1, 0, 1)
time.sleep(delay)
setStep(1, 0, 0, 1)
time.sleep(delay)
def backwards(delay, steps):
for i in range(0, steps):
setStep(1, 0, 0, 1)
time.sleep(delay)
setStep(0, 1, 0, 1)
time.sleep(delay)
setStep(0, 1, 1, 0)
time.sleep(delay)
setStep(1, 0, 1, 0)
time.sleep(delay)
def setStep(w1, w2, w3, w4):
GPIO.output(coil_A_1_pin, w1)
GPIO.output(coil_A_2_pin, w2)
GPIO.output(coil_B_1_pin, w3)
GPIO.output(coil_B_2_pin, w4)
while True:
try:
delay = raw_input("Delay between steps (milliseconds)?")
steps = raw_input("How many steps forward? ")
forward(int(delay) / 1000.0, int(steps))
steps = raw_input("How many steps backwards? ")
backwards(int(delay) / 1000.0, int(steps))
except KeyboardInterrupt:
GPIO.cleanup()
|
[
"arthur.messner@gmail.com"
] |
arthur.messner@gmail.com
|
6e8d64feec1287a4b57e616953041d7efd2c6bc7
|
245a9680c18bb08b338b024e8cb61da899097cec
|
/gamestore/store/urls.py
|
391ff5e89cf9119bd07a47ae7fdfcfa0e21e395f
|
[] |
no_license
|
arnold1000/onlinestore
|
c5f2c003076d248cc18a3e2698c0e09cb4c0a18c
|
fe8a393a270dfb6cd32c0628385a0777d815f8e9
|
refs/heads/master
| 2021-09-22T23:19:15.736468
| 2020-02-20T10:04:50
| 2020-02-20T10:04:50
| 240,704,666
| 0
| 0
| null | 2020-02-15T12:58:46
| 2020-02-15T12:17:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 955
|
py
|
from django.urls import include, path
from . import views
urlpatterns = [
path('', views.games, name='store-home'),
path(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.activate, name='activate'),
path('games/', views.games, name='games'),
path('games/<int:game_id>/', views.game, name='game'),
path('games/<int:game_id>/score', views.save_score, name='save_score'),
path('games/<int:game_id>/save', views.save_game, name='save_game'),
path('games/<int:game_id>/load', views.load_game, name='load_game'),
path('games/new/', views.add_new, name='add_game'),
path('games/<int:game_id>/modify', views.modify, name='modify'),
path('games/<int:game_id>/delete', views.delete, name='delete'),
path('shop/', views.shop, name='shop'),
path('shop/<int:game_id>/', views.buy, name="buy"),
path('shop/payment/', views.buy_response, name="buy_response")
]
|
[
"arttu.e.koponen@aalto.fi"
] |
arttu.e.koponen@aalto.fi
|
f0ebbd9f94e46663f17baf0ce5d22f3445f7b76f
|
b1a584df32c2d11a0648dec27e2f9cacd540a7f2
|
/realtors/migrations/0001_initial.py
|
22c019188655b19dcb1f7eae14c3d94742f64f7b
|
[] |
no_license
|
sree61/Django_project_realestate
|
b822f65dff1ea03e3739208c66b8a5f559bce1c0
|
8da46343a0275f8cd13bd71ed74eee6e4efb003a
|
refs/heads/master
| 2020-04-26T04:02:40.594223
| 2019-03-12T18:01:51
| 2019-03-12T18:01:51
| 173,288,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
# Generated by Django 2.1.7 on 2019-02-27 03:20
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Realtor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('photo', models.ImageField(upload_to='photos/%Y/%m/%d/')),
('description', models.TextField(blank=True)),
('phone', models.CharField(max_length=20)),
('email', models.CharField(max_length=50)),
('is_mvp', models.BooleanField(default=False)),
('hire_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
|
[
"sreeamaravila@gmmail.com"
] |
sreeamaravila@gmmail.com
|
f6f2eef9a2b17f09fa5e8751ab81ae99d47ae64e
|
e85a47a94e59f4c2feaec8aa635aa9d42e0edc00
|
/django_project/urls.py
|
f03e6a43c2f71e2fc1f25d65ab2be1a55421f207
|
[] |
no_license
|
afAditya/Django_Blog
|
1bf0e2949db03a6813db380c49cdca324cd7173b
|
7662d4765a03b25814481edc5a189b97d6899b41
|
refs/heads/master
| 2023-02-17T03:58:49.091498
| 2021-01-20T18:18:51
| 2021-01-20T18:18:51
| 330,613,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
"""django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from users import views as users_views
# URL routes: admin, auth/registration pages, and the blog app at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('register/', users_views.register, name='register'),
    path('profile/', users_views.profile, name='profile'),
    path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
    path('', include('blog.urls')),
]

# Serve user-uploaded media through Django only in development (DEBUG=True).
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"adityasinghrajput971@gmail.com"
] |
adityasinghrajput971@gmail.com
|
630cda4283ce85c051de8920c72c86b1b92d2ca7
|
a305456a1b6509437883bb3de8d0c3f2968694a1
|
/apis/connect/auth.py
|
55e8b22c76ffb214dbba1f656da60e331a09384f
|
[] |
no_license
|
yoshow/tornado-quick-start
|
6ebeeb87e09b5a9c357cdc2332c7d80fdaa96d06
|
6e00dd3a873e624c1a39cc5f94d47ddc1a366a00
|
refs/heads/master
| 2021-01-18T15:27:31.410263
| 2018-06-19T16:21:59
| 2018-06-19T16:21:59
| 86,654,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,223
|
py
|
# -*- coding: utf-8 -*-
"""
OAuth 2.0
"""
import uuid
import json
from sqlalchemy import select, text
import x.data.orm
from x.web.apis import WebApiResponse
from models.membership import AccountInfo, MemberInfo
from models.connect import ConnectAuthorizationCodeInfo, ConnectAccessTokenInfo
class Auth(object):
""" 验证管理 """
def authorize(self, req, res=WebApiResponse()):
"""
授权验证
:param clientId: 客户端应用
:param redirectUri: 重定向地址
:param responseType: 响应类型
:param scope: 授权范围
:param style: 样式 自定义样式
:param loginName: 登录名
:param password: 密码
:returns: this is a description of what is returned
:raises keyError: raises an exception
"""
clientId = req.get("clientId")
redirectUri = req.get("redirectUri")
responseType = req.get("responseType")
scope = req.get("scope")
style = req.get("style")
loginName = req.get("loginName")
password = req.get("password")
session = x.data.orm.createSession()
# 获取当前用户信息
account = session.query(AccountInfo).filter(
text("loginName='" + loginName + "' and password='" + password + "'")).first()
if account is None:
if responseType is None:
res.message.returnCode = 1
res.message.value = u"帐号或者密码错误。"
return res
else:
# 如果响应类型
# TODO: 输出登录页面
pass
else:
# 检验是否有授权码
# cliendId account
authorizationCode = session.query(ConnectAuthorizationCodeInfo).filter(
text("appKey='" + clientId + "' and accountId='" + account.id + "'")).first()
# 如果不存在则新增授权码信息
if authorizationCode is None:
# 设置对象信息
authorizationCode = ConnectAuthorizationCodeInfo()
authorizationCode.id = str(uuid.uuid4())
authorizationCode.appKey = clientId
authorizationCode.accountId = account.id
authorizationCode.authorizationScope = scope == '' and "public" or scope
session.add(authorizationCode)
# 写数据库,但并不提交
session.flush()
session.commit()
# 设置访问令牌
# 设置会话信息
accessToken = session.query(ConnectAccessTokenInfo).filter(
text("appKey='" + clientId + "' and accountId='" + account.id + "'")).first()
# 如果不存在则新增授权码信息
if accessToken is None:
accessToken = ConnectAccessTokenInfo(id=str(uuid.uuid4()))
# 设置对象信息
accessToken.id = str(uuid.uuid4())
accessToken.appKey = clientId
accessToken.accountId = account.id
accessToken.authorizationScope = scope == '' and "public" or scope
session.add(accessToken)
# 写数据库,但并不提交
# session.flush()
session.commit()
# 记录日志
res.data = accessToken
res.message.returnCode = 0
res.message.value = u"验证成功。"
return res
print "authorize loginName:" + loginName + " password:" + password
res.message.returnCode = 0
res.message.value = u"执行成功。"
return res
def token(self, req, res=WebApiResponse()):
"""
获取令牌信息
:param code: 授权码信息
:returns: this is a description of what is returned
:raises keyError: raises an exception
"""
code = req.get("code")
session = x.data.orm.createSession()
authorizationCode = session.query(ConnectAuthorizationCodeInfo).filter_by(
id=code).first()
# 如果不存在则新增授权码信息
if authorizationCode is None:
res.message.returnCode = 1
res.message.value = "authorization code not find"
return res
accessToken = session.query(ConnectAccessTokenInfo).filter(
text("appKey='" + authorizationCode.appKey + "' and accountId='" + authorizationCode.accountId + "'")).first()
if accessToken is None:
res.message.returnCode = 1
res.message.value = "access code not find"
return res
return res
def refresh(self, req, res=WebApiResponse()):
""" 刷新令牌信息 """
print "token"
return "connect.auth.refresh"
def me(self, req, res=WebApiResponse()):
""" 当前用户信息 """
token = req.get("token")
session = x.data.orm.createSession()
accessToken = session.query(
ConnectAccessTokenInfo).filter_by(id=token).first()
if accessToken is None:
res.message.returnCode = 1
res.message.value = "people not find"
return res
else:
# 根据访问令牌返回当前湖用户
# IMemberInfo member =
# MembershipManagement.Instance.MemberService[accessTokenInfo.AccountId]
member = session.query(MemberInfo).filter_by(
id=accessToken.accountId).first()
if member is None:
res.message.returnCode = 1
res.message.value = "people not find"
return res
# 输出个人信息
res.data = member
res.message.returnCode = 0
res.message.value = "success"
return res
def ToPeopleJson(self, account):
""" 将人员信息格式化为特定格式 """
return {
"id": account.id,
"name": account.name,
"loginName": account.loginName,
# "certifiedAvatar": account.certifiedAvatar,
"status": account.status
}
|
[
"ruanyu@live.com"
] |
ruanyu@live.com
|
70b411ba66521bde662ff464e6ab782442fa0581
|
1508f7da93705839660e4fdfb87df7a9664bf087
|
/a10API/a10API/flask/bin/migrate
|
bff34539b04e8d820b8b866d8ef3ee3bbc9995fb
|
[] |
no_license
|
Younglu125/A10_Networks
|
1a1ecebb28dd225f6a1f901a7c28350300df356d
|
78a177ae4c8638d58dc873e4b1c589a1d5aaa717
|
refs/heads/master
| 2020-06-17T00:35:30.325740
| 2016-03-21T18:17:30
| 2016-03-21T18:17:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
#!/home/echou/a10API/flask/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'sqlalchemy-migrate==0.7.2','console_scripts','migrate'
# Auto-generated setuptools wrapper for the 'migrate' console script.
__requires__ = 'sqlalchemy-migrate==0.7.2'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Resolve the entry point, invoke it, and exit with its return code.
    sys.exit(
        load_entry_point('sqlalchemy-migrate==0.7.2', 'console_scripts', 'migrate')()
    )
|
[
"info@pythonicneteng.com"
] |
info@pythonicneteng.com
|
|
bee4f557d4eaf73b50b39fe6da8aff6cd3065efd
|
fc8ef4d310eaf76384cd1f29a45e65ffd1849a6b
|
/back-end/api/migrations/0004_article_source_name.py
|
371ef4c62f6d991ee5521f8b6c4f6db3a7a673c5
|
[] |
no_license
|
mykola829/webl_lab_3_4
|
70c6c52e23962741971efc65a5a15aed5822059f
|
c47f09a03157e71687294b7a61863d775c6e95e2
|
refs/heads/master
| 2022-09-13T02:27:17.381772
| 2020-06-04T11:57:55
| 2020-06-04T11:57:55
| 268,151,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Generated by Django 3.0.6 on 2020-06-03 08:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the source_name column to the Article model."""

    dependencies = [
        ('api', '0003_auto_20200603_1059'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='source_name',
            # default=0 only backfills existing rows; preserve_default=False
            # drops the default from the model definition afterwards.
            field=models.CharField(default=0, max_length=512),
            preserve_default=False,
        ),
    ]
|
[
"mykola829@gmail.com"
] |
mykola829@gmail.com
|
5c578a84b20bd789b433432dfab0e9c7bdd67379
|
b08a6adc56016a706d84752bcfb6d5bdf014f9fd
|
/easyocr/DBNet/assets/ops/dcn/functions/deform_pool.py
|
b4c9943cbc10212f1db23910dcafbd2a2d1b7435
|
[
"Apache-2.0"
] |
permissive
|
JaidedAI/EasyOCR
|
c83903d2f0ac2adfda89b35274e71a410f7d12e8
|
f947eaa36a55adb306feac58966378e01cc67f85
|
refs/heads/master
| 2023-08-08T08:34:28.434530
| 2023-07-04T12:44:09
| 2023-07-04T12:44:09
| 247,266,215
| 20,057
| 2,937
|
Apache-2.0
| 2023-09-12T22:16:00
| 2020-03-14T11:46:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,953
|
py
|
'''
Modified by Jaided AI
Released Date: 31/08/2022
Description:
- Add support for Deformable convolution operator on CPU for forward propagation.
- Change to Just-in-Time loading approach
'''
import os
import warnings
import torch
from torch.autograd import Function
from torch.utils import cpp_extension
# TODO - Jaided AI:
# 1. Find a better way to handle and support both Ahead-of-Time (AoT) and Just-in-Time (JiT) compilation.
# 2. Find a better way to report error to help pinpointing issues if there is any.
# Note on JiT and AoT compilation:
# This module supports both AoT and JiT compilation approaches. JiT is hardcoded as the default. If AoT compiled objects are present, it will supercede JiT compilation.
def custom_formatwarning(msg, *args, **kwargs):
    """Format a warning as just its message plus a trailing newline.

    Drops the file/line/category prefix that warnings.formatwarning would
    normally add; all extra positional/keyword arguments are ignored.
    """
    return '%s\n' % (msg,)
warnings.formatwarning = custom_formatwarning

# Directory containing the DCN extension sources (../src relative to this file).
dcn_dir = os.path.dirname(os.path.dirname(__file__))
try:
    # Prefer an Ahead-of-Time compiled CPU extension when one is present.
    from .. import deform_pool_cpu
    warnings.warn("Using precompiled deform_pool_cpu from {}".format(deform_pool_cpu.__file__))
    dcn_cpu_ready = True
except:
    try:
        # Fall back to Just-in-Time compilation of the CPU extension.
        warnings.warn("Compiling deform_pool_cpu ...")
        warnings.warn("(This may take a while if this module is loaded for the first time.)")
        deform_pool_cpu = cpp_extension.load(
            name="deform_pool_cpu",
            sources=[os.path.join(dcn_dir, 'src', "deform_pool_cpu.cpp"),
                     os.path.join(dcn_dir, 'src', "deform_pool_cpu_kernel.cpp")])
        warnings.warn("Done.")
        dcn_cpu_ready = True
    except Exception as error:
        # CPU support is optional: record the failure and keep loading.
        warnings.warn(' '.join([
            "Failed to import or compile 'deform_pool_cpu' with the following error",
            "{}".format(error),
            "Deformable convulution and DBNet will not be able to run on CPU."
        ]))
        dcn_cpu_ready = False

# The CUDA extension is only attempted when a GPU runtime is available.
if torch.cuda.is_available():
    try:
        from .. import deform_pool_cuda
        warnings.warn("Using precompiled deform_pool_cuda from {}".format(deform_pool_cuda.__file__))
        dcn_cuda_ready = True
    except:
        try:
            warnings.warn("Compiling deform_pool_cuda ...")
            warnings.warn("(This may take a while if this module is loaded for the first time.)")
            deform_pool_cuda = cpp_extension.load(
                name="deform_pool_cuda",
                sources=[os.path.join(dcn_dir, 'src', "deform_pool_cuda.cpp"),
                         os.path.join(dcn_dir, 'src', "deform_pool_cuda_kernel.cu")])
            warnings.warn("Done.")
            dcn_cuda_ready = True
        except Exception as error:
            warnings.warn(' '.join([
                "Failed to import or compile 'deform_pool_cuda' with the following error",
                "{}".format(error),
                "Deformable convulution and DBNet will not be able to run on GPU."
            ]))
            dcn_cuda_ready = False
class DeformRoIPoolingFunction(Function):
    """autograd Function for deformable RoI (position-sensitive) pooling.

    Forward dispatches to the CPU or CUDA extension based on the input
    tensor's device; backward is implemented for CUDA only.
    """

    @staticmethod
    def forward(ctx,
                data,
                rois,
                offset,
                spatial_scale,
                out_size,
                out_channels,
                no_trans,
                group_size=1,
                part_size=None,
                sample_per_part=4,
                trans_std=.0):
        """Pool `data` over `rois` with learned offsets.

        Returns a tensor of shape (num_rois, out_channels, out_size, out_size).
        Raises RuntimeError when the extension for the input's device is
        unavailable.
        """
        ctx.spatial_scale = spatial_scale
        ctx.out_size = out_size
        ctx.out_channels = out_channels
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        ctx.part_size = out_size if part_size is None else part_size
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std
        assert 0.0 <= ctx.trans_std <= 1.0

        n = rois.shape[0]
        output = data.new_empty(n, out_channels, out_size, out_size)
        output_count = data.new_empty(n, out_channels, out_size, out_size)
        if not data.is_cuda and dcn_cpu_ready:
            deform_pool_cpu.deform_psroi_pooling_cpu_forward(
                data, rois, offset, output, output_count, ctx.no_trans,
                ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
                ctx.part_size, ctx.sample_per_part, ctx.trans_std)
        elif data.is_cuda and dcn_cuda_ready:
            deform_pool_cuda.deform_psroi_pooling_cuda_forward(
                data, rois, offset, output, output_count, ctx.no_trans,
                ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
                ctx.part_size, ctx.sample_per_part, ctx.trans_std)
        else:
            # Bug fix: this branch previously read `input.device.type`, where
            # `input` is the Python builtin, so it raised AttributeError
            # instead of the intended RuntimeError. Use the tensor argument.
            device_ = data.device.type
            raise RuntimeError(
                "Input type is {}, but 'deform_conv_{}.*.so' is not imported successfully.".format(device_, device_),
            )

        if data.requires_grad or rois.requires_grad or offset.requires_grad:
            ctx.save_for_backward(data, rois, offset)
        ctx.output_count = output_count
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """CUDA-only backward pass; raises NotImplementedError on CPU."""
        if not grad_output.is_cuda:
            raise NotImplementedError("DCN operator for cpu for backward propagation is not implemented.")

        data, rois, offset = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        grad_rois = None
        grad_offset = torch.zeros_like(offset)

        deform_pool_cuda.deform_psroi_pooling_cuda_backward(
            grad_output, data, rois, offset, output_count, grad_input,
            grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
            ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
            ctx.trans_std)
        return (grad_input, grad_rois, grad_offset, None, None, None, None,
                None, None, None, None)


deform_roi_pooling = DeformRoIPoolingFunction.apply
|
[
"rkcosmos@gmail.com"
] |
rkcosmos@gmail.com
|
3ad99e3d7e9841da8f65b2003210f661dc96df4a
|
0296bc69a0d9608ed826ad7a719395f019df098f
|
/Tools/Compare_images.py
|
f4ba586d2dfa3fcae52e277676f2b4a82ffdf59a
|
[] |
no_license
|
jcn16/Blender_HDRmap_render
|
c0486a77e04c5b41a6f75f123dbdb3d10c682367
|
50e6cdb79fef83081de9830e7105dd425a235a9e
|
refs/heads/main
| 2023-07-19T22:22:53.622052
| 2021-08-20T06:29:10
| 2021-08-20T06:29:10
| 377,757,283
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
import cv2
import numpy as np
def tianchong(img):
    """Pad *img* with black borders into a square, then resize it to 512x512."""
    height, width = img.shape[0], img.shape[1]
    border = int(np.ceil(abs(height - width) / 2))
    if height > width:
        # Taller than wide: pad the left/right edges.
        squared = cv2.copyMakeBorder(img, 0, 0, border, border, cv2.BORDER_CONSTANT, value=(0, 0, 0))
    else:
        # Wider than tall (or square): pad the top/bottom edges.
        squared = cv2.copyMakeBorder(img, border, border, 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0))
    return cv2.resize(squared, (512, 512))
def compare():
    # Visual diff between a ray-traced render and its shading image, both
    # alpha-masked; squares the first image and mask to 512x512 first.
    image_1=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/raytracing.png')
    mask_1=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/alpha.png')
    image_1=tianchong(image_1)
    mask_1=tianchong(mask_1)
    image_2=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/shading.png')
    # NOTE(review): image_2 is not passed through tianchong, so shapes only
    # match if shading.png is already 512x512 -- confirm.
    # Normalise to [0, 1] and apply the alpha mask.
    image_1=image_1/255.0*mask_1/255.0
    image_2=image_2/255.0*mask_1/255.0
    cv2.imshow('image_1',np.asarray(image_1*255,dtype=np.uint8))
    cv2.imshow('image_2',np.asarray(image_2*255,dtype=np.uint8))
    # Clip the scaled difference into displayable uint8 range.
    res=np.asarray(np.clip((image_1-image_2)*255,0,255),dtype=np.uint8)
    cv2.imshow('res',res)
    cv2.waitKey(0)
def composite():
    # Relight a subject: albedo * shading, masked to the subject silhouette.
    shading=cv2.imread('/media/jcn/新加卷/JCN/RelightHDR/TEST/images_high_res/10/raytracing.png')
    albedo=cv2.imread('/home/jcn/桌面/Oppo/Results_albedo/10/p_albedo.png')
    mask=cv2.imread('/home/jcn/桌面/Oppo/Results_albedo/10/gt_mask.png')
    # All inputs normalised to [0, 1] before multiplication.
    relight=albedo/255.0*shading/255.0*mask/255.0
    relight=np.asarray(relight*255,dtype=np.uint8)
    cv2.imshow('relight',relight)
    cv2.waitKey(0)

if __name__=='__main__':
    compare()
|
[
"591599635@qq.com"
] |
591599635@qq.com
|
db365ccaef28c337a5d9c69e8c10f082020063ee
|
c940bcb25e1ed315263b25cbdac49cc4bf92cac1
|
/env/vkviewer/python/georef/georeferenceutils.py
|
92de981594a95d6365cfb3fdb3f7e7f015ad83b1
|
[] |
no_license
|
kwaltr/vkviewer
|
281a3f1b5b08a18a89f232ecd096cea44faca58b
|
01d64df0a9266c65e0c3fb223e073ef384281bdc
|
refs/heads/master
| 2021-01-16T22:09:41.821531
| 2014-02-07T17:19:04
| 2014-02-07T17:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,844
|
py
|
'''
Created on Oct 15, 2013
@author: mendt
'''
import subprocess
""" function: parseYSize
@param - imageFile {String} - path to a image file
@return - {Integer} - value which represents the y size of the file
This function parse the x,y size of a given image file """
def parseXYSize(imageFile):
    """Return the (x, y) pixel size of *imageFile*, parsed from gdalinfo output.

    :param imageFile: path to an image file readable by gdalinfo
    :returns: (x, y) as floats, or None implicitly if no 'Size is' line appears
    """
    # Security fix: pass an argument list with shell=False so a crafted file
    # name cannot inject shell commands (the old form interpolated the path
    # into a shell string). universal_newlines keeps stdout as text lines.
    response = subprocess.Popen(["gdalinfo", imageFile],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                universal_newlines=True)
    # Scan the console output line by line for the raster dimensions.
    for line in response.stdout:
        if 'Size is ' in line:
            x, y = line[8:].split(', ')
            return float(x), float(y)
""" Functions for getting the gcps. """
def getGCPsAsString(unorderedPixels, verzeichnispfad, georefCoords):
    """Format each ground control point pair as 'px py, gx gy'."""
    formatted = []
    for pixel_point, georef_point in getGCPs(unorderedPixels, verzeichnispfad, georefCoords):
        pixel_part = " ".join(str(value) for value in pixel_point)
        georef_part = " ".join(str(value) for value in georef_point)
        formatted.append(pixel_part + ", " + georef_part)
    return formatted
def getGCPs(unorderedPixels, verzeichnispfad, georefCoords):
    """Pair ordered pixel coordinates with their georeferenced coordinates.

    Pixel y values are flipped (ySize - y) because the image coordinate
    origin differs from the map coordinate origin.
    """
    # transformed the pixel coordinates to the georef coordinates by recalculating the y values,
    # because of a different coordinate origin
    transformedUnorderedPixels = []
    xSize, ySize = parseXYSize(verzeichnispfad)
    for tuple in unorderedPixels:
        transformedUnorderedPixels.append((tuple[0],ySize-tuple[1]))
    # now order the pixel coords so that there sorting represents the order llc, ulc, urc, lrc
    transformedOrderedPixels = orderPixels(transformedUnorderedPixels)
    # now create the gcp list
    try:
        # Pair each ordered pixel corner with the georef corner at the same
        # index; assumes georefCoords is also ordered llc, ulc, urc, lrc.
        gcpPoints = []
        for i in range(0,len(transformedOrderedPixels)):
            pixelPoints = (transformedOrderedPixels[i][0],transformedOrderedPixels[i][1])
            georefPoints = (georefCoords[i][0],georefCoords[i][1])
            gcpPoints.append((pixelPoints,georefPoints))
        return gcpPoints
    except:
        raise
def orderPixels(unorderdPixels):
    """Order four pixel-coordinate tuples as [llc, ulc, urc, lrc].

    Membership in the two smallest / two largest sorted x and y values decides
    which corner each point is. Only valid for pixel coordinates.

    @param unorderdPixels: list of 4 (x, y) tuples (clipping parameters)
    """
    xs_sorted = sorted(point[0] for point in unorderdPixels)
    ys_sorted = sorted(point[1] for point in unorderdPixels)
    ordered = [0, 0, 0, 0]
    for point in unorderdPixels:
        x_low = point[0] in (xs_sorted[0], xs_sorted[1])
        x_high = point[0] in (xs_sorted[2], xs_sorted[3])
        y_low = point[1] in (ys_sorted[0], ys_sorted[1])
        y_high = point[1] in (ys_sorted[2], ys_sorted[3])
        if x_low and y_high:
            ordered[0] = point      # lower-left corner
        elif x_low and y_low:
            ordered[1] = point      # upper-left corner
        elif x_high and y_low:
            ordered[2] = point      # upper-right corner
        elif x_high and y_high:
            ordered[3] = point      # lower-right corner
    return ordered
""" Functions for creating the commands for command line """
""" function: addGCPToTiff
@param - gcPoints {list of gcp} - list of ground control points
@param - srid {Integer} - epsg code of coordiante system
@param - srcPath {String}
@param - destPath {String}
@return - command {String}
Add the ground control points via gdal_translate to the src tiff file """
def addGCPToTiff(gcPoints, srs, srcPath, destPath):
    """Build the gdal_translate command that embeds ground control points.

    @param gcPoints: list of pre-formatted gcp strings ('px py, gx gy')
    @param srs: EPSG code of the coordinate system
    @param srcPath: source tiff path
    @param destPath: destination tiff path
    @return: the full command line as a string
    """
    parts = ["gdal_translate --config GDAL_CACHEMAX 500 -a_srs epsg:%s " % srs]
    for point in gcPoints:
        parts.append("-gcp " + str(point) + " ")
    parts.append(str(srcPath) + " " + str(destPath))
    return "".join(parts)
""" function: georeferenceTiff
@param - shapefilePath {String}
@param - srid {Integer} - epsg code of coordiante system
@param - srcPath {String}
@param - destPath {String}
@param - tyoe {String} - if 'fast' there is less compression
@return - command {String}
Georeferencing via gdalwarp """
def georeferenceTiff(shapefilePath, srid, srcPath, destPath, type=None):
    # NOTE(review): the multi-line literal keeps its source indentation inside
    # the command string; gdalwarp tolerates the repeated spaces. The `type`
    # parameter shadows the builtin -- renaming would change the interface.
    if type == 'fast':
        command = "gdalwarp --config GDAL_CACHEMAX 500 -wm 500 -overwrite -co TILED=YES -cutline %s \
            -crop_to_cutline -t_srs epsg:%s %s %s"%(shapefilePath,srid,srcPath,destPath)
        return command
    # Implicitly returns None for any other `type` value.
|
[
"jacobmendt@googlemail.com"
] |
jacobmendt@googlemail.com
|
a7072cf5db1b5527272336c6191bab4e1770b928
|
c840f190b3540bf212de2c70563e57da278fa9cb
|
/hyacinth.py
|
055e735da50162825883a5c29dfd69fcd0f7242d
|
[] |
no_license
|
edelooff/hyacinth
|
b768a871d476dd120f7d2d1acb039a6a9ebf2e19
|
0a6dd15fa1b1357afa566f924ad27b744582464b
|
refs/heads/master
| 2022-04-16T13:24:18.986246
| 2020-04-01T08:15:36
| 2020-04-01T08:15:36
| 251,756,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,538
|
py
|
from collections import (
Counter,
defaultdict)
import random
import re
import sys
DESIGN = re.compile(r'''
(?P<design>[A-Z])
(?P<size>[SL])
(?P<flowers>(:?\d+[a-z])*) # The specification is fuzzy on 1+ or 0+
(?P<total>\d+)''', re.VERBOSE)
DESIGN_FLOWER = re.compile(r'''
(?P<count>\d+)
(?P<species>[a-z])''', re.VERBOSE)
class Pool:
    """Shared pool of flowers of one size, feeding every registered designer."""

    def __init__(self):
        # Species required by at least one registered design.
        self.common_species = set()
        # All BouquetDesigner instances for this pool's flower size.
        self.designers = []
        # Multiset of currently available flowers, keyed by species letter.
        self.flowers = Counter()

    def add_designer(self, designer):
        """Adds a BouquetDesigner for the pool size.

        It also updates the set of known required species, allowing better
        picking of 'filler' flowers for requested bouquets.
        """
        self.designers.append(designer)
        self.common_species |= designer.required_flowers.keys()

    def add_flower(self, species):
        """Adds a flower of given species to the pool of available flowers."""
        self.flowers[species] += 1
        # Print the first bouquet that becomes completable after this flower.
        for designer in self.designers:
            if designer.add(species):
                print(self.create_bouquet(designer))

    def create_bouquet(self, designer):
        """Creates a bouquet according to the given designers design.

        After creating the bouquet, other designers are informed of the
        removal of flower species from the shared pool.
        """
        bouquet = designer.create(self.flowers, self.common_species)
        bouquet_string = designer.stringify_bouquet(bouquet)
        for bundle in bouquet.items():
            for designer in self.designers:
                designer.remove(*bundle)
        return bouquet_string
class BouquetDesigner:
    """Tracks availability for one bouquet design and assembles bouquets."""

    def __init__(self, design, flower_size, required_flowers, bouquet_size):
        # Design letter, e.g. 'A'.
        self.design = design
        # Flower size for this design: 'S' or 'L'.
        self.flower_size = flower_size
        # Total number of flowers in a finished bouquet.
        self.bouquet_size = bouquet_size
        # Mapping of species -> required quantity.
        self.required_flowers = required_flowers
        # How many free-choice ('filler') flowers complete the bouquet.
        self.filler_quantity = bouquet_size - sum(required_flowers.values())
        # Running availability counters maintained via add()/remove().
        self.available_filler = 0
        self.available_flowers = Counter()

    def add(self, species):
        """Adds a species of flower to the local availability cache.

        In addition. this will check whether a bouquet can be created based on
        the recently seen flowers. If one can be created, this returns True.
        """
        if species in self.required_flowers:
            self.available_flowers[species] += 1
        else:
            self.available_filler += 1
        return self.can_create()

    def can_create(self):
        """Checks whether there are enough flowers to create a bouquet.

        This will check if there is enough quantity of the required flowers and
        if so, will check if there is enough filler to create a full bouquet.
        """
        for flower, quantity in self.required_flowers.items():
            if self.available_flowers[flower] < quantity:
                return False
        available = sum(self.available_flowers.values(), self.available_filler)
        if available >= self.bouquet_size:
            return True
        return False

    def create(self, pool, common_species):
        """Returns a bouquet (species listing) assembled from the given pool.

        After picking the required flowers, if additional flowers are needed
        as filler, this method selects a sample of flowers from the rest of
        the pool in two steps:

        1. Species of flowers used by other BouquetDesigners are avoided so
           that selection for this bouquet causes the least conflict.
        2. A random sample of flowers is picked, to avoid consistently stealing
           from the same other designers. Randomly selecting also hopefully
           generates nice and pleasing outcomes for the recipient, though this
           hypothesis has not been tested in the least ;-)

        In all cases we bias to picking filler flowers that we have a surplus
        of. In an ideal world we would have a function that determines the
        correct bias to introduce here.
        """
        bouquet = Counter()
        for species, quantity in self.required_flowers.items():
            pool[species] -= quantity
            bouquet[species] += quantity
        # Pick the remaining flowers
        if self.filler_quantity:
            remaining = self.filler_quantity
            # First pass excludes species other designs need; the second pass
            # (empty exclusion set) may take anything left.
            for do_not_pick in (common_species, set()):
                population = []
                for species in pool.keys() ^ do_not_pick:
                    population.extend([species] * pool[species])
                sample_size = min(len(population), remaining)
                for species in random.sample(population, sample_size):
                    pool[species] -= 1
                    bouquet[species] += 1
                remaining -= sample_size
                if not remaining:
                    break
        return bouquet

    def remove(self, species, quantity):
        """Proceses removal of flowers from the flower pool.

        This will update either the cache for available required flowers, or
        if it's a species not -required- for this design, the filler count.
        """
        if species in self.required_flowers:
            self.available_flowers[species] -= quantity
        else:
            self.available_filler -= quantity

    def stringify_bouquet(self, bouquet):
        """Returns the formatted bouquet string for this designer."""
        flowers = sorted(bouquet.items())
        flowerstring = (f'{count}{species}' for species, count in flowers)
        return f'{self.design}{self.flower_size}{"".join(flowerstring)}'

    @classmethod
    def from_specification(cls, design):
        """Creates a BouquetDesigner instance from a string specification."""
        spec = DESIGN.match(design).groupdict()
        spec_flowers = DESIGN_FLOWER.findall(spec['flowers'])
        flowers = {species: int(count) for count, species in spec_flowers}
        return cls(spec['design'], spec['size'], flowers, int(spec['total']))
def read_until_empty(fp):
    """Yield stripped lines from *fp* until an empty line (or EOF) is hit."""
    while True:
        stripped = fp.readline().strip()
        if not stripped:
            return
        yield stripped
def main():
    """Read bouquet designs, then flowers, from stdin; print finished bouquets."""
    pools = defaultdict(Pool)
    # First stdin section: one design specification per line (e.g. 'AL2a3b10').
    for design in read_until_empty(sys.stdin):
        designer = BouquetDesigner.from_specification(design)
        pools[designer.flower_size].add_designer(designer)
    # Second section: one 2-char flower per line, e.g. 'aL' -> species 'a', size 'L'.
    for species, size in read_until_empty(sys.stdin):
        pools[size].add_flower(species)


if __name__ == '__main__':
    main()
|
[
"elmer.delooff@gmail.com"
] |
elmer.delooff@gmail.com
|
9f3846001c1c354cfeae9bb360ec909db95dbc28
|
3105edcc2326ed9d49c408833268453a942ba474
|
/web/modules/api/__init__.py
|
a164dbb5f0124fc34c059789ae5b31e2ac14489d
|
[] |
no_license
|
cgle/sumopromo
|
641e56a14654fbd9368f1653a0d5282726d3d533
|
1e395eaeeb44acaa23f2ffb63ad68b7ded0799cf
|
refs/heads/master
| 2019-08-01T02:54:44.736669
| 2017-03-31T00:32:26
| 2017-03-31T00:32:26
| 75,031,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
from flask import Blueprint

# API blueprint; importing the views module below registers its routes on bp.
bp = Blueprint('api', __name__)

from web.modules.api.views import *
|
[
"cglehh@gmail.com"
] |
cglehh@gmail.com
|
70c1632fe562644b920d482abf735a3bc08211cc
|
673d7a7b8c43523c459c661c2e360d4398a53b28
|
/pip_upgrade-runner.py
|
c2d4d9070ba92561c8da342417040194fe860ecc
|
[
"MIT"
] |
permissive
|
addisonElliott/pip_upgrade_outdated
|
19e3c16c5f8910cb20c142842dc2a992fd8801b7
|
2cf809ff9be2ab4070b75f5959ebcf21d9d34d82
|
refs/heads/master
| 2020-04-09T19:03:26.602029
| 2019-01-15T16:29:57
| 2019-01-15T16:29:57
| 160,532,458
| 0
| 0
|
MIT
| 2018-12-05T14:43:44
| 2018-12-05T14:43:43
| null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convenience wrapper for running pushover directly from source tree."""
from pip_upgrade_outdated.upgrade_pip_packages import main

# Script entry point: delegate straight to the package CLI.
if __name__ == '__main__':
    main()
|
[
"a.h.jaffe@gmail.com"
] |
a.h.jaffe@gmail.com
|
94d24bbab5fe5d3c8c83cd1748d41d53ea82f907
|
1f40d003bdba15086e0c2e7828398e3e8e6041e3
|
/robotics/fileread.py
|
a9bd74f6e7dc443010797d5ad2675de000633796
|
[] |
no_license
|
qwertpas/pythontests
|
0bb4982479f998625c0fd9d852df1ef66e3ada71
|
37fc2b253bf24d210364bacaf53f27347e9d29c1
|
refs/heads/master
| 2023-08-09T03:20:35.853236
| 2023-08-05T16:43:44
| 2023-08-05T16:43:44
| 177,456,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import re
def extract_nums(text):
    """Return every decimal number of the form d+.d+ in *text* as a numpy array.

    Integers without a decimal point are deliberately not matched.
    """
    float_pattern = re.compile(r'\d+\.\d+')
    values = [float(token) for token in float_pattern.findall(text)]
    return np.array(values)
with open('ece470/data.txt') as f:
    lines = f.readlines()

# Parse the log into per-trial dicts; a 'start kill' marker begins a new trial.
trials = []
for line in lines:
    if line == '\n':
        continue
    if 'start kill' in line:
        trials.append({})
        continue
    if 'actual pos' in line:
        nums = list(extract_nums(line))
        trials[-1]['actual'] = nums
    if 'detect pos' in line:
        nums = list(extract_nums(line))
        trials[-1]['detect'] = nums
    if 'camera err' in line:
        nums = list(extract_nums(line))
        trials[-1]['camera'] = nums

# Collect per-trial positions/errors and the segment joining each
# actual point to its detected point (for the line overlay below).
actuals = []
detects = []
cameras = []
lines = []
for trial in trials:
    actuals.append(trial['actual'])
    detects.append(trial['detect'])
    cameras.append(trial['camera'])
    lines.append((trial['actual'], trial['detect']))

actuals = np.array(actuals)
detects = np.array(detects)
cameras = np.array(cameras)

# Scatter actual vs detected positions plus the robot base at the origin.
fig, ax = plt.subplots()
ax.scatter(x=actuals[:,0], y=actuals[:,1], label='Actual position')
ax.scatter(x=detects[:,0], y=detects[:,1], label='Detected position')
ax.scatter(x=0, y=0, label='Robot base')

# lines = [[(0, 1), (1, 1)], [(2, 3), (3, 3)], [(1, 2), (1, 3)]]
lc = mc.LineCollection(lines, color='black', linewidths=1)
ax.add_collection(lc)
print(len(actuals))
ax.autoscale()

# Mean camera error across all trials, shown in the title.
accuracy = np.mean(cameras)
ax.set_title(f"Cockroach detection accuracy with average error: {np.round(accuracy, 3)} m")
ax.set_xlabel("Global X axis (m)")
ax.set_ylabel("Global Y axis (m)")
ax.legend()
plt.show()
|
[
"cyx3@illinois.edu"
] |
cyx3@illinois.edu
|
97c5e4d3a1702140c12048536f8ec60595290914
|
b2be86ae60e9698a3daf78fdedacac631dd60cab
|
/django/user/tests/test_models.py
|
b0d94ce44fa3f235d6c64af4738666ba937c104d
|
[] |
no_license
|
TigenTigen/do_you_know
|
351326388bb1ae9caffed246b8fcdcb8ba7af594
|
0c6a5b001243fafa1b8382f89ec8bf7aef640f2e
|
refs/heads/master
| 2020-06-13T13:26:29.402100
| 2019-08-07T06:27:37
| 2019-08-07T06:27:37
| 194,670,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,716
|
py
|
from django.test import TestCase
from user.factories import AdvUserFactory
from user.models import AdvUser, signer, dt_engine, mail
from django.urls import reverse
class TestAdvUserModel(TestCase):
    """Unit tests for the AdvUser model (naming, confirmation, email, ratings)."""

    def test_str_for_user_created_by_factory(self):
        user = AdvUserFactory(username='some_user')
        self.assertEqual(str(user), 'some_user')

    def test_save_for_user_created_by_social_auth(self):
        # save() rewrites social-auth style 'id...' usernames to 'first last'.
        user = AdvUser(username='id123456789', first_name='fn', last_name='ln', password='test_password')
        user.save()
        self.assertEqual(user.username, 'fn ln')

    def test_confirm_for_not_active_user(self):
        user = AdvUserFactory(is_active=False)
        self.assertFalse(user.is_active)
        user.confirm()
        self.assertTrue(user.is_active)

    def test_get_email_context(self):
        # The confirmation link embeds the signed username.
        user = AdvUserFactory()
        link = reverse('user:registration_confirmed', kwargs={'sign': signer.sign(user.username)})
        context = user.get_email_context()
        self.assertIsNotNone(context)
        self.assertIn('confirmation_link', str(context))
        self.assertIn(link, str(context))

    def test_send_confirmation_email(self):
        user = AdvUserFactory()
        link = reverse('user:registration_confirmed', kwargs={'sign': signer.sign(user.username)})
        # In-memory backend captures the outgoing message for inspection.
        connection = mail.get_connection(backend='django.core.mail.backends.locmem.EmailBackend')
        outbox = user.send_confirmation_email(connection)
        self.assertEqual(len(outbox), 1)
        self.assertEqual(outbox[0].subject, 'Подтверждение регистрации')
        self.assertIn(link, outbox[0].body)

    def create_users_for_test(self, number):
        # Helper (not a test): create `number` factory users, return the queryset.
        for i in range(number):
            user = AdvUserFactory()
        return AdvUser.objects.all()

    def test_social_count(self):
        users = self.create_users_for_test(10)
        for user in users:
            if user.social_auth.exists():
                self.assertNotEqual(user.social_count(), 0)
            else:
                self.assertEqual(user.social_count(), 0)

    def test_total_points_count(self):
        users = self.create_users_for_test(10)
        for user in users:
            if user.replies.exists():
                self.assertNotEqual(user.total_points_count(), 0)
            else:
                self.assertEqual(user.total_points_count(), 0)

    def test_get_points_rating_queryset_manager_with_users_and_no_replies(self):
        # With no replies, the rating queryset must be empty while all users exist.
        users = self.create_users_for_test(10)
        full_qs = AdvUser.objects.get_queryset()
        test_qs = AdvUser.objects.get_points_rating_queryset()
        self.assertEqual(full_qs.count(), 10)
        self.assertEqual(test_qs.count(), 0)
|
[
"Eleriya-25@yandex.ru"
] |
Eleriya-25@yandex.ru
|
1ea1be419f42ba190fe16b9772f1ad7bd9ddae47
|
afcb1837c0c50fd823964594780111d530a73f8e
|
/qtjsonschema/__main__.py
|
d267aee40ac8028b7f4edc49b45fb37004ea312c
|
[] |
no_license
|
ArtemioGiovanni/pyqtschema
|
332a60462db5ac2e113256c940b557d77e16c0cf
|
7d84d8754d039504a5905289a33574abe5318e89
|
refs/heads/master
| 2020-05-04T20:38:22.850664
| 2014-08-15T22:07:40
| 2014-08-15T22:07:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,848
|
py
|
#!/usr/bin/env python
"""
pyqtschema - Python Qt JSON Schema Tool
Generate a dynamic Qt form representing a JSON Schema.
Filling the form will generate JSON.
"""
from PyQt4 import QtCore, QtGui
from qtjsonschema.widgets import create_widget
class MainWindow(QtGui.QWidget):
    """Top-level window: a File menu plus a scrollable form generated from a JSON Schema."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle("PyQtSchema")
        # Menu bar layout:
        #   File
        #     Open
        #     Save
        #     --
        #     Close
        self.menu = QtGui.QMenuBar(self)
        self.file_menu = self.menu.addMenu("&File")
        _action_open = QtGui.QAction("&Open Schema", self)
        _action_open.triggered.connect(self._handle_open)
        _action_save = QtGui.QAction("&Save", self)
        _action_save.triggered.connect(self._handle_save)
        _action_quit = QtGui.QAction("&Close", self)
        _action_quit.triggered.connect(self._handle_quit)
        self.file_menu.addAction(_action_open)
        self.file_menu.addAction(_action_save)
        self.file_menu.addSeparator()
        self.file_menu.addAction(_action_quit)
        # Scrollable region that will host the schema-generated form.
        self.content_region = QtGui.QScrollArea(self)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.menu)
        vbox.addWidget(self.content_region)
        vbox.setContentsMargins(0,0,0,0)
        hbox = QtGui.QHBoxLayout()
        hbox.setContentsMargins(0,0,0,0)
        hbox.addLayout(vbox)
        self.setLayout(hbox)

    def process_schema(self, schema):
        """Load the JSON Schema at path *schema* and build the root form widget.

        OrderedDict preserves the schema's property order in the generated form.
        """
        import json
        import collections
        with open(schema) as f:
            _schema = json.loads(f.read(), object_pairs_hook=collections.OrderedDict)
        if "title" in _schema:
            self.setWindowTitle("%s - PyQtSchema" % _schema["title"])
        self.content_region.setWidget(create_widget(_schema.get("title", "(root)"), _schema))
        self.content_region.setWidgetResizable(True)

    def _handle_open(self):
        # File > Open: pick a JSON Schema file and rebuild the form from it.
        schema = QtGui.QFileDialog.getOpenFileName(self, 'Open Schema', filter="JSON Schema (*.schema *.json)")
        if schema:
            self.process_schema(schema)

    def _handle_save(self):
        # File > Save: serialize the current form state to a JSON file.
        import json
        obj = self.content_region.widget().to_json_object()
        outfile = QtGui.QFileDialog.getSaveFileName(self, 'Save JSON', filter="JSON (*.json)")
        if outfile:
            with open(outfile, 'w') as f:
                f.write(json.dumps(obj))

    def _handle_quit(self):
        # TODO: Check for unsaved changes before closing?
        self.close()
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the window,
    # and hand control to the Qt event loop until the window closes.
    import sys
    app = QtGui.QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    sys.exit(app.exec_())
|
[
"klange@yelp.com"
] |
klange@yelp.com
|
9b577e0397ab602bffa52d7290910ae2afb30a2d
|
19872c0f261100d3a7a3c770aa16ef719b7f397b
|
/PythonProjects/crawl_work/crawl_190605_51jop/nationwide_make_urls.py
|
b5e328efff01c1510da69677c7ba2fb1cb81e680
|
[] |
no_license
|
enjoqy/PythonProjects
|
b7951bd13c32ec40842e8c7f7a4b2a32929d3d8b
|
ae1a4b1a55a7906bb4dd78e8bd43d19decec48ba
|
refs/heads/master
| 2020-06-20T03:50:32.814009
| 2019-07-15T09:17:50
| 2019-07-15T09:17:50
| 184,964,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time: 2019/5/16 001612:11
# @Author: junhi
# java
# url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,java,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
# 平面设计
# url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,%25E5%25B9%25B3%25E9%259D%25A2,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
# 大数据
# url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
# 云计算
url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,%25E4%25BA%2591%25E8%25AE%25A1%25E7%25AE%2597,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
provinces = [
    '北京', '上海', '广东省', '深圳', '天津', '重庆', '江苏省', '浙江省',
    '四川省', '海南省', '福建省', '山东省', '江西省', '广西', '安徽省', '河北',
    '河南省', '湖北省', '湖南省', '陕西省', '山西省', '黑龙江省', '辽宁省', '吉林省',
    '云南省', '贵州省', '甘肃省', '内蒙古', '宁夏', '西藏', '新疆', '青海省',
]
def get_nationwide_urls():
    """Build one 51job search URL per province, keyed by province name.

    The template `url` starts with the area code "01" at string indexes
    30-31; each province's 1-based position in `provinces` is spliced over
    those digits (one character for 1-9, two for 10-32). Every generated
    URL is also printed, matching the original behaviour.
    """
    province_urls = {}
    for index, province in enumerate(provinces, start=1):
        if index <= 9:
            # Single digit: keep the leading '0' and overwrite only index 31.
            province_url = url[:31] + str(index) + url[32:]
        else:
            # Two digits: overwrite both area-code characters (30-31).
            province_url = url[:30] + str(index) + url[32:]
        print(province_url)
        province_urls[province] = province_url
    return province_urls
|
[
"gdlzhh321@163.com"
] |
gdlzhh321@163.com
|
3b42efb7734e3bf5050c3c3470912a7e738e57a2
|
456e964284c5e25bff5dd7df7361dd6e20b3ea96
|
/house_prices_competition.py
|
d3ec623731ca86a9398cbe1a73111ac24984088c
|
[] |
no_license
|
huangshizhi/kaggle
|
069d310909f1e45bd420791ab00405fe2e49a621
|
d754fd0c152461bf96e8553a8e1fd58b65b82cd6
|
refs/heads/master
| 2021-01-16T17:16:36.212132
| 2017-08-11T07:39:17
| 2017-08-11T07:39:17
| 100,005,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,569
|
py
|
# -*- coding: utf-8 -*-
"""
House-price regression walkthrough (Kaggle "House Prices" competition).
Created on Wed Aug 9 09:54:45 2017
@author: huangshizhi
Follows https://www.dataquest.io/blog/kaggle-getting-started/
"""
import pandas as pd
import numpy as np
# 1. Load the data (NOTE(review): absolute Windows paths — adjust per machine).
train = pd.read_csv(r'D:\kaggle\house_prices\data\train.csv')
test = pd.read_csv(r'D:\kaggle\house_prices\data\test.csv')
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
# 2. Explore the data: summary statistics; skew measures distribution asymmetry.
train.SalePrice.describe()
print ("Skew is:", train.SalePrice.skew())
plt.hist(train.SalePrice, color='blue')
plt.show()
# Log-transform the target to reduce the right skew.
target = np.log(train.SalePrice)
print ("Skew is:", target.skew())
plt.hist(target, color='blue')
plt.show()
# Extract numeric features only (Working with Numeric Features).
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
# Correlation matrix of the numeric features (comment in the source said
# "covariance", but .corr() computes Pearson correlation).
corr = numeric_features.corr()
print (corr['SalePrice'].sort_values(ascending=False)[:5], '\n')
print (corr['SalePrice'].sort_values(ascending=False)[-5:])
salePrice_top5 = corr['SalePrice'].sort_values(ascending=False)[:5]
salePrice_bottom5 = corr['SalePrice'].sort_values(ascending=False)[-5:]
# OverallQual: rating of the house's overall material and finish.
train.OverallQual.unique()
quality_pivot = train.pivot_table(index='OverallQual',
                  values='SalePrice', aggfunc=np.median)
quality_pivot.plot(kind='bar', color='blue')
plt.xlabel('Overall Quality')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()
# Above-ground living area vs. log price.
plt.scatter(x=train['GrLivArea'], y=target)
plt.ylabel('Sale Price')
plt.xlabel('Above grade (ground) living area square feet')
plt.show()
# Garage area vs. log price.
plt.scatter(x=train['GarageArea'], y=target)
plt.ylabel('Sale Price')
plt.xlabel('Garage Area')
plt.show()
# After removing garage-area outliers (>= 1200 sq ft).
train = train[train['GarageArea'] < 1200]
plt.scatter(x=train['GarageArea'], y=np.log(train.SalePrice))
plt.xlim(-200,1600) # This forces the same scale as before
plt.ylabel('Sale Price')
plt.xlabel('Garage Area')
plt.show()
# Handling Null Values: per-feature null counts, most-missing first.
nulls = pd.DataFrame(train.isnull().sum().sort_values(ascending=False))
nulls.columns = ['Null Count']
nulls.index.name = 'Feature'
# MiscFeature: miscellaneous feature not covered in other categories.
print ("Unique values are:", train.MiscFeature.unique())
# Extract categorical (non-numeric) features.
categoricals = train.select_dtypes(exclude=[np.number])
cate_desc = categoricals.describe()
print ("Original: \n")
print (train.Street.value_counts(), "\n")
# One-Hot Encoding of Street.
train['enc_street'] = pd.get_dummies(train.Street, drop_first=True)
# NOTE(review): the next line encodes *train*.Street into the test frame;
# it presumably should use test.Street — verify against the source tutorial.
test['enc_street'] = pd.get_dummies(train.Street, drop_first=True)
print ('Encoded: \n')
print (train.enc_street.value_counts())
condition_pivot = train.pivot_table(index='SaleCondition',
                    values='SalePrice', aggfunc=np.median)
condition_pivot.plot(kind='bar', color='blue')
plt.xlabel('Sale Condition')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()
# Binary-encode SaleCondition: 1 for 'Partial', 0 otherwise.
def encode(x): return 1 if x == 'Partial' else 0
train['enc_condition'] = train.SaleCondition.apply(encode)
test['enc_condition'] = test.SaleCondition.apply(encode)
condition_pivot = train.pivot_table(index='enc_condition', values='SalePrice', aggfunc=np.median)
condition_pivot.plot(kind='bar', color='blue')
plt.xlabel('Encoded Sale Condition')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()
# Fill missing numeric values by interpolation, then drop remaining NaNs.
data = train.select_dtypes(include=[np.number]).interpolate().dropna()
# Sanity check: no nulls remain.
sum(data.isnull().sum() != 0)
# 3. Build the regression model and train on a 67/33 split.
y = np.log(train.SalePrice)
X = data.drop(['SalePrice', 'Id'], axis=1)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
                                    X, y, random_state=42, test_size=.33)
# Fit a plain linear regression as the baseline model.
from sklearn import linear_model
lr = linear_model.LinearRegression()
model = lr.fit(X_train, y_train)
print ("R^2 is: \n", model.score(X_test, y_test))
predictions = model.predict(X_test)
from sklearn.metrics import mean_squared_error
print ('RMSE is: \n', mean_squared_error(y_test, predictions))
actual_values = y_test
plt.scatter(predictions, actual_values, alpha=.75,color='b')
#plt.plot(X, y_rbf, color='black', lw=lw, label='RBF model')
#alpha helps to show overlapping data
plt.xlabel('Predicted Price')
plt.ylabel('Actual Price')
plt.title('Linear Regression Model')
plt.show()
# Sweep Ridge regularization strength over alpha = 0.01 .. 100.
for i in range (-2, 3):
    alpha = 10**i
    rm = linear_model.Ridge(alpha=alpha)
    ridge_model = rm.fit(X_train, y_train)
    preds_ridge = ridge_model.predict(X_test)
    plt.scatter(preds_ridge, actual_values, alpha=.75, color='b')
    plt.xlabel('Predicted Price')
    plt.ylabel('Actual Price')
    plt.title('Ridge Regularization with alpha = {}'.format(alpha))
    overlay = 'R^2 is: {}\nRMSE is: {}'.format(
                ridge_model.score(X_test, y_test),
                mean_squared_error(y_test, preds_ridge))
    plt.annotate(s=overlay,xy=(12.1,10.6),size='x-large')
    plt.show()
# 4. Produce the submission file (predictions are exp'd back to price space).
submission = pd.DataFrame()
submission['Id'] = test.Id
feats = test.select_dtypes(
        include=[np.number]).drop(['Id'], axis=1).interpolate()
predictions = model.predict(feats)
final_predictions = np.exp(predictions)
submission['SalePrice'] = final_predictions
submission.head()
submission.to_csv('D:\kaggle\house_prices\submission1.csv', index=False)
|
[
"noreply@github.com"
] |
noreply@github.com
|
969e32c9641599d2ac6f429333a9a104f34dff93
|
6a33819de8b7aae1388f94dd0142819693da5cdf
|
/src/kitchen/auth_backend.py
|
73d1663bd8fa0f64f9a2fe2bf8ad9e972e0a97eb
|
[] |
no_license
|
goudete/clustr
|
9ffa7deb9cec4492a7f65c888287976bdbd267f9
|
0a6904c7f29a3341fef55933cf7d73c8326fdb33
|
refs/heads/master
| 2023-04-06T06:39:52.048484
| 2020-09-19T14:26:27
| 2020-09-19T14:26:27
| 289,341,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
from django.contrib.auth.backends import ModelBackend
from .models import Kitchen
from django.contrib.auth.models import User
from restaurant_admin.models import Restaurant
class PasswordlessAuthBackend(ModelBackend):
    """Log in to Django without a password, using only a kitchen login number.

    ``authenticate`` matches a Restaurant by its ``kitchen_login_no``;
    ``get_user`` resolves the session's stored primary key back to a User.
    Both return None on failure, as Django's backend contract requires.
    """

    def authenticate(self, request, login_number=None):
        # filter().first() already returns None when nothing matches, so no
        # exception handling is needed. (The original wrapped this in
        # `except rest.DoesNotExist`, which .first() never raises and which
        # would itself NameError if the query failed before `rest` was bound.)
        return Restaurant.objects.filter(kitchen_login_no=login_number).first()

    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            # Was `Kitchen.DoesNotExist`: each Django model's DoesNotExist is a
            # distinct class, so User.DoesNotExist would have escaped uncaught.
            return None
|
[
"rfitch@oxy.edu"
] |
rfitch@oxy.edu
|
0a8927cc6f6bdd664bb45f44bde260086ecb6f86
|
14dd1fd527bb7e30abd2e4ee64ffb34fe84f1e06
|
/jzc/postgresInsert.py
|
ff33ab9255a1b1887bfe3ed360a277bce6e8efcf
|
[] |
no_license
|
wdwoodee/luozt
|
7a78e7e66f9bc51159f6f03ca4702f4db17dc580
|
6a4bc86969f6f2db24d9d7d1446e4a4f97846165
|
refs/heads/master
| 2021-05-04T04:46:30.812814
| 2016-10-14T08:30:19
| 2016-10-14T08:30:19
| 70,889,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
# Bulk-load IP records from test1ip.csv into Postgres via the
# saveoneiprecode_ip2mac_x64 stored function, committing every 5 rows.
#_DEBUG = True
import psycopg2
import pdb
try:
    conn = psycopg2.connect(database="workspace2", user="postgres", password="postgres", host="127.0.0.1", port="54321")
except Exception:
    # NOTE(review): on connect failure this only prints; the next line still
    # runs and conn is unbound, raising NameError at conn.cursor().
    print('Get a exception: Connect failed')
print("successfully")
cur = conn.cursor()
try:
    errLine = 0  # count of skipped/failed CSV lines
    with open("test1ip.csv") as f:
        i = 0  # count of successfully inserted rows
        while True:
            ln = f.readline()
            if not ln:
                break
            items = ln.split(',')
            # Skip the header row.
            if(items[0]=='LAN Segment'):
                continue
            # Expect exactly 13 comma-separated fields per data row.
            if len(items) != 13:
                errLine += 1
                continue
            #pdb.set_trace()
            # Map the textual discovery-source column to its numeric code.
            if items[10] == "Device Interface":
                items[10]=9
            elif items[10] == "ARP Table":
                items[10]=7
            elif items[10] == "CDP/LLDP Table":
                items[10]=8
            elif items[10] == "MAC Table":
                items[10]=6
            elif items[10] == "Manual":
                items[10]=1
            else :
                items[10]=0
            #if ( not items[8].isnumeric()) or ( not items[10].isnumeric()) or ( not items[11].isnumeric()) :
            #    pdb.set_trace()
            #    errLine += 1
            #    continue
            try:
                # NOTE(review): SQL is built by %-interpolation of CSV fields —
                # injection-prone; prefer cur.execute(sql, params) placeholders.
                sqlinsert = "select saveoneiprecode_ip2mac_x64 ( false, '%s', '%s', '', '%s', '%s', '%s', '%s', '%s', '%s', %s, %s, %s)" % (items[9], items[0], items[1], items[2], items[4], items[5], items[6], items[7], items[8], items[10], items[11])
                cur.execute(sqlinsert)
            except Exception:
                # NOTE(review): print with a comma does not interpolate %s here;
                # it prints a tuple of the format string and the SQL text.
                print("Insert Err:%s",sqlinsert)
                errLine += 1
                continue
            i += 1
            # Commit in batches of 5 successful inserts.
            if ( (i%5)==0 ):
                #pdb.set_trace()
                conn.commit()
    conn.commit()
    print ("Complete: insert %d , err %d" %( i, errLine) );
    conn.close()
except Exception:
    print('Get a exception')
|
[
"18511246771@163.com"
] |
18511246771@163.com
|
6fa3e92bc057c995f58b43c06e0f64afa615f900
|
9a5c5ead8471c7bb71fe90429651b9d863ee2474
|
/bots/dayandtime.py
|
4d69b928ec7f213382d296b3b691500461877e14
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mwaiton/TwitterBots
|
12af71e19895a53791d12b7a788a508752b0f1d9
|
3c641690c5047a1bbbbf3c47bb90256b7cf4f271
|
refs/heads/master
| 2023-02-08T09:12:44.300477
| 2020-12-30T02:56:04
| 2020-12-30T02:56:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
import calendar
from datetime import datetime
"""Python program to find day of the week for a given date.
Also finds current date, which can be used to find the day.
"""
# To find a day of the week.
def find_day(date):
    """Return the weekday name (e.g. 'Wednesday') for an ISO 'YYYY-MM-DD' date string."""
    parsed = datetime.strptime(date, '%Y-%m-%d')
    return calendar.day_name[parsed.weekday()]
# To find today's date.
def today():
return str(datetime.date(datetime.now()))
if __name__ == '__main__':
    # Demo: print the weekday name for the current date.
    d = today()
    print("Today is", find_day(d))
|
[
"bexxmodd@seas.upenn.edu"
] |
bexxmodd@seas.upenn.edu
|
e6797d47ad8c18d5fc26593670c8cb8c8f0cdbd6
|
21ba6d42f7f26779322a149d0af25f3acbb07682
|
/Repository/Labelling/Automatically Labelled News/generate_auto_label.py
|
dbaca55e363df0a7a2b3423c3038b8663244f622
|
[] |
no_license
|
Bolanle/G54MIP
|
6f1fa37b1a9da0477c3b22c9f957cbaf2249b764
|
02913adc86088bbbdab23c6d508a7c91bcb0d110
|
refs/heads/master
| 2021-01-18T16:41:17.783175
| 2015-05-07T10:31:19
| 2015-05-07T10:31:19
| 24,644,928
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,177
|
py
|
from collections import defaultdict
from segmentation import SegmentCreator
from segmentation import DataPoint
import os, datetime, calendar
from xml.etree import ElementTree
from scipy.stats import pearsonr
import pandas
import matplotlib.pyplot as plot
import matplotlib.ticker as ticker
import numpy as np
def _get_y_from_eqn(start_index_point:DataPoint, end_index_point:DataPoint, x_value):
    """Linearly interpolate the y value at *x_value* on the segment between the two points."""
    rise = end_index_point.get_y() - start_index_point.get_y()
    run = end_index_point.get_x() - start_index_point.get_x()
    # Two-point line equation, same operation order as the original:
    # y = (rise * (x - x0)) / run + y0
    return (rise * (x_value - start_index_point.get_x())) / run + start_index_point.get_y()
def get_trend(start_point, end_point):
    """Classify a segment's direction as "up", "down", or "neutral" (flat)."""
    delta = end_point.get_y() - start_point.get_y()
    if delta == 0:
        return "neutral"
    return "up" if delta > 0 else "down"
def get_news():
    """Parse every labelled news XML file in the current directory.

    Returns a dict mapping company name (file name minus '.xml') to the
    parsed XML root element. Files whose name contains 'unsure' are skipped.
    """
    news_dir = "./"
    return {
        fname.replace(".xml", ''): ElementTree.parse(news_dir + fname).getroot()
        for fname in os.listdir(news_dir)
        if ".xml" in fname and 'unsure' not in fname
    }
def get_data(path, draw=False):
    """Segment the price series in the CSV at *path* (segment parameter 2)."""
    return SegmentCreator(draw=draw).create_segments(path, 2)
def map_segments_to_company():
    """Segment every CSV under ../../Stock Data/, keyed by company name.

    The company name is the file name with its '.csv' suffix removed.
    """
    data_dir = "../../Stock Data/"
    return {
        fname.replace(".csv", ''): get_data(data_dir + fname)
        for fname in os.listdir(data_dir)
        if '.csv' in fname
    }
def get_segment_for_news_release(date, company_trends):
    """Return the (start, end) trend points whose date range contains *date*.

    All dates are 'DD/MM/YYYY' strings; boundaries are inclusive.
    Returns (0, 0) when no segment contains the date.
    """
    def _parse(d):
        return datetime.datetime.strptime(d, "%d/%m/%Y")
    target = _parse(date)
    for start, end in zip(company_trends, company_trends[1:]):
        if _parse(start.get_date()) <= target <= _parse(end.get_date()):
            return start, end
    return 0, 0
def auto_generate(news, company_trends, company_name, dates):
    """Auto-label each news article with the stock trend on its release date.

    Sets a 'progress_sentiment' attribute ('u'/'d'/'n') on every article whose
    release date falls inside *dates*, writes the labelled XML back to
    "<company_name>.xml", then returns the per-day sentiment totals sorted by
    date string (lexicographic 'DD/MM/YYYY' order — NOTE(review): that is not
    chronological order across months; confirm this is intended).
    """
    progress_sentiment_from_number = dict(up='u', down='d', neutral='n')
    progress_sentiment_to_num = dict(up=1, down=-1, neutral=0)
    progress_sentiment_trend = defaultdict(int)
    for news_article in news:
        # Normalize e.g. "Jan 5, 2015 9:30 AM ET" to "05/01/2015".
        date_of_news_release = news_article.get('datetime')
        date_of_news_release = date_of_news_release.replace(' ET', '')
        date_of_news_release = datetime.datetime.strptime(date_of_news_release, '%b %d, %Y %I:%M %p').strftime(
            '%d/%m/%Y')
        if date_of_news_release in dates:
            start, end = get_segment_for_news_release(date_of_news_release, company_trends)
            trend = get_trend(start, end)
            news_article.attrib['progress_sentiment'] = progress_sentiment_from_number[trend]
            try:
                # Best-effort cleanup: older files may carry a manual label.
                news_article.attrib.pop('feeling_sentiment')
            except:
                pass
            progress_sentiment_trend[date_of_news_release] += progress_sentiment_to_num[trend]
    # Persist the auto-labelled articles next to the script.
    ElementTree.ElementTree(news).write("{}.xml".format(company_name), xml_declaration=True)
    # Fill days with no net sentiment: default +1, except -1 for IBM
    # (NOTE(review): special-casing "ibm" looks like a tuning choice — confirm).
    for day in dates:
        if not progress_sentiment_trend[str(day)] and not company_name == "ibm":
            progress_sentiment_trend[str(day)] = 1
        elif not progress_sentiment_trend[str(day)] and company_name == "ibm":
            progress_sentiment_trend[str(day)] = -1
    sorted_progress = sorted(progress_sentiment_trend)
    dateless_progress_trend = []
    for day in sorted_progress:
        dateless_progress_trend.append(progress_sentiment_trend[day])
    return dateless_progress_trend
def get_projected_prices(segmented_price:list):
    """Expand a piecewise-linear segmentation into one DataPoint per x step.

    For each consecutive pair of segment endpoints, interpolates a point at
    every integer x in [start.x, end.x); the final endpoint is appended so the
    series is closed. The result is sorted by (x, y).
    """
    prices = []
    for i in range(len(segmented_price) - 1):
        start = segmented_price[i]
        end = segmented_price[i + 1]
        for x_value in range(start.get_x(), end.get_x()):
            prices.append(DataPoint(x_value, _get_y_from_eqn(start, end, x_value)))
    prices.append(segmented_price[-1])
    prices.sort(key=lambda x: (x.get_x(), x.get_y()))
    return prices
def aggregate_sentiment(trend):
    """Return the running (cumulative) sums of *trend* as a list.

    Equivalent to [sum(trend[:i+1]) for i in range(len(trend))], but computed
    in a single O(n) pass instead of the original O(n^2) prefix-slice sums.
    Returns [] for empty input.
    """
    from itertools import accumulate
    return list(accumulate(trend))
if __name__ == '__main__':
    # Driver: correlate each company's auto-generated news sentiment with its
    # projected (piecewise-linear) and actual price series.
    news = get_news()
    stock_price = map_segments_to_company()
    for company in stock_price.keys():
        print("*******************{0}****************".format(company))
        projected_prices = get_projected_prices(stock_price[company])
        file_csv = pandas.read_csv("../../Stock Data/{0}.csv".format(company))
        # Column 0 is assumed to be the date, column 2 the actual price
        # (positional access — TODO confirm against the CSV layout).
        data = file_csv[file_csv.columns[2]].tolist()
        progress_trend = auto_generate(news[company], stock_price[company], company_name=company,
                                       dates=file_csv[file_csv.columns[0]].tolist())
        correlation, pvalue = pearsonr([price.get_y() for price in projected_prices],
                                       aggregate_sentiment(progress_trend))
        print("Projected correlation coefficient (progress)", correlation)
        correlation, pvalue = pearsonr(data, aggregate_sentiment(progress_trend))
        print("Actual correlation coefficient (progress)", correlation)
        # Plotting is disabled: the list below is empty. Add company names to
        # it to render the projected-price chart for those companies.
        if company in []:
            dates = file_csv[file_csv.columns[0]].tolist()
            # next we'll write a custom formatter
            N = len(dates)
            ind = np.arange(N)  # the evenly spaced plot indices
            def format_date(x, pos=None):
                # Clamp the tick position into range, then re-render the
                # 'DD/MM/YYYY' date as ISO 'YYYY-MM-DD' for the axis label.
                thisind = np.clip(int(x + 0.5), 0, N - 1)
                return datetime.datetime.strptime(dates[thisind], '%d/%m/%Y').strftime(
                    '%Y-%m-%d')
            fig, ax = plot.subplots()
            #ax.plot(ind, data, 'o-')
            ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
            fig.autofmt_xdate()
            plot.plot(ind, [price.get_y() for price in projected_prices], linewidth=4)
            #plot.plot(ind, aggregate_sentiment(progress_trend))
            plot.title(company)
            plot.show()
|
[
"onifade.esther@gmail.com"
] |
onifade.esther@gmail.com
|
034f01e6a2963900bc368fa59500b9e91af70e91
|
5504066c264a31a301b347858f0e6dd8db0fdccc
|
/docs/jliu118/reg_s3617_50um.py
|
b5eb98dec3120d2d09d8b29a13a3604dff2502da
|
[] |
no_license
|
neurodata-cobalt/cobalt
|
30fb656c851b56144b1d131e2028b5537bac8da0
|
f966200d09d03a75ff9f56ab5c08b03b7bc3aadb
|
refs/heads/master
| 2021-03-22T04:37:04.196672
| 2018-06-15T14:10:42
| 2018-06-15T14:10:42
| 102,608,162
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,956
|
py
|
from ndreg import *
import matplotlib
#import ndio.remote.neurodata as neurodata
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
from NeuroDataResource import NeuroDataResource
import pickle
import numpy as np
from requests import HTTPError
import time
import configparser
# Load BOSS credentials and set up the remote plus atlas reference constants.
startTime = time.time()
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
# Assume a valid configuration file exists at .keys/intern.cfg.
cfg_file = '.keys/intern.cfg'
if cfg_file.startswith('~'):
    cfg_file = os.path.expanduser('~') + cfg_file[1:]
config = configparser.ConfigParser()
# BUGFIX: the original called file(cfg_file) — the file() builtin was removed
# in Python 3 (NameError). Use open() in a context manager so the handle is
# also closed deterministically.
with open(cfg_file) as _cfg_handle:
    config.read_file(_cfg_handle)
TOKEN = config['Default']['token']
rmt = BossRemote(cfg_file_or_dict=cfg_file)
# Allen atlas reference volume (50 um sagittal average template).
REFERENCE_COLLECTION = 'ara_2016'
REFERENCE_EXPERIMENT = 'sagittal_50um'
REFERENCE_COORDINATE_FRAME = 'ara_2016'
REFERENCE_CHANNEL = 'average_50um'
# Set/Modify these parameters
REFERENCE_RESOLUTION = 0
REFERENCE_ISOTROPIC = True
# copied code from ndreg because for some reason it wasn't working
def setup_experiment_boss(remote, collection, experiment):
    """Fetch a BOSS experiment and its coordinate frame.

    Returns (experiment_resource, coordinate_frame_resource), or None
    (implicitly) when the remote lookup fails with an HTTP error.
    """
    exp_setup = ExperimentResource(experiment, collection)
    try:
        exp_actual = remote.get_project(exp_setup)
        coord_setup = CoordinateFrameResource(exp_actual.coord_frame)
        coord_actual = remote.get_project(coord_setup)
        return (exp_setup, coord_actual)
    except HTTPError as e:
        # BUGFIX: exceptions have no .message attribute in Python 3
        # (it raised AttributeError and masked the real error); print
        # the exception itself instead.
        print(e)
def setup_channel_boss(remote, collection, experiment, channel, channel_type='image', datatype='uint16'):
    """Fetch a BOSS channel together with its experiment and coordinate frame.

    Returns (experiment_resource, coordinate_frame_resource, channel_resource),
    or None (implicitly) when the channel lookup fails with an HTTP error.
    """
    (exp_setup, coord_actual) = setup_experiment_boss(remote, collection, experiment)
    chan_setup = ChannelResource(channel, collection, experiment, channel_type, datatype=datatype)
    try:
        chan_actual = remote.get_project(chan_setup)
        return (exp_setup, coord_actual, chan_actual)
    except HTTPError as e:
        # BUGFIX: exceptions have no .message attribute in Python 3
        # (it raised AttributeError and masked the real error).
        print(e)
def imgDownload_boss(remote, channel_resource, coordinate_frame_resource, resolution=0, size=[], start=[], isotropic=False):
    """
    Download an image volume from the BOSS remote at the given resolution and
    return it as a SimpleITK image with spacing converted to millimeters.

    NOTE(review): the mutable defaults size=[]/start=[] are only reassigned,
    never mutated, so they are safe here — but None would be the safer idiom.
    """
    # TODO: Fix size and start parameters
    # Convert the coordinate frame's voxel unit to a millimeter scale factor.
    voxel_unit = coordinate_frame_resource.voxel_unit
    voxel_units = ('nanometers', 'micrometers', 'millimeters', 'centimeters')
    factor_divide = (1e-6, 1e-3, 1, 10)
    fact_div = factor_divide[voxel_units.index(voxel_unit)]
    spacingBoss = [coordinate_frame_resource.x_voxel_size, coordinate_frame_resource.y_voxel_size, coordinate_frame_resource.z_voxel_size]
    spacing = [x * fact_div for x in spacingBoss] # Convert spacing to mm
    # Each resolution level halves sampling; scale spacing accordingly.
    if isotropic:
        spacing = [x * 2**resolution for x in spacing]
    else:
        spacing[0] = spacing[0] * 2**resolution
        spacing[1] = spacing[1] * 2**resolution
        # z spacing unchanged since not isotropic
    if size == []: size = get_image_size_boss(coordinate_frame_resource, resolution, isotropic)
    if start == []: start = get_offset_boss(coordinate_frame_resource, resolution, isotropic)
    #size[2] = 200
    #dataType = metadata['channels'][channel]['datatype']
    dataType = channel_resource.datatype
    # Download all image data from specified channel
    array = remote.get_cutout(channel_resource, resolution, [start[0], size[0]], [start[1], size[1]], [start[2], size[2]])
    # Cast downloaded image to server's data type
    # img = sitk.Cast(sitk.GetImageFromArray(array),ndToSitkDataTypes[dataType]) # convert numpy array to sitk image
    # Forced uint16 cast regardless of the channel's declared datatype —
    # presumably intentional for this pipeline; verify for other channels.
    img = sitk.Cast(sitk.GetImageFromArray(array),sitk.sitkUInt16) # convert numpy array to sitk image
    # Reverse axes order
    #img = sitk.PermuteAxesImageFilter().Execute(img,range(dimension-1,-1,-1))
    img.SetDirection(identityDirection)
    img.SetSpacing(spacing)
    # Convert to 2D if only one slice
    img = imgCollaspeDimension(img)
    return img
# --- Download the atlas reference volume and clamp its intensity range. ---
(ref_exp_resource, ref_coord_resource, ref_channel_resource) = setup_channel_boss(rmt, REFERENCE_COLLECTION, REFERENCE_EXPERIMENT, REFERENCE_CHANNEL)
refImg = imgDownload_boss(rmt, ref_channel_resource, ref_coord_resource, resolution=REFERENCE_RESOLUTION, isotropic=REFERENCE_ISOTROPIC)
refThreshold = imgPercentile(refImg, 0.99)
# --- Download the matching atlas annotation (label) volume. ---
REFERENCE_ANNOTATION_COLLECTION = 'ara_2016'
REFERENCE_ANNOTATION_EXPERIMENT = 'sagittal_50um'
REFERENCE_ANNOTATION_COORDINATE_FRAME = 'ara_2016'
REFERENCE_ANNOTATION_CHANNEL = 'annotation_50um'
REFERENCE_ANNOTATION_RESOLUTION = REFERENCE_RESOLUTION
REFERENCE_ANNOTATION_ISOTROPIC = True
(refAnnotation_exp_resource, refAnnotation_coord_resource, refAnnotation_channel_resource) = setup_channel_boss(rmt, REFERENCE_ANNOTATION_COLLECTION, REFERENCE_ANNOTATION_EXPERIMENT, REFERENCE_ANNOTATION_CHANNEL)
refAnnotationImg = imgDownload_boss(rmt, refAnnotation_channel_resource, refAnnotation_coord_resource, resolution=REFERENCE_ANNOTATION_RESOLUTION, isotropic=REFERENCE_ANNOTATION_ISOTROPIC)
# Random colormap for visualizing annotation labels (index 0 kept black).
randValues = np.random.rand(1000,3)
randValues = np.concatenate(([[0,0,0]],randValues))
randCmap = matplotlib.colors.ListedColormap(randValues)
# Remove missing parts of the brain: zero out these atlas region IDs.
remove_regions = [507, 212, 220, 228, 236, 244, 151, 188, 196, 204]
refAnnoImg = sitk.GetArrayFromImage(refAnnotationImg)
remove_indices = np.isin(refAnnoImg, remove_regions)
refAnnoImg[remove_indices] = 0
# Rebuild the annotation image from the edited array, preserving spacing.
refAnnoImg_adj = sitk.GetImageFromArray(refAnnoImg)
refAnnoImg_adj.SetSpacing(refAnnotationImg.GetSpacing())
refAnnotationImg = refAnnoImg_adj
# adjust atlas with corresponding indices (currently disabled)
# refImg_adj = sitk.GetArrayFromImage(refImg)
# refImg_adj[remove_indices] = 0
# refImg_adj = sitk.GetImageFromArray(refImg_adj)
# refImg_adj.SetSpacing(refImg.GetSpacing())
# refImg = refImg_adj
# --- Download the sample (input) image. ---
# Modify these parameters for your specific experiment.
SAMPLE_COLLECTION = 'ailey-dev'
SAMPLE_EXPERIMENT = 's3617'
SAMPLE_COORDINATE_FRAME = 'aileydev_s3617'
SAMPLE_CHANNEL = 'channel1'
SAMPLE_RESOLUTION = 4
SAMPLE_ISOTROPIC = False
sample_exp_resource, sample_coord_resource, sample_channel_resource = setup_channel_boss(rmt, SAMPLE_COLLECTION, SAMPLE_EXPERIMENT, SAMPLE_CHANNEL)
sampleImg = imgDownload_boss(rmt, sample_channel_resource, sample_coord_resource, resolution=SAMPLE_RESOLUTION, isotropic=SAMPLE_ISOTROPIC)
sampleThreshold = imgPercentile(sampleImg, .999)
# --- Reorient the sample into the atlas orientation. ---
# modify sampleOrient based on your image orientation
sampleOrient = "RPI"
refOrient = "ASR"
sampleImgReoriented = imgReorient(sampleImg, sampleOrient, refOrient)
# --- Downsample both volumes to a common isotropic spacing. ---
DOWNSAMPLE_SPACING = 0.010 # millimeters
spacing = [DOWNSAMPLE_SPACING,DOWNSAMPLE_SPACING,DOWNSAMPLE_SPACING]
refImg_ds = sitk.Clamp(imgResample(refImg, spacing), upperBound=refThreshold)
sampleImg_ds = sitk.Clamp(imgResample(sampleImgReoriented, spacing), upperBound=sampleThreshold)
sampleImgSize_reorient = sampleImgReoriented.GetSize()
sampleImgSpacing_reorient= sampleImgReoriented.GetSpacing()
# --- Affine Registration (mutual information metric). ---
affine = imgAffineComposite(sampleImg_ds, refImg_ds, iterations=200, useMI=True, verbose=True)
sampleImg_affine = imgApplyAffine(sampleImgReoriented, affine, size=refImg.GetSize(), spacing=refImg.GetSpacing())
sampleImg_affine_bounded = sitk.Clamp(sampleImg_affine,upperBound=sampleThreshold)
refImg_bounded = sitk.Clamp(refImg, upperBound=refThreshold)
# --- LDDMM Registration, then compose affine + deformation fields. ---
(field, invField) = imgMetamorphosisComposite(sampleImg_ds, refImg_ds, alphaList=[0.2, 0.1, 0.05],
                                              scaleList = 1.0, useMI=True, iterations=100, verbose=True)
affineField = affineToField(affine, field.GetSize(), field.GetSpacing())
fieldComposite = fieldApplyField(field, affineField)
invAffineField = affineToField(affineInverse(affine), invField.GetSize(), invField.GetSpacing())
invFieldComposite = fieldApplyField(invAffineField, invField)
# Final atlas-registered sample volume.
sampleImg_lddmm = imgApplyField(sampleImgReoriented, fieldComposite, size=refImg.GetSize(), spacing=refImg.GetSpacing())
|
[
"jonathan.jy.liu@gmail.com"
] |
jonathan.jy.liu@gmail.com
|
f1b834746b4997f601868f58ed815391ad9e6bf7
|
730b85b3c23337fddca9f4a80d82f8ed2f2eb2a4
|
/BotBlocker.py
|
6f1b566e739ae14e718fb48d898640ff6245218d
|
[
"MIT"
] |
permissive
|
sigmaister/Anonymous-Telegram-Bot
|
29db924b6a3b33fdae3aba045b68e3c0fb60992b
|
dee234dd9f931b438a9939310e6d54c0fea4999f
|
refs/heads/master
| 2023-05-25T01:08:30.300715
| 2020-04-12T10:55:33
| 2020-04-12T11:30:03
| 269,410,655
| 0
| 0
|
MIT
| 2023-05-22T23:29:49
| 2020-06-04T16:32:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,842
|
py
|
"""
This software has been developed by github user fndh (http://github.com/fndh)
You are free to use, modify and redistribute this software as you please, as
long as you follow the conditions listed in the LICENSE file of the github
repository indicated. I want to thank you for reading this small paragraph,
and please consider sending me a message if you are using this software! It
will surely make my day.
"""
class Blocker:
    """Persist and query a block-list of Telegram user IDs via a thin SQL wrapper."""

    def __init__(self, sql_wrapper):
        # The wrapper exposes execute_and_commit() and select_and_fetch().
        self.sql = sql_wrapper
        # Ensure the backing table exists; a no-op when it already does.
        self.sql.execute_and_commit(
            "CREATE TABLE IF NOT EXISTS blocked_user_ids (user_id);")

    def block_user(self, user_id):
        """Add *user_id* to the block table unless it is already present."""
        if self.is_user_blocked(user_id):
            return
        self.sql.execute_and_commit(
            "INSERT INTO blocked_user_ids (user_id) VALUES (?);",
            (user_id,))

    def unblock_user(self, user_id):
        """Remove *user_id* from the block table if it is there."""
        self.sql.execute_and_commit(
            "DELETE FROM blocked_user_ids WHERE user_id=?;",
            (user_id,))

    def get_blocked_users(self):
        """Return every currently blocked user ID as a list of strings."""
        rows = self.sql.select_and_fetch(
            "SELECT user_id FROM blocked_user_ids;")
        return [str(row[0]) for row in rows]

    def is_user_blocked(self, user_id):
        """Return the count of block-table rows for *user_id* (truthy when blocked)."""
        counts = self.sql.select_and_fetch(
            "SELECT COUNT(*) FROM blocked_user_ids WHERE user_id=?",
            (user_id,))
        # The wrapper returns query results shaped like [(count,)].
        return counts[0][0]
|
[
"xavi_cat36@hotmail.com"
] |
xavi_cat36@hotmail.com
|
3d62fe89850d8886db18d58cd2b87b3b04745a1b
|
60ffc2a1264a7ac6e743b0c1da380d0daf9c849b
|
/src/core.py
|
d274ce41dd9a3d9ff816e8f776bf1ece3b235894
|
[] |
no_license
|
pedromxavier/760D
|
d3622fb4487ece13c633a527e68526b1677d9da7
|
458ebbeb1aa8975628bd2caebdd919933ecf264a
|
refs/heads/master
| 2021-02-28T16:40:30.769008
| 2020-06-05T18:46:01
| 2020-06-05T18:46:01
| 245,713,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,725
|
py
|
import telebot as tb
import re
import html
import urllib.request as req
def STATUS_PONTE():
    """Scrape the Ecoponte road-conditions page and describe bridge traffic.

    Returns a Portuguese Markdown sentence with the traffic status in both
    directions, or an apologetic fallback message when the page cannot be
    fetched or parsed.
    """
    try:
        URL_PONTE = "https://www.ecoponte.com.br/condicoes-da-via"
        # Captures (direction name, status text) from each table row.
        PATTERN = r'<tr class=.*?-row><td>.*?<td>(.*?)<td><span class=.*?>\s*(\S*?)\s*</span>'
        answer = req.urlopen(URL_PONTE)
        answer_bytes = answer.read()
        answer_html = answer_bytes.decode('utf8')
        matches = re.findall(PATTERN, answer_html, re.DOTALL)
        # "The bridge traffic is *{status}* toward *{direction}* ..." —
        # expects exactly two (name, status) matches to fill four slots.
        TEXT = "O tráfego na ponte está *{}* no sentido *{}* e *{}* no sentido *{}*."
        args = []
        for name, status in matches:
            args.append(html.unescape(status))
            args.append(html.unescape(name))
        # No matches means the page layout changed; fall through to the
        # fallback message via the broad except below.
        if not args: raise ValueError
        return TEXT.format(*args)
    except:
        # Deliberate best-effort: any network/parse failure yields a polite
        # "no information right now" reply instead of crashing the bot.
        return "Não tenho informações sobre a ponte agora, lamento."
def NEXT_FRESCAO():
    # TODO: not implemented yet — placeholder that returns None.
    pass
def FRESCAO_IMG():
    """Return an open binary handle to the bus-schedule image (caller/Telegram API consumes and is responsible for it)."""
    return open(r'static/HORARIO_FRESCAO.jpg', 'rb')
def START_TEXT(json):
    """Greeting for /start, chosen by time of day via tb.Tempo (morning/afternoon/night)."""
    if tb.Tempo.morning:
        return "Bom dia, Niterói!"
    if tb.Tempo.evening:
        return "Boa tarde, Niterói!"
    return "Boa Noite, Niterói!"
def UNKNOWN_TEXT(json):
    """Fallback reply for an unrecognized command; echoes the received text.

    *json* is the parsed update dict and must contain a 'text' key.
    """
    # BUGFIX: the original called .format(json), passing the dict as a single
    # positional argument, so the named {text} placeholder raised KeyError.
    # Unpacking the dict lets 'text' resolve as a keyword field.
    return "Comando desconhecido `{text}`".format(**json)
# Load the persisted bot instance and register the command handlers.
bot = tb.Bot.load(debug=True, fname='LV760DBOT')
bot.START_TEXT = START_TEXT
bot.UNKNOWN_TEXT = UNKNOWN_TEXT
with bot:
    # /ponte — reply with the current bridge traffic status.
    @bot.cmd_handler('ponte')
    @bot.lock_start
    def ponte(self, update, context):
        self.debug[0]('[cmd :: ponte]')
        json = self.parse(update, context)
        self.debug[1]('[obj :: json]', json)
        kw = {
            'chat_id' : json['chat_id'],
            'text' : STATUS_PONTE(),
            'parse_mode' : tb.telegram.ParseMode.MARKDOWN,
        }
        self.debug[1]('[obj :: kw]', kw)
        json['bot'].send_message(**kw)
    # /frescao — send the bus-schedule image.
    @bot.cmd_handler('frescao')
    @bot.lock_start
    def frescao(self, update, context):
        self.debug[0]('[cmd :: frescao]')
        json = self.parse(update, context)
        self.debug[1]('[obj :: json]', json)
        kw = {
            'chat_id' : json['chat_id'],
            'photo' : FRESCAO_IMG(),
        }
        self.debug[1]('[obj :: kw]', kw)
        json['bot'].send_photo(**kw)
    # /lv — placeholder: feature not implemented yet.
    @bot.cmd_handler('lv')
    @bot.lock_start
    def lv(self, update, context):
        self.debug[0]('[cmd :: lv]')
        json = self.parse(update, context)
        self.debug[1]('[obj :: json]', json)
        kw = {
            'chat_id' : json['chat_id'],
            'text' : 'Não sei fazer isso ainda.'
        }
        self.debug[1]('[obj :: kw]', kw)
        json['bot'].send_message(**kw)
if __name__ == '__main__':
bot.run()
|
[
"pedromxavier@poli.ufrj.br"
] |
pedromxavier@poli.ufrj.br
|
f0b7898e2cc53710b09420d379c41c3e2ac4a97a
|
cbf70750d6c265e4043fd9d1d3bd835662cd680f
|
/customer/apps.py
|
845451d50116021235e04c440ee3b6c448bca321
|
[
"Apache-2.0"
] |
permissive
|
xxcfun/DJANGO_CRM
|
c54e249a9a3da9edaeb5d9b49e852d351c7e359a
|
1f8d2d7a025f9dc54b5bf498e7a577469f74c612
|
refs/heads/master
| 2023-01-14T05:21:54.995601
| 2020-11-27T03:23:40
| 2020-11-27T03:23:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from django.apps import AppConfig
class CustomerConfig(AppConfig):
    """Django application configuration for the customer app."""
    name = 'customer'
    # Human-readable label shown in the Django admin ("Customer Management").
    verbose_name = '客户管理'
|
[
"55070348+hhdMrLion@users.noreply.github.com"
] |
55070348+hhdMrLion@users.noreply.github.com
|
49831033a0db7eb9b44e22f82a18daf733b0ede5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03698/s076680456.py
|
f88b228e0ad2e567dcb9e176f989690214f846c7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
# Print "yes" when the input string has no repeated characters, else "no".
S = input()
print("yes" if len(set(S)) == len(S) else "no")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9ed4cd0dc3795cc7629b973fd8603b0b0e896b3a
|
e4343fb98e9d50f67bc47374aa8f83ae0bf15fd8
|
/Lab9/main.py
|
7df71b77ec5f5f17bffb2720f3d8ba111b857b53
|
[] |
no_license
|
VVladislaVLL/geometry-labs
|
7caaf1cb4466957330416660caf78ee4bbc44557
|
df07f774f120bde2c8c7405e9eb6a3f870758778
|
refs/heads/master
| 2023-05-08T23:05:46.542715
| 2021-06-04T07:24:46
| 2021-06-04T07:24:46
| 343,880,645
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
#!/usr/bin/python
import random
from time import sleep
from matplotlib import pyplot as plt
from classes.Point import Point
from classes.Vector2d import Vector2d, pi
from divideAndRule2 import divide_and_rule, divide_and_rule
from utils.binary import binary_test
from utils.graph import draw_polygon
def reflect(p, vector_coords):
    """Reflect point *p*'s direction vector off the polygon side in vector_coords.

    Standard vector reflection: r = 2 * (v.q / q.q) * q - v, where v is the
    incoming direction and q is the side vector.
    """
    incoming = p.direction
    side = Vector2d(vector_coords[0], vector_coords[1])
    scale = 2 * (Vector2d.scalar_product(incoming, side) / Vector2d.scalar_product(side, side))
    projection = Vector2d.s_mult(side, scale)
    return Vector2d.s_minus(projection, incoming)
def plot_task(rectangle, points, points_y):
    """Animate points bouncing inside *rectangle*, reflecting off walls and each other.

    points / points_y are the same point set pre-sorted by x and by y (for the
    divide-and-conquer closest-pair search).
    """
    MIN_DISTANSE = 0.5
    plt.ion()
    # NOTE(review): `s` is never reassigned, so this loop only ends when the
    # window/process is killed — confirm that is intentional.
    s = 1
    while s:
        plt.clf()
        draw_polygon(rectangle)
        # Closest pair of points; reflect both if they are about to collide.
        clash_flag = divide_and_rule(points, points_y)
        if clash_flag[1] <= MIN_DISTANSE:
            clash_flag[0][0].reflect_direction()
            clash_flag[0][1].reflect_direction()
        for i in points:
            # Wall test: would the point's next circle cross the rectangle boundary?
            flag_binary = binary_test(rectangle, i.get_next_state_circle(MIN_DISTANSE / 2))['flag']
            if not flag_binary:
                coords_binary = binary_test(rectangle, i.get_next_state_circle(MIN_DISTANSE / 2))['points']
                new_direction = reflect(i, coords_binary)
                i.direction = new_direction
        for i in points:
            i.move()
            plt.scatter(i.x, i.y, s=MIN_DISTANSE / 2 * 750, marker='o', c='g')
        plt.draw()
        plt.gcf().canvas.flush_events()
        sleep(0.002)
        plt.show()
    plt.ioff()
    plt.show()
if __name__ == '__main__':
    # Bounding rectangle the particles bounce inside.
    rectangle = [Point(1, 1), Point(1, 10), Point(15, 10), Point(15, 1)]
    # Our points
    points_set = [Point(2, 2), Point(2, 3), Point(4, 2),
                  Point(4, 5), Point(4, 8), Point(6, 2),
                  Point(6,9), Point(8, 5), Point(10,3),
                  Point(11,9), Point(12, 5), Point(12,8)]
    # Keep two copies: sorted by (x, y) and by (y, x) for divide_and_rule.
    points_set.sort(key=lambda point: (point.x, point.y))
    points_set_y = points_set.copy()
    points_set_y.sort(key=lambda point: (point.y, point.x))
    # min_dist = divide_and_rule(points_set)
    # print(min_dist)
    # Give every point a random direction with fixed speed 0.09.
    for point in points_set:
        point.set_direction(Vector2d.get_vector(random.uniform(0, 2 * pi), 0.09))
    plot_task(rectangle, points_set, points_set_y)
|
[
"vlad2002kochurko@gmail.com"
] |
vlad2002kochurko@gmail.com
|
ba33ce6940599e88acb5a239c6d0893a19068b6e
|
1b652b030a2742d8579474e155dcfdb65b34ac28
|
/print/name3.py
|
f3aa9cf3fac9d2fcf3ccdc2070123a4ca8d011b7
|
[] |
no_license
|
RobinHou0516/Homework
|
065dc076a0d5b508433bf97c73a0f1f603447e34
|
dcb687da489f7c3a87476c07e5902f124edc5856
|
refs/heads/master
| 2020-04-09T23:57:08.793511
| 2018-12-06T12:28:51
| 2018-12-06T12:28:51
| 160,671,520
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
# Print a one-line self-introduction built from the variables below.
name = 'Robin Kawensanna Smartest Unrivalled'
age = '14'
grade = '9th'
school = 'JXFLS'
# Removed dead statement `grade.title` — it accessed the bound method without
# calling or assigning it, so it had no effect on the output.
print('My name is '+name+'. I am a '+age+' year old '+grade+' grader at '+school+'.')
|
[
"Robinhou0516@gamil.com"
] |
Robinhou0516@gamil.com
|
bebb9c6ed06be5117b813c8c9ee9f2303de321f2
|
6ba09665a90059f326e594f4d1edb74fd55e2a1c
|
/utils/seg_metrics.py
|
7a0b9bac7d24bfdee7381f2d2d3880960abf9bf1
|
[
"MIT"
] |
permissive
|
NguyenNhan999/Joint-Motion-Estimation-and-Segmentation
|
f9ef73da51eaf37418ff2906c469b6f0b42ac0c5
|
1c36d97ef41bee48d377c2cf98ad1d7b86ee37b4
|
refs/heads/master
| 2022-12-03T10:02:36.489257
| 2020-08-21T12:32:14
| 2020-08-21T12:32:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,224
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 17:53:58 2018
@author: cq615
"""
import numpy as np, cv2
def np_categorical_dice(pred, truth, k):
# Dice overlap metric for label value k
A = (pred == k).astype(np.float32)
B = (truth == k).astype(np.float32)
return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B))
def distance_metric(seg_A, seg_B, dx, k):
    # Measure the distance errors between the contours of two segmentations.
    # seg_A / seg_B are 4-D label volumes (K, X, Y, Z); the manual contours are
    # drawn on 2D slices, so contour-to-contour distance is computed per slice
    # along Z for label k, then averaged. dx converts pixels to physical units.
    # NOTE(review): uses the 2-value cv2.findContours return — OpenCV >= 4
    # assumed; OpenCV 3 returns 3 values. Confirm the pinned version.
    table_md = []
    table_hd = []
    K, X, Y, Z = seg_A.shape
    for z in range(Z):
        # Binary mask at this slice
        slice_A = seg_A[k, :, :, z].astype(np.uint8)
        slice_B = seg_B[k, :, :, z].astype(np.uint8)
        # The distance is defined only when both contours exist on this slice
        if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:
            # Find contours and retrieve all the points
            contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            pts_A = contours[0]
            for i in range(1, len(contours)):
                pts_A = np.vstack((pts_A, contours[i]))
            contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            pts_B = contours[0]
            for i in range(1, len(contours)):
                pts_B = np.vstack((pts_B, contours[i]))
            # Distance matrix between point sets (pairwise Euclidean, in pixels)
            M = np.zeros((len(pts_A), len(pts_B)))
            for i in range(len(pts_A)):
                for j in range(len(pts_B)):
                    M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])
            # Mean distance (symmetric average of directed means) and Hausdorff distance
            md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx
            hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx
            table_md += [md]
            table_hd += [hd]
    # Return the mean distance and Hausdorff distance across 2D slices
    # (None for each metric when no slice had both contours).
    mean_md = np.mean(table_md) if table_md else None
    mean_hd = np.mean(table_hd) if table_hd else None
    return mean_md, mean_hd
|
[
"c.qin15@imperial.ac.uk"
] |
c.qin15@imperial.ac.uk
|
397bb1db215c047a38a6dd15583af7806156363f
|
202e70bbfee2c70049ea8ac43711ec008baa47a3
|
/main.py
|
6e72104a97d2f709d97279c8e5c1bb314b74519f
|
[] |
no_license
|
wqq1136883696/UbuntuPython
|
714b74c3559f2bf9e57d00fbe95f001646532611
|
c3d965ee749a15efdbded4169c576cd38dc44db3
|
refs/heads/master
| 2022-12-18T22:48:33.258697
| 2020-09-11T06:41:35
| 2020-09-11T06:41:35
| 294,617,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
# Sample script generated from the PyCharm template.
def print_hi(name):
    """Print two greeting lines for *name* on stdout."""
    print(f'Hi, {name}')
    print("{}, nice to meet you!".format(name))


if __name__ == '__main__':
    print_hi('PyCharm')
|
[
"1136883696@qq.com"
] |
1136883696@qq.com
|
94a992f736f385b74839c456c1539fa5deb7f28c
|
cdfa17ab8b6524a4611dbadd69fabe6a38c8fe0b
|
/pysot/models/sa/__init__.py
|
7799e4ee9021564c4e5d4e09e4fe9800056bf345
|
[
"Apache-2.0"
] |
permissive
|
bourahla-omar/pysot
|
7e61e24fe0d6375770569a47dc1051b89199bd56
|
c0fd8a0b3a307da0d50bc07208417d69244dc00f
|
refs/heads/master
| 2020-07-24T13:59:33.410511
| 2019-10-22T05:55:46
| 2019-10-22T05:55:46
| 207,949,969
| 0
| 0
|
Apache-2.0
| 2019-09-12T02:43:31
| 2019-09-12T02:43:30
| null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pysot.models.sa.sa import sablock
# Registry mapping a config string to its self-attention block constructor.
SABLOCKS = {
    'sablock': sablock,
}

def get_sa(name, **kwargs):
    """Build the self-attention block registered under *name*, forwarding kwargs to its constructor."""
    return SABLOCKS[name](**kwargs)
|
[
"csxuandongzhao@gmail.com"
] |
csxuandongzhao@gmail.com
|
f6bdba59b03588b2fe0c583e830fa12b83a347cb
|
0c8594ca33e334589e9c28c525ca86f75536fc74
|
/Python/evenodd.py
|
af98dd5f07bbfdf80a8e33e0aa0bcba0fe03c2c4
|
[] |
no_license
|
shubham1592/force
|
84112c74cdca0d7dd930eaf89c7d9aa62dbb007d
|
254b4729bb5332ecceb6b395cd7082023710b381
|
refs/heads/master
| 2021-07-17T10:44:14.096408
| 2019-10-23T19:04:12
| 2019-10-23T19:04:12
| 216,235,074
| 0
| 3
| null | 2020-10-02T07:33:39
| 2019-10-19T16:19:43
|
C++
|
UTF-8
|
Python
| false
| false
| 42
|
py
|
# Print the even numbers 2, 4, ..., 100 — one per line.
for n in range(1, 51):
    print(n * 2)
|
[
"pi.shubham1592@gmail.com"
] |
pi.shubham1592@gmail.com
|
d35f840aebca9e72f9b224612463cc501f5f7dda
|
827dcdf40e7a4998b03b8d8f07f28ede09e47fd9
|
/corpus_scripts/surp_dict.py
|
720aaa496b2bf53b36180ebc441d6667f7830695
|
[] |
no_license
|
LauraGwilliams/arab_pred
|
9d1dfb1285c769888905f09d30d0a14b5468fad0
|
f006a42a9979e7ca6c19634c5b90e51b8f8ba3f0
|
refs/heads/master
| 2016-09-08T05:04:18.583702
| 2015-11-02T16:30:41
| 2015-11-02T16:30:41
| 18,057,133
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,548
|
py
|
import sys
import math
from collections import Counter
from collections import defaultdict
# NOTE -- 'CV' corresponds to the element of interest (context + target), and
#         'C'  corresponds to the element of interest minus its final unit.
# Script argument: the phoneme/morpheme label whose count files are read.
phoneme_of_interest = sys.argv[1]
# Surprisal values keyed by CV string; defaultdict(int) so absent keys read as 0.
MyDict = defaultdict(int)
# An underscore in the label marks a morphemic analysis (vs. linear); this
# selects which context-count file to read. The output folder is the same
# either way.
if '_' in phoneme_of_interest:
    C_in = open(phoneme_of_interest[:-2] + '_counts_final.txt','r').read().split('\n')
    folder_path = 'log2_surp_dicts/surp-dict-'
    print 'conducting morphemic analysis..'
else:
    C_in = open(phoneme_of_interest[:-1] + '_counts_final.txt','r').read().split('\n')
    print 'conducting linear analysis..'
    folder_path = 'log2_surp_dicts/surp-dict-'
#open and read the CV counts, then if the first element of CV is in C, print the frequency of both C and CV#
# Stream the CV count rows ("<cv-string>,<count>"); for each, find the matching
# context row in C_in, compute P(target | context) and surprisal = log2 of it,
# and store the (negative) surprisal keyed by the CV string.
# NOTE(review): Python 2 print statements; CV and C are rebound from line
# strings to split lists inside the match branch — order is significant.
with open(phoneme_of_interest + '_counts_final.txt','r') as CV_in:
    for CV in CV_in:
        for C in C_in:
            length = len(phoneme_of_interest)
            if '_' in phoneme_of_interest:
                if len(phoneme_of_interest) >=4:
                    # Morphemic analysis, long label: context is all but the last 3 chars.
                    cv = ''.join(CV[0:int(length)-3])
                    if cv in C and ',' not in CV[0]:
                        CV = CV.split(',')
                        C = C.split(',')
                        print CV
                        print "CV"
                        print float(CV[1])
                        print C
                        print "C"
                        print float(C[1])
                        print "cond_prob"
                        # Conditional probability of the target given its context.
                        cond_prob = float(CV[1])/float(C[1])
                        surp = math.log(float(cond_prob),2)
                        print cond_prob
                        MyDict[CV[0]] = surp
                        print "---"
                else:
                    # Morphemic analysis, short label: context is all but the last 2 chars.
                    cv = ''.join(CV[0:int(length)-2])
                    if cv in C and ',' not in CV[0]:
                        CV = CV.split(',')
                        C = C.split(',')
                        print CV
                        print "CV"
                        print float(CV[1])
                        print C
                        print "C"
                        print float(C[1])
                        print "cond_prob"
                        cond_prob = float(CV[1])/float(C[1])
                        surp = math.log(float(cond_prob),2)
                        print cond_prob
                        MyDict[CV[0]] = surp
                        print "---"
            else:
                # Linear analysis: context is all but the last char.
                cv = ''.join(CV[0:int(length)-1])
                #cv = ''.join(CV[0])
                #cv = cv[:-1]
                #cv = ''.join(CV[0:2])
                if cv in C and ',' not in CV[0]:
                    # Compute the conditional probability of the final unit, take its
                    # log2 as surprisal, and store it keyed by the CV string.
                    # Split "<string>,<count>" rows into [string, count] first.
                    CV = CV.split(',')
                    C = C.split(',')
                    # Conditional probability from the count fields, then surprisal.
                    print CV
                    print "CV"
                    print float(CV[1])
                    print C
                    print "C"
                    print float(C[1])
                    print "cond_prob"
                    cond_prob = float(CV[1])/float(C[1])
                    surp = math.log(float(cond_prob),2)
                    print cond_prob
                    MyDict[CV[0]] = surp
                    print "---"
#now we just save the dict to file#
file = open(folder_path + phoneme_of_interest + '.txt', 'w')
for key in MyDict:
file.write(key + ',' + str(MyDict[key]) + '\n')
file.close()
file = open(folder_path + phoneme_of_interest + '.txt', 'w')
for key in MyDict:
file.write(key + ',' + str(MyDict[key]) + '\n')
file.close()
#finally print out all words with a surprisal greater than 5:
highest_surp = dict((k, v) for k, v in MyDict.items() if v <= -5)
print "items with suprisal greater than 5:"
print highest_surp
#and less than 1:
highest_surp = dict((k, v) for k, v in MyDict.items() if v >= -2)
print "items with suprisal less than 2:"
print highest_surp
|
[
"root@Lauras-MacBook-Pro.local"
] |
root@Lauras-MacBook-Pro.local
|
5c36ae6fce8ec9601832a3503e9a4f0e716f1f1d
|
a35dadcdca748197bc400cebc180b58fe8f0735a
|
/constants.py
|
06f781bd894b739f694e6c9e5ed6447e66a2aa70
|
[] |
no_license
|
RGologorsky/CS-182-final-project
|
ec232bb40bca4ffab935be536ca8540972be57e6
|
117b3159b879d07c1195204718dadf2e696469f7
|
refs/heads/master
| 2022-09-23T03:33:08.991790
| 2020-06-04T22:13:36
| 2020-06-04T22:13:36
| 112,677,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,337
|
py
|
# Course-group constants.
# NOTE(review): MULTIVAR/LINALG membership mirrors the PREREQS sets used in
# the `courses` table below — confirm the A/B split is intentional.
MULTIVAR = set(["AM21B", "MATH21B", "MATH23B", "MATH25A", "MATH55A"])
LINALG = set(["AM21A", "MATH21A", "MATH23A", "MATH25B", "MATH55B"])

# Course-code string constants (each equals its own name).
# Fix: MATH25B was assigned twice in the original; duplicates removed.
STAT110 = "STAT110"
CS050 = "CS050"
CS051 = "CS051"
CS061 = "CS061"
CS020 = "CS020"
CS121 = "CS121"
CS124 = "CS124"
CS181 = "CS181"
CS182 = "CS182"
MATH23A = "MATH23A"
MATH25A = "MATH25A"
MATH25B = "MATH25B"
MATH55A = "MATH55A"
MATH55B = "MATH55B"
courses = {
'AM106': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'AM106',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 31,
'PREREQS': [set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B'])],
'Q': 3.7,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 6.6},
'AM107': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'AM107',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 23,
'PREREQS': [],
'Q': 4.3,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 8.1},
'AM120': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'AM120',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 108,
'PREREQS': [set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B']),
'CS050'],
'Q': 4.3,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 6.9},
'AM121': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'AM121',
'DAYS': set(['M', 'W']),
'END': 1129,
'ENROLLMENT': 73,
'PREREQS': [set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B']),
'STAT110'],
'Q': 3.8,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 10.3},
'AM21A': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1100AM',
'COURSE': 'AM21A',
'DAYS': set(['F', 'M', 'W']),
'END': 1159,
'ENROLLMENT': 169,
'PREREQS': [],
'Q': 3.8,
'SEMESTER': 'F',
'START': 1100,
'WORKLOAD': 7.7},
'AM21B': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1100AM',
'COURSE': 'AM21B',
'DAYS': set(['F', 'M', 'W']),
'END': 1159,
'ENROLLMENT': 79,
'PREREQS': [set(['AM21A',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A'])],
'Q': 3.3,
'SEMESTER': 'S',
'START': 1100,
'WORKLOAD': 9.2},
'CS001': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS001',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 76,
'PREREQS': [],
'Q': 3.8,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 7.4},
'CS020': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1059AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS020',
'DAYS': set(['F', 'M', 'W']),
'END': 1059,
'ENROLLMENT': 58,
'PREREQS': [],
'Q': 4.4,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 5.0},
'CS050': {'CLOCKDAYS': 'F',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS050',
'DAYS': set(['F']),
'END': 1159,
'ENROLLMENT': 750,
'PREREQS': [],
'Q': 3.5,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 15.2},
'CS051': {'CLOCKDAYS': 'T',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS051',
'DAYS': set(['T']),
'END': 1429,
'ENROLLMENT': 348,
'PREREQS': ['CS050'],
'Q': 3.4,
'SEMESTER': 'S',
'START': 1300,
'WORKLOAD': 13.9},
'CS061': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS061',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 129,
'PREREQS': ['CS050'],
'Q': 4.2,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 15.8},
'CS091R': {'CLOCKDAYS': '',
'CLOCKEND': '',
'CLOCKSTART': '',
'COURSE': 'CS091R',
'DAYS': set([]),
'END': -1,
'ENROLLMENT': 5,
'PREREQS': [],
'Q': 3.879545455,
'SEMESTER': 'F',
'START': -1,
'WORKLOAD': 11.04772727},
'CS091R': {'CLOCKDAYS': '',
'CLOCKEND': '',
'CLOCKSTART': '',
'COURSE': 'CS091R',
'DAYS': set([]),
'END': -1,
'ENROLLMENT': 5,
'PREREQS': [],
'Q': 3.879545455,
'SEMESTER': 'S',
'START': -1,
'WORKLOAD': 11.04772727},
'CS096': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '559PM',
'CLOCKSTART': '400PM',
'COURSE': 'CS096',
'DAYS': set(['F', 'M', 'W']),
'END': 1759,
'ENROLLMENT': 2,
'PREREQS': [set(['CS051', 'CS061'])],
'Q': 4.5,
'SEMESTER': 'F',
'START': 1600,
'WORKLOAD': 3.0},
'CS105': {'CLOCKDAYS': 'TR',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS105',
'DAYS': set(['R', 'T']),
'END': 1429,
'ENROLLMENT': 37,
'PREREQS': [],
'Q': 4.7,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 5.2},
'CS108': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1130AM',
'COURSE': 'CS108',
'DAYS': set(['R', 'T']),
'END': 1259,
'ENROLLMENT': 36,
'PREREQS': [],
'Q': 4.8,
'SEMESTER': 'F',
'START': 1130,
'WORKLOAD': 4.1},
'CS109A': {'CLOCKDAYS': 'MW',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS109A',
'DAYS': set(['M', 'W']),
'END': 1429,
'ENROLLMENT': 131,
'PREREQS': ['CS050'],
'Q': 3.1,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 9.1},
'CS109B': {'CLOCKDAYS': 'MW',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS109B',
'DAYS': set(['M', 'W']),
'END': 1429,
'ENROLLMENT': 79,
'PREREQS': ['CS109A'],
'Q': 3.5,
'SEMESTER': 'S',
'START': 1300,
'WORKLOAD': 11.9},
'CS121': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS121',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 169,
'PREREQS': [set(['CS020',
'MATH23A',
'MATH25A',
'MATH25B',
'MATH55A',
'MATH55B'])],
'Q': 3.2,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 9.5},
'CS124': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1130AM',
'COURSE': 'CS124',
'DAYS': set(['R', 'T']),
'END': 1259,
'ENROLLMENT': 217,
'PREREQS': ['CS121'],
'Q': 3.9,
'SEMESTER': 'S',
'START': 1130,
'WORKLOAD': 15.2},
'CS126': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS126',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 30,
'PREREQS': ['STAT110', 'CS124'],
'Q': 3.0,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 8.0},
'CS127': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS127',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 19,
'PREREQS': [set(['CS121', 'CS124'])],
'Q': 4.5,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 12.2},
'CS134': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS134',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 167,
'PREREQS': ['STAT110',
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B']),
set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A'])],
'Q': 3.5,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 9.3},
'CS136': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1130AM',
'COURSE': 'CS136',
'DAYS': set(['M', 'W']),
'END': 1259,
'ENROLLMENT': 57,
'PREREQS': [set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B']),
'CS051',
'STAT110',
set(['CS181', 'CS182'])],
'Q': 4.6,
'SEMESTER': 'F',
'START': 1130,
'WORKLOAD': 9.8},
'CS141': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS141',
'DAYS': set(['M', 'W']),
'END': 1129,
'ENROLLMENT': 19,
'PREREQS': ['CS050'],
'Q': 4.0,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 10.5},
'CS143': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS143',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 43,
'PREREQS': ['CS050'],
'Q': 2.8,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 5.5},
'CS144R': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS144R',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 11,
'PREREQS': [],
'Q': 4.2,
'SEMESTER': 'S',
'START': 1430,
'WORKLOAD': 5.2},
'CS144R': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS144R',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 11,
'PREREQS': [],
'Q': 4.2,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 5.2},
'CS148': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1130AM',
'COURSE': 'CS148',
'DAYS': set(['R', 'T']),
'END': 1259,
'ENROLLMENT': 4,
'PREREQS': [],
'Q': 5.0,
'SEMESTER': 'S',
'START': 1130,
'WORKLOAD': 5.7},
'CS152': {'CLOCKDAYS': 'TR',
'CLOCKEND': '1129AM',
'CLOCKSTART': '1000AM',
'COURSE': 'CS152',
'DAYS': set(['R', 'T']),
'END': 1129,
'ENROLLMENT': 19,
'PREREQS': ['CS051', 'CS121'],
'Q': 3.4,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 6.6},
'CS165': {'CLOCKDAYS': 'MW',
'CLOCKEND': '529PM',
'CLOCKSTART': '400PM',
'COURSE': 'CS165',
'DAYS': set(['M', 'W']),
'END': 1729,
'ENROLLMENT': 32,
'PREREQS': ['CS051', 'CS061'],
'Q': 4.5,
'SEMESTER': 'F',
'START': 1600,
'WORKLOAD': 10.5},
'CS171': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS171',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 97,
'PREREQS': ['CS050'],
'Q': 3.7,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 9.8},
'CS175': {'CLOCKDAYS': 'MW',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS175',
'DAYS': set(['M', 'W']),
'END': 1429,
'ENROLLMENT': 13,
'PREREQS': [set(['CS051', 'CS061']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B'])],
'Q': 4.0,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 9.5},
'CS179': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'CS179',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 59,
'PREREQS': ['CS050'],
'Q': 3.6,
'SEMESTER': 'S',
'START': 1430,
'WORKLOAD': 6.8},
'CS181': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1029AM',
'CLOCKSTART': '900AM',
'COURSE': 'CS181',
'DAYS': set(['M', 'W']),
'END': 1029,
'ENROLLMENT': 215,
'PREREQS': ['CS051',
'STAT110',
set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B'])],
'Q': 3.6,
'SEMESTER': 'S',
'START': 900,
'WORKLOAD': 16.8},
'CS182': {'CLOCKDAYS': 'TR',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS182',
'DAYS': set(['R', 'T']),
'END': 1429,
'ENROLLMENT': 84,
'PREREQS': ['CS051', 'STAT110'],
'Q': 3.9,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 7.2},
'CS189': {'CLOCKDAYS': 'F',
'CLOCKEND': '359PM',
'CLOCKSTART': '100PM',
'COURSE': 'CS189',
'DAYS': set(['F']),
'END': 1559,
'ENROLLMENT': 20,
'PREREQS': [set(['CS181', 'CS182'])],
'Q': 3.6,
'SEMESTER': 'S',
'START': 1300,
'WORKLOAD': 13.9},
'CS191': {'CLOCKDAYS': 'MW',
'CLOCKEND': '1029AM',
'CLOCKSTART': '900AM',
'COURSE': 'CS191',
'DAYS': set(['M', 'W']),
'END': 1029,
'ENROLLMENT': 20,
'PREREQS': [],
'Q': 3.0,
'SEMESTER': 'S',
'START': 900,
'WORKLOAD': 8.0},
'ES50': {'CLOCKDAYS': 'MW',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'ES50',
'DAYS': set(['M', 'W']),
'END': 1559,
'ENROLLMENT': 85,
'PREREQS': [],
'Q': 3.5,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 6.6},
'ES52': {'CLOCKDAYS': 'MW',
'CLOCKEND': '229PM',
'CLOCKSTART': '100PM',
'COURSE': 'ES52',
'DAYS': set(['M', 'W']),
'END': 1429,
'ENROLLMENT': 53,
'PREREQS': [],
'Q': 3.9,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 9.8},
'MATH154': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1259PM',
'CLOCKSTART': '1200PM',
'COURSE': 'MATH154',
'DAYS': set(['F', 'M', 'W']),
'END': 1259,
'ENROLLMENT': 30,
'PREREQS': [set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A']),
set(['AM21A',
'MATH21A',
'MATH23A',
'MATH25B',
'MATH55B'])],
'Q': 4.5,
'SEMESTER': 'S',
'START': 1200,
'WORKLOAD': 10.1},
'MATH21A': {'CLOCKDAYS': '',
'CLOCKEND': '',
'CLOCKSTART': '',
'COURSE': 'MATH21A',
'DAYS': set([]),
'END': -1,
'ENROLLMENT': 237,
'PREREQS': [],
'Q': 3.6,
'SEMESTER': 'FS',
'START': -1,
'WORKLOAD': 10.0},
'MATH21B': {'CLOCKDAYS': '',
'CLOCKEND': '',
'CLOCKSTART': '',
'COURSE': 'MATH21B',
'DAYS': set([]),
'END': -1,
'ENROLLMENT': 320,
'PREREQS': [],
'Q': 3.5,
'SEMESTER': 'FS',
'START': -1,
'WORKLOAD': 8.4},
'MATH23A': {'CLOCKDAYS': 'F',
'CLOCKEND': '159PM',
'CLOCKSTART': '100PM',
'COURSE': 'MATH23A',
'DAYS': set(['F']),
'END': 1359,
'ENROLLMENT': 59,
'PREREQS': [],
'Q': 3.4,
'SEMESTER': 'F',
'START': 1300,
'WORKLOAD': 10.5},
'MATH23B': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'MATH23B',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 59,
'PREREQS': [set(['MATH23A', 'MATH25A'])],
'Q': 3.9,
'SEMESTER': 'S',
'START': 1430,
'WORKLOAD': 8.8},
'MATH25A': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1059AM',
'CLOCKSTART': '1000AM',
'COURSE': 'MATH25A',
'DAYS': set(['F', 'M', 'W']),
'END': 1059,
'ENROLLMENT': 45,
'PREREQS': [],
'Q': 4.6,
'SEMESTER': 'F',
'START': 1000,
'WORKLOAD': 17.1},
'MATH25B': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1059AM',
'CLOCKSTART': '1000AM',
'COURSE': 'MATH25B',
'DAYS': set(['F', 'M', 'W']),
'END': 1059,
'ENROLLMENT': 38,
'PREREQS': [set(['MATH25A','MATH55A'])],
'Q': 3.9,
'SEMESTER': 'S',
'START': 1000,
'WORKLOAD': 16.3},
'MATH55A': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1100AM',
'COURSE': 'MATH55A',
'DAYS': set(['F', 'M', 'W']),
'END': 1159,
'ENROLLMENT': 11,
'PREREQS': [],
'Q': 3.7,
'SEMESTER': 'F',
'START': 1100,
'WORKLOAD': 30.2},
'MATH55B': {'CLOCKDAYS': 'MWF',
'CLOCKEND': '1159AM',
'CLOCKSTART': '1100AM',
'COURSE': 'MATH55B',
'DAYS': set(['F', 'M', 'W']),
'END': 1159,
'ENROLLMENT': 12,
'PREREQS': ['MATH55A'],
'Q': 4.0,
'SEMESTER': 'S',
'START': 1100,
'WORKLOAD': 45.2},
'STAT110': {'CLOCKDAYS': 'TR',
'CLOCKEND': '359PM',
'CLOCKSTART': '230PM',
'COURSE': 'STAT110',
'DAYS': set(['R', 'T']),
'END': 1559,
'ENROLLMENT': 444,
'PREREQS': [set(['AM21B',
'MATH21B',
'MATH23B',
'MATH25A',
'MATH55A'])],
'Q': 4.3,
'SEMESTER': 'F',
'START': 1430,
'WORKLOAD': 10.6},
}
# 'STAT121A': {'CLOCKDAYS': 'MW',
# 'CLOCKEND': '229PM',
# 'CLOCKSTART': '100PM',
# 'COURSE': 'STAT121A',
# 'DAYS': set(['M', 'W']),
# 'END': 1429,
# 'ENROLLMENT': 131,
# 'PREREQS': [],
# 'Q': 3.1,
# 'SEMESTER': 'F',
# 'START': 1300,
# 'WORKLOAD': 9.1},
# 'STAT121B': {'CLOCKDAYS': 'MW',
# 'CLOCKEND': '229PM',
# 'CLOCKSTART': '100PM',
# 'COURSE': 'STAT121B',
# 'DAYS': set(['M', 'W']),
# 'END': 1429,
# 'ENROLLMENT': 25,
# 'PREREQS': [],
# 'Q': 3.5,
# 'SEMESTER': 'S',
# 'START': 1300,
# 'WORKLOAD': 5}
|
[
"rgologorsky@college.harvard.edu"
] |
rgologorsky@college.harvard.edu
|
783fdd25c10199746426d1f7b3e87e009964b1e1
|
d4a5462b2cd2eff99da6ad5147b5423c819ae731
|
/1072.py
|
7cde2b4cbdcb7fcb593e7f072f7089b72b0530d7
|
[] |
no_license
|
Rafesz/URI_solutions_py
|
3a61e6b0b571a03857f1c4efb54546edb2a0fb6a
|
62a9f8227523e409afa9d506df66516ef9b48079
|
refs/heads/main
| 2023-08-11T20:55:04.267913
| 2021-09-21T22:25:50
| 2021-09-21T22:25:50
| 402,085,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
# Leia um valor inteiro N. Este valor será a quantidade de valores inteiros X que serão lidos em seguida.
# Mostre quantos destes valores X estão dentro do intervalo [10,20] e quantos estão fora do intervalo, mostrando essas informações.
# Entrada
# A primeira linha da entrada contém um valor inteiro N (N < 10000), que indica o número de casos de teste.
# Cada caso de teste a seguir é um valor inteiro X (-107 < X <107).
# Saída
# Para cada caso, imprima quantos números estão dentro (in) e quantos valores estão fora (out) do intervalo.
# URI 1072: read N values and count how many fall inside [10, 20] vs. outside.
dentro = 0
fora = 0
for _ in range(int(input())):
    valor = int(input())
    if 10 <= valor <= 20:
        dentro += 1
    else:
        fora += 1
print("{} in\n{} out".format(dentro, fora))
|
[
"noreply@github.com"
] |
noreply@github.com
|
7535a94b63e52647dad6aafa93a3c78cf10f5ec2
|
4833d5ebc9c84acd95059eb83524923dd9aebc83
|
/asyncio_helpers.py
|
d1cf4ebc364da783fd56b5ca077a0af4b7695b57
|
[] |
no_license
|
bdutro/chromium-proxy
|
24738f5f7b088c8bae55ba398c71ac82b928b09f
|
334fb3bd006a3f26b553d354d2830ba3b0328b0b
|
refs/heads/main
| 2023-02-20T03:07:22.911243
| 2021-01-08T18:45:50
| 2021-01-08T18:45:50
| 327,177,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import asyncio
import sys
# Version-compatible wrapper: asyncio.run exists only on Python 3.7+.
if sys.version_info >= (3, 7):
    def asyncio_run(p):
        """Run coroutine *p* to completion via asyncio.run (3.7+)."""
        return asyncio.run(p)
else:
    def asyncio_run(p):
        """Fallback for Python < 3.7: drive *p* on the current event loop."""
        return asyncio.get_event_loop().run_until_complete(p)
|
[
"brett.dutro@gmail.com"
] |
brett.dutro@gmail.com
|
eb268714e1d62798cbe61cec0f6af724ee53d4f6
|
a163c2cec4d942212bd5bcd25a8759a7da570b7f
|
/ChatBot/main.py
|
f54e258b6b94e4253d6b6b77a9eb96e43cdb92e0
|
[] |
no_license
|
cainanalves/computational_intelligence
|
1daa4c3f153563e11b0d8410d6429648b48b57f1
|
503dfeb9db3dc18725d30587f968ed86ece53d7d
|
refs/heads/master
| 2021-08-23T10:47:22.650121
| 2017-12-04T15:42:48
| 2017-12-04T15:42:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
#!/usr/bin/python3.5
# encoding: utf-8
from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot
import os
# read_only=True would tell the bot it is already trained and skip retraining.
bot = ChatBot("Teste")#read_only=True)
bot.set_trainer(ListTrainer)
# Train on every conversation file in the arqs/ directory.
for arq in os.listdir("arqs"):
    chats = open("arqs/"+arq,"rb").readlines()
    bot.train(chats)
# Append the live conversation to a log file.
conversa = open("arqs/conversa", "a")
while True:
    resq = input("Você: ")
    conversa.write(str(resq)+"\n")
    resp = bot.get_response(resq)
    conversa.write(str(resp)+"\n")
    print("Bot: "+ str(resp))
    # Stop when either side says "chega de papo" ("enough chatting").
    if (("chega de papo" in str(resq)) or ("chega de papo" in str(resp))):
        break
conversa.close()
|
[
"cainan.teixeira10@hotmail.com"
] |
cainan.teixeira10@hotmail.com
|
417047a76d6ad25de43fc2acde8e4c37efc3ab2e
|
62187abac35eec54f56d956ced4aae18be5c667d
|
/pilta.py
|
e9eff95395e2224f94d442902c52ef9d71ff2a61
|
[] |
no_license
|
Gary345/Ivan_Arratia_1559
|
5aea66e102bcc49549f680413d5da00275079c72
|
fafcaa3c3c8e11f4264672dccee13a477a138bc0
|
refs/heads/main
| 2023-01-06T14:34:30.906982
| 2020-11-11T06:31:28
| 2020-11-11T06:31:28
| 300,461,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# Stack (pila) demo using a Python list.
pila = [3, 4, 5]  # initial stack contents
print(pila)  # -> [3, 4, 5]
pila.append(7)  # push 7
pila.append(8)  # push 8
print(pila)  # -> [3, 4, 5, 7, 8]
print("saco este elemento", pila.pop())  # pop returns 8
print("Se quedo asi la pila: ", pila)  # -> [3, 4, 5, 7]
|
[
"ivanarratia314@aragon.unam.mx"
] |
ivanarratia314@aragon.unam.mx
|
cc97266146a0e0a5a82b27d70bd9882600547a30
|
606afd1394624127e37bf82328e1d21f00f8a2ca
|
/Layer.py
|
0c21cbb41afc36e4c72c6f5a25825d5ff2ed8f09
|
[] |
no_license
|
JoelGooch/Tensorflow-Final-Year-Project
|
b4d437a778f19e5cf7adc16afd82b5a0c7a93788
|
b72aeeed5ac9df13257c56fff5e71855709cb73c
|
refs/heads/master
| 2021-01-20T03:16:47.980594
| 2017-05-21T20:13:55
| 2017-05-21T20:13:55
| 83,829,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
# base Layer class that others will inherit from
class Layer:
def __init__(self, layer_name):
self.layer_name = layer_name
# contains all the parameters that a convolution layer will have
class ConvLayer(Layer):
def __init__(self, layer_name, kernel_size, stride, act_function, num_output_filters, weight_init, weight_val, bias_init, bias_val, padding, normalize, dropout , keep_rate):
Layer.__init__(self, layer_name)
self.layer_type = 'Convolution'
self.kernel_size = int(kernel_size)
self.stride = int(stride)
self.act_function = act_function
self.num_output_filters = int(num_output_filters)
self.weight_init = weight_init
self.weight_val = float(weight_val)
self.bias_init = bias_init
self.bias_val = float(bias_val)
self.padding = padding
self.normalize = normalize
self.dropout = dropout
self.keep_rate = float(keep_rate)
# contains all the parameters that a max pooling layer will have
class MaxPoolingLayer(Layer):
def __init__(self, layer_name, kernel_size, stride, padding, normalize, dropout, keep_rate):
Layer.__init__(self, layer_name)
self.layer_type = 'Max Pool'
self.kernel_size = int(kernel_size)
self.stride = int(stride)
self.padding = padding
self.normalize = normalize
self.dropout = dropout
self.keep_rate = float(keep_rate)
# contains all the parameters that a fully connected layer will have
class FullyConnectedLayer(Layer):
def __init__(self, layer_name, act_function, num_output_nodes, weight_init, weight_val, bias_init, bias_val, dropout, keep_rate):
Layer.__init__(self, layer_name)
self.layer_type = 'Fully Connected'
self.act_function = act_function
self.num_output_nodes = int(num_output_nodes)
self.weight_init = weight_init
self.weight_val = float(weight_val)
self.bias_init = bias_init
self.bias_val = float(bias_val)
self.dropout = dropout
self.keep_rate = float(keep_rate)
# contains all the parameters that an output layer will have
class OutputLayer(Layer):
def __init__(self, layer_name, act_function, weight_init, weight_val, bias_init, bias_val):
Layer.__init__(self, layer_name)
self.layer_type = 'Output'
self.act_function = act_function
self.weight_init = weight_init
self.weight_val = float(weight_val)
self.bias_init = bias_init
self.bias_val = float(bias_val)
|
[
"j_gooch808@hotmail.com"
] |
j_gooch808@hotmail.com
|
a077a0210a9ead65fa1545bd027f14dc8f564d61
|
20eac8f94ef0a59ef65670545e2f1b8fb719425f
|
/eventos/migrations/0002_auto_20210522_0314.py
|
78766c13cfa331ea9b1aefb40689a4801f28a1f3
|
[] |
no_license
|
edgardo28081/gomez
|
fcc1c3fd9d9ce76a8a892d16c78d162556f067dc
|
b51f3af15d413789298488736365011acd77ee43
|
refs/heads/main
| 2023-05-31T17:39:06.234220
| 2021-06-10T19:54:46
| 2021-06-10T19:54:46
| 375,813,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
# Generated by Django 3.2 on 2021-05-22 07:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('eventos', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='recuerdos',
name='foto4',
),
migrations.RemoveField(
model_name='recuerdos',
name='foto5',
),
migrations.RemoveField(
model_name='recuerdos',
name='foto6',
),
]
|
[
"edgardomarcano04@gmail.com"
] |
edgardomarcano04@gmail.com
|
f8bc14a1c8b118e1a3d390327c9c1d7f9a1cbbd5
|
7a68632e1788079f40894501e8394d89ebf784df
|
/mysite/settings.py
|
d540fa83124150c965b10568958646da1b003d75
|
[] |
no_license
|
Audywb/pythonanywhere
|
4f790dad374758f5419b59f4c59d9b22b4099881
|
4ec7494ad0e3fe478b9e6e6e56ed3ef8b1f29201
|
refs/heads/master
| 2023-01-02T14:56:31.593485
| 2020-10-25T13:38:30
| 2020-10-25T13:38:30
| 285,757,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,637
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l#d)=k3^2f9^39&__hb26y@cf+p95jv#g=p67-5yf9a1gi-$1+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [u'audyyy.pythonanywhere.com',u'localhost',u'127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myweb',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'/var/www/static/',
]
STATIC_URL = '/static/'
# default static files settings for PythonAnywhere.
# see https://help.pythonanywhere.com/pages/DjangoStaticFiles for more info
MEDIA_ROOT = u'/home/wichit2s/mysite/media'
MEDIA_URL = '/media/'
STATIC_ROOT = u'/home/wichit2s/mysite/static'
STATIC_URL = '/static/'
|
[
"thesombats@gmail.com"
] |
thesombats@gmail.com
|
57c8c4f7a53557e403719802170a2e4a7bd660c6
|
9ecd7568b6e4f0f55af7fc865451ac40038be3c4
|
/tianlikai/hubei/enshi_zhongbiao.py
|
aa1eb42ebd5cbeb6d019ac1072c18bf552fa29cc
|
[] |
no_license
|
jasonTLK/scrapy
|
f5ac6e575e902c077a07dc0eb9d228506f1a173f
|
2de8245fbc8731cfd868bbd91168e26271045300
|
refs/heads/master
| 2021-01-20T04:22:23.080864
| 2017-04-28T07:46:29
| 2017-04-28T07:46:29
| 89,681,374
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,967
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
import datetime
from items.biding import biding_gov
from utils.toDB import *
# 湖北恩施招投标网站
# 中标信息
class hz_gov_Spider(scrapy.Spider):
name = "enshi_zhongbiao.py"
allowed_domains = ["eszggzy.cn"]
custom_settings = {
"DOWNLOADER_MIDDLEWARES": {
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'middlewares.useragent_middleware.RandomUserAgent': 400,
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
# 'middlewares.proxy_middleware.ProxyMiddleware': 250,
# 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
# 'middlewares.retry_middleware.RetryWithProxyMiddleware': 300,
# 'middlewares.timestamp_middleware.TimestampMiddleware': 120
}
}
def start_requests(self):
urls = [
"http://www.eszggzy.cn/TPFront/jyxx/070001/070001003/?Paging=",
"http://www.eszggzy.cn/TPFront/jyxx/070002/070002003/?Paging=",
]
pages = [21, 20]
for i in range(len(urls)):
num=1
while num<=pages[i]:
url =urls[i]+str(num)
num+=1
# print url
yield Request(url=url,callback=self.parse)
# start_urls = [
# "http://www.eszggzy.cn/TPFront/jyxx/070001/070001003/?Paging=1"
# ]
def parse(self, response):
selector = Selector(response)
names = selector.xpath("//td[@align='left']//a/@title").extract()
urls = selector.xpath("//td[@align='left']//a/@href").extract()
print len(names),len(urls)
for i in range(len(names)):
url = "http://www.eszggzy.cn" + "".join(urls[i+4])
str = "".join(names[i]) + "," + url
print str
yield Request(url=url, callback=self.parse2, meta={"info": str})
def parse2(self, response):
infos = response.meta["info"]
items = biding_gov()
items["url"] = response.url
items["name"] = "".join(infos).split(",")[0]
items["info"] = ""
items["create_time"] = datetime.datetime.now()
items["update_time"] = datetime.datetime.now()
page_info = "".join(response.body)
items["info"] = "".join(page_info).decode('gbk')
db = MongodbHandle("172.20.3.10 ", 27017, "spiderBiding")
db.get_insert(
"bid_hubei_EnShi",
{
"url": items["url"],
"name": items["name"],
"info": items["info"],
"create_time": items["create_time"],
"update_time": items["update_time"]
}
)
print items["url"]
print items["name"]
|
[
"18723163167@163.com"
] |
18723163167@163.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.