Dataset columns (type, observed range):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2..616)
content_id: string (length 40)
detected_licenses: list (length 0..69)
license_type: string (2 classes)
repo_name: string (length 5..118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4..63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k..686M, nullable)
star_events_count: int64 (0..209k)
fork_events_count: int64 (0..110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool
is_generated: bool
length_bytes: int64 (2..10.3M)
extension: string (246 classes)
content: string (length 2..10.3M)
authors: list (length 1)
author_id: string (length 0..212)
blob_id: 1ace6c9b79d2976602572934c15bbfb976f2eea9 | directory_id: 37d8c1058a2362734aaffd9d9b94288e08caa7ad | path: /cv/roto.1.py | content_id: 0926c62772f83c98186c56898cad0359f1531d99
detected_licenses: [] | license_type: no_license | repo_name: lewan42/pstu_ecobot | snapshot_id: ef9ccd9dadedd5d947ddf15f635ce3c982627215 | revision_id: 11c72414d9388b90304d942e116b6f1c27b39368 | branch_name: refs/heads/master
visit_date: 2020-06-15T18:01:01.210289 | revision_date: 2019-07-18T10:55:38 | committer_date: 2019-07-18T10:55:38 | github_id: 195,358,356 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2019-07-05T07:14:47 | gha_created_at: 2019-07-05T07:14:46 | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,199 | extension: py
content:
import serial
import sys
#Ver M1.01
#Edited by Masalskiy M from 20.11.2018
from time import sleep
ROTOADDR = 0x02; # address of the host computer
MYADDR = 0x01; # address of the controller
CMD = 0xaa; # marker for the start of a command
SET_CT = 0x02; # command: "move to position"
ASK_CT = 0x03; # command: "report position"
SET_FREE = 0x05;
SET_KEEP = 0x06;
ASK_STATE = 0x07;
SET_VARS = 0x08;
SET_SPEED = 0x09;
ASK_VARS = 0x0A;
SET_C_HOLD= 0x0B;
SET_T_HOLD= 0x0C;
SET_LASER_ON= 0x0D;
SET_LASER_OFF= 0x0E;
ASK_USONIC= 0x0F;
ASK_VLMM= 0x10;
SET_CTZ = 0x11; # command: "move to position Z"
ASK_CTZ = 0x12; # command: "report position Z"
# ASK_
MSG_STATE = 0x14; # device state
MSG_READY = 0x15; # device ready
MSG_POS = 0x16; # position sent
LT = 1;
RT = -1;
UP = -1;
DN = 1;
FREE = 0;
HOLD = 1;
KEEP = 2;
class Roto(object):
port = None
KA = 0
KB = 0
KZ = 0
def __init__(self):
for n in [0,1,2,3,4]:
p = '/dev/ttyUSB%s' % (n)
print("Trying port %s..." % (p), file=sys.stderr)
try:
self.port = serial.Serial(
port=p,
baudrate=57600,
parity='N',
stopbits=1,
bytesize=8,
timeout=500,
)
sleep(2)
#self.to_port(self.to_cmd(SET_SPEED, 70, 40))
#state, cPOS, tPOS, DELTA, MODE, tMIN_DEG, tMAX_DEG, cMIN_DEG, cMAX_DEG, tMIN, tMAX, cMIN, cMAX, cSPD, tSPD = self.from_ans(self.from_port())
#self.from_port()
#assert tSPD == 40
#self.to_port(self.to_cmd(SET_CT, self.KA, self.KB))
#self.from_port()
#state, cPOS, tPOS = self.from_ans(self.from_port())
#assert state == MSG_POS
break
            except serial.SerialException as e:
                # print the exception itself (the original printed the bound with_traceback method)
                print(e, file=sys.stderr)
                self.port = None
        assert self.port is not None
def to_port(self, string):
print('>>> %s' % (string), file=sys.stderr)
self.port.write(string)
def from_port(self):
_ = self.port.readline()
print('<<< %s' % (_), file=sys.stderr)
return str(_)
def to_cmd(self, *args):
s = "%s %s " % (ROTOADDR, CMD, )
for arg in args:
s += str(arg) + " "
s = s.strip()
s += "\n"
return bytes(s,'ascii')
def from_ans(self, string=""):
cmd, host, state = 0,0,0
try:
_ = [int(_) for _ in string.strip("'b\\n\\r").split(' ')]
print(_, file=sys.stderr)
host, cmd, state = _[:3]
        except (ValueError, IndexError) as e:
            # parsing ints out of the reply raises ValueError/IndexError, not SerialException
            print(e, file=sys.stderr)
assert cmd == CMD and host == MYADDR
return [state] + _[3:]
def move(self, c, t):
if type(c) == list: c = int(c[0])
if type(t) == list: t = int(t[0])
self.to_port(self.to_cmd(SET_CT,c + self.KA, t + self.KB))
return self.from_ans(self.from_port())
def moveZ(self, z):
if type(z) == list: z = int(z[0])
self.to_port(self.to_cmd(SET_CTZ,z + self.KZ))
return self.from_ans(self.from_port())
    #----------------- Example of adding a function ------------
def laserOn(self):
self.to_port(self.to_cmd(SET_LASER_ON))
return self.from_ans(self.from_port())
def laserOff(self):
self.to_port(self.to_cmd(SET_LASER_OFF))
return self.from_ans(self.from_port())
#----------------------------------------------------
def ask_uS(self):
self.to_port(self.to_cmd(ASK_USONIC))
_, _a = self.from_ans(self.from_port())
return _, _a
def ask_VL(self):
self.to_port(self.to_cmd(ASK_VLMM))
_, _a = self.from_ans(self.from_port())
return _, _a
def ask(self):
self.to_port(self.to_cmd(ASK_CT))
_, _a, _b, _z = self.from_ans(self.from_port())
_a -= self.KA
_b -= self.KB
_z -= self.KZ
return _, _a, _b,_z
if __name__ == "__main__":
r = Roto()
r.ask()
    #------------- Running the example ----------
#r.laserOn()
#sleep(3)
#r.laserOff()
#-------------------------------------
#r.move(150,150)
#r.move(10,-10)
#r.move(-10,-10)
#r.move(-10,10)
#r.move(0,0)
# r.move(180, 180)
# r.moveZ(50)
# r.laserOn()
# sleep(10)
# r.move(120, 150)
# r.moveZ(10)
# sleep(10)
# r.move(180, 180)
# r.moveZ(50)
# sleep(10)
r.laserOff()
# r.moveZ(-30)
# sleep(2)
# r.move(150, 180)
# sleep(2)
# r.move(180, 180)
# r.moveZ(30)
# sleep(2)
# r.move(180, 150)
# r.laserOn()
# r.moveZ(-30)
# sleep(1)
# print(r.ask_VL())
# r.move(180, 180)
# r.moveZ(-30)
# sleep(3)
# print(r.ask_uS())
# r.laserOff()
# r.moveZ(60)
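A quick check of the wire format implied by the constants above (a sketch: to_cmd() never touches self, so it can be exercised without opening a serial port):

frame = Roto.to_cmd(None, SET_CT, 90, 45)
# ROTOADDR == 2 and CMD == 0xAA == 170, so the ASCII frame is:
assert frame == b'2 170 2 90 45\n'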
authors: ["master-forve@ya.ru"] | author_id: master-forve@ya.ru

blob_id: 0a96dabd1afb8bcf75c36331d8d72eba0c8e42d4 | directory_id: 3aadfc743fb3440ef564003e332260365d9639e8 | path: /box_lake.py | content_id: eb3f4ad7f2ab42b77ef50e646dfdd52159ffdf24
detected_licenses: [] | license_type: no_license | repo_name: bbuman/tbl | snapshot_id: b33a41e78f71fd66b489b3a4b4b5d82c700b49ef | revision_id: 0d1f683283d8e8e053d02958aa6ee5ddf12ac395 | branch_name: refs/heads/master
visit_date: 2023-02-03T10:29:11.802753 | revision_date: 2020-12-15T07:49:31 | committer_date: 2020-12-15T07:49:31 | github_id: 320,510,118 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,994 | extension: py
content:
class Lake:
def __init__(self, lake_area, lake_volume, benthic_macrophytes, benthic_macrofauna, benthic_bacteria, phytoplankton,
zooplankton, bacterioplankton, water_IC, water_OC, sediment):
"""Create the lake pool.
Keyword arguments:
lake_area -- area of the lake in the catchment [float] unit [m2]
lake_volume -- volume of the lake [float] unit [m3]
benthic_macrophytes -- mass of the benthic macrophyte vegetation [float] unit [kg C / m2]
benthic_macrofauna -- mass of the benthic macro fauna [float] unit [kg C / m2]
benthic_bacteria -- mass of the benthic bacteria population [float] unit [kg C / m2]
phytoplankton -- mass of the phytoplankton community in the water [float] unit [kg C / m3]
zooplankton -- mass of the zooplankton community in the water [float] unit [kg C / m3]
bacterioplankton -- mass of the bacterioplankton community in the water [float] unit [kg C / m3]
water_IC -- mass of the inorganic carbon dissolved in the water [float] unit [kg C / m3]
water_OC -- mass of the organic carbon dissolved in the water [float] unit [kg C / m3]
sediment -- mass of the carbon in the sediment [float] unit [kg C / m2]
"""
## Geometry
self.area = lake_area
self.volume = lake_volume
## Organisms
self.benthic_macrophytes = benthic_macrophytes
self.benthic_macrofauna = benthic_macrofauna
self.benthic_bacteria = benthic_bacteria
self.phytoplankton = phytoplankton
self.zooplankton = zooplankton
self.bacterioplankton = bacterioplankton
## Dissolved
self.water_IC = water_IC
self.water_OC = water_OC
## Sediment
self.sediment = sediment
## Biomass
        self.determine_benthic_macrophyte_biomass()
self.determine_benthic_macrofauna_biomass()
self.determine_benthic_bacteria_biomass()
self.determine_phytoplankton_biomass()
self.determine_zooplankton_biomass()
self.determine_bacterioplankton_biomass()
## Total IC
self.determine_total_IC()
## Total OC
self.determine_total_OC()
## Total Sediment
self.determine_sediment_carbon()
## Sum up
self.determine_total()
    def determine_benthic_macrophyte_biomass(self):
        self.biomass_benthic_macrophyte = self.benthic_macrophytes * self.area
def determine_benthic_macrofauna_biomass(self):
self.biomass_benthic_macrofauna = self.benthic_macrofauna * self.area
def determine_benthic_bacteria_biomass(self):
self.biomass_benthic_bacteria = self.benthic_bacteria * self.area
def determine_phytoplankton_biomass(self):
self.biomass_phytoplankton = self.phytoplankton * self.volume
def determine_zooplankton_biomass(self):
self.biomass_zooplankton = self.zooplankton * self.volume
def determine_bacterioplankton_biomass(self):
self.biomass_bacterioplankton = self.bacterioplankton * self.volume
def determine_total_IC(self):
self.total_IC = self.water_IC * self.volume
def determine_total_OC(self):
self.total_OC = self.water_OC * self.volume
def determine_sediment_carbon(self):
self.carbon_sediment = self.sediment * self.area
def determine_total(self):
self.total_carbon = (self.biomass_benthic_macrophyte + self.biomass_benthic_macrofauna +
self.biomass_benthic_bacteria + self.biomass_phytoplankton + self.biomass_zooplankton +
self.biomass_bacterioplankton + self.total_IC + self.total_OC + self.carbon_sediment)
# ------------< FLUXES >----------------------------------------------------------------------------------------
def set_fluxes(self, benthic_npp, benthic_respiration, pelagic_gpp, pelagic_resp_auto, pelagic_resp_hetero,
lake_resp_hetero, sediment_accumulation, emission, deposition, lake_water_IC_out, lake_water_OC_out, lake_water_IC_in, lake_water_OC_in):
""" Define the fluxes in the lake.
Keyword arguments:
benthic_npp -- the npp for the sediment dwelling organisms [float] unit [kg C / m2 a]
        benthic_respiration -- the respiration of the sediment dwelling organisms [float] unit [kg C / m2 a]
pelagic_gpp -- the gpp of the aquatic producers [float] unit [kg C / m3 a]
pelagic_resp_auto -- the autotrophic respiration of the aquatic producers [float] unit [kg C / m3 a]
pelagic_resp_hetero -- the heterotrophic respiration of the aquatic consumers in summer [float] unit [kg C / m3 a]
lake_resp_hetero -- the heterotrophic respiration of the aquatic consumers in winter [float] unit [kg C / m3 a]
sediment_accumulation -- the amount of carbon incorporated into sediments [float] unit [kg C / m2 a]
emission -- the amount of carbon lost to the atmosphere [float] unit [kg C / m2 a]
deposition -- annual deposition of carbon onto the lake [float] unit [kg C / m2 a]
lake_water_IC_out -- the amount of inorganic carbon exported from the lake downstream [float] unit [kg C / m3 a]
lake_water_OC_out -- the amount of organic carbon exported from the lake downstream [float] unit [kg C / m3 a]
lake_water_IC_in -- the amount of inorganic carbon imported from the catchment [float] unit [kg C / a]
lake_water_OC_in -- the amount of organic carbon imported from the catchment [float] unit [kg C / a]
"""
# Benthic NPP
self.benthic_npp = benthic_npp
self.determine_total_benthic_npp()
# Benthic Respiration
self.benthic_respiration = benthic_respiration
self.determine_total_benthic_respiration()
# Pelagic NPP
self.pelagic_gpp = pelagic_gpp
self.pelagic_resp_auto = pelagic_resp_auto
self.pelagic_resp_hetero = pelagic_resp_hetero
self.lake_resp_hetero = lake_resp_hetero
self.determine_pelagic_npp()
self.determine_total_pelagic_npp()
# Sediment accumulation
self.sediment_accumulation = sediment_accumulation
        self.determine_total_sediment_accumulation()
# CO2 emission
self.emission = emission
self.determine_total_emission()
# Export
self.lake_water_IC_out = lake_water_IC_out
self.determine_total_lake_water_IC_out()
self.lake_water_OC_out = lake_water_OC_out
self.determine_total_lake_water_OC_out()
# Import
self.total_lake_water_IC_in = lake_water_IC_in
self.total_lake_water_OC_in = lake_water_OC_in
# Deposition
self.deposition = deposition
self.determine_total_deposition()
def determine_total_benthic_npp(self):
self.total_benthic_npp = self.benthic_npp * self.area
def determine_total_benthic_respiration(self):
self.total_benthic_respiration = self.benthic_respiration * self.area
def determine_pelagic_npp(self):
self.pelagic_npp = self.pelagic_gpp - self.pelagic_resp_auto - self.pelagic_resp_hetero - self.lake_resp_hetero
def determine_total_pelagic_npp(self):
self.total_pelagic_npp = self.pelagic_npp * self.area
    def determine_total_sediment_accumulation(self):
        self.total_sediment_accumulation = self.sediment_accumulation * self.area
def determine_total_emission(self):
self.total_emission = self.emission * self.area
def determine_total_lake_water_IC_out(self):
self.total_lake_water_IC_out = self.lake_water_IC_out * self.volume
def determine_total_lake_water_OC_out(self):
self.total_lake_water_OC_out = self.lake_water_OC_out * self.volume
def determine_total_deposition(self):
self.total_deposition = self.deposition * self.area
# ------------< Process Functions >----------------------------------------------------------------------------------------
def export_IC_downstream(self):
self.total_IC -= self.total_lake_water_IC_out
def export_OC_downstream(self):
self.total_OC -= self.total_lake_water_OC_out
def lake_to_atmo(self):
self.total_IC -= self.total_emission
def import_IC(self):
self.total_IC += self.total_lake_water_IC_in
def import_OC(self):
self.total_OC += self.total_lake_water_OC_in
def atmo_to_lake(self):
self.total_OC += self.total_deposition
    def lake_production(self):
        self.total_OC += self.total_benthic_npp
        self.total_OC -= self.total_benthic_respiration
        # use the area-scaled total (kg C / a), consistent with the benthic terms above
        self.total_OC += self.total_pelagic_npp
def sediment_incorporation(self):
self.carbon_sediment += self.total_sediment_accumulation
self.total_OC -= self.total_sediment_accumulation
def update_total_carbon(self):
self.determine_total()
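A minimal usage sketch of the Lake pool (all numbers hypothetical; units as in the docstrings): build the pool, register annual fluxes, apply the process functions, then recompute the total.

lake = Lake(lake_area=1.0e6, lake_volume=5.0e6,
            benthic_macrophytes=0.02, benthic_macrofauna=0.005, benthic_bacteria=0.001,
            phytoplankton=0.0005, zooplankton=0.0002, bacterioplankton=0.0001,
            water_IC=0.01, water_OC=0.005, sediment=10.0)
lake.set_fluxes(benthic_npp=0.05, benthic_respiration=0.03,
                pelagic_gpp=0.04, pelagic_resp_auto=0.01,
                pelagic_resp_hetero=0.005, lake_resp_hetero=0.002,
                sediment_accumulation=0.01, emission=0.02, deposition=0.001,
                lake_water_IC_out=0.003, lake_water_OC_out=0.002,
                lake_water_IC_in=5000.0, lake_water_OC_in=3000.0)
lake.import_IC(); lake.import_OC()          # catchment imports
lake.lake_production()                      # benthic and pelagic NPP
lake.sediment_incorporation()               # burial of OC into the sediment pool
lake.lake_to_atmo()                         # CO2 emission
lake.export_IC_downstream(); lake.export_OC_downstream()
lake.update_total_carbon()
print(lake.total_carbon)                    # [kg C]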
|
[
"spam.rooster@protonmail.com"
] |
spam.rooster@protonmail.com
|
d929ed62a19aa356c1852b172451852bf8d303b6
|
0db18688bda2f7f1c14ed4c35a645c07cb0ca552
|
/FYP UI/dashboard-updated.py
|
26a81d6d083a01dc513530607bba3235e5eec526
|
[] |
no_license
|
hamzaiqbal786/FYP-Adaptive-Clustering-For-Gesture-Analysis
|
f472ca0803a2d931d2797df33bb6f7fddd73e855
|
aeeaff93ef5e3e8287f13cf56607043c3ca4c159
|
refs/heads/main
| 2023-02-24T02:50:14.673243
| 2021-01-22T20:03:39
| 2021-01-22T20:03:39
| 332,046,912
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,096
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dashboard-updated.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog
from PyQt5.QtGui import QIcon
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1011, 520)
MainWindow.setStyleSheet("background-color: rgb(255, 255, 255);")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(-20, 10, 1041, 61))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(22)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label.setAutoFillBackground(False)
self.label.setStyleSheet("background-color: rgb(85, 0, 127);\n"
"color: rgb(255, 255, 255);\n"
"")
self.label.setObjectName("label")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setGeometry(QtCore.QRect(10, 90, 491, 191))
self.groupBox_2.setStyleSheet("background-color: rgb(241, 241, 241);")
self.groupBox_2.setObjectName("groupBox_2")
self.video_text_box = QtWidgets.QTextEdit(self.groupBox_2)
self.video_text_box.setGeometry(QtCore.QRect(10, 50, 471, 31))
self.video_text_box.setStyleSheet("background-color: rgb(255, 255, 255);")
self.video_text_box.setObjectName("video_text_box")
self.startButton_2 = QtWidgets.QPushButton(self.groupBox_2)
self.startButton_2.setGeometry(QtCore.QRect(130, 120, 101, 41))
self.startButton_2.setAutoFillBackground(False)
self.startButton_2.setStyleSheet("background-color: rgb(255, 255, 255);")
self.startButton_2.setFlat(False)
self.startButton_2.setObjectName("startButton_2")
        # opening the file dialog on button click
self.startButton_2.clicked.connect(self.openFile)
self.clear_button = QtWidgets.QPushButton(self.groupBox_2)
self.clear_button.setGeometry(QtCore.QRect(250, 120, 101, 41))
self.clear_button.setAutoFillBackground(False)
self.clear_button.setStyleSheet("background-color: rgb(255, 255, 255);")
self.clear_button.setFlat(False)
self.clear_button.setObjectName("clear_button")
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setGeometry(QtCore.QRect(510, 90, 491, 191))
self.groupBox_3.setStyleSheet("background-color: rgb(241, 241, 241);")
self.groupBox_3.setObjectName("groupBox_3")
self.startButton = QtWidgets.QPushButton(self.groupBox_3)
self.startButton.setGeometry(QtCore.QRect(190, 130, 101, 41))
self.startButton.setAutoFillBackground(False)
self.startButton.setStyleSheet("background-color: rgb(255, 255, 255);")
self.startButton.setFlat(False)
self.startButton.setObjectName("startButton")
self.radioButton_25 = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_25.setGeometry(QtCore.QRect(50, 30, 111, 17))
self.radioButton_25.setChecked(False)
self.radioButton_25.setAutoRepeat(False)
self.radioButton_25.setObjectName("radioButton_25")
self.radioButton_50 = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_50.setGeometry(QtCore.QRect(330, 30, 111, 17))
self.radioButton_50.setChecked(False)
self.radioButton_50.setObjectName("radioButton_50")
self.radioButton_75 = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_75.setGeometry(QtCore.QRect(50, 80, 111, 17))
self.radioButton_75.setChecked(True)
self.radioButton_75.setObjectName("radioButton_75")
self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_4.setGeometry(QtCore.QRect(10, 300, 991, 171))
self.groupBox_4.setStyleSheet("background-color: rgb(241, 241, 241);")
self.groupBox_4.setObjectName("groupBox_4")
self.label_3 = QtWidgets.QLabel(self.groupBox_4)
self.label_3.setGeometry(QtCore.QRect(410, 20, 131, 21))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.groupBox_4)
self.label_4.setGeometry(QtCore.QRect(710, 20, 161, 21))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.groupBox_4)
self.label_5.setGeometry(QtCore.QRect(40, 70, 131, 21))
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.groupBox_4)
self.label_6.setGeometry(QtCore.QRect(40, 120, 131, 21))
self.label_6.setObjectName("label_6")
self.label_cluster_predicted_result = QtWidgets.QLabel(self.groupBox_4)
self.label_cluster_predicted_result.setGeometry(QtCore.QRect(650, 70, 261, 21))
self.label_cluster_predicted_result.setObjectName("label_cluster_predicted_result")
self.label_word_true_result = QtWidgets.QLabel(self.groupBox_4)
self.label_word_true_result.setGeometry(QtCore.QRect(320, 120, 281, 21))
self.label_word_true_result.setObjectName("label_word_true_result")
self.label_word_predicted_result = QtWidgets.QLabel(self.groupBox_4)
self.label_word_predicted_result.setGeometry(QtCore.QRect(640, 120, 281, 21))
self.label_word_predicted_result.setObjectName("label_word_predicted_result")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1011, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def openFile(self, checked=False):
        # PyQt5's getOpenFileName returns a (fileName, selectedFilter) tuple
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(None, "Select File", "", "MOV Files (*.mov);;AVI Files (*.avi);;MP4 Files (*.mp4)")
        self.video_text_box.setText(fileName)
        print(fileName)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Adaptive Clustering For Gesture Analysis"))
self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">ADAPTIVE CLUSTERING FOR GESTURE ANALYSIS</span></p></body></html>"))
self.groupBox_2.setTitle(_translate("MainWindow", "Input Video"))
self.startButton_2.setText(_translate("MainWindow", "Select Video"))
self.clear_button.setText(_translate("MainWindow", "Clear"))
self.groupBox_3.setTitle(_translate("MainWindow", "Frames Percentage"))
self.startButton.setText(_translate("MainWindow", "Start"))
self.radioButton_25.setText(_translate("MainWindow", "First 25% Frames"))
self.radioButton_50.setText(_translate("MainWindow", "First 50% Frames"))
self.radioButton_75.setText(_translate("MainWindow", "First 75% Frames"))
self.groupBox_4.setTitle(_translate("MainWindow", "Output"))
self.label_3.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt; font-weight:600;\">True Results</span></p></body></html>"))
self.label_4.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt; font-weight:600;\">Predicted Results</span></p></body></html>"))
        self.label_5.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt; font-weight:600;\">Cluster Name</span></p></body></html>"))
        self.label_6.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt; font-weight:600;\">Word Name</span></p></body></html>"))
self.label_cluster_predicted_result.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt; font-weight:600;\">B</span></p><p align=\"center\"><br/></p></body></html>"))
        self.label_word_true_result.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt; font-weight:600;\">Good Night</span><br/></p></body></html>"))
        self.label_word_predicted_result.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt; font-weight:600;\">Good Night</span><br/></p></body></html>"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
hamzaiqbal786.noreply@github.com
|
f6db77f4a6e9500b2eac2f6f22c8da4e6e5571a5
|
9ebb54663069482988166480d1dde9e82bbf66cc
|
/main.py
|
47820be4a5647056e521b0c89505475e826a2704
|
[] |
no_license
|
DHRUV-CODER/DogeHouse-Bot
|
f6a89cee96c1df27b2a5dad4cbd81406e5b22652
|
779948bf80c63cb2720a11eee66db5c7c9a9e5d5
|
refs/heads/master
| 2023-04-28T03:57:02.380063
| 2021-05-07T11:52:46
| 2021-05-07T11:52:46
| 356,579,395
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,246
|
py
|
import dogehouse
import requests as r
import os
import keep_alive
import json
token = os.environ.get('TOKEN')
refresh_token = os.environ.get('REFRESH_TOKEN')
class Client(dogehouse.DogeClient):
@dogehouse.event
async def on_ready(self):
print(f"Logged on as {self.user.username}")
await self.create_room("Bot Room || Jokes || Facts & more..")
# await self.join_room("1fbb7d1e-d3b1-43ae-9854-06cd9231fe50")
print("Joined")
@dogehouse.event
async def on_message(self, message):
# if message.author.id == self.user.id:
# return
if message.content.startswith("hi"):
await self.send("sup , how are ya")
@dogehouse.command
async def help(self, ctx):
whisper_that_g = [ctx.author.id]
await self.send(message="Cmds -> !hanimal , !hcryptog , !hjoke",
whisper=whisper_that_g)
@dogehouse.command
async def hanimal(self, ctx):
whisper_that_g = [ctx.author.id]
await self.send(
message=
"!catfact, !dogfact, !pandafact, !foxfact, !birdfact, !koalafact",
whisper=whisper_that_g)
@dogehouse.command
async def hcryptog(self, ctx):
whisper_that_g = [ctx.author.id]
await self.send(message="!encode <msg>, !decode <msg>",
whisper=whisper_that_g)
@dogehouse.command
async def hjoke(self, ctx):
whisper_that_g = [ctx.author.id]
await self.send(message="!joke", whisper=whisper_that_g)
@dogehouse.command
async def hello(self):
await self.send("Hello!")
# @dogehouse.command
# async def ping(self, ctx):
# await self.send(f"Hello {ctx.author.mention}")
@dogehouse.command
async def catfact(self, ctx):
resp = r.get("https://some-random-api.ml/facts/cat").json()
await self.send(message=f"{resp['fact']}", whisper=[ctx.author.id])
@dogehouse.command
async def dogfact(self, ctx):
resp = r.get("https://some-random-api.ml/facts/dog").json()
await self.send(message=f"{resp['fact']}", whisper=[ctx.author.id])
@dogehouse.command
async def pandafact(self, ctx):
resp = r.get("https://some-random-api.ml/facts/panda").json()
await self.send(message=f"{resp['fact']}", whisper=[ctx.author.id])
@dogehouse.command
async def foxfact(self, ctx):
resp = r.get("https://some-random-api.ml/facts/fox").json()
await self.send(message=f"{resp['fact']}", whisper=[ctx.author.id])
@dogehouse.command
async def birdfact(self, ctx):
resp = r.get("https://some-random-api.ml/facts/bird").json()
await self.send(message=f"{resp['fact']}", whisper=[ctx.author.id])
@dogehouse.command
async def koalafact(self, ctx):
resp = r.get("https://some-random-api.ml/facts/koala").json()
await self.send(message=f"{resp['fact']}", whisper=[ctx.author.id])
@dogehouse.command
async def encode(self, ctx, *, encode_msg="no_msg"):
resp = r.get(
f"https://some-random-api.ml/base64?encode={encode_msg}").json()
await self.send(message=f"-> {resp['base64']}",
whisper=[ctx.author.id])
@dogehouse.command
async def decode(self, ctx, *, decode_msg="bm8gbXNn"):
resp = r.get(
f"https://some-random-api.ml/base64?decode={decode_msg}").json()
await self.send(message=f"-> {resp['text']}", whisper=[ctx.author.id])
@dogehouse.command
async def joke(self, ctx):
resp = r.get(f"https://v2.jokeapi.dev/joke/Any?type=single").json()
await self.send(message=f"{resp['joke']}", whisper=[ctx.author.id])
@dogehouse.command
async def reg(self, ctx, *, act):
with open("currentAct.json", "r") as f:
reg = json.load(f)
reg['Activity'] = act
with open("currentAct.json", "w+") as f:
json.dump(reg, f, indent=4)
await self.send(message=f"Done!!, Actvity Changed To -> {act}",
whisper=[ctx.author.id])
@dogehouse.command
async def what_we_doin(self, ctx):
with open("currentAct.json", "r") as f:
reg = json.load(f)
actv = reg['Activity']
await self.send(message=f'-> {actv}', whisper=[ctx.author.id])
@dogehouse.command
async def source(self, ctx):
url = 'https://github.com/DHRUV-CODER/DogeHouse-Bot'
await self.send(message=f'-> {url}', whisper=[ctx.author.id])
# @dogehouse.event
# async def on_error(self,error):
# await self.send(f"oops -> {error}")
# print(f"-> {error}")
@dogehouse.event
async def on_user_join(self, user):
userNameToWhisper = [user.id]
await self.send(
message=
f"welcome `{user.username}` !! , Pls Type `!help` For More Info & Btw If Udk What We Doin Type -> `!what_we_doin`",
whisper=userNameToWhisper)
# @dogehouse.event
# async def on_user_leave(self,user):
# await self.send(f"{user.username} Just Left")
keep_alive.keep_alive()
if __name__ == "__main__":
Client(token, refresh_token, prefix="!").run()
authors: [""] | author_id: ""

blob_id: 7844ad919dbe6add95bffc675d9fa9136a3abdc8 | directory_id: b2ff37406e50976b8db1c75bde4297f5039f7391 | path: /Students/khushi_flask/Day1/file_read.py | content_id: 100a6f19c50c722efa30eb60f4a2be2a07e31636
detected_licenses: ["MIT"] | license_type: permissive | repo_name: Throttlerz-devs/flask-tutorial | snapshot_id: fa21a13e9b3c23992efaf33c5e3828d4086f9540 | revision_id: 09e1ad364c73ffba4e698592219d448865c6b31a | branch_name: refs/heads/master
visit_date: 2023-08-01T07:02:37.069561 | revision_date: 2021-09-17T04:53:14 | committer_date: 2021-09-17T04:53:14 | github_id: 407,056,478 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 96 | extension: py
content:
def read_data():
f=open('todo.txt','r')
data= f.read()
f.close()
return data
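An equivalent context-manager variant (a sketch; the with-block closes the file even if read() raises):

def read_data():
    with open('todo.txt', 'r') as f:
        return f.read()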
authors: ["khushik16001@gmail.com"] | author_id: khushik16001@gmail.com

blob_id: 34fd6dcec550d6150395078c713f106bf57146a2 | directory_id: 6103eb1edbc22f8ea5dddf244466074fe33ff3a6 | path: /clothes/urls.py | content_id: 04d3952c065a952434967ca337b593d09c52f8c6
detected_licenses: [] | license_type: no_license | repo_name: adik0708/thewayshop | snapshot_id: d888a77263269ff8d3f81b3beb94d2645f28690c | revision_id: a3c71db489578b9231fd666954fdc2f237f31707 | branch_name: refs/heads/main
visit_date: 2023-04-17T02:38:17.825392 | revision_date: 2021-05-04T15:01:00 | committer_date: 2021-05-04T15:01:00 | github_id: 364,254,722 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 442 | extension: py
content:
from django.urls import path
from .views import home, products, search, contact, category_detail, outfit_detail
urlpatterns = [
path('', home, name='home'),
path('products/', products, name='products'),
path('search/', search, name='search'),
path('contact/', contact, name='contact'),
path('category/<int:id>', category_detail, name='category_detail'),
path('outfit/<int:id>', outfit_detail, name='outfit_detail')
]
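For reference, the name= arguments let views and templates rebuild these URLs via Django's reverse() (a sketch, assuming this URLconf is included at the site root):

from django.urls import reverse

reverse('home')                               # -> '/'
reverse('category_detail', kwargs={'id': 3})  # -> '/category/3'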
|
[
"akylbekov1@gmail.com"
] |
akylbekov1@gmail.com
|
221cf3f8b981c2f28598a2e9958dcaebe825e227
|
49ca5ef9d9ede14dcd08d5770fe763be649f2185
|
/mooving_iot/utils/logger.py
|
870132070d66bcf64bbe5edd629e0760f7aa07e1
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
DAVFoundation/iot-prototyping-platform
|
94cacbb318c3a4bbd1b0297d8c53d179d77fa700
|
937578333a1a6a06235a8290486a2906d1a43a7c
|
refs/heads/master
| 2022-06-12T14:42:03.726130
| 2019-12-31T21:06:54
| 2019-12-31T21:06:54
| 199,249,645
| 1
| 0
|
NOASSERTION
| 2022-05-25T03:06:38
| 2019-07-28T06:09:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,568
|
py
|
#***************************************************************************************************
# Imports
#***************************************************************************************************
# Global packages imports
import datetime
import os
import threading
# Local packages imports
import mooving_iot.project_config as prj_cfg
import mooving_iot.utils.exit as utils_exit
#***************************************************************************************************
# Private constants
#***************************************************************************************************
_MSG_TYPE_STR_ERR = 'ERROR'
_MSG_TYPE_STR_WARN = 'WARNING'
_MSG_TYPE_STR_INFO = 'INFO'
_MSG_TYPE_STR_DEBUG = 'DEBUG'
#***************************************************************************************************
# Public classes
#***************************************************************************************************
class Logger:
__log_file = None
__file_lock = threading.Lock()
__print_lock = threading.Lock()
def __init__(self, module_name, log_level):
assert type(module_name) is str, 'Value should be a string!'
assert isinstance(log_level, prj_cfg.LogLevel), 'Value should be a LogLevel enum value!'
self._module_name = module_name
self._log_level = log_level
        if prj_cfg.FILE_LOG_ENABLE and (Logger.__log_file is None):
file_path_name = '{path}/log_{date}.log'.format(
path=prj_cfg.FILE_LOG_PATH,
date=datetime.datetime.utcnow().strftime('%Y_%m_%d_T%H_%M_%S_%f'))
with Logger.__file_lock:
try:
os.makedirs(prj_cfg.FILE_LOG_PATH, exist_ok=True)
Logger.__log_file = open(file=file_path_name, mode='w')
except OSError as err:
self.error('Cannot open file: {file}, error: {err}'
.format(file=file_path_name, err=err))
else:
utils_exit.register_on_exit(Logger.close_log_file)
def error(self, value, *args):
if self._is_log_enabled(prj_cfg.LogLevel.ERROR):
self._print(_MSG_TYPE_STR_ERR, value, *args)
def warning(self, value, *args):
if self._is_log_enabled(prj_cfg.LogLevel.WARNING):
self._print(_MSG_TYPE_STR_WARN, value, *args)
def info(self, value, *args):
if self._is_log_enabled(prj_cfg.LogLevel.INFO):
self._print(_MSG_TYPE_STR_INFO, value, *args)
def debug(self, value, *args):
if self._is_log_enabled(prj_cfg.LogLevel.DEBUG):
self._print(_MSG_TYPE_STR_DEBUG, value, *args)
@staticmethod
def close_log_file():
        if Logger.__log_file is not None:
            Logger.__log_file.close()
def _print(self, msg_type, value, *args):
assert type(msg_type) is str, 'Value should be a string!'
assert type(value) is str, 'Value should be a string!'
with Logger.__print_lock:
format_str = '[{date}] <{type}> "{module}": {value}'.format(
date=datetime.datetime.utcnow().isoformat(),
module=self._module_name,
type=msg_type,
value=value)
print(format_str, *args)
            if prj_cfg.FILE_LOG_ENABLE and (Logger.__log_file is not None):
print(format_str, *args, file=Logger.__log_file)
def _is_log_enabled(self, log_level):
return (self._log_level >= log_level) and (prj_cfg.GLOBAL_LOG_LEVEL >= log_level)
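A usage sketch, assuming mooving_iot.project_config defines the LogLevel enum and the FILE_LOG_* settings referenced above (module name hypothetical):

log = Logger('battery_monitor', prj_cfg.LogLevel.DEBUG)
log.info('Voltage: {v} V'.format(v=3.98))
log.error('Cannot reach charge controller')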
|
[
"oleksandr.vanzuriak@lemberg.co.ua"
] |
oleksandr.vanzuriak@lemberg.co.ua
|
12ab47844ddb06acc0e92fde5463668505dcec45
|
29f08a6a1191c6f07c688136539337cf8a117c9d
|
/z3solver/z3solver.py
|
d71346a843c5f56651370910342b0b0847b30dd3
|
[] |
no_license
|
rajivkris/Artificial_Intelligence
|
b138420c8a71ab09c481de217d82862ac59a1650
|
045fef0c27d45d98f6b99a3a8b3f51d8215357b9
|
refs/heads/main
| 2023-01-27T12:48:21.206083
| 2020-12-01T19:27:18
| 2020-12-01T19:27:18
| 311,738,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
from z3 import *
x, y, z = Reals('x y z')
solver = Solver()
solver.add(2 * x + 3 * y == 5)
solver.check()
solver.add(y + 3 * z > 3)
solver.add(x - 3 * z <= 10)
print("Constraint added")
solver.add(x > -5, x < 5)
solver.add(y > 0)
print(f'Check satisfiability of solver {solver.check()}')
print(f'Solver model is {solver.model()}')
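Individual assignments can also be read out of the model with standard z3py indexing (a short sketch):

m = solver.model()
print('x =', m[x], '| y =', m[y], '| z =', m[z])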
|
[
"rajivkris@gmail.com"
] |
rajivkris@gmail.com
|
a28ad4adcf82d69674924724039e267f78fc5607
|
10a2875baa753e34bcb1784fad66c1606a7d3513
|
/manage.py
|
f6e0af264bf5a2fb796373940e7fdc0bd695e004
|
[] |
no_license
|
cuong1404s7tp/city
|
eb7c30401d8d239f23afe844821457705dbdad86
|
33ae2f567bc1302648ec126e75b775b6dd916049
|
refs/heads/master
| 2020-04-18T02:17:21.588523
| 2019-01-23T09:39:55
| 2019-01-23T09:39:55
| 167,156,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'docity.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"cu.ong2177@gmail.com"
] |
cu.ong2177@gmail.com
|
6eba1fe4f4ea544882132374afc1bd2f331d7373
|
d4ae6d6d1ab13a8a7fb3a8b895421a6a101f66f0
|
/virtual/sysata/pessoa/migrations/0007_convocacao_convocados.py
|
e341deb8c37bbee74cd06cca32e91313e134ef82
|
[] |
no_license
|
eufrazio/programacao_comercial
|
7c75558d91b7d0d5c781aefc7609a24b8f1dd77f
|
0a268db18a97d92aa3bdd601ee44a095cc0870f2
|
refs/heads/master
| 2021-01-22T10:28:17.764085
| 2017-05-17T19:05:57
| 2017-05-17T19:05:57
| 82,005,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-05-09 12:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pessoa', '0006_auto_20170508_2258'),
]
operations = [
migrations.AddField(
model_name='convocacao',
name='convocados',
field=models.ManyToManyField(related_name='pessoa_convocados', to='pessoa.Pessoa'),
),
]
|
[
"eufrazius@gmail.com"
] |
eufrazius@gmail.com
|
fbd2493206b5434d598dec27afe045e16aa5912e
|
8430da1139e9ea5b81b8ac3b2ec0f96985824cf0
|
/assessment/tree.py
|
3cfa1fa83e6b78576019611bba51016931130c56
|
[] |
no_license
|
tom-wagner/ip
|
7d453ff2bbff5555f8ad1353a69ee0f1eed386ea
|
6dc6c6301df09d1dba9575e4f72a413d63b3a6d4
|
refs/heads/master
| 2022-02-20T20:37:31.295952
| 2019-09-28T01:36:23
| 2019-09-28T01:36:23
| 199,317,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
# START TIME:
class Tree:
    def __init__(self, tree):
        self.tree = tree
    def breadth_first_traversal(self):
        result, queue = [], [self.tree]
        while queue:  # level-order via a FIFO queue
            node = queue.pop(0)
            result.append(node['v'])
            queue.extend(node['children'])
        return result
    def depth_first_traversal(self):
        out = []
        def visit(n):  # post-order, matching the expected output below
            for c in n['children']:
                visit(c)
            out.append(n['v'])
        visit(self.tree)
        return out
    def has(self, value):
        return value in self.breadth_first_traversal()
t = {
'v': 5,
'children': [
{'v': 9,
'children': [
{'v': 8,
'children': [
{'v': 0,
'children': []}
]},
{'v': 6,
'children': [
{'v': 1,
'children': []}
]}
]},
{'v': 1,
'children': [
{'v': 4,
'children': [
{'v': 7,
'children': []},
{'v': 0,
'children': []}
]}
]}
]
}
# TREE:
# 5
# 9 1
# 8 6 4
# 0 1 7 0
print(t)
my_tree = Tree(t)
depth_first = [0, 8, 1, 6, 9, 7, 0, 4, 1, 5]
breadth_first = [5, 9, 1, 8, 6, 4, 0, 1, 7, 0]
print('DFS', my_tree.depth_first_traversal() == depth_first)
print('BFS', my_tree.breadth_first_traversal() == breadth_first)
print('has returns true correctly', my_tree.has(7))
print('has returns true correctly', my_tree.has(8))
print('has returns false correctly', my_tree.has('BILL'))
# END TIME:
|
[
"tom.wagner@xaxis.com"
] |
tom.wagner@xaxis.com
|
4e3b6f6b97c41325ad2959082ffe7e93c4e6cc92
|
c60da36fc2e7630a4e767d5ac984a4cff2e44132
|
/train.py
|
5a47a51a419dfc93b5e64e92ea58c6cfeb1de9cc
|
[] |
no_license
|
PMingEli/hand-writing-recognition
|
55cc2c9b9deaff09edd4f54a395b6d21964968d4
|
3a04087da4f8611d637ffa95c106e3dd99c33b10
|
refs/heads/master
| 2022-11-21T04:07:02.492717
| 2020-06-28T15:20:50
| 2020-06-28T15:20:50
| 275,610,641
| 0
| 0
| null | 2020-06-28T15:13:11
| 2020-06-28T15:13:11
| null |
UTF-8
|
Python
| false
| false
| 4,195
|
py
|
import pickle
import os
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torchvision import transforms
from torchsummary import summary
from hwdb import HWDB
from model import ConvNet
def valid(epoch, net, test_loader, writer):
    print("epoch %d: starting validation..." % epoch)
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
images, labels = images.cuda(), labels.cuda()
outputs = net(images)
            # take the class with the highest score
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
        print('correct number:', correct)
        print('total number:', total)
        acc = 100 * correct / total
        print('epoch %d validation accuracy: %d%%' % (epoch, acc))
writer.add_scalar('valid_acc', acc, global_step=epoch)
def train(epoch, net, criterion, optimizer, train_loader, writer, save_iter=100):
    print("epoch %d: starting training..." % epoch)
net.train()
sum_loss = 0.0
total = 0
correct = 0
    # iterate over the training data
for i, (inputs, labels) in enumerate(train_loader):
        # zero the gradients
optimizer.zero_grad()
if torch.cuda.is_available():
inputs = inputs.cuda()
labels = labels.cuda()
outputs = net(inputs)
loss = criterion(outputs, labels)
        # take the class with the highest score
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss.backward()
optimizer.step()
        # every save_iter (100) batches, print the average loss and accuracy
sum_loss += loss.item()
if (i + 1) % save_iter == 0:
batch_loss = sum_loss / save_iter
            # accuracy over the batches since the last report
acc = 100 * correct / total
print('epoch: %d, batch: %d loss: %.03f, acc: %.04f'
% (epoch, i + 1, batch_loss, acc))
writer.add_scalar('train_loss', batch_loss, global_step=i + len(train_loader) * epoch)
writer.add_scalar('train_acc', acc, global_step=i + len(train_loader) * epoch)
for name, layer in net.named_parameters():
writer.add_histogram(name + '_grad', layer.grad.cpu().data.numpy(),
global_step=i + len(train_loader) * epoch)
writer.add_histogram(name + '_data', layer.cpu().data.numpy(),
global_step=i + len(train_loader) * epoch)
total = 0
correct = 0
sum_loss = 0.0
if __name__ == "__main__":
    # hyperparameters
epochs = 20
batch_size = 100
lr = 0.01
data_path = r'data'
log_path = r'logs/batch_{}_lr_{}'.format(batch_size, lr)
save_path = r'checkpoints/'
if not os.path.exists(save_path):
os.mkdir(save_path)
    # load the classification classes
with open('char_dict', 'rb') as f:
class_dict = pickle.load(f)
num_classes = len(class_dict)
    # load the data
transform = transforms.Compose([
transforms.Resize((64, 64)),
transforms.ToTensor(),
])
dataset = HWDB(path=data_path, transform=transform)
print("训练集数据:", dataset.train_size)
print("测试集数据:", dataset.test_size)
trainloader, testloader = dataset.get_loader(batch_size)
net = ConvNet(num_classes)
if torch.cuda.is_available():
net = net.cuda()
# net.load_state_dict(torch.load('checkpoints/handwriting_iter_004.pth'))
    print('Network architecture:\n')
summary(net, input_size=(3, 64, 64), device='cuda')
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr)
writer = SummaryWriter(log_path)
for epoch in range(epochs):
train(epoch, net, criterion, optimizer, trainloader, writer=writer)
valid(epoch, net, testloader, writer=writer)
print("epoch%d 结束, 正在保存模型..." % epoch)
torch.save(net.state_dict(), save_path + 'handwriting_iter_%03d.pth' % epoch)
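The predicted-class bookkeeping used in train() and valid(), in isolation (a standalone sketch with random tensors; real inputs come from the HWDB loader):

import torch
outputs = torch.randn(4, 10)          # batch of 4 samples, 10 classes
labels = torch.tensor([1, 5, 2, 7])
_, predicted = torch.max(outputs, 1)  # index of the highest score per sample
acc = 100 * (predicted == labels).sum().item() / labels.size(0)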
authors: ["1076050774@qq.com"] | author_id: 1076050774@qq.com

blob_id: 21a700bb20d695f0545a44e8ea56ccd2d5c1ecbd | directory_id: d82ac08e029a340da546e6cfaf795519aca37177 | path: /chapter_13_parallel_nn_training_theano/02_array_structures.py | content_id: 041b18247a74fa59fe0cfc17db87096150e8cf80
detected_licenses: [] | license_type: no_license | repo_name: CSwithJC/PythonMachineLearning | snapshot_id: 4409303c3f4d4177dc509c83e240d7a589b144a0 | revision_id: 0c4508861e182a8eeacd4645fb93b51b698ece0f | branch_name: refs/heads/master
visit_date: 2021-09-04T04:28:14.608662 | revision_date: 2018-01-15T20:25:36 | committer_date: 2018-01-15T20:25:36 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 566 | extension: py
content:
import theano
import numpy as np
from theano import tensor as T
# Config Theano to use 32-bit architecture:
theano.config.floatX = 'float32'
#theano.config.device = 'gpu'
# initialize
x = T.fmatrix(name='x')
x_sum = T.sum(x, axis=0)
# compile
calc_sum = theano.function(inputs=[x], outputs=x_sum)
# execute (Python List)
ary = [[1, 2, 3], [1, 2, 3]]
print('Column sum:', calc_sum(ary))
# execute (NumPy array)
ary = np.array([[1, 2, 3], [1, 2, 3]],
dtype=theano.config.floatX)
print('Column sum:', calc_sum(ary))
print('TensorType: ', x.type())
|
[
"jean.mendez2@upr.edu"
] |
jean.mendez2@upr.edu
|
e3f2a8fb28e4efa6e7cf865d6c07704aaea945ea
|
837b0b70497d361ddd6794ca07d29cb2d3b55dbf
|
/model/notInUse/scripts/collectNikud.py
|
589115dab2ddf479b5cfd055bdb5344cc82e78a0
|
[] |
no_license
|
LihiHadjb/nikud
|
7dc213d35ea5558126392ccabbda689767430baf
|
6968e20c0e0ccfdb67ecc4596479868eaa49897b
|
refs/heads/master
| 2023-03-23T01:41:28.279697
| 2021-03-20T23:29:55
| 2021-03-20T23:29:55
| 300,214,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
import pandas as pd
with open('textToInputBasic.txt', 'r') as file:
text = file.read()
result = set()
for c in text:
    # skip Hebrew and Latin letters; collect everything else (nikud marks, punctuation, digits)
    if 'א' <= c <= 'ת' or 'a' <= c <= 'z' or 'A' <= c <= 'Z':
        continue
    result.add(c)
df = pd.DataFrame(result)
df.to_excel("collect.xlsx")
authors: ["48348862+LihiHadjb@users.noreply.github.com"] | author_id: 48348862+LihiHadjb@users.noreply.github.com

blob_id: b30cb1c91f51ab070f30533bea726b2f5ed62392 | directory_id: 9672bb77d97f5bf69d110108314febf54c3bd7ef | path: /Certifications/Combine_Json.py | content_id: 251684e9336ea8d25e148faa9e4c6e594d22a45c
detected_licenses: [] | license_type: no_license | repo_name: haroonrasheed333/CareerTrajectory | snapshot_id: 9001b60ebd03f85878b280320611e5d6a73d359b | revision_id: 3461ecf86af52786cf4950bef54c601b941eac64 | branch_name: refs/heads/master
visit_date: 2021-01-25T08:32:16.745906 | revision_date: 2013-12-12T03:22:32 | committer_date: 2013-12-12T03:22:32 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 507 | extension: py
content:
import os
import json
source_dir = 'certifications_acr'
files = [ f for (dirpath, dirnames, filenames) in os.walk(source_dir) for f in filenames]
print(len(files))
certifications_json = dict()
certifications_json['certifications'] = []
for filename in files:
jsonc = open(source_dir + '/' + filename)
jsonc = json.load(jsonc)
certifications_json['certifications'].append(jsonc)
j = json.dumps(certifications_json, indent=4)
f = open('certifications_final.json', 'w')
print(j, file=f)
f.close()
|
[
"haroonrasheed@berkeley.edu"
] |
haroonrasheed@berkeley.edu
|
4bb43d3dd2e519289bd7d68b2eb6c251c1edeffc
|
b072a98e605a8325cf79efec92ffc564bd588916
|
/example/face_extract_dirs_example.py
|
0ce528fec03101b8ffda7fd93b87177ea712653c
|
[
"MIT"
] |
permissive
|
J77M/facextr
|
176b862fbfa03a210efccda1fabe5f3d2185ec75
|
d13539f4816f0dfde300bd7612b18a9c06dcb3b1
|
refs/heads/master
| 2020-07-21T11:56:06.746216
| 2019-09-07T17:02:48
| 2019-09-07T17:02:48
| 206,856,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
'''
example of extracting faces from multiple directories and their subdirectories
extracts all faces from the images found under the paths listed in dirs
upload_path - directory where the extracted faces will be stored
this code skips face extraction if the number of files is more than 10000
'''
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import facextr
upload_path = r"C:\Users\user\tensorflow\keras\face_detection\face_extractor\results\dirs"
dirs = [r'\\123tb\Media\Photo\Pictures\2019', r'\\123tb\Media\Photo\Pictures\2018',
r'\\123tb\Media\Photo\Pictures\2017', r'\\123tb\Media\Photo\Pictures\2016',
r'\\123tb\Media\Photo\Pictures\2015', r'\\123tb\Media\Photo\Pictures\2014',
r'\\123tb\Media\Photo\Pictures\2013', r'\\123tb\Media\Photo\Pictures\2012',
r'\\123tb\Media\Photo\Pictures\2011', r'\\123tb\Media\Photo\Pictures\2010',
r'\\123tb\Media\Photo\Pictures\2009', r'\\123tb\Media\Photo\Pictures\2008',
r'\\123tb\Media\Photo\Pictures\2007', r'\\123tb\Media\Photo\Pictures\2006',
r'\\123tb\Media\Photo\Pictures\2005']
if __name__ == '__main__':
files = facextr.dirs_files_count(dirs)
print('number of image files : {}'.format(files))
if files < 10000:
facextr.face_extract_dirs(dirs, upload_path, dir_structure = True)
else:
        print('too many files to process; this may take more than 6 hours')
authors: ["juro.marusic@gmail.com"] | author_id: juro.marusic@gmail.com

blob_id: a7c6d0e74d240aa7da3f3c8beef4b72e3afe7cc6 | directory_id: c7993e915ab093ae755977f2d844e4604df9f440 | path: /Praktikum 1 No.12.py | content_id: d48c3e8d35fe49c8847324a1b0adb6c92da65ffc
detected_licenses: [] | license_type: no_license | repo_name: ahmadalvin92/Chapter-05 | snapshot_id: fe7981182535681604a40baad7f53d0975d98a6e | revision_id: b316c9540f25a09ceb88a0829d82cea2331945d1 | branch_name: refs/heads/main
visit_date: 2023-01-03T11:53:08.432939 | revision_date: 2020-10-30T14:01:57 | committer_date: 2020-10-30T14:01:57 | github_id: 308,644,689 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 291 | extension: py
content:
a = 8
b = 3
if (a > 0) and (b > 0):
    print("both are positive")
else:
    print("not both are positive")
a = 8
b = 3
if (a > 0):
    if (b > 0):
        print("Both are positive")
    else:
        print("Not both are positive")
else:
    print("Not both are positive")
authors: ["noreply@github.com"] | author_id: ahmadalvin92.noreply@github.com

blob_id: 5e95d15bbcb402658a0aa5ca152150228122ffa4 | directory_id: 88be3911c7e73d4bf71b0482ee6d15f49030463a | path: /SEC31_Regex/Demo_findall.py | content_id: efd4979649d52b8aed3afc6af63204120a6ce980
detected_licenses: [] | license_type: no_license | repo_name: skyaiolos/Python_KE | snapshot_id: 85f879d1cb637debd2e3a0239d7c8d7bfb30c827 | revision_id: 8cc42c8f4d1245de4b79af429f72a9ed2508bc1a | branch_name: refs/heads/master
visit_date: 2021-01-22T08:47:47.761982 | revision_date: 2017-05-28T14:57:02 | committer_date: 2017-05-28T14:57:02 | github_id: 92,634,507 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,028 | extension: py
content:
"""
# Script Description:
Python 正则表达式之RegexObject
"""
__author__ = "爱的彼岸(QQ:3124724)"
__copyright__ = "Copyright 2017,3124724@qq.com"
# Create by Jianguo on 2017/5/7
import re
text = "Tom is 8 years old, Mike is 25 years old."
# A pattern object represents the compiled regular expression (compiled to bytecode and cached)
# re.compile(r'pattern')
print('findall()'.center(100, '*'))
pattern = re.compile(r'\d+')
print(pattern.findall(text))
print(re.findall(r'\d+', text))
s = "\\author:Tom"
pattern = re.compile(r'\\author')
rex = pattern.findall(s)
print(rex)
text = "Tom is 8 years old, Mike is 25 years old.Peter is 87 years old."
pattern = re.compile(r'\d+')
rex = pattern.findall(text)
print(rex)
p_name = re.compile(r'[A-Z]\w+')
rex_p = p_name.findall(text)
print(rex_p)
p1 = re.compile(r'\d+')
p2 = re.compile(r'[A-Z]\w+')
print('findall() VS finditer()'.center(100, '*'))
print(p1.findall(text))
print()
print('finditer()'.center(30, '*'))
it = p1.finditer(text)
for item in it:
print(item)
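Unlike findall(), finditer() yields match objects, so match positions are available too (a sketch):

for m in p1.finditer(text):
    print(m.group(), m.start(), m.end())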
authors: ["skyaiolos@aliyun.com"] | author_id: skyaiolos@aliyun.com

blob_id: 8ffb86706e389d8e762090671e43d0b079b34933 | directory_id: d5ba272c47ca56435da778dd3f307cc0369910c5 | path: /IB CS/Hw/postfix_infix | content_id: 36e293092a08c92b78a26d21a69c6dfc5998de28
detected_licenses: [] | license_type: no_license | repo_name: hhshhd/hhshhd | snapshot_id: 34dc5c54b6d51ec169a693cd7c00805554eb4273 | revision_id: de1508854f95441e45101c8d2472bab41df0f41c | branch_name: refs/heads/master
visit_date: 2020-03-27T12:03:53.116051 | revision_date: 2020-02-19T08:09:55 | committer_date: 2020-02-19T08:09:55 | github_id: 146,523,137 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,661 | extension: ""
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 18 21:16:02 2018
@author: hhshhd
"""
class Stack():
def __init__(self):
self.stack = []
def itself(self):
return self.stack
def isEmpty(self):
return self.stack == []
def push(self, item):
self.stack.append(item)
def pop(self):
return self.stack.pop()
def peek(self):
return self.stack[-1]
def size(self):
return len(self.stack)
class Queue():
def __init__(self):
self.queue = []
def itself(self):
return self.queue
    def isEmpty(self):
        # fix: the original referenced self.Queue, which does not exist
        return not self.queue
def enQueue(self,temp):
self.queue.insert(0,temp)
def deQueue(self):
return self.queue.pop()
def size(self):
return len(self.queue)
def addRear(self,temp):
self.queue.insert(0,temp)
def addFront(self,temp):
self.queue.append(temp)
def removeRear(self):
return self.queue.pop(0)
def removeFront(self):
return self.queue.pop()
def __str__(self):
return str(self.queue)
def eval_postfix(postfix):
test = Stack()
token_postfix = postfix.split(' ')
for i in token_postfix:
if i.isdigit():
test.push(i)
else:
right = test.pop()
left = test.pop()
result = eval(left + i + right)
test.push(str(result))
return test.itself()
def postfix_infix(postfix):
test = Stack()
token_postfix = postfix.split(' ')
signal = ' '
for i in token_postfix:
if i.isdigit():
test.push(i)
else:
if i in ['**'] and signal in ['*','/','//','%','+','-']:
temp = test.pop()
temp = '(' + temp + ')'
test.push(temp)
elif i in ['*','/','//','%'] and signal in ['+','-']:
temp = test.pop()
temp = '(' + temp + ')'
test.push(temp)
right = test.pop()
left = test.pop()
result = left + i + right
test.push(result)
signal = i
return test.itself()
print(eval_postfix('122 11 11 * -'))
print(postfix_infix('122 11 10 30 50 - + / **'))
def infix_postfix(infix):
test = Queue()
token_infix = []
signal = ' '
signall = ' '
for i in infix:
token_infix.append(i)
for j in token_infix:
if j.isdigit():
if signal.isdigit():
temp = test.deQueue()
temp = signal + j
test.enQueue(temp)
else:
test.enQueue(j)
elif j in ['(']:
continue
else:
if signall in [')']:
test.deQueue()
test.addFront(j)
signall = j
continue
elif j in ['**'] and signall in ['*','/','//','%','+','-']:
right = test.deQueue()
left = test.deQueue()
test.addFront(j)
test.addFront(left)
test.addFront(right)
elif j in ['*','/','//','%'] and signall in ['+','-']:
right = test.deQueue()
left = test.deQueue()
test.addFront(j)
test.addFront(left)
test.addFront(right)
else:
test.addFront(j)
signall = j
signal = j
return test.itself()
print(infix_postfix('(22+3)*3'))
print(infix_postfix('22+3*3'))
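Two small checks of the conversions above (expected results worked out by hand):

print(eval_postfix('3 4 +'))       # ['7']
print(postfix_infix('2 3 4 + *'))  # ['2*(3+4)'] - the operand on top of the stack gets parenthesized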
authors: ["noreply@github.com"] | author_id: hhshhd.noreply@github.com

blob_id: 687ddb9d4e990cf9ca5ebc733f3ffbe89ac6d8eb | directory_id: 9aca5ecc08bc81a58f33ea0082f7cac360a1633d | path: /2Var Simplex Algorithm.py | content_id: 9919e5cfb9869b646f8477b3af6e6a328fa1b63c
detected_licenses: [] | license_type: no_license | repo_name: RealConjugate/Python-Algorithms | snapshot_id: d31360d7684012882edd1af83e2521ddc51135e3 | revision_id: 44b1bdd58d288c854db68c58c626e0f9470ac68a | branch_name: refs/heads/master
visit_date: 2021-07-01T08:57:14.777960 | revision_date: 2021-06-30T20:02:17 | committer_date: 2021-06-30T20:02:17 | github_id: 240,720,292 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,741 | extension: py
content:
from math import gcd
from decimal import Decimal
from tkinter import *
window = Tk()
window.title("Simplex Algorithm 2Var")
def InitialTable(a,b,c,d,e,f,g,h):
    array = []
    array.append([c,d,(1,1),(0,1)])
    array.append([f,g,(0,1),(1,1)])
    array.append([(-1*a[0],a[1]),(-1*b[0],b[1]),(0,1),(0,1)])
    values = [e,h,(0,1)]
    basicVars = ["r","s","P"]
    return (array, basicVars, values)
def Iterate(array, basicVars, values): # Maximising P: x,y,r,s>=0. Lists contain tuples
columns = ["x","y","r","s"]
negative = []
numList = []
for num in array[2]: # array[2] is objective variable row
if Sign(num) == "negative":
negative.append(float(num[0]/num[1])) # float to compare values
numList.append(num)
if not negative == []:
mostNegativeFloat = min(negative)
mostNegative = numList[negative.index(mostNegativeFloat)]
pivotCol = array[2].index(mostNegative)
theta = [Div(values[0],array[0][pivotCol]), Div(values[1],array[1][pivotCol])]
smallestPositive = (0,1) # zero tuple
for value in theta:
if Sign(value) == "positive":
if Sign(smallestPositive) == "zero" or Sign(Subt(value, smallestPositive)) == "negative":
smallestPositive = value
if not Sign(smallestPositive) == "zero":
pivotRow = theta.index(smallestPositive)
pivot = array[pivotRow][pivotCol]
newArray = [[],[],[]]
newValues = []
dividedRow = []
for item in array[pivotRow]:
dividedRow.append(Div(item,pivot))
for i in range(0,3):
if i == pivotRow:
newArray[i] = dividedRow
newValues.append(Div(values[i],pivot))
else:
for j in range(0,4):
newArray[i].append(Subt(array[i][j],Mult(array[i][pivotCol],dividedRow[j])))
newValues.append(Subt(values[i],Mult(array[i][pivotCol],Div(values[pivotRow],pivot))))
newBasicVars = []
for var in basicVars:
if var == basicVars[pivotRow]:
newBasicVars.append(columns[pivotCol])
else:
newBasicVars.append(var)
Iterate(newArray,newBasicVars,newValues)
else:
print("Optimal solution found")
pLabel["text"] = basicVars[2] + " = " + GetString(values[2])
var1["text"] = basicVars[0] + " = " + GetString(values[0])
var2["text"] = basicVars[1] + " = " + GetString(values[1])
if "x" in basicVars:
var1["text"] = "x = " + GetString(values[basicVars.index("x")])
else:
var1["text"] = "x = 0"
if "y" in basicVars:
var2["text"] = "y = " + GetString(values[basicVars.index("y")])
else:
var2["text"] = "y = 0"
def StringToRatio(string): # string into reduced fraction as tuple
if "/" in string: # string already "floatable" - only one /
index = string.index("/")
numerator = int(string[0:index])
denominator = int(string[index + 1:len(string)])
return (numerator, denominator)
else:
return Decimal(string).as_integer_ratio()
def IsStrFloatable(string): # checks if string can be written as fraction
if "/" in string:
index = string.index("/") # first instance
numerator = string[0:index]
denominator = string[index + 1:len(string)]
# If >1 / in string we get ValueError
try:
int(numerator)
try:
int(denominator)
return True
except ValueError:
return False
except ValueError:
return False
else:
try:
float(string)
return True
except ValueError:
return False
def Simplify(pair): # simplifies tuple
numerator = pair[0]
denominator = pair[1]
if denominator < 0:
numerator = -1 * numerator
denominator = -1 * denominator
GCD = gcd(numerator, denominator)
numerator = int(numerator / GCD)
denominator = int(denominator / GCD)
return (numerator, denominator)
def Div(V,W):
return Simplify((V[0]*W[1], V[1]*W[0]))
def Mult(V,W):
return Simplify((V[0]*W[0],V[1]*W[1]))
def Subt(V,W):
numerator = V[0]*W[1] - W[0]*V[1]
denominator = V[1]*W[1]
return Simplify((numerator, denominator))
def Sign(fraction):
fraction = Simplify(fraction)
if fraction[0] == 0:
return "zero"
if fraction[0] > 0:
return "positive"
if fraction[0] < 0:
return "negative"
def GetString(pair): # tuple --> fraction string
numerator = pair[0]
denominator = pair[1]
if denominator == 1:
return str(numerator)
else:
return str(numerator) + "/" + str(denominator)
def Validate():
a = EntryPX.get()
b = EntryPY.get()
c = entryx1.get()
d = entryy1.get()
e = entryval1.get()
f = entryx2.get()
g = entryy2.get()
h = entryval2.get()
strings = [a,b,c,d,e,f,g,h]
valid = True
for item in strings:
if not IsStrFloatable(item):
valid = False
if valid:
a = StringToRatio(a)
b = StringToRatio(b)
c = StringToRatio(c)
d = StringToRatio(d)
e = StringToRatio(e)
f = StringToRatio(f)
g = StringToRatio(g)
h = StringToRatio(h)
strings = [a,b,c,d,e,f,g,h]
    if valid:
        print(strings)
        # build the initial tableau once and pass all three parts to Iterate
        array, basicVars, values = InitialTable(a,b,c,d,e,f,g,h)
        Iterate(array, basicVars, values)
# GUI Creation
Fconstraints1 = Frame(window)
constraints1 = Label(Fconstraints1, text = "Enter positive entries.")
constraints1.grid(row=0,column=0)
Fconstraints2 = Frame(window)
constraints2 = Label(Fconstraints2, text = "Input as int/fraction/decimal.")
constraints2.grid(row=0,column=0)
inputFrame = Frame(window)
FMaximise = Frame(inputFrame)
maximise = Label(FMaximise, text = "Maximise", width = 20)
maximise.grid(row=0,column=0)
FMaximise.grid(row=0,column=0)
FGiven = Frame(inputFrame)
given = Label(FGiven, text = "given", width = 20)
given.grid(row=0,column=0)
FGiven.grid(row=1,column=0)
gap1 = Frame(window)
space = Label(gap1, text = " ")
space.grid(row=0,column=0)
gap1.grid(row=2,column=0)
PRow = Frame(inputFrame)
labelP = Label(PRow, text = "P =")
labelP.grid(row=0,column=0)
EntryPX = Entry(PRow, width = 4)
EntryPX.grid(row=0,column=1)
labelPX = Label(PRow, text = "x +")
labelPX.grid(row=0,column=2)
EntryPY = Entry(PRow, width = 4)
EntryPY.grid(row=0,column=3)
labelPY = Label(PRow, text = "y")
labelPY.grid(row=0,column=4)
PRow.grid(row=0,column=1)
Row1 = Frame(inputFrame)
entryx1 = Entry(Row1, width = 4)
entryx1.grid(row=0,column=0)
labelx1 = Label(Row1, text = "x +")
labelx1.grid(row=0,column=1)
entryy1 = Entry(Row1, width = 4)
entryy1.grid(row=0,column=2)
labely1 = Label(Row1, text = "y <=")
labely1.grid(row=0,column=3)
entryval1 = Entry(Row1, width = 4)
entryval1.grid(row=0,column=4)
Row1.grid(row=1,column=1)
Row2 = Frame(inputFrame)
entryx2 = Entry(Row2, width = 4)
entryx2.grid(row=0,column=0)
labelx2 = Label(Row2, text = "x +")
labelx2.grid(row=0,column=1)
entryy2 = Entry(Row2, width = 4)
entryy2.grid(row=0,column=2)
labely2 = Label(Row2, text = "y <=")
labely2.grid(row=0,column=3)
entryval2 = Entry(Row2, width = 4)
entryval2.grid(row=0,column=4)
Row2.grid(row=2,column=1)
nonnegative = Frame(inputFrame)
label = Label(nonnegative, text = "x, y >= 0")
label.grid(row=0,column=0)
nonnegative.grid(row=3,column=1)
frameButton = Frame(inputFrame)
button = Button(
master = frameButton,
text = "Execute",
command = Validate,
bg = "#1E7800",
fg = "#FFFFFF"
)
button.grid(row=0,column=0)
frameButton.grid(row=4, column=1)
pFrame = Frame(inputFrame)
pLabel = Label(pFrame, text = "")
pLabel.grid(row=0,column=0)
pFrame.grid(row=5, column=1)
var1Frame = Frame(inputFrame)
var1 = Label(var1Frame, text = "")
var1.grid(row=0,column=0)
var2Frame = Frame(inputFrame)
var2 = Label(var2Frame, text = "")
var2.grid(row=0,column=0)
var1Frame.grid(row=5, column=0)
var2Frame.grid(row=6, column=0)
Fconstraints1.grid(row=0,column=0)
Fconstraints2.grid(row=1,column=0)
inputFrame.grid(row=3,column=0)
window.mainloop()
|
[
"noreply@github.com"
] |
RealConjugate.noreply@github.com
|
d86c18ba4ec7afad2c900121e66fde4e22ba5e2c
|
ec47c9f3fa378151a23c80525ceb1d0c02122d95
|
/engine/urls.py
|
7c7812c878312447bde3822619c98bbbf2d698d8
|
[] |
no_license
|
harshdalal442/udaaan
|
a45a5f1a397f2c0a9865c2b29e18720f47be9dfb
|
ad407fe2ce4b3985db81d88277df36dca059adce
|
refs/heads/master
| 2022-12-11T11:04:39.984886
| 2018-10-14T16:25:09
| 2018-10-14T16:25:09
| 152,994,274
| 0
| 0
| null | 2022-12-08T01:02:34
| 2018-10-14T16:24:40
|
Python
|
UTF-8
|
Python
| false
| false
| 290
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^screens/$', views.RegisterScreen),
url(r'^screens/(?P<theatre_name>[a-zA-Z0-9]+)/reserve/$', views.RegisterSeat),
url(r'^screens/(?P<theatre_name>[a-zA-Z0-9]+)/seats', views.RetreiveSeatInfo),
]
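# Example matches (added for clarity; paths are relative to wherever this
# URLconf is included):
#   screens/                -> views.RegisterScreen
#   screens/IMAX3/reserve/  -> views.RegisterSeat   (theatre_name='IMAX3')
#   screens/IMAX3/seats     -> views.RetreiveSeatInfo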
|
[
"harsh.d.btechi14@ahduni.edu.in"
] |
harsh.d.btechi14@ahduni.edu.in
|
22b3ecfa2b4ceef4967d1ba577da57db61f5e99a
|
69b4d9b7edb6241cc08c29b4c9b84ef7fe024b24
|
/MusicMaze/model/graph/Vertice.py
|
9660916446a22b611f873ad0ffab68d5b9272ced
|
[
"MIT"
] |
permissive
|
CookieComputing/MusicMaze
|
7e4c632c7d4dbb8686e17fc69cccadf832d28ab8
|
5a60fb23694583cfbfde3d19a0aec5292c5aa9cc
|
refs/heads/master
| 2020-03-31T18:20:24.738412
| 2019-04-23T05:59:15
| 2019-04-23T05:59:15
| 152,455,032
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,632
|
py
|
class Vertice:
"""This class represents an implementation of a vertice. A vertice in the
graph will be interpreted as a "position" that a user can walk onto in the
maze, and will consequently also be used as the cells of a maze."""
def __init__(self, name):
"""Initialize a vertice. A vertice should given a unique identifier
to promote the identification of nodes.
Args:
name(str): the unique identifier of a vertice. This name should be
used to promote a uid for a node and proper care with vertices
should enforce that no two nodes have the same name.
Raises:
ValueError: If the name is empty or None
"""
if not name:
raise ValueError("Given null name in vertice")
self.__name = name
self.__neighbors = dict()
def add_neighbor(self, neighbor):
"""Assign the given vertice to become this node's neighbor. This is
assignment is a directed assignment, however, so only this vertice will
be able to recognize the given neighbor as a neighbor after a single
operation of this function.
Args:
neighbor(Vertice): the vertice to become a neighbor to
Raises:
ValueError: If given a null neighbor or attempting to add itself
as a potential neighbor, or if adding a duplicate neighbor
"""
if not neighbor:
raise ValueError("Given invalid neighbor")
if neighbor == self:
raise ValueError("Vertice cannot become it's own neighbor")
if neighbor.name() in self.__neighbors:
raise ValueError("Attempting to add duplicate neighbor")
self.__neighbors[neighbor.name()] = neighbor
def remove_neighbor(self, neighbor):
"""Removes a given neighbor from this vertice's neighbors.
Args:
neighbor(str): the neighbor to remove
Raises:
            ValueError: If the neighbor does not exist
"""
if neighbor.name() not in self.__neighbors:
raise ValueError("Neighbor does not exist to remove")
if neighbor != self.__neighbors[neighbor.name()]:
raise ValueError("Given vertice is not the actual vertice neighbor")
del self.__neighbors[neighbor.name()]
def is_neighbor(self, potential_neighbor):
"""Determines if the given vertice is a potential neighbor of this
vertice. This neighbor checking function will only determine that
the given vertice is a neighbor from this vertice's perspective, and
not from the neighbor's perspective.
Args:
potential_neighbor(Vertice): the expected neighbor
Returns:
boolean: If the vertice is indeed a neighbor of this vertice.
"""
if potential_neighbor.name() not in self.__neighbors:
return False
return self.__neighbors[potential_neighbor.name()] \
== potential_neighbor
def neighbors(self):
"""Returns a list of this vertice's neighbors. Since this is an
internal implementation detail, we make the choice to allow the
vertice to return actual references to other vertices. There is
no guarantee of the order of the neighbors.
Returns:
neighbors(list(Vertice)): This vertice's neighbors
"""
return list(self.__neighbors.values())
def name(self):
"""Return the unique name of this vertice.
Returns:
str: the name of this vertice"""
return self.__name
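# Minimal usage sketch (an illustrative addition, not part of the original
# module): wire two vertices together and query the relationship.
if __name__ == "__main__":
    a = Vertice("a")
    b = Vertice("b")
    a.add_neighbor(b)
    assert a.is_neighbor(b)
    assert not b.is_neighbor(a)  # add_neighbor is directed
    assert [v.name() for v in a.neighbors()] == ["b"]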
|
[
"kevinxu1@yahoo.com"
] |
kevinxu1@yahoo.com
|
ea385301144e17aa355e09063a6bd7bb66103bb1
|
d7faf47825b6f8e5abf9a9587f1e7248c0eed1e2
|
/python/ray/tests/test_asyncio_cluster.py
|
bea440bdf4b27bb1b625ec135c2bbc2bd5dd6d5b
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ggdupont/ray
|
7d7c7f39a8f99a09199fab60897da9e48b8e2645
|
15391026c19f1cbbb8d412e46b01f7998e42f2b9
|
refs/heads/master
| 2023-03-12T06:30:11.428319
| 2021-12-07T05:34:27
| 2021-12-07T05:34:27
| 165,058,028
| 0
| 0
|
Apache-2.0
| 2023-03-04T08:56:50
| 2019-01-10T12:41:09
|
Python
|
UTF-8
|
Python
| false
| false
| 815
|
py
|
# coding: utf-8
import asyncio
import sys
import pytest
import numpy as np
import ray
from ray.cluster_utils import Cluster, cluster_not_supported
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported")
@pytest.mark.asyncio
async def test_asyncio_cluster_wait():
cluster = Cluster()
head_node = cluster.add_node()
cluster.add_node(resources={"OTHER_NODE": 100})
ray.init(address=head_node.address)
@ray.remote(num_cpus=0, resources={"OTHER_NODE": 1})
def get_array():
return np.random.random((192, 1080, 3)).astype(np.uint8) # ~ 0.5MB
object_ref = get_array.remote()
await asyncio.wait_for(object_ref, timeout=10)
ray.shutdown()
cluster.shutdown()
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
[
"noreply@github.com"
] |
ggdupont.noreply@github.com
|
5f8fd14fe0082158a135dcb0d3498417336ebf42
|
904e00bb34a124e44d34ac1da61d4f6caca511c0
|
/server/data/models.py
|
e9210f20a4f0f1c7c417b36d4dc8fb62f9e86c42
|
[] |
no_license
|
gunwooterry/inclusion-kaist
|
2dcb64da394955ad2e9bb9f89d4e155263ba5d8a
|
cecc01d0afcf58d6208cb0832b190578f536adda
|
refs/heads/master
| 2023-02-08T15:45:24.322827
| 2020-10-15T03:35:09
| 2020-10-15T03:35:09
| 115,467,626
| 1
| 0
| null | 2023-01-25T08:53:48
| 2017-12-27T01:19:13
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
from django.db import models
class Organization(models.Model):
name_ko = models.CharField(max_length=100)
name_en = models.CharField(max_length=100)
description_ko = models.CharField(max_length=500, blank=True)
description_en = models.CharField(max_length=500, blank=True)
location_ko = models.CharField(max_length=100, blank=True)
location_en = models.CharField(max_length=100, blank=True)
phone = models.CharField(max_length=15, blank=True)
email = models.EmailField(blank=True)
link = models.URLField(blank=True)
def __str__(self):
return self.name_ko
class Person(models.Model):
name_ko = models.CharField(max_length=50)
name_en = models.CharField(max_length=50)
department_ko = models.CharField(max_length=100, blank=True)
department_en = models.CharField(max_length=100, blank=True)
position_ko = models.CharField(max_length=100, blank=True)
position_en = models.CharField(max_length=100, blank=True)
image = models.ImageField(blank=True, null=True, upload_to='profiles')
def __str__(self):
return '{} ({})'.format(self.name_ko, self.department_ko)
|
[
"gunwooterry@gmail.com"
] |
gunwooterry@gmail.com
|
1fcb94080d373a4e5d005f3584cbd8aee502b1f7
|
3969ae549b1d6ab8871ceb86e3a834bdf03a55cb
|
/Day 14/puzzle_2.py
|
78167c96269891b4bb7f2543d5ae98bc164aec98
|
[] |
no_license
|
HealYouDown/advent-of-code-2015
|
7737603177e5ae1b74a98e58fb0c641779c9373e
|
53c92dc090c40ff82342a611aee82826dbf86b0a
|
refs/heads/master
| 2022-11-16T19:58:33.786271
| 2020-07-06T10:40:30
| 2020-07-06T10:40:30
| 277,513,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
import re
class Reindeer:
def __init__(self, line: str):
match = re.match(r"(.*) can fly (\d*) km/s for (\d*) seconds, but then must rest for (\d*) seconds.",
line)
self.name = match.group(1)
self.speed = int(match.group(2))
self.fly_time = int(match.group(3))
self.rest_time = int(match.group(4))
self.fly_counter = 0
self.rest_counter = 0
self.traveled = 0
self.flying = True
self.resting = False
self.points = 0
def __repr__(self) -> str:
return f"<Reindeer {self.name} traveled={self.traveled} points={self.points}>"
def __lt__(self, other: "Reindeer"):
return self.traveled > other.traveled
def action(self):
if self.flying:
self.traveled += self.speed
self.fly_counter += 1
elif self.resting:
self.rest_counter += 1
if self.fly_counter == self.fly_time:
self.fly_counter = 0
self.resting = True
self.flying = False
elif self.rest_counter == self.rest_time:
self.rest_counter = 0
self.resting = False
self.flying = True
if __name__ == "__main__":
with open("Day 14/input.txt", "r") as fp:
deers = [Reindeer(line) for line in fp.readlines()]
seconds = 2503
for _ in range(seconds):
for deer in deers:
deer.action()
        deers.sort()  # __lt__ orders by distance, so the current leader comes first
        # Award a point to every reindeer tied for the lead distance
        for deer in filter(lambda d: d.traveled == deers[0].traveled, deers):
            deer.points += 1
    deers.sort(key=lambda d: d.points, reverse=True)
    print(deers[0].points)  # no +1 fudge needed once points go to the distance leaders each second
|
[
"jeremyregitz@gmail.com"
] |
jeremyregitz@gmail.com
|
a6a921b3c8898eaf78ef99de628b1da382cbb447
|
70e9fc1f810fac6bb90d840b30d66f23119e7633
|
/predict.py
|
b9d4425bf66af423d74137ff19efb81414c1b29e
|
[] |
no_license
|
RimaSadh/flowers_classifier
|
1b671caa12c87317909945e61a61607a960ccee1
|
9de65872dda101eb07e3b81bd4cd2c16e475f3c2
|
refs/heads/master
| 2022-12-08T02:38:15.843521
| 2020-08-27T12:37:19
| 2020-08-27T12:37:19
| 290,755,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,403
|
py
|
import argparse
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image
import numpy as np
import json
import warnings
warnings.filterwarnings('ignore')
# Function to define parser and its arguments
def define_arguments():
# Create parser object
parser = argparse.ArgumentParser(description = 'Flowers Image Classifier')
# For each argument we provide(Name, Default Value, Data Type, and Help Message)
# Image variable to get the image path used in the prediction
parser.add_argument('--image',
default = './test_images/wild_pansy.jpg',
type = str,
help = 'Image path')
# Model variable to get the model path used for the prediction
parser.add_argument('--model',
default = './flowers_classifier.h5',
type = str,
help = 'Model file path')
# K variable represents the top K most likely flowers
parser.add_argument('--top_k',
default = 5 ,
type = int,
help = 'The top K most likely classes')
# Variable to get the Json file that contains the labels
parser.add_argument('--category_names',
default = './label_map.json',
help = 'Json file used to map the labels with category names')
return parser
# Function that handles the processing of flower image before being injected to the prediction model
# ( Resizing and Normalizing )
def process_image(image):
# Convert (NumPy array) image into a TensorFlow Tensor
processed_image = tf.convert_to_tensor(image)
# Resize the image
    processed_image = tf.image.resize(processed_image, (224, 224))
# Normalize the pixel values
processed_image /= 255
# Return the image as NumPy array
return processed_image.numpy()
# Function handles the prediction of labels by taking as an inputs (image path, loaded model, top K as an integer)
def predict(image_path, model, top_k):
# First: Process the Image
#1. Load and import the image
image = Image.open(image_path)
#2. Convert it to numpy array
image = np.asarray(image)
#3. Resize and normalize the image
image = process_image(image)
#4. Add Extra dimension represents the batch size, to make the image in the needed dimensions for the model
image = np.expand_dims(image, axis = 0)
    # Second: Predict the labels using the loaded model
    predicted_probabilities = model.predict(image)
    # Third: Interpret the results returned by the model
    # Find the k largest entries in the probabilities vector and output their values and corresponding labels
    probabilities, classes = tf.nn.top_k(predicted_probabilities, k = top_k)
    # Convert both the probabilities and classes to 1-D Python lists
    probabilities = probabilities.numpy().tolist()[0]
    classes = classes.numpy().tolist()[0]
    # Fourth: Map the classes to the labels
    labels = []
    for l in classes:
        labels.append(class_names[str(l+1)]) # (+1) because the JSON label keys are 1-indexed
    return probabilities, labels
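# Hypothetical call for illustration (the image path is a placeholder, and the
# global `class_names` must already be loaded, as done in the __main__ block
# below):
#   probs, labels = predict('./test_images/wild_pansy.jpg', model, 5)
#   probs  -> e.g. [0.97, 0.01, ...]; labels -> e.g. ['wild pansy', ...]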
if __name__=="__main__":
parser = define_arguments()
arg_parser = parser.parse_args()
# Save user inputs to variables
image_path = arg_parser.image
model_path = arg_parser.model
top_k = arg_parser.top_k
category_names = arg_parser.category_names
# Load and map the labels to the flowers category
with open(category_names, 'r') as f:
class_names = json.load(f)
# Load the prediction model using TensorFlow
model = tf.keras.models.load_model(model_path, custom_objects = {'KerasLayer':hub.KerasLayer})
print("****Start Pridiction****\n")
# Predict by passing the image path + loaded model + top k as integer
probs, labels = predict(image_path, model, top_k)
# Print the result of prediction
print("Top {} prediction flower names and it's associated probability for the image in path: {}\n".format(top_k, image_path))
print('\t Flower Name | Probability% \n')
for p, l in zip(probs, labels):
p = float(format(p, '.4f'))
print('\t {} | {}%'.format(l, p*100))
print("\n****End Pridiction****")
|
[
"noreply@github.com"
] |
RimaSadh.noreply@github.com
|
dd57282a6f43709922c5f7cbe9ce63f81e77bcd0
|
414db33a43c50a500741784eea627ba98bb63e27
|
/0x0A-python-inheritance/9-rectangle.py
|
4092a9005ebb2873185b2c9b324c123b1c9c6344
|
[] |
no_license
|
rayraib/holbertonschool-higher_level_programming
|
2308ea02bd7f97eae3643e3ce0a6489cc1ad9ff5
|
6b4196eb890ffcb91e541431da9f5f57c5b85d4e
|
refs/heads/master
| 2021-09-14T09:12:26.664653
| 2018-05-11T03:23:12
| 2018-05-11T03:23:12
| 113,070,818
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
#!/usr/bin/python3
BaseGeometry = __import__('7-base_geometry').BaseGeometry
'''
subclass of BaseGeometry class
'''
class Rectangle(BaseGeometry):
''' representation of a rectangle'''
def __init__(self, width, height):
'''initialize the object attributes'''
BaseGeometry.integer_validator(self, "height", height)
self.__height = height
BaseGeometry.integer_validator(self, "width", width)
self.__width = width
def area(self):
''' calculate area of the rectangle'''
return (self.__height * self.__width)
def __str__(self):
        '''return informal string representation of the object itself'''
return ("[Rectangle] {}/{}".format(self.__width, self.__height))
|
[
"binitarai11@gmail.com"
] |
binitarai11@gmail.com
|
89ddaad98abbd1f07088c068c03103c8d88bb637
|
b4507e2b9f424e7ceea0b1ef189d698196926847
|
/core/operators.py
|
3d1095bd14943401579a29159fde564b13a7b202
|
[] |
no_license
|
sadernalwis/Scratchpad
|
9e57b61fcee8b20d348328853490a69bc5bed395
|
0387993a9853dbaf9122d373c8ab5d412ba4f739
|
refs/heads/master
| 2023-02-13T12:39:29.136998
| 2021-01-13T00:36:01
| 2021-01-13T00:36:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import bpy
from bpy.types import Operator
from libs.registry import autoregister
@autoregister
class SCRATCHPAD_OT_reload_sources(Operator):
"""Force reload of all shader source files"""
bl_idname = 'scratchpad.reload_sources'
bl_label = 'Reload Shader Sources'
def invoke(self, context, event):
for mat in bpy.data.materials:
mat.scratchpad.force_reload = True
return {'FINISHED'}
|
[
"cmcmanning@gmail.com"
] |
cmcmanning@gmail.com
|
bd82d3e98d1a67cc87a28e599370a8b6475b91ae
|
3467fe90c6c49b4ac86785d1da19d7183b2ac0f5
|
/6002x/findCombination.py
|
85d683714d531ae692f4b2fa142f7782b706f04d
|
[
"MIT"
] |
permissive
|
CarlosEduardoAS/MITx
|
277da453638da672c9946513bfb7a86e7446072b
|
532695d69c77581b6df80c145283b349b75e4973
|
refs/heads/main
| 2023-05-02T13:50:15.283211
| 2021-05-25T20:02:48
| 2021-05-25T20:02:48
| 351,555,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 16:10:53 2021
@author: caear
"""
import numpy
import itertools
def find_combination(choices, total):
"""
choices: a non-empty list of ints
total: a positive int
Returns result, a numpy.array of length len(choices)
such that
* each element of result is 0 or 1
* sum(result*choices) == total
* sum(result) is as small as possible
In case of ties, returns any result that works.
If there is no result that gives the exact total,
pick the one that gives sum(result*choices) closest
to total without going over.
"""
power_set = []
for i in itertools.product([1,0], repeat = len(choices)):
power_set.append(numpy.array(i))
filter_set_eq = []
filter_set_less = []
for j in power_set:
if sum(j*choices) == total:
filter_set_eq.append(j)
elif sum(j*choices) < total:
filter_set_less.append(j)
    if len(filter_set_eq) > 0:
        # Among exact matches, choose the one that uses the fewest items
        return min(filter_set_eq, key=lambda r: sum(r))
    else:
        # No exact match: choose the subset whose weighted sum comes closest
        # to the total without exceeding it (the all-zero row is always a candidate)
        return max(filter_set_less, key=lambda r: sum(r * choices))
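# Illustrative check of the exhaustive search above (an added example; the
# numbers are arbitrary):
if __name__ == "__main__":
    choices = [1, 2, 2, 3]
    result = find_combination(choices, 4)
    assert sum(result * numpy.array(choices)) == 4
    assert sum(result) == 2  # both 1+3 and 2+2 reach 4 with two items
    print(result)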
|
[
"79329559+CarlosEduardoAS@users.noreply.github.com"
] |
79329559+CarlosEduardoAS@users.noreply.github.com
|
b7bda430b886d0141d2525d46d6a57a4df588c92
|
ef42f9ebfc427882bc30f8b29692b8bf1195fc96
|
/Codigo/homePage/migrations/0010_infolibro_preciolibro.py
|
c2bde5503c358d8d35efd6d538f7cae8c7c5dc97
|
[] |
no_license
|
IngSw201910/ZonaCultura
|
9671a0fdab7ba2e80f53a88c5d520ddee12894cc
|
9bf543c0082a6c361f7980ae35977a7183952873
|
refs/heads/master
| 2020-04-19T11:41:19.564671
| 2019-05-23T13:54:31
| 2019-05-23T13:54:31
| 168,173,720
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Generated by Django 2.2 on 2019-04-14 04:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('homePage', '0009_auto_20190413_2135'),
]
operations = [
migrations.AddField(
model_name='infolibro',
name='PrecioLibro',
field=models.IntegerField(default=0),
),
]
|
[
"dpbeltran97@gmail.com"
] |
dpbeltran97@gmail.com
|
902b09ed2ee809a19293ec13b3fccd3cf58d2dbf
|
6ffd23679939f59f0a09c9507a126ba056b239d7
|
/imperative/python/megengine/core/_trace_option.py
|
638c142a12249cc9b7381b3c378d5b01f5b5ff9e
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
MegEngine/MegEngine
|
74c1c9b6022c858962caf7f27e6f65220739999f
|
66b79160d35b2710c00befede0c3fd729109e474
|
refs/heads/master
| 2023-08-23T20:01:32.476848
| 2023-08-01T07:12:01
| 2023-08-11T06:04:12
| 248,175,118
| 5,697
| 585
|
Apache-2.0
| 2023-07-19T05:11:07
| 2020-03-18T08:21:58
|
C++
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
# -*- coding: utf-8 -*-
import os
from ._imperative_rt.core2 import set_cpp_use_symbolic_shape
_use_symbolic_shape = False
if os.environ.get("MEGENGINE_USE_SYMBOLIC_SHAPE"):
_use_symbolic_shape = True
_use_xla_backend = False
def use_symbolic_shape() -> bool:
r"""Returns whether tensor.shape returns a tensor instead of a tuple"""
return _use_symbolic_shape
def set_symbolic_shape(option: bool):
r"""Sets whether tensor.shape returns a tensor instead of a tuple"""
global _use_symbolic_shape
_org = _use_symbolic_shape
_use_symbolic_shape = option
return _org
def use_xla_backend() -> bool:
return _use_xla_backend
def set_use_xla_backend(option: bool) -> bool:
global _use_xla_backend
_org = _use_xla_backend
_use_xla_backend = option
return _org
set_cpp_use_symbolic_shape(use_symbolic_shape)
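# Typical toggle pattern (an illustrative addition, not from the original file):
#   prev = set_symbolic_shape(True)  # enable; returns the previous setting
#   ... code that relies on tensor.shape being a Tensor ...
#   set_symbolic_shape(prev)         # restore the caller's setting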
|
[
"megengine@megvii.com"
] |
megengine@megvii.com
|
e1b9f2842dd900818aff317fdcf377f37beb99ab
|
62e7193227d4929f6fbc2f95542bfd7ce42f7e76
|
/WindowsWrapper/create_experiment_dir.py
|
8726e5e4a15f663aec08b00e7fc2c69da6e16b95
|
[] |
no_license
|
tomjmanuel/windows_ConvnetWrapper
|
892ce9759ad90f7614bf8a81f52e284becc69a69
|
5ec9304d6db8b859e1a6d8a860a7f709706ff0c9
|
refs/heads/master
| 2020-07-04T05:51:28.610011
| 2019-08-13T21:22:54
| 2019-08-13T21:22:54
| 202,177,416
| 0
| 0
| null | 2019-11-02T07:26:01
| 2019-08-13T15:51:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
#!/usr/bin/env python
#########################################################
#
# Creates an experiment directory for cell detection pipeline
#
# Author: Alex Riordan
#
# Description: creates a user-specified directory with
# training/test/validation subdirectories and
# an autopopulated main_config.cfg file
#
# Usage: dir_name is user-specified directory, should be an absolute path
#
#
################################################################
import os, shutil, sys
from configparser import ConfigParser  # Python 3 name for the old ConfigParser module
def create_experiment_directory(dir_name):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
else:
raise AssertionError('Directory already exists.\n Choose a different name for your new experiment directory. ', dir_name)
os.makedirs(dir_name + '/labeled/test')
os.makedirs(dir_name + '/labeled/training')
os.makedirs(dir_name + '/labeled/validation')
def copy_main_config(dir_name):
src_path = os.path.dirname(os.path.abspath(__file__))
shutil.copy(src_path + '/main_config.cfg', dir_name)
config_path = dir_name + '/main_config.cfg'
    cfg_parser = ConfigParser()
    with open(config_path, 'r') as f:
        cfg_parser.read_file(f)
cfg_parser.set('general','data_dir', dir_name + '/labeled')
#repo_path = src_path.split('ConvnetCellDetection')[0] + 'ConvnetCellDetection/celldetection_znn'
repo_path = os.path.dirname(src_path) + '/celldetection_znn'
cfg_parser.set('network','net_arch_fpath', repo_path + '/2plus1d.znn')
cfg_parser.set('training','training_input_dir', dir_name + '/labeled_preprocessed')
cfg_parser.set('training','training_output_dir', dir_name + '/labeled_training_output')
cfg_parser.set('training','training_net_prefix', dir_name + '/labeled_training_output/2plus1d')
cfg_parser.set('forward','forward_net', dir_name + '/labeled_training_output/2plus1d_current.h5')
    with open(config_path, 'w') as configfile:
cfg_parser.write(configfile)
def main(dir_name = 'new_expt'):
dir_name = '../data/' + dir_name
create_experiment_directory(dir_name)
copy_main_config(dir_name)
    print('new experiment directory', dir_name, 'successfully created.')
if __name__ == "__main__":
if len(sys.argv) > 1:
dir_name = sys.argv[1]
main(dir_name)
else:
main()
|
[
"noreply@github.com"
] |
tomjmanuel.noreply@github.com
|
adf942ef17cc289e1c3cf16a609ecac205d03692
|
fc314838b18c14a00310f0059d5358c7c4afabd6
|
/special/models.py
|
6796cb77ef4370af265ada4e6ba8966f501a7cd4
|
[
"MIT"
] |
permissive
|
opendream/asip
|
5cb4b997fab2438193ae7490c159efced6dc3d91
|
20583aca6393102d425401d55ea32ac6b78be048
|
refs/heads/master
| 2022-11-28T23:28:18.405604
| 2020-03-10T04:56:23
| 2020-03-10T04:56:23
| 190,504,979
| 1
| 1
|
MIT
| 2022-11-22T01:10:46
| 2019-06-06T03:06:03
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
from django.db import models
# Create your models here.
from common.constants import STATUS_PUBLISHED, STATUS_CHOICES
from common.models import AbstractPermalink, CommonTrashModel
import files_widget
class Special(CommonTrashModel, AbstractPermalink):
title = models.CharField(max_length=512)
image = files_widget.ImageField(verbose_name='Banner Image', null=True, blank=True)
status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_PUBLISHED)
created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
changed = models.DateTimeField(auto_now=True, null=True, blank=True)
def __unicode__(self):
return self.permalink
def get_absolute_url(self):
return '/%s/' % self.permalink
class Page(CommonTrashModel, AbstractPermalink):
special = models.ForeignKey(Special, related_name='pages', null=True, blank=True)
status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_PUBLISHED)
created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
changed = models.DateTimeField(auto_now=True, null=True, blank=True)
|
[
"server@set.com"
] |
server@set.com
|
c83487e3debc10e945524c2ff7f63c1930983a75
|
537b1ccbd762bdad55d2cd49e6b24bb677a73dac
|
/reservations/migrations/0008_auto_20200831_0927.py
|
633be69d4350db3fe06f58668b412e8eb5dad19b
|
[] |
no_license
|
MahidharBandaru/webloom
|
d0ace50a01811a99c6e87380242d43cb1a81be0f
|
be45f5bf8c4e90417ab605b1d994d296e36b4dfb
|
refs/heads/master
| 2022-12-10T04:38:31.035730
| 2020-09-01T14:34:08
| 2020-09-01T14:34:08
| 291,698,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
# Generated by Django 3.1 on 2020-08-31 09:27
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('reservations', '0007_auto_20200831_0925'),
]
operations = [
migrations.RemoveField(
model_name='reservation',
name='datetime',
),
migrations.AddField(
model_name='reservation',
name='date',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='reservation',
name='time',
field=models.TimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
|
[
"mahi.mahidhar2@gmail.com"
] |
mahi.mahidhar2@gmail.com
|
933d859ca1c6bf077fcdc57be83a050da83f1265
|
a5dd8a280c96ab747215475e740cf49300bc13ab
|
/解析参数方法.py
|
fc4d549e9331d7d456aff38c8760b1819ae39061
|
[] |
no_license
|
leiqin01/python-learning
|
93c00e14b2f6d59161e7b02ef2f69554393646c0
|
13c56a007530c375fd773f167edd2d677c4fd02b
|
refs/heads/main
| 2023-03-23T11:34:26.875733
| 2021-03-13T16:56:10
| 2021-03-13T16:56:10
| 347,425,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
import sys
print(sys.float_info)      # all float limits and parameters for this platform
print(sys.float_info.max)  # the largest representable float
# Thonny's shell does not seem to run these lines; running them directly in IDLE works.
|
[
"noreply@github.com"
] |
leiqin01.noreply@github.com
|
4fc93ceb92b53151fe738828150e99657579c265
|
cf5d2b3cce51ae4ada792720164f92edf13db3ea
|
/perchess/pieces/rook.py
|
270a9ecc6d307f301b53d0cc95d2956def6c291a
|
[] |
no_license
|
rcisterna/perchess-py
|
95d6f49d5e884f1433d4572f882701765e00a742
|
fbd6650df6806345deb8bd801c3886e2f87cc54b
|
refs/heads/master
| 2022-11-14T04:43:38.731098
| 2020-07-05T02:36:21
| 2020-07-05T02:36:21
| 275,480,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
from perchess.pieces import Piece, Colors, Movement
class Rook(Piece):
"""Torre."""
def __init__(self, color: Colors):
"""
:param color: Color de jugador.
"""
movements = []
for travel in range(1, 8):
movements.extend([Movement(travel, 0), Movement(-travel, 0), Movement(0, travel), Movement(0, -travel)])
Piece.__init__(self, "R", color, movements)
|
[
"r.cisternasantos@gmail.com"
] |
r.cisternasantos@gmail.com
|
905cb8c5f6d0197487ae82ee1d0f00475fb00efe
|
2153a7ecfa69772797e379ff5642d52072a69b7c
|
/library/test/test_compiler/sbs_code_tests/70_class.py
|
64ce08233157b32ce3204a302018c8a61bc3d153
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
KCreate/skybison
|
a3789c84541f39dc6f72d4d3eb9783b9ed362934
|
d1740e08d8de85a0a56b650675717da67de171a0
|
refs/heads/trunk
| 2023-07-26T04:50:55.898224
| 2021-08-31T08:20:46
| 2021-09-02T19:25:08
| 402,908,053
| 1
| 0
|
NOASSERTION
| 2021-09-03T22:05:57
| 2021-09-03T22:05:57
| null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
class C:
pass
# EXPECTED:
[
LOAD_BUILD_CLASS(0),
LOAD_CONST(Code((1, 0))),
LOAD_CONST('C'),
MAKE_FUNCTION(0),
LOAD_CONST('C'),
CALL_FUNCTION(2),
STORE_NAME('C'),
...,
CODE_START('C'),
LOAD_NAME('__name__'),
STORE_NAME('__module__'),
LOAD_CONST('C'),
STORE_NAME('__qualname__'),
...,
]
|
[
"emacs@fb.com"
] |
emacs@fb.com
|
e2986c8aa61230f3eadcd07902d54ab807a0c186
|
d3c395fee26b8c7d468df3b6fc8d46109e3282ce
|
/SA_Math.py
|
d74bef19f20c3a6c74f893f461c1fd61e47c6edf
|
[] |
no_license
|
heshijiu/SA1.0
|
030c3810cc7fbca71c5fa1b89f18bd3b7b5d4163
|
18ac5f93b6dc597ebe5f5260fc7aa5313fe24896
|
refs/heads/master
| 2021-01-24T18:58:11.036256
| 2017-07-02T10:20:20
| 2017-07-02T10:20:20
| 86,162,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,008
|
py
|
#!/usr/bin/Python
# -*- coding: utf-8 -*-
import cmath
class Point:
x = 0
y = 0
def __init__(self, x, y):
self.x = x
self.y = y
    def __add__(self, other):
        # Return a new Point instead of mutating self and returning None
        return Point(self.x + other.x, self.y + other.y)
class Vector:
x = 0
y = 0
    def __init__(self, p1, p2):
        # Vector from point p1 to point p2. (Python keeps only the last
        # __init__ defined in a class, so a separate (x, y) overload would
        # never take effect.)
        self.x = p2.x - p1.x
        self.y = p2.y - p1.y
def Dot(self, v):
        dot = self.x * v.x + self.y * v.y
return dot.real
def Normal(self):
m = cmath.sqrt(self.x * self.x + self.y * self.y)
m = m.real
        if abs(m) < 1e-8:
return 0
self.x /= m
self.y /= m
    def Moudl(self):  # (sic) computes the vector's modulus (length)
m = cmath.sqrt(self.x * self.x + self.y * self.y)
return m.real
def Transform(array):
l = len(array)
s = len(array[0])
newArray = []
i = 0
while i < s:
j = 0
temp = []
while j < l:
temp.append(array[j][i])
j += 1
newArray.append(temp)
i += 1
return newArray
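# Example (added for clarity): Transform([[1, 2], [3, 4], [5, 6]]) returns
# [[1, 3, 5], [2, 4, 6]] -- a transpose for plain nested lists.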
class Samples:
__array = None
__count = 0
__mean = 0
__variance = 0
def __init__(self, array = None):
self.__array = array
self.__count = len(array)
self.__Mean()
self.__Variance()
return
def __Mean(self):
        if self.__array is None:
return
if self.__count == 0:
return
sum = 0
for item in self.__array:
sum += item
self.__mean = sum / self.__count
return
def __Variance(self):
if self.__count < 2:
return
        if self.__array is None:
return
sum = 0
for item in self.__array:
sum += (item - self.__mean) * (item - self.__mean)
self.__variance = sum / (self.__count - 1)
return
def mean(self):
return self.__mean
def variance(self):
return self.__variance
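# Minimal usage sketch (an added example with arbitrary data):
if __name__ == "__main__":
    s = Samples([2, 4, 4, 4, 5, 5, 7, 9])
    print(s.mean())      # 5.0
    print(s.variance())  # ~4.571 (sample variance, n - 1 in the denominator)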
|
[
"noreply@github.com"
] |
heshijiu.noreply@github.com
|
b40cdf18254250789a2505f91f27df20c101aac1
|
1ed9e03e550469ebcf0a6de56d8cc87150a2b861
|
/fedml_core/distributed/communication/__init__.py
|
2547ee9667fa186251ebac9ccb53e01c04a03164
|
[] |
no_license
|
StevenLOL/FedML
|
ba211140b9eb5c3acd18a4e3c8a8aa9dd8c175e3
|
fe4c6caeec680bb8f2fa8ae30c0717a4522e6f6c
|
refs/heads/master
| 2022-11-28T09:39:45.270989
| 2020-08-02T16:52:07
| 2020-08-02T16:52:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
from .com_manager import CommunicationManager
from .message import Message
|
[
"me@chaoyanghe.com"
] |
me@chaoyanghe.com
|
1c22d4445c54dc6358a0ba0086ed39af5a259b49
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/cloud/osconfig/agentendpoint/v1/osconfig-agentendpoint-v1-py/google/cloud/osconfig/agentendpoint_v1/services/agent_endpoint_service/transports/base.py
|
1529267fb51e6ac71e8e7bfbcf4c92072cb41021
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,967
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.osconfig.agentendpoint_v1.types import agentendpoint
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-osconfig-agentendpoint',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class AgentEndpointServiceTransport(abc.ABC):
"""Abstract transport class for AgentEndpointService."""
AUTH_SCOPES = (
)
DEFAULT_HOST: str = 'osconfig.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials is service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.receive_task_notification: gapic_v1.method.wrap_method(
self.receive_task_notification,
default_retry=retries.Retry(
initial=1.0,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.Aborted,
core_exceptions.Cancelled,
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
),
self.start_next_task: gapic_v1.method.wrap_method(
self.start_next_task,
default_timeout=None,
client_info=client_info,
),
self.report_task_progress: gapic_v1.method.wrap_method(
self.report_task_progress,
default_timeout=None,
client_info=client_info,
),
self.report_task_complete: gapic_v1.method.wrap_method(
self.report_task_complete,
default_timeout=None,
client_info=client_info,
),
self.register_agent: gapic_v1.method.wrap_method(
self.register_agent,
default_timeout=None,
client_info=client_info,
),
self.report_inventory: gapic_v1.method.wrap_method(
self.report_inventory,
default_timeout=None,
client_info=client_info,
),
}
@property
def receive_task_notification(self) -> Callable[
[agentendpoint.ReceiveTaskNotificationRequest],
Union[
agentendpoint.ReceiveTaskNotificationResponse,
Awaitable[agentendpoint.ReceiveTaskNotificationResponse]
]]:
raise NotImplementedError()
@property
def start_next_task(self) -> Callable[
[agentendpoint.StartNextTaskRequest],
Union[
agentendpoint.StartNextTaskResponse,
Awaitable[agentendpoint.StartNextTaskResponse]
]]:
raise NotImplementedError()
@property
def report_task_progress(self) -> Callable[
[agentendpoint.ReportTaskProgressRequest],
Union[
agentendpoint.ReportTaskProgressResponse,
Awaitable[agentendpoint.ReportTaskProgressResponse]
]]:
raise NotImplementedError()
@property
def report_task_complete(self) -> Callable[
[agentendpoint.ReportTaskCompleteRequest],
Union[
agentendpoint.ReportTaskCompleteResponse,
Awaitable[agentendpoint.ReportTaskCompleteResponse]
]]:
raise NotImplementedError()
@property
def register_agent(self) -> Callable[
[agentendpoint.RegisterAgentRequest],
Union[
agentendpoint.RegisterAgentResponse,
Awaitable[agentendpoint.RegisterAgentResponse]
]]:
raise NotImplementedError()
@property
def report_inventory(self) -> Callable[
[agentendpoint.ReportInventoryRequest],
Union[
agentendpoint.ReportInventoryResponse,
Awaitable[agentendpoint.ReportInventoryResponse]
]]:
raise NotImplementedError()
__all__ = (
'AgentEndpointServiceTransport',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
a37961de642964b227bc2185002940eabeba5a67
|
58be0e0db1be823d62c021e273f4073b7aa5e312
|
/Index_strat/LSMarket_future288_0.36BHedit.py
|
5fccbc5ccc54404f48bc7a46b0b662ccf1407f82
|
[] |
no_license
|
briansone/ai_a3d
|
028f7e9fbabeafaff3c628712e703c1fbcc8cbdf
|
80f053a4df0a4987edd0471966fca266e1f39e46
|
refs/heads/master
| 2022-11-30T20:57:44.913667
| 2020-08-16T09:00:24
| 2020-08-16T09:00:24
| 286,200,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,726
|
py
|
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn.model_selection import cross_val_score
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import load_model
# from pprint import pprint as pp
import numpy as np
from sklearn.preprocessing import minmax_scale
verbosity = 1
groupSize = 288
feature_names = [ 'Date', 'BHP', 'RIO', 'OSH', 'WPL' ]
subset = [ 'RIO', 'OSH', 'WPL' ]
voting_data = pd.read_csv( 'a3d_oil_equity.csv', names = feature_names )
voting_data.head()
voting_data.dropna( inplace = True ) # this removes incomplete rows... interesting
voting_data.describe()
def groupData( data, groupSize ):
mergedArray = []
    for i in range( data.shape[0] ):  # iterate over rows; data.size would overcount for 2-D input
if i > groupSize and i < data.shape[0] - 1:
mergedList = []
for ii in range( groupSize ):
mergedList = np.concatenate( ( mergedList, data[ i - ii ] ), axis=None )
mergedArray.append( mergedList )
return mergedArray
def groupLabels( data, groupSize ):
mergedArray = []
for i in range( data.size ):
i = i - 1
if i > groupSize:
mergedList = data[i]
mergedArray.append( mergedList )
return mergedArray
def doTraining():
trainingData = voting_data[ subset ].values
trainingData = minmax_scale( trainingData )
trainingData = groupData( trainingData, groupSize = groupSize )
trainingData = np.array( trainingData )
# print( trainingData.shape )
trainingLabels = voting_data[ 'BHP' ].values
trainingLabels = minmax_scale( trainingLabels )
trainingLabels = groupLabels( trainingLabels, groupSize = groupSize )
trainingLabels = np.array( trainingLabels )
# print( trainingLabels.shape )
model = Sequential()
    # Flattened window of grouped price features going into a 576-unit layer
    model.add( Dense( 576, input_dim = len( trainingData[0] ), kernel_initializer = 'normal', activation = 'relu' ) )
    # Another hidden layer of 192 units
    model.add( Dense( 192, kernel_initializer = 'normal', activation = 'relu' ) )
    # Another hidden layer of 64 units
    model.add( Dense( 64, kernel_initializer = 'normal', activation = 'relu' ) )
    # Single linear output unit for the regression target (the BHP series)
model.add( Dense( 1 ) )
# Compile model
model.compile( loss = 'mse', optimizer = 'rmsprop', metrics = [ 'mae' ] )
# Train model
model.fit( trainingData, trainingLabels, epochs = 5000, batch_size = 50, verbose = verbosity )
# Grade the model
scores = model.evaluate( trainingData, trainingLabels, verbose = verbosity )
print( "%s: %.2f%%" % ( model.metrics_names[1], scores[1]*100 ) )
# Save the model
model.save( 'BHMarket_Model.h5' )
def doPrediction():
trainingData = voting_data[ subset ].values
originalValue = trainingData[0][0]
trainingData = minmax_scale( trainingData )
normalizedValue = trainingData[0][0]
multiple = originalValue / normalizedValue
print( originalValue )
print( normalizedValue )
print( multiple )
trainingData = groupData( trainingData, groupSize = groupSize )
inputData = trainingData[-2]
trainingLabels = voting_data[ 'Date' ].values
trainingLabels = groupLabels( trainingLabels, groupSize = groupSize )
date = trainingLabels[-1]
print( inputData.shape )
loaded_model = load_model( 'LSMarket_Model.h5' )
# evaluate loaded model on test data
loaded_model.compile( loss = 'mse', optimizer = 'rmsprop', metrics = [ 'mae' ] )
# Predict things...
print( inputData )
print( inputData.shape )
thegoods = loaded_model.predict( inputData.reshape( (1, 864) ), batch_size = None, verbose = verbosity, steps = None )
print ( date, thegoods * multiple )
doTraining()
# doPrediction()
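# Notes (added): the reshape to (1, 864) in doPrediction corresponds to
# groupSize * len(subset) = 288 * 3 flattened features per sample. Also note
# that doTraining saves 'BHMarket_Model.h5' while doPrediction loads
# 'LSMarket_Model.h5'; those filenames must agree for prediction to work.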
|
[
"noreply@github.com"
] |
briansone.noreply@github.com
|
bd778f3a59e0229767373b451f37fe4d11718750
|
14fd8bd44393cfd196243be411039699bc0ee471
|
/marketplace/migrations/0008_auto_20181202_0032.py
|
a28a321e50734e0df60c7ec6a0d7a3421c58b81d
|
[] |
no_license
|
Icebreaker2018/Icebreaker
|
5248d1fedfdc5321a3fc0633fe19304e2b67a995
|
df1c0cd606bbb42be8a06ba330d0fd84248c1508
|
refs/heads/master
| 2022-12-16T16:24:00.520244
| 2018-12-11T03:24:04
| 2018-12-11T03:24:04
| 154,955,218
| 0
| 1
| null | 2022-12-08T01:18:11
| 2018-10-27T11:10:52
|
CSS
|
UTF-8
|
Python
| false
| false
| 590
|
py
|
# Generated by Django 2.0.5 on 2018-12-01 19:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0007_auto_20181202_0022'),
]
operations = [
migrations.RemoveField(
model_name='cart',
name='product',
),
migrations.AddField(
model_name='cart',
name='product',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='marketplace.product'),
),
]
|
[
"ajith.n17@iiits.in"
] |
ajith.n17@iiits.in
|
dbd667792f1668b9deab8ab7f7208a0d1aa2e8a7
|
f819e7e7ef6199b136ab14c6eb7778a9ca8ce2c1
|
/lib/plugins-loader/helpers/send_message.py
|
112b1bb7ca5a312274223802c9b8690df7bd1286
|
[] |
no_license
|
ewnd9/limelight
|
e66a3ab790476a7bbe2c5b91f199599d4b778475
|
966b04af76218533d496c05355e4885142040734
|
refs/heads/master
| 2021-01-17T19:10:00.850748
| 2015-09-21T15:53:54
| 2015-09-21T15:53:54
| 55,278,092
| 1
| 0
| null | 2016-04-02T04:43:07
| 2016-04-02T04:43:07
| null |
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
import urllib
import contacts
from applescript import asrun, asquote
import re
def normalize_phone(num):
drop = ' -.'
for c in drop:
num = num.replace(c, '')
if len(num) > 5 and re.match(r"^[0-9]+$", num):
return num
else:
return None
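# Examples (added): normalize_phone('555-867-5309') -> '5558675309';
# normalize_phone('+1 555.867.5309') -> None (the leading '+' fails the
# digits-only check).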
def send_message(recipient, body, attach_selected_files):
buddy = None
if normalize_phone(recipient):
buddy = normalize_phone(recipient)
else:
address_book = contacts.address_book_to_list()
result = contacts.find_contact(recipient, address_book, "phone")
if result:
buddy = result['phone'][0]
if not buddy:
asrun("display notification %s with title \"Limelight\""%(asquote("Couldn't find iMessage contact for %s."%recipient)))
return
set_selected_files = """
tell application "Finder"
set selectedFiles to selection
end tell
""" if attach_selected_files else "set selectedFiles to {}"
script = """
%s
using terms from application "Messages"
tell application "Messages"
activate
set targetService to 1st service whose service type = iMessage
set targetBuddy to buddy %s of targetService
send %s to targetBuddy
repeat with theFile in selectedFiles
send (theFile as alias) to targetBuddy
end repeat
end tell
end using terms from
"""%(set_selected_files, asquote(buddy), asquote(body))
    print(script)
asrun(script)
if __name__ == '__main__':
send_message("rebecca plattus", "message test", True)
|
[
"marc.brookman@gmail.com"
] |
marc.brookman@gmail.com
|
1000d07adde86b84f2994e67c3feb7eaa2dfe86f
|
6ff32eb4b6383c8f87b768af3873c6cbab5fb60a
|
/store/migrations/0008_order_customer.py
|
36944bf1561b3039c2a8672a4a9157d759de2ebf
|
[] |
no_license
|
itsyst/django-e-commerce
|
32fb99ce28bdfe368d2c7435578c53d61ae42fab
|
034a0d9f07560abac7b9391b8d2b9159fb60e254
|
refs/heads/master
| 2023-07-06T04:04:54.119123
| 2021-08-08T19:59:01
| 2021-08-08T19:59:01
| 386,954,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
# Generated by Django 3.2.5 on 2021-07-19 00:03
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('store', '0007_rename_membersship_customer_membership'),
]
operations = [
migrations.AddField(
model_name='order',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='store.customer'),
preserve_default=False,
),
]
|
[
"contact@elhamzi.me"
] |
contact@elhamzi.me
|
cce49df897b7c13bc32a590dee0ca0638412edaa
|
ed71767a3f5da3004ef333fa05721eeb84988f5e
|
/HealthBot/HealthBot/settings.py
|
fd1d6ef046c2da5a36dbaf35957426a8a90432f6
|
[] |
no_license
|
IISE-Hackathon/HealthBotWebapp
|
2c19606c19fef7c1f25749a54c2efe5a8b9fb301
|
2bdd9f66bcddcf956c7085a998379ae2db6c65f8
|
refs/heads/main
| 2023-02-06T04:10:07.380050
| 2020-12-20T02:34:05
| 2020-12-20T02:34:05
| 322,778,274
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,085
|
py
|
"""
Django settings for HealthBot project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$dgkfu1!k9a$6)y($wyt0%ncxcu1!v1e5@=zzp66$1l-92v1a4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Bot',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'HealthBot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'HealthBot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"mk6386223@gmail.com"
] |
mk6386223@gmail.com
|
ba29949675b315b73286a3656adc6c73f7fb2e03
|
c98e1a103faa18f7cb721607897466719f11642d
|
/sourcehold/maps/sections/section1049.py
|
3627310bb37aa279e0c7ead5bc23d53a18a14296
|
[
"MIT"
] |
permissive
|
J-T-de/sourcehold-maps
|
fff77c9c1c3cdfae8cae7817e39ed0e4e1f0affb
|
330ab1b3426dbd93b5de3b0c031419e54f6a1618
|
refs/heads/master
| 2022-12-08T23:32:40.874993
| 2020-09-12T17:57:32
| 2020-09-12T17:57:32
| 294,927,094
| 0
| 0
|
MIT
| 2020-09-12T11:08:31
| 2020-09-12T11:08:30
| null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
from .types import TileCompressedMapSection
class Section1049(TileCompressedMapSection):
_TYPE_ = "B"
_CLASS_ = int
|
[
"gynt@users.noreply.github.com"
] |
gynt@users.noreply.github.com
|
9d892e264bfcc2053dc648cf5f782ca27011689f
|
718b2424752e905e8c1451751b2afbca056ee3ff
|
/lacrosse_to_wunderground.py
|
d2675100ed09c27cfddccb1eaddbc23617b99f1b
|
[
"MIT"
] |
permissive
|
niavasha/lacrosse_to_wunderground
|
e2308361cc8d3e46a4ca906d7405ba2af32020ab
|
3aa1cfebca0320d01d6eea148735e31de664f9cd
|
refs/heads/master
| 2022-04-07T15:14:36.674882
| 2020-01-23T18:08:18
| 2020-01-23T18:08:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,352
|
py
|
"""
Downloads weather data from La Crosse Cloud System from your personal
weather station and uploads it to Wunderground via the wunderground/weather
personal weather station API.
"""
import sys
import time
import requests
import json
import datetime
from lacrosse_weather.lacrosse import lacrosse_login, lacrosse_get_locations, lacrosse_get_devices, lacrosse_get_weather_data
from wunderground_uploader.uploader import wunderground_upload_data_point
email = 'YOUR LA CROSSE VIEW ACCOUNT EMAIL'
password = 'YOUR LA CROSSE VIEW ACCOUNT PW'
station_id = 'YOUR WUNDERGROUND PWS STATION ID'
station_key = 'YOUR WUNDERGROUND PWS STATION KEY'
api_key = 'YOUR WUNDERGROUND API KEY'
def wunderground_get_utc_of_latest(station_id, api_key):
try:
r = requests.request('GET', 'https://api.weather.com/v2/pws/observations/current?stationId={}&format=json&units=e&apiKey={}'.format(station_id, api_key))
j = json.loads(r.content.decode('utf-8'))
ts = datetime.datetime.strptime(j['observations'][0]['obsTimeUtc'], "%Y-%m-%dT%H:%M:%S%z").timestamp()
except Exception:
ts = 0
print("Warning: Didn't get latest observation time, loading from time 0")
return int(ts)
def celsius_to_fahrenheit(celsius):
return (celsius * (9 / 5) ) + 32
def kilometers_per_hour_to_miles_per_hour(kilometers_per_hour):
return kilometers_per_hour / 1.609
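# Quick sanity examples (added): celsius_to_fahrenheit(20) == 68.0, and
# kilometers_per_hour_to_miles_per_hour(1.609) is roughly 1.0.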
def push_all_since_timestamp_temperature_to_wunderground(w, old_utc_timestamp):
for temp_data, humidity_data in zip(w['Temperature']['values'], w['Humidity']['values']):
utc_timestamp = temp_data['u']
if utc_timestamp > old_utc_timestamp:
weather_data = dict(
tempf=celsius_to_fahrenheit(temp_data['s']),
humidity=humidity_data['s']
)
wunderground_upload_data_point(station_id, station_key, weather_data, utc_timestamp)
time.sleep(2.5)
def push_all_since_timestamp_wind_to_wunderground(w, old_utc_timestamp):
for wind_data in w['WindSpeed']['values']:
utc_timestamp = wind_data['u']
if utc_timestamp > old_utc_timestamp:
weather_data = dict(
windspeedmph=kilometers_per_hour_to_miles_per_hour(wind_data['s'])
)
wunderground_upload_data_point(station_id, station_key, weather_data, utc_timestamp)
time.sleep(2.5)
if __name__ == '__main__':
try:
old_utc_timestamp = int(sys.argv[1])
except Exception:
old_utc_timestamp = wunderground_get_utc_of_latest(station_id, api_key)
token = lacrosse_login(email, password)
locations = lacrosse_get_locations(token)
devices = lacrosse_get_devices(token, locations)
new_timestamp = old_utc_timestamp
try:
for device in devices:
# TODO Will need updated credentials if we do long operations
# Your 'device_name' is likely different than 'temperature'
# replace this name with something that has an external "Temperature"
# sensor
# doing the following can show you the name here:
# print(device['device_name'])
# Same below with 'wind'
if device['device_name'] == 'temperature':
w = lacrosse_get_weather_data(token, device)
push_all_since_timestamp_temperature_to_wunderground(w, old_utc_timestamp)
new_timestamp = w['Temperature']['values'][-1]['u']
    # Do this twice, as the long pushes above may cause credentials to expire
token = lacrosse_login(email, password)
locations = lacrosse_get_locations(token)
devices = lacrosse_get_devices(token, locations)
for device in devices:
if device['device_name'] == 'wind':
w = lacrosse_get_weather_data(token, device)
push_all_since_timestamp_wind_to_wunderground(w, old_utc_timestamp)
except Exception:
# Ignore all errors, just retry again later with your automation
pass
# Usage:
# New timestamp is printed as output, pipe it to a file and use that file
# as input the next time the script is run. Set the file the first time
# manually
#
# i.e. python3 lacrosse_to_wunderground.py `cat weather_ts` > weather_ts
print(new_timestamp)
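# One way to automate this (an assumption; adjust paths for your setup) is a
# cron entry that persists the timestamp between runs, e.g.:
# */10 * * * * cd /path/to/repo && python3 lacrosse_to_wunderground.py `cat weather_ts` > weather_ts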
|
[
"keith.prickett@sdvi.com"
] |
keith.prickett@sdvi.com
|
6c54d81e4263105997a4b7dbcb57d4d4673fe0e2
|
5d0fe4a9e026234fe15e6c4380355061bb4dac64
|
/tests/functional/pages/profile/individual_enter_your_email_and_password.py
|
4ed6007a0f1fe073b148c538f8fdceb4a783b69b
|
[
"MIT"
] |
permissive
|
uktrade/directory-tests
|
37e243862da8ac594cf1ea06ade714db5e1aba03
|
39ec6c26203580238e65566a472cbd80916e6726
|
refs/heads/master
| 2022-08-09T16:58:56.248982
| 2022-08-01T12:25:10
| 2022-08-01T12:25:10
| 71,367,747
| 4
| 3
|
MIT
| 2022-08-01T12:26:09
| 2016-10-19T14:48:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
# -*- coding: utf-8 -*-
"""Profile - Individual - Enter your business email address and set a password"""
from requests import Response, Session
from directory_tests_shared import PageType, Service, URLs
from tests.functional.utils.context_utils import Actor
from tests.functional.utils.request import (
Method,
check_response,
check_url,
make_request,
)
SERVICE = Service.PROFILE
NAME = "Individual enter your email address and set a password"
TYPE = PageType.FORM
URL = URLs.PROFILE_ENROL_INDIVIDUAL_ENTER_YOUR_EMAIL_AND_PASSWORD.absolute
EXPECTED_STRINGS = [
"Enter your email address and set a password",
"Your email address",
"Set a password",
"Confirm password",
"Tick this box to accept the",
]
def go_to(session: Session) -> Response:
return make_request(Method.GET, URL, session=session)
def should_be_here(response: Response):
check_url(response, URL)
check_response(response, 200, body_contains=EXPECTED_STRINGS)
def submit(actor: Actor) -> Response:
session = actor.session
headers = {"Referer": URL}
data = {
"csrfmiddlewaretoken": actor.csrfmiddlewaretoken,
"individual_user_enrolment_view-current_step": "user-account",
"user-account-email": actor.email,
"user-account-password": actor.password,
"user-account-password_confirmed": actor.password,
"user-account-terms_agreed": "on",
"user-account-remote_password_error": None,
"g-recaptcha-response": "test mode",
}
return make_request(
Method.POST,
URL,
session=session,
headers=headers,
files=data,
no_filename_in_multipart_form_data=True,
)
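# A minimal usage sketch (hypothetical; assumes a fresh requests Session and an
# Actor populated by the test fixtures, as elsewhere in this suite):
# session = Session()
# response = go_to(session)
# should_be_here(response)
# response = submit(actor)  # actor.email/password/csrfmiddlewaretoken must be set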
|
[
"kowalczykjanusz@gmail.com"
] |
kowalczykjanusz@gmail.com
|
2b979052909dff740be6659a03ee1ad2b6daec66
|
3ef9439cc406f1c1a7a4877e2bee55acb8bad062
|
/manage.py
|
29c86fffcbc5e837668c6db3cb05c9d5d773fc5c
|
[] |
no_license
|
Obsir/ixStudy
|
d6d031ff80248cc92f5af6cdad6d60ebc078f257
|
63f716d3a3660bb922d1c848bf8747386a102e6e
|
refs/heads/master
| 2023-05-10T11:37:34.026374
| 2021-06-03T14:45:53
| 2021-06-03T14:45:53
| 369,462,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ixStudy.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"Obser47@outlook.com"
] |
Obser47@outlook.com
|
69af8143efa20aa1ca3024c64e30be8fef84b392
|
ff7864a5ab14702de8aab5d6af036b80f68247a4
|
/ipsolver/linprog/mehrotra/__init__.py
|
f8f342a3c5f9f5a2314245dee330a716a43855b0
|
[] |
no_license
|
codacy-badger/iplib
|
e99f90684cd7b57992da3cefbec7dea7d4d89af9
|
0f4eeea6cd6945a83f43b680c7321f7b9be2175e
|
refs/heads/master
| 2020-04-22T23:55:52.732562
| 2019-02-14T16:43:28
| 2019-02-14T16:43:28
| 170,759,222
| 1
| 0
| null | 2019-02-14T21:16:27
| 2019-02-14T21:16:26
| null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from . import base_imp
from . import mehrotra_ipm
from . import regularized_mehrotra_ipm
|
[
"maksym.shpakovych@gmail.com"
] |
maksym.shpakovych@gmail.com
|
ba3daecdc648fa1992bea625ee5b35854be3a673
|
066f580391746d4ebef860007a831452fa2657cb
|
/server_functions.py
|
ac803849057c73352456fd1323b08eb4a5125f64
|
[] |
no_license
|
KeremOzfo/Hybrid-Sparsification
|
a46c5e0747fa77a634ccbc17da651e4c8a0f8468
|
713e288b2c0636cb96c9fac75b6e072230368e8c
|
refs/heads/master
| 2023-02-23T16:40:36.239354
| 2021-01-28T17:43:30
| 2021-01-28T17:43:30
| 330,001,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,404
|
py
|
import torch
import math
import time
import numpy as np
import torch.nn as nn
def pull_model(model_user, model_server):
for param_user, param_server in zip(model_user.parameters(), model_server.parameters()):
param_user.data = param_server.data[:] + 0
return None
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def zero_grad_ps(model):
for param in model.parameters():
param.grad = torch.zeros_like(param.data)
return None
def push_grad(model_user, model_server, num_cl):
for param_user, param_server in zip(model_user.parameters(), model_server.parameters()):
param_server.grad.data += param_user.grad.data / num_cl
return None
def push_model(model_user, model_server, num_cl):
for param_user, param_server in zip(model_user.parameters(), model_server.parameters()):
param_server.data += param_user.data / num_cl
return None
def initialize_zero(model):
for param in model.parameters():
param.data.mul_(0)
return None
def update_model(model, prev_model, lr, momentum, weight_decay):
for param, prevIncrement in zip(model.parameters(), prev_model.parameters()):
        incrementVal = param.grad.data.add(param.data, alpha=weight_decay)
        incrementVal.add_(prevIncrement.data, alpha=momentum)
        incrementVal.mul_(lr)
        param.data.add_(incrementVal, alpha=-1)
prevIncrement.data = incrementVal
return None
def get_grad_flattened(model, device):
grad_flattened = torch.empty(0).to(device)
for p in model.parameters():
if p.requires_grad:
a = p.grad.data.flatten().to(device)
grad_flattened = torch.cat((grad_flattened, a), 0)
return grad_flattened
def get_model_flattened(model, device):
model_flattened = torch.empty(0).to(device)
for p in model.parameters():
a = p.data.flatten().to(device)
model_flattened = torch.cat((model_flattened, a), 0)
return model_flattened
def get_model_sizes(model):
    # get the size of each layer and the number of elements in each layer;
    # only trainable layers are included
net_sizes = []
net_nelements = []
for p in model.parameters():
if p.requires_grad:
net_sizes.append(p.data.size())
net_nelements.append(p.nelement())
return net_sizes, net_nelements
def unshuffle(shuffled_vec, seed):
orj_vec = torch.empty(shuffled_vec.size())
perm_inds = torch.tensor([i for i in range(shuffled_vec.nelement())])
perm_inds_shuffled = shuffle_deterministic(perm_inds, seed)
for i in range(shuffled_vec.nelement()):
orj_vec[perm_inds_shuffled[i]] = shuffled_vec[i]
return orj_vec
def shuffle_deterministic(grad_flat, seed):
# Shuffle the list ls using the seed `seed`
torch.manual_seed(seed)
idx = torch.randperm(grad_flat.nelement())
return grad_flat.view(-1)[idx].view(grad_flat.size())
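# Round-trip sketch for the two helpers above (any seed works, values are examples):
#   v = torch.tensor([10., 20., 30., 40.])
#   s = shuffle_deterministic(v, seed=42)
#   assert torch.equal(unshuffle(s, seed=42), v)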
def get_indices(net_sizes, net_nelements):
# for reconstructing grad from flattened grad
ind_pairs = []
ind_start = 0
ind_end = 0
for i in range(len(net_sizes)):
for j in range(i + 1):
ind_end += net_nelements[j]
# print(ind_start, ind_end)
ind_pairs.append((ind_start, ind_end))
ind_start = ind_end + 0
ind_end = 0
return ind_pairs
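# Example: net_nelements = [6, 3] yields ind_pairs = [(0, 6), (6, 9)], i.e. the
# flat-tensor slice [start:end] that belongs to each trainable layer.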
def make_grad_unflattened(model, grad_flattened, net_sizes, ind_pairs):
# unflattens the grad_flattened into the model.grad
i = 0
for p in model.parameters():
if p.requires_grad:
temp = grad_flattened[ind_pairs[i][0]:ind_pairs[i][1]]
p.grad.data = temp.reshape(net_sizes[i])
i += 1
return None
def make_model_unflattened(model, model_flattened, net_sizes, ind_pairs):
# unflattens the grad_flattened into the model.grad
i = 0
for p in model.parameters():
temp = model_flattened[ind_pairs[i][0]:ind_pairs[i][1]]
p.data = temp.reshape(net_sizes[i])
i += 1
return None
def make_sparse_grad(grad_flat, sparsity_window, device):
# sparsify using block model
num_window = math.ceil(grad_flat.nelement() / sparsity_window)
for i in range(num_window):
ind_start = i * sparsity_window
ind_end = min((i + 1) * sparsity_window, grad_flat.nelement())
a = grad_flat[ind_start: ind_end]
        ind = torch.topk(a.abs(), k=1, dim=0)[1]  # returns the index of the top element, not its value
val = a[ind]
ind_true = ind_start + ind
grad_flat[ind_start: ind_end] *= torch.zeros(a.nelement()).to(device)
grad_flat[ind_true] = val
return None
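# Example: grad_flat = [0.1, -0.9, 0.2, 0.3] with sparsity_window=2 keeps only the
# largest-magnitude entry per window -> [0.0, -0.9, 0.0, 0.3].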
def adjust_learning_rate(optimizer, epoch, lr_change, lr):
lr_change = np.asarray(lr_change)
loc = np.where(lr_change == epoch)[0][0] + 1
lr *= (0.1 ** loc)
lr = round(lr, 3)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_LR(optimizer):
lr = None
for param_group in optimizer.param_groups:
lr = param_group['lr']
return lr
def lr_warm_up(optimizers, num_workers, epoch, start_lr):
for cl in range(num_workers):
for param_group in optimizers[cl].param_groups:
if epoch == 0:
param_group['lr'] = 0.1
else:
lr_change = (start_lr - 0.1) / 4
param_group['lr'] = (lr_change * epoch) + 0.1
def get_bias_mask(model,device):
model_flattened = torch.empty(0).to(device)
for name, p in zip(model.named_parameters(),model.parameters()):
layer = name[0].split('.')
a = p.data.flatten().to(device)
if layer[len(layer)-1] == 'bias':
temp = torch.ones_like(a).to(device)
model_flattened = torch.cat((model_flattened, temp), 0)
else:
temp = torch.zeros_like(a).to(device)
model_flattened = torch.cat((model_flattened, temp), 0)
return model_flattened
def modify_freq_vec(freq_vec, grad, mask,bias_mask,add_percent,args):
topk = math.ceil(add_percent * (grad.numel() - torch.sum(bias_mask).item()) / 100)
vals, inds = torch.topk(grad.mul(1-mask).abs(), k=topk, dim=0)
freq_vec.mul_(args.freq_momentum)
freq_vec[inds] += 1
return None
def add_to_mask(freq_vec,mask,bias_mask,add_percent):
topk = math.ceil(add_percent * (freq_vec.numel() - torch.sum(bias_mask).item()) / 100)
vals, inds = torch.topk(freq_vec, k=topk, dim=0)
mask[inds] = 1
return None
def remove_from_mask(model,mask,bias_mask,drop_val):
model_size = model.numel()
zeros = model_size - (torch.nonzero(model.mul(1-bias_mask), as_tuple=False)).numel()
drop_k = math.ceil(drop_val * (model_size - torch.sum(bias_mask).item()) / 100)
vals, inds = torch.topk((model.mul(1-bias_mask)).abs(),k=model_size,dim=0)
inds = torch.flip(inds, dims=[0])
inds = inds[zeros:zeros+drop_k]
mask[inds] = 0
return None
def sparse_special_mask(flat_grad, sparsity_window, layer_spar, ind_pairs, device):
inds = torch.empty(0).to(device)
for layer in ind_pairs:
startPoint = (layer[0])
endPoint = (layer[1])
layer_len = endPoint - startPoint
l_top_k = math.ceil(layer_len / layer_spar)
l_vals, l_ind = torch.topk((flat_grad[startPoint:endPoint]).abs(), k=l_top_k, dim=0)
l_ind.add_(startPoint)
inds = torch.cat((inds.float(), l_ind.float()), 0)
inds = inds.long()
clone_grad = torch.clone(flat_grad).to(device)
clone_grad[inds] = 0
topk = math.ceil(len(flat_grad) / (sparsity_window)) - inds.numel()
vals_, inds_ = torch.topk(clone_grad.abs(), k=topk, dim=0)
inds = torch.cat((inds, inds_), 0)
clone_grad *= 0
clone_grad[inds] = 1
return clone_grad
def groups(grad_flat, group_len, denominator, device):
sparseCount = torch.sum(grad_flat != 0)
sparseCount = sparseCount.__int__()
vals, ind = torch.topk(grad_flat.abs(), k=sparseCount, dim=0)
group_boundries = torch.zeros(group_len + 1).to(device)
group_boundries[0] = vals[0].float()
sign_mask = torch.sign(grad_flat[ind])
for i in range(1, group_len):
group_boundries[i] = group_boundries[i - 1] / denominator
startPoint = 0
newVals = torch.zeros_like(vals)
startPointz = []
for i in range(group_len):
if vals[startPoint] > group_boundries[i + 1]:
startPointz.append(startPoint)
for index, val in enumerate(vals[startPoint:vals.numel()]):
if val <= group_boundries[i + 1] and group_boundries[i + 1] != 0:
newVals[startPoint:startPoint + index] = torch.mean(vals[startPoint:startPoint + index])
startPoint += index
break
elif group_boundries[i + 1] == 0:
newVals[startPoint:vals.numel()] = torch.mean(vals[startPoint:vals.numel()])
break
newVals *= sign_mask
grad_flat *= 0
grad_flat[ind] = newVals
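# Example of the grouping above: with group_len=2 and denominator=10, the nonzero
# magnitudes are split into bands at max and max/10, and every value in a band is
# replaced by the band's mean magnitude (signs restored) -- a coarse log-scale
# quantization of the sparse gradient.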
def get_momentum_flattened(opt,device):
momentum_flattened = torch.empty(0).to(device)
for groupAvg in (opt.param_groups): # momentum
for p_avg in groupAvg['params']:
param_state_avg = opt.state[p_avg]
if 'momentum_buffer' not in param_state_avg:
buf_avg = param_state_avg['momentum_buffer'] = torch.zeros_like(p_avg.data)
else:
buf_avg = param_state_avg['momentum_buffer']
momentum_flattened = torch.cat((momentum_flattened, buf_avg.flatten().to(device)), 0)
return momentum_flattened
def make_momentum_unflattened(opt, momentum_flattened, net_sizes, ind_pairs):
i = 0
for groupAvg in (opt.param_groups): # momentum
for p_avg in groupAvg['params']:
temp = momentum_flattened[ind_pairs[i][0]:ind_pairs[i][1]]
opt.state[p_avg]['momentum_buffer'] = temp.reshape(net_sizes[i])
i+=1
return None
def custom_SGD(model,flat_momentum,mask,net_sizes,ind_pairs,lr,device,args):
flat_model = get_model_flattened(model,device)
flat_grad = get_grad_flattened(model,device)
flat_grad = flat_grad.add(flat_model,alpha=args.wd)
flat_grad.mul_(mask)
flat_momentum.mul_(args.SGDmomentum).add_(flat_grad, alpha=1)
if args.nesterov:
flat_grad = flat_grad.add(flat_momentum, alpha=args.SGDmomentum)
else:
flat_grad = flat_momentum
flat_model = flat_model.add(flat_grad, alpha=-lr)
make_model_unflattened(model,flat_model,net_sizes,ind_pairs)
return None
def get_BN_mask(net,device):
mask = torch.empty(0).to(device)
for layer in net.modules(): # Prune only convolutional and linear layers
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
layer_weight = layer.weight
len = layer_weight.numel()
mask_ = torch.zeros(len,device=device)
mask = torch.cat((mask, mask_), 0)
if layer.bias is not None:
bias = layer.bias.numel()
mask_ = torch.ones(bias, device=device)
mask = torch.cat((mask, mask_), 0)
elif isinstance(layer, nn.BatchNorm2d):
bn_params = 0
for p in layer.parameters():
bn_params += p.numel()
mask_ = torch.ones(bn_params, device=device)
mask = torch.cat((mask, mask_), 0)
return mask
|
[
"keremozfatura@me.com"
] |
keremozfatura@me.com
|
984435c2ce0a43415fd4459d4871689edb1602c2
|
f8141f06d7fb3f80788086669fb3bda86564aed5
|
/Students/7-object-detection-frnn/cntk_helpers.py
|
d76e3c4124d1f1264447aed23227caa43b51421a
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
Azure-Samples/learnAnalytics-DeepLearning-Azure
|
bff46e9dbcd5c949e9e44cabed4c4db28d20df09
|
00df3cc0c98e8ed28bb33364138e748aa8364b5b
|
refs/heads/master
| 2022-01-04T16:35:46.656647
| 2019-06-28T20:14:04
| 2019-06-28T20:14:04
| 102,758,327
| 8
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,360
|
py
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
from builtins import str
import pdb, sys, os, time
import numpy as np
import selectivesearch
from easydict import EasyDict
from fastRCNN.nms import nms as nmsPython
from builtins import range
import cv2, copy, textwrap
from PIL import Image, ImageFont, ImageDraw
from PIL.ExifTags import TAGS
available_font = "arial.ttf"
try:
dummy = ImageFont.truetype(available_font, 16)
except:
available_font = "FreeMono.ttf"
####################################
# Region-of-interest
####################################
def getSelectiveSearchRois(img, ssScale, ssSigma, ssMinSize, maxDim):
# Selective Search
# Parameters
# ----------
# im_orig : ndarray
# Input image
# scale : int
# Free parameter. Higher means larger clusters in felzenszwalb segmentation.
# sigma : float
# Width of Gaussian kernel for felzenszwalb segmentation.
# min_size : int
# Minimum component size for felzenszwalb segmentation.
# Returns
# -------
# img : ndarray
# image with region label
# region label is stored in the 4th value of each pixel [r,g,b,(region)]
# regions : array of dict
# [
# {
# 'rect': (left, top, right, bottom),
# 'labels': [...]
# },
# ...
# ]
    # INTER_AREA seems to give much better results, especially when upscaling the image
img, scale = imresizeMaxDim(img, maxDim, boUpscale=True, interpolation = cv2.INTER_AREA)
_, ssRois = selectivesearch.selective_search(img, scale=ssScale, sigma=ssSigma, min_size=ssMinSize)
rects = []
for ssRoi in ssRois:
x, y, w, h = ssRoi['rect']
rects.append([x,y,x+w,y+h])
return rects, img, scale
def getGridRois(imgWidth, imgHeight, nrGridScales, aspectRatios = [1.0]):
rects = []
# start adding large ROIs and then smaller ones
for iter in range(nrGridScales):
cellWidth = 1.0 * min(imgHeight, imgWidth) / (2 ** iter)
step = cellWidth / 2.0
for aspectRatio in aspectRatios:
wStart = 0
while wStart < imgWidth:
hStart = 0
while hStart < imgHeight:
if aspectRatio < 1:
wEnd = wStart + cellWidth
hEnd = hStart + cellWidth / aspectRatio
else:
wEnd = wStart + cellWidth * aspectRatio
hEnd = hStart + cellWidth
if wEnd < imgWidth-1 and hEnd < imgHeight-1:
rects.append([wStart, hStart, wEnd, hEnd])
hStart += step
wStart += step
return rects
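# Example: with the default aspectRatios=[1.0], each scale adds sliding square
# ROIs whose side halves per scale (min(w, h), min(w, h)/2, ...), stepped by half
# a cell; boxes touching the image border are dropped.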
def filterRois(rects, maxWidth, maxHeight, roi_minNrPixels, roi_maxNrPixels,
roi_minDim, roi_maxDim, roi_maxAspectRatio):
filteredRects = []
filteredRectsSet = set()
for rect in rects:
if tuple(rect) in filteredRectsSet: # excluding rectangles with same co-ordinates
continue
x, y, x2, y2 = rect
w = x2 - x
h = y2 - y
assert(w>=0 and h>=0)
# apply filters
if h == 0 or w == 0 or \
x2 > maxWidth or y2 > maxHeight or \
w < roi_minDim or h < roi_minDim or \
w > roi_maxDim or h > roi_maxDim or \
w * h < roi_minNrPixels or w * h > roi_maxNrPixels or \
w / h > roi_maxAspectRatio or h / w > roi_maxAspectRatio:
continue
filteredRects.append(rect)
filteredRectsSet.add(tuple(rect))
# could combine rectangles using non-maxima surpression or with similar co-ordinates
# groupedRectangles, weights = cv2.groupRectangles(np.asanyarray(rectsInput, np.float).tolist(), 1, 0.3)
# groupedRectangles = nms_python(np.asarray(rectsInput, np.float), 0.5)
assert(len(filteredRects) > 0)
return filteredRects
def readRois(roiDir, subdir, imgFilename):
roiPath = os.path.join(roiDir, subdir, imgFilename[:-4] + ".roi.txt")
rois = np.loadtxt(roiPath, np.int)
if len(rois) == 4 and type(rois[0]) == np.int32: # if only a single ROI in an image
rois = [rois]
return rois
####################################
# Generate and parse CNTK files
####################################
def readGtAnnotation(imgPath):
bboxesPath = imgPath[:-4] + ".bboxes.tsv"
labelsPath = imgPath[:-4] + ".bboxes.labels.tsv"
bboxes = np.array(readTable(bboxesPath), np.int32)
labels = readFile(labelsPath)
assert (len(bboxes) == len(labels))
return bboxes, labels
def getCntkInputPaths(cntkFilesDir, image_set):
cntkImgsListPath = os.path.join(cntkFilesDir, image_set + '.txt')
cntkRoiCoordsPath = os.path.join(cntkFilesDir, image_set + '.rois.txt')
cntkRoiLabelsPath = os.path.join(cntkFilesDir, image_set + '.roilabels.txt')
cntkNrRoisPath = os.path.join(cntkFilesDir, image_set + '.nrRois.txt')
return cntkImgsListPath, cntkRoiCoordsPath, cntkRoiLabelsPath, cntkNrRoisPath
def roiTransformPadScaleParams(imgWidth, imgHeight, padWidth, padHeight, boResizeImg = True):
scale = 1.0
if boResizeImg:
        assert padWidth == padHeight, "currently only equal width/height is supported"
scale = 1.0 * padWidth / max(imgWidth, imgHeight)
imgWidth = round(imgWidth * scale)
imgHeight = round(imgHeight * scale)
targetw = padWidth
targeth = padHeight
w_offset = ((targetw - imgWidth) / 2.)
h_offset = ((targeth - imgHeight) / 2.)
    if boResizeImg and w_offset > 0 and h_offset > 0:
        raise ValueError("ERROR: both offsets are > 0: {} {} {} {}".format(imgWidth, imgHeight, w_offset, h_offset))
if (w_offset < 0 or h_offset < 0):
print ("ERROR: at least one offset is < 0:", imgWidth, imgHeight, w_offset, h_offset, scale)
return targetw, targeth, w_offset, h_offset, scale
def roiTransformPadScale(rect, w_offset, h_offset, scale = 1.0):
rect = [int(round(scale * d)) for d in rect]
rect[0] += w_offset
rect[1] += h_offset
rect[2] += w_offset
rect[3] += h_offset
return rect
def getCntkRoiCoordsLine(rect, targetw, targeth):
# Return the absolute coordinate of the ROI in the original image.
x1, y1, x2, y2 = rect
return " {} {} {} {}".format(x1, y1, x2, y2)
def getCntkRoiLabelsLine(overlaps, thres, nrClasses):
# get one hot encoding
maxgt = np.argmax(overlaps)
if overlaps[maxgt] < thres: # set to background label if small overlap with GT
maxgt = 0
oneHot = np.zeros((nrClasses), dtype=int)
oneHot[maxgt] = 1
oneHotString = " {}".format(" ".join(str(x) for x in oneHot))
return oneHotString
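# Example: overlaps=[0.1, 0.8, 0.3] with thres=0.5 and nrClasses=3 -> " 0 1 0";
# with overlaps=[0.1, 0.4, 0.3] the best overlap is below thres -> " 1 0 0" (background).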
def cntkPadInputs(currentNrRois, targetNrRois, nrClasses, boxesStr, labelsStr):
assert currentNrRois <= targetNrRois, "Current number of rois ({}) should be <= target number of rois ({})".format(currentNrRois, targetNrRois)
while currentNrRois < targetNrRois:
boxesStr += " 0 0 0 0"
labelsStr += " 1" + " 0" * (nrClasses - 1)
currentNrRois += 1
return boxesStr, labelsStr
def checkCntkOutputFile(cntkImgsListPath, cntkOutputPath, cntkNrRois, outputDim):
imgPaths = getColumn(readTable(cntkImgsListPath), 1)
with open(cntkOutputPath) as fp:
for imgIndex in range(len(imgPaths)):
if imgIndex % 100 == 1:
print ("Checking cntk output file, image %d of %d..." % (imgIndex, len(imgPaths)))
for roiIndex in range(cntkNrRois):
assert (fp.readline() != "")
assert (fp.readline() == "") # test if end-of-file is reached
# parse the cntk output file and save the output for each image individually
def parseCntkOutput(cntkImgsListPath, cntkOutputPath, outParsedDir, cntkNrRois, outputDim,
saveCompressed = False, skipCheck = False, skip5Mod = None):
if not skipCheck and skip5Mod == None:
checkCntkOutputFile(cntkImgsListPath, cntkOutputPath, cntkNrRois, outputDim)
# parse cntk output and write file for each image
# always read in data for each image to forward file pointer
imgPaths = getColumn(readTable(cntkImgsListPath), 1)
with open(cntkOutputPath) as fp:
for imgIndex in range(len(imgPaths)):
line = fp.readline()
if skip5Mod != None and imgIndex % 5 != skip5Mod:
print ("Skipping image {} (skip5Mod = {})".format(imgIndex, skip5Mod))
continue
print ("Parsing cntk output file, image %d of %d" % (imgIndex, len(imgPaths)))
# convert to floats
data = []
values = np.fromstring(line, dtype=float, sep=" ")
assert len(values) == cntkNrRois * outputDim, "ERROR: expected dimension of {} but found {}".format(cntkNrRois * outputDim, len(values))
for i in range(cntkNrRois):
posStart = i * outputDim
posEnd = posStart + outputDim
currValues = values[posStart:posEnd]
data.append(currValues)
# save
data = np.array(data, np.float32)
outPath = os.path.join(outParsedDir, str(imgIndex) + ".dat")
if saveCompressed:
np.savez_compressed(outPath, data)
else:
np.savez(outPath, data)
assert (fp.readline() == "") # test if end-of-file is reached
# parse the cntk labels file and return the labels
def readCntkRoiLabels(roiLabelsPath, nrRois, roiDim, stopAtImgIndex = None):
roiLabels = []
for imgIndex, line in enumerate(readFile(roiLabelsPath)):
if stopAtImgIndex and imgIndex == stopAtImgIndex:
break
roiLabels.append([])
pos = line.find(b'|roiLabels ')
valuesString = line[pos + 10:].strip().split(b' ')
assert (len(valuesString) == nrRois * roiDim)
for boxIndex in range(nrRois):
oneHotLabels = [int(s) for s in valuesString[boxIndex*roiDim : (boxIndex+1)*roiDim]]
assert(sum(oneHotLabels) == 1)
roiLabels[imgIndex].append(np.argmax(oneHotLabels))
return roiLabels
# parse the cntk rois file and return the co-ordinates
def readCntkRoiCoordinates(imgPaths, cntkRoiCoordsPath, nrRois, padWidth, padHeight, stopAtImgIndex = None):
roiCoords = []
for imgIndex, line in enumerate(readFile(cntkRoiCoordsPath)):
if stopAtImgIndex and imgIndex == stopAtImgIndex:
break
roiCoords.append([])
pos = line.find(b'|rois ')
valuesString = line[pos + 5:].strip().split(b' ')
assert (len(valuesString) == nrRois * 4)
imgWidth, imgHeight = imWidthHeight(imgPaths[imgIndex])
for boxIndex in range(nrRois):
rect = [float(s) for s in valuesString[boxIndex*4 : (boxIndex+1)*4]]
x1,y1,x2,y2 = rect
# convert back from padded-rois-co-ordinates to image co-ordinates
rect = getAbsoluteROICoordinates([x1,y1,x2,y2], imgWidth, imgHeight, padWidth, padHeight)
roiCoords[imgIndex].append(rect)
return roiCoords
def getAbsoluteROICoordinates(roi, imgWidth, imgHeight, padWidth, padHeight, resizeMethod = 'padScale'):
'''
The input image are usually padded to a fixed size, this method compute back the original
ROI absolute coordinate before the padding.
'''
if roi == [0,0,0,0]: # if padded roi
return [0,0,0,0]
if resizeMethod == "pad" or resizeMethod == "padScale":
if resizeMethod == "padScale":
scale = float(padWidth) / max(imgWidth, imgHeight)
imgWidthScaled = int(round(imgWidth * scale))
imgHeightScaled = int(round(imgHeight * scale))
else:
scale = 1.0
imgWidthScaled = imgWidth
imgHeightScaled = imgHeight
w_offset = float(padWidth - imgWidthScaled) / 2.0
h_offset = float(padHeight - imgHeightScaled) / 2.0
if resizeMethod == "padScale":
assert(w_offset == 0 or h_offset == 0)
rect = [roi[0] - w_offset, roi[1] - h_offset, roi[2] - w_offset, roi[3] - h_offset]
rect = [int(round(r / scale)) for r in rect]
else:
print ("ERROR: Unknown resize method '%s'" % resizeMethod)
error
assert(min(rect) >=0 and max(rect[0],rect[2]) <= imgWidth and max(rect[1],rect[3]) <= imgHeight)
return rect
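# Worked example ('padScale'): a 200x100 image padded to 400x400 gives scale=2
# and h_offset=100, so the padded roi [0, 100, 400, 300] maps back to the
# original-image rect [0, 0, 200, 100].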
####################################
# Classifier training / scoring
####################################
def getSvmModelPaths(svmDir, experimentName):
svmWeightsPath = "{}svmweights_{}.txt".format(svmDir, experimentName)
svmBiasPath = "{}svmbias_{}.txt".format(svmDir, experimentName)
svmFeatScalePath = "{}svmfeature_scale_{}.txt".format(svmDir, experimentName)
return svmWeightsPath, svmBiasPath, svmFeatScalePath
def loadSvm(svmDir, experimentName):
svmWeightsPath, svmBiasPath, svmFeatScalePath = getSvmModelPaths(svmDir, experimentName)
svmWeights = np.loadtxt(svmWeightsPath, np.float32)
svmBias = np.loadtxt(svmBiasPath, np.float32)
svmFeatScale = np.loadtxt(svmFeatScalePath, np.float32)
return svmWeights, svmBias, svmFeatScale
def saveSvm(svmDir, experimentName, svmWeights, svmBias, featureScale):
svmWeightsPath, svmBiasPath, svmFeatScalePath = getSvmModelPaths(svmDir, experimentName)
np.savetxt(svmWeightsPath, svmWeights)
np.savetxt(svmBiasPath, svmBias)
np.savetxt(svmFeatScalePath, featureScale)
def svmPredict(imgIndex, cntkOutputIndividualFilesDir, svmWeights, svmBias, svmFeatScale, roiSize, roiDim, decisionThreshold = 0):
cntkOutputPath = os.path.join(cntkOutputIndividualFilesDir, str(imgIndex) + ".dat.npz")
data = np.load(cntkOutputPath)['arr_0']
assert(len(data) == roiSize)
# get prediction for each roi
labels = []
maxScores = []
for roiIndex in range(roiSize):
feat = data[roiIndex]
scores = np.dot(svmWeights, feat * 1.0 / svmFeatScale) + svmBias.ravel()
assert (len(scores) == roiDim)
maxArg = np.argmax(scores[1:]) + 1
maxScore = scores[maxArg]
if maxScore < decisionThreshold:
maxArg = 0
labels.append(maxArg)
maxScores.append(maxScore)
return labels, maxScores
def nnPredict(imgIndex, cntkParsedOutputDir, roiSize, roiDim, decisionThreshold = None):
cntkOutputPath = os.path.join(cntkParsedOutputDir, str(imgIndex) + ".dat.npz")
data = np.load(cntkOutputPath)['arr_0']
assert(len(data) == roiSize)
# get prediction for each roi
labels = []
maxScores = []
for roiIndex in range(roiSize):
scores = data[roiIndex]
scores = softmax(scores)
assert (len(scores) == roiDim)
maxArg = np.argmax(scores)
maxScore = scores[maxArg]
if decisionThreshold and maxScore < decisionThreshold:
maxArg = 0
labels.append(maxArg)
maxScores.append(maxScore)
return labels, maxScores
def imdbUpdateRoisWithHighGtOverlap(imdb, positivesGtOverlapThreshold):
addedPosCounter = 0
existingPosCounter = 0
for imgIndex in range(imdb.num_images):
for boxIndex, gtLabel in enumerate(imdb.roidb[imgIndex]['gt_classes']):
if gtLabel > 0:
existingPosCounter += 1
else:
overlaps = imdb.roidb[imgIndex]['gt_overlaps'][boxIndex, :].toarray()[0]
maxInd = np.argmax(overlaps)
maxOverlap = overlaps[maxInd]
if maxOverlap >= positivesGtOverlapThreshold and maxInd > 0:
addedPosCounter += 1
imdb.roidb[imgIndex]['gt_classes'][boxIndex] = maxInd
return existingPosCounter, addedPosCounter
####################################
# Visualize results
####################################
def visualizeResults(imgPath, roiLabels, roiScores, roiRelCoords, padWidth, padHeight, classes,
nmsKeepIndices = None, boDrawNegativeRois = True, decisionThreshold = 0.0):
# read and resize image
imgWidth, imgHeight = imWidthHeight(imgPath)
scale = 800.0 / max(imgWidth, imgHeight)
imgDebug = imresize(imread(imgPath), scale)
assert(len(roiLabels) == len(roiRelCoords))
if roiScores:
assert(len(roiLabels) == len(roiScores))
# draw multiple times to avoid occlusions
for iter in range(0,3):
for roiIndex in range(len(roiRelCoords)):
label = roiLabels[roiIndex]
if roiScores:
score = roiScores[roiIndex]
if decisionThreshold and score < decisionThreshold:
label = 0
# init drawing parameters
thickness = 1
if label == 0:
color = (255, 0, 0)
else:
color = getColorsPalette()[label]
rect = [int(scale * i) for i in roiRelCoords[roiIndex]]
# draw in higher iterations only the detections
if iter == 0 and boDrawNegativeRois:
drawRectangles(imgDebug, [rect], color=color, thickness=thickness)
elif iter==1 and label > 0:
if not nmsKeepIndices or (roiIndex in nmsKeepIndices):
thickness = 4
drawRectangles(imgDebug, [rect], color=color, thickness=thickness)
elif iter == 2 and label > 0:
if not nmsKeepIndices or (roiIndex in nmsKeepIndices):
try:
font = ImageFont.truetype(available_font, 18)
except:
font = ImageFont.load_default()
text = classes[label]
if roiScores:
text += "(" + str(round(score, 2)) + ")"
imgDebug = drawText(imgDebug, (rect[0],rect[1]), text, color = (255,255,255), font = font, colorBackground=color)
return imgDebug
def applyNonMaximaSuppression(nmsThreshold, labels, scores, coords, ignore_background=False):
# generate input for nms
allIndices = []
nmsRects = [[[]] for _ in range(max(labels) + 1)]
coordsWithScores = np.hstack((coords, np.array([scores]).T))
for i in range(max(labels) + 1):
indices = np.where(np.array(labels) == i)[0]
nmsRects[i][0] = coordsWithScores[indices,:]
allIndices.append(indices)
# call nms
_, nmsKeepIndicesList = apply_nms(nmsRects, nmsThreshold, ignore_background=ignore_background)
# map back to original roi indices
nmsKeepIndices = []
for i in range(max(labels) + 1):
for keepIndex in nmsKeepIndicesList[i][0]:
            nmsKeepIndices.append(allIndices[i][keepIndex])
    assert (len(nmsKeepIndices) == len(set(nmsKeepIndices)))  # check that no roi index was added more than once
return nmsKeepIndices
def apply_nms(all_boxes, thresh, ignore_background=False, boUsePythonImpl=True):
"""Apply non-maximum suppression to all predicted boxes output by the test_net method."""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in range(num_images)]
for _ in range(num_classes)]
nms_keepIndices = [[[] for _ in range(num_images)]
for _ in range(num_classes)]
for cls_ind in range(num_classes):
if ignore_background and (cls_ind == 0):
continue
for im_ind in range(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
if boUsePythonImpl:
keep = nmsPython(dets, thresh)
else:
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
nms_keepIndices[cls_ind][im_ind] = keep
return nms_boxes, nms_keepIndices
####################################
# Wrappers for compatibility with
# original fastRCNN code
####################################
class DummyNet(object):
def __init__(self, dim, num_classes, cntkParsedOutputDir):
self.name = 'dummyNet'
self.cntkParsedOutputDir = cntkParsedOutputDir
self.params = {
"cls_score": [ EasyDict({'data': np.zeros((num_classes, dim), np.float32) }),
EasyDict({'data': np.zeros((num_classes, 1), np.float32) })],
"trainers" : None,
}
def im_detect(net, im, boxes, feature_scale=None, bboxIndices=None, boReturnClassifierScore=True, classifier = 'svm'): # trainers=None,
# Return:
# scores (ndarray): R x K array of object class scores (K includes
# background as object category 0)
# (optional) boxes (ndarray): R x (4*K) array of predicted bounding boxes
# load cntk output for the given image
cntkOutputPath = os.path.join(net.cntkParsedOutputDir, str(im) + ".dat.npz")
cntkOutput = np.load(cntkOutputPath)['arr_0']
if bboxIndices != None:
cntkOutput = cntkOutput[bboxIndices, :] # only keep output for certain rois
else:
cntkOutput = cntkOutput[:len(boxes), :] # remove zero-padded rois
# compute scores for each box and each class
scores = None
if boReturnClassifierScore:
if classifier == 'nn':
scores = softmax2D(cntkOutput)
elif classifier == 'svm':
svmBias = net.params['cls_score'][1].data.transpose()
svmWeights = net.params['cls_score'][0].data.transpose()
scores = np.dot(cntkOutput * 1.0 / feature_scale, svmWeights) + svmBias
assert (np.unique(scores[:, 0]) == 0) # svm always returns 0 for label 0
        else:
            raise ValueError("ERROR: unknown classifier '%s'" % classifier)
return scores, None, cntkOutput
####################################
# Subset of helper library
# used in the fastRCNN code
####################################
# Typical meaning of variable names -- Computer Vision:
# pt = 2D point (column,row)
# img = image
# width,height (or w/h) = image dimensions
# bbox = bbox object (stores: left, top,right,bottom co-ordinates)
# rect = rectangle (order: left, top, right, bottom)
# angle = rotation angle in degree
# scale = image up/downscaling factor
# Typical meaning of variable names -- general:
# lines,strings = list of strings
# line,string = single string
# xmlString = string with xml tags
# table = 2D row/column matrix implemented using a list of lists
# row,list1D = single row in a table, i.e. single 1D-list
# rowItem = single item in a row
# list1D = list of items, not necessarily strings
# item = single item of a list1D
# slotValue = e.g. "terminator" in: play <movie> terminator </movie>
# slotTag = e.g. "<movie>" or "</movie>" in: play <movie> terminator </movie>
# slotName = e.g. "movie" in: play <movie> terminator </movie>
# slot = e.g. "<movie> terminator </movie>" in: play <movie> terminator </movie>
def makeDirectory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def getFilesInDirectory(directory, postfix = ""):
fileNames = [s for s in os.listdir(directory) if not os.path.isdir(os.path.join(directory, s))]
if not postfix or postfix == "":
return fileNames
else:
return [s for s in fileNames if s.lower().endswith(postfix)]
def readFile(inputFile):
#reading as binary, to avoid problems with end-of-text characters
#note that readlines() does not remove the line ending characters
with open(inputFile,'rb') as f:
lines = f.readlines()
return [removeLineEndCharacters(s) for s in lines]
def readTable(inputFile, delimiter='\t', columnsToKeep=None):
    lines = readFile(inputFile)
if columnsToKeep != None:
header = lines[0].split(delimiter)
columnsToKeepIndices = listFindItems(header, columnsToKeep)
else:
        columnsToKeepIndices = None
return splitStrings(lines, delimiter, columnsToKeepIndices)
def getColumn(table, columnIndex):
    column = []
for row in table:
column.append(row[columnIndex])
return column
def deleteFile(filePath):
if os.path.exists(filePath):
os.remove(filePath)
def writeFile(outputFile, lines):
with open(outputFile,'w') as f:
for line in lines:
f.write("%s\n" % line)
def writeTable(outputFile, table):
lines = tableToList1D(table)
writeFile(outputFile, lines)
def deleteAllFilesInDirectory(directory, fileEndswithString, boPromptUser = False):
if boPromptUser:
        userInput = input('--> INPUT: Press "y" to delete files in directory ' + directory + ": ")
if not (userInput.lower() == 'y' or userInput.lower() == 'yes'):
print ("User input is %s: exiting now." % userInput)
exit()
for filename in getFilesInDirectory(directory):
if fileEndswithString == None or filename.lower().endswith(fileEndswithString):
deleteFile(os.path.join(directory, filename))
def removeLineEndCharacters(line):
if line.endswith(b'\r\n'):
return line[:-2]
elif line.endswith(b'\n'):
return line[:-1]
else:
return line
def splitString(string, delimiter='\t', columnsToKeepIndices=None):
if string == None:
return None
items = string.decode('utf-8').split(delimiter)
if columnsToKeepIndices != None:
items = getColumns([items], columnsToKeepIndices)
items = items[0]
    return items
def splitStrings(strings, delimiter, columnsToKeepIndices=None):
table = [splitString(string, delimiter, columnsToKeepIndices) for string in strings]
    return table
def find(list1D, func):
return [index for (index,item) in enumerate(list1D) if func(item)]
def tableToList1D(table, delimiter='\t'):
return [delimiter.join([str(s) for s in row]) for row in table]
def sortDictionary(dictionary, sortIndex=0, reverseSort=False):
return sorted(dictionary.items(), key=lambda x: x[sortIndex], reverse=reverseSort)
def imread(imgPath, boThrowErrorIfExifRotationTagSet = True):
if not os.path.exists(imgPath):
print("ERROR: image path does not exist.")
error
rotation = rotationFromExifTag(imgPath)
    if boThrowErrorIfExifRotationTagSet and rotation != 0:
        raise ValueError("Error: exif rotation tag set, image needs to be rotated by %d degrees." % rotation)
img = cv2.imread(imgPath)
if img is None:
print ("ERROR: cannot load image " + imgPath)
error
if rotation != 0:
        img = imrotate(img, -90).copy()  # got this error occasionally without the copy: "TypeError: Layout of the output array img is incompatible with cv::Mat"
return img
def rotationFromExifTag(imgPath):
TAGSinverted = {v: k for k, v in TAGS.items()}
orientationExifId = TAGSinverted['Orientation']
try:
imageExifTags = Image.open(imgPath)._getexif()
except:
imageExifTags = None
# rotate the image if orientation exif tag is present
rotation = 0
if imageExifTags != None and orientationExifId != None and orientationExifId in imageExifTags:
orientation = imageExifTags[orientationExifId]
# print ("orientation = " + str(imageExifTags[orientationExifId]))
if orientation == 1 or orientation == 0:
rotation = 0 # no need to do anything
elif orientation == 6:
rotation = -90
elif orientation == 8:
rotation = 90
else:
print ("ERROR: orientation = " + str(orientation) + " not_supported!")
error
return rotation
def imwrite(img, imgPath):
cv2.imwrite(imgPath, img)
def imresize(img, scale, interpolation = cv2.INTER_LINEAR):
return cv2.resize(img, (0,0), fx=scale, fy=scale, interpolation=interpolation)
def imresizeMaxDim(img, maxDim, boUpscale = False, interpolation = cv2.INTER_LINEAR):
scale = 1.0 * maxDim / max(img.shape[:2])
if scale < 1 or boUpscale:
img = imresize(img, scale, interpolation)
else:
scale = 1.0
return img, scale
def imWidth(input):
return imWidthHeight(input)[0]
def imHeight(input):
return imWidthHeight(input)[1]
def imWidthHeight(input):
width, height = Image.open(input).size #this does not load the full image
return width,height
def imArrayWidth(input):
return imArrayWidthHeight(input)[0]
def imArrayHeight(input):
return imArrayWidthHeight(input)[1]
def imArrayWidthHeight(input):
width = input.shape[1]
height = input.shape[0]
return width,height
def imshow(img, waitDuration=0, maxDim = None, windowName = 'img'):
if isinstance(img, str): #test if 'img' is a string
img = cv2.imread(img)
if maxDim is not None:
scaleVal = 1.0 * maxDim / max(img.shape[:2])
if scaleVal < 1:
img = imresize(img, scaleVal)
cv2.imshow(windowName, img)
cv2.waitKey(waitDuration)
def drawRectangles(img, rects, color = (0, 255, 0), thickness = 2):
for rect in rects:
pt1 = tuple(ToIntegers(rect[0:2]))
pt2 = tuple(ToIntegers(rect[2:]))
cv2.rectangle(img, pt1, pt2, color, thickness)
def drawCrossbar(img, pt):
(x,y) = pt
cv2.rectangle(img, (0, y), (x, y), (255, 255, 0), 1)
cv2.rectangle(img, (x, 0), (x, y), (255, 255, 0), 1)
cv2.rectangle(img, (img.shape[1],y), (x, y), (255, 255, 0), 1)
cv2.rectangle(img, (x, img.shape[0]), (x, y), (255, 255, 0), 1)
def ptClip(pt, maxWidth, maxHeight):
pt = list(pt)
pt[0] = max(pt[0], 0)
pt[1] = max(pt[1], 0)
pt[0] = min(pt[0], maxWidth)
pt[1] = min(pt[1], maxHeight)
return pt
def drawText(img, pt, text, textWidth=None, color = (255,255,255), colorBackground = None, font = None):
    # load the default font inside the function body so the script won't
    # crash on systems where "arial.ttf" cannot be found
if font == None:
font = ImageFont.truetype("arial.ttf", 16)
pilImg = imconvertCv2Pil(img)
pilImg = pilDrawText(pilImg, pt, text, textWidth, color, colorBackground, font)
return imconvertPil2Cv(pilImg)
def pilDrawText(pilImg, pt, text, textWidth=None, color = (255,255,255), colorBackground = None, font = None):
    # load the default font inside the function body so the script won't
    # crash on systems where "arial.ttf" cannot be found
if font == None:
font = ImageFont.truetype("arial.ttf", 16)
textY = pt[1]
draw = ImageDraw.Draw(pilImg)
if textWidth == None:
lines = [text]
else:
lines = textwrap.wrap(text, width=textWidth)
for line in lines:
width, height = font.getsize(line)
if colorBackground != None:
draw.rectangle((pt[0], pt[1], pt[0] + width, pt[1] + height), fill=tuple(colorBackground[::-1]))
draw.text(pt, line, fill = tuple(color), font = font)
textY += height
return pilImg
def getColorsPalette():
colors = [[255,0,0], [0,255,0], [0,0,255], [255,255,0], [255,0,255]]
for i in range(5):
for dim in range(0,3):
for s in (0.25, 0.5, 0.75):
if colors[i][dim] != 0:
newColor = copy.deepcopy(colors[i])
newColor[dim] = int(round(newColor[dim] * s))
colors.append(newColor)
return colors
def imconvertPil2Cv(pilImg):
rgb = pilImg.convert('RGB')
return np.array(rgb).copy()[:, :, ::-1]
def imconvertCv2Pil(img):
cv2_im = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
return Image.fromarray(cv2_im)
def ToIntegers(list1D):
return [int(float(x)) for x in list1D]
def softmax(vec):
expVec = np.exp(vec)
# TODO: check numerical stability
if max(expVec) == np.inf:
outVec = np.zeros(len(expVec))
outVec[expVec == np.inf] = vec[expVec == np.inf]
outVec = outVec / np.sum(outVec)
else:
outVec = expVec / np.sum(expVec)
return outVec
def softmax2D(w):
e = np.exp(w)
dist = e / np.sum(e, axis=1)[:, np.newaxis]
return dist
def getDictionary(keys, values, boConvertValueToInt = True):
dictionary = {}
for key,value in zip(keys, values):
if (boConvertValueToInt):
value = int(value)
dictionary[key] = value
return dictionary
class Bbox:
MAX_VALID_DIM = 100000
left = top = right = bottom = None
def __init__(self, left, top, right, bottom):
self.left = int(round(float(left)))
self.top = int(round(float(top)))
self.right = int(round(float(right)))
self.bottom = int(round(float(bottom)))
self.standardize()
def __str__(self):
return ("Bbox object: left = {0}, top = {1}, right = {2}, bottom = {3}".format(self.left, self.top, self.right, self.bottom))
def __repr__(self):
return str(self)
def rect(self):
return [self.left, self.top, self.right, self.bottom]
def max(self):
return max([self.left, self.top, self.right, self.bottom])
def min(self):
return min([self.left, self.top, self.right, self.bottom])
def width(self):
width = self.right - self.left + 1
assert(width>=0)
return width
def height(self):
height = self.bottom - self.top + 1
assert(height>=0)
return height
def surfaceArea(self):
return self.width() * self.height()
def getOverlapBbox(self, bbox):
left1, top1, right1, bottom1 = self.rect()
left2, top2, right2, bottom2 = bbox.rect()
overlapLeft = max(left1, left2)
overlapTop = max(top1, top2)
overlapRight = min(right1, right2)
overlapBottom = min(bottom1, bottom2)
if (overlapLeft>overlapRight) or (overlapTop>overlapBottom):
return None
else:
return Bbox(overlapLeft, overlapTop, overlapRight, overlapBottom)
def standardize(self): #NOTE: every setter method should call standardize
leftNew = min(self.left, self.right)
topNew = min(self.top, self.bottom)
rightNew = max(self.left, self.right)
bottomNew = max(self.top, self.bottom)
self.left = leftNew
self.top = topNew
self.right = rightNew
self.bottom = bottomNew
def crop(self, maxWidth, maxHeight):
leftNew = min(max(self.left, 0), maxWidth)
topNew = min(max(self.top, 0), maxHeight)
rightNew = min(max(self.right, 0), maxWidth)
bottomNew = min(max(self.bottom, 0), maxHeight)
return Bbox(leftNew, topNew, rightNew, bottomNew)
def isValid(self):
if self.left>=self.right or self.top>=self.bottom:
return False
if min(self.rect()) < -self.MAX_VALID_DIM or max(self.rect()) > self.MAX_VALID_DIM:
return False
return True
def getEnclosingBbox(pts):
left = top = float('inf')
right = bottom = float('-inf')
for pt in pts:
left = min(left, pt[0])
top = min(top, pt[1])
right = max(right, pt[0])
bottom = max(bottom, pt[1])
return Bbox(left, top, right, bottom)
def bboxComputeOverlapVoc(bbox1, bbox2):
surfaceRect1 = bbox1.surfaceArea()
surfaceRect2 = bbox2.surfaceArea()
overlapBbox = bbox1.getOverlapBbox(bbox2)
if overlapBbox == None:
return 0
else:
surfaceOverlap = overlapBbox.surfaceArea()
overlap = max(0, 1.0 * surfaceOverlap / (surfaceRect1 + surfaceRect2 - surfaceOverlap))
assert (overlap >= 0 and overlap <= 1)
return overlap
def computeAveragePrecision(recalls, precisions, use_07_metric=False):
""" ap = voc_ap(recalls, precisions, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(recalls >= t) == 0:
p = 0
else:
p = np.max(precisions[recalls >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrecalls = np.concatenate(([0.], recalls, [1.]))
mprecisions = np.concatenate(([0.], precisions, [0.]))
# compute the precision envelope
for i in range(mprecisions.size - 1, 0, -1):
mprecisions[i - 1] = np.maximum(mprecisions[i - 1], mprecisions[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrecalls[1:] != mrecalls[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrecalls[i + 1] - mrecalls[i]) * mprecisions[i + 1])
return ap
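# Worked example (default metric): recalls=[0.5, 1.0], precisions=[1.0, 0.5]
# -> precision envelope [1.0, 1.0, 0.5] over mrecalls [0.0, 0.5, 1.0]
# -> ap = 0.5 * 1.0 + 0.5 * 0.5 = 0.75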
|
[
"alikaz.zaidi@gmail.com"
] |
alikaz.zaidi@gmail.com
|
7531cac66c00aa9fa817aaa518bfbbedc102318d
|
f21a735d8e3e0ab7bcde2a88caf7658e1008f9c0
|
/simple_auto_encoder_keras/simple_auto_encoder_keras.py
|
633ba837694466e5120e034fd27f699b04609fba
|
[] |
no_license
|
alizarghami/simple_dl_examples
|
cdaa1c48ddfa74e9f8b246a77e5242d74baa4b12
|
64ffb75c19d4ae45eed589719339f13f8c96542b
|
refs/heads/master
| 2023-02-15T19:12:55.258072
| 2021-01-07T09:41:37
| 2021-01-07T09:41:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 5 22:19:50 2020
@author: ali
"""
from keras.datasets import fashion_mnist
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import Adam
import matplotlib.pyplot as plt
BATCH_SIZE = 1024
EPOCHS = 20
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
Z_train = X_train.reshape(-1, 784)
Z_test = X_test.reshape(-1, 784)
simple_auto_encoder = Sequential()
simple_auto_encoder.add(Dense(512, activation='elu', input_shape=(784,)))
simple_auto_encoder.add(Dense(128, activation='elu'))
simple_auto_encoder.add(Dense(10, activation='linear', name='bottleneck'))
simple_auto_encoder.add(Dense(128, activation='elu'))
simple_auto_encoder.add(Dense(512, activation='elu'))
simple_auto_encoder.add(Dense(784))
simple_auto_encoder.compile(Adam(), loss='mean_squared_error')
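# The named 'bottleneck' layer makes it easy to pull out the 10-d encoder later,
# e.g. (a sketch, requires `from keras.models import Model`):
# encoder = Model(simple_auto_encoder.input,
#                 simple_auto_encoder.get_layer('bottleneck').output)
# codes = encoder.predict(Z_test)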
image = Z_test[0].reshape(28, 28)
res = simple_auto_encoder.predict(Z_test[0].reshape(-1, 784))
res = res.reshape(28, 28)
fig1 = plt.figure('Before training')
ax1 = fig1.add_subplot(1,2,1)
ax1.imshow(image)
ax2 = fig1.add_subplot(1,2,2)
ax2.imshow(res)
trained_model = simple_auto_encoder.fit(Z_train, Z_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(Z_test, Z_test))
res = simple_auto_encoder.predict(Z_test[0].reshape(-1, 784))
res = res.reshape(28, 28)
fig2 = plt.figure('After training')
ax1 = fig2.add_subplot(1,2,1)
ax1.imshow(image)
ax2 = fig2.add_subplot(1,2,2)
ax2.imshow(res)
simple_auto_encoder.save('models/model.h5')
|
[
"alizarghami@gmail.com"
] |
alizarghami@gmail.com
|
f342ac5aa82cd4281c3d4189cda3b8f347147b4f
|
1a5971b5e2960322b4f9d318b08e9fefa908a776
|
/Height.py
|
d7dc80e06611ef6b9f7f1bea4e593b1f33cd840b
|
[] |
no_license
|
mikesmithlab/Ball
|
1253baf1f5c8a363683f86e741ffacbce3e5c774
|
33cc74aba6e4ff21bc1c6ec51a50cd390531f99b
|
refs/heads/master
| 2020-04-04T11:02:28.506512
| 2019-04-15T13:49:42
| 2019-04-15T13:49:42
| 155,876,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,712
|
py
|
from scipy import signal
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tkinter import filedialog
import os
from scipy.signal import argrelextrema
from scipy import optimize
def createHistogram(ball,filename,binnum=100):
plt.figure()
file = filename[:-5]+'.txt'
ball['Height'].hist(bins=binnum)
surfmax = [ball['surface'].max(),ball['surface'].max()]
count,binedges = np.histogram(ball['Height'],bins=binnum)
countmax = [0,count.max()]
bincentres = (binedges[:-1] + binedges[1:])/2
plt.plot(surfmax,countmax,'r--')
    np.savetxt(file, np.c_[bincentres, count])
plt.savefig(file[:-4] + '.png')
def surfaceScale(ball,FrameRate=500,accelerationmm = 33000):
#Get values in pixels of surface motion
minimumSurfVal,meanSurfVal,MaximumSurfVal=plotFitSurface(ball[ball['frame']<500])
#Need to use the actual amplitude to scale the surface motion.
Amplitude = accelerationmm/(2*np.pi*50)**2
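    # A = a / omega^2: with a = 33000 mm/s^2 and f = 50 Hz this gives
    # A = 33000 / (2*pi*50)**2 ~ 0.334 mm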
print('Amplitude pixels')
print(abs(minimumSurfVal - MaximumSurfVal)/2)
ball['surface'] = -(ball['surface']-meanSurfVal)
scale_surface = Amplitude/abs(MaximumSurfVal-meanSurfVal)
ball['surface']=ball['surface']*scale_surface
return ball
def ballScale(ball,FrameRate=500,RadBallInMM=5):
#Define new yball in terms of height above the mean surface position, which is the optical axis
ball['Height']=-(ball['yball'] - ball['surface'].mean())-ball['radball']
ball['ballscale']=RadBallInMM/ball['radball']
ball['Height']=ball['Height']*ball['ballscale']
print(ball['Height'].max())
print(ball['Height'].mean())
print(ball['Height'].min())
return ball
def sin_f(x, A, B, C, D):  # sinusoid model fitted to the surface position
return A*np.sin(B*x + C) + D
def plotFitSurface(ball):
drivingF = 50
camFPS = 500
dataLength = np.shape(ball.index.unique())[0]
omega = 2*np.pi*(drivingF)/camFPS
#frames = ball.groupby(by='frame').mean().index.values
surfacedata = (ball.groupby(by='frame').mean()['surface'])#-ball.groupby(by='frame').mean()['surface'].mean())
frames = surfacedata.index
params,SD = optimize.curve_fit(sin_f,frames,surfacedata,bounds=([-np.inf,omega*0.999,-np.inf,0],[np.inf,omega*1.001,np.inf,1000]))
frame_fine = np.arange(0,dataLength,0.01)
surface = sin_f(frame_fine,params[0],params[1],params[2],params[3])
if False:
plt.figure()
plt.plot(frames,surfacedata,'bx')
plt.plot(frame_fine,surface,'r-')
plt.show()
minimumSurfVal = np.min(surface)
maximumSurfVal = np.max(surface)
meanSurfVal = np.mean(surface)
return (minimumSurfVal,meanSurfVal,maximumSurfVal)
def plotVar(ball,value,file='',maxVal=10000,show=False,save=True,):
plt.figure()
frames = ball.groupby('frame').mean().index
Variable = ball.groupby('frame').mean()[value]
plt.plot(frames[frames < maxVal],Variable[frames < maxVal],'rx')
plt.plot(frames[frames < maxVal],Variable[frames < maxVal],'b-')
if save:
plt.savefig(file + value +'.png')
if show:
plt.show()
if __name__ == "__main__":
filename = filedialog.askopenfilename(initialdir='/media/ppzmis/data/BouncingBall_Data/newMovies/Processed Data/',title='Select Data File', filetypes = (('DataFrames', '*.hdf5'),))
print(filename)
ball = pd.read_hdf(filename)
ball = ballScale(ball,FrameRate=500,RadBallInMM=5)
ball = surfaceScale(ball)
plotVar(ball,'Height',file=filename,show=False)
plotVar(ball,'radball',file=filename,show=False)
createHistogram(ball,filename)
|
[
"mike.i.smith@nottingham.ac.uk"
] |
mike.i.smith@nottingham.ac.uk
|
b9b9950099375ec0726ae8664dd690b39569313e
|
027ed4a5f07e2c74e3b709609cb782f5afc29558
|
/src/tp1/ga_rainhas.py
|
38f95811c3da9e85867419b7b7ec61c5391902a7
|
[] |
no_license
|
vicrsp/ce-ppgee
|
1b0bebd6310150596137b01067b6443bf96a9851
|
360d26ce2a006629afa62be3a700ddc5eac08e84
|
refs/heads/master
| 2021-03-05T23:57:49.177900
| 2020-11-16T21:26:12
| 2020-11-16T21:26:12
| 246,164,688
| 0
| 0
| null | 2020-11-16T21:07:52
| 2020-03-09T23:37:10
|
Python
|
UTF-8
|
Python
| false
| false
| 8,131
|
py
|
from math import factorial
import numpy as np
import itertools
import random
class GAPermutation:
def __init__(self, fitness_func, pop_size=100, num_generations=300, max_int=8, crossover_probability=0.6, mutation_probability=0.05, use_inversion_mutation=False):
self.population_size = pop_size
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
self.num_generations = num_generations
self.fitness_func = fitness_func
self.max_int = max_int
self.use_inversion_mutation = use_inversion_mutation
self.fitness_eval = 0
self.scale_factor = self.max_int*(self.max_int-1)/2
self.best_objective = np.Infinity
self.best_solution = []
self.best_fitness = 0
self.converged = False
def initialize_population(self):
"""
Initializes the population
"""
self.pop_size = (self.population_size, self.max_int)
# self.population = np.random.randint(
# low=0, high=self.max_int, size=self.pop_size)
self.population = np.zeros(self.pop_size)
for i in range(self.population_size):
array = np.arange(self.max_int)
np.random.shuffle(array)
self.population[i, :] = array
self.initial_population = np.copy(self.population)
def cal_pop_fitness(self, population):
"""
Calculating the fitness values of all solutions in the current population.
"""
pop_fitness = []
# Calculating the fitness value of each solution in the current population.
for sol in population:
fitness = self.scale(self.fitness_func(sol))
pop_fitness.append(fitness)
pop_fitness = np.array(pop_fitness)
self.fitness_eval = self.fitness_eval + pop_fitness.shape[0]
self.population_fitness = pop_fitness
return pop_fitness
def run(self):
self.initialize_population()
for generation in range(self.num_generations):
# Measuring the fitness of each chromosome in the population.
fitness = self.cal_pop_fitness(self.population)
best_fitness_index = np.argmax(fitness)
if(fitness[best_fitness_index] > self.best_fitness):
self.best_fitness = fitness[best_fitness_index]
self.best_solution = self.population[best_fitness_index, :]
if self.descale(self.best_fitness) == 0:
self.converged = True
break
# Selecting the best parents in the population for mating.
parents = self.selection(fitness, int(self.population_size/2))
# Crossover
offspring_crossover = self.crossover(parents)
# Mutation
offspring_mutated = self.mutation(offspring_crossover)
# Survivor selection
offspring_survived = self.survivor_selection(
fitness, offspring_mutated)
# Update population
self.population = offspring_survived
self.best_objective = self.descale(self.best_fitness)
def crossover(self, parents):
prob = np.random.rand()
offspring = parents
if prob < self.get_crossover_probability():
offspring = self.ordered_crossover(parents)
return offspring
def stochastic_universal_sampling_selection(self, fitness, num_parents):
"""
Selects the parents using SUS selection technique.
"""
sorted_parents = self.population[np.flip(np.argsort(fitness))]
sorted_fitness = fitness[np.flip(np.argsort(fitness))]
fitness_sum = np.sum(fitness)
distance = fitness_sum / float(num_parents)
start = random.uniform(0, distance)
points = [start + i*distance for i in range(num_parents)]
parents = np.empty((num_parents, self.max_int))
parents_fitness = np.empty(num_parents)
parent_num = 0
for p in points:
idx = 0
r = sorted_fitness[idx]
while r < p:
idx = idx + 1
r = r + sorted_fitness[idx]
parents[parent_num, :] = sorted_parents[idx, :]
parents_fitness[parent_num] = sorted_fitness[idx]
parent_num = parent_num + 1
return parents, parents_fitness
def selection(self, fitness, num_tournament):
parents = np.zeros((2, self.max_int))
parent_selection, parent_fitness = self.stochastic_universal_sampling_selection(
fitness, 2)
sort_indexes = np.argsort(parent_fitness)
best = parent_selection[sort_indexes[-1], :]
second_best = parent_selection[sort_indexes[-2], :]
parents[0, :] = best
parents[1, :] = second_best
return parents
def survivor_selection(self, fitness, offspring):
offspring_fitness = self.cal_pop_fitness(offspring)
pop_fitness = np.hstack((fitness, offspring_fitness))
merged_pop = np.vstack((self.population, offspring))
sort_indexes = np.argsort(pop_fitness)
sorted_pop = merged_pop[sort_indexes]
return sorted_pop[2:, :]
def inversion_mutation(self, offsprings):
m, n = offsprings.shape
mutated = np.zeros(offsprings.shape)
for i in range(m):
prob = np.random.rand()
mutated[i, :] = offsprings[i, :]
if prob < self.get_mutation_probability():
pos_1, pos_2 = np.sort(
np.random.randint(low=0, high=n, size=2))
flipped_array = np.flip(offsprings[i, pos_1:pos_2])
mutated[i, pos_1:pos_2] = flipped_array
return mutated
def mutation(self, offsprings):
if(self.use_inversion_mutation):
return self.inversion_mutation(offsprings)
else:
return self.swap_mutation(offsprings)
def swap_mutation(self, offsprings):
m, n = offsprings.shape
mutated = np.zeros(offsprings.shape)
for i in range(m):
prob = np.random.rand()
mutated[i, :] = offsprings[i, :]
if prob < self.get_mutation_probability():
pos_1, pos_2 = np.random.randint(low=0, high=n, size=2)
first_num = offsprings[i, pos_1]
second_num = offsprings[i, pos_2]
mutated[i, pos_1] = second_num
mutated[i, pos_2] = first_num
return mutated
def ordered_crossover(self, parents):
"""
Executes an ordered crossover (OX) on the input individuals.
"""
parent1, parent2 = parents[0, :], parents[1, :]
size = len(parent1)
a, b = random.sample(range(size), 2)
if a > b:
a, b = b, a
holes1, holes2 = [True] * size, [True] * size
for i in range(size):
if i < a or i > b:
holes1[int(parent2[i])] = False
holes2[int(parent1[i])] = False
        # temp1/temp2 alias the parents: the read index below always stays at
        # or ahead of the write index, so values are consumed before overwritten
        temp1, temp2 = parent1, parent2
k1, k2 = b + 1, b + 1
for i in range(size):
if not holes1[int(temp1[(i + b + 1) % size])]:
parent1[int(k1 % size)] = temp1[int((i + b + 1) % size)]
k1 += 1
if not holes2[int(temp2[(i + b + 1) % size])]:
parent2[int(k2 % size)] = temp2[int((i + b + 1) % size)]
k2 += 1
# Swap the content between a and b (included)
for i in range(a, b + 1):
parent1[i], parent2[i] = parent2[i], parent1[i]
return np.array([parent1, parent2])
def scale(self, fx):
"""
Scales the objective with Cmax scaling
"""
return self.scale_factor - fx
def descale(self, fitness):
return self.scale_factor - fitness
def get_mutation_probability(self):
return self.mutation_probability
def get_crossover_probability(self):
return self.crossover_probability
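if __name__ == '__main__':
    # Illustrative 8-queens demo. `queen_conflicts` is a hypothetical fitness
    # function (not part of the class): it returns the number of attacking
    # queen pairs, which scale() turns into a maximization fitness, so
    # best_objective == 0 means a conflict-free board.
    def queen_conflicts(perm):
        n = len(perm)
        return sum(1 for i in range(n) for j in range(i + 1, n)
                   if abs(perm[i] - perm[j]) == abs(i - j))

    ga = GAPermutation(fitness_func=queen_conflicts, max_int=8,
                       use_inversion_mutation=True)
    ga.run()
    print('converged:', ga.converged, '| attacking pairs:', ga.best_objective)
    print('best board:', ga.best_solution)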
|
[
"victorspruela@gmail.com"
] |
victorspruela@gmail.com
|
eef6bd0e000db126892cd5cf1efaecae9b0e373f
|
fadcdc099a17a031190db8a073ceb9b8ff3c52a3
|
/2017202085/src/srcipts/KGQA/neo_db/query_create_txt.py
|
d47e26de3052d2a6c38d23d93ce94aed54d78d61
|
[] |
no_license
|
info-ruc/nlp20projects
|
751457d63e36baafc91aec2d8a6a9ed2c2634eef
|
0684e6204378761b471415743cbd63efe906deb0
|
refs/heads/master
| 2023-02-08T05:25:27.467295
| 2020-12-30T00:24:16
| 2020-12-30T00:24:16
| 291,974,135
| 0
| 10
| null | 2020-12-30T00:24:18
| 2020-09-01T11:03:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 556
|
py
|
from neo_db.config import graph, CA_LIST, similar_words
import os
def concept():
    data = list(graph.run(
        "MATCH (n:CONCEPT) RETURN n LIMIT 10000"))
    with open('../raw_data/concept.txt', 'w', encoding='utf-8') as f:
        for d in data:
            f.write(d['n']['conceptName'] + '\n')
def author():
    data = list(graph.run(
        "MATCH (n:AUTHOR) RETURN n LIMIT 10000"))
    with open('../raw_data/author.txt', 'w', encoding='utf-8') as f:
        for d in data:
            f.write(d['n']['authorName'] + '\n')
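if __name__ == '__main__':
    # Minimal runner sketch; the ../raw_data output directory implied by the
    # paths above is created here if missing.
    os.makedirs('../raw_data', exist_ok=True)
    concept()
    author()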
|
[
"857316974@qq.com"
] |
857316974@qq.com
|
9957e818b639a1c4714fa373df9e059a62e4b8d1
|
40777537b6c47ffa32b565484325dc4fb0d42e83
|
/examples/separate_api_route_example.py
|
9a7e8b1b7e8116671eaf6dbcadeca22ac5455def
|
[
"MIT"
] |
permissive
|
muhammedfurkan/aiogram
|
24e8e9c55ea01c6a2f08abd1122956640db92abd
|
692c1340b4dda556da640e5f9ea2200848c06840
|
refs/heads/dev-2.x
| 2021-12-03T21:33:40.494047
| 2021-10-28T21:03:46
| 2021-10-28T21:03:46
| 200,087,088
| 0
| 0
|
MIT
| 2020-11-04T15:52:35
| 2019-08-01T16:46:50
|
Python
|
UTF-8
|
Python
| false
| false
| 870
|
py
|
# NOTE: This is an example of an integration between
# externally created Application object and the aiogram's dispatcher
# This can be used for a custom route, for instance
from aiohttp import web
from aiogram import Bot, Dispatcher, types
from aiogram.dispatcher.webhook import configure_app
import config  # assumed local settings module that defines bot_token
bot = Bot(token=config.bot_token)
dp = Dispatcher(bot)
@dp.message_handler(commands=["start"])
async def cmd_start(message: types.Message):
await message.reply("start!")
# handle /api route
async def api_handler(request):
return web.json_response({"status": "OK"}, status=200)
app = web.Application()
# add a custom route
app.add_routes([web.post("/api", api_handler)])
# every request to /bot route will be retransmitted to dispatcher to be handled
# as a bot update
configure_app(dp, app, "/bot")
if __name__ == "__main__":
web.run_app(app, port=9000)
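# Smoke test for the custom /api route from another process (illustrative):
#
#   import asyncio, aiohttp
#
#   async def check():
#       async with aiohttp.ClientSession() as s:
#           async with s.post("http://localhost:9000/api") as r:
#               print(r.status, await r.json())  # expect: 200 {'status': 'OK'}
#
#   asyncio.run(check())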
|
[
"noreply@github.com"
] |
muhammedfurkan.noreply@github.com
|
8ad510177a0ecc90f3b4308ee64dee9040f7d329
|
2f0603a1a61baaf588c25b1960d18500e7060933
|
/Theano_code/dbn.py
|
334cbbb7c64aa6108b59613270881771eaf03c29
|
[] |
no_license
|
mukami12/ReduceFA_2015
|
20a3b9dd4faf862465e5aec980a55734befa9a22
|
a6fe51a8d315c8771911389b1f744878f556ab65
|
refs/heads/master
| 2020-03-31T14:54:15.798399
| 2016-05-22T09:16:20
| 2016-05-22T09:16:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,586
|
py
|
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import LogisticRegression
from data_process import loadFeaturedData
from data_process import load10secData
from mlp import HiddenLayer
from rbm import RBM
# start-snippet-1
class DBN(object):
"""Deep Belief Network
A deep belief network is obtained by stacking several RBMs on top of each
other. The hidden layer of the RBM at layer `i` becomes the input of the
RBM at layer `i+1`. The first layer RBM gets as input the input of the
network, and the hidden layer of the last RBM represents the output. When
    used for classification, the DBN is treated as an MLP, by adding a logistic
regression layer on top.
"""
def __init__(self, numpy_rng, theano_rng=None, n_ins=28 * 28,
hidden_layers_sizes=[10, 10], n_outs=10):
"""This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the DBN
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
"""
self.sigmoid_layers = []
self.rbm_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector
# of [int] labels
# end-snippet-1
# The DBN is an MLP, for which all weights of intermediate
# layers are shared with a different RBM. We will first
# construct the DBN as a deep multilayer perceptron, and when
# constructing each sigmoidal layer we also construct an RBM
# that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to changing the
# weights of the MLP as well) During finetuning we will finish
# training the DBN by doing stochastic gradient descent on the
# MLP.
for i in xrange(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden
# units of the layer below or the input size if we are on
# the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# the input to this layer is either the activation of the
# hidden layer below or the input of the DBN if you are on
# the first layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
            # it's arguably a philosophical question... but we are
# going to only declare that the parameters of the
# sigmoid_layers are parameters of the DBN. The visible
# biases in the RBM are parameters of those RBMs, but not
# of the DBN.
self.params.extend(sigmoid_layer.params)
# Construct an RBM that shared weights with this layer
rbm_layer = RBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b)
self.rbm_layers.append(rbm_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs)
self.params.extend(self.logLayer.params)
# compute the cost for second phase of training, defined as the
# negative log likelihood of the logistic regression (output) layer
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
# Compute Confusion_matrix by heehwan
self.confusion_matrix = self.logLayer.confusion_matrix(self.y)
def pretraining_functions(self, train_set_x, batch_size, k):
'''Generates a list of functions, for performing one step of
gradient descent at a given layer. The function will require
as input the minibatch index, and to train an RBM you just
need to iterate, calling the corresponding function on all
minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared var. that contains all datapoints used
for training the RBM
:type batch_size: int
:param batch_size: size of a [mini]batch
:param k: number of Gibbs steps to do in CD-k / PCD-k
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
learning_rate = T.scalar('lr') # learning rate to use
# number of batches
n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        # beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for rbm in self.rbm_layers:
# get the cost and the updates list
# using CD-k here (persisent=None) for training each RBM.
# TODO: change cost function to reconstruction error
cost, updates = rbm.get_cost_updates(learning_rate,
persistent=None, k=k)
# compile the theano function
fn = theano.function(
inputs=[index, theano.Param(learning_rate, default=0.1)],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin:batch_end]
}
)
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
    def build_finetune_functions(self, datasets, batch_size):
        '''Generates a function `train` that implements one step of
        finetuning (taking a dynamic learning rate as an input), plus
        functions that compute the cost on the training set and the
        error, cost and confusion matrix on the testing set
        :type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: a list that has to contain two pairs, `train`
                         and `test` in this order, where each pair is
                         formed of two Theano variables, one for the
                         datapoints, the other for the labels
        :type batch_size: int
        :param batch_size: size of a minibatch
        '''
(train_set_x, train_set_y) = datasets[0]
(test_set_x, test_set_y) = datasets[1]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# Dynamic learning rate by heehwan
l_r = T.scalar('l_r', dtype=theano.config.floatX)
# compute list of fine-tuning updates
updates = []
for param, gparam in zip(self.params, gparams):
updates.append((param, param - gparam * l_r))
train_fn = theano.function(
inputs=[index, l_r],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: train_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
train_cost_i = theano.function(
[index],
self.finetune_cost,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: train_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
test_score_i = theano.function(
[index],
self.errors,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
test_cost_i = theano.function(
[index],
self.finetune_cost,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
test_confmatrix_i = theano.function(
[index],
self.confusion_matrix,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
        # Create functions that scan the entire train / test sets
        def train_cost():
            return [train_cost_i(i) for i in xrange(n_train_batches)]
        def test_score():
            return [test_score_i(i) for i in xrange(n_test_batches)]
        def test_cost():
            # was calling train_cost_i by mistake; use the test-set function
            return [test_cost_i(i) for i in xrange(n_test_batches)]
        def test_confmatrix():
            return [test_confmatrix_i(i) for i in xrange(n_test_batches)]
        return train_fn, train_cost, test_score, test_cost, test_confmatrix
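# Minimal usage sketch (illustrative only). Assumption: loadFeaturedData()
# returns [(train_set_x, train_set_y), (test_set_x, test_set_y)] as Theano
# shared variables, matching what build_finetune_functions expects above;
# layer sizes and epoch counts below are placeholders.
if __name__ == '__main__':
    datasets = loadFeaturedData()
    train_set_x = datasets[0][0]
    batch_size = 10
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    numpy_rng = numpy.random.RandomState(123)
    dbn = DBN(numpy_rng=numpy_rng, n_ins=train_set_x.get_value().shape[1],
              hidden_layers_sizes=[100, 100], n_outs=2)
    pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size, k=1)
    for layer in xrange(dbn.n_layers):
        for epoch in xrange(10):
            costs = [pretraining_fns[layer](batch_index, lr=0.01)
                     for batch_index in xrange(n_train_batches)]
            print 'Pre-training layer %i, epoch %d, cost %f' % (
                layer, epoch, numpy.mean(costs))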
|
[
"heehwan.park@gmail.com"
] |
heehwan.park@gmail.com
|
21f9d1804e8d0e7fa36cfa13c8694012e9e3e993
|
7280a5bff73d67b16f0fce871dec606edd0c7347
|
/test.py
|
72f01014d94246fe0444d69e30d72f664a4d9e22
|
[] |
no_license
|
nj-sseo/Korean-language-classifier
|
59032520514754a2beb335ffd02d0461671bb249
|
ee467f7a7b8432d23ed7bf299a30742ebf969f86
|
refs/heads/master
| 2020-12-10T03:07:49.059802
| 2020-01-14T15:15:57
| 2020-01-14T15:15:57
| 233,489,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,867
|
py
|
# -*- coding: utf-8 -*-
"""test
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GixyCfk27jb3GlyRfsyFVX3f3rhPp0IE
"""
import random
import os
import time
import math
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils
import torch
from torch.utils.data import DataLoader,Dataset
from torch.autograd import Variable
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from PIL import Image
import PIL.ImageOps
import numpy as np
import matplotlib.pyplot as plt
class GargLeNet(nn.Module):
__constants__ = ['transform_input']
def __init__(self, num_classes= 36, transform_input=False, init_weights=True, blocks=None):
super(GargLeNet, self).__init__()
if blocks is None:
blocks = [BasicConv2d, Inception]
assert len(blocks) == 2
conv_block = blocks[0]
inception_block = blocks[1]
self.transform_input = transform_input
# Stem
self.conv1 = conv_block(3, 64, kernel_size=5, stride=1, padding=0)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
#in_channels, ch1x1, ch3x3red_a, ch3x3_a, ch3x3red_b, ch3x3_b1, ch3x3_b2, ch3x3_pool_proj
# Inception #1
self.inception2a1 = inception_block(64, 32, 24, 32, 32, 48, 48, 16)
self.inception2b1 = inception_block(128, 64, 48, 64, 64, 96, 96, 64)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
# Inception #2
self.inception3b1 = inception_block(288, 64, 48, 64, 64, 96, 96, 32)
self.inception3b2 = inception_block(256, 64, 48, 64, 64, 96, 96, 32)
self.inception3b3 = inception_block(256, 64, 48, 64, 64, 96, 96, 32)
self.inception3b4 = inception_block(256, 64, 48, 64, 64, 96, 96, 32)
self.inception3c1 = inception_block(256, 128, 96, 128, 128, 192, 192, 64)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
# Inception #3
self.inception4c1 = inception_block(512, 128, 96, 128, 128, 192, 192, 64)
self.inception4c2 = inception_block(512, 128, 96, 128, 128, 192, 192, 64)
# AvgPool, Dropout, FC
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(0.4)
self.fc = nn.Linear(512, num_classes)
if init_weights:
self._initialize_weights()
    def _initialize_weights(self):  # not fully understood yet, but it seems to work, so keeping it as-is
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
    def _transform_input(self, x):  # not fully understood either; adapt to our needs or remove
# type: (Tensor) -> Tensor
if self.transform_input:
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
return x
def _forward(self, x):
# Stem (1)
x = self.conv1(x)
x = self.maxpool1(x)
# Inception #1 (2)
x = self.inception2a1(x)
x = self.inception2b1(x)
x = self.maxpool2(x)
# Inception #2 (3)
x = self.inception3b1(x)
x = self.inception3b2(x)
x = self.inception3b3(x)
x = self.inception3b4(x)
x = self.inception3c1(x)
x = self.maxpool3(x)
# Inception #3 (4)
x = self.inception4c1(x)
x = self.inception4c2(x)
# AvgPool, Dropout, and FC
x = self.avgpool(x)
x = torch.flatten(x, 1)
        # N x 512
x = self.dropout(x)
x = self.fc(x)
        # N x num_classes (36 by default)
return x
def forward(self, x):
x = self._transform_input(x)
x = self._forward(x)
return x
# The Inception module is modified as in the paper (Inception v1 -> factorized version).
class Inception(nn.Module):
__constants__ = ['branch2', 'branch3', 'branch4']
def __init__(self, in_channels, ch1x1, ch3x3red_a, ch3x3_a, ch3x3red_b, ch3x3_b1, ch3x3_b2, ch3x3_pool_proj, conv_block=None):
super(Inception, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
self.branch2 = nn.Sequential(
conv_block(in_channels, ch3x3red_a, kernel_size=1),
conv_block(ch3x3red_a, ch3x3_a, kernel_size=3, padding=1)
)
self.branch3 = nn.Sequential(
conv_block(in_channels, ch3x3red_b, kernel_size=1),
conv_block(ch3x3red_b, ch3x3_b1, kernel_size=3, padding=1),
conv_block(ch3x3_b1, ch3x3_b2, kernel_size=3, padding=1)
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
conv_block(in_channels, ch3x3_pool_proj, kernel_size=1)
)
def _forward(self, x):
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
outputs = [branch1, branch2, branch3, branch4]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class FinalDataset(Dataset):
def __init__(self, root_dir, train = True, transform = None, augment = None):
self.folder_dataset = dset.ImageFolder(root = root_dir)
self.train = train
self.transform = transform
self.augment = augment
def __getitem__(self,index):
        img_dir, label = self.folder_dataset.imgs[index]  # label is returned as the folder index
        img = Image.open(img_dir).convert('RGB')  # convert to RGB
if self.train is True and self.augment is not None:
augment = np.random.choice(self.augment, 1).tolist()
augment += self.transform
# print(len(augment))
else:
augment = self.transform
if self.transform is not None:
img = transforms.Compose(augment)(img)
return img, label
def __len__(self):
return len(self.folder_dataset.imgs)
def train(model, train_loader, optimizer, criterion, epoch, time_start):
model.train()
percent_prev = -1
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
# print(output.shape, target.shape)
loss = criterion(output, target)
loss.backward()
optimizer.step()
#if batch_idx % log_interval == 0:
percent_curr = 100 * batch_idx // len(train_loader)
if percent_curr > percent_prev:
percent_prev = percent_curr
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f},\tTime duration: {}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), timeSince(time_start)))
#torch.save(model.state_dict(),"drive/My Drive/public/results/mnist_cnn.pt")
return loss.item()
def test(model, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# sum up batch loss
test_loss += F.cross_entropy(output, target, reduction = 'sum').item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
print("output, target = ")
print(pred, target)
test_loss /= len(test_loader.dataset)
print('\nTest: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss
# From Prof's CNN_MNIST practice code
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
train_dir = "./train"
valid_dir = "./val"
output_dir = "./output"
try:
    os.makedirs(output_dir, exist_ok=True)
except OSError as e:
    if os.path.isdir(output_dir):
        pass
    else:
        print('\nPlease make a directory ./output\n', e)
option = {'train_dir': train_dir,
'valid_dir': valid_dir,
'output' : output_dir,
'input_size': (224,224),
'batch': 16,
'epoch': 10,
'lr': 0.001,
'momentum': 0.9,
'log_interval': 2,
'valid_interval': 2,
'n_cpu': 100,
'augment': True,
'ver': 0.2}
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': option['n_cpu'], 'pin_memory': True} if use_cuda else {}
print('option:', option)
print('use cuda:', use_cuda)
if __name__ == '__main__':
transform = [transforms.Resize(option['input_size']),
transforms.ToTensor()]
valid_set = FinalDataset(root_dir = option['valid_dir'], train = False,
transform = transform)
valid_loader = DataLoader(valid_set,
shuffle = False,
batch_size = 100, # test batch: 100
**kwargs)
model = GargLeNet().to(device)
model.load_state_dict(torch.load(output_dir+"/gargle0.72_20.pth"), strict = False)
test_loss = test(model, valid_loader)
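    # Illustrative (assumed) training loop reusing the train()/test() helpers
    # above; expects a ./train folder laid out for torchvision's ImageFolder.
    train_set = FinalDataset(root_dir=option['train_dir'], train=True,
                             transform=transform)
    train_loader = DataLoader(train_set, shuffle=True,
                              batch_size=option['batch'], **kwargs)
    optimizer = optim.SGD(model.parameters(), lr=option['lr'],
                          momentum=option['momentum'])
    criterion = nn.CrossEntropyLoss()
    time_start = time.time()
    for epoch in range(1, option['epoch'] + 1):
        train(model, train_loader, optimizer, criterion, epoch, time_start)
        if epoch % option['valid_interval'] == 0:
            test(model, valid_loader)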
|
[
"nj.ssseo@gmail.com"
] |
nj.ssseo@gmail.com
|
b3e8ae7b18054584b3712e746015b1abd10e5a7a
|
d1c637544d893247a4731d8638b7b13aebcc3f4e
|
/youtubeScrape.py
|
26be9c5c00b6444c486cb178a0370923bce39666
|
[] |
no_license
|
jayachandra2128/Youtube-comments-scraper
|
17c3b8119531166c122974ee6d1d8f921347fef6
|
ecf88c6a89edf097c2a4f310cf73d44267f27693
|
refs/heads/master
| 2021-09-25T02:16:51.616218
| 2018-10-17T00:07:59
| 2018-10-17T00:07:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,455
|
py
|
import simplejson as json
from urllib import urlopen
import time
import os
os.chdir(r'C:\Users\jc\Desktop')
csvFile =open('test.csv',"w")
#csvFile =open('test.tsv',"w")
#writer = csv.writer(csvFile,delimiter=',')
#writer.writerow('Comments')
csvFile.write("comments\n")
STAGGER_TIME = 1
# open the url and the screen name
# (The screen name is the screen name of the user for whom to return results for)
url = "https://www.googleapis.com/youtube/v3/commentThreads?key=AIzaSyCYkTUjKgFGcKDnkNQMgSBbb4obnqIzUEM&textFormat=plainText&part=snippet&videoId=Ye8mB6VsUHw&maxResults=50"
# this takes a python object and dumps it to a string which is a JSON
# representation of that object
url1=urlopen(url)
#data = json.load(urllib2.urlopen(url))
result = json.load(url1)
# print the result
itemList= result.get("items")
length=len(itemList)
for i in range(0,length):
results= (result["items"][i].get('snippet').get("topLevelComment").get('snippet').get("textDisplay")).encode("utf-8")
print results
results=results.replace(",", "")
#print (result["items"][i].get('snippet').get("topLevelComment").get('snippet').get("textDisplay")).encode("utf-8")
#writer.writerow((result["items"][i].get('snippet').get("topLevelComment").get('snippet').get("textDisplay")).encode("utf-8"))
csvFile.write(results)
csvFile.write('\n')
time.sleep(STAGGER_TIME)
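# Hypothetical pagination sketch: each commentThreads response carries a
# nextPageToken that can be passed back as pageToken to fetch the next page.
next_token = result.get("nextPageToken")
while next_token:
    page = json.load(urlopen(url + "&pageToken=" + next_token))
    for item in page.get("items", []):
        text = item["snippet"]["topLevelComment"]["snippet"]["textDisplay"].encode("utf-8")
        csvFile.write(text.replace(",", "") + "\n")
        time.sleep(STAGGER_TIME)
    next_token = page.get("nextPageToken")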
csvFile.close()
|
[
"noreply@github.com"
] |
jayachandra2128.noreply@github.com
|
ca882b27134e8b7e97382771cc03bef0fcd2a3fe
|
242f1dafae18d3c597b51067e2a8622c600d6df2
|
/src/1300-1399/1344.angle.clock.py
|
8f16b6ea976d0a6986c2e132b2eb2b95f928c1e3
|
[] |
no_license
|
gyang274/leetcode
|
a873adaa083270eb05ddcdd3db225025533e0dfe
|
6043134736452a6f4704b62857d0aed2e9571164
|
refs/heads/master
| 2021-08-07T15:15:01.885679
| 2020-12-22T20:57:19
| 2020-12-22T20:57:19
| 233,179,192
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
class Solution:
def angleClock(self, hour: int, minutes: int) -> float:
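    # hour hand moves 30 deg per hour plus 0.5 deg per minute; minute hand moves 6 deg per minute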
h, m = hour % 12, minutes % 60
hA, mA = h * 30 + m / 60 * 30, m * 6
dA = abs(hA - mA)
return min(dA, 360 - dA)
if __name__ == '__main__':
solver = Solution()
cases = [
(2, 58),
]
rslts = [solver.angleClock(hour, minutes) for hour, minutes in cases]
for cs, rs in zip(cases, rslts):
print(f"case: {cs} | solution: {rs}")
|
[
"gyang274@gmail.com"
] |
gyang274@gmail.com
|
00e86c23f7f35dd581b7ede27815d2dc38061604
|
f149dae096359ff81715fa3cd856d9dba81e1e52
|
/nakey/core/admin.py
|
2e010798ddb2271d9b90bcd08a83b6cb1a398042
|
[] |
no_license
|
aibaq/nakey
|
2cfe65be10f3dfc146ee811ae57de988b84e7e01
|
c93ff72ae5784d9145c36c46abf43edb37906f3b
|
refs/heads/master
| 2022-12-10T21:41:42.561790
| 2019-10-13T16:13:38
| 2019-10-13T16:13:38
| 158,396,548
| 1
| 0
| null | 2022-11-22T03:13:58
| 2018-11-20T13:48:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from .models import *
@admin.register(Banner)
class BannerAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'image')
@admin.register(Category)
class CategoryAdmin(MPTTModelAdmin):
list_display = ('id', 'name')
@admin.register(Color)
class ColorAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
@admin.register(Size)
class SizeAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
@admin.register(Manufacture)
class ManufactureAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
class ItemImageAdmin(admin.StackedInline):
model = ItemImage
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'price', 'category')
search_fields = ('name',)
inlines = (ItemImageAdmin,)
class RequestItemAdmin(admin.StackedInline):
model = RequestItem
readonly_fields = ('item', 'count')
@admin.register(Request)
class RequestAdmin(admin.ModelAdmin):
list_display = ('id', 'full_name', 'phone', 'address', 'email')
search_fields = ('full_name',)
inlines = (RequestItemAdmin,)
|
[
"aiba.prenov@gmail.com"
] |
aiba.prenov@gmail.com
|
c088e5173d99e3899018d41fce0902bfd0be8dab
|
06c1179ff523f2de0b2caf68cc1f93b1012ced77
|
/bot/cogs/polls.py
|
34f8f43458c4e010df7a29845fc47d69488447a4
|
[] |
no_license
|
jpark9013/Discord-Bot
|
6ab6bae3070ff9542dd862fc7fc2e732c3f8a3b1
|
290c638cf46379219ee5ac9426bf0ee98ee79776
|
refs/heads/master
| 2022-12-06T17:53:32.814677
| 2020-08-28T01:25:01
| 2020-08-28T01:25:01
| 281,536,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,342
|
py
|
import operator
import time
import typing
from datetime import datetime
import discord
from discord.ext import commands, tasks
from utils.format import send_embed
class Polls(commands.Cog, name="Polls"):
def __init__(self, bot):
self.bot = bot
global db
db = self.bot.db
self.EMOJIS = (
"1️⃣",
"2️⃣",
"3️⃣",
"4️⃣",
"5️⃣",
"6️⃣",
"7️⃣",
"8️⃣",
"9️⃣",
"🔟"
)
self.check_polls.start()
@commands.group()
async def poll(self, ctx):
"""The base poll command. Doesn't do anything when invoked."""
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
@poll.command(aliases=["add"])
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(add_reactions=True)
@commands.guild_only()
async def create(self, ctx, channel: typing.Optional[discord.TextChannel], minutes: float, title: str, *options):
"""Create a poll. Make the options space separated, with quotes if spaces within the options themselves, such as
``do thing 2`` are needed."""
if len(options) > 10 or len(options) < 1:
return await send_embed(ctx, "Invalid number of options; must be between one and ten.", negative=True)
if minutes < 0.5 or minutes > 604800:
return await send_embed(ctx, "Invalid length of time given. Must be between 0.5 and 604800 minutes.",
negative=True)
cursor = await db.execute("Select count(GuildID) from Polls where GuildID = ?", (ctx.guild.id,))
result = await cursor.fetchone()
if result[0] == 50:
return await send_embed(ctx, "Your guild already has the maximum number of available of polls at 50.",
negative=True)
if not channel:
channel = ctx.channel
then = time.time() + minutes * 60
embed = discord.Embed(
colour=discord.Colour.orange(),
title=title
)
embed.set_author(name="React to answer the poll with the corresponding number.")
embed.set_footer(text=f"Ends at {datetime.utcfromtimestamp(then).strftime('%m/%d/%Y, %H:%M:%S')}")
embed.description = "\n\n".join([f"{i}. {v}" for i, v in enumerate(options, start=1)])
msg = await channel.send(embed=embed)
to_insert = (ctx.guild.id, channel.id, msg.id, len(options), then) + tuple([i for i in options]) \
+ tuple([None for i in range(10 - len(options))])
await db.execute("Insert into Polls values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", to_insert)
await db.commit()
for i in range(len(options)):
await msg.add_reaction(self.EMOJIS[i])
await send_embed(ctx, "Created poll.")
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
@poll.command(aliases=["stop"])
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def end(self, ctx, msg: discord.Message):
"""End a poll early. Give Message ID as the parameter."""
if msg.guild != ctx.guild:
return await send_embed(ctx, "You do not have permission to do that.", negative=True)
cursor = await db.execute("Select count(*), TopNumber, "
"Option1, Option2, Option3, Option4, Option5, Option6, Option7, Option8, Option9, "
"Option10 "
"from Polls where MessageID = ?", (msg.id,))
result = await cursor.fetchone()
if not result[0]:
return await send_embed(ctx, "The specified poll does not exist.", negative=True)
old_embed = msg.embeds[0]
emojis = self.EMOJIS[:result[1]]
        options = [v for i, v in enumerate(result[2:12]) if i < result[1]]
reactions = [i.count for i in msg.reactions if str(i) in emojis]
total = sum(reactions)
if total == 0:
await db.execute("Delete from Polls where MessageID = ?", (msg.id,))
await db.commit()
            embed = discord.Embed(
                colour=discord.Colour.red(),
                title=f"Poll has ended (No Votes)\n"
                      f"(Original title: {old_embed.title})",
                description=old_embed.description
            )
            embed.set_footer(text=f"Ended at {datetime.now().strftime('%m/%d/%Y, %H:%M:%S')}")
            await msg.edit(embed=embed)  # update the poll message; `embed` was built but unused before
            return await send_embed(ctx, f"No votes for the poll with message ID {msg.id}.", negative=True)
result_dict = {i + 1: reactions[i] for i in range(result[1])}
result_dict = dict(sorted(result_dict.items(), key=operator.itemgetter(1), reverse=True))
results = [f"``{options[i - 1]}`` with **{v}** votes "
f"({round(v/total*100, 2)}% of the total)" for i, v in result_dict.items()]
description = ["Results:"] + [f"{i}. {v}" for i, v in enumerate(results, start=1)]
embed = discord.Embed(
colour=discord.Colour.green(),
title=f"Poll has ended\n"
f"(Original title: {old_embed.title})",
description="\n\n".join(description)
)
embed.set_footer(text=f"Ended at {datetime.now().strftime('%m/%d/%Y, %H:%M:%S')}")
await msg.edit(embed=embed)
await db.execute("Delete from Polls where MessageID = ?", (msg.id,))
await db.commit()
await send_embed(ctx, "Ended poll.")
@tasks.loop(seconds=30)
async def check_polls(self):
cursor = await db.execute("Select GuildID, ChannelID, MessageID from Polls where TimeEnding <= ?",
(time.time(),))
result = await cursor.fetchall()
for guild_id, channel_id, message_id in result:
try:
msg = await self.bot.get_guild(guild_id).get_channel(channel_id).fetch_message(message_id)
cmd = self.bot.get_command("poll end")
ctx = await self.bot.get_context(msg)
await cmd(ctx, msg)
except Exception as e:
print(e)
def setup(bot):
bot.add_cog(Polls(bot))
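# Illustrative bootstrap (assumptions: discord.py 1.x-style extension loading
# and an aiosqlite connection attached as bot.db, which matches the
# execute/fetchone/commit calls this cog makes):
#
#   import asyncio, aiosqlite
#   from discord.ext import commands
#
#   bot = commands.Bot(command_prefix="!")
#   bot.db = asyncio.get_event_loop().run_until_complete(aiosqlite.connect("bot.db"))
#   bot.load_extension("bot.cogs.polls")
#   bot.run("YOUR_TOKEN")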
|
[
"jpark9013@gmail.com"
] |
jpark9013@gmail.com
|
f3f1d4c14482ef7424b06e6354575ee7e493a375
|
dfb55278f50b2e3fd040a62d40cedf225072a2f5
|
/flask1.py
|
e27863fcbd061ab7f9fc8053c7c6ca19625df00a
|
[] |
no_license
|
2ahmedabdullah/first_repo
|
26ab335d493a166da6c95e582e1a3498cc696d00
|
0bd93849f834a8b9682de7fde1541c9ff6e84c4f
|
refs/heads/main
| 2023-07-29T14:44:36.837253
| 2021-09-14T11:33:49
| 2021-09-14T11:33:49
| 406,337,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask, request
app = Flask(__name__)
@app.route('/hello_world', methods=['GET', 'POST'])
def add():
#a = request.form["a"]
#b = request.form["b"]
#c = request.form["c"]
return "Hello World!"#str( int(a) + int(b) + int(c) )
if __name__=='__main__':
app.run(port=7000)
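# Client-side smoke test (run from a separate process while the app is up;
# `requests` is an assumed extra dependency, not used by the app itself):
#
#   import requests
#   print(requests.post("http://localhost:7000/hello_world").text)  # Hello World!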
|
[
"noreply@github.com"
] |
2ahmedabdullah.noreply@github.com
|
66ebb027ebb9fcf1674157a1fd4328b8c803a1b6
|
60aa3bcf5ace0282210685e74ee8ed31debe1769
|
/base/lib/encodings/cp1253.py
|
e32862ea0e2b0a2d349861903d7635099bf924b3
|
[] |
no_license
|
TheBreadGuy/sims4-ai-engine
|
42afc79b8c02527353cc084117a4b8da900ebdb4
|
865212e841c716dc4364e0dba286f02af8d716e8
|
refs/heads/master
| 2023-03-16T00:57:45.672706
| 2016-05-01T17:26:01
| 2016-05-01T17:26:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
import codecs
class Codec(codecs.Codec):
__qualname__ = 'Codec'
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
__qualname__ = 'IncrementalEncoder'
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
__qualname__ = 'IncrementalDecoder'
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
__qualname__ = 'StreamWriter'
class StreamReader(Codec, codecs.StreamReader):
__qualname__ = 'StreamReader'
def getregentry():
return codecs.CodecInfo(name='cp1253', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f€\ufffe‚ƒ„…†‡\ufffe‰\ufffe‹\ufffe\ufffe\ufffe\ufffe\ufffe‘’“”•–—\ufffe™\ufffe›\ufffe\ufffe\ufffe\ufffe\xa0΅Ά£¤¥¦§¨©\ufffe«¬\xad®―°±²³΄µ¶·ΈΉΊ»Ό½ΎΏΐΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡ\ufffeΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋόύώ\ufffe'
encoding_table = codecs.charmap_build(decoding_table)
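if __name__ == '__main__':
    # Minimal round-trip self-check through the tables above (illustrative):
    # Greek letters map into cp1253's 0xC1-0xE3 range, ASCII passes through.
    codec = Codec()
    data, _ = codec.encode('ΑΒΓ αβγ cp1253')
    text, _ = codec.decode(data)
    assert text == 'ΑΒΓ αβγ cp1253'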
|
[
"jp@bellgeorge.com"
] |
jp@bellgeorge.com
|
82a31547b7df987e69677a23ad29f56ad9a5ccbe
|
41c5f7da28b87a3034754254d21791b322e819d8
|
/test/test_json_analysis_result_sub_group_all_of.py
|
e181c4639ce155f9ebebe587db93934f73ee12ae
|
[] |
no_license
|
MADANA-IO/madana-apiclient-python
|
16cb3eb807897903df2a885a94a2c02fc405818a
|
40dc21ab43d9565ac3dff86d7270093cce112753
|
refs/heads/master
| 2023-03-08T05:02:32.616469
| 2021-02-11T10:17:30
| 2021-02-11T10:17:30
| 287,797,297
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,151
|
py
|
# coding: utf-8
"""
madana-api
<h1>API Quickstart Guide</h1> <p>This documentation contains a Quickstart Guide, a few <a href=\"downloads.html\">sample clients</a> for download and information about the available <a href=\"resources.html\">endpoints</a> and <a href=\"data.html\">DataTypes</a> </p> <p>The <a target=\"_blank\" href=\"http://madana-explorer-staging.eu-central-1.elasticbeanstalk.com/login\"> MADANA Explorer</a> can be used to verify the interactions with the API</p> <p>Internal use only. For more information visit <a href=\"https://www.madana.io\">www.madana.io</a></p> <br> <br> # noqa: E501
The version of the OpenAPI document: 0.4.12
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import madana_sampleclient_python
from madana_sampleclient_python.models.json_analysis_result_sub_group_all_of import JsonAnalysisResultSubGroupAllOf # noqa: E501
from madana_sampleclient_python.rest import ApiException
class TestJsonAnalysisResultSubGroupAllOf(unittest.TestCase):
"""JsonAnalysisResultSubGroupAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test JsonAnalysisResultSubGroupAllOf
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = madana_sampleclient_python.models.json_analysis_result_sub_group_all_of.JsonAnalysisResultSubGroupAllOf() # noqa: E501
        if include_optional:
            return JsonAnalysisResultSubGroupAllOf(
                filter='0'
            )
        else:
            return JsonAnalysisResultSubGroupAllOf(
            )
def testJsonAnalysisResultSubGroupAllOf(self):
"""Test JsonAnalysisResultSubGroupAllOf"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"dev@madana.io"
] |
dev@madana.io
|
c34ebf8df587e82c4940fe9afa6c7a9bfb778caf
|
8d3d49f028960bb018adac71172847eb21887810
|
/ruffus/test/test_branching_dependencies.py
|
44a2bcdd246f96933c56a67f8a3caa307eca064b
|
[
"MIT"
] |
permissive
|
msGenDev/ruffus
|
534c2c834d64078471e28df3cc45f96edb549d57
|
8f2e3e34b8eb013bd38ca8fd2cab6af94dd38d31
|
refs/heads/master
| 2021-01-18T09:15:47.035339
| 2014-07-25T13:36:16
| 2014-07-25T13:36:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,581
|
py
|
#!/usr/bin/env python
from __future__ import print_function
"""
branching.py
test branching dependencies
"""
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
from optparse import OptionParser
import sys, os
import os.path
try:
import StringIO as io
except:
import io as io
import re
# add self to search path for testing
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
sys.path.insert(0,os.path.abspath(os.path.join(exe_path,"..", "..")))
if __name__ == '__main__':
module_name = os.path.split(sys.argv[0])[1]
    module_name = os.path.splitext(module_name)[0]
else:
module_name = __name__
parser = OptionParser(version="%prog 1.0")
parser.add_option("-D", "--debug", dest="debug",
action="store_true", default=False,
help="Make sure output is correct and clean up.")
parser.add_option("-t", "--target_tasks", dest="target_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Target task(s) of pipeline.")
parser.add_option("-f", "--forced_tasks", dest="forced_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Pipeline task(s) which will be included even if they are up to date.")
parser.add_option("-j", "--jobs", dest="jobs",
default=1,
metavar="jobs",
type="int",
help="Specifies the number of jobs (commands) to run simultaneously.")
parser.add_option("-v", "--verbose", dest = "verbose",
action="count", default=0,
help="Do not echo to shell but only print to log.")
parser.add_option("--touch_files_only", dest = "touch_files_only",
action="store_true", default=False,
help="Do not run pipeline. Only touch.")
parser.add_option("-d", "--dependency", dest="dependency_file",
#default="simple.svg",
metavar="FILE",
type="string",
help="Print a dependency graph of the pipeline that would be executed "
"to FILE, but do not execute it.")
parser.add_option("-F", "--dependency_graph_format", dest="dependency_graph_format",
metavar="FORMAT",
type="string",
default = 'svg',
help="format of dependency graph file. Can be 'ps' (PostScript), "+
"'svg' 'svgz' (Structured Vector Graphics), " +
"'png' 'gif' (bitmap graphics) etc ")
parser.add_option("-n", "--just_print", dest="just_print",
action="store_true", default=False,
help="Print a description of the jobs that would be executed, "
"but do not execute them.")
parser.add_option("-M", "--minimal_rebuild_mode", dest="minimal_rebuild_mode",
action="store_true", default=False,
help="Rebuild a minimum of tasks necessary for the target. "
"Ignore upstream out of date tasks if intervening tasks are fine.")
parser.add_option("-K", "--no_key_legend_in_graph", dest="no_key_legend_in_graph",
action="store_true", default=False,
help="Do not print out legend and key for dependency graph.")
parser.add_option("-H", "--draw_graph_horizontally", dest="draw_horizontally",
action="store_true", default=False,
help="Draw horizontal dependency graph.")
parameters = [
]
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import time
import re
import operator
import sys,os
from collections import defaultdict
import random
sys.path.append(os.path.abspath(os.path.join(exe_path,"..", "..")))
from ruffus import *
import ruffus
# use simplejson in place of json for python < 2.6
try:
import json
except ImportError:
import simplejson
json = simplejson
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def test_job_io(infiles, outfiles, extra_params):
"""
cat input files content to output files
after writing out job parameters
"""
# dump parameters
params = (infiles, outfiles) + extra_params
if isinstance(infiles, str):
infiles = [infiles]
    elif infiles is None:
infiles = []
if isinstance(outfiles, str):
outfiles = [outfiles]
output_text = list()
for f in infiles:
output_text.append(open(f).read())
output_text = "".join(sorted(output_text))
output_text += json.dumps(infiles) + " -> " + json.dumps(outfiles) + "\n"
for f in outfiles:
open(f, "w").write(output_text)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# get help string
f = io.StringIO()
parser.print_help(f)
helpstr = f.getvalue()
(options, remaining_args) = parser.parse_args()
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# 1 -> 2 -> 3 ->
# -> 4 ->
# 5 -> 6
#
tempdir = "temp_branching_dir/"
#
# task1
#
@originate([tempdir + d for d in ('a.1', 'b.1', 'c.1')])
@follows(mkdir(tempdir))
@posttask(lambda: open(tempdir + "task.done", "a").write("Task 1 Done\n"))
def task1(outfile, *extra_params):
"""
First task
"""
open(tempdir + "jobs.start", "a").write('job = %s\n' % json.dumps([None, outfile]))
test_job_io(None, outfile, extra_params)
open(tempdir + "jobs.finish", "a").write('job = %s\n' % json.dumps([None, outfile]))
#
# task2
#
@posttask(lambda: open(tempdir + "task.done", "a").write("Task 2 Done\n"))
@transform(task1, suffix(".1"), ".2")
def task2(infiles, outfiles, *extra_params):
"""
Second task
"""
open(tempdir + "jobs.start", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
test_job_io(infiles, outfiles, extra_params)
open(tempdir + "jobs.finish", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
#
# task3
#
@transform(task2, regex('(.*).2'), inputs([r"\1.2", tempdir + "a.1"]), r'\1.3')
@posttask(lambda: open(tempdir + "task.done", "a").write("Task 3 Done\n"))
def task3(infiles, outfiles, *extra_params):
"""
Third task
"""
open(tempdir + "jobs.start", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
test_job_io(infiles, outfiles, extra_params)
open(tempdir + "jobs.finish", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
#
# task4
#
@jobs_limit(1)
@transform(tempdir + "*.1", suffix(".1"), ".4")
@follows(task1)
@posttask(lambda: open(tempdir + "task.done", "a").write("Task 4 Done\n"))
def task4(infiles, outfiles, *extra_params):
"""
Fourth task is extra slow
"""
open(tempdir + "jobs.start", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
time.sleep(0.1)
test_job_io(infiles, outfiles, extra_params)
open(tempdir + "jobs.finish", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
#
# task5
#
@files(None, tempdir + 'a.5')
@follows(mkdir(tempdir))
@posttask(lambda: open(tempdir + "task.done", "a").write("Task 5 Done\n"))
def task5(infiles, outfiles, *extra_params):
"""
Fifth task is extra slow
"""
open(tempdir + "jobs.start", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
time.sleep(1)
test_job_io(infiles, outfiles, extra_params)
open(tempdir + "jobs.finish", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
#
# task6
#
#@files([[[tempdir + d for d in 'a.3', 'b.3', 'c.3', 'a.4', 'b.4', 'c.4', 'a.5'], tempdir + 'final.6']])
@merge([task3, task4, task5], tempdir + "final.6")
@follows(task3, task4, task5, )
@posttask(lambda: open(tempdir + "task.done", "a").write("Task 6 Done\n"))
def task6(infiles, outfiles, *extra_params):
"""
final task
"""
open(tempdir + "jobs.start", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
test_job_io(infiles, outfiles, extra_params)
open(tempdir + "jobs.finish", "a").write('job = %s\n' % json.dumps([infiles, outfiles]))
def check_job_order_correct(filename):
"""
1 -> 2 -> 3 ->
-> 4 ->
5 -> 6
"""
precedence_rules = [[1, 2],
[2, 3],
[1, 4],
[5, 6],
[3, 6],
[4, 6]]
index_re = re.compile(r'.*\.([0-9])["\]\n]*$')
job_indices = defaultdict(list)
for linenum, l in enumerate(open(filename)):
m = index_re.search(l)
if not m:
raise "Non-matching line in [%s]" % filename
job_indices[int(m.group(1))].append(linenum)
for job_index in job_indices:
job_indices[job_index].sort()
for before, after in precedence_rules:
if before not in job_indices or after not in job_indices:
continue
if job_indices[before][-1] >= job_indices[after][0]:
raise Exception("Precedence violated for job %d [line %d] and job %d [line %d] of [%s]"
% ( before, job_indices[before][-1],
after, job_indices[after][0],
filename))
def check_final_output_correct(after_touch_files = False):
"""
check if the final output in final.6 is as expected
"""
expected_output = \
""" ["DIR/a.1"] -> ["DIR/a.2"]
["DIR/a.1"] -> ["DIR/a.4"]
["DIR/a.2", "DIR/a.1"] -> ["DIR/a.3"]
["DIR/a.3", "DIR/b.3", "DIR/c.3", "DIR/a.4", "DIR/b.4", "DIR/c.4", "DIR/a.5"] -> ["DIR/final.6"]
["DIR/b.1"] -> ["DIR/b.2"]
["DIR/b.1"] -> ["DIR/b.4"]
["DIR/b.2", "DIR/a.1"] -> ["DIR/b.3"]
["DIR/c.1"] -> ["DIR/c.2"]
["DIR/c.1"] -> ["DIR/c.4"]
["DIR/c.2", "DIR/a.1"] -> ["DIR/c.3"]
[] -> ["DIR/a.1"]
[] -> ["DIR/a.1"]
[] -> ["DIR/a.1"]
[] -> ["DIR/a.1"]
[] -> ["DIR/a.1"]
[] -> ["DIR/a.5"]
[] -> ["DIR/b.1"]
[] -> ["DIR/b.1"]
[] -> ["DIR/c.1"]
[] -> ["DIR/c.1"]"""
expected_output = expected_output.replace(" ", "").replace("DIR/", tempdir).split("\n")
    orig_expected_output = list(expected_output)  # keep a copy: pop() below must not mutate it
if after_touch_files:
expected_output.pop(-3)
final_6_contents = sorted([l.rstrip() for l in open(tempdir + "final.6", "r").readlines()])
if final_6_contents != expected_output:
print("Actual:", file=sys.stderr)
for ll in final_6_contents:
print(ll, file=sys.stderr)
print("_" * 80, file=sys.stderr)
print("Expected:", file=sys.stderr)
for ll in orig_expected_output:
print(ll, file=sys.stderr)
print("_" * 80, file=sys.stderr)
for i, (l1, l2) in enumerate(zip(final_6_contents, expected_output)):
if l1 != l2:
sys.stderr.write("%d\nActual:\n >%s<\nExpected:\n >%s<\n" % (i, l1, l2))
raise Exception ("Final.6 output is not as expected\n")
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
print("Python version %s" % sys.version, file=sys.stderr)
print("Ruffus version %s" % ruffus.__version__, file=sys.stderr)
if options.just_print:
pipeline_printout(sys.stdout, options.target_tasks, options.forced_tasks,
verbose=options.verbose)
elif options.dependency_file:
pipeline_printout_graph ( open(options.dependency_file, "w"),
options.dependency_graph_format,
options.target_tasks,
options.forced_tasks,
draw_vertically = not options.draw_horizontally,
no_key_legend = options.no_key_legend_in_graph)
elif options.debug:
import os
os.system("rm -rf %s" % tempdir)
pipeline_run(options.target_tasks, options.forced_tasks, multiprocess = options.jobs,
logger = stderr_logger if options.verbose else black_hole_logger,
verbose = options.verbose)
check_final_output_correct()
check_job_order_correct(tempdir + "jobs.start")
check_job_order_correct(tempdir + "jobs.finish")
#
# check touch file works, running the pipeline leaving an empty file where b.1
# would be
#
if options.touch_files_only:
#
# remove these because the precedence for the two runs must not be mixed together
#
os.unlink(os.path.join(tempdir, "jobs.start") )
os.unlink(os.path.join(tempdir, "jobs.finish") )
#
# remove b.1 and touch
#
if options.verbose:
print("\n\nNow just delete b.1 for task2...\n")
os.unlink(os.path.join(tempdir, "b.1"))
pipeline_run([task2], options.forced_tasks, multiprocess = options.jobs,
logger = stderr_logger if options.verbose else black_hole_logger,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode,
verbose = options.verbose,
touch_files_only = options.touch_files_only)
#
# Now wait for the empty b.1 to show up in the output
#
if options.verbose:
print("\n\nRun normally...\n")
pipeline_run(options.target_tasks, options.forced_tasks, multiprocess = options.jobs,
logger = stderr_logger if options.verbose else black_hole_logger,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode,
verbose = options.verbose)
check_final_output_correct(options.touch_files_only)
check_job_order_correct(tempdir + "jobs.start")
check_job_order_correct(tempdir + "jobs.finish")
print("OK")
import shutil
shutil.rmtree(tempdir)
else:
pipeline_run(options.target_tasks, options.forced_tasks, multiprocess = options.jobs,
logger = stderr_logger if options.verbose else black_hole_logger,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode,
verbose = options.verbose, touch_files_only = options.touch_files_only)
print("OK")
|
[
"src@llew.org.uk"
] |
src@llew.org.uk
|
61ba1bf8f484efe8d1927186299780006459dc35
|
d8e839c8b630a6f4bd67cdbf3e8624178451719f
|
/testtask/constants.py
|
b7f029bb90483bd3249d3e1e988d6e956502bf44
|
[] |
no_license
|
andy071001/evaluate
|
ea5535224029caffbbd7cd66462790cb16635f1d
|
0f94288e2b15864c9b9f21027c28d74011067586
|
refs/heads/master
| 2021-01-01T05:48:16.399143
| 2013-09-26T02:25:10
| 2013-09-26T02:25:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
#coding=utf8
VERIFY_CODE_LENGTH = 4
online_makepolo_url = "http://192.168.0.211/spc_new.php?search_flag=0&q="
strategy_makepolo_url = "http://caigou.makepolo.com/spc_new.php?search_flag=0&q="
alibaba_url = "http://s.1688.com/selloffer/offer_search.htm?n=y&keywords="
CON_REQ = 5
QUERY_WORD_PER_PAGE = 10
QUERY_TASK_PER_PAGE = 10
SHEET_NAME_DICT = [u'相关性评估', u'数据质量评估', u'策略GSB评估']
STRATEGY_RATING_TEXT = [u'左边好很多', u'左边好一些', u'两边差不多', u'右边好一些', u'右边好很多']
|
[
"liuwenbin_2011@163.com"
] |
liuwenbin_2011@163.com
|
a3107b0c1a2da9aed5839d1306f79a2aa6a91e03
|
0d2f636592dc12458254d793f342857298c26f12
|
/vowel.py
|
d1da799f259f873b5637804df56c23b3325a671c
|
[] |
no_license
|
chenpc1214/test
|
c6b545dbe13e672f11c58464405e024394fc755b
|
8610320686c499be2f5fa36ba9f11935aa6d657b
|
refs/heads/master
| 2022-12-13T22:44:41.256315
| 2020-09-08T16:25:49
| 2020-09-08T16:25:49
| 255,796,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
vowel = ['a', 'e', 'i', 'o', 'u']
word= "milliway"
for letter in word:
if letter in vowel:
print(letter)
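# A quick variant sketch (illustrative only): membership tests against a set
# are O(1), which matters for longer inputs:
vowel_set = set(vowel)
for letter in word:
    if letter in vowel_set:
        print(letter)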
|
[
"kkbuger1523@gmail.com"
] |
kkbuger1523@gmail.com
|
5a44841660257beadc2955a6caffe9c38d935a58
|
313cbdec2661f507409389a4d3f5a3bdd7248658
|
/end-term design/server.py
|
7a0ae5b366096e6d993b8a41b3c696c62d250725
|
[] |
no_license
|
huangjj27/web2.0_experiments
|
673853e49d857bd1bbcd7f3d3bb2e3967494853e
|
a8151ca9ff1569865f1103aff45821d311206950
|
refs/heads/master
| 2021-01-18T13:21:44.627631
| 2016-07-01T13:34:36
| 2016-07-01T13:34:36
| 25,640,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,103
|
py
|
# -*- coding: utf-8 -*-
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import os.path
import re
import time
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
name_pattern = re.compile(r"[a-zA-Z0-9]{6,12}")
password_pattern = re.compile(r"[A-Z][a-zA-Z0-9]{5,11}")
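# name: 6-12 alphanumeric characters; password: one uppercase letter followed
# by 5-11 alphanumerics. Note that match() anchors only at the start, so a
# longer string with a valid prefix also passes.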
#file input-output functions
#operations on userData
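# userData.txt stores one "name,password" pair per line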
def get_Users():
users_file = open("static/data/userData.txt")
users_list = users_file.read().strip().split('\n')
users = []
for user_item in users_list:
users.append(user_item.split(','))
users_file.close()
return users
def write_Users(users):
users_file = open("static/data/userData.txt", "w")
users_list = []
for user in users:
users_list.append(','.join(user))
users_file.write('\n'.join(users_list)+'\n')
users_file.close()
#operations on questionsData
def get_Questions():
questions_file = open("static/data/questionData.txt")
    questions_list = questions_file.read().strip().split('\n')  # Python 3: read() already returns str
questions = []
for question_item in questions_list:
questions.append(question_item.split(';'))
questions_file.close()
return questions
def write_Questions(questions):
questions_file = open("static/data/questionData.txt", "w")
questions_list = []
for question in questions:
        questions_list.append(';'.join(question))
questions_file.write('\n'.join(questions_list)+'\n')
questions_file.close()
#operations on replyData
def get_Replies():
reply_file = open("static/data/replyData.txt")
    reply_list = reply_file.read().strip().split('\n')
reply = []
for reply_item in reply_list:
reply.append(reply_item.split(';'))
reply_file.close()
return reply
def write_Replies(replies):
reply_file = open("static/data/replyData.txt", "w")
reply_list = []
for reply in replies:
        reply_list.append(';'.join(reply))
reply_file.write('\n'.join(reply_list)+'\n')
reply_file.close()
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("user")
class IndexHandler(BaseHandler):
def get(self):
if self.current_user:
questions = get_Questions()
questions = questions[::-1]
replies = get_Replies()
self.render("index.html",title='主页', Username=self.current_user,
questions=questions, replies=replies)
else:
self.redirect("/login")
return
class LoginHandler(BaseHandler):
def get(self):
self.render("login_signup.html", title="登录页面", button="登录",
link="没有账号?点击注册", url="/signup", action="/login",
subtitle="")
def post(self):
user = [self.get_argument("name").encode('utf-8'),
self.get_argument("password").encode('utf-8')]
users = get_Users()
if user in users:
self.set_secure_cookie("user", user[0], expires_days=None)
self.redirect("/")
else:
self.render("login_signup.html", title="登录页面", button="登录",
link="没有账号?点击注册", url="/signup",
action="/login", subtitle="登录失败,请重新尝试")
class LogoutHandler(BaseHandler):
def get(self):
self.clear_all_cookies()
return self.redirect("/login")
class SignUpHandler(BaseHandler):
def get(self):
self.render("login_signup.html", title = "注册页面", button="注册",
link="已有账号?点击登录",
url="/login", action="/signup", subtitle="")
def post(self):
user = [self.get_argument("name").encode('utf-8'),
self.get_argument("password").encode('utf-8')]
users = get_Users()
valid_1 = name_pattern.match(user[0]) and \
password_pattern.match(user[1])
# avoid the same user be registered again by other password
valid_2 = True
for user_item in users:
if user_item[0] == user[0]:
valid_2 = False
if valid_1 and valid_2:
users.append(user)
write_Users(users)
self.render("login_signup.html", title = "注册页面", button="注册",
link="已有账号?点击登录",
url="/login", action="/signup",
subtitle="注册成功!请前往登录页面登录")
else:
self.render("login_signup.html", title = "注册页面", button="注册",
link="已有账号?点击登录",
url="/login", action="/signup",
subtitle="注册失败,请重新尝试")
# coded by YaoShaoling, corrected by HuangJunjie
class QuestionHandler(BaseHandler):
def get(self):
if self.current_user:
self.render('question.html', title="提问页面", subtitle="问题",
uptext="问题内容", button="提交问题",
Username=self.current_user)
else:
self.redirect("/login")
def post(self):
questions = get_Questions()
stitle = self.get_argument('title', None)
stime = time.strftime("%Y-%m-%d %H:%M")
stext = self.get_argument('content', None)
invalid_1 = re.search(';', stitle) or re.search(';', stext)
        invalid_2 = re.search('\n', stitle) or re.search('\n', stext)
if invalid_1 or invalid_2:
return self.render('question.html', title="提问页面",
subtitle="问题",
uptext="问题内容", button="提交问题",
Username=self.current_user)
if stitle and stime and stext:
new_question = [stitle, stime, self.current_user, stext]
questions.append(new_question)
write_Questions(questions)
return self.redirect('/')
return self.render('question.html', title="提问页面", subtitle="问题",
uptext="问题内容",
button="提交问题", Username=self.current_user)
class WrongHandler(tornado.web.RequestHandler):
def get(self):
self.write_error(404)
def write_error(self, status_code, **kwages):
if status_code == 404:
self.render('404.html', title = "404")
else:
self.write('Ah ha! error:' + str(status_code))
class ResponseHandler(BaseHandler):
def post(self):
replies = get_Replies()
responsetext = self.get_argument('responsetext', None)
responsetitle = self.get_argument('title', None)
if responsetext and responsetitle:
new_reply = [responsetitle, time.strftime("%Y-%m-%d %H:%M"),
self.current_user, responsetext]
replies.append(new_reply)
write_Replies(replies)
return self.redirect('/')
settings = {
"template_path": os.path.join(os.path.dirname(__file__), "template"),
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"cookie_secret": "61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
"login_url": "/login",
"debug": True
}
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application([
(r"/", IndexHandler),
(r"/login", LoginHandler),
(r"/signup", SignUpHandler),
(r"/logout", LogoutHandler),
(r"/question", QuestionHandler),
(r"/response", ResponseHandler),
(r".*", WrongHandler)],
**settings
)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
|
[
"349373001@qq.com"
] |
349373001@qq.com
|
cc6579536149f92ed343cb5352c5a36895ee78d9
|
dca141c8c887d09828b259e4e85edefe303a8684
|
/basic/models.py
|
b4bf9f47c234654e050133059330db10ac672fcb
|
[] |
no_license
|
ImLordImpaler/hackathon2.0
|
f55cd499f0e64042d689d5ca0f2510edc6af6880
|
6dfbfb2a90c6e3b9e8ddc5cc22111bb592d3a195
|
refs/heads/main
| 2023-04-22T01:40:50.170012
| 2021-05-16T06:53:08
| 2021-05-16T06:53:08
| 367,805,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User , related_name='profile_user',on_delete=models.CASCADE)
fname = models.CharField(max_length=1000, null=True)
lname = models.CharField(max_length=1000, null=True)
dob = models.DateField(null=True)
email = models.EmailField(null=True)
fake_id = models.IntegerField(default=0)
friends = models.ManyToManyField(User )
def __str__(self) :
return str(self.fname)
class Post(models.Model):
txt = models.CharField(max_length=100000)
user = models.ForeignKey(User , on_delete=models.CASCADE)
likes = models.IntegerField(default=0)
dislikes = models.IntegerField(default=0)
time = models.DateTimeField(auto_now_add=True)
liked = models.ManyToManyField(User , related_name="post_like" )
def total_liked(self):
return self.liked.count()
def __str__(self) :
return self.txt
class Comment(models.Model):
post = models.ForeignKey(Post , on_delete=models.CASCADE)
text = models.CharField(max_length=100000)
user = models.ForeignKey(User , on_delete=models.CASCADE)
liked = models.ManyToManyField(User , related_name="comment_like" , blank=True)
def __str__(self) :
return self.text
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
obj = Profile.objects.create(user=instance)
        obj.fake_id = instance.id  # instance is the User itself; it has no .user attribute
obj.save()
|
[
"ksfastners619@gmail.com"
] |
ksfastners619@gmail.com
|
ae993260199a1bd4f613b868736c93d2f0d23f44
|
198a9eee33187d90bab8d4e2d57c0cea10eedb84
|
/chap5/exe1.py
|
b010e5f90be03ca317a786ff0d17f81cf3748f1c
|
[] |
no_license
|
mihirverma7781/Python-Scripts
|
2fe7de78004dcc0ff7f5c7b5a85f0a0f4d539967
|
5fa6751fb072f4599d4919d2b0ee096b9064e963
|
refs/heads/master
| 2022-12-19T04:38:55.611510
| 2020-10-01T18:47:15
| 2020-10-01T18:47:15
| 300,388,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
number = list(range(1,11))
def square(l):
    empty = []
for i in l:
empty.append(i*i)
return empty
print(square(number))
|
[
"mihirv7781@gmail.com"
] |
mihirv7781@gmail.com
|
da38fc3ea3b67732802f09a7cad2d4a6fc6d2eaf
|
177b5e8c33d3fede31291556cf57cd9fda08bb73
|
/03.welcome-user/data_models/__init__.py
|
ebbed7f923f54df3b6e1382d16d212dce66ae2f3
|
[] |
no_license
|
Lycrika/Prueba_bot
|
d6d32954cfb53989d7ed1dbcec262adf88eb383e
|
3dc22991d6ef2d94a4b230ea9d0ddaff5e7db658
|
refs/heads/master
| 2022-12-10T23:07:54.561038
| 2020-09-03T16:54:32
| 2020-09-03T16:54:32
| 292,627,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .welcome_user_state import WelcomeUserState
__all__ = ["WelcomeUserState"]
|
[
"noreply@github.com"
] |
Lycrika.noreply@github.com
|
50c8ed36aa0ef9ef9ff88d080685ef00cd226fff
|
2ab8567a39de52b0bd31b69be40a6a4c1080121f
|
/posts/models.py
|
a2dab9e06801ccc00ec7edf08a48a3ddfb897215
|
[] |
no_license
|
urahman1517/twitter
|
d84526cfb93ca09f08344790118ec1a8f0c542c9
|
61052cf3feb2e9cf66ff765973c436064bbfb4f8
|
refs/heads/main
| 2023-07-01T22:32:26.273753
| 2021-08-20T04:26:42
| 2021-08-20T04:26:42
| 397,506,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
from django.db import models
from cloudinary.models import CloudinaryField
# Create your models here.
class Post(models.Model):
class Meta(object):
db_table = 'post'
name = models.CharField(
'Name' , blank=False, null=False, max_length=14, db_index=True, default='Anonymous'
)
body = models.CharField(
'Body' , blank=True, null=True, max_length=140, db_index=True
)
image = CloudinaryField (
'image' , blank=True
)
like_count=models.PositiveIntegerField (
'like_count' , default=0 , blank=True
)
created_at = models.DateTimeField(
'Created DateTime', blank=True, auto_now_add=True
)
|
[
"urahman1517@outlook.com"
] |
urahman1517@outlook.com
|
a2e495fdc47015c860dc2e716dfa6d8a401a6538
|
0b40232eb2395c27353c892ef4ccb5c604bb75be
|
/Array/third_max.py
|
174029680ba012a49f9c34cb0d61196da859ba00
|
[] |
no_license
|
HareshNasit/LeetCode
|
971ae9dd5e4f0feeafa5bb3bcf5b7fa0a514d54d
|
674728af189aa8951a3fcb355b290f5666b1465c
|
refs/heads/master
| 2021-06-18T07:37:40.121698
| 2021-02-12T12:30:18
| 2021-02-12T12:30:18
| 168,089,751
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
def thirdMax(self, nums):
"""
https://leetcode.com/problems/third-maximum-number/submissions/
:type nums: List[int]
:rtype: int
"""
nums_set = set(nums)
nums_list = list(nums_set)
nums_list.sort(reverse = True)
if len(nums_list) > 2:
return nums_list[2]
return nums_list[0]
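# A minimal usage sketch: the snippet above is a LeetCode-style method, so it
# is bound to a throwaway class here (the class name is ours, not LeetCode's):
class _Solution:
    thirdMax = thirdMax
if __name__ == "__main__":
    print(_Solution().thirdMax([2, 2, 3, 1]))  # 1: the third distinct maximum
    print(_Solution().thirdMax([1, 2]))        # 2: fewer than three distinct values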
|
[
"harsh.nasit@mail.utoronto.ca"
] |
harsh.nasit@mail.utoronto.ca
|
448f10654f9220c75b2e6296a98881c99e513397
|
ea54fa9ee09d90ec2db1c5740704d55577bc1b54
|
/app/test.py
|
fafe813236502ee648c274261f4ee63939cd0caa
|
[
"MIT"
] |
permissive
|
bayuajinurmnsh/test_task
|
0cf004c2600bc4bd13cd822ee5478ab03a1f818e
|
67331413d240f9fd3c67ab05343e5a216b112f4a
|
refs/heads/main
| 2023-07-06T09:54:13.551712
| 2021-08-15T08:58:19
| 2021-08-15T08:58:19
| 395,964,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,563
|
py
|
import unittest
from app import app
import technical
class Test(unittest.TestCase):
#UNIT TEST FOR app.py
URL = "http://127.0.0.1:5000/test_task/api/distance_address"
data_valid = {"address": "Moscow"}
key_invalid = {"adres": "Moscow"}
invalid_address_1 ={"address": "@5-!&*a"}
invalid_address_2 ={"address": "-1@1 jgstuo2"}
outside_mkad = {"address": "Jakarta, Indonesia"}
error_1 = b'{"message":"You have to send data in json format"}\n'
error_2 = b'{"message":"make sure you have key address in your JSON data"}\n'
error_3 = b'{"message":"Invalid address!"}\n'
error_4 = b'{"message":"Can not find your address!"}\n'
error_5 = b'{"message":"Server do not have access to yandex API"}\n'
inside_mkad = b'{"message":"area inside MKAD"}\n'
# Test for index function
# Test to check the index function if it run properly or not
def test_index(self):
test = app.test_client(self)
response = test.get('/', content_type = 'html/text')
self.assertEqual(response.status_code, 200)
# Test for distance_address function
# Test to check if address is inside Moscow ring road
def test_inside_mkad(self):
tester = app.test_client(self)
response = tester.post(self.URL, json = self.data_valid,
content_type = 'application/json')
self.assertEqual(self.inside_mkad,response.data)
self.assertEqual(response.status_code, 200)
# Test to check if address is outside Moscow ring road
def test_outside_mkad(self):
tester = app.test_client(self)
response = tester.post(self.URL, json = self.outside_mkad,
content_type = 'application/json')
self.assertNotEqual(response.data, self.inside_mkad)
self.assertEqual(response.status_code, 200)
    # Test the case where the client does not post JSON
    # (here XML is sent instead)
def test_content_type_not_json(self):
test = app.test_client(self)
response = test.post(self.URL, data = self.data_valid,
content_type='application/xml')
self.assertEqual(response.data, self.error_1)
self.assertEqual(response.status_code, 415)
    # Test the case where the key in the JSON data is invalid:
    # the valid key is 'address', but this test uses 'adres'
def test_content_key_invalid(self):
test = app.test_client(self)
response = test.post(self.URL, json = self.key_invalid,
content_type = 'application/json')
self.assertEqual(response.data, self.error_2)
self.assertEqual(response.status_code, 400)
# Invalid address type 1
    # This test checks whether the client sent only a single letter in the
    # address, or a single-digit number such as "5"
def test_invalid_addres_1(self):
tester = app.test_client(self)
response = tester.post(self.URL, json = self.invalid_address_1,
content_type = 'application/json')
self.assertEqual(response.data, self.error_3)
self.assertEqual(response.status_code, 422)
# Invalid address type 2
    # The address passes the type-1 check, but Yandex cannot find
    # a latitude and longitude for it
def test_invalid_addres_2(self):
tester = app.test_client(self)
response = tester.post(self.URL, json = self.invalid_address_2,
content_type = 'application/json')
self.assertEqual(response.data, self.error_4)
self.assertEqual(response.status_code, 404)
    # Test whether our server has access to the Yandex API
    # (the error occurs when the server has no valid API key)
def test_access(self):
tester = app.test_client(self)
response = tester.post(self.URL, json = self.data_valid,
content_type = 'application/json')
self.assertNotEqual(response.data, self.error_5)
self.assertNotEqual(response.status_code, 500)
#UNIT TEST FOR technical.py
# Test if address inside mkad [test class CheckDistance]
def test_count_distance_1(self):
lat = 55.753220 #lat for Moscow, Russia
long = 37.622513 #long for Moscow, Russia
obj_check_distance = technical.CheckDistance(lat,long)
count_distance = obj_check_distance.count_distance()
self.assertEqual('area inside MKAD', count_distance)
# Test if address outside mkad [test class CheckDistance]
# And test if count_distance return a value in float type
def test_count_distance_2(self):
lat = -6.175391 #lat for Jakarta, Indonesia
long = 106.826261 #long for Jakarta, Indonesia
obj_check_distance = technical.CheckDistance(lat,long)
count_distance = obj_check_distance.count_distance()
self.assertNotEqual('area inside MKAD', count_distance)
self.assertIs(type(count_distance), float)
# Test if lat, and long not in float type [test class CheckDistance]
def test_count_distance_3(self):
lat_1 = "-6.175391"
long_1 = 106.826261
lat_2 = "Moscow, Russia"
long_2 = "Jakarta, Indonesia"
obj_check_distance_1 = technical.CheckDistance(lat_1,long_1)
count_distance_1 = obj_check_distance_1.count_distance()
self.assertEqual("latitude and longitude must be in float type",
count_distance_1)
obj_check_distance_2 = technical.CheckDistance(lat_2,long_2)
count_distance_2 = obj_check_distance_2.count_distance()
self.assertEqual("latitude and longitude must be in float type",
count_distance_2)
# Test haversine function if lat and long in float type
# And check if result not in string type [test class CheckDistance]
def test_haversine_1(self):
lat_1 = 55.898947 #lat for MKAD, 88th kilometre, inner side
long_1 = 37.632206 # long for MKAD, 88th kilometre, inner side
lat_2 = 38.231572
long_2 = 25.192846
obj_check_distance = technical.CheckDistance(lat_2,long_2)
haversine = obj_check_distance.haversine(lat_1, long_1,
lat_2, long_2)
self.assertIsNot(type(haversine), str)
self.assertIs(type(haversine), float)
# Test haversine function if lat and lon in integer
def test_haversine_2(self):
lat_1 = int(55)
long_1 = int(37)
lat_2 = int(38)
long_2 = int(-25)
obj_check_distance = technical.CheckDistance(lat_2,long_2)
haversine = obj_check_distance.haversine(lat_1, long_1,
lat_2, long_2)
self.assertIsNot(type(haversine), str)
# Test if lat or long in string type
def test_haversine_3(self):
lat_1 = str(55) #lat for MKAD, 88th kilometre, inner side
long_1 = 37 # long for MKAD, 88th kilometre, inner side
lat_2 = 38.0098
long_2 = "15"
obj_check_distance = technical.CheckDistance(lat_2,long_2)
haversine = obj_check_distance.haversine(lat_1, long_1,
lat_2, long_2)
self.assertEqual("latitude and longitude can not be string",
haversine)
# Test check_address function to check address is valid or not
    # Valid if the address is letters of length >= 2,
    # or a number of length >= 2
def test_check_address_valid_1(self):
address = "Moscow, Russia"
obj_check_address = technical.TextPreprocessing(address)
check_address = obj_check_address.check_address()
self.assertEqual("valid", check_address)
# Test if lat and long value in string type
def test_check_address_valid_2(self):
address = "55.2333, 25.444221"
obj_check_address = technical.TextPreprocessing(address)
check_address = obj_check_address.check_address()
self.assertEqual("valid", check_address)
# Test if address not in string type
    def test_check_address_valid_3(self):  # renamed: a duplicate _2 would shadow the test above
address = 55.233325
obj_check_address = technical.TextPreprocessing(address)
check_address = obj_check_address.check_address()
self.assertEqual("address must be in string type", check_address)
if __name__ == "__main__":
unittest.main()
|
[
"bayuaji.nurmansah16@gmail.com"
] |
bayuaji.nurmansah16@gmail.com
|
7564c377061b8558390c11f80829db31740ea8d9
|
7b3305ce06473172df7e441dbaa4d486f1449171
|
/linked_dynamic_solver.py
|
96b6cda67a837cf320b97aea8e4ee57a44021661
|
[] |
no_license
|
Kylepoore/knapsack-solver
|
6ffda82b442e6d1119b83820b44bb8a5d97411fd
|
3eb27fc4740919ecab52ddeb5a572422236e7303
|
refs/heads/master
| 2020-04-05T23:40:41.176024
| 2013-07-18T03:28:10
| 2013-07-18T03:28:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
def solveIt(inputData):
# Modify this code to run your optimization algorithm
# parse the input
lines = inputData.split('\n')
firstLine = lines[0].split()
items = int(firstLine[0])
capacity = int(firstLine[1])
values = []
weights = []
for i in range(1, items+1):
line = lines[i]
parts = line.split()
values.append(int(parts[0]))
weights.append(int(parts[1]))
items = len(values)
#dp algorithm:
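    # ltable[j][k] holds a (value, weight) pair: the best selection found
    # using only the first j items within capacity k; row j is derived from
    # row j-1 by either skipping item j or adding it to a smaller-capacity cell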
    print(items * capacity)
ltable = [ [] ]
    tuples = list(zip(values, weights))  # materialize: indexed below (Python 3 zip is lazy)
for j in range(0,items+1):
ltable.append([])
for k in range(0,capacity+1):
ltable[j].append([])
ltable[j][k] = ((0,0))
if(j == 0 or k == 0):
continue
if(weights[j-1] <= k):
ltable[j][k] = tuples[j-1]
else:
ltable[j][k] = ltable[j-1][k]
continue
if(weights[j-1] + ltable[j-1][k][1] <= k):
ltable[j][k] = tuple(a+b for a,b in zip(ltable[j-1][k], tuples[j-1]))
if(ltable[j-1][k][0] > values[j-1]):
ltable[j][k] = ltable[j-1][k]
if (ltable[j-1][k-weights[j-1]][0] + values[j-1] > ltable[j-1][k][0]):
ltable[j][k] = tuple(a+b for a, b in zip(ltable[j-1][k-weights[j-1]], tuples[j-1]))
else:
ltable[j][k] = ltable[j-1][k]
print "-",
print "done"
value = ltable[items][capacity][0]
print "backtracking..."
taken = []
i = items
j = capacity
for i in range(items,0,-1):
if(ltable[i][j][0] == ltable[i-1][j][0]):
taken.append(0)
else:
j = j - weights[i-1]
taken.append(1)
taken.reverse()
for k in range(0,capacity+1):
for j in range(0,items+1):
            print(str(ltable[j][k][0]) + "\t", end="")
        print("")
# print table[(items,capacity,0)]
# prepare the solution in the specified output format
outputData = str(value) + ' ' + str(1) + '\n'
outputData += ' '.join(map(str, taken))
return outputData
import sys
if __name__ == '__main__':
if len(sys.argv) > 1:
fileLocation = sys.argv[1].strip()
inputDataFile = open(fileLocation, 'r')
inputData = ''.join(inputDataFile.readlines())
inputDataFile.close()
        print(solveIt(inputData))
else:
        print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)')
|
[
"kylepoore1@gmail.com"
] |
kylepoore1@gmail.com
|
c2e4537265eacfee364c3be61266d0a16861c951
|
dc39ccc50b7d34e5de84f3cc132c5cc096a32656
|
/BASIC/class/attribute.py
|
40377cc862a0cdd596c36046d3178d5438bfeccf
|
[] |
no_license
|
Shukladas1115/Python
|
0947aefd62a9ce4c3140360cb7259b031368709c
|
feb32bc2e2e7df377fc2d92330bfdacb83f31a55
|
refs/heads/master
| 2022-02-20T04:15:56.036495
| 2019-08-26T16:36:52
| 2019-08-26T16:36:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
class A(object):
x = 1
class B(A):
pass
class C(A):
pass
print(A.x, B.x, C.x) # 1 1 1
B.x = 2
print(A.x, B.x, C.x) # 1 2 1
A.x = 3
print(A.x, B.x, C.x) # 3 2 3 -- why is that?
'''
C doesn't have its own x attribute, independent of A.
Thus, references to C.x are in fact references to A.x.
C inherits from A: it does not really own the attribute x, it just refers to A's x.
'''
|
[
"minhhien90@gmail.com"
] |
minhhien90@gmail.com
|
cd96121d34ea23c08f5222fe1efa991136ffc79b
|
8b3345a2a5005dfe42afbd6a0653ac7fc61f037d
|
/server/app.py
|
0d9764ae696d6f3c9c51fe32821d0cb4edc39d99
|
[
"MIT"
] |
permissive
|
bfortuner/label-ai
|
a40be4e65ef7c4f46f8810d98c3d13b114bc42fc
|
f05896c2b2c2d282763ee7db54b5f66066073961
|
refs/heads/master
| 2021-01-19T14:17:00.034167
| 2017-09-01T16:07:59
| 2017-09-01T16:07:59
| 100,892,823
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
from flask import Flask
from flask_cors import CORS
from flask_graphql import GraphQLView
from schema import Schema
from flask import Response, request, abort, send_from_directory
import config as cfg
import data
def create_app(**kwargs):
app = Flask(__name__)
app.debug = True
app.add_url_rule(
'/graphql',
view_func=GraphQLView.as_view('graphql', schema=Schema, **kwargs)
)
return app
app = create_app(graphiql=True)
@app.route('/image/<filename>')
def image(filename):
return send_from_directory(cfg.MEDIA_PATH, filename)
if __name__ == '__main__':
CORS(app, resources={r'/graphql': {'origins': '*'}})
app.run(host='0.0.0.0')
|
[
"bfortuner@gmail.com"
] |
bfortuner@gmail.com
|
fec89b02f0a042bdaf94e3f1b051f4f3ce65eb22
|
286407dc9a39025447a2a796125125b5558882e1
|
/comments_app/forms.py
|
c197c6c0599f2213a340f64f620cbf498d8c0b97
|
[] |
no_license
|
cyh1995/blog0.9
|
f2a608e0bc2ffd91ee9fd8b68133eb5cc2698ef0
|
ee535bfec3068a2e3b6cfafce34a7cd22263a284
|
refs/heads/master
| 2022-12-09T05:04:51.772719
| 2018-06-09T02:31:53
| 2018-06-09T02:31:53
| 136,444,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
# By calling methods and attributes of this class, which inherits from the form object, we replace the original hand-written front-end form
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['name','email','url','text']
|
[
"969963179@qq.com"
] |
969963179@qq.com
|
d3d9bd3030137f0b28db61765cc2c6f602ff6ca5
|
591806a05facb216f4bec4615c91417ea8b68293
|
/yummy/restaurant/forms.py
|
e1a592327d58fd5c86d37518dee33e6e61ef8412
|
[] |
no_license
|
neostoic/yummy
|
e8e214b3e56605850c40a823f82bd85a50b282bd
|
bedbdd6239e8aab7ea38778bf5b92e2812095c55
|
refs/heads/master
| 2021-01-17T23:36:14.032762
| 2014-05-12T15:30:33
| 2014-05-12T15:30:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
from django import forms
from restaurant.models import Review
from django.utils.translation import ugettext_lazy as _
class ReviewForm(forms.ModelForm):
class Meta:
model = Review
fields = ['content', 'rating']
widgets = {'rating': forms.RadioSelect()}
labels = {'content': _('Write Review')}
help_texts = {'content': _('Please write your reviews for this restaurant.')}
def clean_rating(self):
rating = self.cleaned_data['rating']
if rating < 1 or rating > 5:
raise forms.ValidationError('rating should be an int between 1 and 5')
return rating
|
[
"ghylxdw@gmail.com"
] |
ghylxdw@gmail.com
|
f4c38240821bf96e65612f342986cf276694f90d
|
34578a08451dc124f02fbba92a219da3347059cd
|
/.history/tools/views_20190502130213.py
|
5ef8462e7964c7373832387076323b91f3acac43
|
[] |
no_license
|
gwjczwy/CTF-Exercises
|
b35d938b30adbc56c1b6f45dc36cea1421c702fb
|
c2d5c47f5047b1601564453e270ce50aad7f56fc
|
refs/heads/master
| 2020-05-25T23:51:26.190350
| 2019-05-22T13:18:59
| 2019-05-22T13:18:59
| 188,042,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,399
|
py
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from json import dumps
from .models import Url,Money
import time
#########################
# Configuration variables
sourcePath=r'C:\Users\arnoux\Desktop\训练平台\sql\log.txt'
#########################
# Home page
@login_required
def index(requests):
data={'toolname':'index','user':requests.user}
return render(requests,'tools/index.html',data)
#########################
# Short URLs
@login_required
def surl(requests):# short-URL index
data={}
data['toolName']="surl"
data['parameter']="index"
return render(requests, 'tools/index.html', data)
def surls(requests,parameter):# redirect for a short URL carrying a parameter
data={}
data['toolName']="surl"
data['parameter']="link"
    print('short-URL parameter', parameter)
    try:
        req=Url.objects.get(sUrl=parameter)
        print('object fetched successfully')
    except:
        return HttpResponse("You've come to the wrong place, Wukong")
req=req.fullUrl
return HttpResponse('<script>window.location.href="'+req+'";</script>')
@csrf_exempt
@login_required
def createSUrl(requests):
if not (requests.method == 'POST' and requests.POST['fullUrl']):
req={'message':'fail'}
return HttpResponse(dumps(req),content_type="application/json")
fullUrl=requests.POST['fullUrl']
while True:
        randUrl=randStr(5)# random 5-character string
        try:
            Url.objects.get(sUrl=randUrl)# if it already exists, roll again
            print('one! more! time!')
except:
break
Url(sUrl=randUrl,fullUrl=fullUrl).save()
req={'message':'success','url':randUrl}
return HttpResponse(dumps(req),content_type="application/json")
def randStr(l):
import random
import string
seed = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
sa = []
for i in range(l):
sa.append(random.choice(seed))
salt = ''.join(sa)
return salt
#########################
# Shop
@login_required
def shop(requests):
data={}
data['toolName']="shop"
money = Money.objects.get(user=requests.user)
data['money']=money
return render(requests, 'tools/index.html', data)
# Shop currency exchange
@csrf_exempt
@login_required
def shopExchange(requests):
if not (requests.method == 'POST' and 'rule' in requests.POST and 'num' in requests.POST):
        print('invalid request')
        req={'message':'fail','reason':'invalid request'}
return HttpResponse(dumps(req),content_type="application/json")
rule=requests.POST['rule']
num=requests.POST['num']
    if rule not in ['m2b','b2m']:# validate the conversion rule
        print('invalid rule parameter')
        req={'message':'fail','reason':'invalid rule parameter'}
return HttpResponse(dumps(req),content_type="application/json")
    if num.isdigit():# validate the number
num=int(num)
if num<0:
            req={'message':'fail','reason':'invalid parameter'}
return HttpResponse(dumps(req),content_type="application/json")
else:
        req={'message':'fail','reason':'invalid parameter'}
return HttpResponse(dumps(req),content_type="application/json")
    # fetch the user's currency record
money = Money.objects.get(user=requests.user)
if rule=='m2b':
if money.monero>=num:
money.bitcoin+=num
money.save()
            time.sleep(5) # deliberate delay that opens a race-condition window
money.monero-=num
money.save()
else:
            req={'message':'fail','reason':'not enough monero'}
return HttpResponse(dumps(req),content_type="application/json")
elif rule=='b2m':
if money.bitcoin>=num:
money.monero+=num
money.save()
time.sleep(5)
money.bitcoin-=num
money.save()
else:
            req={'message':'fail','reason':'not enough bitcoin'}
return HttpResponse(dumps(req),content_type="application/json")
else:
        req={'message':'fail','reason':'unknown error'}
return HttpResponse(dumps(req),content_type="application/json")
req={'message':'success','monero':money.monero,'bitcoin':money.bitcoin}
return HttpResponse(dumps(req),content_type="application/json")
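# A minimal race-condition sketch against the exchange view above; nothing runs
# at import time. The route '/tools/shopExchange' and a logged-in requests
# `session` are illustrative assumptions, not taken from this project.
def _race_exchange_demo(session, base_url='http://127.0.0.1:8000'):
    import threading
    def fire():
        session.post(base_url + '/tools/shopExchange',
                     data={'rule': 'm2b', 'num': '100'})
    # both requests read the same balance inside the 5-second sleep window,
    # so bitcoin is credited twice while monero is debited as if only once
    threads = [threading.Thread(target=fire) for _ in range(2)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()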
#########################
# Logs
@login_required
def logs(requests):
data={}
data['toolName']="logs"
return render(requests, 'tools/index.html', data)
# Append to a log file
@csrf_exempt
@login_required
def addLog(requests):
if not (requests.method == 'POST' and 'path' in requests.POST and 'content' in requests.POST):
        req={'message':'fail','reason':'invalid request'}
return HttpResponse(dumps(req),content_type="application/json")
path=requests.POST['path']
content=requests.POST['content']
    # fetch the user's currency record
money = Money.objects.get(user=requests.user)
if money.bitcoin >=100:
try:
with open(path,'at') as file:
file.write(content)
money.bitcoin-=100
money.save()
            req={'message':'success','reason':'operation succeeded'}
return HttpResponse(dumps(req),content_type="application/json")
except:
            req={'message':'fail','reason':'error writing file'}
return HttpResponse(dumps(req),content_type="application/json")
else:
        req={'message':'fail','reason':'not enough currency'}
return HttpResponse(dumps(req),content_type="application/json")
# Fetch logs
def getLog(requests):
    req={'message':'fail','reason':'not enough currency'}
return HttpResponse(dumps(req),content_type="application/json")
# Download the source code
def downSource(requests):
    # fetch the user's currency record
money = Money.objects.get(user=requests.user)
if money.bitcoin >=1000:
money.bitcoin-=1000
money.save()
file = open(sourcePath, 'rb')
response = HttpResponse(file)
    response['Content-Type'] = 'application/octet-stream' # set headers so the browser treats the response as a file download
response['Content-Disposition'] = 'attachment;filename="'+sourcePath.split('\\')[-1]+'";'
return response
else:
        req={'message':'fail','reason':'not enough currency'}
return HttpResponse(dumps(req),content_type="application/json")
|
[
"zwy053@163.com"
] |
zwy053@163.com
|
3c8a6763e29d5fa0d860c9b0725bd43c3a8400b0
|
71aa88ebc6fc7b2b7cb119ab19e7d0815b2bb11b
|
/mysite/settings.py
|
07aaac233aab2ed0e099f5c7e9826578132a24ac
|
[] |
no_license
|
zv100558snv/my-first-blog
|
e69f9b9fe6a18b6499603c776038d24bd6c8c7e5
|
52d7544a604986dde2e969b4e7dd08d8f4d9a03b
|
refs/heads/master
| 2021-01-25T11:33:33.377867
| 2017-06-16T17:57:40
| 2017-06-16T17:57:40
| 93,933,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,293
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!8uh7@x(rb=8+suo0u3sn)olh&9bs_3ma+c*o-^2f$+&rd#=_)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['zv100558snv.pythonanywhere.com']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'blog',
    'register', # registration app
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
#'whitenoise.middleware.WhiteNoiseMiddleware', # http://whitenoise.evans.io/en/stable/
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Kiev' #'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/' # URL prefix for static files
STATIC_ROOT = os.path.join(BASE_DIR, 'static') # filesystem location where static files are collected
LOGIN_REDIRECT_URL = '/'
|
[
"zv100558snv@gmail.com"
] |
zv100558snv@gmail.com
|
fa36d96624f3655b5258367533c44b0c14db498b
|
d364123a0655bff7e9d725382934fe2c15b5bfc4
|
/Crawler/lianxi/hsimg_test.py
|
bc62fc7c1c354c4ba3007bd3c78507f7a0a83c1e
|
[] |
no_license
|
yuan1093040152/SeleniumTest
|
88d75361c8419354f56856c326f843a0a89d7ca6
|
d155b98702bc46c174499042b43257696b861b5e
|
refs/heads/master
| 2023-08-31T15:00:25.415642
| 2023-08-30T09:26:42
| 2023-08-30T09:26:42
| 227,269,300
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,271
|
py
|
#coding=utf-8
'''
Created on 2018-07-15
@author: kai.yangf
'''
import requests,re,time
from requests.exceptions import RequestException
from threading import Thread
def get_one_page(url):
try:
response = requests.get(url)
html = response.text
if response.status_code == 200:
print (True)
print (html[:5])
return html
else:
return None
except RequestException:
return None
def parse_one_page(url):
html = get_one_page(url)
pettern = re.compile('<img.*?alt.*?src="(.*?)" />',re.S)
items = re.findall(pettern,html)
print (len(items))
for item in items:
writeIO(item)
def writeIO(item):
filename = str(time.time()) + '.jpg'
response = requests.get(item)
Path = 'E:\\CrawlerImg\\' + filename
with open(Path,'wb') as f:
f.write(response.content)
f.close()
def each_page(url):
host = 'https://www.8484dd.com'
html = get_one_page(url)
pettern = re.compile('<li.*?<a.*?href="(.+?)".*?</a>',re.S)
items = re.findall(pettern,html)
print (len(items))
for item in items:
if re.match('/pic', item):
if re.search('.html', item):
url = host + item
parse_one_page(url)
def each_page_value(i):
url = 'https://www.8484dd.com/pic/5/index_'+ str(i) +'.html'
host = 'https://www.8484dd.com'
html = get_one_page(url)
pettern = re.compile('<li.*?<a.*?href="(.+?)".*?</a>',re.S)
items = re.findall(pettern,html)
print (len(items))
for item in items:
if re.match('/pic', item):
if re.search('.html', item):
url = host + item
parse_one_page(url)
def main(url):
    # parse_one_page fetches the page itself, so pass the URL straight through
    parse_one_page(url)
if __name__ == '__main__':
# for i in range(2,10):
# url = 'https://www.8484dd.com/pic/5/index_'+ str(i) +'.html'
# each_page(url)
    Threads = []
    for i in range(2,11):
        t = Thread(target=each_page_value, args=(i,))
        Threads.append(t)
    # iterate over the list itself: it is 0-indexed, so indexing it with
    # range(2,11) skipped the first two threads and ran past the end
    for t in Threads:
        t.start()
    for t in Threads:
        t.join()
|
[
"1093040152@qq.com"
] |
1093040152@qq.com
|
99b6572f5e721b9004712c8f912dddaaac91b703
|
712394b09a33cfb1d66726a32e051f2729fcc2a8
|
/python/api_scraping.py
|
70e864b5b95b99b9802c9377b1f046da89b386fb
|
[] |
no_license
|
AaronDonaldson74/code-challenges
|
6c8f0deade05ea33d589f98720ae2cc0090c046e
|
d266a6eee7364451b9b233abc44d8964738e6aab
|
refs/heads/master
| 2022-07-16T16:53:18.914504
| 2020-05-22T15:05:50
| 2020-05-22T15:05:50
| 256,545,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
####Mine didn't work
"""
import requests
import pprint
from bs4 import BeautifulSoup
r = requests.get('http://www.dailysmarty.com/topics/python')
html = r.text
soup = BeautifulSoup(html, 'html.parser')
# print(soup.find_all('a'))
links = soup.find_all('a')
for link in links:
if link.get('href'):
print(link.get('href'))
"""
import requests
from bs4 import BeautifulSoup
from inflection import titleize
def title_generator(links):
titles = []
def post_formatter(url):
if 'posts/' in url:
url = url.split('/')[-1]
url = url.replace('-', ' ')
url = titleize(url)
titles.append(url)
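            # e.g. "posts/intro-to-python-classes" -> "Intro To Python Classes"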
# <!--- UPDATED CODE -->
for link in links:
if link.get('href') == None:
continue
else:
post_formatter(link.get("href"))
# <!--- UPDATED CODE -->
return titles
r = requests.get('http://www.dailysmarty.com/topics/python')
soup = BeautifulSoup(r.text, 'html.parser')
links = soup.find_all('a')
titles = title_generator(links)
with open("demofile.txt", "a") as f:
    for title in titles:
        f.write(f'{title} \n')
|
[
"Dev.Aaron.Donaldson@gmail.com"
] |
Dev.Aaron.Donaldson@gmail.com
|
30f79ab832c61791b1d7ad8a735e2dd76ec18658
|
1cb77df33f8a3abaf7d9e775e0b4d51867554341
|
/mysite/settings.py
|
38d216e79024bab6347fd8ea7820213b845f3aa8
|
[] |
no_license
|
EugeneLemeshevsky/Django.test
|
6a0f50c84ce77156273d1de59fe4458b8cb1b11f
|
fe7991b139195118b5acbc7d866482e800826ada
|
refs/heads/master
| 2021-01-19T20:22:35.155499
| 2017-05-18T15:31:24
| 2017-05-18T15:31:24
| 88,501,044
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i7c%m4-6f^!gm@ji-z+_ylh2b$r+v2k4ew_ttjphqcie12@3ry'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"elemeshevsky@gmail.com"
] |
elemeshevsky@gmail.com
|
31f64762cb63b1fbd9b34933a297a9ed4438eddb
|
ffad0de28109d0156baba92b5793e6d8142ced7c
|
/server/channels_list_test.py
|
84b54743e63a3c4deed2798a8d9a3f3a3ced6293
|
[] |
no_license
|
nomii15/COMP1531-server
|
823753e11b78619b7f67c32d9f5f1f39d839b6f8
|
af00ba90cdf2fa1ce5170a7a2bf506bfe550bbd7
|
refs/heads/master
| 2021-07-17T08:26:57.074709
| 2019-11-17T07:29:44
| 2019-11-17T07:29:44
| 228,518,923
| 1
| 0
| null | 2021-01-05T18:13:55
| 2019-12-17T02:47:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
import pytest
from channels_list import channels_list
from auth_register import auth_register
from channels_create import channels_create
'''
Provide a list of all channels (and their associated details) that the authorised user is part of
'''
def test_list_one():
#setup
register1 = auth_register("validemail1@gmail.com", "validpassword1", "USER1", "validname1")
token1 = register1['token']
u_id1 = register1['u_id']
channel_id1 = channels_create(token1, 'channel1', True)
channel_list1 = channels_list(token1)
channel_list = {'channels': [{'channel_id': 1, 'name': 'channel1'}]}
    #check that the only channel the user is part of appears in the list
assert channel_list == channel_list1
def test_list_empty():
#setup
register2 = auth_register("validemail2@gmail.com", "validpassword2", "USER2", "validname2")
token2 = register2['token']
u_id2 = register2['u_id']
register3 = auth_register("validemail3@gmail.com", "validpassword3", "USER3", "validname3")
token3 = register3['token']
u_id3 = register3['u_id']
register4 = auth_register("validemail4@gmail.com", "validpassword4", "USER4", "validname4")
token4 = register4['token']
u_id4 = register4['u_id']
channel_id2 = channels_create(token2, 'channel2', True)
channel_id3 = channels_create(token3, 'channel3', True)
channel_list4 = channels_list(token4)
empty_list = {'channels' : []}
#check channel list is empty as user does not belong to any channels
assert channel_list4 == empty_list
|
[
"email@example.com"
] |
email@example.com
|
f389fa4b887a9f39521c43981f7fa8e10aff2cd2
|
779e9e07d82b745e81d135e298274d82371218ea
|
/Introduction_to_the_Bioinformatics_armory.py
|
9b0a57f87236c6d7b1686e6ceec185f9f731f7eb
|
[] |
no_license
|
TigerYassin/Rosalind
|
099660b47e8137e0a092a651908a25f34ddb76ed
|
c11921f790c8ae0453df3d967784675a93921408
|
refs/heads/master
| 2021-01-19T08:00:40.142454
| 2017-04-22T19:57:03
| 2017-04-22T19:57:03
| 87,594,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
"""
count how many times each time the each letter appears
working
"""
myString = raw_input("Enter your string")
A = myString.count("A")
G = myString.count("G")
T = myString.count("T")
C = myString.count("C")
print A,C,G,T
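# An equivalent sketch using the standard library (illustrative only):
# from collections import Counter
# counts = Counter(myString)
# print(counts["A"], counts["C"], counts["G"], counts["T"])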
|
[
"yassin@incorporated.org"
] |
yassin@incorporated.org
|
7e076554b3b3afe2172491769a0650071b0ad1b5
|
bf3d9406b1ae813c4f93a3f294483146848bd566
|
/slp/util/parallel.py
|
47e48ba7c49f98fdf3bea6393f8f65e1b6cf9879
|
[
"MIT"
] |
permissive
|
Hadryan/slp
|
9c038c0273d7c3ff109bba1b5381a1b1a256e3ed
|
9a5a2b4d56a256e953c5f2c8972b8cd8ca584ced
|
refs/heads/master
| 2020-07-25T21:32:06.970945
| 2019-06-18T17:49:37
| 2019-06-18T17:49:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,327
|
py
|
# flake8: noqa
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang, Rutgers University, Email: zhang.hang@rutgers.edu
## Modified by Thomas Wolf, HuggingFace Inc., Email: thomas@huggingface.co
## Copyright (c) 2017-2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Encoding Data Parallel"""
import threading
import functools
import torch
from torch.autograd import Variable, Function
import torch.cuda.comm as comm
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from torch.nn.parallel.parallel_apply import get_a_var
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
__all__ = ['allreduce', 'DataParallelModel', 'DataParallelCriterion',
'patch_replication_callback']
def allreduce(*inputs):
"""Cross GPU all reduce autograd operation for calculate mean and
variance in SyncBN.
"""
return AllReduce.apply(*inputs)
class AllReduce(Function):
@staticmethod
def forward(ctx, num_inputs, *inputs):
ctx.num_inputs = num_inputs
ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]
inputs = [inputs[i:i + num_inputs]
for i in range(0, len(inputs), num_inputs)]
# sort before reduce sum
inputs = sorted(inputs, key=lambda i: i[0].get_device())
results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
return tuple([t for tensors in outputs for t in tensors])
@staticmethod
def backward(ctx, *inputs):
inputs = [i.data for i in inputs]
inputs = [inputs[i:i + ctx.num_inputs]
for i in range(0, len(inputs), ctx.num_inputs)]
results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors])
class Reduce(Function):
@staticmethod
def forward(ctx, *inputs):
ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]
inputs = sorted(inputs, key=lambda i: i.get_device())
return comm.reduce_add(inputs)
@staticmethod
def backward(ctx, gradOutput):
return Broadcast.apply(ctx.target_gpus, gradOutput)
class DistributedDataParallelModel(DistributedDataParallel):
"""Implements data parallelism at the module level for the DistributedDataParallel module.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass,
gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
        Amit Agrawal. “Context Encoding for Semantic Segmentation.”
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DistributedDataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
def gather(self, outputs, output_device):
return outputs
class DataParallelModel(DataParallel):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass,
gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
        Amit Agrawal. “Context Encoding for Semantic Segmentation.”
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
def gather(self, outputs, output_device):
return outputs
def replicate(self, module, device_ids):
modules = super(DataParallelModel, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
class DataParallelCriterion(DataParallel):
"""
    Calculate loss on multiple GPUs, which balances memory usage.
    The targets are split across the specified devices by chunking in
the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`.
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
Amit Agrawal. "Context Encoding for Semantic Segmentation."
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])
>>> y = net(x)
>>> loss = criterion(y, target)
"""
def forward(self, inputs, *targets, **kwargs):
# the inputs are expected to be scattered across devices already;
# scatter only the targets here
if not self.device_ids:
return self.module(inputs, *targets, **kwargs)
targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(inputs, *targets[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
#return Reduce.apply(*outputs) / len(outputs)
#return self.gather(outputs, self.output_device).mean()
return self.gather(outputs, self.output_device)
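# Hedged usage sketch (not part of the original module): a typical training
# step pairing DataParallelModel with DataParallelCriterion. `model`,
# `criterion`, `loader`, and the device ids are illustrative assumptions;
# depending on the criterion's reduction, a final .mean() may still be needed.
#
#     net = DataParallelModel(model, device_ids=[0, 1, 2])
#     parallel_criterion = DataParallelCriterion(criterion, device_ids=[0, 1, 2])
#     for x, target in loader:
#         outputs = net(x)                      # one output per GPU, not gathered
#         loss = parallel_criterion(outputs, target)
#         loss.backward()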
def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
assert len(modules) == len(inputs)
assert len(targets) == len(inputs)
if kwargs_tup:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, target, kwargs, device=None):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
if not isinstance(target, (list, tuple)):
target = (target,)
output = module(*(input + target), **kwargs)
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, target,
kwargs, device),)
for i, (module, input, target, kwargs, device) in
enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs
###########################################################################
# Adapted from Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
#
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
Execute a replication callback `__data_parallel_replicate__` on each module created
by the original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`.
Note that, as all replicated modules are isomorphic, we assign each sub-module a context
(shared among the copies of that module on different devices).
Through this context, different copies can share information.
We guarantee that the callback on the master copy (the first copy) is called ahead
of the callbacks on any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
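# A minimal sketch of the callback contract described above (a hypothetical
# module, assuming torch.nn is imported as nn): a sub-module opts in by
# defining `__data_parallel_replicate__`, and all replicas then share `ctx`.
#
#     class SyncAwareModule(nn.Module):
#         def __data_parallel_replicate__(self, ctx, copy_id):
#             self._sync_ctx = ctx              # context shared by every copy
#             self._is_master = (copy_id == 0)  # master copy is called first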
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object to add the replication callback.
Useful when you have a customized `DataParallel` implementation.
Examples:
>>> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
>>> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
>>> patch_replication_callback(sync_bn)
# this is equivalent to
>>> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
>>> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
|
[
"geopar@central.ntua.gr"
] |
geopar@central.ntua.gr
|
a4aa71959c2f1c3dce79168ddb51c85bfaa1899c
|
cdee5cc20a5085b40f8555e7199fe19403e005c3
|
/experimental/graphicalClassification/MultiClassMajorityVote.py
|
e02402ed226105cb3faf7d5e5aab05424c9616b6
|
[
"Apache-2.0"
] |
permissive
|
visenger/aggregation
|
1e908d11df701e900d94d6545f3cc35a6c7dc915
|
82dce87eaaf14b0b2bedd29fc82c026fda2a0138
|
refs/heads/master
| 2020-03-19T03:08:52.140663
| 2017-06-21T10:32:27
| 2017-06-21T10:32:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
#!/usr/bin/env python
from __future__ import print_function
__author__ = 'greghines'
class MultiClassMajorityVote:
def __init__(self,subjectNodes,userNodes):
self.subjectNodes = subjectNodes
self.userNodes = userNodes
self.alpha = 0.6
def __classify__(self,attributeList):
for att in attributeList:
for user in self.userNodes:
user.__changeClassificationAttributes__(att)
for subject in self.subjectNodes:
subject.__changeClassificationAttributes__(att)
#what alpha value would this subject need to get correct positive?
|
[
"ggdhines@gmail.com"
] |
ggdhines@gmail.com
|
a8ed54d16d183772b2a0c8c2adc1a98cc0b4f006
|
4aa464b9fe64e12527524ae46e3fd52d8b555232
|
/extractAndSplitSDF.py
|
85d25b4018b9abb8dfe1959bc85bd7a588962811
|
[] |
no_license
|
amaunz/embrel
|
8ebf669a6835b982761b2f7e9bde9434e3fd85c9
|
683b01fec6e7fe2f022e615c4325811d8dc09d62
|
refs/heads/master
| 2020-05-15T13:17:32.389335
| 2009-06-20T16:10:28
| 2009-06-20T16:10:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,046
|
py
|
import sys
import getopt
import re
import os
import openbabel, pybel
def setHash(hash, key, value, debug=False):
if debug:
print('debug::%s' % key)
try:
hash[key] = value
except Exception:
print(key)
class SDFReader:
molecules = {}
fieldStructure = {'animals' : ['Dog_Primates','Hamster','Mouse','Rat'],'cell' : ['MultiCellCall','SingleCellCall'],'muta' : ['Mutagenicity'],'td50' : ['mg','note','mmol'],'sex' : ['Female','Male','BothSexes'],'targetS' : ['Cynomolgus', 'Rhesus']}
filename = ''
def __init__(self, filename=None):
if filename:
molecules = [mol for mol in pybel.readfile("sdf", filename)]
possibleFields = {}
for mol in molecules:
# use an explicit loop: under Python 3, map() is lazy and the side effects would never run
for x in mol.data:
setHash(possibleFields, x, 1)
dataEntryKeys = sorted(possibleFields.keys())
#for key in dataEntryKeys:
# print key
for x in molecules:
setHash(self.molecules, x.data['STRUCTURE_InChIKey'], x)
print(len(self.molecules))
self.filename = filename
def getMolecules(self,what=('ActivityOutcome','Mouse'),activity=None,format='smi'):
returnMols = []
returnActs = []
if what[0] == 'ActivityOutcome':
mols = []
if not activity:
for x in self.molecules:
try:
if self.molecules[x].data['ActivityOutcome_CPDBAS_%s' % (what[1])] != 'blank':
mols.append(self.molecules[x])
except:
pass
else:
for x in self.molecules:
try:
if self.molecules[x].data['ActivityOutcome_CPDBAS_%s' % (what[1])] == activity:
mols.append(self.molecules[x])
except:
pass
returnMols = []
if format == 'smi':
returnMols = ['%s\t# CID=%s\tactivity=%s\n' % (x.write().strip(), x.data['DSSTox_CID'], x.data['ActivityOutcome_CPDBAS_%s' % (what[1])]) for x in mols]
elif format == 'fminer':
returnMols = ['%s\t%s\n' % (x.data['DSSTox_CID'], x.write().strip()) for x in mols]
endpoint = 'CPDBAS_%s' % (what[1])
returnActs = ['%s\t%s\t%s\n' % (x.data['DSSTox_CID'], endpoint, '1' if x.data['ActivityOutcome_CPDBAS_%s' % (what[1])] == 'active' else '0') for x in mols]
elif format == 'sdf':
returnMols = ['%s' % x.write('sdf') for x in mols]
#endpoint = 'CPDBAS_%s' % (what[1])
#returnActs = map(lambda x : '%s\t%s\t%s\n' % (x.data['DSSTox_CID'], endpoint ,((x.data['ActivityOutcome_CPDBAS_%s' % (what[1])]) == 'active' and '1' or '0')), mols)
return (returnMols,returnActs)
def writeMols(self,what=('ActivityOutcome','Mouse'),activity=None,format='smi'):
nameStem = os.path.splitext(os.path.basename(self.filename))[0]
outFilename = '%s_%s_%s' % (nameStem,what[0],what[1])
if activity != None:
outFilename += '_%s' % (activity)
outFilenameStructures = outFilename + '.%s' % (format)
outFilenameActivities = outFilename + '.act'
(mols, activities) = self.getMolecules(what=what, activity=activity, format=format)
fh = open(outFilenameStructures,'w')
fh.writelines(mols)
fh.close()
if activities:
fh = open(outFilenameActivities, 'w')
fh.writelines(activities)
fh.close()
sdf = SDFReader('../cpdb/dsstox/CPDBAS_v5d_1547_20Nov2008.sdf')
#sdf.writeMols(format='fminer',activity='active')
#sdf.writeMols(format='fminer',activity='inactive')
#sdf.writeMols(format='fminer')
sdf.writeMols(format='sdf',activity='inactive')
#for mol in sdf.getMolecules(format='fminer',activity='inactive'):
# print mol
|
[
"andreas@maunz.de"
] |
andreas@maunz.de
|
596371fc40da72e9144ca944c568c4e424504ca9
|
7d7a50abe111fbd95ef0fcc89a89a891938627da
|
/halalapp/models.py
|
2b3b0449ff9cfe5655c2976df38e70f9e6fbf0e0
|
[] |
no_license
|
YounngR/HalalApp
|
398e347dae2779b2e762d5f321887b6477774f1a
|
593e7f3375e3150957587dda0e1782a244caf153
|
refs/heads/master
| 2022-03-29T23:12:16.767267
| 2019-12-01T09:13:46
| 2019-12-01T09:13:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
from django.db import models
class User(models.Model):
user_ID = models.CharField(max_length=20, blank=False)
user_PW = models.CharField(max_length=20, blank=False)
e_mail = models.CharField(max_length=20, null=True, blank=False)
phone_number = models.CharField(max_length=20, null=True, blank=False)
address = models.CharField(max_length=20, null=True, blank=False)
user_date = models.DateTimeField(null=True, blank=False)
user_gender = models.BooleanField(default=True)
class Recipe(models.Model):
user_ID = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True, blank=False)
Recipe_name = models.CharField(max_length=20, blank=False)
type_list = (('1','한식'), ('2','일식'),('3','중식'),('4','분식'),('5','야식'),('6','아시안'))
type = models.CharField(max_length=20, choices=type_list, blank=False)
Recipe_date = models.DateTimeField(null=True, blank=False)
Recipe_image = models.ImageField(null=True, blank=True, default="photo")
Recipe_etc = models.CharField(max_length=20, null=True, blank=False)
Recipe_recommend = models.IntegerField(default=0)
|
[
"syr94@daum.net"
] |
syr94@daum.net
|
a70c7ad6684dd6121db4c63219f381a8d520655e
|
eddb7468b7f1b54789893d509fbf119e0bbf1786
|
/fetcher/BlessFetcher.py
|
dfd28dc77348cc81ddf4145907bb26bade96698a
|
[] |
no_license
|
destinationluo/wedding-invitation-analysis
|
e86f6568de96c2de2fb217de9f4af011c35d86fb
|
5762faad61a2181e22004f4f0b0de2117235944c
|
refs/heads/master
| 2020-04-10T13:54:04.040371
| 2018-12-13T16:00:44
| 2018-12-13T16:00:44
| 161,061,875
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'luoqian'
from datasource.MySQLUtil import *
class BlessFetcher(object):
def __init__(self, mysqlUtil: MySQLUtil):
self.mysqlUtil = mysqlUtil
def fetch(self, callback):
sql = "SELECT * FROM bless order by id desc"
self.mysqlUtil.executeQuery(sql, callback)
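# Hedged usage sketch: the MySQLUtil constructor arguments below are
# illustrative assumptions, not the real API of datasource.MySQLUtil.
#
#     util = MySQLUtil(host='localhost', user='root', password='...', db='wedding')
#     fetcher = BlessFetcher(util)
#     fetcher.fetch(lambda rows: print('fetched %d bless records' % len(rows)))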
|
[
"359583129@qq.com"
] |
359583129@qq.com
|
bb9642dc5230886722fa77ed337f507f0a5ba8c9
|
c72f2ab329485a67ac57b19f40ec77ae37195e52
|
/tests/tools.py
|
eb91ba610df13cd4da8389cdbbf084031022c06b
|
[] |
no_license
|
YiningGuo/Habit-Tracker
|
26ab2cfc15fa74610d2625f7850d3d32eef2aa4a
|
63883259ec8037206dd58ae7ed31e47c930d155f
|
refs/heads/master
| 2021-05-11T01:34:16.319350
| 2018-03-13T07:46:45
| 2018-03-13T07:46:45
| 118,332,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
import os
import sys
import unittest
from google.appengine.ext import testbed
class TestTool(unittest.TestCase):
"""Test set up tools for unit testing"""
lib_path = os.path.abspath(os.path.join(__file__, '..', '..', 'py'))
sys.path.append(lib_path)
test_email = "test@example.com"
user_id = '123'
def set_test(self):
os.environ['ENV'] = 'prod'
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(USER_EMAIL=self.test_email, USER_ID=self.user_id,
USER_IS_ADMIN='1', overwrite=True)
self.testbed.init_user_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def set_user(self, email, id):
self.test_email = email
self.user_id = id
self.testbed.setup_env(USER_EMAIL=email, USER_ID=id)
def set_non_admin(self):
self.testbed.setup_env(USER_IS_ADMIN='0')
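# Hedged sketch of a concrete test case built on TestTool; the handler under
# test is an illustrative assumption, not part of this module.
#
#     class HabitPermissionTest(TestTool):
#         def setUp(self):
#             self.set_test()        # activate the GAE testbed and stubs
#         def test_non_admin(self):
#             self.set_non_admin()   # downgrade the stubbed user
#             # ... exercise a handler and assert it rejects the request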
|
[
"yguo9310@uni.sydney.edu.au"
] |
yguo9310@uni.sydney.edu.au
|
39ddeb9ad873ed4901adbf3640031f907f3503a3
|
2b5bc632859ca01b6b2feae6186b1314ed8c5187
|
/everpad/provider/daemon.py
|
5b6b49be3c92f2d0a2ee5e6669c92c7f6b8189b9
|
[] |
no_license
|
mcardillo55/everpad
|
c64e2d35bd4ccceff901d9720030dbb8adfcef56
|
ab6271a5b73eedf81d0c31e351e567282dbd6685
|
refs/heads/master
| 2020-12-25T05:55:05.811394
| 2012-12-19T03:36:25
| 2012-12-19T03:36:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,175
|
py
|
import sys
sys.path.insert(0, '../..')
from everpad.provider.service import ProviderService
from everpad.provider.sync import SyncThread
from everpad.provider.tools import set_auth_token, get_db_session
from everpad.tools import get_auth_token, print_version
from everpad.provider import models
from PySide.QtCore import Slot, QSettings
import dbus
import dbus.mainloop.glib
import signal
import fcntl
import os
import getpass
import argparse
if 'kde' in os.environ.get('DESKTOP_SESSION', ''): # KDE needs a QWidget-capable app for wallet access
from PySide.QtGui import QApplication
App = QApplication
else:
from PySide.QtCore import QCoreApplication
App = QCoreApplication
class ProviderApp(App):
def __init__(self, verbose, *args, **kwargs):
App.__init__(self, *args, **kwargs)
self.settings = QSettings('everpad', 'everpad-provider')
self.verbose = verbose
session_bus = dbus.SessionBus()
self.bus = dbus.service.BusName("com.everpad.Provider", session_bus)
self.service = ProviderService(self, session_bus, '/EverpadProvider')
self.sync_thread = SyncThread(self)
self.sync_thread.sync_state_changed.connect(
Slot(int)(self.service.sync_state_changed),
)
self.sync_thread.data_changed.connect(
Slot()(self.service.data_changed),
)
if get_auth_token():
self.sync_thread.start()
self.service.qobject.authenticate_signal.connect(
self.on_authenticated,
)
self.service.qobject.remove_authenticate_signal.connect(
self.on_remove_authenticated,
)
@Slot(str)
def on_authenticated(self, token):
set_auth_token(token)
self.sync_thread.start()
@Slot()
def on_remove_authenticated(self):
self.sync_thread.quit()
set_auth_token('')
session = get_db_session()
session.query(models.Note).delete(
synchronize_session='fetch',
)
session.query(models.Resource).delete(
synchronize_session='fetch',
)
session.query(models.Notebook).delete(
synchronize_session='fetch',
)
session.query(models.Tag).delete(
synchronize_session='fetch',
)
session.commit()
def log(self, data):
if self.verbose:
print(data)
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
fp = open('/tmp/everpad-provider-%s.lock' % getpass.getuser(), 'w')
fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
try:
os.mkdir(os.path.expanduser('~/.everpad/'))
os.mkdir(os.path.expanduser('~/.everpad/data/'))
except OSError:
pass
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true', help='verbose output')
parser.add_argument('--version', '-v', action='store_true', help='show version')
args = parser.parse_args(sys.argv[1:])
if args.version:
print_version()
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
app = ProviderApp(args.verbose, sys.argv)
app.exec_()
if __name__ == '__main__':
main()
|
[
"nvbn.rm@gmail.com"
] |
nvbn.rm@gmail.com
|
4726012f426c9e8943505c2ecbca998aa912a06a
|
246e9200a834261eebcf1aaa54da5080981a24ea
|
/project-euler/26-50/distinct-powers.py
|
548316d3dcc396ed31b53767aa4519b6d076d20d
|
[] |
no_license
|
kalsotra2001/practice
|
db435514b7b57ce549b96a8baf64fad8f579da18
|
bbc8a458718ad875ce5b7caa0e56afe94ae6fa68
|
refs/heads/master
| 2021-12-15T20:48:21.186658
| 2017-09-07T23:01:56
| 2017-09-07T23:01:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
powers = set()
for i in range(2, 101):
for j in range(2, 101):
powers.add(i ** j)
print(len(powers))
|
[
"jacquelineluo95@gmail.com"
] |
jacquelineluo95@gmail.com
|
08de3983cade375a46349f7de656f9ca3a921a9e
|
89b45e528f3d495f1dd6f5bcdd1a38ff96870e25
|
/PythonCrashCourse/chapter_06/exercise6_05.py
|
b03a04f3a086ec1337414ecd27d147eb1ba55d24
|
[] |
no_license
|
imatyukin/python
|
2ec6e712d4d988335fc815c7f8da049968cc1161
|
58e72e43c835fa96fb2e8e800fe1a370c7328a39
|
refs/heads/master
| 2023-07-21T13:00:31.433336
| 2022-08-24T13:34:32
| 2022-08-24T13:34:32
| 98,356,174
| 2
| 0
| null | 2023-07-16T02:31:48
| 2017-07-25T22:45:29
|
Python
|
UTF-8
|
Python
| false
| false
| 660
|
py
|
#!/usr/bin/env python3
rivers = {
'amazon': 'brazil',
'nile': 'egypt',
'mississippi': 'usa',
}
for river, country in rivers.items():
if river == 'mississippi':
print("The " + river.title() + " runs through " + country.upper() + ".")
else:
print("The " + river.title() + " runs through " + country.title() + ".")
print("\nThe following rivers have been mentioned:")
for river in set(rivers.keys()):
print(river.title())
print("\nThe following countries have been mentioned:")
for country in set(rivers.values()):
if country == 'usa':
print(country.upper())
else:
print(country.title())
|
[
"i.matukin@gmail.com"
] |
i.matukin@gmail.com
|
5455de00b15db2d7287cf466dab2e7fcaf5920cb
|
9bc780c6414ce29fde8c8294db88a2464b563a59
|
/myst/wsgi.py
|
a317d395bcdad76cde1aa1d4e78cca2452abc915
|
[] |
no_license
|
me3onik/test
|
36d63105c9193971f59b4905cac7aadd740aa76f
|
b5057ef8a0e20c37b7cadcddb9e80b5cc20113e9
|
refs/heads/master
| 2020-05-26T08:09:12.176370
| 2019-06-13T07:55:29
| 2019-06-13T07:55:29
| 188,161,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
WSGI config for myst project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myst.settings')
application = get_wsgi_application()
|
[
"me3onik@gmail.com"
] |
me3onik@gmail.com
|
b07e20842b46cac3da56fd62ca2c28f96062795b
|
f34184e6060b58680a9768c58f53e10ca17a0dc3
|
/manage.py
|
894acc68496b98d22c16a94be490e713ff01efaa
|
[] |
no_license
|
ExtremelySeriousChicken/instaTravel
|
ee150249e6a4d061bfff1ec7f3ffa4ee11fff036
|
d4dbb61ccea838197639f872cdfcd96af61eb91f
|
refs/heads/master
| 2021-01-16T21:27:29.566892
| 2015-09-30T21:42:21
| 2015-09-30T21:42:21
| 42,782,219
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
#!/usr/bin/env python
import os
from app import create_app
from flask.ext.script import Manager, Shell
app = create_app('default')
manager = Manager(app)
if __name__ == '__main__':
manager.run()
|
[
"wijaya@umich.edu"
] |
wijaya@umich.edu
|
8ac9678fb6079ee9bfdb77dac2a762f5d30f718e
|
2f1e83dc48cca5c14fad53ad95a4b920756508e4
|
/src/z3c/menu/simple/menu.py
|
02483430e7ffc333cd90b1dee8efeaea95e67078
|
[
"ZPL-2.1"
] |
permissive
|
ZeitOnline/z3c.menu.simple
|
c5839c70a3eaccc3c8a1a2f1e72de96b030f7176
|
fc6f8ce8fddb5918a18e997e3adb53a58547b419
|
refs/heads/main
| 2023-02-22T19:45:47.983639
| 2019-12-12T10:14:33
| 2019-12-12T10:14:33
| 220,233,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,017
|
py
|
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id: menu.py 114728 2010-07-14 06:53:53Z icemac $
"""
__docformat__ = 'restructuredtext'
import zope.component
import zope.interface
from zope.contentprovider.interfaces import IContentProvider
from zope.viewlet import viewlet
from zope.viewlet import manager
from zope.app.component import hooks
from zope.app.publisher.interfaces.browser import IBrowserMenu
from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile
from zope.traversing.browser.absoluteurl import absoluteURL
from z3c.i18n import MessageFactory as _
from z3c.menu.simple.interfaces import ISimpleMenuItem
from z3c.menu.simple.interfaces import ITabMenu
from z3c.menu.simple.interfaces import ITab
from z3c.menu.simple.interfaces import IAction
# ISimpleMenuItem implementation
@zope.interface.implementer(ISimpleMenuItem)
class SimpleMenuItem(viewlet.ViewletBase):
"""Selectable menu item."""
template = ViewPageTemplateFile('menu_item.pt')
selectedViewNames = None
activeCSS = u'active-menu-item'
inActiveCSS = u'inactive-menu-item'
@property
def title(self):
return _(self.__name__)
@property
def url(self):
return u''
@property
def extras(self):
return {}
@property
def selected(self):
name = self.__parent__.__name__
if self.selectedViewNames is None:
if name == self.url:
return True
elif name in self.selectedViewNames:
return True
return False
@property
def css(self):
if self.selected:
return self.activeCSS
else:
return self.inActiveCSS
def render(self):
"""Return the template with the option 'menus'"""
return self.template()
class ContextMenuItem(SimpleMenuItem):
"""Menu item viewlet generating context related links."""
urlEndings = []
viewURL = u''
@property
def selected(self):
requestURL = self.request.getURL()
for urlEnding in self.urlEndings:
if requestURL.endswith(urlEnding):
return True
return False
@property
def url(self):
contextURL = absoluteURL(self.context, self.request)
return contextURL + '/' + self.viewURL
class GlobalMenuItem(SimpleMenuItem):
"""Menu item viewlet generating global/site related links."""
urlEndings = []
viewURL = u''
@property
def selected(self):
requestURL = self.request.getURL()
for urlEnding in self.urlEndings:
if requestURL.endswith(urlEnding):
return True
return False
@property
def url(self):
siteURL = absoluteURL(hooks.getSite(), self.request)
return siteURL + '/' + self.viewURL
# ITabMenu implementation
@zope.interface.implementer(ITabMenu)
class TabMenu(object):
"""Tab menu offering tabs and actions."""
def __init__(self, context, request, view):
self.__parent__ = view
self.context = context
self.request = request
def update(self):
"""See zope.contentprovider.interfaces.IContentProvider"""
self.tabs = zope.component.queryMultiAdapter(
(self.context, self.request, self.__parent__), IContentProvider,
'ITab')
if self.tabs is not None:
self.tabs.update()
self.actions = zope.component.queryMultiAdapter(
(self.context, self.request, self.__parent__), IContentProvider,
'IAction')
if self.actions is not None:
self.actions.update()
def render(self):
"""See zope.contentprovider.interfaces.IContentProvider"""
result = u''
if self.tabs is not None:
result += self.tabs.render()
if self.actions is not None:
result += self.actions.render()
return result
@zope.interface.implementer(ITab)
class Tab(manager.WeightOrderedViewletManager):
"""Tab Menu"""
def render(self):
"""Return the template with the option 'menus'"""
if not self.viewlets:
return u''
return self.template()
@zope.interface.implementer(ISimpleMenuItem)
class TabItem(SimpleMenuItem):
"""Base implementation for menu items."""
template = ViewPageTemplateFile('tab_item.pt')
@zope.interface.implementer(IAction)
class Action(manager.WeightOrderedViewletManager):
"""Action Menu"""
def render(self):
"""Return the template with the option 'menus'"""
if not self.viewlets:
return u''
return self.template()
@zope.interface.implementer(ISimpleMenuItem)
class ActionItem(SimpleMenuItem):
"""Base implementation for action items."""
template = ViewPageTemplateFile('action_item.pt')
class BrowserMenu(TabMenu):
"""Menu Action Menu Items
A special tab menu, which takes its items from a browser menu
"""
template = ViewPageTemplateFile('browser_menu_action_item.pt')
# This is the name of the menu
menuId = None
def update(self):
menu = zope.component.getUtility(IBrowserMenu, self.menuId)
self.title = menu.title
self.menuItems = menu.getMenuItems(self.context, self.request)
def render(self):
"""Return the template with the option 'menus'"""
if not self.menuItems:
return u''
return self.template()
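# Hedged usage sketch: a concrete menu only needs to set `menuId` to the name
# of a registered browser menu ('zmi_views' here is an illustrative id):
#
#     class ViewsMenu(BrowserMenu):
#         menuId = 'zmi_views'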
|
[
"tom.lazar@zeit.de"
] |
tom.lazar@zeit.de
|