| hexsha (string, len 40) | size (int64, 5-2.06M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 3-248) | max_stars_repo_name (string, len 5-125) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (list, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3-248) | max_issues_repo_name (string, len 5-125) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (list, len 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3-248) | max_forks_repo_name (string, len 5-125) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (list, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 5-2.06M) | avg_line_length (float64, 1-1.02M) | max_line_length (int64, 3-1.03M) | alphanum_fraction (float64, 0-1) | count_classes (int64, 0-1.6M) | score_classes (float64, 0-1) | count_generators (int64, 0-651k) | score_generators (float64, 0-1) | count_decorators (int64, 0-990k) | score_decorators (float64, 0-1) | count_async_functions (int64, 0-235k) | score_async_functions (float64, 0-1) | count_documentation (int64, 0-1.04M) | score_documentation (float64, 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 594cc653ec79a656a999da000662af797c265edc | 669 | py | Python | source/try_init_models.py | tuanle618/deepArt-generation | bfa11d9f2a825ed53420f85adf3ffe23966b42be | ["MIT"] | 8 | 2019-03-25T14:53:55.000Z | 2022-01-09T11:08:30.000Z | source/try_init_models.py | ptl93/deepArt-generation | bfa11d9f2a825ed53420f85adf3ffe23966b42be | ["MIT"] | 10 | 2020-01-28T21:56:49.000Z | 2022-02-10T00:10:30.000Z | source/try_init_models.py | ptl93/deepArt-generation | bfa11d9f2a825ed53420f85adf3ffe23966b42be | ["MIT"] | 5 | 2019-03-18T13:46:26.000Z | 2022-02-20T15:05:56.000Z |
# -*- coding: utf-8 -*-
"""
@title: try_init_models.py
@author: Tuan Le
@email: tuanle@hotmail.de
"""
from dcgan import DCGAN
from vae import VAE
if __name__ == "__main__":
    print("Init DCGAN_1 model...")
    dcgan_1 = DCGAN(name='DCGAN_1')
    print("Init DCGAN_2 model...")
    dcgan_2 = DCGAN(name='DCGAN_2')
    print("Init DCGAN_3 model...")
    dcgan_3 = DCGAN(name='DCGAN_3')
    print('Init VAE_1 model...')
    vae_1 = VAE(name='VAE_1')
    print('Init VAE_2 model...')
    vae_2 = VAE(name='VAE_2')
    print('Init VAE_3 model...')
    vae_3 = VAE(name='VAE_3')
    print('Init VAE_4 model...')
    vae_4 = VAE(name='VAE_4')
| 20.90625 | 35 | 0.588939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.475336 |
| 3ca0c10499ba17cd0bb023edc1433da2fe3b0c6e | 1,144 | py | Python | 03. Programacion orientada a objetos/12. sobrecarga relacional/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | ["MIT"] | null | null | null | 03. Programacion orientada a objetos/12. sobrecarga relacional/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | ["MIT"] | null | null | null | 03. Programacion orientada a objetos/12. sobrecarga relacional/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | ["MIT"] | null | null | null |
class Persona:
    def __init__(self, nombre, edad):
        self.nombre = nombre
        self.edad = edad
    def __eq__(self, objeto2):
        if self.edad == objeto2.edad:
            return True
        else:
            return False
    def __ne__(self, objeto2):
        if self.edad != objeto2.edad:
            return True
        else:
            return False
    def __gt__(self, objeto2):
        if self.edad > objeto2.edad:
            return True
        else:
            return False
    def __ge__(self, objeto2):
        if self.edad >= objeto2.edad:
            return True
        else:
            return False
    def __lt__(self, objeto2):
        if self.edad < objeto2.edad:
            return True
        else:
            return False
    def __le__(self, objeto2):
        if self.edad <= objeto2.edad:
            return True
        else:
            return False
# main block
persona1 = Persona('juan', 22)
persona2 = Persona('ana', 20)
if persona1 == persona2:
    print("Las dos personas tienen la misma edad.")
else:
    print("No tienen la misma edad.")
| 22.88 | 52 | 0.523601 | 940 | 0.821678 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.083916 |
| 3ca23892448af2cabbc53d9df0bfd9fc4244b346 | 1,416 | py | Python | crack-data-structures-and-algorithms/leetcode/sort_list_q148.py | Watch-Later/Eureka | 3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35 | ["MIT"] | 20 | 2016-05-16T11:09:04.000Z | 2021-12-08T09:30:33.000Z | crack-data-structures-and-algorithms/leetcode/sort_list_q148.py | Watch-Later/Eureka | 3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35 | ["MIT"] | 1 | 2018-12-30T09:55:31.000Z | 2018-12-30T14:08:30.000Z | crack-data-structures-and-algorithms/leetcode/sort_list_q148.py | Watch-Later/Eureka | 3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35 | ["MIT"] | 11 | 2016-05-02T09:17:12.000Z | 2021-12-08T09:30:35.000Z |
# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
    def sortList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        return merge_sort_list(head)
def merge_sort_list(head):
    if not head or not head.next:
        return head
    slow = fast = head
    while fast.next and fast.next.next:
        fast = fast.next.next
        slow = slow.next
    # Split into two lists.
    # Why does head2 start from the node after mid (slow)?
    # Assume we have only two nodes, A -> B -> ^
    # The strategy used here is essentially floor((l + r) / 2), which would get
    # stuck on A forever if mid itself became the head of the second half.
    # Logically, mid with the floor strategy is the **last element** of the first part.
    head2 = slow.next
    slow.next = None
    l1 = merge_sort_list(head)
    l2 = merge_sort_list(head2)
    return merge_lists(l1, l2)
def merge_lists(l1, l2):
    # Introduce a dummy node to simplify the merge.
    # No need to check l1 & l2 up front.
    dummy = ListNode(0)
    p = dummy
    while l1 and l2:
        if l1.val < l2.val:
            p.next = l1
            l1 = l1.next
        else:
            p.next = l2
            l2 = l2.next
        p = p.next
    if l1:
        p.next = l1
    if l2:
        p.next = l2
    return dummy.next
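# Illustrative usage, not part of the original solution file: sorts the hypothetical
# list 4 -> 2 -> 1 -> 3 and prints 1, 2, 3, 4 on separate lines.
if __name__ == "__main__":
    head = ListNode(4)
    head.next = ListNode(2)
    head.next.next = ListNode(1)
    head.next.next.next = ListNode(3)
    node = Solution().sortList(head)
    while node:
        print(node.val)
        node = node.next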
| 22.47619 | 89 | 0.57274 | 264 | 0.186441 | 0 | 0 | 0 | 0 | 0 | 0 | 501 | 0.353814 |
| 3ca2ace31bf9ede1d629dd5fbae03c55bc75f2bf | 71 | py | Python | labs/py3code.py | turing4ever/illustrated-python-3-course | d1faff57590713fcd1c6a9215529d6f9c629b046 | ["MIT"] | 57 | 2018-04-25T21:57:07.000Z | 2021-12-21T19:09:00.000Z | labs/py3code.py | radovankavicky/illustrated-python-3-course | d1faff57590713fcd1c6a9215529d6f9c629b046 | ["MIT"] | 4 | 2018-04-30T05:32:46.000Z | 2021-12-06T17:55:36.000Z | labs/py3code.py | radovankavicky/illustrated-python-3-course | d1faff57590713fcd1c6a9215529d6f9c629b046 | ["MIT"] | 26 | 2018-04-27T06:11:35.000Z | 2021-04-11T12:07:37.000Z |
# place super_test.py code here
# place keyword_test.py code here
| 8.875 | 33 | 0.732394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.901408 |
| 3ca2e7b053503c5f1274ef05c3605bdeeddc592f | 71,712 | py | Python | Source Codes/CDBC_Source_Code.py | CDBCTool/CDBC | 70e64241e4fb7687832e3771f316cb036f6fc3c7 | ["MIT"] | 13 | 2019-05-13T22:45:32.000Z | 2022-02-27T07:19:16.000Z | Source Codes/CDBC_Source_Code.py | CDBCTool/CDBC | 70e64241e4fb7687832e3771f316cb036f6fc3c7 | ["MIT"] | 2 | 2019-09-03T03:57:06.000Z | 2021-11-21T14:01:31.000Z | Source Codes/CDBC_Source_Code.py | CDBCTool/CDBC | 70e64241e4fb7687832e3771f316cb036f6fc3c7 | ["MIT"] | 3 | 2019-11-04T17:05:02.000Z | 2021-12-29T18:14:51.000Z |
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys,os,time
from scipy.stats import gamma, norm, beta
import matplotlib.pyplot as plt
from datetime import date, timedelta
import numpy as np
import tkinter
from os import listdir
from os.path import isfile, join
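# Descriptive note (added; a hedged reading of the helper below): sorted_values returns Sim
# in its original order, but with its smallest values replaced by 0, using as many zeros as
# Obs contains. In effect the simulated series is forced to have at least as many dry (zero)
# entries as the observed series before the distributions are fitted.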
def sorted_values(Obs,Sim):
count = 0
for i in range(len(Obs)):
if Obs[i] == 0:
count += 1
Rank = [i+1 for i in range(len(Obs))]
Dict = dict(zip(Rank,Sim))
SortedSim = sorted(Dict.values())
SortedRank = sorted(Dict, key=Dict.get)
for i in range(count):
SortedSim[i] = 0
ArrangedDict = dict(zip(SortedRank,SortedSim))
SortedDict_by_Rank = sorted(ArrangedDict.items())
ArrangedSim = [v for k,v in SortedDict_by_Rank]
return ArrangedSim
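# Descriptive note (added; a hedged reading): sorted_values_thresh zeroes every future value
# in Fut that falls below the smallest positive value of the historical simulation Sim, so
# values under the calibrated wet-day threshold are treated as dry days.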
def sorted_values_thresh(Sim, Fut):
try:
Min_Positive_Value_Sim = min(i for i in Sim if i > 0)
except:
Min_Positive_Value_Sim = 0
for i in range(len(Fut)):
if Fut[i] < Min_Positive_Value_Sim:
Fut[i] = 0
return Fut
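# Hedged sketch (added, not part of the original tool): the core Rainfall/Gamma quantile-mapping
# step used in BiasCorrectH, shown in isolation. Gamma parameters come from the method of moments
# (shape = mean**2/var, scale = var/mean), mirroring the code further below; the function name is
# hypothetical and it assumes non-degenerate (non-zero) variances.
def gamma_quantile_map_sketch(obs_wet, mod_wet, mod_series):
    # Fit gamma parameters to the wet-day observations and wet-day model values.
    mo, vo = np.mean(obs_wet), np.std(obs_wet) ** 2
    mg, vg = np.mean(mod_wet), np.std(mod_wet) ** 2
    o_alpha, o_beta = mo ** 2 / vo, vo / mo
    g_alpha, g_beta = mg ** 2 / vg, vg / mg
    # Map each model value through the model CDF, then invert with the observed parameters.
    prob = gamma.cdf(mod_series, g_alpha, scale=g_beta)
    return gamma.ppf(prob, o_alpha, scale=o_beta)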
class TitleBar(QDialog):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.setWindowFlags(Qt.FramelessWindowHint)
StyleTitleBar='''QDialog{
background-color: rgb(2,36,88);
}
QLabel{
color: rgb(0, 255, 255);
font: 11pt "MS Shell Dlg 2";
}'''
self.setStyleSheet(StyleTitleBar)
self.setAutoFillBackground(True)
self.setFixedSize(750,30)
Style_minimize='''QToolButton{
background-color: transparent;
color: rgb(255, 255, 255);
border: none;
}
QToolButton:hover{
background-color: rgb(66, 131, 221,230);
border: none;
}'''
Style_close='''QToolButton{
background-color: rgb(217, 0, 0);
color: rgb(255, 255, 255);
border: none;
}
QToolButton:hover{
background-color: rgb(255, 0, 0);
border: none;
}'''
Font=QFont('MS Shell Dlg 2',11)
Font.setBold(True)
self.minimize = QToolButton(self)
self.minimize.setText('–')
self.minimize.setFixedHeight(20)
self.minimize.setFixedWidth(25)
self.minimize.setStyleSheet(Style_minimize)
self.minimize.setFont(Font)
self.close = QToolButton(self)
self.close.setText(u"\u00D7")
self.close.setFixedHeight(20)
self.close.setFixedWidth(45)
self.close.setStyleSheet(Style_close)
self.close.setFont(Font)
image = QPixmap(r"Interpolation-2.png")
labelImg =QLabel(self)
labelImg.setFixedSize(QSize(20,20))
labelImg.setScaledContents(True)
labelImg.setPixmap(image)
labelImg.setStyleSheet('border: none;')
label = QLabel(self)
label.setText(" Climate Data Bias Corrector (RAIN, TEMP, SRAD)")
label.setFont(Font)
label.setStyleSheet('border: none;')
hbox=QHBoxLayout(self)
hbox.addWidget(labelImg)
hbox.addWidget(label)
hbox.addWidget(self.minimize)
hbox.addWidget(self.close)
hbox.insertStretch(2,600)
hbox.setSpacing(1)
hbox.setContentsMargins(5,0,5,0)
self.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Fixed)
self.maxNormal=False
self.close.clicked.connect(self.closeApp)
self.minimize.clicked.connect(self.showSmall)
def showSmall(self):
widget.showMinimized();
def closeApp(self):
widget.close()
def mousePressEvent(self,event):
if event.button() == Qt.LeftButton:
widget.moving = True
widget.offset = event.pos()
def mouseMoveEvent(self,event):
if widget.moving:
widget.move(event.globalPos()-widget.offset)
class HFTab(QTabWidget):
def __init__(self, parent = None):
super(HFTab, self).__init__(parent)
self.HTab = QWidget()
self.FTab = QWidget()
self.setStyleSheet('QTabBar { font: bold }')
self.addTab(self.HTab,"For Historical Data")
self.addTab(self.FTab,"For Future Data")
self.HTabUI()
self.FTabUI()
self.started = False
def HTabUI(self):
grid = QGridLayout()
grid.addWidget(self.input(), 0, 0)
grid.addWidget(self.output(), 1, 0)
grid.addWidget(self.method(), 2, 0)
grid.addWidget(self.progress(), 3, 0)
grid.setContentsMargins(0,0,0,0)
## self.setTabText(0,"Historical")
self.HTab.setLayout(grid)
def input(self):
##########Layout for taking input climate data to be bias corrected ##########
gBox = QGroupBox("Inputs:")
layout1 = QGridLayout()
self.Obsfile = QLineEdit()
self.browse2 = QPushButton("...")
self.browse2.setMaximumWidth(25)
self.browse2.clicked.connect(self.browse2_file)
self.q1 = QPushButton("?")
self.q1.setMaximumWidth(15)
self.q1.clicked.connect(self.Info1)
self.Obsfile.setPlaceholderText("File with observed climate data (*.csv or *.txt)")
layout1.addWidget(self.Obsfile,1,0,1,3)
layout1.addWidget(self.q1,1,3,1,1)
layout1.addWidget(self.browse2,1,4,1,1)
self.ModHfile = QLineEdit()
self.ModHfile.setPlaceholderText("File with GCM outputs (*.csv or *.txt)")
self.q2 = QPushButton("?")
self.q2.setMaximumWidth(15)
self.q2.clicked.connect(self.Info2)
self.browse3 = QPushButton("...")
self.browse3.setMaximumWidth(25)
self.browse3.clicked.connect(self.browse3_file)
layout1.addWidget(self.ModHfile,2,0,1,3)
layout1.addWidget(self.q2,2,3,1,1)
layout1.addWidget(self.browse3,2,4,1,1)
## ##########Layout for taking comma delimited vs tab delimited################################
## sublayout1 = QGridLayout()
##
## self.label1 = QLabel("Input Format:\t")
## self.b1 = QRadioButton("Comma Delimated (*.csv)")
## #self.b1.setChecked(True)
## self.b2 = QRadioButton("Tab Delimited (*.txt)")
##
## self.b1.toggled.connect(lambda:self.btnstate(self.b1))
## self.b2.toggled.connect(lambda:self.btnstate(self.b2))
##
## sublayout1.addWidget(self.label1,1,0)
## sublayout1.addWidget(self.b1,1,1)
## sublayout1.addWidget(self.b2,1,2)
## layout1.addLayout(sublayout1,3,0)
gBox.setLayout(layout1)
return gBox
def output(self):
##########Layout for output file location and interpolation##########
gBox = QGroupBox("Outputs:")
layout4 = QGridLayout()
self.outputfile_location = QLineEdit()
self.outputfile_location.setPlaceholderText("Folder to save bias corrected GCM outputs")
self.browse4 = QPushButton("...")
self.browse4.setMaximumWidth(25)
self.browse4.clicked.connect(self.browse4_file)
layout4.addWidget(self.outputfile_location,1,0,1,3)
layout4.addWidget(self.browse4,1,3,1,1)
########################Layout for taking comma delimited vs tab delimited################################
sublayout2 = QGridLayout()
output_label = QLabel("Output Format:\t")
self.b3 = QRadioButton("Comma Delimated (*.csv)")
#self.b3.setChecked(True)
self.b4 = QRadioButton("Tab Delimited (*.txt)")
self.b3.toggled.connect(lambda:self.btn2state(self.b3))
self.b4.toggled.connect(lambda:self.btn2state(self.b4))
sublayout2.addWidget(output_label,1,0)
sublayout2.addWidget(self.b3,1,1)
sublayout2.addWidget(self.b4,1,2)
layout4.addLayout(sublayout2,2,0)
gBox.setLayout(layout4)
return gBox
def method(self):
########################Layout for taking methods of Bias Correction ################################
gBox = QGroupBox("Variable/Distribution")
layout5 = QGridLayout()
self.b5 = QRadioButton("Rainfall/Gamma")
#self.b3.setChecked(True)
self.b6 = QRadioButton("Temperature/Normal")
self.b7 = QRadioButton("Solar Radiation/Beta")
self.b5.toggled.connect(lambda:self.btn3state(self.b5))
self.b6.toggled.connect(lambda:self.btn3state(self.b6))
self.b7.toggled.connect(lambda:self.btn3state(self.b7))
self.show_hide = QPushButton("Show Details")
Font=QFont()
Font.setBold(True)
#self.show_hide.setFont(Font)
self.show_hide.setCheckable(True)
#self.show_hide.toggle()
self.show_hide.clicked.connect(self.ShowHide)
self.show_hide.setFixedWidth(90)
self.show_hide.setFixedHeight(25)
Style_show_hide_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(66, 131, 221);
border: none;
}
QPushButton:Checked{
background-color: rgb(66, 131, 221);
border: none;
}
QPushButton:hover{
background-color: rgb(66, 131, 221,230);
border: none;
}
"""
self.show_hide.setStyleSheet(Style_show_hide_Button)
self.show_plots = QPushButton("Show Plots")
self.show_plots.clicked.connect(self.ShowPlots)
self.show_plots.setFixedWidth(75)
self.show_plots.setFixedHeight(25)
self.show_plots.setStyleSheet(Style_show_hide_Button)
self.start = QPushButton("Run")
self.start.setFixedWidth(50)
self.start.setFixedHeight(25)
Style_Run_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(0,121,0);
border-color: none;
border: none;
}
QPushButton:hover{
background-color: rgb(0,121,0,230);
}
"""
self.start.clicked.connect(self.start_correctionH)
#self.start.setFont(Font)
self.start.setStyleSheet(Style_Run_Button)
self.stop = QPushButton("Cancel")
self.stop.setMaximumWidth(60)
self.stop.setFixedHeight(25)
Style_Cancel_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(180,0,0,240);
border-color: none;
border: none;
}
QPushButton:hover{
background-color: rgb(180,0,0,220);
}
"""
self.stop.clicked.connect(self.stop_correctionH)
#self.stop.setFont(Font)
self.stop.setStyleSheet(Style_Cancel_Button)
layout5.addWidget(self.b5,1,1)
layout5.addWidget(self.b6,1,2)
layout5.addWidget(self.b7,1,3)
layout5.addWidget(self.show_hide,1,7)
layout5.addWidget(self.start,1,4)
layout5.addWidget(self.stop,1,6)
layout5.addWidget(self.show_plots,1,5)
## layout5.addWidget(self.b5,1,1)
## layout5.addWidget(self.b6,1,2)
## layout5.addWidget(self.b7,1,3)
## layout5.addWidget(self.show_hide,2,5)
## layout5.addWidget(self.start,1,4)
## layout5.addWidget(self.stop,2,4)
## layout5.addWidget(self.show_plots,1,5)
gBox.setLayout(layout5)
return gBox
########## Layout for progress of Bias Correction ##########
def progress(self):
gBox = QGroupBox()
layout6 = QVBoxLayout()
STYLE2 = """
QProgressBar{
text-align: center;
}
QProgressBar::chunk {
background-color: rgb(0,121,0);
}
"""
self.status = QLabel('')
self.progressbar = QProgressBar()
## self.progressbarfinal = QProgressBar()
## self.progressbar.setMinimum(1)
self.progressbar.setFixedHeight(13)
## self.progressbarfinal.setFixedHeight(13)
self.progressbar.setStyleSheet(STYLE2)
## self.progressbarfinal.setStyleSheet(STYLE2)
self.textbox = QTextEdit()
self.textbox.setReadOnly(True)
self.textbox.moveCursor(QTextCursor.End)
self.textbox.hide()
self.scrollbar = self.textbox.verticalScrollBar()
layout6.addWidget(self.status)
layout6.addWidget(self.progressbar)
## layout6.addWidget(self.progressbarfinal)
layout6.addWidget(self.textbox)
gBox.setLayout(layout6)
return gBox
########################### Control Buttons ####################################################
def browse2_file(self):
Obs_file = QFileDialog.getOpenFileName(self,caption = "Open File",directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimated (*.csv);;Tab Delimated (*.txt)")
self.Obsfile.setText(QDir.toNativeSeparators(Obs_file))
def browse3_file(self):
ModH_file = QFileDialog.getOpenFileName(self,caption = "Open File", directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimated (*.csv);;Tab Delimated (*.txt)")
self.ModHfile.setText(QDir.toNativeSeparators(ModH_file))
def browse4_file(self):
output_file = QFileDialog.getExistingDirectory(self, "Save File in Folder", r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
QFileDialog.ShowDirsOnly)
self.outputfile_location.setText(QDir.toNativeSeparators(output_file))
def Info1(self):
QMessageBox.information(self, "Information About Input Files (Observed)",
'''Sample input (.csv or .txt) should be same as it is shown in Sample Example:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (Obs).csv
''')
def Info2(self):
QMessageBox.information(self, "Information About Input File (Model)",
'''Sample input (.csv or .txt) should be same as it is shown in Sample Example:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (Mod).csv
''')
## def btnstate(self,b):
## if b.text() == "Comma Delimated (*.csv)" and b.isChecked() == True:
## self.seperator = ','
## self.seperatorname = '.csv'
## if b.text() == "Tab Delimited (*.txt)" and b.isChecked() == True:
## self.seperator = '\t'
## self.seperatorname = '.txt'
def btn2state(self,b):
if b.text() == "Comma Delimated (*.csv)" and b.isChecked() == True:
self.seperator2 = ','
self.seperatorname2 = '.csv'
if b.text() == "Tab Delimited (*.txt)" and b.isChecked() == True:
self.seperator2 = '\t'
self.seperatorname2 = '.txt'
def btn3state(self,b):
if b.text() == "Rainfall/Gamma" and b.isChecked() == True:
self.methodname = b.text()
if b.text() == "Temperature/Normal" and b.isChecked() == True:
self.methodname = b.text()
if b.text() == "Solar Radiation/Beta" and b.isChecked() == True:
self.methodname = b.text()
def start_correctionH(self):
self.started = True
self.BiasCorrectH()
def stop_correctionH(self):
if self.started:
self.started = False
QMessageBox.information(self, "Information", "Bias correction is aborted.")
def ShowHide(self):
if self.show_hide.text() == "Hide Details" and self.show_hide.isChecked() == False:
self.textboxF.hide()
self.textbox.hide()
## self.setFixedSize(700,372)
ShowHide(self.show_hideF.text())
ShowHide(self.show_hide.text())
self.show_hideF.setText('Show Details')
self.show_hide.setText('Show Details')
if self.show_hide.text() == "Show Details" and self.show_hide.isChecked() == True:
self.textboxF.show()
self.textbox.show()
## self.setFixedSize(700,620)
ShowHide(self.show_hideF.text())
ShowHide(self.show_hide.text())
self.show_hideF.setText('Hide Details')
self.show_hide.setText('Hide Details')
def BiasCorrectH(self):
if self.Obsfile.text() == "":
QMessageBox.critical(self, "Message", "File containing observed climate data (*.csv or *.txt) is not given.")
self.started = False
if self.ModHfile.text() == "":
QMessageBox.critical(self, "Message", "File containing GCM outputs (*.csv or *.txt) is not given.")
self.started = False
if self.outputfile_location.text() == "":
QMessageBox.critical(self, "Message", "Folder to save bias corrected GCM outputs is not given")
self.started = False
try:
## sep = self.seperator
## sepname = self.seperatorname
sep2 = self.seperator2
sepname2 = self.seperatorname2
except:
QMessageBox.critical(self, "Message", "Format is not defined.")
self.started = False
try:
method = self.methodname
except:
QMessageBox.critical(self, "Message", "Variable/Distribution is not defined.")
self.started = False
self.textbox.setText("")
start = time.time()
self.status.setText('Status: Correcting')
## self.progressbarfinal.setMinimum(0)
## self.progressbarfinal.setValue(0)
self.progressbar.setMinimum(0)
self.progressbar.setValue(0)
Fobs = self.Obsfile.text()
Fmod = self.ModHfile.text()
ObsData, ModData, CorrectedData = [], [], []
with open(Fobs) as f:
line = [line for line in f]
for i in range(len(line)):
if Fobs.endswith('.csv'):
ObsData.append([word for word in line[i].split(",") if word])
if Fobs.endswith('.txt'):
ObsData.append([word for word in line[i].split("\t") if word])
lat = [float(ObsData[0][c]) for c in range(1,len(ObsData[0]))]
lon = [float(ObsData[1][c]) for c in range(1,len(ObsData[0]))]
Latitude = []
Longitude = []
with open(Fmod) as f:
line = [line for line in f]
for i in range(len(line)):
if Fmod.endswith('.csv'):
ModData.append([word for word in line[i].split(",") if word])
if Fmod.endswith('.txt'):
ModData.append([word for word in line[i].split("\t") if word])
DateObs = [ObsData[r][0] for r in range(len(ObsData))]
DateMod = [ModData[r][0] for r in range(len(ModData))]
OutPath = self.outputfile_location.text()
CorrectedData.append(DateMod)
YMod = int(DateMod[2][-4:])
YObs = int(DateObs[2][-4:])
app.processEvents()
if len(lat)>1:
random_count = np.random.randint(len(lat),size=(1))
else:
random_count = 0
fig = plt.figure(figsize=(15,7))
plt.style.use('ggplot')
## plt.style.use('fivethirtyeight')
for j in range(len(lat)):
obs = [float(ObsData[r][j+1]) for r in range(2,len(ObsData))]
MOD = [float(ModData[r][j+1]) for r in range(2,len(ModData))]
Date = [date(YMod,1,1)+timedelta(i) for i in range(len(MOD))]
DateObs = [date(YObs,1,1)+timedelta(i) for i in range(len(obs))]
if method == 'Rainfall/Gamma' and self.started == True:
MOD_Month=[]
Obs_Monthwise = [[] for m in range(12)]
Obs_MonthFreq = [[] for m in range(12)]
MOD_Monthwise = [[] for m in range(12)]
MOD_MonthFreq = [[] for m in range(12)]
Cor_Monthwise = []
Date_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(obs)):
if Date[i].month == m+1:
Date_Monthwise[m].append(Date[i])
Obs_Monthwise[m].append(obs[i])
MOD_Monthwise[m].append(MOD[i])
for m in range(12):
MOD_Month.append(sorted_values(Obs_Monthwise[m],MOD_Monthwise[m]))
MOD_Monthwise = MOD_Month
for m in range(12):
for i in range(len(MOD_Monthwise[m])):
if MOD_Monthwise[m][i]>0:
MOD_MonthFreq[m].append(MOD_Monthwise[m][i])
if Obs_Monthwise[m][i]>0:
Obs_MonthFreq[m].append(Obs_Monthwise[m][i])
nplot=1
for m in range(12):
Cor = []
if len(MOD_MonthFreq[m])>0 and len(Obs_MonthFreq[m])>0:
Mo, Mg, Vo, Vg = np.mean(Obs_MonthFreq[m]), np.mean(MOD_MonthFreq[m]), np.std(Obs_MonthFreq[m])**2, np.std(MOD_MonthFreq[m])**2
if not any(param<0.000001 for param in [Mo, Mg, Vo, Vg]):
O_alpha, O_beta, G_alpha, G_beta = Mo**2/Vo, Vo/Mo, Mg**2/Vg, Vg/Mg
O_loc, G_loc = 0, 0
## print('G',O_alpha, O_beta, G_alpha, G_beta)
else:
O_alpha, O_loc, O_beta = gamma.fit(Obs_MonthFreq[m], loc=0)
G_alpha, G_loc, G_beta = gamma.fit(MOD_MonthFreq[m], loc=0)
## print('fit',O_alpha, O_beta, G_alpha, G_beta)
## print(O_alpha, O_beta, G_alpha, G_beta)
prob = gamma.cdf(MOD_Monthwise[m],G_alpha, scale=G_beta)
Corr = gamma.ppf(prob, O_alpha, scale=O_beta)
for i in range(len(Obs_Monthwise[m])):
if len(MOD_MonthFreq[m])>0:
if MOD_Monthwise[m][i] >= min(MOD_MonthFreq[m]):
Cor.append(Corr[i])
else:
Cor.append(0)
else:
Cor.append(0)
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obs_cdf = gamma.cdf(Obs_Monthwise[m], O_alpha, O_loc, O_beta)
mod_cdf = gamma.cdf(MOD_Monthwise[m], G_alpha, G_loc, G_beta)
Mc, Vc = np.mean(Cor), np.std(Cor)**2
if not any(param<0.000001 for param in [Mc, Vc]):
CF_alpha, CF_beta = Mc**2/Vc, Vc/Mc
CF_loc, G_loc = 0, 0
else:
CF_alpha, CF_loc, CF_beta=gamma.fit(Cor)
cor_cdf = gamma.cdf(Cor, CF_alpha, CF_loc, CF_beta)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(Obs_Monthwise[m], obs_cdf, '.b')
m, = ax.plot(MOD_Monthwise[m], mod_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
if method =='Temperature/Normal' and self.started == True:
MOD_Month=[]
Obs_Monthwise = [[] for m in range(12)]
MOD_Monthwise = [[] for m in range(12)]
Cor_Monthwise = []
Date_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(MOD)):
if Date[i].month == m+1:
Date_Monthwise[m].append(Date[i])
MOD_Monthwise[m].append(MOD[i])
for m in range(12):
for i in range(len(obs)):
if DateObs[i].month == m+1:
Obs_Monthwise[m].append(obs[i])
nplot=1
for m in range(12):
Cor = []
Mo, So = norm.fit(Obs_Monthwise[m])
Mg, Sg = norm.fit(MOD_Monthwise[m])
prob = norm.cdf(MOD_Monthwise[m],Mg, Sg)
Cor = norm.ppf(prob, Mo, So)
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obs_cdf = norm.cdf(Obs_Monthwise[m], Mo, So)
mod_cdf = norm.cdf(MOD_Monthwise[m], Mg, Sg)
Mc, Sc = norm.fit(Cor)
cor_cdf = norm.cdf(Cor, Mc, Sc)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(Obs_Monthwise[m], obs_cdf, '.b')
m, = ax.plot(MOD_Monthwise[m], mod_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
if method =='Solar Radiation/Beta' and self.started == True:
MOD_Month=[]
Obs_Monthwise = [[] for m in range(12)]
MOD_Monthwise = [[] for m in range(12)]
Cor_Monthwise = []
Date_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(MOD)):
if Date[i].month == m+1:
Date_Monthwise[m].append(Date[i])
MOD_Monthwise[m].append(MOD[i])
for m in range(12):
for i in range(len(obs)):
if DateObs[i].month == m+1:
Obs_Monthwise[m].append(obs[i])
nplot=1
for m in range(12):
Cor = []
oMin, oMax = min(Obs_Monthwise[m]), max(Obs_Monthwise[m])
gMin, gMax = min(MOD_Monthwise[m]), max(MOD_Monthwise[m])
Mo = (np.mean(Obs_Monthwise[m])-oMin)/(oMax - oMin)
Mg = (np.mean(MOD_Monthwise[m])-gMin)/(gMax - gMin)
Vo = np.std(Obs_Monthwise[m])**2/(oMax - oMin)**2
Vg = np.std(MOD_Monthwise[m])**2/(gMax - gMin)**2
ao, ag = -Mo*(Vo + Mo**2 - Mo)/Vo, -Mg*(Vg + Mg**2 - Mg)/Vg
bo, bg = ao*(1 - Mo)/Mo, ag*(1 - Mg)/Mg
TransO = [(Obs_Monthwise[m][i]-oMin)/(oMax-oMin) for i in range(len(Obs_Monthwise[m]))]
TransG = [(MOD_Monthwise[m][i]-gMin)/(gMax-gMin) for i in range(len(MOD_Monthwise[m]))]
prob = beta.cdf(TransG, ag, bg)
TransC = beta.ppf(prob, ao, bo)
Cor = [TransC[i]*(oMax-oMin)+oMin for i in range(len(TransC))]
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obs_cdf = beta.cdf(TransO, ao, bo)
mod_cdf = beta.cdf(TransG, ag, bg)
Mc = (np.mean(Cor)-min(Cor))/(max(Cor)-min(Cor))
Vc = np.std(Cor)**2/(max(Cor)-min(Cor))**2
ac = -Mc*(Vc + Mc**2 - Mc)/Vc
bc = ac*(1 - Mc)/Mc
cor_cdf = beta.cdf(TransC, ac, bc)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(Obs_Monthwise[m], obs_cdf, '.b')
m, = ax.plot(MOD_Monthwise[m], mod_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
Date_Month=[]
for m in range(12):
for i in range(len(Date_Monthwise[m])):
Date_Month.append(Date_Monthwise[m][i])
DateCorr_Dict = dict(zip(Date_Month,Cor_Monthwise))
SortedCorr = sorted(DateCorr_Dict.items())
CorrectedData.append([lat[j],lon[j]]+[v for k,v in SortedCorr])
app.processEvents()
self.scrollbar.setValue(self.scrollbar.maximum())
self.progressbar.setValue(j)
## self.progressbarfinal.setValue(j)
self.progressbar.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinal.setMaximum(len(lat)+len(CorrectedData[0])-2)
self.textbox.append('Corrected '+ str(j+1)+' out of '+str(len(lat))+':\tLat: %.1f'%lat[j]+'\tLon: %.1f'%lon[j])
self.status.setText('Status: Writing Bias Corrected Data to File.')
self.textbox.append('\nWriting Bias Corrected Data to File.')
app.processEvents()
if sep2 == ',':
f = open(OutPath+'\Bias Corrected '+method.split('/')[0]+' '+str(YMod)+'.csv','w')
for c in range(len(CorrectedData[0])):
app.processEvents()
if self.started==True:
f.write(','.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
f.write('\n')
if (c+1)%10 == 1 and (c+1) != 11:
self.textbox.append("Writing %dst day data" % (c+1))
elif (c+1)%10 == 2:
self.textbox.append("Writing %dnd day data" % (c+1))
elif (c+1)%10 == 3:
self.textbox.append("Writing %drd day data" % (c+1))
else:
self.textbox.append("Writing %dth day data" % (c+1))
app.processEvents()
self.scrollbar.setValue(self.scrollbar.maximum())
self.progressbar.setValue(len(lat)+c+1)
## self.progressbarfinal.setValue(len(lat)+c+1)
self.progressbar.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinal.setMaximum(len(lat)+len(CorrectedData[0])-2)
if c == len(CorrectedData[0])-1:
end = time.time()
t = end-start
self.status.setText('Status: Completed.')
self.textbox.append("\nTotal Time Taken: %.2d:%.2d:%.2d" % (t/3600,(t%3600)/60,t%60))
QMessageBox.information(self, "Information", "Bias Correction is completed.")
f.close()
if sep2 == '\t':
f = open(OutPath+'\Bias Corrected '+method.split('/')[0]+' '+str(YMod)+'.txt','w')
for c in range(len(CorrectedData[0])):
app.processEvents()
if self.started==True:
f.write('\t'.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
f.write('\n')
if (c+1)%10 == 1 and (c+1) != 11:
self.textbox.append("Writing %dst day data" % (c+1))
elif (c+1)%10 == 2:
self.textbox.append("Writing %dnd day data" % (c+1))
elif (c+1)%10 == 3:
self.textbox.append("Writing %drd day data" % (c+1))
else:
self.textbox.append("Writing %dth day data" % (c+1))
app.processEvents()
self.scrollbar.setValue(self.scrollbar.maximum())
self.progressbar.setValue(len(lat)+c+1)
self.progressbar.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinal.setValue(len(lat)+c+1)
## self.progressbarfinal.setMaximum(len(lat)+len(CorrectedData[0])-2)
if c == len(CorrectedData[0])-1:
end = time.time()
t = end-start
self.status.setText('Status: Completed.')
self.textbox.append("\nTotal Time Taken: %.2d:%.2d:%.2d" % (t/3600,(t%3600)/60,t%60))
QMessageBox.information(self, "Information", "Bias Correction is completed.")
f.close()
def ShowPlots(self):
plt.show()
def FTabUI(self):
gridF = QGridLayout()
gridF.addWidget(self.inputF(), 0, 0)
gridF.addWidget(self.outputF(), 1, 0)
gridF.addWidget(self.methodF(), 2, 0)
gridF.addWidget(self.progressF(), 3, 0)
gridF.setContentsMargins(0,0,0,0)
## self.setTabText(0,"Historical")
self.FTab.setLayout(gridF)
def inputF(self):
##########Layout for taking input climate data to be bias corrected ##########
gBoxF = QGroupBox("Inputs:")
layout1F = QGridLayout()
self.ObsfileF = QLineEdit()
self.browse2F = QPushButton("...")
self.browse2F.setMaximumWidth(25)
self.browse2F.clicked.connect(self.browse2_fileF)
self.q1F = QPushButton("?")
self.q1F.setMaximumWidth(15)
self.q1F.clicked.connect(self.Info1F)
self.ObsfileF.setPlaceholderText("File with observed historical climate data (*.csv or *.txt)")
self.ModHfileF = QLineEdit()
self.browse1F = QPushButton("...")
self.browse1F.setMaximumWidth(25)
self.browse1F.clicked.connect(self.browse1_fileF)
self.q0F = QPushButton("?")
self.q0F.setMaximumWidth(15)
self.q0F.clicked.connect(self.Info0F)
self.ModHfileF.setPlaceholderText("File with GCM historical climate projections (*.csv or *.txt)")
layout1F.addWidget(self.ObsfileF,1,0,1,3)
layout1F.addWidget(self.q1F,1,3,1,1)
layout1F.addWidget(self.browse2F,1,4,1,1)
layout1F.addWidget(self.ModHfileF,1,5,1,3)
layout1F.addWidget(self.q0F,1,8,1,1)
layout1F.addWidget(self.browse1F,1,9,1,1)
self.ModFfileF = QLineEdit()
self.ModFfileF.setPlaceholderText("File with GCM future climate projections (*.csv or *.txt)")
self.q2F = QPushButton("?")
self.q2F.setMaximumWidth(15)
self.q2F.clicked.connect(self.Info2F)
self.browse3F = QPushButton("...")
self.browse3F.setMaximumWidth(25)
self.browse3F.clicked.connect(self.browse3_fileF)
layout1F.addWidget(self.ModFfileF,3,0,1,8)
layout1F.addWidget(self.q2F,3,8,1,1)
layout1F.addWidget(self.browse3F,3,9,1,1)
## ##########Layout for taking comma delimited vs tab delimited################################
## sublayout1 = QGridLayout()
##
## self.label1 = QLabel("Input Format:\t")
## self.b1 = QRadioButton("Comma Delimated (*.csv)")
## #self.b1.setChecked(True)
## self.b2 = QRadioButton("Tab Delimited (*.txt)")
##
## self.b1.toggled.connect(lambda:self.btnstate(self.b1))
## self.b2.toggled.connect(lambda:self.btnstate(self.b2))
##
## sublayout1.addWidget(self.label1,1,0)
## sublayout1.addWidget(self.b1,1,1)
## sublayout1.addWidget(self.b2,1,2)
## layout1.addLayout(sublayout1,3,0)
gBoxF.setLayout(layout1F)
return gBoxF
def outputF(self):
##########Layout for output file location and interpolation##########
gBoxF = QGroupBox("Outputs:")
layout4F = QGridLayout()
self.outputfile_locationF = QLineEdit()
self.outputfile_locationF.setPlaceholderText("Folder to save bias corrected GCM outputs")
self.browse4F = QPushButton("...")
self.browse4F.setMaximumWidth(25)
self.browse4F.clicked.connect(self.browse4_fileF)
layout4F.addWidget(self.outputfile_locationF,1,0,1,3)
layout4F.addWidget(self.browse4F,1,3,1,1)
########################Layout for taking comma delimited vs tab delimited################################
sublayout2F = QGridLayout()
output_labelF = QLabel("Output Format:\t")
self.b3F = QRadioButton("Comma Delimated (*.csv)")
#self.b3.setChecked(True)
self.b4F = QRadioButton("Tab Delimited (*.txt)")
self.b3F.toggled.connect(lambda:self.btn2stateF(self.b3F))
self.b4F.toggled.connect(lambda:self.btn2stateF(self.b4F))
sublayout2F.addWidget(output_labelF,1,0)
sublayout2F.addWidget(self.b3F,1,1)
sublayout2F.addWidget(self.b4F,1,2)
layout4F.addLayout(sublayout2F,2,0)
gBoxF.setLayout(layout4F)
return gBoxF
def methodF(self):
########################Layout for taking methods of Bias Correction ################################
gBoxF = QGroupBox("Variable/Distribution")
layout5F = QGridLayout()
self.b5F = QRadioButton("Rainfall/Gamma")
#self.b3F.setChecked(True)
self.b6F = QRadioButton("Temperature/Normal")
self.b7F = QRadioButton("Solar Radiation/Beta")
self.b5F.toggled.connect(lambda:self.btn3stateF(self.b5F))
self.b6F.toggled.connect(lambda:self.btn3stateF(self.b6F))
self.b7F.toggled.connect(lambda:self.btn3stateF(self.b7F))
self.show_hideF = QPushButton("Show Details")
Font=QFont()
Font.setBold(True)
#self.show_hideF.setFont(Font)
self.show_hideF.setCheckable(True)
#self.show_hideF.toggle()
self.show_hideF.clicked.connect(self.ShowHideF)
self.show_hideF.setFixedWidth(90)
self.show_hideF.setFixedHeight(25)
Style_show_hideF_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(66, 131, 221);
border: none;
}
QPushButton:Checked{
background-color: rgb(66, 131, 221);
border: none;
}
QPushButton:hover{
background-color: rgb(66, 131, 221,230);
border: none;
}
"""
self.show_hideF.setStyleSheet(Style_show_hideF_Button)
self.show_plotsF = QPushButton("Show Plots")
self.show_plotsF.clicked.connect(self.ShowPlotsF)
self.show_plotsF.setFixedWidth(75)
self.show_plotsF.setFixedHeight(25)
self.show_plotsF.setStyleSheet(Style_show_hideF_Button)
self.startF = QPushButton("Run")
self.startF.setFixedWidth(50)
self.startF.setFixedHeight(25)
Style_RunF_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(0,121,0);
border-color: none;
border: none;
}
QPushButton:hover{
background-color: rgb(0,121,0,230);
}
"""
self.startF.clicked.connect(self.start_correctionF)
#self.startF.setFont(Font)
self.startF.setStyleSheet(Style_RunF_Button)
self.stopF = QPushButton("Cancel")
self.stopF.setMaximumWidth(60)
self.stopF.setFixedHeight(25)
Style_CancelF_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(180,0,0,240);
border-color: none;
border: none;
}
QPushButton:hover{
background-color: rgb(180,0,0,220);
}
"""
self.stopF.clicked.connect(self.stop_correctionF)
#self.stopF.setFont(Font)
self.stopF.setStyleSheet(Style_CancelF_Button)
layout5F.addWidget(self.b5F,1,1)
layout5F.addWidget(self.b6F,1,2)
layout5F.addWidget(self.b7F,1,3)
layout5F.addWidget(self.show_hideF,1,7)
layout5F.addWidget(self.startF,1,4)
layout5F.addWidget(self.stopF,1,6)
layout5F.addWidget(self.show_plotsF,1,5)
## layout5F.addWidget(self.b5F,1,1)
## layout5F.addWidget(self.b6F,1,2)
## layout5F.addWidget(self.b7F,1,3)
## layout5F.addWidget(self.show_hideF,2,5)
## layout5F.addWidget(self.startF,1,4)
## layout5F.addWidget(self.stopF,2,4)
## layout5F.addWidget(self.show_plotsF,1,5)
gBoxF.setLayout(layout5F)
return gBoxF
########## Layout for progress of Bias Correction ##########
def progressF(self):
gBoxF = QGroupBox()
layout6F = QVBoxLayout()
STYLE2 = """
QProgressBar{
text-align: center;
}
QProgressBar::chunk {
background-color: rgb(0,121,0);
}
"""
self.statusF = QLabel('')
self.progressbarF = QProgressBar()
## self.progressbarfinalF = QProgressBar()
#self.progressbarF.setMinimum(1)
self.progressbarF.setFixedHeight(13)
## self.progressbarfinalF.setFixedHeight(13)
self.progressbarF.setStyleSheet(STYLE2)
## self.progressbarfinalF.setStyleSheet(STYLE2)
self.textboxF = QTextEdit()
self.textboxF.setReadOnly(True)
self.textboxF.moveCursor(QTextCursor.End)
self.textboxF.hide()
self.scrollbarF = self.textboxF.verticalScrollBar()
layout6F.addWidget(self.statusF)
layout6F.addWidget(self.progressbarF)
## layout6F.addWidget(self.progressbarfinalF)
layout6F.addWidget(self.textboxF)
gBoxF.setLayout(layout6F)
return gBoxF
########################### Control Buttons ####################################################
def browse1_fileF(self):
ModH_fileF = QFileDialog.getOpenFileName(self,caption = "Open File",directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimated (*.csv);;Tab Delimated (*.txt)")
self.ModHfileF.setText(QDir.toNativeSeparators(ModH_fileF))
def browse2_fileF(self):
Obs_fileF = QFileDialog.getOpenFileName(self,caption = "Open File",directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimated (*.csv);;Tab Delimated (*.txt)")
self.ObsfileF.setText(QDir.toNativeSeparators(Obs_fileF))
def browse3_fileF(self):
ModF_fileF = QFileDialog.getOpenFileName(self,caption = "Open File", directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimated (*.csv);;Tab Delimated (*.txt)")
self.ModFfileF.setText(QDir.toNativeSeparators(ModF_fileF))
def browse4_fileF(self):
output_fileF = QFileDialog.getExistingDirectory(self, "Save File in Folder", r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
QFileDialog.ShowDirsOnly)
self.outputfile_locationF.setText(QDir.toNativeSeparators(output_fileF))
def Info0F(self):
QMessageBox.information(self, "Information About Input Files (Model Historical)",
'''Sample input (.csv or .txt) should be same as it is shown in Sample Example:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (ModH).csv
''')
def Info1F(self):
QMessageBox.information(self, "Information About Input Files (Observed Historical)",
'''Sample input (.csv or .txt) should be same as it is shown in Sample Example:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (ObsH).csv
''')
def Info2F(self):
QMessageBox.information(self, "Information About Input File (Model Future)",
'''Sample input (.csv or .txt) should be same as it is shown in Sample Example:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (ModF).csv
''')
## def btnstateF(self,b):
## if b.text() == "Comma Delimated (*.csv)" and b.isChecked() == True:
## self.seperatorF = ','
## self.seperatornameF = '.csv'
## if b.text() == "Tab Delimited (*.txt)" and b.isChecked() == True:
## self.seperatorF = '\t'
## self.seperatornameF = '.txt'
def btn2stateF(self,b):
if b.text() == "Comma Delimated (*.csv)" and b.isChecked() == True:
self.seperator2F = ','
self.seperatorname2F = '.csv'
if b.text() == "Tab Delimited (*.txt)" and b.isChecked() == True:
self.seperator2F = '\t'
self.seperatorname2F = '.txt'
def btn3stateF(self,b):
if b.text() == "Rainfall/Gamma" and b.isChecked() == True:
self.methodnameF = b.text()
if b.text() == "Temperature/Normal" and b.isChecked() == True:
self.methodnameF = b.text()
if b.text() == "Solar Radiation/Beta" and b.isChecked() == True:
self.methodnameF = b.text()
def start_correctionF(self):
self.started = True
self.BiasCorrectF()
def stop_correctionF(self):
if self.started:
self.started = False
QMessageBox.information(self, "Information", "Bias correction is aborted.")
def ShowHideF(self):
if self.show_hideF.text() == "Hide Details" and self.show_hideF.isChecked() == False:
self.textboxF.hide()
self.textbox.hide()
## self.setFixedSize(700,372)
ShowHide(self.show_hideF.text())
ShowHide(self.show_hide.text())
self.show_hideF.setText('Show Details')
self.show_hide.setText('Show Details')
if self.show_hideF.text() == "Show Details" and self.show_hideF.isChecked() == True:
self.textboxF.show()
self.textbox.show()
## self.setFixedSize(700,620)
ShowHide(self.show_hideF.text())
ShowHide(self.show_hide.text())
self.show_hideF.setText('Hide Details')
self.show_hide.setText('Hide Details')
def BiasCorrectF(self):
if self.ObsfileF.text() == "":
QMessageBox.critical(self, "Message", "File with observed historical climate data (*.csv or *.txt) is not given.")
self.started = False
if self.ModHfileF.text() == "":
QMessageBox.critical(self, "Message", "File with GCM historical climate projections (*.csv or *.txt) is not given.")
self.started = False
if self.ModFfileF.text() == "":
QMessageBox.critical(self, "Message", "File with GCM future climate projections (*.csv or *.txt) is not given.")
self.started = False
if self.outputfile_locationF.text() == "":
QMessageBox.critical(self, "Message", "Folder to save bias corrected GCM outputs is not given")
self.started = False
try:
## sepF = self.seperator
## sepnameF = self.seperatorname
sep2F = self.seperator2F
sepname2F = self.seperatorname2F
except:
QMessageBox.critical(self, "Message", "Format is not defined.")
self.started = False
try:
method = self.methodnameF
except:
QMessageBox.critical(self, "Message", "Variable/Distribution is not defined.")
self.started = False
self.textboxF.setText("")
start = time.time()
self.statusF.setText('Status: Correcting.')
## self.progressbarfinalF.setMinimum(0)
## self.progressbarfinalF.setValue(0)
self.progressbarF.setMinimum(0)
self.progressbarF.setValue(0)
FobsH = self.ObsfileF.text()
FmodH = self.ModHfileF.text()
FmodF = self.ModFfileF.text()
ObsHData, ModHData, ModFData, CorrectedData = [], [], [], []
with open(FobsH) as f:
line = [line for line in f]
for i in range(len(line)):
if FobsH.endswith('.csv'):
ObsHData.append([word for word in line[i].split(",") if word])
if FobsH.endswith('.txt'):
ObsHData.append([word for word in line[i].split("\t") if word])
lat = [float(ObsHData[0][c]) for c in range(1,len(ObsHData[0]))]
lon = [float(ObsHData[1][c]) for c in range(1,len(ObsHData[0]))]
Latitude = []
Longitude = []
with open(FmodH) as f:
line = [line for line in f]
for i in range(len(line)):
if FmodH.endswith('.csv'):
ModHData.append([word for word in line[i].split(",") if word])
if FmodH.endswith('.txt'):
ModHData.append([word for word in line[i].split("\t") if word])
with open(FmodF) as f:
line = [line for line in f]
for i in range(len(line)):
if FmodF.endswith('.csv'):
ModFData.append([word for word in line[i].split(",") if word])
if FmodF.endswith('.txt'):
ModFData.append([word for word in line[i].split("\t") if word])
DateObsH = [ObsHData[r][0] for r in range(len(ObsHData))]
DateModH = [ModHData[r][0] for r in range(len(ModHData))]
DateModF = [ModFData[r][0] for r in range(len(ModFData))]
OutPath = self.outputfile_locationF.text()
CorrectedData.append(DateModF)
YObsH = int(DateObsH[2][-4:])
YModH = int(DateModH[2][-4:])
YModF = int(DateModF[2][-4:])
app.processEvents()
if len(lat)>1:
random_count = np.random.randint(len(lat),size=(1))
else:
random_count = 0
fig = plt.figure(figsize=(15,7))
plt.style.use('ggplot')
## plt.style.use('fivethirtyeight')
for j in range(len(lat)):
ObsH = [float(ObsHData[r][j+1]) for r in range(2,len(ObsHData))]
ModH = [float(ModHData[r][j+1]) for r in range(2,len(ModHData))]
ModF = [float(ModFData[r][j+1]) for r in range(2,len(ModFData))]
DateObsH = [date(YObsH,1,1)+timedelta(i) for i in range(len(ObsH))]
DateModH = [date(YModH,1,1)+timedelta(i) for i in range(len(ModH))]
DateModF = [date(YModF,1,1)+timedelta(i) for i in range(len(ModF))]
if method == 'Rainfall/Gamma' and self.started == True:
DateH=DateModH
DateF=DateModF
ModH_Month=[]
ModF_Month=[]
Cor_Monthwise = []
ObsH_Monthwise = [[] for m in range(12)]
ObsH_MonthFreq = [[] for m in range(12)]
ModH_Monthwise = [[] for m in range(12)]
ModH_MonthFreq = [[] for m in range(12)]
ModF_Monthwise = [[] for m in range(12)]
ModF_MonthFreq = [[] for m in range(12)]
DateH_Monthwise= [[] for m in range(12)]
DateF_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(ObsH)):
if DateH[i].month == m+1:
DateH_Monthwise[m].append(DateH[i])
ObsH_Monthwise[m].append(ObsH[i])
ModH_Monthwise[m].append(ModH[i])
for m in range(12):
for i in range(len(ModF)):
if DateF[i].month == m+1:
DateF_Monthwise[m].append(DateF[i])
ModF_Monthwise[m].append(ModF[i])
for m in range(12):
ModH_Month.append(sorted_values(ObsH_Monthwise[m],ModH_Monthwise[m]))
ModF_Month.append(sorted_values_thresh(ModH_Month[m], ModF_Monthwise[m]))
ModH_Monthwise = ModH_Month
ModF_Monthwise = ModF_Month
for m in range(12):
for i in range(len(ModH_Monthwise[m])):
if ModH_Monthwise[m][i]>0:
ModH_MonthFreq[m].append(ModH_Monthwise[m][i])
if ObsH_Monthwise[m][i]>0:
ObsH_MonthFreq[m].append(ObsH_Monthwise[m][i])
for i in range(len(ModF_Monthwise[m])):
if ModF_Monthwise[m][i]>0:
ModF_MonthFreq[m].append(ModF_Monthwise[m][i])
nplot=1
for m in range(12):
Cor = []
if len(ModH_MonthFreq[m])>0 and len(ObsH_MonthFreq[m])>0 and len(ModF_MonthFreq[m])>0:
Moh, Mgh, Mgf, Voh, Vgh, Vgf = np.mean(ObsH_MonthFreq[m]), np.mean(ModH_MonthFreq[m]), np.mean(ModF_MonthFreq[m]), np.std(ObsH_MonthFreq[m])**2, np.std(ModH_MonthFreq[m])**2, np.std(ModF_MonthFreq[m])**2
if not any(param<0.000001 for param in [Moh, Mgh, Mgf, Voh, Vgh, Vgf]):
aoh, boh, agh, bgh, agf, bgf = Moh**2/Voh, Voh/Moh, Mgh**2/Vgh, Vgh/Mgh, Mgf**2/Vgf, Vgf/Mgf
loh, lgh, lgf = 0, 0, 0
else:
aoh, loh, boh = gamma.fit(ObsH_MonthFreq[m], loc=0)
agh, lgh, bgh = gamma.fit(ModH_MonthFreq[m], loc=0)
agf, lgf, bgf = gamma.fit(ModF_MonthFreq[m], loc=0)
'CDF of ModF with ModH Parameters'
Prob_ModF_ParaModH = gamma.cdf(ModF_Monthwise[m],agh, scale=bgh)
'Inverse of Prob_ModF_ParaModH with ParaObsH to get corrected transformed values of Future Model Time Series'
Cor = gamma.ppf(Prob_ModF_ParaModH, aoh, scale=boh)
else:
for i in range(len(ModF_Monthwise[m])):
Cor.append(0)
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obsH_cdf = gamma.cdf(ObsH_Monthwise[m], aoh, loh, boh)
modF_cdf = gamma.cdf(ModF_Monthwise[m], agf, lgf, bgf)
Mc, Vc = np.mean(Cor), np.std(Cor)**2
if not any(param<0.000001 for param in [Mc, Vc]):
acf, bcf = Mc**2/Vc, Vc/Mc
lcf = 0
else:
acf, lcf, bcf = gamma.fit(Cor)
cor_cdf = gamma.cdf(Cor, acf, lcf, bcf)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(ObsH_Monthwise[m], obsH_cdf, '.b')
m, = ax.plot(ModF_Monthwise[m], modF_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
if method =='Temperature/Normal' and self.started == True:
DateH=DateModH
DateF=DateModF
Cor_Monthwise = []
ObsH_Monthwise = [[] for m in range(12)]
ModH_Monthwise = [[] for m in range(12)]
ModF_Monthwise = [[] for m in range(12)]
DateH_Monthwise= [[] for m in range(12)]
DateF_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(ObsH)):
if DateH[i].month == m+1:
DateH_Monthwise[m].append(DateH[i])
ObsH_Monthwise[m].append(ObsH[i])
ModH_Monthwise[m].append(ModH[i])
for m in range(12):
for i in range(len(ModF)):
if DateF[i].month == m+1:
DateF_Monthwise[m].append(DateF[i])
ModF_Monthwise[m].append(ModF[i])
nplot=1
for m in range(12):
Cor = []
Moh, Mgh, Mgf, Soh, Sgh, Sgf = np.mean(ObsH_Monthwise[m]), np.mean(ModH_Monthwise[m]), np.mean(ModF_Monthwise[m]), np.std(ObsH_Monthwise[m]), np.std(ModH_Monthwise[m]), np.std(ModF_Monthwise[m])
Prob_ModF = norm.cdf(ModF_Monthwise[m], Mgf, Sgf)
Inv_of_Prob_ModF_ParaObsH = norm.ppf(Prob_ModF, Moh, Soh)
Inv_of_Prob_ModF_ParaModH = norm.ppf(Prob_ModF, Mgh, Sgh)
for i in range(len(ModF_Monthwise[m])):
Cor.append(ModF_Monthwise[m][i]+Inv_of_Prob_ModF_ParaObsH[i]-Inv_of_Prob_ModF_ParaModH[i])
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obsH_cdf = norm.cdf(ObsH_Monthwise[m], Moh, Soh)
modF_cdf = norm.cdf(ModF_Monthwise[m], Mgf, Sgf)
Mcf, Scf = norm.fit(Cor)
cor_cdf = norm.cdf(Cor, Mcf, Scf)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(ObsH_Monthwise[m], obsH_cdf, '.b')
m, = ax.plot(ModF_Monthwise[m], modF_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
if method =='Solar Radiation/Beta' and self.started == True:
ModH_Month=[]
Cor_Monthwise = []
ObsH_Monthwise = [[] for m in range(12)]
ModH_Monthwise = [[] for m in range(12)]
ModF_Monthwise = [[] for m in range(12)]
DateObsH_Monthwise= [[] for m in range(12)]
DateModH_Monthwise= [[] for m in range(12)]
DateModF_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(ObsH)):
if DateObsH[i].month == m+1:
DateObsH_Monthwise[m].append(DateObsH[i])
ObsH_Monthwise[m].append(ObsH[i])
for m in range(12):
for i in range(len(ModH)):
if DateModH[i].month == m+1:
DateModH_Monthwise[m].append(DateModH[i])
ModH_Monthwise[m].append(ModH[i])
for m in range(12):
for i in range(len(ModF)):
if DateModF[i].month == m+1:
DateModF_Monthwise[m].append(DateModF[i])
ModF_Monthwise[m].append(ModF[i])
nplot=1
for m in range(12):
Cor = []
'Maximum and minimum value monthwise of whole time series are calculated below for ObsH, ModH and ModF'
ohMin, ohMax = min(ObsH_Monthwise[m]), max(ObsH_Monthwise[m])
ghMin, ghMax = min(ModH_Monthwise[m]), max(ModH_Monthwise[m])
gfMin, gfMax = min(ModF_Monthwise[m]), max(ModF_Monthwise[m])
'Mean and variance value monthwise of whole time series are calculated below for ObsH, ModH and ModF'
Moh = (np.mean(ObsH_Monthwise[m])-ohMin)/(ohMax - ohMin)
Mgh = (np.mean(ModH_Monthwise[m])-ghMin)/(ghMax - ghMin)
Mgf = (np.mean(ModF_Monthwise[m])-gfMin)/(gfMax - gfMin)
Voh = np.std(ObsH_Monthwise[m])**2/(ohMax - ohMin)**2
Vgh = np.std(ModH_Monthwise[m])**2/(ghMax - ghMin)**2
Vgf = np.std(ModF_Monthwise[m])**2/(gfMax - gfMin)**2
'a,b parameters in beta distribution, monthwise of whole time series, are calculated below for ObsH, ModH and ModF'
aoh, agh, agf = -Moh*(Voh + Moh**2 - Moh)/Voh, -Mgh*(Vgh + Mgh**2 - Mgh)/Vgh, -Mgf*(Vgf + Mgf**2 - Mgf)/Vgf
boh, bgh, bgf = aoh*(1 - Moh)/Moh, agh*(1 - Mgh)/Mgh, agf*(1 - Mgf)/Mgf
'All the time series are transformed to range (0,1)'
TransOH = [(ObsH_Monthwise[m][i]-ohMin)/(ohMax-ohMin) for i in range(len(ObsH_Monthwise[m]))]
TransGH = [(ModH_Monthwise[m][i]-ghMin)/(ghMax-ghMin) for i in range(len(ModH_Monthwise[m]))]
TransGF = [(ModF_Monthwise[m][i]-gfMin)/(gfMax-gfMin) for i in range(len(ModF_Monthwise[m]))]
'CDF of ModF with ModH Parameters'
Prob_ModF_ParaModH = beta.cdf(TransGF, agh, bgh)
'Inverse of Prob_ModF_ParaModH with ParaObsH to get corrected transformed values of Future Model Time Series'
TransC = beta.ppf(Prob_ModF_ParaModH, aoh, boh)
Cor = [TransC[i]*(ohMax-ohMin)+ohMin for i in range(len(TransC))]
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
DateF_Monthwise = DateModF_Monthwise
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obsH_cdf = beta.cdf(TransOH, aoh, boh)
modF_cdf = beta.cdf(TransGF, agf, bgf)
Mcf = (np.mean(Cor)-min(Cor))/(max(Cor)-min(Cor))
Vcf = np.std(Cor)**2/(max(Cor)-min(Cor))**2
acf = -Mcf*(Vcf + Mcf**2 - Mcf)/Vcf
bcf = acf*(1 - Mcf)/Mcf
cor_cdf = beta.cdf(TransC, acf, bcf)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(ObsH_Monthwise[m], obsH_cdf, '.b')
m, = ax.plot(ModF_Monthwise[m], modF_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
Date_Month=[]
for m in range(12):
for i in range(len(DateF_Monthwise[m])):
Date_Month.append(DateF_Monthwise[m][i])
DateCorr_Dict = dict(zip(Date_Month,Cor_Monthwise))
SortedCorr = sorted(DateCorr_Dict.items())
CorrectedData.append([lat[j],lon[j]]+[v for k,v in SortedCorr])
app.processEvents()
self.scrollbarF.setValue(self.scrollbarF.maximum())
self.progressbarF.setValue(j)
## self.progressbarfinalF.setValue(j)
self.progressbarF.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinalF.setMaximum(len(lat)+len(CorrectedData[0])-2)
self.textboxF.append('Corrected '+ str(j+1)+' out of '+str(len(lat))+':\tLat: %.1f'%lat[j]+'\tLon: %.1f'%lon[j])
self.statusF.setText('Status: Writing Bias Corrected Data to File.')
self.textboxF.append('\nWriting Bias Corrected Data to File.')
app.processEvents()
if sep2F == ',':
f = open(OutPath+'\Bias Corrected '+method.split('/')[0]+' '+str(YModF)+'.csv','w')
for c in range(len(CorrectedData[0])):
app.processEvents()
if self.started==True:
f.write(','.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
f.write('\n')
if (c+1)%10 == 1 and (c+1) != 11:
self.textboxF.append("Writing %dst day data" % (c+1))
elif (c+1)%10 == 2:
self.textboxF.append("Writing %dnd day data" % (c+1))
elif (c+1)%10 == 3:
self.textboxF.append("Writing %drd day data" % (c+1))
else:
self.textboxF.append("Writing %dth day data" % (c+1))
app.processEvents()
self.scrollbarF.setValue(self.scrollbarF.maximum())
self.progressbarF.setValue(len(lat)+c+1)
## self.progressbarfinalF.setValue(len(lat)+c+1)
self.progressbarF.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinalF.setMaximum(len(lat)+len(CorrectedData[0])-2)
if c == len(CorrectedData[0])-1:
end = time.time()
t = end-start
self.statusF.setText('Status: Completed.')
self.textboxF.append("\nTotal Time Taken: %.2d:%.2d:%.2d" % (t/3600,(t%3600)/60,t%60))
QMessageBox.information(self, "Information", "Bias Correction is completed.")
f.close()
if sep2F == '\t':
f = open(OutPath+'\Bias Corrected '+method.split('/')[0]+' '+str(YModF)+'.txt','w')
for c in range(len(CorrectedData[0])):
app.processEvents()
if self.started==True:
f.write('\t'.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
f.write('\n')
if (c+1)%10 == 1 and (c+1) != 11:
self.textboxF.append("Writing %dst day data" % (c+1))
elif (c+1)%10 == 2:
self.textboxF.append("Writing %dnd day data" % (c+1))
elif (c+1)%10 == 3:
self.textboxF.append("Writing %drd day data" % (c+1))
else:
self.textboxF.append("Writing %dth day data" % (c+1))
app.processEvents()
self.scrollbarF.setValue(self.scrollbarF.maximum())
self.progressbarF.setValue(len(lat)+c+1)
self.progressbarF.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinalF.setValue(len(lat)+c+1)
## self.progressbarfinalF.setMaximum(len(lat)+len(CorrectedData[0])-2)
if c == len(CorrectedData[0])-1:
end = time.time()
t = end-start
self.statusF.setText('Status: Completed.')
self.textboxF.append("\nTotal Time Taken: %.2d:%.2d:%.2d" % (t/3600,(t%3600)/60,t%60))
QMessageBox.information(self, "Information", "Bias Correction is completed.")
f.close()
def ShowPlotsF(self):
plt.show()
class BiasCorrection(QWidget):
def __init__(self, parent=None):
super(BiasCorrection,self).__init__(parent)
grid = QGridLayout()
self.m_titlebar=TitleBar(self)
grid.addWidget(self.m_titlebar, 0, 0)
self.tabs = HFTab(self)
grid.addWidget(self.tabs, 1, 0)
self.setLayout(grid)
grid.setContentsMargins(0,0,0,0)
## self.setWindowTitle("Weather Data Interpolator")
self.setFocus()
self.adjustSize()
self.Widget_Width = self.frameGeometry().width()
self.Widget_Height = self.frameGeometry().height()
## self.setFixedSize(750,354)
self.setFixedSize(750,self.Widget_Height)
## self.move(350,100)
self.setWindowFlags(Qt.FramelessWindowHint)
## self.setWindowFlags(Qt.WindowMaximizeButtonHint)
started = False
app = QApplication(sys.argv)
widget = BiasCorrection()
app_icon = QIcon()
app_icon.addFile('Interpolation-2.ico', QSize(40,40))
app.setWindowIcon(app_icon)
pixmap = QPixmap("Splash_CDBC.png")
splash = QSplashScreen(pixmap)
splash.show()
screen_resolution = app.desktop().screenGeometry()
width, height = screen_resolution.width(), screen_resolution.height()
widget.move(width/2-widget.width()/2,height/2-widget.height()/2)
time.sleep(2)
def ShowHide(text):
if text == 'Show Details':
widget.setFixedSize(750,BiasCorrection().Widget_Height+BiasCorrection().Widget_Height*2/3)
print(widget.height())
## widget.setFixedSize(750,620)
if text == 'Hide Details':
widget.setFixedSize(750,BiasCorrection().Widget_Height+1)
print(widget.height())
## widget.setFixedSize(750,354)
##widget.setFixedWidth(500)
##widget.setFixedHeight(400)
widget.show()
splash.finish(widget)
app.exec_()
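The per-month correction above fits beta distributions to the rescaled observed and modelled series and maps future values through the historical-model CDF and the observed inverse CDF. A minimal standalone sketch of that quantile-mapping step is given below; the function and argument names are illustrative, only the use of scipy.stats.beta and the method-of-moments parameter estimates are taken from the code above.

import numpy as np
from scipy.stats import beta

def beta_quantile_map(obs_hist, mod_hist, mod_fut):
    """Hedged sketch of the beta-CDF bias correction above (no GUI, no monthly split)."""
    def moments_to_beta(x):
        lo, hi = np.min(x), np.max(x)
        t = (np.asarray(x) - lo) / (hi - lo)            # rescale to (0, 1)
        m, v = np.mean(t), np.var(t)
        a = -m * (v + m**2 - m) / v                     # method-of-moments estimates
        return a, a * (1 - m) / m, lo, hi
    ao, bo, o_lo, o_hi = moments_to_beta(obs_hist)      # observed historical fit
    ah, bh, _, _ = moments_to_beta(mod_hist)            # modelled historical fit
    f_lo, f_hi = np.min(mod_fut), np.max(mod_fut)
    t_fut = (np.asarray(mod_fut) - f_lo) / (f_hi - f_lo)
    p = beta.cdf(t_fut, ah, bh)                         # probabilities under the historical-model fit
    return beta.ppf(p, ao, bo) * (o_hi - o_lo) + o_lo   # invert with the observed-fit parameters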
| 45.76388
| 228
| 0.526146
| 69,649
| 0.971205
| 0
| 0
| 0
| 0
| 0
| 0
| 15,767
| 0.219859
|
3ca4fb77d1058786e6c3813cfbd46b9161c2b28a
| 3,473
|
py
|
Python
|
lagom/core/es/base_es_master.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
lagom/core/es/base_es_master.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
lagom/core/es/base_es_master.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
from lagom.core.multiprocessing import BaseIterativeMaster
class BaseESMaster(BaseIterativeMaster):
"""
Base class for master of parallelized evolution strategies (ES).
It internally defines an ES algorithm.
In each generation, it distributes all sampled solution candidates, each for one worker,
to compute a list of object function values and then update the ES.
For more details about how master class works, please refer
to the documentation of the class, BaseIterativeMaster.
All inherited subclasses should implement the following function:
1. make_es(self)
2. _process_es_result(self, result)
"""
def __init__(self,
num_iteration,
worker_class,
num_worker,
init_seed=0,
daemonic_worker=None):
super().__init__(num_iteration=num_iteration,
worker_class=worker_class,
num_worker=num_worker,
init_seed=init_seed,
daemonic_worker=daemonic_worker)
# Create ES solver
self.es = self.make_es()
# It is better to force popsize to be number of workers
assert self.es.popsize == self.num_worker
def make_es(self):
"""
User-defined function to create an ES algorithm.
Returns:
es (BaseES): An instantiated object of an ES class.
Examples:
cmaes = CMAES(mu0=[3]*100,
std0=0.5,
popsize=12)
return cmaes
"""
raise NotImplementedError
def make_tasks(self, iteration):
# ES samples new candidate solutions
solutions = self.es.ask()
# Record iteration number, for logging in _process_workers_result()
# And it also keeps API untouched for assign_tasks() in non-iterative Master class
self.generation = iteration
return solutions
def _process_workers_result(self, tasks, workers_result):
# Rename, in ES context, the task is to evalute the solution candidate
solutions = tasks
# Unpack function values from workers results, [solution_id, function_value]
# Note that the workers result already sorted ascendingly with respect to task ID
function_values = [result[1] for result in workers_result]
# Update ES
self.es.tell(solutions, function_values)
# Obtain results from ES
result = self.es.result
# Process the ES result
self._process_es_result(result)
def _process_es_result(self, result):
"""
User-defined function to process the result from ES.
Note that the user can use the class memeber `self.generation` which indicate the index of
the current generation, it is automatically incremented each time when sample a set of
solution candidates.
Args:
result (dict): A dictionary of result returned from es.result.
Examples:
best_f_val = result['best_f_val']
if self.generation == 0 or (self.generation+1) % 100 == 0:
print(f'Best function value at generation {self.generation+1}: {best_f_val}')
"""
raise NotImplementedError
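A minimal subclass sketch of the two required hooks follows, assuming the CMAES class mentioned in the make_es docstring; the import path and logging details are illustrative, not taken from lagom's actual layout.

class ExampleESMaster(BaseESMaster):
    def make_es(self):
        # Constructor arguments follow the docstring example above; the import path is assumed.
        from lagom.core.es import CMAES
        return CMAES(mu0=[3]*100, std0=0.5, popsize=self.num_worker)

    def _process_es_result(self, result):
        best_f_val = result['best_f_val']
        if self.generation == 0 or (self.generation + 1) % 100 == 0:
            print(f'Best function value at generation {self.generation + 1}: {best_f_val}')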
| 37.344086
| 98
| 0.600921
| 3,412
| 0.982436
| 0
| 0
| 0
| 0
| 0
| 0
| 2,091
| 0.602073
|
3ca513ca1cc8091c31b7381ae44ccedd1283fc01
| 1,096
|
py
|
Python
|
Roman_Morozov_dz_3/task_5.py
|
Wern-rm/2074_GB_Python
|
f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646
|
[
"MIT"
] | null | null | null |
Roman_Morozov_dz_3/task_5.py
|
Wern-rm/2074_GB_Python
|
f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646
|
[
"MIT"
] | null | null | null |
Roman_Morozov_dz_3/task_5.py
|
Wern-rm/2074_GB_Python
|
f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646
|
[
"MIT"
] | null | null | null |
"""
Реализовать функцию get_jokes(), возвращающую n шуток, сформированных из трех случайных слов, взятых из трёх списков (по одному из каждого):
"""
import random
nouns = ["автомобиль", "лес", "огонь", "город", "дом"]
adverbs = ["сегодня", "вчера", "завтра", "позавчера", "ночью"]
adjectives = ["веселый", "яркий", "зеленый", "утопичный", "мягкий"]
def get_jokes(count, repeat=True, **kwargs) -> list[str]:
result: list[str] = []
if repeat:
for i in range(count):
result.append(' '.join(random.choice(kwargs[j]) for j in kwargs.keys()))
    else:
        # Without repetition: remove each chosen word so it cannot reappear in a later joke
        pools = {key: list(words) for key, words in kwargs.items()}
        for i in range(count):
            chosen = [pools[key].pop(random.randrange(len(pools[key]))) for key in pools]
            result.append(' '.join(chosen))
return result
if __name__ == '__main__':
print(get_jokes(count=1, repeat=True, nouns=nouns, adverbs=adverbs, adjectives=adjectives))
print(get_jokes(count=3, repeat=False, nouns=nouns, adverbs=adverbs, adjectives=adjectives))
print(get_jokes(count=5, repeat=True, nouns=nouns, adverbs=adverbs, adjectives=adjectives))
| 40.592593
| 140
| 0.666058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 482
| 0.373065
|
3ca67e9442436a3a4c05f92ccc99c1b4150df427
| 11,217
|
py
|
Python
|
tools.py
|
akerestely/nonlinearBestFit
|
e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4
|
[
"MIT"
] | 1
|
2019-10-09T07:39:55.000Z
|
2019-10-09T07:39:55.000Z
|
tools.py
|
akerestely/nonlinearBestFit
|
e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4
|
[
"MIT"
] | null | null | null |
tools.py
|
akerestely/nonlinearBestFit
|
e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
np.random.seed(421)
def hCG(x: np.ndarray, A: float, B: float, alpha: float):
return A * np.exp(-alpha * x) + B
def gen_rand_points(n: int, A: float = 1000, B: float = 3, alpha: float = 0.01, noise: float = 2, consecutive: bool = False):
"""
:param n: number of points to generate
:param A, B, alpha: parameters to hCG function
:param noise: randomly add this much to the result of the hCG function
"""
from numpy.random import random
sparsity = 1
if consecutive is False:
x = random(n) * n * sparsity
x.sort() # just for plot visual effect; does not change results
else :
x = np.linspace(0, n-1, n) * sparsity
y = hCG(x, A, B, alpha)
ynoise = random(n) * noise - noise / 2
y += ynoise
return x, y
def gen_rand_points_and_plot(n: int, A: float, B: float, alpha: float, noise: float, consecutive: bool):
    x, y = gen_rand_points(n, A=A, B=B, alpha=alpha, noise=noise, consecutive=consecutive)
import matplotlib.pyplot as plt
plt.scatter(x, y)
plt.xlabel("$time$")
plt.ylabel("$hCG(time)$")
plt.show()
return x, y
def load_data(required_data_points: int = 3) -> pd.DataFrame:
url = "data/measurements.csv"
data = pd.read_csv(url)
# remove unused columns
data = data.loc[:, data.columns.str.startswith('MTH')]
def name_to_weekcount(s:str) -> int:
tokens = s.split('-')
import re
mth = int(re.search(r'\d+', tokens[0]).group(0)) - 1
wk = 0
        if len(tokens) != 1:
wk = int(re.search(r'\d+', tokens[1]).group(0)) - 1
return mth * 4 + wk
# rename columns
data.columns = pd.Series(data.columns).apply(name_to_weekcount)
# discard entries which have less than required_data_points measurements
data = data[data.count(axis=1) > required_data_points]
return data
def get_x_y(data: pd.DataFrame, row: int) -> (np.ndarray, np.ndarray) :
my_data = data.loc[row:row, :].dropna(axis=1)
x = np.array(my_data.columns[:]) # time
y = my_data.iloc[0,:].values # measurement
return x, y
def plot_real_data(data, from_row = None, to_row = None):
figsize = None
if from_row is not None and to_row is not None:
count = to_row - from_row
if count > 1:
figsize = (10, 5 * count)
data.T.iloc[:, from_row:to_row].dropna(axis=0).plot(kind="line", marker='o', subplots=True, figsize=figsize)
def plot_function(func, x: np.ndarray, y: np.ndarray):
import matplotlib.pyplot as plt
range_param = np.linspace(0, 1)
pt = [func(t, x, y) for t in range_param]
plt.plot(range_param, pt)
plt.show()
def print_rmse_methods(x: np.ndarray, y: np.ndarray, paramsList: list):
"""
param paramsList: array of tuples, where tuple contains A, B and alpha
"""
from sklearn.metrics import mean_squared_error
from math import sqrt
for i, params in enumerate(paramsList):
rmse = sqrt(mean_squared_error(y, hCG(x, *params)))
print(f"Method {i} RMSE: {rmse}")
def plot_methods(x: np.ndarray, y: np.ndarray, paramsList:list , paramsNames: list = [], data_id: str="", showPlot: bool = True):
"""
param paramsList: array of tuples, where tuple contains A, B and alpha
param paramsNames: array of strings, where each sting represents the name of the corresponding param tuple.
The names will appear on the plot. Optional, in which case the name will be the index in the array.
"""
from sklearn.metrics import mean_squared_error
from math import sqrt
import matplotlib.pyplot as plt
plt.xlabel(r"$time$")
plt.ylabel(r"$hCG(time)$")
plt.plot(x, y, 'bo', label=f"data {data_id}")
#print(paramsNames)
for i, params in enumerate(paramsList):
rmse = sqrt(mean_squared_error(y, hCG(x, *params)))
name = paramsNames[i] if i < len(paramsNames) else ("Method " + str(i))
plt.plot(x, hCG(x, *params),
label=f'{name}: A=%5.2f, B=%5.2f, alpha=%5.2f, rmse=%5.2f' % (*params, rmse))
plt.legend()
if showPlot:
plt.show()
# print_rmse_methods(x, y, params, paramsCalc)
def plot_results(x: np.ndarray, y: np.ndarray, ptsStart: int = 0, ptsEnd: int = None, ptsTrain: int = None, data_id: str="", showPlot:bool = True, allAlgorithms:bool = True):
"""
:param ptsStart: use x, y values starting from this point
:param ptsEnd: use x, y values ending at this point
:param ptsTrain: use this much x, y values for training starting from ptsStart
"""
ptsEnd = ptsEnd or len(x)
ptsTrain = ptsTrain or (ptsEnd - ptsStart)
if ptsStart + ptsTrain > ptsEnd:
raise ValueError("Invalid interval for points")
x_train = x[ptsStart : ptsStart + ptsTrain]
y_train = y[ptsStart : ptsStart + ptsTrain]
paramsList = []
paramsNames = []
if allAlgorithms:
try:
from scipy.optimize import curve_fit
popt, _ = curve_fit(hCG, x_train, y_train) # uses Levenberg-Marquardt iterative method
paramsList.append(tuple(popt))
paramsNames.append("Iterative")
except:
pass
try:
from bestfitte import best_fit
paramsList.append(best_fit(x_train, y_train))
paramsNames.append("BestFit")
except:
pass
if allAlgorithms:
try:
from pseloglin import fit
paramsList.append(fit(x_train, y_train))
paramsNames.append("PseLogLin")
except:
pass
plot_methods(x[ptsStart:ptsEnd], y[ptsStart:ptsEnd], paramsList, paramsNames, data_id, showPlot)
def plot_and_get_real_data(row: int) -> (np.ndarray, np.ndarray):
data = load_data()
plot_real_data(data, row, row+1)
return get_x_y(data, row)
def get_real_data(row: int) -> (np.ndarray, np.ndarray):
data = load_data()
return get_x_y(data, row)
def plot_with_inner_plot(x: np.ndarray, y: np.ndarray, limX1: float, limX2: float, limY1: float, limY2: float, zoom: float = 2.5, loc='upper right'):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter(x, y)
plt.xlabel("$time$")
plt.ylabel("$hCG(time)$")
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
axins = zoomed_inset_axes(ax, zoom, loc=loc)
axins.scatter(x, y)
axins.set_xlim(limX1, limX2)
axins.set_ylim(limY1, limY2)
#plt.yticks(visible=False)
#plt.xticks(visible=False)
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
def find_and_plot_best_fit(x: np.ndarray, y: np.ndarray):
import bestfitte
A, B, alpha = bestfitte.best_fit(x, y)
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(y, hCG(x, A, B, alpha)))
import matplotlib.pyplot as plt
plt.scatter(x, y, label='data')
plt.plot(x, hCG(x, A, B, alpha),
label=f'A=%5.2f, B=%5.2f, alpha=%5.2f, rmse=%5.2f' % (A, B, alpha, rmse))
plt.legend()
plt.show()
def find_and_plot_best_fit_param_noise_grid(paramsList, noises):
import matplotlib.pyplot as plt
plt.figure(figsize = (20, 10))
for i, params in enumerate(paramsList):
for j, noise in enumerate(noises):
n:int = 20
x, y = gen_rand_points(n, *params, noise)
plt.subplot(len(paramsList), len(noises), i * len(noises) + j + 1)
plt.scatter(x, y)
import bestfitte
A, B, alpha = bestfitte.best_fit(x, y)
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(y, hCG(x, A, B, alpha)))
import matplotlib.pyplot as plt
plt.scatter(x, y)
plt.plot(np.arange(n), hCG(np.arange(n), A, B, alpha),
label=f'A=%5.2f, B=%5.2f, alpha=%5.2f, noise=%5.2f, \nA=%5.2f, B=%5.2f, alpha=%5.2f, rmse=%5.2f' % (*params, noise, A, B, alpha, rmse))
plt.legend()
def compare_results_on_datasets(datasets: list):
'''
datasets parameter is a list of datasets which contain (x_data, y_data, dataset_name) tuples
'''
import matplotlib.pyplot as plt
plt.figure(figsize = (9*len(datasets), 5))
for i, dataset in enumerate(datasets):
x, y, name = dataset
plt.subplot(1, len(datasets), i + 1)
plot_results(x, y, data_id = name, showPlot=False)
def compare_time_on_datasets(datasets: list = None):
'''
datasets parameter is a list of datasets which contain (x_data, y_data, dataset_name) tuples
if omitted, 10 random dataset will be generated
'''
if datasets is None:
# generate 10 random datasets
paramsList = []
for _ in range(10):
paramsList.append((
np.random.random_integers(3, 20), #n
np.random.random() * 1e3, # A
np.random.random() * 1e1, # B
np.random.random() * 1e1, # alpha
np.random.random() * 1 # noise
))
datasets = []
for params in paramsList:
datasets.append(gen_rand_points(*params) +
(f'n=%d, A=%5.2f, B=%5.2f, alpha=%5.2f, noise=%5.2f' % params,))
from scipy.optimize import curve_fit
from bestfitte import best_fit
from pseloglin import fit
from time import perf_counter
rows = []
for dataset in datasets:
x, y, name = dataset
measurements = {'Dataset' : name}
start = perf_counter()
try:
curve_fit(hCG, x, y)
end = perf_counter()
measurements["Iterative"] = end - start
except:
measurements["Iterative"] = np.nan
start = perf_counter()
try:
best_fit(x, y)
end = perf_counter()
measurements["BestFit"] = end - start
except:
measurements["BestFit"] = np.nan
start = perf_counter()
try:
fit(x, y)
end = perf_counter()
measurements["PseLogLin"] = end - start
except:
measurements["PseLogLin"] = np.nan
rows.append(measurements)
import pandas as pd
df = pd.DataFrame(rows, columns=["Dataset", "Iterative", "BestFit", "PseLogLin"])
df.loc['mean'] = df.mean()
df["Dataset"].values[-1] = "Mean"
#print(df.to_latex(index=False))
return df
def compare_with_less_trained(x: np.ndarray, y: np.ndarray, trainPoints):
'''
trainPoints, array with the number of points to use for train on each subplot
'''
import matplotlib.pyplot as plt
plt.figure(figsize = (9 * len(trainPoints), 10))
    plt.subplot(2, len(trainPoints), len(trainPoints) // 2 + 1)
plot_results(x, y, showPlot=False, allAlgorithms=False, data_id="All")
for i, ptsTrain in enumerate(trainPoints):
plt.subplot(2, len(trainPoints), len(trainPoints) + i + 1)
plot_results(x, y, ptsTrain = ptsTrain, showPlot=False, allAlgorithms=False, data_id=str(ptsTrain) + " points")
plt.plot(x[ptsTrain:], y[ptsTrain:], "o", color="orange")
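A short usage sketch of the helpers above: generate a synthetic hCG-style series and compare the fitting methods, training on only part of it. The parameter values are illustrative; the local bestfitte/pseloglin modules referenced above only need to be importable for their curves to appear.

if __name__ == "__main__":
    x, y = gen_rand_points(30, A=1000, B=3, alpha=0.05, noise=5, consecutive=True)
    plot_results(x, y, ptsTrain=15, data_id="synthetic")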
| 36.537459
| 174
| 0.61594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,250
| 0.200588
|
3ca799dcd7f204dd2b5700a464c22a2701817676
| 925
|
py
|
Python
|
Section 2 - Data (variables, assignments and expressions)/Breakouts/Breakout 2.2 - ATM/convert pseudo-code solution.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | 10
|
2020-02-14T14:28:15.000Z
|
2022-02-02T18:44:11.000Z
|
Section 2 - Data (variables, assignments and expressions)/Breakouts/Breakout 2.2 - ATM/convert pseudo-code solution.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | null | null | null |
Section 2 - Data (variables, assignments and expressions)/Breakouts/Breakout 2.2 - ATM/convert pseudo-code solution.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | 8
|
2020-03-25T09:27:42.000Z
|
2021-11-03T15:24:38.000Z
|
# Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: Solution to Breakout 2.2 (ATM)
# Display a welcome message
print("Welcome to LCCS Bank Ltd.")
print("=========================")
# Initialise a variable called balance to 123.45
balance = 123.45
# Display the value of balance
print("Your balance is:", balance)
# Prompt the user to enter the amount to lodge
amount = float(input("Enter amount to lodge: "))
# Increase the balance by the amount entered
balance = balance + amount
# Display the value of balance
print("Your balance is:", balance)
# Prompt the user to enter the amount to withdraw
amount = float(input("Enter amount to withdraw: "))
# Decrease the balance by the amount entered
balance = balance - amount
# Display the value of balance
print("Your balance is:", round(balance,2) )
| 27.205882
| 52
| 0.692973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 687
| 0.742703
|
3ca93bc9e19f578ac6c9e0e416c1d3d6ec54c6d4
| 460
|
py
|
Python
|
src/unit6/user/user_datastore.py
|
cdoremus/udacity-python_web_development-cs253
|
87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b
|
[
"Apache-2.0"
] | null | null | null |
src/unit6/user/user_datastore.py
|
cdoremus/udacity-python_web_development-cs253
|
87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b
|
[
"Apache-2.0"
] | null | null | null |
src/unit6/user/user_datastore.py
|
cdoremus/udacity-python_web_development-cs253
|
87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Apr 30, 2012
@author: h87966
'''
class UserDataStore():
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
def save(self, user):
pass
def delete(self, user_id):
pass
def fetch(self, user_id):
pass
def fetchAll(self):
pass
def fetchByUsername(self, username):
pass
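A hedged in-memory sketch of the same interface, purely illustrative: it assumes user objects expose user_id and username attributes, which the stub above does not specify.

class InMemoryUserDataStore(UserDataStore):
    def __init__(self):
        self._users = {}

    def save(self, user):
        self._users[user.user_id] = user

    def delete(self, user_id):
        self._users.pop(user_id, None)

    def fetch(self, user_id):
        return self._users.get(user_id)

    def fetchAll(self):
        return list(self._users.values())

    def fetchByUsername(self, username):
        return next((u for u in self._users.values() if u.username == username), None)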
| 12.777778
| 40
| 0.43913
| 410
| 0.891304
| 0
| 0
| 0
| 0
| 0
| 0
| 108
| 0.234783
|
3ca9eb97e4365037a9faa4fd695283f51ac6d5a4
| 3,870
|
py
|
Python
|
sciflo/utils/mail.py
|
hysds/sciflo
|
f706288405c8eee59a2f883bab3dcb5229615367
|
[
"Apache-2.0"
] | null | null | null |
sciflo/utils/mail.py
|
hysds/sciflo
|
f706288405c8eee59a2f883bab3dcb5229615367
|
[
"Apache-2.0"
] | null | null | null |
sciflo/utils/mail.py
|
hysds/sciflo
|
f706288405c8eee59a2f883bab3dcb5229615367
|
[
"Apache-2.0"
] | 1
|
2019-02-07T01:08:34.000Z
|
2019-02-07T01:08:34.000Z
|
from smtplib import SMTP
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.header import Header
from email.utils import parseaddr, formataddr, COMMASPACE, formatdate
from email.encoders import encode_base64
def send_email(sender, cc_recipients, bcc_recipients, subject, body, attachments=[]):
"""Send an email.
All arguments should be Unicode strings (plain ASCII works as well).
Only the real name part of sender and recipient addresses may contain
non-ASCII characters.
The email will be properly MIME encoded and delivered though SMTP to
localhost port 25. This is easy to change if you want something different.
The charset of the email will be the first one out of US-ASCII, ISO-8859-1
and UTF-8 that can represent all the characters occurring in the email.
"""
# combined recipients
recipients = cc_recipients + bcc_recipients
# Header class is smart enough to try US-ASCII, then the charset we
# provide, then fall back to UTF-8.
header_charset = 'ISO-8859-1'
# We must choose the body charset manually
for body_charset in 'US-ASCII', 'ISO-8859-1', 'UTF-8':
try:
body.encode(body_charset)
except UnicodeError:
pass
else:
break
# Split real name (which is optional) and email address parts
sender_name, sender_addr = parseaddr(sender)
parsed_cc_recipients = [parseaddr(rec) for rec in cc_recipients]
parsed_bcc_recipients = [parseaddr(rec) for rec in bcc_recipients]
#recipient_name, recipient_addr = parseaddr(recipient)
# We must always pass Unicode strings to Header, otherwise it will
# use RFC 2047 encoding even on plain ASCII strings.
sender_name = str(Header(str(sender_name), header_charset))
unicode_parsed_cc_recipients = []
for recipient_name, recipient_addr in parsed_cc_recipients:
recipient_name = str(Header(str(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
recipient_addr = recipient_addr.encode('ascii')
unicode_parsed_cc_recipients.append((recipient_name, recipient_addr))
unicode_parsed_bcc_recipients = []
for recipient_name, recipient_addr in parsed_bcc_recipients:
recipient_name = str(Header(str(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
recipient_addr = recipient_addr.encode('ascii')
unicode_parsed_bcc_recipients.append((recipient_name, recipient_addr))
# Make sure email addresses do not contain non-ASCII characters
sender_addr = sender_addr.encode('ascii')
# Create the message ('plain' stands for Content-Type: text/plain)
msg = MIMEMultipart()
msg['CC'] = COMMASPACE.join([formataddr((recipient_name, recipient_addr))
for recipient_name, recipient_addr in unicode_parsed_cc_recipients])
msg['BCC'] = COMMASPACE.join([formataddr((recipient_name, recipient_addr))
for recipient_name, recipient_addr in unicode_parsed_bcc_recipients])
msg['Subject'] = Header(str(subject), header_charset)
msg.attach(MIMEText(body.encode(body_charset), 'plain', body_charset))
# Add attachments
for attachment in attachments:
part = MIMEBase('application', "octet-stream")
part.set_payload(attachment.file.read())
encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % attachment.filename)
msg.attach(part)
# print "#" * 80
# print msg.as_string()
# Send the message via SMTP to localhost:25
smtp = SMTP("localhost")
smtp.sendmail(sender, recipients, msg.as_string())
smtp.quit()
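A hedged usage sketch of send_email; the addresses are illustrative, and delivery goes through localhost:25 as the docstring states.

if __name__ == "__main__":
    send_email(
        sender="Alice Example <alice@example.com>",
        cc_recipients=["Bob Example <bob@example.com>"],
        bcc_recipients=["carol@example.com"],
        subject="Workflow finished",
        body="The sciflo workflow completed successfully.",
    )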
| 42.527473
| 103
| 0.708527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,435
| 0.370801
|
3caa5d8aa46dcaada0dadcfe04d781f5ae6b979d
| 496
|
py
|
Python
|
my-ml-api/api/schemas.py
|
ballcarsen/MyMlTool
|
eb476e21799ec773fa816f63693e6de4c52d0094
|
[
"MIT"
] | null | null | null |
my-ml-api/api/schemas.py
|
ballcarsen/MyMlTool
|
eb476e21799ec773fa816f63693e6de4c52d0094
|
[
"MIT"
] | null | null | null |
my-ml-api/api/schemas.py
|
ballcarsen/MyMlTool
|
eb476e21799ec773fa816f63693e6de4c52d0094
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from pydantic import BaseModel
class UploadBase(BaseModel):
file_name: str
user_id: int
class UploadCreate(UploadBase):
pass
class Upload(UploadBase):
upload_id: int
class Config:
orm_mode = True
class UserBase(BaseModel):
first_name: str
last_name: str
class UserCreate(UserBase):
password: str
class User(UserBase):
user_id: int
uploads: List[Upload] = []
class Config:
orm_mode = True
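A small usage sketch of the schemas above (field values are illustrative):

if __name__ == "__main__":
    upload = Upload(upload_id=1, file_name="model.pkl", user_id=42)
    user = User(user_id=42, first_name="Ada", last_name="Lovelace", uploads=[upload])
    print(user.json())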
| 13.777778
| 33
| 0.677419
| 413
| 0.832661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3caab00869605f81530d9a70561508995ff52b3b
| 2,467
|
py
|
Python
|
apps/extention/views/tool.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 349
|
2020-08-04T10:21:01.000Z
|
2022-03-23T08:31:29.000Z
|
apps/extention/views/tool.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 2
|
2021-01-07T06:17:05.000Z
|
2021-04-01T06:01:30.000Z
|
apps/extention/views/tool.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 70
|
2020-08-24T06:46:14.000Z
|
2022-03-25T13:23:27.000Z
|
from flask import Blueprint
from apps.extention.business.tool import ToolBusiness
from apps.extention.extentions import validation, parse_json_form
from library.api.render import json_detail_render
tool = Blueprint('tool', __name__)
@tool.route('/ip', methods=['GET'])
def tool_ip():
"""
    @api {get} /v1/tool/ip Query information about an IP address
    @apiName GetIpAddress
    @apiGroup Extensions
    @apiDescription Query information about an IP address
    @apiParam {string} ip a valid IP address
@apiParamExample {json} Request-Example:
{
"ip": "110.110.110.12"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": {
"address": "\u4e0a\u6d77\u5e02",
"address_detail": {
"city": "\u4e0a\u6d77\u5e02",
"city_code": 289,
"district": "",
"province": "\u4e0a\u6d77\u5e02",
"street": "",
"street_number": ""
},
"point": {
"x": "13524118.26",
"y":"3642780.37"
}
},
"message":"ok"
}
"""
code, data, address, message = ToolBusiness.get_tool_ip()
return json_detail_render(code, data, message)
@tool.route('/apk/analysis', methods=['POST'])
@validation('POST:tool_apk_analysis_upload')
def apk_analysis_handler():
"""
    @api {post} /v1/tool/apk/analysis Analyse APK package information
    @apiName AnalysisApkInformation
    @apiGroup Extensions
    @apiDescription Analyse APK package information
    @apiParam {apk_download_url} apk download URL of the APK package
@apiParamExample {json} Request-Example:
{
"apk_download_url": "http://tcloud-static.ywopt.com/static/3787c7f2-5caa-434a-9a47-3e6122807ada.apk"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": {
"default_activity": "com.earn.freemoney.cashapp.activity.SplashActivity",
"icon": "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAVr0lEQVR42u2debAdVZ3HP6f79N3ekuQlJOQlARICBCGs",
"label": "Dosh Winner",
"package_name": "com.earn.freemoney.cashapp",
"size": "13.97",
"version_code": "86",
"version_name": "2.0.36"
},
"message": "ok"
}
"""
apk_download_url, type = parse_json_form('tool_apk_analysis_upload')
if apk_download_url:
data = ToolBusiness.apk_analysis(apk_download_url, type)
return json_detail_render(0, data)
else:
return json_detail_render(101, 'apk_download_url is required!')
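A hedged client-side sketch for the endpoints documented above, using requests; the host is illustrative, and the exact fields accepted by the POST body follow the 'tool_apk_analysis_upload' form schema, which is not shown here.

import requests

def query_ip(base_url, ip):
    resp = requests.get(f"{base_url}/v1/tool/ip", params={"ip": ip})
    resp.raise_for_status()
    return resp.json()

def analyse_apk(base_url, apk_download_url):
    resp = requests.post(f"{base_url}/v1/tool/apk/analysis",
                         json={"apk_download_url": apk_download_url})
    resp.raise_for_status()
    return resp.json()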
| 29.369048
| 115
| 0.614512
| 0
| 0
| 0
| 0
| 2,300
| 0.905155
| 0
| 0
| 1,844
| 0.725699
|
3cab08629b30111114e01484ab49b594bbdb9dd0
| 3,948
|
py
|
Python
|
apt_repoman/connection.py
|
memory/repoman
|
4c5cdfba85afcab5a1219fa5629abc457de27ed5
|
[
"Apache-2.0"
] | 1
|
2017-07-01T21:46:40.000Z
|
2017-07-01T21:46:40.000Z
|
apt_repoman/connection.py
|
memory/repoman
|
4c5cdfba85afcab5a1219fa5629abc457de27ed5
|
[
"Apache-2.0"
] | null | null | null |
apt_repoman/connection.py
|
memory/repoman
|
4c5cdfba85afcab5a1219fa5629abc457de27ed5
|
[
"Apache-2.0"
] | 6
|
2017-07-13T21:41:14.000Z
|
2020-08-07T19:40:25.000Z
|
# stdlib imports
import logging
import time
# pypi imports
from boto3 import Session
LOG = logging.getLogger(__name__)
class Connection(object):
def __init__(self, role_arn='', profile_name='', region=None):
self._log = LOG or logging.getLogger(__name__)
self.role_arn = role_arn
self.profile_name = profile_name
self.region = region
self._s3 = None
self._sdb = None
self._sts = None
self._iam = None
self._sns = None
self._session = None
self._caller_id = None
@property
def session(self):
'''Set our object's self._session attribute to a boto3
session object. If profile_name is set, use it to pull a
specific credentials profile from ~/.aws/credentials,
otherwise use the default credentials path.
If role_arn is set, use the first session object to
assume the role, and then overwrite self._session with
a new session object created using the role credentials.'''
if self._session is None:
self._session = self.get_session()
return self._session
@property
def s3(self):
if self._s3 is None:
self._s3 = self.get_resource('s3')
return self._s3
@property
def sdb(self):
if self._sdb is None:
self._sdb = self.get_client('sdb')
return self._sdb
@property
def sts(self):
if self._sts is None:
self._sts = self.get_client('sts')
return self._sts
@property
def iam(self):
if self._iam is None:
self._iam = self.get_client('iam')
return self._iam
@property
def sns(self):
if self._sns is None:
self._sns = self.get_client('sns')
return self._sns
@property
def caller_id(self):
if self._caller_id is None:
self._caller_id = self.sts.get_caller_identity()['Arn']
return self._caller_id
def get_session(self):
if self.profile_name:
self._log.info(
'using AWS credential profile %s', self.profile_name)
try:
kwargs = {'profile_name': self.profile_name}
if self.region:
kwargs['region_name'] = self.region
session = Session(**kwargs)
except Exception as ex:
self._log.fatal(
'Could not connect to AWS using profile %s: %s',
self.profile_name, ex)
raise
else:
self._log.debug(
'getting an AWS session with the default provider')
kwargs = {}
if self.region:
kwargs['region_name'] = self.region
session = Session(**kwargs)
if self.role_arn:
self._log.info(
'attempting to assume STS self.role %s', self.role_arn)
try:
self.role_creds = session.client('sts').assume_role(
RoleArn=self.role_arn,
RoleSessionName='repoman-%s' % time.time(),
DurationSeconds=3600)['Credentials']
except Exception as ex:
self._log.fatal(
'Could not assume self.role %s: %s',
self.role_arn, ex)
raise
kwargs = {
'aws_access_key_id': self.role_creds['AccessKeyId'],
'aws_secret_access_key': self.role_creds['SecretAccessKey'],
'aws_session_token': self.role_creds['SessionToken']}
if self.region:
kwargs['region_name'] = self.region
session = Session(**kwargs)
return session
def get_client(self, service_name):
return self.session.client(service_name)
def get_resource(self, service_name):
return self.session.resource(service_name)
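A brief usage sketch of the Connection helper; the profile, region, and listed resources are illustrative, and real AWS credentials are required for it to do anything.

if __name__ == "__main__":
    conn = Connection(profile_name="default", region="us-east-1")
    print(conn.caller_id)                 # STS identity behind the session
    for bucket in conn.s3.buckets.all():  # S3 resource exposed by the lazy property
        print(bucket.name)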
| 31.584
| 76
| 0.563323
| 3,822
| 0.968085
| 0
| 0
| 1,392
| 0.352584
| 0
| 0
| 880
| 0.222898
|
3cabc6bebd08e9407e6c12b5afc414ea98b75d01
| 1,412
|
py
|
Python
|
setup.py
|
squidfarts/py-program
|
98c3694ffa90b5969eafe1093def9097dfd0d62c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
squidfarts/py-program
|
98c3694ffa90b5969eafe1093def9097dfd0d62c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
squidfarts/py-program
|
98c3694ffa90b5969eafe1093def9097dfd0d62c
|
[
"Apache-2.0"
] | 1
|
2021-02-19T20:32:33.000Z
|
2021-02-19T20:32:33.000Z
|
#!/usr/bin/env python3
###################################################################################
# #
# NAME: setup.py #
# #
# AUTHOR: Michael Brockus. #
# #
# CONTACT: <mailto:michaelbrockus@squidfarts.com> #
# #
# NOTICES: #
# #
# License: Apache 2.0 :http://www.apache.org/licenses/LICENSE-2.0 #
# #
###################################################################################
from setuptools import setup
setup(
name='py-program',
version='0.1.0',
description='Python program',
author='Michael Brockus',
author_email='michaelbrockus@squidfarts.com',
license='Apache-2.0',
include_package_data=True,
packages=['src.main', 'src.main.module']
)
| 50.428571
| 83
| 0.24221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,224
| 0.866856
|
3cac0aa35252a097de5d59a421a354021c1ccdfa
| 21,267
|
py
|
Python
|
paul_analysis/Python/labird/fieldize.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
paul_analysis/Python/labird/fieldize.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
paul_analysis/Python/labird/fieldize.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Methods for interpolating particle lists onto a grid. There are three classic methods:
ngp - Nearest grid point (point interpolation)
cic - Cloud in Cell (linear interpolation)
tsc - Triangular Shaped Cloud (quadratic interpolation)
Each function takes inputs:
Values - list of field values to interpolate, centered on the grid center.
Points - coordinates of the field values
Field - grid to add interpolated points onto
There are also helper functions (convert and convert_centered) to rescale arrays to grid units.
"""
import math
import numpy as np
#Try to import scipy.weave. If we can't, don't worry, we just use the unaccelerated versions
try :
import scipy.weave
except ImportError :
scipy=None
def convert(pos, ngrid,box):
"""Rescales coordinates to grid units.
(0,0) is the lower corner of the grid.
Inputs:
pos - coord array to rescale
ngrid - dimension of grid
box - Size of the grid in units of pos
"""
return pos*(ngrid-1)/float(box)
def convert_centered(pos, ngrid,box):
"""Rescales coordinates to grid units.
(0,0) is the center of the grid
Inputs:
pos - coord array to rescale
ngrid - dimension of grid
box - Size of the grid in units of pos
"""
return pos*(ngrid-1.)/float(box)+(ngrid-1.)/2.
def check_input(pos, field):
"""Checks the position and field values for consistency.
Avoids segfaults in the C code."""
if np.size(pos) == 0:
return 0
dims=np.size(np.shape(field))
if np.max(pos) > np.shape(field)[0] or np.min(pos) < 0:
raise ValueError("Positions outside grid")
if np.shape(pos)[1] < dims:
raise ValueError("Position array not wide enough for field")
return 1
def ngp(pos,values,field):
"""Does nearest grid point for a 2D array.
Inputs:
Values - list of field values to interpolate
Points - coordinates of the field values
Field - grid to add interpolated points onto
Points need to be in grid units
Note: This is implemented in scipy.weave and pure python (in case the weave breaks).
For O(1e5) points both versions are basically instantaneous.
For O(1e7) points the sipy.weave version is about 100 times faster.
"""
if not check_input(pos,field):
return field
nx=np.shape(values)[0]
dims=np.size(np.shape(field))
# Coordinates of nearest grid point (ngp).
ind=np.array(np.rint(pos),dtype=np.int)
#Sum over the 3rd axis here.
expr="""for(int j=0;j<nx;j++){
int ind1=ind(j,0);
int ind2=ind(j,1);
field(ind1,ind2)+=values(j);
}
"""
expr3d="""for(int j=0;j<nx;j++){
int ind1=ind(j,0);
int ind2=ind(j,1);
int ind3=ind(j,2);
field(ind1,ind2,ind3)+=values(j);
}
"""
try:
if dims==2:
scipy.weave.inline(expr,['nx','ind','values','field'],type_converters=scipy.weave.converters.blitz)
elif dims==3:
scipy.weave.inline(expr3d,['nx','ind','values','field'],type_converters=scipy.weave.converters.blitz)
else:
raise ValueError
except Exception:
#Fall back on slow python version.
for j in xrange(0,nx):
field[tuple(ind[j,0:dims])]+=values[j]
return field
def cic(pos, value, field,totweight=None,periodic=False):
"""Does Cloud-in-Cell for a 2D array.
Inputs:
Values - list of field values to interpolate
Points - coordinates of the field values
Field - grid to add interpolated points onto
Points need to be in coordinates where np.max(points) = np.shape(field)
"""
# Some error handling.
if not check_input(pos,field):
return field
nval=np.size(value)
dim=np.shape(field)
nx = dim[0]
dim=np.size(dim)
#-----------------------
# Calculate CIC weights.
#-----------------------
# Coordinates of nearest grid point (ngp).
ng=np.array(np.rint(pos[:,0:dim]),dtype=np.int)
# Distance from sample to ngp.
dng=ng-pos[:,0:dim]
#Setup two arrays for later:
# kk is for the indices, and ww is for the weights.
kk=np.empty([2,nval,dim])
ww=np.empty([2,nval,dim])
# Index of ngp.
kk[1]=ng
# Weight of ngp.
ww[1]=0.5+np.abs(dng)
# Point before ngp.
kk[0]=kk[1]-1 # Index.
ww[0]=0.5-np.abs(dng)
#Take care of the points at the boundaries
tscedge(kk,ww,nx,periodic)
#-----------------------------
# Interpolate samples to grid.
#-----------------------------
# tscweight adds up all tsc weights allocated to a grid point, we need
# to keep track of this in order to compute the temperature.
# Note that total(tscweight) is equal to nrsamples and that
# total(ifield)=n0**3 if sph.plot NE 'sph,temp' (not 1 because we use
# xpos=posx*n0 --> cube length different from EDFW paper).
#index[j] -> kk[0][j,0],kk[0][j,2],kk[0][j,3] -> kk[0][j,:]
extraind=np.zeros(dim-1,dtype=int)
#Perform y=0, z=0 addition
tsc_xind(field,value,totweight,kk,ww,extraind)
if dim > 1:
#Perform z=0 addition
extraind[0]=1
tsc_xind(field,value,totweight,kk,ww,extraind)
if dim > 2:
extraind[1]=1
#Perform the rest of the addition
for yy in xrange(0,2):
extraind[0]=yy
tsc_xind(field,value,totweight,kk,ww,extraind)
    if totweight is None:
return field
else:
return (field,totweight)
def tsc(pos,value,field,totweight=None,periodic=False):
""" NAME: TSC
PURPOSE:
Interpolate an irregularly sampled field using a Triangular Shaped Cloud
EXPLANATION:
This function interpolates an irregularly sampled field to a
regular grid using Triangular Shaped Cloud (nearest grid point
gets weight 0.75-dx**2, points before and after nearest grid
points get weight 0.5*(1.5-dx)**2, where dx is the distance
from the sample to the grid point in units of the cell size).
INPUTS:
pos: Array of coordinates of field samples, in grid units from 0 to nx
value: Array of sample weights (field values). For e.g. a
temperature field this would be the temperature and the
keyword AVERAGE should be set. For e.g. a density field
this could be either the particle mass (AVERAGE should
not be set) or the density (AVERAGE should be set).
field: Array to interpolate onto of size nx,nx,nx
totweight: If this is not None, the routine will to it the weights at each
grid point. You can then calculate the average later.
periodic: Set this keyword if you want a periodic grid.
ie, the first grid point contains samples of both sides of the volume
If this is not true, weight is not conserved (some falls off the edges)
Note: Points need to be in grid units: pos = [0,ngrid-1]
Note 2: If field has fewer dimensions than pos, we sum over the extra dimensions,
and the final indices are ignored.
Example of default allocation of nearest grid points: n0=4, *=gridpoint.
0 1 2 3 Index of gridpoints
* * * * Grid points
|---|---|---|---| Range allocated to gridpoints ([0.0,1.0> --> 0, etc.)
0 1 2 3 4 posx
OUTPUTS:
Returns particles interpolated to field, and modifies input variable of the same name.
PROCEDURE:
Nearest grid point is determined for each sample.
TSC weights are computed for each sample.
Samples are interpolated to the grid.
Grid point values are computed (sum or average of samples).
EXAMPLE:
nx=20
ny=10
posx=randomu(s,1000)
posy=randomu(s,1000)
value=posx**2+posy**2
field=tsc(value,pos,field,/average)
surface,field,/lego
NOTES:
A standard reference for these interpolation methods is: R.W. Hockney
and J.W. Eastwood, Computer Simulations Using Particles (New York:
McGraw-Hill, 1981).
MODIFICATION HISTORY:
Written by Joop Schaye, Feb 1999.
Check for overflow for large dimensions P. Riley/W. Landsman Dec. 1999
Ported to python, cleaned up and drastically shortened using
these new-fangled "function" thingies by Simeon Bird, Feb. 2012
"""
# Some error handling.
if not check_input(pos,field):
return field
nval=np.size(value)
dim=np.shape(field)
nx = dim[0]
dim=np.size(dim)
#-----------------------
# Calculate TSC weights.
#-----------------------
# Coordinates of nearest grid point (ngp).
ng=np.array(np.rint(pos[:,0:dim]),dtype=np.int)
# Distance from sample to ngp.
dng=ng-pos[:,0:dim]
#Setup two arrays for later:
# kk is for the indices, and ww is for the weights.
kk=np.empty([3,nval,dim])
ww=np.empty([3,nval,dim])
# Index of ngp.
kk[1,:,:]=ng
# Weight of ngp.
ww[1,:,:]=0.75-dng**2
# Point before ngp.
kk[0,:,:]=kk[1,:,:]-1 # Index.
dd=1.0-dng # Distance to sample.
ww[0]=0.5*(1.5-dd)**2 # TSC-weight.
# Point after ngp.
kk[2,:,:]=kk[1,:,:]+1 # Index.
dd=1.0+dng # Distance to sample.
ww[2]=0.5*(1.5-dd)**2 # TSC-weight.
#Take care of the points at the boundaries
tscedge(kk,ww,nx,periodic)
#-----------------------------
# Interpolate samples to grid.
#-----------------------------
# tscweight adds up all tsc weights allocated to a grid point, we need
# to keep track of this in order to compute the temperature.
# Note that total(tscweight) is equal to nrsamples and that
# total(ifield)=n0**3 if sph.plot NE 'sph,temp' (not 1 because we use
# xpos=posx*n0 --> cube length different from EDFW paper).
#index[j] -> kk[0][j,0],kk[0][j,2],kk[0][j,3] -> kk[0][j,:]
extraind=np.zeros(dim-1,dtype=int)
#Perform y=0, z=0 addition
tsc_xind(field,value,totweight,kk,ww,extraind)
if dim > 1:
#Perform z=0 addition
for yy in xrange(1,3):
extraind[0]=yy
tsc_xind(field,value,totweight,kk,ww,extraind)
if dim > 2:
#Perform the rest of the addition
for zz in xrange(1,3):
for yy in xrange(0,3):
extraind[0]=yy
extraind[1]=zz
tsc_xind(field,value,totweight,kk,ww,extraind)
    if totweight is None:
return field
else:
return (field,totweight)
def cic_str(pos,value,field,in_radii,periodic=False):
"""This is exactly the same as the cic() routine, above, except
that instead of each particle being stretched over one grid point,
it is stretched over a cubic region with some radius.
Field must be 2d
Extra arguments:
radii - Array of particle radii in grid units.
"""
# Some error handling.
if not check_input(pos,field):
return field
nval=np.size(value)
dim=np.shape(field)
nx = dim[0]
dim=np.size(dim)
if dim != 2:
raise ValueError("Non 2D grid not supported!")
#Use a grid cell radius of 2/3 (4 \pi /3 )**(1/3) s
#This means that l^3 = cell volume for AREPO (so it should be more or less exact)
#and is close to the l = 0.5 (4\pi/3)**(1/3) s
#cic interpolation that Nagamine, Springel & Hernquist used
#to approximate their SPH smoothing
corr=2./3.*(4*math.pi/3.)**0.3333333333
radii=np.array(corr*in_radii)
#If the smoothing length is below a single grid cell,
#stretch it.
ind = np.where(radii < 0.5)
radii[ind]=0.5
#Weight of each cell
weight = value/(2*radii)**dim
#Upper and lower bounds
up = pos[:,1:dim+1]+np.repeat(np.transpose([radii,]),dim,axis=1)
low = pos[:,1:dim+1]-np.repeat(np.transpose([radii,]),dim,axis=1)
#Upper and lower grid cells to add to
upg = np.array(np.floor(up),dtype=int)
lowg = np.array(np.floor(low),dtype=int)
#Deal with the edges
if periodic:
raise ValueError("Periodic grid not supported")
else:
ind=np.where(up > nx-1)
up[ind] = nx
upg[ind]=nx-1
ind=np.where(low < 0)
low[ind]=0
lowg[ind]=0
expr="""for(int p=0;p<nval;p++){
//Temp variables
double wght = weight(p);
int ilx=lowg(p,0);
int ily=lowg(p,1);
int iux=upg(p,0);
int iuy=upg(p,1);
double lx=low(p,0);
double ly=low(p,1);
double ux=up(p,0);
double uy=up(p,1);
//Deal with corner values
field(ilx,ily)+=(ilx+1-lx)*(ily+1-ly)*wght;
field(iux,ily)+=(ux-iux)*(ily+1-ly)*wght;
field(ilx,iuy)+=(ilx+1-lx)*(uy-iuy)*wght;
field(iux,iuy)+=(ux-iux)*(uy-iuy)*wght;
//Edges in y
for(int gx=ilx+1;gx<iux;gx++){
field(gx,ily)+=(ily+1-ly)*wght;
field(gx,iuy)+=(uy-iuy)*wght;
}
//Central region
for(int gy=ily+1;gy< iuy;gy++){
//Edges.
field(ilx,gy)+=(ilx+1-lx)*wght;
field(iux,gy)+=(ux-iux)*wght;
//x-values
for(int gx=ilx+1;gx<iux;gx++){
field(gx,gy)+=wght;
}
}
}
"""
try:
scipy.weave.inline(expr,['nval','upg','lowg','field','up','low','weight'],type_converters=scipy.weave.converters.blitz)
except Exception:
for p in xrange(0,nval):
#Deal with corner values
field[lowg[p,0],lowg[p,1]]+=(lowg[p,0]+1-low[p,0])*(lowg[p,1]+1-low[p,1])*weight[p]
field[upg[p,0],lowg[p,1]]+=(up[p,0]-upg[p,0])*(lowg[p,1]+1-low[p,1])*weight[p]
field[lowg[p,0],upg[p,1]]+=(lowg[p,0]+1-low[p,0])*(up[p,1]-upg[p,1])*weight[p]
field[upg[p,0], upg[p,1]]+=(up[p,0]-upg[p,0])*(up[p,1]-upg[p,1])*weight[p]
#Edges in y
for gx in xrange(lowg[p,0]+1,upg[p,0]):
field[gx,lowg[p,1]]+=(lowg[p,1]+1-low[p,1])*weight[p]
field[gx,upg[p,1]]+=(up[p,1]-upg[p,1])*weight[p]
#Central region
for gy in xrange(lowg[p,1]+1,upg[p,1]):
#Edges in x
field[lowg[p,0],gy]+=(lowg[p,0]+1-low[p,0])*weight[p]
field[upg[p,0],gy]+=(up[p,0]-upg[p,0])*weight[p]
#x-values
for gx in xrange(lowg[p,0]+1,upg[p,0]):
field[gx,gy]+=weight[p]
return field
from _fieldize_priv import _SPH_Fieldize
# this takes forever!!!!
# Typical call: fieldize.sph_str(coords,mHI,sub_nHI_grid[ii],ismooth,weights=weights, periodic=True)
def sph_str(pos,value,field,radii,weights=None,periodic=False):
"""Interpolate a particle onto a grid using an SPH kernel.
This is similar to the cic_str() routine, but spherical.
Field must be 2d
Extra arguments:
radii - Array of particle radii in grid units.
weights - Weights to divide each contribution by.
"""
# Some error handling.
if np.size(pos)==0:
return field
dim=np.shape(field)
if np.size(dim) != 2:
raise ValueError("Non 2D grid not supported!")
    if weights is None:
weights = np.array([0.])
#Cast some array types
if pos.dtype != np.float32:
pos = np.array(pos, dtype=np.float32)
if radii.dtype != np.float32:
radii = np.array(radii, dtype=np.float32)
if value.dtype != np.float32:
value = np.array(value, dtype=np.float32)
field += _SPH_Fieldize(pos, radii, value, weights,periodic,dim[0])
return
import scipy.integrate as integ
def integrate_sph_kernel(h,gx,gy):
"""Compute the integrated sph kernel for a particle with
smoothing length h, at position pos, for a grid-cell at gg"""
#Fast method; use the value at the grid cell.
#Bad if h < grid cell radius
r0 = np.sqrt((gx+0.5)**2+(gy+0.5)**2)
if r0 > h:
return 0
h2 = h*h
#Do the z integration with the trapezium rule.
#Evaluate this at some fixed (well-chosen) abcissae
zc=0
if h/2 > r0:
zc=np.sqrt(h2/4-r0**2)
zm = np.sqrt(h2-r0**2)
zz=np.array([zc,(3*zc+zm)/4.,(zc+zm)/2.,(zc+3*zm)/2,zm])
kern = sph_kern2(np.sqrt(zz**2+r0**2),h)
total= 2*integ.simps(kern,zz)
if h/2 > r0:
zz=np.array([0,zc/8.,zc/4.,3*zc/8,zc/2.,5/8.*zc,3*zc/4.,zc])
kern = sph_kern1(np.sqrt(zz**2+r0**2),h)
total+= 2*integ.simps(kern,zz)
return total
def do_slow_sph_integral(h,gx,gy):
"""Evaluate the very slow triple integral to find kernel contribution. Only do it when we must."""
#z limits are -h - > h, for simplicity.
#x and y limits are grid cells
(weight,err)=integ.tplquad(sph_cart_wrap,-h,h,lambda x: gx,lambda x: gx+1,lambda x,y: gy,lambda x,y:gy+1,args=(h,),epsabs=5e-3)
return weight
def sph_cart_wrap(z,y,x,h):
"""Cartesian wrapper around sph_kernel"""
r = np.sqrt(x**2+y**2+z**2)
return sph_kernel(r,h)
def sph_kern1(r,h):
"""SPH kernel for 0 < r < h/2"""
return 8/math.pi/h**3*(1-6*(r/h)**2+6*(r/h)**3)
def sph_kern2(r,h):
"""SPH kernel for h/2 < r < h"""
return 2*(1-r/h)**3*8/math.pi/h**3
def sph_kernel(r,h):
"""Evaluates the sph kernel used in gadget."""
if r > h:
return 0
elif r > h/2:
return 2*(1-r/h)**3*8/math.pi/h**3
else:
return 8/math.pi/h**3*(1-6*(r/h)**2+6*(r/h)**3)
def tscedge(kk,ww,ngrid,periodic):
"""This function takes care of the points at the grid boundaries,
either by wrapping them around the grid (the Julie Andrews sense)
or by throwing them over the side (the Al Pacino sense).
Arguments are:
kk - the grid indices
ww - the grid weights
nx - the number of grid points
periodic - Julie or Al?
"""
if periodic:
#If periodic, the nearest grid indices need to wrap around
#Note python has a sensible remainder operator
#which always returns > 0 , unlike C
kk=kk%ngrid
else:
#Find points outside the grid
ind=np.where(np.logical_or((kk < 0),(kk > ngrid-1)))
#Set the weights of these points to zero
ww[ind]=0
#Indices of these points now do not matter, so set to zero also
kk[ind]=0
def tscadd(field,index,weight,value,totweight):
"""This function is a helper for the tsc and cic routines. It adds
the weighted value to the field and optionally calculates the total weight.
Returns nothing, but alters field
"""
nx=np.size(value)
dims=np.size(np.shape(field))
    total = totweight is not None
#Faster C version of this function: this is getting a little out of hand.
expr="""for(int j=0;j<nx;j++){
int ind1=index(j,0);
int ind2=index(j,1);
"""
if dims == 3:
expr+="""int ind3=index(j,2);
field(ind1,ind2,ind3)+=weight(j)*value(j);
"""
if total:
expr+=" totweight(ind1,ind2,ind3) +=weight(j);"
if dims == 2:
expr+="""field(ind1,ind2)+=weight(j)*value(j);
"""
if total:
expr+=" totweight(ind1,ind2) +=weight(j);"
expr+="}"
try:
if dims==2 or dims == 3:
if total:
scipy.weave.inline(expr,['nx','index','value','field','weight','totweight'],type_converters=scipy.weave.converters.blitz)
else:
scipy.weave.inline(expr,['nx','index','value','field','weight'],type_converters=scipy.weave.converters.blitz)
else:
raise ValueError
except Exception:
wwval=weight*value
for j in xrange(0,nx):
ind=tuple(index[j,:])
field[ind]+=wwval[j]
            if totweight is not None:
totweight[ind]+=weight[j]
return
def get_tscweight(ww,ii):
"""Calculates the TSC weight for a particular set of axes.
ii should be a vector of length dims having values 0,1,2.
(for CIC a similar thing but ii has values 0,1)
eg, call as:
get_tscweight(ww,[0,0,0])
"""
tscweight=1.
#tscweight = \Pi ww[1]*ww[2]*ww[3]
for j in xrange(0,np.size(ii)):
tscweight*=ww[ii[j],:,j]
return tscweight
def tsc_xind(field,value,totweight,kk,ww,extraind):
"""Perform the interpolation along the x-axis.
extraind argument contains the y and z indices, if needed.
So for a 1d interpolation, extraind=[], for 2d,
extraind=[y,], for 3d, extraind=[y,z]
Returns nothing, but alters field
"""
dims=np.size(extraind)+1
dim_list=np.zeros(dims,dtype=int)
dim_list[1:dims]=extraind
    # Work on an integer copy so the per-pass assignments below do not overwrite kk[0]
    index = np.array(kk[0], dtype=int)
#Set up the index to have the right kk values depending on the y,z axes
for i in xrange(1,dims):
index[:,i]=kk[extraind[i-1],:,i]
#Do the addition for each value of x
for i in xrange(0,np.shape(kk)[0]):
dim_list[0]=i
tscweight=get_tscweight(ww,dim_list)
index[:,0]=kk[i,:,0]
tscadd(field,index,tscweight,value,totweight)
return
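A short usage sketch of the interpolators above: deposit unit-mass particles onto a 2D grid with cloud-in-cell. The values are illustrative; like the module itself, it targets the Python 2 / scipy.weave era, so the fallback loops below rely on xrange being available.

if __name__ == "__main__":
    ngrid, box, npart = 64, 100.0, 1000
    pts = np.random.uniform(0, box, size=(npart, 2))   # particle positions in box units
    mass = np.ones(npart)                              # unit masses
    grid = np.zeros((ngrid, ngrid))
    cic(convert(pts, ngrid, box), mass, grid)          # deposit with cloud-in-cell weights
    print("total deposited mass: %f" % grid.sum())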
| 34.246377
| 137
| 0.590022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12,322
| 0.579395
|
3cad04b55e10337da5937edce699d46c3369e96d
| 1,607
|
py
|
Python
|
epytope/test/DummyAdapter.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/test/DummyAdapter.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/test/DummyAdapter.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
# This code is part of the epytope distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
.. module:: DummyAdaper
:synopsis: Contains a pseudo data base adapter for testing purposes.
.. moduleauthor:: schubert, brachvogel
"""
import copy
from epytope.IO.ADBAdapter import ADBAdapter, EAdapterFields
class DummyAdapter(ADBAdapter):
def __init__(self):
pass
def get_product_sequence(self, product_refseq, **kwargs):
# TODO: also implement this one?
pass
def get_transcript_sequence(self, transcript_refseq, **kwargs):
# TODO: also implement this one?
pass
def get_transcript_information(self, transcript_refseq, **kwargs):
"""
At the moment we only use this method.
:param transcript_refseq: Refseq id of transcript
:type transcript_refseq: str.
:return: Dictionary with (EAdapterFields: <field content>
relevant: GENE = gene id, STRAND = +/-, SEQ = transcript sequence
"""
tsc_1 = {
EAdapterFields.SEQ: "AAAAACCCCCGGGGG", # 15 * C
EAdapterFields.GENE: "gene_1", # gene id
EAdapterFields.STRAND: "+", # normal 5' to 3'
}
tsc_2 = {
EAdapterFields.SEQ: "GGGGGCCCCCAAAAA", # 15 * C
EAdapterFields.GENE: "gene_1", # gene id
EAdapterFields.STRAND: "+", # normal 5' to 3'
}
res = {
"tsc_1": tsc_1,
"tsc_2": tsc_2
}
return copy.deepcopy(res[transcript_refseq])
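A tiny usage sketch of the dummy adapter above (intended for test code only):

if __name__ == "__main__":
    adapter = DummyAdapter()
    info = adapter.get_transcript_information("tsc_1")
    print(info[EAdapterFields.GENE], info[EAdapterFields.STRAND], info[EAdapterFields.SEQ])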
| 31.509804
| 73
| 0.6229
| 1,221
| 0.759801
| 0
| 0
| 0
| 0
| 0
| 0
| 805
| 0.500933
|
3cad775a80e54adc9a4854ed12070f7e895a7dd6
| 2,819
|
py
|
Python
|
backend/plugins/nav_bar/migrations/0008_migrate_to_link_all_base.py
|
marksweb/django-cms-60min-demo-2021
|
d9ca83538d6c5c7a0b0e1a18ae1a15bda4c296e4
|
[
"MIT"
] | null | null | null |
backend/plugins/nav_bar/migrations/0008_migrate_to_link_all_base.py
|
marksweb/django-cms-60min-demo-2021
|
d9ca83538d6c5c7a0b0e1a18ae1a15bda4c296e4
|
[
"MIT"
] | 1
|
2022-01-15T11:29:16.000Z
|
2022-01-15T22:11:45.000Z
|
backend/plugins/nav_bar/migrations/0008_migrate_to_link_all_base.py
|
marksweb/django-cms-60min-demo-2021
|
d9ca83538d6c5c7a0b0e1a18ae1a15bda4c296e4
|
[
"MIT"
] | 3
|
2022-01-14T15:55:00.000Z
|
2022-01-23T23:46:56.000Z
|
# Generated by Django 2.2.16 on 2020-09-17 16:00
from django.db import migrations, models
import django.db.models.deletion
import enumfields.fields
import link_all.models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('nav_bar', '0007_add_field_is_use_multi_level_menu_on_mobile'),
]
operations = [
migrations.AddField(
model_name='menuitemmodel',
name='link_button_color',
field=enumfields.fields.EnumField(blank=True, default='primary', enum=link_all.models.ButtonColor, max_length=64, verbose_name='Color'),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType'),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_instance_pk',
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_is_button',
field=models.BooleanField(default=False, verbose_name='Render as button'),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_is_button_full_width',
field=models.BooleanField(default=False, verbose_name='Full width'),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_is_button_outlined',
field=models.BooleanField(default=False, verbose_name='Transparent body'),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_is_open_in_new_tab',
field=models.BooleanField(default=False, verbose_name='Open in a new tab'),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_label',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_type',
field=enumfields.fields.EnumField(default='url', enum=link_all.models.LinkType, max_length=64),
),
migrations.AddField(
model_name='menuitemmodel',
name='link_url',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='navbarpluginmodel',
name='is_use_multi_level_menu_on_mobile',
field=models.BooleanField(default=False, help_text='The multi-level menu shows around 3-4 levels of children.', verbose_name='Use multi-level menu on mobile'),
),
]
| 38.616438
| 171
| 0.630011
| 2,644
| 0.937921
| 0
| 0
| 0
| 0
| 0
| 0
| 743
| 0.263569
|
3cadd23dc28e0931be3476bf361e1ba65acc6956
| 4,187
|
py
|
Python
|
test/unit/utils/test_expiration_queue.py
|
dolphinridercrypto/bxcommon
|
8f70557c1dbff785a5dd3fcdf91176066e085c3a
|
[
"MIT"
] | 12
|
2019-11-06T17:39:10.000Z
|
2022-03-01T11:26:19.000Z
|
test/unit/utils/test_expiration_queue.py
|
dolphinridercrypto/bxcommon
|
8f70557c1dbff785a5dd3fcdf91176066e085c3a
|
[
"MIT"
] | 8
|
2019-11-06T21:31:11.000Z
|
2021-06-02T00:46:50.000Z
|
test/unit/utils/test_expiration_queue.py
|
dolphinridercrypto/bxcommon
|
8f70557c1dbff785a5dd3fcdf91176066e085c3a
|
[
"MIT"
] | 5
|
2019-11-14T18:08:11.000Z
|
2022-02-08T09:36:22.000Z
|
import time
import unittest
from mock import MagicMock
from bxcommon.utils.expiration_queue import ExpirationQueue
class ExpirationQueueTests(unittest.TestCase):
def setUp(self):
self.time_to_live = 60
self.queue = ExpirationQueue(self.time_to_live)
self.removed_items = []
def test_expiration_queue(self):
# adding 2 items to the queue with 1 second difference
item1 = 1
item2 = 2
self.queue.add(item1)
time_1_added = time.time()
time.time = MagicMock(return_value=time.time() + 1)
self.queue.add(item2)
time_2_added = time.time()
self.assertEqual(len(self.queue), 2)
self.assertEqual(int(time_1_added), int(self.queue.get_oldest_item_timestamp()))
self.assertEqual(item1, self.queue.get_oldest())
# check that nothing is removed from queue before the first item expires
self.queue.remove_expired(time_1_added + self.time_to_live / 2, remove_callback=self._remove_item)
self.assertEqual(len(self.queue), 2)
self.assertEqual(len(self.removed_items), 0)
# check that first item removed after first item expired
self.queue.remove_expired(time_1_added + self.time_to_live + 1, remove_callback=self._remove_item)
self.assertEqual(len(self.queue), 1)
self.assertEqual(len(self.removed_items), 1)
self.assertEqual(self.removed_items[0], item1)
self.assertEqual(int(time_2_added), int(self.queue.get_oldest_item_timestamp()))
self.assertEqual(item2, self.queue.get_oldest())
# check that second item is removed after second item expires
self.queue.remove_expired(time_2_added + self.time_to_live + 1, remove_callback=self._remove_item)
self.assertEqual(len(self.queue), 0)
self.assertEqual(len(self.removed_items), 2)
self.assertEqual(self.removed_items[0], item1)
self.assertEqual(self.removed_items[1], item2)
def test_remove_oldest_item(self):
items_count = 10
for i in range(items_count):
self.queue.add(i)
self.assertEqual(items_count, len(self.queue))
removed_items_1 = []
for i in range(items_count):
self.assertEqual(i, self.queue.get_oldest())
self.queue.remove_oldest(removed_items_1.append)
self.queue.add(1000 + i)
for i in range(items_count):
self.assertEqual(i, removed_items_1[i])
self.assertEqual(items_count, len(self.queue))
removed_items_2 = []
for i in range(items_count):
self.assertEqual(i + 1000, self.queue.get_oldest())
self.queue.remove_oldest(removed_items_2.append)
for i in range(items_count):
self.assertEqual(i + 1000, removed_items_2[i])
self.assertEqual(0, len(self.queue))
def test_remove_not_oldest_item(self):
# adding 2 items to the queue with 1 second difference
item1 = 9
item2 = 5
self.queue.add(item1)
time_1_added = time.time()
time.time = MagicMock(return_value=time.time() + 1)
self.queue.add(item2)
self.assertEqual(len(self.queue), 2)
self.assertEqual(int(time_1_added), int(self.queue.get_oldest_item_timestamp()))
self.assertEqual(item1, self.queue.get_oldest())
self.queue.remove(item2)
self.assertEqual(len(self.queue), 1)
self.assertEqual(int(time_1_added), int(self.queue.get_oldest_item_timestamp()))
self.assertEqual(item1, self.queue.get_oldest())
def test_remove_oldest_items_with_limits(self):
time.time = MagicMock(return_value=time.time())
for i in range(20):
self.queue.add(i)
time.time = MagicMock(return_value=time.time() + 5)
self.assertEqual(20, len(self.queue))
time.time = MagicMock(return_value=time.time() + self.time_to_live)
self.queue.remove_expired(limit=5)
self.assertEqual(15, len(self.queue))
self.queue.remove_expired()
self.assertEqual(0, len(self.queue))
def _remove_item(self, item):
self.removed_items.append(item)
| 34.319672
| 106
| 0.662288
| 4,067
| 0.97134
| 0
| 0
| 0
| 0
| 0
| 0
| 297
| 0.070934
|
3caefd3f5a8bfe14855d5ea0372e3bc9a9317bc4
| 480
|
py
|
Python
|
legacy-code/pailindrome.py
|
developbiao/pythonbasics
|
a7549786629e820646dcde5bb9f1aad4331de9be
|
[
"MIT"
] | 1
|
2019-06-13T15:33:57.000Z
|
2019-06-13T15:33:57.000Z
|
legacy-code/pailindrome.py
|
developbiao/pythonbasics
|
a7549786629e820646dcde5bb9f1aad4331de9be
|
[
"MIT"
] | null | null | null |
legacy-code/pailindrome.py
|
developbiao/pythonbasics
|
a7549786629e820646dcde5bb9f1aad4331de9be
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
def is_palindrome(n):
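    # Reverse the digits arithmetically and compare the result with the original number.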
x = n
op_num = 0
while n:
op_num = op_num * 10 + n % 10
n = n//10
return x == op_num
# Test
output = filter(is_palindrome, range(1, 1000))
print('1~1000:', list(output))
if list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:
    print('Test passed!')
else:
    print('Test failed!')
| 25.263158
| 163
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.179435
|
3cafbcdeecba4bc828647c5d5e2a12435c74df80
| 776
|
py
|
Python
|
spotify_search/search.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | 1
|
2021-07-29T16:09:30.000Z
|
2021-07-29T16:09:30.000Z
|
spotify_search/search.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | null | null | null |
spotify_search/search.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | null | null | null |
from importlib import import_module
from flask import Flask, request, jsonify
from .spotify_api import get_spotify_response
app = Flask(__name__)
app.config.from_object("spotify_search.settings")
@app.route("/search", methods=["GET"])
def search():
search_term = request.args.get("search_term", "")
limit = request.args.get("limit")
search_type = request.args.get("type")
assert search_type in ["artist", "track", "album"]
json_response = get_spotify_response(
search_term,
limit=limit,
search_type=search_type
)
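    # Dynamically resolve the matching parser: parse_artists, parse_tracks or parse_albums.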
utils_module = import_module("spotify_search.utils")
parse_method = getattr(utils_module, f"parse_{search_type}s")
search_results = parse_method(json_response)
return jsonify(search_results)
| 26.758621
| 65
| 0.719072
| 0
| 0
| 0
| 0
| 573
| 0.738402
| 0
| 0
| 134
| 0.17268
|
3cb1615543f6a7b7ba1580acd4a1477cfa004ce2
| 3,940
|
py
|
Python
|
Python/src/controllers/MainController.py
|
Jictyvoo/EXA868--PathFinder
|
1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a
|
[
"MIT"
] | null | null | null |
Python/src/controllers/MainController.py
|
Jictyvoo/EXA868--PathFinder
|
1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a
|
[
"MIT"
] | null | null | null |
Python/src/controllers/MainController.py
|
Jictyvoo/EXA868--PathFinder
|
1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a
|
[
"MIT"
] | null | null | null |
import math
from models.business.OrganismController import OrganismController
from models.value.Finder import Finder
from models.value.Labyrinth import Labyrinth
class MainController:
def __init__(self):
self.__labyrinth = Labyrinth("../config.json")
self.__labyrinth.loadLabyrinth("../labyrinth.la")
self.__controllerOrganism = OrganismController(Finder, self.__labyrinth.getBeginPosition())
self.__genomeDecoder = ("UP", "RIGHT", "DOWN", "LEFT")
self.__stateDecoder = {'alive': 0, 'dead': -1, 'finished': 1}
self.__ending = self.__labyrinth.getEndingPosition()
self.__have_finished = False
self.__generations_finished = 0
self.__generations_fitness_average = []
self.__best_fitness = []
self.__best_organisms = []
def finished_generations(self):
return self.__generations_finished
def get_generations_fitness_average(self):
return self.__generations_fitness_average
def get_best_fitness(self):
return self.__best_fitness
def get_genome_decoder(self):
return self.__genomeDecoder
def get_labyrinth(self):
return self.__labyrinth
def get_best_one(self):
return self.__controllerOrganism.getSmallerPath(list_to_order=self.__best_organisms)[0]
def __calculate_fitness(self, organism):
x_diference = organism.getPosition()['x']
x_diference = x_diference - self.__ending['x']
y_diference = organism.getPosition()['y']
y_diference = y_diference - self.__ending['y']
# return math.sqrt(math.pow(x_diference, 2) + math.pow(y_diference, 2))
return math.fabs(x_diference) + math.fabs(y_diference)
def move(self, organisms):
for organism in organisms:
count = 0
for genome in organism.getGenome():
if organism.getState() == self.__stateDecoder['alive']:
position = organism.getPosition()
has_moved = self.__labyrinth.move(self.__genomeDecoder[genome], position)
if has_moved:
organism.updateFitness(1)
organism.setPosition(has_moved)
if self.__labyrinth.isAtFinal(has_moved):
organism.updateFitness(100)
organism.setState(self.__stateDecoder['finished'])
organism.setLast(count)
print("Generation: " + str(organism.getGeneration()), organism.getGenome())
self.__have_finished = True
else:
organism.updateFitness(-5)
# organism.setState(self.stateDecoder['dead'])
count = count + 1
if organism.getState() == self.__stateDecoder['dead']:
organism.updateFitness(-10)
organism.updateFitness(-10 * self.__calculate_fitness(organism))
# print(organism.getPosition())
begin_position = self.__labyrinth.getBeginPosition()
organism.setPosition({'x': begin_position['x'], 'y': begin_position['y']})
def execute(self):
organisms = self.__controllerOrganism.getOrganisms()
if not organisms:
return None
self.move(organisms)
if self.__have_finished:
self.__generations_finished = self.__generations_finished + 1
self.__have_finished = False
self.__generations_fitness_average.append(self.__controllerOrganism.average_fitness())
mom, dad = self.__controllerOrganism.selectBestOnes()
self.__best_fitness.append(mom.getFitness())
self.__best_organisms.append(mom)
self.__controllerOrganism.crossover(mom, dad, 0.05)
if mom.getGeneration() % 11 == 0:
self.__controllerOrganism.saveGenomes("../LastsGenomes.json")
| 39.4
| 103
| 0.628173
| 3,774
| 0.957868
| 0
| 0
| 0
| 0
| 0
| 0
| 310
| 0.07868
|
3cb181b4a78692a5068ea6ba57d0e24bbe0db8c2
| 3,386
|
py
|
Python
|
accounts/views.py
|
callmewind/billdev
|
fcd53cb98284677fb619abeafb17a88035aabfd6
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
callmewind/billdev
|
fcd53cb98284677fb619abeafb17a88035aabfd6
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
callmewind/billdev
|
fcd53cb98284677fb619abeafb17a88035aabfd6
|
[
"MIT"
] | null | null | null |
from django.views.generic.edit import CreateView
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import RedirectView
from django.conf import settings
from .forms import *
class ActivateAccountTokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
return (
str(user.pk) + str(timestamp) + str(user.is_active)
)
class SignUpView(CreateView):
template_name = 'accounts/sign-up.html'
form_class = SignUpForm
def form_valid(self, form):
from django.template.response import TemplateResponse
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.core.mail import send_mail
from django.urls import reverse
import urllib
user = form.save()
token_generator = ActivateAccountTokenGenerator()
activation_link = self.request.build_absolute_uri(
reverse('accounts:activate', kwargs={
'uidb64' : urlsafe_base64_encode(force_bytes(user.pk)).decode(),
'token': token_generator.make_token(user)
})
)
context = {
'user' : user,
'activation_link' : activation_link
}
send_mail(
_('Activate your account'),
activation_link,
'test@example.com',
[ user.email ],
html_message=activation_link)
#send_mail(user.site, 'guides/email/promo-confirm-email.html', user.email, _('Just one click to access to your Guide %(mobile_emoji)s' % {'mobile_emoji': u"\U0001F4F2" }), context, user.web_language)
return TemplateResponse(self.request, 'accounts/sign-up-confirm.html', { 'email': user.email })
def dispatch(self, request, *args, **kwargs):
if self.request.user.is_authenticated:
from django.shortcuts import redirect
return redirect(settings.LOGIN_REDIRECT_URL)
return super().dispatch(request, *args, **kwargs)
class ActivateView(RedirectView):
url = settings.LOGIN_REDIRECT_URL
def dispatch(self, request, *args, **kwargs):
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from django.http import Http404
from .models import User
try:
user = User.objects.get(pk=force_text(urlsafe_base64_decode(self.kwargs['uidb64'])))
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
raise Http404
token_generator = ActivateAccountTokenGenerator()
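        # Either the already-authenticated user must match the link's user, or the token must validate before activation.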
if request.user.is_authenticated:
if user.pk != request.user.pk:
raise Http404
elif token_generator.check_token(user, self.kwargs['token']):
from django.contrib.auth import login
from django.contrib import messages
user.is_active = True
user.save()
login(request, user, 'django.contrib.auth.backends.ModelBackend')
messages.success(request, _('Your account has been activated. Welcome!'))
return super().dispatch(request, *args, **kwargs)
else:
raise Http404
| 36.804348
| 208
| 0.646486
| 3,098
| 0.914944
| 0
| 0
| 0
| 0
| 0
| 0
| 460
| 0.135854
|
3cb5796f6762e147de6c1a95dfd1c12f82cf44f8
| 241
|
py
|
Python
|
hw-2/useful_modules.py
|
Atlasshrugs00/astr-119
|
be30734d2580acd947e5b2e22e3039d0d42419f3
|
[
"MIT"
] | null | null | null |
hw-2/useful_modules.py
|
Atlasshrugs00/astr-119
|
be30734d2580acd947e5b2e22e3039d0d42419f3
|
[
"MIT"
] | 8
|
2021-09-24T04:02:52.000Z
|
2021-12-09T05:45:22.000Z
|
hw-2/useful_modules.py
|
Atlasshrugs00/astr-119
|
be30734d2580acd947e5b2e22e3039d0d42419f3
|
[
"MIT"
] | null | null | null |
import numpy as np #numpy library
import matplotlib.pyplot as plt #matplotlib pyplot
import sys #access to the C-like sys library
import os #gives access to operating system
print(sys.argv) #prints any command line arguments
print(os.getcwd())
| 26.777778
| 50
| 0.792531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 127
| 0.526971
|
3cb70deff93c19ea3ca28c0dcdec1ef4bed01acf
| 3,532
|
py
|
Python
|
Custom/text.py
|
SemLaan/Hotel-review-sentiment-analysis
|
b7fd22dcea63bab1c7fe666a7f4912931de1f4dc
|
[
"Apache-2.0"
] | null | null | null |
Custom/text.py
|
SemLaan/Hotel-review-sentiment-analysis
|
b7fd22dcea63bab1c7fe666a7f4912931de1f4dc
|
[
"Apache-2.0"
] | null | null | null |
Custom/text.py
|
SemLaan/Hotel-review-sentiment-analysis
|
b7fd22dcea63bab1c7fe666a7f4912931de1f4dc
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from nltk import tokenize as tokenizers
from nltk.stem import PorterStemmer, WordNetLemmatizer
class TextCleaning:
def __init__(self):
return
def remove_hyperlinks(self, corpus):
corpus = corpus.str.replace(r"https?://t.co/[A-Za-z0-9]+", "https")
return corpus
def remove_numbers(self, corpus):
corpus = corpus.str.replace(r"\w*\d\w*", "")
return corpus
def tokenize(self, corpus):
tokenizer = tokenizers.RegexpTokenizer(r'\w+')
corpus = corpus.apply(lambda x: tokenizer.tokenize(x))
return corpus
def untokenize(self, corpus):
corpus = corpus.apply(
lambda tokenized_review: ' '.join(tokenized_review)
)
return corpus
def lemmatize(self, corpus):
corpus = self.tokenize(corpus)
lemmatizer = WordNetLemmatizer()
corpus = corpus.apply(
lambda tokens: [lemmatizer.lemmatize(token) for token in tokens]
)
return self.untokenize(corpus)
def stem(self, corpus):
corpus = self.tokenize(corpus)
stemmer = PorterStemmer()
corpus = corpus.apply(
lambda tokens: [stemmer.stem(token) for token in tokens]
)
return self.untokenize(corpus)
def to_lower(self, corpus):
return corpus.apply(str.lower)
def negate_corpus(self, corpus):
corpus = corpus.apply(self.negate_sentence)
return corpus
def negate_sentence(self, sentence):
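        # Expand space-separated contractions (e.g. "don t" -> "do not") using the appos lookup table.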
sentence = sentence.lower()
for word in appos:
if word in sentence:
sentence = sentence.replace(word, appos[word])
return sentence.lower()
def count_negations(self, corpus):
negations = 0
for sentence in corpus:
sentence = sentence.lower()
for word in appos:
if word in sentence:
negations += 1
print(negations)
return
appos = {
"aren t" : "are not",
"can t" : "cannot",
"couldn t" : "could not",
"didn t" : "did not",
"doesn t" : "does not",
"don t" : "do not",
"hadn t" : "had not",
"hasn t" : "has not",
"haven t" : "have not",
"he d" : "he would",
"he ll" : "he will",
"he s" : "he is",
"i d" : "I would",
"i ll" : "I will",
"i m" : "I am",
"isn t" : "is not",
"it s" : "it is",
"it ll":"it will",
"i ve" : "I have",
"let s" : "let us",
"mightn t" : "might not",
"mustn t" : "must not",
"shan t" : "shall not",
"she d" : "she would",
"she ll" : "she will",
"she s" : "she is",
"shouldn t" : "should not",
"that s" : "that is",
"there s" : "there is",
"they d" : "they would",
"they ll" : "they will",
"they re" : "they are",
"they ve" : "they have",
"we d" : "we would",
"we re" : "we are",
"weren t" : "were not",
"we ve" : "we have",
"what ll" : "what will",
"what re" : "what are",
"what s" : "what is",
"what ve" : "what have",
"where s" : "where is",
"who d" : "who would",
"who ll" : "who will",
"who re" : "who are",
"who s" : "who is",
"who ve" : "who have",
"won t" : "will not",
"wouldn t" : "would not",
"you d" : "you would",
"you ll" : "you will",
"you re" : "you are",
"you ve" : "you have",
" re": " are",
"wasn t": "was not",
"we ll":" will",
}
| 22.213836
| 76
| 0.51812
| 1,936
| 0.548131
| 0
| 0
| 0
| 0
| 0
| 0
| 1,018
| 0.288222
|
3cb8b156ffda90f3a147616840973c64a0b81e50
| 546
|
py
|
Python
|
kolibri/plugins/user_auth/root_urls.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 545
|
2016-01-19T19:26:55.000Z
|
2022-03-20T00:13:04.000Z
|
kolibri/plugins/user_auth/root_urls.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 8,329
|
2016-01-19T19:32:02.000Z
|
2022-03-31T21:23:12.000Z
|
kolibri/plugins/user_auth/root_urls.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 493
|
2016-01-19T19:26:48.000Z
|
2022-03-28T14:35:05.000Z
|
"""
This is here to enable redirects from the old /user endpoint to /auth
"""
from django.conf.urls import include
from django.conf.urls import url
from django.views.generic.base import RedirectView
from kolibri.core.device.translation import i18n_patterns
redirect_patterns = [
url(
r"^user/$",
RedirectView.as_view(
pattern_name="kolibri:kolibri.plugins.user_auth:user_auth", permanent=True
),
name="redirect_user",
),
]
urlpatterns = [url(r"", include(i18n_patterns(redirect_patterns)))]
| 26
| 86
| 0.705128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.274725
|
3cb8db111fef337bf519873d89b2fd5a45a81770
| 250
|
py
|
Python
|
Learning/CodeWars/Python/7 kyu_Sum_of_numbers_from_0_to_N.py
|
aliasfoxkde/snippets
|
bb6dcc6597316ef9c88611f526935059451c3b5a
|
[
"MIT"
] | null | null | null |
Learning/CodeWars/Python/7 kyu_Sum_of_numbers_from_0_to_N.py
|
aliasfoxkde/snippets
|
bb6dcc6597316ef9c88611f526935059451c3b5a
|
[
"MIT"
] | null | null | null |
Learning/CodeWars/Python/7 kyu_Sum_of_numbers_from_0_to_N.py
|
aliasfoxkde/snippets
|
bb6dcc6597316ef9c88611f526935059451c3b5a
|
[
"MIT"
] | null | null | null |
# See: https://www.codewars.com/kata/56e9e4f516bcaa8d4f001763
def show_sequence(n):
if n == 0:
return '0=0'
elif n < 0:
return str(n) + '<0'
return str(range(n+1))[1:-1].replace(', ','+') + ' = ' + str(sum(range(1,n+1)))
| 27.777778
| 83
| 0.54
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.328
|
3cb8ec1381ca6215654d8b8a9da92a3ab2726159
| 4,685
|
py
|
Python
|
Script.py
|
harisqazi1/Automated_Script
|
6680e0604db55297fad2ab2f99ea61324ca88048
|
[
"MIT"
] | null | null | null |
Script.py
|
harisqazi1/Automated_Script
|
6680e0604db55297fad2ab2f99ea61324ca88048
|
[
"MIT"
] | null | null | null |
Script.py
|
harisqazi1/Automated_Script
|
6680e0604db55297fad2ab2f99ea61324ca88048
|
[
"MIT"
] | null | null | null |
"""
Title: Automated Script for Data Scraping
Creator: Haris "5w464l1c10u5"
Purpose: Make it easier to gather data from online sources, all through one Python script
Usage:
python3 Automated_Script.py
Resources:
https://www.digitalocean.com/community/tutorials/how-to-scrape-web-pages-with-beautiful-soup-and-python-3
https://www.guru99.com/reading-and-writing-files-in-python.html
https://www.dataquest.io/blog/web-scraping-tutorial-python/
https://forecast.weather.gov/MapClick.php?lat=42.00900000000007&lon=-87.69495999999998
https://pythonspot.com/http-download-file-with-python/
"""
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import urllib.request, urllib.error, urllib.parse
from datetime import date, datetime
import io
import codecs
Code_Version = 3
#Time in H:M:S format
now = datetime.now()
Time = now.strftime("%I:%M:%S:%p")
#Date
Today_Date = date.today()
Date = Today_Date.strftime("(%A) %B %d, %Y")
try:
#Weather
page = requests.get('https://forecast.weather.gov/MapClick.php?lat=42.00900000000007&lon=-87.69495999999998')
soup = BeautifulSoup(page.text, 'html.parser')
except:
print("Weather.gov is not available")
try:
#Weather Type
weathertype = soup.find(class_='myforecast-current')
type = weathertype.contents[0]
type = type.encode('utf-8')
except:
type = "N/A"
try:
#Fahrenheit
weather = soup.find(class_='myforecast-current-lrg')
w = weather.contents[0]
w = w.encode('utf-8')
except:
w = "N/A"
try:
#Humidity
Humidity = soup.find_all('td')[0].get_text()
Hum_percent = soup.find_all('td')[1].get_text()
except:
Humidity = "N/A"
Hum_percent = "N/A"
try:
#Wind_Speed
W_Speed = soup.find_all('td')[2].get_text()
W_S = soup.find_all('td')[3].get_text()
except:
W_Speed = "N/A"
W_S = "N/A"
try:
#Wind_Chill
Wind_Chill = soup.find_all('td')[10].get_text()
Wind_Chill_num = soup.find_all('td')[11].get_text()
Wind_Chill = Wind_Chill.encode('utf-8')
Wind_Chill_num = Wind_Chill_num.encode('utf-8')
except:
Wind_Chill = "N/A"
Wind_Chill_num = "N/A"
try:
#Last_Update
Last_Update = soup.find_all('td')[12].get_text()
Last_Update_num = soup.find_all('td')[13].get_text()
except:
Last_Update = "N/A"
Last_Update_num = "N/A"
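# HTML template whose placeholder labels are replaced below with the scraped values.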
html_file = """
<h1 style="text-align: center;"><span style="text-decoration: underline;">Good Morning, Haris!</span></h1>
<h4 style="text-align: left;">Time:</h4>
<h4 style="text-align: left;">Date:</h4>
<h4>Code Version:</h4>
<hr />
<h3 style="font-size: 1.5em; text-align: center;"><span style="text-decoration: underline;"><span style="background-color: #00ccff;">Weather</span></span></h3>
<table style="margin-left: auto; margin-right: auto; height: 195px;" width="238">
<tbody>
<tr style="height: 7px;">
<td style="width: 228px; height: 7px;">Current Weather:</td>
</tr>
<tr style="height: 1px;">
<td style="width: 228px; height: 1px;">Weather Type:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Humidity:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Wind Speed:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Wind Chill:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Last Update:</td>
</tr>
</tbody>
</table>
<p style="font-size: 1.5em;"> </p>
<hr />
<h3 style="font-size: 1.5em; text-align: center;"><span style="text-decoration: underline; background-color: #cc99ff;">News</span></h3>
"""
html_file = html_file.replace('Time:','Current Time: ' + Time)
html_file = html_file.replace('Date:','Today\'s Date: ' + Date)
html_file = html_file.replace('Code Version:', 'Code Version: #' + str(Code_Version))
html_file = html_file.replace('Current Weather:','Current Weather: ' + w.decode('utf8'))
html_file = html_file.replace('Weather Type:','Weather Type: ' + type.decode('utf8'))
html_file = html_file.replace('Humidity:','Humidity: ' + Hum_percent)
html_file = html_file.replace('Wind Speed:','Wind Speed: ' + W_S)
html_file = html_file.replace('Wind Chill:','Wind Chill: ' + Wind_Chill_num.decode('utf-8'))
html_file = html_file.replace('Last Update:','Last Update: ' + Last_Update_num)
try:
response = urllib.request.urlopen('https://allinfosecnews.com/')
html = response.read()
except:
print("https://allinfosecnews.com/ is not available")
with io.open("website.html", 'w', encoding='utf8') as f:
f.write(html_file)
f.write(html.decode('utf-8'))
f.close()
print(w)
print(type)
print(Hum_percent)
print(W_Speed)
print(W_S)
print(Wind_Chill_num)
print(Last_Update_num)
| 28.919753
| 159
| 0.683458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,650
| 0.565635
|
3cb91fcc9d369715e263d80560e5e0440993f481
| 144
|
py
|
Python
|
pnbp/helpers/__init__.py
|
prettynb/pnbp
|
1be54a2217a85675ec4a14a1c8a1d2501be88404
|
[
"MIT"
] | 1
|
2021-07-30T02:00:29.000Z
|
2021-07-30T02:00:29.000Z
|
pnbp/helpers/__init__.py
|
prettynb/pnbp
|
1be54a2217a85675ec4a14a1c8a1d2501be88404
|
[
"MIT"
] | null | null | null |
pnbp/helpers/__init__.py
|
prettynb/pnbp
|
1be54a2217a85675ec4a14a1c8a1d2501be88404
|
[
"MIT"
] | null | null | null |
from .base import _convert_datetime
from .codeblock import CodeBlock
from .link import Link
from .tag import Tag
from .url import Url
| 9
| 35
| 0.756944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3cb929d8fa24f1122564db813af9ab0475a425f5
| 838
|
py
|
Python
|
tests/elections/test_police_and_crime_commissioner.py
|
DemocracyClub/uk-election-timetables
|
2541f9e5050a393906bafa2b70709fe650de3f32
|
[
"MIT"
] | 2
|
2020-11-14T15:56:56.000Z
|
2021-01-11T11:11:09.000Z
|
tests/elections/test_police_and_crime_commissioner.py
|
DemocracyClub/uk-election-timetables
|
2541f9e5050a393906bafa2b70709fe650de3f32
|
[
"MIT"
] | 12
|
2020-11-18T20:27:43.000Z
|
2021-12-15T10:47:01.000Z
|
tests/elections/test_police_and_crime_commissioner.py
|
DemocracyClub/uk-election-timetables
|
2541f9e5050a393906bafa2b70709fe650de3f32
|
[
"MIT"
] | null | null | null |
from datetime import date
from uk_election_timetables.elections import PoliceAndCrimeCommissionerElection
# Reference election: pcc.avon-and-somerset.2016-05-05
def test_publish_date_police_and_crime_commissioner():
election = PoliceAndCrimeCommissionerElection(date(2016, 5, 5))
assert election.sopn_publish_date == date(2016, 4, 8)
# Reference election: pcc.2021-05-06
def test_registration_deadline_police_and_crime_commissioner():
election = PoliceAndCrimeCommissionerElection(date(2021, 5, 6))
assert election.registration_deadline == date(2021, 4, 19)
# Reference election: pcc.2021-05-06
def test_postal_vote_application_deadline_police_and_crime_commissioner():
election = PoliceAndCrimeCommissionerElection(date(2021, 5, 6))
assert election.postal_vote_application_deadline == date(2021, 4, 20)
| 33.52
| 79
| 0.805489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 126
| 0.150358
|
3cb98b826371f4dfda09a39ed9c09c8f6ab7451b
| 847
|
py
|
Python
|
LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py
|
SanaaCHAOU/laureat_management_ENSAT
|
d769714f9f8cb9ebf90e02577547ec348c011461
|
[
"MIT"
] | null | null | null |
LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py
|
SanaaCHAOU/laureat_management_ENSAT
|
d769714f9f8cb9ebf90e02577547ec348c011461
|
[
"MIT"
] | null | null | null |
LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py
|
SanaaCHAOU/laureat_management_ENSAT
|
d769714f9f8cb9ebf90e02577547ec348c011461
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-01-11 14:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('laureats', '0010_auto_20200111_1458'),
]
operations = [
migrations.CreateModel(
name='Profession',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('libelle', models.CharField(default='', max_length=255)),
],
options={
'ordering': ['libelle'],
},
),
migrations.AlterField(
model_name='employe',
name='profession',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employes', to='laureats.Profession'),
),
]
| 28.233333
| 132
| 0.570248
| 721
| 0.85124
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.212515
|
3cbc5cfef3c4ee6f751fd3f8b8b9e741e7ebbbd4
| 1,952
|
py
|
Python
|
python/250.count-univalue-subtrees.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 10
|
2019-09-15T00:23:57.000Z
|
2022-01-05T12:53:42.000Z
|
python/250.count-univalue-subtrees.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 3
|
2021-06-30T00:39:26.000Z
|
2021-08-01T07:13:59.000Z
|
python/250.count-univalue-subtrees.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 6
|
2020-02-08T02:55:22.000Z
|
2022-01-02T22:48:18.000Z
|
# [250] Count Univalue Subtrees
# Description
# Given a binary tree, count the number of uni-value subtrees.
# A Uni-value subtree means all nodes of the subtree have the same value.
# Example
# Example 1
# Input: root = {5,1,5,5,5,#,5}
# Output: 4
# Explanation:
#        5
#       / \
#      1   5
#     / \   \
#    5   5   5
# Example 2
# Input: root = {1,3,2,4,5,#,6}
# Output: 3
# Explanation:
#        1
#       / \
#      3   2
#     / \   \
#    4   5   6
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: the given tree
@return: the number of uni-value subtrees.
"""
def countUnivalSubtrees(self, root):
# write your code here
self.count = 0
self.dfs(root)
return self.count
def dfs(self, root):
"""
return how many subtrees with uni-values
"""
if not root:
return None, True
left_node, left_uni = self.dfs(root.left)
right_node, right_uni = self.dfs(root.right)
if left_uni and right_uni:
if left_node is None and right_node is None:
self.count += 1
return root, True
            elif left_node and right_node is None and left_node.val == root.val:
self.count += 1
return root, True
            elif right_node and left_node is None and right_node.val == root.val:
self.count += 1
return root, True
elif right_node and left_node and left_node.val == root.val and right_node.val == root.val:
self.count += 1
return root, True
return root, False
| 22.436782
| 103
| 0.518955
| 1,116
| 0.571721
| 0
| 0
| 0
| 0
| 0
| 0
| 974
| 0.498975
|
3cbd5fce78146aae7cbddda0c039ec527c342db9
| 5,752
|
py
|
Python
|
apis.py
|
teemuja/ndp_app3
|
8a9517b2e2385640dc1a2c1baf0ae07cf630c89c
|
[
"MIT"
] | null | null | null |
apis.py
|
teemuja/ndp_app3
|
8a9517b2e2385640dc1a2c1baf0ae07cf630c89c
|
[
"MIT"
] | null | null | null |
apis.py
|
teemuja/ndp_app3
|
8a9517b2e2385640dc1a2c1baf0ae07cf630c89c
|
[
"MIT"
] | null | null | null |
# apis for ndp_d3
from owslib.wfs import WebFeatureService
import pandas as pd
import geopandas as gpd
import momepy
import streamlit as st
@st.cache(allow_output_mutation=True)
def pno_data(kunta,vuosi=2021):
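    # Fetch postal-code area statistics (Paavo) from the stat.fi WFS and rename columns using the local dictionaries.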
url = 'http://geo.stat.fi/geoserver/postialue/wfs' # vaestoruutu tai postialue
wfs = WebFeatureService(url=url, version="2.0.0")
layer = f'postialue:pno_tilasto_{vuosi}'
data_ = wfs.getfeature(typename=layer, outputFormat='json') # propertyname=['kunta'],
gdf_all = gpd.read_file(data_)
noneed = ['id', 'euref_x', 'euref_y', 'pinta_ala']
paavodata = gdf_all.drop(columns=noneed)
kuntakoodit = pd.read_csv('config/kunta_dict.csv', index_col=False, header=0).astype(str)
kuntakoodit['koodi'] = kuntakoodit['koodi'].str.zfill(3)
kunta_dict = pd.Series(kuntakoodit.kunta.values, index=kuntakoodit.koodi).to_dict()
paavodata = paavodata.replace({'kunta':kunta_dict})
dict_feat = pd.read_csv('config/paavo2021_dict.csv', skipinitialspace=True, header=None, index_col=0,squeeze=True).to_dict()
selkopaavo = paavodata.rename(columns=dict_feat).sort_values('Kunta')
pno_valinta = selkopaavo[selkopaavo['Kunta'] == kunta].sort_values('Asukkaat yhteensä', ascending=False)
return pno_valinta
@st.cache(allow_output_mutation=True)
def hri_data(pno):
def make_bbox(pno, point_crs='4326', projected_crs='3857'): # 3879
poly = gpd.GeoSeries(pno.geometry)
b = poly.to_crs(epsg=projected_crs)
b = b.buffer(100)
bbox = b.to_crs(epsg=point_crs).bounds
bbox = bbox.reset_index(drop=True)
bbox_tuple = bbox['minx'][0], bbox['miny'][0], bbox['maxx'][0], bbox['maxy'][0]
return bbox_tuple
bbox = make_bbox(pno) + tuple(['urn:ogc:def:crs:EPSG::4326'])
url = 'https://kartta.hsy.fi/geoserver/wfs'
wfs = WebFeatureService(url=url, version="2.0.0")
layer = 'ilmasto_ja_energia:rakennukset'
data = wfs.getfeature(typename=layer, bbox=bbox, outputFormat='json')
gdf = gpd.read_file(data)
# columns to keep
columns = ['kuntanimi', 'valm_v', 'kerrosala', 'kerrosluku', 'kayt_luok', 'kayttark', 'geometry']
# overlay with pno area & use only columns
gdf_pno = pno.to_crs(3067).overlay(gdf.to_crs(3067), how='intersection')[columns]#.to_crs(4326)
gdf_pno.rename(columns={'valm_v': 'rakennusvuosi',
'kayt_luok': 'rakennustyyppi',
'kayttark': 'tarkenne',
}, inplace=True)
gdf_out = gdf_pno.to_crs(epsg=4326)
return gdf_out
@st.cache(allow_output_mutation=True)
def densities(buildings):
# projected crs for momepy calculations & prepare for housing
gdf_ = buildings.to_crs(3857)
# check kerrosala data and use footprint if nan/zero
gdf_['kerrosala'] = pd.to_numeric(gdf_['kerrosala'], errors='coerce', downcast='float')
gdf_['kerrosala'].fillna(gdf_.area, inplace=True)
gdf_.loc[gdf_['kerrosala'] == 0, 'kerrosala'] = gdf_.area
# add footprint area
gdf_['rakennusala'] = gdf_.area
#gdf_.loc[:,gdf_['rakennusala']] = gdf_.area
    # exclude some utility building types
no_list = ['Muut rakennukset','Palo- ja pelastustoimen rakennukset','Varastorakennukset']
yes_serie = ~gdf_.rakennustyyppi.isin(no_list)
gdf = gdf_[yes_serie]
    # prepare momepy..
gdf['uID'] = momepy.unique_id(gdf)
limit = momepy.buffered_limit(gdf)
tessellation = momepy.Tessellation(gdf, unique_id='uID', limit=limit).tessellation
# calculate GSI = ground space index = coverage = CAR = coverage area ratio
tess_GSI = momepy.AreaRatio(tessellation, gdf,
momepy.Area(tessellation).series,
momepy.Area(gdf).series, 'uID')
gdf['GSI'] = round(tess_GSI.series,3)
# calculate FSI = floor space index = FAR = floor area ratio
gdf['FSI'] = round(gdf['kerrosala'] / momepy.Area(tessellation).series,3)
# calculate OSR = open space ratio = spaciousness
gdf['OSR'] = round((1 - gdf['GSI']) / gdf['FSI'],3)
# ND calculations
# queen contiguity for 2 degree neighbours = "perceived neighborhood"
tessellation = tessellation.merge(gdf[['uID','rakennusala','kerrosala','OSR']]) # add selected values from buildings to tess-areas
sw = momepy.sw_high(k=2, gdf=tessellation, ids='uID') # degree of nd
gdf['GSI_ND'] = round(momepy.Density(tessellation, values='rakennusala', spatial_weights=sw, unique_id='uID').series, 2)
gdf['FSI_ND'] = round(momepy.Density(tessellation, values='kerrosala', spatial_weights=sw, unique_id='uID').series, 2)
gdf['OSR_ND'] = round((1 - gdf['GSI_ND']) / gdf['FSI_ND'], 2)
gdf['OSR_ND_mean'] = round(momepy.AverageCharacter(tessellation, values='OSR', spatial_weights=sw, unique_id='uID').mean,2)
# remove infinite values of osr if needed..
gdf['OSR_ND'].clip(upper=gdf['OSR'].quantile(0.99), inplace=True)
gdf['OSR_ND_mean'].clip(upper=gdf['OSR'].quantile(0.99), inplace=True)
gdf_out = gdf.to_crs(4326)
return gdf_out
@st.cache(allow_output_mutation=True)
def tess_boundaries(buildings):
# projected crs for momepy calculations & prepare for housing
gdf_ = buildings.to_crs(3857)
gdf_['kerrosala'] = pd.to_numeric(gdf_['kerrosala'], errors='coerce', downcast='float')
gdf_['kerrosala'].fillna(gdf_.area, inplace=True)
no_list = ['Muut rakennukset','Palo- ja pelastustoimen rakennukset','Varastorakennukset']
yes_serie = ~gdf_.rakennustyyppi.isin(no_list) # exclude some types
gdf = gdf_[yes_serie]
gdf['uID'] = momepy.unique_id(gdf)
limit = momepy.buffered_limit(gdf)
tessellation = momepy.Tessellation(gdf, unique_id='uID', limit=limit).tessellation
return tessellation.to_crs(4326)
| 52.770642
| 134
| 0.685327
| 0
| 0
| 0
| 0
| 5,605
| 0.974274
| 0
| 0
| 1,878
| 0.326438
|
3cbec5b44846435b33e0ef20ab76a5f6a4ef6c68
| 6,471
|
py
|
Python
|
test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# vim:expandtab:ts=3:sw=3
# @file testIncrStatus.py
# @brief Test SOAP calls to incrAddSentence using a deployed PortageLive web server.
#
# @author Samuel Larkin
#
# Traitement multilingue de textes / Multilingual Text Processing
# Tech. de l'information et des communications / Information and Communications Tech.
# Conseil national de recherches Canada / National Research Council Canada
# Copyright 2016, Sa Majeste la Reine du Chef du Canada /
# Copyright 2016, Her Majesty in Right of Canada
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
#import zeep
#client = zeep.Client(wsdl=url)
from suds.cache import DocumentCache
from suds.client import Client
from suds import WebFault
import unittest
import logging
import requests
import time
import random
import os
import sys
import shutil
logging.basicConfig(level=logging.CRITICAL)
# If you need to debug what is happening, uncomment the following line
#logging.basicConfig(level=logging.DEBUG)
url = 'http://127.0.0.1'
class TestIncrAddSentence(unittest.TestCase):
"""
Using PortageLiveAPI's WSDL deployed on a web server, we test SOAP calls to
incrAddSentence().
"""
def __init__(self, *args, **kwargs):
super(TestIncrAddSentence, self).__init__(*args, **kwargs)
DocumentCache().clear()
      self.url = url + ':' + os.getenv('PHP_PORT', '8756')
self.WSDL = self.url + '/PortageLiveAPI.wsdl'
self.client = Client(self.WSDL)
self.context = 'unittest.rev.en-fr'
self.document_model_id = 'PORTAGE_UNITTEST_4da35'
self.source_sentence = "'home'"
self.target_sentence = '"maison"'
self.document_model_dir = os.path.join("doc_root", "plive",
"DOCUMENT_MODEL_" + self.context + '_' + self.document_model_id)
if (os.path.isdir(self.document_model_dir)):
shutil.rmtree(self.document_model_dir)
def test_01_no_argument(self):
"""
incrAddSentence() should warn the user that it needs some parameters.
"""
with self.assertRaises(WebFault) as cm:
self.client.service.incrAddSentence()
self.assertEqual(cm.exception.message, "Server raised fault: 'Missing parameter'")
def test_02_all_arguments_null(self):
"""
incrAddSentence() expects 3 arguments that cannot be None/NULL.
"""
with self.assertRaises(WebFault) as cm:
self.client.service.incrAddSentence(None, None, None, None, None)
self.assertEqual(cm.exception.message, "Server raised fault: 'Missing parameter'")
def test_03_no_document_model_id(self):
"""
It is invalid to use the empty string as document level model ID.
"""
with self.assertRaises(WebFault) as cm:
self.client.service.incrAddSentence(self.context, '', '', '')
self.assertEqual(cm.exception.message,
"Server raised fault: 'You must provide a valid document_model_id.'")
def test_04_no_source_sentence(self):
"""
The source sentence cannot be empty.
"""
with self.assertRaises(WebFault) as cm:
self.client.service.incrAddSentence(self.context,
self.document_model_id, '', '')
self.assertEqual(cm.exception.message,
"Server raised fault: 'You must provide a source sentence.'")
def test_05_no_target_sentence(self):
"""
The target sentence cannot be empty.
"""
with self.assertRaises(WebFault) as cm:
self.client.service.incrAddSentence(self.context,
self.document_model_id,
self.source_sentence, '')
self.assertEqual(cm.exception.message,
"Server raised fault: 'You must provide a target sentence.'")
@unittest.skip("Should we check for too many parameters?")
def test_06_too_many_parameters(self):
"""
      TODO: Should we get some sort of message if we provide an invalid number
      of arguments?
"""
with self.assertRaises(WebFault) as cm:
self.client.service.incrAddSentence(self.context,
self.document_model_id,
self.source_sentence,
self.target_sentence,
'extra_dummy_argument')
self.assertEqual(cm.exception.message,
"Server raised fault: 'You must provide a target sentence.'")
def test_07_basic_valid_usage(self):
"""
This tests a valid call to incrAddSentence() where
document_model_id is valid, source sentence is valid and target
sentence is also valid.
- The SOAP call should return true since it's supposed to be able to add
this sentence pair to the queue.
- The training phase should have inserted the sentence pair in the
corpora.
"""
UID = str(random.randint(0, 100000))
source = self.source_sentence + str(time.time()) + UID
target = self.target_sentence + str(time.time()) + UID
result = self.client.service.incrAddSentence(self.context,
self.document_model_id,
source, target)
self.assertEqual(result, True, 'SOAP call failed to add a sentence pair')
r = requests.get(self.url + '/plive/DOCUMENT_MODEL_' + self.context + '_' + self.document_model_id + '/corpora')
self.assertEqual(r.status_code, 200,
"Failed to fetch the corpora file for: " + self.document_model_id)
ref_sentence_pair = '\t'.join((source, target))
sentence_pairs = tuple(l.split('\t', 1)[-1] for l in r.text.split('\n'))
self.assertEqual(sentence_pairs.count(ref_sentence_pair), 1,
"Expected exactly one occurrence of our sentence pair in corpora.")
# Let incremental training finish.
      time.sleep(3)
with open(os.path.join(self.document_model_dir, "incr-update.status"), "r") as sf:
status = sf.read().strip()
self.assertEqual(status, '0',
"0 exit status for incr-update.sh not found in incr-update.status.")
if __name__ == '__main__':
unittest.main()
| 36.767045
| 118
| 0.637923
| 5,310
| 0.820584
| 0
| 0
| 726
| 0.112193
| 0
| 0
| 2,527
| 0.390512
|
3cbf25669395a89790375a19545ba5be63026880
| 1,919
|
py
|
Python
|
Cryptography/Caesar_Cipher.py
|
hari40009/learnpython
|
b75e700f62f49ab9d8fef607ebd87a34c5cb6530
|
[
"MIT"
] | 1
|
2018-11-07T04:13:52.000Z
|
2018-11-07T04:13:52.000Z
|
Cryptography/Caesar_Cipher.py
|
engineerprogrammer/learnpython
|
140acfd8fc6345745a9b274baaa1e58ea3217f9f
|
[
"MIT"
] | null | null | null |
Cryptography/Caesar_Cipher.py
|
engineerprogrammer/learnpython
|
140acfd8fc6345745a9b274baaa1e58ea3217f9f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" A program to use a Caesar cipher based on user input for the shift value """
MAX_SHIFT = 26
def whatMode():
""" Finds out what the user wants to do """
while True:
print("Do you wish to encrypt, decrypt or brute force a message: ")
mode = input().lower()
if mode in "encrypt e decrypt d brute b".split():
return mode[0]
else:
print("Enter '[E]ncrypt', '[D]ecrypt' or [B]rute")
def plainMessage():
""" Gets a string from the user """
print ("Message: ")
return input()
def getKey():
""" Gets a shift value from the user """
shiftKey = 0
while True:
print("Enter shift key (1-%s) " % (MAX_SHIFT))
shiftKey = int(input())
if (shiftKey >= 1 and shiftKey <= MAX_SHIFT):
return shiftKey
def cryptMessage(mode, message, shiftKey):
""" The encryption / decryption action is here """
if mode[0] == 'd':
shiftKey = -shiftKey
translated = ''
for symbol in message: # The encryption stuff
if symbol.isalpha():
num = ord(symbol)
num += shiftKey
if symbol.isupper():
if num > ord('Z'):
num -= 26
elif num < ord('A'):
num += 26
elif symbol.islower():
if num > ord('z'):
num -= 26
elif num < ord('a'):
num += 26
translated += chr(num)
else:
translated += symbol
return translated
mode = whatMode()
message = plainMessage()
if mode[0] != 'b':
shiftKey = getKey()
print('Your translated text is:')
if mode[0] != 'b': #Brute force settings
print(cryptMessage(mode, message, shiftKey))
else:
for shiftKey in range(1, MAX_SHIFT + 1):
print(shiftKey, cryptMessage('decrypt', message, shiftKey))
| 27.028169
| 80
| 0.532569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 541
| 0.281918
|
3cc3cc243655d3b808c34d010f7d4b9e190e610a
| 494
|
py
|
Python
|
leetcode/python/medium/p046_permute.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
leetcode/python/medium/p046_permute.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
leetcode/python/medium/p046_permute.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
def __init__(self):
self.res = []
def permute(self, nums):
self.backTrack(nums, [])
return self.res
def backTrack(self, nums, track):
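        # Depth-first backtracking: record a copy of track once it uses every number,
        # otherwise extend it with each unused number in turn and undo the choice after recursing.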
if len(nums) == len(track):
self.res.append(track[:])
return
for i in nums:
if i in track:
continue
track.append(i)
self.backTrack(nums, track)
track.remove(i)
slu = Solution()
print(slu.permute([1]))
| 22.454545
| 39
| 0.506073
| 450
| 0.910931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3cc66fc74313d0ecd7ced030e26d629577fa26a1
| 74
|
py
|
Python
|
Level1/count_p_and_y.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
Level1/count_p_and_y.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
Level1/count_p_and_y.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
def solution(s):
return (s.lower().count('p') == s.lower().count('y'))
| 37
| 57
| 0.567568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.081081
|
3cc75769cc0430a3c58ed37733ff77e1117674ee
| 83
|
py
|
Python
|
bemy/apps.py
|
foropolo/profiles-rest-api
|
f35cbb5727204bf4419c6b0a9797d7c624773219
|
[
"MIT"
] | null | null | null |
bemy/apps.py
|
foropolo/profiles-rest-api
|
f35cbb5727204bf4419c6b0a9797d7c624773219
|
[
"MIT"
] | 6
|
2019-12-05T00:35:40.000Z
|
2022-02-10T08:29:56.000Z
|
bemy/apps.py
|
foropolo/profiles-rest-api
|
f35cbb5727204bf4419c6b0a9797d7c624773219
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class BemyConfig(AppConfig):
name = 'bemy'
| 13.833333
| 33
| 0.73494
| 46
| 0.554217
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.072289
|
3cc9578bf937313ea3ce810099e43cb50d90651a
| 634
|
py
|
Python
|
ribosome/compute/ribosome.py
|
tek/ribosome-py
|
8bd22e549ddff1ee893d6e3a0bfba123a09e96c6
|
[
"MIT"
] | null | null | null |
ribosome/compute/ribosome.py
|
tek/ribosome-py
|
8bd22e549ddff1ee893d6e3a0bfba123a09e96c6
|
[
"MIT"
] | null | null | null |
ribosome/compute/ribosome.py
|
tek/ribosome-py
|
8bd22e549ddff1ee893d6e3a0bfba123a09e96c6
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Generic, TypeVar, Type
from lenses import UnboundLens
from amino import Dat
from ribosome.data.plugin_state import PluginState
D = TypeVar('D')
CC = TypeVar('CC')
C = TypeVar('C')
class Ribosome(Generic[D, CC, C], Dat['Ribosome[D, CC, C]']):
def __init__(
self,
state: PluginState[D, CC],
comp_type: Type[C],
comp_lens: UnboundLens['Ribosome[D, CC, C]', 'Ribosome[D, CC, C]', C, C],
) -> None:
self.state = state
self.comp_type = comp_type
self.comp_lens = comp_lens
__all__ = ('Ribosome',)
| 21.862069
| 85
| 0.621451
| 367
| 0.578864
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.126183
|
3cc96d6bfddb10586b88d9ad0d7b86bd5ca4e9aa
| 1,431
|
py
|
Python
|
pythonstartup.py
|
avisilver/util_scripts
|
ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508
|
[
"MIT"
] | null | null | null |
pythonstartup.py
|
avisilver/util_scripts
|
ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508
|
[
"MIT"
] | null | null | null |
pythonstartup.py
|
avisilver/util_scripts
|
ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508
|
[
"MIT"
] | null | null | null |
# Add auto-completion and a stored history file of commands to your Python
# interactive interpreter. Requires Python 2.0+, readline. Autocomplete is
# bound to the Esc key by default (you can change it - see readline docs).
#
# Store the file in ~/.pystartup, and set an environment variable to point
# to it: "export PYTHONSTARTUP=/home/user/.pystartup" in bash.
#
# Note that PYTHONSTARTUP does *not* expand "~", so you have to put in the
# full path to your home directory.
import atexit
import os
import readline
import rlcompleter
historyPath = os.path.expanduser("~/.pyhistory")
def save_history(historyPath=historyPath):
import readline
readline.write_history_file(historyPath)
if os.path.exists(historyPath):
readline.read_history_file(historyPath)
atexit.register(save_history)
readline.parse_and_bind('tab: complete')
del os, atexit, readline, rlcompleter, save_history, historyPath
def dirp(object_or_module):
"""dirp(object_or_module) -> string
Print the object's or currently imported module's attributes as shown
in dir() on separate lines with docstrings"""
for attr in dir(object_or_module):
doc = object_or_module.__getattribute__(attr).__doc__
doc = doc if doc else ""
        indented_doc = "\n".join("    " + line for line in doc.split("\n"))
print ("\n{line}\n{attr}\n{doc}".format(
line="-"*80,
attr=attr,
doc=indented_doc
))
| 31.108696
| 74
| 0.709294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 698
| 0.487771
|
3cc9c9c6db3e02d74038abeb59341c1138d3a879
| 70
|
py
|
Python
|
saleor/plugins/category/notify_event.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/plugins/category/notify_event.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | 4
|
2021-09-06T03:55:32.000Z
|
2021-10-15T08:47:58.000Z
|
saleor/plugins/category/notify_event.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | null | null | null |
def send_category_notify():
print("Plugin Category when created")
| 23.333333
| 41
| 0.757143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.428571
|
3ccccac3d5c3d7c8d168081f420c8dfcbee68843
| 761
|
py
|
Python
|
NhMedicalSite/panel/models.py
|
Dogruyer/ecommerce
|
aa505b401e42882a96e6ef6375bd1a1ed95c5b85
|
[
"Apache-2.0"
] | null | null | null |
NhMedicalSite/panel/models.py
|
Dogruyer/ecommerce
|
aa505b401e42882a96e6ef6375bd1a1ed95c5b85
|
[
"Apache-2.0"
] | null | null | null |
NhMedicalSite/panel/models.py
|
Dogruyer/ecommerce
|
aa505b401e42882a96e6ef6375bd1a1ed95c5b85
|
[
"Apache-2.0"
] | 1
|
2018-11-01T11:10:58.000Z
|
2018-11-01T11:10:58.000Z
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Category(models.Model):
category_name=models.CharField(max_length=100)
class Products(models.Model):
title=models.CharField(max_length=100)
content = models.TextField(max_length=300)
product_image=models.ImageField(upload_to='images/products/',
default='images/products/default.jpg')
product_price=models.DecimalField(max_digits=6,decimal_places=4)
category_id=models.ForeignKey(Category)
class User:
name=models.CharField(max_length=50)
lastname=models.CharField(max_length=50)
email=models.EmailField(max_length=100)
password=models.CharField(max_length=20)
| 21.138889
| 75
| 0.730618
| 644
| 0.846255
| 0
| 0
| 0
| 0
| 0
| 0
| 73
| 0.095926
|
3cccd58c207124db8b9a503a7ea72e1986e27cb3
| 459
|
py
|
Python
|
voxelcut/initial.py
|
JohnyEngine/CNC
|
e4c77250ab2b749d3014022cbb5eb9924e939993
|
[
"Apache-2.0"
] | null | null | null |
voxelcut/initial.py
|
JohnyEngine/CNC
|
e4c77250ab2b749d3014022cbb5eb9924e939993
|
[
"Apache-2.0"
] | null | null | null |
voxelcut/initial.py
|
JohnyEngine/CNC
|
e4c77250ab2b749d3014022cbb5eb9924e939993
|
[
"Apache-2.0"
] | null | null | null |
toolpath.coords = Coords(-100, -100, -5, 100, 100, 5)
voxelcut.set_current_color(12566512)
toolpath.coords.add_block(0.150768, 0, -5, 9.69846, 9.84808, 10)
GRAY = 0x505050
RED = 0x600000
BLUE = 0x000050
toolpath.tools[2] = Tool([[Span(Point(3, 0), Vertex(0, Point(3, 20), Point(0, 0)), False), GRAY], [Span(Point(3, 20), Vertex(0, Point(3, 40), Point(0, 0)), False), RED]])
#toolpath.load('C:/Users/Dan/AppData/Local/Temp/test.tap')
toolpath.load('test.tap')
| 45.9
| 170
| 0.681917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.148148
|
3ccda61294b042b9301d3115e54f9eaad129e0a8
| 2,200
|
py
|
Python
|
core/cliqueIntersectionGraph.py
|
ongmingyang/some-max-cut
|
7ebabd06d3e46789a3672bd516adc48953ba135e
|
[
"MIT"
] | 3
|
2018-03-16T17:25:23.000Z
|
2021-04-27T21:42:31.000Z
|
core/cliqueIntersectionGraph.py
|
ongmingyang/some-max-cut
|
7ebabd06d3e46789a3672bd516adc48953ba135e
|
[
"MIT"
] | null | null | null |
core/cliqueIntersectionGraph.py
|
ongmingyang/some-max-cut
|
7ebabd06d3e46789a3672bd516adc48953ba135e
|
[
"MIT"
] | null | null | null |
import sys
from clique import Clique
from cvxopt import spmatrix, amd
from collections import defaultdict as dd
import chompack as cp
from util.graph import Graph
LARGEST_CLIQUE_SIZE = 24
#
# A CliqueIntersectionGraph is a graph (V,E), where V is a set of cliques, each
# bag containing a clique, and (i,j) in E if clique i and clique j have a non
# empty sepset
#
# @param I,J,W (I[i],J[i]) is an edge in the original graph with weight
# W[i]. We require I > J
#
class CliqueIntersectionGraph(Graph):
def __init__(self, I, J, W):
Graph.__init__(self)
self.cliques = self.nodes # We use a different alias to prevent confusion
n = max(max(I),max(J))+1
eye = spmatrix(1, range(n), range(n))
A = spmatrix(W, I, J, (n,n)) + eye
self.n = n
# Compute symbolic factorization using AMD ordering
# This automatically does a chordal completion on the graph
symb = cp.symbolic(A, p=amd.order)
# The factorization permutes the node indices, we need to unpermute these
cliques = symb.cliques()
perm = symb.p
cliques = [[perm[i] for i in clique] for clique in cliques]
# If the largest clique is above threshold, we terminate the algorithm
self.max_clique_size = max(len(x) for x in cliques)
if self.max_clique_size > LARGEST_CLIQUE_SIZE:
sys.exit('''
Chordal completion has clique of size %d,
Max allowed size is %d,
Program terminating...
''' % (self.max_clique_size, LARGEST_CLIQUE_SIZE))
node_to_clique = dd(list)
# Instantiate cliques and fill node_to_clique entries
for index, nodes in enumerate(cliques):
clique = Clique(index, nodes, A)
for node in nodes:
node_to_clique[node].append(clique)
self.cliques.append(clique)
# Update list of neighbours after node_to_clique entries are filled
for clique in self.cliques:
for node in clique.nodes:
neighbours = list(node_to_clique[node])
neighbours.remove(clique)
# Add edge to edgeset
for neighbour in neighbours:
edge = tuple(sorted([neighbour.index, clique.index]))
self.edges[edge] = clique.determine_sepset_size(neighbour)
| 33.333333
| 79
| 0.678636
| 1,709
| 0.776818
| 0
| 0
| 0
| 0
| 0
| 0
| 852
| 0.387273
|
3ccdd8c975b584a486aac3e7fbb9b1d2ae39487f
| 4,586
|
py
|
Python
|
backend/src/baserow/contrib/database/airtable/tasks.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/airtable/tasks.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/airtable/tasks.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
import logging
from django.conf import settings
from baserow.config.celery import app
logger = logging.getLogger(__name__)
@app.task(
bind=True,
queue="export",
soft_time_limit=settings.BASEROW_AIRTABLE_IMPORT_SOFT_TIME_LIMIT,
)
def run_import_from_airtable(self, job_id: int):
"""
Starts the Airtable import job. This task must run after the job has been created.
:param job_id: The id related to the job that must be started.
"""
from celery.exceptions import SoftTimeLimitExceeded
from pytz import timezone as pytz_timezone
from requests.exceptions import RequestException
from django.db import transaction
from django.core.cache import cache
from baserow.core.signals import application_created
from baserow.core.utils import Progress
from baserow.contrib.database.airtable.models import AirtableImportJob
from baserow.contrib.database.airtable.handler import AirtableHandler
from baserow.contrib.database.airtable.exceptions import AirtableBaseNotPublic
from baserow.contrib.database.airtable.constants import (
AIRTABLE_EXPORT_JOB_DOWNLOADING_FAILED,
AIRTABLE_EXPORT_JOB_DOWNLOADING_FINISHED,
)
from .cache import airtable_import_job_progress_key
job = AirtableImportJob.objects.select_related("group").get(id=job_id)
def progress_updated(percentage, state):
"""
Every time the progress of the import changes, this callback function is
called. If the percentage or the state has changed, the job will be updated.
"""
        nonlocal job
        changed = False
        if job.progress_percentage != percentage:
job.progress_percentage = percentage
changed = True
if state is not None and job.state != state:
job.state = state
changed = True
if changed:
# The progress must also be stored in the Redis cache. Because we're
# currently in a transaction, other database connections don't know about
            # the progress and this way, we can still communicate it to the user.
cache.set(
airtable_import_job_progress_key(job.id),
{"progress_percentage": job.progress_percentage, "state": job.state},
timeout=None,
)
job.save()
progress = Progress(100)
progress.register_updated_event(progress_updated)
kwargs = {}
if job.timezone is not None:
kwargs["timezone"] = pytz_timezone(job.timezone)
try:
with transaction.atomic():
databases, id_mapping = AirtableHandler.import_from_airtable_to_group(
job.group,
job.airtable_share_id,
progress_builder=progress.create_child_builder(
represents_progress=progress.total
),
**kwargs
)
# The web-frontend needs to know about the newly created database, so we
# call the application_created signal.
for database in databases:
application_created.send(self, application=database, user=None)
job.state = AIRTABLE_EXPORT_JOB_DOWNLOADING_FINISHED
job.database = databases[0]
# Don't override the other properties that have been set during the
# progress update.
job.save(update_fields=("state", "database"))
except Exception as e:
exception_mapping = {
SoftTimeLimitExceeded: "The import job took too long and was timed out.",
RequestException: "The Airtable server could not be reached.",
AirtableBaseNotPublic: "The Airtable base is not publicly shared.",
}
error = "Something went wrong while importing the Airtable base."
for exception, error_message in exception_mapping.items():
if isinstance(e, exception):
error = error_message
break
logger.error(e)
job.state = AIRTABLE_EXPORT_JOB_DOWNLOADING_FAILED
job.error = str(e)
job.human_readable_error = error
# Don't override the other properties that have been set during the
# progress update.
job.save(
update_fields=(
"state",
"error",
"human_readable_error",
)
)
# Delete the import job cached entry because the transaction has been committed
# and the AirtableImportJob entry now contains the latest data.
cache.delete(airtable_import_job_progress_key(job.id))
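

# Usage sketch (an assumption, not part of the original module): because this is a
# Celery task, callers would typically enqueue it right after creating the
# AirtableImportJob row, e.g. ``run_import_from_airtable.delay(job.id)``, where
# ``job`` is the hypothetical freshly created AirtableImportJob instance.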
| 35.276923
| 86
| 0.657872
| 0
| 0
| 0
| 0
| 4,456
| 0.971653
| 0
| 0
| 1,275
| 0.27802
|
3cd0a4bbec748d6e33fb26e96ae01249982c0522
| 7,439
|
py
|
Python
|
d2lbook/notebook.py
|
naoufelito/d2l-book
|
bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a
|
[
"Apache-2.0"
] | null | null | null |
d2lbook/notebook.py
|
naoufelito/d2l-book
|
bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a
|
[
"Apache-2.0"
] | 1
|
2020-06-06T06:34:03.000Z
|
2020-06-06T07:01:56.000Z
|
d2lbook/notebook.py
|
naoufelito/d2l-book
|
bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a
|
[
"Apache-2.0"
] | null | null | null |
"""utilities to handle notebooks"""
from typing import Union, List, Optional
import copy
import notedown
import nbformat
import nbconvert
from nbformat import notebooknode
from d2lbook import markdown
from d2lbook import common
def create_new_notebook(nb: notebooknode.NotebookNode,
cells: List[notebooknode.NotebookNode]
) -> notebooknode.NotebookNode:
"""create an empty notebook by copying metadata from nb"""
new_nb = copy.deepcopy(nb)
new_nb.cells = cells
return new_nb
def read_markdown(source: Union[str, List[str]]) -> notebooknode.NotebookNode:
"""Returns a notebook from markdown source"""
if not isinstance(source, str):
source = '\n'.join(source)
reader = notedown.MarkdownReader(match='strict')
return reader.reads(source)
def split_markdown_cell(nb: notebooknode.NotebookNode) -> notebooknode.NotebookNode:
"""split a markdown cell if it contains tab block.
a new property `class` is added to the metadata for a tab cell.
"""
    # merge continuous markdown cells
grouped_cells = common.group_list(nb.cells,
lambda cell, _: cell.cell_type=='markdown')
new_cells = []
for is_md, group in grouped_cells:
if not is_md:
new_cells.extend(group)
else:
src = '\n\n'.join(cell.source for cell in group)
md_cells = markdown.split_markdown(src)
is_tab_cell = lambda cell, _: cell['type']=='markdown' and 'class' in cell
grouped_md_cells = common.group_list(md_cells, is_tab_cell)
for is_tab, md_group in grouped_md_cells:
new_cell = nbformat.v4.new_markdown_cell(
markdown.join_markdown_cells(md_group))
if is_tab:
tab = md_group[0]['class']
assert tab.startswith('`') and tab.endswith('`'), tab
new_cell.metadata['tab'] = tab[1:-1]
new_cells.append(new_cell)
new_cells = [cell for cell in new_cells if cell.source]
return create_new_notebook(nb, new_cells)
def _get_cell_tab(cell: notebooknode.NotebookNode, default_tab: str='') -> Optional[str]:
"""Get the cell tab"""
if 'tab' in cell.metadata:
return cell.metadata['tab']
if cell.cell_type != 'code':
return None
match = common.source_tab_pattern.search(cell.source)
if match:
return match[1]
return default_tab
def get_tab_notebook(nb: notebooknode.NotebookNode, tab: str, default_tab: str
) -> notebooknode.NotebookNode:
"""Returns a notebook with code/markdown cells that doesn't match tab
removed.
Return None if no cell matched the tab and nb contains code blocks.
    An `origin_pos` property is added to the metadata for each cell, which
records its position in the original notebook `nb`.
"""
matched_tab = False
new_cells = []
for i, cell in enumerate(nb.cells):
new_cell = copy.deepcopy(cell)
new_cell.metadata['origin_pos'] = i
cell_tab = _get_cell_tab(new_cell, default_tab)
if not cell_tab:
new_cells.append(new_cell)
else:
if cell_tab == tab:
new_cell.metadata['tab'] = cell_tab
matched_tab = True
# remove the tab from source
lines = new_cell.source.split('\n')
for j, line in enumerate(lines):
src_tab = common.source_tab_pattern.search(line)
text_tab = common.md_mark_pattern.search(line)
if src_tab or (text_tab and (
text_tab[1]=='begin_tab' or text_tab[1]=='end_tab')):
del lines[j]
new_cell.source = '\n'.join(lines)
new_cells.append(new_cell)
if not matched_tab and any([cell.cell_type=='code' for cell in nb.cells]):
return None
return create_new_notebook(nb, new_cells)
def merge_tab_notebooks(src_notebooks: List[notebooknode.NotebookNode]
) -> notebooknode.NotebookNode:
"""Merge the tab notebooks into a single one.
    The reverse operation of get_tab_notebook.
"""
n = max([max([cell.metadata['origin_pos'] for cell in nb.cells])
for nb in src_notebooks])
new_cells = [None] * (n+1)
for nb in src_notebooks:
for cell in nb.cells:
new_cells[cell.metadata['origin_pos']] = copy.deepcopy(cell)
return create_new_notebook(src_notebooks[0], new_cells)
def _get_tab_bar(tabs, tab_id, default_tab, div_class=''):
code = f"```eval_rst\n\n.. raw:: html\n\n <div class=\"mdl-tabs mdl-js-tabs mdl-js-ripple-effect\"><div class=\"mdl-tabs__tab-bar {div_class}\">"
for i, tab in enumerate(tabs):
active = 'is-active' if tab == default_tab else ''
code +=f'<a href="#{tab}-{tab_id}-{i}" class="mdl-tabs__tab {active}">{tab}</a>'
code += "</div>\n```"
return nbformat.v4.new_markdown_cell(code)
def _get_tab_panel(cells, tab, tab_id, default_tab):
active = 'is-active' if tab == default_tab else ''
tab_panel_begin = nbformat.v4.new_markdown_cell(
f"```eval_rst\n.. raw:: html\n\n <div class=\"mdl-tabs__panel {active}\" id=\"{tab}-{tab_id}\">\n```")
tab_panel_end = nbformat.v4.new_markdown_cell(
"```eval_rst\n.. raw:: html\n\n </div>\n```")
return [tab_panel_begin, *cells, tab_panel_end]
def _merge_tabs(nb: notebooknode.NotebookNode):
"""merge side-by-side tabs into a single one"""
def _tab_status(cell, status):
if _get_cell_tab(cell):
return 1 if cell.cell_type == 'markdown' else 2
return 0
cell_groups = common.group_list(nb.cells, _tab_status)
meta = [(in_tab, [cell.metadata['tab'] for cell in group] if in_tab else None
) for in_tab, group in cell_groups]
new_cells = []
i = 0
while i < len(meta):
in_tab, tabs = meta[i]
if not in_tab:
new_cells.append((False, cell_groups[i][1]))
i += 1
else:
j = i + 1
while j < len(meta):
if meta[j][1] != tabs:
break
j += 1
groups = [group for _, group in cell_groups[i:j]]
new_cells.append((True, [x for x in zip(*groups)]))
i = j
return new_cells
def add_html_tab(nb: notebooknode.NotebookNode, default_tab: str) -> notebooknode.NotebookNode:
"""Add html codes for the tabs"""
cell_groups = _merge_tabs(nb)
tabs = [len(group) for in_tab, group in cell_groups if in_tab]
if not tabs or max(tabs) <= 1:
return nb
new_cells = []
for i, (in_tab, group) in enumerate(cell_groups):
if not in_tab:
new_cells.extend(group)
else:
tabs = [cells[0].metadata['tab'] for cells in group]
div_class = "code" if group[0][0].cell_type == 'code' == 2 else "text"
new_cells.append(_get_tab_bar(tabs, i, default_tab, div_class))
for j, (tab, cells) in enumerate(zip(tabs, group)):
new_cells.extend(_get_tab_panel(cells, tab, f'{i}-{j}', default_tab))
new_cells.append(nbformat.v4.new_markdown_cell(
"```eval_rst\n.. raw:: html\n\n </div>\n```"))
return create_new_notebook(nb, new_cells)
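

# A minimal usage sketch (an assumption, not part of the original module): parse a
# small markdown snippet into a notebook and split any tab blocks it contains. The
# sample text below is hypothetical.
if __name__ == '__main__':
    _sample = '# Title\n\nSome text.\n\n```python\nprint(1 + 1)\n```\n'
    _nb = split_markdown_cell(read_markdown(_sample))
    print(len(_nb.cells), 'cells parsed')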
| 41.099448
| 152
| 0.609894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,485
| 0.199624
|
3cd1756adb8c57eb1928457d00bc92c25a43ba4c
| 1,204
|
py
|
Python
|
myamiweb/imcache/imcacheconfig.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
myamiweb/imcache/imcacheconfig.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
myamiweb/imcache/imcacheconfig.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | 1
|
2019-09-05T20:58:37.000Z
|
2019-09-05T20:58:37.000Z
|
# config file for imcached
# camera name pattern to cache. For example 'GatanK2' will restrict it
# only to camera names containing the string
camera_name_pattern = ''
# time in seconds to wait between consecutive queries
query_interval = 5
# limit query to later than this timestamp (mysql style: yyyymmddhhmmss)
min_timestamp = '20130126000000'
# limit query to start at this image id
start_id = 0
# root dir of cache. session subdirs will be added automatically
cache_path = '/srv/cache/dbem'
# maximum image dimension after conversion
redux_maxsize1 = 4096
redux_maxsize2 = 1024
# initial redux read and resize before calculating power and final
redux_args1 = {
'pipes': 'read:Read,shape:Shape',
'cache': False,
}
# redux to create final image for cache
redux_args_jpg = {
'cache': False,
'pipes': 'shape:Shape,scale:Scale,format:Format',
'scaletype': 'stdev',
'scalemin': -5,
'scalemax': 5,
'oformat': 'JPEG',
}
# redux to create final power image for cache
redux_args_pow = {
'cache': False,
'pipes': 'power:Power,shape:Shape,mask:Mask,scale:Scale,format:Format',
'power': True,
'maskradius': 10,
'scaletype': 'stdev',
'scalemin': -5,
'scalemax': 5,
'oformat': 'JPEG',
}
| 23.607843
| 72
| 0.724252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 886
| 0.73588
|
3cd1a6c109376dfdc24ad44b61222972d5c24dd2
| 3,737
|
py
|
Python
|
graphs/graphgenerator.py
|
andrew-lockwood/lab-project
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | 1
|
2017-08-30T15:21:31.000Z
|
2017-08-30T15:21:31.000Z
|
graphs/graphgenerator.py
|
andrew-lockwood/lab-project-summer2016
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | null | null | null |
graphs/graphgenerator.py
|
andrew-lockwood/lab-project-summer2016
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | 1
|
2017-06-15T20:44:59.000Z
|
2017-06-15T20:44:59.000Z
|
import sqlite3
import matplotlib.pyplot as plt
import re
from collections import Counter
db = "C:\\Users\\Andrew\\lab-project\\data\\frontiers_corpus.db"
def wordvsline():
q = "SELECT wordcount, linecount FROM ArticleTXT"
curr.execute(q)
x,y = zip(*curr.fetchall())
mpl_fig = plt.figure()
ax = mpl_fig.add_subplot(111)
plt.scatter(x,y)
plt.xlim(0,25000)
plt.ylim(0,450)
ax.set_xlabel('Word Count')
ax.set_ylabel('Line Count')
ax.set_title('Words vs Lines')
plt.show()
def titles_between(start, end):
q = """ SELECT DISTINCT articleID
FROM ArticleInformation
WHERE date BETWEEN
'{s}' AND '{e}'""".format(s=start, e=end)
    # the original referenced an undefined `di` helper; use the module-level cursor instead
    return curr.execute(q).fetchall()
def by_year():
q = """ SELECT strftime('%Y', date), count(articleID)
FROM ArticleInformation
GROUP BY strftime('%Y', date)"""
    return curr.execute(q).fetchall()
def by_month():
q = """ SELECT strftime('%Y-%m', date), count(articleID)
FROM ArticleInformation
GROUP BY strftime('%Y-%m', date)"""
    return curr.execute(q).fetchall()
def by_quarter():
q = """ SELECT strftime('%Y', date),
CASE
WHEN cast(strftime('%m', date) as integer) BETWEEN 1 AND 3 THEN 1
WHEN cast(strftime('%m', date) as integer) BETWEEN 4 AND 6 THEN 2
WHEN cast(strftime('%m', date) as integer) BETWEEN 7 AND 9 THEN 3
ELSE 4
END AS Quarter, count(articleID)
FROM ArticleInformation
GROUP BY strftime('%Y', date),
CASE
WHEN cast(strftime('%m', date) as integer) BETWEEN 1 AND 3 THEN 1
WHEN cast(strftime('%m', date) as integer) BETWEEN 4 AND 6 THEN 2
WHEN cast(strftime('%m', date) as integer) BETWEEN 7 AND 9 THEN 3
ELSE 4
END"""
    return curr.execute(q).fetchall()
def graph(value):
data = []
if value == 'year':
for year, count in by_year():
data.append((year, count))
if value == 'month':
for year, count in by_month():
data.append((year, count))
if value == 'quarter':
for year, quarter, count in by_quarter():
d = "%s%s"%(year,'q'+str(quarter))
data.append((d, count))
x = [i for i in range(len(data))]
labels,y = zip(*data)
mpl_fig = plt.figure()
ax = mpl_fig.add_subplot(111)
plt.margins(0.025, 0)
plt.bar(x, y, align='center')
    ax.set_ylabel('Articles Received')
plt.xticks(x, labels, rotation=45)
plt.show()
def kwd_frequency():
c1 = Counter()
c2 = Counter()
q = """ SELECT keyword, count(articleID)
FROM OriginalKeywords
GROUP BY keyword"""
data = curr.execute(q)
n = 10
for kwd, count in data.fetchall():
if count < 20:
c2[int(count)] += 1
else:
c1[int(count/n)] += 1
x = [i for i in range(len(c1))]
labels,y = zip(*c1.items())
labels = ["%s-%s"%(l*n, l*n+n) for l in labels]
mpl_fig = plt.figure()
ax = mpl_fig.add_subplot(111)
plt.margins(0.025, 0)
plt.bar(x, y, align='center')
plt.xticks(x, labels, rotation=90)
plt.show()
x = [i for i in range(len(c2))]
labels,y = zip(*c2.items())
mpl_fig = plt.figure()
ax = mpl_fig.add_subplot(111)
plt.margins(0.025, 0)
plt.bar(x, y, align='center')
plt.xticks(x, labels, rotation=90)
plt.show()
if __name__ == "__main__":
conn = sqlite3.connect(db)
curr = conn.cursor()
kwd_frequency()
| 24.585526
| 85
| 0.54616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,584
| 0.423869
|
3cd1de8fe3c2b6efa630c25b86bb05e41fab354a
| 5,612
|
py
|
Python
|
peering_manager/constants.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 127
|
2017-10-12T00:27:45.000Z
|
2020-08-07T11:13:55.000Z
|
peering_manager/constants.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 247
|
2017-12-26T12:55:34.000Z
|
2020-08-08T11:57:35.000Z
|
peering_manager/constants.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 63
|
2017-10-13T06:46:05.000Z
|
2020-08-08T00:41:57.000Z
|
from collections import OrderedDict
from devices.filters import ConfigurationFilterSet
from devices.models import Configuration
from devices.tables import ConfigurationTable
from messaging.filters import ContactFilterSet, EmailFilterSet
from messaging.models import Contact, ContactAssignment, Email
from messaging.tables import ContactTable, EmailTable
from net.filters import ConnectionFilterSet
from net.models import Connection
from net.tables import ConnectionTable
from peering.filters import (
AutonomousSystemFilterSet,
BGPGroupFilterSet,
CommunityFilterSet,
DirectPeeringSessionFilterSet,
InternetExchangeFilterSet,
InternetExchangePeeringSessionFilterSet,
RouterFilterSet,
RoutingPolicyFilterSet,
)
from peering.models import (
AutonomousSystem,
BGPGroup,
Community,
DirectPeeringSession,
InternetExchange,
InternetExchangePeeringSession,
Router,
RoutingPolicy,
)
from peering.tables import (
AutonomousSystemTable,
BGPGroupTable,
CommunityTable,
DirectPeeringSessionTable,
InternetExchangePeeringSessionTable,
InternetExchangeTable,
RouterTable,
RoutingPolicyTable,
)
from utils.functions import count_related
__all__ = ("SEARCH_MAX_RESULTS", "SEARCH_TYPES")
SEARCH_MAX_RESULTS = 15
SEARCH_TYPES = OrderedDict(
(
# devices
(
"configuration",
{
"queryset": Configuration.objects.all(),
"filterset": ConfigurationFilterSet,
"table": ConfigurationTable,
"url": "devices:configuration_list",
},
),
# messaging
(
"contact",
{
"queryset": Contact.objects.prefetch_related("assignments").annotate(
assignment_count=count_related(ContactAssignment, "contact")
),
"filterset": ContactFilterSet,
"table": ContactTable,
"url": "messaging:contact_list",
},
),
(
"email",
{
"queryset": Email.objects.all(),
"filterset": EmailFilterSet,
"table": EmailTable,
"url": "messaging:email_list",
},
),
# net
(
"connection",
{
"queryset": Connection.objects.prefetch_related(
"internet_exchange_point", "router"
),
"filterset": ConnectionFilterSet,
"table": ConnectionTable,
"url": "net:connection_list",
},
),
# peering
(
"autonomousystem",
{
"queryset": AutonomousSystem.objects.defer("prefixes"),
"filterset": AutonomousSystemFilterSet,
"table": AutonomousSystemTable,
"url": "peering:autonomoussystem_list",
},
),
(
"bgpgroup",
{
"queryset": BGPGroup.objects.all(),
"filterset": BGPGroupFilterSet,
"table": BGPGroupTable,
"url": "peering:bgpgroup_list",
},
),
(
"community",
{
"queryset": Community.objects.all(),
"filterset": CommunityFilterSet,
"table": CommunityTable,
"url": "peering:community_list",
},
),
(
"directpeeringsession",
{
"queryset": DirectPeeringSession.objects.prefetch_related(
"autonomous_system", "bgp_group", "router"
),
"filterset": DirectPeeringSessionFilterSet,
"table": DirectPeeringSessionTable,
"url": "peering:directpeeringsession_list",
},
),
(
"internetexchange",
{
"queryset": InternetExchange.objects.prefetch_related(
"local_autonomous_system"
).annotate(
connection_count=count_related(
Connection, "internet_exchange_point"
)
),
"filterset": InternetExchangeFilterSet,
"table": InternetExchangeTable,
"url": "peering:internetexchange_list",
},
),
(
"internetexchangepeeringsession",
{
"queryset": InternetExchangePeeringSession.objects.prefetch_related(
"autonomous_system", "ixp_connection"
),
"filterset": InternetExchangePeeringSessionFilterSet,
"table": InternetExchangePeeringSessionTable,
"url": "peering:internetexchangepeeringsession_list",
},
),
(
"router",
{
"queryset": Router.objects.prefetch_related("platform").annotate(
connection_count=count_related(Connection, "router")
),
"filterset": RouterFilterSet,
"table": RouterTable,
"url": "peering:router_list",
},
),
(
"routingpolicy",
{
"queryset": RoutingPolicy.objects.all(),
"filterset": RoutingPolicyFilterSet,
"table": RoutingPolicyTable,
"url": "peering:routingpolicy_list",
},
),
),
)
| 31.351955
| 85
| 0.533678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,179
| 0.210086
|
3cd24bc69492048a6c6dccda50896c121dfcd5b5
| 1,453
|
py
|
Python
|
alexhart/day1-2.py
|
chadnetzer/advent2020
|
b992eb202ff9dd5cc353914a136337412c8bd074
|
[
"MIT"
] | null | null | null |
alexhart/day1-2.py
|
chadnetzer/advent2020
|
b992eb202ff9dd5cc353914a136337412c8bd074
|
[
"MIT"
] | 1
|
2020-12-06T07:51:48.000Z
|
2020-12-08T05:03:11.000Z
|
alexhart/day1-2.py
|
chadnetzer/advent2020
|
b992eb202ff9dd5cc353914a136337412c8bd074
|
[
"MIT"
] | 8
|
2020-12-01T21:29:21.000Z
|
2020-12-09T23:55:15.000Z
|
chalenge_input = '''1956
1994
457
1654
2003
1902
1741
1494
1597
1129
1146
1589
1989
1093
1881
1288
1848
1371
1508
1035
1813
1335
1634
1102
1262
1637
1048
1807
1270
1528
1670
1803
1202
1294
1570
1640
1484
1872
1140
1207
1485
1781
1778
1772
1334
1267
1045
1194
1873
1441
1557
1414
1123
1980
1527
1591
1665
1916
1662
1139
1973
1258
1041
1134
1609
1554
1455
1124
1478
1938
1759
1281
1410
1511
930
1319
1302
1827
1216
1404
1460
2002
1590
1817
1341
1631
1608
1382
1158
1594
1049
1804
1555
1753
447
1021
1079
609
1766
1327
1851
1052
1737
1175
1043
1945
1573
1113
1724
1203
1856
1682
1623
1135
1015
1423
1412
1315
1375
1895
1351
1530
1758
1445
1518
1819
1567
1305
1919
1952
1432
1099
1476
1883
1871
1900
1442
1393
1214
1283
1538
1391
1008
1109
1621
1876
1998
1032
1324
1927
481
1732
1370
1683
1199
1465
1882
1293
1671
1456
1197
1506
1381
1469
1830
1957
1850
1184
1564
1170
1943
1131
1867
1208
1788
1337
1722
1760
1651
1069
1574
1959
1770
66
1190
1606
1899
1054
980
1693
1173
1479
1333
1579
1720
1782
1971
1438
1178
1306'''
test_input = '''1721
979
366
299
675
1456'''
def sum_check(input_string_test):
inputlines = input_string_test.splitlines()
for linepri in inputlines:
for linesec in inputlines:
for linethr in inputlines:
if int(linepri) + int(linesec) + int(linethr) == 2020:
return int(linepri) * int(linesec) * int(linethr)
print(sum_check(test_input))
print(sum_check(chalenge_input))
| 6.634703
| 70
| 0.751549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,028
| 0.707502
|
3cd2638aee801c7efa156f6936b153c75c517e46
| 465
|
py
|
Python
|
e2e_graphsage/utils/logging.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
e2e_graphsage/utils/logging.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
e2e_graphsage/utils/logging.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import logging
def setup_logging(log_path, mode='w'):
fmt = '%(asctime)s %(levelname)-4.4s %(filename)s:%(lineno)d: %(message)s'
logging.root.handlers = []
logging.basicConfig(
filename=log_path,
filemode=mode,
format=fmt,
datefmt='%m-%d %H:%M',
level=logging.INFO
)
logging.getLogger().addHandler(logging.StreamHandler())
return logging.getLogger(__name__)
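

# Minimal usage sketch (an assumption, not part of the original module); the log
# file name below is hypothetical.
if __name__ == '__main__':
    logger = setup_logging('example.log')
    logger.info('logging configured')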
| 24.473684
| 78
| 0.647312
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.180645
|
3cd2949cb17d74dce66873599c286cade86072c8
| 3,486
|
py
|
Python
|
dmipy/distributions/tests/test_bingham.py
|
AthenaEPI/mipy
|
dbbca4066a6c162dcb05865df5ff666af0e4020a
|
[
"MIT"
] | 59
|
2018-02-22T19:14:19.000Z
|
2022-02-22T05:40:27.000Z
|
dmipy/distributions/tests/test_bingham.py
|
AthenaEPI/mipy
|
dbbca4066a6c162dcb05865df5ff666af0e4020a
|
[
"MIT"
] | 95
|
2018-02-03T11:55:30.000Z
|
2022-03-31T15:10:39.000Z
|
dmipy/distributions/tests/test_bingham.py
|
AthenaEPI/mipy
|
dbbca4066a6c162dcb05865df5ff666af0e4020a
|
[
"MIT"
] | 23
|
2018-02-13T07:21:01.000Z
|
2022-02-22T20:12:08.000Z
|
from numpy.testing import assert_almost_equal, assert_equal
from dmipy.utils import utils
import numpy as np
from dmipy.utils.utils import (
rotation_matrix_100_to_theta_phi, rotation_matrix_around_100,
rotation_matrix_100_to_theta_phi_psi
)
from dmipy.distributions import distributions
def test_rotation_100_to_theta_phi():
# test 1: does R100_to_theta_phi rotate a vector theta_phi?
theta_ = np.random.rand() * np.pi
phi_ = (np.random.rand() - .5) * np.pi
R100_to_theta_pi = rotation_matrix_100_to_theta_phi(theta_, phi_)
xyz = np.dot(R100_to_theta_pi, np.r_[1, 0, 0])
_, theta_rec, phi_rec = utils.cart2sphere(xyz)
assert_almost_equal(theta_, theta_rec)
assert_almost_equal(phi_, phi_rec)
def test_axis_rotation_does_not_affect_axis():
# test 2: does R_around_100 not affect 100?
psi_ = np.random.rand() * np.pi
R_around_100 = rotation_matrix_around_100(psi_)
v100 = np.r_[1, 0, 0]
assert_equal(v100, np.dot(R_around_100, v100))
def test_psi_insensitivity_when_doing_psi_theta_phi_rotation():
# test 3: does psi still have no influence on main eigenvector when doing
# both rotations?
theta_ = np.random.rand() * np.pi
phi_ = (np.random.rand() - .5) * np.pi
psi_ = np.random.rand() * np.pi
R_ = rotation_matrix_100_to_theta_phi_psi(theta_, phi_, psi_)
xyz = np.dot(R_, np.r_[1, 0, 0])
_, theta_rec, phi_rec = utils.cart2sphere(xyz)
assert_almost_equal(theta_, theta_rec)
assert_almost_equal(phi_, phi_rec)
def test_rotation_around_axis():
# test 4: does psi really rotate the second vector?
psi_ = np.pi # half circle
R_around_100 = rotation_matrix_around_100(psi_)
v2 = np.r_[0, 1, 0]
v2_expected = np.r_[0, -1, 0]
v2_rot = np.dot(R_around_100, v2)
assert_equal(np.round(v2_rot), v2_expected)
def test_rotation_on_bingham_tensor():
# test 5: does combined rotation rotate Bingham well?
kappa_ = np.random.rand()
beta_ = kappa_ / 2. # beta<kappa
Bdiag_ = np.diag(np.r_[kappa_, beta_, 0])
theta_ = np.random.rand() * np.pi
phi_ = (np.random.rand() - .5) * np.pi
psi_ = np.random.rand() * np.pi * 0
R_ = rotation_matrix_100_to_theta_phi_psi(theta_, phi_, psi_)
B_ = R_.dot(Bdiag_).dot(R_.T)
eigvals, eigvecs = np.linalg.eigh(B_)
main_evec = eigvecs[:, np.argmax(eigvals)]
_, theta_rec0, phi_rec0 = utils.cart2sphere(main_evec)
# checking if the angles are antipodal to each other
if abs(theta_ - theta_rec0) > 1e-5:
theta_rec = np.pi - theta_rec0
if phi_rec0 > 0:
phi_rec = phi_rec0 - np.pi
elif phi_rec0 < 0:
phi_rec = phi_rec0 + np.pi
else:
theta_rec = theta_rec0
phi_rec = phi_rec0
assert_almost_equal(theta_, theta_rec)
assert_almost_equal(phi_, phi_rec)
assert_almost_equal(np.diag(Bdiag_), np.sort(eigvals)[::-1])
def test_bingham_equal_to_watson(beta_fraction=0):
# test if bingham with beta=0 equals watson distribution
mu_ = np.random.rand(2)
n_cart = utils.sphere2cart(np.r_[1., mu_])
psi_ = np.random.rand() * np.pi
odi_ = np.max([0.1, np.random.rand()])
bingham = distributions.SD2Bingham(mu=mu_, psi=psi_,
odi=odi_,
beta_fraction=beta_fraction)
watson = distributions.SD1Watson(mu=mu_, odi=odi_)
Bn = bingham(n=n_cart)
Wn = watson(n=n_cart)
assert_almost_equal(Bn, Wn, 3)
| 35.938144
| 77
| 0.676133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 429
| 0.123064
|
3cd3066a814fddcf19dac7173c44fed139f2e632
| 669
|
py
|
Python
|
head_first_design_patterns/hofs/duck_dispenser.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
head_first_design_patterns/hofs/duck_dispenser.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
head_first_design_patterns/hofs/duck_dispenser.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
__author__ = '@britodfbr'
from head_first_design_patterns.hofs import duck
from head_first_design_patterns.hofs import fly_behaviors
from head_first_design_patterns.hofs import quack_behaviors
def run():
    # Instantiate ducks
print("==== Model duck ====")
model = duck.DuckHOF()
model.perform_quack()
model.perform_fly()
model.display()
print("==== True duck ====")
model.perform_fly = fly_behaviors.fly_wings
model.perform_quack = quack_behaviors.quack
model.display()
print("==== Toy duck ====")
model.perform_fly = fly_behaviors.fly_rocket_powered
model.perform_quack = quack_behaviors.squeak
model.display()
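

# Entry-point sketch (an assumption; the original module only defines run()):
if __name__ == '__main__':
    run()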
| 27.875
| 59
| 0.715994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.139013
|
3cd5abf591689acf3071f0da912c722b5ef681bb
| 1,279
|
py
|
Python
|
tests/test_zones_json.py
|
electricitymap/electricitymap-contrib
|
6572b12d1cef72c734b80273598e156ebe3c22ea
|
[
"MIT"
] | 143
|
2022-01-01T10:56:58.000Z
|
2022-03-31T11:25:47.000Z
|
tests/test_zones_json.py
|
electricitymap/electricitymap-contrib
|
6572b12d1cef72c734b80273598e156ebe3c22ea
|
[
"MIT"
] | 276
|
2021-12-30T15:57:15.000Z
|
2022-03-31T14:57:16.000Z
|
tests/test_zones_json.py
|
electricitymap/electricitymap-contrib
|
6572b12d1cef72c734b80273598e156ebe3c22ea
|
[
"MIT"
] | 44
|
2021-12-30T19:48:42.000Z
|
2022-03-29T22:46:16.000Z
|
import json
import unittest
from electricitymap.contrib.config import ZONES_CONFIG
ZONE_KEYS = ZONES_CONFIG.keys()
class ZonesJsonTestcase(unittest.TestCase):
def test_bounding_boxes(self):
for zone, values in ZONES_CONFIG.items():
bbox = values.get("bounding_box")
if bbox:
self.assertLess(bbox[0][0], bbox[1][0])
self.assertLess(bbox[0][1], bbox[1][1])
def test_sub_zones(self):
for zone, values in ZONES_CONFIG.items():
sub_zones = values.get("subZoneNames", [])
for sub_zone in sub_zones:
self.assertIn(sub_zone, ZONE_KEYS)
def test_zones_from_geometries_exist(self):
world_geometries = json.load(open("web/geo/world.geojson"))
world_geometries_zone_keys = set()
for ft in world_geometries["features"]:
world_geometries_zone_keys.add(ft["properties"]["zoneName"])
all_zone_keys = set(ZONES_CONFIG.keys())
non_existing_zone_keys = sorted(world_geometries_zone_keys - all_zone_keys)
assert (
len(non_existing_zone_keys) == 0
), f"{non_existing_zone_keys} are defined in world.geojson but not in zones.json"
if __name__ == "__main__":
unittest.main(buffer=True)
| 34.567568
| 89
| 0.656763
| 1,099
| 0.859265
| 0
| 0
| 0
| 0
| 0
| 0
| 171
| 0.133698
|
3cd609e71dc0ee42d0acf42ff022c5f15ae9992d
| 3,483
|
py
|
Python
|
app/bda_core/entities/training/word2vec_trainer.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | 1
|
2019-05-25T12:12:39.000Z
|
2019-05-25T12:12:39.000Z
|
app/bda_core/entities/training/word2vec_trainer.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | null | null | null |
app/bda_core/entities/training/word2vec_trainer.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | null | null | null |
import gensim
import numpy as np
class Config:
'''
This class represents the configuration for the Word2Vec model.
'''
def __init__(self, dimension=150, hierarchical_softmax=0, negative_sampling=0, ns_exponent=0,
sample=0, window_size=5, workers=3, use_skip_gram=1, min_count=2, epochs=10):
self.dimension = dimension
self.hierarchical_softmax = hierarchical_softmax
self.negative_sampling = negative_sampling
self.ns_exponent = ns_exponent
self.sample = sample
self.window_size = window_size
self.workers = workers
self.use_skip_gram = use_skip_gram
self.min_count = min_count
self.epochs = epochs
def fit_model(sentences, config):
'''
    Fits the Word2Vec model with the given sentences. The vectors are normalized after
    training, so further training of the model is not possible.
:param sentences: A python list of sentences
:param config: The config for the model
:return: The trained Word2Vec model
'''
model = gensim.models.Word2Vec(size=config.dimension, hs=config.hierarchical_softmax, window=config.window_size,
                                   workers=config.workers, sg=config.use_skip_gram, min_count=config.min_count)
model.build_vocab(sentences)
model.train(sentences, total_examples=len(sentences), epochs=config.epochs)
model.init_sims(replace=True)
return model
def avg_word_vector(model, word_list):
'''
Calculates the average vector of a list of words. The average vector is the mean
of all word vectors. Only words of the Word2Vec vocabulary can be considered.
:param model: The trained Word2Vec model
:param word_list: A python list of words
:return: The average vector
'''
words = [word for word in word_list if word in model.wv.vocab]
return np.mean(model.wv.__getitem__(words), axis=0)
def transpose_vector(vec):
'''
Returns a new vector that is the transposition of the given vector.
:param vec: The vector to transpose
:return: The transposition vector
'''
return vec[np.newaxis]
def create_sentence_vectors(model, questions):
'''
    Calculates the average vectors for all questions. The order of the questions
    list is preserved in the returned list of vectors.
:param model: The trained Word2Vec model
:param questions: A python list of word lists
:return: A list of average vectors
'''
vectors = []
for i in range(len(questions)):
word_list = [word for word in questions[i] if word in model.wv.vocab]
avg_vector = None
if len(word_list) > 0:
avg_vector = avg_word_vector(model, word_list)
vectors.append(avg_vector)
vectors = np.array(vectors)
return vectors
def create_matrix_from_vectors(vectors):
'''
Creates a matrix that contains all vectors of the given vector list as row vectors.
:param vectors: A list of vectors with the same dimension
:return: The concatenation matrix of the given vectors
'''
vectors_len = len(vectors)
if vectors_len > 0:
matrix = transpose_vector(vectors[0])
for i in range(1, vectors_len):
vec = vectors[i]
if vec is not None:
transposed = transpose_vector(vectors[i])
matrix = np.concatenate((matrix, transposed), axis=0)
return matrix
else:
raise Exception('the given list of vectors is empty')
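

# Minimal usage sketch (an assumption, not part of the original module). The toy
# corpus below is hypothetical; a real corpus would be far larger.
if __name__ == '__main__':
    toy_sentences = [
        ['the', 'cat', 'sat', 'on', 'the', 'mat'],
        ['the', 'dog', 'sat', 'on', 'the', 'rug'],
        ['a', 'cat', 'and', 'a', 'dog', 'played'],
    ]
    toy_config = Config(dimension=10, window_size=2, min_count=1, epochs=5)
    w2v_model = fit_model(toy_sentences, toy_config)
    sentence_vectors = create_sentence_vectors(w2v_model, toy_sentences)
    print(create_matrix_from_vectors(sentence_vectors).shape)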
| 35.907216
| 116
| 0.681022
| 678
| 0.19466
| 0
| 0
| 0
| 0
| 0
| 0
| 1,375
| 0.394775
|
3cd825fe40c8c6d189d67799fba8e31f6ba53c8a
| 642
|
py
|
Python
|
polls/migrations/0008_auto_20150918_1715.py
|
santeyio/phantastesproject
|
5ce1e2cb59e8283fe280e01d0e185be62cd4001a
|
[
"MIT"
] | null | null | null |
polls/migrations/0008_auto_20150918_1715.py
|
santeyio/phantastesproject
|
5ce1e2cb59e8283fe280e01d0e185be62cd4001a
|
[
"MIT"
] | null | null | null |
polls/migrations/0008_auto_20150918_1715.py
|
santeyio/phantastesproject
|
5ce1e2cb59e8283fe280e01d0e185be62cd4001a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('polls', '0007_vote'),
]
operations = [
migrations.RemoveField(
model_name='book',
name='votes',
),
migrations.AddField(
model_name='book',
name='user',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| 23.777778
| 76
| 0.605919
| 500
| 0.778816
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 0.102804
|
3cd8375d5dea7465c5253237889db106c353b42a
| 4,342
|
py
|
Python
|
src/main/python/bktools/framework/money/currency.py
|
bspa10/bktools
|
8ddff2bb325df6c4c2bb5cadd3029c0e11ba0734
|
[
"MIT"
] | null | null | null |
src/main/python/bktools/framework/money/currency.py
|
bspa10/bktools
|
8ddff2bb325df6c4c2bb5cadd3029c0e11ba0734
|
[
"MIT"
] | null | null | null |
src/main/python/bktools/framework/money/currency.py
|
bspa10/bktools
|
8ddff2bb325df6c4c2bb5cadd3029c0e11ba0734
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# Standard Library
from os import path
from threading import Lock
from typing import Set
from typing import Optional
from xml.etree import ElementTree as ET
from xml.etree.ElementTree import Element
# 3rd Party Library
# Current Folder
# Current Application
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
class Currency(object):
"""
ISO 4217 currency.
"""
__slots__ = ('__code', '__name', '__number', '__precision')
def __init__(self, code: str, name: str, number: int, precision: int):
self.__code = code
self.__name = name
self.__number = number
self.__precision = precision
@property
def code(self) -> str:
"""
        The currency code which consists of 3 uppercase characters.
e.g: USD, BRL
"""
return self.__code
@property
def name(self) -> str:
"""
Currency Name. i.e. US Dollar, Brazilian Real
"""
return self.__name
@property
def number(self) -> int:
"""
The currency number.
e.g:
840 -> US Dollar
986 -> Brazilian Real
"""
return self.__number
@property
def precision(self) -> int:
"""
The treatment of minor currency unit, in exponent where base is 10.
        For example, a U.S. dollar is 100 cents, which is 2.
"""
return self.__precision
#: -=-=-=-=-=-=-=-=-=-=-=
    #: Comparison Functions
#: -=-=-=-=-=-=-=-=-=-=-=
def __eq__(self, other):
if not isinstance(other, Currency):
return False
return self.number == other.number
#: -=-=-=-=-=-=-=-=-=-=-=
#: Utility Functions
#: -=-=-=-=-=-=-=-=-=-=-=
def __hash__(self):
return hash(self.number)
def __repr__(self):
return f'{self.__class__.__name__} {self.code}'
class Currencies(object):
"""
Factory of ISO 4217 - Currency Code.
"""
__slots__ = '_'
__guard = Lock()
__entries: Set[Currency] = set()
__BASE_DIR = path.abspath(path.dirname(__file__))
def __init__(self):
with self.__guard:
if self.__entries:
return
#: http://www.currency-iso.org/dam/downloads/lists/list_one.xml
file = path.abspath(f'{self.__BASE_DIR}/iso4217.xml')
            raw: ET.ElementTree = ET.parse(file)
for node in raw.findall('CcyTbl/CcyNtry'):
country = self.__get_value(node, 'CtryNm')
if country and country.startswith('ZZ'):
# Ignore none-real countries
continue
code = self.__get_value(node, 'Ccy')
name = self.__get_value(node, 'CcyNm')
number = self.__get_value(node, 'CcyNbr')
unit = self.__get_value(node, 'CcyMnrUnts')
if code and name and number:
try:
currency = Currency(code, name, int(number), int(unit))
except (TypeError, ValueError):
currency = Currency(code, name, int(number), 0)
self.__entries.add(currency)
@staticmethod
def __get_value(node: Element, key: str) -> Optional[str]:
element = node.find(key)
if element is not None:
return element.text.strip()
return None
@classmethod
def code(cls, code: str) -> Optional[Currency]:
"""
Retrieve the Currency object by its code.
e.g. BRL, USD
Parameters:
code: The currency code. e.g. BRL
Returns:
the Currency object
"""
code = code.upper()
for entry in cls().__entries:
if entry.code == code:
return entry
return None
@classmethod
def number(cls, number: int) -> Optional[Currency]:
"""
Retrieve the Currency object by its number.
e.g. 986 (BRL), 840 (USD)
Parameters:
number: The currency number. e.g. 840 (USD)
Returns:
the Currency object
"""
for entry in cls().__entries:
if entry.number == number:
return entry
return None
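

# Usage sketch (an assumption, not part of the original module); it relies on the
# bundled iso4217.xml data file being present next to this module.
if __name__ == '__main__':
    print(Currencies.code('usd'))
    print(Currencies.number(986))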
| 25.692308
| 120
| 0.52211
| 3,936
| 0.906495
| 0
| 0
| 1,804
| 0.415477
| 0
| 0
| 1,577
| 0.363197
|
3cd8a7fa6829673461545374eeacd667661ea155
| 4,863
|
py
|
Python
|
DemoFinal.py
|
sohinim006/Heroku-App-demo
|
875b894b48e8544f6dbe629635f195ccd97ba201
|
[
"MIT"
] | null | null | null |
DemoFinal.py
|
sohinim006/Heroku-App-demo
|
875b894b48e8544f6dbe629635f195ccd97ba201
|
[
"MIT"
] | 1
|
2020-06-02T02:53:57.000Z
|
2020-06-02T02:53:57.000Z
|
DemoFinal.py
|
sohinim006/Heroku-App-demo
|
875b894b48e8544f6dbe629635f195ccd97ba201
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import pickle
# In[2]:
data=pd.read_csv("wd.csv",encoding="ISO-8859-1")
# In[3]:
data
# In[4]:
data.fillna(0,inplace=True) # fill NaN values with 0's
# In[5]:
data
# In[6]:
data.dtypes
# In[7]:
#conversion
data['Temp']=pd.to_numeric(data['Temp'],errors='coerce')
data['D.O. (mg/l)']=pd.to_numeric(data['D.O. (mg/l)'],errors='coerce')
data['PH']=pd.to_numeric(data['PH'],errors='coerce')
data['B.O.D. (mg/l)']=pd.to_numeric(data['B.O.D. (mg/l)'],errors='coerce')
data['CONDUCTIVITY (µmhos/cm)']=pd.to_numeric(data['CONDUCTIVITY (µmhos/cm)'],errors='coerce')
data['NITRATENAN N+ NITRITENANN (mg/l)']=pd.to_numeric(data['NITRATENAN N+ NITRITENANN (mg/l)'],errors='coerce')
data['TOTAL COLIFORM (MPN/100ml)Mean']=pd.to_numeric(data['TOTAL COLIFORM (MPN/100ml)Mean'],errors='coerce')
data.dtypes
# In[8]:
#initialization
start=2
end=1779
station=data.iloc [start:end ,0]
location=data.iloc [start:end ,1]
state=data.iloc [start:end ,2]
do= data.iloc [start:end ,4].astype(np.float64)
value=0
ph = data.iloc[ start:end,5]
co = data.iloc [start:end ,6].astype(np.float64)
year=data.iloc[start:end,11]
tc=data.iloc [2:end ,10].astype(np.float64)
bod = data.iloc [start:end ,7].astype(np.float64)
na= data.iloc [start:end ,8].astype(np.float64)
na.dtype
# In[9]:
data=pd.concat([station,location,state,do,ph,co,bod,na,tc,year],axis=1)
data. columns = ['station','location','state','do','ph','co','bod','na','tc','year']
# In[10]:
data
# In[11]:
#calculation of pH
data['npH']=data.ph.apply(lambda x: (100 if (8.5>=x>=7)
else(80 if (8.6>=x>=8.5) or (6.9>=x>=6.8)
else(60 if (8.8>=x>=8.6) or (6.8>=x>=6.7)
else(40 if (9>=x>=8.8) or (6.7>=x>=6.5)
else 0)))))
# In[12]:
#calculation of dissolved oxygen
data['ndo']=data.do.apply(lambda x:(100 if (x>=6)
else(80 if (6>=x>=5.1)
else(60 if (5>=x>=4.1)
else(40 if (4>=x>=3)
else 0)))))
# In[13]:
#calculation of total coliform
data['nco']=data.tc.apply(lambda x:(100 if (5>=x>=0)
else(80 if (50>=x>=5)
else(60 if (500>=x>=50)
else(40 if (10000>=x>=500)
else 0)))))
#calculation of electrical conductivity
data['nec']=data.co.apply(lambda x:(100 if (75>=x>=0)
else(80 if (150>=x>=75)
else(60 if (225>=x>=150)
else(40 if (300>=x>=225)
else 0)))))
# In[14]:
#calculation of B.O.D.
data['nbdo']=data.bod.apply(lambda x:(100 if (3>=x>=0)
else(80 if (6>=x>=3)
else(60 if (80>=x>=6)
else(40 if (125>=x>=80)
else 0)))))
# In[15]:
data
# In[16]:
#Calculation of nitrate
data['nna']=data.na.apply(lambda x:(100 if (20>=x>=0)
else(80 if (50>=x>=20)
else(60 if (100>=x>=50)
else(40 if (200>=x>=100)
else 0)))))
data.head()
data.dtypes
# In[17]:
data
# In[18]:
from sklearn.model_selection import train_test_split
# In[19]:
data=data.drop(['station','location'],axis=1)
# In[20]:
data
# In[21]:
data=data.drop(['do','ph','co','bod','na','tc'],axis=1)
# In[22]:
data
# In[24]:
yt=data['nco']
# In[25]:
yt
# In[26]:
data=data.drop(['nco'],axis=1)
# In[27]:
data
# In[28]:
x_t,x_tt,y_t,y_tt=train_test_split(data,yt,test_size=0.2,random_state=4)
# In[29]:
#reg2.fit(x_t,y_t)
# In[30]:
#a2=reg2.predict(x_tt)
#a2
#randomforest
# In[39]:
from sklearn.ensemble import RandomForestRegressor
# In[40]:
rfr=RandomForestRegressor(n_estimators=1000,random_state=42)
# In[41]:
rfr.fit(x_t,y_t)
pickle.dump(rfr,open('model.pkl','wb'))
# In[42]:
model = pickle.load(open('model.pkl','rb'))
yrfr=rfr.predict(x_tt)
# In[43]:
from sklearn.metrics import mean_squared_error
print('mse:%.2f'%mean_squared_error(y_tt,yrfr))
# In[44]:
y_tt
# In[45]:
yrfr
# In[47]:
dtrfr = pd.DataFrame({'Actual': y_tt, 'Predicted': yrfr})
dtrfr.head(20)
# In[48]:
from sklearn.metrics import r2_score
# In[49]:
print(r2_score(y_tt,yrfr))
# In[ ]:
| 15.438095
| 112
| 0.499897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,181
| 0.242754
|
3cd8ed3786032ec99ff11bc34e84132d3b428b08
| 1,926
|
py
|
Python
|
Classes/gaussian.py
|
sankarebarri/Python
|
0c39da1df74d74b7b0a3724e57b5205a7d88537f
|
[
"MIT"
] | null | null | null |
Classes/gaussian.py
|
sankarebarri/Python
|
0c39da1df74d74b7b0a3724e57b5205a7d88537f
|
[
"MIT"
] | null | null | null |
Classes/gaussian.py
|
sankarebarri/Python
|
0c39da1df74d74b7b0a3724e57b5205a7d88537f
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
class Gaussian:
def __init__(self, mu=0, sigma=1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
self.mean = np.mean(self.data)
return self.mean
def calculate_stdev(self, sample=True):
x_mean = self.calculate_mean()
mean_item_squared = []
for i in range(len(self.data)):
mean_item = (self.data[i] - x_mean)**2
mean_item_squared.append(mean_item)
self.stdev = math.sqrt(np.sum(mean_item_squared) / len(self.data))
sample_length = len(self.data)
if sample:
self.stdev = math.sqrt(np.sum(mean_item_squared) / (sample_length-1))
return self.stdev
return self.stdev
def read_data_file(self, file_name, sample=True):
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
                # convert each line to a number so mean/stdev calculations work
                data_list.append(int(line))
line = file.readline()
file.close()
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample=True)
def __add__(self, other):
results = Gaussian()
results.mean = self.mean + other.mean
results.stdev = math.sqrt(self.stdev**2 + other.stdev**2)
return results
def __repr__(self):
return f'mean is {self.mean}, stdev is {self.stdev}'
data = [9, 2, 5, 4, 12, 7]
gaussian = Gaussian()
gaussian.data = data
print(gaussian.calculate_mean())
print(gaussian.calculate_stdev(sample=True))
gaussian_one = Gaussian(5, 2)
gaussian_two = Gaussian(7, 3)
gaussian_sum = gaussian_one + gaussian_two
print(gaussian_sum)
print(gaussian_sum.stdev)
print(gaussian_sum.mean)
| 27.126761
| 81
| 0.574247
| 1,544
| 0.801661
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.023364
|
3cda167a85c43c6395a461abd5b9210a39f3e5bb
| 987
|
py
|
Python
|
setup.py
|
datagovau/ckanext-datagovau
|
902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9
|
[
"Apache-2.0"
] | 1
|
2019-07-22T08:02:11.000Z
|
2019-07-22T08:02:11.000Z
|
setup.py
|
datagovau/ckanext-datagovau
|
902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
datagovau/ckanext-datagovau
|
902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9
|
[
"Apache-2.0"
] | 6
|
2015-01-23T16:32:18.000Z
|
2021-06-27T03:42:18.000Z
|
from setuptools import find_packages, setup
version = "1.0.0a1"
# Keep in case we still need pylons...Just use the line below in place
# of the install_requires argument in the call to setup().
# install_requires=['requests', 'feedparser', 'pylons', 'python-dateutil'],
setup(
name="ckanext-datagovau",
version=version,
description="Extension for customising CKAN for data.gov.au",
long_description="",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords="",
author="Greg von Nessi",
author_email="greg.vonnessi@linkdigital.com.au",
url="",
license="",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
namespace_packages=["ckanext", "ckanext.datagovau"],
include_package_data=True,
zip_safe=False,
install_requires=[
"typing_extensions",
],
entry_points="""
[ckan.plugins]
datagovau = ckanext.datagovau.plugin:DataGovAuPlugin
""",
)
| 32.9
| 94
| 0.690983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 570
| 0.577508
|
3ce1874797f955e0861f0ec1dfc943c5714b8253
| 6,192
|
py
|
Python
|
utils.py
|
kalpetros/greek-dictionary
|
962f36c299cbb46ffce9c7f78db7c9e513269499
|
[
"MIT"
] | 3
|
2021-04-27T16:39:12.000Z
|
2021-11-17T02:15:13.000Z
|
utils.py
|
kalpetros/greek-dictionary
|
962f36c299cbb46ffce9c7f78db7c9e513269499
|
[
"MIT"
] | null | null | null |
utils.py
|
kalpetros/greek-dictionary
|
962f36c299cbb46ffce9c7f78db7c9e513269499
|
[
"MIT"
] | 1
|
2021-06-15T23:57:44.000Z
|
2021-06-15T23:57:44.000Z
|
import click
import os
import requests
import shutil
import sys
import time
from bs4 import BeautifulSoup
alphabet = [
{
'letter': 'Α',
'pages': 31660
},
{
'letter': 'Β',
'pages': 5050
},
{
'letter': 'Γ',
'pages': 5890
},
{
'letter': 'Δ',
'pages': 7130
},
{
'letter': 'Ε',
'pages': 12530
},
{
'letter': 'Ζ',
'pages': 1500
},
{
'letter': 'Η',
'pages': 1310
},
{
'letter': 'Θ',
'pages': 2300
},
{
'letter': 'Ι',
'pages': 1720
},
{
'letter': 'Κ',
'pages': 17700
},
{
'letter': 'Λ',
'pages': 4740
},
{
'letter': 'Μ',
'pages': 13020
},
{
'letter': 'Ν',
'pages': 3790
},
{
'letter': 'Ξ',
'pages': 5250
},
{
'letter': 'Ο',
'pages': 4970
},
{
'letter': 'Π',
'pages': 18560
},
{
'letter': 'Ρ',
'pages': 2720
},
{
'letter': 'Σ',
'pages': 14340
},
{
'letter': 'Τ',
'pages': 7680
},
{
'letter': 'Υ',
'pages': 3170
},
{
'letter': 'Φ',
'pages': 5640
},
{
'letter': 'Χ',
'pages': 5370
},
{
'letter': 'Ψ',
'pages': 2080
},
{
'letter': 'Ω',
'pages': 470
}
]
def is_clean(word):
"""
Check for profanity
"""
clean = True
profane_words = []
if word in profane_words:
clean = False
return clean
def log(text, type):
colors = {
'success': 'green',
'info': 'yellow',
'warning': 'red'
}
click.secho(f'[{type}] - {text}', fg=colors[type])
def get_source(url):
"""
Get page source for the given url
"""
rs = requests.get(url)
source = BeautifulSoup(rs.content, 'html.parser')
return source
def parse(source):
"""
Return words array for the given page source
"""
children = source.find(id='lemmas').children
words = []
for node in children:
dt = node.find('dt')
if dt != -1:
word = dt.find('b').text.strip(',')
words.append(word)
return words
def scrape(letter: str, pages: int):
"""
Scrapes www.greek-language.gr to build
a full list of modern Greek words
https://www.greek-language.gr/greekLang/index.html
"""
log(f'Getting letter {letter} words...', 'info')
start = time.time()
url = 'https://www.greek-language.gr/greekLang/modern_greek/tools/lexica/reverse/search.html'
results = []
page = 0
while page <= int(pages):
time.sleep(0.1)
endpoint = f'{url}?start={page}&lq={letter}*'
source = get_source(endpoint)
words = parse(source)
page = page + 10
for word in words:
results.append(word)
end = time.time()
total = end - start
log(f'Got {letter} in {total}', 'success')
return results
def get_data(file_name):
"""
Return words in a given file
"""
results = []
if not os.path.isfile(file_name):
return results
try:
with open(file_name, 'r') as words:
for word in words:
results.append(word.strip())
except Exception as e:
log(f'Could not get data {str(e)}', 'warning')
return results
def check():
"""
Check if necessary files exist
"""
if not os.path.isfile('files/el.txt'):
log('el.txt is missing from files. Please restore the repository.', 'warning')
sys.exit(2)
if not os.path.isdir('output'):
log('Output folder is missing. Creating folder...', 'warning')
os.mkdir('output')
def clean_output():
"""
Delete output files and folder
"""
if not os.path.isdir('output'):
log('Working directory already clean...', 'info')
return
shutil.rmtree('output')
log('Working directory clean', 'success')
return
def romanize_words(words):
"""
Romanize words
"""
mappings = {
'α': 'a',
'ά': 'a',
'β': 'v',
'γ': 'g',
'δ': 'd',
'ε': 'e',
'έ': 'e',
'ζ': 'z',
'η': 'i',
'ή': 'i',
'θ': 'th',
'ι': 'i',
'ί': 'i',
'ϊ': 'i',
'ΐ': 'i',
'κ': 'k',
'λ': 'l',
'μ': 'm',
'ν': 'n',
'ξ': 'ks',
'ο': 'o',
'ό': 'o',
'π': 'p',
'ρ': 'r',
'σ': 's',
'ς': 's',
'τ': 't',
'υ': 'y',
'ύ': 'y',
'ϋ': 'y',
'ΰ': 'y',
'φ': 'f',
'χ': 'h',
'x': 'h',
'ψ': 'ps',
'ω': 'o',
'ώ': 'o',
'-': '-',
'!': '!',
'.': '.',
',': ',',
"'": "'"
}
results = []
if not words:
log('No data provided', 'info')
return results
for word in words:
result = []
chars = list(word.strip())
for char in chars:
try:
char = char.lower()
result.append(mappings[char])
except Exception as e:
log(f'Could not map {str(e)}', 'warning')
word = ''.join(result)
results.append(word)
log('Romanized all words', 'success')
return results
def export(file_name, words, file_type='txt'):
"""
Create a words file
"""
if not words:
log('No data provided', 'warning')
return
check()
log(f'Creating file {file_name}.{file_type}...', 'info')
output = open(f'output/{file_name}.{file_type}', 'w')
    if file_type == 'json':
        # join entries with commas so the result is valid JSON (no trailing comma)
        output.write('[' + ',\n'.join(f'"{word.strip()}"' for word in words) + ']')
    elif file_type == 'txt':
        for word in words:
            output.write(f'{word.strip()}\n')
output.close()
log(f'Created {file_name}.{file_type}', 'success')
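

# Minimal usage sketch (an assumption, not part of the original module); the sample
# words below are hypothetical.
if __name__ == '__main__':
    print(romanize_words(['καλημέρα', 'νερό']))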
| 18.211765
| 97
| 0.439599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,142
| 0.34261
|
3ce716ac3e56a4c2bf161beb78851142feb3c86b
| 1,585
|
py
|
Python
|
pysanejs/api.py
|
Lookyloo/PySaneJS
|
99615608222d7386e74472bcc052f40b05916b2a
|
[
"BSD-2-Clause"
] | 1
|
2019-01-30T16:12:32.000Z
|
2019-01-30T16:12:32.000Z
|
pysanejs/api.py
|
CIRCL/PySaneJS
|
501f22d0d22d6361bb71a8bf0bbb2e14d3c0f9f1
|
[
"BSD-2-Clause"
] | 36
|
2021-06-09T17:34:05.000Z
|
2022-03-28T09:04:37.000Z
|
pysanejs/api.py
|
Lookyloo/PySaneJS
|
99615608222d7386e74472bcc052f40b05916b2a
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from urllib.parse import urljoin
from typing import Union, Dict, List, Optional
class SaneJS():
def __init__(self, root_url: str='https://sanejs.circl.lu/'):
self.root_url = root_url
self.session = requests.session()
@property
def is_up(self) -> bool:
try:
r = self.session.head(self.root_url)
return r.status_code == 200
except Exception:
return False
def sha512(self, sha512: Union[str, list]) -> Dict[str, List[str]]:
'''Search for a hash (sha512)
        Response:
{
"response": [
"libraryname|version|filename",
...
]
}
'''
r = self.session.post(urljoin(self.root_url, 'sha512'), json={'sha512': sha512})
return r.json()
def library(self, library: Union[str, list], version: Optional[str]=None) -> Dict[str, Dict[str, Dict[str, Dict[str, str]]]]:
''' Search for a library by name.
Response:
{
"response": {
"libraryname": {
"version": {
"filename": "sha512",
...
}
...
},
...
}
}
'''
to_query = {'library': library}
if version:
to_query['version'] = version
r = self.session.post(urljoin(self.root_url, 'library'), json=to_query)
return r.json()
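

# Usage sketch (an assumption, not part of the original module); it needs network
# access to the public sanejs instance, and the version string below is hypothetical.
if __name__ == '__main__':
    client = SaneJS()
    if client.is_up:
        print(client.library('jquery', version='3.5.1'))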
| 28.303571
| 129
| 0.477603
| 1,439
| 0.907886
| 0
| 0
| 191
| 0.120505
| 0
| 0
| 658
| 0.415142
|
3ce959e8fac079b9e0e0bacc34e00bde93edb83c
| 1,937
|
py
|
Python
|
Log1/HiPyQt3/HiPyQt38QTableWidget.py
|
codenara/PyQt1
|
1550920577188e4d318b47fc69ba5ee243092d88
|
[
"MIT"
] | null | null | null |
Log1/HiPyQt3/HiPyQt38QTableWidget.py
|
codenara/PyQt1
|
1550920577188e4d318b47fc69ba5ee243092d88
|
[
"MIT"
] | null | null | null |
Log1/HiPyQt3/HiPyQt38QTableWidget.py
|
codenara/PyQt1
|
1550920577188e4d318b47fc69ba5ee243092d88
|
[
"MIT"
] | null | null | null |
# HiPyQt version 3.8
# use QTableWidget
# use QCheckBox
# use QPushButton
import sys
from PyQt5.QtWidgets import *
class MyWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Hi PyQt")
self.setGeometry(50, 50, 400, 300)
# QTableWidget
self.tableWidget = QTableWidget(self)
self.tableWidget.resize(290, 290)
self.tableWidget.setRowCount(2)
self.tableWidget.setColumnCount(2)
self.tableWidget.setItem(0, 0, QTableWidgetItem("John"))
self.tableWidget.setItem(0, 1, QTableWidgetItem("21"))
self.tableWidget.setItem(1, 0, QTableWidgetItem("Paul"))
self.tableWidget.setItem(1, 1, QTableWidgetItem("22"))
horizontalHeaderLabels = ["Name", "Age"]
self.tableWidget.setHorizontalHeaderLabels(horizontalHeaderLabels)
verticalHeaderLabels = ["One", "Two"]
self.tableWidget.setVerticalHeaderLabels(verticalHeaderLabels)
# QCheckBox
self.checkBox = QCheckBox("Editable", self)
self.checkBox.move(300, 10)
self.checkBox.resize(90, 30)
self.checkBox.stateChanged.connect(self.checkBox_stateChanged)
# QPushButton
self.button = QPushButton("Resize", self)
self.button.move(300, 50)
self.button.resize(80, 30)
self.button.clicked.connect(self.button_clicked)
def checkBox_stateChanged(self):
if self.checkBox.isChecked() == True:
self.tableWidget.setEditTriggers(QAbstractItemView.AllEditTriggers) # Enable editing
else:
self.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers) # Disable editing
def button_clicked(self):
self.tableWidget.resizeColumnsToContents()
self.tableWidget.resizeRowsToContents()
if __name__ == "__main__":
app = QApplication(sys.argv)
myWindow = MyWindow()
myWindow.show()
app.exec()
| 32.283333
| 97
| 0.674239
| 1,695
| 0.875065
| 0
| 0
| 0
| 0
| 0
| 0
| 219
| 0.113061
|
3cea6fdbaa10d4f4a87f24213944a946b586b65c
| 1,346
|
py
|
Python
|
predictor.py
|
abhayraw1/crnn.pytorch
|
307f2dbf8163148d165ef15cdd522c7c137041e4
|
[
"MIT"
] | null | null | null |
predictor.py
|
abhayraw1/crnn.pytorch
|
307f2dbf8163148d165ef15cdd522c7c137041e4
|
[
"MIT"
] | null | null | null |
predictor.py
|
abhayraw1/crnn.pytorch
|
307f2dbf8163148d165ef15cdd522c7c137041e4
|
[
"MIT"
] | null | null | null |
import torch
from torch.autograd import Variable
from . import utils
from . import dataset
from PIL import Image
from pathlib import Path
from . import crnn
model_path = Path(__file__).parent/'data/crnn.pth'
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
model = crnn.CRNN(32, 1, 37, 256)
if torch.cuda.is_available():
model = model.cuda()
print('loading pretrained model from %s' % model_path)
model.load_state_dict(torch.load(model_path))
converter = utils.strLabelConverter(alphabet)
transformer = dataset.resizeNormalize((100, 32))
def predict(img_path=None, arr=None):
assert img_path is not None or arr is not None
if arr is not None:
image = Image.fromarray(arr)
else:
image = Image.open(img_path)
image = image.convert('L')
image = transformer(image)
if torch.cuda.is_available():
image = image.cuda()
image = image.view(1, *image.size())
image = Variable(image)
model.eval()
preds = model(image)
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.IntTensor([preds.size(0)]))
raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
# print('%-20s => %-20s' % (raw_pred, sim_pred))
return sim_pred
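

# Usage sketch (an assumption, not part of the original module): because this file
# uses relative imports it is meant to be imported as part of its package, e.g.
#
#     from <package> import predictor                   # hypothetical package name
#     text = predictor.predict(img_path='demo.png')     # or predict(arr=<grayscale ndarray>)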
| 28.041667
| 71
| 0.696137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.102526
|
3ced3da168b0c4d5fb8345ab35a6e8f79cade777
| 2,951
|
py
|
Python
|
src/graph_transpiler/webdnn/backend/webgl/kernels/split_axis.py
|
gunpowder78/webdnn
|
c659ea49007f91d178ce422a1eebe289516a71ee
|
[
"MIT"
] | 1
|
2018-07-26T13:52:21.000Z
|
2018-07-26T13:52:21.000Z
|
src/graph_transpiler/webdnn/backend/webgl/kernels/split_axis.py
|
gunpowder78/webdnn
|
c659ea49007f91d178ce422a1eebe289516a71ee
|
[
"MIT"
] | null | null | null |
src/graph_transpiler/webdnn/backend/webgl/kernels/split_axis.py
|
gunpowder78/webdnn
|
c659ea49007f91d178ce422a1eebe289516a71ee
|
[
"MIT"
] | null | null | null |
from typing import List, Sequence
from webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector
from webdnn.backend.webgl.attributes.channel_mode import ChannelMode, ChannelModeEnum
from webdnn.backend.webgl.generator import WebGLDescriptorGenerator
from webdnn.backend.webgl.kernel import Kernel
from webdnn.backend.webgl.kernels.util import FragmentShaderPreamble, texture_stride, texture_shape
from webdnn.backend.webgl.uniform_injector import UniformInjector
from webdnn.graph.operators.split_axis import SplitAxis
template = FragmentShaderPreamble + """
%%UNIFORM(sampler2D, sampler_x)%%;
%%UNIFORM(vec2, texture_stride_y)%%;
%%UNIFORM(vec4, variable_shape_y)%%;
%%UNIFORM(vec4, variable_stride_y)%%;
%%UNIFORM(vec4, variable_shape_x)%%;
%%UNIFORM(vec4, variable_stride_x)%%;
%%UNIFORM(vec2, texture_stride_x)%%;
%%UNIFORM(vec2, texture_shape_x)%%;
%%UNIFORM(vec4, offset)%%;
void main() {
vec4 variable_position_y = convert_position(gl_FragCoord.xy, texture_stride_y, variable_stride_y, variable_shape_y);
vec4 variable_position_x = variable_position_y + offset;
float x = texture2D(sampler_x, convert_coord(variable_position_x, variable_stride_x, texture_stride_x, texture_shape_x)).r;
gl_FragColor = vec4(x, 0, 0, 0);
}
"""
def _pad_to_4d(arr: Sequence[int], val: int = 1):
assert len(arr) <= 4, ValueError
arr = list(arr)
while len(arr) < 4:
arr.append(val)
return arr
@WebGLDescriptorGenerator.register_handler(SplitAxis)
def split_axis(op: SplitAxis) -> List[Kernel]:
x = op.inputs["x"]
ys = [op.outputs[f"y{i}"] for i in range(len(op.outputs))]
sections = [0] + op.sections
axis = op.axis
kernels = []
for i, y in enumerate(ys):
assert x.order.check_same_axes(y.order)
assert ChannelMode.get(x) == ChannelMode.get(y) == ChannelModeEnum.R
name_injector = KernelNameInjector(op)
uniform_injector = UniformInjector()
offset = [sections[i] if a == axis else 0 for a in y.order.axes]
uniform_injector.register({
"sampler_x": x,
"texture_stride_y": texture_stride(y),
"variable_shape_y": _pad_to_4d(y.shape),
"variable_stride_y": _pad_to_4d(y.stride),
"texture_shape_x": texture_shape(x),
"texture_stride_x": texture_stride(x),
"variable_shape_x": _pad_to_4d([x.shape_dict[a] for a in y.order.axes]),
"variable_stride_x": _pad_to_4d([x.stride_dict[a] for a in y.order.axes]),
"offset": _pad_to_4d(offset, 0)
})
source = template
source = uniform_injector.inject(source)
source = name_injector.inject(source)
kernel = Kernel(
source,
name_injector.name,
uniform_injector.samplers,
uniform_injector.uniforms,
y
)
kernels.append(kernel)
return kernels
| 32.788889
| 127
| 0.686208
| 0
| 0
| 0
| 0
| 1,485
| 0.503219
| 0
| 0
| 857
| 0.29041
|
3ced6fbe48c455d53e5baee0065fd6577be73a4b
| 35
|
py
|
Python
|
__init__.py
|
chunlaw/GeoNews
|
836547a51a0ed177f04135979e0a0f5212e88ed7
|
[
"MIT"
] | 3
|
2016-09-05T13:43:59.000Z
|
2016-09-05T15:36:12.000Z
|
__init__.py
|
chunlaw/GeoNews
|
836547a51a0ed177f04135979e0a0f5212e88ed7
|
[
"MIT"
] | null | null | null |
__init__.py
|
chunlaw/GeoNews
|
836547a51a0ed177f04135979e0a0f5212e88ed7
|
[
"MIT"
] | null | null | null |
__all__ = ['models']
import models
| 11.666667
| 20
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.228571
|
3cedde962258fae75ef3400a99dada61c8a82bd1
| 1,244
|
py
|
Python
|
systemstat.py
|
asl97/asl97-i3bar-status-spacer
|
83245582cf8973b0d128b5ed806e776e00960c5e
|
[
"MIT"
] | null | null | null |
systemstat.py
|
asl97/asl97-i3bar-status-spacer
|
83245582cf8973b0d128b5ed806e776e00960c5e
|
[
"MIT"
] | null | null | null |
systemstat.py
|
asl97/asl97-i3bar-status-spacer
|
83245582cf8973b0d128b5ed806e776e00960c5e
|
[
"MIT"
] | null | null | null |
import time
import psutil
def _parsesendrecv(interface, new, old):
up = max(new[interface].bytes_sent - old[interface].bytes_sent, -1)
down = max(new[interface].bytes_recv - old[interface].bytes_recv, -1)
return up, down
class _netlink:
def __init__(self):
self.old = psutil.net_io_counters(pernic=True)
def get_status(self, exclude=[]):
new = psutil.net_io_counters(pernic=True)
o = []
with open("/proc/net/route") as f:
route = f.read()
for interface in new:
if interface in exclude or interface not in route:
continue
up, down = _parsesendrecv(interface, new, self.old)
if up == -1:
sup = "?K"
else:
sup = "%.1fK" % (up/1024)
if down == -1:
sdown = "?K"
else:
sdown = "%.1fK" % (down/1024)
o.append((interface, sup, sdown))
self.old = new
return o
netlink = _netlink().get_status
def cpu():
return psutil.cpu_percent()
def ram():
mem = psutil.virtual_memory()
return ((mem.used+mem.buffers)/mem.total)*100
def datetime():
return time.strftime("%a %d/%m/%Y %H:%M:%S")
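# Minimal usage sketch (added for illustration; not part of the original module).
# Excluding the loopback interface "lo" is an assumption for a typical Linux host.
if __name__ == '__main__':
    print("cpu:", cpu())
    print("ram: %.1f%%" % ram())
    print("net:", netlink(exclude=["lo"]))
    print("time:", datetime())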
| 28.272727
| 73
| 0.549035
| 769
| 0.618167
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.049035
|
3cefbde68b0741c1883ec538b390be6d177b8949
| 18,044
|
py
|
Python
|
tests/test_net.py
|
ciubecca/kalasanty
|
df99f6814f073f2fb0fbd271d2fbfccb209c4b45
|
[
"BSD-3-Clause"
] | 1
|
2021-10-19T16:59:31.000Z
|
2021-10-19T16:59:31.000Z
|
tests/test_net.py
|
ciubecca/kalasanty
|
df99f6814f073f2fb0fbd271d2fbfccb209c4b45
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_net.py
|
ciubecca/kalasanty
|
df99f6814f073f2fb0fbd271d2fbfccb209c4b45
|
[
"BSD-3-Clause"
] | 1
|
2021-10-20T13:05:56.000Z
|
2021-10-20T13:05:56.000Z
|
import os
import numpy as np
import h5py
import tempfile
import pytest
from keras import backend as K
from keras.layers import Input, Convolution3D, concatenate
from keras.models import Model
from keras.optimizers import Adam
import pybel
from tfbio.data import Featurizer
from kalasanty.net import dice_np, dice, dice_loss, ovl_np, ovl, ovl_loss, DataWrapper, UNet
path = os.path.dirname(os.path.realpath(__file__))
test_dataset = os.path.join(path, 'test_data.hdf')
protein_file = os.path.join(path, 'datasets', 'scpdb', '2qfo_1', 'protein.mol2')
featurizer = Featurizer(save_molecule_codes=False)
num_features = len(featurizer.FEATURE_NAMES)
input_shape = (1, 4, 2, 3, 1)
arr_zeros = np.zeros(input_shape)
arr_ones = np.ones(input_shape)
def teardown_function(function):
K.clear_session()
@pytest.fixture(scope='function')
def data():
data = DataWrapper(test_dataset, test_set=0.2, max_dist=52, scale=0.33)
yield data
data.close()
@pytest.mark.parametrize('smoothing', (0, 0.1, 0.001),
ids=lambda x: 'smoothing %s' % x)
def test_dice(smoothing):
x = Input(input_shape[1:])
m = Model(inputs=x, outputs=x)
arr_random = np.random.choice([0, 1], size=input_shape,
p=[0.75, 0.25])
arrays = (arr_random, arr_zeros, arr_ones)
arr_sum = arr_random.sum()
ones_sum = arr_ones.sum()
scores = (1.0, smoothing / (arr_sum + smoothing),
(2 * arr_sum + smoothing) / (arr_sum + ones_sum + smoothing))
m.compile(Adam(), lambda x, y: dice(x, y, smoothing_factor=smoothing))
for array, score in zip(arrays, scores):
score_keras = m.evaluate(arr_random, array, verbose=0)
score_np = dice_np(arr_random, array, smoothing_factor=smoothing)
assert np.allclose(score_keras, score_np, 6)
assert np.allclose(score_keras, score, 6)
@pytest.mark.parametrize('smoothing', (0, 0.1, 0.001),
ids=lambda x: 'smoothing %s' % x)
def test_ovl(smoothing):
x = Input(input_shape[1:])
m = Model(inputs=x, outputs=x)
arr_random = np.random.choice([0, 1], size=input_shape,
p=[0.75, 0.25])
arr_sum = arr_random.sum()
ones_sum = arr_ones.sum()
arrays = (arr_random, arr_zeros, arr_ones)
scores = (1.0, smoothing / (arr_sum + smoothing),
(arr_sum + smoothing) / (ones_sum + smoothing))
m.compile(Adam(), lambda x, y: ovl(x, y, smoothing_factor=smoothing))
for array, score in zip(arrays, scores):
score_keras = m.evaluate(arr_random, array, verbose=0)
score_np = ovl_np(arr_random, array, smoothing_factor=smoothing)
assert np.allclose(score_keras, score_np, 6)
assert np.allclose(score_keras, score, 6)
def test_unet_from_data_handle(data):
with pytest.raises(ValueError, match='you must either provide'):
UNet()
with pytest.raises(TypeError, match='data_handle should be a DataWrapper'):
UNet(data_handle='10gs')
model = UNet(data_handle=data)
assert model.data_handle == data
assert model.scale == data.scale
assert model.max_dist == data.max_dist
assert len(model.inputs) == 1
assert model.inputs[0].shape[-1] == data.x_channels
assert len(model.outputs) == 1
assert model.outputs[0].shape[-1] == data.y_channels
@pytest.mark.parametrize('box_size', (4, 16), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('i', (5, 1), ids=lambda x: 'i=%s' % x)
@pytest.mark.parametrize('o', (2, 1), ids=lambda x: 'o=%s' % x)
def test_unet_from_layers(box_size, i, o):
inputs = Input([box_size] * 3 + [i])
conv1 = Convolution3D(filters=3, kernel_size=1, activation='elu',
padding='same')(inputs)
outputs = Convolution3D(filters=o, kernel_size=1, activation='sigmoid',
padding='same')(conv1)
model = UNet(inputs=inputs, outputs=outputs, box_size=box_size,
input_channels=i, output_channels=o)
assert hasattr(model, 'data_handle')
assert model.data_handle is None
with pytest.raises(ValueError, match='input should be 5D'):
UNet(inputs=inputs[0], outputs=inputs)
with pytest.raises(ValueError, match='output should be 5D'):
UNet(inputs=inputs, outputs=outputs[1])
with pytest.raises(ValueError, match='input and output shapes do not match'):
UNet(inputs=inputs, outputs=concatenate([outputs, outputs], 1))
@pytest.mark.parametrize('box_size', (36, 144), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (4, 2), ids=lambda x: 'o=%s' % x)
def test_unet_with_featurizer(box_size, o):
f = Featurizer()
i = len(f.FEATURE_NAMES)
with pytest.raises(TypeError, match='should be a tfbio.data.Featurize'):
UNet(box_size=box_size, input_channels=i, output_channels=o,
scale=0.5, featurizer=1)
model = UNet(box_size=box_size, input_channels=i, output_channels=o,
scale=0.5, featurizer=f)
assert hasattr(model, 'data_handle')
assert model.data_handle is None
assert hasattr(model, 'featurizer')
assert isinstance(model.featurizer, Featurizer)
@pytest.mark.parametrize('box_size', (8, 16), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('i_channels', ([5, 3], [2, 1, 1]),
ids=lambda x: 'i=' + ','.join([str(i) for i in x]))
@pytest.mark.parametrize('o_channels', ([3, 3], [2, 1, 4]),
ids=lambda x: 'o=' + ','.join([str(i) for i in x]))
def test_multiple_inputs_outputs(box_size, i_channels, o_channels):
inputs = [Input([box_size] * 3 + [i]) for i in i_channels]
conv1 = [Convolution3D(filters=3, kernel_size=1, activation='elu',
padding='same')(inp) for inp in inputs]
conv1 = concatenate(conv1, axis=-1)
outputs = [Convolution3D(filters=o, kernel_size=1, activation='sigmoid',
padding='same')(conv1) for o in o_channels]
model = UNet(inputs=inputs, outputs=outputs, box_size=box_size,
input_channels=sum(i_channels),
output_channels=sum(o_channels))
assert len(model.inputs) == len(i_channels)
assert len(model.outputs) == len(o_channels)
@pytest.mark.parametrize('loss', (dice_loss, ovl_loss))
def test_training(data, loss):
train_gen = data.batch_generator(batch_size=5)
eval_gen = data.batch_generator(batch_size=5)
test_gen = data.batch_generator(batch_size=2, subset='test')
num_epochs = 2
box_size = data.box_size
input_channels = data.x_channels
output_channels = data.y_channels
inputs = Input((box_size, box_size, box_size, input_channels))
outputs = Convolution3D(filters=output_channels, kernel_size=1,
activation='sigmoid')(inputs)
model = UNet(inputs=inputs, outputs=outputs)
model.compile(optimizer=Adam(lr=1e-6), loss=loss,
metrics=[dice, dice_loss, ovl, ovl_loss])
model.fit_generator(train_gen, steps_per_epoch=2,
epochs=num_epochs, verbose=0)
for scores in (model.evaluate_generator(eval_gen, steps=2),
model.evaluate_generator(test_gen, steps=1)):
assert np.allclose(scores[1], -scores[2])
assert np.allclose(scores[3], -scores[4])
loss_change = model.history.history['loss']
assert len(loss_change) == num_epochs
assert (loss_change[0] != loss_change[1:]).all()
@pytest.mark.parametrize('kwargs, err', (
({'scale': 1.0}, ValueError),
({'max_dist': 35}, ValueError),
({'featurizer': 123}, TypeError),
({'featurizer': Featurizer()}, ValueError)
), ids=('wrong scale', 'wrong dist', 'wrong featurizer type',
'wrong featurizer shape'))
@pytest.mark.parametrize('compiled', (True, False),
ids=('compiled', 'not compiled'))
@pytest.mark.filterwarnings('ignore:No training configuration found')
def test_load_wrong_args(data, kwargs, err, compiled):
box_size = data.box_size
i = data.x_channels
o = data.y_channels
model1 = UNet(box_size=box_size, input_channels=i,
output_channels=o, scale=data.scale,
data_handle=data)
if compiled:
model1.compile(optimizer=Adam(lr=1e-6),
loss='binary_crossentropy',
metrics=[dice, dice_loss, ovl, ovl_loss])
with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
model1.save(f.name)
with pytest.raises(err, match=list(kwargs)[0]):
UNet.load_model(f.name, data_handle=data, **kwargs)
@pytest.mark.parametrize('kwargs', (
{},
{'max_dist': 52, 'scale': 0.33, 'featurizer': featurizer},
), ids=('no args', 'scale 1:3, dist=52, featurizer'))
@pytest.mark.parametrize('compiled', (True, False),
ids=('compiled', 'not compiled'))
@pytest.mark.filterwarnings('ignore:No training configuration found')
def test_save_load(data, kwargs, compiled):
from keras.models import load_model as keras_load
box_size = data.box_size
i = data.x_channels
o = data.y_channels
model1 = UNet(box_size=box_size, input_channels=i,
output_channels=o, scale=data.scale,
data_handle=data)
if compiled:
model1.compile(optimizer=Adam(lr=1e-6),
loss='binary_crossentropy',
metrics=[dice, dice_loss, ovl, ovl_loss])
weights1 = model1.get_weights()
with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
model1.save(f.name)
model2 = UNet.load_model(f.name, data_handle=data, **kwargs)
weights2 = model2.get_weights()
assert model1.to_json() == model2.to_json()
for w1, w2 in zip(weights1, weights2):
assert np.allclose(w1, w2)
with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
model1.save_keras(f.name)
model2 = keras_load(f.name)
weights2 = model2.get_weights()
for w1, w2 in zip(weights1, weights2):
assert np.allclose(w1, w2)
@pytest.mark.parametrize('kwargs', (
{'box_size': 30},
{'input_channels': 1},
{'output_channels': 4},
{'scale': 2.0},
{'featurizer': Featurizer()},
{'inputs': Input([36] * 3 + [1])},
{'outputs': Convolution3D(filters=3, kernel_size=1, activation='elu',
padding='same')(Input([36] * 3 + [1]))}
), ids=('box_size', 'input_channels', 'output_channels', 'scale', 'featurizer',
'inputs, no outputs', 'outputs, no inputs'))
def test_incompatible_with_data_handle(data, kwargs):
with pytest.raises(ValueError, match=list(kwargs)[0]):
UNet(data_handle=data, **kwargs)
@pytest.mark.parametrize('input_shape, strides, message', (
([10] * 3 + [1], 1, 'input shape does not match box_size'),
([20] * 5 + [1], 1, 'input should be 5D'),
([20] * 3 + [1], 2, 'input and output shapes do not match'),
), ids=('box size', 'not 3D image', 'different shapes'))
def test_incompatible_layers_shapes(input_shape, strides, message):
inputs = Input(input_shape)
if message == 'input should be 5D':
outputs = inputs
else:
outputs = Convolution3D(filters=3, kernel_size=1, activation='sigmoid',
padding='same', strides=strides)(inputs)
with pytest.raises(ValueError, match=message):
UNet(inputs=inputs, outputs=outputs, box_size=20)
@pytest.mark.parametrize('kwargs', (
{'box_size': 30},
{'input_channels': 1},
{'output_channels': 4},
{'featurizer': Featurizer()},
), ids=lambda x: ', '.join(str(k) for k in x))
def test_incompatible_with_layers(kwargs):
inputs = Input([10] * 3 + [3])
conv1 = Convolution3D(filters=3, kernel_size=1, activation='elu',
padding='same')(inputs)
outputs = Convolution3D(filters=5, kernel_size=1, activation='sigmoid',
padding='same')(conv1)
with pytest.raises(ValueError, match=list(kwargs)[0]):
UNet(inputs=inputs, outputs=outputs, **kwargs)
def test_get_pockets_segmentation(data):
with pytest.raises(ValueError, match='data_handle must be set'):
model = UNet(box_size=data.box_size,
input_channels=data.x_channels,
output_channels=data.y_channels,
l2_lambda=1e-7)
model.pocket_density_from_grid('10gs')
with pytest.raises(ValueError, match='scale must be set'):
model = UNet(box_size=data.box_size,
input_channels=data.x_channels,
output_channels=data.y_channels,
l2_lambda=1e-7, data_handle=data)
model.scale = None
model.pocket_density_from_grid('10gs')
np.random.seed(42)
model = UNet(box_size=data.box_size,
input_channels=data.x_channels,
output_channels=data.y_channels,
l2_lambda=1e-7, data_handle=data)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
density, *_ = model.pocket_density_from_grid('10gs')
with pytest.raises(ValueError, match='not supported'):
model.get_pockets_segmentation(np.array([density] * 2), 0.6)
pocket = model.get_pockets_segmentation(density, 0.6)
assert pocket.shape == (data.box_size,) * 3
assert pocket.max() > 0
assert len(np.unique(pocket)) - 1 <= pocket.max()
def test_save_pockets_cmap(data):
model = UNet(data_handle=data, l2_lambda=1e-7)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
density, origin, step = model.pocket_density_from_grid('10gs')
with pytest.raises(ValueError, match='saving more than one prediction'):
model.save_density_as_cmap(np.concatenate((density, density)), origin,
step)
with tempfile.NamedTemporaryFile(suffix='.cmap') as cmap_file:
fname = cmap_file.name
model.save_density_as_cmap(density, origin, step, fname=fname)
with h5py.File(fname, 'r') as f:
assert 'Chimera' in f
group = f['Chimera']
assert len(group.keys()) == data.y_channels
for i in range(data.y_channels):
key = 'image%s' % (i + 1)
assert key in group
assert 'data_zyx' in group[key]
dataset = group[key]['data_zyx'][:]
assert np.allclose(density[0, ..., i].transpose([2, 1, 0]),
dataset[:])
def test_save_pockets_cube(data):
model = UNet(data_handle=data, l2_lambda=1e-7)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
density, origin, step = model.pocket_density_from_grid('10gs')
with pytest.raises(ValueError, match='saving more than one prediction'):
model.save_density_as_cube(np.concatenate((density, density)), origin,
step)
with pytest.raises(NotImplementedError, match='saving multichannel'):
model.save_density_as_cube(density, origin, step)
density = density[..., [0]]
with tempfile.NamedTemporaryFile(suffix='.cube') as cmap_file:
fname = cmap_file.name
model.save_density_as_cube(density, origin, step, fname=fname)
with open(fname, 'r') as f:
# skip header
for _ in range(7):
f.readline()
values = np.array(f.read().split()).reshape(density.shape)
assert np.allclose(density, values.astype(float))
@pytest.mark.parametrize('box_size', (36, 72), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (1, 3), ids=lambda x: 'o=%s' % x)
def test_predict_mol(box_size, o):
mol = next(pybel.readfile('mol2', protein_file))
with pytest.raises(ValueError, match='featurizer must be set'):
model = UNet(box_size=box_size, scale=0.5, input_channels=num_features,
output_channels=o)
model.pocket_density_from_mol(mol)
with pytest.raises(ValueError, match='scale must be set'):
model = UNet(featurizer=featurizer, box_size=box_size,
input_channels=num_features, output_channels=o)
model.pocket_density_from_mol(mol)
model = UNet(featurizer=featurizer, box_size=box_size, scale=0.5,
output_channels=o)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
with pytest.raises(TypeError, match='pybel.Molecule'):
model.pocket_density_from_mol(protein_file)
density, origin, step = model.pocket_density_from_mol(mol)
assert (density > 0).any()
@pytest.mark.parametrize('box_size', (36, 72), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (1, 2), ids=lambda x: 'o=%s' % x)
def test_predict_pocket_atoms(box_size, o):
np.random.seed(42)
mol = next(pybel.readfile('mol2', protein_file))
model = UNet(featurizer=featurizer, box_size=box_size, scale=0.5,
output_channels=o)
model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
segmentation_kwargs = {'threshold': 0.55, 'min_size': 5}
pocket_mols_atoms = model.predict_pocket_atoms(mol, dist_cutoff=3,
expand_residue=False,
**segmentation_kwargs)
pocket_mols_residues = model.predict_pocket_atoms(mol, dist_cutoff=3,
expand_residue=True,
**segmentation_kwargs)
assert len(pocket_mols_atoms) == len(pocket_mols_residues)
assert len(pocket_mols_atoms) > 0
for p1, p2 in zip(pocket_mols_atoms, pocket_mols_residues):
assert isinstance(p1, pybel.Molecule)
assert isinstance(p2, pybel.Molecule)
assert len(p1.atoms) <= len(p2.atoms)
res1 = set([res.idx for res in p1.residues])
res2 = set([res.idx for res in p2.residues])
assert res1 == res2
| 39.483589
| 92
| 0.63323
| 0
| 0
| 119
| 0.006595
| 13,151
| 0.72883
| 0
| 0
| 1,946
| 0.107847
|
3cf130cd62278bdee384dab7ff29ec047f8b848a
| 2,256
|
py
|
Python
|
tests/test_bash_runner.py
|
rtmigo/svet
|
06f9c5be7706351c2ef93fae0f9fa97ee69593f7
|
[
"BSD-3-Clause"
] | 5
|
2021-05-18T19:55:22.000Z
|
2022-03-07T20:52:19.000Z
|
tests/test_bash_runner.py
|
rtmigo/vien
|
06f9c5be7706351c2ef93fae0f9fa97ee69593f7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_bash_runner.py
|
rtmigo/vien
|
06f9c5be7706351c2ef93fae0f9fa97ee69593f7
|
[
"BSD-3-Clause"
] | 1
|
2021-05-23T04:04:29.000Z
|
2021-05-23T04:04:29.000Z
|
# SPDX-FileCopyrightText: (c) 2021 Artëm IG <github.com/rtmigo>
# SPDX-License-Identifier: BSD-3-Clause
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from timeit import default_timer as timer
from tests.common import is_posix
from vien._bash_runner import *
from tests.time_limited import TimeLimited
@unittest.skipUnless(is_posix, "not POSIX")
class TestRunAsBash(unittest.TestCase):
# python3 -m unittest svet.bash_runner_test
def test_good_command_code_zero(self):
bash_lines = [
f'set -e',
f"ls"]
code = run_as_bash_script("\n".join(bash_lines), capture_output=True)
self.assertEqual(code.returncode, 0) # ok
def test_bad_command_error_code(self):
bash_lines = [
f'set -e',
f"ok_computer_make_me_happy"]
code = run_as_bash_script("\n".join(bash_lines), capture_output=True)
self.assertEqual(code.returncode, 127) # error
def test_alias_expansion(self):
with TemporaryDirectory() as td:
file_to_create = Path(td) / "to_be_or_not_to_be.txt"
file_to_create_quoted = repr(str(file_to_create))
bash_lines = [
f'set -e',
f"shopt -s expand_aliases",
f'alias ohoho="echo"', # this will work in bash, but not in sh
f'ohoho "that is the answer" > {file_to_create_quoted}']
self.assertFalse(file_to_create.exists())
code = run_as_bash_script("\n".join(bash_lines),
capture_output=True)
self.assertEqual(code.returncode, 0)
self.assertTrue(file_to_create.exists())
self.assertEqual(file_to_create.read_text().strip(),
"that is the answer")
def test_input_delay(self):
start = timer()
# run an interactive shell and type "exit" after a small delay
with TimeLimited(seconds=10): # safety net
run_as_bash_script("exec bash", input="exit\n".encode(),
input_delay=1, timeout=10, capture_output=True)
end = timer()
self.assertGreater(end - start, 0.9)
self.assertLess(end - start, 5)
| 37.6
| 79
| 0.624113
| 1,870
| 0.828533
| 0
| 0
| 1,914
| 0.848028
| 0
| 0
| 513
| 0.227293
|
3cf1aac57cec16e9686acb6784d6d3e00f8dc890
| 8,825
|
py
|
Python
|
adversarial/train_adversarial.py
|
liguge/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator
|
e0f2cd042e2c124e73d2982af28fa270263180d8
|
[
"MIT"
] | 1
|
2022-01-16T03:21:18.000Z
|
2022-01-16T03:21:18.000Z
|
adversarial/train_adversarial.py
|
liguge/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator
|
e0f2cd042e2c124e73d2982af28fa270263180d8
|
[
"MIT"
] | 1
|
2022-03-29T10:50:48.000Z
|
2022-03-30T07:14:56.000Z
|
adversarial/train_adversarial.py
|
hectorLop/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator
|
e0f2cd042e2c124e73d2982af28fa270263180d8
|
[
"MIT"
] | 2
|
2022-01-16T03:21:54.000Z
|
2022-03-10T01:17:12.000Z
|
from typing import Dict, List, Tuple
import torch
import numpy as np
import argparse
from torch import nn
import yaml
import pandas as pd
from sklearn.metrics import roc_auc_score
from adversarial.adversarial import AdversarialNetwork, Classifier, Discriminator
from adversarial.dataset import (
AdversarialDataset,
get_transforms
)
from adversarial.config import Config
from adversarial.utils import (
fix_all_seeds,
freeze_unfreeze,
get_ground_truth_vector
)
from torch.utils.data import DataLoader
def train_step(
model : nn.Module,
train_loader : DataLoader,
config : Config,
class_criterion : object,
disc_criterion : object,
extractor_criterion : object,
optimizer : torch.optim.Optimizer
) -> Tuple[float, float, float, float]:
model.train()
class_loss_accum, disc_loss_accum, extr_loss_accum = 0., 0., 0.
y_train = []
preds = []
for images, domains, labels in train_loader:
images = images.to(config.DEVICE)
domains = domains.to(config.DEVICE)
labels = labels.to(config.DEVICE)
# Set the gradients to zero before backprop step
optimizer.zero_grad()
# # # # # # # # # # # # # #
# Step 1: Classification #
# # # # # # # # # # # # # #
freeze_unfreeze(model.feature_extractor, True)
freeze_unfreeze(model.discriminator, True)
freeze_unfreeze(model.classifier, True)
# Get predictions and calculate the loss
y_preds_class = model(images)
y_preds_class = y_preds_class.to(config.DEVICE)
class_loss = class_criterion(y_preds_class.squeeze(), labels)
class_loss_accum += class_loss.item()
# Backward step
class_loss.backward()
optimizer.step()
optimizer.zero_grad()
y_train.append(labels.detach().cpu().numpy())
preds.append(y_preds_class.softmax(1).detach().cpu().numpy())
# # # # # # # # # # # # #
# Step 2: Discriminator #
# # # # # # # # # # # # #
freeze_unfreeze(model.feature_extractor, False)
freeze_unfreeze(model.discriminator, True)
freeze_unfreeze(model.classifier, True)
# Get predictions and calculate the loss
y_preds_disc = model.forward_disc(images)
y_preds_disc = y_preds_disc.to(config.DEVICE)
disc_loss = disc_criterion(y_preds_disc.squeeze(), domains)
disc_loss_accum += disc_loss.item()
# Backward step
disc_loss.backward()
optimizer.step()
optimizer.zero_grad()
# # # # # # # # # # #
# Step 3: Extractor #
# # # # # # # # # # #
freeze_unfreeze(model.feature_extractor, True)
freeze_unfreeze(model.discriminator, False)
freeze_unfreeze(model.classifier, True)
# Get predictions and calculate the loss
y_preds_extr = model.forward_disc(images)
y_preds_extr = y_preds_extr.to(config.DEVICE)
gt_vector = get_ground_truth_vector(labels, config.N_DOMAINS, config.N_CLASSES)
gt_vector = gt_vector.to(config.DEVICE)
extr_loss = extractor_criterion(y_preds_extr.squeeze(), gt_vector)
extr_loss_accum += extr_loss.item()
# Backward step
extr_loss.backward()
optimizer.step()
optimizer.zero_grad()
y_train = np.concatenate(y_train)
preds = np.concatenate(preds)
preds = preds[np.arange(len(preds)), preds.argmax(1)]
auc = roc_auc_score(y_train, preds)
return class_loss_accum, disc_loss_accum, extr_loss_accum, auc
def val_step(model : nn.Module, val_loader : DataLoader,
config : Config, criterion : object) -> Tuple[float, float]:
model.eval()
preds = []
epoch_loss = 0
y_test = []
with torch.no_grad():
for images, domains, labels in val_loader:
images = images.to(config.DEVICE)
domains = domains.to(config.DEVICE)
labels = labels.to(config.DEVICE)
y_preds = model(images)
y_preds = y_preds.to(config.DEVICE)
loss = criterion(y_preds.squeeze(), labels)
y_test.append(labels.cpu().numpy())
preds.append(y_preds.softmax(1).cpu().numpy())
epoch_loss += loss.item()
y_test = np.concatenate(y_test)
preds = np.concatenate(preds)
preds = preds[np.arange(len(preds)), preds.argmax(1)]
auc = roc_auc_score(y_test, preds)
return epoch_loss, auc
def fit(
model : nn.Module,
train_loader : DataLoader,
val_loader : DataLoader,
config : Config,
filepath : str
) -> Tuple[nn.Module, List[float], List[float]]:
model = model.to(config.DEVICE)
optimizer = torch.optim.SGD(model.parameters(),
lr=config.LEARNING_RATE,
momentum=config.MOMENTUM,
weight_decay=config.WEIGHT_DECAY)
# Criterions for each step
class_criterion = torch.nn.CrossEntropyLoss()
disc_criterion = torch.nn.CrossEntropyLoss()
extr_criterion = torch.nn.MSELoss()
n_batches, n_batches_val = len(train_loader), len(val_loader)
best_loss = np.inf
val_loss_accum, train_loss_accum = [], []
with torch.cuda.device(config.DEVICE):
for epoch in range(1, config.EPOCHS + 1):
class_loss, disc_loss, extr_loss, train_auc = train_step(model,
train_loader,
config,
class_criterion,
disc_criterion,
extr_criterion,
optimizer)
class_loss = class_loss / n_batches
disc_loss = disc_loss / n_batches
extr_loss = extr_loss / n_batches
val_loss, val_auc = val_step(model,
val_loader,
config,
class_criterion)
val_loss = val_loss / n_batches_val
prefix = f"[Epoch {epoch:2d} / {config.EPOCHS:2d}]"
print(prefix)
print(f"{prefix} Train Class loss: {class_loss:7.5f}. Train Disc Loss: {disc_loss:7.5f}. Train Extr Loss: {extr_loss:7.5f}")
print(f"{prefix} Val Class loss: {val_loss:7.5f}")
print(f"{prefix} Train AUC-ROC: {train_auc:7.5f}. Val AUC-ROC: {val_auc:7.5f}")
if val_loss < best_loss:
best_loss = val_loss
print(f'{prefix} Save Val loss: {val_loss:7.5f}')
torch.save(model.state_dict(), filepath)
print(prefix)
return model, train_loss_accum, val_loss_accum
def get_loaders(df_train, df_val, config=Config):
ds_train = AdversarialDataset(df_train, get_transforms(config, augment=True), config)
dl_train = DataLoader(ds_train,
batch_size=config.BATCH_SIZE,
shuffle=True,
num_workers=0)
ds_val = AdversarialDataset(df_val, get_transforms(config, augment=False), config)
dl_val = DataLoader(ds_val,
batch_size=config.BATCH_SIZE,
shuffle=True,
num_workers=0)
return dl_train, dl_val
def train(parameters : Dict):
fix_all_seeds(3088)
train = pd.read_csv(parameters['train_set'])
val = pd.read_csv(parameters['val_set'])
train_loader, val_loader = get_loaders(train, val)
print('Getting the model')
classifier = Classifier(256, 2)
discriminator = Discriminator(256, 0.5, Config.N_DOMAINS, Config.N_CLASSES)
model = AdversarialNetwork(discriminator, classifier,
parameters['model_name'], 2048)
print('TRAINING')
model, train_loss, val_loss = fit(model,
train_loader,
val_loader,
Config,
parameters['checkpoint'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', required=True, help='Config YAML file')
args = parser.parse_args()
with open(args.config) as file:
params = yaml.load(file, Loader=yaml.FullLoader)
train(params)
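# Illustrative config sketch (added for clarity; all file names and values below are
# hypothetical). The YAML passed via --config must provide the keys read in train():
#
#   train_set: data/train.csv
#   val_set: data/val.csv
#   model_name: resnet50
#   checkpoint: checkpoints/adversarial.pt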
| 33.942308
| 136
| 0.566799
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 891
| 0.100963
|
3cf1f4f9c94b916e1af4be610a5cfc8f880bc37a
| 18,425
|
py
|
Python
|
generate_md.py
|
wzyjerry/EPO-patent-process
|
686c0ea6d9122436071c809a238b8348cdf65120
|
[
"MIT"
] | null | null | null |
generate_md.py
|
wzyjerry/EPO-patent-process
|
686c0ea6d9122436071c809a238b8348cdf65120
|
[
"MIT"
] | null | null | null |
generate_md.py
|
wzyjerry/EPO-patent-process
|
686c0ea6d9122436071c809a238b8348cdf65120
|
[
"MIT"
] | null | null | null |
def trans_date(field: dict) -> str:
text = str(field['date'])
return '%s.%s.%s' % (text[6:], text[4:6], text[:4])
def trans_4xx(field: dict, lang: str) -> str:
text = str(field['bnum'])
return '%s %s %s/%s' % (trans_date(field), labels['bulletin'][lang], text[:4], text[4:])
def trans_ipc(field: str) -> str:
field = field.split()
return '%s %s %s %s' % (field[0][1:], field[1][:2], field[1][2:], field[2])
def trans_ipcr(field: dict) -> str:
text = field['text'].split()
return '%s %s <sup>(%s.%s)</sup>' % (text[0], text[1], text[2][:4], text[2][4:6])
def trans_name(field: dict, out_str: bool) -> str:
if 'B725EP' in field:
return '<br>'.join(field['B725EP']['text'])
if 'sfx' in field:
sfx = field['sfx']
else:
sfx = ''
snm = field['snm'] + sfx
if 'adr' not in field or len(field['adr']) == 0:
return snm
adr = field['adr']
if out_str and 'str' in adr:
return '%s<br>%s<br>%s (%s)' % (snm, adr['str'], adr['city'], adr['ctry'])
else:
return '%s<br>%s (%s)' % (snm, adr['city'], adr['ctry'])
def trans_international_an(field: dict) -> str:
anum = field['B861']['dnum']['anum']
return 'PCT/%s/%s' % (anum[:6], anum[6:])
def trans_international_pn(field: dict) -> str:
B871 = field['B871']
pnum = B871['dnum']['pnum']
bnum = str(B871['bnum'])
return '%s %s/%s (%s Gazette %s/%s)' % (pnum[:2], pnum[2:6], pnum[6:], trans_date(B871), bnum[:4], bnum[4:])
def trans_doc(field: dict) -> str:
dnum = field['dnum']
anum = dnum['anum']
if 'pnum' in dnum:
pnum = dnum['pnum']
return '%s / %s' % (anum, format(int(pnum), ',').replace(',', ' '))
else:
return anum
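# Worked examples for the helpers above (added for clarity; input values are illustrative):
#   trans_date({'date': 20160131})                              -> '31.01.2016'
#   trans_ipcr({'text': 'A61K 31/00 20060101AFI20160101BHEP'})  -> 'A61K 31/00 <sup>(2006.01)</sup>'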
labels = {
15: {
'de': [
'Korrekturinformation',
'Korrigierte Fassung Nr.',
'Korrekturen, siehe'
],
'en': [
'Correction information',
'Corrected version no',
'Corrections, see'
],
'fr': [
'Information de correction',
'Version corrigée no',
'Corrections, voir'
]
},
21: {
'de': 'Anmeldenummer',
'en': 'Application number',
'fr': 'Numéro de dépôt'
},
22: {
'de': 'Anmeldetag',
'en': 'Date of filing',
'fr': 'Date de dépôt'
},
30: {
'de': 'Priorität',
'en': 'Priority',
'fr': 'Priorité'
},
43: {
'de': {
'A1': 'Veröffentlichungstag',
'A3': 'Veröffentlichungstag A2',
'A8': 'Veröffentlichungstag',
'A9': 'Veröffentlichungstag',
'B1': 'Veröffentlichungstag der Anmeldung',
'B2': 'Veröffentlichungstag der Anmeldung',
'B3': 'Veröffentlichungstag der Anmeldung',
'B9': 'Veröffentlichungstag der Anmeldung'
},
'en': {
'A1': 'Date of publication',
'A3': 'Date of publication A2',
'A8': 'Date of publication',
'A9': 'Date of publication',
'B1': 'Date of publication of application',
'B2': 'Date of publication of application',
'B3': 'Date of publication of application',
'B9': 'Date of publication of application'
},
'fr': {
'A1': 'Date de publication',
'A3': 'Date de publication A2',
'A8': 'Date de publication',
'A9': 'Date de publication',
'B1': 'Date de publication de la demande',
'B2': 'Date de publication de la demande',
'B3': 'Date de publication de la demande',
'B9': 'Date de publication de la demande'
}
},
45: {
'de': {
'B1': 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Patenterteilung',
'B2': {
45: 'Hinweis auf die Patenterteilung',
47: 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Entscheidung über den Einspruch'
},
'B9': {
45: 'Hinweis auf die Patenterteilung',
47: 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Entscheidung über den Einspruch'
}
},
'en': {
'B1': 'Date of publication and mention of the grant of the patent',
'B2': {
45: 'Mention of the grant of the patent',
47: 'Date of publication and mention of the opposition decision:'
},
'B9': {
45: 'Mention of the grant of the patent',
47: 'Date of publication and mention of the opposition decision:'
}
},
'fr': {
'B1': 'Date de publication et mention de la délivrance du brevet',
'B2': {
45: 'Mention de la délivrance du brevet',
47: 'Date de publication et mention de la décision concernant l’opposition'
},
'B9': {
45: 'Mention de la délivrance du brevet',
47: 'Date de publication et mention de la décision concernant l’opposition'
}
}
},
48: {
'de': 'Corrigendum ausgegeben am',
'en': 'Corrigendum issued on',
'fr': 'Corrigendum publié le'
},
51: {
'de': 'Int Cl.',
'en': 'Int Cl.',
'fr': 'Int Cl.',
},
56: {
'de': 'Entgegenhaltungen',
'en': 'References cited',
'fr': 'Documents cités'
},
60: {
'de': 'Teilanmeldung',
'en': 'Divisional application',
'fr': 'Demande divisionnaire'
},
71: {
'de': 'Anmelder',
'en': 'Applicant',
'fr': 'Demandeur'
},
72: {
'de': 'Erfinder',
'en': 'Inventor',
'fr': 'Inventeur'
},
73: {
'de': 'Patentinhaber',
'en': 'Proprietor',
'fr': 'Titulaire'
},
74: {
'de': 'Vertreter',
'en': 'Representative',
'fr': 'Mandataire'
},
84: {
'de': [
'Benannte Vertragsstaaten',
'Benannte Erstreckungsstaaten',
'Benannte Validierungsstaaten'
],
'en': [
'Designated Contracting States',
'Designated Extension States',
'Designated Validation States'
],
'fr': [
'Etats contractants désignés',
'Etats d’extension désignés',
'Etats de validation désignés'
]
},
86: {
'de': 'Internationale Anmeldenummer',
'en': 'International application number',
'fr': 'Numéro de dépôt international'
},
87: {
'de': 'Internationale Veröffentlichungsnummer',
'en': 'International publication number',
'fr': 'Numéro de publication internationale'
},
88: {
'de': 'Veröffentlichungstag A3',
'en': 'Date of publication A3',
'fr': 'Date de publication A3'
},
'bulletin': {
'de': 'Patentblatt',
'en': 'Bulletin',
'fr': 'Bulletin'
},
'description': {
'de': 'Beschreibung',
'en': 'Description',
'fr': 'Description'
},
'remarks': {
'de': 'Bemerkungen',
'en': 'Remarks'
}
}
def generate_md(patent: str) -> str:
md = []
kind = patent['attr']['kind']
lang = patent['attr']['lang']
SDOBI = patent['SDOBI']
B000 = SDOBI['B000']
eptags = B000['eptags']
B100 = SDOBI['B100']
B200 = SDOBI['B200']
B400 = SDOBI['B400']
B500 = SDOBI['B500']
B700 = SDOBI['B700']
B800 = SDOBI['B800']
md.append('# (11)(19) **%s %s %s**' % (B100['B190'], format(int(B100['B110']), '0>7,').replace(',', ' '), B100['B130']))
if 'B120' in B100:
if 'B121EP' in B100['B120']:
md.append('## (12) **%s**<br>%s' % (B100['B120']['B121'], B100['B120']['B121EP']))
else:
md.append('## (12) **%s**' % B100['B120']['B121'])
if kind in ['A3']:
md.append('## (88) %s:<br>**%s**' % (labels[88][lang], trans_4xx(B800['B880'], lang)))
if kind in ['B1']:
md.append('## (45) %s:<br>**%s**' % (labels[45][lang][kind], trans_4xx(B400['B450'], lang)))
if kind in ['A8', 'A9', 'B9']:
B150 = B100['B150']
md.append('## (15) %s:<br>' % labels[15][lang][0])
B151 = B150['B151']
if B151[0] == 'W':
md.append('**%s %s (%s %s)**<br>' % (labels[15][lang][1], B151[1:], B151, B100['B132EP']))
else:
raise Exception('not W')
# TODO: Mismatch here. eg. EP10153923W1B9
# TODO: EP12812953W1B9
md.append('**%s**<br>' % labels[15][lang][2])
for B155 in B150['B155']:
if B155['B1551'] == lang:
if 'B153' in B150:
md.append('**%s  INID code(s)  %s**' % (B155['B1552'], B150['B153']))
elif 'B154' in B150:
for B154 in B150['B154']:
if B154['B1541'] == lang:
md.append('**%s**<br>**%s**' % (B155['B1552'], B154['B1542']))
else:
md.append('<br>**%s**<br>' % (B155['B1552']))
md.append('## (48) %s:<br>**%s**' % (labels[48][lang], trans_4xx(B400['B480'], lang)))
if kind in ['B2', 'B9']:
if 'B477' in B400:
md.append('## (45) %s<br>**%s**' % (labels[45][lang][kind][47], trans_4xx(B400['B477'], lang)))
md.append('## (45) %s<br>**%s**' % (labels[45][lang][kind][45], trans_4xx(B400['B450'], lang)))
if kind in ['B3']:
md.append('## (45) Date of publication and mention of the limitation decision:<br>')
for B4530EP in B400['B453EP']['B4530EP']:
md.append('1. **%s-%s %s**' % (B4530EP['kind'], B4530EP['attr']['limitation-sequence'], trans_4xx(B4530EP, lang)))
md.append('## (45) Mention of the grant of the patent:<br>**%s**' % trans_4xx(B400['B450'], lang))
if kind in ['A1', 'A3', 'A8', 'A9']:
md.append('## (43) %s:<br>**%s**' % (labels[43][lang][kind], trans_4xx(B400['B430'], lang)))
md.append('## (21) %s: **%s**' % (labels[21][lang], B200['B210']))
md.append('## (22) %s: **%s**' % (labels[22][lang], trans_date(B200['B220'])))
if 'B510' in B500:
B510 = B500['B510']
md.append('## (51) %s<sup>%s</sup>:' % (labels[51][lang], B510['B516']))
md.append('+ **%s**' % trans_ipc(B510['B511']))
if 'B512' in B510:
for B512 in B510['B512']:
md.append('+ %s' % trans_ipc(B512))
if 'B513' in B510:
for B513 in B510['B513']:
md.append('+ %s' % trans_ipc(B513))
if 'B514' in B510:
md.append('+ %s' % B510['B517EP'])
if 'B510EP' in B500:
md.append('## (51) %s:' % labels[51][lang])
for ipcr in B500['B510EP']:
md.append('+ ***%s***' % trans_ipcr(ipcr))
if 'B860' in B800:
md.append('## (86) %s:<br>**%s**' % (labels[86][lang], trans_international_an(B800['B860'])))
if 'B870' in B800:
md.append('## (87) %s:<br>**%s**' % (labels[87][lang], trans_international_pn(B800['B870'])))
md.append('***')
md.append('## (54)')
for B540 in B500['B540']:
if B540['B541'] == patent['attr']['lang']:
md.append('+ **%s**' % B540['B542'])
else:
md.append('+ %s' % B540['B542'])
md.append('***')
md.append('## (84) %s:' % labels[84][lang][0])
md.append('**%s**' % ' '.join(B800['B840']))
if 'B844EP' in B800:
md.append('<br>%s:<br>**%s**' % (labels[84][lang][1], ' '.join([x['ctry'] for x in B800['B844EP']['B845EP']])))
if 'B848EP' in B800:
md.append('<br>%s:<br>**%s**' % (labels[84][lang][2], ' '.join([x['ctry'] for x in B800['B848EP']['B849EP']])))
if 'B300' in SDOBI:
B300 = SDOBI['B300']
md.append('## (30) %s:' % labels[30][lang])
for priority in B300:
md.append('+ **%s %s %s**' % (trans_date(priority['B320']), priority['B330']['ctry'], priority['B310']))
if 'B600' in SDOBI:
B600 = SDOBI['B600']
if 'B620' in B600:
B620 = B600['B620']['parent']
md.append('## (62) Document number(s) of the earlier application(s) in accordance with Art. 76 EPC:')
for pdoc_list in B620['pdoc']:
for pdoc in pdoc_list:
md.append('+ **%s**' % trans_doc(pdoc))
if 'B270' in B200:
B270 = B200['B270']
md.append('## (27) Previously filed application:')
md.append('+ **%s %s %s**' % (trans_date(B270), B270['ctry'], B270['dnum']['anum']))
if kind in ['B1', 'B2', 'B3', 'B9']:
md.append('## (43) %s: **%s**' % (labels[43][lang][kind], trans_4xx(B400['B430'], lang)))
if 'B600' in SDOBI:
B600 = SDOBI['B600']
if 'B620EP' in B600:
B620EP = B600['B620EP']['parent']
md.append('## (60) %s:' % labels[60][lang])
for cdoc_list in B620EP['cdoc']:
for cdoc in cdoc_list:
md.append('+ **%s**' % trans_doc(cdoc))
if 'B710' in B700:
md.append('## (71) %s:' % labels[71][lang])
for applicant in B700['B710']:
if 'B716EP' in applicant:
md.append('+ **%s**<br>Designated Contracting States:<br>**%s**' % (trans_name(applicant, False), ' '.join(applicant['B716EP']['ctry'])))
else:
md.append('+ **%s**' % trans_name(applicant, False))
if 'B730' in B700:
md.append('## (73) %s:' % labels[73][lang])
for grantee in B700['B730']:
if 'B736EP' in grantee:
md.append('+ **%s**<br>Designated Contracting States:<br>**%s**' % (trans_name(grantee, False), ' '.join(grantee['B736EP']['ctry'])))
else:
md.append('+ **%s**' % trans_name(grantee, False))
md.append('## (72) %s:' % labels[72][lang])
for inventor in B700['B720']:
md.append('+ **%s**' % trans_name(inventor, False).strip())
if 'B740' in B700:
md.append('## (74) %s:' % labels[74][lang])
for agent in B700['B740']:
md.append('+ **%s**' % trans_name(agent, True))
if 'B560' in B500:
B560 = B500['B560']
md.append('## (56) %s:' % labels[56][lang])
if 'B561' in B560:
B561 = B560['B561']
for patent_citation in B561:
md.append('1. **%s**' % patent_citation['text'])
if 'B562' in B560:
B562 = B560['B562']
md.append('')
for patent_citation in B562:
md.append('+ **%s**' % patent_citation['text'])
if 'B050EP' in eptags or 'B053EP' in eptags or 'B070EP' in eptags:
md.append('<br><br><u>%s:</u>' % labels['remarks'][lang])
if 'B050EP' in eptags:
for B050EP in eptags['B050EP']:
md.append('+ %s' % B050EP['B052EP'])
if 'B053EP' in eptags:
for B053EP in eptags['B053EP']:
md.append('+ %s' % B053EP)
if 'B070EP' in eptags:
md.append('+ %s' % eptags['B070EP'])
md.append('***')
if 'abstract' in patent:
md.append('(57) ')
abstract = patent['abstract']
for abst in abstract:
for content in abst['content']:
md.append('%s<br>' % content['content'])
md.append('***')
if 'description' in patent:
md.append('**%s**<br>' % labels['description'][lang])
description = patent['description']
for content in description['content']:
if content['type'] == 'heading':
md.append('<br>%s<br>' % content['content'])
elif content['type'] == 'p':
md.append('**[%s]** %s<br>\n' % (content['attr']['num'], content['content']))
md.append('***')
if 'claims' in patent:
for claims in patent['claims']:
claims_title = 'Claims'
if claims['attr']['lang'] == 'de':
claims_title = 'Patentansprüche'
elif claims['attr']['lang'] == 'fr':
claims_title = 'Revendications'
md.append('### **%s**<br><br>' % claims_title)
for claim in claims['claim']:
md.append('1. %s<br><br>' % '<br>'.join(claim['claim_text']).replace('\n', '<br>'))
md.append('***')
if 'amended-claims' in patent:
amended_claims = patent['amended-claims']
for claims in amended_claims:
md.append('**%s**<br><br>' % claims['heading']['content'])
for claim in claims['claim']:
md.append('1. %s<br><br>' % '<br>'.join(claim['claim_text']).replace('\n', '<br>'))
if 'amended-claims-statement' in claims:
amended_claims_statement = claims['amended-claims-statement']
for item in amended_claims_statement:
for claims_statement in item['claims-statement']:
for content in claims_statement['content']:
if content['type'] == 'heading':
md.append('<br><br>**%s**<br><br>' % content['content'])
elif content['type'] == 'p':
md.append('%s<br>\n' % content['content'])
md.append('***')
if 'amended-claims-statement' in patent:
amended_claims_statement = patent['amended-claims-statement']
for item in amended_claims_statement:
for claims_statement in item['claims-statement']:
for content in claims_statement['content']:
if content['type'] == 'heading':
md.append('<br><br>**%s**<br><br>' % content['content'])
elif content['type'] == 'p':
md.append('%s<br>\n' % content['content'])
md.append('***')
if 'ep-reference-list' in patent:
ep_reference_list = patent['ep-reference-list']
for content in ep_reference_list['content']:
if content['type'] == 'heading':
md.append('<br><br>%s<br><br>' % content['content'])
elif content['type'] == 'p':
md.append('%s<br>' % content['content'])
return '\n'.join(md)
| 39.623656
| 153
| 0.48711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,958
| 0.376678
|
3cf38cae0f2a545ab33232a28befeb4c8470d502
| 1,103
|
py
|
Python
|
tests/test_http_basic_auth.py
|
zhanghe06/flask_restful
|
6ef54f3f7efbbaff6169e963dcf45ab25e11e593
|
[
"MIT"
] | 1
|
2020-12-04T03:15:47.000Z
|
2020-12-04T03:15:47.000Z
|
tests/test_http_basic_auth.py
|
zhanghe06/flask_restful
|
6ef54f3f7efbbaff6169e963dcf45ab25e11e593
|
[
"MIT"
] | 1
|
2021-06-01T22:24:27.000Z
|
2021-06-01T22:24:27.000Z
|
tests/test_http_basic_auth.py
|
zhanghe06/flask_restful
|
6ef54f3f7efbbaff6169e963dcf45ab25e11e593
|
[
"MIT"
] | 2
|
2020-12-04T03:16:18.000Z
|
2021-09-04T14:10:12.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: test_http_basic_auth.py
@time: 2018-06-21 11:17
"""
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import requests
from requests.auth import HTTPBasicAuth
class HttpBasicAuthTest(unittest.TestCase):
"""
Authentication tests
"""
def setUp(self):
self.auth_username = 'username'
self.auth_password = 'password'
self.auth_url = 'http://0.0.0.0:5000/token'
self.session = requests.session()
def test_auth_success(self):
"""
Test successful authentication
:return:
"""
base_auth = HTTPBasicAuth(self.auth_username, self.auth_password)
res = self.session.get(self.auth_url, auth=base_auth)
self.assertEqual(res.status_code, 200)
def test_auth_failure(self):
"""
Test failed authentication
:return:
"""
res = self.session.get(self.auth_url)
self.assertEqual(res.status_code, 401)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| 20.425926
| 73
| 0.635539
| 790
| 0.696035
| 0
| 0
| 0
| 0
| 0
| 0
| 339
| 0.298678
|
3cf5781010a796345729a2c7347029eba43ec197
| 1,696
|
py
|
Python
|
snomed_parent_cat_mapper.py
|
vickysam/pyHealth
|
5660afd385a0342aa2039b42af5f208c672bfdeb
|
[
"Apache-2.0"
] | 7
|
2017-04-30T15:12:33.000Z
|
2021-11-21T01:39:04.000Z
|
snomed_parent_cat_mapper.py
|
vickysam/pyHealth
|
5660afd385a0342aa2039b42af5f208c672bfdeb
|
[
"Apache-2.0"
] | null | null | null |
snomed_parent_cat_mapper.py
|
vickysam/pyHealth
|
5660afd385a0342aa2039b42af5f208c672bfdeb
|
[
"Apache-2.0"
] | 2
|
2018-08-07T14:38:14.000Z
|
2021-04-09T05:41:08.000Z
|
import csv
import pymedtermino
from pymedtermino.snomedct import *
pymedtermino.LANGUAGE = "en"
pymedtermino.REMOVE_SUPPRESSED_CONCEPTS = False
input_delta_file = 'sct2_Concept_Delta_INT_20160131.csv'
output_delta_file = 'sct2_Concept_Delta_INT_20160131_Top_Category_Mapped.csv'
data = []
snomed_data = []
with open('top_parent_cat.csv', 'r', newline='') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in reader:
data.append([row['top_concept_id'],row['top_category_code']])
print "Supplied : ", data
with open(input_delta_file, 'rb') as csvfile:
reader = csv.DictReader(csvfile, delimiter=' ', quotechar='"')
for row in reader:
snomed_data.append([row['id'],row['effectiveTime'],row['active'],row['moduleId'],row['definitionStatusId'],0,0])
csvfile = open(output_delta_file, 'w', newline='')
writer = csv.DictWriter(csvfile, fieldnames=['id','effectiveTime','active','moduleId','definitionStatusId','topCategoryCode','topCategoryId'])
writer.writeheader()
i = 0
for concept in snomed_data:
ancestors = list(SNOMEDCT[concept[0]].ancestors())
category = SNOMEDCT[138875005]
if len(ancestors) >= 2:
category = ancestors[-2]
if len(ancestors) >= 3:
if ancestors[-3].code == '406455002' or ancestors[-3].code == '116273005':
category = ancestors[-3]
else:
category = SNOMEDCT[138875005]
term = category.term
for item in data:
if item[0] == str(category.code):
term=item[1]
writer.writerow({'id': str(concept[0]), 'effectiveTime': concept[1],'active': concept[2],'moduleId': str(concept[3]),'definitionStatusId': str(concept[4]) , 'topCategoryCode': term,'topCategoryId': str(category.code)})
i = i + 1
csvfile.close()
print "Completed...."
| 32.615385
| 219
| 0.722877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 461
| 0.271816
|
3cf5831f266719f857798ff19bb7f65e432caf03
| 710
|
py
|
Python
|
Python/287. FindTheDuplicateNumber.py
|
RaymondWaterlooLi/LeetCode-Solutions
|
7973d2838b114f1dffc29f436fb660a96b51f660
|
[
"MIT"
] | 263
|
2020-10-05T18:47:29.000Z
|
2022-03-31T19:44:46.000Z
|
Python/287. FindTheDuplicateNumber.py
|
RaymondWaterlooLi/LeetCode-Solutions
|
7973d2838b114f1dffc29f436fb660a96b51f660
|
[
"MIT"
] | 1,264
|
2020-10-05T18:13:05.000Z
|
2022-03-31T23:16:35.000Z
|
Python/287. FindTheDuplicateNumber.py
|
RaymondWaterlooLi/LeetCode-Solutions
|
7973d2838b114f1dffc29f436fb660a96b51f660
|
[
"MIT"
] | 760
|
2020-10-05T18:22:51.000Z
|
2022-03-29T06:06:20.000Z
|
#Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.
#There is only one duplicate number in nums, return this duplicate number.
class Solution(object):
def findDuplicate(self, nums):
#Sort the array, then binary-search for the position where the duplicate appears
s = sorted(nums) #sorting given array
a,b = 0,len(nums)
temp=(a+b)//2
t = 1
while t: #using binary search to find duplicate
if s[temp] == temp and s[temp-1] == temp:
return s[temp]
if s[temp] == temp+1 and s[temp-1] == temp:
a = temp
else:
b = temp
temp = (a+b)//2
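#Example (added for illustration):
#   Solution().findDuplicate([1, 3, 4, 2, 2]) -> 2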
| 37.368421
| 111
| 0.539437
| 522
| 0.735211
| 0
| 0
| 0
| 0
| 0
| 0
| 278
| 0.391549
|
3cf74e26261f13d85a64a42ef32a7fccd8ef0a55
| 2,484
|
py
|
Python
|
utils/evaluate_annotation.py
|
cltl-students/hamersma-agression-causes
|
11cbfd94031a0a3c84a27afa20d8a539acdab609
|
[
"MIT"
] | null | null | null |
utils/evaluate_annotation.py
|
cltl-students/hamersma-agression-causes
|
11cbfd94031a0a3c84a27afa20d8a539acdab609
|
[
"MIT"
] | null | null | null |
utils/evaluate_annotation.py
|
cltl-students/hamersma-agression-causes
|
11cbfd94031a0a3c84a27afa20d8a539acdab609
|
[
"MIT"
] | null | null | null |
import pandas as pd
from sklearn.metrics import cohen_kappa_score, confusion_matrix
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
dirname = os.path.dirname(__file__)
def extract_annotations(files):
'''Function that takes the pair of annotation files as input and extracts lists of labels for the vims that
are annotated by both annotators.
:param files: list of files
:returns annotations_ann1: list of strings
:returns annotations_ann2: list of strings'''
file_ann1 = dirname +'/annotations/' + files[0]
file_ann2 = dirname + '/annotations/' + files[1]
ann1 = pd.read_excel(file_ann1, index_col=1).T.to_dict()
ann2 = pd.read_excel(file_ann2, index_col=1).T.to_dict()
annotations_ann1 = []
annotations_ann2 = []
for key, value in ann2.items():
label2 = value['Aggression']
label1 = ann1.get(key).get('Aggression')
annotations_ann1.append(label1)
annotations_ann2.append(label2)
return annotations_ann1, annotations_ann2
def calculate_score(ann1, ann2):
"""Function that calculates the inter agreement score using Cohen's Kappa, prints the scores and confusion matrix.
:param ann1: list of annotation labels
:param ann2: list of annotation labels """
agreement = [anno1 == anno2 for anno1, anno2 in zip(ann1, ann2)]
percentage = sum(agreement) / len(agreement)
print("Percentage Agreement: %.2f" % percentage)
termlabels = ['pos', 'neg']
kappa = cohen_kappa_score(ann1, ann2, labels=termlabels)
print("Cohen's Kappa: %.2f" % kappa)
confusions = confusion_matrix(ann1, ann2, labels=termlabels)
pandas_table = pd.DataFrame(confusions, index=termlabels, columns = ['pos', 'neg'])
group_names = ["True Pos", "False Neg", "False Pos", "True Neg"]
group_counts = ["{0: 0.0f}".format(value) for value in confusions.flatten()]
labels = [f"{v1} {v2}" for v1, v2 in zip(group_names, group_counts)]
labels = np.asarray(labels).reshape(2, 2)
sns.heatmap(pandas_table, annot=labels, fmt='', cmap = 'Blues')
plt.title("Confusion matrix annotations", size=12)
plt.show()
print(pandas_table)
def main():
files = ['202103022_chunks_annotated_Sanne.xlsx', '20210322_chunks_annotated_Zana.xlsx']
terms_an1, terms_an2 = extract_annotations(files)
calculate_score(terms_an1, terms_an2)
if __name__ == '__main__':
main()
| 39.428571
| 119
| 0.686393
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 797
| 0.320853
|
3cf83d68c033ebd1a763e8c4a9ee5516e254ffd0
| 1,068
|
py
|
Python
|
cogs/Events.py
|
popop098/Teasia-Bot.py
|
764c3b1cab8e07a9e98690263ad94011ee26ab72
|
[
"MIT"
] | 1
|
2020-12-21T12:05:25.000Z
|
2020-12-21T12:05:25.000Z
|
cogs/Events.py
|
popop098/Taesia-Bot.py
|
764c3b1cab8e07a9e98690263ad94011ee26ab72
|
[
"MIT"
] | null | null | null |
cogs/Events.py
|
popop098/Taesia-Bot.py
|
764c3b1cab8e07a9e98690263ad94011ee26ab72
|
[
"MIT"
] | 1
|
2021-10-30T03:45:42.000Z
|
2021-10-30T03:45:42.000Z
|
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions, CommandNotFound, BucketType, cooldown, CommandOnCooldown
from discord import Webhook, RequestsWebhookAdapter
from time import gmtime, strftime
from discord.utils import get
import youtube_dl
import logging
import random
import praw
import time
import json
import sys
import os
from random import randint
def RandomColor():
return randint(0, 0xFFFFFF)
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
print("[-]", error)
if isinstance(error, CommandOnCooldown):
await ctx.send(f"워워~진정하세요 잠시 쿨타임에 걸렸어요. {error.retry_after:,.2f} 초후에 다시 사용해주세요")
elif isinstance(error, MissingPermissions):
Denied = discord.Embed(title="⚠권한부족!", description="이 명령을 실행하실 권한이 없어요.자세한 사항은 관리자님께 문의하세요.", color=RandomColor())
await ctx.send(embed=Denied)
def setup(bot):
bot.add_cog(Events(bot))
| 30.514286
| 126
| 0.729401
| 673
| 0.563652
| 0
| 0
| 587
| 0.491625
| 558
| 0.467337
| 244
| 0.204355
|
3cf96ed28f3d03023b6eb089f792b8961163dffe
| 1,927
|
py
|
Python
|
panopto_client/access.py
|
uw-it-cte/django-panopto-client
|
cdfc22e1a7c1e06de62477c30681da0755238152
|
[
"Apache-2.0"
] | 4
|
2017-12-29T19:15:37.000Z
|
2019-11-18T18:32:39.000Z
|
panopto_client/access.py
|
uw-it-cte/django-panopto-client
|
cdfc22e1a7c1e06de62477c30681da0755238152
|
[
"Apache-2.0"
] | 2
|
2017-09-07T23:27:52.000Z
|
2019-04-10T20:27:22.000Z
|
panopto_client/access.py
|
uw-it-cte/django-panopto-client
|
cdfc22e1a7c1e06de62477c30681da0755238152
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
This module exposes Panopto "AccessManagement" Service methods
"""
from panopto_client import PanoptoAPI, PanoptoAPIException
class AccessManagement(PanoptoAPI):
def __init__(self):
super(AccessManagement, self).__init__(
wsdl='AccessManagement.svc?wsdl',
port='BasicHttpBinding_IAccessManagement')
def access_role(self, role):
try:
return self._instance('ns0:AccessRole')[role]
except TypeError:
return role
def getFolderAccessDetails(self, folder_id):
return self._request('GetFolderAccessDetails', {
'auth': self.authentication_info(),
'folderId': folder_id,
})
def grantUsersAccessToFolder(self, folder_id, user_ids, role):
return self._request('GrantUsersAccessToFolder', {
'auth': self.authentication_info(),
'folderId': folder_id,
'userIds': self.guid_list(ns='ns2:ArrayOfguid', guids=user_ids),
'role': self.access_role(role),
})
def revokeUsersAccessFromFolder(self, folder_id, user_ids, role):
return self._request('RevokeUsersAccessFromFolder', {
'auth': self.authentication_info(),
'folderId': folder_id,
'userIds': self.guid_list(ns='ns2:ArrayOfguid', guids=user_ids),
'role': self.access_role(role),
})
def getSessionAccessDetails(self, session_id):
return self._request('GetSessionAccessDetails', {
'auth': self.authentication_info(),
'sessionId': session_id
})
def updateSessionIsPublic(self, session_id, is_public):
return self._request('UpdateSessionIsPublic', {
'auth': self.authentication_info(),
'sessionId': session_id,
'isPublic': is_public
})
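# Usage sketch (added for illustration; the GUIDs below are placeholders and the
# role name depends on the Panopto AccessRole enum exposed by the WSDL):
# client = AccessManagement()
# details = client.getFolderAccessDetails('00000000-0000-0000-0000-000000000000')
# client.grantUsersAccessToFolder('00000000-0000-0000-0000-000000000000',
#                                 ['11111111-1111-1111-1111-111111111111'], 'Viewer')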
| 34.410714
| 76
| 0.637779
| 1,706
| 0.885314
| 0
| 0
| 0
| 0
| 0
| 0
| 517
| 0.268293
|
3cf9d103d47dd847c7bbdc09c8f10bae634a2961
| 20,459
|
py
|
Python
|
src/astrild/particles/halo.py
|
Christovis/wys-ars
|
bb15f2d392842f9b32de12b5db5c86079bc97105
|
[
"MIT"
] | 3
|
2021-07-27T14:45:58.000Z
|
2022-01-31T21:09:46.000Z
|
src/astrild/particles/halo.py
|
Christovis/wys-ars
|
bb15f2d392842f9b32de12b5db5c86079bc97105
|
[
"MIT"
] | 1
|
2021-11-03T10:47:45.000Z
|
2021-11-03T10:47:45.000Z
|
src/astrild/particles/halo.py
|
Christovis/wys-ars
|
bb15f2d392842f9b32de12b5db5c86079bc97105
|
[
"MIT"
] | 1
|
2021-11-03T10:17:34.000Z
|
2021-11-03T10:17:34.000Z
|
import os
from gc import collect
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union
from importlib import import_module
import yaml
import numpy as np
import pandas as pd
from sklearn.neighbors import BallTree
#from halotools.mock_observables import tpcf_multipole
from astrild.particles.ecosmog import Ecosmog
from astrild.particles.hutils import SubFind
from astrild.particles.hutils import Rockstar
#from astrild.particles.utils import TPCF
from astrild.utils import read_hdf5
from astrild.io import IO
dir_src = Path(__file__).parent.absolute()
default_halo_stats_config = dir_src / "configs/halo_stats.yaml"
dm_particle_mass = 7.98408e10 #[Msun/h]
class HalosWarning(BaseException):
pass
class Halos:
"""
Class to manage Rockstar & SubFind halos and get their statistics such as:
- halo mass fct.
- two point correlation fct.
- concentration mass relation
- pairwise velocity distribution
Attributes:
sim_type:
simulation:
Methods:
from_subfind:
from_rockstar:
from_dataframe:
from_file:
get_subfind_stats:
get_subfind_tpcf:
get_rockstar_stats:
get_rockstar_tpcf:
filter_resolved_subfind_halos:
filter_resolved_rockstar_halos:
_save_results:
_sort_statistics:
_create_filename:
"""
def __init__(
self,
halos: Union[read_hdf5.snapshot, pd.DataFrame],
simulation: Optional[Type[Ecosmog]] = None,
):
self.data = halos
self.sim = simulation
if hasattr(self.sim, "files") == False:
self.halotype = None
elif "fof" in list(self.sim.files.keys()):
self.halotype = "Arepo"
elif "halos" in list(self.sim.files.keys()):
self.halotype = "Rockstar"
@classmethod
def from_subfind(
cls, snap_nr: int, simulation: Optional[Type[Ecosmog]] = None,
) -> "Halos":
""" """
snapshot = read_hdf5.snapshot(
snap_nr,
simulation.dirs["sim"],
part_type_list=["dm"],
snapbases=["/snap-groupordered_"],
# check_total_particle_number=True,
# verbose=True,
)
snapshot.group_catalog(
[
"Group_M_Crit200",
"Group_R_Crit200",
"GroupPos",
"GroupVel",
"GroupFirstSub",
"GroupLenType",
"SubhaloVmax",
"SubhaloPos",
"SubhaloVel",
"SubhaloMass",
"SubhaloHalfmassRad",
]
)
if snapshot.cat["n_groups"] == 0:
snapshot = None
else:
snapshot.cat.update(
{
"SubhaloVmax": snapshot.cat["SubhaloVmax"][
(snapshot.cat["GroupFirstSub"][:]).astype(np.int64)
]
}
)
return cls(snapshot, simulation)
@classmethod
def from_rockstar(
cls, snap_nr: int, simulation: Optional[Type[Ecosmog]] = None,
) -> "Halos":
"""
Load halo data from Rockstar halo finder into pandas.DataFrame
Args:
snap_nr:
simulation:
"""
# TODO: currently only one directory supported, e.g. 012
files_path = simulation.files["halos"][str(snap_nr)]
first = True
for file_path in files_path:
snapshot_part = pd.read_csv(
file_path, header=0, skiprows=np.arange(1, 20), delim_whitespace=True,
)
if first is True:
snapshot = snapshot_part
first = False
else:
snapshot = snapshot.append(snapshot_part, ignore_index=True)
return cls.from_dataframe(snapshot, simulation)
@classmethod
def from_file(
cls, filename: str, simulation: Optional[Type[Ecosmog]] = None,
) -> "Halos":
""" """
df = pd.read_hdf(filename, key="df")
return cls.from_dataframe(df, simulation)
@classmethod
def from_dataframe(
cls, df: pd.DataFrame, simulation: Optional[Type[Ecosmog]] = None,
) -> "Halos":
""" """
return cls(df, simulation)
def get_subfind_stats(
self, config_file: str = default_halo_stats_config, save: bool = True,
) -> None:
"""
Compute statistics of halos identified with SubFind from one or a
collection of simulations.
Args:
config_file:
                file pointer which contains info on what statistics to
compute and their settings.
save:
                whether to save results to file.
"""
# load settings (stg)
with open(config_file) as f:
statistics = yaml.load(f, Loader=yaml.FullLoader)
for name in statistics.keys():
statistics[name]["results"] = {"bins": {}, "values": {}}
# load particles/utils/stats.py package for dynamic function call
module = import_module("astrild.particles.hutils")
# sort statistics according to required halos resolutions
stat_names_ord = self._sort_statistics(statistics)
for snap_nr in self.sim.dir_nrs:
snapshot = self.get_subfind_halo_data(snap_nr)
if snapshot is None:
print(f"No sub- & halos found for snapshot {snap_nr}")
continue
resolution = 0
for stat_name in stat_names_ord:
if statistics[stat_name]["resolution"] != resolution:
resolution = int(statistics[stat_name]["resolution"])
snapshot = self.filter_resolved_subfind_halos(snapshot, resolution)
print(f" Compute {stat_name}")
clas = getattr(module, "SubFind")
fct = getattr(clas, stat_name)
bins, values = fct(snapshot, **statistics[stat_name]["args"])
if (bins is not None) and (values is not None):
statistics[stat_name]["results"]["bins"]["snap_%d" % snap_nr] = bins
statistics[stat_name]["results"]["values"][
"snap_%d" % snap_nr
] = values
collect()
if save:
self._save_results("subfind", statistics)
else:
self.statistics = statistics
def filter_resolved_subfind_halos(
self, snapshot: read_hdf5.snapshot, nr_particles: int,
) -> read_hdf5.snapshot:
"""
Filter halos with '> nr_particles' particles
Args:
Return:
"""
min_mass = dm_particle_mass * nr_particles
mass = snapshot.cat["Group_M_Crit200"][:] * snapshot.header.hubble # [Msun/h]
idx_groups = mass > min_mass
mass = snapshot.cat["SubhaloMass"][:] * snapshot.header.hubble # [Msun/h]
idx_subhalos = mass > min_mass
# idx = snapshot.cat["GroupLenType"][:, 1] > nr_particles
# idx = snapshot.cat["Group_M_Crit200"][:] > \
# 100*(snapshot.header.massarr[1] * 1e10 / snapshot.header.hubble)
return self.filter_subfind_and_fof_halos(snapshot, idx_groups, idx_subhalos)
def filter_nonzero_subfind_halos_size(
self, snapshot: read_hdf5.snapshot,
) -> read_hdf5.snapshot:
"""
Filter halos with non-zero size
Args:
Return:
"""
rad = snapshot.cat["Group_R_Crit200"][:] # [ckpc/h]
idx_groups = rad > 0
rad = snapshot.cat["SubhaloHalfmassRad"][:] # [ckpc/h]
idx_subhalos = rad > 0
return self.filter_subfind_and_fof_halos(snapshot, idx_groups, idx_subhalos)
def filter_subfind_and_fof_halos(
self,
snapshot: read_hdf5.snapshot,
idx_groups: np.ndarray,
idx_subhalos: np.ndarray,
) -> read_hdf5.snapshot:
""" Filter sub- and fof-halos by indices """
for key, value in snapshot.cat.items():
if "Group" in key:
idx = idx_groups
elif ("Subhalo" in key) and (len(snapshot.cat[key]) > len(idx_groups)):
idx = idx_subhalos
else:
                HalosWarning(f"The key {key} is a problem")
continue
if len(value.shape) == 0:
continue
elif len(value.shape) == 1:
snapshot.cat.update({key: value[idx]})
elif len(value.shape) == 2:
snapshot.cat.update({key: value[idx, :]})
else:
raise HalosWarning(
f"The group data {key} has weird dimensions: {value.shape}."
)
return snapshot
#def get_subfind_tpcf(
# self,
# subfind_type: str,
# config: dict,
# save: bool = True,
#) -> None:
# """
# Compute real- and redshift-space TPCF for halos. This computation is
# done using halotools.
# https://halotools.readthedocs.io/en/latest/index.html
# Args:
# subfind_type: ["Group", "Subhalo"]
# config:
# save:
    #         whether to save results to file.
# """
# tpcf = {}
# for l in config["multipoles"]:
# tpcf[str(l)] = {}
# multipoles = config["multipoles"]
# del config["multipoles"]
# for snap_nr in self.sim.dir_nrs:
# snapshot = self.get_subfind_halo_data(snap_nr)
#
# if snapshot is None:
# print(f"No sub- & halos found for snapshot {snap_nr}")
# continue
# snapshot = self.filter_resolved_subfind_halos(snapshot, 100)
#
# if subfind_type == "group":
# halo_pos = snapshot.cat["GroupPos"][:] * \
# snapshot.header.hubble / 1e3 #[Mpc/h]
# scale_factor = 1 / (1 + snapshot.header.redshift)
# print("test a -------", scale_factor)
# halo_vel = snapshot.cat["GroupVel"][:] / scale_factor #[km/s]
# if subfind_type == "subhalo":
# halo_pos = snapshot.cat["SubhaloPos"][:] * \
# snapshot.header.hubble / 1e3 #[Mpc/h]
# halo_vel = snapshot.cat["SubhaloVel"][:] #[km/s]
# s_bins, mu_range, tpcf_s= TPCF.compute(
# pos=halo_pos,
# vel=halo_vel,
# **config,
# multipole=l,
# )
# for l in multipoles:
# _tpcf = tpcf_multipole(tpcf_s, mu_range, order=l)
# tpcf[str(l)]["snap_%d" % snap_nr] = _tpcf
# print(l, "!!!!!!!!!!!! snap_%d" % snap_nr, _tpcf)
#
# tpcf["s_bins"] = s_bins
# if save:
# IO.save_tpcf(
# self.sim.dirs['out'],
# config,
# multipoles,
# "subfind",
# "_"+subfind_type,
# tpcf,
# )
# else:
# self.tpcf = tpcf
def get_rockstar_stats(
self,
config_file: str = default_halo_stats_config,
snap_nrs: Optional[List[int]] = None,
save: bool = True,
):
"""
Compute statistics of halos identified with Rockstar from one or a
collection of simulations.
rockstar:
https://bitbucket.org/gfcstanford/rockstar/src/main/
https://github.com/yt-project/rockstar
https://www.cosmosim.org/cms/documentation/database-structure/tables/rockstar/
Args:
config_file:
                file pointer which contains info on what statistics to
compute and their settings.
save:
                whether to save results to file.
"""
# load settings (stg)
with open(config_file) as f:
statistics = yaml.load(f, Loader=yaml.FullLoader)
for name in statistics.keys():
statistics[name]["results"] = {"bins": {}, "values": {}}
# load particles/utils/stats.py package for dynamic function call
module = import_module("astrild.particles.hutils")
# sort statistics according to required halo resolutions
stat_names_ord = self._sort_statistics(statistics)
if snap_nrs is None:
snap_nrs = self.sim.dir_nrs
for snap_nr in snap_nrs:
snapshot = self.get_rockstar_halo_data(
self.sim.files["halos"][str(snap_nr)]
)
if len(snapshot.index.values) == 0:
print(f"No sub- & halos found for snapshot {snap_nr}")
continue
resolution = 0
for stat_name in stat_names_ord:
if statistics[stat_name]["resolution"] != resolution:
resolution = int(statistics[stat_name]["resolution"])
snapshot = self.filter_resolved_rockstar_halos(
snapshot, resolution
)
print(f" Compute {stat_name}")
clas = getattr(module, "Rockstar")
fct = getattr(clas, stat_name)
if stat_name != "histograms":
bins, values = fct(snapshot, **statistics[stat_name]["args"])
if (bins is not None) and (values is not None):
statistics[stat_name]["results"]["bins"]["snap_%d" % snap_nr] = bins
statistics[stat_name]["results"]["values"][
"snap_%d" % snap_nr
] = values
else:
hist = fct(snapshot, **statistics[stat_name]["args"])
statistics[stat_name]["results"]["values"]["snap_%d" % snap_nr] = hist
if save:
self._save_results("rockstar", statistics)
else:
self.statistics = statistics
#def get_rockstar_tpcf(
# self,
# config: dict,
# snap_nrs: Optional[List[int]] = None,
# save: bool = True,
#) -> None:
# """
# Compute real- and redshift-space TPCF for halos. This computation is
# done using halotools.
# https://halotools.readthedocs.io/en/latest/index.html
# Args:
# config:
# save:
    #         whether to save results to file.
# """
# tpcf = {}
# for l in config["multipoles"]:
# tpcf[str(l)] = {}
# multipoles = config["multipoles"]
# del config["multipoles"]
#
# if snap_nrs is None:
# snap_nrs = self.sim.dir_nrs
# for snap_nr in snap_nrs:
# snapshot = self.get_rockstar_halo_data(
# self.sim.files["halos"][str(snap_nr)]
# )
#
# if snapshot is None:
# print(f"No sub- & halos found for snapshot {snap_nr}")
# continue
# snapshot = self.filter_resolved_rockstar_halos(snapshot, 100)
#
# halo_pos = snapshot[["x", "y", "z"]].values #[Mpc/h]
# halo_vel = snapshot[["vx", "vy", "vz"]].values #[km/s]
# s_bins, mu_range, tpcf_s= TPCF.compute(
# pos=halo_pos,
# vel=halo_vel,
# **config,
# )
# for l in multipoles:
# _tpcf = tpcf_multipole(tpcf_s, mu_range, order=l)
# tpcf[str(l)]["snap_%d" % snap_nr] = _tpcf
#
# tpcf["s_bins"] = s_bins
# if save:
# IO.save_tpcf(
# self.sim.dirs['out'],
# config,
# multipoles,
# "rockstar",
# "",
# tpcf,
# )
# else:
# self.tpcf = tpcf
def filter_resolved_rockstar_halos(
self, snapshot: pd.DataFrame, nr_particles: int,
) -> pd.DataFrame:
"""
Filter halos with '> nr_particles' particles
"""
min_mass = dm_particle_mass * nr_particles
return snapshot[snapshot["m200c"] > min_mass]
def _sort_statistics(self, statistics: dict) -> List[str]:
"""
Sort statistics by their required particle resolution
(low -to-> high).
"""
resolutions = np.zeros(len(list(statistics.keys())))
for idx, (_, stg) in enumerate(statistics.items()):
resolutions[idx] = int(stg["resolution"])
idxs = np.argsort(resolutions)
return [list(statistics.keys())[idx] for idx in idxs]
def _save_results(self, halofinder: str, methods: dict):
"""
Save results of each statistic of each simulations snapshot
for Rockstar and SubFind.
"""
for method, stg in methods.items():
if method != "histograms":
columns = list(stg["results"]["bins"].keys())
if len(self.sim.dir_nrs) > 1:
assert np.sum(stg["results"]["bins"][columns[0]]) == np.sum(
stg["results"]["bins"][columns[1]]
)
df = pd.DataFrame(
data=stg["results"]["values"], index=stg["results"]["bins"][columns[0]],
)
if "seperate" in list(stg["args"].keys()):
compare = np.sum(stg["args"]["seperate"]["compare"])
if compare == 2:
compare = "11"
if compare == 3:
compare = "12"
if compare == 4:
compare = "22"
else:
compare = "00"
file_out = f"{self.sim.dirs['out']}{halofinder}_{method}_{compare}.h5"
if os.path.exists(file_out):
os.remove(file_out)
print(f"Saving results to -> {file_out}")
df.to_hdf(file_out, key="df", mode="w")
else:
for snap_nr, stg_in_snap in stg["results"]["values"].items():
data = np.asarray(list(stg_in_snap.values())).T
columns = list(stg_in_snap.keys())
df = pd.DataFrame(data=data, columns=columns)
file_out = f"{self.sim.dirs['out']}{halofinder}_{method}" + \
"_{snap_nr}.h5"
if os.path.exists(file_out):
os.remove(file_out)
print(f"Saving results to -> {file_out}")
df.to_hdf(file_out, key="df", mode="w")
def _create_filename(self, file_in: str, quantity: str):
""" Create file-name for merged snapshots"""
quantity = quantity.replace("_", "")
file_out = file_in.split("/")[-1].replace("Ray", quantity)
file_out = file_out.replace(".h5", "_lt.fits")
if ("_lc" not in file_in) or ("zrange" not in file_in):
file_out = file_out.split("_")
box_string = [string for string in file_in.split("/") if "box" in string][0]
idx, string = [
(idx, "%s_" % box_string + string)
for idx, string in enumerate(file_out)
if "output" in string
][0]
file_out[idx] = string
file_out = "_".join(file_out)
return self.sim.dirs["out"] + file_out
@staticmethod
def get_nearest_neighbours(
df: pd.DataFrame,
target_id: int,
dmax: Optional[int] = None,
extent: Optional[int] = None,
) -> tuple:
"""
Args:
df: halo DataFrame
target_id: object id for which to find NNs
dmax: maximal distance between objects
Return:
indices and distances
"""
pos = df[["theta1_deg", "theta2_deg"]].values
pos_i = df[df["id"] == target_id][["theta1_deg", "theta2_deg"]].values
if dmax is None:
dmax = df[df["id"] == target_id]["r200_deg"].values
if extent is not None:
dmax *= extent
if len(pos_i.shape) == 1:
pos_i = pos_i[np.newaxis, :]
btree = BallTree(pos)
pairs = btree.query_radius(pos_i, dmax, return_distance=True,)
return pairs[0][0], pairs[1][0]
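# Illustrative usage sketch (not part of the class above): the catalogue path,
# the target id and the column names are hypothetical placeholders; the loaded
# DataFrame is assumed to carry the columns used by the methods called here
# ("m200c" for the resolution cut, "id"/"theta1_deg"/"theta2_deg"/"r200_deg"
# for the neighbour search).
if __name__ == "__main__":
    halos = Halos.from_file("/path/to/halo_catalogue.h5")
    resolved = halos.filter_resolved_rockstar_halos(halos.data, nr_particles=100)
    idx, dist = Halos.get_nearest_neighbours(resolved, target_id=42, extent=3)
    print(len(idx), dist)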
| 34.853492
| 92
| 0.525783
| 19,768
| 0.966225
| 0
| 0
| 3,371
| 0.164769
| 0
| 0
| 8,362
| 0.40872
|
3cfb5d1a0f1982dc0361736334993c9728647d4a
| 367
|
py
|
Python
|
webapi.py
|
Netherdrake/steemdata-webapi
|
02b443b6e7292577dfcca1a7fcc55329b1b70fb9
|
[
"MIT"
] | 1
|
2017-04-20T04:22:07.000Z
|
2017-04-20T04:22:07.000Z
|
webapi.py
|
Netherdrake/steemdata-webapi
|
02b443b6e7292577dfcca1a7fcc55329b1b70fb9
|
[
"MIT"
] | 1
|
2017-06-07T13:08:32.000Z
|
2017-06-07T13:08:32.000Z
|
webapi.py
|
Netherdrake/steemdata-webapi
|
02b443b6e7292577dfcca1a7fcc55329b1b70fb9
|
[
"MIT"
] | null | null | null |
import os
from eve import Eve
from eve_docs import eve_docs
from flask_bootstrap import Bootstrap
# init Eve
app = Eve(settings='settings.py')
# init Eve-Docs
Bootstrap(app)
app.register_blueprint(eve_docs, url_prefix='/docs')
if __name__ == '__main__':
app.run(host=os.getenv('FLASK_HOST', '127.0.0.1'),
debug=not os.getenv('PRODUCTION', False))
| 21.588235
| 54
| 0.719346
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.245232
|
3cfcd1fb4a8c9717754df6618804de4a66eaa349
| 5,475
|
py
|
Python
|
notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py
|
hassanobeid1994/tr_b_causal_2020
|
1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5
|
[
"MIT"
] | null | null | null |
notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py
|
hassanobeid1994/tr_b_causal_2020
|
1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5
|
[
"MIT"
] | 89
|
2020-02-10T02:52:11.000Z
|
2020-06-23T03:50:27.000Z
|
notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py
|
hassan-obeid/tr_b_causal_2020
|
1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5
|
[
"MIT"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Purpose
# The point of this notebook is to demonstrate how to perform at least one type of visual, marginal independence test.
#
# In particular, the notebook will show how to visually test the following implication<br>
# $
# \begin{aligned}
# P \left( X_1 \mid X_2 \right) &= P \left( X_1 \right) \\
# \int x_1 P \left( X_1 \mid X_2 \right) \partial{x_1} &= \int x_1 P \left( X_1 \right) \partial{x_1} \\
# E \left[ X_1 \mid X_2 \right] &= E \left[ X_1 \right]
# \end{aligned}
# $
#
# In other words, if $X_1$ is marginally independent of $X_2$, then the expectation of $X_1$ conditional on $X_2$ is equal to the marginal expectation of $X_1$. This implies that shuffling / permuting the $X_2$ columns should make no difference to predicting $X_1$, beyond predicting the mean of $X_1$.
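# As a purely illustrative sketch of this implication (independent of the
# project's `visual_permutation_test`), the cell below compares the error of a
# binned conditional-mean prediction of $X_1$ from $X_2$ with the same
# prediction computed from a shuffled copy of $X_2$, on simulated data: under
# independence the two errors are essentially equal, under dependence they are not.
# +
import numpy as np  # also imported below; repeated so this sketch is self-contained
_rng = np.random.RandomState(0)
_x2 = _rng.normal(size=2000)
_x1_indep = _rng.normal(size=2000)  # independent of _x2 by construction
_x1_dep = _x2 + _rng.normal(size=2000)  # dependent on _x2 by construction
def _binned_mean_mse(x1, x2, num_bins=20):
    """MSE of predicting x1 by the mean of x1 within quantile bins of x2."""
    edges = np.quantile(x2, np.linspace(0, 1, num_bins + 1)[1:-1])
    bin_ids = np.digitize(x2, edges)
    preds = np.empty_like(x1)
    for b in np.unique(bin_ids):
        preds[bin_ids == b] = x1[bin_ids == b].mean()
    return np.mean((x1 - preds) ** 2)
# Under independence, shuffling x2 barely changes the prediction error ...
print(_binned_mean_mse(_x1_indep, _x2), _binned_mean_mse(_x1_indep, _rng.permutation(_x2)))
# ... while under dependence the shuffled copy is clearly worse.
print(_binned_mean_mse(_x1_dep, _x2), _binned_mean_mse(_x1_dep, _rng.permutation(_x2)))
# -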
# +
# Declare hyperparameters for testing
NUM_PERMUTATIONS = 100
# Declare the columns to be used for testing
x1_col = "num_licensed_drivers"
x2_col = "num_cars"
mode_id_col = "mode_id"
# Set the colors for plotting
permuted_color = "#a6bddb"
# Declare paths to data
DATA_PATH = "../../data/raw/spring_2016_all_bay_area_long_format_plus_cross_bay_col.csv"
# +
import sys # noqa: E402
import matplotlib.pyplot as plt # noqa: E402
import numpy as np # noqa: E402
import pandas as pd # noqa: E402
import seaborn as sbn # noqa: E402
from scipy.stats import multinomial # noqa: E402
from tqdm.notebook import tqdm # noqa: E402
# %matplotlib inline
sys.path.insert(0, "../../src/")
import testing.observable_independence as oi # noqa: E402
# -
# Load the raw data
df = pd.read_csv(DATA_PATH)
# +
title_str = "{} vs {}"
print(title_str.format(x1_col, x2_col))
drive_alone_filter = df[mode_id_col] == 1
license_array = df.loc[drive_alone_filter, x1_col].values
num_cars_array = df.loc[drive_alone_filter, x2_col].values
oi.visual_permutation_test(
license_array,
num_cars_array,
z_array=None,
seed=1038,
num_permutations=NUM_PERMUTATIONS,
permutation_color=permuted_color,
)
# -
# ## Test `visual_permutation_test`
# +
# Figure out how many observations to simulate, based on real data
num_drive_alone_obs = (df.mode_id == 1).sum()
# Determine how many simulations to carry out
NUM_TEST_SIM = 200
# Initialize an array to store the simulated p-values
test_p_vals = np.empty((NUM_TEST_SIM,), dtype=float)
# Set a random seed for reproducibility
np.random.seed(340)
# Compute the p-values of the visual permutation test when the
# null-hypothesis is true.
for i in tqdm(range(NUM_TEST_SIM)):
# Simulate data that, by construction, satisfies x2 indep x1
sim_x1 = 0.2 + 0.5 * np.random.normal(size=num_drive_alone_obs)
sim_x2 = -0.1 - 0.01 * np.random.uniform(size=num_drive_alone_obs)
# Determine which simulations to plot.
# Just plot 1 simulation for visual comparison with real data
current_close = True if i != 0 else False
# Carry out the permutation test
current_p = oi.visual_permutation_test(
sim_x1,
sim_x2,
z_array=None,
seed=None,
progress=False,
verbose=False,
show=False,
close=current_close,
)
# Store the resulting p-values
test_p_vals[i] = current_p
# +
# Create a reference distribution of p-values that is, by construction, uniformly distributed
null_histogram_dist = multinomial(NUM_TEST_SIM, [0.1 for x in range(10)])
null_hist_samples = null_histogram_dist.rvs(100)
null_hist_mean = null_histogram_dist.mean()
null_hist_upper_bound = np.percentile(null_hist_samples, 95, axis=0)
null_hist_lower_bound = np.percentile(null_hist_samples, 5, axis=0)
# Plot the distribution of our test p-values versus the p-values from
# a uniform distribution
fig, ax = plt.subplots(figsize=(10, 6))
plot_categories = [0.05 + 0.1 * x for x in range(10)]
ax.fill_between(
plot_categories,
null_hist_upper_bound,
null_hist_lower_bound,
color=permuted_color,
label="Null 95% Distribution",
alpha=0.5,
zorder=2,
)
ax.hlines(null_hist_mean, 0, 1, label="Null Mean")
ax.hist(test_p_vals, bins=10, label="Observed", zorder=0)
ax.scatter(
plot_categories,
null_hist_upper_bound,
label="Null 95% Upper Bound",
color=permuted_color,
marker="+",
zorder=1,
)
ax.scatter(
plot_categories,
null_hist_lower_bound,
label="Null 5% Lower Bound",
color=permuted_color,
marker="*",
zorder=1,
)
ax.legend(loc=(1.05, 0.75))
ax.set_xlabel("p-values", fontsize=13)
ax.set_ylabel("Num Observations", rotation=0, labelpad=70, fontsize=13)
sbn.despine()
fig.show()
# -
# ## Conclusions
# - From the last plot, we can see that under the null hypothesis of $X_1$ independent of $X_2$, we get p-values that are close to uniformly distributed.<br>
# This means the permutation p-values in `visual_permutation_test` are unlikely to be overly-optimistic.<br>
# In other words, we can feel safe(r) about relying on this test to distinguish conditional dependence from independence.
# - From the first two plots of this notebook, we can see from applying the `visual_permutation_test` that the number of licensed drivers per household and number of automobiles per household are not marginally independent.
| 30.586592
| 302
| 0.715982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,957
| 0.540091
|
3cfd1eff7aa3274bf5ba215dcc74c84bcd761113
| 1,799
|
py
|
Python
|
Labs/Lab-4.0 WiFi/5_wifi_logging.py
|
Josverl/MicroPython-Bootcamp
|
29f5ccc9768fbea621029dcf6eea9c91ff84c1d5
|
[
"MIT"
] | 4
|
2018-04-28T13:43:20.000Z
|
2021-03-11T16:10:35.000Z
|
Labs/Lab-4.0 WiFi/5_wifi_logging.py
|
Josverl/MicroPython-Bootcamp
|
29f5ccc9768fbea621029dcf6eea9c91ff84c1d5
|
[
"MIT"
] | null | null | null |
Labs/Lab-4.0 WiFi/5_wifi_logging.py
|
Josverl/MicroPython-Bootcamp
|
29f5ccc9768fbea621029dcf6eea9c91ff84c1d5
|
[
"MIT"
] | null | null | null |
# import the network module
# This module provides access to various network related functions and classes.
# https://github.com/loboris/MicroPython_ESP32_psRAM_LoBo/wiki/network
import network,utime #pylint: disable=import-error
# ----------------------------------------------------------
# Define callback function used for monitoring wifi activity
# ----------------------------------------------------------
'''
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
'''
def wifi_cb(info):
_red = "\033[31m"
_cyan= "\033[36m"
_norm = "\033[00m"
if (info[2]):
msg = ", info: {}".format(info[2])
else:
msg = ""
print(_cyan+"I [WiFi] event: {} ({}){}".format( info[0], info[1], msg)+_norm)
# Enable callbacks
network.WLANcallback(wifi_cb)
# ----------------------------------------------------------
# create station interface - Standard WiFi client
wlan = network.WLAN(network.STA_IF)
wlan.active(False)
# activate the interface
wlan.active(True)
# connect to a known WiFi
wlan.connect('IOTBOOTCAMP', 'MicroPython')
# Note that this may take some time, so we need to wait
# Wait 5 sec or until connected
tmo = 50
while not wlan.isconnected():
utime.sleep_ms(100)
tmo -= 1
if tmo == 0:
break
# check if the station is connected to an AP
if wlan.isconnected():
print("=== Station Connected to WiFi \n")
else:
print("!!! Not able to connect to WiFi")
# gets or sets the interface's IP/netmask/gw/DNS addresses
# 'Raw'
print( wlan.ifconfig() )
#pretty
c = wlan.ifconfig()
print("IP:{0}, Network mask:{1}, Router:{2}, DNS: {3}".format( *c ))
| 24.986111
| 81
| 0.568093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,187
| 0.659811
|
3cfd92551f129b14e3271b5e4699d932dae50065
| 681
|
py
|
Python
|
medium/1282.py
|
nkwib/leetcode
|
73f7492ba208417d8bf8340b6bf9dc68a6ded7f7
|
[
"MIT"
] | null | null | null |
medium/1282.py
|
nkwib/leetcode
|
73f7492ba208417d8bf8340b6bf9dc68a6ded7f7
|
[
"MIT"
] | null | null | null |
medium/1282.py
|
nkwib/leetcode
|
73f7492ba208417d8bf8340b6bf9dc68a6ded7f7
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
def slice_per(source, step):
for i in range(0, len(source), step):
yield source[i:i + step]
groups = {}
res = []
for index, person in enumerate(groupSizes, start=0):
if person in groups.keys(): groups[person].append(index)
else: groups[person] = [index]
for k in groups.keys():
group = list(slice_per(groups[k], k))
res.extend(group)
return res
groupSizes = [3,3,3,3,4,4,2,2,4,3,4,3,1]
print(Solution().groupThePeople(groupSizes))
| 32.428571
| 71
| 0.565345
| 567
| 0.832599
| 547
| 0.803231
| 0
| 0
| 0
| 0
| 0
| 0
|
3cff24ff2a3befb7112dd8c73ae11e32acd5099b
| 1,576
|
py
|
Python
|
Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py
|
gilnribeiro/Work-Project
|
15ad906ef5e757daed1df9c7547e5703ad496930
|
[
"MIT"
] | 1
|
2022-01-31T11:31:04.000Z
|
2022-01-31T11:31:04.000Z
|
Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py
|
gilnribeiro/Work-Project
|
15ad906ef5e757daed1df9c7547e5703ad496930
|
[
"MIT"
] | null | null | null |
Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py
|
gilnribeiro/Work-Project
|
15ad906ef5e757daed1df9c7547e5703ad496930
|
[
"MIT"
] | null | null | null |
# Import spiders
from .spiders.bons_empregos import BonsEmpregosSpider
from .spiders.cargadetrabalhos import CargaDeTrabalhosSpider
from .spiders.emprego_org import EmpregoOrgSpider
from .spiders.emprego_xl import EmpregoXlSpider
from .spiders.net_empregos import NetEmpregosSpider
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
# Make sure to be in the Data Collection directory
FOLDER_PATH = "/Users/gilnr/OneDrive - NOVASBE/Work Project/Code/Data/"
def main():
configure_logging()
settings = get_project_settings()
settings.set('FEED_FORMAT', 'jsonlines')
# settings.set('FEED_URI', 'result.json')
runner = CrawlerRunner(settings)
@defer.inlineCallbacks
def crawl():
settings.set('FEED_URI', FOLDER_PATH + "BonsEmpregos.json")
yield runner.crawl(BonsEmpregosSpider)
settings.set('FEED_URI', FOLDER_PATH + "CargaDeTrabalhos.json")
yield runner.crawl(CargaDeTrabalhosSpider)
settings.set('FEED_URI', FOLDER_PATH + "EmpregoOrg.json")
yield runner.crawl(EmpregoOrgSpider)
settings.set('FEED_URI', FOLDER_PATH + "EmpregoXl.json")
yield runner.crawl(EmpregoXlSpider)
settings.set('FEED_URI', FOLDER_PATH + "NetEmpregos.json")
yield runner.crawl(NetEmpregosSpider)
reactor.stop()
crawl()
reactor.run() # the script will block here until the last crawl call is finished
if __name__ == '__main__':
main()
| 34.26087
| 84
| 0.741751
| 0
| 0
| 943
| 0.59835
| 638
| 0.404822
| 0
| 0
| 407
| 0.258249
|
a70095a05438f3493dabb7b856707d3589d2cc37
| 2,302
|
py
|
Python
|
sentiment/train/management/commands/train.py
|
mnvx/sentiment
|
b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c
|
[
"MIT"
] | null | null | null |
sentiment/train/management/commands/train.py
|
mnvx/sentiment
|
b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c
|
[
"MIT"
] | null | null | null |
sentiment/train/management/commands/train.py
|
mnvx/sentiment
|
b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c
|
[
"MIT"
] | null | null | null |
import configparser
import csv
from django.core.management.base import BaseCommand
import logging
import os
from ....common.catalog.sentiment_type import SentimentType
from ....common.catalog.source import Source
class Command(BaseCommand):
help = 'Train the sentiment classifier'
def add_arguments(self, parser):
parser.add_argument(
'type',
type=str,
help='Training data type',
choices=SentimentType.get_list()
)
parser.add_argument(
'--path',
type=str,
required=False,
help="Path to csv file with training data"
)
parser.add_argument(
'--source',
type=str,
required=False,
help="Source with training data",
choices=Source.get_list()
)
def handle(self, *args, **options):
if options['source'] is None and options['path'] is None:
            message = "Can't run training. Set the --path or --source option."
logging.warning(message)
self.stdout.write(self.style.WARNING(message))
return
if options['source'] is not None and options['path'] is not None:
            message = "Can't run training. Set only one of the --path or --source options."
logging.warning(message)
self.stdout.write(self.style.WARNING(message))
return
path = options['path']
if options['source'] is not None:
path = os.path.join(Source.get_path(options['source']), options['type'] + '.csv')
config_file = os.path.join(os.path.dirname(path), 'settings.ini')
config = configparser.ConfigParser()
config.read(config_file)
column_index = int(config['csv']['IndexOfColumnWithData'])
delimiter = config['csv']['Delimiter']
encoding = config['csv']['Encoding']
quote_char = config['csv']['QuoteChar']
with open(path, newline='', encoding=encoding) as csvfile:
reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quote_char)
for row in reader:
print(row[column_index])
return
self.stdout.write('path: %s' % path)
self.stdout.write(self.style.SUCCESS('Success'))
| 34.878788
| 93
| 0.591659
| 2,087
| 0.906603
| 0
| 0
| 0
| 0
| 0
| 0
| 426
| 0.185056
|
a7024ecc7fc28ff6673f46a13ae3e63f8ae5b339
| 114
|
py
|
Python
|
tests/demo/demoproject/urls.py
|
saxix/django-mb
|
3700c05b45854a28bd23368c4e4971ae54c18cad
|
[
"BSD-3-Clause"
] | 2
|
2017-03-20T12:26:02.000Z
|
2017-04-22T11:46:17.000Z
|
tests/demo/demoproject/urls.py
|
saxix/django-mb
|
3700c05b45854a28bd23368c4e4971ae54c18cad
|
[
"BSD-3-Clause"
] | null | null | null |
tests/demo/demoproject/urls.py
|
saxix/django-mb
|
3700c05b45854a28bd23368c4e4971ae54c18cad
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.contrib import admin
admin.autodiscover()
urlpatterns = (
)
| 12.666667
| 38
| 0.798246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a70361c3e3b8431100d15650b5da10d40acb287d
| 504
|
py
|
Python
|
appzoo/utils/log/__init__.py
|
streamlit-badge-bot/AppZoo
|
86547fdc5209fa137b0a6384d63e92f263c1e160
|
[
"MIT"
] | 5
|
2020-11-05T12:13:45.000Z
|
2021-11-19T12:26:49.000Z
|
appzoo/utils/log/__init__.py
|
streamlit-badge-bot/AppZoo
|
86547fdc5209fa137b0a6384d63e92f263c1e160
|
[
"MIT"
] | null | null | null |
appzoo/utils/log/__init__.py
|
streamlit-badge-bot/AppZoo
|
86547fdc5209fa137b0a6384d63e92f263c1e160
|
[
"MIT"
] | 3
|
2020-11-23T23:06:34.000Z
|
2021-04-18T02:12:40.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-App.
# @File : __init__.py
# @Time : 2019-12-10 17:24
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from loguru import logger
trace = logger.add('runtime_{time}.log', rotation="100 MB", retention='10 days')
logger.debug('this is a debug message')
if __name__ == '__main__':
@logger.catch()
def f():
1/0
return 1111
print(f())
| 21.913043
| 80
| 0.571429
| 0
| 0
| 0
| 0
| 60
| 0.119048
| 0
| 0
| 309
| 0.613095
|
a704ebb77dcf3890670eefaa40d9424024056adf
| 1,850
|
py
|
Python
|
beast/tools/run/helper_functions.py
|
galaxyumi/beast
|
f5ce89d73c88ce481b04fc31a8c099c9c19041fb
|
[
"BSD-3-Clause"
] | 21
|
2017-03-18T13:46:06.000Z
|
2022-02-21T16:02:10.000Z
|
beast/tools/run/helper_functions.py
|
galaxyumi/beast
|
f5ce89d73c88ce481b04fc31a8c099c9c19041fb
|
[
"BSD-3-Clause"
] | 673
|
2017-03-12T23:39:28.000Z
|
2022-03-17T14:07:38.000Z
|
beast/tools/run/helper_functions.py
|
galaxyumi/beast
|
f5ce89d73c88ce481b04fc31a8c099c9c19041fb
|
[
"BSD-3-Clause"
] | 36
|
2017-03-18T18:00:35.000Z
|
2021-09-22T06:35:55.000Z
|
# other imports
from multiprocessing import Pool
def subcatalog_fname(full_cat_fname, source_density, sub_source_density):
"""
Return the name of a sub-catalog
Parameters
----------
full_cat_fname : string
name of the photometry catalog
source_density : string
the current source density bin
sub_source_density : string
the current sub-file for the source density bin
Returns
-------
string
the file name of the sub-catalog
"""
return full_cat_fname.replace(
".fits",
"_SD{}_sub{}.fits".format(source_density.replace("_", "-"), sub_source_density),
)
def parallel_wrapper(function, arg_tuples, nprocs=1):
"""
A wrapper to automatically either run the function as-is or run it with parallel processes
Parameters
----------
function : function
the function to be evaluated
    arg_tuples : list of tuples
the input to the function (details of course depend on the function)
nprocs : int (default=1)
number of parallel processes (no parallelization if nprocs=1)
Returns
-------
nothing
"""
if nprocs > 1:
p = Pool(nprocs)
for r in p.starmap(function, arg_tuples):
print(r)
else:
for a in arg_tuples:
r = function(*a)
print(r)
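# Illustrative usage sketch, kept as a comment so importing this module stays
# side-effect free; the function and argument tuples below are hypothetical
# examples. With nprocs=1 the calls run serially, with nprocs>1 they are
# dispatched via multiprocessing.Pool.starmap, so the function must be
# picklable (i.e. defined at module level).
#
#     def describe(name, value):
#         return "{} = {}".format(name, value)
#
#     parallel_wrapper(describe, [("a", 1), ("b", 2)], nprocs=2)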
def get_modelsubgridfiles(subgrid_names_file):
"""
Read in the file that has the list of subgridded physicsmodel files
Parameters
----------
subgrid_names_file : string
name of the file with the list of names
Returns
-------
list of strings
the names of the subgridded physicsmodel files
"""
with open(subgrid_names_file, "r") as f:
modelsedgridfiles = f.read().split("\n")[:-1]
return modelsedgridfiles
| 21.511628
| 94
| 0.621081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,175
| 0.635135
|
a7054f9458e6b8299d380a912e48321581ca4d88
| 67
|
py
|
Python
|
patan/exceptions.py
|
tttlh/patan
|
d3e5cfec085e21f963204b5c07a85cf1f029560c
|
[
"MIT"
] | null | null | null |
patan/exceptions.py
|
tttlh/patan
|
d3e5cfec085e21f963204b5c07a85cf1f029560c
|
[
"MIT"
] | null | null | null |
patan/exceptions.py
|
tttlh/patan
|
d3e5cfec085e21f963204b5c07a85cf1f029560c
|
[
"MIT"
] | 1
|
2021-03-01T08:35:34.000Z
|
2021-03-01T08:35:34.000Z
|
# _*_ coding: utf-8 _*_
class IgnoreRequest(Exception):
pass
| 11.166667
| 31
| 0.671642
| 40
| 0.597015
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.343284
|
a70572ac4f62a9762d70dcd70a9fd3e4dc437ab3
| 2,621
|
py
|
Python
|
experiments/sparse_sparsity_fixed_results.py
|
Remi-Boutin/sparsebm
|
5979eafff99d59a3b6edac586ee5658529763402
|
[
"MIT"
] | 1
|
2021-09-22T23:25:25.000Z
|
2021-09-22T23:25:25.000Z
|
experiments/sparse_sparsity_fixed_results.py
|
Remi-Boutin/sparsebm
|
5979eafff99d59a3b6edac586ee5658529763402
|
[
"MIT"
] | null | null | null |
experiments/sparse_sparsity_fixed_results.py
|
Remi-Boutin/sparsebm
|
5979eafff99d59a3b6edac586ee5658529763402
|
[
"MIT"
] | 1
|
2021-09-08T13:25:15.000Z
|
2021-09-08T13:25:15.000Z
|
from matplotlib import rc
# rc("text", usetex=True)
import matplotlib
# font = {"size": 14}
# matplotlib.rc("font", **font)
import numpy as np
import matplotlib.pyplot as plt
import glob
import pickle
import time
import matplotlib.colors as mcolors
dataset_files = glob.glob("./experiments/results/sparsity_fixed/*.pkl")
from collections import defaultdict
time_results_sparse = defaultdict(list)
time_results_not_sparse = defaultdict(list)
cari_results_sparse = defaultdict(list)
cari_results_not_sparse = defaultdict(list)
e = 0.25
exponent = 5
connection_probabilities = (
np.array([[4 * e, e, e, e * 2], [e, e, e, e], [2 * e, e, 2 * e, 2 * e]])
/ 2 ** exponent
)
for file in dataset_files:
results = pickle.load(open(file, "rb"))
n1 = results["model"]["tau_1"].shape[0]
n2 = results["model"]["tau_2"].shape[0]
time_results_sparse[(n1, n2)].append(results["end_time"])
cari_results_sparse[(n1, n2)].append(results["co_ari"])
if results["end_time_not_sparse"]:
cari_results_not_sparse[(n1, n2)].append(results["co_ari_not_sparse"])
time_results_not_sparse[(n1, n2)].append(
results["end_time_not_sparse"]
)
xs = sorted(list(time_results_sparse.keys()), key=lambda x: x[0])
fig, ax = plt.subplots(1, 1, figsize=(7, 4))
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
xs_values = [a * a / 2 for a in np.array([a[0] for a in xs])]
ax.plot(
xs_values,
[np.median(time_results_sparse[x]) for x in xs],
marker="^",
markersize=7,
linewidth=0.5,
color=mcolors.TABLEAU_COLORS["tab:green"],
)
xs_value_not_sparse = [
a * a / 2
for a in np.array(
[a[0] for a in sorted(list(time_results_not_sparse.keys()))]
)
]
ax.plot(
xs_value_not_sparse,
[
np.median(time_results_not_sparse[x])
for x in sorted(list(time_results_not_sparse.keys()))
],
marker="*",
markersize=7,
linewidth=0.5,
color=mcolors.TABLEAU_COLORS["tab:blue"],
)
# ax.annotate(
# "OOM",
# (
# xs_value_not_sparse[-1],
# 20
# + np.median(
# time_results_not_sparse[
# sorted(list(time_results_not_sparse.keys()))[-1]
# ]
# ),
# ),
# color=mcolors.TABLEAU_COLORS["tab:blue"],
# )
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylabel("Execution time (sec.)")
ax.set_xlabel("Network size $(n_1 \cdot n_2)$")
# ax.ticklabel_format(style="sci", axis="x")
plt.show()
fig.savefig("experiments/results/sparsity_fixed.png")
print("Figure saved in " + "experiments/results/sparsity_fixed.png")
| 26.474747
| 78
| 0.649752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 768
| 0.293018
|
a70af31dd713880205073e138c1e10e6d9d8591d
| 4,236
|
py
|
Python
|
SerialController/Camera.py
|
Moi-poke/Poke-Controller-temp
|
b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606
|
[
"MIT"
] | 3
|
2021-04-23T06:30:36.000Z
|
2022-01-04T09:10:25.000Z
|
SerialController/Camera.py
|
Moi-poke/Poke-Controller-temp
|
b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606
|
[
"MIT"
] | 1
|
2022-01-04T06:33:11.000Z
|
2022-01-04T06:33:11.000Z
|
SerialController/Camera.py
|
Moi-poke/Poke-Controller-temp
|
b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606
|
[
"MIT"
] | 6
|
2021-10-03T05:42:50.000Z
|
2022-03-15T00:29:09.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import datetime
import os
import numpy as np
from logging import getLogger, DEBUG, NullHandler
def imwrite(filename, img, params=None):
_logger = getLogger(__name__)
_logger.addHandler(NullHandler())
_logger.setLevel(DEBUG)
_logger.propagate = True
try:
ext = os.path.splitext(filename)[1]
result, n = cv2.imencode(ext, img, params)
if result:
with open(filename, mode='w+b') as f:
n.tofile(f)
return True
else:
return False
except Exception as e:
print(e)
_logger.error(f"Image Write Error: {e}")
return False
class Camera:
def __init__(self, fps=45):
self.camera = None
self.capture_size = (1280, 720)
# self.capture_size = (1920, 1080)
self.capture_dir = "Captures"
self.fps = int(fps)
self._logger = getLogger(__name__)
self._logger.addHandler(NullHandler())
self._logger.setLevel(DEBUG)
self._logger.propagate = True
def openCamera(self, cameraId):
if self.camera is not None and self.camera.isOpened():
self._logger.debug("Camera is already opened")
self.destroy()
if os.name == 'nt':
self._logger.debug("NT OS")
self.camera = cv2.VideoCapture(cameraId, cv2.CAP_DSHOW)
# self.camera = cv2.VideoCapture(cameraId)
else:
self._logger.debug("Not NT OS")
self.camera = cv2.VideoCapture(cameraId)
if not self.camera.isOpened():
print("Camera ID " + str(cameraId) + " can't open.")
self._logger.error(f"Camera ID {cameraId} cannot open.")
return
print("Camera ID " + str(cameraId) + " opened successfully")
self._logger.debug(f"Camera ID {cameraId} opened successfully.")
# print(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH))
# self.camera.set(cv2.CAP_PROP_FPS, 60)
self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.capture_size[0])
self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.capture_size[1])
# self.camera.set(cv2.CAP_PROP_SETTINGS, 0)
def isOpened(self):
self._logger.debug("Camera is opened")
return self.camera.isOpened()
def readFrame(self):
_, self.image_bgr = self.camera.read()
return self.image_bgr
def saveCapture(self, filename=None, crop=None, crop_ax=None, img=None):
if crop_ax is None:
crop_ax = [0, 0, 1280, 720]
else:
pass
# print(crop_ax)
dt_now = datetime.datetime.now()
if filename is None or filename == "":
filename = dt_now.strftime('%Y-%m-%d_%H-%M-%S') + ".png"
else:
filename = filename + ".png"
if crop is None:
image = self.image_bgr
        elif crop == 1 or crop == "1":
image = self.image_bgr[
crop_ax[1]:crop_ax[3],
crop_ax[0]:crop_ax[2]
]
        elif crop == 2 or crop == "2":
image = self.image_bgr[
crop_ax[1]:crop_ax[1] + crop_ax[3],
crop_ax[0]:crop_ax[0] + crop_ax[2]
]
elif img is not None:
image = img
else:
image = self.image_bgr
if not os.path.exists(self.capture_dir):
os.makedirs(self.capture_dir)
self._logger.debug("Created Capture folder")
save_path = os.path.join(self.capture_dir, filename)
try:
imwrite(save_path, image)
self._logger.debug(f"Capture succeeded: {save_path}")
print('capture succeeded: ' + save_path)
except cv2.error as e:
print("Capture Failed")
self._logger.error(f"Capture Failed :{e}")
def destroy(self):
if self.camera is not None and self.camera.isOpened():
self.camera.release()
self.camera = None
self._logger.debug("Camera destroyed")
| 33.09375
| 77
| 0.553824
| 3,496
| 0.825307
| 0
| 0
| 0
| 0
| 0
| 0
| 696
| 0.164306
|
a70b86cdb095113c2f13cde684b541b11f3759d8
| 4,975
|
py
|
Python
|
my_tagger.py
|
jndevanshu/tagger
|
51181d3ac9b0959ba507ee0c06c28bed55b51c76
|
[
"Apache-2.0"
] | null | null | null |
my_tagger.py
|
jndevanshu/tagger
|
51181d3ac9b0959ba507ee0c06c28bed55b51c76
|
[
"Apache-2.0"
] | null | null | null |
my_tagger.py
|
jndevanshu/tagger
|
51181d3ac9b0959ba507ee0c06c28bed55b51c76
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import time
import codecs
import optparse
import sys
import json
import numpy as np
from my_loader import prepare_sentence
from utils import create_input, iobes_iob, iob_ranges, zero_digits
from model import Model
from ccg_nlpy.core.text_annotation import TextAnnotation
from ccg_nlpy.core.view import View
optparser = optparse.OptionParser()
optparser.add_option(
"-m", "--model", default="",
help="Model location"
)
optparser.add_option(
"-i", "--input", default="",
help="Input file location"
)
optparser.add_option(
"-o", "--output", default="",
help="Output file location"
)
optparser.add_option(
"-d", "--delimiter", default="__",
help="Delimiter to separate words from their tags"
)
optparser.add_option(
"--outputFormat", default="",
help="Output file format"
)
opts = optparser.parse_args()[0]
# Check parameters validity
assert opts.delimiter
assert os.path.isdir(opts.model)
# assert os.path.isfile(opts.input)
# Load existing model
print "Loading model..."
model = Model(model_path=opts.model)
parameters = model.parameters
l1_model = None
l1_f_eval = None
if 'l1_model' in parameters:
print("Building L1 model:")
parameters['l1_model'] = parameters['l1_model']
assert os.path.isdir(parameters['l1_model'])
l1_model = Model(model_path=parameters['l1_model'])
l1_parameters = l1_model.parameters
_, l1_f_eval = l1_model.build(training=False, **l1_parameters)
l1_model.reload()
print("Done building l1 model")
# Load reverse mappings
word_to_id, char_to_id, tag_to_id = [
{v: k for k, v in x.items()}
for x in [model.id_to_word, model.id_to_char, model.id_to_tag]
]
# Load the model
_, f_eval = model.build(training=False, **parameters)
model.reload()
# f_output = codecs.open(opts.output, 'w', 'utf-8')
start_time = time.time()
print 'Tagging...'
file_list = os.listdir(opts.input)
count = 0
for doc in file_list:
document = TextAnnotation(json_str=open(os.path.join(opts.input, doc)).read())
token_list = document.tokens
start = 0
view_as_json = {}
cons_list = []
if 'NER_CONLL' in document.view_dictionary:
del document.view_dictionary['NER_CONLL']
for sent_end_offset in document.sentences['sentenceEndPositions']:
words_ini = token_list[start:sent_end_offset]
line = " ".join(words_ini)
if line:
# Lowercase sentence
if parameters['lower']:
line = line.lower()
# Replace all digits with zeros
if parameters['zeros']:
line = zero_digits(line)
words = line.rstrip().split()
# Prepare input
sentence = prepare_sentence(words, word_to_id, char_to_id, l1_model=l1_model, l1_f_eval=l1_f_eval, lower=parameters['lower'])
print(sentence)
input = create_input(sentence, parameters, False)
# Decoding
try:
if parameters['crf']:
y_preds = np.array(f_eval(*input))[1:-1]
else:
y_preds = f_eval(*input).argmax(axis=1)
y_preds = [model.id_to_tag[y_pred] for y_pred in y_preds]
except Exception as e:
y_preds = ["O"] * len(words)
# Output tags in the IOB2 format
if parameters['tag_scheme'] == 'iobes':
y_preds = iobes_iob(y_preds)
# Write tags
assert len(y_preds) == len(words)
assert len(y_preds) == len(words_ini)
print(y_preds)
idx = 0
while idx < len(y_preds):
if y_preds[idx] == "O":
idx += 1
elif y_preds[idx].startswith("B-"):
curr_label = y_preds[idx][2:]
st = idx
idx += 1
while idx < len(y_preds) and y_preds[idx].startswith("I-"):
idx += 1
cons_list.append({'start': start + st, 'end': start + idx, 'score': 1.0, 'label': curr_label})
else:
y_preds[idx] = "B-" + y_preds[idx][2:]
print("something wrong....")
# sys.exit(1)
count += 1
start = sent_end_offset + 1
if count % 100 == 0:
print count
view_as_json['viewName'] = 'NER_CONLL'
view_as_json['viewData'] = [{'viewType': 'edu.illinois.cs.cogcomp.core.datastructures.textannotation.View', 'viewName': 'NER_CONLL', 'generator': 'my-lstm-crf-tagger', 'score': 1.0, 'constituents': cons_list}]
view_obj = View(view_as_json, document.get_tokens)
document.view_dictionary['NER_CONLL'] = view_obj
document_json = document.as_json
json.dump(document_json, open(os.path.join(opts.output, doc), "w"), indent=True)
print '---- %i lines tagged in %.4fs ----' % (count, time.time() - start_time)
# f_output.close()
| 30.521472
| 213
| 0.605427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,064
| 0.213869
|
a70d45fc226ab2dd59c5db64dd9ed218486ffae6
| 4,691
|
py
|
Python
|
Inkscape-OUTPUT-PRO-master/outputpro/cutmarks.py
|
ilnanny/Inkscape-addons
|
a30cdde2093fa2da68b90213e057519d0304433f
|
[
"X11"
] | 3
|
2019-03-08T23:32:29.000Z
|
2019-05-11T23:53:46.000Z
|
Inkscape-OUTPUT-PRO-master/outputpro/cutmarks.py
|
ilnanny/Inkscape-addons
|
a30cdde2093fa2da68b90213e057519d0304433f
|
[
"X11"
] | null | null | null |
Inkscape-OUTPUT-PRO-master/outputpro/cutmarks.py
|
ilnanny/Inkscape-addons
|
a30cdde2093fa2da68b90213e057519d0304433f
|
[
"X11"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess #re, subprocess, simplestyle, os#inkex, os, random, sys, subprocess, shutil
def generate_final_file(isvector, hide_inside_marks, colormode, width, height, space, strokewidth, bleedsize, marksize, temp_dir):
if not isvector:
command = []
final_command = ['convert']
for color in colormode:
command.append('convert')
command.append('-size')
command.append(str(sum(width) + (marksize*2) + (space * (len(width) -1))) + 'x' + str(sum(height) + (marksize*2) + (space * (len(height) -1))))
command.append('xc:white')
command.append('-stroke')
command.append('black')
command.append('-strokewidth')
command.append(str(strokewidth))
width_value = 0
number_of_column = 1
for column in width:
height_value = 0
number_of_line = 1
for line in height:
open('/tmp/str.txt', 'a').write(str(width.index(column)))
if not hide_inside_marks or (hide_inside_marks and number_of_column == 1):
command.append('-draw')
command.append('line ' + str(width_value + marksize) + ',' + str(height_value + marksize + bleedsize) + ', ' + str(width_value) + ',' + str(height_value + marksize + bleedsize))
command.append('-draw')
command.append('line ' + str(width_value + marksize) + ',' + str(height_value + line + marksize - bleedsize) + ', ' + str(width_value) + ',' + str(height_value + line + marksize - bleedsize))
if not hide_inside_marks or (hide_inside_marks and number_of_line == 1):
command.append('-draw')
command.append('line ' + str(width_value + marksize + bleedsize) + ',' + str(height_value + marksize) + ', ' + str(width_value + marksize + bleedsize) + ',' + str(height_value))
command.append('-draw')
command.append('line ' + str(width_value + column + marksize - bleedsize) + ',' + str(height_value + marksize) + ', ' + str(width_value + column + marksize - bleedsize) + ',' + str(height_value))
if not hide_inside_marks or (hide_inside_marks and number_of_column == len(width)):
command.append('-draw')
command.append('line ' + str(width_value + marksize + column) + ',' + str(height_value + marksize + bleedsize) + ', ' + str(width_value + (marksize*2) + column) + ',' + str(height_value + marksize + bleedsize))
command.append('-draw')
command.append('line ' + str(width_value + marksize + column) + ',' + str(height_value + line + marksize - bleedsize) + ', ' + str(width_value + (marksize*2) + column) + ',' + str(height_value + marksize + line - bleedsize))
if not hide_inside_marks or (hide_inside_marks and number_of_line == len(height)):
command.append('-draw')
command.append('line ' + str(width_value + marksize + bleedsize) + ',' + str(height_value + line + marksize) + ', ' + str(width_value + marksize + bleedsize) + ',' + str(height_value + line + (marksize*2)))
command.append('-draw')
command.append('line ' + str(width_value + column + marksize - bleedsize) + ',' + str(height_value + line + marksize) + ', ' + str(width_value + marksize + column - bleedsize) + ',' + str(height_value + line + (marksize*2)))
height_value += line + space
number_of_line += 1
width_value += column + space
number_of_column += 1
command.append(temp_dir + '/cut_mark_' + color + '.png')
subprocess.Popen(command).wait()
del command[:]
command.append('convert')
command.append(temp_dir + '/cut_mark_' + color + '.png')
command.append('-colorspace')
command.append(str(colormode).lower())
command.append('-channel')
command.append('K')
command.append('-separate')
command.append(temp_dir + '/cut_mark_' + color + '.png')
subprocess.Popen(command).wait()
del command[:]
final_command.append(temp_dir + '/cut_mark_' + color + '.png')
final_command.extend(['-set', 'colorspace', colormode, '-combine', temp_dir + '/cut_mark.tiff'])
subprocess.Popen(final_command).wait()
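# Illustrative usage sketch (not part of the extension itself): the sizes below
# are arbitrary pixel values, ImageMagick's `convert` must be available on the
# PATH, and the intermediate per-channel PNGs plus the combined cut_mark.tiff
# are written into temp_dir.
if __name__ == '__main__':
    generate_final_file(
        isvector=False, hide_inside_marks=False, colormode='CMYK',
        width=[600, 600], height=[400], space=20,
        strokewidth=1, bleedsize=10, marksize=30, temp_dir='/tmp')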
| 56.518072
| 248
| 0.5534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 558
| 0.118951
|
a70ebc7cdf0e76c3a3a02437342d60d6be4b5d1f
| 4,513
|
py
|
Python
|
test/test_cli.py
|
Datateer/upload-agent
|
4684bcf902d6c54baefb08446252a69612bf15a0
|
[
"MIT"
] | null | null | null |
test/test_cli.py
|
Datateer/upload-agent
|
4684bcf902d6c54baefb08446252a69612bf15a0
|
[
"MIT"
] | 2
|
2021-02-05T18:58:23.000Z
|
2021-02-14T15:23:46.000Z
|
test/test_cli.py
|
Datateer/upload-agent
|
4684bcf902d6c54baefb08446252a69612bf15a0
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from unittest.mock import patch
from click.testing import CliRunner
import pytest
from datateer.upload_agent.main import cli
from datateer.upload_agent.config import load_config, save_config, save_feed
import datateer.upload_agent.constants as constants
@pytest.fixture
def runner():
return CliRunner()
def test_command_config_upload_agent_handles_show_option(runner):
result = runner.invoke(cli, ['config', 'upload-agent', '--show'])
assert result.exit_code == 0
@patch('datateer.upload_agent.main.load_config')
def test_command_config_feed_handles_show_option(mock_load_config, config, runner):
mock_load_config.return_value = config
result = runner.invoke(cli, ['config', 'feed', '--show', 'SAMPLE-FEED-1'])
print(result.output)
assert result.exit_code == 0
@patch('datateer.upload_agent.main.load_config')
def test_command_config_feed_show_option_errors_if_not_exist(mock_load_config, config, runner):
mock_load_config.return_value = config
result = runner.invoke(cli, ['config', 'feed', '--show', 'NONEXISTENT-KEY'])
print(result.output)
assert result.exit_code == 1
assert 'Feed with key NONEXISTENT-KEY does not exist' in result.output
def test_command_upload_handles_feed_key_and_path_arguments(runner):
result = runner.invoke(cli, ['upload', 'FEED-KEY', 'PATH'])
print(result.output)
assert result.exit_code == 1
    assert 'Feed with key FEED-KEY does not exist' in result.output
@patch.dict('datateer.upload_agent.main.config', constants.SAMPLE_CONFIG, clear=True)
def test_config_upload_agent_prompts_show_defaults_if_config_exists(runner, config):
defaults = config
result = runner.invoke(cli, ['config', 'upload-agent'], input='CLIENT-CODE\nRAW-BUCKET\nACCESS-KEY\nACCESS-SECRET')
print(result.output)
assert result.exit_code == 0
assert f'Client code [{defaults["client-code"]}]: CLIENT-CODE' in result.output
assert f'Raw bucket name [{defaults["upload-agent"]["raw-bucket"]}]: RAW-BUCKET' in result.output
assert f'Access key [{defaults["upload-agent"]["access-key"]}]: ACCESS-KEY' in result.output
assert f'Access secret [{defaults["upload-agent"]["access-secret"]}]: ACCESS-SECRET' in result.output
@patch.dict('datateer.upload_agent.main.config', {'client-code': 'TEST-CLIENT-CODE'}, clear=True)
@patch('datateer.upload_agent.main.load_config')
def test_config_feed_prompts(mock_load_config, runner, config):
mock_load_config.return_value = config
result = runner.invoke(cli, ['config', 'feed'], input='PROVIDER\nSOURCE\nFEED\nFEED-KEY')
print(config)
print(result.output)
assert result.exit_code == 0
assert 'Provider [SAMPLE-CLIENT-CODE]: PROVIDER' in result.output
assert 'Source: SOURCE' in result.output
assert 'Feed: FEED' in result.output
assert 'Feed key [FEED]: FEED-KEY' in result.output
@patch.dict('datateer.upload_agent.main.config', {'client-code': 'MY-TEST-CLIENT-CODE'})
@patch('datateer.upload_agent.main.load_config')
def test_config_feed_provider_code_defaults_to_client_code(mock_load_config, config, runner):
mock_load_config.return_value = config
result = runner.invoke(cli, ['config', 'feed', '--source', 'SOURCE', '--feed', 'FEED'], input='\n\n')
assert f'Provider [{config["client-code"]}]:' in result.output
assert f'Provider [{config["client-code"]}]: {config["client-code"]}' not in result.output # assert user did not type in a value
def test_config_feed_key_defaults_to_feed_code(runner):
result = runner.invoke(cli, ['config', 'feed', '--provider', 'PROVIDER', '--source', 'SOURCE', '--feed', 'FEED'])
assert 'Feed key [FEED]:' in result.output
assert 'Feed key [FEED]: FEED' not in result.output # user did not type in a value
@patch.dict('datateer.upload_agent.main.config', constants.SAMPLE_CONFIG, clear=True)
@patch('datateer.upload_agent.main.load_config')
def test_config_feed_handles_existing_feed_key(mock_load_config, runner, config):
mock_load_config.return_value = config
print(config)
result = runner.invoke(cli, ['config', 'feed', '--update', 'SAMPLE-FEED-1'], input='test\ntest\ntest\ntest\n')
print(result.output)
assert result.exit_code == 0
assert f'Provider [{constants.SAMPLE_FEED["provider"]}]:' in result.output
assert f'Source [{constants.SAMPLE_FEED["source"]}]:' in result.output
assert f'Feed [{constants.SAMPLE_FEED["feed"]}]:' in result.output
def test_show_version(runner):
pytest.skip()
| 41.40367
| 132
| 0.734766
| 0
| 0
| 0
| 0
| 3,424
| 0.758697
| 0
| 0
| 1,642
| 0.363838
|
a70f8fbd9aef0f039b565e8b5e5bf81d26036760
| 14,899
|
py
|
Python
|
modron/characters.py
|
WardLT/play-by-post-helper
|
26df681f2a28510f88e552be628910e4e5fe57bb
|
[
"MIT"
] | null | null | null |
modron/characters.py
|
WardLT/play-by-post-helper
|
26df681f2a28510f88e552be628910e4e5fe57bb
|
[
"MIT"
] | 13
|
2020-04-08T02:56:58.000Z
|
2020-10-04T21:52:43.000Z
|
modron/characters.py
|
WardLT/play-by-post-helper
|
26df681f2a28510f88e552be628910e4e5fe57bb
|
[
"MIT"
] | null | null | null |
"""Saving and using information about characters"""
import json
import os
from enum import Enum
from typing import Dict, List, Optional, Tuple
import yaml
from pydantic import BaseModel, Field, validator
from modron.config import get_config
_config = get_config()
def _compute_mod(score: int) -> int:
"""Compute a mod given an ability score
Args:
score (int): Ability score
Returns:
(int) Modifier for that score
"""
return score // 2 - 5
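# Worked example of the formula above (standard 5e rounding): a score of 14
# gives 14 // 2 - 5 = +2, while a score of 9 gives 9 // 2 - 5 = -1.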
class Ability(str, Enum):
"""Character abilities"""
STR = 'strength'
DEX = 'dexterity'
CON = 'constitution'
INT = 'intelligence'
WIS = 'wisdom'
CHA = 'charisma'
@classmethod
def match(cls, name: str) -> 'Ability':
"""Match a name to known ability
Args:
name (str): Name to be matched
Returns:
(Ability) Standardized version of that name
"""
name = name.lower()
matched_abilities = [x for x in cls.__members__.values() if x.startswith(name)]
assert len(matched_abilities) == 1, f"Unrecognized ability: {name}"
return matched_abilities[0]
_5e_skills = {
'acrobatics': Ability.DEX, 'animal handling': Ability.WIS, 'arcana': Ability.INT, 'athletics': Ability.STR,
'deception': Ability.CHA, 'history': Ability.INT, 'insight': Ability.WIS, 'intimidation': Ability.CHA,
'investigation': Ability.INT, 'medicine': Ability.WIS, 'nature': Ability.INT, 'perception': Ability.WIS,
'performance': Ability.CHA, 'persuasion': Ability.CHA, 'religion': Ability.INT, 'sleight of hand': Ability.DEX,
'stealth': Ability.DEX, 'survival': Ability.WIS
}
class Alignment(str, Enum):
"""Possible alignments"""
LAWFUL_GOOD = 'lawful good'
GOOD = 'good'
CHAOTIC_GOOD = 'chaotic good'
LAWFUL_NEUTRAL = 'lawful'
NEUTRAL = 'neutral'
CHAOTIC_NEUTRAL = 'chaotic neutral'
LAWFUL_EVIL = 'lawful evil'
EVIL = 'evil'
CHAOTIC_EVIL = 'chaotic evil'
_class_hit_die = {
'artificer': 8, 'barbarian': 12, 'bard': 8, 'cleric': 8, 'druid': 8, 'fighter': 10, 'monk': 8, 'paladin': 10,
'ranger': 10, 'rogue': 8, 'sorcerer': 6, 'warlock': 8, 'wizard': 6
}
"""Hit die for each 5E class"""
class Character(BaseModel):
"""A D&D 5th edition character sheet, in Python form.
This object stores only the mechanics-related aspects of a character sheet
    that remain fixed between level-ups. For example, we store the hit point
    maximum but not the current hit points, and the skill list but not the languages."""
# Basic information about the character
name: str = Field(..., description='Name of the character')
player: str = Field(None, description='Slack user ID of the player')
classes: Dict[str, int] = Field(..., description='Levels in different classes')
background: str = Field(None, description='Character background')
race: str = Field(None, description='Race of the character')
alignment: Alignment = Field(..., description='Alignment for the character')
# Attributes
strength: int = Field(..., description='Physical strength of the character', ge=0)
dexterity: int = Field(..., description='Gracefulness of the character', ge=0)
constitution: int = Field(..., description='Resistance to physical adversity', ge=0)
intelligence: int = Field(..., description='Ability to apply knowledge and skills', ge=0)
wisdom: int = Field(..., description='Aptitude towards using knowledge to make good decisions', ge=0)
charisma: int = Field(..., description='Proficiency with bringing people to agreement with you', ge=0)
# Combat attributes
speed: int = Field(30, description='Speed in feet per round')
armor_class: int = Field(..., description='Resistance to physical attacks.') # Eventually make derived
current_hit_points: Optional[int] = Field(..., description='Current hit points. Does not include temporary', ge=0)
hit_points: int = Field(..., description='Maximum number of hit points', gt=0)
temporary_hit_points: int = Field(0, description='Amount of temporary hit points.', ge=0)
hit_points_adjustment: int = Field(0, description='Adjustments to the hit point maximum. '
'Can be positive or negative')
# Abilities
saving_throws: List[Ability] = Field(..., description='Saving throws for which the character is proficient')
custom_skills: Dict[str, Ability] = Field(dict(), description='Skills not included in 5e. '
'Dictionary of skill names and associated ability')
    proficiencies: List[str] = Field(..., description='Names of skills in which the character is proficient.')
expertise: List[str] = Field([], description='Skills in which the character is an expert')
@classmethod
def from_yaml(cls, path: str) -> 'Character':
"""Parse the character sheet from YAML
Args:
path: Path to the YAML file
"""
with open(path) as fp:
data = yaml.load(fp, yaml.SafeLoader)
return cls.parse_obj(data)
def to_yaml(self, path: str):
"""Save character sheet to a YAML file"""
with open(path, 'w') as fp:
data = json.loads(self.json())
yaml.dump(data, fp, indent=2)
# Validators for different fields
@validator('proficiencies', 'expertise', each_item=True)
def _val_lowercase(cls, v: str) -> str:
return v.lower()
@validator('custom_skills', 'classes')
def _val_dicts(cls, v: dict):
"""Makes keys for dictionaries """
return dict((k.lower(), v) for k, v in v.items())
# Derived quantities, such as modifiers
@property
def strength_mod(self) -> int:
return _compute_mod(self.strength)
@property
def dexterity_mod(self) -> int:
return _compute_mod(self.dexterity)
@property
def constitution_mod(self) -> int:
return _compute_mod(self.constitution)
@property
def intelligence_mod(self) -> int:
return _compute_mod(self.intelligence)
@property
def wisdom_mod(self) -> int:
return _compute_mod(self.wisdom)
@property
def charisma_mod(self) -> int:
return _compute_mod(self.charisma)
@property
def level(self) -> int:
return sum(self.classes.values())
@property
def proficiency_bonus(self) -> int:
return (self.level - 1) // 4 + 2
@property
def initiative(self) -> int:
return self.dexterity_mod
@property
def total_hit_points(self) -> int:
"""Current hit point amount, including temporary hit points"""
return self.current_hit_points + self.temporary_hit_points
@property
def current_hit_point_maximum(self) -> int:
"""Current hit point maximum"""
return self.hit_points + self.hit_points_adjustment
def heal(self, amount: int):
"""Heal the character by a certain amount
Args:
amount (int): Amount of healing
"""
assert amount >= 0, "Amount must be nonnegative"
if self.current_hit_points is None:
self.full_heal()
self.current_hit_points += amount
self.current_hit_points = min(self.current_hit_points, self.current_hit_point_maximum)
def harm(self, amount: int):
"""Apply damage to this character
Args:
amount (int): Amount of damage
"""
assert amount >= 0, "Damage must be nonnegative"
if self.current_hit_points is None:
self.full_heal()
# Damage hits the temporary first
amount_to_temp = min(self.temporary_hit_points, amount)
amount_to_base = amount - amount_to_temp
self.temporary_hit_points -= amount_to_temp
# Subtract off the remaining damage from the base hit points
self.current_hit_points -= amount_to_base
self.current_hit_points = max(0, self.current_hit_points)
def full_heal(self):
"""Heal character up to hit point maximum"""
self.current_hit_points = self.current_hit_point_maximum
def grant_temporary_hit_points(self, amount: int):
"""Grant temporary hit points
Args:
amount: Amount of HP to give to the character
"""
assert amount > 0, "Amount must be positive"
self.temporary_hit_points += amount
def remove_temporary_hit_points(self):
"""Remove all temporary hit points"""
self.temporary_hit_points = 0
def adjust_hit_point_maximum(self, amount: int):
"""Apply a change to the hit point maximum
Args:
amount: Amount to change the HP maximum
"""
self.hit_points_adjustment += amount
# Make sure the hit point maximum is zero or more
self.hit_points_adjustment = max(-self.hit_points, self.hit_points_adjustment)
# Make sure the hit points stays below the maximum
self.current_hit_points = min(
self.current_hit_point_maximum,
self.current_hit_points
)
def reset_hit_point_maximum(self):
"""Remove any adjustments to the hit point maximum"""
self.hit_points_adjustment = 0
def get_hit_die(self) -> Dict[str, int]:
"""Maximum hit die, computed based on class
Returns:
(dict) Where key is the hit die and value is the number
"""
output = {}
for cls, num in self.classes.items():
hit_die = f'd{_class_hit_die[cls]}'
if hit_die not in output:
output[hit_die] = num
else:
output[hit_die] += num
return output
# Skills and checks
def save_modifier(self, ability: str) -> int:
"""Get the modifier for a certain save type of save
Args:
ability (str): Ability to check. You can use the full name or
the first three letters. Not case-sensitive
Returns:
(int) Modifier for the roll
"""
# Get the modifier
mod = self.ability_modifier(ability)
# Match the name of the ability
matched_ability = Ability.match(ability)
# Add any proficiency bonus
if matched_ability.lower() in self.saving_throws:
mod += self.proficiency_bonus
return mod
def ability_modifier(self, ability: str) -> int:
"""Get the modifier for a certain ability
Args:
ability (str): Ability to check. You can use the full name or
the first three letters. Not case-sensitive
Returns:
(int) Modifier for the roll
"""
# Attempt to match the ability to the pre-defined list
ability = ability.lower()
matched_ability = Ability.match(ability)
# Look up the ability modifier
return getattr(self, f'{matched_ability}_mod')
def skill_modifier(self, name: str) -> int:
"""Get the skill modifier for a certain skill
First looks in custom skill list and then in the standard 5e skills.
In this way, you can define a character to use a non-standard ability
for a certain skill (as in how Monks can use Wisdom for many checks).
Args:
name (str): Name of the skill. Not case sensitive
"""
name_lower = name.lower()
# Determine which ability modifier to use
if name_lower in self.custom_skills:
ability = self.custom_skills[name_lower]
elif name_lower in _5e_skills:
ability = _5e_skills[name_lower]
else:
raise ValueError(f'Unrecognized skill: {name}')
mod = getattr(self, f'{ability}_mod')
# Add proficiency or expertise
if name_lower in self.expertise:
return mod + self.proficiency_bonus * 2
elif name_lower in self.proficiencies:
return mod + self.proficiency_bonus
else:
return mod
def lookup_modifier(self, check: str) -> int:
"""Get the modifier for certain roll
Args:
check (str): Description of which check to make
Returns:
(int) Modifier for the d20 roll
"""
# Make it all lowercase
check = check.lower()
words = check.split(" ")
# Save
if 'save' in words:
return self.save_modifier(words[0])
# Ability check
try:
return self.ability_modifier(check)
except AssertionError:
pass # and try something else
# Skill
return self.skill_modifier(check)
def get_skills_by_ability(self, ability: str) -> Dict[str, str]:
"""List out the skills for this character that use a certain base ability
Args:
ability: Name of the ability
Returns:
Dictionary of the skill mapped to the level of skill (expert, proficient, untrained)
"""
# Match the ability
matched_ability = Ability.match(ability)
# Loop over the 5e skills
matched_skills = [skill for skill, attr in _5e_skills.items() if attr == matched_ability]
# Match the custom skills
matched_skills.extend([
skill for skill, attr in self.custom_skills.items() if attr == matched_ability
])
# Return the outputs
output = {}
for skill in matched_skills:
if skill in self.proficiencies:
output[skill] = "proficient"
elif skill in self.expertise:
output[skill] = "expert"
else:
output[skill] = "untrained"
return output
def list_available_characters(team_id: str, user_id: str) -> List[str]:
"""List the names of character sheets that are available to a user
Args:
team_id (str): ID of the Slack workspace
user_id (str): ID of the user in question
Returns:
([str]): List of characters available to this player
"""
# Get all characters for this team
sheets = _config.list_character_sheets(team_id)
# Return only the sheets
return [
os.path.basename(s)[:-4] # Remove the ".yml"
for s in sheets
if Character.from_yaml(s).player == user_id
]
def load_character(team_id: str, name: str) -> Tuple[Character, str]:
"""Load a character sheet
    Args:
team_id (str): ID of the Slack workspace
name (str): Name of the character
Returns:
- (Character) Desired character sheet
- (str): Absolute path to the character sheet, in case you must save it later
"""
config = get_config()
sheet_path = config.get_character_sheet_path(team_id, name)
return Character.from_yaml(sheet_path), os.path.abspath(sheet_path)
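A minimal usage sketch for the character sheet model above. The character's stats are invented for illustration, and the snippet only assumes the module's own dependencies (pydantic, PyYAML) are installed:
# Hypothetical example character; every value below is made up for illustration.
tessa = Character(
    name='Tessa', classes={'fighter': 3}, alignment='lawful good',
    strength=16, dexterity=12, constitution=14,
    intelligence=10, wisdom=11, charisma=8,
    armor_class=17, current_hit_points=28, hit_points=28,
    saving_throws=['strength', 'constitution'],
    proficiencies=['athletics', 'intimidation'],
)
assert tessa.strength_mod == 3           # (16 // 2) - 5
assert tessa.proficiency_bonus == 2      # total level 3
# Skill checks combine the governing ability modifier with any proficiency bonus
assert tessa.skill_modifier('Athletics') == tessa.strength_mod + tessa.proficiency_bonus
tessa.harm(10)                           # damage; no temporary hit points to absorb it
assert tessa.total_hit_points == 18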
| 34.093822
| 118
| 0.627626
| 12,527
| 0.840795
| 0
| 0
| 2,186
| 0.146721
| 0
| 0
| 6,162
| 0.413585
|
a7101a610a52017f13a5fe2d6d32d405867f9aef
| 1,558
|
py
|
Python
|
setup.py
|
Borsos/rubik
|
af220a142b81a8f5b5011e4e072be9e3d130e827
|
[
"Apache-2.0"
] | 1
|
2019-11-13T00:44:09.000Z
|
2019-11-13T00:44:09.000Z
|
setup.py
|
Borsos/rubik
|
af220a142b81a8f5b5011e4e072be9e3d130e827
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Borsos/rubik
|
af220a142b81a8f5b5011e4e072be9e3d130e827
|
[
"Apache-2.0"
] | 1
|
2019-11-13T00:47:16.000Z
|
2019-11-13T00:47:16.000Z
|
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
from distutils.core import setup
import os
import sys
scripts = [
'bin/rubik',
]
try:
dirname = os.path.dirname(os.path.abspath(sys.argv[0]))
py_dirname = dirname
sys.path.insert(0, py_dirname)
from rubik import conf
finally:
del sys.path[0]
setup(
name = "python-rubik",
version = conf.VERSION,
requires = [],
description = "Tool to read/write/visualize N-dimensional cubes",
author = "Simone Campagna",
author_email = "simone.campagna@tiscali.it",
url="https://github.com/simone-campagna/rubik",
download_url = 'https://github.com/simone-campagna/rubik/archive/{}.tar.gz'.format(conf.VERSION),
packages = ["rubik",
"rubik.application",
"rubik.application.help_functions",
"rubik.cubes",
"rubik.visualizer",
"rubik.visualizer.impl"
],
scripts = scripts,
package_data = {},
)
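For context, a sketch of how this distutils-based setup script would typically be invoked from the repository root; the commands are illustrative only:
#   python setup.py sdist      # build a source tarball, versioned via rubik.conf
#   python setup.py install    # install the rubik packages and the bin/rubik script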
| 27.333333
| 101
| 0.668164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 921
| 0.591142
|
a710a43bb737f726810f9f83e8727afbf0fbd72e
| 5,130
|
py
|
Python
|
geco/mips/tests/test_set_cover.py
|
FreestyleBuild/GeCO
|
6db1a549b3145b3bc5d3025a9bccc03be6575564
|
[
"MIT"
] | 8
|
2020-12-16T09:59:05.000Z
|
2022-03-18T09:48:43.000Z
|
geco/mips/tests/test_set_cover.py
|
FreestyleBuild/GeCO
|
6db1a549b3145b3bc5d3025a9bccc03be6575564
|
[
"MIT"
] | 101
|
2020-11-09T10:20:03.000Z
|
2022-03-24T13:50:06.000Z
|
geco/mips/tests/test_set_cover.py
|
FreestyleBuild/GeCO
|
6db1a549b3145b3bc5d3025a9bccc03be6575564
|
[
"MIT"
] | 3
|
2021-04-06T13:26:03.000Z
|
2022-03-22T13:22:16.000Z
|
import collections
import itertools
import pytest
from geco.mips.set_cover.yang import *
from geco.mips.set_cover.sun import *
from geco.mips.set_cover.orlib import *
from geco.mips.set_cover.gasse import *
"""
Generic Tests
"""
def test_set_cover_solution_1():
model = set_cover([1], [{0}])
model.optimize()
assert model.getStatus() == "optimal"
assert model.getObjVal() == 1
def test_set_cover_solution_2():
model = set_cover([1, 1, 1], [{0}, {1}, {2}])
model.optimize()
assert model.getStatus() == "optimal"
assert model.getObjVal() == 3
"""
Yang Tests
"""
@pytest.mark.parametrize(
"m,seed", itertools.product([10, 100, 200], [0, 1, 1337, 53115])
)
def test_yang_set_cover_creation(m, seed):
model = yang_instance(m, seed)
assert model.getNVars() == 10 * m
assert model.getNConss() == m
assert model.getObjectiveSense() == "minimize"
@pytest.mark.parametrize(
"m,seed1,seed2",
itertools.product([10, 100, 200], [0, 1, 1337, 53115], [0, 1, 1337, 53115]),
)
def test_yang_parameter(m, seed1, seed2):
params1 = yang_params(m, seed=seed1)
params2 = yang_params(m, seed=seed2)
same_seeds_produce_same_params = seed1 == seed2 and params1 == params2
different_seeds_produce_different_params = seed1 != seed2 and params1 != params2
assert same_seeds_produce_same_params or different_seeds_produce_different_params
"""
Sun Tests
"""
@pytest.mark.parametrize(
"n,m,seed", itertools.product([10, 100, 200], [10, 100, 200], [0, 1, 1337, 53115])
)
def test_sun_set_cover_creation(n, m, seed):
model = sun_instance(n, m, seed)
assert model.getNVars() == n
assert model.getNConss() == m
assert model.getObjectiveSense() == "minimize"
@pytest.mark.parametrize(
"n,m,seed1,seed2",
itertools.product(
[10, 100, 200], [10, 100, 200], [0, 1, 1337, 53115], [0, 1, 1337, 53115]
),
)
def test_sun_params(n, m, seed1, seed2):
params1 = sun_params(n, m, seed=seed1)
params2 = sun_params(n, m, seed=seed2)
same_seeds_produce_same_params = seed1 == seed2 and params1 == params2
different_seeds_produce_different_params = seed1 != seed2 and params1 != params2
assert same_seeds_produce_same_params or different_seeds_produce_different_params
@pytest.mark.parametrize(
"n,m,seed", itertools.product([10, 100, 200], [10, 100, 200], [0, 1, 1337, 53115])
)
def test_sun_at_least_two_elements_in_set(n, m, seed):
_, sets = sun_params(n, m, seed=seed)
counter = collections.defaultdict(int)
for s in sets:
for e in s:
counter[e] += 1
assert all([count >= 2 for count in counter.values()])
@pytest.mark.parametrize(
"n,base_n,base_m,seed1,seed2",
itertools.product(
[10, 100, 200],
[1, 5, 9],
[10, 100, 200],
[0, 1, 1337, 53115],
[0, 1, 1337, 53115],
),
)
def test_expand_sun_params(n, base_n, base_m, seed1, seed2):
base_costs1, base_sets1 = sun_params(base_n, base_m, seed1)
base_costs2, base_sets2 = sun_params(base_n, base_m, seed2)
params1 = costs1, sets1 = expand_sun_params((n,), (base_costs1, base_sets1), seed1)
params2 = costs2, sets2 = expand_sun_params((n,), (base_costs2, base_sets2), seed2)
# test seeding
same_seeds_produce_same_params = seed1 == seed2 and params1 == params2
different_seeds_produce_different_params = seed1 != seed2 and params1 != params2
assert same_seeds_produce_same_params or different_seeds_produce_different_params
# test correct size
assert len(costs1) == len(costs2) == n
assert len(sets1) == len(sets2) == base_m
"""
OR-Library tests
"""
def test_scp_orlib():
instance_name = "scp41.txt"
instance = orlib_instance(instance_name)
assert instance.getNVars() == 1000
assert instance.getNConss() == 200
def test_rail_orlib():
instance_name = "rail507.txt"
instance = orlib_instance(instance_name)
assert instance.getNVars() == 63009
assert instance.getNConss() == 507
"""
Gasse tests
"""
@pytest.mark.parametrize(
"nrows,ncols,density,seed1,seed2",
itertools.product(
[100, 200],
[10, 100, 200],
[0.2, 0.3, 0.5],
[0, 1, 1337, 53115],
[0, 1, 1337, 53115],
),
)
def test_gasse_params(nrows, ncols, density, seed1, seed2):
params1 = gasse_params(nrows, ncols, density, seed=seed1)
params2 = gasse_params(nrows, ncols, density, seed=seed2)
same_seeds_produce_same_params = seed1 == seed2 and params1 == params2
different_seeds_produce_different_params = seed1 != seed2 and params1 != params2
assert same_seeds_produce_same_params or different_seeds_produce_different_params
@pytest.mark.parametrize(
"nrows,ncols,density,seed",
itertools.product(
[100, 200],
[50, 70],
[0.2, 0.3, 0.5],
[0, 1, 1337, 53115],
),
)
def test_gasse_instance(nrows, ncols, density, seed):
model = gasse_instance(nrows, ncols, density, max_coef=10, seed=seed)
assert model.getNVars() == ncols
assert model.getNConss() == nrows
assert model.getObjectiveSense() == "minimize"
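A short usage sketch of the generators exercised by these tests. It assumes the geco package and its PySCIPOpt backend are installed, and mirrors the calls made in the tests above:
from geco.mips.set_cover.yang import yang_instance

# Generate a Yang-style set cover instance: m constraints and 10*m variables,
# sizes that follow directly from the assertions in test_yang_set_cover_creation.
model = yang_instance(50, 0)   # m=50, seed=0
model.optimize()
if model.getStatus() == "optimal":
    print("minimum cover cost:", model.getObjVal())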
| 28.5
| 87
| 0.670175
| 0
| 0
| 0
| 0
| 4,074
| 0.794152
| 0
| 0
| 352
| 0.068616
|
a71112e7354fe0bb8dca61271d9bc6a1f7ca9381
| 8,430
|
py
|
Python
|
lib/overnet/gen_bazel.py
|
PowerOlive/garnet
|
16b5b38b765195699f41ccb6684cc58dd3512793
|
[
"BSD-3-Clause"
] | null | null | null |
lib/overnet/gen_bazel.py
|
PowerOlive/garnet
|
16b5b38b765195699f41ccb6684cc58dd3512793
|
[
"BSD-3-Clause"
] | null | null | null |
lib/overnet/gen_bazel.py
|
PowerOlive/garnet
|
16b5b38b765195699f41ccb6684cc58dd3512793
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2.7
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import sys
# This program generates BUILD.bazel, WORKSPACE, .bazelrc from BUILD.gn
####################################################################################################
# TOKENIZER
Tok = collections.namedtuple('Tok', ['tok', 'value'])
def is_ident_start(c):
return (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z') or c == '_'
def is_ident_char(c):
return is_ident_start(c) or is_digit(c)
def is_digit(c):
return c >= '0' and c <= '9'
def is_whitespace(c):
return c in ' \t\r\n'
sym_name = {
',': 'comma',
'(': 'left_paren',
')': 'right_paren',
'{': 'left_mustache',
'}': 'right_mustache',
'[': 'left_square',
']': 'right_square',
'=': 'equals',
}
def is_symbol(c):
return c in sym_name.keys()
def tok(s):
if s == '':
return [], s
c = s[0]
if is_ident_start(c):
return tok_ident(s)
if c == '#':
return tok_comment(s)
if is_whitespace(c):
return tok_whitespace(s)
if is_symbol(c):
return tok_cont(Tok(sym_name[c], c), s[1:])
if c == '"':
return tok_string(s[1:])
print 'bad character: ' + s[0]
sys.exit(1)
def tok_cont(token, s):
toks, rest = tok(s)
return [token] + toks, rest
def tok_comment(s):
while s != '' and s[0] != '\n':
s = s[1:]
return tok(s[1:])
def tok_ident(s):
ident = ''
while s and is_ident_char(s[0]):
ident += s[0]
s = s[1:]
return tok_cont(Tok('ident', ident), s)
def tok_string(s):
string = ''
while s[0] != '"':
string += s[0]
s = s[1:]
return tok_cont(Tok('string', string), s[1:])
def tok_whitespace(s):
while s and is_whitespace(s[0]):
s = s[1:]
return tok(s)
def tokenize(s):
toks, rest = tok(s)
if rest != '':
print 'dangling: ' + rest
sys.exit(1)
return toks
####################################################################################################
# PARSER
Bundle = collections.namedtuple('Bundle', ['rule', 'name', 'values'])
def take(toks, tok):
if toks[0].tok != tok:
print 'expected %s, got %s' % (tok, toks[0].tok)
sys.exit(1)
return toks.pop(0).value
def maybe_take(toks, tok):
if toks[0].tok != tok:
return None
return toks.pop(0).value
def parse_dict(toks):
d = {}
while not maybe_take(toks, 'right_mustache'):
key = take(toks, 'ident')
take(toks, 'equals')
value = parse_value(toks)
d[key] = value
return d
def parse_list(toks):
l = []
while not maybe_take(toks, 'right_square'):
l.append(parse_value(toks))
if not maybe_take(toks, 'comma'):
take(toks, 'right_square')
break
return l
def parse_value(toks):
if maybe_take(toks, 'left_mustache'):
return parse_dict(toks)
if maybe_take(toks, 'left_square'):
return parse_list(toks)
s = maybe_take(toks, 'string')
if s is not None:
return s
s = maybe_take(toks, 'ident')
if s is not None:
if s == 'true':
return True
if s == 'false':
return False
        print 'bad ident in value position: ' + s
        sys.exit(1)
    print 'bad value token: %r' % toks
    sys.exit(1)
def parse(toks):
bundles = []
while toks:
rule = take(toks, 'ident')
take(toks, 'left_paren')
name = take(toks, 'string')
take(toks, 'right_paren')
body = None
if maybe_take(toks, 'left_mustache'):
body = parse_dict(toks)
bundles.append(Bundle(rule, name, body))
return bundles
####################################################################################################
# CODEGEN
def mapdep(n):
if n[0] == ':':
return n
m = {
'//third_party/googletest:gtest': '@com_google_googletest//:gtest',
'//third_party/googletest:gmock': None,
}
return m[n]
FUZZERS = ['bbr', 'internal_list', 'linearizer',
'packet_protocol', 'receive_mode', 'routing_header']
assert FUZZERS == sorted(FUZZERS)
with open('BUILD.bazel', 'w') as o:
with open('BUILD.gn') as f:
for bundle in parse(tokenize(f.read())):
if bundle.rule == 'source_set':
print >>o, 'cc_library('
print >>o, ' name="%s",' % bundle.name
print >>o, ' srcs=[%s],' % ','.join(
'"%s"' % s for s in bundle.values['sources'])
if 'deps' in bundle.values:
print >>o, ' deps=[%s],' % ','.join(
'"%s"' % mapdep(s) for s in bundle.values['deps'] if mapdep(s) is not None)
print >>o, ')'
if bundle.rule == 'executable':
if bundle.values.get('testonly', False):
print >>o, 'cc_test(shard_count=50,'
else:
print >>o, 'cc_binary('
print >>o, ' name="%s",' % bundle.name
print >>o, ' srcs=[%s],' % ','.join(
'"%s"' % s for s in bundle.values['sources'])
print >>o, ' deps=[%s],' % ','.join(
'"%s"' % mapdep(s) for s in bundle.values['deps'] if mapdep(s) is not None)
print >>o, ')'
for fuzzer in FUZZERS:
print >>o, 'cc_binary('
print >>o, ' name="%s_fuzzer",' % fuzzer
srcs = ['%s_fuzzer.cc' % fuzzer]
helpers_h = '%s_fuzzer_helpers.h' % fuzzer
if os.path.exists(helpers_h):
srcs.append(helpers_h)
print >>o, ' srcs=[%s],' % ', '.join('"%s"' % s for s in srcs)
print >>o, ' deps=[":overnet", ":test_util"],'
print >>o, ')'
WORKSPACE = """
# This file is not checked in, but generated by gen_bazel.py
# Make changes there
git_repository(
name = 'com_google_googletest',
remote = 'https://github.com/google/googletest.git',
commit = 'd5266326752f0a1dadbd310932d8f4fd8c3c5e7d',
)
"""
BAZELRC = """
# This file is not checked in, but generated by gen_bazel.py
# Make changes there
build --client_env=CC=clang
build --copt -std=c++14
build:asan --strip=never
build:asan --copt -fsanitize=address
build:asan --copt -O0
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address
build:asan --action_env=ASAN_OPTIONS=detect_leaks=1:color=always
build:asan --action_env=LSAN_OPTIONS=report_objects=1
build:asan-fuzzer --strip=never
build:asan-fuzzer --copt -fsanitize=fuzzer,address
build:asan-fuzzer --copt -fsanitize-coverage=trace-cmp
build:asan-fuzzer --copt -O0
build:asan-fuzzer --copt -fno-omit-frame-pointer
build:asan-fuzzer --linkopt -fsanitize=fuzzer,address
build:asan-fuzzer --action_env=ASAN_OPTIONS=detect_leaks=1:color=always
build:asan-fuzzer --action_env=LSAN_OPTIONS=report_objects=1
build:msan --strip=never
build:msan --copt -fsanitize=memory
build:msan --copt -O0
build:msan --copt -fsanitize-memory-track-origins
build:msan --copt -fsanitize-memory-use-after-dtor
build:msan --copt -fno-omit-frame-pointer
build:msan --copt -fPIC
build:msan --linkopt -fsanitize=memory
build:msan --linkopt -fPIC
build:msan --action_env=MSAN_OPTIONS=poison_in_dtor=1
build:tsan --strip=never
build:tsan --copt -fsanitize=thread
build:tsan --copt -fno-omit-frame-pointer
build:tsan --copt -DNDEBUG
build:tsan --linkopt -fsanitize=thread
build:tsan --action_env=TSAN_OPTIONS=halt_on_error=1
build:ubsan --strip=never
build:ubsan --copt -fsanitize=undefined
build:ubsan --copt -fno-omit-frame-pointer
build:ubsan --copt -DNDEBUG
build:ubsan --copt -fno-sanitize=function,vptr
build:ubsan --linkopt -fsanitize=undefined
build:ubsan --action_env=UBSAN_OPTIONS=halt_on_error=1:print_stacktrace=1
build:ubsan-fuzzer --strip=never
build:ubsan-fuzzer --copt -fsanitize=fuzzer,undefined
build:ubsan-fuzzer --copt -fno-omit-frame-pointer
build:ubsan-fuzzer --copt -DNDEBUG
build:ubsan-fuzzer --copt -fno-sanitize=function,vptr
build:ubsan-fuzzer --linkopt -fsanitize=fuzzer,undefined
build:ubsan-fuzzer --action_env=UBSAN_OPTIONS=halt_on_error=1:print_stacktrace=1
"""
with open('WORKSPACE', 'w') as o:
o.write(WORKSPACE)
with open('.bazelrc', 'w') as o:
o.write(BAZELRC)
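A small illustration of the tokenizer and parser defined above, run on a hand-written BUILD.gn fragment (Python 2.7, like the script itself; the target names are made up):
sample = '''
source_set("overnet") {
  sources = [ "a.cc", "b.cc" ]
  deps = [ ":test_util" ]
}
'''
bundles = parse(tokenize(sample))
# bundles[0] == Bundle(rule='source_set', name='overnet',
#                      values={'sources': ['a.cc', 'b.cc'], 'deps': [':test_util']})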
| 27.281553
| 100
| 0.57758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,054
| 0.480902
|
a711b022a699f3a1657ba1bf4a22b34ce38cfe57
| 2,878
|
py
|
Python
|
hcplot/scales/colors/hue.py
|
bernhard-42/hcplot
|
1c791e2b19b173b9b98a3d8914095e3c372c9de4
|
[
"Apache-2.0"
] | null | null | null |
hcplot/scales/colors/hue.py
|
bernhard-42/hcplot
|
1c791e2b19b173b9b98a3d8914095e3c372c9de4
|
[
"Apache-2.0"
] | null | null | null |
hcplot/scales/colors/hue.py
|
bernhard-42/hcplot
|
1c791e2b19b173b9b98a3d8914095e3c372c9de4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Bernhard Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils.color import hcl2rgb, rgb2str
import numpy as np
class HueColors(object):
"""
"Escaping RGBland: Selecting Colors for Statistical Graphics"
Achim Zeileis, Wirtschaftsuniversität Wien
Kurt Hornik, Wirtschaftsuniversität Wien
Paul Murrell, The University of Auckland
https://eeecon.uibk.ac.at/~zeileis/papers/Zeileis+Hornik+Murrell-2009.pdf
"""
#
# Accessors
#
@classmethod
def qual(cls, h=(0, 360), c=100, l=65, sizeOrSeries=5, asString=False):
size = sizeOrSeries if isinstance(sizeOrSeries, int) else (len(sizeOrSeries))
d = (h[1] - h[0]) // (size - 1)
result = [hcl2rgb(h[0] + d * i, c, l) for i in range(size)]
return rgb2str(result) if asString else result
@classmethod
def seq(cls, h=260, c=(30, 90), l=(30, 90), fl=None, fc=None, sizeOrSeries=5, asString=False):
size = sizeOrSeries if isinstance(sizeOrSeries, int) else (len(sizeOrSeries))
if isinstance(c, int):
crange = [c] * size
else:
if fc is None:
crange = np.linspace(c[0], c[1], size)
else:
d = c[0] - c[1]
crange = [c[1] + d * fc(x) for x in np.linspace(1, 0, size)]
if isinstance(l, int):
lrange = [l] * size
else:
if fl is None:
lrange = np.linspace(l[0], l[1], size)
else:
d = l[0] - l[1]
lrange = [l[1] + d * fl(x) for x in np.linspace(1, 0, size)]
        result = [hcl2rgb(h, ci, li) for ci, li in zip(crange, lrange)]
        return rgb2str(result) if asString else result
@classmethod
def div(cls, h=[260, 0], c=(100, 0, 100), l=(30, 90, 30), fc=None, fl=None,
sizeOrSeries=7, asString=False):
size = sizeOrSeries if isinstance(sizeOrSeries, int) else (len(sizeOrSeries))
s = size // 2 + 1
return cls.seq(h[0], c[:2], l[:2], fc=fc, fl=fl, sizeOrSeries=s)[:-1] + \
list(reversed(cls.seq(h[1], (c[2], c[1]), (l[2], l[1]), sizeOrSeries=s, fc=fc, fl=fl)))
#
# Info
#
@classmethod
def info(cls):
pass
@classmethod
def toDF(cls, typ):
pass
#
# Quick Accessor
#
def getBrewer(typ, palette, size):
return getattr(HueColors, typ)(palette, size)
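A brief sketch of the palette helpers above; the exact output format depends on hcplot's hcl2rgb/rgb2str utilities, so the results here are only indicative:
# Five evenly spaced hues at fixed chroma/luminance (qualitative palette), as strings:
palette = HueColors.qual(h=(0, 300), c=80, l=60, sizeOrSeries=5, asString=True)
# Seven-colour diverging ramp built from two sequential ramps meeting at a light neutral midpoint:
ramp = HueColors.div(h=[260, 0], sizeOrSeries=7)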
| 30.294737
| 99
| 0.592078
| 2,125
| 0.737847
| 0
| 0
| 1,706
| 0.592361
| 0
| 0
| 911
| 0.316319
|
a71203325ed630e617cb8551726c8b7f07f5f6f8
| 423
|
py
|
Python
|
accounts/migrations/0013_alter_caller_list_file.py
|
Srinjay-hack/Buddy
|
155b9ba58a20bf043493213dd8349f61012fc480
|
[
"Apache-2.0"
] | null | null | null |
accounts/migrations/0013_alter_caller_list_file.py
|
Srinjay-hack/Buddy
|
155b9ba58a20bf043493213dd8349f61012fc480
|
[
"Apache-2.0"
] | null | null | null |
accounts/migrations/0013_alter_caller_list_file.py
|
Srinjay-hack/Buddy
|
155b9ba58a20bf043493213dd8349f61012fc480
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-07-12 14:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0012_alter_caller_estimated_amount'),
]
operations = [
migrations.AlterField(
model_name='caller',
name='list_file',
field=models.FileField(blank=True, null=True, upload_to=''),
),
]
| 22.263158
| 72
| 0.614657
| 330
| 0.780142
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.269504
|
a712ce0eafb15d53426b4b979da8580fdd2c7a4c
| 7,978
|
py
|
Python
|
vk_bots/api.py
|
termisaal/VkBotsApi
|
0957ea46952c260090741aeddf71d50dd950f74c
|
[
"MIT"
] | null | null | null |
vk_bots/api.py
|
termisaal/VkBotsApi
|
0957ea46952c260090741aeddf71d50dd950f74c
|
[
"MIT"
] | null | null | null |
vk_bots/api.py
|
termisaal/VkBotsApi
|
0957ea46952c260090741aeddf71d50dd950f74c
|
[
"MIT"
] | null | null | null |
"""
VK Bots API Wrapper
Copyright (c) 2020-2021 Misaal
"""
import aiohttp
import json
import typing
from .errors import VKAPIError
from .keyboard import Keyboard
from .utils import to_namedtuple, get_random_id
class MethodGroup:
"""Base class for API methods groups"""
def __init__(self, access_token, v):
self._access_token = access_token
self._v = v
async def _api_request(self, method, **kwargs):
for key in list(kwargs):
if kwargs[key] is None:
kwargs.pop(key)
kwargs['access_token'] = self._access_token
kwargs['v'] = self._v
async with aiohttp.ClientSession() as session:
async with session.get(url=f'https://api.vk.com/method/{method}',
params=kwargs) as response:
data = await response.read()
data = json.loads(data)
if 'error' in data:
raise VKAPIError(data['error']['error_code'], data['error']['error_msg'])
else:
return data
class AppWidgets(MethodGroup):
pass
class Board(MethodGroup):
pass
class Docs(MethodGroup):
pass
class Groups(MethodGroup):
pass
class Market(MethodGroup):
pass
class Messages(MethodGroup):
async def createChat(self):
pass
async def delete(self):
pass
async def deleteChatPhoto(self):
pass
async def deleteConversation(self):
pass
async def edit(self):
pass
async def editChat(self):
pass
async def getByConversationMessageId(self):
pass
async def getById(self):
pass
async def getConversationMembers(self):
pass
async def getConversations(self):
pass
async def getConversationsById(self):
pass
async def getHistory(self):
pass
async def getHistoryAttachments(self):
pass
async def getImportantMessages(self):
pass
async def getIntentUsers(self):
pass
async def getInviteLink(self):
pass
async def isMessagesFromGroupAllowed(self):
pass
async def markAsAnsweredConversation(self):
pass
async def markAsImportantConversation(self):
pass
async def markAsRead(self):
pass
async def pin(self):
pass
async def removeChatUser(self):
pass
async def restore(self):
pass
async def search(self):
pass
async def searchConversations(self):
pass
async def send(self,
user_id: int = None,
random_id: int = None,
peer_id: int = None,
peer_ids: typing.Iterable[int] = None,
domain: str = None,
chat_id: int = None,
message: str = None,
lat: float = None,
long: float = None,
attachment: typing.Union[str, typing.Iterable[str]] = None,
reply_to: int = None,
forward_messages: typing.Union[int, typing.Iterable[int]] = None,
forward: dict = None,
sticker_id: int = None,
keyboard: typing.Union[Keyboard, dict] = None,
template: dict = None,
payload: str = None,
content_source: dict = None,
dont_parse_links: typing.Union[bool, int] = None,
disable_mentions: typing.Union[bool, int] = None,
intent: str = None,
subscribe_id: int = None):
if random_id is None:
random_id = get_random_id()
if attachment is not None and type(attachment) != str:
attachment = ','.join(attachment)
if forward_messages is not None and type(forward_messages) != int:
forward_messages = ','.join(map(str, forward_messages))
if keyboard is not None and type(keyboard) != dict:
            keyboard = dict(keyboard)  # convert a Keyboard object to a plain dict for the request
if dont_parse_links is not None and type(dont_parse_links) == bool:
dont_parse_links = int(dont_parse_links)
if disable_mentions is not None and type(disable_mentions) == bool:
disable_mentions = int(disable_mentions)
return await self._api_request('messages.send',
user_id=user_id,
random_id=random_id,
peer_id=peer_id,
peer_ids=peer_ids,
domain=domain,
chat_id=chat_id,
message=message,
lat=lat,
long=long,
attachment=attachment,
reply_to=reply_to,
forward_messages=forward_messages,
forward=forward,
sticker_id=sticker_id,
keyboard=keyboard,
template=template,
payload=payload,
content_source=content_source,
dont_parse_links=dont_parse_links,
disable_mentions=disable_mentions,
intent=intent,
subscribe_id=subscribe_id)
async def sendMessageEventAnswer(self):
pass
async def setActivity(self):
pass
async def setChatPhoto(self):
pass
async def unpin(self):
pass
class Photos(MethodGroup):
pass
class Podcasts(MethodGroup):
pass
class Storage(MethodGroup):
pass
class Stories(MethodGroup):
pass
class Users(MethodGroup):
pass
class Utils(MethodGroup):
pass
class Wall(MethodGroup):
pass
class Api:
"""Class used to perform requests to VK API"""
def __init__(self, access_token, v):
self._access_token = access_token
self._v = v
self.appWidgets = AppWidgets(access_token, v)
self.board = Board(access_token, v)
self.docs = Docs(access_token, v)
self.groups = Groups(access_token, v)
self.market = Market(access_token, v)
self.messages = Messages(access_token, v)
self.photos = Photos(access_token, v)
self.podcasts = Podcasts(access_token, v)
self.storage = Storage(access_token, v)
self.stories = Stories(access_token, v)
self.users = Users(access_token, v)
self.utils = Utils(access_token, v)
self.wall = Wall(access_token, v)
class ApiOld:
__slots__ = ('_method', '_access_token', '_v')
def __init__(self, access_token: str, v: float = 5.131, _method=None):
self._access_token = access_token
self._v = v
if _method is None:
_method = []
self._method = _method
def __getattr__(self, item):
return ApiOld(self._access_token, self._v, self._method + [item])
async def __call__(self, **kwargs):
kwargs['access_token'] = self._access_token
kwargs['v'] = self._v
async with aiohttp.ClientSession() as session:
async with session.get(url=f'https://api.vk.com/method/{".".join(self._method)}',
params=kwargs) as response:
data = await response.read()
data = json.loads(data.decode())
if 'error' in data:
raise VKAPIError(data['error']['error_code'], data['error']['error_msg'])
return to_namedtuple('response', data)
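A hedged usage sketch of the wrapper above; the access token and peer id are placeholders, and the call mirrors Messages.send as defined in this module:
import asyncio

async def main():
    api = Api(access_token='YOUR_GROUP_TOKEN', v=5.131)
    # random_id is filled in automatically by Messages.send when omitted
    await api.messages.send(peer_id=2000000001, message='Hello from the bot!')

asyncio.run(main())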
| 27.701389
| 93
| 0.534971
| 7,718
| 0.96741
| 0
| 0
| 0
| 0
| 5,747
| 0.720356
| 414
| 0.051893
|